import math
from hypothesis import given, assume
from hypothesis.strategies import sampled_from, floats, data, integers
from pytest import raises
from renard.renard import (RenardSeriesKey, series, rrange, find_less_than_or_equal, find_greater_than_or_equal,
find_nearest,
find_less_than, find_greater_than, find_nearest_few, open_rrange, R10, precision)
@given(series_key=sampled_from(RenardSeriesKey))
def test_series_cardinality(series_key):
assert len(series(series_key)) == series_key.cardinality
@given(series_key=sampled_from(RenardSeriesKey),
low=floats(min_value=1e-35, max_value=1e35, allow_nan=False, allow_infinity=False))
def test_rrange_cardinality_over_one_order_of_magnitude(series_key, low):
high = low * 10.0
assume(math.isfinite(high))
values = list(rrange(series_key, low, high))
include_end = bool(high in values)
cardinality = series_key.cardinality + include_end
assert len(values) == cardinality
@given(series_key=sampled_from(RenardSeriesKey),
low=floats(min_value=1e-35, max_value=1e35, allow_nan=False, allow_infinity=False),
high=floats(min_value=1e-35, max_value=1e35, allow_nan=False, allow_infinity=False))
def test_rrange_strictly_ordered(series_key, low, high):
assume(low < high)
values = list(rrange(series_key, low, high))
assert all(values[i] < values[i+1] for i in range(len(values)-1))
@given(series_key=sampled_from(RenardSeriesKey),
low=floats(min_value=1e-35, max_value=1e35, allow_nan=False, allow_infinity=False))
def test_open_rrange_cardinality_over_one_order_of_magnitude(series_key, low):
high = low * 10.0
assume(math.isfinite(high))
values = list(open_rrange(series_key, low, high))
cardinality = series_key.cardinality
assert len(values) == cardinality
@given(series_key=sampled_from(RenardSeriesKey),
low=floats(min_value=1e-35, max_value=1e35, allow_nan=False, allow_infinity=False),
high=floats(min_value=1e-35, max_value=1e35, allow_nan=False, allow_infinity=False))
def test_open_rrange_strictly_ordered(series_key, low, high):
assume(low < high)
values = list(open_rrange(series_key, low, high))
assert all(values[i] < values[i+1] for i in range(len(values)-1))
@given(series_key=sampled_from(RenardSeriesKey),
value=floats(min_value=1e-35, max_value=1e35, allow_nan=False, allow_infinity=False))
def test_less_than_or_equal(series_key, value):
assert find_less_than_or_equal(series_key, value) <= value
@given(data())
def test_less_than_or_equal_returns_value_from_series(data):
series_key = data.draw(sampled_from(RenardSeriesKey))
value = data.draw(sampled_from(series(series_key)))
assert find_less_than_or_equal(series_key, value) == value
@given(series_key=sampled_from(RenardSeriesKey),
value=floats(min_value=1e-35, max_value=1e35, allow_nan=False, allow_infinity=False))
def test_less_than(series_key, value):
assert find_less_than(series_key, value) < value
@given(series_key=sampled_from(RenardSeriesKey),
value=floats(min_value=1e-35, max_value=1e35, allow_nan=False, allow_infinity=False))
def test_greater_than_or_equal(series_key, value):
assert find_greater_than_or_equal(series_key, value) >= value
@given(data())
def test_greater_than_or_equal_returns_value_from_series(data):
series_key = data.draw(sampled_from(RenardSeriesKey))
value = data.draw(sampled_from(series(series_key)))
assert find_greater_than_or_equal(series_key, value) == value
@given(series_key=sampled_from(RenardSeriesKey),
value=floats(min_value=1e-35, max_value=1e35, allow_nan=False, allow_infinity=False))
def test_greater_than(series_key, value):
assert find_greater_than(series_key, value) > value
@given(series_key=sampled_from(RenardSeriesKey),
value=floats(min_value=1e-35, max_value=1e35, allow_nan=False, allow_infinity=False))
def test_find_nearest_in_range(series_key, value):
nearest = find_nearest(series_key, value)
assert find_less_than_or_equal(series_key, value) <= nearest <= find_greater_than_or_equal(series_key, value)
@given(series_key=sampled_from(RenardSeriesKey),
value=floats(min_value=1e-35, max_value=1e35, allow_nan=False, allow_infinity=False))
def test_find_nearest_is_nearest(series_key, value):
nearest = find_nearest(series_key, value)
lower = find_less_than_or_equal(series_key, value)
upper = find_greater_than_or_equal(series_key, value)
assert (((nearest == lower) and (nearest - lower <= upper - nearest))
or ((nearest == upper) and (upper - nearest <= nearest - lower)))
@given(data())
def test_nearest_returns_value_from_series(data):
series_key = data.draw(sampled_from(RenardSeriesKey))
value = data.draw(sampled_from(series(series_key)))
assert find_nearest(series_key, value) == value
@given(series_key=sampled_from(RenardSeriesKey),
value=floats(min_value=1e-35, max_value=1e35, allow_nan=False, allow_infinity=False),
num=sampled_from((1, 2, 3)))
def test_find_nearest_few_has_correct_cardinality(series_key, value, num):
assert len(find_nearest_few(series_key, value, num)) == num
@given(series_key=sampled_from(RenardSeriesKey),
value=floats(min_value=1e-35, max_value=1e35, allow_nan=False, allow_infinity=False),
num=integers())
def test_find_nearest_few_raises_error_with_num_out_of_range(series_key, value, num):
assume(num not in {1, 2, 3})
with raises(ValueError):
find_nearest_few(series_key, value, num)
@given(series_key=sampled_from(RenardSeriesKey),
value=floats(min_value=1e-35, max_value=1e35, allow_nan=False, allow_infinity=False))
def test_find_nearest_three_includes_at_least_one_less(series_key, value):
assert any(v < value for v in find_nearest_few(series_key, value))
@given(series_key=sampled_from(RenardSeriesKey),
value=floats(min_value=1e-35, max_value=1e35, allow_nan=False, allow_infinity=False))
def test_find_nearest_three_includes_at_least_one_greater(series_key, value):
assert any(v > value for v in find_nearest_few(series_key, value))
def test_rrange_start_infinite_raises_value_error():
    with raises(ValueError):
        inf = float("inf")
        rrange(R10, inf, 10)
def test_rrange_stop_infinite_raises_value_error():
    with raises(ValueError):
        rrange(R10, 10, float("inf"))
def test_rrange_start_too_small_raises_value_error():
    with raises(ValueError):
        rrange(R10, 0, 10)
def test_rrange_stop_too_small_raises_value_error():
    with raises(ValueError):
        rrange(R10, 10, 0)
def test_rrange_start_stop_in_wrong_order_raises_value_error():
    with raises(ValueError):
        rrange(R10, 10, 8)
def test_open_rrange_start_infinite_raises_value_error():
    with raises(ValueError):
        inf = float("inf")
        open_rrange(R10, inf, 10)
def test_open_rrange_stop_infinite_raises_value_error():
    with raises(ValueError):
        open_rrange(R10, 10, float("inf"))
def test_open_rrange_start_too_small_raises_value_error():
    with raises(ValueError):
        open_rrange(R10, 0, 10)
def test_open_rrange_stop_too_small_raises_value_error():
    with raises(ValueError):
        open_rrange(R10, 10, 0)
def test_open_rrange_start_stop_in_wrong_order_raises_value_error():
    with raises(ValueError):
        open_rrange(R10, 10, 8)
def test_illegal_series_key_raises_value_error():
with raises(ValueError):
series(13)
@given(series_key=sampled_from(RenardSeriesKey))
def test_series_precision_is_positive(series_key):
assert precision(series_key) > 0
def test_illegal_precision_series_key_raises_value_error():
with raises(ValueError):
precision(object())
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from image_cropping import ImageRatioField
from ckeditor.fields import RichTextField
from multisitesutils.models import SiteModel
from magicgallery.models import GalleryItem
from .models import Widget, SiteLink
from .managers import BaseContentManager
class BaseContent(SiteModel):
    # must be defined in child models; this is the value used by the
    # get_widget_type method
_widget_type = None
PICTURE_FILTERS = (
('', 'Original'),
('image-xpro2', 'XPro2'),
('image-willow', 'Willow'),
('image-inkwell', 'Inkwell'),
('image-walden', 'Walden'),
('image-toaster', 'Toaster'),
('image-sierra', 'Sierra'),
('image-nashville', 'Nashville'),
('image-mayfair', 'Mayfair'),
('image-kelvin', 'Kelvin'),
('image-hudson', 'Hudson'),
('image-brannan', 'Brannan'),
('image-1977', '1977'),
('image-blur', 'Blur'),
)
widget = models.ForeignKey(Widget, verbose_name=_('widget'))
title = models.CharField(
_('title'), max_length=128, default='Edit title',
blank=True)
short_content = models.CharField(
_('short content'), max_length=512,
default='Edit content',
blank=True)
long_content = RichTextField(
_('long content'), default='', blank=True)
picture = models.ForeignKey(GalleryItem, null=True, blank=True)
picture_filter = models.CharField(
_('Image Filter'), max_length=32, default='',
choices=PICTURE_FILTERS, blank=True)
order = models.PositiveIntegerField(_('order'), default=99)
is_active = models.BooleanField(_('active'), default=True)
site_link = models.ForeignKey(
SiteLink, verbose_name=_('link'), null=True, blank=True,
on_delete=models.SET_NULL,
        help_text='''By selecting a target link, clicking on this content
        will send the user to another page''')
link_label = models.CharField(
_('link label'), max_length=64, default='', blank=True)
objects = models.Manager()
site_objects = BaseContentManager()
class Meta:
abstract = True
ordering = ['order']
def __unicode__(self):
return '{0}.{1}'.format(self.is_active, self.title)
def _content(self):
pass
@classmethod
def style_list(cls):
raise Exception("A 'style_list' method is missing for {0}".format(
cls))
@classmethod
def can_edit_description(cls):
return True
@classmethod
def picture_upload_help_text(cls_obj):
fields = cls_obj._meta.fields
        cropping_lst = list(filter(lambda m: isinstance(m, ImageRatioField), fields))
if cropping_lst:
cropping_field = cropping_lst[0]
height = cropping_field.height
width = cropping_field.width
shape = 'rectangular'
if width == height:
shape = 'squared'
size = '1MB'
if width > 1000 or height > 1000:
size = '2MB'
return 'Use a {0} image ({1}x{2}), max {3}'.format(
shape, width, height, size)
return ''
@property
def content(self):
return self._content()
@property
def model_name(self):
return self._meta.model_name
@property
def enable_picture(self):
"""
make contents have images manipulation, for contents such as Icon
this method must be overwritten as False
"""
return True
@property
def link_url(self):
if self.site_link:
url = self.site_link.url
# means it belongs to landingpage
if url.startswith("#"):
return '/' + url
return url
return ''
@property
def link_name(self):
if self.site_link:
return self.link_label or self.site_link.name or ''
@property
def allow_delete(self):
return True
@property
def delete_url(self):
reverse_name = 'magiccontent.{0}.delete'.format(self._meta.model_name)
return reverse(reverse_name, args=[self.widget.pk, self.pk])
@property
def update_url(self):
reverse_name = 'magiccontent.{0}.update'.format(self._meta.model_name)
return reverse(reverse_name, args=[self.widget.pk, self.pk])
|
import torch
import numpy as np
import torch.nn.functional as F
import torch.nn as nn
from torch import optim
from graphgallery.nn.models import TorchKeras
from graphgallery.nn.layers.pytorch import GCNConv, activations
from graphgallery.nn.metrics.pytorch import Accuracy
from graphgallery.nn.init.pytorch import glorot_uniform, zeros
class SimPGCN(TorchKeras):
def __init__(self,
in_features,
out_features,
hids=[64],
acts=[None],
lambda_=5.0,
gamma=0.1,
dropout=0.5,
weight_decay=5e-4,
lr=0.01,
bias=False):
super().__init__()
self.lambda_ = lambda_
self.gamma = gamma
assert hids, "hids should not empty"
layers = nn.ModuleList()
act_layers = nn.ModuleList()
inc = in_features
for hid, act in zip(hids, acts):
            layers.append(GCNConv(inc,
hid,
bias=bias))
act_layers.append(activations.get(act))
inc = hid
layers.append(GCNConv(inc,
out_features,
bias=bias))
act_layers.append(activations.get(None))
self.layers = layers
self.act_layers = act_layers
self.scores = nn.ParameterList()
self.bias = nn.ParameterList()
self.D_k = nn.ParameterList()
self.D_bias = nn.ParameterList()
for hid in [in_features] + hids:
self.scores.append(nn.Parameter(torch.FloatTensor(hid, 1)))
self.bias.append(nn.Parameter(torch.FloatTensor(1)))
self.D_k.append(nn.Parameter(torch.FloatTensor(hid, 1)))
self.D_bias.append(nn.Parameter(torch.FloatTensor(1)))
# discriminator for ssl
self.linear = nn.Linear(hids[-1], 1)
self.compile(loss=nn.CrossEntropyLoss(),
optimizer=optim.Adam(self.parameters(), lr=lr, weight_decay=weight_decay),
metrics=[Accuracy()])
self.dropout = nn.Dropout(dropout)
self.reset_parameters()
def reset_parameters(self):
for layer in self.layers:
layer.reset_parameters()
for s in self.scores:
glorot_uniform(s)
for b in self.bias:
            # zeros are used here; filling b with a positive value instead
            # would push the score s closer to 1 at the beginning
zeros(b)
for Dk in self.D_k:
glorot_uniform(Dk)
for b in self.D_bias:
zeros(b)
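    # Forward pass (descriptive note): each layer's output mixes propagation over
    # the original graph (adj) and over a feature kNN graph (adj_knn), gated by the
    # learned node-wise score s, plus a gamma-weighted term Dk applied to the layer
    # transform of the features alone. During training the penultimate embeddings
    # are also returned for the self-supervised regression loss.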
def forward(self, x, adj, adj_knn=None):
adj_knn = self.from_cache(adj_knn=adj_knn)
gamma = self.gamma
embeddings = None
for ix, (layer, act) in enumerate(zip(self.layers, self.act_layers)):
s = torch.sigmoid(x @ self.scores[ix] + self.bias[ix])
Dk = x @ self.D_k[ix] + self.D_bias[ix]
x = s * act(layer(x, adj)) + (1 - s) * act(layer(x, adj_knn)) + gamma * Dk * act(layer(x))
if ix < len(self.layers) - 1:
x = self.dropout(x)
if ix == len(self.layers) - 2:
embeddings = x.clone()
# self.ss = torch.cat((s_i.view(1, -1), s_o.view(1, -1), gamma * Dk_i.view(1, -1), gamma * Dk_o.view(1, -1)), dim=0)
if self.training:
return x, embeddings
else:
return x
def regression_loss(self, embeddings, pseudo_labels=None, node_pairs=None):
pseudo_labels, node_pairs = self.from_cache(pseudo_labels=pseudo_labels,
node_pairs=node_pairs)
k = 10000
if len(node_pairs[0]) > k:
sampled = np.random.choice(len(node_pairs[0]), k, replace=False)
embeddings0 = embeddings[node_pairs[0][sampled]]
embeddings1 = embeddings[node_pairs[1][sampled]]
embeddings = self.linear(torch.abs(embeddings0 - embeddings1))
loss = F.mse_loss(embeddings, pseudo_labels[sampled].unsqueeze(-1), reduction='mean')
else:
embeddings0 = embeddings[node_pairs[0]]
embeddings1 = embeddings[node_pairs[1]]
embeddings = self.linear(torch.abs(embeddings0 - embeddings1))
loss = F.mse_loss(embeddings, pseudo_labels.unsqueeze(-1), reduction='mean')
return loss
def train_step_on_batch(self,
x,
y=None,
out_weight=None,
device="cpu"):
self.train()
optimizer = self.optimizer
loss_fn = self.loss
metrics = self.metrics
optimizer.zero_grad()
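        # x is expected to be a 5-tuple (features, adj, adj_knn, pseudo_labels, node_pairs);
        # the first three are passed to forward(), the last two feed the regression loss.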
assert len(x) == 5
*x, pseudo_labels, node_pairs = x
out, embeddings = self(*x)
if out_weight is not None:
out = out[out_weight]
# TODO
loss = loss_fn(out, y) + self.lambda_ * self.regression_loss(embeddings, pseudo_labels, node_pairs)
loss.backward()
optimizer.step()
for metric in metrics:
metric.update_state(y.cpu(), out.detach().cpu())
results = [loss.cpu().detach()] + [metric.result() for metric in metrics]
return dict(zip(self.metrics_names, results))
|
from sys import stdin
from math import ceil, log
from decimal import Decimal as d
class RMQ(object):
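    """Segment tree for range-minimum queries with point updates.
    The input list is padded with Decimal('Infinity') up to the next power of two.
    The tree is stored 1-indexed in self.e (index 1 is the root; the leaves occupy
    the second half of the array). Queries and updates use 1-based, inclusive positions.
    """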
def __init__(self, numbers):
self.e = []
n = len(numbers)
if (n & (n-1))!=0:
x = ceil(log(n, 2))
            self.n = 2 ** x
while n != self.n:
numbers.append(d('Infinity'))
n += 1
else: self.n = n
temp = numbers[::-1]
for i in range(0, 2*(self.n-1), 2):
temp.append(min(temp[i], temp[i+1]))
temp.append(d('Infinity'))
self.e = temp[::-1]
self.size = len(temp)
def recover(self, idx):
while idx > 1:
parent = idx//2
self.e[parent] = min(self.e[2*parent], self.e[2*parent+1])
idx = parent
def min(self, left, right, begin = 1, end = None, idx = 1):
if end is None: end = self.n
if left==begin and right==end:
return self.e[idx]
mid = (begin + end) // 2
if left >= begin and right <= mid:
return self.min(left, right, begin, mid, 2*idx)
elif left > mid and right <= end:
return self.min(left, right, mid+1, end, 2*idx+1)
else:
left_child = self.min(left, mid, begin, mid, 2*idx)
right_child = self.min(mid+1, right, mid+1, end, 2*idx+1)
return min(left_child, right_child)
def set(self, origin, value):
idx = self.size//2+origin-1
self.e[idx] = value
self.recover(idx)
if __name__ == '__main__':
f = open('source/rmq2.txt', 'r')
n, m = map(int, f.readline().split())
numbers = list(map(int, f.readline().split()))
rmq = RMQ(numbers)
for i in range(0, m):
c, x, y = f.readline().split()
if c == 'Min':
print(rmq.min(int(x), int(y)))
elif c == 'Set':
rmq.set(int(x), int(y))
|
""" pluginSearch.py
* This class looks for plugins and creates a dictionary containing.
the plugin models. Plugin objects can be instanciated elsewhere.
The plugins are identified by a certain string contained in the
first x charcters of the python file. Plugins should have a .py
extension.
John Eslick, Carnegie Mellon University, 2014
See LICENSE.md for license and copyright details.
"""
import sys
import os
import importlib
import logging
import imp
_log = logging.getLogger("foqus." + __name__)
class plugins():
"""
This class maintains a list of DFO solver plugins
"""
def __init__(self, idString, pathList, charLimit=200):
self.idString = idString
self.pathList = pathList
self.charLimit = charLimit
self.plugins = {}
self.importPlugins()
def importPlugins(self):
'''
check files in self.pathList to see if they are plugins
'''
for p in self.pathList:
if os.path.exists(p):
sys.path.append(p)
pgfiles = os.listdir(p)
for fname in pgfiles:
mname = fname.rsplit('.', 1) #split off extension
if len(mname) > 1 and mname[1] == 'py':
with open(os.path.join(p, fname), 'r',
encoding="utf-8") as f:
try:
l = self.idString in f.read(self.charLimit)
except:
_log.exception("error reading py file")
l = False
if not l:
continue
try:
if mname[0] in self.plugins:
_log.info("Reloading Plugin: {}".format(
os.path.join(p, fname)))
self.plugins[mname[0]] = \
imp.reload(self.plugins[mname[0]])
else:
logging.getLogger("foqus." + __name__).\
info("Loading Plugin: " + \
os.path.join(p, fname))
self.plugins[mname[0]] = \
importlib.import_module(mname[0])
except:
_log.exception("Error Loading Plugin: {}".format(
os.path.join(p, fname)))
# Now check that the plugins have what they need to be used
for pkey, p in list(self.plugins.items()):
try:
av = p.checkAvailable()
except:
av = False
if not av:
del self.plugins[pkey]
_log.info("Removing plugin, due to missing dependency: " + pkey)
|
# This is a simple program that verifies a credit card number using Luhn's algorithm.
while True:
card_number=list(map(int,input("Enter the card number here")))
if len(card_number)==16:
break
checker=card_number.pop()
for x in range(14,-1,-2):
card_number[x]*=2
if card_number[x]/10>=1:
card_number[x]=card_number[x]%10+1
new=sum(card_number)
new =(new*9)%10
if new ==checker:
print("yes that is a valid card number")
else: print("no that is not a valid card number")
|
#!/usr/bin/python
import mysql.connector
##
# Create .fasta file for trans and prot seqs from gene_db database
##
### Start up connection
dbh_gene_db = mysql.connector.connect(user='s12', password='jazzduck', database='gene_db')
cursor_gene_db = dbh_gene_db.cursor()
# select all gene + transcripts from trancript table
query = ("SELECT gene.gene_id ,trans_id, sequence, translation FROM gene, transcript WHERE gene.id=transcript.gene_id;")
cursor_gene_db.execute(query)
trans_out = "/home/grobeln2/trans.fasta"
prot_out = "/home/grobeln2/prot.fasta"
fh_trn = open(trans_out, 'w')
fh_prt = open(prot_out, 'w')
for line in cursor_gene_db:
# generate seq name
seq_name = ">" + line[0] + "_" + line[1]
# write to file
fh_trn.write("%s\n%s\n" % (seq_name, line[2]))
fh_prt.write("%s\n%s\n" % (seq_name, line[3]))
# close files
fh_trn.close()
fh_prt.close()
|
from util import NUM_NODES, read_file, get_initial_state, open_nodes, calc_heuristics
def pathfinding():
open_states = []
initial_state = get_initial_state(nodes)
open_states.append(initial_state)
while len(open_states) > 0:
current_state = open_states[0]
if len(current_state.path) == NUM_NODES:
return current_state
open_states = open_nodes(nodes, current_state, open_states)
open_states.pop(0)
open_states.sort(key=lambda x: (x.distance + x.node.h))
return None
nodes = read_file()
calc_heuristics(nodes)
final_state = pathfinding()
print()
if final_state is None:
    print("No path visiting every node was found.")
else:
    print("FINAL_PATH:")
    for n in final_state.path:
        print(n.index)
|
MOVIE_API_KEY = '<19eb2c37e12c55e93facdf16eae63d25>'
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import logging
from odoo import fields, models
_logger = logging.getLogger(__name__)
class ResPartner(models.Model):
_inherit = 'res.partner'
contract_count = fields.Integer(compute='_compute_contract_count', string='Contract Count')
contract_ids = fields.One2many('sale.contract', 'partner_id', 'Contract')
def _compute_contract_count(self):
# retrieve all children partners and prefetch 'parent_id' on them
all_partners = self.with_context(active_test=False).search([('id', 'child_of', self.ids)])
all_partners.read(['parent_id'])
_logger.warning(all_partners)
sale_contract_groups = self.env['sale.contract'].read_group(
domain=[('partner_id', 'in', all_partners.ids)],
fields=['partner_id'], groupby=['partner_id']
)
_logger.warning(sale_contract_groups)
partners = self.browse()
for group in sale_contract_groups:
partner = self.browse(group['partner_id'][0])
while partner:
if partner in self:
partner.contract_count += group['partner_id_count']
partners |= partner
partner = partner.parent_id
(self - partners).contract_count = 0
|
import numpy as np
import pandas as pd
import networkx as nx
class Simulador_MIP():
def __init__(self, archivo_domestico, archivo_total):
self.carga_datos(archivo_domestico, archivo_total)
        self.define_variables_calculadas()
self.vectores_coeficientes()
self.matrices_coeficientes_tecnicos()
self.variables_macroeconomicas()
self.crea_red()
def carga_datos(self,archivo_domestico, archivo_total):
        # TODO: make the data import faster
datosD = pd.read_excel(archivo_domestico,skiprows=5,index_col=0)
datosT = pd.read_excel(archivo_total,skiprows=5,index_col=0)
flujos_intermedios = ["111110 - Cultivo de soya","931810 - Actividades de seguridad nacional"]
self.Zd = self.obtenerSubmatriz(flujos_intermedios,flujos_intermedios,datosD)
self.Zt = self.obtenerSubmatriz(flujos_intermedios,flujos_intermedios,datosT)
demanda_final = ["CP - Consumo Privado","YA0 - Discrepancia estadística"]
self.Fd = self.obtenerSubmatriz(flujos_intermedios,demanda_final,datosD)
Ft = self.obtenerSubmatriz(flujos_intermedios,demanda_final,datosT)
self.Ft = np.delete(Ft,-2,1)
self.etiSCIAN = datosD.loc[flujos_intermedios[0]:flujos_intermedios[1]].index
eti = self.etiSCIAN.str.slice(9)
reemp_dic = {
"Fabricación de" : "Fab.",
"Cultivo de" : "C.",
"Explotación de" : "Exp.",
"Producción" : "Prod.",
"producción" : "prod.",
"Minería de" : "Min.",
"Elaboración de" : "Elab.",
"Transporte de" : "Trans.",
"Transporte" : "Trans.",
"Alquiler" : "Alq.",
"Servicios de" : "S.",
"Servicios" : "S."
}
eti2 = list(self.etiSCIAN)
for key,val in reemp_dic.items():
eti2 = list(map(lambda s: s.replace(key, val), eti2))
self.eti2 = eti2
self.REM = self.obtenerSubmatriz(["D.1 - Remuneración de los asalariados"]*2,flujos_intermedios,datosD)
self.PT = self.obtenerSubmatriz(["PT - Puestos de trabajo"]*2,flujos_intermedios,datosD)
self.PTR = self.obtenerSubmatriz(["PTR - Puestos de trabajo remunerados"]*2,flujos_intermedios,datosD)
self.PIB = self.obtenerSubmatriz(["B.1bP - Producto interno bruto"]*2,flujos_intermedios,datosD)
def obtenerSubmatriz(self,renglones,columnas,datos):
datos2 = datos.fillna(0)
datos_interes = datos2.loc[renglones[0]:renglones[1],columnas[0]:columnas[1]]
matriz = np.array(datos_interes)
return matriz
def invD(self, vector):
if vector.ndim > 1 :
vector = vector.flatten()
invertida = np.where(vector==0,0,1/vector)
diagonal = np.diag(invertida)
return diagonal
    def define_variables_calculadas(self):
self.n = len(self.Zd)
self.fd = np.sum(self.Fd,1)
ft = np.sum(self.Ft,1)
self.x = np.sum(self.Zd,1) + self.fd
self.M = self.Zt - self.Zd
Fm = self.Ft - self.Fd
Iota = np.ones([1,self.n])
def vectores_coeficientes(self):
invD_x = self.invD(self.x)
self.rem = np.matmul(self.REM,invD_x)
self.pt = np.matmul(self.PT,invD_x)
self.ptr = np.matmul(self.PTR,invD_x)
self.pib = np.matmul(self.PIB,invD_x)
imp = np.matmul(np.sum(self.M,0),invD_x)
def matrices_coeficientes_tecnicos(self):
        # A = Zd . invD[x]   (matrix of domestic technical coefficients)
        self.A = np.matmul(self.Zd,self.invD(self.x))
        # Am = M . invD[x]   (matrix of imported technical coefficients)
        self.Am = np.matmul(self.M,self.invD(self.x))
        # At = A + Am   (matrix of total technical coefficients)
        self.At = self.A + self.Am
        # L = Inverse[IdentityMatrix[n] - A]   (Leontief inverse matrix)
        L = np.linalg.inv( np.identity(self.n) - self.A)
def variables_macroeconomicas(self):
        # xtot = Total[x]   (total gross output)
        self.xtot = np.sum(self.x)
        # PIBtot = Total[PIB]   (total GDP)
        self.PIBtot = np.sum(self.PIB)
        # REMtot = Total[REM]   (total compensation of employees)
        self.REMtot = np.sum(self.REM)
        # PTtot = Total[PT]   (total jobs)
        self.PTtot = np.sum(self.PT)
        # PTRtot = Total[PTR]   (total paid jobs)
        self.PTRtot = np.sum(self.PTR)
        # IMPtot = Total[Total[M]]   (total imports)
        self.IMPtot = np.sum(self.M)
        # macro0 = {xtot, PIBtot, REMtot, PTtot, PTRtot, IMPtot}   (list of original macro variables)
        self.macro0 = np.array([self.xtot, self.PIBtot, self.REMtot, self.PTtot, self.PTRtot, self.IMPtot])
def simular_tablas(self,lista):
etiMacro = ["Producción (millones de pesos)", "PIB (millones de pesos)", "Remuneraciones (millones de pesos)", "Puestos de Trabajo", "Puestos de Trabajo Remunerados", "Importaciones (millones de pesos)"]
        # numSust = Length[lista]   (number of inputs to substitute)
numSust = len(lista)
Amod,AMmod = self.simula_adyacencia(lista)
        # Lmod = Inverse[IdentityMatrix[n] - Amod]   (modified domestic Leontief inverse)
Lmod = np.linalg.inv( np.identity(self.n) - Amod)
        # xmod = Lmod . fd   (modified gross output vector)
xmod = np.matmul(Lmod , self.fd)
        # PIBmod = pib . xmod   (modified total GDP)
PIBmod = np.matmul(self.pib, xmod)[0]
        # PIBmodVec = pib*xmod   (modified GDP vector by activity)
PIBmodVec = self.pib*xmod
        # REMmod = rem . xmod   (modified total compensation)
REMmod = np.matmul(self.rem, xmod)[0]
        # REMmodVec = rem*xmod   (modified compensation vector by activity)
REMmodVec = self.rem*xmod
        # PTmod = pt . xmod   (modified total jobs)
PTmod = np.matmul(self.pt , xmod)[0]
        # PTmodVec = pt*xmod   (modified jobs vector by activity)
PTmodVec = self.pt*xmod
        # PTRmod = ptr . xmod   (modified total paid jobs)
PTRmod = np.matmul(self.ptr , xmod)[0]
        # PTRmodVec = ptr*xmod   (modified paid-jobs vector by activity)
PTRmodVec = self.ptr*xmod
        # IMPmod = Total[AMmod] . xmod   (modified total imports)
IMPmod = np.matmul( np.sum(AMmod,0), xmod)
        # -- Generation of results tables
        # Macro results table
resMacro = [np.sum(xmod) , PIBmod , REMmod , PTmod , PTRmod , IMPmod ] - self.macro0
tablaMacro = pd.DataFrame({"Variable":etiMacro,
"Niveles originales (millones de pesos)":self.macro0,
"Variación (millones de pesos)":resMacro.flatten(),
"Variación porcentual (%)":(resMacro/self.macro0*100).flatten()})
        # Table of the sectors most affected via GDP
tablaPIB = pd.DataFrame({"Variable":self.etiSCIAN,
"Niveles originales (millones de pesos)":self.PIB.flatten(),
"Variación (millones de pesos)":(PIBmodVec - self.PIB).flatten(),
"Variación porcentual (%)":((np.matmul(PIBmodVec , self.invD(self.PIB)) - 1)*100).flatten()})
        # Table of the sectors most affected via compensation
tablaREM = pd.DataFrame({"Variable":self.etiSCIAN,
"Niveles originales (millones de pesos)":self.REM.flatten(),
"Variación (millones de pesos)":(REMmodVec - self.REM).flatten(),
"Variación porcentual (%)":((np.matmul(REMmodVec , self.invD(self.REM)) - 1)*100).flatten()})
        # Table of the sectors most affected via paid jobs
tablaPTR = pd.DataFrame({"Variable":self.etiSCIAN,
"Niveles originales (Puestos)":self.PTR.flatten(),
"Variación (Puestos)":(PTRmodVec - self.PTR).flatten(),
"Variación porcentual (%)":((np.matmul(PTRmodVec , self.invD(self.PTR)) - 1)*100).flatten()})
        # Table of the sectors most affected via jobs
tablaPT = pd.DataFrame({"Variable":self.etiSCIAN,
"Niveles originales (Puestos)":self.PT.flatten(),
"Variación (Puestos)":(PTmodVec - self.PT).flatten(),
"Variación porcentual (%)":((np.matmul(PTmodVec , self.invD(self.PT)) - 1)*100).flatten()})
return [tablaMacro, tablaPIB, tablaREM, tablaPTR, tablaPT]
def simula_adyacencia(self,lista):
        # Amod = ReplacePart[A, listaSustA]    (modified technical coefficients matrix)
        # AMmod = ReplacePart[Am, listaSustAm]  (modified imported coefficients matrix)
Amod = self.A.copy()
AMmod = self.Am.copy()
for elem in lista:
Amod[elem[0],elem[1]] += (elem[2]/100)*self.Am[elem[0],elem[1]]
AMmod[elem[0],elem[1]] = (1 - elem[2]/100)* self.Am[elem[0],elem[1]]
return [Amod,AMmod]
def crea_red(self):
DG = nx.DiGraph()
DG.add_edges_from([(i,j,{"domestico":self.A[i,j],"total":destino,"importaciones":self.Am[i,j]})
for i, origen in enumerate(self.At)
for j, destino in enumerate(origen)
if destino > 0 and i != j and not i in [446, 447]])
for n in DG.nodes:
DG.nodes[n]["etiqueta"] = self.etiSCIAN[n]
self.red = DG
def cerradura(self, elementos,funcion_pseudoclausura, red, **arg):
sub_red = nx.DiGraph()
nodos_elementos = [(x,red.nodes[x]) for x in elementos]
sub_red.add_nodes_from(nodos_elementos)
clausura = funcion_pseudoclausura(sub_red,red,**arg)
while sub_red.nodes != clausura.nodes and sub_red.edges != clausura.edges:
sub_red = clausura
clausura = funcion_pseudoclausura(sub_red,red,**arg)
return sub_red
def presouclausura_atras(self,elementos,red,nivel,filtro):
presouclausura = elementos.copy()
for x in elementos.nodes:
for y in red.nodes :
if red.has_edge(y,x) and red.edges[y,x][nivel]>filtro:
if not presouclausura.has_node(y):
presouclausura.add_node(y,**red.nodes[y])
if not presouclausura.has_edge(y,x):
presouclausura.add_edge(y,x,**red.edges[y,x])
return presouclausura
def modifica_red(self,lista):
redMod = self.red.copy()
for elem in lista:
#Amod[elem[0],elem[1]] += (elem[2]/100)*self.Am[elem[0],elem[1]]
redMod.edges[elem[0],elem[1]]["domestico"] += (elem[2]/100)*redMod.edges[elem[0],elem[1]]["importaciones"]
redMod.edges[elem[0],elem[1]]["importaciones"] = redMod.edges[elem[0],elem[1]]["total"] - redMod.edges[elem[0],elem[1]]["domestico"]
return redMod
def simula_red(self, sector_inicial, filtro=0.04, lista=[[]]):
red_domestica = self.cerradura([sector_inicial],self.presouclausura_atras,self.red,nivel="domestico",filtro=filtro)
red_total = self.cerradura([sector_inicial],self.presouclausura_atras,self.red,nivel="total",filtro=filtro)
inicios = [x[0] for x in lista]
red_simulada = self.cerradura(inicios,self.presouclausura_atras,self.modifica_red(lista),nivel="domestico",filtro=filtro)
for e in red_total.edges:
if red_domestica.has_edge(*e):
tipo = "domestico"
elif any([l[0] == e[0] and l[1] == e[1] for l in lista]):
tipo = "simulado_directo"
elif red_simulada.has_edge(*e):
tipo = "simulado_indirecto"
else:
tipo = "total"
red_total.edges[e]["tipo"] = tipo
for n in red_total.nodes:
if n == sector_inicial:
tipo = "inicial"
elif red_domestica.has_node(n):
tipo = "domestico"
else:
tipo = "total"
red_total.nodes[n]["tipo"] = tipo
return red_total
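# A small self-contained sketch (toy numbers, not taken from the source data) of the
# Leontief calculation the class relies on: given a technical-coefficient matrix A and
# final demand f, gross output solves x = (I - A)^(-1) f.
if __name__ == "__main__":
    A_toy = np.array([[0.2, 0.3],
                      [0.1, 0.4]])   # hypothetical technical coefficients
    f_toy = np.array([100.0, 50.0])  # hypothetical final demand
    L_toy = np.linalg.inv(np.eye(2) - A_toy)  # Leontief inverse
    print("Gross output x = L f:", L_toy @ f_toy)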
|
def reverse_string_in_place(input):
    # Python strings are immutable, so reverse a list of characters in place
    # and join the result back into a string before returning.
    chars = list(input)
    start = 0
    end = len(chars) - 1
    while start < end:
        chars[start], chars[end] = chars[end], chars[start]
        start += 1
        end -= 1
    return ''.join(chars)
if __name__ == "__main__":
print(reverse_string_in_place("inputs"))
|
# -*- coding: utf-8 -*-
import json
import numpy as np
import copy
import time
import os
import sigraph
import pandas as pd
import random
#Pytorch
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.tensorboard import SummaryWriter
#Brainvisa
from soma import aims
#from deepsulci.sulci_labeling.method.unet import UnetSulciLabeling
from deepsulci.deeptools.dataset import extract_data#, SulciDataset
from deepsulci.deeptools.models import UNet3D
from deepsulci.sulci_labeling.analyse.stats import esi_score
from deepsulci.sulci_labeling.method.cutting import cutting
from deepsulci.deeptools.early_stopping import EarlyStopping
from dataset import SulciDataset
from divide_lr import DivideLr
from pattern_class import UnetPatternSulciLabelling
# -------------------------------------------------------------------------------------------------- #
# Class for training the UNet3D model on a cohort
# --------------------------------------------------------------------------------------------------- #
class UnetTrainingSulciLabelling(UnetPatternSulciLabelling):
def __init__(self, graphs, hemi, cuda=-1, working_path=None, dict_model={},
dict_names=None, dict_bck2=None, sulci_side_list=None):
super().__init__(graphs, hemi, cuda, working_path, dict_model, dict_names, dict_bck2, sulci_side_list)
#results
self.results = {'lr': [],
'momentum': [],
'batch_size': [],
'num_epochs': [],
'graphs_train': [],
'graphs_test': [],
'patience': {},
'epoch_loss_val': [],
'epoch_acc_val': [],
'epoch_loss_train': [],
'epoch_acc_train': [],
'best_acc': [],
'best_epoch': [],
'divide_lr_epoch': [],
'duration': [],
'threshold_scores': {}
}
def load_network(self):
# NETWORK
# Load file
print('Network initialization...')
self.model = UNet3D(self.num_channel, len(self.sulci_side_list), final_sigmoid=self.final_sigmoid,
interpolate=self.interpolate, dropout=0., conv_layer_order=self.conv_layer_order,
init_channel_number=self.num_filter)
if self.num_conv > 1:
fac = (self.dict_trained_model['init_channel_number'] - len(self.sulci_side_list)) / self.num_conv
num_channel = self.dict_trained_model['init_channel_number']
self.model.final_conv = nn.Sequential()
for n in range(self.num_conv):
self.model.final_conv.add_module(str(n), nn.Conv3d(num_channel - round(n * fac), num_channel - round((n + 1) * fac), 1))
self.model = self.model.to(self.device)
def learning(self, lr, momentum, num_epochs, gfile_list_train, gfile_list_test, batch_size=1, patience={}, save_results=True):
#Training
#Error
        if self.sulci_side_list is None or self.dict_bck2 is None or self.dict_names is None:
print('Error : extract data from graphs before learning')
return 1
# # DATASET / DATALOADERS # #
print('Extract validation dataloader...')
valdataset = SulciDataset(
gfile_list_test, self.dict_sulci,
train=False, translation_file=self.trfile,
dict_bck2=self.dict_bck2, dict_names=self.dict_names)
if batch_size == 1:
valloader = torch.utils.data.DataLoader(
valdataset, batch_size=batch_size,
shuffle=False, num_workers=0)
else:
val_img_size = [0, 0, 0]
for inputs, _ in valdataset:
size = inputs.size()
val_img_size = [np.max([val_img_size[i], size[i + 1]]) for i in range(len(val_img_size))]
print('Val dataset image size:', val_img_size, sep=' ')
valdataset_resized = SulciDataset(
gfile_list_test, self.dict_sulci,
train=False, translation_file=self.trfile,
dict_bck2=self.dict_bck2, dict_names=self.dict_names, img_size=val_img_size)
valloader = torch.utils.data.DataLoader(
valdataset_resized, batch_size=batch_size,
shuffle=False, num_workers=0)
print('Extract train dataloader...')
traindataset = SulciDataset(
gfile_list_train, self.dict_sulci,
train=True, translation_file=self.trfile,
dict_bck2=self.dict_bck2, dict_names=self.dict_names)
if batch_size == 1:
trainloader = torch.utils.data.DataLoader(
traindataset, batch_size=batch_size,
shuffle=False, num_workers=0)
else:
random.seed(42)
np.random.seed(42)
train_img_size = [0, 0, 0]
for _ in range(num_epochs):
for inputs, _ in traindataset:
size = inputs.size()
train_img_size = [np.max([train_img_size[i], size[i + 1]]) for i in range(len(train_img_size))]
print('Train dataset image size:', train_img_size, sep=' ')
traindataset_resized = SulciDataset(
gfile_list_train, self.dict_sulci,
train=True, translation_file=self.trfile,
dict_bck2=self.dict_bck2, dict_names=self.dict_names, img_size=train_img_size)
trainloader = torch.utils.data.DataLoader(
traindataset_resized, batch_size=batch_size,
shuffle=False, num_workers=0)
np.random.seed(42)
random.seed(42)
# # MODEL # #
self.load_network()
optimizer = optim.SGD(self.model.parameters(), lr=lr, momentum=momentum, weight_decay=0)
criterion = nn.CrossEntropyLoss(ignore_index=-1)
if save_results:
num_training = len(self.results['lr'])
self.results['lr'].append(lr)
self.results['momentum'].append(momentum)
self.results['batch_size'].append(batch_size)
self.results['num_epochs'].append(num_epochs)
self.results['graphs_test'].append(list(gfile_list_test))
self.results['graphs_train'].append(list(gfile_list_train))
self.results['patience'] = patience
if batch_size > 1:
                if num_training == 0:
                    self.results['train_image_size'] = [[int(i) for i in train_img_size]]
                    self.results['val_image_size'] = [[int(i) for i in val_img_size]]
                else:
                    self.results['train_image_size'].append([int(i) for i in train_img_size])
                    self.results['val_image_size'].append([int(i) for i in val_img_size])
log_dir = os.path.join(self.working_path + '/tensorboard/' + self.model_name)
os.makedirs(log_dir, exist_ok=True)
writer = SummaryWriter(log_dir=log_dir+'/cv'+str(num_training)) #, comment=)
# early stopping
if "early_stopping" in patience.keys():
es_stop = EarlyStopping(patience=patience['early_stopping'])
if "divide_lr" in patience.keys():
divide_lr = DivideLr(patience=patience['divide_lr'])
# # TRAINING # #
print('training...')
since = time.time()
best_model_wts = copy.deepcopy(self.model.state_dict())
best_acc, epoch_acc = 0., 0.
best_epoch = 0
for epoch in range(num_epochs):
print('Epoch {}/{}'.format(epoch, num_epochs - 1))
print('-' * 10)
start_time = time.time()
# Each epoch has a training and validation phase
for phase in ['train', 'val']:
if phase == 'train':
self.model.train() # Set model to training mode
else:
self.model.eval() # Set model to evaluate mode
running_loss = 0.0
# compute dataloader
dataloader = trainloader if phase == 'train' else valloader
# Iterate over data.
y_pred, y_true = [], []
for batch, (inputs, labels) in enumerate(dataloader):
inputs = inputs.to(self.device)
labels = labels.to(self.device)
# zero the parameter gradients
optimizer.zero_grad()
with torch.set_grad_enabled(phase == 'train'):
outputs = self.model(inputs)
_, preds = torch.max(outputs, 1)
loss = criterion(outputs, labels)
if phase == 'train':
loss.backward()
optimizer.step()
# statistics
running_loss += loss.item() * inputs.size(0)
y_pred.extend(preds[labels != self.background].tolist())
y_true.extend(labels[labels != self.background].tolist())
if batch_size > 1:
print('Batch n°{:.0f}/{:.0f} || Loss: {:.4f}'.format(batch+1, np.ceil(len(dataloader.dataset)/batch_size), loss.item()))
epoch_loss = running_loss / len(dataloader.dataset)
epoch_acc = 1 - esi_score(
y_true, y_pred,
[self.dict_sulci[ss] for ss in self.sslist])
print('{} Loss: {:.4f} Acc: {:.4f}'.format(
phase, epoch_loss, epoch_acc))
if save_results:
writer.add_scalar('Loss/'+phase, epoch_loss, epoch)
writer.add_scalar('Accuracy/'+phase, epoch_acc, epoch)
if epoch == 0:
self.results['epoch_loss_'+phase].append([epoch_loss])
self.results['epoch_acc_'+phase].append([epoch_acc])
else:
self.results['epoch_loss_'+phase][num_training].append(epoch_loss)
self.results['epoch_acc_'+phase][num_training].append(epoch_acc)
# deep copy the model
if phase == 'val' and epoch_acc > best_acc:
best_acc = epoch_acc
best_epoch = epoch
best_model_wts = copy.deepcopy(self.model.state_dict())
# divide_lr
if 'divide_lr' in patience.keys():
divide_lr(epoch_loss, self.model)
if divide_lr.divide_lr:
lr = lr / 10
print('\tDivide learning rate. New value: {}'.format(lr))
optimizer = optim.SGD(self.model.parameters(), lr=lr, momentum=momentum)
self.results['divide_lr_epoch'].append(epoch)
# early_stopping
if "early_stopping" in patience.keys():
es_stop(epoch_loss, self.model)
if es_stop.early_stop:
print("Early stopping")
break
print('Epoch took %i s.' % (time.time() - start_time))
print('\n')
time_elapsed = time.time() - since
print('Training complete in {:.0f}m {:.0f}s'.format(
time_elapsed // 60, time_elapsed % 60))
print('Best val Acc: {:4f}, Epoch {}'.format(best_acc, best_epoch))
if save_results:
self.results['best_acc'].append(best_acc)
self.results['best_epoch'].append(best_epoch)
self.results['duration'].append(time_elapsed)
writer.close()
# load best model weights
self.model.load_state_dict(best_model_wts)
def reset_results(self):
self.results = {'lr': [],
'momentum': [],
'batch_size': [],
'num_epochs': [],
'graphs_train': [],
'graphs_test': [],
'patience': {},
'train_image_size': [],
'val_image_size': [],
'epoch_loss_val': [],
'epoch_acc_val': [],
'epoch_loss_train': [],
'epoch_acc_train': [],
'best_acc': [],
'best_epoch': [],
'divide_lr_epoch': [],
'duration': [],
'threshold_scores': {}
}
|
import cv2
import numpy
from matplotlib import pyplot as plt
img = cv2.imread('./../sheep.jpg',0)
# laplacian = cv2.Laplacian(img,cv2.CV_64F)
sobelx = cv2.Sobel(img,cv2.CV_64F,1,0,ksize=5)
sobely = cv2.Sobel(img,cv2.CV_64F,0,1,ksize=5)
# Note: this is not a true Laplacian (see the commented line above); it applies
# Sobel in y to the Sobel-x result, i.e. a mixed second-derivative approximation.
laplacian = cv2.Sobel(sobelx,cv2.CV_64F,0,1,ksize=5)
plt.subplot(2,2,1),plt.imshow(img,cmap='gray')
plt.title('original'),plt.xticks([]),plt.yticks([])
plt.subplot(2,2,2),plt.imshow(laplacian,cmap='gray')
plt.title('laplacian'),plt.xticks([]),plt.yticks([])
plt.subplot(2,2,3),plt.imshow(sobelx,cmap='gray')
plt.title('Sobel X'),plt.xticks([]),plt.yticks([])
plt.subplot(2,2,4),plt.imshow(sobely,cmap='gray')
plt.title('Sobel Y'),plt.xticks([]),plt.yticks([])
plt.show()
|
import pandas as pd
import numpy as np
data = pd.read_csv('/data-out/titanic.csv', index_col='PassengerId')
passengers = len(data)
"""
How many male / female passengers
"""
male = (data['Sex'].value_counts()['male'])
female = len(data) - male
ans1 = (male, female)
with open('/data-out/1.txt', 'w') as f:
    f.write('{} {}'.format(male, female))
"""
Percentage of passengers who survived
"""
survived = (data['Survived'].value_counts()[1])
ans2 = ("%.2f" % (survived/passengers*100))
with open('/data-out/2.txt', 'w') as f:
f.write(ans2)
f.close()
"""
Percentage of passengers travelling in first class
"""
firstclass = (data['Pclass'].value_counts()[1])
ans3 = ("%.2f" % (firstclass/passengers*100))
with open('/data-out/3.txt', 'w') as f:
f.write(ans3)
f.close()
"""
Mean and median age
"""
ans4 = ("%.2f" % data['Age'].mean(), "%.2f" % data['Age'].median())
with open('/data-out/4.txt', 'w') as f:
f.write(str(ans4))
f.close()
"""
Correlation between the two columns (SibSp and Parch)
"""
ans5 = ("%.2f" % data.corr()['SibSp']['Parch'])
with open('/data-out/5.txt', 'w') as f:
f.write(str(ans5))
f.close()
print(data['Name'])
|
from django.conf.urls import url
from .views import GithubWebHook
urlpatterns = [
url(r'^github/web/', GithubWebHook.as_view(), name='github_web'),
]
|
import sqlite3
import time
import os.path
import feedparser
from datetime import datetime
import re
from flask import g
from views import app
DATABASE = 'tmp/rockneurotiko.sqlite'
DEBUG = True
SECRET_KEY = 'zumTUzM3IhUVQgeX9c55'
def connect_db():
"""Returns a new connection to the sqlite database"""
return sqlite3.connect(app.config['DATABASE'], detect_types=sqlite3.PARSE_DECLTYPES)
def init_db():
"""Create the database if it doesn't exist"""
if not os.path.isfile(app.config['DATABASE']):
app.logger.debug('DB disappeared, making a new one')
f = app.open_resource('schema.sql')
db = connect_db()
db.cursor().executescript(f.read())
db.commit()
def query_db(query, args=(), one = False):
"""Query database returning dictionary"""
cur = g.db.execute(query, args)
rv = [dict((cur.description[idx][0], value)
for idx, value in enumerate(row)) for row in cur.fetchall()]
return (rv[0] if rv else None) if one else rv
def populate_database():
init_db()
if data_is_stale():
load_twitter()
load_github()
def data_is_stale():
"""Find the last entry in the sqlite database to determine if we need to
refresh the data. This stops us from pulling them each request"""
try:
last_updated = g.db.cursor().execute('select last_refresh from entries order by last_refresh desc limit 1').fetchone()[0]
except:
return True
if not last_updated or (datetime.now() - last_updated).seconds > 10800:
return True
return False
def load_twitter():
    # Workaround to get an RSS feed from Twitter: http://www.visioncritical.com/blog/how-add-twitter-feeds-feedly-or-how-add-rss-feeds-hootsuite-quick-tip
twitter = feedparser.parse("https://script.google.com/macros/s/AKfycbw-WRJKn60YSrSXsxnT7Cv1SiOg0bPBj_fwEksDsvgXBtt60R4/exec")
g.db.cursor().execute('DELETE FROM twitter')
for entry in twitter.entries:
data = parseLongURL(entry['summary'])
g.db.cursor().execute('INSERT INTO twitter VALUES (?, ?, ?, ?, ?, ?, ?,?)',
(None,
entry['link'],
"static/twitter_1.png",
data,
"twitter",
datetime.strptime(entry['published'][:-6], '%a, %d %b %Y %H:%M:%S'),
datetime.now(),
None))
g.db.commit()
def parseLongURL(cont):
http = re.findall(r'(http|ftp|https):\/\/([\w\-_]+(?:(?:\.[\w\-_]+)+))([\w\-\.,@?^=%&:/~\+#]*[\w\-\@?^=%&/~\+#])?', cont)
for j in http:
url = "%s://%s%s" % (j[0],j[1],j[2])
splitted = cont.split(url)
if len(url) > 25:
url = url[:25] + "..."
if len(splitted) == 2:
cont = splitted[0] + url + splitted[1]
else:
print url, splitted
#print cont[cont.find(url):cont.find(url)+len(url)]
return cont
def load_github():
github = feedparser.parse("http://github.com/rockneurotiko.atom")
g.db.cursor().execute('DELETE FROM github')
for entry in github.entries:
title = entry['link'].split('/')[4]
author = entry['title'].split()[0]
g.db.cursor().execute('INSERT INTO github VALUES (?, ?, ?, ?, ?, ?, ?, ?)',
(None,
entry['link'],
"static/cog.png",
entry['title'],
datetime.strptime(entry['updated'][:-1], '%Y-%m-%dT%H:%M:%S'),
datetime.now(),
title,
author))
g.db.commit()
def getFromSource(source, limit=20):
return query_db("select * from " + source + " order by updated desc limit " + str(limit))
|
from employee import Employee
class MyNode:
def __init__(self,data,next1 = None):
self.__data = data
self.__next = next1
def getNext(self):
return self.__next
def setNext(self,other):
self.__next = other
def getData(self):
return self.__data
def setData(self,other1):
self.__data = other1
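# Minimal usage sketch (illustrative only): chaining two nodes into a tiny linked list.
if __name__ == "__main__":
    head = MyNode("first")
    head.setNext(MyNode("second"))
    node = head
    while node is not None:
        print(node.getData())
        node = node.getNext()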
|
#!/usr/bin/env python
"""
Josh's Dumb-Ass Classifier
"""
import pprint
import sys,os, copy
sys.path.append(os.environ.get("TCP_DIR") + '/Software/feature_extract/Code/extractors')
import sdss, ned, ng
class bogus_sdss:
def __init__(self):
self.in_footprint = False
self.feature = {}
class JDAC:
def __init__(self,pos=(176.70883 ,11.79869),verbose=True,seeing=2.5, do_sdss=True):
self.verbose=verbose
self.pos = pos
self.seeing = seeing/2 # in arcsec ... to make this circumnuclear
if do_sdss:
self._get_sdss_feature()
else:
self.s = bogus_sdss()
#self._get_ned_features()
self._get_mansi_gals()
self.val_add = {}
self.set_nearest_type()
self.set_probability()
if self.val_add["nearest_type_confidence"] < 0.95 and self.val_add["nearest_type"] in ["galaxy", "stellar"]:
#pprint.pprint(self.val_add)
#print "***"*10
self.ttt = copy.copy(self.val_add)
## probability is low here...compute for the other case
if self.ttt["nearest_type"] == "galaxy":
self.val_add["nearest_type"] = "stellar"
else:
self.val_add["nearest_type"] = "galaxy"
self.val_add["nearest_type_confidence"] = 1.0 - self.ttt["nearest_type_confidence"]
self.set_probability()
self.val_add.pop("nearest_type_from")
self.ttt.update({'alt': self.val_add})
self.val_add = self.ttt
#pprint.pprint(self.val_add)
self.make_clean_prob()
self.make_clean_prob(doalt=True)
#pprint.pprint(self.p)
def make_clean_prob(self,doalt=False):
if not doalt:
self.p = {}
for k,v in self.val_add.iteritems():
if k not in ["alt", "nearest_type", "nearest_type_confidence", "prob"]:
if not self.p.has_key("flags"):
self.p.update({"flags": {k: v}})
else:
self.p["flags"].update({k: v})
else:
if self.val_add.has_key("alt"):
self.ttt = copy.copy(self.val_add)
self.val_add = self.ttt['alt']
else:
return
if self.val_add['nearest_type'] in ['qso','galaxy']:
top_prob = self.val_add['nearest_type_confidence']
self.p.update({"extragalactic": {"val": top_prob}})
for k,v in self.val_add['prob'].iteritems():
self.p['extragalactic'].update({k: {'val': v}})
elif self.val_add['nearest_type'] in ['stellar','cv']:
top_prob = self.val_add['nearest_type_confidence']
self.p.update({"galactic": {"val": top_prob}})
multi = self.val_add['prob']['VarStar']['prob']
self.val_add['prob']['VarStar'].pop("prob")
for k,v in self.val_add['prob']['VarStar'].iteritems():
self.p['galactic'].update({k: {'val': v}})
if doalt:
self.val_add = self.ttt
def _get_sdss_feature(self):
self.s = sdss.sdssq(pos=self.pos,verbose=self.verbose,run_on_instance=False)
self.s.feature_maker()
def _get_ned_features(self):
self.n = ned.NED(pos=self.pos,verbose=self.verbose)
def _get_mansi_gals(self,radius=5):
self.mansi = ng.GalGetter(verbose=self.verbose)
self.mansi.getgals(pos=self.pos,radius=float(radius)/60.,sort_by="phys",max_d=500.0)
self.m = self.mansi.grab_rez()
def set_probability(self):
if self.val_add["nearest_type_from"] == "unset":
## there's no context
self.val_add.update({"prob": {}})
return
if self.val_add["nearest_type"] == "unset":
## it's not in there, but in the footprint
self.val_add.update({"prob": {'roid': 0.6, 'sn': 0.1, 'VarStar': {"prob": 0.2, 'low_mass_flare': 0.8}}})
return
if self.val_add['nearest_type_from'] == "mansi" and self.val_add["nearest_type"] == "galaxy":
## could be a rather nearby galaxy. But's let's check to make sure it's not some
## more distant galaxy in projection
self.tmp = copy.copy(self.val_add)
self.set_nearest_type(skip_mansi=True)
## if the mansi confidence is low and the sdss confidence is high, then set that
if self.tmp["nearest_type_confidence"] <= 0.9 and \
self.val_add["nearest_type"] == "galaxy" and \
self.val_add["nearest_type_confidence"] >= 0.60:
## we're good ... keep the val_add from sdss and note possible nearby big galaxy
self.val_add.update({"possible_mansi_gal_nearby": True, 'possible_mansi_gal_mansi_pos': self.m['closest_in_light_galaxy_position']})
## call ourselves again...this time without the mansi galaxy
self.set_probability()
else:
## no, the SDSS looks to be tentative, revert back to Mansi
self.val_add = self.tmp
del self.tmp
## now we can really belive mansi
if self.m['closest_in_light_physical_offset_in_kpc'] < 0.5:
if self.m['closest_in_light_angular_offset_in_arcmin'] < self.seeing/60.0:
self.val_add.update({"apparently_nuclear": True})
else:
# outside seeing disk
self.val_add.update({"apparently_circumnuclear": True})
else:
## largert physical offset
if self.m['closest_in_light_angular_offset_in_arcmin'] < self.seeing/60.0:
self.val_add.update({"apparently_circumnuclear": True})
if self.m['closest_in_light_dm'] < 28.5:
## this is a really nearby galaxy < 5 Mpc
self.val_add.update({"very_nearby_gal": True})
self.val_add.update({"prob": {"SN": 0.05, "Nova": 0.9}})
return
else:
self.val_add.update({"outskirts_of_mansi_nearby_gal": True})
self.val_add.update({"prob": {"SN": 0.9}})
if self.val_add.has_key("apparently_nuclear"):
if self.val_add.has_key("prob"):
self.val_add['prob'].update({"SN": 0.1, "AGN": 0.85, "TDF": 0.05})
else:
self.val_add.update({"prob":{"SN": 0.1, "AGN": 0.85, "TDF": 0.05}})
return
if self.val_add.has_key("apparently_circumnuclear"):
self.val_add.update({"prob":{"SN": 0.95, "AGN": 0.05}})
else:
if self.val_add["nearest_type"] == "galaxy":
if self.ss["dist_in_arcmin"] < self.seeing/60.0:
## apparently right on top of the light centroid
if self.ss["dered_r"] > 21.0:
self.val_add.update({"prob":{"SN": 0.9, "AGN": 0.05}})
else:
# if it's bright and right on top...
self.val_add.update({"apparently_circumnuclear": True})
self.val_add.update({"prob":{"SN": 0.3, "AGN": 0.7}})
return
else:
self.val_add.update({"prob":{"SN": 0.9}})
return
elif self.val_add["nearest_type"] == "qso":
if self.ss["dist_in_arcmin"] < self.seeing/60.0:
self.val_add.update({"prob":{"SN": 0.02, "AGN": 0.95}})
return
else:
self.val_add.update({"prob":{"SN": 0.05, "AGN": 0.93}})
return
else:
## a star, probably
## is it red?
if self.ss["dered_r"] - self.ss["dered_i"] > 1.5:
self.val_add.update({"prob":{'VarStar': {"prob": 1.0, 'low_mass_flare': 0.8}}})
return
# blue
if self.ss["dered_r"] - self.ss["dered_i"] < 0.5 or self.val_add["nearest_type"] == "cv":
self.val_add.update({"prob":{'VarStar': {"prob": 1.0, 'CV': 0.8}}})
return
if self.ss["spectral_flag"] != None:
## probably a white dwarf spectroscopically
if self.ss["spectral_flag"] in ['d','D']:
self.val_add.update({"prob":{'VarStar': {"prob": 1.0, 'CV': 0.8}}})
return
# no idea
self.val_add.update({"prob":{'VarStar': {"prob": 1.0}}})
return
def set_nearest_type(self,skip_mansi=False):
## is it in the SDSS footprint or mani?
if not self.s.in_footprint and not self.m.has_key("closest_in_light"):
self.val_add.update({'nearest_type_from': "unset", 'nearest_type': "not_in_footprint", "nearest_type_confidence": 1.0})
return
if not skip_mansi:
## let's look at mansi's result for now
if self.m.has_key("closest_in_light") and self.m.has_key('closest_in_light_physical_offset_in_kpc'):
## decreasing confidence in this being a galaxy
if self.m['closest_in_light_physical_offset_in_kpc'] < 3.0 and self.m['closest_in_light'] < 1.0:
self.val_add.update({'nearest_type': "galaxy", "nearest_type_confidence": 0.98})
elif self.m['closest_in_light_physical_offset_in_kpc'] < 5.0 and self.m['closest_in_light'] < 2.0:
self.val_add.update({'nearest_type': "galaxy", "nearest_type_confidence": 0.9})
elif self.m['closest_in_light_physical_offset_in_kpc'] < 10.0 and self.m['closest_in_light'] < 3.0:
self.val_add.update({'nearest_type': "galaxy", "nearest_type_confidence": 0.8})
elif self.m['closest_in_light_physical_offset_in_kpc'] < 15.0 and self.m['closest_in_light'] < 3.5:
self.val_add.update({'nearest_type': "galaxy", "nearest_type_confidence": 0.7})
elif self.m['closest_in_light_physical_offset_in_kpc'] < 40.0 and self.m['closest_in_light'] < 5.0:
self.val_add.update({'nearest_type': "galaxy", "nearest_type_confidence": 0.6})
else:
self.val_add.update({'nearest_type': "galaxy", "nearest_type_confidence": 0.55})
self.val_add.update({'nearest_type_from': 'mansi'})
return
## let's look at SDSS
self.ss = self.s.feature
if self.ss.has_key("dist_in_arcmin"):
if self.ss["dist_in_arcmin"] > 0.12:
## this is pretty far away for a SDSS galaxy
self.val_add.update({'nearest_type': "unset", "nearest_type_confidence": 0.5})
self.val_add.update({'nearest_type_from': 'sdss'})
return
if self.ss.has_key("best_offset_in_kpc"):
if self.ss["best_offset_in_kpc"] > 100.0:
## this is pretty far away for a SDSS galaxy
self.val_add.update({'nearest_type': "unset", "nearest_type_confidence": 0.5})
self.val_add.update({'nearest_type_from': 'sdss'})
return
if self.ss.has_key("type"):
if self.ss['type'] == 'galaxy' or self.ss['classtype'] == 'gal':
if self.ss.has_key("segue_class"):
if self.ss['segue_class'] == 'galaxy':
if self.ss.has_key("spec_z") and self.ss.has_key('spec_confidence'):
if self.ss['spec_z'] > 0.01 and self.ss['spec_confidence'] > 0.3:
if self.ss.has_key("best_offset_in_petro_g"):
if self.ss["best_offset_in_petro_g"] < 10.0:
self.val_add.update({'nearest_type': "galaxy", "nearest_type_confidence": 1.0})
else:
self.val_add.update({'nearest_type': "galaxy", "nearest_type_confidence": 0.95})
else:
self.val_add.update({'nearest_type': "galaxy", "nearest_type_confidence": 0.90})
else:
self.val_add.update({'nearest_type': "galaxy", "nearest_type_confidence": 0.85})
else:
self.val_add.update({'nearest_type': "galaxy", "nearest_type_confidence": 0.85})
else:
self.val_add.update({'nearest_type': "galaxy", "nearest_type_confidence": 0.80})
else:
self.val_add.update({'nearest_type': "stellar", "nearest_type_confidence": 0.95})
## if the source is too faint, then it's hard to trust the classification
if self.ss.has_key('dered_r'):
if self.ss['dered_r'] > 21.5 and self.ss["spec_z"] == None:
## really hard to trust what's happening here.
if self.ss['type'] == 'galaxy':
self.val_add.update({'nearest_type': "galaxy", "nearest_type_confidence": 0.55})
else:
self.val_add.update({'nearest_type': "stellar", "nearest_type_confidence": 0.55})
if self.ss.get('spectral_flag','') in ['xxxx', 'nnbn', 'enbn', 'ecbn']:
if self.val_add.has_key("nearest_type_confidence"):
if self.val_add["nearest_type_confidence"] < 0.9:
self.val_add.update({'nearest_type': "galaxy", "nearest_type_confidence": 0.9})
if self.ss.get("classtype",'') == "qso":
self.val_add.update({'nearest_type': "qso", "nearest_type_confidence": 0.70})
if self.ss["segue_star_type"] == "broadline":
self.val_add.update({'nearest_type': "qso", "nearest_type_confidence": 0.95})
if self.ss["spectral_stellar_type"] == "cv" or self.ss["spec_zWarning"] == "not_qso":
self.val_add.update({'nearest_type': "cv", "nearest_type_confidence": 0.95})
if self.ss.get("spectral_stellar_type",'') == 'qso' or (self.ss.get("bestz",'') > 0.6 and self.ss["bestz_err"] < 0.1):
self.val_add.update({'nearest_type': "qso", "nearest_type_confidence": 0.95})
self.val_add.update({'nearest_type_from': 'sdss'})
return
if __name__ == "__main__":
#j = JDAC(seeing=2.5)
j = JDAC(pos=(57.434622 , -3.264654 ))
# try looking at j.p
import pprint
pprint.pprint(j.p)
|
#!/usr/bin/env python
'''
package.py: part of singularity package
'''
from singularity.runscript import get_runscript_parameters
from singularity.utils import zip_up, read_file
from singularity.cli import Singularity
import tempfile
import tarfile
import hashlib
import zipfile
import json
import os
def package(image_path,output_folder=None,runscript=True,software=True,remove_image=False,verbose=False,S=None):
'''package will take an image and generate a zip (optionally including the image)
in a user-specified output_folder.
:param image_path: full path to singularity image file
:param runscript: if True, will extract runscript to include in package as runscript
:param software: if True, will extract files.txt and folders.txt to package
:param remove_image: if True, will not include original image in package (default,False)
:param verbose: be verbose when using singularity --export (default,False)
:param S: the Singularity object (optional); will be created if not provided.
'''
if S is None:
S = Singularity(verbose=verbose)
tmptar = S.export(image_path=image_path,pipe=False)
tar = tarfile.open(tmptar)
members = tar.getmembers()
image_name = os.path.basename(image_path)
zip_name = "%s.zip" %(image_name.replace(" ","_"))
# Include the image in the package?
if remove_image:
to_package = dict()
else:
to_package = {image_name:image_path}
# Package the image with an md5 sum as VERSION
version = get_image_hash(image_path)
to_package["VERSION"] = version
# Look for runscript
if runscript == True:
try:
runscript_member = tar.getmember("./singularity")
runscript_file = tar.extractfile("./singularity")
runscript = runscript_file.read()
to_package["runscript"] = runscript
print("Found runscript!")
# Try to extract input args, only python supported, will return None otherwise
params_json = get_runscript_parameters(runscript=runscript,
name=image_name,
version=version)
if params_json != None:
print('Extracted runscript params!')
to_package['%s.json' %(image_name)] = params_json
except KeyError:
print("No runscript found in image!")
if software == True:
print("Adding software list to package!")
files = [x.path for x in members if x.isfile()]
folders = [x.path for x in members if x.isdir()]
to_package["files.txt"] = files
to_package["folders.txt"] = folders
# Do zip up here - let's start with basic structures
# (named zip_path to avoid shadowing the imported zipfile module)
zip_path = zip_up(to_package,zip_name=zip_name,output_folder=output_folder)
print("Package created at %s" %(zip_path))
# return package to user
return zip_path
def list_package(package_path):
'''list_package will list the contents of a package, without reading anything into memory
:package_path: the full path to the package
'''
zf = zipfile.ZipFile(package_path, 'r')
return zf.namelist()
def calculate_similarity(pkg1,pkg2,include_files=False,include_folders=True):
'''calculate_similarity will calculate similarity of images in packages based on
a comparator list of (files or folders) in each package, default will calculate
2.0*len(intersect) / total package1 + total package2
:param pkg1: packaged image 1
:param pkg2: packaged image 2
:param include_files: boolean, default False. If true, will include files
:param include_folders: boolean, default True. If true, will include folders
'''
# Base names will be indices for full lists for comparator return object
pkg1_name = os.path.basename(pkg1)
pkg2_name = os.path.basename(pkg2)
comparison = compare_package(pkg1,pkg2,include_files=include_files,include_folders=include_folders)
score = 2.0*len(comparison["intersect"]) / (len(comparison[pkg1_name])+len(comparison[pkg2_name]))
# Alert user if images are identical
if score == 1.0:
print("Package %s and %s are identical by this metric!" %(pkg1_name,pkg2_name))
return score
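# A small worked example of the score above (a sketch with made-up folder lists, not
# real packages): 2.0 * len(intersect) / (len(pkg1) + len(pkg2)) is the Dice
# coefficient, so identical lists score 1.0 and fully disjoint lists score 0.0.
def _similarity_demo():
    pkg1_folders = ["/bin", "/etc", "/usr"]
    pkg2_folders = ["/bin", "/etc", "/opt"]
    intersect = [x for x in pkg1_folders if x in pkg2_folders]
    # 2.0 * 2 / (3 + 3) = 0.666...
    return 2.0 * len(intersect) / (len(pkg1_folders) + len(pkg2_folders))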
def compare_package(pkg1,pkg2,include_files=False,include_folders=True):
'''compare_package will return the lists of files or folders (or both) that are
different and equal between two packages
:param pkg1: package 1
:param pkg2: package 2
:param include_files: boolean, default False. If true, will include files
:param include_folders: boolean, default True. If true, will include folders
(an overall similarity score is computed separately by calculate_similarity above)
'''
if include_files == False and include_folders == False:
print("Please specify include_files and/or include_folders to be True.")
else:
# For future reference
pkg1_name = os.path.basename(pkg1)
pkg2_name = os.path.basename(pkg2)
# Lists for all comparators for each package
pkg1_comparators = []
pkg2_comparators = []
pkg1_includes = list_package(pkg1)
pkg2_includes = list_package(pkg2)
# Include files in comparison?
if include_files == True:
if "files.txt" in pkg1_includes and "files.txt" in pkg2_includes:
pkg1_comparators += load_package(pkg1,get="files.txt")["files.txt"]
pkg2_comparators += load_package(pkg2,get="files.txt")["files.txt"]
# Include folders in comparison?
if include_folders == True:
if "folders.txt" in pkg2_includes and "folders.txt" in pkg2_includes:
pkg1_comparators += load_package(pkg1,get="folders.txt")["folders.txt"]
pkg2_comparators += load_package(pkg2,get="folders.txt")["folders.txt"]
# Do the comparison
intersect = [x for x in pkg1_comparators if x in pkg2_comparators]
unique_pkg1 = [x for x in pkg1_comparators if x not in pkg2_comparators]
unique_pkg2 = [x for x in pkg2_comparators if x not in pkg1_comparators]
# Return data structure
comparison = {"intersect":intersect,
"unique_%s" %(pkg1_name): unique_pkg1,
"unique_%s" %(pkg2_name): unique_pkg2,
pkg1_name:pkg1_comparators,
pkg2_name:pkg2_comparators}
return comparison
def load_package(package_path,get=None):
'''load_package will return the contents of a package, read into memory
:param package_path: the full path to the package
:param get: the files to load. If none specified, all things loaded
'''
if get is None:
get = list_package(package_path)
# Open the zipfile
zf = zipfile.ZipFile(package_path, 'r')
# The user might have provided a string and not a list
if isinstance(get,str):
get = [get]
retrieved = dict()
for g in get:
filename,ext = os.path.splitext(g)
if ext in [".img"]:
tmpdir = tempfile.mkdtemp()
print("Extracting image %s to %s..." %(g,tmpdir))
image_extracted_path = zf.extract(g,tmpdir)
retrieved[g] = image_extracted_path
elif ext in [".txt"] or g == "runscript":
retrieved[g] = zf.read(g).decode('utf-8').split('\n')
elif g == "VERSION":
retrieved[g] = zf.read(g).decode('utf-8')
elif ext in [".json"]:
retrieved[g] = json.loads(zf.read(g).decode('utf-8'))
else:
print("Unknown extension %s, skipping %s" %(ext,g))
return retrieved
def get_image_hash(image_path):
'''get_image_hash will return an md5 hash of the file. Since we don't have git commits
this seems like a reasonable option to "version" an image, since we can easily say yay or nay
if the image matches the spec file
:param image_path: full path to the singularity image
'''
print("Generating unique version of image (md5 hash)")
hash_md5 = hashlib.md5()
with open(image_path, "rb") as f:
for chunk in iter(lambda: f.read(4096), b""):
hash_md5.update(chunk)
return hash_md5.hexdigest()
def docker2singularity(docker_image,output_folder=None):
'''docker2singularity is a wrapper for the Singularity.docker2singularity
client function. Does not currently include runscript (/singularity) in image,
but does export full docker image spec under /singularity.json
:param docker_image: the full docker repo/image,eg "ubuntu:latest"
:param output_folder: the output folder to create the image in. If not
specified, will use pwd.
'''
S = Singularity()
docker_image = S.docker2singularity(docker_image=docker_image,
output_dir=output_folder)
return docker_image
|
'''
python test.py output_file
'''
import numpy as np
import sys
from numpy.linalg import inv, det
from math import pi, exp
import time
if len(sys.argv) != 2:
print("python test.py output_file")
exit()
p_zero = 24720/(24720+7841)
mean_zero = np.load("gen_model/0_mean.npy")
mean_one = np.load("gen_model/1_mean.npy")
cov_mutual = np.load("gen_model/mutual_cov.npy")
test_x = np.load("gen_model/test_feature.npy")
with open(sys.argv[1], 'w') as f:
f.write("id,label\n")
i = 1
for row in test_x:
X = row - mean_zero
up1 = -0.5*X.dot(inv(cov_mutual)).dot(X.transpose())
X = row - mean_one
up2 = -0.5*X.dot(inv(cov_mutual)).dot(X.transpose())
ratio = exp(up2[0,0] - up1[0,0])
prob = p_zero / (p_zero + (1-p_zero)*ratio)
if prob > 0.5:
label = '0'
else:
label = '1'
line = str(i) + "," + label + '\n'
f.write(line)
i += 1
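# A numerically safer variant of the posterior above (a sketch, not used by this
# script): exp(up2 - up1) can overflow for large log-likelihood gaps, so the same
# probability can be written as a logistic function of the log-odds instead.
def posterior_class_zero(up1_scalar, up2_scalar, prior_zero):
    # P(class 0 | x) = sigmoid(log(p0 / (1 - p0)) + up1 - up2)
    log_odds = np.log(prior_zero / (1.0 - prior_zero)) + up1_scalar - up2_scalar
    log_odds = np.clip(log_odds, -700.0, 700.0)  # keep exp() within floating-point range
    return 1.0 / (1.0 + np.exp(-log_odds))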
|
from django.shortcuts import render_to_response, HttpResponseRedirect
from django.template import RequestContext
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.models import User
from django.contrib import messages
from mapFriends.facebook import test_token, get_authorization_url,get_token,get_user_data,get_user_friends,get_coordinates
from mapFriends.images import take_image
from mapFriends.forms import LoginForm, RegisterForm
from mapFriends.models import UserProfile
def home(request):
return render_to_response('index.html', {}, context_instance=RequestContext(request))
def login_view(request):
if request.method == "POST":
login_form = LoginForm(request.POST)
if login_form.is_valid():
username = login_form.cleaned_data['username']
password = login_form.cleaned_data['password']
user = authenticate(username=username, password=password)
if user is not None:
if user.is_active:
print "[login] Usuario valido"
login(request, user)
messages.add_message(request, messages.INFO, "Hola %s, te has logueado correctamente" % username)
return HttpResponseRedirect("/")
else:
print "[login] Usuario no activo"
messages.add_message(request, messages.ERROR, "Usuario no activo")
return HttpResponseRedirect("/")
else:
print "[login] Usuario o pass incorrecto"
messages.add_message(request, messages.ERROR, "Usuario o contrasena incorrecta")
form = LoginForm()
ctx = {'login' : form}
print "[login] Enviando formulario de registro"
return render_to_response('login.html', ctx, context_instance=RequestContext(request))
def register_view(request):
form = RegisterForm()
if request.method == "POST":
token = request.session['token']
name = request.session['name']
register = RegisterForm(request.POST)
if register.is_valid():
password = register.cleaned_data['password']
try:
u = User.objects.get(username=name['name'])
ctx = {'register' : form}
print "[register] Usuario ya existe"
messages.add_message(request, messages.WARNING, "%s ya existe, logueate" %name['name'])
return HttpResponseRedirect('/login')
except User.DoesNotExist:
user = User.objects.create_user(username=name['name'], email=name['email'], password=password)
facebook_user = UserProfile(user=user, access_token=token['access_token'][0])
facebook_user.save()
ctx = {}
print "[register] Registrando Usuario"
messages.add_message(request, messages.INFO, "%s registrado correctamente" %name['name'])
return render_to_response('index.html', ctx, context_instance=RequestContext(request))
else:
ctx = {'register' : form}
print "[register] Password no coinciden"
messages.add_message(request, messages.ERROR, "Passsword no coinciden")
return render_to_response('register.html', ctx, context_instance=RequestContext(request))
if 'code' not in request.GET: # Check whether we already have access to the user's data
url = get_authorization_url(request)
print "[register] Obteniendo facebook"
return HttpResponseRedirect(url)
token = get_token(request)
name = get_user_data(request, token['access_token'][0])
request.session['token'] = token
request.session['name'] = name
ctx = {'register' : form}
print "[register] Enviando un formuladio de registro"
return render_to_response('register.html', ctx, context_instance=RequestContext(request))
def map(request):
user = User.objects.filter(username=request.user)
profile = UserProfile.objects.get(user=user)
token = profile.access_token
print "Ejecutando"
url = test_token(request, token)#Test if token is valid
if url != "":
print "URL NOT EMPTY"
return HttpResponseRedirect(url)
print "[map] Change token"
profile = UserProfile.objects.get(user=user)
token = profile.access_token
data = get_user_data(request, token)
friends, sites = get_user_friends(request, token)
location = get_coordinates(request, sites, token)
friends = take_image(friends)
ctx = {'data' : data, 'friends' : friends, 'places' : location}
return render_to_response('map.html', ctx, context_instance=RequestContext(request))
def logout_view(request):
messages.add_message(request, messages.INFO, "%s , te has deslogueado correctamente" %request.user)
logout(request)
print "[logout] Usuario deslogueado"
return HttpResponseRedirect("/")
|
#!/usr/bin/python
# Copyright (c) Members of the EGEE Collaboration. 2004.
# See http://www.eu-egee.org/partners/ for details on the copyright
# holders.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from xml import sax
import socket
import sys
import time
# Idiom for Python enums
class ServerType:
MDSERVER, MDREPDAEMON = range(2)
class RepDaemonSession:
def __init__(self, node, logDict):
self.node = node
self.logsSent = int(logDict["repdaemon.Session." + node + ".LogsSent"])
self.dataSent = int(logDict["repdaemon.Session." + node + ".DataSent"])
self.ackedXid = long(logDict["repdaemon.Session." + node + ".AckedXid"])
self.sentXid = long(logDict["repdaemon.Session." + node + ".SentXid"])
self.uptime = int(logDict["repdaemon.Session." + node + ".Uptime"])
self.sessionID = int(logDict["repdaemon.Session." + node + ".SessionID"])
def __str__(self):
return str(self.__dict__)
class RepDaemonLog:
def __init__(self, logDict):
self.logsPending = int(logDict["repdaemon.LogsPending"])
self.logsSentTotal = int(logDict["repdaemon.LogsSent.Total"])
self.dataSentTotal = int(logDict["repdaemon.DataSent.Total"])
self.writeXid = long(logDict["repdaemon.WriteXid"])
self.sessionsCurrent = int(logDict["repdaemon.Sessions.Current"])
self.sessionsTotal = int(logDict["repdaemon.Sessions.Total"])
self.uptime = int(logDict["repdaemon.Uptime"])
self.nodeName = logDict["NodeName"]
self.sessions = {}
for x in logDict:
if x.startswith("repdaemon.Session."):
# Extract the node name
tokens = x.split(".")
node = tokens[2]
# Add this node if it wasn't already found
if (node not in self.sessions):
self.sessions[node] = RepDaemonSession(node, logDict)
def __str__(self):
lines = [x + ":" + str(self.__dict__[x]) for x in self.__dict__]
lines = filter(lambda x: not x.startswith("sessions:"), lines)
lines.sort()
out = "\n".join(lines)
out += "\nActive Sessions\n "
out += "\n ".join([str(x) for x in self.sessions.values()])
return out
class MDServerLog:
def __init__(self, logDict):
self.preparedConnections = int(logDict["PreparedConnections"])
self.maxSessions = int(logDict["MaxSessions"])
self.usedSessions = int(logDict["UsedSessions"])
self.usedConnections = int(logDict["UsedConnections"])
self.maxConnections = int(logDict["MaxConnections"])
self.sessionStorage = logDict["SessionStorage"]
self.uptime = int(logDict["Uptime"])
self.nodeName = logDict["NodeName"]
# self.logsWritten = int(logDict["repmaster.LogsWritten"])
# self.updates = int(logDict["mdserver.Updates"])
# self.commands = int(logDict["mdserver.Commands"])
def __str__(self):
lines = [x + ":" + str(self.__dict__[x]) for x in self.__dict__]
return "\n".join(lines)
class ContentGenerator(sax.handler.ContentHandler):
"""
Parses the XML answer obtained from the server
"""
def __init__(self):
sax.handler.ContentHandler.__init__(self)
self.dict = {}
self.key = None
self.nextKey = False
# ContentHandler methods
def startDocument(self):
pass
def startElement(self, name, attrs):
self.nextKey = (name == "Key")
def endElement(self, name):
pass
def characters(self, content):
content = content.strip()
if len(content) == 0:
return
if self.nextKey:
self.key = content
# Make sure the map has an entry for this key, even if
# the server sends an empty value string.
self.dict[self.key] = ""
self.nextKey = False
else:
if self.key != None:
self.dict[self.key] = content
self.key = None
def ignorableWhitespace(self, content):
pass
def processingInstruction(self, target, data):
pass
class Parser:
def __init__(self):
self.handler = ContentGenerator()
def parse(self, serverType, xmlData):
self.handler.dict.clear()
sax.parseString(xmlData, self.handler)
if serverType == ServerType.MDSERVER:
return MDServerLog(self.handler.dict)
elif serverType == ServerType.MDREPDAEMON:
return RepDaemonLog(self.handler.dict)
else:
return self.handler.dict
def pollAMGA(host, port):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((host, port))
data = ""
# Read the three header lines
while True:
data += s.recv(1024)
if data.count("\n") == 3:
break
if data.startswith("ARDA Metadata Server"):
instance = ServerType.MDSERVER
elif data.startswith("ARDA Metadata Replication Daemon"):
instance = ServerType.MDREPDAEMON
else:
raise Exception("Unknown server identifier: " + data)
# Send the command
s.send('statistics\n\n')
data = ""
while True:
newData = s.recv(4096)
if newData == "":
break
data += newData
s.close()
return (instance, data)
def main():
from optparse import OptionParser
parser = OptionParser()
parser.add_option("-p", "--port", dest="port", type="int", default=8822,
help="AMGA instance port")
parser.add_option("-i", "--interval", dest="interval", type="int", default=1,
help="Poll interval")
(opts, args) = parser.parse_args()
if len(args) == 0:
host = "localhost"
else:
host = args[0]
print "Querying: " + host + ":" + str(opts.port)
parser = Parser()
log = None
# count = 0
while True:
(serverType, xmlData) = pollAMGA(host, opts.port)
#print xmlData
oldLog = log
log = parser.parse(serverType, xmlData)
print log
# count += 1
# print "Queries:",count
time.sleep(opts.interval)
# while True:
# s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# s.connect((host, opts.port))
# while True:
# data = s.recv(1024)
# if data == "":
# break
# count += 1
# print "Queries:",count
if __name__ == "__main__":
sys.exit(main())
|
class Number:
    def check(self, ch):
        # isalpha()/isdigit() avoid the crash that int(ch) caused for non-numeric input
        if ch.isalpha():
            print("It's an alphabet")
        elif ch.isdigit():
            print("It's a number")

ss = Number()
ch = input("Enter a character: ")
ss.check(ch)
|
import time
import math  # needed for math.sqrt() in stiffness_assembly()
import numpy as num
import scipy as sci
import sympy as sym
from sympy import *
from numpy import *
from scipy import *
from scipy.sparse import *
x1 = 0; x2 = 0; x3 = 0; x4 = 0; y1 = 0; y2 = 0; y3 = 0; y4 = 0 # Initializing all variables necessary for the program
range_x = 0; range_y = 0; no_of_elements = 0; range_x_temp = 0; range_y_temp = 0
N1 = 0; N2 = 0; N3 = 0; N4 = 0; dN1s = 0; dN2s = 0; dN3s = 0; dN4s = 0; dN1t = 0; dN2t = 0; dN3t = 0; dN4t = 0
mx = []; my = []; ST = []; k_small = []; row = []; col = []; data = []
element = {}; xst = {}; yst = {}; J = {}; J_inv = {}; J_det = {}; XT = {}; XT_trans = {}; Ke = {}; K = {}; K_temp = {}; row_stiffness = {}; col_stiffness = {}
s = symbols('s'); t = symbols('t'); k = symbols('k')
n = 0
def input_geometry(): # First step of the program where the geometry is specified
global x1, x2, x3, x4, y1, y2, y3, y4
print ("""Please enter the corresponding coordinate values for the geometry under study""")
ex = [float(input("x1\n> ")), float(input("x2\n> ")), float(input("x3\n> ")), float(input("x4\n> "))]
wy = [float(input("y1\n> ")), float(input("y2\n> ")), float(input("y3\n> ")), float(input("y4\n> "))]
ex.sort()
wy.sort()
x1 = ex[-1]; x2 = ex[0]; x3 = x2; x4 = x1
y1 = wy[-1]; y4 = wy[0]; y2 = y1; y3 = y4 # This is the part that sorts out the geometry irrespective of the way the coordinates are entered
mesh_generation() # Calls for the next step
def mesh_generation(): # Second step of the program where the mesh is generated
global x1, x2, x3, x4, y1, y2, y3, y4, range_x, range_y, no_of_elements, mx, my, range_x_temp, range_y_temp
print ("""Please enter what kind of mesh you want.
1. Enter 'coarse' for a rough mesh.
2. Enter 'normal' for a normal mesh.
3. Enter 'fine' for a smooth mesh.""")
mesh_type = input("> ")
if mesh_type == 'coarse': # Coarse mesh splits the geometry into 5 equal horizontal and vertical parts
x = num.linspace(x2, x1, 5)
y = num.linspace(y4, y1, 5)
mx, my = meshgrid(x, y)
print(mx)
print(my)
range_x_temp, range_y_temp = mx.shape
print(mx.shape)
print(my.shape)
range_x = range_x_temp-1
range_y = range_y_temp-1
print(range_x, range_y)
no_of_elements = range_x*range_y
print(no_of_elements)
elif mesh_type == 'normal': # Normal mesh splits the geometry into 10 equal horizontal and vertical parts
x = num.linspace(x2, x1, 10)
y = num.linspace(y4, y1, 10)
mx, my = meshgrid(x, y)
print(mx)
print(my)
range_y_temp, range_x_temp = mx.shape
range_x = range_x_temp-1
range_y = range_y_temp-1
print (range_x, range_y)
no_of_elements = range_x*range_y
print(no_of_elements)
elif mesh_type == 'fine': # Fine mesh splits the geometry into 20 equal horizontal and vertical parts
x = num.linspace(x2, x1, 20)
y = num.linspace(y4, y1, 20)
mx, my = meshgrid(x, y)
print(mx)
print(my)
range_x_temp, range_y_temp = mx.shape
print(mx.shape)
range_x = range_x_temp-1
range_y = range_y_temp-1
print (range_x, range_y)
no_of_elements = range_x*range_y
print(no_of_elements)
elif mesh_type == 'sample': # Sample mesh splits the geometry into any equal horizontal and vertical parts specified by the programmer. Can only be tweaked from inside the code.
x = num.linspace(x2, x1, 3)
y = num.linspace(y4, y1, 3)
mx, my = meshgrid(x, y)
print(mx)
print(my)
range_x_temp, range_y_temp = mx.shape
print(mx.shape)
range_x = range_x_temp-1
range_y = range_y_temp-1
print (range_x, range_y)
no_of_elements = range_x*range_y
print(no_of_elements)
else:
mesh_generation()
input()
four_node_iso() # Calls for the next step of the process
def boundary_conditions(): # Sixth step of the process that gathers temperature boundary conditions from the user. Not yet implemented.
print ("""Here you will specify the boundary conditions for the problem.
Temperature will assume the unit of Kelvin, heat flux will assume the unit of W/m2, convection coefficient will assume the unit of W/m2K and heat generation will assume the unit of W/m3.
When specifying boundary conditions, please enter only the numerical values and not the units.
Enter 'temp' for temperature boundary condition.
Enter 'conv' for convection boundary condition.
Enter 'flux' for heat flux boundary condition.
Enter 'gen' for heat flux generation.
Enter 'done' once everything is specified.""")
type = input("> ")
if type == 'temp':
print ("""Please choose the boundary in which you want to apply temperature boundary conditions.""")
print ("Edge 1 is between the coordinates (%d, %d) and (%d, %d)" % (x1, y1, x2, y2))
print ("Edge 2 is between the coordinates (%d, %d) and (%d, %d)" % (x2, y2, x3, y3))
print ("Edge 3 is between the coordinates (%d, %d) and (%d, %d)" % (x3, y3, x4, y4))
print ("Edge 4 is between the coordinates (%d, %d) and (%d, %d)" % (x4, y4, x1, y1))
print ("""Please enter the no. of the edge you want to select""")
edge = input("> ")
if edge == '1':
temp_1 = int(input("Please enter the temperature\n> "))
boundary_conditions()
elif edge == '2':
temp_2 = int(input("Please enter the temperature\n> "))
boundary_conditions()
elif edge == '3':
temp_3 = int(input("Please enter the temperature\n> "))
boundary_conditions()
elif edge == '4':
temp_4 = int(input("Please enter the temperature\n> "))
boundary_conditions()
else:
print("Sorry, that is not a valid value")
boundary_conditions()
elif type == 'conv':
print ("""Please choose the boundary in which you want to apply convection boundary conditions.""")
print ("Edge 1 is between the coordinates (%d, %d) and (%d, %d)" % (x1, y1, x2, y2))
print ("Edge 2 is between the coordinates (%d, %d) and (%d, %d)" % (x2, y2, x3, y3))
print ("Edge 3 is between the coordinates (%d, %d) and (%d, %d)" % (x3, y3, x4, y4))
print ("Edge 4 is between the coordinates (%d, %d) and (%d, %d)" % (x4, y4, x1, y1))
print ("""Please enter the no. of the edge you want to select""")
edge = input("> ")
if edge == '1':
temp_amb_1 = int(input("Please enter the value of the ambient temperature\n> "))
conv_coeff_1 = int(input("Please enter the value of the convection coefficient\n> "))
boundary_conditions()
elif edge == '2':
temp_amb_2 = int(input("Please enter the value of the ambient temperature\n> "))
conv_coeff_2 = int(input("Please enter the value of the convection coefficient\n> "))
boundary_conditions()
elif edge == '3':
temp_amb_3 = int(input("Please enter the value of the ambient temperature\n> "))
conv_coeff_3 = int(input("Please enter the value of the convection coefficient\n> "))
boundary_conditions()
elif edge == '4':
temp_amb_4 = int(input("Please enter the value of the ambient temperature\n> "))
conv_coeff_4 = int(input("Please enter the value of the convection coefficient\n> "))
boundary_conditions()
else:
print("Sorry, that is not a valid value")
boundary_conditions()
elif type == 'flux':
print ("""Please choose the boundary in which you want to apply heat flux boundary conditions.""")
print ("Edge 1 is between the coordinates (%d, %d) and (%d, %d)" % (x1, y1, x2, y2))
print ("Edge 2 is between the coordinates (%d, %d) and (%d, %d)" % (x2, y2, x3, y3))
print ("Edge 3 is between the coordinates (%d, %d) and (%d, %d)" % (x3, y3, x4, y4))
print ("Edge 4 is between the coordinates (%d, %d) and (%d, %d)" % (x4, y4, x1, y1))
print ("""Please enter the no. of the edge you want to select""")
edge = input("> ")
if edge == '1':
    heat_flux_1 = int(input("Please enter the value of the heat flux in this edge\n> "))
    boundary_conditions()
elif edge == '2':
    heat_flux_2 = int(input("Please enter the value of the heat flux in this edge\n> "))
    boundary_conditions()
elif edge == '3':
    heat_flux_3 = int(input("Please enter the value of the heat flux in this edge\n> "))
    boundary_conditions()
elif edge == '4':
    heat_flux_4 = int(input("Please enter the value of the heat flux in this edge\n> "))
    boundary_conditions()
else:
    print ("Sorry, that is not a valid value")
    boundary_conditions()
elif type == 'gen':
heat_gen = int(input("Please enter the value of the heat generation per unit volume within the object\n> "))
boundary_conditions()
elif type == 'done':
stiffness_matrix()
else:
print ("Sorry, that is not a valid command")
boundary_conditions()
# four_node_iso()  # redundant: mesh_generation() already calls four_node_iso(), which is defined just below
def four_node_iso(): # Third step of the process which implements a four node isoparametric element for the analysis
global N1, N2, N3, N4, k_small, ST, dN1s, dN2s, dN3s, dN4s, dN1t, dN2t, dN3t, dN4t, s, t, k
N1 = ((1+s)*(1+t)/4) # Shape functions
N2 = ((1-s)*(1+t)/4)
N3 = ((1-s)*(1-t)/4)
N4 = ((1+s)*(1-t)/4)
dN1s = sym.diff(N1, s); dN1t = sym.diff(N1, t) # Differentiating the shape functions
dN2s = sym.diff(N2, s); dN2t = sym.diff(N2, t)
dN3s = sym.diff(N3, s); dN3t = sym.diff(N3, t)
dN4s = sym.diff(N4, s); dN4t = sym.diff(N4, t)
ST = sym.Matrix([[dN1s, dN2s, dN3s, dN4s], [dN1t, dN2t, dN3t, dN4t]]) # Assembling the differentiated shape functions
print(ST)
k = 4 # k value can be tweaked here
k_small = sym.Matrix([[k, 0], [0, k]]) # Calculating the k small matrix which will be the same for all elements since all elements are identical
print(k_small)
input()
stiffness_matrix() # Calls for the next step of the process
def stiffness_matrix(): # Fourth step of the process which calculates the element stiffness matrices for all the elements
global range_x_temp, range_y_temp, no_of_elements, mx, my, element, xst, yst, J, J_inv, J_det, XT, XT_trans, Ke, K, K_temp, N1, N2, N3, N4, ST, k_small, s, t, k
j = 0
i = 0
for h in range(no_of_elements):
element[h] = sym.Matrix([[mx[i+1][j+1], my[i+1][j+1]], # Splitting the individual element coordinate matrices to extract the values
[mx[i+1][j], my[i+1][j]],
[mx[i][j], my[i][j]],
[mx[i][j+1], my[i][j+1]]])
j += 1
if j == range_y:
j = 0
i += 1
print(element)
input()
xst[0] = (element[0][0]*N1)+(element[0][2]*N2)+(element[0][4]*N3)+(element[0][6]*N4); print('x =', xst[0]); input() # Calculating the coordinate values in the isoparametric coordinates using
yst[0] = (element[0][1]*N1)+(element[0][3]*N2)+(element[0][5]*N3)+(element[0][7]*N4); print('y =', yst[0]); input() # the coordinate values from the cartesian coordinates
J[0] = sym.Matrix([[sym.diff(xst[0], s), sym.diff(yst[0], s)], [sym.diff(xst[0], t), sym.diff(yst[0], t)]]); print('Jacobian =', J[0]); input() # Jacobian matrix
J_inv[0] = J[0].inv(); print('Jacobian inverse =', J_inv[0]); input() # Jacobian inverse
J_det[0] = J[0].det(); print('Jacobian determinant =', J_det[0]); input() # Determinant of the Jacobian matrix
XT[0] = J_inv[0]*ST; print('B =', XT[0]); input() # B matrix
XT_trans[0] = XT[0].transpose(); print('B transpose =', XT_trans[0]); input() # Transpose of B matrix
Ke[0] = sym.Matrix(4, 4, XT_trans[0]*k_small*XT[0]*J_det[0]).tolist(); print('Ke =', Ke[0]); input() # Substituting values into the stiffness matrix equation
K[0] = sym.zeros(4, 4).tolist()
for m in range(4): # Calculating stiffness matrices for each element
for n in range(4): # Calculating K values for each point in an element stiffness matrix
K[0][m][n] = (integrate(Ke[0][m][n], (t, -1, 1), (s, -1, 1))) # Integrating each term of the element stiffness matrix
print ('Element Stiffness Matrix =', K[0]); input()
stiffness_assembly() # Calls for the next step of the process
def stiffness_assembly(): # Fifth step of the process that assembles all the element stiffness matrices into a single global stiffness matrix
global n, mapping, K, row, col, data, row_stiffness, col_stiffness
n = int(math.sqrt(no_of_elements))
mapping = num.array([2, 3, 1, 0]) # Mapping to help match values from element stiffness matrices to their respective positions in the global stiffness matrix
element_stiffness = num.matrix([[4,-1,-2,-1], [-1,4,-1,-2], [-2,-1,4,-1], [-1,-2,-1,4]]).tolist()
a = 0 # Calculates the row index values
for row_index in range ((n**2)+(n-1)):
if (row_index+1) % (n+1) == 0 and row_index != 0:
continue
row_stiffness[a] = num.array([row_index, row_index+1, row_index+n+1, row_index+n+2])
a += 1
print(row_stiffness)
b = 0 # Calculates the column index values
for column_index in range((n**2)+(n-1)):
if (column_index+1) % (n+1) == 0 and column_index != 0:
continue
col_stiffness[b] = num.array([column_index, column_index+1, column_index+n+1, column_index+n+2])
b += 1
print(col_stiffness)
for i in range(n**2):
c = 0
column_final = col_stiffness[i]
for j in range(4):
row_final = row_stiffness[i][c]
c += 1
row_element = mapping[j]
col.append(column_final)
for k in range(4):
row.append(row_final)
column_element = mapping[k]
data.append(K[0][row_element][column_element])
row = ravel(row)
col = ravel(col)
data = ravel(data)
print(row)
print(shape(row))
input()
print(col)
print(shape(col))
input()
print(data)
print(shape(data))
input()
stiffness = coo_matrix((data, (row, col)), shape=((n+1)**2,(n+1)**2), dtype = int8).toarray() # Assembles the global stiffness matrix
print(stiffness)
input() # Should call for the next step in the process which is getting boundary conditions from the user
input_geometry() # Starts the program from input geometry
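# A small illustration (a sketch, not called by the interactive program above) of why
# coo_matrix suits stiffness assembly: duplicate (row, col) entries are summed when the
# matrix is converted, which is exactly how overlapping element contributions combine.
def _coo_assembly_demo():
    demo_rows = num.array([0, 1, 1, 1])
    demo_cols = num.array([0, 1, 1, 2])
    demo_data = num.array([4.0, -1.0, -1.0, 2.0])
    # the two entries at (1, 1) are added together, giving -2.0 in the dense result
    return coo_matrix((demo_data, (demo_rows, demo_cols)), shape=(3, 3)).toarray()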
|
import numpy as np
import matplotlib.pyplot as plt
def load_data(filename):
f= open(filename,'r')
tmp_str=f.readline()
tmp_arr=tmp_str[:-1].split(' ')
N=int(tmp_arr[0]);n_row=int(tmp_arr[1]);n_col=int(tmp_arr[2])
print("N=%d, row=%d, col=%d" %(N,n_row,n_col))
data=np.zeros([N,n_row*n_col+1])
for n in range(N):
tmp_str=f.readline()
tmp_arr=tmp_str[:-1].split(' ')
for i in range(n_row*n_col+1):
data[n][i]=int(tmp_arr[i])
f.close()
return N,n_row,n_col,data
def dataset():
N,n_row,n_col,data=load_data('./AND.txt')
X_train=data[:N,:-1];Y_train=data[:N,-1]
return X_train, Y_train, N
def gradient(X, Xm, Y, Ym, N, alpha, lr):
for n in range(N):
value = (Ym - predictor(X, Xm, N, alpha)) * ((np.dot(Xm, X[n]) +1)**2)
alpha[n] = alpha[n] + (lr*value)
return alpha
def predictor(X, Xm, N, alpha):
s = 0
for n in range(N):
s = s + ((np.dot(Xm, X[n]) +1)**2)*alpha[n]
sigma = sigmoid(s)
return sigma
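# A possible speed-up (a sketch, not used by the training loop above): the polynomial
# kernel values (X[m].X[n] + 1)**2 never change during training, so they can be
# precomputed once into a Gram matrix, turning predictor() into a single dot product.
def precompute_gram(X):
    # K[m, n] = (X[m] . X[n] + 1) ** 2 for every pair of training points
    return (np.dot(X, X.T) + 1.0) ** 2

def predictor_from_gram(K, m, alpha):
    # same value as predictor(X, X[m], N, alpha) when K = precompute_gram(X)
    return sigmoid(np.dot(K[m], alpha))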
def sigmoid(s):
large=30
if s<-large: s=-large
if s>large: s=large
return (1 / (1 + np.exp(-s)))
def cost(X,Y,N, alpha):
En=0;epsi=1.e-12
for n in range(N):
y_pred =predictor(X, X[n], N, alpha)
if y_pred<epsi: y_pred=epsi
if y_pred>1-epsi:y_pred=1-epsi
En=En+Y[n]*np.log(y_pred)+(1-Y[n])*np.log(1-y_pred)
En=-En/N
return En
def stocastic(X, Y, N, max_it, lr):
alpha = np.zeros(N)
error=[];error.append(cost(X,Y,N, alpha))
epsi=0.5
it=0
while(error[-1]>epsi):
idx = np.random.randint(0, N)
Xm = X[idx, :]
Ym = Y[idx]
alpha = gradient(X, Xm, Y, Ym, N, alpha, lr)
error.append(cost(X,Y,N, alpha))
print('iteration %d, cost=%.2f\n' %(it,error[-1]),end='')
# Second condition to stop: give up after max_it iterations
it=it+1
if(it>max_it): break
return alpha, error
def training(X, Y, N, it, lr):
alpha, error = stocastic(X,Y, N, it, lr)
plot_cost(error)
print('\nin-samples error=%.2f' % (error[-1]))
return alpha
def validation(X, Y, N, alpha):
predictions = []
for n in range(N):
y_pred = predictor(X, X[n], N, alpha)
if y_pred > 0.5:
y_pred = 1
else:
y_pred = 0
prediction = []
prediction.append(y_pred)
prediction.append(Y[n])
predictions.append(prediction)
plot_prediction(predictions)
print('out-samples error=%.2f\n' % (cost(X,Y,N, alpha)))
return predictions
def confusion_matrix(predictions):
TP = 0
TN = 0
FN = 0
FP = 0
for prediction in predictions:
#Counting the number of true positives and true negatives
if prediction[0] == prediction[1]:
if prediction[0] == 1 and prediction[1] == 1:
TP += 1
else:
TN += 1
else:
#Counting the number of false positives and false negatives
if prediction[0] == 1 and prediction[1] == 0:
    FP += 1  # predicted positive but actually negative -> false positive
else:
    FN += 1  # predicted negative but actually positive -> false negative
matrix = np.matrix([[TP, FP], [FN, TN]])
try:
recall = TP / (TP + FN)
except Exception:
recall = 0
try:
precision = TP / (TP + FP)
except Exception:
precision = 0
try:
accuracy = (TP + TN) / (TP + TN + FP + FN)
except Exception:
accuracy = 0
return accuracy, precision, recall, matrix
def plot_cost(error):
plt.plot(range(len(error)), error, marker='o')
plt.xlabel('Iterations')
plt.ylabel('Cross-entropy cost')
plt.ylim([0,max(error)+1])
plt.show()
def plot_prediction(predictions):
y_test = []
y_pred = []
for prediction in predictions:
y_test.append(prediction[1])
y_pred.append(prediction[0])
plt.subplot(2,1,1)
plt.step(range(len(y_test)),y_test, where="mid", color='g')
plt.xlabel('x_test')
plt.title('REAL VALUES')
plt.ylabel('y_test')
plt.subplot(2,1,2)
plt.step(range(len(y_pred)),y_pred, where="mid", color='r')
plt.xlabel('x_test')
plt.title('PREDICTION VALUES')
plt.ylabel('y_prediction')
plt.tight_layout()
plt.show()
def main():
X_train, Y_train, N = dataset()
max_iterations = 450
learning_rate = 0.02
alpha = training(X_train, Y_train, N, max_iterations, learning_rate)
predictions = validation(X_train, Y_train, N, alpha)
accuracy, precision, recall, matrix = confusion_matrix(predictions)
print("Accuracy: %.2f" % (accuracy))
print("Precision: %.2f" % (precision))
print("Recall: %.2f" % (recall))
print("####Confusion Matrix####")
print(matrix)
print("########################")
main()
|
import json
from pprint import pprint
with open('file.json') as f:
    data = json.load(f)
print (data)
pprint(data)
|
from django.conf.urls import include, url
from django.contrib import admin
urlpatterns = [
url(r'^sentiment/', include('sentiment.urls', namespace='sentiment')),
url(r'^twitter/', include('sentiment.urls', namespace='twitter')),
url(r'^stocks/', include('stocks.urls', namespace='stocks')),
url(r'^users/', include('users.urls', namespace='users')),
url(r'^$', include('pages.urls', namespace='pages')),
url(r'^admin/', include(admin.site.urls)),
]
|
#!/usr/bin/env python
from flaskext.script import Manager, prompt_bool
from marked import app
# import fixtures as _fixtures
from marked.database import init_db
import os
manager = Manager(app)
@manager.shell
def make_shell_context():
from marked import models
return dict(app=app, mod=models)
@manager.command
def newdb():
"""Deletes the database, and creates a new empty one."""
if prompt_bool("Are you sure you want to lose all your data"):
try:
os.remove('test.db')
except OSError:
print "Database did not exist"
init_db()
# @manager.command
# def test():
# """docstring for tests"""
# from unittest import TestLoader, TextTestRunner
# cur_dir = os.path.dirname(os.path.abspath(__file__))
# loader = TestLoader()
# test_suite = loader.discover(cur_dir)
# runner = TextTestRunner(verbosity=2)
# runner.run(test_suite)
if __name__ == "__main__":
manager.run()
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2016-08-14 14:42
from __future__ import unicode_literals
from django.db import migrations, models
import django_extensions.db.fields
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Player',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', django_extensions.db.fields.CreationDateTimeField(auto_now_add=True, verbose_name='created')),
('modified', django_extensions.db.fields.ModificationDateTimeField(auto_now=True, verbose_name='modified')),
('first_name', models.CharField(max_length=64, verbose_name='first name')),
('last_name', models.CharField(max_length=64, verbose_name='last name')),
('photo', models.ImageField(null=True, upload_to='players_photos', verbose_name='photo')),
('PERSON_ID', models.PositiveIntegerField(unique=True, verbose_name='PERSON_ID')),
('PLAYERCODE', models.CharField(max_length=128, unique=True, verbose_name='PLAYERCODE')),
],
options={
'verbose_name_plural': 'players',
'ordering': ['last_name', 'first_name'],
'verbose_name': 'player',
},
),
]
|
#-------------------------------- IMPORTS -----------------------------------
import requests
from bs4 import BeautifulSoup
import pandas as pd
import numpy as np  # needed for np.concatenate() in ra_dec_to_pixels()
import matplotlib.pyplot as plt
#------------------------ FUNCTION DEFINITIONS ------------------------------
def build_SQL_query(ra_range, dec_range, n_obj):
"""
Query is designed to return the following columns: catalog ID, right ascension, declination,
green-band magnitude, red-band magnitude, and infrared-band magnitude.
The SQL query is formatted for
http://skyserver.sdss.org/dr14/en/tools/search/x_results.aspx?searchtool=SQL&TaskName=Skyserver.Search.SQL&syntax=NoSyntax&ReturnHtml=true&cmd=
'ra_range' and 'dec_range' are two-element iterables representing
ranges in degrees of right ascension and declination, respectively
'n_obj' is how many hits to return
Returns a string containing the SQL query.
"""
select_stmt = 'SELECT TOP ' + str(n_obj) + ' p.objID, p.ra, p.dec, p.g, p.r, p.i'
from_stmt = 'FROM photoobj as p'
where_stmt = 'WHERE p.ra BETWEEN ' + \
str(ra_range[0]) + ' AND ' + str(ra_range[1]) + \
' AND p.dec BETWEEN ' + str(dec_range[0]) + ' AND ' + str(dec_range[1]) + \
' AND p.r > 1' + \
' AND p.mode = 1'
order_stmt = 'ORDER BY p.r ASC'
query = ' '.join([select_stmt, from_stmt, where_stmt, order_stmt])
print('')
print('QUERY:')
print(query)
return query
def prep_str_for_http(s):
"""
SQL query will be over HTTP, so need to swap out certain
characters
"""
new_s = s.replace(' ', '+')
new_s = new_s.replace(',', '%2C')
new_s = new_s.replace('\n', '%0D%0A')
return new_s
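# A possible alternative (a sketch, not used above): urllib.parse.quote_plus performs
# similar percent-encoding in one call. Note it encodes '\n' as '%0A' rather than the
# '%0D%0A' used above, which may or may not matter to the SkyServer endpoint.
def prep_str_for_http_alt(s):
    from urllib.parse import quote_plus
    # spaces become '+', ',' becomes '%2C', newlines become '%0A'
    return quote_plus(s)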
def build_query_URL(sql_str):
"""
Formats the SQL string into an URL for HTTP
'sql_str' is a properly formatted SQL query
Returns the URL string.
"""
url_1 = 'http://skyserver.sdss.org/dr14/en/tools/search/x_results.aspx?searchtool=SQL&TaskName=Skyserver.Search.SQL&syntax=NoSyntax&ReturnHtml=true&cmd='
url_2 = prep_str_for_http(sql_str)
url_3 = '&format=html&TableName='
return url_1 + url_2 + url_3
def submit_query_and_parse_response(ra_range, dec_range, n_obj):
"""
'ra_range' and 'dec_range' are two-element iterables representing
ranges in degrees of right ascension and declination, respectively
'n_obj' is how many hits to return
Returns dataframe of catalog data, according to definitions in
build_SQL_query()
"""
sql_str = build_SQL_query(ra_range, dec_range, n_obj)
url = build_query_URL(sql_str)
response = requests.get(url)
raw_html = response.text
soup = BeautifulSoup(raw_html, 'lxml')
table_body = soup.find("table")
rows = table_body.find_all("tr")
header = rows[0]
col_names = [c.text for c in header.find_all('td')]
data_rows = rows[1:]
row_data_lst = []
for i, r in enumerate(data_rows):
row_data = [td.text for td in r.find_all("td")]
row_data_lst.append(row_data)
df = pd.DataFrame(row_data_lst, columns = col_names)
df['objID'] = df['objID'].astype(str)
df['ra'] = df['ra'].astype(float)
df['dec'] = df['dec'].astype(float)
df['g'] = df['g'].astype(float)
df['r'] = df['r'].astype(float)
df['i'] = df['i'].astype(float)
return df
def ra_dec_to_pixels(celestial_coords, dec, delta_dec, ra, delta_ra, img_size):
"""
Convert celestial coordinates into image coordinates.
'celestial_coords' is N x 2 array, with RA values in column 0, declination in column 1
'dec', and 'ra', are the center of the search window.
'delta_dec' and 'delta_ra' are the half-widths of the window.
The search window is the same as the star field window; they are coextensive.
We assume here that the window is square, which, in general, means that delta_ra
will have to be larger than delta_dec, because RA is an azimuthal angle, which subtends smaller
distances at large declinations, i.e. closer to the poles.
Higher values of RA will be on the left side of the image, following the convention
that the star field is displayed with the Eastern horizon on the left side.
Higher values of declination will be on top side of image.
Image format will have row zero displayed on top of image, and column zero
displayed on left edge.
"""
image_y = ((dec + delta_dec)-celestial_coords[:,1]) * \
(img_size / (2*delta_dec))
image_x = -1*(celestial_coords[:,0] - (ra + delta_ra)) * \
(img_size / (2*delta_ra))
return np.concatenate([image_x.reshape(-1,1),
image_y.reshape(-1,1)], axis=1)
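# A small usage sketch with made-up numbers (not part of the pipeline above): a window
# 2 degrees wide in both RA and dec, centred on (ra, dec) = (180.0, 45.0), mapped onto
# a 512 x 512 pixel star-field image.
if __name__ == '__main__':
    coords = np.array([[180.0, 45.0],    # window centre
                       [181.0, 46.0]])   # higher RA and dec: upper-left corner
    pix = ra_dec_to_pixels(coords, dec=45.0, delta_dec=1.0,
                           ra=180.0, delta_ra=1.0, img_size=512)
    print(pix)  # centre maps to (256, 256); the second point maps to (0, 0)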
|
import numpy as np
import math
# Coordinates of the 30 points
points = [(0, 0), (20, 36), (96, 14), (14, 59), (15, 35), (59, 74), (6, 7), (65, 52), (12, 44), (-67, 73), (-23, 0),
(-61, 68), (-25, 92), (-87, 87), (-81, 11), (-3, 16), (-24, -90), (-31, -50), (-30, -43), (-74, -28),
(-24, -21), (-6, -30), (-95, -76), (29, -30), (3, -52), (30, -40), (45, -79), (64, -63), (29, -88), (95, -91)]
# Attribute value at each of the 30 points
field = [91, 62, 69, 29, -43, 11, 29, -45, 56, -59, 87, 88, 24, -51, -77, 57, -98, 25, 0, 80, -50, -85, 60, 10, -31, 78,
99, 51, -42, 98]
# Number of points: 30
count = len(field)
# Compute the mean attribute value
sum = 0.0
for i in range(count):
sum += field[i]
average = sum / count
# Build the spatial weight matrix
# using the inverse of the distance between each pair of points
w = np.ones((count, count), dtype=float)
for i in range(count):
for j in range(count):
if i != j:
d_ij = math.sqrt((points[i][0] - points[j][0]) ** 2 + (points[i][1] - points[j][1]) ** 2)
if d_ij > 0:
w[i][j] = 1.0 / d_ij
# Sum of cross-products of deviations (the numerator of Moran's I)
sum1 = 0.0
for i in range(count):
for j in range(count):
sum1 += w[i][j] * (field[i] - average) * (field[j] - average)
# Sum of squared deviations
sum2 = 0.0
for i in range(count):
sum2 += (field[i] - average) ** 2
# Total sum of the weights
s = 0.0
for i in range(count):
for j in range(count):
s += w[i][j]
Moran_I = count / s * sum1 / sum2
print(Moran_I)
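# A vectorized cross-check (a sketch, not required by the script above): the same
# Moran's I computed with NumPy broadcasting instead of the explicit double loops.
z = np.array(field, dtype=float) - average
pts = np.array(points, dtype=float)
dist = np.sqrt(((pts[:, None, :] - pts[None, :, :]) ** 2).sum(axis=2))
w_vec = np.ones((count, count))
w_vec[dist > 0] = 1.0 / dist[dist > 0]  # diagonal keeps its initial 1.0, as in the loops
moran_vec = count / w_vec.sum() * (w_vec * np.outer(z, z)).sum() / (z ** 2).sum()
print(moran_vec)  # should agree with Moran_I up to floating-point rounding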
|
import random as r
import time as t
import os
class Game():
incorrect = 0
count = 0
score_list = []
# Sentences used in the typing practice
Flower = []
Flower.append("여름장이란 애시당초에 글러서, 해는 아직 중천에 있건만")
Flower.append("장판은 벌써 쓸쓸하고 더운 햇발이 벌여 놓은 전 휘장 밑으로 등줄기 훅훅 볶는다.")
Flower.append("마을 사람들은 거지반 돌아간 뒤요, 팔리지 못한 나뭇군패가 길거리에 궁싯거리고들")
Rain = []
Rain.append("소년은 개울가에서 소녀를 보자 곧 윤초시네 증손녀 딸이라는 걸 알 수 있었다.")
Rain.append("소녀는 개울에다 손을 잠그고 물장난을 하고 있는 것이다. 서울서는 이런 개울물을 보지 못기나하니한듯이.")
Rain.append("벌써 며칠 째 소녀는, 학교에서 돌아오는 길에 물장난이었다. 소년은 개울둑에 앉아버렸다.")
Ball = []
Ball.append("사람들은 아버지를 난쟁이라고 불렀다. 사람들은 옳게 보았다. 아버지는 난쟁이다.")
Ball.append("불행하게도 사람들은 아버지를 보는 것 하나만 옳았다. 그 밖의 것들은 하나도 옳지 않았다.")
Ball.append("나는 아버지, 어머니, 영호, 영희, 그리고 나를 포함한 다섯 식구의 모든 것을 걸고 그들이")
def pause_cls(self):
os.system("pause")
os.system("cls")
player = Game()
while True:
print("타자게임")
print("""
================
1. 게임 시작
2. 평균 타자 순위 출력
""")
select1 = int(input("선택하시오. "))
if select1 == 1:
os.system("cls")
input("\n=== Enter로 시작 ===")
os.system("cls")
print("""
=== 쳐보고 싶은 단어 선택하세요. ===
1. 메밀꽃 필 무렵
2. 소나기
3. 난쟁이가 쏘아 올린 작은 공
""")
select2 = int(input("선택하시오. "))
if select2 == 1:
os.system("cls")
print()
print(player.Flower[0])
start = t.time()
answer = input("")
print(player.Flower[1])
answer = input("")
print(player.Flower[2])
answer = input("")
end = t.time()
print("걸린 시간은 %.2f초입니다."%(end - start))
elif select2 == 2:
os.system("cls")
print(player.Rain[0])
answer = input("")
start = t.time()
print(player.Rain[1])
answer = input("")
print(player.Rain[2])
answer = input("")
end = t.time()
print("걸린 시간은 %.2f초입니다."%(end - start))
elif select2 == 3:
os.system("cls")
print(player.Ball[0])
answer = input("")
start = t.time()
print(player.Ball[1])
answer = input("")
print(player.Ball[2])
answer = input("")
end = t.time()
print("걸린 시간은 %.2f초입니다."%(end - start))
else:
print("잘 못 입력하셨습니다.")
player.score_list.append(end-start)
if select1 == 2:
player.score_list.sort()
count = 1
for i in player.score_list:
print("%d등 : %.2f"%(count,i))
count += 1
|
"""
Fractal trees and plants are among the easiest of fractal objects to
understand. They are based on the idea of self-similarity. Each of the branches is
a smaller version of the main trunk of the tree. The main idea in creating
fractal trees or plants is to have a base object and to then create smaller,
similar objects protruding from that initial object. This method is a
recursive method, meaning that it continues for each child down to a finite
number of steps.
"""
|
from flask import Flask, request, render_template, flash, url_for
from flask import Response
from werkzeug.utils import redirect
import psycopg2
from AirBnb import compute_predictions
from werkzeug.utils import secure_filename
import os
from flask import send_from_directory
from Airbnb_config import FeatureSelection, DateTimeColumns, Geo ,host_response_rate ,treat_missing_first, categorical_encoder, treat_missing_second ,Scaler_Min_Max, Mydimension_reducer
import pandas as pd
import numpy as np
app=Flask(__name__,template_folder='Html')
UPLOAD_FOLDER = 'Uploads'
ALLOWED_EXTENSIONS = {'csv', 'xlsx'}
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
app.secret_key = "AIApps"
@app.route('/')
def index():
return render_template('index.html')
@app.route('/AllApps')
def AllApps():
return render_template('All_Apps.html')
@app.route('/AirBnB')
def AirBnB():
return render_template('AirBnB.html')
def allowed_file(filename):
return '.' in filename and \
filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
@app.route('/upload_airbnb', methods = ['GET', 'POST'])
def upload_file():
if request.method == 'POST':
# check if the post request has the file part
if 'file' not in request.files:
flash('No file part')
return redirect(request.url)
file = request.files['file']
# if user does not select file, browser also
# submit an empty part without filename
if file.filename == '':
flash('No selected file')
return redirect(request.url)
if file and allowed_file(file.filename):
filename = secure_filename(file.filename)
file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
##########################Compute###########################
Predictions=compute_predictions('Uploads/'+filename)
# Predictions=compute_predictions('Uploads/',filename)
os.remove('Uploads/'+filename)
##########################Export###########################
resp = Response(Predictions.to_csv())
# flash("Task Completed!")
resp.headers["Content-Disposition"] = "attachment; filename=AirBNB_Predictions.csv"
resp.headers["Content-Type"] = "text/csv"
return resp
#
# return render_template('AirBnB.html')
if __name__ == '__main__':
app.run(debug=True)
|
from PIL import Image
import numpy as np
################ dilation
def dilation(kernel, input_name):
img_input = Image.open(input_name)
pixels_input = img_input.load()
dilation_output = Image.new(img_input.mode, img_input.size)
dilation_pixels = dilation_output.load()
# initial
for x in range(dilation_output.width):
for y in range(dilation_output.height):
dilation_pixels[x, y] = 0
for x in range(img_input.width):
for y in range(img_input.height):
if pixels_input[x, y] == 255:
flag = True
# check boundry
for z in range(len(kernel)):
x1 = x + kernel[z][0]
y1 = y + kernel[z][1]
if x1 < 0 or x1 > 511 or y1 < 0 or y1 > 511:
flag = False
break
if flag == True:
for z in range(len(kernel)):
x1 = x + kernel[z][0]
y1 = y + kernel[z][1]
dilation_pixels[x1, y1] = 255
return dilation_output
#dilation_output.save(output_name)
################ erosion
def erosion(kernel, input_name):
img_input = Image.open(input_name)
pixels_input = img_input.load()
erosion_output = Image.new(img_input.mode, img_input.size)
erosion_pixels = erosion_output.load()
# initial
for x in range(erosion_output.width):
for y in range(erosion_output.height):
erosion_pixels[x, y] = 0
for x in range(img_input.width):
for y in range(img_input.height):
flag = True
# check boundry
for z in range(len(kernel)):
x1 = x + kernel[z][0]
y1 = y + kernel[z][1]
if x1 < 0 or x1 > 511 or y1 < 0 or y1 > 511:
flag = False
break
if pixels_input[x1, y1] != 255:
flag = False
break
if flag == True:
erosion_pixels[x, y] = 255
return erosion_output
#erosion_output.save(output_name)
kernel = []
for i in range(5):
for j in range(5):
temp = [i-2, j-2]
kernel.append(temp)
kernel.remove([-2,-2])
kernel.remove([-2,2])
kernel.remove([2,-2])
kernel.remove([2,2])
output = dilation(kernel, "binary.bmp")
output.save("dilation.bmp")
output = erosion(kernel, "binary.bmp")
output.save("erosion.bmp")
output = dilation(kernel, "erosion.bmp")
output.save("opening.bmp")
output = erosion(kernel, "dilation.bmp")
output.save("closing.bmp")
################ h&m
kernel = [[0,0],[-1,0],[0,1]]
kernel2 = [[0,-1],[1,-1],[1,0]]
img_input = Image.open("binary.bmp")
pixels_input = img_input.load()
complement_output = Image.new(img_input.mode, img_input.size)
complement_pixels = complement_output.load()
for x in range(img_input.width):
for y in range(img_input.height):
if pixels_input[x, y] == 0:
complement_pixels[x, y] = 255
else:
complement_pixels[x,y] = 0
complement_output.save("complement.bmp")
output1 = erosion(kernel, "binary.bmp")
output1_pixels = output1.load()
output2 = erosion(kernel2, "complement.bmp")
output2_pixels = output2.load()
hnm_output = Image.new(output1.mode, output1.size)
hnm_pixels = hnm_output.load()
for x in range(output1.width):
for y in range(output1.height):
if output1_pixels[x, y] == 255 and output2_pixels[x, y] == 255:
hnm_pixels[x, y] = 255
hnm_output.save("hnm.bmp")
|
from sender.celery_app import celery_app
from sender.transport import Transport
@celery_app.task
def send_message(message_id):
"""Задача отправки сообщения
:param message_id: идентификатор сообщения
:return: результат отправки
"""
transport = Transport(message_id)
return transport.send()
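# A usage sketch (assuming a configured Celery broker): the task is normally enqueued
# asynchronously with .delay()/.apply_async() rather than called in-process.
if __name__ == "__main__":
    async_result = send_message.delay(42)  # 42 is a hypothetical message id
    print(async_result.id)                 # the Celery task id, not the send result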
|
(this was done in the console)
>>> #make a list with Monday, Tuesday, Wednesday
>>> week = ['Monday','Tuesday','Wednesday']
>>> week
['Monday', 'Tuesday', 'Wednesday']
>>> week[1]
'Tuesday'
>>> week[-2]
'Tuesday'
>>> week.append('Thursday')
>>> week
['Monday', 'Tuesday', 'Wednesday', 'Thursday']
>>> week[1:3]
['Tuesday', 'Wednesday']
>>> week.remove('Monday')
>>> week
['Tuesday', 'Wednesday', 'Thursday']
>>> week.sort()
>>> week
['Thursday', 'Tuesday', 'Wednesday']
>>> week.reverse()
>>> week
['Wednesday', 'Tuesday', 'Thursday']
>>> week[1]='Friday'
>>> week
['Wednesday', 'Friday', 'Thursday']
>>> len(week)
3
>>>
|
#With the "items" method, you can iterate over both keys and values of a dictionar
l={"france":"paris","india":"delhi"}
for country,capitals in l.items():
print("the capital of " + country + " is " + capitals + "")
|
import os
import sys
import logging
## ws_client and msg_ntk path
sys.path.insert(0, os.path.abspath("../ws_client"))
sys.path.insert(0, os.path.abspath("../msg_ntk"))
from ws_client import WebsocketClient
from msg_ntk import Map2DDataPUB
from msg_ntk import Map2DDataSUB
## LOGGING INFO
logging.basicConfig()
LOGGER = logging.getLogger(__name__)
LOGGER.setLevel(level=logging.INFO)
class EdgeComm():
def __init__(self):
# variables
self.ws_hostaddress = 'https://heroku-uni-socket.herokuapp.com/'
self.ws_sio = ""
# init
self.ws = WebsocketClient(self.ws_hostaddress,self.ws_msg)
self.map_2d_data_sub = Map2DDataSUB(self.map_2d_data_pub_cb)
def ws_msg(self,msg):
LOGGER.info(" ws -> edge_comm got msg: %s", msg)
def map_2d_data_pub_cb(self,msg):
LOGGER.info(" msg_ntk -> edge_comm got msg")
# emit
self.ws_sio.emit('map_data', {'map': "MAP DATA"})
def start_it(self):
# ws
self.ws.start_it()
# get ws_sio
self.ws_sio = self.ws.get_ws_sio()
# msg_ntk
self.map_2d_data_sub.daemon = True
self.map_2d_data_sub.start()
if __name__ == '__main__':
app = EdgeComm()
app.start_it()
|
"""OctreeLevelInfo and OctreeLevel classes.
"""
from __future__ import annotations
import logging
import math
from typing import TYPE_CHECKING, Dict, List, Optional
import numpy as np
from napari.layers.image.experimental.octree_chunk import (
OctreeChunk,
OctreeChunkGeom,
)
from napari.layers.image.experimental.octree_util import OctreeMetadata
LOGGER = logging.getLogger("napari.octree")
if TYPE_CHECKING:
from napari.types import ArrayLike
class OctreeLevelInfo:
"""Information about one level of the octree.
This should be a NamedTuple.
Parameters
----------
meta : OctreeMetadata
Information about the entire octree.
level_index : int
The index of this level within the whole tree.
"""
def __init__(self, meta: OctreeMetadata, level_index: int) -> None:
self.meta = meta
self.level_index = level_index
self.scale = 2**self.level_index
base = meta.base_shape
self.image_shape = (
int(base[0] / self.scale),
int(base[1] / self.scale),
)
tile_size = meta.tile_size
scaled_size = tile_size * self.scale
self.rows = math.ceil(base[0] / scaled_size)
self.cols = math.ceil(base[1] / scaled_size)
self.shape_in_tiles = [self.rows, self.cols]
self.num_tiles = self.rows * self.cols
class OctreeLevel:
"""One level of the octree.
An OctreeLevel is "sparse" in that it only contains a dict of
OctreeChunks for the portion of the octree that is currently being
rendered. So even if the full level contains hundreds of millions of
chunks, this class only contains a few dozens OctreeChunks.
This was necessary because even having a null reference for every
OctreeChunk in a level would use too much space and be too slow to
construct.
Parameters
----------
slice_id : int
The id of the OctreeSlice we are in.
data : ArrayLike
The data for this level.
meta : OctreeMetadata
The base image shape and other details.
level_index : int
Index of this specific level (0 is full resolution).
Attributes
----------
info : OctreeLevelInfo
Metadata about this level.
_tiles : Dict[tuple, OctreeChunk]
Maps (row, col) tuple to the OctreeChunk at that location.
"""
def __init__(
self,
slice_id: int,
data: ArrayLike,
meta: OctreeMetadata,
level_index: int,
) -> None:
self.slice_id = slice_id
self.data = data
self.info = OctreeLevelInfo(meta, level_index)
self._tiles: Dict[tuple, OctreeChunk] = {}
def get_chunk(
self, row: int, col: int, create=False
) -> Optional[OctreeChunk]:
"""Return the OctreeChunk at this location if it exists.
If create is True, an OctreeChunk will be created if one
does not exist at this location.
Parameters
----------
row : int
The row in the level.
col : int
The column in the level.
create : bool
If True, create the OctreeChunk if it does not exist.
Returns
-------
Optional[OctreeChunk]
The OctreeChunk if one existed or we just created it.
"""
try:
return self._tiles[(row, col)]
except KeyError:
if not create:
return None # It didn't exist so we're done.
rows, cols = self.info.shape_in_tiles
if row < 0 or row >= rows or col < 0 or col >= cols:
# The coordinates are not in the level. Not an exception because
# callers might be trying to get children just over the edge
# for non-power-of-two base images.
return None
# Create a chunk at this location and return it.
octree_chunk = self._create_chunk(row, col)
self._tiles[(row, col)] = octree_chunk
return octree_chunk
def _create_chunk(self, row: int, col: int) -> OctreeChunk:
"""Create a new OctreeChunk for this location in the level.
Parameters
----------
row : int
The row in the level.
col : int
The column in the level.
Returns
-------
OctreeChunk
The newly created chunk.
"""
level_index = self.info.level_index
meta = self.info.meta
layer_ref = meta.layer_ref
from napari.components.experimental.chunk._request import (
OctreeLocation,
)
location = OctreeLocation(
layer_ref, self.slice_id, level_index, row, col
)
scale = self.info.scale
tile_size = self.info.meta.tile_size
scaled_size = tile_size * scale
pos = np.array(
[col * scaled_size, row * scaled_size], dtype=np.float32
)
data = self._get_data(row, col)
# Create OctreeChunkGeom used by the visual for rendering this
# chunk. Size it based on the base image pixels, not based on the
# data in this level, so it's exact.
base = np.array(meta.base_shape[::-1], dtype=float)
remain = base - pos
size = np.minimum(remain, [scaled_size, scaled_size])
geom = OctreeChunkGeom(pos, size)
# Return the newly created chunk.
return OctreeChunk(data, location, geom)
def _get_data(self, row: int, col: int) -> ArrayLike:
"""Get the chunk's data at this location.
Parameters
----------
row : int
The row coordinate.
col : int
The column coordinate.
Returns
-------
ArrayLike
The data at this location.
"""
tile_size = self.info.meta.tile_size
array_slice = (
slice(row * tile_size, (row + 1) * tile_size),
slice(col * tile_size, (col + 1) * tile_size),
)
if self.data.ndim == 3:
array_slice += (slice(None),) # Add the colors.
return self.data[array_slice]
def log_levels(levels: List[OctreeLevel], start_level: int = 0) -> None:
"""Log the dimensions of each level nicely.
We take start_level so we can log the "extra" levels we created but
with their correct level numbers.
Parameters
----------
levels : List[OctreeLevel]
Print information about these levels.
start_level : int
Start the indexing at this number, shift the indexes up.
"""
from napari._vendor.experimental.humanize.src.humanize import intword
    def _dim_str(dim: tuple) -> str:
return f"({dim[0]}, {dim[1]}) = {intword(dim[0] * dim[1])}"
for index, level in enumerate(levels):
level_index = start_level + index
image_str = _dim_str(level.info.image_shape)
tiles_str = _dim_str(level.info.shape_in_tiles)
LOGGER.info(
"Level %d: %s pixels -> %s tiles",
level_index,
image_str,
tiles_str,
)
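# A minimal, self-contained sketch (not napari's actual implementation) of the
# "sparse level" pattern documented above: chunks live in a plain dict and are
# only materialised when first requested with create=True, while out-of-range
# requests simply return None. All names below are illustrative.
class _SparseLevelSketch:
    def __init__(self, rows: int, cols: int) -> None:
        self.rows, self.cols = rows, cols
        self._tiles: Dict[tuple, str] = {}

    def get_chunk(self, row: int, col: int, create: bool = False) -> Optional[str]:
        try:
            return self._tiles[(row, col)]
        except KeyError:
            if not create or not (0 <= row < self.rows and 0 <= col < self.cols):
                return None  # not cached, and either not asked to create or out of range
            chunk = f"chunk({row}, {col})"  # stand-in for an OctreeChunk
            self._tiles[(row, col)] = chunk
            return chunk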
|
import yfinance as yf
# msft = yf.Ticker("Visa")
# print(msft.info)
# #history = msft.history(period="max")
# temp = yf.Tickers
# history = msft.history(period="3d")
# print(history.size)
# print(len(history))
# #print(history.columns)
#
# print(history)
# for ind in history.index:
# for col in history.columns:
# print(col, ind)
# print(history[col][ind])
# print("----------------------")
from symbol_manager import SymbolMananger
# msft = yf.Ticker("MSFT")
# history = msft.history(period="3d")
# Download a 5-day history for the full symbol list in one batched request.
sm = SymbolMananger()
hundred_symbols = sm.get_symbols_space_separated()
data = yf.download(hundred_symbols, period="5d")
print(data.size)
print(len(data))
|
###### Selection Sort
def selection_sort(l1):
    for i in range(0, len(l1)):
        max_index = 0
        max_val = l1[0]
        for j in range(0, len(l1) - i):
            if l1[j] > max_val:
                max_index = j
                max_val = l1[j]
        # swap the max element of the unsorted prefix with its last position
        last_index = len(l1) - i - 1
        tmp = l1[last_index]
        l1[last_index] = l1[max_index]
        l1[max_index] = tmp
    return l1
if __name__ == "__main__":
    # guard so the file can be imported as a module without running the demo
    arr = [92, 74, 25, 255, 29, 1, 21]
    sorted_arr = selection_sort(arr)
    print(sorted_arr)
|
class Translator():
"""Abstract class for translating standard ciphers (i.e. Morse Code)"""
key = []
def translate(self, *args):
"""Base method for decoding a cipher"""
raise NotImplementedError()
def interactiveTranslate(self):
"""For quick translating with each character typed from the user, type ! to remove last characters"""
print "Interactive translation:"
result = ""
while True:
try:
i = raw_input(" "*len(result))
if (len(i) and i[0] == "!"):
result = result[:-len(i)]
else:
result += self.translate(i)
print result
except KeyboardInterrupt:
print result
return result
def encode(self, *args):
"""Reversed translation"""
raise NotImplementedError()
def decode(self, *args):
"""Just and alias for translate"""
return self.translate(*args)
def graphicEncode(self, *args):
"""Return in numpy array for easy plotting"""
raise NotImplementedError()
def parseInput(self, cipher):
"""Standardize input to a list, values preferably integers indexed from 0"""
return cipher
def setKey(self, key):
self.key = key
|
"""FAM URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from . import views
from investments import views as investment_views
urlpatterns = [
path('admin/', admin.site.urls),
path('', views.home, name='home'),
path('signup/', views.signup_view, name='signup'),
path('login/', views.login_view, name='login'),
path('search/', investment_views.search_view, name="search_asset"),
path('search/add-asset/', investment_views.add_asset, name="add_asset"),
path('investments/', investment_views.investment_view, name="investments"),
path('investments/remove-asset', investment_views.delete_asset, name="delete_asset"),
]
|
from django.shortcuts import render
from django.http import HttpResponse, JsonResponse
from django.core import serializers
from django.views import View
from django.http import HttpResponseRedirect
from django.utils.decorators import method_decorator
from django.views.decorators.csrf import csrf_exempt
from rest_framework.parsers import JSONParser
from rest_framework.response import Response
from rest_framework import status
from rest_framework.renderers import JSONRenderer
from .serializers import CollectionSerializer, ExhibitSerializer, ModuleSerializer, QuestionSerializer
from .models import Collection, Exhibit, Module, Question
class CollectionView(View):
@method_decorator(csrf_exempt)
def dispatch(self, *args, **kwargs):
return super(CollectionView, self).dispatch(*args, **kwargs)
def get(self, request, *args, **kwargs):
try:
data = Collection.objects.filter(colID = self.kwargs['cID'])
except KeyError:
data = Collection.objects.filter()
ser = CollectionSerializer(data, many=True)
return JsonResponse(ser.data, safe=False)
def post(self, request, *args, **kwargs):
#Request data
data = JSONParser().parse(request)
#Check to see if the entry already exists
try:
dataUp = Collection.objects.get(colID = data['colID'])
if dataUp is not None:
print("Found")
ser = CollectionSerializer(dataUp, data = data )
ser.is_valid()
ser.save()
return HttpResponse(status=200)
except Collection.DoesNotExist:
ser = CollectionSerializer(data = data)
if ser.is_valid():
ser.save()
return HttpResponse(status=201)
return HttpResponse(status=400)
class ExhibitView(View):
@method_decorator(csrf_exempt)
def dispatch(self, *args, **kwargs):
return super(ExhibitView, self).dispatch(*args, **kwargs)
def get(self, request, *args, **kwargs):
try:
data = Exhibit.objects.filter(exhID = self.kwargs['eID'])
except KeyError:
data = Exhibit.objects.filter()
ser = ExhibitSerializer(data, many=True)
return JsonResponse(ser.data, safe=False)
def post(self, request, *args, **kwargs):
#Request data
data = JSONParser().parse(request)
#Check to see if the entry already exists
try:
dataUp = Exhibit.objects.get(exhID = data['exhID'])
if dataUp is not None:
print("Found")
ser = ExhibitSerializer(dataUp, data = data )
ser.is_valid()
ser.save()
return HttpResponse(status=200)
except Exhibit.DoesNotExist:
ser = ExhibitSerializer(data = data)
if ser.is_valid():
ser.save()
return HttpResponse(status=201)
return HttpResponse(status=400)
class ModuleView(View):
@method_decorator(csrf_exempt)
def dispatch(self, *args, **kwargs):
return super(ModuleView, self).dispatch(*args, **kwargs)
def get(self, request, *args, **kwargs):
try:
data = Module.objects.filter(modID = self.kwargs['mID'])
except KeyError:
data = Module.objects.filter()
ser = ModuleSerializer(data, many=True)
return JsonResponse(ser.data, safe=False)
def post(self, request, *args, **kwargs):
#Request data
data = JSONParser().parse(request)
#Check to see if the entry already exists
try:
dataUp = Module.objects.get(modID = data['modID'])
if dataUp is not None:
print("Found")
ser = ModuleSerializer(dataUp, data = data )
ser.is_valid()
ser.save()
return HttpResponse(status=200)
except Module.DoesNotExist:
ser = ModuleSerializer(data = data)
if ser.is_valid():
ser.save()
return HttpResponse(status=201)
return HttpResponse(status=400)
class QuestionView(View):
@method_decorator(csrf_exempt)
def dispatch(self, *args, **kwargs):
return super(QuestionView, self).dispatch(*args, **kwargs)
    def get(self, request, *args, **kwargs):
        try:
            data = Question.objects.filter(queID = self.kwargs['qID'])
        except KeyError:
            data = Question.objects.filter()
        ser = QuestionSerializer(data, many=True)
        return JsonResponse(ser.data, safe=False)
def post(self, request, *args, **kwargs):
#Request data
data = JSONParser().parse(request)
#Check to see if the entry already exists
try:
            dataUp = Question.objects.get(queID = data['queID'])
            if dataUp is not None:
                print("Found")
                ser = QuestionSerializer(dataUp, data = data )
                ser.is_valid()
                ser.save()
                return HttpResponse(status=200)
        except Question.DoesNotExist:
            ser = QuestionSerializer(data = data)
if ser.is_valid():
ser.save()
return HttpResponse(status=201)
return HttpResponse(status=400)
|
import pygame, sys
import itertools
import time
import OpenGL.GL as gl
import neurodot_present.present_lib as pl
from neurodot_present.present_lib import Screen, CheckerBoard, UserEscape, VsyncPatch
pl.DEBUG = False
################################################################################
if __name__ == "__main__":
pygame.init()
pygame.mouse.set_visible(True)
LOOP_MODE = 2 # 2 is using DoubleCheckerboardFlasher loop structure, 1 is using regular CheckerboardFlasher delay
DURATION = 20
flash_rate = 19 # Hz
display_mode = pygame.display.list_modes()[-1]
scr = Screen(display_mode = display_mode)
vsync_patch = VsyncPatch(left = scr.screen_right - pl.VSYNC_PATCH_WIDTH_DEFAULT,
bottom = scr.screen_bottom,
width = pl.VSYNC_PATCH_WIDTH_DEFAULT,
height = pl.VSYNC_PATCH_HEIGHT_DEFAULT
)
cb1 = CheckerBoard(1, color1 = [1.0, 1.0, 1.0], color2 = [0.0, 0.0, 0.0], width = 0.5)
cb2 = CheckerBoard(1, color1 = [0.0, 0.0, 0.0], color2 = [1.0, 1.0, 1.0], width = 0.5)
CB_cycle = itertools.cycle((cb2, cb1))
vvals_cycle = itertools.cycle((0, 15))
try:
vsync_value = vvals_cycle.next()
CB = CB_cycle.next()
board_width = CB.width * CB.nrows
dtc = 1.0/flash_rate
tc = time.time() #time since last change
t0 = time.time()
t_list = []
def render_routine():
#prepare rendering model
gl.glClear(gl.GL_COLOR_BUFFER_BIT)
gl.glMatrixMode(gl.GL_MODELVIEW)
gl.glLoadIdentity()
# render vsync patch
vsync_patch.render(value = vsync_value)
# translate to position of board and render
gl.glTranslatef(-board_width / 2.0, -board_width / 2.0, 0.0)
CB.render()
#show the scene
pygame.display.flip()
is_running = True
while is_running:
t = time.time()
if t > (tc + dtc) and LOOP_MODE == 2:
vsync_value = vvals_cycle.next()
CB = CB_cycle.next()
tc = t #update change time
render_routine()
t_list.append(t) #this is for measuring the loop delay
if LOOP_MODE == 1:
vsync_value = vvals_cycle.next()
CB = CB_cycle.next()
dt = scr.clock.tick_busy_loop(flash_rate)
render_routine()
#dt = scr.clock.tick_busy_loop(1000)
#handle outstanding events
is_running = scr.handle_events()
#print t, t0, duration
if t - t0 > DURATION:
is_running = False
#-----------------------------------------------------------------------
#this is for measuring the loop delay
import numpy as np
print "Mean loop dt: ", np.array(np.diff(t_list).mean())
print "Frequency (Hz):", 1.0 / np.array(np.diff(t_list).mean())
except UserEscape as exc:
print exc
finally:
pass
#exit
pygame.quit()
sys.exit()
|
"""
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import numpy as np
from astropy import units as u
from astropy import constants as const
import scipy.integrate as integrate
from .default_cosmo import default_cosmo # define a default cosmology for utilities
from .distance_functions import hubble_distance, angular_diameter_distance, _Ez
__all__=('comoving_volume',)
__author__=('Duncan Campbell')
def comoving_volume(z,dw,cosmo=None):
"""
Calculate comoving volume
    Parameters
    ----------
    z: float
        redshift
    dw: float
        solid angle in steradians
    cosmo: astropy.cosmology object, optional
        cosmology object specifying cosmology. If None, FlatLambdaCDM(H0=70,Om0=0.3)
    Returns
    -------
    VC: float
        comoving volume in Mpc^3
    """
    if cosmo is None:
cosmo = default_cosmo
DH = hubble_distance(cosmo.H0.value)
f = lambda zz: DH*((1.0+zz)**2.0*angular_diameter_distance(zz,cosmo)**2.0)/(_Ez(zz, cosmo.Om0, cosmo.Ok0, cosmo.Ode0))
VC = integrate.quadrature(f,0.0,z,vec_func=False)[0]*dw
return VC
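# Hedged sanity-check sketch (assumes astropy is installed): astropy's cosmology
# objects expose comoving_volume(z), the all-sky comoving volume out to redshift z
# in Mpc^3, so scaling it by dw / (4*pi) should roughly agree with
# comoving_volume(z, dw, cosmo) above for the same cosmology.
def _astropy_cross_check(z=1.0, dw=1.0):
    from astropy.cosmology import FlatLambdaCDM
    cosmo = FlatLambdaCDM(H0=70, Om0=0.3)
    return cosmo.comoving_volume(z).value * dw / (4.0 * np.pi)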
|
import os
import random
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
plt.style.use("ggplot")
%matplotlib inline
from tqdm import tqdm_notebook, tnrange
from itertools import chain
from skimage.io import imread, imshow, concatenate_images
from skimage.transform import resize
from skimage.morphology import label
from sklearn.model_selection import train_test_split
import tensorflow as tf
import skimage.io as io
from keras.models import Model, load_model
from keras.layers import Input, BatchNormalization, Activation, Dense, Dropout
from keras.layers.core import Lambda, RepeatVector, Reshape
from keras.layers.convolutional import Conv2D, Conv2DTranspose
from keras.layers.pooling import MaxPooling2D, GlobalMaxPool2D
from keras.layers.merge import concatenate, add
from keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
from keras.optimizers import Adam
from keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img
from keras.layers import Lambda, LeakyReLU, Reshape, Add, Cropping2D, Conv2DTranspose, Permute, Conv2D, MaxPooling2D, Activation
from keras.models import Sequential
from keras.layers import Dense,Input,LSTM,Bidirectional,Activation,Conv1D,GRU
from keras.callbacks import Callback
from keras.layers import Dropout,Embedding,GlobalMaxPooling1D, MaxPooling1D, Add, Flatten
from keras.preprocessing import text, sequence
from keras.layers import GlobalAveragePooling1D, GlobalMaxPooling1D, concatenate, SpatialDropout1D
from keras import initializers, regularizers, constraints, optimizers, layers, callbacks
from keras.callbacks import EarlyStopping,ModelCheckpoint
from keras.models import Model
from keras.optimizers import Adam
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from sklearn.metrics import roc_auc_score
from keras import backend as K
from skimage.transform import rescale
from skimage import img_as_uint
import skimage.io as io
import skimage.transform as trans
n = 25
for i in range(0, n):
path = str(i)+'.tif'
img = io.imread('train/image/'+path)
img_resized = rescale(img, (0.835,0.917), anti_aliasing=False)
# print(img_resized.shape)
img_resized = img_as_uint(img_resized)
path = str(i)+'.tif'
io.imsave('train/image_processed/'+path, img_resized)
for i in range(0, n):
path = str(i)+'.tif'
img = io.imread('train/label/'+path)
img_resized = rescale(img, (0.835,0.917), anti_aliasing=False)
img_resized = img_as_uint(img_resized)
path = str(i)+'.tif'
io.imsave('train/label_processed/'+path, img_resized)
n = 10
for i in range(0, n):
path = str(i)+'.tif'
img = io.imread('test/image/'+path)
img_resized = rescale(img, (0.83479,0.9172), anti_aliasing=False)
img_resized = img_as_uint(img_resized)
path = str(i)+'.tif'
io.imsave('test/image_processed/'+path, img_resized)
def vgg_model (IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS):
inputs = Input((IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS))
c100 = Conv2D(64, (3, 3),activation='elu', kernel_initializer='he_normal', name='conv1_1', padding='same') (inputs)
c1 = Conv2D(64, (3, 3),activation='elu', kernel_initializer='he_normal',name='conv1_2', padding='same') (c100)
p1 = MaxPooling2D((2, 2),strides=(2,2)) (c1)
c2 = Conv2D(128, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same') (p1)
c2 = Conv2D(128, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same') (c2)
p2 = MaxPooling2D((2, 2),strides=(2,2)) (c2)
c3 = Conv2D(256, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same') (p2)
c3 = Conv2D(256, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same') (c3)
c3 = Conv2D(256, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same') (c3)
p3 = MaxPooling2D((2, 2),strides=(2,2)) (c3)
c4 = Conv2D(512, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same') (p3)
c4 = Conv2D(512, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same') (c4)
c4 = Conv2D(512, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same') (c4)
p4 = MaxPooling2D(pool_size=(2, 2),strides=(2,2)) (c4)
c5 = Conv2D(512, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same') (p4)
# c5 = Dropout(0.3) (c5)
c5 = Conv2D(512, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same') (c5)
c5 = Conv2D(512, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same') (c5)
p5 = MaxPooling2D(pool_size=(2, 2),strides=(2,2)) (c5)
u6 = Conv2D(4096, (3, 3), strides=(3, 3), padding='same') (c5)
# u6 = concatenate([u6, c4])
c6 = Conv2D(4096, (1, 1), strides=(1, 1), padding='same') (u6)
# c6 = Dropout(0.2) (c6)
# c6 = Conv2D(512, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same') (c6)
c7=Conv2D(1, kernel_size=(1, 1), padding='same', activation='relu', name='score_fr')(c6)
c8=LeakyReLU()(Conv2D(1, kernel_size=(4,4), padding='same', name='score14')(c7))
c9=(Cropping2D(cropping=((0, 2), (0, 2))))(c8)
skip1=Add()([c9, inputs])
    c99=LeakyReLU()(Conv2D(1, kernel_size=(8,8), padding='same', name='score14_2')(skip1))  # layer names must be unique within a Keras model
    c10=Cropping2D(cropping=((2, 2), (2, 2)))(c99)
    c10=LeakyReLU()(Conv2D(1, kernel_size=(8,8), padding='same', name='score14_3')(c10))
c10=Cropping2D(cropping=((2, 2), (2, 2)))(c10)
outputs = Conv2D(1, (1, 1), activation='sigmoid') (c10)
model = Model(inputs=[inputs], outputs=[outputs])
model.compile(optimizer=Adam(lr = 1e-4), loss='binary_crossentropy', metrics=['accuracy'])
model.summary()
return model
def trainGenerator(batch_size, train_path, img_folder, mask_folder, aug_dict, color_mode = 'grayscale',
mask_color_mode = 'grayscale', target_size = (576,576), seed = 1):
image_datagen = ImageDataGenerator(**aug_dict)
mask_datagen = ImageDataGenerator(**aug_dict)
image_generator = image_datagen.flow_from_directory(train_path, classes = [img_folder], class_mode = None,
color_mode = color_mode, target_size = target_size,
batch_size = batch_size, seed = seed)
mask_generator = mask_datagen.flow_from_directory(train_path, classes = [mask_folder], class_mode = None,
color_mode = mask_color_mode, target_size = target_size,
batch_size = batch_size, seed = seed)
train_generator = zip(image_generator, mask_generator)
for (img,mask) in train_generator:
img,mask = adjustData(img,mask)
yield (img,mask)
def adjustData(img,mask):
if(np.max(img) > 1):
img = img / 255
mask = mask /255
mask[mask > 0.5] = 1
mask[mask <= 0.5] = 0
return (img,mask)
def testGenerator(test_path,num_image,target_size = (576,576),as_gray = True):
for i in range(0, num_image):
img = io.imread(os.path.join(test_path,"%d.tif"%i),as_gray = as_gray)
img = img / 255
img = np.reshape(img,img.shape+(1,))
img = np.reshape(img,(1,)+img.shape)
yield img
data_gen_args = dict(rotation_range=0.2, width_shift_range=0.05, height_shift_range=0.05, shear_range=0.05,
zoom_range=0.05, horizontal_flip=True, fill_mode='nearest')
myGene = trainGenerator(5,'train','image_processed','label_processed',data_gen_args)
model = vgg_model(576,576,1)
model.fit_generator(myGene,steps_per_epoch=5,epochs=1,callbacks=None)
model.save('vgg_model.h5')
model = load_model('vgg_model.h5')
testGene = testGenerator("test/image_processed/", 10)
results = model.predict_generator(testGene,10,verbose=1)
plt.imshow(np.squeeze(results[1]), cmap='Greys', vmin = 0.0, vmax = 0.09)
plt.show()
results_ = np.squeeze(np.multiply(results[1],255))
print (results_)
plt.imshow(results_ , cmap='Greys', vmin = 0, vmax = 9)
plt.show()
|
# -*- coding: utf-8 -*-
"""
Mesa Agent-Based Modeling Framework
Core Objects: Model, and Agent.
"""
import datetime
from .multilevel_mesa import MultiLevel_Mesa
__all__ = ["MultiLevel_Mesa"]
__title__ = 'multilevel_mesa'
__version__ = '0.0.1'
__license__ = 'MIT'
__copyright__ = 'Copyright %s Tom Pike' % datetime.date.today().year
|
from bynarytree import ABR
from bynarytree import ARN
import random
from timeit import default_timer as timer
from matplotlib import pyplot as plt
import pickle
def random_array(n):
    array = list(range(n))
    for i in range(0, n):
        array[i] = random.randint(0, n * 10)
    return array
def random_array_ordered(n):
    array = list(range(n))
    return array
def File_Test_ABR():
Tempi = []
Tempi2 = []
Media = []
dimensione_input = 10
rip = 0
while rip < 200:
j = 0
while j < 5:
array = random_array(dimensione_input)
tree = ABR()
for i in range(0, dimensione_input):
start = timer()
tree.insert(array[i])
end = timer()
tempo_esec = end - start
Tempi.append(tempo_esec)
somma = 0
for k in range(0, len(Tempi)):
somma += Tempi[k]
media = somma / len(Tempi)
Tempi[0:len(Tempi)] = []
Tempi2.append(media)
j += 1
somma = 0
for k in range(0, len(Tempi2)):
somma += Tempi2[k]
media = somma / len(Tempi2)
Media.append(media)
rip += 1
Tempi2[0:len(Tempi2)] = []
dimensione_input += 10
pickle.dump(Media, open("InsertABR.p", "wb"))
def File_Test_ABR_search():
Tempi = []
Tempi2 = []
Media = []
dimensione_input = 10
rip = 0
while rip < 100:
j = 0
while j < 5:
array = random_array(dimensione_input)
tree = ABR()
for i in range(0, dimensione_input):
tree.insert(array[i])
for i in range(0, dimensione_input):
start = timer()
tree.find(array[i])
end = timer()
tempo_esec = end - start
Tempi.append(tempo_esec)
somma = 0
for k in range(0, len(Tempi)):
somma += Tempi[k]
media = somma / len(Tempi)
Tempi2.append(media)
Tempi[0:len(Tempi)] = []
j += 1
somma = 0
for k in range(0, len(Tempi2)):
somma += Tempi2[k]
media = somma / len(Tempi2)
Media.append(media)
rip += 1
Tempi2[0:len(Tempi2)] = []
dimensione_input += 10
pickle.dump(Media, open("SearchABR.p", "wb"))
def File_Test_ABR_search_ordered():
Tempi = []
Tempi2 = []
Media = []
dimensione_input = 10
rip = 0
while rip < 80:
j = 0
while j < 5:
array = random_array_ordered(dimensione_input)
tree = ABR()
for i in range(0, dimensione_input):
tree.insert(array[i])
for i in range(0, dimensione_input):
start = timer()
tree.find(array[i])
end = timer()
tempo_esec = end - start
Tempi.append(tempo_esec)
somma = 0
for k in range(0, len(Tempi)):
somma += Tempi[k]
media = somma / len(Tempi)
Tempi2.append(media)
Tempi[0:len(Tempi)] = []
j += 1
somma = 0
for k in range(0, len(Tempi2)):
somma += Tempi2[k]
media = somma / len(Tempi2)
Media.append(media)
rip += 1
Tempi2[0:len(Tempi2)] = []
dimensione_input += 10
pickle.dump(Media, open("SearchABR-Ord.p", "wb"))
def File_Test_ABR_delete():
Tempi = []
Tempi2 = []
Media = []
dimensione_input = 10
rip = 0
while rip < 200:
j = 0
while j < 5:
array = random_array(dimensione_input)
tree = ABR()
for i in range(0, dimensione_input):
tree.insert(array[i])
for i in range(0, dimensione_input):
start = timer()
tree.delete(array[i])
end = timer()
tempo_esec = end - start
Tempi.append(tempo_esec)
somma = 0
for k in range(0, len(Tempi)):
somma += Tempi[k]
media = somma / len(Tempi)
Tempi2.append(media)
Tempi[0:len(Tempi)] = []
j += 1
somma = 0
for k in range(0, len(Tempi2)):
somma += Tempi2[k]
media = somma / len(Tempi2)
Media.append(media)
rip += 1
Tempi2[0:len(Tempi2)] = []
dimensione_input += 10
pickle.dump(Media, open("DeleteABR.p", "wb"))
def File_Test_ABR_delete_ordered():
Tempi = []
Tempi2 = []
Media = []
dimensione_input = 10
rip = 0
while rip < 200:
j = 0
while j < 5:
array = random_array_ordered(dimensione_input)
tree = ABR()
for i in range(0, dimensione_input):
tree.insert(array[i])
for i in range(0, dimensione_input):
start = timer()
tree.delete(array[i])
end = timer()
tempo_esec = end - start
Tempi.append(tempo_esec)
somma = 0
for k in range(0, len(Tempi)):
somma += Tempi[k]
media = somma / len(Tempi)
Tempi2.append(media)
Tempi[0:len(Tempi)] = []
j += 1
somma = 0
for k in range(0, len(Tempi2)):
somma += Tempi2[k]
media = somma / len(Tempi2)
Media.append(media)
rip += 1
Tempi2[0:len(Tempi2)] = []
dimensione_input += 10
pickle.dump(Media, open("DeleteABR-Ord.p", "wb"))
def File_Test_ABR_ordered():
Tempi = []
Tempi2 = []
Media = []
dimensione_input = 10
rip = 0
while rip < 200:
j = 0
while j < 5:
array = random_array_ordered(dimensione_input)
tree = ABR()
for i in range(0, dimensione_input):
start = timer()
tree.insert(array[i])
end = timer()
tempo_esec = end - start
Tempi.append(tempo_esec)
somma = 0
for k in range(0, len(Tempi)):
somma += Tempi[k]
media = somma / len(Tempi)
Tempi[0:len(Tempi)] = []
Tempi2.append(media)
j += 1
somma = 0
for k in range(0, len(Tempi2)):
somma += Tempi2[k]
media = somma / len(Tempi2)
Media.append(media)
rip += 1
Tempi2[0:len(Tempi2)] = []
dimensione_input += 10
pickle.dump(Media, open("InsertABR-Ord.p", "wb"))
def File_Test_ARN():
Tempi = []
Tempi2 = []
Media = []
dimensione_input = 10
rip = 0
while rip < 200:
j = 0
while j < 5:
array = random_array(dimensione_input)
tree = ARN()
for i in range(0, dimensione_input):
start = timer()
tree.insert(array[i])
end = timer()
tempo_esec = end - start
Tempi.append(tempo_esec)
somma = 0
for k in range(0, len(Tempi)):
somma += Tempi[k]
media = somma / len(Tempi)
Tempi2.append(media)
Tempi[0:len(Tempi)] = []
j += 1
somma = 0
for k in range(0, len(Tempi2)):
somma += Tempi2[k]
media = somma / len(Tempi2)
Media.append(media)
rip += 1
Tempi2[0:len(Tempi2)] = []
dimensione_input += 10
pickle.dump(Media, open("InsertARN.p", "wb"))
def File_Test_ARN_ordered():
Tempi = []
Tempi2 = []
Media = []
dimensione_input = 10
rip = 0
while rip < 200:
j = 0
while j < 5:
array = random_array_ordered(dimensione_input)
tree = ARN()
for i in range(0, dimensione_input):
start = timer()
tree.insert(array[i])
end = timer()
tempo_esec = end - start
Tempi.append(tempo_esec)
somma = 0
for k in range(0, len(Tempi)):
somma += Tempi[k]
media = somma / len(Tempi)
Tempi2.append(media)
Tempi[0:len(Tempi)] = []
j += 1
somma = 0
for k in range(0, len(Tempi2)):
somma += Tempi2[k]
media = somma / len(Tempi2)
Media.append(media)
rip += 1
Tempi2[0:len(Tempi2)] = []
dimensione_input += 10
pickle.dump(Media, open("InsertARN-Ord.p", "wb"))
def File_Test_ARN_search():
Tempi = []
Tempi2 = []
Media = []
dimensione_input = 10
rip = 0
while rip < 100:
j = 0
while j < 5:
array = random_array(dimensione_input)
tree = ARN()
for i in range(0, dimensione_input):
tree.insert(array[i])
for i in range(0, dimensione_input):
start = timer()
tree.find(array[i])
end = timer()
tempo_esec = end - start
Tempi.append(tempo_esec)
somma = 0
for k in range(0, len(Tempi)):
somma += Tempi[k]
media = somma / len(Tempi)
Tempi2.append(media)
Tempi[0:len(Tempi)] = []
j += 1
somma = 0
for k in range(0, len(Tempi2)):
somma += Tempi2[k]
media = somma / len(Tempi2)
Media.append(media)
rip += 1
Tempi2[0:len(Tempi2)] = []
dimensione_input += 10
pickle.dump(Media, open("SearchARN.p", "wb"))
def File_Test_ARN_search_ordered():
Tempi = []
Tempi2 = []
Media = []
dimensione_input = 10
rip = 0
while rip < 100:
j = 0
while j < 5:
array = random_array_ordered(dimensione_input)
tree = ARN()
for i in range(0, dimensione_input):
tree.insert(array[i])
for i in range(0, dimensione_input):
start = timer()
tree.find(array[i])
end = timer()
tempo_esec = end - start
Tempi.append(tempo_esec)
somma = 0
for k in range(0, len(Tempi)):
somma += Tempi[k]
media = somma / len(Tempi)
Tempi2.append(media)
Tempi[0:len(Tempi)] = []
j += 1
somma = 0
for k in range(0, len(Tempi2)):
somma += Tempi2[k]
media = somma / len(Tempi2)
Media.append(media)
rip += 1
Tempi2[0:len(Tempi2)] = []
dimensione_input += 10
pickle.dump(Media, open("SearchARN-Ord.p", "wb"))
def tree_height():
_HeightABR = []
_HeightARN = []
dimensione_input = 10
rip = 0
while rip < 80:
tree = ABR()
tree2 = ARN()
A = random_array_ordered(dimensione_input)
for j in range(0, dimensione_input):
tree.insert(A[j])
tree2.insert(A[j])
_HeightABR.append(tree.height())
_HeightARN.append(tree2.height())
dimensione_input += 10
rip += 1
pickle.dump(_HeightABR, open("Height.p", "wb"))
pickle.dump(_HeightARN, open("Height2.p", "wb"))
def Grafic_Insert():
File_Test_ABR()
File_Test_ARN()
File_Test_ABR_ordered()
File_Test_ARN_ordered()
Media1 = pickle.load(open("InsertABR.p", "rb"))
Media2 = pickle.load(open("InsertARN.p", "rb"))
Media3 = pickle.load(open("InsertABR-Ord.p", "rb"))
    Media4 = pickle.load(open("InsertARN-Ord.p", "rb"))
plt.plot(Media1, label="albero binario di ricerca")
plt.plot(Media2, label="albero rosso nero")
plt.plot(Media3, label="ABR: elementi ordinati")
plt.plot(Media4, label="ARN: elementi ordinati")
plt.legend()
plt.xlabel("Numero di elementi")
plt.ylabel("Tempi di esecuzione")
plt.title("Alberi Binari a Confronto")
plt.show()
def Grafic_Search_ARN():
File_Test_ABR_delete_ordered()
Media2 = pickle.load(open("DeleteABR-Ord.p", "rb"))
plt.plot(Media2)
plt.xlabel("Numero di elementi")
plt.ylabel("Tempo di esecuzione")
plt.title("Albero rosso-nero")
plt.show()
def Grafic_Search():
File_Test_ABR_search()
File_Test_ABR_search_ordered()
File_Test_ARN_search()
File_Test_ARN_search_ordered()
Media_Search = pickle.load(open("SearchABR.p", "rb"))
Media_Search2 = pickle.load(open("SearchABR-Ord.p", "rb"))
Media_Search3 = pickle.load(open("SearchARN.p", "rb"))
Media_Search4 = pickle.load(open("SearchARN-Ord.p", "rb"))
plt.plot(Media_Search, label="ABR: ricerca")
plt.plot(Media_Search2, label="ABR: ricerca ordinata")
plt.plot(Media_Search3, label="ARN: ricerca")
plt.plot(Media_Search4, label="ARN: ricerca ordinata")
plt.legend()
plt.xlabel("Numero di elementi")
plt.ylabel("Tempi di esecuzione")
plt.title("Ricerca in ABR")
plt.show()
def Grafic_Delete():
File_Test_ABR_delete()
File_Test_ABR_delete_ordered()
Media_Delete = pickle.load(open("DeleteABR.p", "rb"))
Media_Delete2 = pickle.load(open("DeleteABR-Ord.p", "rb"))
plt.plot(Media_Delete, label="ABR elementi casuali")
plt.plot(Media_Delete2, label="ABR elementi ordinati")
plt.legend()
plt.xlabel("Numero di elementi")
plt.ylabel("Tempi di esecuzione")
plt.title("Cancellazione in ABR")
plt.show()
def Grafic_Height():
tree_height()
height = pickle.load(open("Height.p", "rb"))
height2 = pickle.load(open("Height2.p", "rb"))
plt.plot(height, label="ABR elementi ordinati")
plt.plot(height2, label="ARN elementi ordinati")
plt.legend()
plt.xlabel("Dimensione di Input")
plt.ylabel("Altezza")
plt.title("Altezza degli Alberi")
plt.show()
# Grafic_Search_ARN()
|
import crypt, spwd, syslog
def auth_log(msg):
"""Send errors to default auth log"""
syslog.openlog(facility=syslog.LOG_AUTH)
syslog.syslog("SSH Attack Logged: " + msg)
syslog.closelog()
def check_pw(user, password):
"""Check the password matches local unix password on file"""
try:
hashed_pw = spwd.getspnam(user)[1]
    except Exception:
return False
return crypt.crypt(password, hashed_pw) == hashed_pw
def pam_sm_authenticate(pamh, flags, argv):
try:
user = pamh.get_user()
except pamh.exception, e:
return e.pam_result
if not user:
return pamh.PAM_USER_UNKNOWN
try:
resp = pamh.conversation(pamh.Message(pamh.PAM_PROMPT_ECHO_OFF, 'Password:'))
except pamh.exception, e:
return e.pam_result
if not check_pw(user, resp.resp):
auth_log("Remote Host: %s (%s:%s)" % (pamh.rhost, user, resp.resp))
return pamh.PAM_AUTH_ERR
return pamh.PAM_SUCCESS
def pam_sm_setcred(pamh, flags, argv):
return pamh.PAM_SUCCESS
def pam_sm_acct_mgmt(pamh, flags, argv):
return pamh.PAM_SUCCESS
def pam_sm_open_session(pamh, flags, argv):
return pamh.PAM_SUCCESS
def pam_sm_close_session(pamh, flags, argv):
return pamh.PAM_SUCCESS
def pam_sm_chauthtok(pamh, flags, argv):
return pamh.PAM_SUCCESS
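# Hedged usage note (an assumption, not part of this module): with the pam_python
# module installed, a PAM service file would typically load this script with a
# line such as
#     auth    requisite    pam_python.so /path/to/this_module.py
# where the path, control flag and service depend on the target system.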
|
#!/usr/bin/env python3
import time
import random
import typing
import sys
def pos(data, size=4):
ret = []
for x in range(0, len(data), size):
ret.append( int.from_bytes(data[x:x+size], 'big') )
return ret
def neg(data, size=4):
s = b''.join([e.to_bytes(size, 'big') for e in data])
return s
def _encrypt(v: typing.List[int], key: typing.List[int]):
counter, delta, mask = 0, 0xFACEB00C, 0xffffffff
for i in range(32):
counter = counter + delta & mask
v[0] = v[0] + ((v[1] << 4) + key[0] & mask ^ (v[1] + counter) & mask ^ (v[1] >> 5) + key[1] & mask) & mask
v[1] = v[1] + ((v[0] << 4) + key[2] & mask ^ (v[0] + counter) & mask ^ (v[0] >> 5) + key[3] & mask) & mask
return v
def _decrypt(v, key):
deltas = get_delta()
mask = 0xffffffff
for i in range(32):
counter = deltas[31-i]
v[1] = v[1] - ((v[0] << 4) + key[2] & mask ^ (v[0] + counter) & mask ^ (v[0] >> 5) + key[3] & mask) & mask
v[0] = v[0] - ((v[1] << 4) + key[0] & mask ^ (v[1] + counter) & mask ^ (v[1] >> 5) + key[1] & mask) & mask
return v
def get_delta():
counter, delta, mask = 0, 0xFACEB00C, 0xffffffff
deltas = []
for i in range(32):
counter = counter + delta & mask
deltas.append(counter)
return deltas
def decrypt(cipher_text, key):
clear_text = b''
for i in range(0, len(cipher_text), 8):
clear_text += neg(_decrypt(pos(cipher_text[i:i+8]), pos(key)))
return clear_text
def encrypt(clear_text: bytes, key: bytes):
cipher_text = b''
for i in range(0, len(clear_text), 8):
cipher_text += neg(_encrypt(pos(clear_text[i:i+8]), pos(key)))
print(cipher_text[:8])
return cipher_text
if __name__ == '__main__':
rand_seed = int(time.time())
while True:
random.seed(rand_seed)
key = random.getrandbits(128).to_bytes(16, 'big')
clear_text = decrypt(bytes.fromhex("77f905c39e36b5eb0deecbb4eb08e8cb"), key)
print(rand_seed)
if clear_text.lower().startswith(b'flag'):
print(clear_text)
sys.exit(0)
else:
rand_seed -= 1
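# Hedged round-trip sanity check (illustrative values only): for any 16-byte key,
# decrypt(encrypt(m, key), key) should return the original 8-byte-aligned message.
# Call _round_trip_self_test() manually to run it.
def _round_trip_self_test():
    key = bytes(range(16))
    message = b'flagtest'  # exactly one 8-byte block
    assert decrypt(encrypt(message, key), key) == message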
|
s=input()
def solve(s):
l=s.split(" ")
texto=""
for i in l:
if i!="":
texto+=i.capitalize()+" "
else:
texto+=" "
return texto
print(solve(s))
|
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 27 09:15:57 2020
@author: Administrator
"""
import os
import sys
sys.path.insert(0,os.path.abspath('..'))
import time
import numpy as np
import pandas as pd
import SIMLR
from SIMLR import helper
from sklearn import metrics
from sklearn.metrics.cluster import adjusted_rand_score as ari
from sklearn.metrics.cluster import normalized_mutual_info_score as nmi
X = pd.read_csv('yan.csv',header=None)
X = np.array(X)
X = X.transpose()
label = pd.read_csv('yan_label.csv')
y=np.array(label)
label = y.ravel()
c = label.max() # number of clusters
### if the number of genes are more than 500, we recommend to perform pca first!
start_main = time.time()
if X.shape[1]>500:
X = helper.fast_pca(X,500)
else:
X = X.todense()
start_main = time.time()
# This is how we initialize an object for SIMLR: the first input is the number of
# ranks (clusters) and the second is the number of neighbors. The third is a binary
# flag for memory-saving mode; turn it on when the number of cells is extremely
# large to save memory at the cost of efficiency.
simlr = SIMLR.SIMLR_LARGE(c, 30, 0)
S, F,val, ind = simlr.fit(X)
julei = simlr.fast_minibatch_kmeans(F,c)
print('NMI value is %f \n' % nmi(julei.flatten(),label.flatten()))
print('ARI value is %f \n' % ari(julei.flatten(),label.flatten()))
print('HOM value is %f \n' % metrics.homogeneity_score(julei,label))
print("AMI: %0.3f"% metrics.adjusted_mutual_info_score(label, julei))
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Thu Oct 12 15:03:51 2017
@author: ian
"""
import matplotlib.pyplot as plt
from matplotlib.font_manager import FontProperties
import pandas as pd
import numpy as np
from scipy.stats import linregress
import DataIO as io
# Get data
f = '/home/ian/OzFlux/Sites/GatumPasture/Data/Processed/All/GatumPasture_L3.nc'
df = io.OzFluxQCnc_to_data_structure(f, output_structure='pandas')
sub_df = df[['Fe_CR3', 'Fe_EP']].dropna()
# Do stats
res = linregress(sub_df.Fe_CR3, sub_df.Fe_EP)
x_line = np.linspace(-100, 400, 11)
y_line = x_line * res.slope + res.intercept
# Plot it
font = FontProperties()
font.set_family('sans serif')
font.set_style('italic')
fig, ax = plt.subplots(1, 1, figsize = (12, 8))
fig.patch.set_facecolor('white')
ax.set_xlim([-100, 400])
ax.set_ylim([-100, 400])
ax.set_xlabel('F$_e\__{CR3}$ (W m$^{-2}$)', fontsize = 18, fontproperties = font)
ax.set_ylabel('F$_e\__{EP}$ (W m$^{-2}$)', fontsize = 18, fontproperties = font)
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.tick_params(axis = 'x', labelsize = 14)
ax.tick_params(axis = 'y', labelsize = 14)
ax.plot(sub_df.Fe_CR3, sub_df.Fe_EP, marker = 'o', color = '0.5', ms = 3,
ls = '')
ax.plot(x_line, y_line, color = 'black')
txt = 'y = {0}x + {1} (r$^2$ = {2})'.format(str(round(res.slope, 2)),
str(round(res.intercept, 2)),
str(round(res.rvalue ** 2, 2)))
ax.text(-50, 400, txt, fontsize = 18, fontproperties = font,
horizontalalignment = 'left', verticalalignment = 'center')
|
import mysql.connector
import uuid
import sys
from PIL import Image
import base64
import io
import PIL.Image
from mysql.connector.errors import custom_error_exception
from datetime import datetime
cnx = mysql.connector.connect(user="ugqiri0xcve8arnj", password="W05Xj0GMrQfciurwXyku", host="b1d548joznqwkwny7elp-mysql.services.clever-cloud.com",database="b1d548joznqwkwny7elp")
cursor = cnx.cursor(buffered=True)
def createTables () :
query = ("CREATE TABLE Booking (Booking_id int(10) primary key, b_date date NOT NULL, Amount int(5) NOT NULL, Transaction_id int(10) NOT NULL)")
cursor.execute(query)
query = ("CREATE TABLE Room (room_id int(10) primary key ,type VARCHAR(10) NOT NULL, city VARCHAR(20) NOT NULL , owner_name VARCHAR(40) NOT NULL ,price int NOT NULL ,Amenities VARCHAR(40)NOT NULL, images BLOB, loaction VARCHAR(20))")
cursor.execute(query)
query = ("CREATE TABLE owner (o_id int(10) primary key ,name VARCHAR(40) NOT NULL ,gender VARCHAR(10) NOT NULL ,mobile double(10,0) NOT NULL ,email VARCHAR(40)NOT NULL ,Booking_id int(10) NOT NULL ,room_id int(10) NOT NULL,FOREIGN KEY(room_id) references Room(room_id))")
cursor.execute(query)
query = ("CREATE TABLE student_dataset (s_id int(10) primary key ,name VARCHAR(40) NOT NULL, address VARCHAR(40)NOT NULL ,gender VARCHAR(10)NOT NULL ,DOB date NOT NULL, Email VARCHAR(40) NOT NULL, mobile double(10,2) NOT NULL, Booking_id int(10) NOT NULL, FOREIGN KEY(Booking_id) references Booking(Booking_id))")
cursor.execute(query)
def encodeimage (F) :
with open(F, 'rb') as f:
photo = f.read()
encodestring = base64.b64encode(photo)
return encodestring
def insertRoom() :
valuetuple = (3, '2 BHK', 'Pune', 'Prajwal Gandhi', 10000, 'Wifi 24x7 Water and Electricity', encodeimage("D:\Programming\Python\Room Rental\Room3.jpg"), 'Katraj')
query = ("insert into Room values (%s, %s, %s, %s, %s, %s, %s, %s)")
cursor.execute(query,valuetuple)
cnx.commit()
def insertOwner() :
valuetuple = (3, 'Prajwal Gandhi', 'Male', 8798120156, 'gandhi.prajwal@gmail.com', 1002, 3)
query = ("insert into owner values (%s, %s, %s, %s, %s, %s, %s)")
cursor.execute(query,valuetuple)
cnx.commit()
def insertStudent () :
valuetuple = (1, 'Kishor Sawant', 'Pune', 'Male', datetime(2001,1,1), 'rajendra.patil@gmail.com', 7498173960, None)
query = ("insert into student_dataset values (%s, %s, %s, %s, %s, %s, %s, %s)")
cursor.execute(query,valuetuple)
cnx.commit()
def show (IDcol, ID, who, what) :
if ID is not None and IDcol is not None : forone = " where {} = ".format(IDcol) + str(ID)
else : forone = ''
if what is not None : cols = ''.join(what)
else : cols = '*'
query = ("select {} from {}".format(cols, who) + forone)
cursor.execute(query)
data = cursor.fetchall()
if what == 'images' :
image = data[0][0] # Image if any
binary_data = base64.b64decode(image)
image = Image.open(io.BytesIO(binary_data))
image.show()
else :
for i in data :
print(i)
def deleteRecord (Table, key, keyval) :
if key is not None and keyval is not None :
query = ("delete from {} where {} = {}".format(Table, key, keyval))
else :
query = ("truncate table {}".format(Table))
cursor.execute(query)
cnx.commit()
#createTables()
#insertRoom()
#insertOwner()
#insertStudent()
#addBooking()
print("Owners : "); show('o_id', 1, 'owner', None)
print("Rooms : "); show(None, None, 'Room', 'room_id, type, city, owner_name, price, Amenities')
print("Students : "); show(None, None, 'student_dataset', None)
show ('room_id', 1, 'Room', 'images')
#deleteRecord('owner', 'o_id', 1)
cursor.close()
cnx.close()
|
from django.db import models
import reversion
@reversion.register()
class TestModel(models.Model):
name = models.CharField(max_length=10)
|
# Copyright 2016 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
import mock
from osc_lib.tests import utils
from glareclient.common import utils as g_utils
from glareclient.tests.unit.osc.v1 import fakes_schemas
blob_fixture = {
"status": "active",
"url": "fake_url",
"md5": "35d83e8eedfbdb87ff97d1f2761f8ebf",
"sha1": "942854360eeec1335537702399c5aed940401602",
"sha256": "d8a7834fc6652f316322d80196f6dcf2"
"94417030e37c15412e4deb7a67a367dd",
"external": False,
"content_type": "application/octet-stream",
"size": 594}
def mock_list(*args, **kwargs):
return [{'id': 'fc15c365-d4f9-4b8b-a090-d9e230f1f6ba',
'name': 'art1',
'version': '0.0.0',
'owner': 'f649c77999e449e89627024f71b76603',
'visibility': 'private',
'status': 'active',
'type_name': 'images'},
{'id': '48d35c1d-6739-459b-bbda-e4dcba8a684a',
'name': 'art2',
'version': '0.0.0',
'owner': 'f649c77999e449e89627024f71b76603',
'visibility': 'private',
'status': 'active',
'type_name': 'heat_templates'}]
def mock_get(*args, **kwargs):
return {'id': 'fc15c365-d4f9-4b8b-a090-d9e230f1f6ba',
'name': 'art1',
'version': '0.0.0',
'owner': 'f649c77999e449e89627024f71b76603',
'visibility': 'private',
'status': 'active',
'blob': blob_fixture,
'image': blob_fixture,
'package': blob_fixture,
'template': blob_fixture,
'environment': blob_fixture}
def mock_g_servs(*args, **kwargs):
return {'id': 'fc15c365-d4f9-4b8b-a090-d9e230f1f6ba',
'name': 'art1',
'version': '0.0.0',
'owner': 'f649c77999e449e89627024f71b76603',
'visibility': 'private',
'status': 'active'}
def mock_g_schema(*args, **kwargs):
return fakes_schemas.FIXTURE_SCHEMA
def mock_get_data_file(*args, **kwargs):
return 'data'
class TestArtifacts(utils.TestCommand):
def setUp(self):
super(TestArtifacts, self).setUp()
self.app.client_manager.artifact = mock.MagicMock()
self.app.client_manager.artifact.artifacts.list = mock_list
self.app.client_manager.artifact.artifacts.get = mock_get
self.app.client_manager.artifact.artifacts.get_by_name = mock_get
self.app.client_manager.artifact.artifacts.add_tag = mock_g_servs
self.app.client_manager.artifact.artifacts.remove_tag = mock_g_servs
self.app.client_manager.artifact.artifacts.create = mock_g_servs
self.app.client_manager.artifact.artifacts.update = mock_g_servs
self.app.client_manager.artifact.artifacts.delete = mock_g_servs
self.app.client_manager.artifact.artifacts.activate = mock_g_servs
self.app.client_manager.artifact.artifacts.deactivate = mock_g_servs
self.app.client_manager.artifact.artifacts.reactivate = mock_g_servs
self.app.client_manager.artifact.artifacts.publish = mock_g_servs
self.app.client_manager.artifact.blobs.upload_blob = mock_g_servs
self.app.client_manager.artifact.blobs.download_blob = mock_g_servs
self.app.client_manager.artifact.blobs.add_external_location = \
mock_g_servs
self.app.client_manager.artifact.artifacts.get_type_schema = \
mock_g_schema
g_utils.get_data_file = mock.MagicMock()
g_utils.get_data_file = mock_get_data_file
g_utils.save_blob = mock.MagicMock()
sys.stdout.isatty = mock.MagicMock()
sys.stdout.isatty._mock_return_value = True
|
from django import forms
from .models import Player
from django.core.exceptions import ValidationError
import re
class PlayerForm(forms.ModelForm):
class Meta:
model = Player
fields = ['name','count_correct_answers', 'money_won']
widgets = {
'name': forms.TextInput(attrs={'class':'form-control'}),
}
def clean_name(self):
new_name = self.cleaned_data['name']
if not re.match (r'^[а-яА-ЯёЁa-zA-Z\s]+$', new_name):
raise ValidationError('В имени могут содержаться только буквы')
return new_name
|
from spack import *
class Form(AutotoolsPackage):
homepage = "http://www.example.com"
url = "https://gosam.hepforge.org/gosam-installer/form-4.1.033e.tar.gz"
version('4.1.033e', sha256='b182e10f9969238daea453c14ada9989a4818d23aad8855a8eb5968a231f545c')
def configure_args(self):
args = ['--enable-shared', '--disable-static','--without-gmp', 'CXXFLAGS=-fpermissive']
return args
|
from flask import Flask, jsonify
from numeros import numero
app = Flask(__name__)
@app.route('/<string:entrada>', methods=['GET'])
def response(entrada):
resultado = numero(entrada)
return jsonify(resultado=resultado)
if __name__ == '__main__':
app.run(port=5050, debug=True)
|
'''
Created on 24 Apr 2018
@author: maikon
'''
import sys
import cv2
import numpy as np
from matplotlib import pyplot as plt
# cascade_src = 'resource/cars.xml'
cascade_src = '/home/maikon/git/OpencvPython/resource/haarcascade_russian_plate_number.xml'
car_cascade = cv2.CascadeClassifier(cascade_src)
def procurar_placa(frame, gray_image):
# img = cv2.imread('resource/placaFoco.jpg',0)
img = frame.copy()
newImg = cv2.blur(gray_image,(5,5))
img = newImg
laplacian = cv2.Laplacian(img,cv2.CV_64F)
sobelx = cv2.Sobel(img,cv2.CV_8U,1,0,ksize=3,scale=1,delta=0,borderType=cv2.BORDER_DEFAULT)
sobely = cv2.Sobel(img,cv2.CV_8U,0,1,ksize=3,scale=1,delta=0,borderType=cv2.BORDER_DEFAULT)
    # threshold applied over the Sobel of X
tmp, imgThs = cv2.threshold(laplacian,0,255,cv2.THRESH_OTSU+cv2.THRESH_BINARY)
    # give the pixels a small shake to see what falls out (this cleans the image but
    # pushes the regions apart; experiment with it)
#krl = np.ones((6,6),np.uint8)
#erosion = cv2.erode(imgThs,krl,iterations = 1)
#krl = np.ones((19,19),np.uint8)
#dilation = cv2.dilate(erosion,krl,iterations = 1)
#imgThs = dilation
    # structuring element proportional to the plate
morph = cv2.getStructuringElement(cv2.MORPH_RECT,(40,13))
    # capture the regions that may contain the plate
plateDetect = cv2.morphologyEx(imgThs,cv2.MORPH_CLOSE,morph)
regionPlate = plateDetect.copy()
_, contours, hierarchy = cv2.findContours(regionPlate,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_NONE)
for contour in contours:
[x,y,w,h] = cv2.boundingRect(contour)
if h>250 and w>250:
continue
if h<40 or w<40:
continue
cv2.rectangle(frame,(x,y),(x+w,y+h),(255,0,255),2)
cv2.drawContours(regionPlate,contours,-1,(255,255,255),18)
cv2.imshow("Output - Press 'q' to exit", regionPlate)
def detectar_carro(frame, gray, car_cascade):
cars = car_cascade.detectMultiScale(gray, 1.1, 1)
for (x,y,w,h) in cars:
cv2.rectangle(frame,(x,y),(x+w,y+h),(0,0,255),2)
cap = cv2.VideoCapture('resource/video.mp4')
# cap = cv2.VideoCapture('/home/maikon/Downloads/video1.avi')
img = cv2.imread("resource/meucarro.jpg")
g = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
detectar_carro(img,g, car_cascade)
cv2.imshow("carro", img)
if not cap.isOpened():
print("cannot open video")
sys.exit(1)
fps = cap.get(cv2.CAP_PROP_FPS)
delay = int((1.0 / float(fps)) * 1000)
while (cap.isOpened()):
ret, im = cap.read()
if not ret:
break
gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
# procurar_placa(im,gray)
detectar_carro(im,gray, car_cascade)
cv2.imshow("Orignal", im)
k = cv2.waitKey(delay)
    if (k & 0xFF) == ord('q'):
break
cap.release()
cv2.destroyAllWindows()
|
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import zipfile
# In[ ]:
def namelist_in_archive (archive):
with zipfile.ZipFile(archive) as archive:
namelist = archive.namelist()
return namelist
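# In[ ]:
# Hedged usage sketch: build a tiny in-memory archive and list its contents.
# ZipFile accepts file-like objects as well as paths; 'hello.txt' is made up.
import io
_buf = io.BytesIO()
with zipfile.ZipFile(_buf, 'w') as _zf:
    _zf.writestr('hello.txt', 'hello')
_buf.seek(0)
print(namelist_in_archive(_buf))  # -> ['hello.txt']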
|
from OpenGL.GL import *
from OpenGL.GLU import *
from OpenGL.GLUT import *
import math
import sys
def Round(a):
return int(a+.5)
def init():
glClearColor(1.0,1.0,1.0,0.0)
glColor3f(1.0,0.0,0.0)
glPointSize(3.0)
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
gluOrtho2D(0.0,600.0,0.0,600.0)
def setpixel(x,y):
glBegin(GL_POINTS)
glVertex2f(x,y)
glEnd()
glFlush()
def getinput():
    global xc,yc,rx,ry
    xc=float(input("Enter xc centre of ellipse : "))
    yc=float(input("Enter yc centre of ellipse : "))
    rx=float(input("Enter radius rx of ellipse : "))
    ry=float(input("Enter radius ry of ellipse : "))
def drawcir(xc,yc,rx,ry):
setpixel(Round(xc),Round(yc))
theta=0
section=1000
steps=1/float(section)
while(theta<=360):
theta+=steps
x=rx*math.cos(theta)
y=ry*math.sin(theta)
setpixel(Round(xc+x),Round(yc+y))
def display():
glClear(GL_COLOR_BUFFER_BIT)
drawcir(xc,yc,rx,ry)
def main():
glutInit(sys.argv)
glutInitDisplayMode(GLUT_SINGLE|GLUT_RGB)
glutInitWindowSize(600,600)
glutInitWindowPosition(200,200)
glutCreateWindow(" Mid Point Circle ")
getinput()
glutDisplayFunc(display)
init()
glutMainLoop()
main()
|
# -*- coding: utf-8 -*-
#author:Haochun Wang
# import scrapy
from scrapy.spiders import Spider
import re
# from scrapy import *
import re, sys
import requests
import math
# if sys.getdefaultencoding() != 'utf-8':
# reload(sys)
# sys.setdefaultencoding('utf-8')
url = 'http://www.boc.cn/sourcedb/whpj/index.html' # Bank of China currency website
html = requests.get(url).content.decode('utf8')
a = html.index('<td>澳大利亚元</td>') # get the position of AUS dollar
s = html[a:a + 300] # narrow down the range
rate_res = float(re.findall('<td>(.*?)</td>', s)[3]) # Regex get the currency
rate_res = math.ceil(rate_res*0.1)*0.1
class ChemistSpider(Spider):
name = "chemistwarehouse"
allowed_domains = ["www.chemistwarehouse.com.au"]
start_urls = [
"http://www.chemistwarehouse.com.au/Shop-Online/587/Swisse",
"http://www.chemistwarehouse.com.au/Shop-Online/587/Swisse?page=2",
"http://www.chemistwarehouse.com.au/Shop-Online/587/Swisse?page=3",
"http://www.chemistwarehouse.com.au/Shop-Online/587/Swisse?page=4",
"http://www.chemistwarehouse.com.au/Shop-Online/587/Swisse?page=5",
"http://www.chemistwarehouse.com.au/Shop-Online/513/Blackmores",
"http://www.chemistwarehouse.com.au/Shop-Online/513/Blackmores?page=2",
"http://www.chemistwarehouse.com.au/Shop-Online/513/Blackmores?page=3",
"http://www.chemistwarehouse.com.au/Shop-Online/513/Blackmores?page=4",
"http://www.chemistwarehouse.com.au/Shop-Online/513/Blackmores?page=5",
"http://www.chemistwarehouse.com.au/Shop-Online/513/Blackmores?page=6",
"http://www.chemistwarehouse.com.au/Shop-Online/513/Blackmores?page=7",
"http://www.chemistwarehouse.com.au/Shop-Online/660/Nature-s-Way",
"http://www.chemistwarehouse.com.au/Shop-Online/660/Nature-s-Way?page=2",
"http://www.chemistwarehouse.com.au/Shop-Online/660/Nature-s-Way?page=3",
"http://www.chemistwarehouse.com.au/Shop-Online/660/Nature-s-Way?page=4",
"http://www.chemistwarehouse.com.au/Shop-Online/722/Healthy-Care",
"http://www.chemistwarehouse.com.au/Shop-Online/722/Healthy-Care?page=2",
"http://www.chemistwarehouse.com.au/Shop-Online/722/Healthy-Care?page=3",
"http://www.chemistwarehouse.com.au/Shop-Online/722/Healthy-Care?page=4",
"http://www.chemistwarehouse.com.au/Shop-Online/722/Healthy-Care?page=5",
"http://www.chemistwarehouse.com.au/Shop-Online/2128/Bio-Island"
]
def parse(self, response):
product_container = response.selector.xpath('//a[@class="product-container"]').extract()
# name_space = response.selector.xpath('//a[@class="product-container"]/@title').extract()
# price_space = response.selector.xpath('//span[@class="Price"]').extract()
# pricesv = response.selector.xpath('//div[@class="prices"]').extract()
with open("product_container.txt","a+") as file:
# product_container = str(product_container).split(",")
# for item in product_container:
# file.write(str(item)+"\n")
file.write(str(product_container))
file.close()
# with open("name_space.txt","w") as file:
# file.write(str(name_space))
# file.close()
# with open("price_space.txt","w") as file:
# file.write(str(price_space))
# file.close()
# with open("pricesv.txt","w") as file:
# file.write(str(type(pricesv)))
# file.write(str(pricesv))
# file.close()
# price_split_list = str(price_space).split(',')
#p1 = r"(?<=u\''/buy'/\d+/).+?(?=\')"
#pattern1 = re.compile(p1)
# sample = '<span class="Price">$12.99 \n \n \n \n </span>'
# p2 = r"(?<=\'<span class=\"Price\">).+?(?=</span>\')"
# name_res_lst = [] # 商品名列表 product name list
# price_res_lst = [] # 价格列表 price list
# org_price_res_lst = []# 原价列表 original price list
# discount_res_lst = [] # 折扣列表 discount list
# for i in name_space:
#name = re.search(pattern1, i)
#name_res_lst.append(name.group(0)[1:])
# name_res_lst.append(i)
product_list = []
p1 = r"(?<=title=\")[\s\w']*"
pattern1 = re.compile(p1)
p2 = r"(?<=<span class=\"Price\">\$).\d*.\d*"
pattern2 = re.compile(p2)
p3 = r"(?<=<span class=\"Save\">)\s*.*(?=Off)"
pattern3 = re.compile(p3)
for item in product_container:
name = re.search(pattern1,item)
price = re.search(pattern2,item)
save = re.search(pattern3,item)
if name:
name_item = name.group(0)
price_item = price.group(0)
if save:
save_item = save.group(0)
save_item = save_item.split(" ")[-1].strip("$")
with open("save_list.txt","a+",encoding="utf-8") as file:
file.write(save_item+"\n")
# save_item.strip("$")
discount_item = '%.2f' % (float(price_item) / (float(price_item) + float(save_item)))
else:
discount_item = "1"
price_item_cny = '%.2f' % (float(price_item) * rate_res )
product_list.append([name_item,price_item,price_item_cny,discount_item])
else:
pass
with open("res_tmp.txt", "a+") as b:
for item in product_list:
b.writelines(str(item) + '\n')
# for j in price_split_list:
# price = re.search(pattern2, j)
# price_res_lst.append(price.group(0))
# for j in pricesv:
# price = re.search(pattern2, j)
# if price:
# price = price.group(0)
# price_res_lst.append(price)
# price_item = float(price)
# else:
# price_item = 0
# # u = i.split('class="Price">')
# # price_item = float(j.split('\n')[0][1:])
# if 'class="Save"' in j:
# save_item = re.search(pattern3, j)
# save_item = float(save_item.group(0).strip().strip("$"))
# discount = '%.2f' % (price_item / (price_item + save_item))
# discount_res_lst.append(discount)
# org_price = str(price_item + save_item)
# org_price_res_lst.append(org_price)
# else:
# discount_res_lst.append("1")
# with open("res_tmp.txt", "a+") as b:
# for k in range(len(name_res_lst)):
# b.writelines(name_res_lst[k] + ', '+price_res_lst[k].split(' ')[0] + ', ' +
# str((float(price_res_lst[k].split(' ')[0])) * (rate_res + 0.3))
# + ', ' + str(discount_res_lst[k]) + '\n')
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import frappe
from frappe import _
def get_data():
roles = frappe.get_roles(frappe.session.user)
if 'Prospect' not in (roles):
return {
"mycfo": {
"color": "grey",
"icon": "icon-th",
"type": "module",
"label": _("Customer Details")
},
"Checklist":{
"color": "blue",
"icon": "icon-list",
"type": "module",
"label": _("Checklist")
},
"IP Library": {
"color": "#8e44ad",
"icon": "octicon octicon-database",
"type": "page",
"label": _("IP Library"),
"link":"ip-file-dashboard"
},
"Trainings":{
"color": "#4aa3df",
"icon": "octicon octicon-device-camera-video",
"type": "page",
"label": _("Trainings"),
"link":"training-dashboard"
},
"Discussion Forum": {
"color": "#8e44ad",
"icon": "octicon octicon-organization",
"type": "page",
"label": _("Discussion Forum"),
"link":"discussion-forum"
}
}
else:
return {
"Skill Mapping": {
"color": "grey",
"icon": "icon-th",
"type": "doctype",
"label": "Skill Mapping",
"link": "List/Skill Mapping",
"description": _("Skill Mapping Details"),
},
"Resource Pool":{
"color": "blue",
"icon": "icon-list",
"type": "page",
"label": _("Resource Pool"),
"link":"resourcepool"
}
}
|
# the following is an attempt to handle the Sage script `RiordanGroup' as a
# python module, but that cannot really be the case, since that file isn't a python module.
import os
import sys
entry_riordan_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(entry_riordan_path)
# the following is only for debugging
#print sys.path
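# With the parent directory on sys.path, sibling Python packages that sit next
# to this file become importable. A minimal sketch of that idea; the module
# name 'riordan_utils' below is purely illustrative and not part of this repo:
#
# import importlib
# riordan_utils = importlib.import_module('riordan_utils')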
|
#!/usr/bin/env python3
import logging
import json
from twilio.rest import Client
"""
Send text message to a list of numbers
client - twilio client that will send the message
client_number - number to send text from
message - body of the text message
"""
def send_mass_texts(client:Client, client_number:str, message:str, number_file:str = "numbers.txt"):
with open(number_file) as f:
numbers = f.readlines()
logging.info("--------------------")
logging.info(f"{client} sending mass texts to {len(numbers)} phone numbers")
logging.info("--------------------")
for number in numbers:
send_message(client, client_number, number, message)
"""
Send text message to a phone number
client - twilio client that will send the message
number - phone number to send the message to
client_number - number to send text from
message - body of the text message
"""
def send_message(client:Client, client_number:str, number:str, message:str):
message = client.messages.create(
body=message,
from_=client_number,
to=number
)
logging.info(f"{client} sent SMS {message} to {number}")
if __name__ == "__main__":
with open("secrets.json") as f:
data = json.load(f)
account_sid = data["account_sid"]
auth_token = data["auth_token"]
client_number = data["client_number"]
client = Client(account_sid, auth_token)
message = "Don't forget to meditate today!"
send_mass_texts(client, client_number, message)
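# For reference, a secrets.json matching the keys read above could look like
# the following (placeholder values, not real credentials), and numbers.txt is
# assumed to hold one destination phone number per line:
#
# {
#     "account_sid": "ACxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx",
#     "auth_token": "your_auth_token",
#     "client_number": "+15551234567"
# }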
|
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import PolynomialFeatures
from sklearn import datasets, linear_model, metrics, svm
from sklearn.model_selection import KFold
from sklearn.model_selection import cross_val_score, cross_val_predict
from mpl_toolkits.mplot3d import Axes3D
from sklearn.model_selection import ShuffleSplit, GridSearchCV, learning_curve
from math import sqrt
from sklearn.svm import SVC
import matplotlib.pyplot as plt
import sklearn
import datetime
import numpy as np
import pandas as pd
import nltk # needed for Naive-Bayes
# Preprocessing
dataframe = pd.read_json('./data/users.json')
# Determining if any value is missing
# https://chartio.com/resources/tutorials/how-to-check-if-any-value-is-nan-in-a-pandas-dataframe/
# Chaining '.values.any()'
if (dataframe.isnull().values.any()):
dataframe = dataframe[dataframe['error'] != 'Permission denied']
dataframe = dataframe.dropna(subset=['created', 'karma', 'submitted'], how='all')
dataframe = dataframe.drop(['about','error','id'], axis=1)
dataframe['created'] = dataframe['created'].astype(int).values.reshape(-1,1)
dataframe['karma'] = dataframe['karma'].astype(int).values.reshape(-1,1)
dataframe['submitted'] = dataframe['submitted'].astype(int).values.reshape(-1,1)
# Split dataset into train and test subsets (SciKit)
x_train, x_test, y_train, y_test, z_train, z_test = sklearn.model_selection.train_test_split(
dataframe['created'].values,
dataframe['karma'].values,
dataframe['submitted'].values,
test_size=0.20, random_state=5)
# Linear Regression - a linear approach for modeling the relationship between a scalar
# dependent variable 'y' and one or more explanatory variables denoted 'x'
linear_regr = linear_model.LinearRegression()
# Training
x_and_z_train = np.stack([x_train, z_train], axis=1).reshape(-1,2)
x_and_z_test = np.stack([x_test, z_test], axis=1).reshape(-1,2)
linear_regr.fit(x_and_z_train,y_train)
predict_regr = linear_regr.predict(x_and_z_test)
def kfold_cross_validation():
# We split out data into k different subsets
kf = KFold(n_splits=10)
mean_absolute_error_list = []
root_mean_square_error_list = []
for train_index, test_index in kf.split(dataframe['created'].values, dataframe['karma'].values, dataframe['submitted'].values):
x_and_z_temp = np.stack([dataframe['created'].values, dataframe['submitted'].values], axis=1).reshape(-1,2)
x_and_z_train, x_and_z_test = x_and_z_temp[train_index], x_and_z_temp[test_index]
y_train, y_test = dataframe['karma'].values[train_index], dataframe['karma'].values[test_index]
linear_model_temp = linear_model.LinearRegression()
linear_model_temp.fit(x_and_z_train, y_train)
predict_regr = linear_model_temp.predict(x_and_z_test)
mean_absolute_error_temp = str(metrics.mean_absolute_error(y_test, predict_regr))
root_mean_square_error_temp = str(sqrt(metrics.mean_squared_error(y_test, predict_regr)))
mean_absolute_error_list.append(mean_absolute_error_temp)
        root_mean_square_error_list.append(root_mean_square_error_temp)
print("Mean Absolute Error (MAE): ", mean_absolute_error_temp)
print("Root Mean Square Error (RMSE)", root_mean_square_error_temp)
print("")
    mean_absolute_error_average = np.array(mean_absolute_error_list).astype(float)
    root_mean_square_error_average = np.array(root_mean_square_error_list).astype(float)
print("Mean Absolute Error (MAE) Average, ", str(np.mean(mean_absolute_error_average)))
print("Root Mean Square Error (RMSE) Average, ", str(np.mean(root_mean_square_error_average)))
def logistic_regression():
dataframe_cancer = pd.read_csv('./data/breast_cancer.csv', sep=',')
# Report the head of the table
print(dataframe_cancer.head(10))
logistic_regr = linear_model.LogisticRegression()
header_temp1 = ['Concavity1','Texture1','Symmetry1']
header_temp2 = ['Perimeter1','Area1','Compactness1']
header_temp3 = ['Perimeter1','Area1','Compactness1','Concavity1','Texture1','Symmetry1']
headers = [header_temp1, header_temp2, header_temp3]
calculated_list = []
for vars in headers:
x = dataframe_cancer[vars].values.reshape(-1, len(vars))
y = dataframe_cancer['Diagnosis']
kf = KFold(n_splits=10)
calculated_accuracy_list = []
for train_index, test_index in kf.split(x, y):
x_train, x_test = x[train_index], x[test_index]
y_train, y_test = y[train_index], y[test_index]
logistic_regr.fit(x_train, y_train)
predict_regr = cross_val_predict(logistic_regr, x_test, y_test, cv=10)
calculated_accuracy = metrics.accuracy_score(y_test, predict_regr, normalize=True)
calculated_accuracy_list.append(calculated_accuracy)
print("Accuracy: ", calculated_accuracy)
calculated_list.append(calculated_accuracy_list)
print()
    average_size = np.array(calculated_list[1]).astype(float)
    average_shape = np.array(calculated_list[0]).astype(float)
print("Average Size: ", str(np.mean(average_size)))
print("Average Shape: ", str(np.mean(average_shape)))
def generate_scatterplot(dataframe):
    # 3D scatter plot: compare 3 characteristics of the data instead of two
# https://matplotlib.org/mpl_toolkits/mplot3d/tutorial.html
model = plt.figure()
plot = model.add_subplot(111, projection='3d')
plot.scatter(xs=x_test, ys=y_test, zs=z_test, c='r', marker='o')
plot.set_xlabel('created')
plot.set_ylabel('karma')
plot.set_zlabel('submitted')
plot.get_figure().savefig('scatterplot.png')
def run():
# Part 1
# Split data into 80/20 training and testing and create a new multivariate linear
# analysis. In this model, include the number of posts as an addition to time
#generate_scatterplot(dataframe)
# Report the MAE and RMSE
print("Mean Absolute Error (MAE)")
print("Training Data: ", str(metrics.mean_absolute_error(y_train, linear_regr.predict(x_and_z_train))))
print("Test Data: ", str(metrics.mean_absolute_error(y_test, predict_regr)))
print("Root-Mean-Square Error (RMSE)")
print("Training Data: ", str(sqrt(metrics.mean_squared_error(y_train, linear_regr.predict(x_and_z_train)))))
print("Test Data: ", str(sqrt(metrics.mean_squared_error(y_test, predict_regr))))
# Part 2
# Create 10 training/test data pairs
# Train model using training data from current fold
# Test model on the test data from current fold
# Report MAE and RMSE on test data from current fold
# Lastly take the average of each metric for all the folds
kfold_cross_validation()
# Part 3
# Load dataset and report the head
# Using 10-fold cross-validation, train your logistic model using every
# variable in the dataset
# Report the accuracy
logistic_regression()
run()
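# A leaner alternative to the manual k-fold loop above, assuming only the
# averaged error is needed: cross_val_score (already imported) can run the
# 10-fold evaluation in one call. Sketch only, not wired into run():
# x_and_z_all = np.stack([dataframe['created'].values, dataframe['submitted'].values], axis=1)
# mae_per_fold = -cross_val_score(linear_model.LinearRegression(), x_and_z_all,
#                                 dataframe['karma'].values, cv=10,
#                                 scoring='neg_mean_absolute_error')
# print("MAE per fold:", mae_per_fold, "average:", mae_per_fold.mean())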
|
# -*- coding: utf-8 -*-
import logging
from typing import Any
import aiocache
import aiocache.backends
from .backends import SimpleMaxTTLMemoryCache
from .serializers import CompressionSerializer
from .cache_group import CacheGroup
from ..typedefs import WebApp
logger = logging.getLogger(__name__)
# pylint: disable=unused-argument
def setup_caches(app: WebApp, loop) -> Any:
logger.info('before_server_start -> cache.setup_caches')
args = app.config.args
caches = [SimpleMaxTTLMemoryCache()]
if args.redis_host:
try:
redis_cache = aiocache.RedisCache(endpoint=args.redis_host,
port=args.redis_port,
timeout=10,
serializer=CompressionSerializer())
if redis_cache:
caches.append(redis_cache)
except Exception:
logger.exception('failed to add redis cache to caches')
configured_cache_group = CacheGroup(caches=caches)
return configured_cache_group
|
from __future__ import absolute_import
from data_structures import Candidate, Document, Sentence
from readers import MinimalCoreNLPReader, RawTextReader
from base import LoadFile
from utils import (load_document_frequency_file, compute_document_frequency,
train_supervised_model, load_references,
compute_lda_model, load_document_as_bos,
compute_pairwise_similarity_matrix)
import unsupervised
import supervised
|
class Area:
@staticmethod
    def square(side):
return (side * side) if side > 0 else 0
@staticmethod
def rectangle(length, breadth):
return length * breadth
@staticmethod
def triangle(breadth, height):
return (breadth * height) / 2
@staticmethod
def circle(radius):
return 3.14 * (radius * radius)
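# A small usage example of the Area helpers; the numbers below are illustrative.
if __name__ == "__main__":
    print(Area.square(4))        # 16
    print(Area.rectangle(3, 5))  # 15
    print(Area.triangle(6, 4))   # 12.0
    print(Area.circle(2))        # 12.56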
|
#!/usr/bin/env python
"""
Helper function used throughout the package.
"""
import typing as tp
|
import pygame
from pygame.locals import *
import random
import time
import sys
import os
#DEBUGGER DEVELOPER TOOL
debug = False
if debug == True:
from threading import *
from debug import *
def passthu():
while True:
debug.update(globals())
time.sleep(0.5)
ptt = Thread(target=passthu)
ptt.setDaemon(True)
ptt.start()
suits = ["C","D","H","S"]
nums = ["A","2","3","4","5","6","7","8","9","10","J","K","Q"]
unshuffled_deck = []
if False:
import pygame._view
class deckmaker:
for suit in suits:
for num in nums:
unshuffled_deck.append(num+suit)
def deckcreate(decks):
        deck = unshuffled_deck * int(decks)
random.shuffle(deck)
return deck
def rp(relative_path):
try:
raise Exception
base_path = sys._MEIPASS
except Exception:
base_path = os.path.abspath(".")
return os.path.join(base_path,relative_path)
class inputbox:
def get_key():
while 1:
event = pygame.event.poll()
if event.type == KEYDOWN:
return event.key
elif event.type == pygame.QUIT:
pygame.quit()
exit()
else:
pass
def display_box(screen, message):
fontobject = pygame.font.Font(rp('freesans.ttf'), 18)
pygame.draw.rect(screen, (0, 0, 0),
((screen.get_width() / 2) - 100,
(screen.get_height() / 2) - 10,
200, 20), 0)
pygame.draw.rect(screen, (255, 255, 255),
((screen.get_width() / 2) - 102,
(screen.get_height() / 2) - 12,
204, 24), 1)
if len(message) != 0:
screen.blit(fontobject.render(message, 1, (255, 255, 255)),
((screen.get_width() / 2) - 100, (screen.get_height() / 2) - 10))
pygame.display.flip()
def ask(screen, question, money):
pygame.font.init()
current_string = []
inputbox.display_box(screen, question + ": " + "".join(current_string))
while 1:
inkey = inputbox.get_key()
if inkey == K_BACKSPACE:
current_string = current_string[0:-1]
elif inkey == K_RETURN:
try:
amount = int("".join(current_string))
if amount < money+1:
break
else:
None
except:
None
elif inkey == K_MINUS:
current_string.append("_")
elif inkey <= 127:
current_string.append(chr(inkey))
inputbox.display_box(screen, question + ": " + "".join(current_string))
return "".join(current_string)
chips = {"currency":"£","amount":999999999999999999999999}
card_value = {"A":11,"1":1,"2":2,"3":3,"4":4,"5":5,"6":6,"7":7,"8":8,"9":9,"10":10,"J":10,"Q":10,"K":10}
card_value_hard = {"A":1,"1":1,"2":2,"3":3,"4":4,"5":5,"6":6,"7":7,"8":8,"9":9,"10":10,"J":10,"Q":10,"K":10}
con = ["bust","in","lose","bj","push"]
def card_load(item,x=150,y=200):
base_path = "./assets/cards/"
to_load = base_path + item + ".png"
card_image = pygame.image.load(to_load)
card = pygame.transform.scale(card_image,(x,y)).convert_alpha()
return card
def card_load_unsafe(item):
base_path = "./assets/cards/"
to_load = base_path + item + ".png"
card = pygame.image.load(to_load).convert_alpha()
return card
def add(give):
global chips
chips["amount"] += give
def dubd(bet):
return 2*bet
def chip_command(command,bet):
give=0
if command == "push":
give = bet
if command == "win":
give = 2*bet
if command == "lose":
give = 0
if command == "bj":
give = 2.5*bet
add(give)
bet = 0
pygame.init()
size=(900,600)
minsize=(800,550)
if size[0] < minsize[0] or size[1] < minsize[1]:
size = minsize
fs = True
if fs == True:
screen = pygame.display.set_mode((0,0),pygame.FULLSCREEN)
else:
screen = pygame.display.set_mode(size)
pygame.display.set_caption("AA Jack");
back_color = "red"
num_decks = 8
backer = card_load(back_color+"_back")
sides = (size[0] - 300)/2
cardx1 = sides
cardy = size[1]/100*65
deck_start = deckmaker.deckcreate(num_decks)
random.shuffle(deck_start)
deck = deck_start
done = False
screen_width = pygame.display.get_surface().get_size()[0]
screen_height = pygame.display.get_surface().get_size()[1]
swh = pygame.display.get_surface().get_size()
card_width = 150
clock = pygame.time.Clock()
bg = pygame.image.load("assets/table.png")
bg = pygame.transform.scale(bg,swh)
split = False
def check():
global deck,deck_start
if len(deck) == 0:
deck_start = deckmaker.deckcreate(num_decks)
deck = deck_start
def hit(person):
global mycards,dealerscards,split,card_side
if split == False:
i = "Done"
if person == "player":
mycards.append(deck.pop(0))
elif person == "dealer":
dealerscards.append(deck.pop(0))
else:
i = "Fail"
return i
else:
i = "Done"
if person == "player":
mycards[card_side].append(deck.pop(0))
elif person == "dealer":
dealerscards.append(deck.pop(0))
else:
i = "Fail"
return i
def reset():
    global mycards, dealerscards, step, split, card_side, splitdoubled
mycards = []
split = False
card_side = 0
splitdoubled = [0,0]
dealerscards = []
check()
step = 0
getbet()
def getbet():
global bet
bet = -1
screen.blit(bg, (0,0))
titlepic = card_load("title",int(screen_width/1.5),int(screen_height/6))
w = titlepic.get_rect().size[0]
whomadeit = card_load("wb",int(screen_width/2.5),int(screen_height/5))
wm = whomadeit.get_rect().size[0]
wh = whomadeit.get_rect().size[1]
screen.blit(titlepic,((screen_width-w)/2,screen_height/8))
screen.blit(whomadeit,((screen_width-wm)/2,(screen_height-2*wh)))
dis_money()
pygame.display.flip()
bet = int(inputbox.ask(screen,"Bet Amount",chips["amount"]))
while bet == -1:
None
chips["amount"] -= int(bet)
def dis_money():
text = font.render(("Chips: " + chips["currency"]) + str(chips["amount"]), True, (0, 0, 0), (255, 255, 255))
screen.blit(text, (0,screen_height/2))
if bet == -1:
dim = "None"
else:
dim = chips["currency"]+str(bet)
text = font.render(("Bet: " + str(dim)), True, (0, 0, 0), (255, 255, 255))
screen.blit(text, (0,screen_height/2+font.size(("Chips: " + chips["currency"]) + str(chips["amount"]))[1]))
def check_amount(cards,who):
global split
if split == False or who == "d":
total = 0
dsoft = True
there_is_ace = False
for card in cards:
if "A" in card:
there_is_ace = True
if (there_is_ace and len(cards) == 2) or ("A" in cards[-1]):
dsoft = False
if dsoft == False:
for card in cards:
total += card_value[card[:-1]]
else:
for card in cards:
total += card_value_hard[card[:-1]]
else:
total = []
for cardlist in cards:
total_of = 0
dsoft = True
there_is_ace = False
for card in cardlist:
if "A" in card:
there_is_ace = True
if (there_is_ace and len(cardlist) == 2) or ("A" in cardlist[-1]):
dsoft = False
if dsoft == False:
for card in cardlist:
total_of += card_value[card[:-1]]
else:
for card in cardlist:
total_of += card_value_hard[card[:-1]]
total.append(total_of)
return total
def gr(c):
mess = card_load(c,x=500,y=400)
w = mess.get_width()
h = mess.get_height()
x = screen_width
y = screen_height
x = (x-w)/2
y = (y-h)/2
screen.blit(mess,[x,y])
pygame.display.flip()
time.sleep(3)
def dis_cards(cards,y,r=True):
row_width = int(card_width * (len(cards))) + int(len(cards) * 30)
for card in range(len(cards)):
card_x_pos = (int(screen_width / 2) - int(row_width / 2)) + (card * card_width) + (card * 30)
screen.blit(card_load(str(cards[card])),[card_x_pos,y])
if r == False:
screen.blit(backer,[card_x_pos,y])
def dis_screen(real=False):
global split
screen.blit(bg, [0,0])
if split == True:
dis_cards_as(mycards,screen_height-210)
else:
dis_cards(mycards,screen_height-210)
dis_cards(dealerscards,5,real)
if real == True:
a = dealer_total
else:
        a = dealer_total - card_value[dealerscards[-1][:-1]]
text = font.render(("Dealer: "+str(a)+"| Player: "+str(my_total)), True, (0, 0, 0), (255, 255, 255))
screen.blit(text, (0,0))
dis_money()
cardx1 = (screen_width/2)-180
screen.blit(card_load("split",x=150,y=50),[cardx1,size[1]/2-55])
screen.blit(card_load("dd",x=150,y=50),[cardx1,size[1]/2])
screen.blit(card_load("hit",x=150,y=50),[cardx1+190,size[1]/2-55])
screen.blit(card_load("stand",x=150,y=50),[cardx1+190,size[1]/2])
def stand():
global dealer_total,my_total,bet,dealerscards,mycards,split,card_side,splitdoubled
my_total = check_amount(mycards,"u")
dealer_total = check_amount(dealerscards,"d")
dis_screen()
dis_money()
if split == False:
dis_screen(True)
dis_money()
pygame.display.flip()
pygame.display.update()
time.sleep(2)
if (dealer_total>my_total) and (dealer_total< 22):
gr("lose")
chip_command("lose",bet)
elif dealer_total == my_total:
gr("push")
chip_command("push",bet)
elif dealer_total >21:
gr("win")
chip_command("win",bet)
elif my_total > 21:
gr("lose")
chip_command("lose",bet)
else:
while (dealer_total<my_total) and (dealer_total<17):
hit("dealer")
dealer_total = check_amount(dealerscards,"d")
dis_screen(True)
pygame.display.flip()
time.sleep(2)
if dealer_total > 21 or my_total>dealer_total:
gr("win")
chip_command("win",bet)
elif dealer_total == my_total:
gr("push")
chip_command("push",bet)
else:
if dealer_total > my_total:
gr("lose")
chip_command("lose",bet)
reset()
else:
dis_screen()
dis_money()
if card_side == 0:
card_side = card_side+1
else:
my_totalq = check_amount(mycards,"u")
c = 0
base_bet = bet /(splitdoubled[0]+splitdoubled[1]+2)
amwin = 0
for my_total in my_totalq:
bet = base_bet *(splitdoubled[c]+1)
dis_screen(True)
pygame.display.flip()
pygame.display.update()
time.sleep(2)
if (dealer_total>my_total) and (dealer_total< 22):
chip_command("lose",bet)
amwin -= 1
elif dealer_total == my_total:
chip_command("push",bet)
elif dealer_total >21:
chip_command("win",bet)
amwin += 1
elif my_total > 21:
chip_command("lose",bet)
amwin -=1
else:
while (dealer_total<my_total) and (dealer_total<17):
hit("dealer")
dealer_total = check_amount(dealerscards,"d")
dis_screen(True)
pygame.display.flip()
time.sleep(2)
if dealer_total > 21 or my_total>dealer_total:
chip_command("win",bet)
amwin += 1
elif dealer_total == my_total:
chip_command("push",bet)
else:
if dealer_total > my_total:
chip_command("lose",bet)
amwin -= 1
                c += 1
if amwin == 0:
wpl = "push"
elif amwin < 0:
wpl = "lose"
else:
wpl = "win"
gr(wpl)
time.sleep(2)
reset()
def double():
global chips,bet,card_side,split,splitdoubled
if split == False:
if chips["amount"] > bet-1:
chips["amount"] -= bet
bet = 2*bet
time.sleep(2)
hit("player")
stand()
else:
#print("Cant Double Down, Not enough chips!")
None
else:
if card_side == 0:
if chips["amount"] > bet/2-1:
chips["amount"] -= bet/2
bet += bet/2
splitdoubled[0] += 1
time.sleep(2)
hit("player")
card_side += 1
else:
#print("Cant Double Down, Not enough chips!")
None
elif card_side == 1:
if splitdoubled[0] == 0:
if chips["amount"] > bet/2-1:
chips["amount"] -= bet/2
bet += bet/2
splitdoubled[1] += 1
time.sleep(2)
hit("player")
stand()
else:
#print("Cant Double Down, Not enough chips!")
None
else:
if chips["amount"] > bet/3-1:
chips["amount"] -= bet/3
bet += bet/3
splitdoubled[1] += 1
time.sleep(2)
hit("player")
stand()
else:
#print("Cant Double Down, Not enough chips!")
None
step = 0
splitdoubled = [0,0]
mycards = []
dealerscards = []
cardx1 = (screen_width/2)-180
split_button = pygame.Rect(cardx1, size[1]/2-55, 150, 50)
dd_button = pygame.Rect(cardx1, size[1]/2, 150, 50)
hit_button = pygame.Rect(cardx1+190, size[1]/2-55, 150, 50)
stand_button = pygame.Rect(cardx1+190, size[1]/2, 150, 50)
dealer_total = 0
my_total = 0
soft_total = False
font = pygame.font.Font(rp('freesans.ttf'), 32)
logo = card_load_unsafe("logo")
pygame.display.set_icon(logo)
def dis_cards_as(allcards,y):
i = 0
for cards in allcards:
row_width = int(card_width * (len(cards))) + int(len(cards) * 30)
for card in range(len(cards)):
if i==0:
card_x_pos = (int(screen_width / 4) - int(row_width / 2)) + (card * card_width) + (card * 30)
if i==1:
card_x_pos = (int(screen_width / 4 * 3) - int(row_width / 2)) + (card * card_width) + (card * 30)
screen.blit(card_load(str(cards[card])),[card_x_pos,y])
i = i+1
card_side = 0
getbet()
#DAS
#NSAS
while not done:
for event in pygame.event.get():
if event.type == pygame.QUIT:
done = True
if event.type == pygame.MOUSEBUTTONDOWN:
mouse_pos = event.pos
if split_button.collidepoint(mouse_pos):
#print('split_button was pressed at {0}'.format(mouse_pos))
if len(mycards) == 2 and split == False and ((chips["amount"] > bet-1)or bet == 0):
if card_value[mycards[0][:-1]] == card_value[mycards[1][:-1]]:
mycards = [[mycards[0]],[mycards[0]]]
split = True
card_side = 0
chips["amount"] -= bet
bet +=bet
dis_screen()
pygame.display.flip()
else:
#print("Cant Split. Not same valued cards.")
None
else:
#print("Cant Split. You have already split or not enough chips or not first turn.")
None
if dd_button.collidepoint(mouse_pos):
#print('dd_button was pressed at {0}'.format(mouse_pos))
double()
if hit_button.collidepoint(mouse_pos):
#print('hit_button was pressed at {0}'.format(mouse_pos))
hit("player")
check()
step = 1
if stand_button.collidepoint(mouse_pos):
#print('stand_button was pressed at {0}'.format(mouse_pos))
stand()
if split == False:
if my_total > 21 and soft_total == False:
dis_screen(True)
gr("bust")
chip_command("lose",bet)
reset()
if my_total == 21 and len(mycards) == 2:
dis_screen(True)
if dealer_total == 21:
gr("push")
chip_command("push",bet)
else:
gr("bj")
chip_command("bj",bet)
reset()
if dealer_total == 21 and len(dealerscards) == 2:
dis_screen(True)
gr("lose")
chip_command("lose",bet)
reset()
if step == 0:
for i in "12":
check()
mycards.append(deck.pop(0))
check()
dealerscards.append(deck.pop(0))
step += 1
if step == 1:
dealer_total = 0
my_total = 0
soft = False
dsoft = False
dealer_total = check_amount(dealerscards,"d")
my_total = check_amount(mycards,"u")
#print(dealer_total,my_total)
step +=1
dis_screen()
dis_money()
pygame.display.flip()
clock.tick(120)
pygame.quit()
exit()
|
# -*- coding: utf-8 -*-
"""
Created on Sat Aug 15 17:06:55 2020
@author: 91880
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
cars=pd.read_csv("cars_sampled.csv")
cars.columns
cars.shape
cars.head()
cars.info()
description=cars.describe()
cars.head()
cars.columns[cars.isnull().any()]
#The missing values are in columns 'vehicleType', 'gearbox', 'model', 'fuelType', 'notRepairedDamage'
cars.groupby(by=['brand']).price.max().sort_values()
#Max price is for ford, i.e. 12345678
#Min price is for daewoo, i.e. 2799
cars['price'].value_counts()
cars1=cars.copy(deep=True)
#Making two separate datasets on the basis of price
car_low_price=cars1.loc[cars1['price']<100]
car_data_new=cars1.loc[cars1['price']>=100]
car_data_new.shape
#Saving it to separate csv file
car_low_price.to_csv('car_price_less_than_100.csv',index=False)
car_data_new.to_csv('car_price_greater_than_100.csv',index=False)
#Splitting the data
from sklearn.model_selection import train_test_split
train,test=train_test_split(car_data_new,test_size=0.2073957,random_state=0)
train_new=pd.concat([train,car_low_price])
train.to_csv('train_without_price_missing.csv',index=False)
train_new.to_csv('train_with_price_missing.csv',index=False)
test.to_csv('test_data.csv',index=False)
#Reading the train data that includes vague price values to impute X's
train_data=pd.read_csv('train_with_price_missing.csv')
train_data.head()
#Checking for randomness of missing data
import missingno as msno
msno.bar(train_data)
msno.heatmap(train_data)
msno.matrix(train_data)
corr_matrix=train_data.corr()
sns.heatmap(corr_matrix,annot=True)
plt.show()
#Imputing missing values with term 'missing'
train_data.isnull().sum()
train_data['model'].fillna('missing_model',inplace=True)
train_data['vehicleType'].fillna('missing_vehicle',inplace=True)
train_data['gearbox'].fillna('missing_gear',inplace=True)
train_data['fuelType'].fillna('missing_fuel',inplace=True)
train_data['notRepairedDamage'].fillna('missing_damage',inplace=True)
train_data.isnull().sum()
#saving the imputed data
train_data.to_csv('train_data_missing1.csv',index=False)
data2=pd.read_csv('train_data_missing1.csv')
#Finding the Outliers
for i in data2.columns:
if (data2[i].dtype=='O'):
print( data2[i].value_counts(sort=False,normalize=True))
#The variables 'offerType' and 'seller' can be dropped from the data as they have only one significant category
data2.drop("seller",inplace=True,axis=1)
data2.drop("offerType",inplace=True,axis=1)
data2.info()
#box plot
import seaborn as sns
sns.boxplot(x=data2['abtest'],y=data2['price'])
#percentile
np.percentile(data2['price'],99.4)
# 4.5% of the data is below 100
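# A quick way to verify that figure directly from the imputed training frame
# loaded in this script (sketch, assuming data2 is still in scope):
# share_below_100 = (data2['price'] < 100).mean() * 100
# print('%.1f%% of prices are below 100' % share_below_100)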
|
from agent.agent import Agent
from functions import *
import sys
import pandas as pd
# import cudf as pd
import os
#set GPU Device
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
main_df=pd.DataFrame()
empty_list=[]
if len(sys.argv) != 4:
print( "Usage: python train.py [stock] [window] [episodes]")
exit()
stock_name, window_size, episode_count = sys.argv[1], int(sys.argv[2]), int(sys.argv[3])
agent = Agent(window_size)
data = getStockDataVec(stock_name)
# l = (data.size) -1
l = len(data) -1
#l=300
batch_size = 32
for e in range(episode_count + 1):
print( "Episode " + str(e) + "/" + str(episode_count))
state = getState(data, 0, window_size + 1)
total_profit = 0
agent.inventory = []
for t in range(l):
# state=np.reshape(state,(state.shape[0],state.shape[1],1))
action = agent.act(state)
# sit
next_state = getState(data, t + 1, window_size + 1)
reward = 0
if action == 1: # buy
agent.inventory.append(data[t])
print( "Buy: " + formatPrice(data[t])+" index:"+str(t))
elif action == 2 and len(agent.inventory) > 0: # sell
bought_price = agent.inventory.pop(0)
reward = max(data[t] - bought_price, 0)
total_profit += data[t] - bought_price
empty_list.append({'Buy':bought_price,'Sell':data[t],'Profit':data[t] - bought_price})
print( "Sell: " + formatPrice(data[t]) + " | Profit: " + formatPrice(data[t] - bought_price)+" index:"+str(t))
done = True if t == l - 1 else False
agent.memory.append((state, action, reward, next_state, done))
state = next_state
if done:
df1 = pd.DataFrame(empty_list, columns=['Buy','Sell','Profit'])
path='./output/episode'+str(e)+'.csv'
df1.to_csv(path)
            main_df = pd.concat([main_df, df1])
print( "--------------------------------")
print( "Total Profit: " + formatPrice(total_profit))
print( "--------------------------------")
if len(agent.memory) > batch_size:
agent.expReplay(batch_size)
if e % 1 == 0:
agent.model.save("models/model_ep" + str(e))
main_df.to_csv('./main_df.csv')
|
import cv2 as cv
import numpy as np
import os
def limiarizar(origem, destino):
nome = []
for n in os.listdir(origem):
nome.append(n)
for i in nome:
os.chdir(origem)
img = cv.imread(i)
img_gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
        # Use Otsu's method to perform automatic thresholding of the image
        ret2,th2 = cv.threshold(img_gray,127,255,cv.THRESH_BINARY+cv.THRESH_OTSU)
        # create the kernel used for the dilation and erosion of the image
        kernel = np.ones((1,1),np.uint8)
        # dilation grows the object area; used to thicken the borders in the image
        dilation = cv.dilate(th2,kernel,iterations = 1)
        # erosion eats away the object boundaries; used to thin the borders in the image
        erosion = cv.erode(th2, kernel, iterations=3)
os.chdir(destino)
cv.imwrite(i, erosion)
origem = r'C:\Users\carlo\OneDrive\Documents\GitHub\tcc_uel\cinza\.'
destino = r'C:\Users\carlo\OneDrive\Documents\GitHub\tcc_uel\limiarizadas'
limiarizar(origem, destino)
|
from django.contrib import admin
from .models import Book, Category, BookImage
from django.contrib.gis.admin import OSMGeoAdmin
from django.utils.html import format_html
# Register your models here.
class BookImageInline(admin.StackedInline):
model = BookImage
    list_display = ('thumbnail_tag',)
readonly_fields = ('thumbnail',)
def thumbnail_tag(self, obj):
if obj.thumbnail:
            return format_html(
                '<img src="{}"/>', obj.thumbnail.url
            )
return "-"
thumbnail_tag.short_description = "Thumbnail"
class CategoryAdmin(admin.ModelAdmin):
list_display=['name','slug']
prepopulated_fields={'slug':('name',)}
class BookAdmin(OSMGeoAdmin):
list_display=['name','category','price','date','seller','city','location', 'condition', 'isbn','rating']
list_filter =['category','date']
list_editable = ['price','city']
inlines = [BookImageInline]
admin.site.register(Category,CategoryAdmin)
admin.site.register(Book,BookAdmin)
|
# -*- coding: utf-8 -*-
from __future__ import division, print_function
import unittest
import numpy as np
from nufft import nufft1d1freqs, nufft1d1, nufft1d2, nufft1d3
def _get_data():
ms = 90
nj = 128
k1 = np.arange(-0.5 * nj, 0.5 * nj)
j = k1 + 0.5 * nj + 1
x = np.pi * np.cos(-np.pi * j / nj)
c = np.empty_like(x, dtype=np.complex128)
c.real = np.sin(np.pi * j / nj)
c.imag = np.cos(np.pi * j / nj)
f = 48 * np.cos((np.arange(ms) + 1) * np.pi / ms)
return x, c, f
def _get_data_roundtrip():
ms = 512
nj = 200
x = np.sort(np.random.choice(np.linspace(-np.pi,
np.pi,
ms,
endpoint=False),
nj,
replace=False))
c = np.random.randn(nj)
f = np.empty(ms)
return x, c, f
def _error(exact, approx):
return np.sqrt(np.sum(np.abs(exact - approx) ** 2) / np.sum(np.abs(exact)**2))
class NUFFT1DTestCase(unittest.TestCase):
"""Tests for 1D `nufft.py`."""
def setUp(self):
self.x, self.c, self.f = _get_data()
def _type_1_even(self, eps=1e-10):
p2 = nufft1d1(self.x, self.c, len(self.f), direct=True)
p1 = nufft1d1(self.x, self.c, len(self.f), eps=eps)
self.assertTrue(_error(p1, p2) < eps,
"Type 1: Discrepancy between direct and fft function")
self.assertEqual(len(nufft1d1freqs(len(self.f))), len(p1),
"Wrong length of frequency array")
def _type_1_odd(self, eps=1e-10):
p2 = nufft1d1(self.x, self.c, len(self.f) + 1, direct=True)
p1 = nufft1d1(self.x, self.c, len(self.f) + 1, eps=eps)
self.assertTrue(_error(p1, p2) < eps,
"Type 1: Discrepancy between direct and fft function")
self.assertEqual(len(nufft1d1freqs(len(self.f) + 1)), len(p1),
"Wrong length of frequency array")
def _type_2(self, eps=1e-10):
c2 = nufft1d2(self.x, self.f, direct=True)
c1 = nufft1d2(self.x, self.f, eps=eps)
self.assertTrue(_error(c1, c2) < eps,
"Type 2: Discrepancy between direct and fft function")
def _type_3(self, eps=1e-10):
p2 = nufft1d3(self.x, self.c, self.f, direct=True)
p1 = nufft1d3(self.x, self.c, self.f, eps=eps)
self.assertTrue(_error(p1, p2) < eps,
"Type 3: Discrepancy between direct and fft function")
def _type_1_2_roundtrip(self, eps=1e-10):
x, c1, f = _get_data_roundtrip()
p = nufft1d1(x, c1, len(f), iflag=-1, eps=eps)
c2 = len(x) / len(f) * nufft1d2(x, p, iflag=1, direct=True)
self.assertTrue(_error(c1, c2) < eps,
"Type 1 and 2: roundtrip error.")
def _type_1_and_3(self, eps=1e-10):
f = nufft1d1freqs(len(self.f))
p2 = nufft1d3(self.x, self.c, f, eps=eps)
p1 = nufft1d1(self.x, self.c, len(f), eps=eps)
self.assertTrue(_error(p1, p2) < eps,
"Type 1 and 3 and not close (even)")
f = nufft1d1freqs(len(f) + 1)
p2 = nufft1d3(self.x, self.c, f, eps=eps)
p1 = nufft1d1(self.x, self.c, len(f), eps=eps)
self.assertTrue(_error(p1, p2) < eps,
"Type 1 and 3 and not close (odd)")
df = 0.5 * (f[1] - f[0])
p1 = nufft1d1(self.x, self.c, len(f), eps=eps, df=df)
f = nufft1d1freqs(len(f), df=df)
p2 = nufft1d3(self.x, self.c, f, eps=eps)
self.assertTrue(_error(p1, p2) < eps,
"Type 1 and 3 and not close (even)")
def test_type_1_even(self):
"""Is the 1D type 1 with even data correct?"""
for eps in [1e-2, 1e-5, 1e-10, 1e-12]:
self._type_1_even(eps)
def test_type_1_odd(self):
"""Is the 1D type 1 with odd data correct?"""
for eps in [1e-2, 1e-5, 1e-10, 1e-12]:
self._type_1_odd(eps)
def test_type_2(self):
"""Is the 1D type 2 correct?"""
for eps in [1e-6, 1e-10, 1e-12]:
self._type_2(eps)
def test_type_3(self):
"""Is the 1D type 3 correct?"""
for eps in [1e-2, 1e-5, 1e-10, 1e-12]:
self._type_3(eps)
def test_type_1_2_roundtrip(self):
"""Is the 1D roundtrip using type 1 and 2 correct?"""
for eps in [1e-2, 1e-5, 1e-10, 1e-12]:
self._type_1_2_roundtrip(eps)
def test_1_and_3(self):
"""Are the 1D type 1 and 3 similar?"""
for eps in [1e-2, 1e-5, 1e-10, 1e-12]:
self._type_1_and_3(eps)
if __name__ == '__main__':
unittest.main()
|
#!flask/bin/python
import imp
import os
from migrate.versioning import api
from app import db
from config import DevConfig, ProdConfig
if os.environ.get('MDGIT_ENV') == 'dev':
config = DevConfig
else:
config = ProdConfig
migration = config.SQLALCHEMY_MIGRATE_REPO + '/versions/%03d_migration.py' % (api.db_version(config.SQLALCHEMY_DATABASE_URI, config.SQLALCHEMY_MIGRATE_REPO) + 1)
tmp_module = imp.new_module('old_model')
old_model = api.create_model(config.SQLALCHEMY_DATABASE_URI, config.SQLALCHEMY_MIGRATE_REPO)
exec old_model in tmp_module.__dict__
script = api.make_update_script_for_model(config.SQLALCHEMY_DATABASE_URI, config.SQLALCHEMY_MIGRATE_REPO, tmp_module.meta, db.metadata)
open(migration, "wt").write(script)
api.upgrade(config.SQLALCHEMY_DATABASE_URI, config.SQLALCHEMY_MIGRATE_REPO)
print 'New migration saved as ' + migration
print 'Current database version: ' + str(api.db_version(config.SQLALCHEMY_DATABASE_URI, config.SQLALCHEMY_MIGRATE_REPO))
|
#!/usr/bin/env python
# Copyright (c) 2009 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies building a target and a subsidiary dependent target from a
.gyp file in a subdirectory, without specifying an explicit output build
directory, and using the generated solution or project file at the top
of the tree as the entry point.
The configuration sets the Xcode SYMROOT variable and uses --depth=
to make Xcode behave like the other build tools--that is, put all
built targets in a single output build directory at the top of the tree.
"""
import TestGyp
test = TestGyp.TestGyp()
test.run_gyp('prog1.gyp', '-Dset_symroot=1', '--depth=.', chdir='src')
test.relocate('src', 'relocate/src')
# Suppress the test infrastructure's setting SYMROOT on the command line.
test.build('prog1.gyp', SYMROOT=None, chdir='relocate/src')
test.run_built_executable('prog1',
stdout="Hello from prog1.c\n",
chdir='relocate/src')
test.run_built_executable('prog2',
stdout="Hello from prog2.c\n",
chdir='relocate/src')
test.pass_test()
|
class LinkedList:
def __init__(self):
self.head = None
class LLNode:
def __init__(self, data=None, next=None):
        self.data = data
        self.next = next
def build_linked_list(idx_max, idx, lst):
lst.head = LLNode(idx)
build_ll_nodes(idx_max, idx + 1, lst.head)
return lst
def build_ll_nodes(idx_max, idx, node):
    assert node is not None, "error: node is None in build_ll_nodes"
if idx < idx_max:
node.next = LLNode(idx)
build_ll_nodes(idx_max, idx + 1, node.next)
def print_list(l_list):
cursor = l_list.head
while cursor is not None:
print(cursor.data)
cursor = cursor.next
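# Example usage (illustrative): build a list holding 0..4 and print it.
if __name__ == "__main__":
    lst = build_linked_list(5, 0, LinkedList())
    print_list(lst)  # prints 0, 1, 2, 3, 4 on separate lines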
|
#!/usr/bin/python
import time
var = 1
while var == 1 : # This constructs an infinite loop
time.sleep(5) # Delay for 5 seconds
|
from sqlalchemy import *
from sqlalchemy import event
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relation, backref, sessionmaker, scoped_session
from sqlalchemy.ext.associationproxy import association_proxy
import calendar
import numbers
import re
import time
import threading
import uuid
Base = declarative_base()
ACCESS_LEVEL_NONE = 0
ACCESS_LEVEL_READ = 1
ACCESS_LEVEL_EDIT = 2
ACCESS_LEVEL_ADMIN = 3
class User( Base ):
__tablename__ = 'users'
user_id = Column( Integer, primary_key = True )
user_name = Column( Text, nullable = False )
password_hash = Column( Text, nullable = False )
access_level = Column( Integer, nullable = False, default = ACCESS_LEVEL_NONE )
can_logon = Column( Integer )
def __init__( self, user_name, password_hash ):
self.user_name = user_name
self.password_hash = password_hash
    def __repr__( self ):
        return 'User( %r, %r, %r, %r, %r )' % (
                self.user_id, self.user_name,
                self.password_hash, self.access_level,
                self.can_logon )
class Session( Base ):
__tablename__ = 'sessions'
session_id = Column( Text, primary_key = True )
user_id = Column( Integer, ForeignKey( 'users.user_id' ) )
expires_time = Column( Integer, nullable = False )
access_level = Column( Integer, nullable = False, default = ACCESS_LEVEL_NONE )
user = relation( 'User', backref = backref( 'sessions', lazy = 'dynamic' ) )
def __init__( self, expires_time ):
self.session_id = str( uuid.uuid1() )
self.user_id = None
self.expires_time = expires_time
self.access_level = ACCESS_LEVEL_NONE
def __repr__( self ):
return 'Session( %r, %r, %r, %r )' % (
self.session_id, self.user_id,
self.expires_time, self.access_level )
DBSession = None
dbfile = None
class WebMigrator:
def __init__( self ):
pass
def determine_schema_info( self, session ):
return None, None, None
def init_schema( self, engine, ver, rev ):
Base.metadata.create_all( engine )
def upgrade_schema( self, session, ver, rev ):
assert False
def init( database_file ):
global dbfile
global DBSession
import hdbfs.db_utils
migrators = {
'higu_web' : WebMigrator(),
}
dbfile = hdbfs.db_utils.DatabaseFile( database_file, migrators )
dbfile.init()
dbfile.init_schema( 'higu_web', 1, 0 )
DBSession = dbfile.get_session
def dispose():
global dbfile
global DBSession
if( dbfile is not None ):
DBSession = None
dbfile.dispose()
dbfile = None
|
# data.py
ofiledir = './data/'
ofilename = 'IF9999.csv'
ofileheader = ['date', 'time', 'open', 'high', 'low', 'close', 'vol', 'position']
start_time_am = ' 10:01:00'
end_time_am = ' 11:20:00'
start_time_pm = ' 13:31:00'
end_time_pm = ' 14:50:00'
xx_range = 30
yy_range = 1
# network.py
logdir = './log/'
savedir = './save/'
training_start_date = '2011-1-1'
training_end_date = '2013-12-31'
backtest_start_date = '2014-1-1'
backtest_end_date = '2014-12-31'
# mystrategy
weight_file = './minute/save/2016-09-07 18:41:41.weight'
# backtest.py
|
import logging
import importlib
import inspect
from .utils import import_class
default_app_config = 'stretch.apps.StretchConfig'
logger = logging.getLogger('stretch')
def is_index(member):
if not inspect.isclass(member):
return False
if not getattr(member, 'IS_INDEX', False):
return False
return True
class StretchApp(object):
"""
A single point of reference for Stretch project settings.
"""
def __init__(self):
from django.conf import settings
self.settings = getattr(settings, 'STRETCH_SETTINGS', {})
        self.settings['EXCLUDED_INDICES'] = self.settings.get('EXCLUDED_INDICES', [])
self.settings['EXCLUDED_INDICES'].append('StretchIndex')
self.settings.setdefault('SIGNAL_HANDLER', None)
self.settings.setdefault('TEST', False)
self.settings.setdefault('RAISE_EXCEPTIONS', False)
def register_signals(self):
for index in self.indices:
index._register_signals()
def deregister_signals(self):
for index in self.indices:
index._deregister_signals()
def connect(self):
"""
Initialize or return the global elasticsearch connection
"""
if self.settings['TEST']:
## Monkey patch a mock backend to avoid ever trying to connect to Elasticsearch
import elasticsearch_dsl.connections
from stretch.tests.base import get_connection
connections = getattr(elasticsearch_dsl.connections, 'connections')
setattr(connections, 'get_connection', get_connection)
else:
from elasticsearch_dsl.connections import connections
try:
_connection = connections.get_connection()
except KeyError:
hosts = self.settings.get('HOSTS', ['localhost'])
timeout = self.settings.get('TIMEOUT', 60)
connections.create_connection(hosts=hosts, timeout=timeout)
_connection = connections.get_connection()
self.connection = _connection
def get_indices_by_name(self, names):
"""
Fetch a list of indices by their index names
"""
selected = []
for index in self.indices:
if index._get_index_name() in names:
selected.append(index)
return selected
def get_indices_names(self):
"""
List the indices in the project by their index name
"""
names = []
for index in self.indices:
names.append(index._get_index_name())
return names
def register_index(self, index_class):
"""
Adds an index to the list of indices in the app
"""
self._indices.append(index_class())
def reset(self):
"""
Removes all signals, connections, and registered indices.
"""
self.deregister_signals()
del self._indices
self.connection = None
self.connect()
@property
def indices(self):
"""
A list of StretchIndex classes in the project
"""
if not hasattr(self, '_indices'):
            apps = self.settings.get('APPS', None)
if apps is None:
from django.conf import settings
apps = settings.INSTALLED_APPS
self._indices = []
for app in apps:
try:
stretch_module = importlib.import_module('{0}.stretch_indices'.format(app))
except ImportError as e:
logger.debug(e)
continue
for name, index_class in inspect.getmembers(stretch_module, is_index):
if name not in self.settings.get('EXCLUDED_INDICES'):
self._indices.append(index_class())
return self._indices
global stretch_app
stretch_app = StretchApp()
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
import matplotlib.pyplot as plt
x = np.array([4.6, 0.0, 6.4, 6.5, 4.4, 1.1, 2.8, 5.1, 3.4, 5.8, 5.7, 5.5, 7.9, 3.0, 6.8, 6.2, 4.0, 8.6, 7.5, 1.3, 6.3, 3.1, 6.1, 5.3, 3.9, 5.8, 2.6, 4.8, 2.2, 5.3])
y = np.array([5.5, 1.7, 7.2, 8.3, 5.7, 1.1, 4.1, 6.7, 5.0, 6.6, 6.3, 5.6, 8.7, 3.6, 8.2, 6.2, 5.0, 9.5, 8.9, 2.6, 7.4, 5.0, 8.2, 6.6, 5.1, 7.0, 3.5, 6.3, 2.9, 6.9])
m = x.size
X = np.c_[np.ones((m, 1)), x]
alpha = 0.005
iterations = 100
theta = np.array([[1.], [1.]])
print 'initialize theta'
print theta
for i in xrange(iterations):
temp = np.array([
[theta[0, 0] - alpha / m * (theta[0, 0] + theta[1, 0] * x - y).T.dot(X[:, 0])],
[theta[1, 0] - alpha / m * (theta[0, 0] + theta[1, 0] * x - y).T.dot(X[:, 1])]
])
theta = temp
print 'Calculated theta'
print theta
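# For reference, the element-wise update in the loop above is equivalent to
# this vectorized form (a sketch, kept as a comment so behaviour is unchanged):
# for i in xrange(iterations):
#     theta = theta - alpha / m * X.T.dot(X.dot(theta) - y.reshape(-1, 1))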
plt.plot(x, y, 'rx')
plt.plot(X[:, 1], X.dot(theta), 'b-')
plt.xlabel('xlabel')
plt.ylabel('ylabel')
plt.legend(('Training Data', 'Linear Regression'), loc='upper left')
plt.show()
# End of Line.
|
# -*- coding: cp1252 -*-
from donees import *
from fonctions import *
score=recup_score(nom_fichier_scores)
mot= mot_hoazard(liste_mots)
lettres_recup=[]
essai=0
utilisateur=recup_nom()
if utilisateur not in score.keys() :
score[utilisateur]=0
while essai < nb_coups:
lettres_recup.append(recup_lettre())
result=affiche_result(lettres_recup,mot)
print result
if result==mot:
print "mot trouver"
break
else:
essai+=1
score[utilisateur]=calcul_score(essai,nb_coups)
print 'votre score est', score[utilisateur]
enregistrer_scores(nom_fichier_scores,score)
|
from _typeshed import Incomplete
class MinHeap:
class _Item:
key: Incomplete
value: Incomplete
def __init__(self, key, value) -> None: ...
def __init__(self) -> None: ...
def min(self) -> None: ...
def pop(self) -> None: ...
def get(self, key, default: Incomplete | None = None) -> None: ...
def insert(self, key, value, allow_increase: bool = False) -> None: ...
def __nonzero__(self): ...
def __bool__(self) -> bool: ...
def __len__(self) -> int: ...
def __contains__(self, key) -> bool: ...
class PairingHeap(MinHeap):
class _Node(MinHeap._Item):
left: Incomplete
next: Incomplete
prev: Incomplete
parent: Incomplete
def __init__(self, key, value) -> None: ...
def __init__(self) -> None: ...
def min(self): ...
def pop(self): ...
def get(self, key, default: Incomplete | None = None): ...
def insert(self, key, value, allow_increase: bool = False): ...
class BinaryHeap(MinHeap):
def __init__(self) -> None: ...
def min(self): ...
def pop(self): ...
def get(self, key, default: Incomplete | None = None): ...
def insert(self, key, value, allow_increase: bool = False): ...
|
#!/usr/bin/env python
#
# Copyright (c) 2019 Opticks Team. All Rights Reserved.
#
# This file is part of Opticks
# (see https://bitbucket.org/simoncblyth/opticks).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Cubic Bezier polynomial interpolating from P0 to P3 as u
goes from 0 to 1 controlled by P1, P2::
B(u) = P0*(1-u)**3 + P1*3*u*(1-u)**2 + P2*3*u**2*(1-u) + P3*u**3
To apply to surface of revolution (rr,z) in range z1 to z2, equate
(z - z1)
u = -------- u = 0 at z=z1 u = 1 at z=z2
(z2 - z1)
Or more in spirit of Bezier decide on begin/end points and
control points
::
(z1, rr1)
(cz1, crr1)
(cz2, crr2)
(z2, rr2)
* https://stackoverflow.com/questions/246525/how-can-i-draw-a-bezier-curve-using-pythons-pil
::
In [6]: bezier([0,1])
Out[6]: [(50, 100), (100, 50)]
In [7]: bezier([0,0.5,1])
Out[7]: [(50, 100), (77.5, 77.5), (100, 50)]
In [8]: bezier([0,0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1])
Out[8]:
[(50, 100),
(55.900000000000006, 95.9),
(61.60000000000001, 91.60000000000002),
(67.1, 87.1),
(72.4, 82.4),
(77.5, 77.5),
(82.4, 72.4),
(87.1, 67.1),
(91.60000000000001, 61.6),
(95.89999999999999, 55.9),
(100, 50)]
"""
def pascal_row(n):
# This returns the nth row of Pascal's Triangle
result = [1]
x, numerator = 1, n
for denominator in range(1, n//2+1):
# print(numerator,denominator,x)
x *= numerator
x /= denominator
result.append(x)
numerator -= 1
if n&1 == 0:
# n is even
result.extend(reversed(result[:-1]))
else:
result.extend(reversed(result))
return result
def make_bezier(xys):
"""
:param xys: sequence of 2-tuples (Bezier control points)
:return func: call it over t parameter iterable
Uses the generalized formula for bezier curves
http://en.wikipedia.org/wiki/B%C3%A9zier_curve#Generalization
For cubic bezier with 4 points combinations is just (1,3,3,1)
"""
n = len(xys)
combinations = pascal_row(n-1)
def bezier(ts):
result = []
for t in ts:
tpowers = (t**i for i in range(n))
upowers = reversed([(1-t)**i for i in range(n)])
coefs = [c*a*b for c, a, b in zip(combinations, tpowers, upowers)]
result.append(
tuple(sum([coef*p for coef, p in zip(coefs, ps)]) for ps in zip(*xys)))
return result
return bezier
from PIL import Image
from PIL import ImageDraw
def bezier_heart():
im = Image.new('RGBA', (100, 100), (0, 0, 0, 0))
draw = ImageDraw.Draw(im)
ts = [t/100.0 for t in range(101)]
xys = [(50, 100), (80, 80), (100, 50)]
bezier = make_bezier(xys)
points = bezier(ts)
xys = [(100, 50), (100, 0), (50, 0), (50, 35)]
bezier = make_bezier(xys)
points.extend(bezier(ts))
xys = [(50, 35), (50, 0), (0, 0), (0, 50)]
bezier = make_bezier(xys)
points.extend(bezier(ts))
xys = [(0, 50), (20, 80), (50, 100)]
bezier = make_bezier(xys)
points.extend(bezier(ts))
draw.polygon(points, fill = 'red')
im.save('out.png')
if __name__ == '__main__':
xys = [(50,100), (80,80), (100,50) ]
bezier = make_bezier(xys)
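    # Sample the curve at a few parameter values and print the points; both
    # ts and the control points above are illustrative values.
    ts = [t / 10.0 for t in range(11)]
    print(bezier(ts))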
|
import json
import string
from time import sleep
import requests
class DomainChecker:
def __init__(self, tld):
self.tld = tld
self.check_url = "https://www.hosting.kr/domains?query={}"
self.headers = {
"content-type": "application/json",
"x-requested-with": "XMLHttpRequest"
}
def check_target_domains(self, target_domains):
found_domains = []
for d in target_domains:
if self.check(self.get_target_url(d)):
found_domains.append(d)
sleep(0.5)
        return found_domains
def find_short_domain(self):
for a in string.ascii_lowercase:
for b in string.ascii_lowercase:
target_domain = self.get_target_domain(a + b)
target_url = self.get_target_url(target_domain)
if self.check(target_url):
print(target_domain)
sleep(0.5)
def check(self, target_url):
res = requests.get(target_url, headers=self.headers).json()
        return res['statusMsg'] == '등록가능'  # '등록가능' means "available for registration"
def get_target_domain(self, name):
return "{}.{}".format(name, self.tld)
def get_target_url(self, target_domain):
return self.check_url.format(target_domain)
if __name__ == '__main__':
target_domains = ['jslee.me']
dc = DomainChecker("me")
    available = dc.check_target_domains(target_domains)
    print(available)
# dc.find_short_domain()
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import socket
import struct
import time
import app_pb2
XY_PKG_MAX_LEN = 2048000;
XY_HEADER_LEN = 17;
PACKAGE_HEADER = ">IIIHHb";
SERVER_HOST = '192.168.206.128';
SERVER_PORT = 10000;
class CClient:
HOST=SERVER_HOST;
PORT=SERVER_PORT;
UserID = 1472978293;
Passwd = "12345";
s = None;
def __init__(self, host, port):
self.HOST = host;
self.PORT = port;
self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM);
self.s.connect((self.HOST, self.PORT));
def __del__(self):
self.s.close();
def SendRecvLoginReq(self):
CurReq = app_pb2.LoginReq();
CurReq.userid = self.UserID;
CurReq.passwd = self.Passwd;
CurReq.plat = 1;
content = CurReq.SerializeToString();
headerlen = XY_HEADER_LEN + len(content)
header = struct.pack(PACKAGE_HEADER, headerlen, 0x00020003, 0, 0, 0, 0);
data = header + content
self.s.sendall(data);
recv_data = self.s.recv(XY_PKG_MAX_LEN);
recv_header = recv_data[0:XY_HEADER_LEN]
recv_content = recv_data[XY_HEADER_LEN:];
PkgLen, CmdID, SN, CkSum, Ret, Compresse = struct.unpack(PACKAGE_HEADER, recv_header);
print PkgLen, CmdID, SN, CkSum, Ret, Compresse
CurRsp = app_pb2.LoginRsp();
CurRsp.ParseFromString(recv_content)
print CurRsp.ret;
if CurRsp.ret != 0:
print 'login error'
return -1;
else:
print '%d login success!!!'%self.UserID
return 0;
def SendRecvRegisterReq(self):
passwd = '12345';
CurReq = app_pb2.RegisterReq();
CurReq.passwd = passwd;
CurReq.nickname = 'mama';
CurReq.sex = 0;
CurReq.birthday = 1472978293;
CurReq.telno = '13724872174';
CurReq.address = u'广东省揭阳市惠来县仙庵镇京陇乡京东古祖东巷18号广东省揭阳市惠来县仙庵镇京陇乡京东古祖东巷18号';
CurReq.email = 'huixiaoke2009huixiaoke2009@qq.com'
content = CurReq.SerializeToString();
headerlen = XY_HEADER_LEN + len(content)
header = struct.pack(PACKAGE_HEADER, headerlen, 0x00020001, 0, 0, 0, 0);
data = header + content
self.s.sendall(data);
recv_data = self.s.recv(XY_PKG_MAX_LEN);
recv_header = recv_data[0:XY_HEADER_LEN]
recv_content = recv_data[XY_HEADER_LEN:];
PkgLen, CmdID, SN, CkSum, Ret, Compresse = struct.unpack(PACKAGE_HEADER, recv_header);
print PkgLen, CmdID, SN, CkSum, Ret, Compresse
CurRsp = app_pb2.RegisterRsp();
CurRsp.ParseFromString(recv_content)
print CurRsp.ret;
if CurRsp.ret != 0:
print 'register error'
return -1;
else:
print '%d register success!!!'%CurRsp.userid
self.UserID = CurRsp.userid;
self.Passwd = passwd;
return 0;
def SendRecvAddFriendReq(self):
CurReq = app_pb2.AddFriendReq();
CurReq.userid = 1472978286;
content = CurReq.SerializeToString();
headerlen = XY_HEADER_LEN + len(content)
header = struct.pack(PACKAGE_HEADER, headerlen, 0x00040003, 0, 0, 0, 0);
data = header + content
self.s.sendall(data);
recv_data = self.s.recv(XY_PKG_MAX_LEN);
recv_header = recv_data[0:XY_HEADER_LEN]
recv_content = recv_data[XY_HEADER_LEN:];
PkgLen, CmdID, SN, CkSum, Ret, Compresse = struct.unpack(PACKAGE_HEADER, recv_header);
print PkgLen, CmdID, SN, CkSum, Ret, Compresse
CurRsp = app_pb2.AddFriendRsp();
CurRsp.ParseFromString(recv_content)
print CurRsp.ret;
if CurRsp.ret != 0:
print 'add friend error'
return -1;
return 0;
def Run(self):
Ret = 0;
print '-------------------------'
Ret = self.SendRecvRegisterReq();
if Ret != 0:
return -1;
Ret = self.SendRecvLoginReq();
if Ret != 0:
return -1;
#self.SendRecvAddFriendReq();
return 0;
def main():
c1 = CClient(SERVER_HOST, SERVER_PORT);
c1.Run();
time.sleep(2);
c2 = CClient(SERVER_HOST, SERVER_PORT);
c2.Run();
time.sleep(2);
c3 = CClient(SERVER_HOST, SERVER_PORT);
c3.Run();
time.sleep(2);
c4 = CClient(SERVER_HOST, SERVER_PORT);
c4.Run();
time.sleep(2);
time.sleep(100000);
if __name__ == "__main__":
main()
|
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 16 19:59:46 2019
@author: PEKERPCLocal
Description:
Snippets of code for basic exploration of data. Mostly inspired by a
couple of blog posts and tutorials.
"""
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
plt.style.use(style='ggplot')
plt.rcParams['figure.figsize'] = (10, 6)
# Load data
#-----------------------------------------------------------------------------
train = pd.read_csv(r'data\train.csv')
test = pd.read_csv(r'data\test.csv')
# We will use log of SalePrice since the competition does that for error calculation.
#-----------------------------------------------------------------------------
target = np.log(train.SalePrice)
train["logSP"] = target
#-----------------------------------------------------------------------------
# Look at the distribution of the target.
# Seaborn's plots are nice and are worth a look.
plt.figure()
plt.hist(train.SalePrice,bins=40)
plt.title("SalePrice distribution")
plt.figure()
plt.hist(train.logSP,bins=30)
plt.title("log(SalePrice) distribution")
# Do the same with seaborn
plt.figure()
sns.distplot(train.SalePrice)
plt.title("SalePrice distribution")
plt.figure()
sns.distplot(train.logSP,bins=30)
plt.title("log(SalePrice) distribution")
# Skew of the target distribution
#-----------------------------------------------------------------------------
print( "\n\n{}".format("="*80))
print( "SalePrice skew = %f." % train.SalePrice.skew() )
print( "log(SalePrice) skew = %f." % np.log(train.SalePrice).skew() )
# Select a subset of columns based on dtype (in this case, numeric)
#-----------------------------------------------------------------------------
numeric_features = train.select_dtypes(include=[np.number])
print( "\n\n{}".format("="*80))
print("Numeric features:\n------------------")
print(numeric_features.dtypes)
# Correlation between features. corr() function uses numeric features only.
# Look at features highest (and most negative) correlated with SalePrice
#-----------------------------------------------------------------------------
c0 = train.corr()
#c0 = numeric_features.corr() # corr() itself finds the numeric features in data.
print( "\n\n{}".format("="*80))
print("Numeric features highly (or most negatively) correlated with SalePrice:")
print( c0['SalePrice'].sort_values(ascending=False)[:10] )
print( c0['SalePrice'].sort_values(ascending=False)[-5:] )
# Plot of features and their correlation with SalePrice, sorted wrt correlation
plt.figure()
plt.plot(c0.SalePrice.sort_values(),"o-")
plt.xticks(rotation=90)
plt.title("Correlation of features with SalePrice")
# What are the unique values in OverallQuall.
# Will do this for all features in the uniq() function below.
#-----------------------------------------------------------------------------
print( "\n\n{}".format("="*80))
#print("OverallQual unique values: %s" % train.OverallQual.unique())
print("OverallQual unique values: %s" % sorted(train.OverallQual.unique()))
# Box-plots are another way to look at relationships visually.
# Can do this for other features as well. Seaborn does a nice job again.
#-----------------------------------------------------------------------------
plt.figure()
sns.boxplot(x="Heating",y="logSP",data=train)
# Pairplot of Seaborn is another powerful visual tool to investigate relationships.
#-----------------------------------------------------------------------------
plt.figure()
cols = ['SalePrice', 'OverallQual', 'GrLivArea', 'GarageCars', 'TotalBsmtSF', 'FullBath', 'YearBuilt']
sns.pairplot(train[cols], height = 2.5) # height used to be size in older versions.
# Following are small sample functions to do bits of exploration.
# They are meant to be edited and ran for exploration purposes, not for production use.
#-----------------------------------------------------------------------------
def uniq():
'''Print unique values in each of the features. Sort values if possible.'''
for c in train.columns:
try:
x = sorted(train[c].unique())
except:
x = train[c].unique()
print("-"*40,"\n",c,len(x))
if len(x) < 30:
print(x)
else:
print("%d values. Min=%s, Max=%s" % (len(x), x[0], x[-1]))
#-----------------------------------------------------------------------------
def PivT(index='OverallQual', values='SalePrice'):
'''
Print a pivot table of the given 2 features, and then plot.
index is better to be a categorical value with a small cardinality.
'''
quality_pivot = train.pivot_table(index=index,
values=values, aggfunc=np.median)
print(quality_pivot)
quality_pivot.plot(kind='bar', color='blue')
plt.xlabel(index)
plt.ylabel('Median {}'.format(values))
plt.xticks(rotation=0)
#-----------------------------------------------------------------------------
def Scat():
'''Do a scatter plot of 2 features vs. log(SalePrice)'''
plt.figure()
plt.scatter(x=train['GrLivArea'], y=target)
plt.ylabel('log(Sale Price)')
plt.xlabel('Above grade (ground) living area square feet')
plt.figure()
plt.scatter(x=train['GarageArea'], y=target)
plt.ylabel('log(Sale Price)')
plt.xlabel('Garage Area')
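#-----------------------------------------------------------------------------
# Example usage of the exploration helpers above (a sketch; uncomment and edit
# as needed, in the same spirit as the rest of this script):
# uniq()          # unique values per feature
# PivT()          # median SalePrice per OverallQual level
# Scat()          # scatter plots vs. log(SalePrice)
# plt.show()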
|
"""
$ pylint birthday_cake_candles.py
Your code has been rated at 10.00/10 (previous run: 5.00/10, +5.00)
"""
FILE_OPEN = open('DATA.lst', 'r')
MESSAGE = FILE_OPEN.read()
DATA = [MESSAGE]
TYPE_DATA = []
DATA_INT = []
for line in DATA:
# Convert DATA, which is a single string, into a list of strings
TYPE_DATA = line.split()
# Convert the list of strings into a list of integers
DATA_INT = list(map(int, TYPE_DATA))
# Count how many entries equal the tallest value (rather than hard-coding 82)
DATA_FIND = DATA_INT.count(max(DATA_INT))
print(DATA_FIND)
FILE_OPEN.close()
# $ python guzmanandrew.py
# 7
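# A minimal function-based variant of the same logic, shown only as a sketch
# (reads the same 'DATA.lst' file used above, via a context manager):
def count_tallest(path='DATA.lst'):
    """Return how many entries in the file equal the maximum value."""
    with open(path, 'r') as handle:
        values = list(map(int, handle.read().split()))
    return values.count(max(values))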
|
t,p=map(int,input().split())
# swap the two values via a temporary variable (t, p = p, t is the idiomatic one-liner)
temp=t
t=p
p=temp
print(t,p)
|
#!/usr/bin/env python3
from __future__ import print_function
import aerospike
from aerospike import exception as ex
from aerospike_helpers.operations import operations
from aerospike_helpers.operations import map_operations
from aerospike import predicates as p
from aslib.dbg import Dbg
from aslib.asutils import Lck
from aslib.asutils import ClientWrapper
import inspect
import json
import os
import random
import signal
import sys
import time
import uuid
# TODO:
#
# add check for 2nd idx before calling query; or just once in run before loop
#
# add support to create 2nd index
#
cmd_info = [
"help: Print long help on Lck and this script\n",
"put: Put a record\n",
"mputs: Multi-put calls\n",
"get: Get a record\n",
"op: Run operate command\n",
"del: Delete a record\n",
"lck: Lock a record\n",
"uck: Unlock a record\n",
"clr: Clear a stale lock\n",
"trunc: Truncate a set\n",
"scan: Scan a set\n",
"txn: Run a transaction\n",
"rtxns: Random txn calls\n",
"rcn: Reconcile logs to accounts\n",
"cln: Cleanup stale pending transactions\n",
"clnsvc: Cleanup service; loops 4eva calling cleanup\n",
"info: Request info; ukey is info 'command'\n",
# https://www.aerospike.com/docs/reference/info/
"tst: Testing hook\n"
]
cmds = [cmd.split(':')[0] for cmd in cmd_info]
cmd_hlp = "".join(cmd_info)
as_host = os.environ.get('AS_HOST', '127.0.0.1') # asdb host
as_port = int(os.environ.get('AS_PORT', 3000)) # asdb port; cast so env values match the int default
as_ns = 'test' # asdb namespace
as_set = 'mxn' # asdb master set
fpct = 0 # Failure percentage in 10ths of a percent
# rec: { name: nm, bal: bal,
# lck: { txnid: tid, ukey: ky},
# lckto: to, ptxns: [tid, tid2] }
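# For illustration only (made-up values), a locked record might look like:
# { "name": "john", "bal": 1000,
#   "lck": { "txnid": "c0ffee-...-uuid", "ukey": "john" },
#   "lckto": 1561400000000, "ptxns": ["c0ffee-...-uuid"] }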
now = lambda: int(round(time.time() * 1000))
# command line parser
def main_parser (argv):
import argparse
# define command line parser
parser = argparse.ArgumentParser (
description="Multi-transaction test tool.")
# cmd; see cmd_info above
parser.add_argument (
"-cmd", metavar="cmd",
#required=True,
choices=cmds,
help="PPrint:Command to execute.\n""{}".format(cmd_hlp)
)
# namespace
parser.add_argument (
"-ns", "--namespace", metavar="namespace", dest="ns",
#required=True,
default=as_ns,
help="Aerospike namespace["+as_ns+"].")
# set
parser.add_argument (
"-set", metavar="set", dest="set",
#required=True,
default=as_set,
help="Aerospike set["+as_set+"].")
# user key
parser.add_argument (
"-ukey", "--userKey", metavar="user_key", dest="ukey",
help="Aerospike user key.")
# transaction Id
parser.add_argument (
"-txnid", "--transactionId", metavar="txnid", dest="txnid",
help="Transaction Id.")
# record
parser.add_argument (
"-rec", "--record", metavar="record", dest="rec",
help="PPrint:Aerospike record as a JSON string.\n"
"e.g. '{\"name\": \"john\", \"bal\": 1000}'")
# operations
parser.add_argument (
"-ops", "--operations", metavar="ops", dest="ops",
help="PPrint:operations to execute; e.g.\n"
"'[ operations.read (\"name\"),\n"
" operations.read (\"bal\")]'")
# transaction amount
parser.add_argument (
"-amt", "--amount", metavar="amount", dest="amt",
type=int,
help="Amount of transaction.")
# count
parser.add_argument (
"-n", "--count", metavar="count", dest="cnt",
type=int,
help="Count used with 'rtxns' or 'mputs' commands.")
# failure percentage
parser.add_argument (
"-fpct", "--failure_percent", metavar="percent", dest="fpct",
type=int,
default=fpct,
help="Failure percentage in 10ths of a percent (10 is 1%%; 1 is 0.1%%). Introduces simulated asdb failures at given percentage.")
# no exists
parser.add_argument (
"-nx", "--no_exists", action='store_true', dest="nx",
help="Record must not exist. Policy flag used with 'put' command.")
# gen match
parser.add_argument (
"-gen", "--generation", metavar="generation", dest="gen",
type=int,
default=0,
help="Record generation match value. Used with 'put' command.")
# host
parser.add_argument (
"-host", metavar="host",
default=as_host,
help="Aerospike seed host name ["+as_host+"].")
# port
parser.add_argument (
"-port", metavar="port",
type=int,
default=as_port,
help="Aerospike seed host port["+str(as_port)+"].")
# add dbg parser for debug switches
dbg_parser = Dbg.arg_parser(parser)
# hack to show usage w/o the myriad dbg switches; get those w/ '-h'
# main_parser(['usage', emsgs])
if argv[0] == 'usage':
for emsg in argv[1]:
print ("Error: {}".format(emsg))
print ("\n{}".format(parser.format_usage()))
return
return dbg_parser.parse_args(argv[1:])
#return parser.parse_args(argv[1:])
# main_parser
# Create a client and connect it to the cluster
def aero_connect (cfg, d=None):
if d is None: d = Dbg('none')
d.enter ("cfg:{}".format(cfg))
# check args
errs = ''
if cfg is None: errs += "cfg is req'd; "
if errs: d.throw (ValueError, errs)
try:
client = aerospike.client(cfg).connect()
except Exception as x:
xmsg = "Failed to connect to the cluster with {}".format(cfg['hosts'])
msg2 = "N.B. AS_HOST and AS_PORT env vars reset defaults of 127.0.0.1:3000"
d.err (xmsg+"\n"+msg2)
d.exc (x, xmsg, lvl=d.DBG)
d.leave ()
return None
if fpct is not None and fpct > 0:
client = ClientWrapper (client, fpct=fpct)
d.leave ()
return client
# aero_connect
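# Example usage (a sketch): connect to a local single-node cluster
# client = aero_connect ({'hosts': [('127.0.0.1', 3000)]}, d=Dbg('none'))
# if client is None: sys.exit(1)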
# Run an info command on all cluster nodes (e.g. 'namespaces', 'sets')
def info_hndlr (client, cmd, d=None):
if d is None: d = Dbg('none')
d.enter ()
errs = ''
if cmd is None: errs += "cmd is req'd; "
if errs: d.throw (ValueError, errs)
res = client.info_all (cmd)
d.leave (res)
return res
# Write a record
def put_hndlr (client, ns, set, ukey, recstr, nx=False, gen=0, d=None):
if d is None: d = Dbg('none')
d.enter ("ns:{} set:{} ukey:{} recstr:{} nx:{}"\
.format(ns, set, ukey, recstr, nx))
# check args
errs = ''
if ns is None: errs += "ns is req'd; "
if set is None: errs += "set is req'd; "
if ukey is None: errs += "ukey is req'd; "
if recstr is None: errs += "recstr is req'd; "
if errs: d.throw (ValueError, errs)
key = (ns, set, ukey)
rec = json.loads(recstr)
policy = {}
meta = {}
if nx:
policy['exists'] = aerospike.POLICY_EXISTS_CREATE
if gen > 0:
policy['gen'] = aerospike.POLICY_GEN_EQ
meta['gen'] = gen
try:
for ky in rec:
# dmf: hack to provide for bin removal
if rec[ky] == 'None':
rec[ky] = aerospike.null()
client.put(key, rec, meta=meta, policy=policy)
except Exception as x:
xmsg = "Exception: {}".format(x)
d.exc (x, xmsg, lvl=d.ERR)
d.leave (-1)
return -1
d.leave (0)
return 0
# put_hndlr
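# Example usage (a sketch), mirroring the -rec help text above:
# rc = put_hndlr (client, 'test', 'mxn', 'john',
#                 '{"name": "john", "bal": 1000}', nx=True, d=d)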
# Multi-put handler
# ukey is used as a prefix, appended with count
def mputs_hndlr (client, ns, set, ukey, cnt, d=None):
if d is None: d = Dbg('none')
d.enter ("ns:{} set:{} ukey:{} cnt:{}".format(ns, set, ukey, cnt))
# check args
errs = ''
if ns is None: errs += "ns is req'd; "
if set is None: errs += "set is req'd; "
if ukey is None: errs += "ukey is req'd; "
if cnt is None: errs += "cnt is req'd; "
if errs: d.throw (ValueError, errs)
rc = 0
for cnt in range(cnt):
rkey = ukey+str(cnt)
rec = {"name": rkey, "bal": 0}
recstr = json.dumps(rec,sort_keys=True)
rc = put_hndlr (client, ns, set, rkey, recstr, nx=False, gen=0, d=d)
if rc < 0:
d.err ("Failed to put {}.{}.{} {}".format(ns,set,rkey,recstr))
d.leave (rc)
return rc
d.leave (rc)
return rc
# mputs_hndlr
# Read a record
def get_hndlr (client, ns, set, ukey, d=None):
if d is None: d = Dbg('none')
d.enter ("ns:{} set:{} ukey:{}".format(ns, set, ukey))
# check args
errs = ''
if ns is None: errs += "ns is req'd; "
if set is None: errs += "set is req'd; "
if ukey is None: errs += "ukey is req'd; "
if errs: d.throw (ValueError, errs)
key = (ns, set, ukey)
try:
(rkey, meta, rec) = client.get(key)
except Exception as x:
xmsg = "Exception: {}".format(x)
d.exc (x, xmsg)
d.leave ()
return (None, None, None)
# d.leave ((meta, json.dumps(rec,sort_keys=True)))
d.leave ((meta, rec))
return (rkey, meta, rec)
# get_hndlr
# Run an operate (multi-op) request on a record
def operate_hndlr (client, ns, set, ukey, ops, d=None):
if d is None: d = Dbg('none')
d.enter ("ns:{} set:{} ukey:{}".format(ns, set, ukey))
# check args
errs = ''
if ns is None: errs += "ns is req'd; "
if set is None: errs += "set is req'd; "
if ukey is None: errs += "ukey is req'd; "
if errs: d.throw (ValueError, errs)
key = (ns, set, ukey)
try:
meta = None
policy = None
(rkey, meta, rec) = client.operate(key, eval(ops), meta, policy)
except Exception as x:
xmsg = "Exception: {}".format(x)
d.exc (x, xmsg, lvl=d.ERR)
d.leave ()
return (None, None, None)
d.leave ((meta, json.dumps(rec,sort_keys=True)))
return (rkey, meta, rec)
# operate_hndlr
# Delete a record
def del_hndlr (client, ns, set, ukey, no_exist_allowed=False, d=None):
if d is None: d = Dbg('none')
d.enter ("ns:{} set:{} ukey:{}".format(ns, set, ukey))
# check args
errs = ''
if ns is None: errs += "ns is req'd; "
if set is None: errs += "set is req'd; "
if ukey is None: errs += "ukey is req'd; "
if errs: d.throw (ValueError, errs)
key = (ns, set, ukey)
try:
client.remove(key)
except Exception as x:
if no_exist_allowed and type(x) == ex.RecordNotFound:
d.dbg ("record did not exist, but that's ok")
d.leave (0)
return 0
else:
xmsg = "Exception: {}".format(x)
d.exc (x, xmsg, lvl=d.ERR)
d.leave (-1)
return -1
d.leave (0)
return 0
# del_hndlr
# Truncate a set
def trunc_hndlr (client, ns, set, d=None):
if d is None: d = Dbg('none')
d.enter ("ns:{} set:{}".format(ns, set))
# check args
errs = ''
if ns is None: errs += "ns is req'd; "
if set is None: errs += "set is req'd; "
if errs: d.throw (ValueError, errs)
rc = 0
try:
rc = client.truncate (ns, set, 0)
except Exception as x:
xmsg = "Exception: {}".format(x)
d.exc (x, xmsg)
d.leave (-1)
return -1
d.leave (rc)
return rc
# trunc_hndlr
# Scan a set
def scan_hndlr (client, ns, set, callback, d=None):
if d is None: d = Dbg('none')
d.enter ("ns:{} set:{} callback:{}".format(ns, set, callback))
# check args
errs = ''
if ns is None: errs += "ns is req'd; "
if set is None: errs += "set is req'd; "
if callback is None: errs += "callback is req'd; "
if errs: d.throw (ValueError, errs)
try:
if set == '':
res = client.scan(ns,None)
else:
res = client.scan(ns, set)
res.foreach(callback)
d.leave (0)
return 0
except Exception as x:
xmsg = "Exception: {}".format(x)
d.exc (x, xmsg)
d.leave ()
return
d.leave ()
return
# scan_hndlr
# Transaction handler
# lock record
# log transaction
# settle transaction, and unlock record
def txn_hndlr (client, ns, set, ukey, txnid, amt, usr_ops=None, d=None):
if d is None: d = Dbg('none')
d.enter ("ns:{} set:{} ukey:{} amt:{} usr_ops:{}"\
.format(ns, set, ukey, amt, usr_ops))
# check args
errs = ''
if ns is None: errs += "ns is req'd; "
if set is None: errs += "set is req'd; "
if ukey is None: errs += "ukey is req'd; "
if txnid is None: errs += "txnid is req'd; "
if amt is None: errs += "amt is req'd; "
if errs: d.throw (ValueError, errs)
# lock the record
try:
lck = Lck (client, ns, set, ukey, txnid, d=d, fpct=fpct)
(rkey, rmeta, rec) = lck.acquire ()
except Exception as x:
xmsg = "Failed to acquire lock; {}".format(x)
d.exc (x, xmsg, lvl=d.ERR)
d.leave (xmsg)
return (None, None, None)
d.dbg ("rec:{} locked".format(ukey))
# log info
log_set = as_set + 'log'
log_key = ukey + ":" + txnid
log_bin = 'log'
log_rec = {"txnid": txnid, "amt": str(amt), "set": set, "ukey": ukey}
log_rec_str = json.dumps(log_rec,sort_keys=True)
# log transaction
rc = 0
try:
rc = put_hndlr (client, ns, log_set, log_key, log_rec_str, nx=True, d=d)
except:
d.err ("Error: failed putting rec:{} to set:{} in 'txn'"\
.format(log_rec, log_set))
d.leave ((None, None, None))
return (None, None, None)
if rc < 0:
d.dbg ("Failed to put {}.{}.{} {}".format(ns,log_set,log_key,log_rec_str))
d.leave ((None, None, None))
return (None, None, None)
d.dbg ("logged txn {}".format(log_rec_str))
settle_ops = [
operations.increment("bal", amt),
operations.read('name'),
operations.read('bal')
]
# add user ops
if usr_ops is not None:
for op in usr_ops:
d.dbg ("usr op:{}".format(op))
settle_ops.append (op)
# settle txn and release the lock
try:
(rkey, rmeta, rec) = lck.release (settle_ops)
except Exception as x:
d.err ("Exception caught releasing lock; {}".format(x))
d.leave ((None, None, None))
return (None, None, None)
d.dbg ("rec {} settled: {}".format(ukey,rec))
d.leave ((rmeta, json.dumps(rec,sort_keys=True)))
return (rkey, rmeta, rec)
# txn_hndlr
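# Example usage (a sketch): apply a +100 settlement to record 'john'
# txnid = str(uuid.uuid4())
# (rkey, meta, rec) = txn_hndlr (client, 'test', 'mxn', 'john', txnid, 100,
#                                usr_ops=[operations.read('name')], d=d)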
# Random transaction handler
# Apply cnt transactions to random account records
def rtxns_hndlr (client, ns, set, ukey, cnt, d=None):
if d is None: d = Dbg('none')
d.enter ("ns:{} set:{} cnt:{}".format(ns, set, cnt))
# check args
errs = ''
if ns is None: errs += "ns is req'd; "
if set is None: errs += "set is req'd; "
if cnt is None: errs += "cnt is req'd; "
if errs: d.throw (ValueError, errs)
# Transact on a specific ukey
if ukey is not None:
(rkey, meta, rec) = get_hndlr (client, ns, set, ukey, d=d)
if rec is None:
d.err ("Failed to get ({}:{}:{})".format(ns,set,ukey))
d.leave (-1)
return -1
usr_keys = [] # user keys
usr_amts = {} # user amounts
usr_keys.append(rec['name'])
usr_amts[rec['name']] = rec['bal']
# Get list of ns.set usr_keys in order to randomly transact on them
else:
usr_keys = []
usr_amts = {}
def get_names (kmr):
try:
rec = kmr[2]
usr_keys.append(rec['name'])
usr_amts[rec['name']] = rec['bal']
except Exception as x:
xmsg = "rtxns: get_names: rec:{} Exception: {}".format(rec, x)
d.exc (x, xmsg, lvl=d.ERR)
scan_hndlr (client, ns, set, get_names, d=d)
# Need records to transact upon
if len(usr_keys) == 0:
d.err ("{}.{} set is empty! Initial records required".format(ns, set))
d.leave (-1)
return -1
d.dbg ("Initial balances: {}".format(usr_amts))
ops = [ operations.read('name'), operations.read('bal') ]
# Loop executing random transactions
for cnt in range(cnt):
ukey = usr_keys[random.randint(0,len(usr_keys)-1)]
amt = random.randint(-100, 1000)
txnid = str(uuid.uuid4())
(rkey, meta, rec) = txn_hndlr (client, ns, set, ukey, txnid, amt, ops, d=d)
if rec is None:
d.err ("Error: Failed transaction; not aggregating ukey:{} amt:{}"\
.format(ukey,amt))
else:
usr_amts[ukey] += amt
if d.dlvl >= d.INF:
d.out ("ran txn:{} amt:{:6d} rec:{}"\
.format(txnid,amt,json.dumps(rec,sort_keys=True)))
d.out ("{}".format(usr_amts))
scan_hndlr (client, ns, set,
lambda kmr: d.out ("{}".format(json.dumps(kmr[2],sort_keys=True))) , d=d)
d.leave (0)
return 0
# rtxns_hndlr
# Reconciliation handler
# aggregate txn logs and compare to account bals
def rcn_hndlr (client, ns, set, ukey=None, d=None):
if d is None: d = Dbg('none')
d.enter ()
# loop over recs
def get_recs (kmr):
d.dbg ("get_recs entered")
scan_rec = kmr[2]
lck_rec = {}
ukey = scan_rec['name']
txnid = str(uuid.uuid4())
# lock the record
try:
usr_ops = [ operations.read ('bal') ]
lck = Lck (client, ns, set, ukey, txnid, d=d, fpct=fpct)
(rkey, rmeta, lck_rec) = lck.acquire (usr_ops)
except Exception as x:
lck_fails.append(ukey)
xmsg = "Failed to acquire lock; {}".format(x)
# d.inf (xmsg)
d.exc (x, xmsg, lvl=d.ERR)
#d.leave (xmsg)
return
d.dbg ("lck'd ukey:{} scan_rec:{} lck_rec:{}".format(ukey,scan_rec,lck_rec))
rec_bal = int(lck_rec['bal'])
pend_amt = 0
log_amt = 0
d.dbg ("lck_rec:{}".format(lck_rec))
d.dbg ("rec_bal:{} pend_amt:{} log_amt:{}".format(rec_bal, pend_amt, log_amt))
rec_ptxn_ukeys = []
for ptxn in lck_rec['ptxns']:
d.dbg ("ptxn:{}".format(ptxn))
rec_ptxn_ukeys.append(ptxn.split(':')[1])
d.dbg ("rec_ptxn_ukeys:{}".format(rec_ptxn_ukeys))
# loop over logs for rec (need 2idx on log[ukey])
d.dbg ("query'ing for {}".format(ukey))
query = client.query(ns, set+'log')
query.where(p.equals('ukey', ukey))
query_recs = query.results( {'total_timeout':2000})
d.dbg ("query_recs:{}".format(query_recs))
for query_rec in query_recs: # query_recs is a list of kmr recs
d.dbg ("query_rec:{}".format(query_rec))
log_rec = query_rec[2]
d.dbg ("log_rec:{}".format(log_rec))
# pending txns get deducted from total log amount
d.dbg ("log_rec['txnid']:{} rec_ptxn_ukeys:{}".format(log_rec['txnid'], rec_ptxn_ukeys))
if log_rec['txnid'] in rec_ptxn_ukeys:
pend_amt += int(log_rec['amt'])
d.dbg ("pend_amt:{}".format(pend_amt))
else:
log_amt += int(log_rec['amt'])
d.dbg ("log_amt:{}".format(log_amt))
lck.release()
# report reconciliation errors
balanced = False
if rec_bal == log_amt:
d.dbg ("rec_bal == log_amt")
balanced = True
else:
rcn_fails.append(ukey)
d.inf ("ukey:{} bal:{} pend_amt:{} log_amt:{} balanced:{}".format(ukey, rec_bal, pend_amt, log_amt, balanced))
if rec_bal != log_amt:
d.inf ("{} Not Balanced!\n scan_rec:{}\n lck_rec:{}".format(ukey, scan_rec, lck_rec))
lck_fails = []
rcn_fails = []
if ukey is not None:
kmr = get_hndlr (client, ns, set, ukey, d=d)
get_recs (kmr)
else:
scan_hndlr (client, ns, set, get_recs, d=d)
if len(lck_fails) > 0:
d.dbg ("Failed to acquire lock on ukeys:{}".format(lck_fails))
if len(rcn_fails) > 0:
d.dbg ("Failed to reconcile ukeys:{}".format(rcn_fails))
d.leave ()
return {'lck_fails': lck_fails, 'rcn_fails': rcn_fails}
# rcn_hndlr
def cln_callback (client, ns, set, ukey, txnid, d=None):
if d is None: d = Dbg('none')
d.enter ("ns:{} set:{} ukey:{} txnid:{}".format(ns, set, ukey, txnid))
# check args
errs = ''
if ns is None: errs += "ns is req'd; "
if set is None: errs += "set is req'd; "
if ukey is None: errs += "ukey is req'd; "
if txnid is None: errs += "txnid is req'd; "
if errs: d.throw (ValueError, errs)
# remove transaction log record
log_ukey = ukey + ":" + txnid
d.inf ("deleting txn log record w/ key:{}".format(log_ukey))
rc = del_hndlr (client, ns, set+'log', log_ukey, no_exist_allowed=True, d=d)
if rc < 0:
d.dbg ("Failed to remove {}.{}.{}".format(ns,set+'log',log_ukey))
d.leave (-1)
return -1
d.leave (0)
return 0
# cln_callback
def idx_hndlr (client, ns, set, ukey=None, d=None):
if d is None: d = Dbg('none')
d.enter ()
# check args
errs = ''
if ns is None: errs += "ns is req'd; "
if set is None: errs += "set is req'd; "
if ukey is None: errs += "ukey is req'd; "
if errs: d.throw (ValueError, errs)
# TODO: create the secondary index here; this handler is currently a stub that reports success
d.leave (0)
return 0
def chk_func_args (dct, reqd):
errs = ''
for req in reqd:
if dct[req] is None:
errs += "arg {} req'd; ".format(req)
if errs:
func = inspect.currentframe().f_back.f_code.co_name
errs = "Error: {}; {}".format(func,errs)
return errs
def chk_args (args, req_lst):
emsgs = []
argv = vars(args)
for req in req_lst:
if argv[req] is None:
if req == 'cmd':
cmds_str = ''
for cmd in cmds:
cmds_str += " {}".format(cmd)
emsgs.append ("'{}' argument required.\n One of: {}".format(req,cmds_str))
else:
emsgs.append ("'{}' argument required.".format(req))
# Show usage
if len(emsgs) != 0:
# special arg list to generate usage; allows usage w/o debug switch listings
main_parser(['usage', emsgs])
#return -1
sys.exit (1)
return 0
# chk_args
def main ():
args = main_parser(sys.argv)
# create dbg object
d = Dbg (lvl = args.dbg_level,
trc = args.dbg_trace,
log_file = args.dbg_log_file,
log_only = args.dbg_log_only,
date_n_time = args.dbg_date_n_time,
file_n_line = args.dbg_file_n_line,
hdr_date_format = args.dbg_hdr_date_format,
file_name = args.dbg_file_name,
func_name = args.dbg_func_name,
line_begin = args.dbg_line_begin,
line_end = args.dbg_line_end)
d.enter ()
global fpct
cmd = args.cmd
ns = args.ns
set = args.set
ukey = args.ukey
txnid = args.txnid
rec = args.rec
ops = args.ops
amt = args.amt
cnt = args.cnt
fpct = args.fpct
nx = args.nx
gen = args.gen
host = args.host
port = args.port
chk_args (args, ['cmd'])
# Create a basic ops list
basic_ops = [
operations.read ("name"),
operations.read ("bal"),
operations.read (Lck.lck_bin),
operations.read (Lck.timeout_bin),
operations.read (Lck.pending_txns_bin)
]
# Connect to asdb; always req'd
client = aero_connect ({'hosts': [(host, port)]}, d=d)
if client is None:
d.err ("Failed connect")
d.leave (-1)
return -1
# Get help
if cmd == 'help':
import mxn
d.out ("{}".format(help(mxn)))
d.out ("\n\n")
# d.out ("{}".format(help(Lck)))
import aslib.asutils
d.out ("{}".format(help(aslib.asutils)))
pass
# Get a list of namespaces
elif cmd == 'info':
chk_args (args, ['ukey'])
ns_lst = info_hndlr (client, ukey, d=d)
d.out ("{}".format(json.dumps(ns_lst,sort_keys=True)))
# Put a record
elif cmd == 'put':
chk_args (args, ['ns', 'set', 'ukey', 'rec'])
rc = put_hndlr (client, ns, set, ukey, rec, nx, gen, d=d)
if rc < 0:
d.err ("Failed to put {}.{}.{} {}".format(ns,set,ukey,rec))
# Put multiple random records
elif cmd == 'mputs':
chk_args (args, ['ns', 'set', 'ukey', 'cnt'])
mputs_hndlr (client, ns, set, ukey, cnt, d=d)
# Get a record
elif cmd == 'get':
chk_args (args, ['ns', 'set', 'ukey'])
loop_cnt = cnt
if loop_cnt is None: loop_cnt = 1
for i in range(loop_cnt):
if ukey.isnumeric():
ukey = int(ukey)
(rkey, meta, rec) = get_hndlr (client, ns, set, ukey, d=d)
if rec is None:
d.err ("Failed to get ({}:{}:{})".format(ns,set,ukey))
else:
#d.out ("{}".format(json.dumps(rec,sort_keys=True)))
d.out ("{}".format(rec))
# # get_record w/ field spec
# lck = Lck (client, ns, set, ukey, txnid, d=d, fpct=fpct)
# if ops is None:
# ops = '''[
# "lck",
# "lck.txnid", # "txnid" field from "lck" map
# "ptxns",
# "ptxns.1" # 2nd elm of array
# ]'''
# (rkey, rmeta, rec) = lck.get_record (rec_fields=eval(ops))
# d.out ("fields:{}".format(json.dumps(rec,sort_keys=True)))
# Run operate command
elif cmd == 'op':
chk_args (args, ['ns', 'set', 'ukey'])
if ops is None: ops = basic_ops
(rkey, meta, rec) = operate_hndlr (client, ns, set, ukey, ops, d=d)
if rec is None:
d.err ("Failed operate on ({}:{}:{})".format(ns,set,ukey))
else:
d.out ("{}".format(json.dumps(rec,sort_keys=True)))
# Scan a set
elif cmd == 'scan':
chk_args (args, ['ns', 'set'])
# scan result foreach callback takes a (key,meta,rec) tuple
global agg
agg = 0
def txnid_amt (kmr):
global agg
try:
rec = kmr[2]
if set == 'mxn':
name = rec['name']
bal = rec['bal']
try:
lck = rec['lck']
lckto = rec['lckto']
ptxns = rec['ptxns']
except:
lck = '{}'
lckto = 0
ptxns = '[]'
d.out ("{}\"name\" :\"{}\", \"bal\" :\"{}\", \"lck\" :\"{}\", \"lckto\" :\"{}\", \"ptxns\" :\"{}\" {}".format('{', name, bal, lck, lckto, ptxns, '}'))
if set == 'mxnlog':
txnid = rec['txnid']
amt = rec['amt']
ukey = rec['ukey']
uset = rec['set']
agg += int(amt)
d.out ("{}\"txnid\": \"{}\", \"amt\" :\"{}\", \"ukey\" :\"{}\", \"set\" :\"{}\"{}".format('{', txnid, amt, ukey, uset, '}'))
except Exception as x:
xmsg = "txnid_amt: log_scan: rec:{} Exception: {}".format(rec,x)
d.exc (x, xmsg, lvl=d.ERR)
def print_rec (kmr):
try:
key = kmr[0]
d.dbg ("{}".format(key))
rec = kmr[2]
d.out ("{}".format(rec))
#d.out ("{}".format(json.dumps(rec,sort_keys=True,indent=2)))
except Exception as x:
xmsg = "print_rec: log_scan: rec:{} Exception: {}".format(rec,x)
d.exc (x, xmsg, lvl=d.ERR)
if set == '*':
# dmf: add "-n 0" to just show counts
ns_lst = info_hndlr (client, 'sets', d=d)
#d.out ("ns_lst:{}".format(ns_lst))
for key in ns_lst.keys():
#d.out ("key:{}".format(key))
#d.out ("{}".format(ns_lst[key][1]))
ns_set_specs = ns_lst[key][1]
for ns_set_spec in ns_set_specs.split(';'):
if ns_set_spec == '\n': break
#d.out ("ns_set_spec:'{}'".format(ns_set_spec))
attrs = ns_set_spec.split(':')
#d.out("attrs:{}".format(attrs))
ns_discovered = attrs[0].split('=')[1]
set_discovered = attrs[1].split('=')[1]
objects = attrs[2].split('=')[1]
tombstones = attrs[3].split('=')[1]
memory_data_bytes = attrs[4].split('=')[1]
truncate_lut = attrs[5].split('=')[1]
stop_writes_count = attrs[6].split('=')[1]
disable_eviction = attrs[7].split('=')[1]
if ns_discovered == ns:
d.out ("ns:{} set:{} objs:{}".format(ns_discovered,set_discovered,objects))
if cnt is None:
scan_hndlr (client, ns_discovered, set_discovered, print_rec, d=d)
break
else:
# dmf: use set spec of '' to scan whole namespace with set of None
scan_hndlr (client, ns, set,
#lambda kmr: d.out ("{}".format(kmr[2])),
#lambda kmr: d.out ("{}".format(json.dumps(kmr[2],sort_keys=True))),
#txnid_amt,
print_rec,
d=d)
# if set == 'mxnlog':
# d.out ("agg:{}".format(agg))
# Delete a record
elif cmd == 'del':
chk_args (args, ['ns', 'set', 'ukey'])
del_hndlr (client, ns, set, ukey, no_exist_allowed=True, d=d)
# Truncate a set
elif cmd == 'trunc':
chk_args (args, ['ns', 'set'])
trunc_hndlr (client, ns, set, d=d)
# Lock a record w/ multi-trans locking model
elif cmd == 'lck':
chk_args (args, ['ns', 'set', 'ukey', 'txnid'])
if ops is None: ops = basic_ops
try:
lck = Lck (client, ns, set, ukey, txnid, d=d, fpct=fpct)
(rkey, rmeta, rec) = lck.acquire (ops)
d.out ("{}".format(json.dumps(rec,sort_keys=True)))
d.dbg ("rmeta:{}".format(rmeta))
except Exception as x:
xmsg = "Failed to acquire lock; {}".format(x)
d.exc (x, xmsg, lvl=d.ERR)
# Unlock a record - forced! - does not honor multi-trans locking model
elif cmd == 'uck':
chk_args (args, ['ns', 'set', 'ukey', 'txnid'])
if ops is None: ops = basic_ops
# lck = Lck (client, ns, set, ukey, 'mxn.uck', d=d, fpct=fpct)
lck = Lck (client, ns, set, ukey, txnid, d=d, fpct=fpct)
try:
(rkey, rmeta, rec) = lck.break_lock (ops, ignore_timeout=True)
d.out ("{}".format(json.dumps(rec,sort_keys=True)))
d.dbg ("rmeta:{}".format(json.dumps(rmeta,sort_keys=True)))
except Exception as x:
xmsg = "Failed to acquire lock; {}".format(x)
d.exc (x, xmsg, lvl=d.ERR)
# Cleanup all stale pending transactions on any record
elif cmd == 'cln':
chk_args (args, ['ns', 'set'])
lck = Lck (client, ns, set, d=d, fpct=fpct)
usr_ops = None
lck.cleanup (cln_callback, usr_ops)
# Cleanup service; just loops forever calling cleanup
elif cmd == 'clnsvc':
chk_args (args, ['ns', 'set'])
lck = Lck (client, ns, set, d=d, fpct=fpct)
usr_ops = None
while True:
beg = now()
d.dbg ("beg:{}".format(beg))
try:
lck.cleanup (cln_callback, usr_ops)
end = now()
d.dbg ("end:{} diff:{}".format(end, end-beg))
time.sleep(1000/1000) # sleep 1 second between cleanup passes
except:
break
# Clear a stale lock; destructive action; no cleanup performed
elif cmd == 'clr':
chk_args (args, ['ns', 'set', 'ukey', 'txnid'])
if ops is None: ops = basic_ops
lck = Lck (client, ns, set, ukey, txnid, d=d, fpct=fpct)
try:
(rkey, rmeta, rec) = lck.clear_lock (ops)
d.out ("{}".format(json.dumps(rec,sort_keys=True)))
d.dbg ("rmeta:{}".format(json.dumps(rmeta,sort_keys=True)))
except Exception as x:
xmsg = "Failed clearing lock; {}".format(x)
d.exc (x, xmsg, lvl=d.ERR)
# Drive a transaction - lock, transact, unlock - account settlement use case
elif cmd == 'txn':
chk_args (args, ['ns', 'set', 'ukey', 'txnid', 'amt'])
(rkey, meta, rec) = txn_hndlr (client, ns, set, ukey, txnid, amt, d=d)
d.out ("{}".format(json.dumps(rec,sort_keys=True)))
# Drive random transactions
elif cmd == 'rtxns':
chk_args (args, ['ns', 'set', 'cnt'])
rtxns_hndlr (client, ns, set, ukey, cnt, d=d)
#d.out (Lck.get_stats())
try:
Lck.put_stats(client, ns, set)
except Exception as x:
xmsg = "Failed putting lock stats; {}".format(x)
d.exc (x, xmsg, lvl=d.ERR)
# Aggregate log amounts (to reconcile logs w/ accounts)
elif cmd == 'rcn':
chk_args (args, ['ns', 'set'])
fails = rcn_hndlr (client, ns, set, ukey, d=d)
if len(fails['lck_fails']) != 0:
d.out ("Failed to lock some records: {}".format(fails['lck_fails']))
if len(fails['rcn_fails']) == 0:
if len(fails['lck_fails']) != 0:
d.out ("Reconciled; except for failed locks")
else:
d.out ("Reconciled")
else:
d.out ("Unbalanced records found: {}".format(fails['rcn_fails']))
# Create 2nd idx
elif cmd == 'idx':
chk_args (args, ['ns', 'set', 'ukey'])
rc = idx_hndlr (client, ns, set, ukey, d=d)
if rc < 0:
d.out ("Failed to create 2nd index on {} bin".format(ukey))
else:
d.out ("2nd index created on {} bin".format(ukey))
# Test driver
elif cmd == 'tst':
try:
lck = Lck (client, ns, set, ukey, txnid, d=d, fpct=fpct)
lck.acquire()
lck.release()
except Exception as x:
xmsg = "Failed lock acquire/release; {}".format(x)
d.exc (x, xmsg, lvl=d.ERR)
#
try:
lck2 = Lck (client, ns, set, ukey, txnid, d=d, fpct=fpct)
lck2.acquire()
lck2.release()
except Exception as x:
xmsg = "Failed lock #2 acquire/release; {}".format(x)
d.exc (x, xmsg, lvl=d.ERR)
#
d.out (lck.get_stats())
###
# chk_args (args, ['ns', 'set'])
# def mxn_scan (kmr):
# try:
# rec = kmr[2]
# name = rec['name']
# bal = rec['bal']
# lck = rec['lck']
# ptxns = rec['ptxns']
# #d.out ("name:{} bal:{}".format(name, bal))
# for ptxn in ptxns:
# #d.out (" ptxn: {}".format(ptxn))
# d.out ("{} bal:{}".format(ptxn, bal))
# except Exception as x:
# xmsg = "mxn_scan: Exception: {}".format(x)
# d.exc (x, xmsg, lvl=d.ERR)
# def mxnlog_scan (kmr):
# try:
# rec = kmr[2]
# ukey = rec['ukey']
# txn = rec['txn']
# amt = txn['amt']
# txnid = rec['txnid']
# d.out ("{} amt:{}".format(ukey+":"+txnid, amt))
# except Exception as x:
# xmsg = "mxnlog_scan: Exception: {}".format(x)
# d.exc (x, xmsg, lvl=d.ERR)
# scan_hndlr (client, ns, set, mxnlog_scan, d=d)
# Disconnect from asdb; always req'd
try:
client.close()
except:
pass
d.dbg ("Disconnected from aerospike cluster")
d.leave (0)
return 0
# main
# allows for 'import mxn' and standard Python help(mxn) call
if __name__ == "__main__":
main()
|
from django.shortcuts import render,redirect,get_object_or_404
from django.utils import timezone
from django.core.paginator import Paginator
from django.views.generic.edit import FormView
from .models import Notice,Comment
from django.db.models import Q
from .forms import NewNotice,SearchForm,CommentForm
from django.contrib.auth.decorators import login_required
# Create your views here.
@login_required
def home(request):
return render(request,'notice/home.html')
def test(request):
return render(request,'notice/test.html')
def detail(request, notice_id):
notice_detail = get_object_or_404(Notice, pk=notice_id)
return render(request, 'notice/detail.html', {'notice': notice_detail})
def comment_create(request, notice_id):
# Only handle requests that use the POST method
if request.method == 'POST':
# Fetch the Notice instance, or return a 404 response
notice = get_object_or_404(Notice, pk=notice_id)
# Get the values of the 'content' and 'writer' keys from request.POST
content = request.POST.get('content')
writer = request.POST.get('writer')
# If content was provided, create a Comment object and save it to the DB
Comment.objects.create(
notice=notice,
# writer comes from the submitted form data
comment_contents=content,
comment_writer=writer,
)
return redirect('detail_comment')
def comment_remove(request, pk):
comment = get_object_or_404(Comment, pk=pk)
comment.delete()
return redirect('detail_comment')
def read(request):
notice=Notice.objects
counts=Notice.objects.count()
notice_list=Notice.objects.all()
paginator=Paginator(notice_list,10)
page=request.GET.get('page')
posts=paginator.get_page(page)
# paging: compute the window of page numbers to display
max_index = len(paginator.page_range)
page_numbers_range = 5
current_page = int(page) if page else 1
start_index = int((current_page - 1) / page_numbers_range) * page_numbers_range
end_index = start_index + page_numbers_range
if end_index >= max_index:
end_index = max_index
page_range = paginator.page_range[start_index:end_index]
return render(request,'notice/home.html',{'notice':notice,'posts':posts,'counts':counts,'page_range':page_range})
def create(request):
if request.method=='POST':
form=NewNotice(request.POST)
if form.is_valid():
post=form.save(commit=False)
post.pub_date=timezone.now()
post.save()
return redirect('home')
else:
form=NewNotice()
# invalid POSTs and GETs both fall through to render the form
return render(request,'notice/new.html',{'form':form})
def update(request,pk):
notice=get_object_or_404(Notice,pk = pk)
form=NewNotice(request.POST, instance=notice)
if form.is_valid():
form.save()
return redirect('home')
return render(request,'notice/new.html',{'form':form})
def delete(request,pk):
notice=get_object_or_404(Notice,pk=pk)
notice.delete()
return redirect('home')
class SearchFormView(FormView):
# Use the SearchForm defined in forms.py as form_class
form_class = SearchForm
template_name = 'notice/search.html' # template the results are rendered to
# form_valid runs when the submitted value passes validation;
# here that means search_word is valid per the SearchForm definition
def form_valid(self, form):
# The submitted value arrives via POST
# Store the search word the user entered in a variable
search_word = self.request.POST['search_word']
# Filter Notice objects whose title or body contains the word, case-insensitively (icontains)
# Q objects can be combined with the | (or) and & (and) operators
post_list = Notice.objects.filter(Q(title__icontains=search_word) | Q(body__icontains=search_word))
context = {}
# Store the form object (the SearchForm instance) in the context
context['form'] = form
context['search_term'] = search_word
context['object_list'] = post_list
return render(self.request, 'notice/search.html', context)
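# A hypothetical urls.py wiring for the views above (a sketch only; the URL
# names 'home' and 'detail_comment' are taken from the redirects used in this
# module, and every path string is an assumption):
#
# from django.urls import path
# from . import views
#
# urlpatterns = [
#     path('', views.read, name='home'),
#     path('notice/<int:notice_id>/', views.detail, name='detail_comment'),
#     path('notice/<int:notice_id>/comment/', views.comment_create, name='comment_create'),
#     path('notice/new/', views.create, name='create'),
#     path('search/', views.SearchFormView.as_view(), name='search'),
# ]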
|
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 24 14:19:28 2019
@author: Ananthan, James
"""
import numpy as np
import pandas as pd
import process as sp
import os
from sklearn.preprocessing import normalize
import math
import sys
def t_n_d(file):
"""
gets title and description from a file
"""
s = file.read()
des = s[s.index(','):]
t = s[:s.index(',')]
return t,des
class Model:
def __init__(self,tags,kind,th=0.2):
"""
:param tags:
"nouns", "all", or "sic"
:param kind:
"boolean", "freq" or "tfidf"
:param th:
threshold for excluding words. th=0.2 means words occurring in more
than 20% of documents are excluded
"""
self.dict=None
self.model_vecs=None
self.num_docs=0
self.tags=tags
self.th=th
self.kind=kind
def make_dict_of_words(self,path, errorfile):
"""
:return: dict of words and freq in corpus. If tags are "sic", words are instead drawn from files in SIC_DESC_PATH, but frequency is still from corpus.
"""
word_dict={}
total_docs=0
dictfails = 0
SIC_DESC_PATH = "sic_descriptions"
if self.tags == "sic":
word_dict = {}
for filename in os.listdir(SIC_DESC_PATH):
file = open(SIC_DESC_PATH + "/" + filename, 'r', encoding='utf8')
text = file.read()
des = set(sp.tokenize_str(text))
for word in des:
if word not in word_dict:
word_dict[word] = 0
file.close()
for filename in os.listdir(path):
file = open(path+"/"+filename,'r', encoding="utf8")
title,des=t_n_d(file)
try:
if self.tags=="all":
des_ls = sp.tokenize_str(des)
elif self.tags=='nouns':
des_ls = sp.tokenize_str_hp(des,title)
elif self.tags =="sic":
des_ls = sp.tokenize_str(des)
total_docs+=1
except:
des_ls = []
if errorfile is not None:
dictfails += 1
exc1, exc2, exc3 = sys.exc_info()
errorfile.write(filename + " failed in dictionary step: " + str(exc1) + " ; " + str(exc2)+ "\n")
# words_in_doc = set()
file.close()
words = set(des_ls)
for word in words:
if self.tags == "sic":
if word in word_dict:
word_dict[word] += 1
else:
if word not in word_dict:
word_dict[word] = 1
else:
word_dict[word] += 1
# for word in des_ls:
# if word not in word_dict and word not in words_in_doc:
# if self.tags == "sic":
# pass
# else:
# word_dict[word]=1
# words_in_doc.add(word)
# elif word not in words_in_doc:
# word_dict[word]+=1
# words_in_doc.add(word)
final_dict = {k: v for k, v in word_dict.items() if v/total_docs<=self.th}
if errorfile is not None:
errorfile.write(str(dictfails) + " documents failed in dictionary step\n")
print('made dict with tags: ',self.tags,', th: ',self.th)
self.dict=final_dict
self.num_docs=total_docs
return final_dict
def make_seq_freq_vec(self,seq_ls,words,words_to_index):
"""
:param seq_ls:
tokenized sentence as list
:param words:
list of words
:param words_to_index:
dict of word to index
:return:
"""
vec=np.zeros(len(words))
if self.kind == 'boolean':
seq_set = set(seq_ls)
for i in range(len(words)):
if words[i] in seq_set:
vec[i] = 1
elif self.kind == 'freq':
for word in seq_ls:
if word in words_to_index:
vec[words_to_index[word]] += 1
for i in range(len(words)):
vec[i] = 1 + math.log(vec[i]) if vec[i] > 0 else vec[i]
elif self.kind=='tfidf':
for word in seq_ls:
if word in words_to_index:
vec[words_to_index[word]] += 1
for i in range(len(words)):
vec[i] = vec[i]*math.log(self.num_docs/self.dict[words[i]]) if vec[i] > 0 else vec[i]
mod = np.linalg.norm(vec)
if mod != 0:
vec = vec / mod
return vec
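# Weighting summary for the vector built above (as implemented):
#   boolean: vec[i] = 1 if word i appears in the document, else 0
#   freq:    vec[i] = 1 + log(tf_i) for term counts tf_i > 0
#   tfidf:   vec[i] = tf_i * log(num_docs / df_i), with df_i taken from self.dict
# In every case the vector is L2-normalized when its norm is non-zero.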
def make_vec_df(self,path,w_dict, prefix, errorfile):
data=[]
firms=[]
vecfails = 0
vecempty = 0
words=list(w_dict.keys())
words_to_index = {word: i for i, word in enumerate(words)}
word_dict_df=pd.DataFrame(words)
word_dict_df.to_csv(prefix + '/' + self.tags+'_'+self.kind+'_'+str(self.th)+'_dict.csv')
i=1
for filename in sorted(os.listdir(path)):
file = open(path+"/"+filename,'r', encoding="utf8")
title,des=t_n_d(file)
try:
if self.tags == 'nouns':
des_ls = sp.tokenize_str_hp(des, title)
elif self.tags == 'all' or self.tags == 'sic':
des_ls = sp.tokenize_str(des)
vec = self.make_seq_freq_vec(des_ls,words, words_to_index)
# data.append(vec)
# firms.append(title)
if np.dot(vec, vec) != 0:
data.append(vec)
firms.append(title)
else:
vecempty += 1
if errorfile is not None:
errorfile.write(filename + " contained no words after preprocessing\n")
except:
if errorfile is not None:
vecfails += 1
exc1, exc2, exc3 = sys.exc_info()
errorfile.write(filename + " failed in vector step: " + str(exc1) + " ; " + str(exc2)+ "\n")
i+=1
file.close()
data = np.array(data).T.tolist()
df = pd.DataFrame(data, columns=firms)
self.model_vecs=df
if errorfile is not None:
errorfile.write(str(vecfails) + " documents failed in vector step\n" + str(vecempty) + " documents contained no words after preprocessing\n")
return df
def make_model(tags,kind,th,errorfile=None,prefix=""):
"""
For tags, kind, th: see Model.__init__()
:errorfile:
a writable file object used to record errors while making the model, or None to ignore errors.
:prefix:
string: path to folder containing model's output.
"""
model = Model(tags,kind,th)
print('making model')
model.make_vec_df('test_data',model.make_dict_of_words('test_data', errorfile), prefix, errorfile)
model.model_vecs.to_csv(prefix + '/'+tags+'_'+kind+'_'+str(th)+'_vectors.csv')
print('made vectors')
return prefix + '/'+tags+'_'+kind+'_'+str(th)+'_vectors.csv'
# make_model('nouns','freq',.2)
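# Example usage (a sketch; 'errors.txt' and 'output' are assumed paths, and the
# errorfile argument is a writable file object as used by the methods above):
# with open('errors.txt', 'w') as errs:
#     csv_path = make_model('all', 'tfidf', 0.2, errorfile=errs, prefix='output')
# print('vectors written to', csv_path)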
|
# Driver Code
my_list = [923,6,234,56,3,0,123,5,7,3]
# length of the list
length = len(my_list)
for i in range(length):
# index of the element we consider to have the least value
min_index = i
# start at i + 1 because index i is the element we are comparing against
for j in range(i + 1, length):
# if the value at min_index is greater than the value at another index in the list,
# update min_index to point at that smaller value
if my_list[min_index] > my_list[j]:
min_index = j
# after one full inner loop, min_index points at the smallest remaining value
# swap that smallest value into position i
my_list[i], my_list[min_index] = my_list[min_index], my_list[i]
print("Sorted Array: ", my_list)
|