text stringlengths 8 6.05M |
|---|
from django.shortcuts import render
from rest_framework import generics,viewsets
from .models import Estimates
from .serializer import EstimateSerializer
from django.http import JsonResponse
class EstimateViewSet(viewsets.ModelViewSet):
    """CRUD REST endpoints for the Estimates model."""
    queryset = Estimates.objects.all()
    serializer_class = EstimateSerializer


def estimate_id(request):
    """Return the quote number of the most recently created estimate.

    Fix: the original called `.quote_number` unconditionally, raising
    AttributeError when the Estimates table was empty
    (`Estimates.objects.last()` returns None).
    """
    estimate = Estimates.objects.last()
    if estimate is None:
        return JsonResponse({"estimate_id": None}, status=404)
    return JsonResponse({"estimate_id": estimate.quote_number})
|
from flask import Flask, render_template, request, url_for, jsonify, json, session, redirect
import unirest
import httplib
from parse_rest.connection import register

# Parse credentials.
# Fix: the original line used invalid placeholder syntax
# `register(<application_id>, <rest_api_key>[, master_key=None])` and a
# `//` comment, neither of which is legal Python.  Fill in real
# credentials before deploying.
APPLICATION_ID = "----"
REST_API_KEY = "-------"
register(APPLICATION_ID, REST_API_KEY, master_key=None)
from parse_rest.datatypes import Object

app = Flask(__name__)


class ngos(Object):
    """parse_rest wrapper for the Parse `ngos` class."""
    pass


class resource(Object):
    """parse_rest wrapper for the Parse `resource` class."""
    pass


class user(Object):
    """parse_rest wrapper for the Parse `user` class."""
    pass


# Common headers for raw Parse REST API calls made with httplib below.
headers = {
    "X-Parse-Application-Id": "----",
    "X-Parse-REST-API-Key": "-------",
    "Content-Type": "application/json"
}
# NGO info  (fix: the original used `//`, which is not Python comment syntax)
@app.route('/ngolist', methods=['GET'])
def ngolist():
    """Return every object in the Parse `ngos` class as JSON."""
    connection = httplib.HTTPSConnection('api.parse.com', 443)
    try:
        connection.connect()
        connection.request('GET', '/1/classes/ngos', '', headers)
        result = json.loads(connection.getresponse().read())
    finally:
        connection.close()  # fix: the original leaked the connection
    return jsonify(result)
# User info  (fix: the original used `//`, which is not Python comment syntax)
@app.route('/userlist', methods=['GET'])
def userlist():
    """Return every object in the Parse `users` class as JSON."""
    connection = httplib.HTTPSConnection('api.parse.com', 443)
    try:
        connection.connect()
        connection.request('GET', '/1/classes/users', '', headers)
        result = json.loads(connection.getresponse().read())
    finally:
        connection.close()  # fix: the original leaked the connection
    return jsonify(result)
# Resource-request info  (fix: `//` is not Python comment syntax)
@app.route('/resources', methods=['GET'])
def resources():
    """Return every object in the Parse `resource` class as JSON."""
    connection = httplib.HTTPSConnection('api.parse.com', 443)
    try:
        connection.connect()
        connection.request('GET', '/1/classes/resource', '', headers)
        result = json.loads(connection.getresponse().read())
    finally:
        connection.close()  # fix: the original leaked the connection
    return jsonify(result)
@app.route('/<ngoname>/pending_requests', methods=['GET'])
def pending_requests(ngoname):
    """Render the pending requests for the NGO named in the URL.

    Fix: the route declares `<ngoname>` but the original view accepted no
    parameter, and it called ngo_pending_request() without its required
    argument -- both raised TypeError on every request.
    NOTE(review): ngo_pending_request expects an objectId; confirm that
    the URL segment really carries the id and not a display name.
    """
    response = ngo_pending_request(ngoname)
    return render_template('index.html', response=response)
@app.route('/<ngoname>/accepted_requests', methods=['GET'])
def accepted_requests(ngoname):
    """Render the accepted requests for the NGO named in the URL.

    Fix: the route declares `<ngoname>` but the original view accepted no
    parameter, and it called ngo_accepted_request() without its required
    argument -- both raised TypeError on every request.
    """
    response = ngo_accepted_request(ngoname)
    return render_template('index.html', response=response)
@app.route('/<user_id>/requests', methods=['GET'])
def total_requests(user_id):
    """Return all requests made by the given user as JSON.

    Fixes: the original accepted no `user_id` parameter even though the
    route declares one, and it passed the `user_requests` function object
    to jsonify instead of calling it.
    """
    return jsonify(user_requests(user_id))
def ngo_pending_request(ngo_id):
    """Collect not-yet-accepted requests for the NGO with id `ngo_id`.

    NOTE(review): `user.Query.get(objectId=...)` normally returns a single
    object in parse_rest, yet it is iterated below and `.Query.filter` is
    called on each element -- confirm against the parse_rest API that this
    is the intended usage.
    """
    requests = user.Query.get(objectId=ngo_id)
    dict = []  # NOTE(review): shadows the builtin `dict`
    for i in requests:
        # Keep only requests that have not been accepted yet.
        pending_request = i.Query.filter(request_accepted=False)
        dict.append(pending_request)
    return dict
def ngo_accepted_request(ngo_id):
    """Collect already-accepted requests for the NGO with id `ngo_id`.

    Fix: the original appended the undefined name
    `accepted_request_request`, raising NameError on the first iteration.
    NOTE(review): `user.Query.get(objectId=...)` normally returns a single
    object in parse_rest, yet it is iterated here -- confirm intent.
    """
    requests = user.Query.get(objectId=ngo_id)
    accepted = []
    for i in requests:
        # Keep only requests that have been accepted.
        accepted_requests = i.Query.filter(request_accepted=True)
        accepted.append(accepted_requests)
    return accepted
def user_requests(user_id):
    """Fetch the request record(s) stored for the user with id `user_id`."""
    return user.Query.get(objectId=user_id)
if __name__ == "__main__":
    # Development entry point.
    # NOTE(review): debug mode must be disabled and the 0.0.0.0 bind
    # restricted before any production deployment.
    app.debug = True
    app.run(host='0.0.0.0')
|
# file io stuff
import os.path
import json
camID = 0
whiteBalance = 1
isFlipped = False
def getParams():
    """Load and return the camera parameter dict from configs/camera.txt.

    Fixes: the original returned the raw JSON string (`config`) instead
    of the parsed dict, and never closed the file handle.
    """
    with open('configs/camera.txt', 'r') as f:
        return json.loads(f.read())
def getWhiteBalance():
    """Return the module-level white-balance setting."""
    return whiteBalance
def setParam(paramName, value, cameraParameters):
    """Update one camera setting by name, then persist all settings.

    `value` arrives as a string; numeric settings are coerced to the type
    of the value they replace before being stored.
    """
    global camID
    global whiteBalance
    global isFlipped
    if paramName == 'whiteBalance':
        whiteBalance = float(value)
    elif paramName == 'camera id':
        print(value)
        camID = int(value)
    elif paramName == 'flip camera':
        isFlipped = value
    else:
        # Generic aruco detector parameter: coerce to the current type.
        current = getattr(cameraParameters, paramName)
        if isinstance(current, int):
            setattr(cameraParameters, paramName, int(value))
        elif isinstance(current, float):
            setattr(cameraParameters, paramName, float(value))
    # Persist every setting after each change.
    write_camera_params(cameraParameters)
def write_camera_params(cameraParameters):
    """Serialize the module globals (camID, whiteBalance, isFlipped) plus
    the aruco detector parameters to configs/camera.txt as JSON.

    Fix: use a context manager so the file handle is closed even when
    serialization raises.
    """
    global camID
    global whiteBalance
    global isFlipped
    params = {
        "camera id": camID,
        "whiteBalance": whiteBalance,
        "flip camera": isFlipped,
        "adaptiveThreshWinSizeMin": cameraParameters.adaptiveThreshWinSizeMin,
        "adaptiveThreshWinSizeStep": cameraParameters.adaptiveThreshWinSizeStep,
        "adaptiveThreshConstant": cameraParameters.adaptiveThreshConstant,
        "minMarkerPerimeterRate": cameraParameters.minMarkerPerimeterRate,
        "maxMarkerPerimeterRate": cameraParameters.maxMarkerPerimeterRate,
        "minCornerDistanceRate": cameraParameters.minCornerDistanceRate,
        "minMarkerDistanceRate": cameraParameters.minMarkerDistanceRate,
        "minDistanceToBorder": cameraParameters.minDistanceToBorder,
        "markerBorderBits": cameraParameters.markerBorderBits,
        "minOtsuStdDev": cameraParameters.minOtsuStdDev,
        "perspectiveRemoveIgnoredMarginPerCell": cameraParameters.perspectiveRemoveIgnoredMarginPerCell,
        "maxErroneousBitsInBorderRate": cameraParameters.maxErroneousBitsInBorderRate,
        "errorCorrectionRate": cameraParameters.errorCorrectionRate
    }
    with open('configs/camera.txt', 'w') as f:
        f.write(json.dumps(params))
def load_camera_config(cameraParameters):
    """Populate the module globals and `cameraParameters` from
    configs/camera.txt.

    Fix: the original assigned camID/whiteBalance/isFlipped WITHOUT
    `global` declarations, so the loaded values went into locals and were
    silently discarded when the function returned.
    """
    global camID
    global whiteBalance
    global isFlipped
    with open('configs/camera.txt', 'r') as f:
        params = json.loads(f.read())
    camID = params['camera id']
    whiteBalance = params['whiteBalance']
    isFlipped = params['flip camera']
    # Thresholding
    cameraParameters.adaptiveThreshWinSizeMin = params['adaptiveThreshWinSizeMin']
    cameraParameters.adaptiveThreshWinSizeStep = params['adaptiveThreshWinSizeStep']
    cameraParameters.adaptiveThreshConstant = params['adaptiveThreshConstant']
    # Contour Filtering
    cameraParameters.minMarkerPerimeterRate = params['minMarkerPerimeterRate']
    cameraParameters.maxMarkerPerimeterRate = params['maxMarkerPerimeterRate']
    cameraParameters.minCornerDistanceRate = params['minCornerDistanceRate']
    cameraParameters.minMarkerDistanceRate = params['minMarkerDistanceRate']
    cameraParameters.minDistanceToBorder = params['minDistanceToBorder']
    # Bits Extraction
    cameraParameters.markerBorderBits = params['markerBorderBits']
    cameraParameters.minOtsuStdDev = params['minOtsuStdDev']
    cameraParameters.perspectiveRemoveIgnoredMarginPerCell = params['perspectiveRemoveIgnoredMarginPerCell']
    # parameters.perpectiveRemovePixelPerCell = 10 # 4
    # Marker Identification
    cameraParameters.maxErroneousBitsInBorderRate = params['maxErroneousBitsInBorderRate']
    cameraParameters.errorCorrectionRate = params['errorCorrectionRate']
def getCamera():
    """Return the currently selected camera id."""
    # Reading a module global needs no `global` declaration.
    return camID
def getFlip():
    """Return whether the camera image should be flipped."""
    # Reading a module global needs no `global` declaration.
    return isFlipped
|
from django.shortcuts import render
from django.http import HttpResponse, request
from .models import * |
from __future__ import absolute_import
import logging
import tensorflow as tf
from parser.constants import TrainVariables
class LatentAttentionNetwork(object):
    """The RNN underlying the implemented model for IFTTT domain.

    This class replicates the network proposed in the following NIPS 2016 paper:
    Chen, Xinyun, Chang Liu Richard Shin Dawn Song, and Mingcheng Chen.
    "Latent Attention For If-Then Program Synthesis."
    arXiv preprint arXiv:1611.01867 (2016).

    NOTE(review): this code targets a pre-1.0 TensorFlow API --
    `tf.concat(axis, values)` argument order, `tf.batch_matmul`, and
    positional `softmax_cross_entropy_with_logits` -- it will not run
    unmodified on TF >= 1.0; confirm the pinned TF version.

    Attributes:
        dropout (tensorflow.placeholder): Placeholder for probability of dropout
            to be used in the Dropout layer. A value of 1.0 results in no
            dropout being applied.
        inputs (tensorflow.placeholder): Placeholder for inputs to the network.
            Input should be a 2D array where the first dimension corresponds to
            batch size and the second dimension is the input dimensionality.
        labels (tensorflow.placeholder): Placeholder for true labels
            corresponding to inputs. Labels should be in the form of a 2D array,
            the first dimension being the batch size, and each row being a
            one-hot vector.
        seq_lens (tensorflow.placeholder): Placeholder for actual lengths of
            inputs, barring the `NULL` tokens.
        dictionary_embedding (Tensor): Output of dictionary embedding layer.
            Shape=(`self._batch_size`, `self._sent_size`, `self._hidden_size`)
        rnn_embedding (tensorflow.Tensor): Output of RNN embedding layer.
            Shape=(`self._batch_size`, `self._sent_size`, 2*`self._hidden_size`)
        latent_attention (tensorflow.Tensor): Output of latent attention layer.
            Shape=(`self._batch_size`, `self._sent_size`, 1)
        active_attention (tensorflow.Tensor): Output of active attention layer.
            Shape=(`self._batch_size`, `self._sent_size`, 1)
        output_representation (tensorflow.Tensor): The output representation.
            Shape=(`self._batch_size`, 2*`self._hidden_size`, 1)
        prediction (tensorflow.Tensor): Logit predictions.
            Shape=(`self._batch_size`, `self._num_classes`, 1)
        loss (tensorflow.Tensor): Value of loss. Cross-entropy loss is used.
        optimize (tensorflow.op): Operation to optimize loss function.
        error (Tensor): Value of classification error.
    """

    def __init__(self, config, num_classes, train_vars):
        """Sets hyper-parameter values based on the passed `config`

        Args:
            config: A configuration class, similar to
                `configs.PaperConfigurations`.
            num_classes (int): Total number of label classes.
            train_vars (TrainVariables): The mode that determines which set of
                model parameters should be modified during training.
                The mode `TrainVariables.all` causes all model parameters to be
                learned during training.
                The mode `TrainVariables.non_attention` results in only the
                model parameters that are not part of the attention mechanism
                to be learned. This includes only the variable named "p".
                The mode `TrainVariables.attention` results in all the attention
                related parameters being learned. This includes all variables
                except "p".
        """
        self.dropout = tf.placeholder(tf.float32, name='dropout')
        self._learning_rate = config.learning_rate
        """float: Learning rate for optimizer."""
        self._max_gradient_norm = config.max_gradient_norm
        """float: Maximum norm of gradients. If the norm of gradients go above
        this value, they are rescaled."""
        self._hidden_size = config.hidden_size
        """int: Size of embedding for each token, as output by dictionary
        embedding. In the paper, twice of this value is denoted by d."""
        self._batch_size = config.batch_size
        """int: Size of mini-batch, i.e., number of examples in `self.inputs`"""
        self._vocab_size = config.vocab_size
        """int: Size of vocabulary. In the paper, this value is denoted by N."""
        self._sent_size = config.sent_size
        """int: Maximum size of each description. In the paper, this value is
        denoted by j."""
        self._num_classes = num_classes
        """int: Total number of label classes. In the paper, this value is
        denoted by M."""
        self._initializer = tf.random_uniform_initializer(-1, 1)
        """: Initializer to be used to initialize all tensorflow variables. This
        is same as the one proposed in the paper."""
        self.inputs = tf.placeholder(tf.int32,
                                     [None, self._sent_size], 'inputs')
        self.labels = tf.placeholder(tf.float32,
                                     [None, self._num_classes], 'labels')
        self.seq_lens = tf.placeholder(tf.int32, [None], 'seq_lens')
        # Build the graph layer by layer; each layer consumes the tensors
        # produced by the previous assignments, so this order matters.
        self.dictionary_embedding = self.dictionary_embedding_layer()
        self.rnn_embedding = self.rnn_embedding_layer()
        self.latent_attention = self.latent_attention_layer()
        self.active_attention = self.active_attention_layer()
        self.output_representation = self.output_representation_layer()
        self.prediction = self.prediction_layer()
        self.loss = self.loss_layer()
        self.optimize = self.optimize_layer(train_vars)
        self.error = self.error_layer()

    def dictionary_embedding_layer(self):
        """Constructs the dictionary embedding layer.

        The dictionary embedding layer is essentially a lookup-table where each
        token in the vocabulary is mapped to an embedding vector.

        Returns:
            tensorflow.Tensor: Output of dictionary embedding layer.
                Shape=(?, `self._sent_size`, `self._hidden_size`)
        """
        dict_embedding_matrix = tf.get_variable(
            name="dict_embedding_matrix",
            shape=[self._vocab_size, self._hidden_size],
            dtype=tf.float32, initializer=self._initializer)
        return tf.nn.embedding_lookup(dict_embedding_matrix, self.inputs)

    def rnn_embedding_layer(self):
        """Constructs the RNN embedding layer.

        In keeping with the paper, a Bidirectional LSTM is used to embed each
        token -- input to the RNN is in the form of an intermediate
        embedding returned by the Dictionary Embedding layer -- in a
        2*`self._hidden_size` space. The embedding is obtained by concatenating
        the outputs of the two LSTMs.

        Returns:
            tensorflow.Tensor: Output of RNN embedding layer.
                Shape=(?, `self._sent_size`, 2*`self._hidden_size`)
        """
        # The forward LSTM cell.
        cell_fw = tf.nn.rnn_cell.LSTMCell(
            num_units=self._hidden_size, initializer=self._initializer)
        # Add dropout wrapper to the cell.
        cell_fw = tf.nn.rnn_cell.DropoutWrapper(cell=cell_fw,
                                                input_keep_prob=self.dropout,
                                                output_keep_prob=self.dropout)
        # The backward LSTM cell.
        cell_bw = tf.nn.rnn_cell.LSTMCell(
            num_units=self._hidden_size, initializer=self._initializer)
        # Add dropout wrapper to the cell.
        cell_bw = tf.nn.rnn_cell.DropoutWrapper(cell=cell_bw,
                                                input_keep_prob=self.dropout,
                                                output_keep_prob=self.dropout)
        # Construct Bidirectional LSTM using the two cells.
        outputs, states = tf.nn.bidirectional_dynamic_rnn(
            cell_fw=cell_fw, cell_bw=cell_bw, inputs=self.dictionary_embedding,
            sequence_length=self.seq_lens, dtype=tf.float32)
        rnn_output_fw, rnn_output_bw = outputs[0], outputs[1]
        # Concatenate the output of the two LSTMs.
        # NOTE(review): pre-TF-1.0 argument order (axis first).
        rnn_output_concatenated = tf.concat(2, [rnn_output_fw, rnn_output_bw],
                                            name="rnn_output_batched")
        return rnn_output_concatenated

    def latent_attention_layer(self):
        """Constructs the Latent Attention layer.

        Latent Attention outputs a set of weights over the input tokens, one
        set over each input.

        Returns:
            tensorflow.Tensor: Output of latent attention layer.
                Shape=(`self._batch_size`, `self._sent_size`, 1)
        """
        u = tf.get_variable(name="u", shape=[2 * self._hidden_size, 1],
                            dtype=tf.float32,
                            initializer=self._initializer)
        # Convert `rnn_embedding` to 2D tensor so a single matmul covers
        # every (batch, token) position at once.
        embed = tf.reshape(self.rnn_embedding,
                           shape=[-1, 2 * self._hidden_size])
        l_pre_softmax = tf.matmul(embed, u)
        l_pre_softmax = tf.reshape(l_pre_softmax,
                                   shape=[-1, self._sent_size, 1],
                                   name="l_pre_softmax")
        # Softmax over the token dimension (dim=1).
        return tf.nn.softmax(l_pre_softmax, dim=1, name="l")

    def active_attention_layer(self):
        """Constructs the Active Attention layer.

        Active Attention outputs a set of weights over the input tokens, one
        set over each input. It utilizes the latent attention weights to come
        up with a new set of weights.

        Returns:
            tensorflow.Tensor: Output of active attention layer.
                Shape=(`self._batch_size`, `self._sent_size`, 1)
        """
        v = tf.get_variable(name="v",
                            shape=[2 * self._hidden_size, self._sent_size],
                            dtype=tf.float32, initializer=self._initializer)
        # Convert rnn_embedding to 2D tensor
        embed = tf.reshape(self.rnn_embedding,
                           shape=[-1, 2 * self._hidden_size])
        a_pre_softmax = tf.matmul(embed, v)
        a_pre_softmax = tf.reshape(a_pre_softmax,
                                   shape=[-1, self._sent_size, self._sent_size],
                                   name="a_pre_softmax")
        a = tf.nn.softmax(a_pre_softmax, dim=1, name="a")
        # Weight the active-attention matrix by the latent weights.
        # NOTE(review): tf.batch_matmul was removed in TF 1.0 (use tf.matmul).
        w = tf.batch_matmul(a, self.latent_attention, name="w")
        return w

    def output_representation_layer(self):
        """Constructs the Output Representation layer.

        Output layer embeds the entire description in a 2*`self._hidden_size`
        space.

        Returns:
            tensorflow.Tensor: The output representation.
                Shape=(`self._batch_size`, 2*`self._hidden_size`, 1)
        """
        w_normalized = tf.nn.l2_normalize(self.active_attention, dim=1,
                                          name="w_normalized")
        # Transpose to (batch, 2*hidden, sent) so the attention weights
        # reduce over the token dimension.
        embed = tf.transpose(self.rnn_embedding, [0, 2, 1])
        return tf.batch_matmul(embed, w_normalized, name="o")

    def prediction_layer(self):
        """Constructs the final Prediction layer.

        The output of this layer can be interpreted as unscaled probabilities of
        the input belonging to each class.

        Returns:
            Logit predictions.
                Shape=(`self._batch_size`, `self._num_classes`, 1)
        """
        p = tf.get_variable(name="p",
                            shape=[self._num_classes, 2 * self._hidden_size],
                            dtype=tf.float32, initializer=self._initializer)
        initializer = tf.zeros([self._num_classes, 1])
        # tf.scan applies the shared projection `p` to each example's
        # output representation; the accumulator value is ignored.
        pred = tf.scan(lambda a, o: tf.matmul(p, o), self.output_representation,
                       initializer=initializer)
        return tf.reshape(pred, shape=[-1, self._num_classes],
                          name="log_predictions")

    def loss_layer(self):
        """Calculates the cross-entropy loss.

        NOTE(review): positional (logits, labels) arguments -- pre-TF-1.0
        call signature.
        """
        return tf.reduce_mean(
            tf.nn.softmax_cross_entropy_with_logits(self.prediction,
                                                    self.labels),
            name="loss")

    def optimize_layer(self, train_vars):
        """Sets up the optimizer to be used for minimizing the loss function.

        Adam Optimizer is used.

        Args:
            train_vars (TrainVariables): The mode that determines which set of
                model parameters should be modified during training.
                The mode `TrainVariables.all` causes all model parameters to be
                learned during training.
                The mode `TrainVariables.non_attention` results in only the
                model parameters that are not part of the attention mechanism
                to be learned. This includes only the variable named "p".
                The mode `TrainVariables.attention` results in all the attention
                related parameters being learned. This includes all variables
                except "p".

        Returns:
            tensorflow.Operation: Operation to be executed to perform
                optimization.
        """
        var_list = []
        if train_vars is TrainVariables.all:
            var_list = tf.trainable_variables()
        elif train_vars is TrainVariables.non_attention:
            var_list = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
                                         scope='p')
        elif train_vars is TrainVariables.attention:
            all_vars = tf.trainable_variables()
            for var in all_vars:
                # Exclude the output projection variable "p" by name.
                if "p:" not in var.name:
                    var_list.append(var)
        else:
            logging.error("Illegal type of `train_vars`: %s", train_vars)
            raise TypeError
        logging.info("Optimizing these variables: %s",
                     [var.name for var in var_list])
        optimizer = tf.train.AdamOptimizer(self._learning_rate)
        grads_and_vars = optimizer.compute_gradients(self.loss,
                                                     var_list=var_list)
        # Clip each gradient's norm to `self._max_gradient_norm`.
        capped_grads_and_vars = [
            (tf.clip_by_norm(grad, self._max_gradient_norm), var)
            for grad, var in grads_and_vars]
        return optimizer.apply_gradients(capped_grads_and_vars)

    def error_layer(self):
        """Calculates the classification error."""
        mistakes = tf.not_equal(tf.argmax(self.labels, 1),
                                tf.argmax(self.prediction, 1))
        return tf.reduce_mean(tf.cast(mistakes, tf.float32), name="error")
|
import hashlib, trans
import socket, os, codecs
import urllib2, urlparse
import sys
import re
from datetime import datetime
from uuid import uuid4
from copy import deepcopy
from django.contrib import auth
from django.core.files.base import ContentFile
from django.template import TemplateDoesNotExist
from django.template.defaultfilters import slugify
from django.template.loader import make_origin, find_template_loader
from django.utils.datastructures import SortedDict
from django.utils.importlib import import_module
from django.utils.deconstruct import deconstructible
from django.utils.translation import get_language
def file_put_contents(filename, data, utf=False):
    """Write `data` to `filename`; with utf=True write UTF-8 with BOM."""
    f = codecs.open(filename, "w", "utf-8-sig") if utf else open(filename, 'w')
    f.write(data)
    f.close()


def file_get_contents(url):
    """Read a local path, or fetch a remote URL via urllib2 otherwise."""
    if os.path.exists(url):
        return open(url, 'r').read()
    opener = urllib2.build_opener()
    content = opener.open(url).read()
    return content


# deprecated
def settings_context_get():
    """Return the settings context ('LOCAL'/'REMOTE'), default REMOTE."""
    return os.environ.get('SETTINGS_CONTEXT', 'REMOTE')


# deprecated
def settings_context_set(hosts):
    """Mark the context LOCAL iff the current hostname is in `hosts`."""
    if socket.gethostname() in hosts:
        os.environ.setdefault('SETTINGS_CONTEXT', 'LOCAL')
    else:
        os.environ.setdefault('SETTINGS_CONTEXT', 'REMOTE')


def is_local_settings(hosts):
    """True when the current hostname is one of `hosts`."""
    return socket.gethostname() in hosts


def image_from_url_get(url):
    """Download `url` and wrap the raw bytes in a Django ContentFile."""
    url_data = urlparse.urlparse(url)
    ext = os.path.splitext(url_data.path)[1]  # NOTE(review): unused
    return ContentFile(urllib2.urlopen(url).read())


def image_from_url_get_2(url, name_gen=True):
    """Download `url`; name the file md5(url + now) or by its basename."""
    file = image_from_url_get(url)  # NOTE(review): shadows builtin `file`
    ext = os.path.splitext(urlparse.urlparse(url).path)[1]
    if name_gen:
        md5 = hashlib.md5()
        md5.update('%s-%s' % (url, datetime.now()))
        if not ext:
            ext = '.jpg'  # default extension when the URL carries none
        file.name = '%s%s' % (md5.hexdigest(), ext.lower())
    else:
        file.name = os.path.basename(url)
    return file


def slugify_ru(str):
    """Transliterate (via the `trans` codec) and slugify a Russian string.

    NOTE(review): the parameter shadows the builtin `str`.
    """
    try:
        return slugify(str.encode('trans'))
    except Exception, e:
        return slugify(unicode(str).encode('trans'))


def md5_for_file(f, block_size=2 ** 20):
    """Return the md5 object for the file at path `f`, read in chunks.

    NOTE(review): returns the md5 object itself, not `.hexdigest()`.
    """
    f = open(f)
    md5 = hashlib.md5()
    while True:
        data = f.read(block_size)
        if not data:
            break
        md5.update(data)
    f.close()
    return md5


def model_class_get(path):
    """Resolve 'app.Model' into a Django model class."""
    from django.db.models import get_model
    arr = path.split('.')
    return get_model(arr[0], arr[1])


def model_object_get(path, pk):
    """Fetch the instance of model 'app.Model' with primary key `pk`."""
    cls = model_class_get(path)
    return cls.objects.get(pk=pk)


def class_get(path):
    """Import and return the attribute named by a dotted `path`."""
    arr = path.split('.')
    return getattr(import_module('.'.join(arr[0:-1])), arr[-1])


def qset_to_dict(qset, key='id'):
    """Map a queryset into an ordered dict keyed by attribute `key`."""
    res = SortedDict()
    for item in qset:
        res[getattr(item, key)] = item
    return res


def shop_login(request, username, password):
    """Authenticate and log in; returns the user or None on failure."""
    # Drop any stale auth session before re-authenticating.
    if request.session.has_key(auth.SESSION_KEY):
        del request.session[auth.SESSION_KEY]
        request.session.modified = True
    user = auth.authenticate(username=username, password=password)
    if user is not None and user.is_active:
        auth.login(request, user)
        return user


def exception_details():
    """Return (filename, exc_type, exc_obj, exc_tb) of the active exception."""
    exc_type, exc_obj, exc_tb = sys.exc_info()
    file_name = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
    return file_name, exc_type, exc_obj, exc_tb


def dict_sort(dic_or_list, key):
    """Sort a list of mappings by `key`, or a dict of dicts by value[key]."""
    if isinstance(dic_or_list, list):
        sorted_x = sorted(dic_or_list, key=lambda x: x[key])
    else:
        sorted_x = sorted(dic_or_list.iteritems(), key=lambda x: x[1][key])
    return sorted_x


def handle_uploaded_file(f, path):
    """Stream a Django UploadedFile to `path` chunk by chunk."""
    destination = open(path, 'wb+')
    for chunk in f.chunks():
        destination.write(chunk)
    destination.close()


def ex_find_template(name, exclude=[], dirs=None):
    """Load template source for `name`, skipping loaders in `exclude`.

    NOTE(review): mutable default `exclude=[]` -- harmless here since it
    is only read, but fragile if anyone ever mutates it.
    """
    # Calculate template_source_loaders the first time the function is executed
    # because putting this logic in the module-level namespace may cause
    # circular import errors. See Django ticket #1292.
    template_source_loaders = None
    from django.conf import settings
    loaders = []
    for loader_name in settings.TEMPLATE_LOADERS:
        if loader_name in exclude: continue
        loader = find_template_loader(loader_name)
        if loader is not None:
            loaders.append(loader)
    template_source_loaders = tuple(loaders)
    for loader in template_source_loaders:
        try:
            return loader.load_template_source(name, dirs)
        except TemplateDoesNotExist:
            pass
    raise TemplateDoesNotExist(name)


def file_exists(path):
    """True if `path` can be opened; None otherwise (falsy either way)."""
    try:
        with open(path) as f:
            return True
    except IOError as e:
        return None


def template_to_source():
    """Write each active SiteTemplate's content back to its source file."""
    import codecs
    from django.conf import settings
    from django.template.loaders.app_directories import Loader
    from common.models import SiteTemplate
    loader = Loader()
    apps_root = os.path.realpath('%s/../' % settings.PROJECT_ROOT)
    for st in SiteTemplate.active_objects.all():
        for filepath in loader.get_template_sources(st.name):
            try:
                # Only overwrite files that exist inside the project tree.
                if file_exists(filepath) and filepath.startswith(apps_root):
                    with codecs.open(filepath, 'w', 'utf8') as f:
                        f.write(st.content)
                        print st.name, filepath, '-ok'
            except IOError as e:
                print e


# Regexes used by camel_to_underline below.
first_cap_re = re.compile('(.)([A-Z][a-z]+)')
all_cap_re = re.compile('([a-z0-9])([A-Z])')


def camel_to_underline(name):
    """Convert CamelCase `name` to snake_case."""
    s1 = first_cap_re.sub(r'\1_\2', name)
    return all_cap_re.sub(r'\1_\2', s1).lower()


class SiteMapGenerator(object):
    """Writes a sitemap.org-format XML sitemap to a file."""

    def _write_header(self):
        self.file.write('''<?xml version="1.0" encoding="UTF-8"?>''')

    def _open_urlset(self):
        self.file.write('''<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">''')

    def _close_urlset(self):
        self.file.write('</urlset>')

    def _write_urls(self, urls):
        # Each `url` is a dict with 'loc', 'changefreq' and 'priority' keys.
        for url in urls:
            self.file.write('<url>')
            self.file.write('<loc>%(loc)s</loc>' % url)
            self.file.write('<changefreq>%(changefreq)s</changefreq>' % url)
            self.file.write('<priority>%(priority)s</priority>' % url)
            self.file.write('</url>')

    def generate(self, path, **kwargs):
        """Write kwargs['urls'] to `path`; returns a success/error dict.

        NOTE(review): if open() itself fails, `self.file` is unset and the
        finally clause raises AttributeError -- confirm intended behavior.
        """
        try:
            self.file = open(path, 'w+')
            self._write_header()
            self._open_urlset()
            self._write_urls(kwargs['urls'])
            self._close_urlset()
        except BaseException, e:
            from common.std import exception_details
            import logging
            log = logging.getLogger('file_logger')
            ed = unicode(exception_details())
            log.log(logging.DEBUG, ed)
            return {'success': False, 'error': ed}
        finally:
            self.file.close()
        return {'success': True}


# Template equivalent of the generated sitemap, kept for reference:
#<?xml version="1.0" encoding="UTF-8"?>
#<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
#{% for item in items %}
#{% if item.url|first == '/' %}
#<url>
#<loc>{{ item.url }}</loc>
#<changefreq>weekly</changefreq>
#<priority>1</priority>
#</url>
#{% endif %}
#{% endfor %}
#</urlset>


def dict_merge(target, *args):
    """Deep-merge one or more dicts into `target` (mutates and returns it)."""
    # Merge multiple dicts
    if len(args) > 1:
        for obj in args:
            dict_merge(target, obj)
        return target
    # Recursively merge dicts and set non-dict values
    obj = args[0]
    if not isinstance(obj, dict):
        return obj
    for k, v in obj.iteritems():
        if k in target and isinstance(target[k], dict):
            dict_merge(target[k], v)
        else:
            target[k] = deepcopy(v)
    return target


# request.FILES['file']
# path = 'users/1' for example
from django.core.files.storage import default_storage


def form_file_save(file, path):
    """Save an uploaded file under `path` with a uuid4-based name."""
    name, ext = os.path.splitext(file.name)
    name = '%s%s' % (str(uuid4()), ext)
    file_path = '%s/%s' % (path, name)
    full_path = default_storage.save(file_path, ContentFile(file.read()))
    return file_path, full_path
# NOTE(review): everything from here down to slugify_ru duplicates, nearly
# verbatim, the definitions earlier in this file (the later definitions win
# at import time).  Consider deleting one copy after confirming no module
# relies on the re-binding order.


def file_put_contents(filename, data, utf=False):
    """Write `data` to `filename`; with utf=True write UTF-8 with BOM."""
    f = codecs.open(filename, "w", "utf-8-sig") if utf else open(filename, 'w')
    f.write(data)
    f.close()


def file_get_contents(url):
    """Read a local path, or fetch a remote URL via urllib2 otherwise."""
    if os.path.exists(url):
        return open(url, 'r').read()
    opener = urllib2.build_opener()
    content = opener.open(url).read()
    return content


# deprecated
def settings_context_get():
    """Return the settings context ('LOCAL'/'REMOTE'), default REMOTE."""
    return os.environ.get('SETTINGS_CONTEXT', 'REMOTE')


# deprecated
def settings_context_set(hosts):
    """Mark the context LOCAL iff the current hostname is in `hosts`."""
    if socket.gethostname() in hosts:
        os.environ.setdefault('SETTINGS_CONTEXT', 'LOCAL')
    else:
        os.environ.setdefault('SETTINGS_CONTEXT', 'REMOTE')


def is_local_settings(hosts):
    """True when the current hostname is one of `hosts`."""
    return socket.gethostname() in hosts


def image_from_url_get(url):
    """Download `url` and wrap the raw bytes in a Django ContentFile."""
    url_data = urlparse.urlparse(url)
    ext = os.path.splitext(url_data.path)[1]  # NOTE(review): unused
    return ContentFile(urllib2.urlopen(url).read())


def image_from_url_get_2(url, name_gen=True):
    """Download `url`; name the file md5(url + now) or by its basename."""
    file = image_from_url_get(url)  # NOTE(review): shadows builtin `file`
    ext = os.path.splitext(urlparse.urlparse(url).path)[1]
    if name_gen:
        md5 = hashlib.md5()
        md5.update('%s-%s' % (url, datetime.now()))
        if not ext:
            ext = '.jpg'  # default extension when the URL carries none
        file.name = '%s%s' % (md5.hexdigest(), ext.lower())
    else:
        file.name = os.path.basename(url)
    return file


def md5_for_file(f, block_size=2 ** 20):
    """Return the md5 object for the file at path `f`, read in chunks."""
    f = open(f)
    md5 = hashlib.md5()
    while True:
        data = f.read(block_size)
        if not data:
            break
        md5.update(data)
    f.close()
    return md5


def model_class_get(path):
    """Resolve 'app.Model' into a Django model class."""
    from django.db.models import get_model
    arr = path.split('.')
    return get_model(arr[0], arr[1])


def model_object_get(path, pk):
    """Fetch the instance of model 'app.Model' with primary key `pk`."""
    cls = model_class_get(path)
    return cls.objects.get(pk=pk)


def class_get(path):
    """Import and return the attribute named by a dotted `path`."""
    arr = path.split('.')
    return getattr(import_module('.'.join(arr[0:-1])), arr[-1])


def qset_to_dict(qset, key='id'):
    """Map a queryset into an ordered dict keyed by attribute `key`."""
    res = SortedDict()
    for item in qset:
        res[getattr(item, key)] = item
    return res


def shop_login(request, username, password):
    """Authenticate and log in; returns the user or None on failure."""
    # Drop any stale auth session before re-authenticating.
    if request.session.has_key(auth.SESSION_KEY):
        del request.session[auth.SESSION_KEY]
        request.session.modified = True
    user = auth.authenticate(username=username, password=password)
    if user is not None and user.is_active:
        auth.login(request, user)
        return user


def exception_details():
    """Return (filename, exc_type, exc_obj, exc_tb) of the active exception."""
    exc_type, exc_obj, exc_tb = sys.exc_info()
    file_name = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
    return file_name, exc_type, exc_obj, exc_tb


def dict_sort(dic_or_list, key):
    """Sort a list of mappings by `key`, or a dict of dicts by value[key]."""
    if isinstance(dic_or_list, list):
        sorted_x = sorted(dic_or_list, key=lambda x: x[key])
    else:
        sorted_x = sorted(dic_or_list.iteritems(), key=lambda x: x[1][key])
    return sorted_x


def handle_uploaded_file(f, path):
    """Stream a Django UploadedFile to `path` chunk by chunk."""
    destination = open(path, 'wb+')
    for chunk in f.chunks():
        destination.write(chunk)
    destination.close()


def ex_find_template(name, exclude=[], dirs=None):
    """Load template source for `name`, skipping loaders in `exclude`.

    NOTE(review): mutable default `exclude=[]` -- only read, but fragile.
    """
    # Calculate template_source_loaders the first time the function is executed
    # because putting this logic in the module-level namespace may cause
    # circular import errors. See Django ticket #1292.
    template_source_loaders = None
    from django.conf import settings
    loaders = []
    for loader_name in settings.TEMPLATE_LOADERS:
        if loader_name in exclude: continue
        loader = find_template_loader(loader_name)
        if loader is not None:
            loaders.append(loader)
    template_source_loaders = tuple(loaders)
    for loader in template_source_loaders:
        try:
            return loader.load_template_source(name, dirs)
        except TemplateDoesNotExist:
            pass
    raise TemplateDoesNotExist(name)


def file_exists(path):
    """True if `path` can be opened; None otherwise (falsy either way)."""
    try:
        with open(path) as f:
            return True
    except IOError as e:
        return None


def template_to_source():
    """Write each active SiteTemplate's content back to its source file."""
    import codecs
    from django.conf import settings
    from django.template.loaders.app_directories import Loader
    from common.models import SiteTemplate
    loader = Loader()
    apps_root = os.path.realpath('%s/../' % settings.PROJECT_ROOT)
    for st in SiteTemplate.active_objects.all():
        for filepath in loader.get_template_sources(st.name):
            try:
                # Only overwrite files that exist inside the project tree.
                if file_exists(filepath) and filepath.startswith(apps_root):
                    with codecs.open(filepath, 'w', 'utf8') as f:
                        f.write(st.content)
                        print st.name, filepath, '-ok'
            except IOError as e:
                print e


# Regexes used by camel_to_underline below.
first_cap_re = re.compile('(.)([A-Z][a-z]+)')
all_cap_re = re.compile('([a-z0-9])([A-Z])')


def camel_to_underline(name):
    """Convert CamelCase `name` to snake_case."""
    s1 = first_cap_re.sub(r'\1_\2', name)
    return all_cap_re.sub(r'\1_\2', s1).lower()


class SiteMapGenerator(object):
    """Writes a sitemap.org-format XML sitemap to a file."""

    def _write_header(self):
        self.file.write('''<?xml version="1.0" encoding="UTF-8"?>''')

    def _open_urlset(self):
        self.file.write('''<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">''')

    def _close_urlset(self):
        self.file.write('</urlset>')

    def _write_urls(self, urls):
        # Each `url` is a dict with 'loc', 'changefreq' and 'priority' keys.
        for url in urls:
            self.file.write('<url>')
            self.file.write('<loc>%(loc)s</loc>' % url)
            self.file.write('<changefreq>%(changefreq)s</changefreq>' % url)
            self.file.write('<priority>%(priority)s</priority>' % url)
            self.file.write('</url>')

    def generate(self, path, **kwargs):
        """Write kwargs['urls'] to `path`; returns a success/error dict.

        NOTE(review): if open() itself fails, `self.file` is unset and the
        finally clause raises AttributeError -- confirm intended behavior.
        """
        try:
            self.file = open(path, 'w+')
            self._write_header()
            self._open_urlset()
            self._write_urls(kwargs['urls'])
            self._close_urlset()
        except BaseException, e:
            from common.std import exception_details
            import logging
            log = logging.getLogger('file_logger')
            ed = unicode(exception_details())
            log.log(logging.DEBUG, ed)
            return {'success': False, 'error': ed}
        finally:
            self.file.close()
        return {'success': True}


# Template equivalent of the generated sitemap, kept for reference:
#<?xml version="1.0" encoding="UTF-8"?>
#<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
#{% for item in items %}
#{% if item.url|first == '/' %}
#<url>
#<loc>{{ item.url }}</loc>
#<changefreq>weekly</changefreq>
#<priority>1</priority>
#</url>
#{% endif %}
#{% endfor %}
#</urlset>


def dict_merge(target, *args):
    """Deep-merge one or more dicts into `target` (mutates and returns it)."""
    # Merge multiple dicts
    if len(args) > 1:
        for obj in args:
            dict_merge(target, obj)
        return target
    # Recursively merge dicts and set non-dict values
    obj = args[0]
    if not isinstance(obj, dict):
        return obj
    for k, v in obj.iteritems():
        if k in target and isinstance(target[k], dict):
            dict_merge(target[k], v)
        else:
            target[k] = deepcopy(v)
    return target


# request.FILES['file']
# path = 'users/1' for example
from django.core.files.storage import default_storage


def form_file_save(file, path):
    """Save an uploaded file under `path` with a uuid4-based name."""
    name, ext = os.path.splitext(file.name)
    name = '%s%s' % (str(uuid4()), ext)
    file_path = '%s/%s' % (path, name)
    full_path = default_storage.save(file_path, ContentFile(file.read()))
    return file_path, full_path


@deconstructible
class UploadPath(object):
    """Deconstructible upload-path callable for Django FileField."""

    def __init__(self, *args, **kwargs):
        self.sub_path = kwargs['sub_path']
        self.field = kwargs['field_name']

    def __call__(self, instance, file_name):
        # Currently returns the name unchanged; see the commented format
        # below for the originally intended layout.
        return file_name
        # return path.format(instance.user.username, self.sub_path, filename)


def lang_get():
    """Return the bare language code, e.g. 'ru' from 'ru-RU'."""
    return get_language().split('-')[0]


def slugify_ru(str):
    """Transliterate (via the `trans` codec) and slugify a Russian string.

    NOTE(review): the parameter shadows the builtin `str`.
    """
    try:
        return slugify(str.encode('trans'))
    except Exception, e:
        return slugify(unicode(str).encode('trans'))
|
# coding=utf-8
from django.http import HttpResponseRedirect
from django.core.context_processors import csrf
from django.shortcuts import render_to_response, HttpResponse
from django.core.urlresolvers import reverse
from django.core.files.storage import FileSystemStorage
from models import *
from forms import *
from xlsgenerator import generate_stats_xls
import os, re
from PIL import Image
# Static site navigation: a list of menu entries, each with an 'href',
# a display 'text', and an optional 'children' list of sub-entries.
# Consumed by MainMenuGenerator below.
mymenu = [{"href": '/',
           "text": u"Главная"
           },
          {"href": "#",
           "text": u"Меню",
           'children': [{"href": "/static/menu.pdf", "text": u"Меню блюд"},
                        {"href": "/static/bar.pdf", "text": u"Барная карта"},
                        {"href": "/business", "text": u"Меню бизнес-ланча"}]
           },
          {"href": "/contacts",
           "text": u"Контакты"
           },
          {"href": "/vacancies",
           "text": u"Вакансии"
           },
          {"href": "/menu",
           "text": u"Доставка",
           'children': [{"href": "/delivery", "text": u"Условия доставки"},
                        {"href": "/menu", "text": u"Доставка блюд меню"},
                        {"href": "/business+", "text": u"Доставка бизнес-ланча"}
                        ]
           }]
class TreeNode():
    """A tree node holding a value and an ordered list of child nodes."""

    def __init__(self, val):
        self.val = val
        self.children = []

    def addChild(self, val):
        """Append a new child node wrapping `val` and return it."""
        child = TreeNode(val)
        self.children.append(child)
        return child
class Tree():
    """Thin wrapper around a root TreeNode."""

    def __init__(self, val, showRoot = False):
        # showRoot is accepted for API compatibility but is never used.
        self.root = TreeNode(val)

    def getRoot(self):
        """Return the root node."""
        return self.root

    def addChild(self, val):
        """Attach a child directly under the root and return it."""
        root = self.getRoot()
        return root.addChild(val)
class MainMenuGenerator ():
    """Renders a nested menu description (see ``mymenu``) as an HTML list.

    The menu is copied into a Tree of {'href', 'text'} dicts and rendered
    recursively as <ul id="jsddm">/<li> markup.
    """
    def __init__ (self, menulst = None):
        self.tree = Tree({}, False)
        if not menulst is None:
            self.populate_menu_with(menulst)

    def render_menu(self, node = None, level = 0):
        """Recursively render *node* (default: the whole tree) to HTML."""
        if node is None:
            node = self.tree.getRoot()
        children = "".join([self.render_menu(n, level + 1) for n in node.children ])
        if level != 0:
            return ('<li >' +
                    '<a href="' + node.val['href'] + '">' +
                    node.val['text'] + '</a>' +
                    ('<ul>' + children + '</ul>' if children else '') +
                    '</li>')
        else:
            # Level 0 is the artificial root: emit only the outer <ul>.
            return '<ul id = "jsddm">' + children + "</ul>"

    def populate_menu_with(self, menulst, node = None):
        """Copy the list-of-dicts menu description into the tree."""
        if node is None:
            node = self.tree.getRoot()
        for m in menulst:
            nodeNext = node.addChild ({'href' : m['href'], 'text' : m ['text']})
            # 'in' replaces dict.has_key(), which was removed in Python 3;
            # the behaviour is identical on Python 2.
            if 'children' in m:
                self.populate_menu_with(m['children'], nodeNext)
def populate_template_values (request, url, rtype, atype = u"Акция"):
    """Build the common template context shared by every page view.

    rtype selects the main-column Text records, atype the sidebar ones.
    Includes the CSRF token, rendered menu HTML and delivery settings.
    """
    template_values = {}
    template_values.update(csrf(request))
    template_values['menu_generator'] = MainMenuGenerator (mymenu)
    # Delivery-related values pulled from the Setting key/value store.
    template_values['bl_discount'] = Setting.objects.get(key='bl_discount').value
    template_values['menu_discount'] = Setting.objects.get(key='menu_discount').value
    template_values['bl_min_order'] = Setting.objects.get(key='bl_min_order').value
    template_values['menu_min_order'] = Setting.objects.get(key='menu_min_order').value
    template_values['bl_lunch_box'] = Setting.objects.get(key='bl_lunch_box').value
    template_values['menu_lunch_box'] = Setting.objects.get(key='menu_lunch_box').value
    template_values['bl_delivery'] = Setting.objects.get(key='bl_delivery').value
    template_values['menu_delivery'] = Setting.objects.get(key='menu_delivery').value
    template_values["req_url"] = url
    # Text records for the main column, sidebar and the two top slots.
    template_values['records'] = Text.objects.filter (rtype = rtype).order_by('order')
    template_values['siderecords'] = Text.objects.filter (rtype = atype).order_by('order')
    template_values['topleftrecords'] = Text.objects.filter (rtype = u"Сверху слева").order_by('order')
    template_values['toprightrecords'] = Text.objects.filter (rtype = u"Сверху справа").order_by('order')
    # NOTE(review): a second MainMenuGenerator is built here although
    # 'menu_generator' above already holds one — presumably the templates
    # only use the pre-rendered 'menu_generated' string; verify.
    menu_generator = MainMenuGenerator(mymenu)
    template_values['menu_generated'] = menu_generator.render_menu()
    return template_values
def MainPage (request):
    """Render the landing page with the "О нас" sidebar records."""
    values = populate_template_values (request, "main", u"О нас")
    return render_to_response ("index.html", values)
def MainPageRedirect (request):
    """Send any aliased URL back to the site root."""
    return HttpResponseRedirect ("/")
def Contacts (request):
    """Render the contacts page with the map sidebar records."""
    values = populate_template_values (request, "contacts", u"Карта", u'Контакты')
    return render_to_response ("base.html", values)
def Vacancies (request):
    """Render the vacancies page."""
    values = populate_template_values (request, "vacancies", u"Вакансии")
    return render_to_response ("base.html", values)
def new_price_print_or_update(price_delta, update = False):
    """Preview (and optionally apply) a percentage change to all menu prices.

    price_delta -- percent delta, e.g. 10 raises every price by 10 percent.
    update -- when True, the recalculated prices are saved to the database.
    Returns a UTF-8 encoded plain-text report grouped by menu category,
    listing "code. name - new (old)" per dish.
    """
    txt_file_content = ""
    for menucat in MenuCat.objects.all().order_by('order'):
        # Skip categories that contain no dishes.
        if MenuEntry.objects.filter (menucat = menucat).count():
            txt_file_content += "=== " + menucat.name.encode('utf-8') + " ===" + "\n"
            menuentries = MenuEntry.objects.filter (menucat = menucat).order_by('order')
            for menuentry in menuentries:
                old_price = menuentry.price
                # Round to the nearest whole currency unit.
                new_price = int(round(old_price * (100.0 + price_delta) / 100.0))
                if update:
                    menuentry.price = new_price
                    menuentry.save()
                txt_file_content += menuentry.code.encode('utf-8') + ". " + menuentry.name.encode('utf-8') + " - " + str(new_price) + " (" + str(old_price) + ")" + "\n"
    return txt_file_content
def PriceIncrease(request):
    """Admin-only POST hook: bump every menu price by ``price_percent`` %."""
    allowed = request.user and request.user.is_staff and request.method == 'POST'
    if allowed:
        delta = int(request.POST['price_percent'])
        new_price_print_or_update(delta, True)
    return HttpResponseRedirect(reverse('admin:index'))
def xls_to_response(xls, fname):
    """Stream a generated Excel workbook as an attachment named *fname*."""
    disposition = 'attachment; filename=%s' % fname
    response = HttpResponse(mimetype="application/ms-excel")
    response['Content-Disposition'] = disposition
    # The workbook writes its bytes straight into the response object.
    xls.save(response)
    return response
def StatsXls (request):
    """Admin-only: download per-dish / per-category order counts as .xls.

    NOTE(review): falls through and returns None (an error in Django) for
    non-staff or non-GET requests — presumably only reachable from admin
    URLs; verify the URL wiring.
    """
    if request.user and request.user.is_staff and request.method == 'GET':
        orderitems = OrderItem.objects.all()
        lines = []
        items_dict = {}  # MenuEntry -> number of times it was ordered
        cats_dict = {}   # MenuCat -> number of ordered items in it
        for orderitem in orderitems:
            if orderitem.item:
                if not orderitem.item in items_dict:
                    items_dict[orderitem.item] = 0
                if not orderitem.item.menucat in cats_dict:
                    cats_dict[orderitem.item.menucat] = 0
                items_dict[orderitem.item] += 1
                cats_dict[orderitem.item.menucat] += 1
        # One row per category followed by one row per dish in it.
        for menucat in MenuCat.objects.all().order_by('order') :
            if menucat.is_empty():
                continue
            lines.append ([u'', menucat.name , (cats_dict[menucat] if menucat in cats_dict else 0) ])
            for menuentry in MenuEntry.objects.filter (menucat = menucat).order_by('order'):
                lines.append ([menuentry.code, menuentry.name, (items_dict[menuentry] if menuentry in items_dict else 0)])
        xls = generate_stats_xls (lines)
        return xls_to_response(xls,'stats.xls')
def NewPricesTxt (request, price_delta_percent):
    """Admin-only preview of menu prices after a percentage change (no save)."""
    if request.user and request.user.is_staff and request.method == 'GET':
        report = new_price_print_or_update(int(price_delta_percent), False)
        return HttpResponse (report, content_type="text/plain; charset=utf-8")
    return HttpResponseRedirect(reverse('admin:index'))
def Upload (request):
    """Admin-only upload endpoint for menu pictures, gallery photos and PDFs.

    Expects POST with exactly one file in 'file' and a 'pictype' field of
    'menupic' / 'gallerypic' / 'menupdf'.  The file is validated and, when
    accepted, written under the matching static directory.
    """
    location = None  # stays None unless the upload passes validation
    basepath = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    if request.user and request.user.is_staff and request.method == 'POST':
        if len (request.FILES) == 1 and 'pictype' in request.POST:
            request_file = request.FILES['file']
            name = request_file.name.lower()
            pictype = request.POST['pictype']
            try:
                if pictype == "menupic":
                    # Dish picture: name like 'a1.png' .. 'z99.png'; a
                    # 128x128 '*.thumbnail.png' is generated alongside.
                    correct_name = (re.match ('^[a-z]\d{1,2}$', name.split(".")[0])) and (name.split(".")[1] == "png")
                    stream = request_file.file
                    image = Image.open(stream)
                    if correct_name: # ignore image size
                        location = "static/pics"
                        name = name.split(".")[0].upper() + "." + name.split(".")[1]
                        image.thumbnail((128, 128), Image.ANTIALIAS)
                        outfile = os.path.join(basepath, location, name.split(".")[0] + ".thumbnail." + name.split(".")[1])
                        image.save(outfile, "PNG")
                elif pictype == "gallerypic":
                    # Gallery photo: '<1..19>.jpg', exactly 510 px high and
                    # 680-770 px wide.
                    correct_name = (0 < int(name.split(".")[0]) < 20) and (name.split(".")[1] == "jpg")
                    stream = request_file.file
                    image = Image.open(stream)
                    size_x, size_y = image.size
                    if correct_name and size_y == 510 and (680 <= size_x <= 770 ):
                        location = "static/photogallery"
                elif pictype == "menupdf":
                    # Only the two fixed menu PDFs may be replaced.
                    if name == "bar.pdf" or name == "menu.pdf":
                        location = "static/"
            except Exception as inst:
                # Any parsing/validation error silently rejects the upload.
                pass
            if not location is None:
                with open(os.path.join(basepath, location, name), 'wb+') as destination:
                    for chunk in request_file.chunks():
                        destination.write(chunk)
    if location is None:
        pass#print "Failure!"
    return HttpResponseRedirect(reverse('admin:index'))
def DeliverySuccess (request):
    """Thank-you page shown after a delivery order has been placed."""
    values = populate_template_values (request, "delivery", u"Доставка")
    return render_to_response ("delivery_success.html", values)
def Delivery (request):
    """Delivery page: shows the order form and creates an order on POST.

    Order items arrive in 'order_contents' as comma-separated
    "id_or_code:quantity" pairs; codes starting with 'bl' denote
    business-lunch positions, anything else is a MenuEntry primary key.
    """
    template_values = populate_template_values (request, "delivery", u"Доставка")
    template_values['order_created'] = False
    if request.method != 'POST':
        template_values['order_form'] = OrderForm()
    else:
        form = OrderForm(request.POST) # A form bound to the POST data
        if not form.is_valid():
            # give it one more chance
            # (retry validation with an empty apartment number)
            data = request.POST.copy()
            data['appartment_no'] = u''
            form = OrderForm(data=data)
        if form.is_valid():
            order_contents = form.cleaned_data['order_contents'].split(",")
            order = form.save()
            for item_quantity in order_contents:
                item_or_code = item_quantity.split(":")[0]
                if item_or_code.startswith ('bl'):
                    code = item_or_code
                    item = None
                else:
                    item = MenuEntry.objects.get(pk = int(item_or_code))
                    code = None
                quantity = int(item_quantity.split(":")[1])
                order_item = OrderItem(item = item, order = order, quantity = quantity, code = code)
                order_item.save()
            order.processed = False
            order.save()
            # Keep an immutable record of the order total for statistics.
            order_history = OrderHistory( original_order = order,
                                          created = order.created,
                                          money = order.calc_order_total())
            order_history.save()
            return HttpResponseRedirect ("delivery_success")
        else:
            template_values['order_creation_failed'] = True
    return render_to_response ("delivery.html", template_values)
def Menu (request, submenu_index):
    """Menu page for the category whose order == submenu_index (default 0).

    An index of -1 redirects to the business-lunch delivery page.
    NOTE(review): 'submeny_index' is a misspelling of submenu_index, kept
    unchanged in this documentation-only pass.
    """
    submeny_index = 0 if (submenu_index is None) else int(submenu_index)
    if submeny_index == -1:
        return HttpResponseRedirect ("/business+")
    menucats = MenuCat.objects.all().order_by('order')
    cur_menucat = MenuCat.objects.filter(order = submeny_index).get()
    cur_menuentries = MenuEntry.objects.filter (menucat = cur_menucat).order_by('order')
    template_values = {"menucats":menucats, "cur_menucat":cur_menucat , "entries": cur_menuentries}
    template_values.update(populate_template_values(request, "menu", ""))
    template_values.update ({"show_bl": True})
    template_values['preorder'] = True
    # Delivery settings shown next to the order box.
    template_values['bl_min_order'] = Setting.objects.get(key='bl_min_order').value
    template_values['menu_min_order'] = Setting.objects.get(key='menu_min_order').value
    template_values['bl_lunch_box'] = Setting.objects.get(key='bl_lunch_box').value
    template_values['bl_delivery'] = Setting.objects.get(key='bl_delivery').value
    template_values['menu_lunch_box'] = Setting.objects.get(key='menu_lunch_box').value
    template_values['menu_delivery'] = Setting.objects.get(key='menu_delivery').value
    return render_to_response ("menu.html", template_values)
def BusinessLunch (request, delivery):
    """Business-lunch page; with *delivery* truthy, shows the order box.

    The current lunch variant comes from Setting 'cur_bl_variant';
    entries with variant == 2 are displayed for every variant.
    """
    delivery = (False if delivery is None else True)
    preorder = (True if delivery else False)
    template_values = populate_template_values (request, "business", u"Бизнес-ланч")
    # The first two text records describe the in-house lunch, the rest
    # describe delivery.
    if delivery:
        template_values['records'] = template_values['records'][2:]
    else:
        template_values['records'] = template_values['records'][:2]
    cat_entry_dicts = []
    blvariantno = Setting.objects.get (key = 'cur_bl_variant').value
    blvariantno = int(blvariantno)
    cats = BLMenuCat.objects.all().order_by ('order')
    if (delivery):
        # Drinks are not available for delivery.
        cats = cats.exclude (name = u"Напитки")
    for cat in cats:
        entries = BLMenuEntry.objects.filter(blmenucat = cat).filter(Q(variant = blvariantno) | Q(variant = 2) ).order_by ('order')
        cat_entry_dicts.append ({"cat":cat, "entries":entries})
    template_values ["cat_entry_dicts"] = cat_entry_dicts
    template_values ['preorder'] = preorder
    template_values ['show_order_box'] = delivery
    template_values ['bl_cats'] = [cat.name_singular for cat in cats]
    # Four lunch price tiers from the settings store.
    template_values ['blprice1'] = Setting.objects.get (key = 'blprice1').value
    template_values ['blprice2'] = Setting.objects.get (key = 'blprice2').value
    template_values ['blprice3'] = Setting.objects.get (key = 'blprice3').value
    template_values ['blprice4'] = Setting.objects.get (key = 'blprice4').value
    template_values ['delivery'] = delivery
    return render_to_response ("business.html", template_values)
|
# Compare Algorithms
import pandas as pd
import matplotlib.pyplot as plt
from sklearn import model_selection
from sklearn.ensemble import AdaBoostClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
#from sklearn import preprocessing
from sklearn.neural_network import MLPClassifier
# Compare several classifiers on the bank-marketing dataset with 10-fold CV.
# Column names (last column 'Y' is the target).
features = ["age", "job", "marital", "education", "default", "housing", "loan", "contact", "month", "day_of_week", "duration", "campaign", "pdays", "previous", "poutcome", "emp.var.rate", "cons.price.idx", "cons.conf.idx", "euribor3m", "nr.employed", "Y"]
# Read the file (the CSV has no header row; names are supplied above).
df = pd.read_csv("bank.csv", header=None, names=features)
# NOTE(review): string columns of the raw bank dataset must be label-encoded
# before fitting (sklearn.preprocessing.LabelEncoder) — this script assumes
# the CSV is already numeric.
X_df = df[features[:-1]]
Y_df = df['Y']
# prepare configuration for cross validation test harness
seed = 0
# prepare models
models = [
    ('Decision Tree', DecisionTreeClassifier()),
    ('ADA Boost', AdaBoostClassifier()),
    ('Neural Network', MLPClassifier()),
    ('KNN', KNeighborsClassifier()),
    ('SVM', SVC()),
]
# evaluate each model in turn
results = []
names = []
scoring = 'accuracy'
for name, model in models:
    # shuffle=True is required when random_state is given (scikit-learn
    # >= 0.24 raises ValueError otherwise) and makes the seeded folds
    # actually reproducible.
    kfold = model_selection.KFold(n_splits=10, shuffle=True, random_state=seed)
    cv_results = model_selection.cross_val_score(model, X_df, Y_df, cv=kfold, scoring=scoring)
    results.append(cv_results)
    names.append(name)
    msg = "%s: %f (%f)" % (name, cv_results.mean(), cv_results.std())
    print(msg)
# boxplot algorithm comparison
fig = plt.figure()
fig.suptitle('Algorithm Comparison')
ax = fig.add_subplot(111)
plt.boxplot(results)
plt.ylabel("Accuracy")
ax.set_xticklabels(names)
plt.show()
import pandas as pd
import datetime
import numpy as np
import six
## filter PDF->Excel file; select Equi from listed securities
def filter(filename):
    """Split sheet 2 of *filename* into (equities, GDR/GDS) DataFrames.

    A row counts as an equity when any cell contains "equi."
    (case-insensitive); it is classified as GDR/GDS when it additionally
    mentions "gdr" or "gds".
    NOTE(review): the name shadows the builtin ``filter``; kept so the
    call sites below continue to work.
    """
    bdt_df = pd.read_excel(filename, 1, header=None, index_col=None)
    # .to_numpy() replaces .as_matrix(), which was removed in pandas 1.0.
    bdt_df_mat = bdt_df.to_numpy()
    bdt_equi_list = []
    bdt_gdrs_list = []
    for d in bdt_df_mat:
        equi_flag = False
        gdrs_flag = False
        for s in d:
            # isinstance(s, str) replaces six.string_types (Python 3 only,
            # which modern pandas requires anyway).
            if isinstance(s, str):
                lowered = s.lower()
                if "equi." in lowered:
                    equi_flag = True
                if ("gdr" in lowered) or ("gds" in lowered):
                    gdrs_flag = True
        if equi_flag:
            if gdrs_flag:
                bdt_gdrs_list.append(d)
            else:
                bdt_equi_list.append(d)
    return pd.DataFrame(bdt_equi_list), pd.DataFrame(bdt_gdrs_list)
# Split both source workbooks into equity and GDR/GDS row sets.
bdt_equi_list, bdt_gdrs_list = filter('Bdt.xlsx')
mf_equi_list, mf_gdrs_list = filter('EuroMF.xlsx')
# bdt_equi_list.to_csv('bdt_equi_list.csv')
# bdt_gdrs_list.to_csv('bdt_gdrs_list.csv')
# mf_equi_list.to_csv('mf_equi_list.csv')
# mf_gdrs_list.to_csv('mf_gdrs_list.csv')
## reformat: merge multiple name columns into one
def reformat(raw_mat):
    """Collapse the name columns of a filtered row matrix into one column.

    raw_mat -- DataFrame whose rows look like
        [isin, name part(s)..., "Equi..." marker, trailing string/date cells]
    Returns a 7-column DataFrame: [isin, joined name, up to 5 trailing
    string fields]; dates are rendered as 'dd/mm/YYYY'.  Note: the trailing
    fields start at the "Equi." marker cell itself, matching the original
    slicing behaviour.
    """
    # .to_numpy() replaces .as_matrix() (removed in pandas 1.0).
    raw_mat = raw_mat.to_numpy()
    # Plain ``object``: the np.object alias was removed in NumPy 1.24.
    ref_mat = np.empty((raw_mat.shape[0], 7), dtype=object)
    for i, r in enumerate(raw_mat):
        ref_mat[i, 0] = r[0]
        name_str = ''
        for j, c in enumerate(r[1:]):
            if isinstance(c, str):
                if "equi." in c.lower():
                    break
                else:
                    name_str += " " + c
        ref_mat[i, 1] = name_str
        counter = 0
        # r[j+1:] starts at the cell where the "equi." marker was found.
        for s in r[j + 1:]:
            if isinstance(s, datetime.datetime):
                s = s.strftime('%d/%m/%Y')
            if isinstance(s, str):
                ref_mat[i, 2 + counter] = s
                counter += 1
    return pd.DataFrame(ref_mat)
# Reformat all four row sets and write them under equity_filtered_list/.
mat_list = [bdt_equi_list, bdt_gdrs_list, mf_equi_list, mf_gdrs_list]
reformated_list =[reformat(i) for i in mat_list]
filename_str = ['Bdt_equi_list.csv', 'Bdt_gdrs_list.csv', 'EuroMF_equi_list.csv','EuroMF_gdrs_list.csv']
[i.to_csv("equity_filtered_list/"+j) for i,j in zip(reformated_list, filename_str)]
## compare trth & filtered result
# NOTE(review): .as_matrix() below was removed in pandas 1.0 — these calls
# need .to_numpy() to run on a modern pandas.
trth_list = pd.read_csv('trth_equity_speedguide.csv', header=None, index_col=None).as_matrix()[1:,:]
isin_list = trth_list[:,1].tolist()
official_list = reformated_list[0].as_matrix()[:,0].tolist() + reformated_list[2].as_matrix()[:,0].tolist()
# TRTH ISINs may be chained with '->'; keep only the newest (last) code.
isin_list = [i.split("->")[-1] if "->" in i else i for i in isin_list]
wrong_isin=list()
# Set differences between the TRTH codes and the official lists.
not_in_official = set(isin_list) - set(official_list)
not_in_trth = set(official_list) - set(isin_list)
# write not in trth but in official into csv
bdt_mat = reformated_list[0].as_matrix()
mf_mat = reformated_list[2].as_matrix()
bdt_not_list = list()
mf_not_list = list()
for i in not_in_trth:
    for j in bdt_mat:
        if i==j[0]:
            bdt_not_list.append(j)
    for j in mf_mat:
        if i==j[0]:
            mf_not_list.append(j)
pd.DataFrame(bdt_not_list).to_csv("./equity_filtered_list/bdt_not_in_trth.csv")
pd.DataFrame(mf_not_list).to_csv("./equity_filtered_list/mf_not_in_trth.csv")
# write not in official but in trth to csv
trth_not_list=list()
for i in not_in_official:
    for j in trth_list:
        if i==j[1]:
            trth_not_list.append(j)
pd.DataFrame(trth_not_list).to_csv("./equity_filtered_list/trth_not_in_official.csv")
def save_csv(list_var, name):
    """Dump *list_var* to a CSV file at *name* via a pandas DataFrame."""
    frame = pd.DataFrame(list_var)
    frame.to_csv(name)
# invalid list
# Compare the official ISIN list against what was actually exported from
# TRTH and record the ISINs that could not be resolved.
exported_trth_list = pd.read_csv('trth_exported_1.csv', header=None, index_col=None).as_matrix()[1:,:]
exported_trth_mat = exported_trth_list[:,1]
# Chained codes ('a->b') collapse to the last (newest) code.
exported_trth_filtered_list = [i.split("->")[-1] if (isinstance(i, six.string_types)) and ("->" in i) else i for i in exported_trth_mat]
invalid_isin = set(official_list) - set(exported_trth_filtered_list)
invalid_isin = list(invalid_isin)
save_csv(exported_trth_mat, 'invalid_exported_mat.csv')
save_csv(official_list, 'official_list.csv')
save_csv(invalid_isin, 'invalid_isin.csv')
# generate ric lists
ric_features_list = []
for i in official_list:
    flag = False
    for j in exported_trth_list:
        if isinstance(j[1], six.string_types):
            if i in j[1]:
                ric_features_list.append(j)
                flag = True
    if not flag:
        # ISINs with no matching TRTH row are only reported to stdout.
        print(i)
save_csv(ric_features_list, "ric_features_list.csv")
a = np.array(ric_features_list)
b = set(a[:,1])
|
r"""PyTorch Detection Training.
To run in a multi-gpu environment, use the distributed launcher::
python -m torch.distributed.launch --nproc_per_node=$NGPU --use_env \
train.py ... --world-size $NGPU
The default hyperparameters are tuned for training on 8 gpus and 2 images per gpu.
--lr 0.02 --batch-size 2 --world-size 8
If you use different number of gpus, the learning rate should be changed to 0.02/8*$NGPU.
On top of that, for training Faster/Mask R-CNN, the default hyperparameters are
--epochs 26 --lr-steps 16 22 --aspect-ratio-group-factor 3
Also, if you train Keypoint R-CNN, the default hyperparameters are
--epochs 46 --lr-steps 36 43 --aspect-ratio-group-factor 3
Because the number of images is smaller in the person keypoint subset of COCO,
the number of epochs should be adapted so that we have the same number of iterations.
"""
import datetime
import os
import time
import presets
import torch
import torch.utils.data
import torchvision
import torchvision.models.detection
import torchvision.models.detection.mask_rcnn
import utils
from coco_utils import get_coco
from engine import evaluate, train_one_epoch
from group_by_aspect_ratio import create_aspect_ratio_groups, GroupedBatchSampler
from torchvision.transforms import InterpolationMode
from transforms import SimpleCopyPaste
def copypaste_collate_fn(batch):
    """Collate a batch and then apply SimpleCopyPaste blending to it."""
    paste = SimpleCopyPaste(blending=True, resize_interpolation=InterpolationMode.BILINEAR)
    collated = utils.collate_fn(batch)
    return paste(*collated)
def get_dataset(is_train, args):
    """Build the COCO (or COCO-keypoints) split and return (dataset, num_classes)."""
    dataset_specs = {"coco": (91, "instances"), "coco_kp": (2, "person_keypoints")}
    num_classes, mode = dataset_specs[args.dataset]
    image_set = "train" if is_train else "val"
    ds = get_coco(
        root=args.data_path,
        image_set=image_set,
        transforms=get_transform(is_train, args),
        mode=mode,
        use_v2=args.use_v2,
        with_masks="mask" in args.model,
    )
    return ds, num_classes
def get_transform(is_train, args):
    """Pick the image/target transform pipeline for the current run mode."""
    if is_train:
        return presets.DetectionPresetTrain(
            data_augmentation=args.data_augmentation, backend=args.backend, use_v2=args.use_v2
        )
    if args.weights and args.test_only:
        # Evaluating released weights: use the transforms bundled with them.
        weights = torchvision.models.get_weight(args.weights)
        trans = weights.transforms()
        return lambda img, target: (trans(img), target)
    return presets.DetectionPresetEval(backend=args.backend, use_v2=args.use_v2)
def get_args_parser(add_help=True):
    """Build the argparse parser holding every training/evaluation option.

    add_help -- forwarded to ArgumentParser (disable when composing parsers).
    Returns the configured argparse.ArgumentParser.
    """
    import argparse
    parser = argparse.ArgumentParser(description="PyTorch Detection Training", add_help=add_help)
    parser.add_argument("--data-path", default="/datasets01/COCO/022719/", type=str, help="dataset path")
    parser.add_argument(
        "--dataset",
        default="coco",
        type=str,
        help="dataset name. Use coco for object detection and instance segmentation and coco_kp for Keypoint detection",
    )
    parser.add_argument("--model", default="maskrcnn_resnet50_fpn", type=str, help="model name")
    parser.add_argument("--device", default="cuda", type=str, help="device (Use cuda or cpu Default: cuda)")
    parser.add_argument(
        "-b", "--batch-size", default=2, type=int, help="images per gpu, the total batch size is $NGPU x batch_size"
    )
    parser.add_argument("--epochs", default=26, type=int, metavar="N", help="number of total epochs to run")
    parser.add_argument(
        "-j", "--workers", default=4, type=int, metavar="N", help="number of data loading workers (default: 4)"
    )
    parser.add_argument("--opt", default="sgd", type=str, help="optimizer")
    parser.add_argument(
        "--lr",
        default=0.02,
        type=float,
        help="initial learning rate, 0.02 is the default value for training on 8 gpus and 2 images_per_gpu",
    )
    parser.add_argument("--momentum", default=0.9, type=float, metavar="M", help="momentum")
    parser.add_argument(
        "--wd",
        "--weight-decay",
        default=1e-4,
        type=float,
        metavar="W",
        help="weight decay (default: 1e-4)",
        dest="weight_decay",
    )
    parser.add_argument(
        "--norm-weight-decay",
        default=None,
        type=float,
        help="weight decay for Normalization layers (default: None, same value as --wd)",
    )
    # Learning-rate schedule options.
    parser.add_argument(
        "--lr-scheduler", default="multisteplr", type=str, help="name of lr scheduler (default: multisteplr)"
    )
    parser.add_argument(
        "--lr-step-size", default=8, type=int, help="decrease lr every step-size epochs (multisteplr scheduler only)"
    )
    parser.add_argument(
        "--lr-steps",
        default=[16, 22],
        nargs="+",
        type=int,
        help="decrease lr every step-size epochs (multisteplr scheduler only)",
    )
    parser.add_argument(
        "--lr-gamma", default=0.1, type=float, help="decrease lr by a factor of lr-gamma (multisteplr scheduler only)"
    )
    parser.add_argument("--print-freq", default=20, type=int, help="print frequency")
    parser.add_argument("--output-dir", default=".", type=str, help="path to save outputs")
    parser.add_argument("--resume", default="", type=str, help="path of checkpoint")
    parser.add_argument("--start_epoch", default=0, type=int, help="start epoch")
    parser.add_argument("--aspect-ratio-group-factor", default=3, type=int)
    parser.add_argument("--rpn-score-thresh", default=None, type=float, help="rpn score threshold for faster-rcnn")
    parser.add_argument(
        "--trainable-backbone-layers", default=None, type=int, help="number of trainable layers of backbone"
    )
    parser.add_argument(
        "--data-augmentation", default="hflip", type=str, help="data augmentation policy (default: hflip)"
    )
    parser.add_argument(
        "--sync-bn",
        dest="sync_bn",
        help="Use sync batch norm",
        action="store_true",
    )
    parser.add_argument(
        "--test-only",
        dest="test_only",
        help="Only test the model",
        action="store_true",
    )
    parser.add_argument(
        "--use-deterministic-algorithms", action="store_true", help="Forces the use of deterministic algorithms only."
    )
    # distributed training parameters
    parser.add_argument("--world-size", default=1, type=int, help="number of distributed processes")
    parser.add_argument("--dist-url", default="env://", type=str, help="url used to set up distributed training")
    parser.add_argument("--weights", default=None, type=str, help="the weights enum name to load")
    parser.add_argument("--weights-backbone", default=None, type=str, help="the backbone weights enum name to load")
    # Mixed precision training parameters
    parser.add_argument("--amp", action="store_true", help="Use torch.cuda.amp for mixed precision training")
    # Use CopyPaste augmentation training parameter
    parser.add_argument(
        "--use-copypaste",
        action="store_true",
        help="Use CopyPaste data augmentation. Works only with data-augmentation='lsj'.",
    )
    parser.add_argument("--backend", default="PIL", type=str.lower, help="PIL or tensor - case insensitive")
    parser.add_argument("--use-v2", action="store_true", help="Use V2 transforms")
    return parser
def main(args):
    """Run the full detection training / evaluation pipeline for *args*.

    Validates the CLI combination, builds datasets/loaders/model/optimizer,
    optionally resumes from a checkpoint, then either evaluates once
    (--test-only) or trains for args.epochs epochs, checkpointing and
    evaluating after each epoch.
    """
    # --- argument sanity checks ---
    if args.backend.lower() == "tv_tensor" and not args.use_v2:
        raise ValueError("Use --use-v2 if you want to use the tv_tensor backend.")
    if args.dataset not in ("coco", "coco_kp"):
        raise ValueError(f"Dataset should be coco or coco_kp, got {args.dataset}")
    if "keypoint" in args.model and args.dataset != "coco_kp":
        raise ValueError("Oops, if you want Keypoint detection, set --dataset coco_kp")
    if args.dataset == "coco_kp" and args.use_v2:
        raise ValueError("KeyPoint detection doesn't support V2 transforms yet")
    if args.output_dir:
        utils.mkdir(args.output_dir)
    utils.init_distributed_mode(args)
    print(args)
    device = torch.device(args.device)
    if args.use_deterministic_algorithms:
        torch.use_deterministic_algorithms(True)
    # Data loading code
    print("Loading data")
    dataset, num_classes = get_dataset(is_train=True, args=args)
    dataset_test, _ = get_dataset(is_train=False, args=args)
    print("Creating data loaders")
    if args.distributed:
        train_sampler = torch.utils.data.distributed.DistributedSampler(dataset)
        test_sampler = torch.utils.data.distributed.DistributedSampler(dataset_test, shuffle=False)
    else:
        train_sampler = torch.utils.data.RandomSampler(dataset)
        test_sampler = torch.utils.data.SequentialSampler(dataset_test)
    # Group images of similar aspect ratio into the same batches when enabled.
    if args.aspect_ratio_group_factor >= 0:
        group_ids = create_aspect_ratio_groups(dataset, k=args.aspect_ratio_group_factor)
        train_batch_sampler = GroupedBatchSampler(train_sampler, group_ids, args.batch_size)
    else:
        train_batch_sampler = torch.utils.data.BatchSampler(train_sampler, args.batch_size, drop_last=True)
    train_collate_fn = utils.collate_fn
    if args.use_copypaste:
        if args.data_augmentation != "lsj":
            raise RuntimeError("SimpleCopyPaste algorithm currently only supports the 'lsj' data augmentation policies")
        train_collate_fn = copypaste_collate_fn
    data_loader = torch.utils.data.DataLoader(
        dataset, batch_sampler=train_batch_sampler, num_workers=args.workers, collate_fn=train_collate_fn
    )
    data_loader_test = torch.utils.data.DataLoader(
        dataset_test, batch_size=1, sampler=test_sampler, num_workers=args.workers, collate_fn=utils.collate_fn
    )
    print("Creating model")
    kwargs = {"trainable_backbone_layers": args.trainable_backbone_layers}
    if args.data_augmentation in ["multiscale", "lsj"]:
        kwargs["_skip_resize"] = True
    if "rcnn" in args.model:
        if args.rpn_score_thresh is not None:
            kwargs["rpn_score_thresh"] = args.rpn_score_thresh
    model = torchvision.models.get_model(
        args.model, weights=args.weights, weights_backbone=args.weights_backbone, num_classes=num_classes, **kwargs
    )
    model.to(device)
    if args.distributed and args.sync_bn:
        model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
    model_without_ddp = model
    if args.distributed:
        model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
        model_without_ddp = model.module
    # Optionally give normalization layers their own weight-decay value.
    if args.norm_weight_decay is None:
        parameters = [p for p in model.parameters() if p.requires_grad]
    else:
        param_groups = torchvision.ops._utils.split_normalization_params(model)
        wd_groups = [args.norm_weight_decay, args.weight_decay]
        parameters = [{"params": p, "weight_decay": w} for p, w in zip(param_groups, wd_groups) if p]
    opt_name = args.opt.lower()
    if opt_name.startswith("sgd"):
        optimizer = torch.optim.SGD(
            parameters,
            lr=args.lr,
            momentum=args.momentum,
            weight_decay=args.weight_decay,
            nesterov="nesterov" in opt_name,
        )
    elif opt_name == "adamw":
        optimizer = torch.optim.AdamW(parameters, lr=args.lr, weight_decay=args.weight_decay)
    else:
        raise RuntimeError(f"Invalid optimizer {args.opt}. Only SGD and AdamW are supported.")
    scaler = torch.cuda.amp.GradScaler() if args.amp else None
    args.lr_scheduler = args.lr_scheduler.lower()
    if args.lr_scheduler == "multisteplr":
        lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=args.lr_steps, gamma=args.lr_gamma)
    elif args.lr_scheduler == "cosineannealinglr":
        lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=args.epochs)
    else:
        raise RuntimeError(
            f"Invalid lr scheduler '{args.lr_scheduler}'. Only MultiStepLR and CosineAnnealingLR are supported."
        )
    # Resume model/optimizer/scheduler (and AMP scaler) state from checkpoint.
    if args.resume:
        checkpoint = torch.load(args.resume, map_location="cpu")
        model_without_ddp.load_state_dict(checkpoint["model"])
        optimizer.load_state_dict(checkpoint["optimizer"])
        lr_scheduler.load_state_dict(checkpoint["lr_scheduler"])
        args.start_epoch = checkpoint["epoch"] + 1
        if args.amp:
            scaler.load_state_dict(checkpoint["scaler"])
    if args.test_only:
        # We enable the cudnn benchmarking-free path for reproducible eval.
        torch.backends.cudnn.deterministic = True
        evaluate(model, data_loader_test, device=device)
        return
    print("Start training")
    start_time = time.time()
    for epoch in range(args.start_epoch, args.epochs):
        if args.distributed:
            train_sampler.set_epoch(epoch)
        train_one_epoch(model, optimizer, data_loader, device, epoch, args.print_freq, scaler)
        lr_scheduler.step()
        if args.output_dir:
            checkpoint = {
                "model": model_without_ddp.state_dict(),
                "optimizer": optimizer.state_dict(),
                "lr_scheduler": lr_scheduler.state_dict(),
                "args": args,
                "epoch": epoch,
            }
            if args.amp:
                checkpoint["scaler"] = scaler.state_dict()
            # Keep a per-epoch snapshot plus a rolling "latest" checkpoint.
            utils.save_on_master(checkpoint, os.path.join(args.output_dir, f"model_{epoch}.pth"))
            utils.save_on_master(checkpoint, os.path.join(args.output_dir, "checkpoint.pth"))
        # evaluate after every epoch
        evaluate(model, data_loader_test, device=device)
    total_time = time.time() - start_time
    total_time_str = str(datetime.timedelta(seconds=int(total_time)))
    print(f"Training time {total_time_str}")
if __name__ == "__main__":
    # Script entry point: parse CLI arguments and launch training/eval.
    args = get_args_parser().parse_args()
    main(args)
|
# Implement the reducer for the task of computing the average time a user
# spends on a page.
# The mapper passes key / value records to the reducer, where key is the page
# address and value is the number of seconds the user spent on that page.
# Sample Input:
# www.facebook.com 100
# www.google.com 10
# www.google.com 5
# www.google.com 15
# stepic.org 60
# stepic.org 100
# Sample Output:
# www.facebook.com 100
# www.google.com 10
# stepic.org 80
import sys
prev = ''        # site whose records are currently being accumulated
time_spent = 0   # running total of seconds for ``prev``
people = 0       # number of records seen for ``prev``
# Input is assumed to be sorted by site, as guaranteed by the Hadoop
# streaming shuffle phase.
for line in sys.stdin:
    site, time = line.strip().split("\t")
    if prev == '':
        prev = site
    if site != prev:
        # Site changed: emit the average for the finished group ...
        print('%s\t%d' % (prev, time_spent/people))
        # ... and start accumulating the new group with this record.
        time_spent = int(time)
        people = 1
        prev = site
    else:
        time_spent += int(time)
        people += 1
# Emit the final group (skipped when there was no input at all).
if prev:
    print('%s\t%d' % (prev, time_spent/people))
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from test_adds import adjustement
# from model.project import Project
def test_add_project(app, db, json_projects):
    """Create a project through the UI and verify it appears in the SOAP list."""
    app.session.ensure_login("administrator", "root")
    app.project.open_create_page()
    # old_projects = db.get_project_list()
    old_projects = app.soap.get_project_list("administrator", "root")
    project = json_projects
    app.project.fill_project_data(name=project.name, status=project.status, inherit_categories=project.inherit_categories, view_status=project.view_status,
                                  description=project.description)
    app.project.submit_project()
    wait = WebDriverWait(app.wd, 10)
    wait.until(EC.invisibility_of_element_located((By.XPATH, "//a[contains(.,'Manage Users')]")))
    # APPLICATION ERROR #701 means a project with this name already exists.
    if len(app.wd.find_elements_by_xpath("//td[contains(.,'APPLICATION ERROR #701')]"))>0:
        raise Exception('ErrorExistAready', 'Project already exist')
    elif len(app.wd.find_elements_by_xpath("//td[contains(.,'APPLICATION ERROR #701')]"))==0:
        wait = WebDriverWait(app.wd, 10)
        wait.until(lambda d: d.find_element_by_link_text("Proceed"))
        app.wd.find_element_by_link_text("Proceed").click()
        wait.until(lambda d: d.find_element_by_xpath("//input[@value='Create New Project']"))
        # The UI collapses repeated spaces in the displayed project name.
        link_name = adjustement.clear_multiple_spaces(project.name.strip())
        assert app.wd.find_element_by_link_text("%s" % link_name)
        # new_projects = db.get_project_list()
        new_projects = app.soap.get_project_list("administrator", "root")
        assert len(old_projects) +1 == len(new_projects)
        old_projects.append(project)
        # assert sorted(old_projects, key=Project.get_name) == sorted(new_projects, key=Project.get_name)
        assert sorted(old_projects, key=lambda x: x.name) == sorted(new_projects, key=lambda x: x.name)
def test_add_existing_project(app, db):
    """Re-submitting an existing project name must fail with APPLICATION ERROR #701."""
    app.project.open_create_page()
    # old_projects = db.get_project_list()
    old_projects = app.soap.get_project_list("administrator", "root")
    # Pick any project that already exists in the database.
    project = db.get_one_project()
    app.project.fill_project_data(name=project.name, status=50, inherit_categories=False, view_status=50,
                                  description='To jest opis 1')
    app.project.submit_project()
    wait = WebDriverWait(app.wd, 10)
    # new_projects = db.get_project_list()
    new_projects = app.soap.get_project_list("administrator", "root")
    # The project count must be unchanged and the error page displayed.
    assert len(old_projects) == len(new_projects)
    wait.until(lambda d: d.find_element_by_xpath("//td[contains(.,'APPLICATION ERROR #701')]"))
    assert app.wd.find_element_by_xpath("//p[contains(.,'A project with that name already exists. Please go back and enter a different name.')]")
    assert old_projects == new_projects
|
from flask_wtf import FlaskForm as Form
from wtforms import TextField, TextAreaField, SubmitField, validators, ValidationError
# This module defines the contact form.
# It uses Flask-WTF and WTForms.
# Each field wanted on the contact form (name, email, subject, message) is declared below.
# Flask-WTF/WTForms ship with built-in validators.
class ContactForm(Form):
    """Contact form with presence validation on every field.

    Each validator carries the specific error message flashed when that
    field fails validation.
    """
    name = TextField("Name", [validators.Required("Please enter your name.")])
    # validators.Email() checks the user@example.com pattern. Bug fix: its
    # message previously duplicated the presence message, so a present-but-
    # invalid address showed a misleading "please enter your email" error.
    email = TextField("Email", [validators.Required("Please enter your email address."), validators.Email("Please enter a valid email address.")])
    subject = TextField("Subject", [validators.Required("Please enter a subject.")])
    message = TextAreaField("Message", [validators.Required("Please enter a message.")])
    submit = SubmitField("Send")
|
"""
Script that takes in dual fragment merged paired end reads of AAFC Diptera CO1 amplicons and removes degenerate primers
Script expects very specific conditions, and currently does not support other primers or amplicons.
Author: Jackson Eyres
Copyright: Government of Canada
License: MIT
"""
from Bio import SeqIO
import subprocess
import os
import argparse
def main():
    """Parse command-line options and launch the primer-trimming workflow."""
    arg_parser = argparse.ArgumentParser(description='Trims Diptera CO1 merged reads of degenerate primers')
    arg_parser.add_argument('-f', type=str, help='Merged Read file in .fq format', required=True)
    arg_parser.add_argument('-p', type=str, help='Primer File in .fasta format', required=True)
    arg_parser.add_argument('-o', type=str, help='Output file', required=True)
    arg_parser.add_argument('-dg', type=str, help='Path to degenerate oligo Script', default="pipeline_files/dg")
    options = arg_parser.parse_args()
    find_primers(options.f, options.p, options.o, options.dg)
def get_primers(file):
    """Load primers from a FASTA file, reverse-complementing every second record.

    The primer file is expected to alternate forward/reverse primers, so the
    even-numbered records (1-based) are reverse primers and are returned
    reverse-complemented to read in the forward orientation.  Biopython
    correctly reverse complements degenerate bases.

    :param file: path to the primer FASTA file
    :return: list of SeqRecord objects, all in forward orientation
    """
    primer_sequences = []
    with open(file) as f:
        # enumerate() replaces the hand-rolled counter; start=1 keeps the
        # original 1-based "every second record is a reverse primer" rule.
        for index, seq in enumerate(SeqIO.parse(f, "fasta"), start=1):
            if index % 2 == 0:
                primer_sequences.append(seq.reverse_complement())
            else:
                primer_sequences.append(seq)
    return primer_sequences
def find_primers(fastq_file, primer_file, output_file, dg):
    """
    Scans reads for forward or reverse primers. If fragment A, removes everything before the forward primer, and
    removes the degenerate reverse primer. If fragment B, removes the degenerate forward primer, and everything beyond
    the reverse primer. Also eliminates any reads that are too short, or have no primers detected. The remaining reads
    can properly assemble without any internal degeneracy.

        -----------------------
        Forward A        --------------------
                         Reverse B

    Writes curated reads to *output_file* and the rejected reads to a
    sibling ``*_unfiltered.fq`` file; prints a one-line summary.

    :param fastq_file: merged paired-end reads (.fq)
    :param primer_file: primer FASTA consumed by get_primers()
    :param output_file: destination for the curated reads
    :param dg: Script to generate oligos of degenerate primers
    :return: None
    """
    with open(fastq_file) as f:
        primers = get_primers(primer_file)
        primer_a_f = str(primers[0].seq)  # Forward primer Fragment A
        primer_b_r = str(primers[3].seq)  # Reverse Degenerate Primer Fragment B

        # Degenerate primers must be converted into their respective oligos for detection in the reads
        # Uses the dg program compiled from Author Pierre Lindenbaum https://www.biostars.org/p/6219/
        dg_result = subprocess.run(['./{}'.format(dg), primer_b_r], stdout=subprocess.PIPE).stdout.decode('utf-8')
        # dg prints one oligo per line; drop the trailing empty entry after the last newline.
        reverse_primers = dg_result.split('\n')[:-1]

        curated_fragment_a_reads = []
        curated_fragment_b_reads = []
        total_reads = 0
        too_short_a = 0
        too_short_b = 0
        no_primers = 0
        filtered_out_reads = []
        for seq in SeqIO.parse(f, "fastq"):
            total_reads += 1
            sliced_seq = False
            """
            Look for fragment A primer in read. If found, trim the read to the minimum_fragement length,
            which excludes the degenerate reverse primer.
            """
            index = seq.seq.find(primer_a_f)
            if index > 0:  # Fragment A Forward Primer Found
                # NOTE(review): 454 and the expected 430 nt post-trim length are
                # amplicon-specific constants for this CO1 assay — confirm before
                # reusing with other primers/amplicons (see module docstring).
                min_size = 454  # Forward Primer + Fragment A without Reverse Primer
                if len(seq.seq) >= index + min_size:  # Fragment is minimum size without including degenerate primer
                    cut_read = seq[index+len(primer_a_f):index+min_size+1]  # Cut off Forward and Reverse Primer's
                    if len(cut_read.seq) == 430:
                        curated_fragment_a_reads.append(cut_read)
                        sliced_seq = True
                    else:
                        too_short_a += 1
                else:
                    too_short_a += 1
            else:
                """
                Since fragment a forward primer wasn't found, likely dealing with fragment B read
                Look for the reverse primer, since its degenerate look at all possible oligos.
                Then trim the read to just the reverse primer at tail, and trim the first 20 bases to
                capture the degenerate forward primer
                """
                for primer in reverse_primers:
                    temp_index = seq.seq.find(primer)
                    # NOTE(review): 400/450/415 are likewise assay-specific position
                    # and length thresholds for fragment B — TODO confirm.
                    if temp_index > 400:
                        index = temp_index
                        if len(seq.seq) > 450:  # Contains both B fragment primers
                            cut_read = seq[index-415:index]  # Remove the forward and reverse primers
                            curated_fragment_b_reads.append(cut_read)
                            sliced_seq = True
                        else:
                            too_short_b += 1
                        # Stop after the first matching oligo for this read.
                        break
                else:
                    # for/else: no reverse-primer oligo matched anywhere in the read.
                    no_primers += 1
            if not sliced_seq:
                filtered_out_reads.append(seq)

        log_string = "File {}, Total Reads: {}, Missing Primers: {}, Reads Too Short: {}, Fragment A Reads: {}, " \
                     "Fragement B Reads: {}, Filtered Out Reads: {}".format(os.path.basename(fastq_file),
                                                                           total_reads, no_primers,
                                                                           (too_short_a+too_short_b),
                                                                           len(curated_fragment_a_reads), len(curated_fragment_b_reads),
                                                                           len(filtered_out_reads))
        print(log_string)

        # Write sliced reads to output file
        curated_reads = curated_fragment_a_reads + curated_fragment_b_reads
        with open(output_file, "w") as g:
            SeqIO.write(curated_reads, g, "fastq")

        # Write unsliced reads to output file
        unfiltered_output_file = output_file.replace(".fq", "_unfiltered.fq")
        with open(unfiltered_output_file, "w") as h:
            SeqIO.write(filtered_out_reads, h, "fastq")
# Script entry point: only run when executed directly, not on import.
if __name__ == "__main__":
    main()
|
from pyVim.connect import SmartConnect
import ssl

# List every datacenter visible from the vCenter/ESXi inventory root.
# NOTE(review): TLSv1 and the empty host/password are placeholders — fill in
# real credentials and prefer a modern TLS context where possible.
gcontext = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
si = SmartConnect(host="", port=443, user="root", pwd="", sslContext=gcontext)
c = si.RetrieveContent()
# Bug fix: the inventory root is the `rootFolder` attribute of the content
# object; it was misspelled `rooFolder` and wrongly invoked as a method.
rootfolder = c.rootFolder
for datacenter in rootfolder.childEntity:
    print(datacenter.name)
|
"""Fixes file permissions after the file gets written on import. Put something
like the following in your config.yaml to configure:
permissions:
file: 644
dir: 755
"""
import os
import stat
from beets import config
from beets.plugins import BeetsPlugin
from beets.util import ancestry, displayable_path, syspath
def convert_perm(perm):
    """Interpret *perm* as octal and return it as an integer mode.

    A string like ``'644'`` is parsed as octal.  An integer like ``644`` is
    treated as an octal literal that was "misread" as decimal, so it is
    first rendered back to text and then parsed as octal.
    """
    text = str(perm) if isinstance(perm, int) else perm
    return int(text, 8)
def check_permissions(path, permission):
    """Return True iff the permission bits of *path* equal *permission*."""
    current_mode = stat.S_IMODE(os.stat(syspath(path)).st_mode)
    return oct(current_mode) == oct(permission)
def assert_permissions(path, permission, log):
    """Warn via *log* when *path* does not carry the expected *permission*.

    The comparison itself is delegated to `check_permissions`; on a
    mismatch a warning plus a debug line with the actual mode is emitted.
    """
    if check_permissions(path, permission):
        return
    log.warning('could not set permissions on {}', displayable_path(path))
    log.debug(
        'set permissions to {}, but permissions are now {}',
        permission,
        os.stat(syspath(path)).st_mode & 0o777,
    )
def dirs_in_library(library, item):
    """Return *item*'s ancestor directories that lie inside *library*,
    excluding the topmost (library root) entry.
    """
    inside = [parent for parent in ancestry(item) if parent.startswith(library)]
    return inside[1:]
class Permissions(BeetsPlugin):
    """Fix file/directory permissions after files are written on import."""

    def __init__(self):
        super().__init__()
        # Adding defaults.
        self.config.add({
            'file': '644',
            'dir': '755',
        })

        self.register_listener('item_imported', self.fix)
        self.register_listener('album_imported', self.fix)
        self.register_listener('art_set', self.fix_art)

    def fix(self, lib, item=None, album=None):
        """Fix the permissions for an imported Item or Album.
        """
        files = []
        dirs = set()
        if item:
            files.append(item.path)
            dirs.update(dirs_in_library(lib.directory, item.path))
        elif album:
            for album_item in album.items():
                files.append(album_item.path)
                dirs.update(dirs_in_library(lib.directory, album_item.path))
        self.set_permissions(files=files, dirs=dirs)

    def fix_art(self, album):
        """Fix the permission for Album art file.
        """
        if album.artpath:
            self.set_permissions(files=[album.artpath])

    def set_permissions(self, files=None, dirs=None):
        """Apply the configured permissions to *files* and directories *dirs*.

        ``None`` (the default) means "no paths".  The previous mutable
        default arguments (``files=[]``, ``dirs=[]``) were replaced with
        ``None`` sentinels to avoid the shared-default-list pitfall.
        """
        files = [] if files is None else files
        dirs = [] if dirs is None else dirs

        # Get the configured permissions. The user can specify this either a
        # string (in YAML quotes) or, for convenience, as an integer so the
        # quotes can be omitted. In the latter case, we need to reinterpret the
        # integer as octal, not decimal.
        file_perm = config['permissions']['file'].get()
        dir_perm = config['permissions']['dir'].get()
        file_perm = convert_perm(file_perm)
        dir_perm = convert_perm(dir_perm)

        for path in files:
            # Changing permissions on the destination file.
            self._log.debug(
                'setting file permissions on {}',
                displayable_path(path),
            )
            if not check_permissions(path, file_perm):
                os.chmod(syspath(path), file_perm)

            # Checks if the destination path has the permissions configured.
            assert_permissions(path, file_perm, self._log)

        # Change permissions for the directories.
        for path in dirs:
            # Changing permissions on the destination directory.
            self._log.debug(
                'setting directory permissions on {}',
                displayable_path(path),
            )
            if not check_permissions(path, dir_perm):
                os.chmod(syspath(path), dir_perm)

            # Checks if the destination path has the permissions configured.
            assert_permissions(path, dir_perm, self._log)
|
#!/usr/bin/env python
# -*- coding=utf-8 -*-
__author__ = 'jimit'
# Bug fix: in '2019\3\7 ...' the \3 and \7 were octal escapes that produced
# control characters, corrupting the intended date text; a raw string keeps
# the backslashes literal.
__CreateAt__ = r'2019\3\7 0007-8:57'
import os
import cv2
import numpy as np
import tensorflow as tf

# Run a saved TF1 softmax MNIST-style model over every image in testimages/,
# printing the predicted digit per file.
path = 'testimages/'
tf.reset_default_graph()  # reset the default computation graph
sess = tf.Session()
# Import the saved graph definition
saver = tf.train.import_meta_graph('model/softmax_model.meta')
# Restore all saved parameters into the session
saver.restore(sess, 'model/softmax_model')
graph = tf.get_default_graph()  # handle to the current graph
# NOTE(review): `input` shadows the builtin; kept to preserve behavior.
input = graph.get_tensor_by_name('input:0')  # model input tensor
output = graph.get_tensor_by_name('output:0')  # model output tensor
pathDir = os.listdir(path)
# Sort numerically by the filename stem, e.g. "10.png" after "2.png".
pathDir.sort(key=lambda x:int(x.split('.')[0]))
for i in pathDir:
    # print(i)
    img = cv2.imread(path + str(i) )[:, :, 0] / 255  # read one channel, scale to [0, 1]
    img = img.reshape([1, 28 * 28])  # flatten to the model's expected shape
    pre = sess.run(output, feed_dict={input: img})  # run the model on the sample
    res = np.argmax(pre, 1)  # predicted label
    print('图片 ', str(i) + ' 中的数字是: ', res[0])
sess.close()
|
# Type-stub declarations (.pyi style): the runtime module supplies the actual
# values; `Incomplete` marks attributes whose precise type is not yet stubbed.
from _typeshed import Incomplete

STATUS: Incomplete
EMOJI_DATA: Incomplete
|
import logging
from telegram.ext import Updater, CommandHandler, InlineQueryHandler
import scryfall_telegram.actions as actions
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO)
def main():
    """Read the bot token from disk, register handlers, and start polling."""
    with open('./token.txt') as token_file:
        bot_token = token_file.read().strip()

    bot = Updater(token=bot_token)
    bot.dispatcher.add_handler(CommandHandler('start', actions.start))
    bot.dispatcher.add_handler(InlineQueryHandler(actions.inline_search))
    bot.start_polling()


if __name__ == "__main__":
    main()
|
from flask import Flask, request, redirect
import cgi
import os
import jinja2
# Jinja environment rooted at the templates/ directory next to this file;
# autoescape guards rendered values against HTML injection.
template_dir = os.path.join(os.path.dirname(__file__), 'templates')
jinja_env = jinja2.Environment(loader = jinja2.FileSystemLoader(template_dir), autoescape = True)
app = Flask(__name__)
app.config['DEBUG'] = True
# In-memory task store: a module-level list shared by all requests.
tasks = []
@app.route('/', methods=['POST', 'GET'])
def todos():
    """Render the task list; on POST, first append the submitted task."""
    # request.method is always upper-case, so compare against 'POST' exactly.
    if request.method == 'POST':
        tasks.append(request.form['task'])
    page = jinja_env.get_template('todos.html')
    return page.render(tasks=tasks, title="I have so much TODO...")


app.run()
import serial
import time
# Read the serial device name (e.g. "ttyUSB0") from the first line of the
# config file and build the full /dev path.
with open('port_config.txt', 'r') as f:
    text = f.readlines()
SIM900devicePort = '/dev/' + text[0].split('\n')[0]
print 'SIMinterface port =', SIM900devicePort
#devicePort = '/dev/ttyUSB0' #USB to serial device
escapeString = 'xYzZyX' #some random string of letters that won't ever look like a command
serialBaudRate = 115200 #default for the 922
serialTimeout = 1
responseWaitTime = 0.25 #seconds
class SIMinterface():
    """Serial wrapper for an SRS SIM900-style mainframe and its SIM92x modules.

    Configures and opens the serial port on construction, then exposes thin
    helpers around the instrument's ASCII command set (CONN, CURV, TVAL?, ...).
    Commands are terminated with CRLF; queries wait `responseWaitTime` before
    reading a single response line.
    """
    def __init__(self):
        self.devicePort = SIM900devicePort
        self.escapeString = escapeString
        self.serialBaudRate = serialBaudRate
        self.serialTimeout = serialTimeout
        # When True, echo every command written to the port.
        self.verbose = False
        self.confSerial()
        self.openSerial()
    def confSerial(self):
        """Configure (but do not open) the serial connection parameters."""
        self.Sim = serial.Serial()
        self.Sim.port = self.devicePort
        self.Sim.baudrate = self.serialBaudRate
        self.Sim.timeout = self.serialTimeout
        # Start from a closed port so openSerial() always opens cleanly.
        if self.Sim.isOpen():
            self.closeSerial()
    def openSerial(self):
        """Open the port, disable console mode, and probe the device ID."""
        self.Sim.open()
        self.Sim.write('CONS OFF') #turn console mode off incase it's on
#        self.Sim.write(self.escapeString)
        self.getFullDeviceID()
    def clearStatus(self):
        """Clear the instrument's status registers (*CLS)."""
        self.write('*CLS')
    def closeSerial(self):
        self.Sim.close()
    def connectToMainframePort(self, port):
        """Route communication to a module slot; escapeString exits the route."""
        self.write('CONN {0}, "{1}"'.format(port, self.escapeString))
        time.sleep(responseWaitTime)
    def escape(self):
        # Sends the escape token to drop back to the mainframe prompt.
        self.write('xYzZyX')
    def flushOutput(self):
        self.write('FLSO')
    def flushBuffers(self):
        self.write('FLSH')
    def write(self,message):
        """Write *message* to the port with CRLF termination."""
        message = message + '\r\n'
        self.Sim.write(message)
        if self.verbose:
            print message
    def readline(self):
        return self.Sim.readline()
    def getDeviceID(self):
        """Return the model field of the *IDN? response, or 'None'."""
        id = self.getFullDeviceID()
        try:
            return id.split(',')[1]
        except IndexError:
            # Empty/short response (e.g. nothing connected).
            return 'None'
    def getFullDeviceID(self):
        return self.query('*IDN?')
    def initDiodeCal(self, channel,curvename):
        """Initialize a user calibration curve on *channel* (SIM922)."""
        self.write('CINI {0},0,"{1}"'.format(channel,curvename))
    def queryDiodeCal(self,channel):
        return self.query('CINI? {0}'.format(channel))
    def addCalPoint(self, voltpoint, temppoint):
        # NOTE(review): channel 4 is hard-coded here — confirm intended.
        self.write('CAPT 4,{0},{1}'.format(voltpoint, temppoint))
    def queryCalPoint(self, calpoint):
        return self.query('CAPT? 4,{0}'.format(calpoint))
    def selectCalCurve(self, channel): #curve can be 'STAN 0' or 'USER 1'
        # NOTE(review): the user curve (1) is hard-coded despite the comment.
        self.write('CURV {0},1'.format(channel))
    def queryCalCurve(self, channel):
        return self.query('CURV? {0}'.format(channel))
    def query(self, message):
        """Write *message*, wait briefly, and return the stripped reply line."""
        self.write(message)
        time.sleep(responseWaitTime)
        return self.readline().rstrip()
    def SIM922getDiodeVoltages(self):
        """Return all diode voltages as floats; [nan] on an unparsable reply."""
        volts = self.query('VOLT? 0,1')
        volts = volts.split(',')
        try:
            voltages = map(float, volts)
        except ValueError:
            voltages = [float('nan')]
        return voltages
    def SIM922getDiodeTemps(self):
        """Return all diode temperatures as floats; [nan] on an unparsable reply."""
        temps = self.query('TVAL? 0,1')
        temps = temps.split(',')
        try:
            temperatures = map(float,temps)
        except ValueError:
            temperatures = [float('nan')]
        return temperatures
    def SIM925switchMUX(self,channel):
        """Switch the SIM925 multiplexer to *channel*."""
        self.write('CHAN {0}'.format(channel))
    def setCalCurveAC(self,channel):
        self.write('CURV {0}'.format(channel))
    def queryCalCurveAC(self):
        return self.query('CURV?')
    def getTempAC(self):
        temp = self.query('TVAL?')
        return temp
    def getResistanceAC(self):
        resistance = self.query('RVAL?')
        return resistance
    def SIM921setExcitationOnOff(self,state):
        self.write('EXON {0}'.format(state))
    def SIM921setExcitationRange(self,e_int):
        self.write('EXCI {0}'.format(e_int))
    def SIM921excitation_q(self):
        return self.query('EXON?')
    def SIM921setResistanceRange(self,r_int):
        self.write('RANG {0}'.format(r_int))
    def SIM921setExcitationFreq(self, freq):
        self.write('FREQ {0}'.format(freq))
    def SIM921getExcitationFreq(self):
        return self.query('FREQ?')
    def SIM921setTimeConstant(self, tau):
        self.write('TCON {0}'.format(tau))
    def SIM921getTimeConstant(self):
        return self.query('TCON?')
|
import pickle
import pandas as pd
from sklearn.cluster import KMeans

# Train six KMeans models (k=4) on progressively larger feature subsets of
# the beer_reviews data set and pickle each fitted model to disk.
print("Loading data set...")
beer_reviews = pd.read_csv("beer_reviews.csv")
data = beer_reviews.copy() # create copy of original data set
# drop unnecessary columns (identifiers and free-text fields)
data.drop(["brewery_id", "brewery_name", "review_time", "review_profilename", "beer_style", "beer_name", "beer_beerid"],
          axis=1, inplace=True)
# fill empty cells with mean value
mean_abv = data['beer_abv'].mean()
data = pd.DataFrame(data).fillna(mean_abv)
# data contains data about beer_reviews including all features that are necessary and empty cells are filled
# make copies of data for every different model
data_model_1 = data.copy() # data with only beer_abv
data_model_2 = data.copy() # data with beer_abv and review_overall
data_model_3 = data.copy() # data with beer_abv, review_overall and review_aroma
data_model_4 = data.copy() # data with beer_abv, review_overall, review_aroma and review_appearance
data_model_5 = data.copy() # data with beer_abv, review_overall, review_aroma, review_appearance and review_taste
# prepare data for each model by dropping columns that we don't need
print("Dropping unnecessary columns...")
data_model_1.drop(["review_overall", "review_aroma", "review_appearance", "review_palate", "review_taste"], axis=1,
                  inplace=True)
data_model_2.drop(["review_aroma", "review_appearance", "review_palate", "review_taste"], axis=1, inplace=True)
data_model_3.drop(["review_appearance", "review_palate", "review_taste"], axis=1, inplace=True)
data_model_4.drop(["review_palate", "review_taste"], axis=1, inplace=True)
data_model_5.drop(["review_palate"], axis=1, inplace=True)
# print sample from each data set
print("Data model 1 (beer_abv): ")
print(data_model_1.head(5))
print("\nData model 2 (review_overall, beer_abv): ")
print(data_model_2.head(5))
print("\nData model 3 (review_overall, review_aroma, beer_abv): ")
print(data_model_3.head(5))
print("\nData model 4 (review_overall, review_aroma, review_appearance, beer_abv): ")
print(data_model_4.head(5))
print("\nData model 5 (review_overall, review_aroma, review_appearance, review_taste, beer_abv): ")
print(data_model_5.head(5))
print("\nData model (review_overall, review_aroma, review_appearance, review_palate, review_taste, beer_abv): ")
print(data.head(5))
# train the models with 4 clusters
# NOTE(review): features are unscaled, so beer_abv (larger range) dominates
# the euclidean distance — consider standardizing before clustering.
print("Training model 1...")
kmeans_1 = KMeans(n_clusters=4, random_state=0)
kmeans_1.fit(data_model_1)
pickle.dump(kmeans_1, open("kmeans_1.pkl", "wb")) # save model 1
print("Training model 2...")
kmeans_2 = KMeans(n_clusters=4, random_state=0)
kmeans_2.fit(data_model_2)
pickle.dump(kmeans_2, open("kmeans_2.pkl", "wb")) # save model 2
print("Training model 3...")
kmeans_3 = KMeans(n_clusters=4, random_state=0)
kmeans_3.fit(data_model_3)
pickle.dump(kmeans_3, open("kmeans_3.pkl", "wb")) # save model 3
print("Training model 4...")
kmeans_4 = KMeans(n_clusters=4, random_state=0)
kmeans_4.fit(data_model_4)
pickle.dump(kmeans_4, open("kmeans_4.pkl", "wb")) # save model 4
print("Training model 5...")
kmeans_5 = KMeans(n_clusters=4, random_state=0)
kmeans_5.fit(data_model_5)
pickle.dump(kmeans_5, open("kmeans_5.pkl", "wb")) # save model 5
print("Training model final...")
kmeans_final = KMeans(n_clusters=4, random_state=0)
kmeans_final.fit(data)
pickle.dump(kmeans_final, open("kmeans_final.pkl", "wb")) # save model final
# print cluster centers
print("\nPrinting cluster centers:")
print("\nCluster centers for model 1")
print(list(data_model_1.columns.values))
print(kmeans_1.cluster_centers_)
print("\nCluster centers for model 2")
print(list(data_model_2.columns.values))
print(kmeans_2.cluster_centers_)
print("\nCluster centers for model 3")
print(list(data_model_3.columns.values))
print(kmeans_3.cluster_centers_)
print("\nCluster centers for model 4")
print(list(data_model_4.columns.values))
print(kmeans_4.cluster_centers_)
print("\nCluster centers for model 5")
print(list(data_model_5.columns.values))
print(kmeans_5.cluster_centers_)
print("\nCluster centers for model final")
print(list(data.columns.values))
print(kmeans_final.cluster_centers_)
|
import yaml
def config():
    """Load db.yaml and return a list of per-database master connection dicts.

    Each entry carries the name plus the master host/port/user/pass/db
    fields; missing keys come back as None via dict.get().
    """
    # safe_load avoids arbitrary-object construction on untrusted YAML and
    # the PyYAML "no Loader" deprecation warning; the context manager closes
    # the handle (the original left the file open).
    with open("db.yaml", "r", encoding="utf8") as fh:
        data = yaml.safe_load(fh)
    data_list = [{
        "name": data[k].get("name"),
        "host": data[k].get("master").get("host"),
        "port": data[k].get("master").get("port"),
        "user": data[k].get("master").get("user"),
        "pass": data[k].get("master").get("pass"),
        "db": data[k].get("master").get("db"),
    } for k in data]
    return data_list
|
from .UQAnalysis import UQAnalysis
from .RawDataAnalyzer import RawDataAnalyzer
from .Common import Common
class UncertaintyAnalysis(UQAnalysis):
    """Uncertainty analysis (UA) over an ensemble's valid samples."""

    def __init__(self, ensemble, output):
        # Moment statistics are produced by analyze().
        self.moments = None
        super(UncertaintyAnalysis, self).__init__(ensemble, output, UQAnalysis.UNCERTAINTY)

    def saveDict(self):
        """Serialize, extending the base-class dict with the computed moments."""
        state = super(UncertaintyAnalysis, self).saveDict()
        state['moments'] = self.moments
        return state

    def loadDict(self, sd):
        """Restore state; tolerates archives saved before moments existed."""
        super(UncertaintyAnalysis, self).loadDict(sd)
        self.moments = sd.get('moments', None)

    def analyze(self):
        """Run the UA on the ensemble's valid samples and return the plot file."""
        samples = self.ensemble.getValidSamples()
        Common.initFolder(RawDataAnalyzer.dname)
        data_path = Common.getLocalFileName(RawDataAnalyzer.dname, samples.getModelName().split()[0], '.dat')
        samples.writeToPsuade(data_path, fixedAsVariables=True)

        # Perform the uncertainty analysis.
        plot_file, self.moments = RawDataAnalyzer.performUA(data_path, self.outputs[0])

        # Keep the generated plot script with the analysis archive.
        if plot_file is not None:
            self.archiveFile(plot_file)
        return plot_file

    def showResults(self):
        """Restore the archived matlabua.m file and plot the UA results."""
        fileName = 'matlabua.m'
        self.restoreFromArchive(fileName)

        RawDataAnalyzer.plotUA(self.ensemble, self.outputs[0], fileName, self.moments)
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-08-23 20:11
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated by Django's makemigrations (see header above); contains
    # only declarative schema operations.
    # NOTE(review): do not hand-edit an applied migration — add a new one.

    dependencies = [
        ('api_', '0001_initial'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='musicgenre',
            options={'ordering': ['name']},
        ),
        migrations.RenameField(
            model_name='place',
            old_name='place_id',
            new_name='id',
        ),
        migrations.RemoveField(
            model_name='bandatevent',
            name='ends_at',
        ),
        migrations.RemoveField(
            model_name='bandatevent',
            name='starts_at',
        ),
        migrations.RemoveField(
            model_name='host',
            name='events',
        ),
        migrations.RemoveField(
            model_name='musicgenre',
            name='parent_genre',
        ),
        migrations.RemoveField(
            model_name='proposition',
            name='band_confirmed',
        ),
        migrations.RemoveField(
            model_name='proposition',
            name='host',
        ),
        migrations.AddField(
            model_name='bandatevent',
            name='proposition',
            field=models.ForeignKey(default=0, on_delete=django.db.models.deletion.CASCADE, to='api_.Proposition'),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='event',
            name='confirmed',
            field=models.BooleanField(default=False),
        ),
        migrations.AddField(
            model_name='event',
            name='description',
            field=models.TextField(default=''),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='event',
            name='host',
            field=models.ForeignKey(default=0, on_delete=django.db.models.deletion.CASCADE, related_name='hosted_event', to='api_.Host'),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name='event',
            name='url',
            field=models.URLField(null=True),
        ),
        migrations.AlterField(
            model_name='faatevent',
            name='feeling',
            field=models.CharField(choices=[('i', 'interested'), ('n', 'nah'), ('g', 'going')], max_length=1, null=True),
        ),
        migrations.AlterField(
            model_name='proposition',
            name='event',
            field=models.ForeignKey(default=0, on_delete=django.db.models.deletion.CASCADE, to='api_.Event'),
            preserve_default=False,
        ),
        migrations.AlterUniqueTogether(
            name='event',
            unique_together=set([('name', 'starts_at')]),
        ),
    ]
|
import math
import random
def leer_numero(ini, fin, mensaje):
    """Prompt repeatedly until the user enters an integer in [ini, fin].

    :param ini: lower bound (inclusive)
    :param fin: upper bound (inclusive)
    :param mensaje: prompt shown on each attempt
    :return: the accepted integer
    """
    while True:
        try:
            valor = int( input(mensaje) )
        # Bug fix: int() on a non-numeric string raises ValueError, not
        # TypeError, so invalid input used to crash instead of re-prompting.
        except ValueError as ex:
            print(type(ex).__name__,'- {}'.format("Tipo de dato no valido"))
        else:
            if ini <= valor <= fin:
                break
    return valor
def generador():
    """Interactively generate random floats and round them per the user's choice.

    Asks for a count (1-20) and a rounding mode (ceil / floor / 2 decimals),
    prints each value with its rounded form, then prints the full list.
    """
    cantidad = leer_numero(1, 20, "¿Cuantos números quieres generar? [1-20]: ")
    redondeo = leer_numero(1, 3, "¿Cómo quieres redondear los números? " \
                                 "[1]Al alza [2]A la baja [3]Normal: ")
    resultados = []
    for _ in range(cantidad):
        # Random float from 1 up to (but excluding) 101.
        valor = random.uniform(1, 101)
        if redondeo == 1:
            redondeado = math.ceil(valor)
        elif redondeo == 2:
            redondeado = math.floor(valor)
        else:
            redondeado = round(valor, 2)
        print("{} => {}".format(valor, redondeado))
        resultados.append(redondeado)
    print(resultados)
# Launch the interactive generator when the script runs.
generador()
def hw(s) -> bool:
    """Return True iff *s* reads the same forwards and backwards."""
    return s == s[::-1]
class Solution:
    """Count palindromic substrings of a string.

    dfs(a, b) walks every index interval of the string; the memo set marks
    intervals already visited so later visits contribute 0, which makes each
    interval count exactly once despite the overlapping recursion
    dfs(a, b-1) / dfs(a+1, b).
    """

    def __init__(self):
        self.s = ""
        self.catch = {}

    @staticmethod
    def _is_palindrome(s: str) -> bool:
        """Return True iff *s* equals its reverse."""
        length = len(s)
        for i in range(length // 2):
            if s[i] != s[length - i - 1]:
                return False
        return True

    def countSubstrings(self, s: str) -> int:
        """Return the number of palindromic substrings of *s*."""
        self.s = s
        # Bug fix: the memo was a class-level attribute shared by every
        # instance and every call, so any second call returned stale,
        # mostly-zero results. Reset it for each new string.
        self.catch = {}
        return self.dfs(0, len(s))

    def dfs(self, a: int, b: int) -> int:
        """Count palindromic substrings within s[a:b] not yet visited."""
        if a >= b:
            return 0
        if (a, b) in self.catch:
            # Interval already counted on an earlier path; contributing 0
            # here prevents the overlapping branches from double-counting.
            return 0
        if a + 1 == b:
            self.catch[(a, b)] = 1
            return 1
        count = 0
        if self._is_palindrome(self.s[a:b]):
            count += 1
        count += self.dfs(a, b - 1)
        count += self.dfs(a + 1, b)
        self.catch[(a, b)] = count
        return count
if __name__ == '__main__':
    # Manual smoke test: print a long run of 'a' and the palindromic
    # substring count for a slightly longer all-'a' string.
    print("a" * 100)
    print(Solution().countSubstrings("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"))
|
# import the libraries
import numpy as np
import pandas as pd
import torch
# for neural networks
import torch.nn as nn
# for parallel computations
import torch.nn.parallel as parallel
# for the optimizer
import torch.optim as optim
# for some utilities
import torch.utils.data
# for stochasitic gradient descent
from torch.autograd import Variable
# Prepare the data set
# Delimiter (separator) is \t for tab
training_set = pd.read_csv("ml-100k/u1.base", delimiter='\t')
# Easier to work with arrays, so lets convert it. Convert all the values to integers to work with it
training_set = np.array(training_set, dtype="int")
test_set = pd.read_csv("ml-100k/u1.test", delimiter='\t')
test_set = np.array(test_set, dtype="int")
# Columns are (user, movie, rating, timestamp). Take the highest user/movie id
# seen in either split so both matrices get the same dimensions.
num_users = int(max(max(training_set[:,0]),max(test_set[:,0])))
num_movies = int(max(max(training_set[:,1]),max(test_set[:,1])))
# Boltzmann Machine requires specific input. We need input value and the feature
# Convert the data into an array with users as rows and movies in columns: input = user, features = movie watched
def convert(data):
    """Reshape (user, movie, rating) rows into a per-user ratings matrix.

    Returns a list of num_users lists, each num_movies long, holding the
    user's rating per movie (0 where unrated) — the list-of-lists layout
    torch.FloatTensor expects.
    """
    per_user = []
    for uid in range(1, num_users + 1):
        # Boolean mask selecting this user's review rows.
        mask = data[:, 0] == uid
        movies = data[mask, 1]
        scores = data[mask, 2]
        # Dense row of ratings; movie ids are 1-based, hence the -1 shift.
        row = np.zeros(num_movies)
        row[movies - 1] = scores
        per_user.append(list(row))
    return per_user
# Convert the training set and the test set so that they are ready for the boltzmann machine
training_set = convert(training_set)
test_set = convert(test_set)
# Convert data into Torch tensor (tensor is just a multi-dimensional matrix)
training_set = torch.FloatTensor(training_set)
test_set = torch.FloatTensor(test_set)
# On to the actual Boltzmann Machine
# Convert the ratings into binary ratings: 1 = liked, 0 = not liked, b/c we're predicting a binary "will they like it",
# and if you remember RBMs, the output and input are swapped as it works, so they must be the same format
# Missing ratings become -1 so they can be masked out later. Remember brackets are conditionals.
training_set[training_set == 0] = -1
# 1/2 stars means they didn't like it, 3/4/5 stars means they did like it
training_set[training_set == 1] = 0
training_set[training_set == 2] = 0
training_set[training_set >= 3] = 1
test_set[test_set == 0] = -1
test_set[test_set == 1] = 0
test_set[test_set == 2] = 0
test_set[test_set >= 3] = 1
# Create the architecture of the Neural Network
class RBM():
    """Bernoulli Restricted Boltzmann Machine trained with contrastive divergence."""

    def __init__(self, num_visible_nodes, num_hidden_nodes):
        # Weight matrix: one row per hidden node, one column per visible node.
        self.W = torch.randn(num_hidden_nodes, num_visible_nodes)
        # Hidden-layer bias (row vector, broadcast over the batch).
        self.a = torch.randn(1, num_hidden_nodes)
        # Visible-layer bias (row vector).
        self.b = torch.randn(1, num_visible_nodes)

    def sample_h(self, x):
        """Return p(h=1|v) and a Bernoulli sample of the hidden layer.

        x holds a batch of visible vectors; the bias is expanded so it
        applies to every row of the batch.
        """
        wx = torch.mm(x, self.W.t())
        pre_activation = wx + self.a.expand_as(wx)
        p_hidden = torch.sigmoid(pre_activation)
        # Bernoulli sampling turns the probabilities into binary activations.
        return p_hidden, torch.bernoulli(p_hidden)

    def sample_v(self, y):
        """Return p(v=1|h) and a Bernoulli sample of the visible layer."""
        # No transpose here: y (hidden activations) matches W's row dimension.
        wy = torch.mm(y, self.W)
        pre_activation = wy + self.b.expand_as(wy)
        p_visible = torch.sigmoid(pre_activation)
        return p_visible, torch.bernoulli(p_visible)

    def train(self, v0, vk, ph0, phk):
        """One contrastive-divergence update.

        v0/ph0 are the input batch and its hidden probabilities; vk/phk are
        the same quantities after k Gibbs steps.
        """
        self.W += (torch.mm(v0.t(), ph0) - torch.mm(v0.t(), phk)).t()
        self.b += torch.sum((v0 - vk), 0)
        self.a += torch.sum((ph0 - phk), 0)
# num visible nodes (number of movies, because we have a node for each movie), could also say nv = num_movies, but it's
# safer to do this, based off our tensors, looking at the length of a line in training_set
num_vis = len(training_set[0])
# Try to detect 100 features, 100 is just the arbitrary number I chose
num_hid = 100
# Update the weights after several observations. 1 would be online learning, updating weights after each observation
# We'll try after 100 observations
batch_size = 100
rbm = RBM(num_vis,num_hid)
# Training the rbm
num_epochs = 10
for epoch in range(1, num_epochs+1):
    # Keep track of our loss (cost function i.e. residual)
    train_loss = 0
    # Need to normalize loss, we'll divide loss by this, . is to make it float
    s = 0.
    # Go through the users in batches (step is batch_size)
    for user_id in range(0, num_users-batch_size, batch_size):
        # vk is the visible layer that will be resampled k times
        vk = training_set[user_id:user_id+batch_size]
        # Set v0 so we can compare later for the loss function. At the begining, it will be the same as the target,
        # hence why they are the same code
        v0 = training_set[user_id:user_id+batch_size]
        # Set the initial probabilities equal to the initial bernoulli sampling of v, v0
        # Use ,_ to say we only want the first variable, since our class returns 2 variables
        ph0,_ = rbm.sample_h(v0)
        # K steps of contrastive divergence, do gibbs sampling, make the sampling equal the next h/v (back and forth)
        for k in range(10):
            # hk = Second element returned from sampling hidden nodes (the bernoulli sampling)
            _,hk = rbm.sample_h(vk)
            _,vk = rbm.sample_v(hk)
            # Ignore the missing nodes (the -1 nodes) by freezing them (set them always equal to v0)
            vk[v0<0] = v0[v0<0]
        # Get the sample for the hidden node applied on the last sample of the visible node
        phk,_ = rbm.sample_h(vk)
        # Apply contrastive divergence
        rbm.train(v0,vk,ph0,phk)
        # Calculate the loss, the difference in the original and the current sample (mean b/c we do many at once)
        # Conditional so we only factor in movies for which ratings exist
        train_loss += torch.mean(torch.abs(v0[v0 >= 0]-vk[v0 >= 0]))
        # Update the counter, which is to normalize the loss
        s += 1.
    print('epoch: ' + str(epoch) + ' loss: '+str(train_loss/s))
# Testing the rbm, basically just get rid of all code that has us do it multiple times, since only need one additional
# Gibbs step: one hidden/visible round trip per user is a blind-walk prediction.
test_loss = 0
s = 0.
for user_id in range(num_users):
    # Keep as training set because our input is from our training data
    v = training_set[user_id:user_id+1]
    # vt = visible nodes in target
    vt = test_set[user_id:user_id+1]
    # Make sure the tensor isn't empty (no rating), since that'll give an error
    if len(vt[vt >= 0]) > 0:
        _,h = rbm.sample_h(v)
        _,v = rbm.sample_v(h)
        # Compare prediction against the held-out ratings only.
        test_loss += torch.mean(torch.abs(vt[vt>=0] - v[vt>=0]))
        s += 1.
print('test loss: '+str(test_loss/s))
|
from google.appengine.ext import db
from usermeta import UserMeta
from game import Game
import math
import random
from ..utilities import *
class League(db.Model):
    """A 16-team league: four pools of four, each playing a round robin."""

    def createLeague(self, teams):
        """Wrap each user in a LeagueTeam, flag and save the user, then build fixtures.

        :param teams: iterable of UserMeta entities (expected: exactly 16).
        """
        league_teams = []
        for t in teams:
            team = LeagueTeam(user=t)
            t.league = True
            save_user(t)
            team.put()
            league_teams.append(team)
        self.createFixture(league_teams)

    def createFixture(self, teams):
        """
        Fixtures: 4 pools of 4, round robin (3 rounds) then semis
        Round robin: AvB,CvD;
                     AvC,BvD;
                     AvD,BvC;
        """
        # Assumes 16 teams exactly
        pools = self.createPools(teams=teams)
        # BUG FIX: createPools returns None when there are not enough teams;
        # the old code iterated it unconditionally and raised TypeError.
        if pools is None:
            return
        for p in pools:
            p.roundRobin()

    def createPools(self, teams):
        """Shuffle teams into four saved pools of four; returns the pools or None.

        BUG FIX: the previous guard only rejected len(teams) < 4, but the
        assignment below indexes teams[0..15], so 4-15 teams crashed with
        IndexError. A full complement of 16 is required. (The unused
        numPerPool computation was removed.)
        """
        if len(teams) < 16:
            return None
        random.shuffle(teams)
        poolA = Pool(league=self)
        poolB = Pool(league=self)
        poolC = Pool(league=self)
        poolD = Pool(league=self)
        for p in [poolA, poolB, poolC, poolD]:
            p.put()
        for i in range(0, 4):
            teams[i].pool = poolA
            teams[4 + i].pool = poolB
            teams[8 + i].pool = poolC
            teams[12 + i].pool = poolD
        for t in teams:
            t.put()
        return [poolA, poolB, poolC, poolD]
class Pool(db.Model):
    # One pool of four LeagueTeams; owns that pool's round-robin matches.
    league=db.ReferenceProperty(League)

    def roundRobin(self):
        # Create the three rounds of round-robin Matches for this pool:
        # round 1: AvB, CvD; round 2: AvC, BvD; round 3: AvD, BvC.
        # NOTE(review): assumes self.leagueteam_set (datastore back-reference
        # query) yields at least four teams and that its ordering is
        # acceptable — back-reference queries have no guaranteed order, and
        # each teams[i] access may issue a datastore fetch; confirm.
        teams=self.leagueteam_set
        Match(pool=self,round=1,teamA=teams[0],teamB=teams[1]).put()
        Match(pool=self,round=1,teamA=teams[2],teamB=teams[3]).put()
        Match(pool=self,round=2,teamA=teams[0],teamB=teams[2]).put()
        Match(pool=self,round=2,teamA=teams[1],teamB=teams[3]).put()
        Match(pool=self,round=3,teamA=teams[0],teamB=teams[3]).put()
        Match(pool=self,round=3,teamA=teams[1],teamB=teams[2]).put()
class LeagueTeam(db.Model):
    """Standings record for one user within a pool."""
    user=db.ReferenceProperty(UserMeta)
    wins=db.IntegerProperty(default=0)
    losses=db.IntegerProperty(default=0)
    draws=db.IntegerProperty(default=0)
    points=db.IntegerProperty(default=0)
    percentage=db.IntegerProperty(default=0)
    pool=db.ReferenceProperty(Pool)

    def _refresh_points(self):
        # Scoring rule shared by both setters: 4 points a win, 2 a draw.
        self.points = 4 * self.wins + 2 * self.draws

    def set_wins(self, wins):
        # Record the new win total and re-derive the points tally.
        self.wins = wins
        self._refresh_points()

    def set_draws(self, draws):
        # Record the new draw total and re-derive the points tally.
        self.draws = draws
        self._refresh_points()
class Match(db.Model):
    # A single fixture between two LeagueTeams within a pool/league.
    round=db.IntegerProperty(required=True)
    pool=db.ReferenceProperty(Pool)
    league=db.ReferenceProperty(League)
    # Distinct collection names so LeagueTeam gets both teamA_set and teamB_set.
    teamA=db.ReferenceProperty(LeagueTeam,collection_name="teamA_set")
    teamB=db.ReferenceProperty(LeagueTeam,collection_name="teamB_set")
    teamAScore=db.IntegerProperty(default=0)
    teamBScore=db.IntegerProperty(default=0)
    # NOTE(review): the encoding of `winner` (0 = unplayed/draw? 1/2 = team
    # index?) is not established anywhere in this file — confirm before use.
    winner=db.IntegerProperty(default=0)
    played=db.BooleanProperty(default=False)
Python 3.4.3 (v3.4.3:9b73f1c3e601, Feb 24 2015, 22:44:40) [MSC v.1600 64 bit (AMD64)] on win32
Type "copyright", "credits" or "license()" for more information.
>>> """
This is a commit
written in
more than just one line
"""
'\nThis is a commit\nwritten in\nmore than just one line\n'
>>> print("hello world")
hello world
>>>
|
import sys
r = sys.stdin.readline

# Read N values, sort them in descending order, and greedily accumulate
# arr[i] - i while the adjusted value stays non-negative (once it goes
# negative it stays negative for the rest of the descending sequence).
N = int(r())
arr = []
# `total` replaces the original `sum`, which shadowed the builtin.
total = 0
for i in range(N):
    arr.append(int(r()))
arr.sort(reverse=True)  # descending order in one step (was sort + reverse)
for i in range(N):
    temp = arr[i] - i
    if temp < 0:
        break
    total += temp
print(total)
#!/usr/bin/env python
'''
Basic script to initialize database with some test entries.
'''
__author__ = 'Aditya Viswanathan'
__email__ = 'aditya@adityaviswanathan.com'
import argparse
import os
import sys
from db import db
from flask_script import Manager as ScriptRunner
from flask_migrate import Migrate, MigrateCommand
from models import *
from test_entities import make_entities
# Append parent dir to $PYTHONPATH to import Flask app.
my_path = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.abspath(os.path.join(my_path, os.pardir)))
from api import app
if __name__ == '__main__':
    # (flag, default, help) specs for every integer-count option, in the
    # same order the original flags were registered.
    int_options = [
        ('--num_owners', 1, 'number of owners to generate'),
        ('--num_properties', 1, 'number of properties to generate per owner'),
        ('--num_managers', 1, 'number of managers to generate per property'),
        ('--num_units', 1, 'number of units to generate per property'),
        ('--num_tenants', 1, 'number of tenants to generate per property'),
        ('--num_tickets', 1, 'number of tickets to generate per tenant'),
        ('--num_contracts', 1, 'number of contracts to generate per min(units, tenants)'),
        ('--num_contract_payments', 0, 'number of contract payments to generate per contract'),
        ('--num_ticket_payments', 0, 'number of ticket payments to generate per contract'),
        ('--num_contractors', 1, 'number of contractors to generate'),
    ]
    parser = argparse.ArgumentParser()
    for flag, default, help_text in int_options:
        parser.add_argument(flag, type=int, default=default, help=help_text)
    parser.add_argument('--payments', default=False, action='store_true')
    args = parser.parse_args()

    # Rebuild the schema from scratch, then seed it with test entities.
    migrate = Migrate(app, db)
    migrate.init_app(app)
    db.drop_all()
    db.create_all()
    make_entities(num_owners=args.num_owners,
                  num_properties=args.num_properties,
                  num_managers=args.num_managers,
                  num_units=args.num_units,
                  num_tenants=args.num_tenants,
                  num_tickets=args.num_tickets,
                  num_contracts=args.num_contracts,
                  num_contractors=args.num_contractors,
                  num_contract_payments=args.num_contract_payments,
                  num_ticket_payments=args.num_ticket_payments,
                  test_payments=args.payments)
#!/bin/python3
import os
import sys
import datetime
#
# Complete the timeConversion function below.
#
#Sample Input 0
#07:05:45PM
#Sample Output 0
#19:05:45
def timeConversion(s):
    """Convert a 12-hour time string 'hh:mm:ssAM'/'hh:mm:ssPM' to 24-hour 'hh:mm:ss'.

    Replaces the original's four-way AM/PM branching with modular
    arithmetic: hour % 12 maps 12AM->0 and 12PM->0 (+12 for PM).
    """
    hh, mm, ss = s[:-2].split(":")  # strip the trailing AM/PM marker
    hour = int(hh) % 12             # 12 o'clock wraps to 0
    if s.endswith("PM"):
        hour += 12
    # mm/ss come in already zero-padded; only the hour needs reformatting.
    return f"{hour:02d}:{mm}:{ss}"
if __name__ == '__main__':
    # Read one time string, convert it, and write to the grader's output path.
    s = input()
    result = timeConversion(s)
    # BUG FIX: the file handle was opened manually and leaked if the
    # conversion or write raised; a context manager guarantees closure.
    with open(os.environ['OUTPUT_PATH'], 'w') as f:
        f.write(result + '\n')
|
"""
pyjld.system.registry.base
"""
__author__ = "Jean-Lou Dupont"
__fileid = "$Id: base.py 37 2009-04-03 01:58:16Z jeanlou.dupont $"
__all__ = ['Registry', 'RegistryException']
import sys
class RegistryException(Exception):
    """
    An exception class for Registry
    """
    def __init__(self, value):
        # BUG FIX: delegate to Exception.__init__ so .args is populated and
        # repr/pickling behave normally; .value is kept for compatibility.
        super(RegistryException, self).__init__(value)
        self.value = value

    def __str__(self):
        return str(self.value)
class Registry(object):
    """
    Facade for the cross-platform Registry

    Can be accessed like a dictionary: for this
    functionality, an instance must be constructed
    with the 'file' parameter specified.
    """
    # Platform-specific backend (WindowsRegistry or LinuxRegistry instance).
    reg = None

    def __init__(self, file = None):
        self.file = file
        # Pick the backend from the runtime platform; imports are deferred so
        # the other platform's module never has to be importable.
        if sys.platform[:3] == 'win':
            from pyjld.system.registry.reg_windows import WindowsRegistry
            self.reg = WindowsRegistry(file)
        else:
            from pyjld.system.registry.reg_linux import LinuxRegistry
            self.reg = LinuxRegistry(file)

    def getKey(self, file, key):
        """
        GETS the specified key
        """
        return self.reg.getKey(file, key)

    def setKey(self, file, key, value, cond = False):
        """
        SETS the specified key

        When cond is True, a None value is silently skipped rather than
        written (conditional set).
        """
        if (cond):
            if (value is None):
                #print "skipping key[%s]" % key
                return
        return self.reg.setKey(file, key, value)

    # DICTIONARY INTERFACE
    # ====================
    def __getitem__(self, key):
        # NOTE(review): the read path uses .get() (missing key -> None) while
        # the write path below uses item assignment — assumes the backend
        # supports both; confirm against WindowsRegistry/LinuxRegistry.
        return self.reg.get(key, None)

    def __setitem__(self, key, value):
        self.reg[key] = value

    def __contains__(self, key):
        return (key in self.reg)
|
import sys
'''
https://adventofcode.com/2020/day/1
'''
def parse(args):
    '''Read one integer per line from the file named by args[1].'''
    with open(args[1], mode='r') as fh:
        return [int(raw.strip()) for raw in fh.readlines()]
def find2(input_nums, target):
    '''
    Find 2 numbers (assumed positive ints) in input_nums that sum to target. Return their product
    '''
    complements = set()
    for num in input_nums:
        if num > target:
            continue
        if num in complements:
            # A previously seen value equals target - num: pair found.
            return num * (target - num)
        complements.add(target - num)
    return None
def find3(input_nums, target):
    '''
    Find 3 numbers (assumed positive ints) in input_nums that sum to target. Return their product
    '''
    # Fix the first number, then run an inlined two-sum scan (formerly a
    # call to find2) for the remaining amount.
    for first in input_nums:
        if first > target:
            continue
        remaining = target - first
        complements = set()
        for second in input_nums:
            if second > remaining:
                continue
            if second in complements:
                return first * second * (remaining - second)
            complements.add(remaining - second)
    return None
def main(args):
    # Parse the puzzle input named on the command line and print both answers.
    data = parse(args)
    print("part one: {}".format(find2(data, 2020)))
    print("part two: {}".format(find3(data, 2020)))
# part one: 842016
# part two: 9199664
if __name__ == '__main__':
    # Entry point: expects the input filename as argv[1].
    main(sys.argv)
# SCons build script (SConscript): compiles every .cpp file in this directory
# into shared objects and hands them back to the parent build via Return().
# import env
Import('env')
# libuuid is only required (and resolvable via pkg-config) on Linux.
if(env['PLATFORM'] == 'linux'):
    env.ParseConfig( 'pkg-config --cflags --libs uuid ')
objs = env.SharedObject([Glob('*.cpp')])
Return('objs')
import toml

# Destination Streamlit reads secrets from.
output_file = ".streamlit/secrets.toml"

# Read the service-account JSON verbatim; it is embedded as one TOML string.
with open("milkbar-326412-53ae838df218.json") as json_file:
    json_text = json_file.read()

# Serialise {"textkey": <raw json>} and write the secrets file.
with open(output_file, "w") as target:
    target.write(toml.dumps({"textkey": json_text}))
__author__ = "Kavitha Yogaraj"
__status__ = "Development"
# -------------------------------------------------------------
# Import Packages required for Running Code
# -------------------------------------------------------------
import traceback
import os
import sys
from urllib.request import urlopen
import pandas as pd
import pandas as pd  # NOTE(review): duplicate import — harmless; remove when convenient
import zipfile, io
import numpy as np
import re
import datetime
# importing module
import logging
# Create and configure logger (truncates sanctions_etl.log on each run)
logging.basicConfig(filename="sanctions_etl.log",
                    format='%(asctime)s %(message)s',
                    filemode='w')
# Creating an object
log = logging.getLogger()
# Setting the threshold of logger to INFO
log.setLevel(logging.INFO)
# Make the project utilities importable (three levels up).
# NOTE(review): os.path.dirname("__file__") operates on the literal string
# "__file__" (i.e. the current working directory), not the module path —
# probably meant os.path.dirname(__file__); confirm before changing.
util_path = os.path.join(os.path.abspath(os.path.dirname("__file__")), os.pardir, os.pardir, os.pardir)
sys.path.insert(0, util_path)
import time
from sanctions_dict import *
from db_wrapper import *
from xml_df import XML2DataFrame
class Sanctions:
    """
    ETL driver for sanctions lists: fetches each configured source (XML feeds
    such as https://scsanctions.un.org/resources/xml/en/consolidated.xml, or
    OFAC zip archives), converts rows into INSERT statements and loads them.
    Source metadata (urls, fetch types, column maps) comes from sanctions_dict.
    """
    # xml_urls = [ 'https://scsanctions.un.org/al-qaida/',
    #              'https://scsanctions.un.org/resources/xml/en/consolidated.xml', ]

    def __init__(self):
        # Caches for fetched payloads plus lazily-created DB/XML helpers.
        self.response_data = ''
        self.xml_response_data = ''
        self.zipped = ''
        self.db_connect = ''
        self.xml2df = ''

    def db_hndlr(self):
        """Create the DB wrapper (config.ini) and XML helper; True on success."""
        try:
            log.info("======Creating DB Handler using config.ini to read Db Settings ========")
            self.db_connect = db_wrapper()
            print("after getting db:")
            print(self.db_connect)
            self.xml2df = XML2DataFrame()
            # BUG FIX: was '"..." + self.xml2df', which raised TypeError
            # (str + object); print the object separately instead.
            print("Xxxxxxxxxxxxmmmmmmmmllllllll->>>>>>>>>")
            print(self.xml2df)
            return True
        except Exception as err:
            log.critical("Error occured while readin the db settings from config.ini file and the error is %s" % (err))
            self.db_connect = None
            log.critical("exiting the process.....")
            return False

    def single_quote(self, s1):
        """Wrap a value in single quotes for embedding in a SQL literal."""
        return "'%s'" % s1

    def insert_data(self, insert_query):
        """Execute one INSERT and commit; returns (success, message)."""
        log.info("---db_insert, the insert query is :" + insert_query)
        print("---db_insert, the insert query is :" + insert_query)
        message = ''
        stat = False
        try:
            self.db_connect.cursor.execute(insert_query)
            self.db_connect.conn.commit()
            log.info("---db_inserted, the insert query is :" + insert_query)
            # BUG FIX: stat was never set True on success, so callers always
            # saw a failure and record counts stayed at zero.
            stat = True
        except Exception as e:
            stat = False
            print("exception at db_insert")
            print(e)
            print(traceback.format_exc())
        if not stat:
            message = 'Error occurred while inserting this record'
        return stat, message

    def get_xml_dataframe(self, source_dict, type):
        """
        Parse the cached XML payload into one DataFrame covering both
        INDIVIDUALS and ENTITIES. `type` (kept despite shadowing the builtin,
        for caller compatibility) selects the full 'name' frame or the column
        subset listed in source_dict[type]. Returns (success, dataframe).
        """
        stat = False
        df = ''
        print("get_xml_dataframe - type : " + type)
        try:
            print("---get_xml_dataframe, parsing the xml for source: " + source_dict['source'] + " for type: " + type)
            self.xml2df = XML2DataFrame(self.xml_response_data)
        except Exception as e:
            print("Error occured in get_xml_dataframe, parsing the xml and error is %s" % (e))
            return stat, df
        log.info("---get_xml_dataframe, getting individulas & entities for type: " + type)
        if type == 'name':
            for attribute in self.xml2df.root:
                for indiv in attribute.iter('INDIVIDUALS'):
                    df = self.xml2df.process_data(indiv)
                for entity in attribute.iter('ENTITIES'):
                    df2 = self.xml2df.process_data(entity)
            df = pd.concat([df, df2])
            print(df.head())
            log.info("---get_xml_dataframe, concatenated dataframe for type " + type)
            # BUG FIX: stat stayed False even when parsing succeeded.
            stat = True
            return stat, df
        else:
            for attribute in self.xml2df.root:
                for indiv in attribute.iter('INDIVIDUALS'):
                    df = self.xml2df.process_data(indiv)
                    df = df[source_dict[type]]
                for entity in attribute.iter('ENTITIES'):
                    df2 = self.xml2df.process_data(entity)
                    df2 = df2[source_dict[type]]
            df = pd.concat([df, df2])
            log.info("---get_xml_dataframe, concatenated dataframe for type " + type)
            print(df.head())
            stat = True
            return stat, df

    def get_url(self, url):
        """GET `url`; returns (success, raw response bytes or None)."""
        import urllib.error  # local import: module import list untouched
        stat = False
        response_data = None  # BUG FIX: was unbound on the error paths
        try:
            response = urlopen(url)
        except urllib.error.HTTPError as e:
            # BUG FIX: previously `urlopen.error.HTTPError` — urlopen is a
            # function with no .error attribute, so any failure raised
            # AttributeError instead of being handled.
            print('HTTPError: {}'.format(e.code))
        except urllib.error.URLError as e:
            # Not an HTTP-specific error (e.g. connection refused)
            print('URLError: {}'.format(e.reason))
        else:
            response_data = response.read()
            stat = True
        return stat, response_data

    def fetch_data(self, source_dict):
        """Download the source payload; caches xml text or an open ZipFile."""
        stat = False
        if source_dict['fetch_type'] == 'xml':
            print("xml url: " + source_dict['url'])
            stat, self.xml_response_data = self.get_url(source_dict['url'])
            print("After fetching url :")
        elif source_dict['fetch_type'] == 'zip':
            stat, response_data = self.get_url(source_dict['url'])
            if stat:
                self.zipped = zipfile.ZipFile(io.BytesIO(response_data))
        return stat

    def read_file(self, source_dict, file_name):
        """Read one CSV member of the cached zip; returns (success, df|False)."""
        source_col_names = source_dict[file_name]
        stat = False
        try:
            with self.zipped.open(file_name) as f:
                df = pd.read_csv(f, header=None, delimiter=",", names=source_col_names)
        except Exception as e:
            return stat, False
        stat = True
        return stat, df

    def process_df(self, df):
        """Normalise string columns: trim, escape quotes, drop NaN/'-0-'."""
        stat = False
        try:
            df_obj = df.select_dtypes(['object'])
            df[df_obj.columns] = df_obj.apply(lambda x: x.str.strip())
            # "\''" is the two-character sequence '' — SQL quote escaping.
            df[df_obj.columns] = df_obj.apply(lambda x: x.str.replace("'", "\''"))
            df = df.replace({"\n": ' '}, regex=True)
            df = df.replace({"\t": ' '}, regex=True)
            df = df.replace({'-0-': ''}, regex=True)
            df = df.replace(np.nan, '', regex=True)
            # (the original lower-cased the columns twice; once is enough)
            df.columns = map(str.lower, df.columns)
        except Exception as e:
            return stat, False
        stat = True
        return stat, df

    def datetime_now(self):
        """Current local time formatted 'YYYY-MM-DD HH:MM:SS'."""
        return datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")

    def make_queries_xml(self, df, source_dict, type):
        """Build INSERT statements from an XML-derived frame (UN-prefixed ids)."""
        query_list = []
        df['dataid'] = df['dataid'].apply(lambda x: x.split("_")[0])
        df['id'] = df['dataid'].apply(lambda x: 'UN' + str(x))
        for index, row in df.iterrows():
            ID1 = self.single_quote('')
            if type == 'name':
                name = self.single_quote(
                    '{} {} {} {}'.format(row['first_name'], row['second_name'], row['third_name'], row['fourth_name']))
                # BUG FIX: source was interpolated unquoted here (unlike the
                # alt_name/addr branches and make_queries), yielding bad SQL.
                query_value = "( " \
                              + self.single_quote(row['id']) + ", " \
                              + name + ", " \
                              + self.single_quote(row['comments1']) + ", " \
                              + self.single_quote(source_dict['source']) + ", " \
                              + self.single_quote(self.datetime_now()) + ") "
            if type == 'alt_name':
                query_value = "( " + \
                              self.single_quote(row['id']) + ", " \
                              + ID1 + ", " \
                              + self.single_quote(row['alias_name']) + ", " \
                              + self.single_quote(source_dict['source']) + ", " \
                              + self.single_quote(self.datetime_now()) + ") "
            if type == 'addr':
                query_value = "( " + \
                              self.single_quote(row['id']) + ", " \
                              + ID1 + ", " + self.single_quote(row['address']) + ", " \
                              + self.single_quote(source_dict['source']) + ") "
            query = '{} {}'.format(insert_command_types[type], query_value)
            query_list.append(query)
        return query_list

    def make_queries(self, df, source_dict, type):
        """Build INSERT statements from an OFAC zip-derived frame."""
        query_list = []
        df['ent_num'] = df['ent_num'].apply(lambda x: 'OFAC' + str(x))
        if type == 'alt_name':
            df['alt_num'] = df['alt_num'].apply(lambda x: 'OFAC' + str(int(x)) if x != '' else '')
        if type == 'addr':
            df['add_num'] = df['add_num'].apply(lambda x: 'OFAC' + str(int(x)) if x != '' else '')
        for index, row in df.iterrows():
            query_value = ''
            if type == 'alt_name':
                query_value = "( " + self.single_quote(row['ent_num']) + ", " \
                              + self.single_quote(row['alt_num']) + ", " \
                              + self.single_quote(row['alt_name']) + ", " \
                              + self.single_quote(source_dict['source']) + ", " \
                              + self.single_quote(self.datetime_now()) + ") "
            if type == 'name':
                desc = self.single_quote(
                    '{} {} {} {} {} {} {} {} {} {}'.format(row['sdn_type'], row['program'], row['title'],
                                                           row['call_sign'], row['vess_type'], row['tonnage'],
                                                           row['grt'], row['vess_flag'], row['vess_owner'],
                                                           row['remarks']))
                desc = re.sub(r'\s+', ' ', desc).strip()  # raw string for the regex
                query_value = "( " + self.single_quote(row['ent_num']) + ", " + self.single_quote(
                    row['sdn_name']) + ", " + desc + ", " + self.single_quote(
                    source_dict['source']) + ", " + self.single_quote(self.datetime_now()) + ") "
            if type == 'addr':
                address = self.single_quote(
                    '{} {} {} {}'.format(row['address'], row['city_state_zip'], row['country'], row['add_remarks']))
                address = re.sub(r'\s+', ' ', address).strip()
                query_value = "( " + self.single_quote(row['ent_num']) + ", " + self.single_quote(
                    row['add_num']) + ", " + address + ", " + self.single_quote(source_dict['source']) + ")"
            query = '{} {}'.format(insert_command_types[type], query_value)
            query_list.append(query)
        return query_list

    def process_data(self, source_dict, type):
        """Build the INSERT list for one (source, type) pair; [] on failure."""
        print("source_dict : ")
        print(source_dict)
        print("type :" + type)
        query_list = []
        if source_dict['fetch_type'] == 'zip':
            print('zip working fine')
            stat, df = self.read_file(source_dict, source_dict[type])
            # BUG FIX: process_df used to run even when the read failed.
            if stat:
                stat, df = self.process_df(df)
            if stat:
                query_list = self.make_queries(df, source_dict, type)
            return query_list
        if source_dict['fetch_type'] == 'xml':
            stat, df = self.get_xml_dataframe(source_dict, type)
            if stat:
                stat, df = self.process_df(df)
            if stat:
                query_list = self.make_queries_xml(df, source_dict, type)
            return query_list
        # BUG FIX: unknown fetch types used to fall through returning None,
        # which crashed the caller's `for query in query_list` loop.
        return query_list

    def notify_user(self, subject, message):
        """
        This function is used to send email. Reads the email settings from config.
        :param subject: Subject for the mail
        :param message: Email body content
        :return:
        """
        try:
            print("Haven't implemented the email seding details")
        except Exception as e:
            print("Error occured while sending the email and error is %s" % (e))

    def main(self):
        """Run the full ETL: connect, then fetch/parse/insert every source."""
        # BUG FIX: time.clock() was removed in Python 3.8.
        start_time = time.perf_counter()
        start_datetime = self.datetime_now()
        stat = self.db_hndlr()
        # BUG FIX: a stray `stat = True` here used to mask DB-connection
        # failures, so this guard never fired.
        if not stat:
            print("Error occured while connecting to the DB")
            log.critical("Error occured while connecting to the DB exiting sanctions")
            return
        for source_dict in list_dict:
            start_time_event_log = self.datetime_now()
            stat = self.fetch_data(source_dict)
            if not stat:
                log.critical("fetching data had some problem for this source, {} moving on to next soure".format(source_dict['source']))
                continue
            for type in types:
                start_time_event_log = self.datetime_now()
                log_name = self.single_quote("Sanctions_ETL {} {}".format(source_dict['source'], type))
                print(log_name)
                log.info(log_name)
                log.info(log_name + '\tref: ' + tables[type])
                query_list = []
                records = 0
                log.info(" --before processing query list is: {}".format(len(query_list)))
                try:
                    print("before going to process_data")
                    query_list = self.process_data(source_dict, type)
                    print("after going to process_data")
                    print(len(query_list))
                    for query in query_list:
                        log.info(" --query inserting--: {}".format(query))
                        stat, results = self.insert_data(query)
                        if stat:
                            records += 1
                    end_time = time.perf_counter()
                    totaltimetaken = end_time - start_time
                except Exception as err:
                    log.critical("Error occured while processing the data: %s" % (err))
                    end_time = time.perf_counter()
                    totaltimetaken = end_time - start_time
                log.info("totaltimetaken :%s" % (totaltimetaken))
if __name__ == "__main__":
obj = Sanctions()
print("coming here")
obj.main() |
import json
# DEVELOPER: https://github.com/undefinedvalue0103/nullcore-1.0/
# NOTE(review): `logging` here is a placeholder (None), apparently replaced
# by the host application's logger before use — every function below calls
# logging.log(), which would fail until it is injected; confirm.
logging = None
# In-memory copy of config.json, kept in sync by __update__/__reload__.
cfg = json.loads(open('config.json', 'r').read())
def __update__():
    # Persist the in-memory configuration back to disk, pretty-printed.
    with open('config.json', 'w') as fh:
        fh.write(json.dumps(cfg, indent=4))
def __reload__():
    """Re-read config.json, adopting it if changed; rewrite from memory on failure."""
    global cfg
    try:
        # BUG FIX: the handle was opened without being closed; `with`
        # guarantees closure.
        with open('config.json', 'r') as fh:
            new_cfg = json.load(fh)
    except Exception:
        # BUG FIX: was a bare `except:` — Exception keeps the same recovery
        # path without swallowing KeyboardInterrupt/SystemExit.
        logging.log('$FY[$FBCONFIG$FY]$FR Configuration file malformed or not found')
        __update__()
        return
    if new_cfg != cfg:
        logging.log('$FY[$FBCONFIG$FY]$FM Updated')
        cfg = new_cfg
def get(key):
    """Return the value at the dotted *key* path (e.g. 'a.b.c'), or None.

    Once any path segment is missing the result stays None, matching the
    original behavior.
    """
    __reload__()
    logging.log('$FY[$FBCONFIG$FY]$FM GET $FG%s'%key)
    node = cfg
    for k in key.split('.'):
        # Idiom fix: isinstance() instead of type(...) == dict, and direct
        # `in` membership instead of `.keys()`.
        if isinstance(node, dict) and k in node:
            node = node[k]
        else:
            node = None
    return node
def set(key, value):
    """Set the dotted *key* path to *value* and persist the config.

    (Name shadows the builtin `set`, kept for caller compatibility.)
    SECURITY/BUG FIX: the original built an exec() string from the key and
    repr(value) — a code-injection vector that also round-tripped values
    through repr. Replaced with a direct dict traversal; a missing
    intermediate key still raises KeyError, as the exec version did.
    """
    global cfg
    logging.log('$FY[$FBCONFIG$FY]$FM POST $FG%s $FY= $FR%s'%(key, repr(value)))
    keys = key.split('.')
    node = cfg
    for k in keys[:-1]:
        node = node[k]
    node[keys[-1]] = value
    __update__()
if __name__ == '__main__':
    # Smoke test for get/set.
    print(get('key.key2.key3'))
    # BUG FIX: the original line was print(exec('__config__[...]')) —
    # `__config__` is undefined (NameError) and exec() always returns None.
    # Exercise the public set() API instead.
    set('key.key2.key3', 'value')
    print(get('key.key2.key3'))
|
lines = """LLLLLLL.LLLLLLLLLLLLLLL.LLL.LLLL.LLLLLLL.L.LLLLLLL.LLLLLL.LLLLLLLL.LLLLL..LLLLLLLLLLLLLLLLLL
LLLLLLL.LLLLLL.LLLL.LLL.LLLLLLLLLL.LLLLLLL.LLLLL.L.LLLLLLLLLLLLLLL.LLLLLLLLLLLLLLLLLLLLLLLLL
.LLLLLLLLLLLLLLL.LLLLLL.LLLLLL.LLLLLLLL.LL.LLLLLLL.LLL.LL.LLLLLLLLLLL.LLL..LLLLLLLL.LLLLLLLL
LLLLLLLLLLLLLLL..LLLLLL.LLLLLLLLLLLLL.LLLL.LLLLLLLLLLLL.L.LLLLLLLL.LLLLLLLLLLLLLLLLLLLLLLLLL
LLLLL.L.LLLLLLLLLLLLLLL.LLLLLLLLLLLLLLLLLL.LLLLLLL.LLL..LLL.LLLLLL.LLLLLL.LLLL.LLLL.L.LLLLLL
LLLLLLL.LLLLLLLLLLLLLLLLLLLLLLLL.L.LL.LLLL.LLLLLL..LLLLLL.LLLLLLLL..LLLLL.LLLLLLLL..LLLLLLLL
LLLLLLL.LLLLLLLL.LLLLLL.LLLLL.L.LLLLLLLLLLLLLLLLLL.LLLLLLLLLLLLLLLLLLLLLL.LLLLLLLLL.LLLLLLLL
.LLLLLL..LLLLLLL.LLLLLL.LLLLL.LLLLLLLLLLLL.L.LLLLLLLLLLLLLLLLLLLLL.LLLLLL.LLLLLLLLLLLLLL.LLL
..LLLL......L.L..L..L..L.....LL..L.....L.....LLL.L.L.LL...L.....L...LL..L......L...LL......L
LL.LLLL.LLLLLLLLL.LLLLL.LLLLL.LLLLLLL.LLLL.LLLLLL..LLLLLL.LLLL.LLL.LLLLLLLLLLLLLLLLLLLLLLLLL
LLLLLLL.LLLLLLLLLLLLLLL.LLLLLLLLLLLLLLLLLLL.LLLLLL.LLLLLLLL.LLLLLL..LLLLLLLLLLLLLLLLLLLLLLLL
LLLLLLLLLLLLLLLL.LLLLLL.LLLLL.LLLLLLL.LLLL.LL.LLLLLL.LLLL.LLLLLLLL.LLLLLL..LL.LLLLL.LLLL..L.
LLLLLLLLLLLLLL.LL.LLLLL.LLLLLLLLLLLLL.L.LL.LLLLLLL.L.LLLL.LLLLLLLL.LLLLLL.L.LLLLLLLLLLLLLLLL
LLLLLLL.L.LLLLLL.LLLLLLLLL.LL.LLLLL.LLLLL..L.LLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLL.LLLLL.LL
LLLLLLLLLLLLLLLL.LLLLLLLLL.LL.LLLLLLL.LLLL.LLLLLLL.LLLLLLLLLLLLLL..LLLLLL.LLLLLLLLL.LLLLLLLL
LLLLLLL.LLLLLLL..LLLLLLLLLLLL.LLLLLLLLLLLL.LLLLLLLLLLLLLLLLLLLL.LL.LLLLLLLLLLLLLLLL.LLLLLLLL
LLLLLLL.LLLLLLLLLLLLLLLLLLLLL.LLLLLLL.LLLL.LLLLLLL.LLLLLL.L.LLLLLLLLLLLLL.LLLLLLLLL.LLLLLLLL
LLLLLLLLLLLLLLLL.LLLLL..LLLLLLLLLL.LLL.LLLLLLLLLLLLLL.LLL.LLLLLLLLL.LLLLL.L.LLLL.LLLLLLLLLLL
.L......L.LL.L...L.....L..L..L...LL.L.......L.............LL.L......LL.L...LL.L...L.....LL..
LL.LLLL.LLLLLLLLLLLLLLL.LLLLL.LLLLLLLLL.LLLLLLLLLL.LLLLLL.LLLLLLLLLLLLLLL.LLLLLLLLL.LLLLLLLL
LLLLL.L.L.LLLLLLLLL.LLLLLLLLLLLLL.LL.LLLLLLLLLLLLL.LLLLLLLLLLL.LLLLLLLLLLLLLLLLLLLLLLLLLLLLL
LLLL.LL.LL.LLLLL.LLLL.L.LLLLL.LLLLLLLLLL.LLLLLLLLL..L.LLLLLLLLLLL.L.LLLLL.LLLLLLLLLLLLLLLL.L
LLLLLLL.LLLLLLLLLLLLLLL.LLLLL.LLLLLLL.LLLLLLLLLLLL.LLLLLL.LLLLLLLL.LLLLLL.LLLLLLLLL.LLLLLL.L
.......LL...L.......LL...L.......LLL......L..L...L...L.....LL..L.L......L.LL.....LL.....L...
LLLLLLLLLLLLLLLL.LLLLLL.LLLLL.LLLL.LL.LLLL.LLLLLLL.LLLLLLLLLLLLLLLLLLLLLL.LLLLLLLLLLLL.LLLLL
LLLLLLL.LLLLLLLLLLLLLL..LLLLLLLLLLLLL.LLLL.LL.LLLL.LLLLLLLLLLLLLLL.LLLLLL.LLLLLLLLL.LLLLLLLL
LLLLLLLLLL.LLLLLL.L.LLL.L.LLL.LLL.LLLLLLLL.LLLLLLL.LLLLLLLLLLLLLLL.LLLLLL.LLLLLLLLLLLLLLLLLL
L.LLLLLLL.LLLLLL.LLLLLLL.LLLLLLLLLLLL.LLLLLLLLL.LLLLLLL.LLLLLLLLL..LLLLLL.LLLLLLLLL.LLLLLLLL
LL.LLLL.LLLLLLLL.LLLLLL.LLLLL.LL.LLLLLLLLL.LLLLLLL.LLLLLLLLL.LLLLL.LLLLLL.LLLLLL.LL.LLLLLLLL
LLLLL.L.LLLLLLLL.LLLLLLLLLL.L.LLL.LLL.LLLL.LLLLLLL.LLLLLLLLLLLLLLLLLLLLLLLLLLL.LLLL.LLLLLLLL
LLLLLLL.LLLLLLLL.LLLLLL.LLLLL.LLLLL.L.LLLLLLLLLLLL.LLLLLL.LLLLLLLL.LLLLLL.LLLLLL.LL.LLL.LLLL
L..LL.........L....L....L..L.LL.....LL.L.LL...L........L...L...L...L.LLL..LL..LLLLL.L.......
LLLLLLL.LLLLLLLLLLLL.LL.LLLLL.LLLLLLL.LLLLLLLLLLLLLLLLL.L.LLLLLLLL.LLLLLL.LLLLLLL.L..LLLLLLL
LLLLLLLL.LLLLLLL.LLLLLL.LLLLL.LLLLLLLLLLLL.LLLLLLL.LLLLLL.LLLLLLLL.LLLLLL.LLLL.L.LL.LLLLLLLL
.LLLLLL.LLLLLL.L.LLLLLL.LLLLL.LLLLLLL.L.LL.LLLLLLLLLLL.LLLLLLLLLLL.LLLLLL.LLLLLLLL..LLLLLLLL
LL.LLLL..LLLLLLL.LLLLLL.LLLLL.LLLLLLLLLLLL.LLLLLL..LLLLLLLLLLLLLLLLLLLLLL.LLLLLLLLL.LLLLLLLL
L..LL..........L......L..L....L..L.L.LL.LLL..L.L.L.LLLL...L..L..L.L.L..LLLL.L.L...L..LL.....
LLLLLLLLLLLLLLLLLLLLLLL.LL.LLLLLLLLLL.LL.L.LLLLLLL.LLLLLL.LLL.LLLLLLLLLLLLLLLLLLLLL.LLLLLLLL
LLLLLLL.LLLLLLLL.LLLLLLLLLLLL.LLLL.LL.LLLLL.LL.LLL.LLLLLLLLLLLLLLL.LLL.LLLLLLLLLLLLL.LLLLLL.
LLLLLL.LLLLLLLLL.LLLLL.LLLLLL.LLLLLLLLLLLLLLLLLLLL.LLLLLL.LLLLLLLL.LLLLLL.LLLLLLL.LLLLLLLLLL
LLLLLLLLLLL.LLLL.LLLLLLLLLLLLLLLLLLLL.LLLL.LLLLLLLLLLLLLL.LLLLLLLL.LLLLLL.LLLLLLLLL.LLLLLLLL
LLLLLLL.LLLL.LLL.LLLLLL.LLL.L.LLLLLLLLL.LL.LLLLLLLLLLLLLL.LLLLLLLL.LLLLLLLLLLLL.LLL.L.LLLLLL
LLLLLLLLLLLLL.LL.LLLLLL.LLL.L.LLLLLLL.LLLLLLLLLLLL.LLLLLL.LLLL.LLL.LLLLLL.LLLLLL.LLLLLLLLLLL
.L.LLLL.L.LLL.L....L...LL.L.L..LL.L.LLLL.L..L..L.L.LL...LL..L.L.L...LL.L.......LL..LLL......
LLLLL.L..LLLLLLLLLLLLLLLLLLL.LLLLLL.LLLLLL.LL.LLLL.LLLLLL.LLLLLL.L.LLLLLL.LLLLLLLLLLLL.LLLLL
LLLLLLLLLLLLLLLL.LLLLLLLLLLLL.LLLLLLL..LLL.LLLLLLL.LLLLLL.LLLLLLLLLLLLLLLLLLLLLLLLL.L.LLLLLL
LLLLLLL.LLLLLLLL.LLLL.L.LLL.LLLLLLLLL.LLLL.LLLLLLL.L..LLL.LLLLLLL..LL.LLL.LLLLLLLLL.LLL.LLL.
LLLLLLL.LLLLLLLL.LLLLLL.L.LLLLLLLLLL..LLLLLLLLLLLL.L.LLLL.LLLLLLLL.LLLLLL.L.LLLL.LL.LLLL.LLL
LLLL.LLLLLLLLLL.LLLLLLL.LLLLLLLLLLLLL.LLLLLLLLLLLLLLLLLLLLL.LLLLLL.LLLLLLLLLLLLLLLL.LLLLLLLL
LLLLLLL.LLLLLLLLLLLLLLL.LLLLL.LLLLLLL.LLLL.LLLLLLL.LLLLLL.LL.LLLLL.LLL.LL.LLLLLLLLLLLLLLLLLL
LL.LLLLLLLLLLLLL.LLLLLLLLLLLL.LLL.LLL.LLLL.LLLLLLLLLLLL.L.LLLLL.LL.LLLLLL.L.L.LLLLLLLLLLLLLL
LLLLLL.LLLLLL.LL.LLLLLLLLL.LLLLLLLLLL.LLLL.LL.LLLLLLLLLLL.LL.LLLLL.LLLLL.LLLLLLLLLL.LLLLLLLL
.LLL.......LL.LL....L..L.....LL.....L..L...LLL....L...L..LLL...L............LLLL.LL.......L.
LLLLL.L.LLLLLLLLLLLLLLLLLLLLL.L.LLLLLLLLLLLLLL.LLLLLLLLLL.LLLLLLLLLLL.LLLLLLLLLL.LL.LLLLLLLL
LLLLLLLLLLLLLLLLLLLLLLL.LLLLL.LLLLLLL.LLLL.LLLLLLLLLLLLLL.LLLLLLLL.LLLL.LLLLLLLLLLLLLLLLLLLL
L.LLLLL...LLLLLLLLLLLLL.LLLLLLLLLLLLL..LLLLLLLLLLLLLLLLLLLLLLLLLLL.LLLLLLLLLLLLLLLL.LLLLLLLL
LLLLLLLLLLLLLLLL.L.L.LLLLLLLL.LLLLLLL.LLLL.LLLLL.L.LLLLLL.LLLLLLLL.LLLLLLLLLLLLLLLL.LLLLLLLL
LLL.LLLLLLLLLLLLLLLLLLL.LLL.LLLLLLLLLL.LLL.LLLLLL..LLLLLL.LLLLLLLL.LLLLLLLLLLLLLLLL.LLLLLLLL
L..L.LL...LLL.L...LLL....L.L.L.L.....LL..........LL...L.L.L.LLLL....L.....L....LLL......LLLL
LLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLL.LLLLLLLLLLLLLL.L.LLLLLL.LLLLLL.LLLLLLLLL.LLLLLLLL
LLLLLLL..LLLLLLLLLLLLLL.LLLLLLLLLLLLLL.LLL.LLLLLLL.LLLLLLLLLLLLLLLLLL.LLL.L.L.LL.LLLLLLLLLLL
LLLLLLLLLLLLLLLL.LLLL.L.LLLLL.LLLL.LLLLLLL.LLLLLLLLLLLLLLLLLLLLLLL.L.LLLLLLLL.LLLLLLLLLLLLLL
LLLLLLL.LLLLLLLL.LLLLLL.LLLLL.LLLLLLLLLLLL.LLLLLLL.LLLLLLLLLLLLLLL.LLLLLL.LLLLLLLLL.LLLLLL.L
LLLLLLLL.LLLLLLL.LLLLLLLLLLLL.LLLLLLL.LLLL.LLL.LLL.LLLLLL.LLLLLLLL.LLLLLL.LLL.LLLLL.LLLLLLLL
LLLLLLL.LLLLLLLL.LLLLLL..LLLLLLLLLLLL.LLLL.L..L.LL.LLLLLL.LLLLLLLLLLLLLLLLLLLLLLL.L.LLLLLLLL
LLLLLLL.LLLLLLLL.LLLLLL.LLLLLLLLLLLL..LLLLLLLLLLLL.LLLLLLLLLL.LLLL.LLLLLL.LLLLLLLLL.LLLLLLLL
LLLLLLLLLLLLLLLL.LLLLLL.LLLLLLLLLLLLLLLL.L.LLLLLLL.LLLLLL.LLLLLLLL.LLLLLL.LLLLLLLLLLLLLLLLLL
LLLLLL..LLLLLLLL.LLLLLLLLLLLL.LLLLLL..LLLL.LLLL.LL.L.LLLL.LLLL.LLL.LLLLLL.LLLLLLLLLLLL.LLLLL
........LL..LL..L............L..L.L..L....L.......LL...L......L....LLL..L.L......LL..LL.L.L.
LLLLLLL.LLLLLLLL.LLLLLLLLLLLL.LLLLLL..LLLL.LLLLLLL.LLL.LL.LLLLLLLL.LLLLLL.LLLLLLLLL.LLL.LLLL
LLLLLL.LLLLLLLLL.LLLLLL.LLLLL.LLLLLLLLLLLL.LLLLL.LLLLLLLL.LLLLLLLLLLLLLLL.LLLLLLLLL.LLLLLLLL
LLL.LLL.LLLLLLL.LLLLLLL.LLLLL.LLLLLLLLLLL..LLL.LLL.LLLLLL.LLLLLLLLLLLLLLL.LLLLLLLLL.LLLLLLLL
LLLLLLL.LLLLLLLL.LLLL.L.LLLLL.LL.LLLL.LLLL.L.LLLLL.LL.LLLLL.LLLLLLLLLLLLLLLLLLLLLLLLLL.LLLLL
LLLL.LLLLLLLLLLLLLLLLLL.LLLLLLLLLLLL..LLLL.LLLLLL.LLLLLLL.LLLLLL.LLLLLLLL.LLLLLLL.L.LLLLLLLL
LLLLLLL.LLLLL.LL.LLL.LL.LLLLLLLLLLLLLLLLLLLLLLLLLLL.LLLLL.LLLLLLLLLLLLL.LLLLLL.LLLL.LL.LLLLL
LLLLLLLLLL.LLLLLLLLLLLL.LLLLL.LLLLLLL.LLLLLLLLLLLLLLLLLLL.LLLLLLLL.LL.LLLLLLLLLLLLL.LLLLLLLL
LLLL.L.L...L.......L..LL.LL.LL....LL..L.L.L......L...LLL..L.L.L...LLL...L.....L.LLLL........
.LLLLLL.L.LLLLLL.LLLLLLLLLLL..LLLLLLL.LLLL.LLLLLLL.LLLLLL.LLLLL.LLLLLLLLL.LLLL.LLLL.LLLLLLLL
LLLLLLL.LLLLLLLLLLLLLLLLLLLLL.LLLLLLL.LLLL.LLLLLLL.LLLLLL.LLLLLLLL.LLLLLLLLLLLLLLLL.LLLLLLL.
LLLLLLL.L.LLLLLLLLLLLLL.LLLLL.LLL.LLL.LLLL.LLLLLLL.LLLLLLLLLLLLLLL.LLL.LL.L.LLLLLLLLLLLLLLLL
LLLLLLL.LLLLL.LL.LLLLLL.LLLLLLLLLLLLL.LLLL.LLLLLLLLLLL.LL.LLLLLLLL.LLLLLLLLLLLLLLLL.LLLLLLLL
L.LLLLL.LLLLL.LLLLLLLLL.LLLLL..LLLLLL.LLLL.LLLLLLL.L.LLLL.LLLLLLLL.LLLLLL.LLLLLLLLL.LLL.LL..
..L...L.L.......LL.LLL.LLL..L....L..L.L.L..L.L........L...L.LL.L.........LLLLL.L.LL.L...L.LL
LLLLLLL.LLLLL.LL.LLLLLL.LLLL..LLLLL.L.LL.LLLLLL.LL.LLLLLLLLLLLLLLL.LLL.LLLLLLLLLLLLLLLLLLLLL
LLLLLLL.L.LLLLLLLLLLLLL.LLLLL.LL.LLLLLLL.LLLLLLLLL.LLL.LL.LLLLLLLL.LLL.LL.LLLLLLLLL.LLLLLLLL
LLLLLLL.LLLLLLLLLLLLLLLLLLLLLLLLLL..L.LLLLLLLLLLLL.LLLLLL.L.LLLLLL.LLLLLL.LLLLLLLLL.LLL.LLLL
.LLLLLL.LL.LL.LLLLLL.L..LLLLLLL.LLLLL.LLLL.LL.LL.L.LL.L.L.LLLLLLLL.LLLLLLLLLLLLLLLL.LLLLLLLL
LLLLLLLLLLLLLLLLLLLLLLL..LLLLLLLL.LLL.LLLLLLLLLLLLLLLLLL..LLLLLLLL.LLLLLL.LLLLLLLLLLLLLLLLLL
LLLLLLL.LLLLLLLL.LLLLLL.LLLLL.LLLLLLLLLLLL.LLLLLLL.LL.LL..LLLLLLLL.LLLLLLLLLLLLLLLL.LLL..LLL
LLLLLLLLL.LL.LLL.LLLLLLLLLLLLLLLLLLLL.LLL..LLLLLLLLLLLLLL.LLLLLLLL.LLLLLL.LLLLLLLLL.LLLLLLLL
LLLLLLL.LLLL.LLLLLLLLLLL.LLLL.LLLLLLLLLL.L.LLLLLLL.LLLLLL.LLLLLLLLLLLLLLLLLLLLLLLLLLLL.LLLLL
LLL......LL...L...L..LL..L..L......L...L.L..L.........L.LL.LL..L..L.L......L..L........L..L.
LL.LLLLLLLLLLLLL.LL.LLLLLLLLLLLLLLLLLLLLLL.LLLLLLL.LLLLLLLLLLLLLLL.LLLLLLLLLLLLL.LLLLLLLLLLL
LLLLLLLLLLLLLL.L.LLLLLLLLLLLL.LL.LLLL..LLL.LLLLLLLLLLLLLLLLLLLLLLL.LLLLLL.LLLLLL.LL.LLLLLLLL
LLLLLLL.LLLLLLLL.LLLLLL.LLLLL.LLLLLLL.LLLLLLLLLL.L.LLLLLL.LLLLLLLL..LLLLL.LLLLLLLLL.LLLLLLLL
LLLLLLL.LLLLLLLL.LLLLLLLLLLLLLLLLLLLL.LLLLLLLLLLLL.LLLLLLLLLLL.LLL.LLLLLL.LL.LLLLLL.LLLLLLLL
LLLLLLLLLLLLLLLL.LLLLLL.LLL.L.LLLLLLL.LLLLLLLLLLLL.LLLLLLLLL.LLL.L.LLLLLL.LLLLLLLLLLLLLLLLLL
LLLLLLL.LLLLL.LL.LL.LLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLL..LLLLLLLLLLLLLLLLLLLLLLLL.LLLLLLLL
LLL.LLLLLLLLLLLL.LLLLLLLLLLLL.LLLLLLLLLLLLLLLLLLLL.LLLLLL.LLL.LLLLLLLLLLL.LLLLLLLLL.LLLLLLLL
"""
# lines = """L.LL.LL.LL
# LLLLLLL.LL
# L.L.L..L..
# LLLL.LL.LL
# L.LL.LL.LL
# L.LLLLL.LL
# ..L.L.....
# LLLLLLLLLL
# L.LLLLLL.L
# L.LLLLL.LL"""
# Normalise the raw puzzle input into a list of row strings.
lines = lines.strip()
lines = [line for line in lines.split("\n")]
from helpers import timer
import copy
# Inclusive grid bounds used by the out-of-bounds checks below.
minX = 0
minY = 0
maxX = len(lines[0]) - 1
maxY = len(lines) - 1
def isOutofXBounds(coord):
    """Return True when an x coordinate lies outside [minX, maxX]."""
    return not (minX <= coord <= maxX)
def isOutOfYBounds(coord):
    """Return True when a y coordinate lies outside [minY, maxY]."""
    return not (minY <= coord <= maxY)
def noOccupiedAdjacent(coords, state):
    """Return True when none of the 8 cells adjacent to `coords` holds '#'.

    coords is a (y, x) pair; out-of-grid neighbours are ignored.
    """
    y, x = coords
    for dy in (-1, 0, 1):
        for dx in (-1, 0, 1):
            if dy == 0 and dx == 0:
                continue  # skip the cell itself
            ny, nx = y + dy, x + dx
            if isOutOfYBounds(ny) or isOutofXBounds(nx):
                continue
            if state[ny][nx] == "#":
                return False
    return True
def hasFourAdjacent(coords, state):
    """Return True when at least 4 of the 8 cells adjacent to `coords`
    hold '#'. coords is a (y, x) pair; out-of-grid neighbours are ignored."""
    y, x = coords
    occupied = 0
    for dy in (-1, 0, 1):
        for dx in (-1, 0, 1):
            if (dy, dx) == (0, 0):
                continue  # skip the cell itself
            ny, nx = y + dy, x + dx
            if isOutOfYBounds(ny) or isOutofXBounds(nx):
                continue
            if state[ny][nx] == "#":
                occupied += 1
    return occupied >= 4
@timer
def part1():
    """Run the seating automaton (AoC 2020 day 11, part 1 rules: direct
    neighbours, tolerance 4) to a fixed point and return how many seats
    end up occupied ('#')."""
    currentState = copy.deepcopy(lines)
    # Scratch grid, same dimensions; every cell is overwritten each round.
    nextState = [
        [0 for j in range(len(currentState[0]))] for i in range(len(currentState))
    ]
    while True:
        for y in range(len(currentState)):
            for x in range(len(currentState[y])):
                if currentState[y][x] == ".":
                    # Floor never changes.
                    nextState[y][x] = currentState[y][x]
                elif currentState[y][x] == "L" and noOccupiedAdjacent(
                    [y, x], currentState
                ):
                    nextState[y][x] = "#"
                elif currentState[y][x] == "#" and hasFourAdjacent(
                    [y, x], currentState
                ):
                    nextState[y][x] = "L"
                else:
                    nextState[y][x] = currentState[y][x]
        # Fixed point reached: no seat changed this round.
        if nextState == currentState:
            break
        currentState = copy.deepcopy(nextState)
    return sum([line.count("#") for line in currentState])
def seeNoOccupied(coords, state):
    """Return True when no occupied seat ('#') is visible from `coords`
    along any of the 8 compass rays; the first seat met ('L' or '#')
    blocks the rest of that ray."""
    for step_y, step_x in ((-1, 0), (0, 1), (1, 0), (0, -1),
                           (-1, -1), (1, -1), (-1, 1), (1, 1)):
        y, x = coords
        while True:
            y += step_y
            x += step_x
            if isOutOfYBounds(y) or isOutofXBounds(x):
                break
            cell = state[y][x]
            if cell == "#":
                return False
            if cell == "L":
                break  # empty seat blocks the line of sight
    return True
def see5Occupied(coords, state):
    """Return True when at least 5 occupied seats ('#') are visible from
    `coords` — first seat along each of the 8 rays; 'L' blocks a ray."""
    visible = 0
    for step_y, step_x in ((-1, 0), (0, 1), (1, 0), (0, -1),
                           (-1, -1), (1, -1), (-1, 1), (1, 1)):
        if visible >= 5:
            return True  # early exit: threshold already reached
        y, x = coords
        while True:
            y += step_y
            x += step_x
            if isOutOfYBounds(y) or isOutofXBounds(x):
                break
            cell = state[y][x]
            if cell == "#":
                visible += 1
                break
            if cell == "L":
                break
    return visible >= 5
@timer
def part2():
    """Run the automaton with part-2 rules (line-of-sight neighbours,
    tolerance 5) to a fixed point and return the occupied-seat count."""
    currentState = copy.deepcopy(lines)
    # Scratch grid, same dimensions; every cell is overwritten each round.
    nextState = [
        [0 for j in range(len(currentState[0]))] for i in range(len(currentState))
    ]
    while True:
        for y in range(len(currentState)):
            for x in range(len(currentState[y])):
                if currentState[y][x] == ".":
                    # Floor never changes.
                    nextState[y][x] = currentState[y][x]
                elif currentState[y][x] == "L" and seeNoOccupied([y, x], currentState):
                    nextState[y][x] = "#"
                elif currentState[y][x] == "#" and see5Occupied([y, x], currentState):
                    nextState[y][x] = "L"
                else:
                    nextState[y][x] = currentState[y][x]
        # Fixed point reached: no seat changed this round.
        if nextState == currentState:
            break
        currentState = copy.deepcopy(nextState)
    return sum([line.count("#") for line in currentState])
# Solve and report both puzzle parts.
print(f"Answer 1: {part1()}")
print(f"Answer 2: {part2()}")
|
import sys,os
import gui
import core
import argparse
# Command-line arguments are defined here
# Single boolean flag (--gui, also accepted as -gui) selecting the front-end.
parser = argparse.ArgumentParser()
parser.add_argument("-gui","--gui", help="run core with gui",
                    action='store_true')
args = parser.parse_args()
# Launch the GUI front-end when requested, otherwise run headless.
if args.gui:
    gui.main()
else:
    core.main()
|
from django.conf import settings
from django.db import models
from django_extensions.db.models import TimeStampedModel
from tickets.enums import Urgency,Location,Category
class Ticket(TimeStampedModel):
    """A maintenance/support ticket with location, category and urgency."""

    title = models.CharField(max_length=255)
    location = models.CharField(choices=Location.choices(), default=Location.ADMIN
                                , max_length=20)
    category = models.CharField(choices=Category.choices(), default=Category.OTHER, max_length=20)
    unit_number = models.CharField(max_length=255, default=None, blank=True, null=True)
    description = models.TextField(max_length=1000, null=True, blank=True)
    public = models.BooleanField(default=True)
    created_by = models.ForeignKey(
        settings.AUTH_USER_MODEL, on_delete=models.DO_NOTHING, related_name='tickets'
    )
    urgency = models.CharField(choices=Urgency.choices(), default=Urgency.LOW, max_length=20)
    email = models.EmailField(default=None, blank=True, null=True)
    imagefile = models.FileField(upload_to='images/', null=True, verbose_name="")

    def get_description(self):
        """
        If urgency high, print 3 exclamation marks instead of one.

        Returns '' when no description was supplied: the field is nullable,
        so the old code raised AttributeError on None.replace().
        """
        d = self.description
        if d is None:
            return ''
        # NOTE(review): urgency is stored as a CharField; this assumes
        # Urgency.HIGH compares equal to the stored string — confirm against
        # the Urgency enum definition.
        if self.urgency == Urgency.HIGH:
            d = d.replace('!', '!!!')
        return d

    def __str__(self):
        # The old call also passed an unused `title` kwarg; dropped.
        return 'Ticket #{id}'.format(id=self.id)
|
import numpy as np
import torch
class NeighborFinder:
    """Temporal-neighbourhood lookup over a user/item interaction graph.

    For each node id the interactions are stored as three parallel numpy
    arrays (neighbors, edge indices, timestamps) sorted by timestamp, so
    "everything before time t" becomes a binary search plus a slice.
    """

    def __init__(self, adj_list, n_user, n_item, uniform=False, seed=None, use_mem=False):
        self.node_to_neighbors = []
        self.node_to_edge_idxs = []
        self.node_to_edge_timestamps = []
        # TODO(review): node ids appear to be 1-based (index 0 unused, see the
        # assert below) — hence the +1; confirm.
        adj_list_new = [[] for _ in range(n_user + n_item + 1)]
        for u in adj_list:
            assert u != 0
            adj_list_new[u] = [x for x in adj_list[u]]
        for neighbors in adj_list_new:
            # Neighbors is a list of tuples (neighbor, edge_idx, timestamp)
            # We sort the list based on timestamp
            sorted_neighhbors = sorted(neighbors, key=lambda x: x[2])
            self.node_to_neighbors.append(
                np.array([x[0] for x in sorted_neighhbors]))
            self.node_to_edge_idxs.append(
                np.array([x[1] for x in sorted_neighhbors]))
            self.node_to_edge_timestamps.append(
                np.array([x[2] for x in sorted_neighhbors]))
        self.uniform = uniform
        if seed is not None:
            self.seed = seed
            # NOTE(review): random_state is created here but the uniform
            # sampling branch below calls np.random directly, so the seed has
            # no effect on sampling — verify intent.
            self.random_state = np.random.RandomState(self.seed)
        # Optional memoisation of find_before() results keyed by (node, time).
        self.ngh_memory = {}
        self.use_mem = use_mem

    def find_before(self, src_idx, cut_time):
        # if src_idx == 0:
        #     return np.array([]), np.array([]), np.array([])
        """
        Extracts all the interactions happening before cut_time for user src_idx in the overall interaction graph. The returned interactions are sorted by time.
        Returns 3 lists: neighbors, edge_idxs, timestamps
        """
        # searchsorted (side='left'): i = count of timestamps strictly < cut_time.
        i = np.searchsorted(self.node_to_edge_timestamps[src_idx], cut_time)
        return self.node_to_neighbors[src_idx][:i], self.node_to_edge_idxs[src_idx][:i], self.node_to_edge_timestamps[src_idx][:i]

    def get_temporal_neighbor(self, batch_source_node, batch_timestamp, n_neighbors=20):
        """
        Given a list of users ids and relative cut times, extracts a sampled temporal neighborhood of each user in the list.
        Params
        ------
        src_idx_l: List[int]
        cut_time_l: List[float],
        num_neighbors: int
        """
        assert (len(batch_source_node) == len(batch_timestamp))
        tmp_n_neighbors = n_neighbors if n_neighbors > 0 else 1
        # NB! All interactions described in these matrices are sorted in each row by time
        out_neighbors = np.zeros((len(batch_source_node), tmp_n_neighbors)).astype(np.int32)  # each entry in position (i,j) represent the id of the item targeted by user src_idx_l[i] with an interaction happening before cut_time_l[i]
        out_timestamps = np.zeros((len(batch_source_node), tmp_n_neighbors)).astype(np.float32)  # each entry in position (i,j) represent the timestamp of an interaction between user src_idx_l[i] and item neighbors[i,j] happening before cut_time_l[i]
        baout_edges = np.zeros((len(batch_source_node), tmp_n_neighbors)).astype(np.int32)  # each entry in position (i,j) represent the interaction index of an interaction between user src_idx_l[i] and item neighbors[i,j] happening before cut_time_l[i]
        for i, (source_node, timestamp) in enumerate(zip(batch_source_node, batch_timestamp)):
            # extracts all neighbors, interactions indexes and timestamps of all interactions of user source_node happening before cut_time
            if self.use_mem:
                if (source_node, timestamp) in self.ngh_memory:
                    source_neighbors, source_edge_idxs, source_edge_times = self.ngh_memory[source_node, timestamp]
                else:
                    source_neighbors, source_edge_idxs, source_edge_times = self.find_before(source_node, timestamp)
                    self.ngh_memory[source_node, timestamp] = (source_neighbors, source_edge_idxs, source_edge_times)
            else:
                source_neighbors, source_edge_idxs, source_edge_times = self.find_before(source_node, timestamp)
            if len(source_neighbors) > 0 and n_neighbors > 0:
                if self.uniform:  # if we are applying uniform sampling, shuffles the data above before sampling
                    # Sampling with replacement: the same interaction may
                    # appear more than once in a row.
                    sampled_idx = np.random.randint(0, len(source_neighbors), n_neighbors)
                    # if n_neighbors <= len(source_neighbors):
                    #     sampled_idx = np.random.choice(len(source_neighbors), n_neighbors)
                    # else:
                    #     sampled_idx_1 = np.arange(len(source_neighbors))
                    #     sampled_idx_2 = np.random.randint(0, len(source_neighbors), n_neighbors - len(source_neighbors))
                    #     sampled_idx = np.concatenate((sampled_idx_1, sampled_idx_2))
                    out_neighbors[i, :] = source_neighbors[sampled_idx]
                    out_timestamps[i, :] = source_edge_times[sampled_idx]
                    baout_edges[i, :] = source_edge_idxs[sampled_idx]
                    # re-sort based on time
                    pos = out_timestamps[i, :].argsort()
                    out_neighbors[i, :] = out_neighbors[i, :][pos]
                    out_timestamps[i, :] = out_timestamps[i, :][pos]
                    baout_edges[i, :] = baout_edges[i, :][pos]
                else:
                    # Take most recent interactions
                    source_edge_times = source_edge_times[-n_neighbors:]
                    source_neighbors = source_neighbors[-n_neighbors:]
                    source_edge_idxs = source_edge_idxs[-n_neighbors:]
                    # assert (len(source_neighbors) <= n_neighbors)
                    # assert (len(source_edge_times) <= n_neighbors)
                    # assert (len(source_edge_idxs) <= n_neighbors)
                    # Right-align so the zero padding sits on the left.
                    out_neighbors[i, n_neighbors - len(source_neighbors):] = source_neighbors
                    out_timestamps[i, n_neighbors - len(source_edge_times):] = source_edge_times
                    baout_edges[i, n_neighbors - len(source_edge_idxs):] = source_edge_idxs
        return (out_neighbors, baout_edges, out_timestamps)
|
from django.contrib import admin
# Register your models here.
from api.models import Resource
from api.models import Location
# Expose the API models in the Django admin.
# (Removed a stray trailing "|" that made this line a syntax error.)
admin.site.register(Resource)
admin.site.register(Location)
# Simple %-formatting demo, ported from Python 2 print statements to the
# Python 3 print() function (the rest of the project uses Python 3 syntax).
age = 21
name = 'YanYu'
print('%s is %d years old' % (name, age))
print('Why is %s is playing with python?' % name)
# print_count.py
def print_count(n):
    """Yield the lines of a small counting report, one string per line.

    Ported from Python 2: `xrange` does not exist in Python 3, so the
    generator used `range` instead.
    """
    yield "Hello World\n"
    yield "\n"
    yield "Look at me count to %d\n" % n
    for i in range(n):
        yield " %d\n" % i
    yield "I'm done!\n"
# Example:
if __name__ == '__main__':
    out = print_count(10)
    # Python 3: print is a function.
    print("".join(out))
    # Route to a file. The generator yields str, so the file must be opened
    # in text mode ("wb" would reject str in Python 3); `with` guarantees
    # the handle is closed even on error.
    out = print_count(5)
    with open("count.txt", "w") as f:
        for chunk in out:
            f.write(chunk)
|
# Arithmetic progression: read the first term and the common difference,
# then print the first 10 terms on one line.
a1 = int(input('Digite o primeiro termo: '))
r = int(input('Digite a Razão desse termo: '))
# (Removed the dead `n = 1` — the for loop rebinds n immediately.)
for n in range(1, 11):
    an = a1 + (n - 1) * r
    print(an, end=' ')
|
from enum import Enum
# Member names use underscores: "HALF-ELF"/"HALF-ORC" are not valid Python
# identifiers, so the hyphenated members could never be accessed as
# attributes (Races.HALF-ELF is a subtraction, not a lookup).
Races = Enum('Races', 'DWARF, ELF, HALFLING, HUMAN, DRAGONBORN, GNOME, HALF_ELF, HALF_ORC, TIEFLING')
Skills = Enum('Skills', 'STRENGTH, DEXTERITY, CONSTITUTION, INTELLIGENCE, WISDOM, CHARISMA')
class Race():
    """A playable race with a name and two (skill, amount) bonuses."""

    def __init__(self, name, bonus1, bonus2):
        self.name = name
        self.bonus1 = bonus1
        self.bonus2 = bonus2
        # Echo each bonus as it is recorded: skill name, then amount.
        for bonus in (bonus1, bonus2):
            print(bonus[0].name)
            print(bonus[1])
DragonBorn = Race(Races.DRAGONBORN, [Skills.STRENGTH, 2], [Skills.CHARISMA, 1])
|
import datetime
from django.db import models
from django.core.validators import RegexValidator
from django.contrib.auth.models import User
from akun.models import Profil
# Digits-only validator for NIK-style numeric fields (message/code are
# user-facing Indonesian strings and must stay as-is).
Validator = RegexValidator(
    regex='^[0-9]*$', message='Hanya Angka', code='NIK tidak valid')
class Kategori(models.Model):
    """An attendance category (session type)."""
    nama_kategori = models.CharField(max_length=30)
    # jam_masuk = models.DateTimeField()
    # jam_keluar = models.DateTimeField()
    # def batas_jam(self):
    #     return self.jam_masuk + datetime.timedelta(minutes=5)
class Jadwal(models.Model):
    """A daily schedule: check-in and check-out times for a category."""
    kategori = models.ForeignKey(Kategori, on_delete=models.CASCADE)
    tgl_absen = models.DateField(auto_now_add=True)
    jam_masuk = models.TimeField()
    jam_pulang = models.TimeField()

    def batas_jam(self):
        """Return jam_masuk plus a 5-minute grace period as a time.

        BUG FIX: jam_masuk is a TimeField (datetime.time) and time objects
        do not support `+ timedelta` — the old code raised TypeError. Combine
        with a dummy date, add the delta, then take the time back out.
        """
        masuk = datetime.datetime.combine(datetime.date.today(), self.jam_masuk)
        return (masuk + datetime.timedelta(minutes=5)).time()
class Peserta(models.Model):
    """A participant: links a user profile to a schedule (Jadwal)."""
    user = models.ForeignKey(Profil, on_delete=models.CASCADE)
    jadwal_peserta = models.ForeignKey(Jadwal, on_delete=models.CASCADE)

    def __str__(self):
        # Display the profile's name rather than the model's pk.
        return self.user.nama
class Status(models.Model):
    """A short attendance-status label (e.g. check-in / check-out)."""
    status = models.CharField(max_length=10)

    def __str__(self):
        return self.status
class Scan(models.Model):
    """A single attendance scan event for a participant.

    scan_jam/tgl_scan are stamped automatically at creation time.
    """
    # access_token = models.CharField(max_length=100)
    peserta = models.ForeignKey(Peserta, on_delete=models.CASCADE)
    scan_jam = models.TimeField(auto_now_add=True)
    tgl_scan = models.DateField(auto_now_add=True)

    def status_absen(self):
        """Classify this scan against the schedules for its date.

        Returns 'JAM MASUK' when scanned before check-in time, 'JAM PULANG'
        when scanned after check-out but before 21:00, 'INVALID' otherwise.
        Implicitly returns None when no Jadwal matches tgl_scan.
        NOTE(review): iterates every Jadwal and classifies against the first
        one whose date matches — assumes at most one schedule per date.
        """
        for qs in Jadwal.objects.all():
            if self.tgl_scan == qs.tgl_absen:
                if self.scan_jam < qs.jam_masuk:
                    return 'JAM MASUK'
                elif self.scan_jam > qs.jam_pulang and self.scan_jam < datetime.time(21, 0, 0):
                    return 'JAM PULANG'
                else:
                    return 'INVALID'
    # def jam_pulang(self):
    #     if self.scan_jam.date() == datetime.date.today():
    #         for qs in Jadwal.objects.all():
    #             if self.scan_jam.timetz() > qs.jam_masuk.timetz():
    #                 return self.scan_jam.astimezone().time()
    #             else:
    #                 return 'None'
    # def save(self, *args, **kwargs):
    #     for a in Jadwal.objects.all():
    #         if self.scan_jam.date() == a.jam_masuk.date():
    #             # return self.scan_jam.astimezone().time()
    #             if self.scan_jam.time() < a.jam_masuk.time():
    #                 if (self.status_scan.status == 'JAM MASUK'):
    #                     self.status_scan.save(pk=3)
    #                     return super(Scan, self).save(*args, **kwargs)
    #                 else:
    #                     self.status_scan.save(pk=1)
    #                     return super(Scan, self).save(*args, **kwargs)
    #             elif self.scan_jam.time() > a.jam_pulang.time():
    #                 if self.jam_kegiatan() == 'JAM KELUAR':
    #                     return 'INVALID'
    #                 else:
    #                     return 'JAM KELUAR'
|
import sys
import cv2
from vision.camera import load_camera
from vision.video import load_video_writer
from ml.model import load_yolo, load_detectron
import logging
import argparse
import tqdm
logger = logging.getLogger("root")
def yolo(args):
    """Run the YOLO detector over a camera or video-file stream, draw the
    detections on each frame and record both raw and labelled footage.

    Side effects: writes recording_original.mp4 / recording_labelled.mp4;
    exits the process when a frame cannot be read.
    """
    # BUG FIX: the original called load_detector(), which is defined nowhere
    # and imported nowhere; the YOLO model loader is load_yolo().
    # NOTE(review): assumes load_yolo accepts the parsed args — confirm its
    # signature in ml.model.
    detector = load_yolo(args)
    video_writer_original = load_video_writer(
        "recording_original.mp4", (1280, 720))
    video_writer_labelled = load_video_writer(
        "recording_labelled.mp4", (1280, 720))
    if(args.video_input == ""):
        cap = load_camera(0)
    else:
        cap = cv2.VideoCapture(args.video_input)
    while True:
        (success, frame) = cap.read()
        # BUG FIX: check the read result BEFORE writing; on failure `frame`
        # is None and the old code handed it to the video writer first.
        if not success:
            logger.error("Fatal error: Frame not received.")
            sys.exit(1)
        video_writer_original.write(frame)
        detector_result = detector.detect(frame)
        for box in detector_result:
            bbox = box["bbox"]
            label = box["label"]
            x = bbox["x"]
            y = bbox["y"]
            w = bbox["width"]
            h = bbox["height"]
            cv2.rectangle(img=frame,
                          pt1=(x, y),
                          pt2=(x + w, y + h),
                          color=(36, 255, 12),
                          thickness=2)
            cv2.putText(img=frame,
                        text=label,
                        org=(x, y - 30),
                        fontFace=cv2.FONT_HERSHEY_COMPLEX,
                        fontScale=0.7,
                        color=(36, 255, 12),
                        thickness=2)
        video_writer_labelled.write(frame)
        if args.video_out:
            cv2.imshow("clearbotvision", frame)
            # 'q' stops the preview loop.
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
    cap.release()
    video_writer_labelled.release()
    video_writer_original.release()
def detectron():
    """Run the Detectron model directly against the default camera stream."""
    from ml.model import load_detectron
    capture = cv2.VideoCapture(0)
    detector = load_detectron()
    detector.detect(capture)
    capture.release()
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Clearbot AI and PController")
    # BUG FIX: `type=bool` is broken for CLI flags — argparse passes the raw
    # string through bool(), so "--debug False" evaluated to True. Plain
    # store_true flags behave correctly (present => True, absent => False).
    parser.add_argument('-v', '--video_out', action='store_true',
                        help="Show the camera video output")
    parser.add_argument('-i', '--video_input', type=str,
                        default="", help="Input")
    parser.add_argument('--debug', action='store_true',
                        help="Switch to debug mode")
    parser.add_argument('-m', '--modeltype', type=str,
                        default="detectron", help="Available: 'yolo' or 'detectron'")
    parser.add_argument('-s', '--modelsize', type=str,
                        default="full", help="Either 'tiny' or 'full' model")
    _args = parser.parse_args()
    if _args.debug:
        logger.setLevel(logging.DEBUG)
    else:
        logger.setLevel(logging.INFO)
    # Dispatch on the requested model backend.
    if _args.modeltype == "detectron":
        detectron()
    if _args.modeltype == "yolo":
        yolo(_args)
|
#! /usr/bin/env python
"""Module for interfacing with KARR's propeller chip
@author:Kristian Charboneau
"""
import serial
def move_x(value):
    """Translate along the x axis (stub — not yet implemented)."""
    pass
def move_y(value):
    """Translate along the y axis (stub — not yet implemented)."""
    pass
def move_z(value):
    """Translate along the z axis (stub — not yet implemented)."""
    pass
def rot_x(value):
    """Rotate about the x axis (stub — not yet implemented)."""
    pass
def rot_y(value):
    """Rotate about the y axis (stub — not yet implemented)."""
    pass
def rot_z(value):
    """Rotate about the z axis (stub — not yet implemented)."""
    pass
def _light(value):
    """Low-level light command (stub — not yet implemented)."""
    pass
def camera_pan(value):
    """Pan the camera (stub — not yet implemented)."""
    pass
def camera_tilt(value):
    """Tilt the camera (stub — not yet implemented)."""
    pass
def depth():
    """Read the depth sensor (stub — not yet implemented)."""
    pass
class Light():
    """Tracks the requested brightness of the vehicle's light (0 = off)."""

    def __init__(self):
        self.brightness = 0

    def set_brightness(self, value):
        """Record the requested brightness level."""
        self.brightness = value

    def get_brightness(self):
        """Return the most recently requested brightness level."""
        return self.brightness
def cycle():
    """ Send and receive data with the propeller chip. Should be called every
    time the mainloop starts over

    Stub — serial exchange not yet implemented.
    """
    pass
|
import turtle
def draw_circle(x, y, r):
    """Stamp the turtle at (x, y), then draw a circle of radius r starting
    one radius ahead of that point.

    NOTE(review): the heading is rotated 90° left on every call and never
    reset, so successive calls approach their circles from different
    directions — confirm whether that drift is intentional.
    """
    turtle.up()
    turtle.goto(x,y)
    turtle.stamp()
    turtle.forward(r)
    turtle.down()
    turtle.left(90)
    turtle.circle(r)
# Demo: use the turtle cursor shape and draw three stamped circles.
turtle.shape("turtle")
draw_circle(0,0,50)
draw_circle(200,200,100)
draw_circle(100,-100,50)
|
import connexion
import six
from swagger_server.models.exposures_bundle import ExposuresBundle # noqa: E501
from swagger_server import util
def get_exposures(coords_file, start_date=None, end_date=None):  # noqa: E501
    """provided with list of lat,lons in a file (1 pair on each line) will return a bundle of exposure types (CMAQ, roadway, & socio-economic)
    By passing in the appropriate options, you can get a bundle of exposure types (CMAQ, roadway, & socio-economic) # noqa: E501
    :param coords_file: input file with list of lat,lon coordinates (1 pair per line; decimal format - WGS84 assumed)
    :type coords_file: werkzeug.datastructures.FileStorage
    :param start_date: start date of range (ex: 2010-01-01) - if not provided, no CMAQ data will be returned
    :type start_date: str
    :param end_date: end date of range (ex: 2010-01-02) - if not provided, no CMAQ data will be returned
    :type end_date: str
    :rtype: ExposuresBundle
    """
    if (start_date):
        start_date = util.deserialize_date(start_date)
    if (end_date):
        end_date = util.deserialize_date(end_date)
    # Imported lazily to keep controller import cheap (matches original).
    from swagger_server.exposures.exposures import Exposures
    exp = Exposures()
    # BUG FIX: the old `kwargs = locals()` also captured the imported
    # Exposures class and the `exp` instance itself, passing bogus keyword
    # arguments into get_values(). Pass the three request parameters
    # explicitly instead. (Also removed a leftover personal debug print.)
    data = exp.get_values(coords_file=coords_file,
                          start_date=start_date,
                          end_date=end_date)
    return data
|
# Generated by Django 2.2.2 on 2019-07-07 19:55
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated: rename listing field 'Rules' -> 'rules' (lowercase,
    matching Django's field-naming convention). Do not edit by hand."""

    dependencies = [
        ('listings', '0014_auto_20190707_1223'),
    ]
    operations = [
        migrations.RenameField(
            model_name='listing',
            old_name='Rules',
            new_name='rules',
        ),
    ]
|
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
import settings_service_pb2 as settings__service__pb2
class SettingsServiceStub(object):
    # missing associated documentation comment in .proto file
    # (Generated gRPC client stub for settings.SettingsService —
    # regenerate from the .proto instead of editing by hand.)
    pass

    def __init__(self, channel):
        """Constructor.

        Args:
            channel: A grpc.Channel.
        """
        # Each attribute below is a callable for one unary-unary RPC.
        self.ListConfig = channel.unary_unary(
            '/settings.SettingsService/ListConfig',
            request_serializer=settings__service__pb2.ListConfigRequest.SerializeToString,
            response_deserializer=settings__service__pb2.ListConfigResponse.FromString,
        )
        self.CreateConfig = channel.unary_unary(
            '/settings.SettingsService/CreateConfig',
            request_serializer=settings__service__pb2.Config.SerializeToString,
            response_deserializer=settings__service__pb2.Config.FromString,
        )
        self.GetConfigByName = channel.unary_unary(
            '/settings.SettingsService/GetConfigByName',
            request_serializer=settings__service__pb2.Config.SerializeToString,
            response_deserializer=settings__service__pb2.Config.FromString,
        )
        self.GetConfigByGroup = channel.unary_unary(
            '/settings.SettingsService/GetConfigByGroup',
            request_serializer=settings__service__pb2.Config.SerializeToString,
            response_deserializer=settings__service__pb2.ConfigByNameMap.FromString,
        )
        self.SetConfig = channel.unary_unary(
            '/settings.SettingsService/SetConfig',
            request_serializer=settings__service__pb2.Config.SerializeToString,
            response_deserializer=settings__service__pb2.Config.FromString,
        )
        self.DeleteConfig = channel.unary_unary(
            '/settings.SettingsService/DeleteConfig',
            request_serializer=settings__service__pb2.Config.SerializeToString,
            response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
        )
class SettingsServiceServicer(object):
    # missing associated documentation comment in .proto file
    # (Generated service base class — subclass and override the RPC methods;
    # each default implementation answers UNIMPLEMENTED.)
    pass

    def ListConfig(self, request, context):
        # missing associated documentation comment in .proto file
        pass
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def CreateConfig(self, request, context):
        # missing associated documentation comment in .proto file
        pass
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def GetConfigByName(self, request, context):
        # missing associated documentation comment in .proto file
        pass
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def GetConfigByGroup(self, request, context):
        # missing associated documentation comment in .proto file
        pass
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def SetConfig(self, request, context):
        # missing associated documentation comment in .proto file
        pass
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def DeleteConfig(self, request, context):
        # missing associated documentation comment in .proto file
        pass
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')
def add_SettingsServiceServicer_to_server(servicer, server):
    """Register `servicer`'s handlers for settings.SettingsService on a
    grpc server (generated glue — regenerate rather than edit)."""
    rpc_method_handlers = {
        'ListConfig': grpc.unary_unary_rpc_method_handler(
            servicer.ListConfig,
            request_deserializer=settings__service__pb2.ListConfigRequest.FromString,
            response_serializer=settings__service__pb2.ListConfigResponse.SerializeToString,
        ),
        'CreateConfig': grpc.unary_unary_rpc_method_handler(
            servicer.CreateConfig,
            request_deserializer=settings__service__pb2.Config.FromString,
            response_serializer=settings__service__pb2.Config.SerializeToString,
        ),
        'GetConfigByName': grpc.unary_unary_rpc_method_handler(
            servicer.GetConfigByName,
            request_deserializer=settings__service__pb2.Config.FromString,
            response_serializer=settings__service__pb2.Config.SerializeToString,
        ),
        'GetConfigByGroup': grpc.unary_unary_rpc_method_handler(
            servicer.GetConfigByGroup,
            request_deserializer=settings__service__pb2.Config.FromString,
            response_serializer=settings__service__pb2.ConfigByNameMap.SerializeToString,
        ),
        'SetConfig': grpc.unary_unary_rpc_method_handler(
            servicer.SetConfig,
            request_deserializer=settings__service__pb2.Config.FromString,
            response_serializer=settings__service__pb2.Config.SerializeToString,
        ),
        'DeleteConfig': grpc.unary_unary_rpc_method_handler(
            servicer.DeleteConfig,
            request_deserializer=settings__service__pb2.Config.FromString,
            response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
        ),
    }
    generic_handler = grpc.method_handlers_generic_handler(
        'settings.SettingsService', rpc_method_handlers)
    server.add_generic_rpc_handlers((generic_handler,))
|
from django.shortcuts import render, HttpResponseRedirect, reverse
from django.views.decorators.csrf import ensure_csrf_cookie
from django.http import JsonResponse
from numpy.core.numeric import NaN
import pandas as pd
from django.contrib import messages
from .models import Consumer
from work.models import Site
from work.functions import getHabID
from django.db.models import F, Sum, Count, Q, FileField
from .codes import CFIELDS
from consumers.models import formatString, genCode, getConsumerID
@ensure_csrf_cookie
def index(request):
    """Render the consumers landing page with the summary dashboard HTML."""
    context = {'data': getData()}
    return render(request, "consumers/index.html", context)
def getData():
    """Build the dashboard HTML: per-district rural and urban consumer
    counts (habitations, BPL/APL/unknown) plus per-division totals.

    NOTE(review): rural uses census__lt=799999 and urban census__gt=799999,
    so a consumer with census exactly 799999 appears in neither table —
    confirm the intended boundary.
    """
    cs = Consumer.objects.all()
    # Rural: small census codes.
    crural = cs.filter(census__lt=799999).values(
        'site__district').annotate(
        no_habs = Count('site__habitation', distinct=True),
        bpl=Count('apl_bpl', filter=Q(apl_bpl='BPL')),
        apl=Count('apl_bpl', filter=Q(apl_bpl='APL')),
        unknown=Count('apl_bpl', filter=Q(apl_bpl=None)))
    df = pd.DataFrame(crural).fillna('None')
    df.set_index('site__district', inplace=True)
    # curban = Consumer.objects.filter(census__gt=799999).values(
    #     'site__district', 'apl_bpl').annotate(records=Count('site__hab_id'))
    # Urban: large census codes, same aggregation as rural.
    curban = cs.filter(census__gt=799999).values(
        'site__district').annotate(
        no_habs = Count('site__habitation', distinct=True),
        bpl=Count('apl_bpl', filter=Q(apl_bpl='BPL')),
        apl=Count('apl_bpl', filter=Q(apl_bpl='APL')),
        unknown=Count('apl_bpl', filter=Q(apl_bpl=None)))
    df2 = pd.DataFrame(curban).fillna('None')
    df2.set_index('site__district', inplace=True)
    # c = Consumer.objects.filter(isInPortal=True).values(
    #     'district').annotate(count=Count('name'))
    # dfp = pd.DataFrame(c).fillna('None')
    # dfp.set_index('district', inplace=True)
    # df['in portal '] = dfp
    # Household count per division for the third table.
    dfdiv = pd.DataFrame(cs.values('site__division').annotate(hh=Count('consumer_no')))
    return '<div style="display: flex;"><div>Rural<br>' + df.to_html() + '</div><div>Urban<BR>' + df2.to_html() +'</div><div>'+ dfdiv.to_html()+ '</div></div>'
def upload(request):
    """Bulk create/update Consumer rows from an uploaded Excel sheet.

    Expects POST with 'file' (Excel workbook containing an 'upload' sheet)
    and 'upid' (change-batch identifier). Column layout must match
    files/template_consumer_details.xlsx; mismatches abort with an error
    message. Redirects back to the consumers index with a summary message.
    """
    # TODO: find site first and update
    if(not request.method == 'POST'):
        return render(request, "consumers/index.html")
    file = request.FILES['file']
    upid = request.POST['upid']
    df = pd.read_excel(file, 'upload')
    df = df.fillna('')
    # Column order of the template defines the field positions used below.
    df_template = pd.read_excel('files/template_consumer_details.xlsx')
    cols = df_template.columns
    truths = [col in df.columns for col in df_template.columns]
    ifmatch = all(truths)
    ncreated = 0
    nupdated = 0
    if(not ifmatch):
        # Report which template columns the upload is missing, then abort.
        notmatch = [df_template.columns[i]
                    for i, col in enumerate(truths) if not col]
        messages.error(request, "Field not found– ")
        messages.error(request, notmatch)
        return HttpResponseRedirect(reverse('consumers:index'))
    for index, row in df.iterrows():
        # unique_together = ('census', 'habitation', 'name', 'consumer_no')
        # print('Processing..')
        # print(row)
        # Normalise the identifying fields (collapse whitespace, uppercase).
        census = row[cols[0]]
        habitation = " ".join(str(row[cols[1]]).split()).upper()
        name = " ".join(str(row[cols[3]]).split()).upper()
        consumer_no = str(row[cols[7]]).replace(" ", "").upper()
        consumer, created = Consumer.objects.get_or_create(
            consumer_id=getConsumerID(census, consumer_no, name)
        )
        if(created):
            ncreated += 1
        else:
            nupdated += 1
        # consumer.village = row[cols[]]
        # Only save when at least one field actually carried a value.
        changed = False
        if(census):
            changed = True
            consumer.census = census
        if(habitation):
            changed = True
            consumer.habitation = habitation
        if(name):
            changed = True
            consumer.name = name
        if(consumer_no):
            changed = True
            consumer.consumer_no = consumer_no
        if(row[cols[2]]):
            consumer.edate = row[cols[2]]
            changed = True
        if(row[cols[11]]):
            consumer.status = row[cols[11]]
            changed = True
        if(row[cols[5]]):
            consumer.aadhar = row[cols[5]]
            changed = True
        if(row[cols[8]]):
            consumer.meter_no = row[cols[8]]
            changed = True
        if(row[cols[6]]):
            consumer.apl_bpl = row[cols[6]]
            changed = True
        if(row[cols[4]]):
            consumer.mobile_no = row[cols[4]]
            changed = True
        if(row[cols[9]]):
            consumer.voter_no = row[cols[9]]
            changed = True
        if(row[cols[10]]):
            consumer.tariff = row[cols[10]]
            changed = True
        if(row[cols[12]]):
            consumer.pdc_date = row[cols[12]]
            changed = True
        if(row[cols[13]]):
            consumer.address1 = row[cols[13]]
            changed = True
        if(row[cols[14]]):
            consumer.address2 = row[cols[14]]
            changed = True
        if(row[cols[15]]):
            consumer.remark = row[cols[15]]
            changed = True
        # Link geography from the first Site matching this census code.
        censusSite = Site.objects.filter(census=row[cols[0]]).first()
        if(censusSite):
            consumer.district = censusSite.district
            consumer.village = censusSite.village
        consumer.changeid = upid
        hab_id = getHabID(census=row[cols[0]], habitation=row[cols[1]])
        consumer.hab_id = hab_id
        if(Site.objects.filter(hab_id=hab_id).exists()):
            site = Site.objects.get(hab_id=hab_id)
            consumer.site = site
        if(changed):
            try:
                consumer.save()
            except Exception as ex:
                # Surface the failing row but continue with the rest.
                messages.error(request, ex.__str__())
                print('Processing..')
                print(row)
                print(ex)
    messages.success(request, '{} updated. {} uploaded of {} records'.format(
        nupdated, ncreated, len(df)))
    return HttpResponseRedirect(reverse('consumers:index'))
def api_getConsumers(request):
    """POST API: filter Consumer rows by the posted criteria and return them
    as JSON rows plus an HTML summary header.

    Side effect: writes the filtered set to outputs/filtered_consumers.xlsx.
    """
    if(request.method != 'POST'):
        # BUG FIX: JsonResponse raises TypeError for non-dict payloads unless
        # safe=False, so the old `JsonResponse('nothing to do')` crashed.
        return JsonResponse({'status': 'nothing to do'})
    # Build the ORM filter kwargs from whichever criteria were posted.
    filterString = {}
    habid = request.POST.get('habid', None)
    if(habid):
        filterString['site__hab_id__icontains'] = habid
        print(habid)
    address = request.POST.get('address', None)
    if(address):
        filterString['address1__icontains'] = address
    name = request.POST.get('name', None)
    if(name):
        filterString['name__icontains'] = formatString(name)
    consumer_no = request.POST.get('consumer_no', None)
    if(consumer_no):
        filterString['consumer_no__icontains'] = genCode(consumer_no)
    consumer_id = request.POST.get('consumer_id', None)
    if(consumer_id):
        filterString['consumer_id__exact'] = genCode(consumer_id)
    habid_exact = request.POST.get('habid_exact', None)
    if(habid_exact):
        filterString['site__hab_id__exact'] = genCode(habid_exact)
    inPortal = request.POST.get('inPortal', None)
    if(inPortal):
        filterString['isInPortal'] = True
    village = request.POST.get('village', None)
    if(village):
        filterString['site__village__icontains'] = formatString(village)
    consumers = Consumer.objects.filter(
        **filterString).order_by('name', 'site__census')
    # Per-habitation counts for the summary table above the listing.
    header = consumers.values('site__village', 'site__census', 'site__habitation', 'site__district', 'site__division').order_by(
    ).annotate(count=Count('site__habitation'))
    dfheader = pd.DataFrame(header)
    fields = ['site__village', 'site__census', 'site__habitation', 'name', 'consumer_no', 'edate2', 'status', 'aadhar', 'meter_no',
              'apl_bpl', 'mobile_no', 'voter_no', 'tariff', 'pdc_date', 'address1', 'address2', 'site__district', 'site__division', 'remark']
    # df = pd.DataFrame(consumers.values(*CFIELDS))
    df = pd.DataFrame(consumers.values(*fields, 'id'))
    # print(df.head())
    if(not df.empty):
        df = df.fillna('')
        df = df.applymap(lambda x: '' if x == 'nan' else x)
        # Admin edit/delete links rendered per row by the front-end.
        df['edit'] = df.loc[:, 'id'].map(
            lambda x: '<a target="_blank" href=/admin/consumers/consumer/' + str(x) + '>edit</a>')
        df['delete'] = df.loc[:, 'id'].map(
            lambda x: '<a target="_blank" href=/admin/consumers/consumer/' + str(x) + '/delete/>delete</a>')
        # df.head()
        df1 = df.loc[:, [*fields, 'edit', 'delete',]]
        # Export without the last three columns (remark/edit/delete).
        df1.iloc[:, :-3].to_excel('outputs/filtered_consumers.xlsx')
        datajson = list(df1.T.to_dict().values())
        # print(df1.iloc[:,5:].head())
        # print(datajson)
        return JsonResponse({'consumers': datajson, 'count': len(df1), 'header': dfheader.to_html()})
    else:
        return JsonResponse({'status': "no data"})
|
#!/usr/bin/env python
PACKAGE = "vocus2_ros"
from dynamic_reconfigure.parameter_generator_catkin import *
# dynamic_reconfigure parameter definitions for the VOCUS2 saliency node.
gen = ParameterGenerator()
fusion_enum = gen.enum([ gen.const("Arithmetic_mean", int_t, 0, "Arithmetic mean"),
                         gen.const("Max", int_t, 1, "Max"),
                         gen.const("Uniqueness_weight", int_t, 2, "Uniqueness weight"),
                         gen.const("Individual_weights", int_t, 3, "Individual weights")],
                       "Fusion modes")
cspace_enum = gen.enum([ gen.const("Lab", int_t, 0, "Lab color space"),
                         gen.const("Opponent_CODI", int_t, 1, "Klein/Frintrop DAGM 2012"),
                         gen.const("Opponent", int_t, 2, "like above but shifted and scaled to [0,1]"),
                         gen.const("Itti", int_t, 3, "splitted RG and BY channels")],
                       "Color spaces")
gen.add("restore_default", bool_t, 0, "Restore default configuration", False)
gen.add("num_foci", int_t, 0, "Number of Foci", 1, 1, 5)
gen.add("start_layer", int_t, 0, "First pyramid layer that is used", 0, 0, 6)
gen.add("stop_layer", int_t, 0, "Last pyramid layer that is used", 4, 0, 6)
gen.add("center_sigma", int_t, 0, "Center Sigma", 3, 1, 6)
gen.add("surround_sigma", int_t, 0, "Surround Sigma", 13, 5, 20)
gen.add("n_scales", int_t, 0, "Number of scales per layer", 2, 1, 5)
gen.add("msr_thresh", double_t, 0, "Threshold of MSR", .75, 0.5, 1)
gen.add("normalize", bool_t, 0, "Normalize input", True)
gen.add("orientation", bool_t, 0, "Use orientation feature", False)
gen.add("combined_features", bool_t, 0, "Use combined features", False)
gen.add("topdown_learn", bool_t, 0, "Enable top down learning", False)
gen.add("topdown_search", bool_t, 0, "Enable top down search", False)
gen.add("center_bias", bool_t, 0, "Enable center bias", False)
gen.add("center_bias_value", double_t, 0, "Magnitude of center bias", 0.000005, 0.0000005, 0.0005)
gen.add("c_space", int_t, 0, "Color space", 1, 0, 3, edit_method=cspace_enum)
gen.add("fuse_feature", int_t, 0, "Specifies how feature maps should be fused to generate the conspicuity maps", 0, 0, 3, edit_method=fusion_enum)
gen.add("fuse_conspicuity", int_t, 0, "Specifies how conspicuity maps are fused", 0, 0, 3, edit_method=fusion_enum)
# conspicuity map weights
# (Fixed copy-paste typos in the user-visible descriptions below:
# "Feautures" -> "Features", "on-of" -> "on-off", and color_b_off_on was
# described as "Color a channel".)
gen.add("consp_intensity_on_off_weight", double_t, 0, "Saliency map: Weight of intensity on_off fusion", .25, 0, 1)
gen.add("color_channel_1_weight", double_t, 0, "Saliency map: Weight of color channel 1 (channel a) [OR: weight of Color Channel in Combined Features mode] ", .25, 0, 1)
gen.add("color_channel_2_weight", double_t, 0, "Saliency map: Weight of color channel 2 (channel b) [OR: ignored in Combined Features mode]", .25, 0, 1)
gen.add("orientation_channel_weight", double_t, 0, "Saliency map: Weight of orientation channel [OR: ignored if no orientation channel]", .25, 0, 1)
# feature map weights
gen.add("intensity_on_off_weight", double_t, 0, "Weight of intensity on-off", .5, 0, 1)
gen.add("intensity_off_on_weight", double_t, 0, "Weight of intensity off-on", .5, 0, 1)
gen.add("color_a_on_off_weight", double_t, 0, "Color a channel on-off", .5, 0, 1)
gen.add("color_a_off_on_weight", double_t, 0, "Color a channel off-on", .5, 0, 1)
gen.add("color_b_on_off_weight", double_t, 0, "Color b channel on-off", .5, 0, 1)
gen.add("color_b_off_on_weight", double_t, 0, "Color b channel off-on", .5, 0, 1)
gen.add("orientation_1_weight", double_t, 0, "Orientation channel 1", .25, 0, 1)
gen.add("orientation_2_weight", double_t, 0, "Orientation channel 2", .25, 0, 1)
gen.add("orientation_3_weight", double_t, 0, "Orientation channel 3", .25, 0, 1)
gen.add("orientation_4_weight", double_t, 0, "Orientation channel 4", .25, 0, 1)
exit(gen.generate(PACKAGE, "vocus2_ros", "vocus2_ros"))
|
from __future__ import unicode_literals
import frappe
from frappe.utils import flt, cstr, cint, time_diff_in_hours, now, time_diff, add_days, formatdate
from frappe import _
import json
import math
import re
@frappe.whitelist()
def get_global_search_suggestions(filters):
    """Return document names matching *filters* across the searchable doctypes.

    Searches published Trainings plus the Skill Matrix 18/120 and Document
    Type masters with a LIKE pattern and returns a flat list of names.
    """
    # Parameterize the user-supplied text instead of interpolating it into the
    # SQL string: the original .format() call allowed SQL injection.
    query = """ select name from `tabTraining` where training_status = 'Published'
        and name like %(txt)s
        union select name from `tabSkill Matrix 18` where name like %(txt)s
        union select name from `tabSkill Matrix 120` where name like %(txt)s
        union select name from `tabDocument Type` where name like %(txt)s
        """
    suggestions = frappe.db.sql(query, {"txt": "%{0}%".format(filters)}, as_list=1)
    return [suggestion[0] for suggestion in suggestions]
@frappe.whitelist()
def get_published_trainings(search_filters):
    """Return one page (5 rows) of published Trainings matching a text filter.

    *search_filters* is a JSON string with keys "page_no" (0-based page) and
    "filters" (free text matched against several Training columns).  Returns
    a dict with the page rows, total page count, and the caller's pending
    answer-sheet name (if any).
    """
    search_filters = json.loads(search_filters)
    # Bug fix: page_no may arrive as a string; str * 5 would corrupt the OFFSET.
    page_no = int(search_filters.get("page_no") or 0)
    limit_query = "LIMIT 5 OFFSET {0}".format(page_no * 5)
    # Strip characters that could break out of the quoted LIKE patterns below;
    # the original interpolated the raw user text (SQL injection risk).
    safe_txt = "".join(ch for ch in str(search_filters.get("filters") or "")
                       if ch not in "%_'\"\\;")
    downld_query, avg_rat_query = get_ratings_and_downloads_query()
    my_query = """ select * , ({1}) as download_count , ({2}) as avg_ratings from `tabTraining` tr
        where tr.training_status = 'Published' and
        ( tr.skill_matrix_18 like '%{0}%' or tr.name like '%{0}%'
        or tr.skill_matrix_120 like '%{0}%' or tr.document_type like '%{0}%'
        or tr.industry like '%{0}%' or tr.description like '%{0}%' ) order by tr.creation desc """.format(safe_txt, downld_query, avg_rat_query)
    total_records = get_total_records(my_query)
    response_data = frappe.db.sql(my_query + limit_query, as_dict=True)
    assessment_status = get_request_download_status(response_data)
    total_pages = math.ceil(total_records[0].get("count", 0) / 5.0)
    return {"response_data": response_data, "total_pages": total_pages, "test_status": assessment_status}
def get_total_records(query):
    """Run *query* with its select list swapped for count(*) and return the rows."""
    count_query = query.replace("*", "count(*) as count", 1)
    return frappe.db.sql(count_query, as_dict=1)
def get_request_download_status(response_data):
    """Annotate each training row with its reviews and the session user's
    latest answer-sheet status, then return the name of any in-progress
    (New/Pending) Answer Sheet of the session user, or "" when none exists.

    Mutates the dicts in *response_data* in place (adds "comments" and
    "ans_status" keys).
    """
    for response in response_data:
        # All reviews for this training, with the reviewer's display name.
        response["comments"] = frappe.db.sql(""" select tr.user_id, tr.comments, tr.ratings, concat(usr.first_name , ' ' ,usr.last_name) as full_name
            from `tabTraining Review` tr left join `tabUser` usr
            on usr.name = tr.user_id
            where tr.training_name = %s""",(response.get("training_name")),as_dict=1)
        # Most recent answer sheet of the current user for this training.
        ans_sheet = frappe.db.sql(""" select answer_sheet_status from
            `tabAnswer Sheet`
            where student_name = %s and training_name = %s
            order by creation desc limit 1 """,(frappe.session.user, response.get("training_name") ), as_dict=1)
        response["ans_status"] = ans_sheet[0].get("answer_sheet_status") if ans_sheet else ""
    # Independent of the loop above: any New/Pending sheet (for any training)
    # is reported back so the UI can block further requests.
    result = frappe.db.get_value("Answer Sheet", {"student_name":frappe.session.user, "answer_sheet_status":["in", ["New", "Pending"] ]}, 'name')
    return result if result else ""
def get_ratings_and_downloads_query():
    """Return the correlated sub-queries (download count, average rating)
    meant to run against `tabTraining` rows aliased as ``tr``."""
    download_query = (
        " select count(name) from `tabTraining Download Log` tdl"
        " where tdl.training_name = tr.name "
    )
    avg_rat_query = (
        " select ifnull(avg(ratings), 0.0) from `tabTraining Review` rvw"
        " where rvw.training_name = tr.name "
    )
    return download_query, avg_rat_query
@frappe.whitelist()
def create_training_review(request_data):
    """Create or update the session user's review for a training.

    *request_data* is a JSON string with "training_name", "ratings" and
    "comments".  A user keeps at most one review per training.
    """
    request_data = json.loads(request_data)
    review_filters = {"user_id": frappe.session.user,
                      "training_name": request_data.get("training_name")}
    if frappe.db.get_value("Training Review", review_filters, "name"):
        tr = frappe.get_doc("Training Review", review_filters)
    else:
        tr = frappe.new_doc("Training Review")
        tr.user_id = frappe.session.user
        tr.training_name = request_data.get("training_name")
    # Ratings/comments are (re)set identically in both the create and update
    # paths; the original duplicated these assignments and the save call.
    tr.ratings = flt(request_data.get("ratings"))
    tr.comments = request_data.get("comments")
    tr.save(ignore_permissions=True)
@frappe.whitelist()
def make_training_subscription_form(request_data):
    """Create and submit an unforced-training subscription request for the
    session user.  *request_data* is a JSON string with key "tr_name"."""
    payload = json.loads(request_data)
    training_data = frappe.db.get_value("Training", {"name": payload.get("tr_name")}, "*", as_dict=True)
    subscription = frappe.new_doc("Training Subscription Approval")
    subscription.request_type = "Unforced Training"
    subscription.training_requester = frappe.session.user
    subscription.update(get_subscription_form_dict(training_data))
    subscription.save(ignore_permissions=True)
    subscription.submit()
    # send_mail_of_training_request(training_data.get("name"))
@frappe.whitelist()
def assign_forced_training(request_data):
    """Create and submit a forced-training subscription for every
    (employee, training) row in the JSON list *request_data*."""
    rows = json.loads(request_data)
    for row in rows:
        training_data = frappe.db.get_value("Training", {"name": row.get("training_name")}, "*", as_dict=True)
        requester = frappe.db.get_value("Employee", {"name": row.get("employee")}, "user_id")
        tsa = frappe.new_doc("Training Subscription Approval")
        tsa.request_type = "Forced Training"
        tsa.training_requester = requester
        tsa.update(get_subscription_form_dict(training_data))
        tsa.save(ignore_permissions=True)
        tsa.submit()
def send_mail_of_training_request(training_name):
    """Email the Central Delivery users that *training_name* was requested."""
    template = "/templates/training_templates/training_request.html"
    cd = get_central_delivery()
    first_name, last_name = frappe.db.get_value("User", {"name": frappe.session.user}, ["first_name", "last_name"])
    # Fall back to the first name alone when the user has no last name.
    user_name = ' '.join([first_name, last_name]) if last_name else first_name
    # Bug fix: the full name was computed but the template was rendered with
    # only the first name, leaving user_name dead.
    args = {"user_name": user_name, "training_name": training_name}
    frappe.sendmail(recipients=cd, sender=None, subject="Training Document Notification",
                    message=frappe.get_template(template).render(args))
def get_subscription_form_dict(training_data):
    """Map a Training row onto the fields of a Training Subscription Approval."""
    form = {field: training_data.get(field) for field in
            ("document_type", "industry", "skill_matrix_120", "skill_matrix_18", "assessment")}
    form["training_name"] = training_data.get("name")
    # Every new request starts open and pre-accepted by the admin account.
    form["request_status"] = "Open"
    form["central_delivery_status"] = "Accepted"
    form["central_delivery"] = "Administrator"
    return form
def get_central_delivery():
    """Return user ids holding the Central Delivery role (Administrator excluded)."""
    rows = frappe.get_list("UserRole",
                           filters={"role": "Central Delivery", "parent": ["!=", "Administrator"]},
                           fields=["parent"])
    return [row.get("parent") for row in rows]
@frappe.whitelist()
def validate_if_current_user_is_author():
    """Return "success" for Central Delivery users; otherwise the name of a
    published Training owned by the session user (None when there is none)."""
    if "Central Delivery" in frappe.get_roles():
        return "success"
    return frappe.db.get_value("Training",
                               {"owner": frappe.session.user, "training_status": "Published"},
                               "name")
@frappe.whitelist()
def get_training_list(doctype, txt, searchfield, start, page_len, filters):
    """Link-field query: published Trainings whose name matches *txt*.

    Central Delivery users see every published training; everyone else sees
    only trainings they own.
    """
    cond = ''
    if "Central Delivery" not in frappe.get_roles():
        cond = "and owner = '{0}'".format(frappe.session.user)
    # Consistency fix: the Central Delivery branch previously omitted
    # as_list=1 and returned tuples while the other branch returned lists.
    return frappe.db.sql(get_training_query(cond, txt), as_list=1)
def get_training_query(cond, txt):
    """Build the published-Training name lookup SQL; *cond* is an extra
    'and ...' clause appended before the row limit."""
    pattern = "%%%s%%" % txt
    return """ select name from
        `tabTraining`
        where training_status = 'Published'
        and name like '{txt}'
        {cond} limit 20 """.format(cond=cond, txt=pattern)
@frappe.whitelist()
def get_employee_list(doctype, txt, searchfield, start, page_len, filters):
    """Link-field query: Employees eligible for assignment to the training
    named in filters["training_name"]."""
    training_name = filters.get("training_name")
    return frappe.db.sql(get_emp_query(training_name, txt), as_list=1)
def get_emp_query(training_name, txt):
    """Build SQL selecting Employees with no active answer sheet for
    *training_name*, excluding Administrator and the session user."""
    pattern = "%%%s%%" % txt
    sub_query = get_sub_query(training_name)
    return """ select name, employee_name from
        `tabEmployee` emp
        where NOT EXISTS ({cond})
        and user_id not in ('Administrator', '{usr}')
        and (name like '{txt}' or employee_name like '{txt}')
        limit 20 """.format(cond=sub_query, txt=pattern, usr=frappe.session.user)
def get_sub_query(training_name):
    """Correlated sub-query: the most recent unfinished Answer Sheet of
    ``emp.user_id`` for *training_name* (used inside NOT EXISTS)."""
    template = """ select *
        from `tabAnswer Sheet` ans
        where ans.answer_sheet_status in ("New", "Open", "Pending")
        and ans.student_name = emp.user_id
        and ans.training_name = '{0}'
        order by ans.creation desc limit 1 """
    return template.format(training_name)
# def get_sub_query(training_name):
# return """ select ( select tsa.training_requester from `tabTraining Subscription Approval` tsa
# where tsa.request_status in ("Open", "Accepted") and tsa.training_name = '{0}'
# and tsa.training_requester= emp.user_id order by creation desc limit 1) as emp_user_id from
# `tabEmployee` emp """.format(training_name)
def get_filtered_employee(cond, txt):
    """Build SQL selecting Employees whose user_id is outside *cond*
    (a comma-separated, quoted id list); empty *cond* excludes nobody."""
    excluded = cond if cond else "''"
    pattern = "%%%s%%" % txt
    return """ select name, employee_name from
        `tabEmployee`
        where user_id not in ({cond})
        and user_id not in ('Administrator', '{usr}')
        and (name like '{txt}' or employee_name like '{txt}')
        limit 20 """.format(cond=excluded, txt=pattern, usr=frappe.session.user)
@frappe.whitelist()
def create_training_download_log(training_name, ans_sheet):
    """Record that the session user downloaded *training_name*, linked to
    the answer sheet *ans_sheet*."""
    log = frappe.new_doc("Training Download Log")
    log.update({
        "user_id": frappe.session.user,
        "training_name": training_name,
        "downloaded_datetime": now(),
        "answer_sheet_link": ans_sheet,
    })
    log.save(ignore_permissions=True)
@frappe.whitelist()
def get_my_trainings():
    """Return the session user's answer sheets joined with their trainings,
    newest first, annotated with download/assessment metadata."""
    rows = frappe.db.sql(""" select tr.training_name, tr.training_path,
        ans.answer_sheet_status, ans.name as ans_sheet, ans.creation,
        ans.percentage_score from
        `tabAnswer Sheet` ans join `tabTraining` tr
        on ans.training_name = tr.name
        where student_name = %s order by ans.creation desc""", (frappe.session.user), as_dict=1)
    # Annotates each row in place.
    get_meta_data_of_response(rows)
    return rows
@frappe.whitelist()
def get_meta_data_of_response(response_data):
    """Decorate each answer-sheet row with download flag, assessment status,
    tooltip, submission date and feedback-form name (mutates in place)."""
    status_labels = {"New":"Not Completed", "Pending":"Partial Completed", "Open":"Test Completed", "Closed":"Result Declared"}
    for row in response_data:
        sheet_status = row.get("answer_sheet_status")
        row["download_flag"] = frappe.db.get_value(
            "Training Download Log",
            {"training_name": row.get("training_name"), "user_id": frappe.session.user,
             "answer_sheet_link": row.get("ans_sheet")},
            "name")
        row["assessment_status"] = status_labels.get(sheet_status) if sheet_status else ""
        if sheet_status in ["Open", "Closed"]:
            row["tooltip_title"] = "{0} test Completed".format(row.get("training_name"))
        else:
            row["tooltip_title"] = " Test allowed after training download !!!!"
        row["sub_date"] = formatdate(row.get("creation"))
        row["feedback_form"] = frappe.db.get_value(
            "Training Feedback",
            {"user": frappe.session.user, "answer_sheet": row.get("ans_sheet"),
             "training": row.get("training_name")},
            "name")
@frappe.whitelist()
def check_answer_sheet_status(ans_sheet):
    """Return the current status of the Answer Sheet named *ans_sheet*."""
    return frappe.db.get_value("Answer Sheet", {"name": ans_sheet}, 'answer_sheet_status')
@frappe.whitelist()
def get_feedback_questionnaire():
    """Return the active (status=1) questions of the Training Questionnaire."""
    return frappe.get_all(
        "IP Questionnaire",
        filters={"parent": "Training Questionnaire", "status": 1},
        fields=["*"],
    )
@frappe.whitelist()
def create_feedback_questionnaire_form(answer_dict, ans_sheet, training):
    """Persist the user's training feedback answers and report success.

    *answer_dict* is a JSON string of question -> answer pairs.
    """
    answers = json.loads(answer_dict)
    feedback = frappe.get_doc({
        "doctype": "Training Feedback",
        "user": frappe.session.user,
        "user_answers": answers,
        "training": training,
        "answer_sheet": ans_sheet,
    })
    feedback.flags.ignore_permissions = True
    feedback.insert()
    return "success"
def solve(a):
    """Return (count of even ints) - (count of odd ints) in *a*.

    Non-integer elements (e.g. letters) are ignored; 0 counts as even.
    """
    return sum(1 if value % 2 == 0 else -1
               for value in a if isinstance(value, int))
'''
Given an array, return the difference between the count of even numbers and the
count of odd numbers. 0 will be considered an even number.
For example:
solve([0,1,2,3]) = 0 because there are two even numbers and two odd numbers.
Even - Odd = 2 - 2 = 0.
Let's now add two letters to the last example:
solve([0,1,2,3,'a','b']) = 0. Again, Even - Odd = 2 - 2 = 0. Ignore letters.
The input will be an array of lowercase letters and numbers only.
Haskell:
solve ["0","1","2","3","a","b"] = 0 -- In Haskell, all array elements will be strings.
Other languages:
solve([0, 1 ,2, 3, 'a', 'b']) = 0
'''
|
from flask import Flask, render_template
# Single module-level application instance used by the route decorators below.
app = Flask(__name__)
@app.route('/')
def hello_world():
    """Render the site's index page for the root URL."""
    return render_template('index.html')
@app.route('/<name>')
def hello(name):
    """Greet *name* from the URL path, first letter capitalized."""
    greeting = name.capitalize()
    return "Hello " + greeting
if __name__ == '__main__':
    # debug=True enables the reloader and interactive debugger; dev use only.
    app.run(debug = True)
    # or
    # app.debug = True
    # app.run()
|
import plotly
import plotly.plotly as py
import plotly.graph_objs as go
import pandas as pd

# SECURITY: credentials are hard-coded in source; move them to a config file
# or environment variables before sharing this script.
plotly.tools.set_credentials_file(username='zz186', api_key='g7hnRhD8XruvpT3eKj1C')

# Count FIFA19 players per quality tier and render the shares as a pie chart.
data = pd.read_csv("FIFA19 - Ultimate Team players.csv")
# .copy() avoids pandas' SettingWithCopyWarning that the original
# slice-then-assign triggered.
df2 = data[["quality", "league"]].copy()
df2["counts"] = 1
df2 = df2.groupby(['quality']).sum().reset_index()
# Column-to-list replaces the original index loop (clearer and faster).
a = df2["quality"].tolist()
b = df2["counts"].tolist()
trace = go.Pie(labels=a, values=b,
               hoverinfo='label+value', textinfo='percent+label',
               textfont=dict(size=20),
               marker=dict(
                   line=dict(color='#000000', width=2)))
data = [trace]
layout = {
    "title": "Different quality of players percentage",
    "annotations": [
        {
            "font": {
                "size": 30
            },
            "showarrow": False,
            "text": "GHG",
            "x": 0.20,
            "y": 0.5
        }]
}
fig = go.Figure(data=data, layout=layout)
py.plot(fig, filename='styled_pie_chart')
#py.iplot([trace], filename='styled_pie_chart')
import logging
import fmcapi
def test__application_category(fmc):
    """Exercise ApplicationCategories: list every category, then fetch one by name."""
    logging.info("Testing ApplicationCategory class.")
    all_categories = fmcapi.ApplicationCategories(fmc=fmc)
    logging.info("All ApplicationCategories -- >")
    result = all_categories.get()
    logging.info(result)
    logging.info(f"Total items: {len(result['items'])}")
    logging.info("\n")
    del all_categories
    single = fmcapi.ApplicationCategories(fmc=fmc, name="SMS tools")
    logging.info("One ApplicationCategory -- >")
    logging.info(single.get())
    logging.info("\n")
    logging.info("Testing ApplicationCategory class done.\n")
|
from .autoprotocol import *
from .behavior_specialization import *
from .markdown import *
from .opentrons import *
|
import os
# Comment this out to use your gpu
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = ""
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import *
from tensorflow.keras import metrics
from tensorflow.keras import optimizers
from tensorflow.keras.models import Model
import tensorflow.keras.backend as K
import tensorflow.keras
import numpy as np
import pandas as pd
#import argparse
np.random.seed(0)

########
# PLAN #
########
'''
Create a neural network that takes a sequence of numbers and returns the next number
Very simple sanity check
'''

################
# CREATE MODEL #
################
max_features = 10

# Layers.  `net_input` avoids shadowing the builtin `input`.
net_input = Input(shape=(5, 1,), name="in1", dtype="float32")
# Bug fix: return_sequences expects a bool.  The original passed the STRINGS
# "true"/"false", which are both truthy, so the second LSTM silently returned
# full sequences.
lstm1 = LSTM(10, input_shape=(5, 1), return_sequences=True)(net_input)
lstm2 = LSTM(5, return_sequences=False)(lstm1)
flat = Flatten()(lstm2)

# Parallel dense branch fed directly from the raw input.
d1 = Dense(units=10, activation="relu")(net_input)
d2 = Dense(units=10, activation="relu")(d1)
d3 = Dense(units=10, activation="relu")(d2)
flat2 = Flatten()(d3)

merge = tensorflow.keras.layers.concatenate([flat, flat2], name="merge", axis=-1)
# Bug fix: binary_crossentropy expects per-bit probabilities in [0, 1]; the
# original used the unbounded relu here.
out = Dense(units=3, activation="sigmoid")(merge)

model = Model(inputs=net_input, outputs=out)
metrics_to_output = ['binary_accuracy']
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=metrics_to_output)
model.summary()

train_input = []
train_output = []
# Output bits are: (unfinished), (has a 2.0), (strictly decreasing, ignoring -1.0s)
# Input is >0, -1.0 denotes blank space
train_input.append([[0.0], [0.5], [1.0], [-1.0], [-1.0]])
train_output.append([1, 0, 0])
train_input.append([[0.0], [2.0], [0.5], [-1.0], [-1.0]])
train_output.append([1, 1, 0])
train_input.append([[0.0], [0.5], [1.0], [5.0], [10.0]])
train_output.append([0, 0, 0])
train_input.append([[5.0], [2.0], [-1.0], [-1.0], [-1.0]])
train_output.append([1, 1, 1])
train_input.append([[5.0], [2.0], [1.0], [0.5], [0.0]])
train_output.append([0, 1, 1])
train_input.append([[10.0], [2.0], [1.0], [0.5], [0.0]])
train_output.append([0, 1, 1])

my_input = np.asarray(train_input)
my_output = np.asarray(train_output)
model.fit(x=my_input, y=my_output, batch_size=6, epochs=100)
predictions = model.predict(my_input)
for i in range(0, len(my_output)):
    print("Predicted ", predictions[i], " instead of ", my_output[i])
model.summary()
|
import random

# Repeatedly print a random roll while the user keeps answering yes.
# NOTE(review): randrange(1, 6, 1) yields 1-5; for a six-sided die this
# should be randrange(1, 7) — confirm the intended range.
game = input("Do you want to play the game:")
# Case-insensitive check generalizes the original's explicit
# "yes"/"y"/"Yes"/"Y" list (still accepts all of those).
while game.lower() in ("yes", "y"):
    print(random.randrange(1, 6, 1))
    game = input("Do you want to play the game again:")
print("end")
# Demo: a grid-managed Tk window showing a single styled label.
from tkinter import *
root = Tk()
root.title("格子版面管理員")
text = Label(root, text="Hello World!",
    width="30", height="5",
    bg="black", fg="white")
# grid() places the label in row 0, column 0 of the window's grid layout.
text.grid(row=0, column=0)
root.mainloop()
# File:   tk_demo3.py
# Author: Kaiching Chang
# Date:   May, 2018
|
from rest_framework import serializers
from django.contrib.auth.models import User
from backend.profiles.models import Profile
class ProfileSerializer(serializers.Serializer):
    """Registration serializer: creates a Django User plus a linked Profile."""

    username = serializers.CharField(max_length=20)
    nickname = serializers.CharField(max_length=20)
    password = serializers.CharField(write_only=True)
    password_confirmation = serializers.CharField(write_only=True)

    def validate(self, attrs):
        # The confirmation field existed but was never compared to the
        # password; reject mismatches before anything is created.
        if attrs.get('password') != attrs.get('password_confirmation'):
            raise serializers.ValidationError("Passwords do not match.")
        return attrs

    def save(self):
        username = self.validated_data.get('username')
        # The original TODO left duplicate usernames unchecked (create_user
        # would then raise a raw IntegrityError); fail with a clean,
        # catchable validation error instead.
        # 참고
        # https://eunjin3786.tistory.com/270
        if User.objects.filter(username=username).exists():
            raise serializers.ValidationError("Username already exists.")
        user = User.objects.create_user(
            username=username,
            password=self.validated_data.get('password')
        )
        # TODO check already exists nickname.
        profile = Profile(
            user=user,
            nickname=self.validated_data.get('nickname')
        )
        profile.save()
        return profile
|
# Advent of Code 2018 Day 4 (part 2): find the guard who is most often asleep
# on the same minute, and that minute.
# NOTE(review): this script shadows the builtins `input` and `min`.
with open('Day 4/data.dat', "r") as data:
    input = [x for x in data]
# Timestamps sort lexicographically, which orders the log chronologically.
input.sort()
watches = []   # one entry per guard shift: [id, date, time, sleep intervals, interval count, total minutes]
guards = {}    # guard id -> [total minutes asleep, per-minute sleep counts (60 slots)]
for line in input:
    info = line.replace("[", "").replace(']','').split(" ")
    if info[2] == 'Guard':
        #number, date, time, list of times asleep, times fallen asleep, total min
        watches.append([int(info[3][1:]), info[0], info[1], [], 0, 0])
        if int(info[3][1:]) not in guards.keys():
            guards[int(info[3][1:])] = [0,[0]*60]
    if info[2] == 'falls':
        # Start a new sleep interval with the minute the guard fell asleep.
        watches[-1][3].append([int(info[1].split(':')[1])])
    if info[2] == 'wakes':
        # Expand the current interval with every slept minute (wake minute excluded),
        # then fold it into the per-guard totals and per-minute histogram.
        for min in range(watches[-1][3][watches[-1][4]][0]+1, int(info[1].split(':')[1])):
            watches[-1][3][watches[-1][4]].append(min)
        watches[-1][5] += watches[-1][3][watches[-1][4]][-1] - watches[-1][3][watches[-1][4]][0] + 1
        guards[watches[-1][0]][0] += (watches[-1][3][watches[-1][4]][-1] - watches[-1][3][watches[-1][4]][0] + 1)
        for x in watches[-1][3][watches[-1][4]]:
            guards[watches[-1][0]][1][x] += 1
        watches[-1][4] += 1
# Pick the (guard, minute) pair with the highest single-minute sleep count.
maxx = 0
for x in guards.keys():
    if max(guards[x][1]) > maxx:
        maxx = max(guards[x][1])
        guard = x
        min = guards[x][1].index(max(guards[x][1]))
with open("Day 4/result 2.dat", "w") as result:
    result.write(f'{guard}\n{min}')
|
class TextHelper:
    """Small text-formatting utilities."""

    @staticmethod
    def hide_sensitive_data(string: str, count: int = 4) -> str:
        """Mask *string* with asterisks, keeping only its last *count* characters.

        Strings no longer than *count* are returned unchanged (there is
        nothing to mask around them).  A non-positive *count* masks the whole
        string — the original returned len(string) asterisks FOLLOWED BY the
        full plaintext when count was 0 (string[-0:] is the whole string).
        """
        if count <= 0:
            return "*" * len(string)
        if len(string) <= count:
            return string
        return f"{'*' * (len(string) - count)}{string[-count:]}"
|
import numpy as np
import matplotlib.pyplot as plt
from pandas.core.common import flatten
from fourierMethods import *
from multipdesys import *
N=64  # number of grid points on [0, 2*pi]
def g(x):
    """Right-hand-side forcing term: 2 + 6*sin(6x) - 38*cos(6x)."""
    return 2 + 6 * np.sin(6 * x) - 38 * np.cos(6 * x)
# Compare a spectral (FFT) solve of a constant-coefficient ODE against a
# finite-difference tridiagonal solve on the same grid.
x=np.linspace(0,2*np.pi,N)   # grid on [0, 2*pi]
G=g(x)
# --- Spectral solve: transform the RHS, divide by the operator's symbol ---
Fg=np.fft.fft(G)/N
FF=[]
K=findK(N)   # wavenumbers (from fourierMethods) — presumably fftfreq-style ordering; verify
for i in range(0,len(K)):
    # Invert the operator in Fourier space: coefficients imply the symbol
    # k^2 + i*k + 2 applied to -g_k.
    # NOTE(review): confirm the sign convention against the assignment's ODE.
    ff=(-Fg[i])/(K[i]**2 + K[i]*1j +2)
    FF.append(ff)
FF=np.array(FF)*N
f=np.fft.ifft(FF)
# --- Finite-difference solve of the same problem (tridiagonal system) ---
d=np.copy(G)
D=d[1:-1]   # interior points; both boundary values fixed to 0 below
L=len(D)
A=[]   # sub-diagonal
B=[]   # main diagonal
C=[]   # super-diagonal
dx=abs(x[0]-x[1])
# Stencil coefficients: centered second difference plus one-sided first-order terms.
alpha=(1/(dx**2))-(1/(dx))
beta=(-2/(dx**2))-2
gamma=(1/(dx**2))+(1/(dx))
for i in range(0,L):
    if i==0:
        C.append(alpha)
        B.append(beta)
    elif i==L-1:
        A.append(gamma)
        B.append(beta)
    else:
        A.append(alpha)
        B.append(beta)
        C.append(gamma)
# NOTE(review): at i==0 the SUPER-diagonal receives alpha and at i==L-1 the
# SUB-diagonal receives gamma — confirm this is not an accidental swap.
xc=[0]
xc.append(list(TDMAsolver(A, B, C, D)))   # Thomas-algorithm solver from multipdesys
xc.append([0])
xc=list(flatten(xc))
# --- Plot both solutions for comparison ---
figure1=plt.figure()
ax=figure1.add_subplot()
ax.plot(x,f,label="spectral",color='red')
ax.plot(x,xc,label="FD",color='green')
ax.set_xlabel("x")
ax.set_ylabel("f(x)")
ax.legend(loc='best')
figure1.suptitle("Function 1")
plt.show()
|
import numpy as np
def activation(v):
    """Heaviside step activation: return 1 when v >= 0, else 0.

    Implements the behavior described by the original placeholder comment
    ("output 1 if v >= 0, 0 otherwise"), which had been left as None.
    """
    return 1 if v >= 0 else 0
def predict(x, w):
    """Return the perceptron's firing decision (0 or 1) for input *x* under weights *w*.

    *x* is a bias-augmented input vector (first component 1); the logit is
    the dot product w·x, thresholded through the step activation.  Fills in
    the original's None placeholders.
    """
    # calculate logit, v
    v = np.dot(w, x)
    # activate v
    return activation(v)
def main():
    """Skeleton driver for the perceptron learning-rule exercise."""
    # define input points; the first column is the bias input (= 1)
    pointA = np.array([1,0.5,1])
    pointB = np.array([1,1,0.5])
    # NOTE(review): pointC duplicates pointB — confirm the intended coordinates.
    pointC = np.array([1,1,0.5])
    # define labels
    labelA = 1
    labelB = -1
    labelC = -1
    # define initial weights
    w = np.array([0,0,0])
    # define learning rate
    eta = 0.5
    for i in range(0,3):
        # this should be almost identical to example "LearningRule.py"
        # TODO: apply the perceptron weight update here for each misclassified point.
        print(w)
    # Report whether each point is classified as its label.
    # NOTE(review): predict returns 0/1 while labels are +/-1 — the B/C
    # comparisons can never be True as written; confirm the label encoding.
    print(labelA == predict(pointA, w))
    print(labelB == predict(pointB, w))
    print(labelC == predict(pointC, w))
# Script entry point.
if __name__ == '__main__':
    main()
|
# Demo: detect faces with MTCNN, align/crop each face, and print its
# 128-d FaceNet embedding.
import cv2
import numpy as np
import utils.utils as utils
from net.inception import InceptionResNetV1
from net.mtcnn import mtcnn
import tensorflow as tf
physical_devices = tf.config.experimental.list_physical_devices('GPU')
# Hard requirement on a GPU; memory growth avoids grabbing all GPU memory.
assert len(physical_devices) > 0, "Not enough GPU hardware devices available"
tf.config.experimental.set_memory_growth(physical_devices[0], True)
if __name__ == "__main__":
    # ------------------------------#
    # Stage thresholds for the three MTCNN networks
    # ------------------------------#
    threshold = [0.5, 0.7, 0.8]
    # ------------------------------#
    # Create the MTCNN detector
    # ------------------------------#
    mtcnn_model = mtcnn()
    # ------------------------------#
    # Read the image and detect faces (model expects RGB, cv2 loads BGR)
    # ------------------------------#
    img = cv2.imread('face_dataset/wyf.png')
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    rectangles = mtcnn_model.detectFace(img, threshold)
    print("---------------",rectangles)
    draw = img.copy()
    # ------------------------------#
    # Convert the detection boxes to squares
    # ------------------------------#
    rectangles = utils.rect2square(np.array(rectangles))
    # ------------------------------#
    # Load the FaceNet embedding model
    # ------------------------------#
    model_path = './model_data/facenet_keras.h5'
    facenet_model = InceptionResNetV1()
    facenet_model.load_weights(model_path)
    for rectangle in rectangles:
        # ---------------#
        # Crop the face; landmarks are shifted into crop coordinates
        # ---------------#
        landmark = np.reshape(rectangle[5:15], (5, 2)) - np.array([int(rectangle[0]), int(rectangle[1])])
        crop_img = img[int(rectangle[1]):int(rectangle[3]), int(rectangle[0]):int(rectangle[2])]
        # -----------------------------------------------#
        # Align the face using its five facial landmarks
        # -----------------------------------------------#
        cv2.imshow("before", cv2.cvtColor(crop_img, cv2.COLOR_RGB2BGR))
        crop_img, _ = utils.Alignment_1(crop_img, landmark)
        cv2.imshow("two eyes", cv2.cvtColor(crop_img, cv2.COLOR_RGB2BGR))
        # FaceNet expects 160x160 inputs with a leading batch dimension.
        crop_img = np.expand_dims(cv2.resize(crop_img, (160, 160)), 0)
        feature1 = utils.calc_128_vec(facenet_model, crop_img)
        print(feature1)
    cv2.waitKey(0)
|
import os
import matplotlib.pyplot as plt
from matplotlib.pyplot import xticks
import matplotlib.ticker as mtick
from utils import read_attack_stats, VICTIMS_COLORS, VICTIMS_LINESTYLES, \
COEFFS_COLORS, COEFFS_LINESTYLES, COEFFS_LABELS
import pandas as pd
# When True, thin the x-axis tick labels to every 5th iteration.
xticks_short = False
# Base font size for axis labels and legends.
LABELSIZE = 14
def attack_stats_against_camera_targets(poisons_root_path, res, target_ids):
    """Print the avg. attack accuracy against the physically-photographed
    ("camera") targets for each printed size, per victim and iteration.

    NOTE(review): `scores` is populated but never printed or returned —
    confirm whether that output was intended.
    """
    for print_size in [2, 4]:
        print_size = str(print_size)   # results are keyed by the size as a string
        poison_label = res['poison_label']
        ites = list(res['targets'][target_ids[0]].keys())
        victims = list(res['targets'][target_ids[0]][ites[0]]['victims'].keys())
        # Keep only targets that actually have camera captures recorded.
        target_ids = [t_id for t_id in target_ids if
                      len(res['targets'][t_id][ites[0]]['victims'][victims[0]]['camera'])]
        print("Printed size is {}".format(print_size))
        print("target_ids: {}".format(target_ids))
        attack_accs = pd.DataFrame(columns=victims)
        attack_accs.index.name = 'ite'
        scores = pd.DataFrame(columns=victims)
        scores.index.name = 'ite'
        for ite in ites:
            attack_accs_tmp = []
            scores_tmp = []
            for victim in victims:
                # Average probability assigned to the poison class.
                vals = [res['targets'][t_id][ite]['victims'][victim]['camera'][print_size]['scores'][poison_label]
                        for t_id in target_ids]
                scores_tmp.append(sum(vals) / len(vals))
                # Fraction of camera targets classified as the poison class.
                vals = [res['targets'][t_id][ite]['victims'][victim]['camera'][print_size]['prediction'] == poison_label
                        for t_id in target_ids]
                attack_accs_tmp.append(100 * (sum(vals) / len(vals)))
            attack_accs.loc[ite] = attack_accs_tmp
            scores.loc[ite] = scores_tmp
        print("Attack acc. against camera targets for {}".format(poisons_root_path))
        print(attack_accs)
        print("-------------------------")
def get_coeffs_dist(coeffs, ites):
    """Average the descending-sorted coefficient vectors over all targets and
    networks, per iteration.

    *coeffs* is indexed [iteration][target][network] -> 5 coefficients.
    Returns (column names, DataFrame indexed by iteration).
    """
    coeffs_name = ['c1', 'c2', 'c3', 'c4', 'c5']
    coeffs_dist = pd.DataFrame(columns=coeffs_name)
    coeffs_dist.index.name = 'ite'
    for ite, per_target in zip(ites, coeffs):
        totals = [0] * 5
        # Normalize by (#targets x #networks-of-first-target), matching the
        # data layout above.
        n_samples = len(per_target) * len(per_target[0])
        for per_net in per_target:
            for net_coeffs in per_net:
                ranked = sorted(net_coeffs, reverse=True)
                totals = [t + c for t, c in zip(totals, ranked)]
        coeffs_dist.loc[ite] = [t / n_samples for t in totals]
    return coeffs_name, coeffs_dist
def plot_coeffs_dist(coeffs, ites, plot_root_path):
    """Plot the averaged rank-ordered coefficient distribution per iteration
    and save it under *plot_root_path* as coeffs-dist-avg.pdf."""
    coeffs_name, coeffs_dist = get_coeffs_dist(coeffs, ites)
    # plot avg. attack acc.
    plt.figure(figsize=(6, 3), dpi=400)
    axis = plt.subplot(111)
    axis.set_xlabel('Iterations', fontsize=LABELSIZE-3)
    axis.set_ylabel('Avg. Coefficients Distribution', fontsize=LABELSIZE-3)
    axis.grid(color='black', linestyle='dotted', linewidth=0.5)
    axis.set_ylim([0, 1.0])
    for name in coeffs_name:
        axis.plot(ites, coeffs_dist[name], label=COEFFS_LABELS[name],
                  color=COEFFS_COLORS[name], linewidth=2,
                  linestyle=COEFFS_LINESTYLES[name])
    axis.legend(loc="upper right", fancybox=True, framealpha=0.5, fontsize=LABELSIZE-3)
    if xticks_short:
        locs, _ = xticks()
        xticks(locs[::5], ites[::5], rotation='vertical')
    plt.xticks(rotation=90)
    plt.savefig('{}/coeffs-dist-avg.pdf'.format(plot_root_path), bbox_inches='tight')
    plt.close()
def plot_attack_avg_stats_over_ites(poisons_root_path, res, target_ids, print_xylabels=False):
    """
    Simply plot the avg. attack accuracy (over different targets) based on the number of iterations.
    And plot the avg confidence score (over different targets) of the malicious intended class (i.e., poison class)
    based on the number of iterations.
    Same for avg. attack time, avg. clean acc, and avg. loss

    NOTE(review): reads the module-level global `epochs`, which is only set
    in the __main__ block — confirm before importing this module elsewhere.
    """
    poison_label = res['poison_label']
    # Sort iterations numerically, then keep them as strings (the dict keys).
    ites = sorted([int(ite) for ite in list(res['targets'][target_ids[0]].keys())])
    ites = [str(ite) for ite in ites]
    victims = list(res['targets'][target_ids[0]][ites[0]]['victims'].keys())
    attack_accs = pd.DataFrame(columns=victims)
    attack_accs.index.name = 'ite'
    scores = pd.DataFrame(columns=victims)
    scores.index.name = 'ite'
    clean_acc = pd.DataFrame(columns=victims)
    clean_acc.index.name = 'ite'
    losses = []
    times = []
    coeffs = []
    # Aggregate per-iteration averages over all targets (and victims).
    for ite in ites:
        losses_tmp = [float(res['targets'][t_id][ite]['total_loss']) for t_id in target_ids]
        losses.append(sum(losses_tmp) / len(losses_tmp))
        # the time performance was saved by mistake for these two targets
        times_tmp = [float(res['targets'][t_id][ite]['time']) for t_id in target_ids if t_id not in ['36', '39']]
        times.append(sum(times_tmp) / len(times_tmp))
        coeffs.append([res['targets'][t_id][ite]['coeff_list'] for t_id in target_ids])
        attack_accs_tmp = []
        clean_accs_tmp = []
        scores_tmp = []
        for victim in victims:
            # Avg. probability assigned to the poison class.
            vals = [res['targets'][t_id][ite]['victims'][victim]['scores'][poison_label] for t_id in target_ids]
            scores_tmp.append(sum(vals) / len(vals))
            # Fraction of targets misclassified as the poison class.
            vals = [res['targets'][t_id][ite]['victims'][victim]['prediction'] == poison_label for t_id in target_ids]
            attack_accs_tmp.append(100 * (sum(vals) / len(vals)))
            vals = [res['targets'][t_id][ite]['victims'][victim]['clean acc'] for t_id in target_ids]
            clean_accs_tmp.append(sum(vals) / len(vals))
        attack_accs.loc[ite] = attack_accs_tmp
        clean_acc.loc[ite] = clean_accs_tmp
        scores.loc[ite] = scores_tmp
    plot_root_path = '{}/plots-retrained-for-{}epochs/'.format(poisons_root_path, epochs)
    if not os.path.exists(plot_root_path):
        os.mkdir(plot_root_path)
    # plot avg. attack acc.
    plt.figure(figsize=(8, 5), dpi=400)
    ax = plt.subplot(111)
    if print_xylabels:
        ax.set_xlabel('Iterations', fontsize=LABELSIZE)
        ax.set_ylabel('Avg. Attack Accuracy', fontsize=LABELSIZE)
    ax.grid(color='black', linestyle='dotted', linewidth=0.5)
    ax.set_ylim([0, 100])
    for victim in victims:
        ax.plot(ites, attack_accs[victim], label=victim, color=VICTIMS_COLORS[victim], linewidth=1.5
                , linestyle=VICTIMS_LINESTYLES[victim])
    tick = mtick.FormatStrFormatter('%d%%')
    ax.yaxis.set_major_formatter(tick)
    if print_xylabels:
        ax.legend(loc="upper left", fancybox=True, framealpha=0.5, fontsize=LABELSIZE-1)
    if xticks_short:
        locs, _ = xticks()
        xticks(locs[::5], ites[::5], rotation='vertical')
    plt.xticks(rotation=90)
    plt.savefig('{}/attack-acc-avg.pdf'.format(plot_root_path), bbox_inches='tight')
    plt.close()
    print("Mean Attack Acc.")
    for ite in ites:
        attack_acc_avg = sum([attack_accs[victim][ite] for victim in victims]) / len(victims)
        print("ite: {}, attack_acc_avg: {:.2f}".format(ite, attack_acc_avg))
    # plot avg. (malicious) class score.
    plt.figure(figsize=(6, 4), dpi=400)
    ax = plt.subplot(111)
    ax.set_xlabel('Iterations', fontsize=LABELSIZE)
    ax.set_ylabel('Avg. Probability Score of Malicious (i.e., Poison) Class', fontsize=LABELSIZE)
    ax.grid(color='black', linestyle='dotted', linewidth=0.5)
    # ax.set_ylim([20, 70])
    for victim in victims:
        ax.plot(ites, scores[victim], label=victim, color=VICTIMS_COLORS[victim], linewidth=1.5,
                linestyle=VICTIMS_LINESTYLES[victim])
    ax.legend(loc="upper left", fancybox=True, framealpha=0.5, fontsize=LABELSIZE-1)
    if xticks_short:
        locs, _ = xticks()
        xticks(locs[::5], ites[::5], rotation='vertical')
    plt.xticks(rotation=90)
    plt.savefig('{}/attack-score-avg.pdf'.format(plot_root_path), bbox_inches='tight')
    plt.close()
    # plot avg. clean accuracy
    plt.figure(figsize=(6, 4), dpi=400)
    ax = plt.subplot(111)
    ax.set_xlabel('Iterations', fontsize=LABELSIZE)
    ax.set_ylabel('Avg. Clean Test Accuracy', fontsize=LABELSIZE)
    ax.grid(color='black', linestyle='dotted', linewidth=0.5)
    # ax.set_ylim([20, 70])
    for victim in victims:
        ax.plot(ites, clean_acc[victim], label=victim, color=VICTIMS_COLORS[victim], linewidth=1.5,
                linestyle=VICTIMS_LINESTYLES[victim])
    ax.legend(loc="upper left", fancybox=True, framealpha=0.5, fontsize=LABELSIZE-1)
    if xticks_short:
        locs, _ = xticks()
        xticks(locs[::5], ites[::5], rotation='vertical')
    plt.xticks(rotation=90)
    plt.savefig('{}/clean-acc-avg.pdf'.format(plot_root_path), bbox_inches='tight')
    plt.close()
    print("Mean Clean Test Acc.")
    for ite in ites:
        test_acc_avg = sum([clean_acc[victim][ite] for victim in victims]) / len(victims)
        print("ite: {}, attack_acc_avg: {:.2f}".format(ite, test_acc_avg))
    # plot avg. time
    plt.figure(figsize=(6, 4), dpi=400)
    ax = plt.subplot(111)
    ax.set_xlabel('Iterations', fontsize=LABELSIZE)
    ax.set_ylabel('Time (minute)', fontsize=LABELSIZE)
    ax.grid(color='black', linestyle='dotted', linewidth=0.5)
    # ax.set_ylim([20, 70])
    ax.plot(ites, [int(t/60) for t in times], label='Time', color='black', linewidth=2)
    ax.legend(loc="upper left", fancybox=True, framealpha=0.5, fontsize=LABELSIZE-1)
    if xticks_short:
        locs, _ = xticks()
        xticks(locs[::5], ites[::5], rotation='vertical')
    print("Avg. time after {}: {}".format(ites[-1], int(times[-1]/60)))
    plt.xticks(rotation=90)
    plt.savefig('{}/time.pdf'.format(plot_root_path), bbox_inches='tight')
    plt.close()
    print("Poisons Classification Acc.")
    for ite in ites:
        poison_acc_tmp = []
        for victim in victims:
            vals = [res['targets'][t_id][ite]['victims'][victim]['poisons predictions'] for t_id in target_ids]
            vals = [(100.0 * sum([v == poison_label for v in val])) / len(val) for val in vals]
            poison_acc_tmp.append(sum(vals) / len(vals))
        poison_acc_tmp = sum(poison_acc_tmp) / len(victims)
        print("ite: {}, poison_acc: {}".format(ite, poison_acc_tmp))
    # plot coeffs dist (only meaningful for the convex-combination attack)
    if 'end2end' not in poisons_root_path:
        plot_coeffs_dist(coeffs, ites, plot_root_path)
if __name__ == '__main__':
    import sys
    # Usage: script.py <retrain_epochs> <poisons_path> [<poisons_path> ...]
    epochs = sys.argv[1]
    paths = sys.argv[2:]
    # assert 'convex' in paths[0]
    # assert len(paths) <= 1 or ('mean' in paths[1] and 'mean-' not in paths[1])
    # assert len(paths) <= 2 or 'mean-' in paths[2]
    print("NOTE THAT WE ARE EVALUATING THE CASE THAT THE VICTIMS ARE RETRAINED FOR {} EPOCHS"
          .format(epochs))
    res = []
    target_ids = None
    # Only evaluate targets present in every results directory.
    for path in paths:
        r = read_attack_stats(path, retrain_epochs=epochs)
        if target_ids is None:
            target_ids = set(r['targets'].keys())
        else:
            target_ids = target_ids.intersection(r['targets'].keys())
        res.append(r)
    target_ids = sorted(list(target_ids))
    print("Evaluating {}\n Target IDs: {}".format("\n".join(paths), target_ids))
    for path, r in zip(paths, res):
        plot_attack_avg_stats_over_ites(poisons_root_path=path, res=r, target_ids=target_ids,
                                        print_xylabels=True if 'convex' in path else False)
    # print("Now evaluating against camera targets")
    # for path, r in zip(paths, res):
    #     attack_stats_against_camera_targets(poisons_root_path=path, res=r, target_ids=target_ids)
from kivy.core.window import Window
from kivy.app import App
from kivy.uix.label import Label
import re
from datetime import date
from kivy.uix.popup import Popup
# To Calculate The Age From Date Of Birth
def match(p_dob, p_age):
    """Validate p_dob.text as a dd/mm/yyyy date and fill p_age.text with the age.

    p_dob, p_age: widgets exposing a ``.text`` attribute.
    Returns True when the date is INVALID (error flag used by the caller),
    None on success.

    Fix: the age is now decremented when the birthday has not yet occurred
    this year; previously it was a bare year difference.
    """
    dob_pattern = r'(((0[1-9]|[12][0-9]|3[01])([/])(0[13578]|10|12)([/])(\d{4}))|(([0][1-9]|[12][0-9]|30)([/])(0[469]|11)([/])(\d{4}))|((0[1-9]|1[0-9]|2[0-8])([/])(02)([/])(\d{4}))|((29)(\.|-|\/)(02)([/])([02468][048]00))|((29)([/])(02)([/])([13579][26]00))|((29)([/])(02)([/])([0-9][0-9][0][48]))|((29)([/])(02)([/])([0-9][0-9][2468][048]))|((29)([/])(02)([/])([0-9][0-9][13579][26])))'
    if re.match(dob_pattern, p_dob.text):
        today = date.today()
        dob_day = int(p_dob.text[0:2])
        dob_month = int(p_dob.text[3:5])
        dob_year = int(p_dob.text[6:10])
        age = today.year - dob_year
        # Birthday not reached yet this year -> one year younger.
        if (today.month, today.day) < (dob_month, dob_day):
            age -= 1
        p_age.text = str(age) + " Years"
    else:
        return True
# To check that weight and height is numeric only
def number_only(value):
    """Return True when value.text is NOT purely numeric (error flag).

    An empty string matches the pattern and therefore counts as numeric,
    mirroring the original behavior; emptiness is checked elsewhere.
    """
    if re.match(r'^[0-9]*$', value.text):
        return None
    return True
# All the popups for the possible responses
def submit_popup(p_weight, p_height, p_smoke_yes, p_smoke_no, p_dob, p_age, p_fname, p_gender_f, p_gender_m,
                 p_gender_o,
                 p_lname, p_mname):
    """Validate the form widgets and open exactly one popup.

    All p_* arguments are Kivy widgets: text inputs expose ``.text``,
    radio/check buttons expose ``.active``. Shows, in priority order: an
    empty-field error, a non-numeric error, a bad date-of-birth error,
    a missing-gender error, or the success popup.
    """
    # To get the smoker radio button value
    if p_smoke_yes.active:
        smoke = "Yes"
        print("smoker=" + smoke)
    else:
        smoke = "No"
        print("smoker=" + smoke)
    # To get the gender radio button value.
    # NOTE(review): if no gender button is active, `gender` stays unbound;
    # harmless today because it is never read afterwards — confirm.
    if p_gender_m.active:
        gender = "Male"
        print(gender)
    elif p_gender_f.active:
        gender = "Female"
        print(gender)
    elif p_gender_o.active:
        gender = "Other"
        print(gender)
    # Popups for the submit button (all created eagerly, one opened below)
    pop_sub = Popup(title="Submit", title_align="center", content=Label(text="Information Submitted Successfully"),
                    size=(300, 200),
                    size_hint=(None, None), auto_dismiss=True)
    pop_empty = Popup(title="Error", title_align="center",
                      content=Label(text="Empty Field.\nPlease Fill All Information"),
                      size=(300, 200),
                      size_hint=(None, None), auto_dismiss=True)
    pop_gender = Popup(title="Error", title_align="center",
                       content=Label(text="Please Select A Gender."),
                       size=(300, 200),
                       size_hint=(None, None), auto_dismiss=True)
    pop_number = Popup(title="Error", title_align="center",
                       content=Label(text="Height and Weight Must be numbers."),
                       size=(300, 200),
                       size_hint=(None, None), auto_dismiss=True)
    dob_pop = Popup(title="Error", size=(280, 200), size_hint=(None, None),
                    content=Label(text="Please Enter a valid Date Of Birth. \n(e.g. 01/01/1999)"))
    # Validation cascade: emptiness -> numeric fields -> date of birth -> gender
    if p_fname.text == '' or p_height.text == '' or p_weight.text == '' \
            or p_dob.text == '' or p_mname.text == '' or p_lname.text == '':
        pop_empty.open()
    elif number_only(p_weight) or number_only(p_height):
        # number_only() returns True on NON-numeric input
        pop_number.open()
        p_weight.text = ''
        p_height.text = ''
    elif match(p_dob, p_age):
        # match() returns True when the date is invalid
        dob_pop.open()
        p_age.text = ''
    elif p_gender_m.active == False and p_gender_f.active == False and p_gender_o.active == False:
        pop_gender.open()
    else:
        pop_sub.open()
|
import subprocess

# Generate the DIMACS encoding, then run the solver driver.
# subprocess.run with an argument list avoids spawning a shell (os.system did);
# exit statuses are still ignored, matching the original behavior.
subprocess.run(["python3", "generate_dimacs_gift_v2.py"])
subprocess.run(["python3", "main.py"])
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 4 15:38:54 2017
@author: zx621293
"""
def RefineData(protein, labelling):
    """Drop columns with more than 3 NaNs, then rows with 3 or more NaNs.

    protein   : pandas DataFrame of protein measurements (rows = experiments,
                columns = protein covariates).
    labelling : sequence of ground-truth labels, one per row of `protein`.

    Returns (refined data array, refined labels, deleted row indices,
    deleted column indices).
    """
    import numpy as np  # local import: this module had no top-level imports (bug fix)

    mydata = protein.values.astype("float64")
    delrow = list()
    delcolumn = list()
    # NaN != NaN, so (x != x) marks missing entries.
    for column in range(mydata.shape[1]):
        if sum(mydata[:, column] != mydata[:, column]) > 3:  # NaN count in this COLUMN
            delcolumn.append(column)
    print('%i proteins covariates are deleted due to more than three missing value for that protein' % len(delcolumn))
    print(delcolumn)
    for row in range(mydata.shape[0]):
        if sum(mydata[row] != mydata[row]) >= 3:  # NaN count in this row
            delrow.append(row)
    print('%i experiments are deleted due to more than three missing protein value for that experiment' % len(delrow))
    print(delrow)
    mydata_refine = np.delete(mydata, delrow, 0)
    mydata_refine = np.delete(mydata_refine, delcolumn, 1)
    label_each = np.array([labelling])
    label_refine = np.delete(label_each, delrow, 1)
    print('Check no more missing value in our refined dataset: %s' % ((mydata_refine != mydata_refine).sum() == 0))
    if (mydata_refine != mydata_refine).sum() > 0:
        # NOTE(review): this fallback filters the ORIGINAL matrix and reuses its
        # row mask on label_refine; the shapes can disagree and mydata_refine is
        # returned unchanged — kept as-is, verify the original intent.
        mydata = mydata[np.all(mydata > 0, axis=1)]
        label_refine = label_refine[np.all(mydata > 0, axis=1)]
    print('The resulting input data matrix is refined from \n%i by %i \nto \n%i by %i dimension' % (protein.shape + mydata_refine.shape))
    return mydata_refine, label_refine[0], delrow, delcolumn
|
def adjusted_salary(salary):
    """Return the salary after the raise: 15% for salaries up to 1250, 10% above."""
    if salary <= 1250:
        return (15 / 100) * salary + salary
    return (10 / 100) * salary + salary


def main():
    """Prompt for a salary and print the raised value (same messages as before)."""
    salary = float(input('Please type your salary:'))
    new_salary = adjusted_salary(salary)
    if salary <= 1250:
        print('You have a salary increase of 15% new salary is £ {} '.format(new_salary))
    else:
        print('You have a salary increase of 10% your new salary is £ {}'.format(new_salary))


if __name__ == "__main__":
    # Guarded so importing this module no longer triggers the prompt.
    main()
|
import matplotlib.image as mpimg
import numpy as np
import os,sys
import math
from skimage.morphology import opening, closing, white_tophat
from skimage.morphology import square
import postprocessing
from plots import *
from skimage.filters import gaussian
### Data extraction ###
def load_training_images(n):
    """Load up to n training images and their groundtruth masks.

    Returns a pair of lists (imgs, gt_imgs) in matching order, read from
    Datasets/training/images and Datasets/training/groundtruth.
    """
    root_dir = "Datasets/training/"
    image_dir = root_dir + "images/"
    gt_dir = root_dir + "groundtruth/"
    files = os.listdir(image_dir)
    count = min(n, len(files))
    imgs, gt_imgs = [], []
    for name in files[:count]:
        imgs.append(load_image(image_dir + name))
        gt_imgs.append(load_image(gt_dir + name))
    return imgs, gt_imgs
def load_image(path):
    """Read the image stored at *path* and return it as an array."""
    return mpimg.imread(path)
def get_patches(imgs, patch_size):
    """Split every image into square patches and return them as one flat array."""
    per_image = (img_crop(img, patch_size, patch_size) for img in imgs)
    flat = [patch for patches in per_image for patch in patches]
    return np.asarray(flat)
def get_features_from_patches(img_patches, extract_func):
    """Build the feature matrix X by applying extract_func to every patch."""
    return np.asarray([extract_func(patch) for patch in img_patches])
def get_labels_from_patches(gt_patches, foreground_threshold):
    """Build the label vector Y: 1 where a patch's mean intensity exceeds
    foreground_threshold, 0 otherwise."""
    means = (np.mean(patch) for patch in gt_patches)
    return np.asarray([1 if m > foreground_threshold else 0 for m in means])
def get_features_from_img(img, extract_func, patch_size):
    """Compute the X matrix of a single image by splitting it into patches
    and extracting one feature row per patch."""
    return get_features_from_patches(img_crop(img, patch_size, patch_size), extract_func)
def get_labels_from_img(gt_img, foreground_threshold, patch_size):
    """Compute the label vector Y of a single groundtruth image via its patches."""
    return get_labels_from_patches(img_crop(gt_img, patch_size, patch_size), foreground_threshold)
### Image manipulation ###
def img_float_to_uint8(img):
    """Rescale an image so its values span [0, 255] and cast to uint8."""
    shifted = img - np.min(img)
    return (shifted / np.max(shifted) * 255).round().astype(np.uint8)
def concatenate_images(img, gt_img):
    """Place an image and its groundtruth side by side in a single array.

    A grayscale groundtruth is first expanded to three identical uint8
    channels so both halves share the channel dimension."""
    if len(gt_img.shape) == 3:
        return np.concatenate((img, gt_img), axis=1)
    rows, cols = gt_img.shape[0], gt_img.shape[1]
    gt8 = img_float_to_uint8(gt_img)
    gt_rgb = np.zeros((rows, cols, 3), dtype=np.uint8)
    for c in range(3):
        gt_rgb[:, :, c] = gt8
    return np.concatenate((img_float_to_uint8(img), gt_rgb), axis=1)
def img_crop(im, w, h):
    """Split *im* into w-by-h patches and return them as a list.

    Patches are emitted column-block by column-block (outer loop over the
    second axis), matching label_to_img's fill order."""
    width, height = im.shape[0], im.shape[1]
    has_channels = len(im.shape) >= 3
    patches = []
    for col in range(0, height, h):
        for row in range(0, width, w):
            if has_channels:
                patches.append(im[row:row + w, col:col + h, :])
            else:
                patches.append(im[row:row + w, col:col + h])
    return patches
def label_to_img(imgwidth, imgheight, w, h, labels):
    """Paint a linear array of patch labels back onto an image.

    w and h give the patch size; patches are filled in the same column-major
    order used by img_crop."""
    im = np.zeros([imgwidth, imgheight])
    idx = 0
    for col in range(0, imgheight, h):
        for row in range(0, imgwidth, w):
            im[row:row + w, col:col + h] = labels[idx]
            idx += 1
    return im
def img_to_label(img, patch_size, Zi):
    """Transform an image into a linear int8 array with one sample per patch.

    Fix/generalization: the pixel stride was hard-coded to 16; it is now
    derived from *patch_size* (identical behavior for patch_size == 16).

    Zi: only its length is used, to size the output array.
    """
    height = img.shape[0] / patch_size
    width = img.shape[1] / patch_size
    labels = np.zeros(len(Zi), dtype=np.int8)
    for i in range(int(height * width)):
        # Sample the pixel at linear offset i * patch_size, row-major over img.
        pos = i * patch_size
        labels[i] = img[math.floor(pos / img.shape[1])][pos % img.shape[1]]
    return labels
### Feature extraction ###
def extract_features_6d(img):
    """6-D feature: per-channel mean followed by per-channel variance."""
    return np.append(np.mean(img, axis=(0, 1)), np.var(img, axis=(0, 1)))
def extract_features_2d(img):
    """2-D feature: overall mean and overall variance of the (gray) image."""
    return np.append(np.mean(img), np.var(img))
def extract_features_12d(img):
    """12-D feature: per-channel mean, then variance, min and max of each RGB channel."""
    tail = [np.var(img, axis=(0, 1)), np.min(img, axis=(0, 1)), np.max(img, axis=(0, 1))]
    return np.append(np.mean(img, axis=(0, 1)), tail)
def augment_features_6d_with_adjacent(X, side_px, patch_size, n_images):
    """Extend an array of 6D features with the per-channel mean of the mean of
    adjacent patches.

    This function assumes an ordered sequence of square images of the same size.
    X: the features array (one row per patch, images concatenated in order).
    side_px: side length of the images, in pixels.
    patch_size: side length of a patch, in pixels.
    n_images: the number of images.
    Returns a features array with D=9.

    BUG FIX: patch indices are now offset by the image's first row; the
    original indexed X[i] directly and therefore read the FIRST image's
    patches for every image.
    """
    if side_px % patch_size != 0:
        raise ValueError("Invalid size")
    side_patches = side_px // patch_size
    patches_per_image = side_patches * side_patches
    vec = []
    for n in range(n_images):
        base = n * patches_per_image  # first X row belonging to image n
        for i in range(patches_per_image):
            # Collect the channel means (first 3 columns) of each in-bounds neighbor.
            means = []
            if i % side_patches != 0:                        # has a left neighbor
                means.append(X[base + i - 1, :3])
            if (i + 1) % side_patches != 0:                  # has a right neighbor
                means.append(X[base + i + 1, :3])
            if i >= side_patches:                            # has a neighbor above
                means.append(X[base + i - side_patches, :3])
            if (patches_per_image - 1 - i) >= side_patches:  # has a neighbor below
                means.append(X[base + i + side_patches, :3])
            vec.append(np.mean(np.array(means), axis=0))
    return np.hstack((X, np.array(vec).reshape(-1, 3)))
### Predictions analysis ###
def true_positive_rate(Z, Y):
    """Fraction of ALL samples that are predicted 1 and truly 1.

    Note: the denominator is the total sample count, not the positive count —
    f_score relies on all four rates sharing this denominator."""
    hits = set(np.where(Z == 1)[0]) & set(np.where(Y == 1)[0])
    return len(hits) / float(len(Z))
def true_negative_rate(Z, Y):
    """Fraction of ALL samples that are predicted 0 and truly 0."""
    hits = set(np.where(Z == 0)[0]) & set(np.where(Y == 0)[0])
    return len(hits) / float(len(Z))
def false_negative_rate(Z, Y):
    """Fraction of ALL samples that are predicted 0 but truly 1."""
    misses = set(np.where(Z == 0)[0]) & set(np.where(Y == 1)[0])
    return len(misses) / float(len(Z))
def false_positive_rate(Z, Y):
    """Fraction of ALL samples that are predicted 1 but truly 0."""
    misses = set(np.where(Z == 1)[0]) & set(np.where(Y == 0)[0])
    return len(misses) / float(len(Z))
def f_score(Z, Y, beta=1):
    """F-beta score built from the rate helpers above.

    beta: emphasis on recall (false negatives); beta=1 gives the F1-score.
    The shared all-samples denominator of the rate helpers cancels out here."""
    tp_term = (1 + beta ** 2) * true_positive_rate(Z, Y)
    denom = tp_term + beta ** 2 * false_negative_rate(Z, Y) + false_positive_rate(Z, Y)
    return tp_term / denom
### Submission ###
def create_submission(model, extraction_func, patch_size, preproc, aggregate_threshold, image_proc):
    """Load the 50 test images, predict road/background per patch and write
    Datasets/submission.csv in the AICrowd format.

    model               : fitted predictor exposing .predict(X).
    extraction_func     : per-patch feature extractor (see extract_features_*).
    patch_size          : side length of the prediction patches.
    preproc             : optional transformer with .transform(X) (e.g. a scaler).
    aggregate_threshold : threshold used when re-aggregating to 16px patches.
    image_proc          : when truthy, apply postprocessing.road_filters.
    """
    dir_t = "Datasets/test_set_images/"
    n_t = 50 # Number of test images
    output_patch_size = 16 # The patch size expected by AICrowd
    with open('Datasets/submission.csv', 'w') as f:
        f.write('id,prediction\n')
        for img_idx in range(1, n_t+1):
            img_path = dir_t + "test_{0}/test_{0}.png".format(img_idx)
            img = load_image(img_path)
            # Smooth before feature extraction (same sigma as training, presumably
            # — TODO confirm against the training pipeline).
            img = gaussian(img, sigma = 2, multichannel = True)
            # Run predictions
            Xi_t = get_features_from_img(img, extraction_func, patch_size)
            if preproc is not None:
                Xi_t = preproc.transform(Xi_t)
            Zi_t = model.predict(Xi_t)
            # Hard-coded probability cutoff applied to the raw predictions.
            nn_threshold = 0.3
            Zi_t = postprocessing.threshold_labels(Zi_t, nn_threshold)
            if image_proc:
                Zi_t = postprocessing.road_filters(Zi_t)
            # Re-aggregate to the 16px patches AICrowd expects if we predicted
            # at a different granularity.
            if patch_size != output_patch_size:
                Zi_t = postprocessing.aggregate_labels(Zi_t, patch_size, output_patch_size, aggregate_threshold)
            # Write predictions to file (column-major order, matching img_crop)
            pred_index = 0
            for j in range(0, img.shape[1], output_patch_size):
                for i in range(0, img.shape[0], output_patch_size):
                    f.write("{:03d}_{}_{},{}\n".format(img_idx, j, i, Zi_t[pred_index]))
                    pred_index += 1
|
from .product import Product
class Category:
    """A named product category and the products assigned to it.

    Category and Product keep each other in sync: adding or removing a
    product here also updates the product's own category list.
    """

    def __init__(self, category):
        self._category = category
        self._products = []

    @property
    def category(self) -> str:
        """Name of this category."""
        return self._category

    @category.setter
    def category(self, value):
        self._category = value

    @property
    def products(self) -> list:
        """Products currently assigned to this category."""
        return self._products

    def add_product(self, product: Product) -> None:
        """Attach *product* unless already present, updating both sides."""
        if self.has_product(product):
            return
        self._products.append(product)
        product.add_category(self)

    def has_product(self, product: Product) -> bool:
        """Whether *product* is already in this category."""
        return product in self._products

    def remove_product(self, product: Product) -> None:
        """Detach *product* if present, updating both sides."""
        if not self.has_product(product):
            return
        self._products.remove(product)
        product.remove_category(self)
|
# Copyright (C) 2016 Cuckoo Foundation.
# This file is part of Cuckoo Sandbox - http://www.cuckoosandbox.org
# See the file 'docs/LICENSE' for copying permission.
import logging
import os.path
import subprocess
import _winreg
from lib.common.abstracts import Auxiliary
from lib.common.registry import set_regkey
from lib.common.results import upload_to_host
log = logging.getLogger(__name__)

# HKLM registry path controlling which kernel debug messages are emitted;
# DbgView.start() sets its default value to 0xffffffff to capture everything.
DebugPrintFilter = (
    "SYSTEM\\CurrentControlSet\\Control\\Session Manager\\Debug Print Filter"
)
class DbgView(Auxiliary):
    """Run DbgView inside the analysis guest to capture debug output."""

    def start(self):
        """Launch bin/dbgview.exe when the 'dbgview' option is enabled.

        Enables the Debug Print Filter registry value so all DbgPrint output
        is captured, then starts DbgView logging to self.filepath.
        """
        if not self.options.get("dbgview"):
            return
        dbgview_path = os.path.join("bin", "dbgview.exe")
        if not os.path.exists(dbgview_path):
            log.error("DbgView.exe not found!")
            return
        # Make sure all logging makes it into DbgView.
        set_regkey(
            _winreg.HKEY_LOCAL_MACHINE, DebugPrintFilter,
            "", _winreg.REG_DWORD, 0xffffffff
        )
        self.filepath = os.path.join(self.analyzer.path, "bin", "dbgview.log")
        # Accept the EULA and enable Kernel Capture.
        subprocess.Popen([
            dbgview_path, "/accepteula", "/t", "/k", "/l", self.filepath,
        ])
        log.info("Successfully started DbgView.")

    def stop(self):
        """Upload the DbgView log to the host.

        NOTE(review): if start() returned early, self.filepath was never set
        and this raises AttributeError — confirm the framework tolerates it.
        """
        upload_to_host(self.filepath, os.path.join("logs", "dbgview.log"))
|
#!/usr/bin/env python3
import io
import csv
import utils
def download():
    """Fetch the DDI prediction CSV into ../data/pmid_26196247/."""
    utils.download_file('http://astro.temple.edu/~tua87106/DDI_pred.csv', '../data/pmid_26196247/DDI_pred.csv')
def map_to_drugbank():
    """Map both drugs of every predicted interaction to DrugBank IDs.

    Reads DDI_pred.csv, resolves each drug via its PubChem ID (falling back
    to its name), de-duplicates unordered pairs, writes the mapped rows to
    DDI_pred_mapped.csv and the unresolvable names to unmapped_names.csv.

    Returns [matched, duplicated, unmatched] counts.
    """
    seen_pairs = set()
    unmapped_names = set()
    results = []
    unmapped = 0
    duplicated = 0
    with io.open('../data/pmid_26196247/DDI_pred.csv', 'r', encoding='utf-8') as f:
        reader = csv.reader(f, delimiter=',', quotechar='"')
        # Header: Drug1_ID, Drug1_Name, Drug2_ID, Drug2_Name, Pred_Score
        next(reader, None)
        for raw in reader:
            row = [field.strip() for field in raw]
            id1 = utils.pubchem_to_drugbank_id(row[0])
            if id1 is None:
                id1 = utils.name_to_drugbank_id(row[1])
            id2 = utils.pubchem_to_drugbank_id(row[2])
            if id2 is None:
                id2 = utils.name_to_drugbank_id(row[3])
            if id1 is None:
                unmapped_names.add(row[1])
            if id2 is None:
                unmapped_names.add(row[3])
            if id1 is None or id2 is None:
                unmapped += 1
                continue
            # Canonical unordered pair key so A:B and B:A collapse together.
            lo, hi = (id1, id2) if id1 < id2 else (id2, id1)
            pair_key = '%s:%s' % (lo, hi)
            if pair_key in seen_pairs:
                duplicated += 1
                continue
            seen_pairs.add(pair_key)
            results.append([row[0], id1, row[1], row[2], id2, row[3], row[4]])
    with io.open('../data/pmid_26196247/DDI_pred_mapped.csv', 'w', newline='', encoding='utf-8') as f:
        writer = csv.writer(f, delimiter=',', quotechar='"')
        writer.writerow(['Drug1_ID', 'DrugBank1', 'Drug1_Name', 'Drug2_ID', 'DrugBank2', 'Drug2_Name', 'Pred_Score'])
        for row in results:
            writer.writerow(row)
    with io.open('../data/pmid_26196247/unmapped_names.csv', 'w', newline='', encoding='utf-8') as f:
        writer = csv.writer(f, delimiter=',', quotechar='"')
        for name in unmapped_names:
            writer.writerow([name])
    # Matched, Duplicated, Unmatched
    return [len(results), duplicated, unmapped]
def process() -> [int]:
    """Run the DrugBank mapping step; returns [matched, duplicated, unmatched]."""
    return map_to_drugbank()
def get_all_interaction_pairs() -> []:
    """Load the mapped interaction pairs from DDI_pred_mapped.csv.

    Returns rows of [drugbank1, name1, drugbank2, name2, score]."""
    with io.open('../data/pmid_26196247/DDI_pred_mapped.csv', 'r', encoding='utf-8') as f:
        reader = csv.reader(f, delimiter=',', quotechar='"')
        next(reader, None)  # skip the header row
        return [[row[1], row[2], row[4], row[5], float(row[6])] for row in reader]
|
# -*- coding:utf-8 -*-
from urllib import request,response,parse,error
import re
from bs4 import BeautifulSoup
class crawl:
    """Crawler for www.dysfz.cc movie search results: lists the matching
    movies and prints their download links (Baidu cloud, magnet, ed2k)."""

    def __init__(self,url):
        self.url=url

    # Fetch one paginated result page; returns the decoded HTML or None on error.
    def getPage(self,Num):
        try:
            url=self.url+str(Num)+'?o=2'
            req=request.urlopen(url)
            return str(req.read(),encoding='utf-8')
        except error.URLError:
            return None

    # Parse the movie listing: total count, page count, then every title/link.
    def getContent(self):
        page=self.getPage(1)
        # The page states "<N> 部相关电影" (N related movies); capture N.
        pattern_1=re.compile(r'(.*?)部相关电影')
        movie_num=re.findall(pattern_1,page)
        print('共有'+movie_num[0].strip()+'部相关电影')
        # 10 results per page; float page_num works since it is only compared.
        page_num=int(movie_num[0])/10+1
        print('共有%d页'%page_num)
        index=1
        while index<page_num:
            content=self.getPage(index)
            pattern_2 = re.compile(r'<h2><a target="_blank" href=(.*?)</a></h2>')
            items=re.findall(pattern_2,content)
            i=1
            for item in items:
                # The href value is the first quoted string in the anchor tag.
                pattern_3=re.compile(r'"(.*?)"')
                link=re.findall(pattern_3,item)
                print('%d-%d—'%(index,i)+item.replace('<em>','').replace('</em>',''))
                self.getLink(link[0])
                i=i+1
            index=index+1

    # Extract download addresses from one movie's detail page.
    def getLink(self,link):
        page=request.urlopen(link)
        content=str(page.read(),encoding='utf-8')
        soup=BeautifulSoup(content,'html.parser')
        # Baidu cloud-disk links are anchors whose text is 百度云盘.
        print('百度云盘:')
        for i in soup.find_all('a',target='_blank',text='百度云盘'):
            print(str(i.parent).replace(str(i),'').replace('<p>','').replace('</p>',''))
            print(i.get('href'))
        # Magnet / ed2k links are detected by their URL scheme prefix.
        print('迅雷下载:')
        for i in soup.find_all('a',target='_blank'):
            if str(i.get('href'))[0:6]=='magnet':
                print(i.string,end=': ')
                print(i.get('href'))
            if str(i.get('href'))[0:4] == 'ed2k':
                print(i.string, end=': ')
                print(i.get('href'))
# Build the search URL from the user-supplied movie name and start crawling.
url=r'http://www.dysfz.cc/'
key=str(input('输入电影名称:'))
url=url+'key/'+parse.quote(key)+'/'
find=crawl(url)
find.getContent()
import random


def sortear_ordem(alunos):
    """Return the students in a uniformly random presentation order.

    Replaces the original rejection-sampling loop (draw random slots until an
    empty one is found) with random.sample — same uniform result, bounded time.
    """
    return random.sample(alunos, k=len(alunos))


def main():
    """Read four student names and print them in a random order."""
    alunos = []
    for x in range(4):
        alunos.append(input('Digite o {}º aluno: '.format(x + 1)))
    # Renamed from `ord`, which shadowed the builtin.
    ordem = sortear_ordem(alunos)
    print('A ordem será: {}, {}, {}, {}'.format(ordem[0], ordem[1], ordem[2], ordem[3]))


if __name__ == '__main__':
    main()
|
import numpy as np
def compute_dist(mat, scl=None, wt=None, hist=None):
    """Pairwise squared-distance matrix of the rows of *mat*.

    mat  : numpy array of shape (N, nx), float — candidate points.
    scl  : [usf] optional (nx,) float array; columns are divided by it.
    wt   : [nusf] optional (N,) float array; distances get wt_i * wt_j weights.
    hist : optional previously-selected points, appended as extra rows
           (mutable default ``[]`` replaced by a None sentinel — bug fix).

    Returns the (N, N) matrix of (scaled/weighted) squared distances with the
    diagonal set to a mode-dependent sentinel: inf (plain), 10 (scaled),
    9999 (weighted).
    """
    if hist is None:
        hist = []
    # len() instead of truthiness also tolerates numpy arrays for hist.
    if len(hist) > 0:
        mat = np.concatenate((mat, hist), axis=0)
    N, ncols = mat.shape
    dmat = np.full((N, N), np.nan)
    diag_val = np.inf
    if scl is not None:
        assert scl.shape[0] == ncols, 'SCL should be of dim %d.' % ncols
        mat = mat / np.repeat(np.reshape(scl, (1, ncols)), N, axis=0)
        diag_val = 10
    for i in range(N):
        diff = np.repeat(np.reshape(mat[i, :], (1, ncols)), N, axis=0) - mat
        dmat[:, i] = np.sum(np.square(diff), axis=1)
    if wt is not None:
        dmat = np.multiply(dmat, np.outer(wt, wt))
        diag_val = 9999
    np.fill_diagonal(dmat, diag_val)
    return dmat
|
def sum(num1, num2, num3=None):
    """Print and return the sum of two or three numbers.

    The original file defined two same-named functions; the second silently
    shadowed the first, making the two-argument form unreachable. They are
    merged here with an optional third operand (both call shapes still work).
    Note: the name intentionally stays `sum` for caller compatibility, even
    though it shadows the builtin.
    """
    if num3 is None:
        total = num1 + num2
        print("Sum of {0}, {1} is : {2}".format(num1, num2, total))
    else:
        total = num1 + num2 + num3
        print("Sum of {0}, {1}, {2} is : {3}".format(num1, num2, num3, total))
    return total


def main():
    sum(3, 2, 3)


if __name__ == "__main__":
    main()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
class FieldConstrains(object):
    """Catalogue of the field-constraint names used by the implementation
    schemas, with helpers to classify a constraint as type- or value-level.

    (Spelling of the class name kept as-is: it is a public identifier.)
    """
    # Required
    # Examples:
    # ['allowEmpty']
    #
    # [+] - EmptinessValidator
    #
    # public function allowEmpty($value)
    ALLOW_EMPTY = 'allowEmpty' # field may be sent without a value / as an empty string; used to clear attributes
    # Examples:
    # ['notEmpty']
    #
    # [+] - EmptinessValidator
    #
    # public function notEmpty($value)
    NON_EMPTY = 'notEmpty' # field cannot be sent without a value
    # Examples:
    # ['required']
    # ['required', {'default': 0}]
    # ['required', {'default': 'created_at:d'}]
    # ['required', {'default': 'id:a'}]
    # ['required', {'default': 'last_name:a'}]
    # ['required', {'default': 'title:a'}]
    # ['required', {'default': 'url:a'}]
    # [['required'], ['string']]
    #
    # [+] - ValidatableField - args: default, required
    #
    # public function required($value, $params = ['default' => null])
    REQUIRED = 'required' # mandatory, but a default value may be configured
    # Generic
    # Examples:
    # ['arrayOf'] -> rewritten into ['arrayOf', {'type': 'string'}]
    # ['arrayOf', {'max': 3, 'type': u'App\\Model\\DB\\Billing\\HoldMessage', 'min': 1}]
    # ['arrayOf', {'type': 'App\\Model\\DB\\Billing\\InvitationRuleSiteBinding'}]
    # ['arrayOf', {'type': 'App\\Model\\DB\\Billing\\Prechat'}]
    # ['arrayOf', {'type': 'App\\Model\\DB\\Locations\\Location', 'empty': True}]
    # ['arrayOf', {'type': 'App\\Model\\Supply\\BatchRequest'}]
    # ['arrayOf', {'type': 'integer'}]
    #
    # [+] ImplTypeValidator, MinMaxValidator, EmptinessValidator
    ARRAY_OF = 'arrayOf'
    # Examples:
    # ['typeOf', {'type': '\\App\\Model\\DB\\Billing\\RegistrationMarketingData'}]
    # ['typeOf', {'type': 'App\\Model\\DB\\CRM\\Contract'}]
    # ['typeOf', {'type': 'App\\Model\\DB\\Main\\ContactInfo'}]
    # ['typeOf', {'type': 'App\\Model\\DB\\Payment\\Requisites'}]
    # ['typeOf', {'type': 'App\\Model\\Supply\\ProductAddition'}]
    # ['typeOf', {'type': '\\App\\Model\\Supply\\RegistrationPartnerData'}]
    #
    # [-] ImplTypeValidator
    TYPE_OF = 'typeOf' # an entity in code carrying validators for its own fields
    # Content
    # Examples:
    # ['length', {'max': 300}]
    # ['length', {'min': 6}]
    # ['length', {'min': 7}]
    #
    # [+] - MinMaxValidator
    #
    # public function length($value, $params = ['min' => 0, 'max' => null])
    LENGTH = 'length' # string length constraint
    # Examples:
    # ['match', {'pattern': '#^(daily|weekly|monthly)$#'}]
    # ['match', {'pattern': '#^(dark|light|auto)$#'}]
    # ['match', {'pattern': '#^(email|phone|email_and_phone)$#'}]
    # ['match', {'pattern': '#^(email|phone|email_or_phone|email_and_phone)$#'}]
    # ['match', {'pattern': '#^(excluded|specified|home|internal|any)$#'}]
    # ['match', {'pattern': '#^(form|chat|callback)$#'}]]
    # ['match', {'pattern': '#^(green|orange|blue|red|purple|gray|rose|black|yellow|white)$#'}]
    # ['match', {'pattern': '#^(left|right|bottom)$#'}]
    # ['match', {'pattern': '#^(left|right|bottom|top)$#'}]
    # ['match', {'pattern': '#^(livetex|visitor)$#'}]
    # ['match', {'pattern': '#^(man|woman|operator|custom)$#'}]
    # ['match', {'pattern': '#^(missed|completed)$#'}]
    # ['match', {'pattern': '#^(missed|completed)$#'}]
    # ['match', {'pattern': '#^(none|custom)$#'}]
    # ['match', {'pattern': '#^(none|default|custom)$#'}]
    # ['match', {'pattern': '#^(now|after_current|after_payment)$#'}]
    # ['match', {'pattern': '#^(offline|online|busy)$#'}]
    # ['match', {'pattern': '/^(online|offline|busy)$/'}]
    # ['match', {'pattern': '#^(pixel|percent)$#'}]
    # ['match', {'pattern': '#^(positive|negative|undefined)$#'}]
    # ['match', {'pattern': '#^(predefined|custom)$#'}]
    # ['match', {'pattern': '#^(preset|custom)$#'}]
    # ['match', {'pattern': '#^ru_person|ru_legal|ua_person|ua_legal|ua_sp$#'},
    #  None, "payer_type must be one of 'ru_person', 'ru_legal', 'ua_person', 'ua_legal', 'ua_sp'"]
    # ['match', {'pattern': '#^(sip|phone)$#'}]
    # ['match', {'pattern': '#^(small|large)$#'}]
    # ['match', {'pattern': '#^(small|middle|large)$#'}]
    # ['match', {'pattern': '#^(submitted|inprogress|w4lt|w4e|closed)$#'}]
    # ['match', {'pattern': '#^(top|right|bottom|left)$#'}]
    # ['match', {'pattern': '#^(undefined|incident|request)$#'}]
    # ['match', {'pattern': '#^(undefined|low|normal|high|critical)$#'}]
    # ['match', {'pattern': '#^(vary|chat|lead)$#'}]
    # ['match', {'pattern': '#^(visitor|employee)$#'}]
    # ['match', {'pattern': '#^(xwidget|call_from_site)$#'}]
    #
    # [+] - MatchValidator
    #
    # public function match($value, $params = ['pattern' => '#.*#'])
    MATCH = 'match' # arbitrary regular expression
    # NOTE(review): semantics unclear in the source schemas (original comment: "WTF?????")
    # Examples:
    # ['upload']
    #
    # [-]
    #
    # public function upload($value)
    UPLOAD = 'upload' # paired with `field`: the entity field that receives the uploaded file contents
    # Non specified data Types
    # Examples:
    # ['digit']
    # ['digit', {'max': 100, 'min': -15}]
    #
    # [+] MinMaxValidator, ImplTypeValidator
    #
    # public function digit($value, $params = ['min' => null, 'max' => null])
    DIGIT = 'digit' # may be a float or an int
    # Examples:
    # ['integer']
    # ['integer', {'max': 1000}, None, 'Значение должно быть меньше 1000']
    # ['integer', {'max': 100}, None, 'Значение должно быть меньше 100']
    # ['integer', {'max': 3600, 'min': 5}, None, 'Значение должно быть от 5 до 3600']
    #
    # [+] MinMaxValidator, MinMaxValidator
    #
    # public function integer($value, $params = ['min' => null, 'max' => null])
    INTEGER = 'integer' # int only
    # Examples:
    # ['enum', ['acceptance', 'invoice', 'vatinvoice']]
    # ['enum', ['chat', 'lead', 'complaint']]
    # ['enum', ['employee', 'visitor']]
    # ['enum', ['vary', 'chat', 'lead']]
    #
    # [+] ImplTypeValidator
    #
    # public function enum($value, $allowedValues = [])
    ENUM = 'enum' # array of the allowed enum values
    # Specified data Types
    # Examples:
    # ['boolean']
    #
    # public function boolean($value, $params = ['strict' => false])
    BOOLEAN = 'boolean'
    # Examples:
    # ['color']
    COLOR = 'color'
    # Examples:
    # ['currency']
    #
    # public function currency($value)
    CURRENCY = 'currency'
    # Examples:
    # ['email']
    # ['email', {'empty': True}]
    #
    # [+] - EmptinessValidator
    #
    # public function email($value, $params = ['empty' => false])
    EMAIL = 'email'
    # Examples:
    # ['file', {'empty': True, 'entity': ['callButton.image_offline']}]
    # ['file', {'empty': True, 'entity': ['callButton.image_online']}]
    # ['file', {'empty': True, 'entity': ['chatButton.image_offline']}]
    # ['file', {'empty': True, 'entity': ['chatButton.image_online']}]
    # ['file', {'entity': ['employee.photo']}]
    # ['file', {'entity': ['payer.acceptance']}]
    # ['file', {'entity': ['siteCallSettings.background_custom']}]
    # ['file', {'entity': ['siteCallSettings.greeting_custom']}]
    # ['file', {'entity': ['siteWidgetSettings.banner_custom']}]
    # ['file', {'entity': ['siteWidgetSettings.invitation_photo_custom']}]
    #
    # [+] - EmptinessValidator,
    #
    # public function file($value, $params)
    # params: empty, entity
    FILE = 'file'
    # Examples:
    # ['idList']
    # ['idList']
    # ['idList', {'pattern': '#^(missed|completed)$#', 'type': 'string'}]
    # ['idList', {'type': 'integer'}]
    # ['idlist', {'type': 'string'}]]
    # ['idList', {'type': 'string'}]
    ID_LIST = 'idList'
    # Examples:
    # ['phone']
    PHONE = 'phone'
    # Examples:
    # ['string']
    # ['string']
    # ['string', {'length': 100}]
    # ['string', {'length': 180}]
    # ['string', {'length': 2000}]
    # ['string', {'length': 60}]
    # [['string'], ['match', {'pattern': u'#^(sip|phone)$#'}]]
    #
    # public function string($value, $param = ['length' => 2000])
    STRING = 'string'
    # public function text($value, $param = ['length' => 10000])
    TEXT = 'text'
    # Specified only for fields validation
    # Examples:
    # ['date']
    #
    # public function date($value)
    DATE = 'date'
    # Examples:
    # [['datetime']]
    # [['datetime', {'max': '1 month'}]]
    # [['datetime', {'max': '30 days'}, None, 'Максимально возможный интервал - 30 дней']]
    #
    # public function datetime($value)
    DATETIME = 'datetime'
    # Example:
    # ['time']
    #
    # public function time($value)
    TIME = 'time'
    # Unspecified
    # NUMERIC = 'numeric' maps onto digit and integer
    # Constraints describing the field's TYPE.
    # NOTE(review): STRING is absent from this list (only DEFAULT = 'string'
    # references it) — looks deliberate, but verify.
    _TYPE_CONSTRAINS = [
        ARRAY_OF,
        TYPE_OF,
        UPLOAD,
        DIGIT,
        INTEGER,
        ENUM,
        BOOLEAN,
        COLOR,
        CURRENCY,
        EMAIL,
        FILE,
        ID_LIST,
        PHONE,
        TEXT,
        DATE,
        DATETIME,
        TIME
    ]
    PRIMITIVE_DATA_TYPES = [
        UPLOAD,
        COLOR,
        CURRENCY,
        EMAIL,
        PHONE,
        DATE,
        TIME
    ]
    # Constraints describing the field's VALUE rather than its type.
    _VALUE_CONSTRAINS = [
        ALLOW_EMPTY,
        NON_EMPTY,
        REQUIRED,
        LENGTH,
        MATCH
    ]
    # Fallback type when a schema does not specify one.
    DEFAULT = 'string'

    @staticmethod
    def is_type_constrain(constrain):
        """True when *constrain* names a type-level constraint."""
        return constrain in FieldConstrains._TYPE_CONSTRAINS

    @staticmethod
    def is_value_constrain(constrain):
        """True when *constrain* names a value-level constraint."""
        return constrain in FieldConstrains._VALUE_CONSTRAINS
class ImplValidator(object):
    """Base class for schema validators; carries an optional human comment."""

    def __init__(self, comment=None):
        self._comment = comment

    @property
    def comment(self):
        """Optional explanatory comment attached to the validator."""
        return self._comment

    def to_impl_schema(self):
        """Serialize this validator to its schema form; the base emits nothing."""
        pass
class EmptyImplValidator(ImplValidator):
    """Mixin carrying an optional 'empty' flag (may the field be empty?)."""

    def __init__(self, empty, comment):
        # NOTE: the base __init__ is called explicitly (not via super()) because
        # GenericTypeValidation combines these mixins with differing __init__
        # signatures; a cooperative super() call would mis-dispatch in that MRO.
        ImplValidator.__init__(self, comment)
        self._empty = empty

    @property
    def empty(self):
        # True / False / None; None means "not specified".
        return self._empty
class MinMaxValidator(ImplValidator):
    """Mixin carrying optional min/max bounds (length, size or value)."""

    def __init__(self, min, max, comment):
        # Explicit base call — see the note on EmptyImplValidator.__init__.
        ImplValidator.__init__(self, comment)
        self._min = min
        self._max = max

    @property
    def min(self):
        # Lower bound, or None when unbounded.
        return self._min

    @property
    def max(self):
        # Upper bound, or None when unbounded.
        return self._max
# Field emptiness check
class EmptinessValidator(EmptyImplValidator):
    """Emptiness constraint: serializes to allowEmpty / notEmpty."""

    def __init__(self, empty, comment=None):
        super(EmptinessValidator, self).__init__(empty, comment)

    def to_impl_schema(self):
        """[] when unspecified, ['allowEmpty'] when empty is truthy, else ['notEmpty']."""
        if self.empty is None:
            return []
        return [FieldConstrains.ALLOW_EMPTY] if self.empty else [FieldConstrains.NON_EMPTY]
# Required-field check
class RequiredFieldValidator(ImplValidator):
    """'required' constraint with an optional default value."""

    def __init__(self, required, default, comment):
        super(RequiredFieldValidator, self).__init__(comment)
        self._required = required
        self._default = default

    @property
    def required(self):
        return self._required

    @property
    def default(self):
        return self._default

    def to_impl_schema(self):
        """[] when not required; ['required'] or ['required', {'default': ...}] otherwise."""
        if not self._required:
            return []
        if self._default is None:
            return [FieldConstrains.REQUIRED]
        return [FieldConstrains.REQUIRED, {u'default': self.default}]

    @staticmethod
    def build(data):
        """Parse ['required'] / ['required', {'default': ...}] schema data."""
        if data[0] != FieldConstrains.REQUIRED:
            raise ValueError('Validator type must be "' +
                             FieldConstrains.REQUIRED + '", found: ' + data[0])
        default = data[1].get(u'default', None) if len(data) > 1 else None
        return RequiredFieldValidator(True, default, None)
class TypeReferenceValidation(ImplValidator):
    """'typeOf' constraint: references an entity class whose own field validators apply."""

    def __init__(self, type, comment):
        super(TypeReferenceValidation, self).__init__(comment)
        self._type = type

    @property
    def type(self):
        return self._type

    def to_impl_schema(self):
        return [FieldConstrains.TYPE_OF, {u'type': self.type}]

    @staticmethod
    def build(data):
        """Parse ['typeOf', {'type': ...}] schema data; type defaults to 'string'."""
        if data[0] != FieldConstrains.TYPE_OF:
            raise ValueError('Validator type must be "' +
                             FieldConstrains.TYPE_OF + '", found: ' + data[0])
        if len(data) > 1:
            type = data[1].get(u'type', FieldConstrains.DEFAULT)
        else:
            type = FieldConstrains.DEFAULT
        return TypeReferenceValidation(type, None)
class GenericTypeValidation(EmptyImplValidator, MinMaxValidator, TypeReferenceValidation):
    """Container-type constraint (currently 'arrayOf') carrying an element
    type, optional min/max size bounds and an optional emptiness flag.

    Each base __init__ is invoked explicitly because the mixins' signatures
    differ; a cooperative super() chain would mis-dispatch.
    """

    def __init__(self, type, parameter, **kwargs):
        TypeReferenceValidation.__init__(self, type, kwargs.get(u'comment', None))
        EmptyImplValidator.__init__(self, kwargs.get(u'empty', None), kwargs.get(u'comment', None))
        MinMaxValidator.__init__(
            self, kwargs.get(u'min', None), kwargs.get(u'max', None), kwargs.get(u'comment', None))
        self._parameter = parameter

    @property
    def parameter(self):
        # Element type of the container (e.g. 'string', 'integer', a class path).
        return self._parameter

    def to_impl_schema(self):
        """Serialize to ['arrayOf', {type, min?, max?, empty?}[, None, comment]]."""
        result = []
        if self.type == u'Array':
            result.append(FieldConstrains.ARRAY_OF)
        else:
            result.append(self.type)
        params = {u'type': self.parameter}
        if self.min is not None:
            params[u'min'] = self.min
        # BUG FIX: this previously tested `self.min is not None`, so 'max' was
        # emitted as None whenever 'min' was set and dropped whenever it wasn't.
        if self.max is not None:
            params[u'max'] = self.max
        if self.empty is not None:
            params[u'empty'] = self.empty
        if len(params) > 0:
            result.append(params)
        if self.comment is not None:
            result.append(None)
            result.append(self.comment)
        return result

    @staticmethod
    def build(data):
        """Parse ['arrayOf', {...}[, None, comment]] schema data."""
        if data[0] == FieldConstrains.ARRAY_OF:
            parameter = FieldConstrains.DEFAULT
            empty = None
            max = None
            min = None
            comment = None
            if len(data) > 1:
                parameter = data[1].get(u'type', FieldConstrains.DEFAULT)
                empty = data[1].get(u'empty', None)
                max = data[1].get(u'max', None)
                min = data[1].get(u'min', None)
            if len(data) > 3:
                comment = data[3]
            return GenericTypeValidation(
                u'Array',
                parameter,
                min=min,
                max=max,
                comment=comment,
                empty=empty
            )
        else:
            raise ValueError('Validator type must be "' +
                             FieldConstrains.ARRAY_OF + '", found: ' + data[0])
class LengthValidation(MinMaxValidator):
    """Length constraint with optional min/max bounds."""

    def __init__(self, min, max, comment):
        super(LengthValidation, self).__init__(min, max, comment)

    def to_impl_schema(self):
        # Serialize as [LENGTH, {...}] with only the bounds actually set.
        constrains = dict()
        if self._min is not None:
            constrains[u'min'] = self.min
        if self._max is not None:
            constrains[u'max'] = self.max
        return [FieldConstrains.LENGTH, constrains]

    @staticmethod
    def build(data):
        """Build a LengthValidation from its impl-schema list form."""
        if data[0] == FieldConstrains.LENGTH:
            min = None
            max = None
            if len(data) > 1:
                min = data[1].get(u'min', None)
                max = data[1].get(u'max', None)
            return LengthValidation(min, max, None)
        else:
            # BUG FIX: the ValueError was constructed but never raised,
            # so build() silently returned None on bad input.
            raise ValueError('Validator type must be "' +
                             FieldConstrains.LENGTH + '", found: ' + data[0])
class MatchValidator(ImplValidator):
    """Regular-expression match constraint."""

    def __init__(self, pattern, comment):
        super(MatchValidator, self).__init__(comment)
        self._pattern = pattern

    @property
    def pattern(self):
        # The regex pattern the field value must match.
        return self._pattern

    def to_impl_schema(self):
        # A comment, when present, is carried as a trailing pair:
        # a None placeholder followed by the comment text.
        schema = [u'match', {u'pattern': self.pattern}]
        if self.comment is not None:
            schema.extend([None, self.comment])
        return schema

    @staticmethod
    def build(data):
        """Build a MatchValidator from its impl-schema list form."""
        if data[0] != FieldConstrains.MATCH:
            raise ValueError('Validator type must be "' +
                             FieldConstrains.MATCH + '", found: ' + data[0])
        pattern = data[1].get(u'pattern', u'') if len(data) > 1 else None
        comment = data[3] if len(data) > 3 else None
        return MatchValidator(pattern, comment)
class DigitValidation(MinMaxValidator):
    """Digit constraint with optional min/max bounds."""

    def __init__(self, min, max, comment):
        super(DigitValidation, self).__init__(min, max, comment)

    def to_impl_schema(self):
        # Bare [u'digit'] when unbounded; otherwise attach a bounds dict.
        if self.min is None and self.max is None:
            return [u'digit']
        constrains = {}
        if self._min is not None:
            constrains[u'min'] = self.min
        if self._max is not None:
            constrains[u'max'] = self.max
        return [u'digit', constrains]

    @staticmethod
    def build(data):
        """Build a DigitValidation from its impl-schema list form."""
        if data[0] != FieldConstrains.DIGIT:
            raise ValueError('Validator type must be "' +
                             FieldConstrains.DIGIT + '", found: ' + data[0])
        has_params = len(data) > 1
        min = data[1].get(u'min', None) if has_params else None
        max = data[1].get(u'max', None) if has_params else None
        return DigitValidation(min, max, None)
class IntegerValidation(MinMaxValidator):
    """Integer constraint with optional min/max bounds and comment."""

    def __init__(self, min, max, comment):
        super(IntegerValidation, self).__init__(min, max, comment)

    def to_impl_schema(self):
        """Serialize as [u'integer', {bounds}?, None, comment?].

        BUG FIX: the result list was built but never returned, and the
        comment suffix was appended when the comment was None instead of
        when it was set (compare MatchValidator.to_impl_schema).
        """
        result = [u'integer']
        if self.min is not None or self.max is not None:
            constrains = dict()
            if self._min is not None:
                constrains[u'min'] = self.min
            if self._max is not None:
                constrains[u'max'] = self.max
            result.append(constrains)
        if self.comment is not None:
            result.append(None)
            result.append(self.comment)
        return result

    @staticmethod
    def build(data):
        """Build an IntegerValidation from its impl-schema list form."""
        if data[0] == FieldConstrains.INTEGER:
            min = None
            max = None
            comment = None
            if len(data) > 1:
                min = data[1].get(u'min', None)
                max = data[1].get(u'max', None)
            if len(data) > 3:
                # BUG FIX: the comment travels as data[3] (see the other
                # validators' build methods), not inside data[1].
                comment = data[3]
            return IntegerValidation(min, max, comment)
        else:
            raise ValueError('Validator type must be "' +
                             FieldConstrains.INTEGER + '", found: ' + data[0])
class EnumValidation(ImplValidator):
    """Constraint restricting a value to a fixed set of alternatives."""

    def __init__(self, alternatives, comment):
        super(EnumValidation, self).__init__(comment)
        self._alternatives = alternatives

    @property
    def alternatives(self):
        # The allowed values for this field.
        return self._alternatives

    def to_impl_schema(self):
        # Serialized as [u'enum', [alternatives...]].
        return [u'enum', self.alternatives]

    @staticmethod
    def build(data):
        """Build an EnumValidation from its impl-schema list form."""
        if data[0] != FieldConstrains.ENUM:
            raise ValueError('Validator type must be "' +
                             FieldConstrains.ENUM + '", found: ' + data[0])
        alternatives = data[1] if len(data) > 1 else []
        return EnumValidation(alternatives, None)
class PrimitiveTypeValidation(EmptyImplValidator):
    """Validation for one of the primitive data types."""

    def __init__(self, type_name, empty=None, comment=None):
        super(PrimitiveTypeValidation, self).__init__(empty, comment)
        self._type_name = type_name

    @property
    def type_name(self):
        # Name of the primitive type being validated.
        return self._type_name

    def to_impl_schema(self):
        # The 'empty' flag is the only serialized constraint.
        if self.empty is not None:
            return [self._type_name, {u'empty': self.empty}]
        return [self._type_name]

    @staticmethod
    def build(data):
        """Build a PrimitiveTypeValidation from its impl-schema list form."""
        type_name = data[0]
        if type_name not in FieldConstrains.PRIMITIVE_DATA_TYPES:
            raise ValueError(u'Validator type must be in"' +
                             unicode(FieldConstrains.PRIMITIVE_DATA_TYPES) +
                             u'", found: ' + type_name)
        empty = data[1].get(u'empty', None) if len(data) > 1 else None
        return PrimitiveTypeValidation(type_name, empty, None)
class FileValidation(EmptyImplValidator):
    """Validation for a file reference held on an entity field."""

    def __init__(self, field, empty=None, comment=None):
        super(FileValidation, self).__init__(empty, comment)
        self._field = field

    @property
    def field(self):
        # Entity field that holds the referenced file.
        return self._field

    def to_impl_schema(self):
        constrains = {u'entity': self.field}
        if self.empty is not None:
            constrains[u'empty'] = self.empty
        return [u'file', constrains]

    @staticmethod
    def build(data):
        """Build a FileValidation from its impl-schema list form."""
        if data[0] != FieldConstrains.FILE:
            raise ValueError(u'Validator type must be in"' +
                             FieldConstrains.FILE +
                             u'", found: ' + data[0])
        has_params = len(data) > 1
        field = data[1].get(u'entity', None) if has_params else None
        empty = data[1].get(u'empty', None) if has_params else None
        return FileValidation(field, empty, None)
class IdListValidation(TypeReferenceValidation):
    """Validation for a list of identifiers, optionally pattern/type bound."""

    def __init__(self, type=None, pattern=None, comment=None):
        super(IdListValidation, self).__init__(type, comment)
        self._pattern = pattern

    @property
    def pattern(self):
        # Optional regex constraint applied to the identifiers.
        return self._pattern

    def to_impl_schema(self):
        constrains = dict()
        if self.pattern is not None:
            constrains[u'pattern'] = self.pattern
        if self.type is not None:
            constrains[u'type'] = self.type
        # Bare [u'idList'] when no constraints are set.
        return [u'idList', constrains] if constrains else [u'idList']

    @staticmethod
    def build(data):
        """Build an IdListValidation from its impl-schema list form."""
        if data[0] != FieldConstrains.ID_LIST:
            raise ValueError(u'Validator type must be in"' +
                             FieldConstrains.ID_LIST +
                             u'", found: ' + data[0])
        has_params = len(data) > 1
        pattern = data[1].get(u'pattern', None) if has_params else None
        type = data[1].get(u'type', None) if has_params else None
        return IdListValidation(type, pattern, None)
class StringValidator(ImplValidator):
    """String constraint with an optional length bound."""

    def __init__(self, length, comment):
        super(StringValidator, self).__init__(comment)
        self._length = length

    @property
    def length(self):
        # Length constraint value; exact semantics are enforced by the
        # consuming implementation — confirm there if needed.
        return self._length

    def to_impl_schema(self):
        constrains = dict()
        if self.length is not None:
            constrains[u'length'] = self.length
        if len(constrains) > 0:
            return [u'string', constrains]
        else:
            return [u'string']

    @staticmethod
    def build(data):
        """Build a StringValidator from its impl-schema list form."""
        if data[0] == FieldConstrains.STRING:
            length = None
            if len(data) > 1:
                length = data[1].get(u'length', None)
            return StringValidator(length, None)
        else:
            # BUG FIX: the ValueError was constructed but never raised,
            # so build() silently returned None on bad input.
            raise ValueError(u'Validator type must be in"' +
                             FieldConstrains.STRING +
                             u'", found: ' + data[0])
class DateTimeValidator(MinMaxValidator):
    """Datetime constraint with optional min/max bounds and comment."""

    def __init__(self, min, max, comment):
        # MinMaxValidator already stores the comment; the original also
        # assigned self._comment redundantly — removed.
        super(DateTimeValidator, self).__init__(min, max, comment)

    def to_impl_schema(self):
        """Serialize as [u'datetime', {bounds}?, None, comment?].

        BUG FIX: the result list was built but never returned, and the
        comment suffix was appended when the comment was None instead of
        when it was set.
        """
        result = [u'datetime']
        if self.min is not None or self.max is not None:
            constrains = dict()
            if self._min is not None:
                constrains[u'min'] = self.min
            if self._max is not None:
                constrains[u'max'] = self.max
            result.append(constrains)
        if self.comment is not None:
            result.append(None)
            result.append(self.comment)
        return result

    @staticmethod
    def build(data):
        """Build a DateTimeValidator from its impl-schema list form."""
        if data[0] == FieldConstrains.DATETIME:
            max = None
            min = None
            comment = None
            if len(data) > 1:
                max = data[1].get(u'max', None)
                min = data[1].get(u'min', None)
            if len(data) > 3:
                comment = data[3]
            return DateTimeValidator(
                min=min,
                max=max,
                comment=comment,
            )
        else:
            # BUG FIX: the ValueError was constructed but never raised.
            raise ValueError(u'Validator type must be in"' +
                             FieldConstrains.DATETIME +
                             u'", found: ' + data[0])
class BooleanValidator(ImplValidator):
    """Boolean constraint with an optional strictness flag."""

    def __init__(self, strict=None, comment=None):
        super(BooleanValidator, self).__init__(comment)
        self._strict = strict

    @property
    def strict(self):
        # Strictness flag; its exact semantics are enforced by the
        # consuming implementation — confirm there if needed.
        return self._strict

    def to_impl_schema(self):
        if self.strict is not None:
            return [u'boolean', {u'strict': self.strict}]
        else:
            return [u'boolean']

    @staticmethod
    def build(data):
        """Build a BooleanValidator from its impl-schema list form."""
        if data[0] == FieldConstrains.BOOLEAN:
            strict = None
            if len(data) > 1:
                strict = data[1].get(u'strict', None)
            return BooleanValidator(strict, None)
        else:
            # BUG FIX: the ValueError was constructed but never raised.
            raise ValueError(u'Validator type must be in"' +
                             FieldConstrains.BOOLEAN +
                             u'", found: ' + data[0])
|
from django.urls import path
from . import views as users_views
from django.contrib.auth.views import LoginView, LogoutView, PasswordResetView, PasswordResetDoneView, PasswordResetConfirmView, PasswordResetCompleteView
# URL routes for account management: signup/login, the built-in
# password-reset flow, and wallet/card endpoints. Templates live
# under possys/.
urlpatterns = [
    path('signup/<uuid>/', users_views.SignupView.as_view(), name="signup"),
    path('login/', LoginView.as_view(template_name="possys/login.html"), name="login"),
    path('logout/', LogoutView.as_view(), name="logout"),
    # Django's stock password-reset views, with project templates.
    path('password_reset/',
         PasswordResetView.as_view(template_name="possys/password_reset.html"),
         name="password_reset"),
    path('password_reset/done/',
         PasswordResetDoneView.as_view(template_name="possys/password_reset_done.html"),
         name="password_reset_done"),
    path('password_reset/<uidb64>/<token>/',
         PasswordResetConfirmView.as_view(template_name="possys/password_reset_confirm.html"),
         name="password_reset_confirm"),
    path('password_reset/complete/',
         PasswordResetCompleteView.as_view(template_name="possys/password_reset_complete.html"),
         name="password_reset_complete"),
    path('add_card/<uuid>/', users_views.CardCreateView.as_view(template_name='possys/add_card.html'), name='add_card'),
    path('charge_wallet/', users_views.ChargeWalletView.as_view(), name="charge_wallet"),
    # NOTE(review): function-based view with no name= — confirm callers
    # never need to reverse() this route.
    path('api/add_idm/<idm>/', users_views.add_idm),
]
|
# Build uppercase usernames (FIRSTLAST) from a file of "first last" lines.
print("This program makes usernames")
print("from a file of names")
infileName = input("What file is it in? ")
outfileName = input("Place username in this file: ")
# Use context managers so both files are closed even if a line fails to
# split into exactly two names (the original left them open on error).
with open(infileName, "r") as infile, open(outfileName, "w") as outfile:
    for line in infile:
        first, last = line.split()
        uname = (first + last).upper()
        print(uname, file=outfile)
print("Usernames have been written to: ", outfileName)
|
from .birds import Birds
|
import re, os, subprocess, mmap, sys, urllib2, pprint
REPO_PATH = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
URL_REGEX = r'https?:\/\/(www\.)?[-a-zA-Z0-9@:%._\+~#=]{2,256}\.[a-z]{2,4}\b([-a-zA-Z0-9@:%_\+.~#?&//=]*)'
SOURCE_TYPE_URL = 'URL'
SOURCE_TYPE_FILE = 'file'
SOURCE_TYPE_STRING = 'string'
APPEND_COMMAND = 'zsh -c \'. {}/configs/shell-functions.sh && {} \"{}\" {}\''
def isSourceContentsInDestFile(sourcePath, destFileFullPath, sourceType):
    """Return True when the source's full contents already occur in dest.

    sourcePath is interpreted according to sourceType: a URL to fetch,
    a file path to read, or a literal string. Returns False when the
    dest file is missing or empty.
    """
    if sourceType == SOURCE_TYPE_URL:
        sourceContents = urllib2.urlopen(sourcePath).read()
    elif sourceType == SOURCE_TYPE_FILE:
        sourceContents = open(sourcePath).read()
    elif sourceType == SOURCE_TYPE_STRING:
        sourceContents = sourcePath
    # BUG FIX: removed a stray debug print of the dest path, and an
    # unreachable "file is empty" print that sat after the return
    # statements and could never execute.
    if os.path.isfile(destFileFullPath) and os.stat(destFileFullPath).st_size != 0:
        with open(destFileFullPath) as destFile:
            destFileAccess = mmap.mmap(destFile.fileno(), 0, access=mmap.ACCESS_READ)
            # mmap.find returns -1 when the substring is absent.
            return destFileAccess.find(sourceContents) != -1
    return False
def append(source, destFileFullPath, sourceType):
    # Append the source (URL / file / literal string) to the dest file by
    # delegating to the matching zsh helper in configs/shell-functions.sh.
    if sourceType == SOURCE_TYPE_URL:
        appendFunction = 'appendFromURL'
    elif sourceType == SOURCE_TYPE_FILE:
        appendFunction = 'appendFromFile'
    elif sourceType == SOURCE_TYPE_STRING:
        appendFunction = 'appendFromString'
    # SECURITY: source/dest are interpolated into a shell command string
    # run via os.system; values containing quotes or shell metacharacters
    # can inject arbitrary commands. Only use with trusted input.
    command = APPEND_COMMAND.format(REPO_PATH, appendFunction, source, destFileFullPath)
    os.system(command)
def appendIfMissing(source, dest):
    # Append source to dest only when its contents are not already there
    # (idempotent). Classification order: URL, then file path, then string.
    if re.match(URL_REGEX, source, flags=0):
        sourceType = SOURCE_TYPE_URL
    elif os.path.isfile(source):
        sourceType = SOURCE_TYPE_FILE
    else:
        sourceType = SOURCE_TYPE_STRING
    destFileFullPath = os.path.expanduser(dest)
    if os.path.isfile(destFileFullPath):
        if isSourceContentsInDestFile(source, destFileFullPath, sourceType):
            print ('Source\'s contents found in dest file, no need to append')
        else:
            print('Source\'s contents cannot be found in dest file, appending...')
            append(source, destFileFullPath, sourceType)
    else:
        # Dest does not exist yet; the shell helper creates it.
        print "destfile not a file yet, copying sourcefile to destfile..."
        append(source, destFileFullPath, sourceType)
# CLI entry point: expects exactly two arguments (source, dest).
if len(sys.argv) != 3:
    print 'arguments: '
    pprint.pprint(sys.argv)
    sys.exit('[ERROR] appendIfMissing.py, line 31: number of arguments passed is not 2')
else:
    appendIfMissing(sys.argv[1], sys.argv[2])
|
from django.apps import AppConfig
class ClinicAppConfig(AppConfig):
    # Django application configuration for the clinic_app package.
    name = 'clinic_app'
|
#!/usr/local/bin/python3
import re
from bs4 import BeautifulSoup
from discord.ext import commands
from utils import web
class MonsterHunter(commands.Cog):
    """Monster Hunter Skill list"""

    def __init__(self, bot):
        self.bot = bot

    @commands.command()
    async def mh(self, ctx):
        """[search]. Get the full description for a spell in Monsterhunter"""
        target = MonsterHunter.build_link_from_view(ctx.view)
        page = await web.download_page(target)
        if page:
            await ctx.send(MonsterHunter.build_message_from_page(page))
        else:
            await ctx.send("Couldn't grab the requested page")

    @staticmethod
    def build_link_from_view(view):
        # Everything after the command name is the search term; kiranico
        # slugs are lowercase with dashes for spaces.
        view.skip_ws()
        slug = view.read_rest().replace(' ', '-').lower()
        return "https://mhworld.kiranico.com/skill/" + slug

    @staticmethod
    def build_message_from_page(page):
        soup = BeautifulSoup(page.encode("windows-1252"), 'lxml')
        # Title, lead paragraph, separator, then the levels table.
        parts = ["# " + soup.find('h1').text]
        lead = soup.find('p', class_="lead")
        # MH site is bad so we gotta get rid of useless whitespaces
        parts.append(re.sub(' +', ' ', lead.text))
        parts.append("< " + "-" * 40 + " >")
        table = soup.find('table', class_="table table-sm")
        line = "# "
        # Cells alternate name/description; pair them up per row.
        for idx, cell in enumerate(table.findAll('td')):
            if idx % 2 == 0:
                line += cell.text
            else:
                parts.append(line + "\n" + cell.text)
                line = "# "
        # Discord messages cap at 2000 chars; leave room for the fences.
        joined = "\n\n".join(parts)
        return "```md\n" + joined[0:1980] + "```"
def setup(bot):
    # discord.py extension entry point: register the cog on load.
    bot.add_cog(MonsterHunter(bot))
|
# -*- coding: utf-8 -*-
from __future__ import print_function
import time
import pygame
import OpenGL.GL as gl
import OpenGL.GLU as glu
import numpy as np
import itertools
import fractions
import copy
import numpy as np
#local imports
from common import COLORS, DEBUG, VSYNC_PATCH_HEIGHT_DEFAULT, VSYNC_PATCH_WIDTH_DEFAULT, DEFAULT_FLASH_RATE
from common import UserEscape
from screen import Screen
from checkerboard import CheckerBoard
class TripleCheckerBoardFlasher(Screen):
    """Screen that flashes three checkerboards (left, right, center), each
    alternating between a board and its color-reversed twin at an
    independent rate.

    NOTE(review): iterators are advanced with .next(), so this targets
    Python 2; under Python 3 these calls would need next(...) instead —
    confirm the intended interpreter.
    """

    def setup(self,
              nrows,
              nrows_center = 1,
              check_width = None,
              check_width_center = 0.5,
              check_color1 = 'white',
              check_color2 = 'black',
              screen_background_color = 'neutral-gray',
              show_fixation_dot = False,
              flash_rate_left = DEFAULT_FLASH_RATE,
              flash_rate_right = DEFAULT_FLASH_RATE,
              flash_rate_center = DEFAULT_FLASH_RATE,
              #rate_compensation = None,
              vsync_patch = None,
              ):
        # Configure the three boards, their flash rates (Hz), and the
        # screen coordinates where each is rendered.
        Screen.setup(self,
                     background_color = screen_background_color,
                     vsync_patch = vsync_patch,
                     )
        #run colors through filter to catch names and convert to RGB
        check_color1 = COLORS.get(check_color1, check_color1)
        check_color2 = COLORS.get(check_color2, check_color2)
        # set checkerboard-related attributes
        if check_width is None:
            check_width = 2.0/nrows #fill whole screen
        self.board_width = check_width*nrows
        self.board_width_center = check_width_center * nrows_center
        self.nrows = nrows
        self.CB1 = CheckerBoard(nrows, check_width, color1 = check_color1, color2 = check_color2, show_fixation_dot = show_fixation_dot)
        self.CB2 = CheckerBoard(nrows, check_width, color1 = check_color2, color2 = check_color1, show_fixation_dot = show_fixation_dot) #reversed pattern
        self.CB1_center = CheckerBoard(nrows_center, check_width_center, color1 = check_color1, color2 = check_color2, show_fixation_dot = False)#show_fixation_dot)
        self.CB2_center = CheckerBoard(nrows_center, check_width_center, color1 = check_color2, color2 = check_color1, show_fixation_dot = False)#show_fixation_dot)
        # cycling between a board and its reversed twin produces the flash
        self.CB_cycle_left = itertools.cycle((self.CB1,self.CB2))
        self.CB_cycle_right = itertools.cycle((self.CB1,self.CB2))
        self.CB_cycle_center = itertools.cycle((self.CB1_center,self.CB2_center))
        # set time-related attributes
        self._last_CB_change_time_left = None
        self._last_CB_change_time_right = None
        self._last_CB_change_time_center = None
        self.flash_rate_left = flash_rate_left
        self.flash_interval_left = 1.0/flash_rate_left
        self.flash_rate_right = flash_rate_right
        self.flash_interval_right = 1.0/flash_rate_right
        self.flash_rate_center = flash_rate_center
        self.flash_interval_center = 1.0/flash_rate_center
        #self.rate_compensation = rate_compensation
        # get useful coordinate values for checkerboard rendering locations
        self.xC, self.yC = (-0.5*self.board_width,-0.5*self.board_width)
        self.xL, self.yL = (self.xC - 0.7*self.screen_right, self.yC)
        self.xR, self.yR = (self.xC + 0.7*self.screen_right, self.yC)
        # some lists for checking things
        self.vals = itertools.cycle((1,0))
        self.t_list = []
        self.val_list = []
        self.vals_current = self.vals.next()

    def start_time(self,t):
        # Record the start time and prime the current board of each cycle.
        # get start time and set current CB objects (and their change times)
        Screen.start_time(self,t)
        self._last_CB_change_time_left = t
        self._last_CB_change_time_right = t
        self._last_CB_change_time_center = t
        self._current_CB_left = self.CB_cycle_left.next()
        self._current_CB_right = self.CB_cycle_right.next()
        self._current_CB_center = self.CB_cycle_center.next()
        # also used for checking things
        self.t_begin = t

    def render(self):
        # Draw the left, right and center boards at their positions.
        # do general OpenGL stuff as well as FixationCross and Vsync Patch if needed
        Screen.render(self)
        # translate to position of left board and render
        gl.glLoadIdentity()
        gl.glTranslatef(self.xL, self.yL, 0.0)
        self._current_CB_left.render()
        # translate to position of right board and render
        gl.glLoadIdentity()
        gl.glTranslatef(self.xR, self.yR, 0.0)
        self._current_CB_right.render()
        # render center board
        gl.glLoadIdentity()
        gl.glTranslatef(-self.board_width_center / 2.0, -self.board_width_center / 2.0, 0.0)
        self._current_CB_center.render()

    def update(self, t, dt):
        # Swap any board whose flash interval has elapsed since its last
        # swap; only then is a re-render requested.
        self.ready_to_render = False
        # only update a checkerboard if its flash_interval has elapsed
        if (t - self._last_CB_change_time_left) >= self.flash_interval_left:
            self._last_CB_change_time_left = t
            self._current_CB_left = self.CB_cycle_left.next()
            self.ready_to_render = True
            # checking things
            self.vals_current = self.vals.next()
            self.val_list.append(self.vals_current)
            self.t_list.append(t - self.t_begin)
        if (t - self._last_CB_change_time_right) >= self.flash_interval_right:
            self._last_CB_change_time_right = t
            self._current_CB_right = self.CB_cycle_right.next()
            self.ready_to_render = True
        if (t - self._last_CB_change_time_center) >= self.flash_interval_center:
            self._last_CB_change_time_center = t
            self._current_CB_center = self.CB_cycle_center.next()
            self.ready_to_render = True

    def run(self, **kwargs):
        # loop rate set too high so it should run effectively as fast as python is capable of looping
        Screen.run(self, display_loop_rate = 10000, **kwargs)
################################################################################
# TEST CODE
################################################################################
if __name__ == "__main__":
    # Three distinct rates so the flicker frequencies are easy to
    # separate in a spectrum.
    flash_rate_left = 17
    flash_rate_right = 23
    flash_rate_center = 19
    duration = 5
    show_plot = True
    DCBF = TripleCheckerBoardFlasher.with_pygame_display(#VBI_sync_osx = False,
                                                         )
    #DCBF = TripleCheckerBoardFlasher.with_psychopy_window()
    DCBF.setup(flash_rate_left = flash_rate_left,
               flash_rate_right = flash_rate_right,
               flash_rate_center = flash_rate_center,
               check_width = 1.0 / 16.0,
               check_width_center = 0.5,
               screen_background_color = 'neutral-gray',
               nrows = 8,
               nrows_center = 1,
               show_fixation_dot = True,
               )
    DCBF.run(duration = duration)
    pygame.quit()
    if show_plot:
        # Compare measured flip times of the left board against an ideal
        # square wave at flash_rate_left.
        t_diffs = np.diff(np.array(DCBF.t_list))
        print('Mean sample interval: ', t_diffs.mean())
        print('Mean sample frequency:', 1.0/t_diffs.mean())
        print('Sample interval STD: ', t_diffs.std())
        import matplotlib.pyplot as plt
        import scipy.signal as scps
        # plt.subplot(2,1,1)
        plt.step(DCBF.t_list, DCBF.val_list, color = 'red', label = 'Displayed')
        time_vals = np.linspace(0, duration, duration * 720)
        val_vals = [scps.square(flash_rate_left * np.pi * t, duty = 0.5) / 2.0 + 0.5 for t in time_vals]
        plt.plot(time_vals, val_vals, color = 'blue', label = 'Ideal')
        plt.legend(loc = 'best')
        # must set ready_to_render to true in every loop for fft to work to get even sample spacing
        # note that this introduces its own error, as rendering is not as optimized
        # plt.subplot(2,1,2)
        # fft_data = abs(np.fft.rfft(DCBF.val_list))
        # fft_freqs = np.fft.rfftfreq(len(DCBF.val_list), 1.0/60)
        # plt.plot(fft_freqs, fft_data)
        # plt.show()
|
import json
import requests
import uuid
# Fetch contract records from the KCMO open-data API and reshape each
# into a CloudSearch-style {'id', 'type', 'fields': {...}} document.
response = requests.get("https://data.kcmo.org/resource/c46m-hv6s.json")
contracts = json.loads(response.text)
for x in contracts:
    # Rename source columns to the target schema's names.
    x['effective_date'] = x.pop('contract_date')
    x['supplier'] = x.pop('vendor')
    x.update({
        'id' : str(uuid.uuid4()),
        'type' : 'add',
        'fields' : {
            'buyer_lead_agency' : 'City of Kansas City, Missouri',
            'states' : 'MO',
            'contract_amount' : x['contract_amount'],
            'contract_number' : x['contract_number'],
            'department' : x['department'],
            'description' : x['description'],
            # Keep only the YYYY-MM-DD prefix of the timestamp.
            'effective_date' : x['effective_date'][0:10],
            'supplier' : x['supplier'],
        }
    })
    # Drop the now-redundant top-level copies of the field values.
    del x['contract_amount'], x['contract_number'], x['department'], x['description'], x['effective_date'], x['supplier']
# Combined dump of every reshaped contract.
with open('kcmo.json', 'w') as outfile:
    json.dump(contracts, outfile)
# One file per contract, named by its generated id.
for x in contracts:
    filename = x['id']+'.json'
    with open(filename, 'w') as outfile:
        # BUG FIX: each per-contract file previously dumped the whole
        # `contracts` list; write just this contract's record.
        json.dump(x, outfile)
|
from random import Random
from goodstudy.settings import EMAIL_FROM
from django.core.mail import send_mail
from apps.user.models import EmailCaptcha
def random_num(randomlenth=8):
    """Return a random string of the given length drawn from ASCII
    letters and digits (used as an email-captcha code)."""
    chars = 'AaBbCcDdEeFfGgHhIiJjKkLlMmNnOoPpQqRrSsTtUuVvWwXxYyZz0123456789'
    rng = Random()
    # choice() picks uniformly over chars, same as randint(0, len-1).
    return ''.join(rng.choice(chars) for _ in range(randomlenth))
def send_email(email, send_type='register'):
    """Create and persist an EmailCaptcha code for the address, then send
    the matching activation or password-reset email.

    send_type: 'register' sends the activation link; any other value
    sends the password-reset link (matching the original's else branch).
    """
    email_captcha = EmailCaptcha()
    code = random_num(16)
    email_captcha.code = code
    email_captcha.email = email
    email_captcha.send_type = send_type
    email_captcha.save()
    # Pick subject/body per send type; the link embeds the captcha code.
    if send_type == 'register':
        email_title = '乐之在线网注册激活链接'
        email_body = '请点击一下链接激活您的账户:http://127.0.0.1:8000/active/{0}'.format(code)
    else:
        email_title = '乐之在线网找回密码链接'
        email_body = '请点击一下链接进入页面重置您的密码:http://127.0.0.1:8000/resetpwd/{0}'.format(code)
    # The original captured send_mail's status only to do nothing with it
    # ('if send_status: pass' in both branches) — dead code removed.
    send_mail(email_title, email_body, EMAIL_FROM, [email])
|
def ask_for_int():
    """Prompt until a valid integer age is entered, then report voting
    eligibility (18+) and stop."""
    while True:
        try:
            age = int(input("enter your age = "))
        except ValueError:
            # Non-numeric input: report and re-prompt.
            print("Entered input is not a number")
            continue
        if age >= 18:
            print("eligible to vote")
        else:
            print("not eligible to vote")
        break
ask_for_int() |
import autodisc as ad
import plotly
import zipfile
import os
import numpy as np
from PIL import Image
import plotly.graph_objs as go
import warnings
import random
from autodisc.gui.jupyter.misc import create_colormap, transform_image_from_colormap
#def plot_discoveries_treemap(experiment_definitions=None, repetition_ids=None, experiment_statistics=None, data_filters=None, config=None, **kwargs):
def plot_discoveries_treemap(experiment_definition, repetition_id=0, experiment_statistics=None, data_filters=None, config=None, **kwargs):
    """Render a squarified treemap of one experiment repetition's
    discoveries: each data filter gets a rectangle sized by its share of
    discoveries, tiled with randomly sampled final-observation images.

    Displays the figure inline via plotly iplot and returns the figure dict.
    """
    # Style defaults; anything in kwargs/config overrides these.
    default_config = dict(
        random_seed = 0,
        squarify = dict(
            x = 0.0,
            y = 0.0,
            width = 100.0,
            height = 100.0,
        ),
        # global style config
        global_layout = dict(
            height=700,
            width=700,
            margin = dict(
                l = 0,
                r = 0,
                b = 0,
                t = 0
            ),
            xaxis=dict(
                autorange=True,
                showgrid=False,
                zeroline=False,
                showline=False,
                ticks='',
                showticklabels=False
            ),
            yaxis=dict(
                autorange=True,
                showgrid=False,
                zeroline=False,
                showline=False,
                ticks='',
                showticklabels=False
            ),
            title = dict(
                text = '',
                font = dict(
                    color = "black",
                    size = 22,
                    family='Times New Roman'
                )
            ),
            hovermode='closest',
            showlegend = True,
        ),
        # Shapes style config
        shapes = dict(
            line = dict(
                width = 2
            ),
            layer = "below"
        ),
        shapes_background_colors = ['rgb(0,0,0)', 'rgb(204,121,167)', 'rgb(0,114,178)'],
        shapes_lines_colors = [ 'rgb(0,0,0)','rgb(154,71,117)', 'rgb(0,64,128)'],
        # Images style config
        margin_x = 1,
        margin_y = 1,
        images = dict (
            xref= "x",
            yref= "y",
            sizex= 10,
            sizey= 10,
            opacity = 1,
            xanchor= "center",
            yanchor= "middle",
            layer = "above"
        ),
        images_transform = True,
        images_transform_colormaps = [
            create_colormap(np.array([[255,255,255], [119,255,255],[23,223,252],[0,190,250],[0,158,249],[0,142,249],[81,125,248],[150,109,248],[192,77,247],[232,47,247],[255,9,247],[200,0,84]])/255*8), #WBPR
            create_colormap(np.array([[0,0,4],[0,0,8],[0,4,8],[0,8,8],[4,8,4],[8,8,0],[8,4,0],[8,0,0],[4,0,0]])), #BCYR
            create_colormap(np.array([[0,2,0],[0,4,0],[4,6,0],[8,8,0],[8,4,4],[8,0,8],[4,0,8],[0,0,8],[0,0,4]])), #GYPB
            create_colormap(np.array([[4,0,2],[8,0,4],[8,0,6],[8,0,8],[4,4,4],[0,8,0],[0,6,0],[0,4,0],[0,2,0]])), #PPGG
            create_colormap(np.array([[4,4,6],[2,2,4],[2,4,2],[4,6,4],[6,6,4],[4,2,2]])), #BGYR
            create_colormap(np.array([[4,6,4],[2,4,2],[4,4,2],[6,6,4],[6,4,6],[2,2,4]])), #GYPB
            create_colormap(np.array([[6,6,4],[4,4,2],[4,2,4],[6,4,6],[4,6,6],[2,4,2]])), #YPCG
            create_colormap(np.array([[8,8,8],[7,7,7],[5,5,5],[3,3,3],[0,0,0]]), is_marker_w=False), #W/B
            create_colormap(np.array([[0,0,0],[3,3,3],[5,5,5],[7,7,7],[8,8,8]]))], #B/W
        # Annotations style config
        annotations = dict(
            font = dict(
                color = "#140054",
                size = 18,
                family='Times New Roman'
            ),
            showarrow = False,
            opacity=1.0,
            bgcolor='rgb(255,255,255)'
        )
    )
    config = ad.config.set_default_config(kwargs, config, default_config)
    random.seed(config.random_seed)
    # experiment_ids=[exp_def['id'] for exp_def in experiment_definitions]
    # for experiment_id in experiment_ids:
    #     experiment_idx = experiment_ids.index(experiment_id)
    #     for repetition_idx in repetition_ids:
    #         experiment_statistics[experiment_id][repetition_idx] = ad.gui.jupyter.misc.load_statistics(os.path.join(experiment_definitions[experiment_idx]['directory'], 'repetition_{:06d}'.format(repetition_idx)))
    experiment_id = experiment_definition['id']
    #exp_idx = experiment_ids.index(experiment_id)
    # Statistics live at the experiment root (repetition_id None) or in
    # the repetition's own subdirectory.
    if repetition_id is None:
        path = experiment_definition['directory']
    else:
        path = os.path.join(
            experiment_definition['directory'],
            'repetition_{:06d}'.format(int(repetition_id))
        )
    path = os.path.join(path, 'statistics')
    final_observation_path = os.path.join(path, 'final_observation')
    # Recover images (from png or zip)
    all_images = []
    if os.path.isdir(final_observation_path):
        dir_content = os.listdir(final_observation_path)
        for image_file_name in dir_content:
            file_path = os.path.join(path, image_file_name)
            if os.path.isfile(file_path) and '.png' in image_file_name:
                item_id = image_file_name.split('.')[0]
                item_id = int(item_id)
                file = open(file_path, "rb")
                image_PIL = Image.open(file)
                if config.images_transform:
                    image_PIL = transform_image_from_colormap(image_PIL, config.images_transform_colormaps[0])
                all_images.append(image_PIL)
    elif zipfile.is_zipfile(final_observation_path + '.zip'):
        zf = zipfile.ZipFile(final_observation_path + '.zip', 'r')
        dir_content = zf.namelist()
        for image_file_name in dir_content:
            item_id = image_file_name.split('.')[0]
            item_id = int(item_id)
            with zf.open(image_file_name) as file:
                image_PIL = Image.open(file)
                if config.images_transform:
                    image_PIL = transform_image_from_colormap(image_PIL, config.images_transform_colormaps[0])
                all_images.append(image_PIL)
    else:
        warnings.warn('No Images found under {!r}'.format(final_observation_path))
    # Recover labels
    filters_ids = list(data_filters.keys())
    database_filtered_ids = dict()
    for curr_filter_k, curr_filter_val in data_filters.items():
        # filter
        # An empty/None filter matches every image.
        if curr_filter_val is None or not curr_filter_val:
            img_ids = range(len(all_images))
        else:
            run_ids = ad.gui.jupyter.misc.filter_single_experiment_data(experiment_statistics[experiment_id], curr_filter_val, int(repetition_id))
            img_ids = np.where(run_ids)[0]
        database_filtered_ids[curr_filter_k] = list(img_ids)
    # Each filter's share of all discoveries, in percent.
    values = []
    for filter_id in filters_ids:
        values.append(len(database_filtered_ids[filter_id])/len(all_images)*100)
    # Largest rectangles first; keep the color lists aligned to the order.
    sorted_indexes = np.argsort(-np.array(values))
    values = np.array(values)[sorted_indexes]
    filters_ids = np.array(filters_ids)[sorted_indexes]
    config.shapes_background_colors = np.array(config.shapes_background_colors)[sorted_indexes]
    config.shapes_lines_colors = np.array(config.shapes_lines_colors)[sorted_indexes]
    normed = ad.gui.jupyter.squarify.normalize_sizes(values, config.squarify.width, config.squarify.height)
    rects = ad.gui.jupyter.squarify.squarify(normed, config.squarify.x, config.squarify.y, config.squarify.width, config.squarify.height)
    annotations = []
    shapes = []
    images = []
    counter = 0
    for r in rects:
        # annotations layout
        annotation_config = ad.config.set_default_config(
            dict(
                x = r['x']+(r['dx']/2),
                y = r['y']+(r['dy']/2),
                text = "<b>{}:<br>{:.1f}%<b>".format(filters_ids[counter],values[counter]),
            ),
            config.annotations
        )
        annotations.append(annotation_config)
        # shapes layout
        shape_config = ad.config.set_default_config(
            dict(
                type = 'rect',
                x0 = r['x'],
                y0 = r['y'],
                x1 = r['x']+r['dx'],
                y1 = r['y']+r['dy'],
                fillcolor = config.shapes_background_colors[counter],
                line = dict(color = config.shapes_lines_colors[counter])
            ),
            config.shapes
        )
        shapes.append(shape_config)
        # images layout
        # Tile the rectangle with a grid of sample images that fits
        # inside its margins.
        x0 = r['x']
        y0 = r['y']
        w = r['dx']
        h = r['dy']
        n_cols = int((w - 2 * config.margin_x) // config.images.sizex)
        space_x = (w - 2 * config.margin_x) / n_cols
        centers_x = []
        for j in range(n_cols):
            centers_x.append(j * space_x + space_x / 2.0)
        n_rows = int((h - 2 * config.margin_y) // config.images.sizey)
        space_y = (h - 2 * config.margin_y) / n_rows
        centers_y = []
        for i in range(n_rows):
            centers_y.append(i * space_y + space_y / 2.0)
        # NOTE(review): random.sample raises ValueError when a filter has
        # fewer matching images than grid cells — confirm callers
        # guarantee enough discoveries per filter.
        list_of_random_items = random.sample(database_filtered_ids[filters_ids[counter]], n_rows*n_cols)
        for i in range(n_rows):
            for j in range(n_cols):
                image_config = ad.config.set_default_config(
                    dict(
                        source = all_images[list_of_random_items[np.ravel_multi_index((i, j), dims=(n_rows,n_cols), order='F')]],
                        x = x0 + config.margin_x + centers_x[j],
                        y = y0 + config.margin_y + centers_y[i],
                    ),
                    config.images)
                images.append(image_config)
        counter = counter + 1
        # Wrap around when there are more rectangles than styles.
        if counter >= len(config.shapes_background_colors):
            counter = 0
    # append to global layout
    global_layout = ad.config.set_default_config(
        dict(
            annotations = annotations,
            shapes=shapes,
            images=images
        ),
        config.global_layout
    )
    figure = dict(data=[go.Scatter()],layout=global_layout)
    plotly.offline.iplot(figure)
    return figure
from flask import request
from flask_restful import Resource, marshal_with
from ..fields import Fields
from app.models.models import Category, db
from app.forms import CreateCategoryForm, UpdateCategoryForm
from app.utils import output_json
category_fields = Fields().category_fields()
class CategoryListAPI(Resource):
    """Collection endpoint: list all categories and create new ones."""

    @marshal_with(category_fields)
    def get(self):
        # Return every category, serialized via category_fields.
        categorys = Category.query.all()
        return categorys

    def post(self):
        # Validate the JSON payload with the WTForms form; CSRF is
        # disabled because this is a token-less JSON API.
        data = request.json
        create_category_form = CreateCategoryForm(data=data, meta={"csrf":False})
        if create_category_form.validate_on_submit():
            name = data.get("name")
            category = Category(name=name)
            db.session.add(category)
            db.session.commit()
            # Respond with the refreshed, marshalled category list.
            return self.get()
        else:
            return output_json({"message":create_category_form.errors}, 406)
class CategoryAPI(Resource):
    """Single-category endpoint: fetch, delete, or rename one category."""

    @marshal_with(category_fields)
    def get(self, id):
        # NOTE(review): Category.query.get returns None for a missing id,
        # which marshals to an empty record — confirm whether a 404 is
        # wanted instead.
        category = Category.query.get(id)
        return category

    @marshal_with(category_fields)
    def delete(self, id):
        # NOTE(review): a missing id yields None here, and
        # db.session.delete(None) raises — confirm ids are pre-validated.
        category = Category.query.get(id)
        db.session.delete(category)
        db.session.commit()
        # Respond with the remaining categories.
        categorys = Category.query.all()
        return categorys

    def put(self, id):
        category = Category.query.get(id)
        data = request.json
        # The update form validates against the target id (e.g. for
        # uniqueness checks).
        data["id"] = id
        update_category_form = UpdateCategoryForm(data=data, meta={"csrf":False})
        if update_category_form.validate_on_submit():
            category.name = data.get("name")
            db.session.commit()
            # HACK: calls the list view's get() unbound with the class as
            # 'self' to reuse its marshalling; works only because get()
            # never touches instance state.
            return CategoryListAPI.get(CategoryListAPI)
        else:
            return output_json({"message":update_category_form.errors}, 406)
|
from os.path import dirname
from os.path import join
from kivy.uix.button import Button
from kivy.uix.image import Image
class ButtonIcon(Button):
    """A transparent Button that displays a PNG icon from the package's
    resources/ directory, keeping the icon glued to the button's
    position and size."""

    # Set in __init__; declared here because Kivy can dispatch on_pos /
    # on_size during Button.__init__, i.e. before the icon exists.
    icon = None

    def __init__(self, icon_name, **kwargs):
        super(ButtonIcon, self).__init__(**kwargs)
        icon_path = join(dirname(__file__), "resources", icon_name + ".png")
        self.icon = Image(source=str(icon_path))
        self.add_widget(self.icon)
        # Fully transparent background: only the icon is visible.
        self.background_color = (0, 0, 0, 0)

    def on_pos(self, instance, value):
        # BUG FIX: guard against events fired before __init__ assigns
        # the icon (self.icon is still the None class attribute then).
        if self.icon is not None:
            self.icon.pos = value

    def on_size(self, instance, value):
        if self.icon is not None:
            self.icon.size = value
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.