#this is boilerplate for making a flask app - flask was installed beforehand with the console command "sudo pip3 install flask"
#the line below imports Flask, request and render_template from the flask library
from flask import Flask, request, render_template
import os
import datetime
#the line right below creates the Flask application object
app = Flask(__name__)
#the @app route below connects to the html file index.html and renders it
@app.route("/")
def say_hi():
return render_template("index.html")
#the form on the index page sends the input entered by the user to this /search route rather than back to the page the form is on
@app.route("/search")
def do_search():
return "Search Page"
@app.route("/")
def show_photo():
return "<h1>This is the photos page</h1>"
@app.route("/")
def time():
now = datetime.datetime.now()
return "the current time is " + now.strftime("%H:%M:%S")
#the route below captures <car> and <carid> from the url and echoes them in the <h1>, so the heading always matches the url (change one and the other changes too)
@app.route("/cars/<car>/image/<carid>")
def cars(car, carid):
return "<h1>You asked for image {0} for car {1}</h1>".format(carid, car)
#the block directly below is part of the standard flask boilerplate
if __name__ == "__main__":
app.run(host='0.0.0.0', port=8080, debug=True)
|
def jumping_number(number):
num = str(number)
for i, n in enumerate(num[:-1]):
subt = int(n)-int(num[i+1])
if subt!=1 and subt!=-1:
return "Not!!"
return "Jumping!!"
'''
Definition
A jumping number is a number in which all adjacent digits differ by 1.
Task
Given a number, find out whether it is a jumping number or not.
Warm-up (Highly recommended)
Playing With Numbers Series
Notes
The number passed is always positive.
Return the result as a string.
The difference between '9' and '0' is not considered to be 1.
All single-digit numbers are considered jumping numbers.
Input >> Output Examples
jumpingNumber(9) ==> return "Jumping!!"
Explanation:
It's a single-digit number
jumpingNumber(79) ==> return "Not!!"
Explanation:
Adjacent digits don't differ by 1
jumpingNumber(23) ==> return "Jumping!!"
Explanation:
Adjacent digits differ by 1
jumpingNumber(556847) ==> return "Not!!"
Explanation:
Adjacent digits don't differ by 1
jumpingNumber(4343456) ==> return "Jumping!!"
Explanation:
Adjacent digits differ by 1
jumpingNumber(89098) ==> return "Not!!"
Explanation:
Adjacent digits don't differ by 1
jumpingNumber(32) ==> return "Jumping!!"
Explanation:
Adjacent digits differ by 1
'''
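# A minimal usage sketch based on the examples in the description above
# (not part of the original kata solution):
if __name__ == "__main__":
    assert jumping_number(9) == "Jumping!!"
    assert jumping_number(79) == "Not!!"
    assert jumping_number(23) == "Jumping!!"
    assert jumping_number(4343456) == "Jumping!!"
    assert jumping_number(89098) == "Not!!"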
|
"""
Test script
"""
import os
import copy
import collections
from time import time
import torch
import numpy as np
import pandas as pd
import scipy.ndimage as ndimage
import SimpleITK as sitk
import skimage.measure as measure
import skimage.morphology as morphology
from net.ResUNet import ResUNet
from utilities.calculate_metrics import Metirc
import parameter as para
os.environ['CUDA_VISIBLE_DEVICES'] = para.gpu
# Two accumulators used to compute the global dice score
dice_intersection = 0.0
dice_union = 0.0
file_name = [] # file name
time_pre_case = []  # per-case processing time
# Define evaluation indicators
liver_score = collections.OrderedDict()
liver_score['dice'] = []
liver_score['jacard'] = []
liver_score['voe'] = []
liver_score['fnr'] = []
liver_score['fpr'] = []
liver_score['assd'] = []
liver_score['rmsd'] = []
liver_score['msd'] = []
for file_index, file in enumerate(os.listdir(para.test_ct_path)):
file_name.append(file)
start_time = time()
# Read the gold standard into memory
seg = sitk.ReadImage(os.path.join(para.test_seg_path, file.replace('volume', 'segmentation')), sitk.sitkUInt8)
seg_array = sitk.GetArrayFromImage(seg)
seg_array[seg_array > 0] = 1
# Extract the largest connected domain of the liver, remove small areas, and fill the internal holes
pred = sitk.ReadImage(os.path.join(para.pred_path, file.replace('volume', 'pred')), sitk.sitkUInt8)
liver_seg = sitk.GetArrayFromImage(pred)
liver_seg[liver_seg > 0] = 1
# Calculate segmentation evaluation index
# Use the spacing of the ground-truth volume (the original CT is not loaded in this script)
liver_metric = Metirc(seg_array, liver_seg, seg.GetSpacing())
liver_score['dice'].append(liver_metric.get_dice_coefficient()[0])
liver_score['jacard'].append(liver_metric.get_jaccard_index())
liver_score['voe'].append(liver_metric.get_VOE())
liver_score['fnr'].append(liver_metric.get_FNR())
liver_score['fpr'].append(liver_metric.get_FPR())
liver_score['assd'].append(liver_metric.get_ASSD())
liver_score['rmsd'].append(liver_metric.get_RMSD())
liver_score['msd'].append(liver_metric.get_MSD())
dice_intersection += liver_metric.get_dice_coefficient()[1]
dice_union += liver_metric.get_dice_coefficient()[2]
time_pre_case.append(time() - start_time)
# Write the evaluation metrics into an Excel file
liver_data = pd.DataFrame(liver_score, index=file_name)
liver_data['time'] = time_pre_case
liver_statistics = pd.DataFrame(index=['mean', 'std', 'min', 'max'], columns=list(liver_data.columns))
liver_statistics.loc['mean'] = liver_data.mean()
liver_statistics.loc['std'] = liver_data.std()
liver_statistics.loc['min'] = liver_data.min()
liver_statistics.loc['max'] = liver_data.max()
writer = pd.ExcelWriter('./result.xlsx')
liver_data.to_excel(writer, 'liver')
liver_statistics.to_excel(writer, 'liver_statistics')
writer.save()
# dice global
print('dice global:', dice_intersection / dice_union)
|
"""
Module with handy utilities for plotting genomic signal
"""
from itertools import groupby
import matplotlib
from matplotlib import pyplot as plt
import numpy as np
from scipy import stats
from statsmodels.sandbox.stats.multicomp import fdrcorrection0
def nice_log(x):
"""
Uses a log scale but with negative numbers.
:param x: NumPy array
"""
neg = x < 0
xi = np.log2(np.abs(x) + 1)
xi[neg] = -xi[neg]
return xi
def tip_zscores(a):
"""
Calculates the "target identification from profiles" (TIP) zscores
from Cheng et al. 2011, Bioinformatics 27(23):3221-3227.
:param a: NumPy array, where each row is the signal for a feature.
"""
weighted = a * a.mean(axis=0)
scores = weighted.sum(axis=1)
zscores = (scores - scores.mean()) / scores.std()
return zscores
def tip_fdr(a, alpha=0.05):
"""
Returns adjusted TIP p-values for a particular `alpha`.
(see :func:`tip_zscores` for more info)
:param a: NumPy array, where each row is the signal for a feature
:param alpha: False discovery rate
"""
zscores = tip_zscores(a)
pvals = stats.norm.pdf(zscores)
rejected, fdrs = fdrcorrection0(pvals)
return fdrs
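# A hedged usage sketch (the array is illustrative, not from the original module):
# signal = np.random.rand(500, 100)      # 500 features x 100 bins
# z = tip_zscores(signal)                # per-feature TIP z-scores
# fdrs = tip_fdr(signal, alpha=0.05)     # FDR-adjusted values for the same features
# top = np.argsort(z)[::-1][:50]         # indices of the 50 highest-scoring features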
def prepare_logged(x, y):
"""
Transform `x` and `y` to a log scale while dealing with zeros.
This function scales `x` and `y` such that the points that are zero in one
array are set to the min of the other array.
When plotting expression data, frequently one sample will have reads in
a particular feature but the other sample will not. Expression data also
tends to look better on a log scale, but log(0) is undefined and therefore
cannot be shown on a plot. This function allows these points to be shown,
piled up along one side of the plot.
:param x,y: NumPy arrays
"""
xi = np.log2(x)
yi = np.log2(y)
xv = np.isfinite(xi)
yv = np.isfinite(yi)
global_min = min(xi[xv].min(), yi[yv].min())
global_max = max(xi[xv].max(), yi[yv].max())
xi[~xv] = global_min
yi[~yv] = global_min
return xi, yi
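# A small usage sketch (illustrative values): zeros in one array are pinned to
# the global minimum of the finite log2 values, so they pile up along one axis.
# x = np.array([0., 4., 16.])
# y = np.array([2., 0., 8.])
# xi, yi = prepare_logged(x, y)   # xi[0] and yi[1] both equal the global log2 minimum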
def matrix_and_line_shell(figsize=(5, 12), strip=False):
"""
Helper function to construct an empty figure that has space for a matrix,
a summary line plot directly below it, a colorbar axis, and an optional
"strip" axis that parallels the matrix (and shares its y-axis) where data
can be added to create callbacks.
Returns a tuple of (fig, matrix_ax, line_ax, strip_ax, colorbar_ax) that
can then be used to plot upon.
:param figsize: Tuple of (width, height), in inches, for the figure to be created.
:param strip: If `strip` is False, then the returned `strip_ax` will be
None and no strip axes will be created.
"""
fig = plt.figure(figsize=figsize)
# Constants to keep track of the grid layout
if strip:
STRIP_COLS = 1
else:
STRIP_COLS = 0
ROWS = 4
COLS = 8 + STRIP_COLS
MAT_COLS = 7
MAT_ROWS = 3
LINE_ROWS = ROWS - MAT_ROWS
mat_ax = plt.subplot2grid(
shape=(ROWS, COLS),
loc=(0, STRIP_COLS),
rowspan=MAT_ROWS,
colspan=MAT_COLS,
)
line_ax = plt.subplot2grid(
shape=(ROWS, COLS),
loc=(MAT_ROWS, STRIP_COLS),
rowspan=LINE_ROWS,
colspan=MAT_COLS,
sharex=mat_ax)
if strip:
strip_ax = plt.subplot2grid(
shape=(ROWS, COLS),
loc=(0, 0),
rowspan=MAT_ROWS,
colspan=STRIP_COLS,
sharey=mat_ax,
)
else:
strip_ax = None
cax = plt.subplot2grid(
shape=(ROWS, COLS),
loc=(ROWS - MAT_ROWS, MAT_COLS + STRIP_COLS),
rowspan=1,
colspan=1,
)
fig.subplots_adjust(hspace=0.1, wspace=0.2, right=0.88, left=0.23)
return fig, mat_ax, line_ax, strip_ax, cax
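# A hedged usage sketch (the array name is illustrative):
# fig, mat_ax, line_ax, strip_ax, cax = matrix_and_line_shell(strip=True)
# im = mat_ax.imshow(arr, aspect='auto', origin='lower')
# line_ax.plot(arr.mean(axis=0))
# fig.colorbar(im, cax=cax, orientation='horizontal')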
def clustered_sortind(x, k=10, scorefunc=None):
"""
Uses MiniBatch k-means clustering to cluster matrix into groups.
Each cluster of rows is then sorted by `scorefunc` -- by default, the max
peak height when all rows in a cluster are averaged, or
cluster.mean(axis=0).max().
Returns the index that will sort the rows of `x` and a list of "breaks".
`breaks` is essentially a cumulative row count for each cluster boundary.
In other words, after plotting the array you can use axhline on each
"break" to plot the cluster boundary.
If `k` is a list or tuple, iteratively try each one and select the best
with the lowest mean distance from cluster centers.
:param x: Matrix whose rows are to be clustered
:param k: Number of clusters to create or a list of potential clusters; the
optimum will be chosen from the list
:param scorefunc: Optional function for sorting rows within clusters. Must
accept a single argument of a NumPy array.
"""
try:
from sklearn.cluster import MiniBatchKMeans
except ImportError:
raise ImportError('please install scikit-learn for '
'clustering.')
# If integer, do it once and we're done
if isinstance(k, int):
best_k = k
else:
mean_dists = {}
for _k in k:
mbk = MiniBatchKMeans(init='k-means++', n_clusters=_k)
mbk.fit(x)
mean_dists[_k] = mbk.transform(x).mean()
best_k = sorted(mean_dists.items(), key=lambda x: x[1])[0][0]
mbk = MiniBatchKMeans(init='k-means++', n_clusters=best_k)
mbk.fit(x)
k = best_k
labels = mbk.labels_
scores = np.zeros(labels.shape, dtype=float)
if not scorefunc:
def scorefunc(x):
return x.mean(axis=0).max()
for label in range(k):
ind = labels == label
score = scorefunc(x[ind, :])
scores[ind] = score
pos = 0
breaks = []
ind = np.argsort(scores)
for k, g in groupby(labels[ind]):
pos += len(list(g))
breaks.append(pos)
return ind, breaks
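# A hedged usage sketch of the pattern described in the docstring above (names are illustrative):
# ind, breaks = clustered_sortind(arr, k=[4, 6, 8])
# ax.imshow(arr[ind], aspect='auto', origin='lower')
# for b in breaks:
#     ax.axhline(b, color='w', linestyle='--')   # mark cluster boundaries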
def input_ip_plots(iparr, inputarr, diffed, x, sort_ind,
prefix=None, limits1=(None, None), limits2=(None, None),
hlines=None, vlines=None):
"""
All-in-one plotting function to make a 5-panel figure.
Panels are IP, input, and diffed; plus 2 line plots showing averages.
:param iparr, inputarr: NumPy arrays constructed by a genomic_signal object
:param diffed: Difference of `iparr` and `inputarr`, but can be some other transformation.
:param x: Extent to use -- for TSSs, maybe something like
np.linspace(-1000, 1000, bins), or for just bin IDs, something like
`np.arange(bins)`.
:param sort_ind: row order for each of the 3 panels -- usually interesting
to use `clustered_sortind` or `tip_zscores`
:param prefix: Used to prefix plot titles, e.g. '%(prefix)s IP'
:param limits1: Tuple passed to the Normalize function for IP and input.
:param limits2: Tuple passed to the Normalize function for the diffed array
:param hlines: List of (position, kwarg) tuples for plotting horizontal
lines. Kwargs are passed directly to axhline. Useful for delimiting
clusters, if you used `clustered_sortind` and have both `row_order` and
`breaks`.
:param vlines: List of (position, kwargs) tuples. A vertical line will be
plotted at each position using kwargs.
"""
# global min and max
gmin = min(iparr.min(), inputarr.min())
gmax = max(iparr.max(), inputarr.max())
fig = plt.figure(figsize=(10, 10))
# 3 arrays, 2 line plots, a gene strip, and 2 colorbars. Plots share the
# axes that make sense
#
# 3 arrays
ax1 = plt.subplot2grid((9, 9), (0, 0),
colspan=3, rowspan=6)
ax2 = plt.subplot2grid((9, 9), (0, 3),
colspan=3, rowspan=6, sharex=ax1, sharey=ax1)
ax3 = plt.subplot2grid((9, 9), (0, 6),
colspan=3, rowspan=6, sharex=ax1, sharey=ax1)
# 2 line plots
ax4 = plt.subplot2grid((9, 9), (6, 3), colspan=3, rowspan=3, sharex=ax1)
ax5 = plt.subplot2grid((9, 9), (6, 6), colspan=3, rowspan=3, sharex=ax1)
# 2 colorbars
cax1 = plt.Axes(fig, rect=(0.05, 0.25, 0.25, 0.025))
cax2 = plt.Axes(fig, rect=(0.05, 0.15, 0.25, 0.025))
# For nice imshow axes
extent = (min(x), max(x), 0, diffed.shape[0])
cm = matplotlib.cm.gist_gray
cm.set_bad('k')
cm.set_over('r')
cm.set_under('b')
limits1 = list(limits1)
limits2 = list(limits2)
all_base = np.column_stack((iparr.ravel(), inputarr.ravel())).ravel()
if limits1[0] is None:
limits1[0] = stats.scoreatpercentile(
all_base, 1. / all_base.size)
if limits1[1] is None:
limits1[1] = stats.scoreatpercentile(
all_base, 100 - 1. / all_base.size)
if limits2[0] is None:
limits2[0] = stats.scoreatpercentile(
diffed.ravel(), 1. / all_base.size)
if limits2[1] is None:
limits2[1] = stats.scoreatpercentile(
diffed.ravel(), 100 - 1. / all_base.size)
del all_base
imshow_kwargs = dict(
interpolation='nearest',
aspect='auto',
cmap=cm,
norm=matplotlib.colors.Normalize(*limits1),
extent=extent,
origin='lower')
# modify kwargs for diffed (by changing the normalization)
diffed_kwargs = imshow_kwargs.copy()
diffed_kwargs['norm'] = matplotlib.colors.Normalize(*limits2)
# IP
mappable1 = ax1.imshow(iparr[sort_ind, :], **imshow_kwargs)
# input
mappable2 = ax2.imshow(inputarr[sort_ind, :], **imshow_kwargs)
# diffed
mappable3 = ax3.imshow((diffed)[sort_ind, :], **diffed_kwargs)
# IP and input line plot with vertical line
ax4.plot(x, inputarr.mean(axis=0), color='k', linestyle='--',
label='input')
ax4.plot(x, iparr.mean(axis=0), color='k', label='ip')
ax4.axvline(0, color='k', linestyle=':')
# Diffed line plot with vertical line
ax5.plot(x, diffed.mean(axis=0), 'k', label='enrichment')
ax5.axvline(0, color='k', linestyle=':')
# Colorbars
cbar1 = fig.colorbar(mappable1, cax1, orientation='horizontal')
cbar2 = fig.colorbar(mappable3, cax2, orientation='horizontal')
fig.add_axes(cax1)
fig.add_axes(cax2)
# labeling...
ax1.set_ylabel('features')
plt.setp(ax2.get_yticklabels(), visible=False)
plt.setp(ax3.get_yticklabels(), visible=False)
ax4.set_xlabel('bp')
ax4.set_ylabel('mean reads per million mapped reads')
ax5.set_xlabel('bp')
cax1.set_xlabel('Reads per million mapped reads')
cax2.set_xlabel('Enrichment (RPMMR)')
if prefix is None:
prefix = ""
ax1.set_title('%s IP' % prefix)
ax2.set_title('%s input' % prefix)
ax3.set_title('Difference')
# diffed line plot should have y ax on right
ax5.yaxis.set_ticks_position('right')
ax5.yaxis.set_label_position('right')
ax5.set_ylabel('enriched reads per million mapped reads')
# Legends
ax4.legend(loc='best', frameon=False)
ax5.legend(loc='best', frameon=False)
# Make sure everybody snaps to xmin/xmax
for ax in [ax1, ax2, ax3, ax4, ax5]:
ax.axis(xmin=extent[0], xmax=extent[1])
if not hlines:
hlines = []
if not vlines:
vlines = []
for ax in [ax1, ax2, ax3]:
for pos, kwargs in hlines:
ax.axhline(pos, **kwargs)
for pos, kwargs in vlines:
ax.axvline(pos, **kwargs)
fig.subplots_adjust(bottom=0.05, top=0.95, hspace=0.75, wspace=0.9)
return fig
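# A hedged call sketch (array names and bin count are assumptions, not from the original):
# x = np.linspace(-1000, 1000, ip.shape[1])
# order = np.argsort(tip_zscores(ip))
# fig = input_ip_plots(ip, inp, ip - inp, x, order, prefix="H3K4me3")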
|
import os
from datetime import timedelta
from django.core.management.base import BaseCommand
from django.utils import timezone
from pdfwebsite.models import File
class Command(BaseCommand):
help = 'Removes files that are more than an hour old'
def handle(self, *args, **kwargs):
time_threshold = timezone.now() - timedelta(hours=1)
results = File.objects.filter(date_posted__lt=time_threshold)
for file in results:
path = file.path
try:
os.remove(path)
except OSError:
pass
file.delete()
|
from Site import *
class magiccardmarket(Site):
def nextPage(self):
return ""
def extract(self):
# Debug statements in the original made everything below unreachable; they are
# commented out here so the image extraction actually runs.
# print self.html.encode('utf-8')
# return ' '
ourTable = []
#State we keep track of
inURL = False
#start of an image tag
tagStart = self.html.find("<img", 0)
while( tagStart != -1):
#locate the end of the tag
tagEnd = self.html.find('>', tagStart)
if tagEnd == -1: #We are done, return the data!
return ourTable
#contents of the tag
tagText = self.html[tagStart:tagEnd+1]
#locate the image URL
imgStart=self.html.find('src="', tagStart)
imgEnd=self.html.find('"', imgStart+5)
imgURL = self.html[imgStart+5:imgEnd]
taille=0
baliseTaille=self.html.find('height="',imgEnd)
if not baliseTaille == -1:
baliseTaille=baliseTaille+8
finBalise= self.html.find(">",imgEnd)
if baliseTaille < finBalise:
fin=self.html.find('" ',baliseTaille)
taille=self.html[baliseTaille:fin]
if (imgURL.find('.jpg')>=0 or imgURL.find('.jpeg')>=0) and (int(taille) > 200 or int(taille)==0):
ourTable.append(imgURL)
#print imgURL
#Look for the NEXT start URL. Anything between the current
#end tag and the next Start Tag is potential data!
tagStart = self.html.find("<img", tagEnd+1)
return(ourTable)
|
import logging
import time
import io
from . import projection
from . import simple_downloader
from PIL import Image
class TimeMachine(object):
def __init__(self, dm_map):
self._dm_map = dm_map
# self.dynmap = dynmap.DynMap(url)
def capture_single(self, map, t_loc, size, pause=0.25):
from_tile, to_tile = t_loc.make_range(size[0], size[1])
zoomed_scale = projection.zoomed_scale(t_loc.zoom)
width, height = (abs(to_tile.x - from_tile.x) * 128 / zoomed_scale, abs(to_tile.y - from_tile.y) * 128 / zoomed_scale)
logging.info('final size in px: [%d, %d]', width, height)
dest_img = Image.new('RGB', (int(width), int(height)))
logging.info('downloading tiles...')
# logging.info('tile image path: %s', image_url)
total_tiles = len(range(from_tile.x, to_tile.x, zoomed_scale)) * len(range(from_tile.y, to_tile.y, zoomed_scale))
processed = 0
for x in range(from_tile.x, to_tile.x, zoomed_scale):
for y in range(from_tile.y, to_tile.y, zoomed_scale):
img_rel_path = map.image_url(projection.TileLocation(x, y, t_loc.zoom))
img_url = self._dm_map.url + img_rel_path
processed += 1
logging.info('tile %d/%d [%d, %d]', processed, total_tiles, x, y)
try:
img_data = simple_downloader.download(img_url, True)
except Exception as e:
logging.info('Unable to download "%s": %s', img_url, str(e))
continue
stream = io.BytesIO(img_data)
im = Image.open(stream)
box = (int(abs(x - from_tile.x) * 128 / zoomed_scale), int((abs(to_tile.y - y) - zoomed_scale) * 128 / zoomed_scale))
logging.debug('place to [%d, %d]', box[0], box[1])
dest_img.paste(im, box)
# avoid throttle limit, don't overload the server
time.sleep(float(pause))
return dest_img
def compare_images(self, image1, image2):
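# Returns the fraction of pixels that differ between the two images;
# assumes both images have identical dimensions and mode.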
file1data = list(image1.getdata())
file2data = list(image2.getdata())
diff = 0
for i in range(len(file1data)):
if file1data[i] != file2data[i]:
diff += 1
return float(diff) / len(file1data)
|
from flask_restful import Resource
class HealthcheckResource(Resource):
def get(self):
"""
This is a healthcheck endpoint
---
responses:
200:
description: healthcolor
"""
return {"status": "green"}, 200
|
#!/usr/bin/env python
from fsevents import Observer
from fsevents import Stream
from googlestorage import Googlestorage
"""
Bit Description
IN_ACCESS File was accessed (read) (*)
IN_ATTRIB Metadata changed (permissions, timestamps, extended attributes, etc.) (*)
IN_CLOSE_WRITE File opened for writing was closed (*)
IN_CLOSE_NOWRITE File not opened for writing was closed (*)
IN_CREATE File/directory created in watched directory (*)
IN_DELETE File/directory deleted from watched directory (*)
IN_DELETE_SELF Watched file/directory was itself deleted
IN_MODIFY File was modified (*)
IN_MOVE_SELF Watched file/directory was itself moved
IN_MOVED_FROM File moved out of watched directory (*)
IN_MOVED_TO File moved into watched directory (*)
IN_OPEN File was opened (*)
"""
"""
==mask value==
512:
2:
256: looks like a new file coming in
"""
# TODO
# DATABASE, METADATA
# NOT BIG DEAL!!!
def main():
observer = Observer()
observer.start()
path = '/Users/seungjin/Desktop'
def callback(event):
#print "mask: " + str(event.mask)
#print "cookie: " + str(event.cookie)
#print "name: " + str(event.name)
print event
if event.mask == 256: #looks like a new file coming in
newFile(str(event.name))
elif event.mask == 512: #looks like the file was deleted
rmFile(str(event.name))
elif event.mask == 2: #looks like overwriting?
print "hihihihi"
def newFile(filename):
print "new file is coming"
#pushing this file into cloud
gs = Googlestorage()
#print gs.list_objects()
gs.upload_objects(filename)
def rmFile(filename):
#print "%s is removed" % filename
gs = Googlestorage() # this is evil.. do i need to make a global Googlestorage object??? idk
gs.delete_objects_and_buckets(filename)
stream = Stream(callback,path,file_events=True)
observer.schedule(stream)
if __name__ == "__main__":
watch_folder = ""
main()
|
def creer_pile():
'''create an empty stack'''
return []
def empiler(ma_pile,valeur):
'''push a value onto the stack'''
ma_pile.append(valeur)
def depiler(ma_pile):
'''remove and return the last element of the stack'''
assert len(ma_pile)>0
return ma_pile.pop()
def sommet(ma_pile):
'''return the top of the stack (the last element pushed) without removing it'''
assert len(ma_pile)>0
return ma_pile[-1]
def taille(ma_pile):
'''return the number of elements in the stack'''
return len(ma_pile)
def est_vide(ma_pile):
'''return True if the stack is empty'''
return taille(ma_pile)==0
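# A short usage sketch of the stack API above (illustrative values):
if __name__ == "__main__":
    p = creer_pile()
    empiler(p, 1)
    empiler(p, 2)
    assert sommet(p) == 2
    assert depiler(p) == 2
    assert taille(p) == 1
    assert not est_vide(p)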
|
'''
File name: convert_coords_wExons.py
Author: Patrick Monnahan
Date created: 09/01/18
Python Version: 3.6
Project: Split Genes
Downstream of: get_boundary_exons.py
Upstream of: make_psuedo_annotation.py
Description: For first/last exon pairs in the provided bed file, this script converts coordinates from the original reference to a user-specified reference via automated blasting/parsing. The script get_boundary_exons.py can be used to generate the input
'''
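# A hedged command-line sketch (file names and paths are hypothetical, not from the original):
# python convert_coords_wExons.py -b boundary_exons.bed -d /path/to/target_blastdb \
#     -r source_reference.fasta -t /scratch/tmp -O converted_coords.bed --cluster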
from Bio.Blast.Applications import NcbiblastnCommandline
from Bio.Blast import NCBIXML
from Bio import SeqIO
import argparse
import pdb
import os
import random
import string
import glob
def makeCoordDict(bedfile):
coord_dict = {} # This dictionary will organize gene/coords within each chromosome
gene_dict = {} # This dictionary uses genes as keys and simply stores coordinates
with open(bedfile, 'r') as bed:
for line in bed:
line = line.strip("\n").split("\t")
chrom = line[0].strip("chr0")
chrom = chrom.strip("chr")
start = line[1]
stop = line[2]
geneID = line[3]
if geneID.endswith("L"):
try:
gene_dict[geneID[:-2]].append(start)
except KeyError:
gene_dict[geneID[:-2]] = [chrom, start]
elif geneID.endswith("U"):
try:
gene_dict[geneID[:-2]].append(stop)
except KeyError:
gene_dict[geneID[:-2]] = [chrom, stop]
try:
coord_dict[chrom].append([start,stop,geneID])
except KeyError:
coord_dict[chrom] = [[start,stop,geneID]]
# pdb.set_trace()
return(coord_dict, gene_dict)
def getSequences(coord_dict, source_ref, temp_file):
with open(temp_file, 'w') as multi_fasta: # We will get sequences corresponding to gene boundaries from the current reference that we wish to blast and store these in a file
for i, chrom in enumerate(SeqIO.parse(source_ref, "fasta")): # Use BIOpython to parse reference fasta.
try:
for coord in coord_dict[chrom.id]: # Loop over all gene coordinates of interest for the current chromosome
seq = str(chrom.seq)[int(coord[0]):int(coord[1])] # Pull sequence from reference using current gene coordinates
N = str(len(seq) - seq.count("N")) # Count number of N's in the sequence in order to adjust query length that is included in multi_fasta
# pdb.set_trace()
if coord[-1].endswith("L"): # L stands for Lower boundary or Lower exon
multi_fasta.write(f">{chrom.id}:{coord[0]}-{coord[1]};{coord[2][:-2]}S;{N}\n{seq}\n") # Write tmp gene id and exon sequence
else: # Catch genes that end with U for Upper boundary
multi_fasta.write(f">{chrom.id}:{coord[0]}-{coord[1]};{coord[2][:-2]}E;{N}\n{seq}\n")
except KeyError: pass
multi_fasta.close()
return()
def Blast(multi_fasta, Toblastdb, cluster=False):
if cluster:
blastn_cline = NcbiblastnCommandline(query=multi_fasta, db=Toblastdb, evalue=0.001,
outfmt=5, max_target_seqs=4, max_hsps_per_subject=4, out=multi_fasta.replace(".fasta",".xml"))
else:
blastn_cline = NcbiblastnCommandline(query=multi_fasta, db=Toblastdb, evalue=0.001,
outfmt=5, max_target_seqs=6, max_hsps=6, out=multi_fasta.replace(".fasta",".xml"))
blastn_cline()
results = NCBIXML.parse(open(multi_fasta.replace(".fasta",".xml")))
return(results)
def parseBlastResults(blastResults, newCoords, thresshold, breakFirst=False):
noHits =[] # Initially stores all results but we will subtract out the found genes to just return those with no hits
found = []
for result in blastResults: # each 'result' is for a blasted exon
noHits.append(result.query)
gene = result.query.split(";")[1]
qlen = int(result.query.split(";")[-1]) # exon length - # of N's
chrom = result.query.split(":")[0] # chromosome is the part of the query id before the colon
for alignment in result.alignments: # each alignment
for hsps in alignment.hsps: # Loop over High Scoring Sequence Pairs with each alignement
if abs(hsps.align_length - qlen) / qlen < thresshold: # Percent difference of alignment length WRT query must be within thresshold
found.append(result.query)
if gene[-1] == "S":
pos = hsps.sbjct_start - hsps.query_start + 1 # sbjct_start is blast location of ref, but need to modify recorded position based on where first exon ought to start??
elif gene[-1] == "E":
pos = hsps.sbjct_end + (qlen - hsps.query_end)
else: print("Did not find gene bound of query gene") # This should never trip
try:
newCoords[gene].append(int(pos))
except KeyError:
newCoords[gene] = [int(chrom), int(pos)]
if breakFirst: break # Can be used to just take the first sufficiently matching alignment
if breakFirst: break
noHits = list(set(noHits) - set(found)) # Determine which genes were not found by blasting
return(newCoords, noHits, found)
def removeDups(newCoords, gene_dict):
"""In the case that an exon returns multiple sufficiently good matches (happens frequently), this function will look across all candidate positions for start and end coordinates and select the pair that best matches the expected gene length"""
dups = []
for gene in newCoords: # Find duplicates as gene entries with
if len(newCoords[gene]) > 2: # items for a gene key include at least the chromosome and one position entry. Anything greater than two means multiple position entries, hence a duplicate
dups.append(gene)
# pdb.set_trace()
done =[]
finCoords = {}
for dup in dups: #Remove duplicate entries based on which entry is closest in expected position
# pdb.set_trace()
gene = dup[:-1] # Remove the S or E suffix
if gene not in done: # Skip duplicate removal if already done for either E or S entry for a particular gene.
try:
starts = newCoords[gene + "S"][1:] # Get list of candidate starts
ends = newCoords[gene + "E"][1:]
except KeyError:
print(f"Did not find new coordinates for {gene}")
continue # Skip this gene if either E or S are not found
elen = abs(int(gene_dict[gene][1]) - int(gene_dict[gene][2])) # Expected length of the gene based on old coordinates
best_match = 999999999 # Initialize for comparison
# pdb.set_trace()
for i, start in enumerate(starts):
olens = [abs(start - end) for end in ends] # Calculate the observed length for every candidate end with current start
len_diffs = [abs(olen - elen) for olen in olens]
new_best = min(len_diffs) # Find end position that minimizes difference in observed and expected length for current start
if new_best < best_match: # See if current start has a candidate end that provides a better match to expected length
best_match = new_best
idx = len_diffs.index(new_best)
winners = [i + 1, idx + 1] # Record index positions within start and end lists for the selected best match coordinates
# pdb.set_trace()
finCoords[gene] = [newCoords[gene + "S"][0], newCoords[gene + "S"][winners[0]], newCoords[gene + "E"][winners[1]]] # Report the identified winners from the S and E lists after going through all possible start and end pairs
done.append(gene)
for gene in newCoords: # Complete final coordinate list by adding all non-duplicated genes
gene = gene[:-1]
if gene not in done:
try:
finCoords[gene] = [newCoords[gene + "S"][0], newCoords[gene + "S"][1], newCoords[gene + "E"][1]]
except KeyError:
print(f"Did not find new coordinates for {gene}")
return(finCoords)
def getNewCoords(multi_fasta, Toblastdb, thresshold, gene_dict, cluster=False):
# pdb.set_trace()
results = Blast(multi_fasta, Toblastdb, cluster)
newCoords ={}
newCoords, noHits, found = parseBlastResults(results, newCoords, thresshold)
# pdb.set_trace()
newCoords = removeDups(newCoords, gene_dict)
# pdb.set_trace()
return(newCoords)
def verifyNewCoords(newCoords, gene_dict, threshold):
"""This function checks whether the new gene lengths determined via blasting are within a given threshold of the old distance determined from the annotation"""
verCoords = {}
for gene, coords in gene_dict.items():
# pdb.set_trace()
start = int(coords[1])
stop = int(coords[2])
length = abs(stop - start) ##THIS HAS TO ABS IN ORDER TO ACCOUNT FOR ALIGNMENT TO OPPOSITE STRAND
try:
nchrom, nstart, nstop = newCoords[gene] # info from new coordinates
nlen = abs(nstop - nstart)
if abs(nlen - length) / length > threshold: # Difference in new and old length expressed as a percent of the old length
print(f"bad match for {gene}: old {start}-{stop} {int(stop)-int(start)} new {nstart}-{nstop} {nlen}")
else:
if stop - start < 0: # Write entry such that start precedes stop
verCoords[gene] = [str(newCoords[gene][0]), str(newCoords[gene][2]), str(newCoords[gene][1])]
else:
verCoords[gene] = [str(x) for x in newCoords[gene]]
print(f"good match for {gene}: old {start}-{stop} {int(stop)-int(start)} new {nstart}-{nstop} {int(nstop) - int(nstart)}")
except KeyError: pass
return(verCoords)
def writeOutBed(outFile, newCoords, oldCoords):
"""newCoords should be output of verifyNewCoords and oldCoords should be gene_dict from makeCoordDict"""
with open(outFile, 'w') as out: # Write output to file
for gene in newCoords:
new_coord = "\t".join(newCoords[gene])
old_coord = "\t".join(oldCoords[gene])
new_len = int(newCoords[gene][2]) - int(newCoords[gene][1])
old_len = int(oldCoords[gene][2]) - int(oldCoords[gene][1])
out.write(f"{new_coord}\t{gene}\t{old_coord}\t{new_len}\t{old_len}\n")
return()
if __name__ == "__main__":
import time
start_time = time.time()
parser = argparse.ArgumentParser(description = "For first/last exon pairs in the provided bed file, this script converts coordinates from the original reference to a user-specified reference via automated blasting/parsing. The script get_boundary_exons.py can be used to generate the input")
parser.add_argument('-b', type=str, metavar='bed_file', required=True, help='bed file containing coordinates and geneIDs for genes you wish to convert')
parser.add_argument('-d', type=str, metavar='blastdb', required=True, help='Full path to blast database. This corresponds to the reference whose coordinates you are trying to convert to')
parser.add_argument('-r', type=str, metavar='source_ref', required=True, help='Reference fasta corresponding to reference converting from as in bed file')
parser.add_argument('-t', type=str, metavar="tmp_dir", default=os.getcwd(), help="temporary directory that will store multi fasta for blasting along with blast results")
parser.add_argument('-T1', type=float, metavar="size_similarity_threshold", default=0.5, help="percent difference between aligned length and query length must be within this threshold, which is specified as a proportion of the query length")
parser.add_argument('-T2', type=float, metavar="length_similarity_threshold", default=3.0, help="proportion difference allowed between the gene length computed from the old coordinates and the one computed from the new coordinates")
parser.add_argument('-O', type=str, metavar='output_bed_name', required=True, help="bed file to write output to. Fields are new chrom, start and stop position followed by geneID, the old coordinates, and the new and old gene lengths")
parser.add_argument('-v', action="store_true")
parser.add_argument('--cluster', action="store_true", help="set this flag if running on cluster. Necessary for using MSI blast installations")
parser.add_argument('-k', action="store_true", help="keep temporary files: fastas and blast results")
args = parser.parse_args()
coord_dict, gene_dict = makeCoordDict(args.b) # Parse info in input bed file
print("Made Coordinate Dictionary")
tmp_str = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(10)) # Make random string for tmp fasta and blast results
tmp_file = f"{args.t}/tmp{tmp_str}.fasta"
getSequences(coord_dict, args.r, tmp_file) # Retrieve original exon sequences
print("Retrieved sequences of coordinates")
newCoords = getNewCoords(tmp_file, args.d, args.T1, gene_dict, args.cluster) # get new coordinates by blasting the exon sequences against the target database
# pdb.set_trace()
verCoords = verifyNewCoords(newCoords, gene_dict, args.T2) # Verify that new gene lengths are within given distance from old gene lengths
writeOutBed(args.O, verCoords, gene_dict) # Write output to bed file
if not args.k:
for j in glob.glob(f'{args.t}/tmp{tmp_str}*'):
os.remove(j)
|
from django.test import TestCase
from django.contrib.auth.models import User
import mock
from projects.models import (
ProjectBuild, ProjectDependency, ProjectBuildDependency)
from projects.helpers import (
build_project, build_dependency, archive_projectbuild,
get_transport_for_projectbuild)
from .factories import ProjectFactory, DependencyFactory
from jenkins.tests.factories import BuildFactory, ArtifactFactory
from archives.tests.factories import ArchiveFactory
class BuildProjectTest(TestCase):
def test_build_project(self):
"""
build_project should create build dependencies for each of the project
dependencies and schedule builds of each.
"""
project = ProjectFactory.create()
dependency1 = DependencyFactory.create()
ProjectDependency.objects.create(
project=project, dependency=dependency1)
dependency2 = DependencyFactory.create()
ProjectDependency.objects.create(
project=project, dependency=dependency2)
with mock.patch("projects.helpers.build_job") as mock_build_job:
new_build = build_project(project)
self.assertIsInstance(new_build, ProjectBuild)
build_dependencies = ProjectBuildDependency.objects.filter(
projectbuild=new_build)
self.assertEqual(2, build_dependencies.count())
self.assertEqual(
[dependency1.pk, dependency2.pk],
list(build_dependencies.values_list("dependency", flat=True)))
mock_build_job.delay.assert_has_calls(
[mock.call(dependency1.job.pk, build_id=new_build.build_id),
mock.call(dependency2.job.pk, build_id=new_build.build_id)])
def test_build_project_with_no_queue_build(self):
"""
If we pass queue_build = False to build_project, then no builds should
happen.
"""
project = ProjectFactory.create()
dependency = DependencyFactory.create()
ProjectDependency.objects.create(
project=project, dependency=dependency)
with mock.patch("projects.helpers.build_job") as mock_build_job:
build_project(project)
self.assertItemsEqual([], mock_build_job.call_args_list)
def test_build_project_with_dependency_with_parameters(self):
"""
build_project should pass the parameters for a dependency to the
build_job request.
"""
project = ProjectFactory.create()
dependency = DependencyFactory.create(parameters="THISVALUE=mako")
ProjectDependency.objects.create(
project=project, dependency=dependency)
with mock.patch("projects.helpers.build_job") as mock_build_job:
new_build = build_project(project)
self.assertIsInstance(new_build, ProjectBuild)
mock_build_job.delay.assert_called_once_with(
dependency.job.pk, build_id=new_build.build_id,
params={"THISVALUE": "mako"})
def test_build_project_with_specified_dependencies(self):
"""
If a list of dependencies is provided, then we should only build those
dependencies.
"""
[dep1, dep2, dep3] = DependencyFactory.create_batch(3)
project = ProjectFactory.create()
for dep in [dep1, dep2, dep3]:
ProjectDependency.objects.create(
project=project, dependency=dep, auto_track=True)
build = BuildFactory.create(job=dep1.job)
# Reload object from database.
project_dep1 = ProjectDependency.objects.get(
project=project, dependency=dep1)
self.assertEqual(build, project_dep1.current_build)
with mock.patch("projects.helpers.build_job") as mock_build_job:
new_build = build_project(project, dependencies=[dep1, dep2])
projectbuild_dependencies = ProjectBuildDependency.objects.filter(
projectbuild=new_build)
self.assertEqual(3, projectbuild_dependencies.all().count())
self.assertEqual(
set([dep1, dep2, dep3]),
set([x.dependency for x in projectbuild_dependencies.all()]))
mock_build_job.delay.assert_has_calls(
[mock.call(dep1.job.pk, build_id=new_build.build_id),
mock.call(dep2.job.pk, build_id=new_build.build_id)])
def test_build_project_assigns_user_correctly(self):
"""
If we pass a user to build_project, the user is assigned as the user
for the projectbuild.
"""
user = User.objects.create_user("testing")
project = ProjectFactory.create()
dependency1 = DependencyFactory.create()
ProjectDependency.objects.create(
project=project, dependency=dependency1)
new_build = build_project(project, user=user, queue_build=False)
self.assertEqual(user, new_build.requested_by)
class BuildDependencyTest(TestCase):
def test_build_dependency(self):
"""
build_dependency schedules the build of a dependency.
"""
dependency = DependencyFactory.create()
with mock.patch("projects.helpers.build_job") as mock_build_job:
build_dependency(dependency)
mock_build_job.delay.assert_called_once_with(dependency.job.pk)
def test_build_dependency_with_parameters(self):
"""
build_dependency schedules the build of a dependency along with any
parameters.
"""
dependency = DependencyFactory.create(
parameters="THISVAL=500\nTHATVAL=testing")
with mock.patch("projects.helpers.build_job") as mock_build_job:
build_dependency(dependency)
mock_build_job.delay.assert_called_once_with(
dependency.job.pk, params={"THISVAL": "500", "THATVAL": "testing"})
def test_build_dependency_with_build_id(self):
"""
build_dependency schedules the build of a dependency along with the
build_id.
"""
dependency = DependencyFactory.create()
with mock.patch("projects.helpers.build_job") as mock_build_job:
build_dependency(dependency, build_id="201403.2")
mock_build_job.delay.assert_called_once_with(
dependency.job.pk, build_id="201403.2")
class ArchiveProjectBuildTest(TestCase):
def test_get_transport_for_projectbuild(self):
"""
get_transport_for_projectbuild returns an Archiver ready to archive a
project build.
"""
archive = ArchiveFactory.create()
project = ProjectFactory.create()
dependency = DependencyFactory.create()
ProjectDependency.objects.create(
project=project, dependency=dependency)
projectbuild = build_project(project, queue_build=False)
mock_policy = mock.Mock()
with mock.patch.multiple(
archive, get_archiver=mock.DEFAULT,
get_policy=mock.DEFAULT) as mock_archive:
mock_archive["get_policy"].return_value = mock_policy
get_transport_for_projectbuild(projectbuild, archive)
mock_policy.assert_called_once_with(projectbuild)
mock_archive["get_archiver"].return_value.assert_called_once_with(
mock_policy.return_value, archive)
def test_archive_projectbuild(self):
"""
Archive project build should create an archiver and archive it.
"""
archive = ArchiveFactory.create()
project = ProjectFactory.create()
dependency = DependencyFactory.create()
ProjectDependency.objects.create(
project=project, dependency=dependency)
projectbuild = build_project(project, queue_build=False)
build = BuildFactory.create(
job=dependency.job, build_id=projectbuild.build_id)
ArtifactFactory.create_batch(3, build=build)
with mock.patch.object(archive, "get_archiver") as mock_archive:
archive_projectbuild(projectbuild, archive)
mock_archive.return_value.return_value.assert_has_calls(
[mock.call.archive()])
|
from django.template import loader
from django.http import HttpResponse, HttpResponseRedirect
from forms import BlogEntry
from models import blog
from markdown2 import Markdown
def index(request):
if 'username' not in request.session:
return HttpResponseRedirect('../login')
temp = loader.get_template('blog/index.html')
blogs_all = list(blog.objects.all())
context = dict()
context['username'] = request.session['username']
context['objects'] = blogs_all
return HttpResponse(temp.render(context, request))
def create_blog(request):
if 'username' not in request.session:
return HttpResponseRedirect('../login')
if request.method == 'POST':
form = BlogEntry(request.POST, request.FILES)
if form.is_valid():
ent = blog()
ent.user = request.session['username']
ent.title = form.cleaned_data['blogTitle']
text = form.cleaned_data['blogContent']
con = Markdown()
ent.body = con.convert(text)
ent.image = form.cleaned_data['blogPic']
ent.save()
return HttpResponseRedirect('../')
form = BlogEntry()
context = dict()
context['form'] = form
temp = loader.get_template('blog/register.html')
return HttpResponse(temp.render(context, request))
def show_blog(request, blog_id):
if 'username' not in request.session:
return HttpResponseRedirect('../login')
lt = blog.objects.get(id1=blog_id)
context = {'blog': lt, 'username': request.session['username']}
temp = loader.get_template('blog/show_blog.html')
return HttpResponse(temp.render(context, request))
|
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Build a .gyp that depends on 2 gyp files with the same name.
"""
import TestGyp
test = TestGyp.TestGyp()
test.run_gyp('all.gyp', chdir='src')
test.relocate('src', 'relocate/src')
test.build('all.gyp', chdir='relocate/src')
expect1 = """\
Hello from main1.cc
"""
expect2 = """\
Hello from main2.cc
"""
if test.format == 'xcode':
chdir1 = 'relocate/src/subdir1'
chdir2 = 'relocate/src/subdir2'
else:
chdir1 = chdir2 = 'relocate/src'
test.run_built_executable('program1', chdir=chdir1, stdout=expect1)
test.run_built_executable('program2', chdir=chdir2, stdout=expect2)
test.pass_test()
|
# -*- coding: utf-8 -*-
"""
This parser finds the connection between each dictionary's abbreviation and its full name.
DONE:
- Input/Output
- Visit every dictionary and find its full name and abbreviation
TO DO:
- Everything is already done; you only need to run the spider
- Maybe rewrite the code to use pandas as the table engine instead of csv (optional)
There is no input csv file.
For every dictionary found, the parser writes two columns of information to the output csv file:
abbreviation | full name
Algorithm:
- First, the parser finds all links to dictionaries and visits each of them (the main page lists only the full names).
- For each dictionary, it opens the first word listed in it.
This relies on the assumption that every word in a dictionary has translations from that dictionary.
- On the word's page (which lists only abbreviations) it finds all dictionaries and the links to them, which contain the full names.
## Increasing parsing speed (important settings):
To tune the speed, change settings.py.
Scrapy is built on the asynchronous framework Twisted. A good lecture on async I/O: http://cs.brown.edu/courses/cs168/s12/handouts/async.pdf
Twisted runs several "flows", which are effectively concurrent.
settings.py therefore includes CONCURRENT_REQUESTS, the number of concurrent flows, which you should set yourself.
A larger CONCURRENT_REQUESTS gives more speed, but it can also cause errors such as timeouts:
at high speed the parser tries to download many links simultaneously and some of them can get stuck.
When time is not critical, set CONCURRENT_REQUESTS < 16; otherwise set it > 16.
To deal with timeout errors, you can increase DOWNLOAD_TIMEOUT (in seconds).
You can also exclude some dictionaries for a narrower parse using EXCEPTED_DICTIONARIES (a list of dictionary abbreviations).
"""
import csv
import scrapy
from scrapy import Request
# Settings
# Delimiter and quotechar are parameters of the csv file. You should know them if you created the file
CSV_DELIMITER = ' '
CSV_QUOTECHAR = '"' # '|'
OUTPUT_CSV_NAME = 'output_dictionaries_abbreviations.csv' # Path to output file with csv type
TRANSLATE_WORD_INDEX = 0 # Index of the column that should be translated. The other columns will be copied to the output file
EXCEPTED_DICTIONARIES = ['Сленг', 'Разговорное выражение', 'табу'] # Dictionaries which shouldn't be in output
class MultitranSpider(scrapy.Spider):
"""
This spider parses all dictionaries and finds the corresponding abbreviation for each one
"""
name = "multitran_dictionaries"
allowed_domains = ["multitran.com"]
start_urls = ['http://www.multitran.com/m.exe?CL=1&s&l1=1&l2=2&SHL=2']
def __init__(self):
self.output_file = open(OUTPUT_CSV_NAME, 'w')
self.output_writer = csv.writer(self.output_file, delimiter=CSV_DELIMITER, quotechar=CSV_QUOTECHAR,
quoting=csv.QUOTE_ALL)
self.output = []
def parse(self, response):
dict_xpath = '//*/tr/td[@width=110]/a/@href'
# i = 0
for dictionaries in response.xpath(dict_xpath):
# print(i)
# i += 1
yield Request("http://multitran.com{}&SHL=2".format(dictionaries.extract()), callback=self.parse_dict)
def parse_dict(self, response):
# print(response.url)
dict_name = response.xpath('//*/td/b/text()').extract()[0]
# self.output_writer.writerow([])
# print()
if response.meta.get("dict_abbr", None) is not None:
# dict_name = response.xpath('//*/tr[1]/td[@class="termsforsubject"][1]/a/@href').extract()[0]
row = [response.meta.get("dict_abbr").split(",")[0], dict_name]
if not "|".join(row) in self.output:
self.output_writer.writerow(row)
self.output.append("|".join(row))
else:
url = "http://multitran.com{}&SHL=2".format(
response.xpath('//*/tr/td[@class="termsforsubject"][1]/a/@href').extract()[0])
# print(url)
yield Request(url=url, callback=self.parse_word,
meta={"dict_name": dict_name, 'prev_url': response.url})
def parse_word(self, response):
# self.output_writer.writerow([response.meta['dict_name'], response.meta['prev_url'], response.url])
dict_xpath = '//*/td[@class="subj"]/a'
for d in response.xpath(dict_xpath):
name = d.xpath("text()").extract()[0]
url = "http://multitran.com{}&SHL=2".format(d.xpath("@href").extract()[0])
yield Request(url=url, callback=self.parse_dict,
meta={"dict_abbr": name})
def close(self, reason):
self.output_file.close()
|
# -*- coding: utf-8 -*-
import itertools
class Solution:
def maxProduct(self, words):
lengths = [len(word) for word in words]
bits = [0] * len(words)
for i, word in enumerate(words):
for c in word:
bits[i] |= 1 << (ord(c) - ord("a"))
result = 0
for i, j in itertools.combinations(range(len(words)), 2):
if lengths[i] * lengths[j] > result and not bits[i] & bits[j]:
result = lengths[i] * lengths[j]
return result
if __name__ == "__main__":
solution = Solution()
assert 16 == solution.maxProduct(["abcw", "baz", "foo", "bar", "xtfn", "abcdef"])
assert 4 == solution.maxProduct(["a", "ab", "abc", "d", "cd", "bcd", "abcd"])
assert 0 == solution.maxProduct(["a", "aa", "aaa", "aaaa"])
|
# Copyright 2023 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import json
from textwrap import dedent
import pytest
from pants.backend.javascript import package_json
from pants.backend.javascript.dependency_inference.rules import (
InferJSDependenciesRequest,
InferNodePackageDependenciesRequest,
JSSourceInferenceFieldSet,
NodePackageInferenceFieldSet,
)
from pants.backend.javascript.dependency_inference.rules import rules as dependency_inference_rules
from pants.backend.javascript.package_json import AllPackageJson
from pants.backend.javascript.target_types import JSSourcesGeneratorTarget, JSSourceTarget
from pants.build_graph.address import Address
from pants.engine.internals.graph import Owners, OwnersRequest
from pants.engine.rules import QueryRule
from pants.engine.target import InferredDependencies, Target
from pants.testutil.rule_runner import RuleRunner
from pants.util.ordered_set import FrozenOrderedSet
@pytest.fixture
def rule_runner() -> RuleRunner:
rule_runner = RuleRunner(
rules=[
*package_json.rules(),
*dependency_inference_rules(),
QueryRule(AllPackageJson, ()),
QueryRule(Owners, (OwnersRequest,)),
QueryRule(InferredDependencies, (InferNodePackageDependenciesRequest,)),
QueryRule(InferredDependencies, (InferJSDependenciesRequest,)),
],
target_types=[*package_json.target_types(), JSSourceTarget, JSSourcesGeneratorTarget],
)
rule_runner.set_options([], env_inherit={"PATH"})
return rule_runner
def given_package(name: str, version: str, **kwargs: str | dict[str, str]) -> str:
return json.dumps({"name": name, "version": version, **kwargs})
def given_package_with_workspaces(name: str, version: str, *workspaces: str) -> str:
return json.dumps({"name": name, "version": version, "workspaces": list(workspaces)})
def get_inferred_package_jsons_address(
rule_runner: RuleRunner, tgt: Target
) -> FrozenOrderedSet[Address]:
return rule_runner.request(
InferredDependencies,
[InferNodePackageDependenciesRequest(NodePackageInferenceFieldSet.create(tgt))],
).include
def test_infers_esmodule_js_dependencies(rule_runner: RuleRunner) -> None:
rule_runner.write_files(
{
"src/js/BUILD": "javascript_sources()",
"src/js/index.mjs": dedent(
"""\
import fs from "fs";
import { x } from "./xes.mjs";
"""
),
"src/js/xes.mjs": "",
}
)
index_tgt = rule_runner.get_target(Address("src/js", relative_file_path="index.mjs"))
addresses = rule_runner.request(
InferredDependencies,
[InferJSDependenciesRequest(JSSourceInferenceFieldSet.create(index_tgt))],
).include
assert set(addresses) == {Address("src/js", relative_file_path="xes.mjs")}
def test_infers_esmodule_js_dependencies_from_ancestor_files(rule_runner: RuleRunner) -> None:
rule_runner.write_files(
{
"src/js/BUILD": "javascript_sources()",
"src/js/a/BUILD": "javascript_sources()",
"src/js/a/index.mjs": dedent(
"""\
import fs from "fs";
import { x } from "../xes.mjs";
"""
),
"src/js/xes.mjs": "",
}
)
index_tgt = rule_runner.get_target(Address("src/js/a", relative_file_path="index.mjs"))
addresses = rule_runner.request(
InferredDependencies,
[InferJSDependenciesRequest(JSSourceInferenceFieldSet.create(index_tgt))],
).include
assert set(addresses) == {Address("src/js", relative_file_path="xes.mjs")}
def test_infers_commonjs_js_dependencies_from_ancestor_files(rule_runner: RuleRunner) -> None:
rule_runner.write_files(
{
"src/js/BUILD": "javascript_sources()",
"src/js/a/BUILD": "javascript_sources()",
"src/js/a/index.cjs": dedent(
"""\
const fs = require("fs");
const { x } = require("../xes.cjs");
"""
),
"src/js/xes.cjs": "",
}
)
index_tgt = rule_runner.get_target(Address("src/js/a", relative_file_path="index.cjs"))
addresses = rule_runner.request(
InferredDependencies,
[InferJSDependenciesRequest(JSSourceInferenceFieldSet.create(index_tgt))],
).include
assert set(addresses) == {Address("src/js", relative_file_path="xes.cjs")}
def test_infers_main_package_json_field_js_source_dependency(rule_runner: RuleRunner) -> None:
rule_runner.write_files(
{
"src/js/BUILD": "package_json()",
"src/js/package.json": given_package("ham", "0.0.1", main="lib/index.js"),
"src/js/lib/BUILD": "javascript_sources()",
"src/js/lib/index.js": "",
}
)
pkg_tgt = rule_runner.get_target(Address("src/js", generated_name="ham"))
addresses = rule_runner.request(
InferredDependencies,
[InferNodePackageDependenciesRequest(NodePackageInferenceFieldSet.create(pkg_tgt))],
).include
assert set(addresses) == {Address("src/js/lib", relative_file_path="index.js")}
def test_infers_browser_package_json_field_js_source_dependency(rule_runner: RuleRunner) -> None:
rule_runner.write_files(
{
"src/js/BUILD": "package_json()",
"src/js/package.json": given_package("ham", "0.0.1", browser="lib/index.js"),
"src/js/lib/BUILD": "javascript_sources()",
"src/js/lib/index.js": "",
}
)
pkg_tgt = rule_runner.get_target(Address("src/js", generated_name="ham"))
addresses = rule_runner.request(
InferredDependencies,
[InferNodePackageDependenciesRequest(NodePackageInferenceFieldSet.create(pkg_tgt))],
).include
assert set(addresses) == {Address("src/js/lib", relative_file_path="index.js")}
def test_infers_bin_package_json_field_js_source_dependency(rule_runner: RuleRunner) -> None:
rule_runner.write_files(
{
"src/js/BUILD": "package_json()",
"src/js/package.json": given_package("ham", "0.0.1", bin="bin/index.js"),
"src/js/bin/BUILD": "javascript_sources()",
"src/js/bin/index.js": "",
}
)
pkg_tgt = rule_runner.get_target(Address("src/js", generated_name="ham"))
addresses = rule_runner.request(
InferredDependencies,
[InferNodePackageDependenciesRequest(NodePackageInferenceFieldSet.create(pkg_tgt))],
).include
assert set(addresses) == {Address("src/js/bin", relative_file_path="index.js")}
@pytest.mark.parametrize(
"exports", ("lib/index.js", {".": "lib/index.js"}, {"lib": "lib/index.js"})
)
def test_infers_exports_package_json_field_js_source_dependency(
rule_runner: RuleRunner, exports: str | dict[str, str]
) -> None:
rule_runner.write_files(
{
"src/js/BUILD": "package_json()",
"src/js/package.json": given_package("ham", "0.0.1", exports=exports),
"src/js/lib/BUILD": "javascript_sources()",
"src/js/lib/index.js": "",
}
)
pkg_tgt = rule_runner.get_target(Address("src/js", generated_name="ham"))
addresses = rule_runner.request(
InferredDependencies,
[InferNodePackageDependenciesRequest(NodePackageInferenceFieldSet.create(pkg_tgt))],
).include
assert set(addresses) == {Address("src/js/lib", relative_file_path="index.js")}
@pytest.mark.parametrize("exports", ("lib/*.js", {".": "lib/*.js"}, {"lib": "lib/*.js"}))
def test_infers_exports_package_json_field_js_source_dependency_with_stars(
rule_runner: RuleRunner, exports: str | dict[str, str]
) -> None:
rule_runner.write_files(
{
"src/js/BUILD": "package_json()",
"src/js/package.json": given_package("ham", "0.0.1", exports=exports),
"src/js/lib/BUILD": "javascript_sources()",
"src/js/lib/index.js": "",
}
)
pkg_tgt = rule_runner.get_target(Address("src/js", generated_name="ham"))
addresses = rule_runner.request(
InferredDependencies,
[InferNodePackageDependenciesRequest(NodePackageInferenceFieldSet.create(pkg_tgt))],
).include
assert set(addresses) == {Address("src/js/lib", relative_file_path="index.js")}
@pytest.mark.parametrize("exports", ("lib/*.js", {".": "lib/*.js"}, {"lib": "lib/*"}))
def test_infers_exports_package_json_field_js_source_dependency_with_stars_interpreted_as_recursive(
rule_runner: RuleRunner, exports: str | dict[str, str]
) -> None:
rule_runner.write_files(
{
"src/js/BUILD": "package_json()",
"src/js/package.json": given_package("ham", "0.0.1", exports=exports),
"src/js/lib/BUILD": "javascript_sources()",
"src/js/lib/index.js": "",
"src/js/lib/subdir/BUILD": "javascript_sources()",
"src/js/lib/subdir/index.js": "",
}
)
pkg_tgt = rule_runner.get_target(Address("src/js", generated_name="ham"))
addresses = rule_runner.request(
InferredDependencies,
[InferNodePackageDependenciesRequest(NodePackageInferenceFieldSet.create(pkg_tgt))],
).include
assert set(addresses) == {
Address("src/js/lib/subdir", relative_file_path="index.js"),
Address("src/js/lib", relative_file_path="index.js"),
}
def test_infers_third_party_package_json_field_js_source_dependency(
rule_runner: RuleRunner,
) -> None:
rule_runner.write_files(
{
"src/js/BUILD": "package_json()",
"src/js/package.json": given_package(
"ham", "0.0.1", main="lib/index.js", dependencies={"chalk": "5.0.2"}
),
"src/js/lib/BUILD": "javascript_sources()",
"src/js/lib/index.js": dedent(
"""\
import chalk from "chalk";
"""
),
}
)
pkg_tgt = rule_runner.get_target(Address("src/js/lib", relative_file_path="index.js"))
addresses = rule_runner.request(
InferredDependencies,
[InferJSDependenciesRequest(JSSourceInferenceFieldSet.create(pkg_tgt))],
).include
assert set(addresses) == {Address("src/js", generated_name="chalk")}
def test_infers_third_party_package_json_field_js_source_dependency_with_import_subpaths(
rule_runner: RuleRunner,
) -> None:
rule_runner.write_files(
{
"src/js/BUILD": "package_json()",
"src/js/package.json": given_package(
"ham",
"0.0.1",
main="lib/index.js",
dependencies={"chalk": "5.0.2"},
imports={"#myChalk": "chalk"},
),
"src/js/lib/BUILD": "javascript_sources()",
"src/js/lib/index.js": dedent(
"""\
import chalk from "#myChalk";
"""
),
}
)
pkg_tgt = rule_runner.get_target(Address("src/js/lib", relative_file_path="index.js"))
addresses = rule_runner.request(
InferredDependencies,
[InferJSDependenciesRequest(JSSourceInferenceFieldSet.create(pkg_tgt))],
).include
assert set(addresses) == {Address("src/js", generated_name="chalk")}
def test_infers_third_party_package_json_field_js_source_dependency_with_import_subpaths_with_star_replacements(
rule_runner: RuleRunner,
) -> None:
rule_runner.write_files(
{
"src/js/BUILD": "package_json()",
"src/js/package.json": given_package(
"ham",
"0.0.1",
main="lib/index.js",
dependencies={"chalk": "5.0.2"},
imports={"#myChalk/*.js": "chalk/stuff/*.js"},
),
"src/js/lib/BUILD": "javascript_sources()",
"src/js/lib/index.js": dedent(
"""\
import chalk from "#myChalk/index.js";
"""
),
}
)
pkg_tgt = rule_runner.get_target(Address("src/js/lib", relative_file_path="index.js"))
addresses = rule_runner.request(
InferredDependencies,
[InferJSDependenciesRequest(JSSourceInferenceFieldSet.create(pkg_tgt))],
).include
assert set(addresses) == {Address("src/js", generated_name="chalk")}
def test_infers_first_party_package_json_field_js_source_dependency(
rule_runner: RuleRunner,
) -> None:
rule_runner.write_files(
{
"src/js/a/BUILD": "package_json()",
"src/js/a/package.json": given_package("ham", "0.0.1"),
"src/js/a/lib/BUILD": "javascript_sources()",
"src/js/a/lib/index.js": dedent(
"""\
import { x } from "spam";
"""
),
"src/js/b/BUILD": "package_json()",
"src/js/b/package.json": given_package("spam", "0.0.1"),
"src/js/b/lib/BUILD": "javascript_sources()",
"src/js/b/lib/index.js": "const x = 2;",
}
)
pkg_tgt = rule_runner.get_target(Address("src/js/a/lib", relative_file_path="index.js"))
addresses = rule_runner.request(
InferredDependencies,
[InferJSDependenciesRequest(JSSourceInferenceFieldSet.create(pkg_tgt))],
).include
assert set(addresses) == {Address("src/js/b", generated_name="spam")}
def test_infers_first_party_package_json_field_js_source_dependency_with_import_subpaths(
rule_runner: RuleRunner,
) -> None:
rule_runner.write_files(
{
"src/js/a/BUILD": "package_json()",
"src/js/a/package.json": given_package("ham", "0.0.1", imports={"#spam": "spam"}),
"src/js/a/lib/BUILD": "javascript_sources()",
"src/js/a/lib/index.js": dedent(
"""\
import { x } from "#spam";
"""
),
"src/js/b/BUILD": "package_json()",
"src/js/b/package.json": given_package("spam", "0.0.1"),
"src/js/b/lib/BUILD": "javascript_sources()",
"src/js/b/lib/index.js": "const x = 2;",
}
)
pkg_tgt = rule_runner.get_target(Address("src/js/a/lib", relative_file_path="index.js"))
addresses = rule_runner.request(
InferredDependencies,
[InferJSDependenciesRequest(JSSourceInferenceFieldSet.create(pkg_tgt))],
).include
assert set(addresses) == {Address("src/js/b", generated_name="spam")}
def test_infers_first_party_package_json_field_js_source_dependency_with_starred_import_subpaths(
rule_runner: RuleRunner,
) -> None:
rule_runner.write_files(
{
"src/js/a/BUILD": "package_json()",
"src/js/a/package.json": given_package(
"ham", "0.0.1", imports={"#spam/*.js": "spam/lib/*.js"}
),
"src/js/a/lib/BUILD": "javascript_sources()",
"src/js/a/lib/index.js": dedent(
"""\
import { x } from "#spam/index.js";
"""
),
"src/js/b/BUILD": "package_json()",
"src/js/b/package.json": given_package("spam", "0.0.1"),
"src/js/b/lib/BUILD": "javascript_sources()",
"src/js/b/lib/index.js": "const x = 2;",
}
)
pkg_tgt = rule_runner.get_target(Address("src/js/a/lib", relative_file_path="index.js"))
addresses = rule_runner.request(
InferredDependencies,
[InferJSDependenciesRequest(JSSourceInferenceFieldSet.create(pkg_tgt))],
).include
assert set(addresses) == {Address("src/js/b", generated_name="spam")}
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2015 Bitergia
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# Authors :
# Luis Cañas-Díaz <lcanas@bitergia.com>
#
#
# This script automates the checkout of Git repositories based on two lists,
# one with all the repos and the other with a blacklist
import sys
import logging
import subprocess
import os
import MySQLdb
import shutil
import git
from optparse import OptionGroup, OptionParser
TOOLS = {
'rremoval': '/usr/local/bin/rremoval'
}
PROGRAM_NAME = "Reposaurs"
# Some ideas for the future:
# - recover mode: if repo is in database and not in scmdir, fetch it
def connect_db():
try:
if opts.dbpassword:
db = MySQLdb.connect(host="localhost", port=3306, user=opts.dbuser,
passwd=opts.dbpassword, db=opts.dbname)
else:
db = MySQLdb.connect(host="localhost", port=3306, user=opts.dbuser,
db=opts.dbname)
cursor = db.cursor()
return cursor
except MySQLdb.Error:
logger.error("There was a problem in connecting to the database")
print "\nOups! There was a problem in connecting to the database." +\
"\nPlease ensure that the database exists on the local host system "+ \
"and the MySQL service is running.\n"
raise MySQLdb.Error
#except MySQLdb.Warning:
# pass
def checkout_repositories(repos, opts):
for r in repos:
checkout_single_repo(r,opts)
def checkout_single_repo(repo, opts):
def _get_dir_name(url):
url = url.replace('https://','')
url = url.replace('http://','')
url = url.replace('git@','')
url = url.replace('/','__')
url = url.replace(':','__')
return url
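    # e.g. (illustrative): "https://github.com/org/repo" becomes "github.com__org__repo"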
"""def _add_fake_user(url):
# we add a fake user to avoid get stuck in the Authentication
# message when url is a private repo or it does not exist
if url.rfind("https:") == 0:
url = url.replace("https://","https://fakeuser:fakepass@")
elif url.rfind("http:") == 0:
url = url.replace("http://","http://fakeuser:fakepass@")
return url"""
# checkout the remote repos to opts.scmdir
#url = _add_fake_user(repo)
url = repo
repo_dir = opts.scmdir + _get_dir_name(repo)
logger.debug("cloning %s to %s" % (url, repo_dir))
if os.path.isdir(repo_dir):
logger.error("destination directory exists: %s" % repo_dir)
else:
try:
os.environ['GIT_ASKPASS'] = '/bin/echo'
git.Repo.clone_from(url, repo_dir)
except git.GitCommandError:
logger.error("error cloning repo %s to %s" % (url, repo_dir))
#raise
def encode_repositories(tool, source, repositories):
if tool == 'bicho' or tool == 'gerrit':
return [source + '_' + repo for repo in repositories]
else:
return repositories
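# e.g. (illustrative): encode_repositories('bicho', 'github', ['some-repo']) returns ['github_some-repo']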
def read_repository_file(filepath):
with open(filepath, 'r') as fd:
repositories = fd.readlines()
return repositories
def read_repositories_files(whitelist_url, blacklist_url):
# return content both for total list and blacklist
import urllib3
http = urllib3.PoolManager()
w = http.request('GET', whitelist_url)
b = http.request('GET', blacklist_url)
# format is plain text
logger.debug("Data read from %s and %s" % (whitelist_url, blacklist_url))
return w.data.split(), b.data.split()
def _get_db_repos():
cursor = connect_db()
cursor.execute("SELECT uri FROM repositories")
data = []
db_rep = [ row[0] for row in cursor.fetchall()]
cursor.close()
return db_rep
def get_current_repositories(opts):
#
dir_rep = _get_scm_repos(opts.scmdir)
db_rep = _get_db_repos()
logger.info("%s git clone directories" % len(dir_rep))
logger.info("%s repositories in the database" % len(db_rep))
repos_with_clone = set(dir_rep.keys())
repos_in_db = set(db_rep)
logger.info("%s repos with git directory cloned not stored in database" % len(repos_with_clone - repos_in_db))
logger.info("%s repos in database without git clone directory" % len(repos_in_db - repos_with_clone))
repos = {}
for r in db_rep:
if dir_rep.has_key(r):
repos[r] = dir_rep[r]
else:
repos[r] = None
logger.warning("repository %s does not have an associated git clone" % r)
if opts.recovery_mode:
logger.info("recovering clone for repository %s" % r)
checkout_single_repo(r, opts)
return repos
def _get_fetch_url(repo_dir):
# Gets the Fetch URL for a git clone given
#FIXME use the Git library to get this
os.chdir(repo_dir)
os.environ['GIT_ASKPASS'] = '/bin/echo'
remote = subprocess.Popen(['git','remote','show','origin'],stdout=subprocess.PIPE)
grep = subprocess.Popen(['grep', 'Fetch'],stdin=remote.stdout, stdout=subprocess.PIPE)
remote.wait()
proc = subprocess.Popen(['cut', '-f5', '-d',' '],stdin=grep.stdout,stdout=subprocess.PIPE)
grep.wait()
try:
url = proc.stdout.readline().split()[0]
except IndexError:
url = None
logger.error("could not get Fetch URL from %s" % repo_dir)
return url
def _get_scm_repos(dir):
all_repos = {}
##sub_repos = {}
#if (dir == ''): dir = scm_dir
if not os.path.isdir(dir): return all_repos
repos = os.listdir(dir)
for r in repos:
#repo_dir_svn = os.path.join(dir,r,".svn")
repo_dir_git = os.path.join(dir,r,".git")
if os.path.isdir(repo_dir_git): #or os.path.isdir(repo_dir_svn):
url = _get_fetch_url(os.path.join(dir,r))
all_repos[url] = os.path.join(dir,r)
logger.debug(" %s with origin %s" % (os.path.join(dir,r), url))
sub_repos = _get_scm_repos(os.path.join(dir,r))
#for sub_repo in sub_repos:
all_repos = dict(all_repos.items() + sub_repos.items())
return all_repos
def read_options():
# Generic function used by report_tool.py and other tools to analyze the
# information in databases. This contains a list of command line options
parser = OptionParser(usage="usage: %prog [options]",
version="%prog 0.1",
conflict_handler="resolve")
parser.add_option("-d", "--database",
action="store",
dest="dbname",
help="Database where information is stored")
parser.add_option("-u","--dbuser",
action="store",
dest="dbuser",
default="root",
help="Database user")
parser.add_option("-p","--dbpassword",
action="store",
dest="dbpassword",
default="",
help="Database password")
parser.add_option("-m", "--mode",
type="choice",
choices=["soft","hard"],
dest="mode",
help="soft, hard mode")
parser.add_option("-w","--whilelist",
action="store",
dest="whilelist_url",
help="URL of whilelist file")
parser.add_option("-b","--blacklist",
action="store",
dest="blacklist_url",
help="URL of blacklist file")
parser.add_option("-l","--log_file",
action="store",
dest="log_file",
help="path of log file")
parser.add_option("-s","--scmdir",
action="store",
dest="scmdir",
help="Path for git clones")
parser.add_option("-r","--recover",
action="store_true",
dest="recovery_mode",
help="checkout repos if they are missing from scmdir")
parser.add_option("-g","--debug",
action="store_true",
dest="debug",
help="sets debug mode")
(opts, args) = parser.parse_args()
return opts
def remove_repositories(repositories, db_user, db_pass, database, tool, current_repos = None):
if tool == 'gerrit':
tool = 'bicho'
for r in repositories:
# Remove not found projects.
# WARNING: if a repository name is different from the one in the database
# list of repositories, this piece of code may remove all
# of the repositories in the database.
# An example would be how Gerrit returns the name of the projects, while
# Bicho stores such information in URL format.
proc = subprocess.Popen([TOOLS['rremoval'], "-u", db_user, "-p", db_pass,
"-d", database, "-b", tool, "-r", r],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output = proc.communicate()
logger.info("%s removed from database" % (r))
if tool == 'cvsanaly':
target_dir = current_repos[r]
if target_dir is not None:
try:
shutil.rmtree(target_dir)
logger.info("directory %s removed" % target_dir)
except OSError as exc:
if os.path.isdir(target_dir):
logger.error("directory %s couldn't be removed" % target_dir)
else:
logger.info("directory %s already removed by someone else" % target_dir)
else:
logger.warning("directory not present for repo %s" % (r))
def set_up_logger(level, filename):
logger = logging.getLogger(__name__)
logger.setLevel(level)
# create a file handler
handler = logging.FileHandler(filename)
handler.setLevel(level)
# create a logging format
formatter = logging.Formatter("[%(asctime)s] - %(levelname)s - %(message)s", datefmt='%d/%b/%Y:%H:%M:%S')
handler.setFormatter(formatter)
# add the handlers to the logger
logger.addHandler(handler)
return logger
def update_repositories_list(db_user, db_pass, database, source, tool):
repos = get_curret_repositories(db_user, db_pass, database, tool)
whitelisted, blacklisted = read_repositories_files(tool)
repos = encode_repositories(tool, source, repos)
whitelisted = encode_repositories(tool, source, whitelisted)
blacklisted = encode_repositories(tool, source, blacklisted)
whitelisted = [r for r in whitelisted if r not in blacklisted]
# Remove blacklisted repositories if they are found in the database
blacklisted = [r for r in blacklisted if r in repos]
    # Check whether more than 5% of the total list is about to be removed.
# If so, a warning message is raised and no project is removed.
if len(whitelisted) == 0 or float(len(blacklisted))/float(len(whitelisted)) > 0.05:
main_log.info("WARNING: More than a 5% of the total number of repositories is required to be removed. No action.")
else:
remove_repositories(blacklisted, db_user, db_pass, database, tool)
    # Removing those repositories that are found in the database, but not in
# the list of repositories.
to_remove = [r for r in repos if r not in whitelisted]
main_log.info("Removing the following deprecated repositories from the database")
    if len(whitelisted) == 0 or float(len(to_remove)) / float(len(whitelisted)) >= 0.05:
main_log.info("WARNING: More than a 5% of the total number of repositories is required to be removed. No action.")
else:
remove_repositories(to_remove, db_user, db_pass, database, tool)
if __name__ == '__main__':
# read options
opts = read_options()
if opts.debug:
level = logging.DEBUG
else:
level = logging.INFO
logger = set_up_logger(level, opts.log_file)
logger.info("%s starts .." % PROGRAM_NAME)
# read files (remember, different formats)
    all_repos, blacklisted = read_repositories_files(opts.whitelist_url,
opts.blacklist_url)
whiteset = set(all_repos) - set(blacklisted)
whitelist = list(whiteset)
# get current repos from db [and scm/]
current_repos = get_current_repositories(opts)
# calculate whitelist
current_set = set(current_repos.keys())
#print("\nRepos to be removed from DB")
to_be_removed = list(current_set - whiteset)
# are the studied repos in our whitelist?
logger.info("%s repos to be removed" % (len(to_be_removed)))
for tb in to_be_removed: logger.debug("%s to be removed" % tb)
# remove repos (blacklist)
# are the repos from blacklist stored in our system?
remove_repositories(to_be_removed, opts.dbuser, opts.dbpassword,
opts.dbname, 'cvsanaly', current_repos)
# clone W or add W to a file
to_be_downloaded = list(whiteset - current_set)
logger.info("%s repos to be downloaded" % (len(to_be_downloaded)))
checkout_repositories(to_be_downloaded, opts)
logger.info("Finished")
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-03-18 21:00
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('common', '0005_prescription'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('treatment_sheets', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='txitem',
name='dose',
field=models.IntegerField(default=0),
),
migrations.AddField(
model_name='txitem',
name='freq',
field=models.CharField(choices=[('SID', 'SID'), ('BID', 'BID')], default='', max_length=5),
),
migrations.AddField(
model_name='txitem',
name='instruction',
field=models.TextField(default=''),
),
migrations.AddField(
model_name='txitem',
name='med',
field=models.ForeignKey(default=None, on_delete=django.db.models.deletion.CASCADE, to='common.Prescription'),
),
migrations.AddField(
model_name='txitem',
name='sheet',
field=models.ForeignKey(default=None, on_delete=django.db.models.deletion.CASCADE, to='treatment_sheets.TxSheet'),
),
migrations.AddField(
model_name='txitem',
name='unit',
field=models.CharField(choices=[('mL', 'mLs'), ('C', 'Capsules'), ('T', 'Tablets')], default='', max_length=5),
),
migrations.AddField(
model_name='txsheet',
name='comment',
field=models.TextField(default='', max_length=300),
),
migrations.AddField(
model_name='txsheet',
name='name',
field=models.CharField(default='', max_length=140),
),
migrations.AddField(
model_name='txsheet',
name='owner',
field=models.ForeignKey(default=None, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
]
|
import os.path as path
import pickle as pkl
from bibpdf.config import config
__author__ = 'Keji Li'
def find_folder(full_path):
folder, sub_folder = path.split(full_path)
if sub_folder != "Paper":
return find_folder(folder) + [sub_folder]
else:
return []
data = pkl.load(open(config['path']['legacy-database'], 'rb'))
entries = data[0]
count = 0
for citation in entries:
count += 1
file = entries[citation].pdf_file
if file is not None:
file_path = file.glob('root', file.name)
entries[citation].keywords |= set(find_folder(file_path)[0:-1])
file.move('root', '')
|
import logging
#
# def func(self, filename="text_logs_help_links.log", name="logs.log"):
#
# logging.basicConfig(filename=filename,
# level=logging.INFO, format='%(asctime)s %(levelname)-8s %(message)s')
# logger = logging.getLogger(name)
# console = logging.StreamHandler()
# logger.addHandler(console)
|
# Problem 10870: Fibonacci Number 5
# https://www.acmicpc.net/problem/10870
def star(x, y):
for n in range(x):
for m in range(y):
i = int(n/3)
if i==1:
if n%3==1 and m%3==1:
print(" ")
else:
print("*")
else :
star(i, i)
print("\n")
n = int(input())
star(n, n)
|
# Python Imports
from xml.dom import minidom
from threading import Thread, Timer
import time
# Local Imports
import globals
from helpers import *
from yamaha_xml import *
def send_any(self, value, action):
if action == "Put":
put_xml(self, value)
else:
#now find param
#to do this, parse value (originally passed)
param = value.split("GetParam")
param = param[0].split("<")
param = param[-1]
param = param[0:-1]
values = value.split("<" + param + ">")
values2 = values[1].split("</" + param + ">")
value = values[0] + "GetParam" + values2[1]
xml = get_xml(self, value)
xmldoc = minidom.parseString(xml)
return xmldoc.getElementsByTagName(param)[0].firstChild.data
def increase_volume(self, zone=-1, inc=0.5):
change_volume(self, zone, inc)
def decrease_volume(self, zone=-1, dec=0.5):
change_volume(self, zone, -1 * dec)
def change_volume(self, zone=-1, diff=0.0):
if abs(diff) == 0.5 or int(abs(diff)) in [1, 2, 5]:
# Faster volume method which uses the built in methods
param1 = 'Up' if diff > 0 else 'Down'
param2 = ' {0} dB'.format(int(abs(diff))) if abs(diff) != 0.5 else ''
zone_put_xml(self, zone, '<Volume><Lvl><Val>{0}{1}</Val><Exp></Exp><Unit></Unit></Lvl></Volume>'.format(param1, param2))
# Sleep for a little amount of time to ensure we do not get "stuck" sending too many calls in short succession
time.sleep(0.03)
else:
# Slower method that relies on get_volume() first
set_volume(self, zone, (get_volume(self) / 10.0) + diff)
def get_volume(self):
return get_status_int(self, 'Val')
def set_volume(self, zone=-1, value=-25.0):
zone_put_xml(self, zone, '<Volume><Lvl><Val>{0}</Val><Exp>1</Exp><Unit>dB</Unit></Lvl></Volume>'.format(int(value * 10.0)))
def set_max_volume(self, zone=-1, value=16.5):
zone_put_xml(self, zone, '<Volume><Max_Lvl><Val>{0}</Val><Exp>1</Exp><Unit>dB</Unit></Max_Lvl></Volume>'.format(int(value * 10.0)))
def set_init_volume(self, zone=-1, value=-50.0, mode="Off"):
zone_put_xml(self, zone, '<Volume><Init_Lvl><Mode>{1}</Mode><Lvl><Val>{0}</Val><Exp>1</Exp><Unit>dB</Unit></Lvl></Init_Lvl></Volume>'.format(int(value * 10.0), mode))
def set_pattern1(self, levels):
for speaker in levels:
put_xml(self, '<System><Speaker_Preout><Pattern_1><Lvl><{0}><Val>{1}</Val><Exp>1</Exp><Unit>dB</Unit></{0}></Lvl></Pattern_1></Speaker_Preout></System>'.format(speaker[0], int(speaker[1]*10)))
def set_bass(self, zone=-1, value=-0.0):
zone_put_xml(self, zone, '<Sound_Video><Tone><Bass><Val>{0}</Val><Exp>1</Exp><Unit>dB</Unit></Bass></Tone></Sound_Video>'.format(int(value * 10.0)))
def set_treble(self, zone=-1, value=-0.0):
zone_put_xml(self, zone, '<Sound_Video><Tone><Treble><Val>{0}</Val><Exp>1</Exp><Unit>dB</Unit></Treble></Tone></Sound_Video>'.format(int(value * 10.0)))
def mute_on(self, zone=-1):
zone_put_xml(self, zone, '<Volume><Mute>On</Mute></Volume>')
def mute_off(self, zone=-1):
zone_put_xml(self, zone, '<Volume><Mute>Off</Mute></Volume>')
def get_mute(self, zone=-1):
return get_status_param_is_on(self, 'Mute', zone)
def power_on(self, zone=-1):
zone_put_xml(self, zone, '<Power_Control><Power>On</Power></Power_Control>')
def power_off(self, zone=-1):
zone_put_xml(self, zone, '<Power_Control><Power>Off</Power></Power_Control>')
def power_standby(self, zone=-1):
zone_put_xml(self, zone, '<Power_Control><Power>Standby</Power></Power_Control>')
def toggle_on_standby(self, zone=-1):
zone_put_xml(self, zone, '<Power_Control><Power>On/Standby</Power></Power_Control>')
def toggle_mute(self, zone=-1):
if get_mute(self, zone):
mute_off(self, zone)
else:
mute_on(self, zone)
def change_source(self, source, zone=-1):
#first look to see if the source has been renamed
for s in self.AVAILABLE_SOURCES_RENAME:
if source == s[1]:
source = s[0]
zone_put_xml(self, zone, '<Input><Input_Sel>{0}</Input_Sel></Input>'.format(source))
def feature_video_out(self, feature, source):
#first look to see if the source has been renamed
for s in self.AVAILABLE_SOURCES_RENAME:
if source == s[1]:
source = s[0]
    #then check whether the feature name has been renamed
for s in self.AVAILABLE_SOURCES_RENAME:
if feature == s[1]:
feature = s[0]
put_xml(self, '<System><Input_Output><Assign><Video_Out><{0}>{1}</{0}></Video_Out></Assign></Input_Output></System>'.format(feature, source))
def source_audio_in(self, audio, video):
#first look to see if the source has been renamed
for s in self.AVAILABLE_SOURCES_RENAME:
if audio == s[1]:
audio = s[0]
    #then check whether the video input has been renamed
for s in self.AVAILABLE_SOURCES_RENAME:
if video == s[1]:
video = s[0]
put_xml(self, '<System><Input_Output><Assign><Audio_In><{0}>{1}</{0}></Audio_In></Assign></Input_Output></System>'.format(video, audio))
def wallpaper(self, pic):
put_xml(self, '<System><Misc><Display><Wall_Paper>{0}</Wall_Paper></Display></Misc></System>'.format(pic))
def DisplayDimmer(self, level):
put_xml(self, '<System><Misc><Display><FL><Dimmer>{0}</Dimmer></FL></Display></Misc></System>'.format(level))
def straight(self, zone=-1):
zone_put_xml(self, zone, '<Surround><Program_Sel><Current><Straight>On</Straight><Sound_Program>Straight</Sound_Program></Current></Program_Sel></Surround>')
def surround_decode(self, zone=-1):
zone_put_xml(self, zone, '<Surround><Program_Sel><Current><Straight>Off</Straight><Sound_Program>Surround Decoder</Sound_Program></Current></Program_Sel></Surround>')
def toggle_straight_decode(self, zone=-1):
if get_straight(self, zone):
surround_decode(self, zone)
else:
straight(self, zone)
def get_straight(self, zone=-1):
return get_status_param_is_on(self, 'Straight', zone)
def channel7_on(self, zone=-1): # McB 1/11/2014 - Turn 7-channel mode on and off
zone_put_xml(self, zone, '<Surround><Program_Sel><Current><Sound_Program>7ch Stereo</Sound_Program></Current></Program_Sel></Surround>')
def channel7_off(self, zone=-1):
zone_put_xml(self, zone, '<Surround><Program_Sel><Current><Sound_Program>Standard</Sound_Program></Current></Program_Sel></Surround>')
def set_enhancer(self, arg, zone=-1):
zone_put_xml(self, zone, '<Surround><Program_Sel><Current><Enhancer>{0}</Enhancer></Current></Program_Sel></Surround>'.format(arg))
def get_enhancer(self, zone=-1):
return get_status_param_is_on(self, 'Enhancer', zone)
def toggle_enhancer(self):
if get_enhancer(self):
set_enhancer(self, "Off")
else:
set_enhancer(self, "On")
def set_sleep(self, arg, zone=-1):
zone_put_xml(self, zone, '<Power_Control><Sleep>{0}</Sleep></Power_Control>'.format(arg))
def set_radio_preset(self, preset):
put_xml(self, '<Tuner><Play_Control><Preset><Preset_Sel>{0}</Preset_Sel></Preset></Play_Control></Tuner>'.format(preset))
def get_radio_band(self):
return get_tuner_string(self, 'Band')
def toggle_radio_amfm(self):
if get_radio_band(self) == 'FM':
set_radio_band(self, 'AM')
else:
set_radio_band(self, 'FM')
def set_radio_band(self, band):
put_xml(self, '<Tuner><Play_Control><Tuning><Band>{0}</Band></Tuning></Play_Control></Tuner>'.format(band))
def next_radio_preset(self):
put_xml(self, '<Tuner><Play_Control><Preset><Preset_Sel>Up', close_xml=True)
def prev_radio_preset(self):
put_xml(self, '<Tuner><Play_Control><Preset><Preset_Sel>Down', close_xml=True)
def modify_radio_preset(self, diff, turn_on, wrap):
"""
Deprecated
"""
oldpreset = get_tuner_int(self, 'Preset_Sel')
preset = oldpreset + diff
set_radio_preset(self, preset)
if turn_on:
is_on = is_radio_on(self)
if not is_on:
            change_source(self, 'TUNER')
if wrap and (not turn_on or is_on):
count = get_radio_preset_count(self)
if diff > 0 and preset > count:
preset = 1
set_radio_preset(self, preset)
elif diff < 0 and preset < 1:
preset = count
set_radio_preset(self, preset)
def get_radio_preset_count(**kwargs):
"""
Currently broken
"""
xml = get_tuner_presets(self, **kwargs)
if kwargs.get('print_xml', False):
print xml
xmldoc = minidom.parseString(xml)
count = 0
done = False
while not done and count <= 40:
num = "Number_{0}".format(count + 1)
value = xmldoc.getElementsByTagName(num)[0].getElementsByTagName('Status')[0].firstChild.data
if value == 'Exist':
count += 1
else:
done = True
return count
def is_radio_on(self):
return get_status_string(self, 'Input_Sel') == "TUNER"
def radio_freq(self, updown):
if get_radio_band(self) == 'FM':
val = '<FM><Val>{0}</Val></FM>'.format(updown)
else:
val = '<AM><Val>{0}</Val></AM>'.format(updown)
put_xml(self, '<Tuner><Play_Control><Tuning><Freq>{0}</Freq></Tuning></Play_Control></Tuner>'.format(val))
def set_radio_freq(self, freq, band):
if band == 'FM':
put_xml(self, '<Tuner><Play_Control><Tuning><Freq><FM><Val>{0}</Val></FM></Freq></Tuning></Play_Control></Tuner>'.format(int(freq*100)))
else:
put_xml(self, '<Tuner><Play_Control><Tuning><Freq><AM><Val>{0}</Val></AM></Freq></Tuning></Play_Control></Tuner>'.format(int(freq)))
def set_scene(self, scene_num, zone=-1):
zone_put_xml(self, zone, '<Scene><Scene_Sel>Scene {0}</Scene_Sel></Scene>'.format(scene_num))
def send_code(self, code):
put_xml(self, '<System><Misc><Remote_Signal><Receive><Code>{0}</Code></Receive></Remote_Signal></Misc></System>'.format(code))
def set_active_zone(self, zone):
self.active_zone = zone
print "Active Zone: Zone", zone if zone > -1 else chr(-1 * zone)
def get_source_name(self, zone=-1):
return get_status_string(self, "Input_Sel", zone)
def get_system_config(self, **kwargs):
xml = get_config(self, **kwargs)
xmldoc = minidom.parseString(xml)
return xmldoc
def get_system_io_vol_trim(self):
sources = []
xml = get_xml(self, '<System><Input_Output><Volume_Trim>GetParam</Volume_Trim></Input_Output></System>')
xmldoc = minidom.parseString(xml)
for item in xmldoc.getElementsByTagName('Val'):
sources.append([item.parentNode.tagName, item.firstChild.data])
return sources
def set_system_io_vol_trim(self, sources):
for source in sources:
put_xml(self, '<System><Input_Output><Volume_Trim><{0}><Val>{1}</Val><Exp>1</Exp><Unit>dB</Unit></{0}></Volume_Trim></Input_Output></System>'.format(source[0], source[1]))
def get_main_zone_inputs(self):
xml = get_xml(self, '<Main_Zone><Input><Input_Sel_Item>GetParam</Input_Sel_Item></Input></Main_Zone>')
xmldoc = minidom.parseString(xml)
return xmldoc
def get_availability_dict(self, items_to_check):
xml = get_config(self)
xmldoc = minidom.parseString(xml)
res = {}
for item in items_to_check:
try:
value = xmldoc.getElementsByTagName(item)[0].firstChild.data
except:
value = None
res[item] = value
return res
|
import pygame
from engine.gameobject import GameObject
class CardInfo(GameObject):
def __init__(self, card, parent, app):
GameObject.__init__(self, None, (55, 140), parent, app)
self.card = card
self.last_power = card.power
self.text_power = self.app.font.render(f"{self.card.power} / {self.card.power}", True, (10, 10, 10))
self.rec_power = self.text_power.get_rect()
self.text_symbol = self.app.font.render(self.card.symbol, True, (10, 10, 10))
self.rec_symbol = self.text_symbol.get_rect()
def update(self, delta_time):
if self.last_power != self.card.power:
self.last_power = self.card.power
self.text_power = self.app.font.render(f"{self.card.power} / {self.card.power}", True, (10, 10, 10))
self.rec_power = self.text_power.get_rect()
def draw(self, screen):
global_coords = self.transform.get_global_coords()
color = (50, 200, 100) if self.card.color == "green" else (200, 200, 200)
pygame.draw.circle(screen, color, global_coords, 40)
self.rec_power.center = (global_coords[0], global_coords[1] - 15)
self.rec_symbol.center = (global_coords[0], global_coords[1] + 15)
screen.blit(self.text_power, self.rec_power)
screen.blit(self.text_symbol, self.rec_symbol)
|
import eventClass
import tweepy
import auth
import database
api1 = tweepy.API(auth.auth)
#print(api1.direct_messages(count=100)[0].text)x
#print(api1.direct_messages(count=100, since_id=0)[1].text)
numDM = len(api1.direct_messages())
#print("Number of DMs: ", numDM)
###print(api1.direct_messages(count=200)[i].sender_screen_name)
def checkDM():
for i in range (0, numDM):
print("i is: ", i, "numDM is: ", numDM)
tweetID=api1.direct_messages(count=200)[i].id
author=api1.direct_messages(count=200)[i].sender_screen_name
#print(i)
text = "Formatting error, please try again"
#print(api1.direct_messages(count=200)[i].text)
dm=api1.direct_messages(count=200)[i].text
with open('profanityList.txt') as f:
for line in f:
dm=dm.lower()
if dm.find(line)!=-1:
text1 = "Your message does not pass the profanity filter"
api1.send_direct_message(screen_name=author, text=text1)
api1.destroy_direct_message(tweetID)
return False
dm = api1.direct_messages(count=200)[i].text
print (dm.find("EVENT: "))
if (dm.find("EVENT: ")==0):
postTweet(addEvent(i))
api1.destroy_direct_message(tweetID)
elif (dm.find("INFO: ")==0):
info(i)
api1.destroy_direct_message(tweetID)
else:
api1.send_direct_message(screen_name=author,text="1")
api1.destroy_direct_message(tweetID)
def info(x=0):
dm=api1.direct_messages(count=200)[x].text
tweetID=api1.direct_messages(count=200)[x].id
author=api1.direct_messages(count=200)[x].sender_screen_name
text = "Formatting error, please try again"
if not (dm.find("INFO: ")==-1):
if not (dm.find("NAME: ")==-1):
nameStr = dm.find("NAME: ")
eventList = database.nameLookup(dm[nameStr+len("NAME: "):])
tweetText="";
for x in range(0, len(eventList)):
tweetText += "#" + eventList[x].eventName
elif not (dm.find("DATE: ")==-1):
dateStr = dm.find("DATE: ")
database.dateLookup(dm[dateStr+len("DATE: "):])
elif not (dm.find("CAT: ")==-1):
catStr = dm.find("CAT: ")
database.catLookup(dm[catStr+len("CAT: "):])
else:
api1.send_direct_message(screen_name=author,text=text)
return False
def addEvent(x=0):
dm=api1.direct_messages(count=200)[x].text
tweetID = api1.direct_messages(count=200)[x].id
author = api1.direct_messages(count=200)[x].sender_screen_name
text = "Formatting error, please try again"
print(dm)
if not (dm.find("EVENT: ")==-1 or dm.find("DATE: ")==-1 or dm.find("DESC: ")==-1 or dm.find("WEB: ")==-1 or dm.find("CATE: ")==-1 or dm.find("FOOD: ")==-1):
eventStr = dm.find("EVENT: ")
dateStr = dm.find("DATE: ")
descStr = dm.find("DESC: ")
webStr = dm.find("WEB: ")
catStr = dm.find("CATE: ")
foodStr = dm.find("FOOD: ")
#print (eventStr, dateStr, descStr, webStr, catStr, foodStr)
newEvent = eventClass.Event(eventName=dm[eventStr+len("EVENT: "):dateStr-2], eventDate=dm[dateStr+len("DATE: "):descStr-2], eventDesc=dm[descStr+len("DESC: "):webStr-2], eventWeb=dm[webStr+len("WEB: "):catStr-2], eventCategory=dm[catStr+len("CATE: "):foodStr-2], eventFood=dm[foodStr+len("FOOD: "):])
#print(newEvent.eventName)
return newEvent
else:
api1.send_direct_message(screen_name=author, text=text)
#destroy dm
return False
def postTweet(event):
tweetText = "Hey everyone, " + event.eventName + " is an upcoming event on " + event.eventDate + ". Here's the description: " + event.eventDesc + ", and here is the event's website: " + event.eventWeb + " Food status: " + event.eventFood + " #" + event.eventCategory + "Event"
api1.update_status(tweetText)
checkDM()
|
"""
Model objects for mimic flavors.
"""
from __future__ import absolute_import, division, unicode_literals
import attr
@attr.s
class Flavor(object):
"""
A Flavor object
"""
flavor_id = attr.ib()
tenant_id = attr.ib()
name = attr.ib()
ram = attr.ib()
vcpus = attr.ib()
rxtx = attr.ib()
disk = attr.ib()
static_defaults = {
"swap": "",
"OS-FLV-EXT-DATA:ephemeral": 0,
}
def links_json(self, absolutize_url):
"""
Create a JSON-serializable data structure describing the links to this
flavor.
"""
return [
{
"href": absolutize_url("v2/{0}/flavors/{1}"
.format(self.tenant_id, self.flavor_id)),
"rel": "self"
},
{
"href": absolutize_url("{0}/flavors/{1}"
.format(self.tenant_id, self.flavor_id)),
"rel": "bookmark"
}
]
def brief_json(self, absolutize_url):
"""
Brief JSON-serializable version of this flavor, for the non-details
list flavors request.
"""
return {
"id": self.flavor_id,
"links": self.links_json(absolutize_url),
"name": self.name
}
def detailed_json(self, absolutize_url):
"""
Long-form JSON-serializable object representation of this flavor, as
returned by either a GET on this individual flavor or a member in the
list returned by the list-details request.
"""
template = self.static_defaults.copy()
template.update({
"id": self.flavor_id,
"links": self.links_json(absolutize_url),
"name": self.name,
"ram": self.ram,
"vcpus": self.vcpus,
"rxtx_factor": self.rxtx,
"disk": self.disk,
"OS-FLV-WITH-EXT-SPECS:extra_specs": self.extra_specs_json()
})
return template
class RackspaceStandardFlavor(Flavor):
"""
A Rackspace standard flavor object representation
"""
flavors = {"512MB Standard Instance": {"id": "2", "ram": 512, "vcpus": 1, "rxtx_factor": 80,
"disk": 20},
"1GB Standard Instance": {"id": "3", "ram": 1024, "vcpus": 1, "rxtx_factor": 120,
"disk": 40},
"2GB Standard Instance": {"id": "4", "ram": 2048, "vcpus": 2, "rxtx_factor": 240,
"disk": 80},
"4GB Standard Instance": {"id": "5", "ram": 4096, "vcpus": 2, "rxtx_factor": 400,
"disk": 160},
"8GB Standard Instance": {"id": "6", "ram": 8192, "vcpus": 4, "rxtx_factor": 600,
"disk": 320},
"15GB Standard Instance": {"id": "7", "ram": 15360, "vcpus": 6, "rxtx_factor": 800,
"disk": 620},
"30GB Standard Instance": {"id": "8", "ram": 30720, "vcpus": 8, "rxtx_factor": 1200,
"disk": 1200}}
def extra_specs_json(self):
"""
Create a JSON-serializable data structure describing
``OS-FLV-WITH-EXT-SPECS:extra_specs`` for a standard flavor.
"""
return {
"class": "standard1",
"policy_class": "standard_flavor"
}
class RackspaceComputeFlavor(Flavor):
"""
    A Rackspace compute flavor object representation
"""
flavors = {"3.75 GB Compute v1": {"id": "compute1-4", "ram": 3840, "vcpus": 2, "rxtx_factor": 312.5,
"disk": 0},
"7.5 GB Compute v1": {"id": "compute1-8", "ram": 7680, "vcpus": 4, "rxtx_factor": 625,
"disk": 0},
"15 GB Compute v1": {"id": "compute1-15", "ram": 15360, "vcpus": 8, "rxtx_factor": 1250,
"disk": 0},
"30 GB Compute v1": {"id": "compute1-30", "ram": 30720, "vcpus": 16, "rxtx_factor": 2500,
"disk": 0},
"60 GB Compute v1": {"id": "compute1-60", "ram": 61440, "vcpus": 32, "rxtx_factor": 5000,
"disk": 0}}
def extra_specs_json(self):
"""
Create a JSON-serializable data structure describing
``OS-FLV-WITH-EXT-SPECS:extra_specs`` for a compute flavor.
"""
return {
"class": "compute1",
"policy_class": "compute_flavor"
}
class RackspaceGeneralFlavor(Flavor):
"""
A Rackspace general flavor object representation
"""
flavors = {"1 GB General Purpose v1": {"id": "general1-1", "ram": 1024, "vcpus": 1,
"rxtx_factor": 200, "disk": 20},
"2 GB General Purpose v1": {"id": "general1-2", "ram": 2048, "vcpus": 2,
"rxtx_factor": 400, "disk": 40},
"4 GB General Purpose v1": {"id": "general1-4", "ram": 4096, "vcpus": 4,
"rxtx_factor": 800, "disk": 80},
"8 GB General Purpose v1": {"id": "general1-8", "ram": 8192, "vcpus": 8,
"rxtx_factor": 1600, "disk": 160}}
def extra_specs_json(self):
"""
Create a JSON-serializable data structure describing
``OS-FLV-WITH-EXT-SPECS:extra_specs`` for a general purpose flavor.
"""
return {
"class": "general1",
"policy_class": "general_flavor"
}
class RackspaceIOFlavor(Flavor):
"""
A Rackspace IO flavor object representation
"""
flavors = {"15 GB I/O v1": {"id": "io1-15", "ram": 15360, "vcpus": 4, "rxtx_factor": 1250,
"disk": 40},
"30 GB I/O v1": {"id": "io1-30", "ram": 30720, "vcpus": 8, "rxtx_factor": 2500,
"disk": 40},
"60 GB I/O v1": {"id": "io1-60", "ram": 61440, "vcpus": 16, "rxtx_factor": 5000,
"disk": 40},
"90 GB I/O v1": {"id": "io1-90", "ram": 92160, "vcpus": 24, "rxtx_factor": 7500,
"disk": 40},
"120 GB I/O v1": {"id": "io1-120", "ram": 122880, "vcpus": 32, "rxtx_factor": 10000,
"disk": 40}}
def extra_specs_json(self):
"""
Create a JSON-serializable data structure describing
        ``OS-FLV-WITH-EXT-SPECS:extra_specs`` for an IO flavor.
"""
return {
"class": "io1",
"policy_class": "io_flavor"
}
class RackspaceMemoryFlavor(Flavor):
"""
A Rackspace memory flavor object representation
"""
flavors = {"15 GB Memory v1": {"id": "memory1-15", "ram": 15360, "vcpus": 2, "rxtx_factor": 625,
"disk": 0},
"30 GB Memory v1": {"id": "memory1-30", "ram": 30720, "vcpus": 4, "rxtx_factor": 1250,
"disk": 0},
"60 GB Memory v1": {"id": "memory1-60", "ram": 61440, "vcpus": 8, "rxtx_factor": 2500,
"disk": 0},
"120 GB Memory v1": {"id": "memory1-120", "ram": 122880, "vcpus": 16, "rxtx_factor": 5000,
"disk": 0},
"240 GB Memory v1": {"id": "memory1-240", "ram": 245760, "vcpus": 32,
"rxtx_factor": 10000, "disk": 0}}
def extra_specs_json(self):
"""
Create a JSON-serializable data structure describing
        ``OS-FLV-WITH-EXT-SPECS:extra_specs`` for a memory flavor.
"""
return {
"class": "memory1",
"policy_class": "memory_flavor"
}
class RackspaceOnMetalFlavor(Flavor):
"""
A Rackspace onMetal flavor object representation
"""
flavors = {"OnMetal Compute v1": {"id": "onmetal-compute1", "ram": 32768, "vcpus": 20,
"rxtx_factor": 10000, "disk": 32},
"OnMetal IO v1": {"id": "onmetal-io1", "ram": 131072, "vcpus": 40,
"rxtx_factor": 10000, "disk": 32},
"OnMetal Memory v1": {"id": "onmetal-memory1", "ram": 524288, "vcpus": 24,
"rxtx_factor": 10000, "disk": 32}}
def extra_specs_json(self):
"""
Create a JSON-serializable data structure describing
``OS-FLV-WITH-EXT-SPECS:extra_specs`` for an onMetal flavor.
"""
return {
"quota_resources": "instances=onmetal-compute-v1-instances,ram=onmetal-compute-v1-ram",
"class": "onmetal",
"policy_class": "onmetal_flavor"
}
class RackspacePerformance1Flavor(Flavor):
"""
    A Rackspace performance flavor object representation
"""
flavors = {"1 GB Performance": {"id": "performance1-1", "ram": 1024, "vcpus": 1, "rxtx_factor": 200,
"disk": 20},
"2 GB Performance": {"id": "performance1-2", "ram": 2048, "vcpus": 2, "rxtx_factor": 400,
"disk": 40},
"4 GB Performance": {"id": "performance1-4", "ram": 4096, "vcpus": 4, "rxtx_factor": 800,
"disk": 40},
"8 GB Performance": {"id": "performance1-8", "ram": 8192, "vcpus": 8, "rxtx_factor": 1600,
"disk": 40}}
def extra_specs_json(self):
"""
Create a JSON-serializable data structure describing
``OS-FLV-WITH-EXT-SPECS:extra_specs`` for a performance1 flavor.
"""
return {
"class": "performance1",
"policy_class": "performance_flavor"
}
class RackspacePerformance2Flavor(Flavor):
"""
A Rackspace performance flavor object representation
"""
flavors = {"15 GB Performance": {"id": "performance2-15", "ram": 15360, "vcpus": 4,
"rxtx_factor": 1250, "disk": 40},
"30 GB Performance": {"id": "performance2-30", "ram": 30720, "vcpus": 8,
"rxtx_factor": 2500, "disk": 40},
"60 GB Performance": {"id": "performance2-60", "ram": 61440, "vcpus": 16,
"rxtx_factor": 5000, "disk": 40},
"90 GB Performance": {"id": "performance2-90", "ram": 92160, "vcpus": 24,
"rxtx_factor": 7500, "disk": 40},
"120 GB Performance": {"id": "performance2-120", "ram": 122880, "vcpus": 32,
"rxtx_factor": 10000, "disk": 40}}
def extra_specs_json(self):
"""
Create a JSON-serializable data structure describing
``OS-FLV-WITH-EXT-SPECS:extra_specs`` for a performance 2 flavor.
"""
return {
"class": "performance2",
"policy_class": "performance_flavor"
}
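# Minimal usage sketch (not part of mimic itself): the tenant id and URL prefix below are
# made-up values for illustration only.
if __name__ == "__main__":
    _example_flavor = RackspaceGeneralFlavor(
        flavor_id="general1-1",
        tenant_id="123456",
        name="1 GB General Purpose v1",
        ram=1024,
        vcpus=1,
        rxtx=200,
        disk=20,
    )
    # detailed_json() merges static_defaults with the per-flavor fields and extra_specs_json()
    print(_example_flavor.detailed_json(lambda suffix: "https://mimic.example.com/" + suffix))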
|
# hard
# BFS + pruning
from typing import List
from collections import deque
class Solution:
def minJump(self, jump: List[int]) -> int:
n = len(jump)
step = 0
        # mx marks the boundary: every spring to the left of mx has already been visited
        # index 0 is the starting position, so mx starts at 1
mx = 1
q = deque([0])
while q:
            # each pass of this loop processes the springs reachable within `step` jumps
tempSize = len(q)
for i in range(tempSize):
cur = q.popleft()
if cur + jump[cur] >= n:
return step + 1
if cur + jump[cur] >= mx:
q.append(cur + jump[cur])
                # traverse the left side starting from the first never-visited spring (pruning),
                # otherwise the solution gets TLE
for j in range(mx, cur):
q.append(j)
                # at this point everything to the left of cur has been visited,
                # so update mx
mx = max(mx, cur + 1)
step += 1
return step
# DP, iterating from back to front
class Solution:
def minJump(self, jump: List[int]) -> int:
dp = [0] * len(jump)
for i in range(len(jump)-1,-1,-1):
if i + jump[i] >= len(jump):
dp[i] = 1
else:
dp[i] = 1 + dp[i + jump[i]]
for j in range(i+1,len(jump)):
                # at this point each j after i has two options: keep jumping right from j, or jump left to i first
                if dp[j] <= dp[i]: # once this holds, positions beyond j can simply jump from j instead, so jumping from i is never better
break
else:
                    # a better option appears: first jump left to i, then continue jumping from there
dp[j] = dp[i] + 1
return dp[0]
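# Minimal usage sketch (not part of the original solutions): the input below is the LCP 09
# sample case, for which the expected minimum number of jumps is 3.
if __name__ == "__main__":
    print(Solution().minJump([2, 5, 1, 1, 1, 1]))  # -> 3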
|
## Santosh Khadka
# two.py
import one
def func():
print("Func() in two.py")
one.func()
if __name__ == "__main__":
# runs the code here if this file/program is being run directly - not called in another script
print("two.py is being run directly!")
else:
print("two.py has been imported!")
|
import csv
import sys
#type into command line on Biowulf:
#python compilation_filter.py ref_seq_with_IUPAC allele_table_1 ... allele_table_n
#
#ref_seq_with_IUPAC = reference sequence with IUPAC
# (to account for heterozygous SNPs unique to a specific macaque)
#allele_table_n = alleles frequency table from CRISPResso
#list of canonical nucleotides
#(for determining which nucleotides in the reference sequence are noncanonical
#and need to be replaced with IUPAC)
nt_lst=['A','T','G','C']
#dictionary for saving noncanonical nucleotides and their positions in the reference sequence
iupac_nts = {}
#variable for saving the command line argument number of the alleles frequency table
#that is currently being used
allele_table_num = 2
#dictionary of read numbers at different timepoints for each unique read sequence
read_num_dict = {}
#dictionary of read percentages at different timepoints for each unique read sequence
read_percent_dict = {}
#variable for saving the position in the list of read numbers/percentages
#within the dictionary of read numbers/percentages that is currently being updated
timepoint_num = 0
#list of zeroes that is used to construct
#list of read numbers at different timepoints for a unique read sequence
read_num_zeroes_lst = []
#list of zeroes that is used to construct
#list of read percentages at different timepoints for a unique read sequence
read_percent_zeroes_lst = []
#list of column titles (names of alleles frequency tables)
col_titles = []
#threshold value for the filter
#(for a read sequence to be kept, at least one timepoint's read percentage after editing
#needs to differ by more than the threshold value from the read percentage before editing)
filter_threshold = 0.1
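#e.g. (illustrative): with filter_threshold = 0.1, a sequence read at 40.00% before editing
#is kept only if some post-editing timepoint differs by more than 0.1 percentage points,
#such as 39.85% or 40.20%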
#variable used to count how many times
#the read percentage of a sequence at a timepoint after editing differs by more than
#the threshold value from the read percentage of the sequence before editing
similar_count = 0
#for each command line argument
for arg in sys.argv:
    #if the 2nd argument (if the reference sequence with IUPAC)
if sys.argv.index(arg) == 1:
#for each nucleotide in reference sequence that is noncanonical (not A, T, G, or C),
#save their position as a key and IUPAC notation as the corresponding value
#in the iupac_nts dictionary
iupac_ref = arg
for ref_nt_num,ref_nt in enumerate(iupac_ref):
if ref_nt not in nt_lst:
iupac_nts[ref_nt_num] = ref_nt
#if the 3rd or later argument (if an alleles frequency table)
if sys.argv.index(arg) > 1:
#open alleles frequency table
with open(sys.argv[allele_table_num],"U") as allele_table:
reader = csv.reader(allele_table, delimiter ='\t')
#for each row in the alleles frequency table
for row_num,allele_table_row in enumerate(reader):
#skip 1st row with column titles
if row_num == 0:
continue
#for all later rows with data (sequence, read number, read percentage),
#change the nucleotide(s) in the sequence to IUPAC notation
#if those nucleotide(s) are IUPAC notation in the reference sequence
#(with this info having been previously saved in iupac_nts dictionary)
else:
row_seq = list(allele_table_row[0])
for seq_nt_num,seq_nt in enumerate(row_seq):
if seq_nt_num in iupac_nts:
row_seq[seq_nt_num] = iupac_nts[seq_nt_num]
allele_table_row[0] = ''.join(row_seq)
#if the read sequence (after IUPAC) is the same as the reference sequence,
#or if CRISPResso has identified an insertion or deletion in the sequence
if allele_table_row[0] == sys.argv[1] or allele_table_row[3] =='False':
#if the read sequence is not already in the dictionary of read numbers,
if allele_table_row[0] not in read_num_dict:
#make a list of zeroes equal in length to the number of alleles frequency tables
for num in range(2,len(sys.argv)):
read_num_zeroes_lst.append(0)
#add a new key to the dictionary of read numbers equal to the read sequence
#and change the corresponding value to be equal to the list of zeroes
#then update the list to include the read number at the sequence's timepoint
read_num_dict[allele_table_row[0]] = read_num_zeroes_lst
read_num_dict[allele_table_row[0]][timepoint_num] = int(allele_table_row[8])
#make a list of zeroes equal in length to the number of alleles frequency tables
for num in range(2,len(sys.argv)):
read_percent_zeroes_lst.append(0)
#add a new key to the dictionary of read percents equal to the read sequence
#and change the corresponding value to be equal to the list of zeroes
#then update the list to include the read percent at the sequence's timepoint
read_percent_dict[allele_table_row[0]] = read_percent_zeroes_lst
read_percent_dict[allele_table_row[0]][timepoint_num] = float(allele_table_row[9])
#empty lists of zeroes
read_num_zeroes_lst = []
read_percent_zeroes_lst = []
#if the read sequence is already in the dictionary of read numbers,
else:
#update the value of the read sequence key to include the read number at the new timepoint
read_num_dict[allele_table_row[0]][timepoint_num]= read_num_dict[allele_table_row[0]][timepoint_num] + int(allele_table_row[8])
#update the value of the read sequence key to include the read percentage at the new timepoint
read_percent_dict[allele_table_row[0]][timepoint_num]= read_percent_dict[allele_table_row[0]][timepoint_num] + float(allele_table_row[9])
#move on to working with next allele frequency table inputted in command line
allele_table_num += 1
#input data into the next column of the tables of read numbers/percentages
timepoint_num += 1
#construct list of names of the alleles frequency tables
#(using a method independent of the file extension of the alleles frequency tables)
for num,arg in enumerate(sys.argv):
if num > 1:
col_title = arg.split('.')
col_title = col_title[:-1]
col_titles.append('.'.join(col_title))
#print out the list of alleles frequency table names as the column titles of new data file
print '\t'.join(col_titles)
#for each key in the dictionary of read percents
for read_seq in read_percent_dict:
#compare read percentage of sequence before editing with
#the read percentage of each timepoint after editing,
#and if the percentage of a timepoint after editing differs
#by more than the threshold value,
#then increase the counter variable (similar_count) by 1
for num,percent in enumerate(read_percent_dict[read_seq]):
if num == 0:
before_edit_percent = percent
if num > 0:
if abs(float(percent - before_edit_percent)) > filter_threshold:
similar_count += 1
#if the counter variable (similar_count) is greater than 0
#(if at least one timepoint's percentage after editing differs
#by more than the threshold value from the percentage before editing),
#then print out the key's corresponding value in the dictionary of read numbers
if similar_count > 0:
print read_seq,'\t', '\t'.join(map(str,read_num_dict[read_seq]))
#reset counter variable (similar_count) to 0
similar_count=0
|
import os
import sys
def convert_bytes(num):
for size in ['bytes', 'KB', 'MB', 'GB', 'TB']:
if num < 1024.0:
return "%3.1f %s" % (num, size)
num /= 1024.0
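# e.g. (illustrative): convert_bytes(123456789) returns '117.7 MB'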
def file_size(file_path):
if os.path.isfile(file_path):
print("This is file")
file_info = os.stat(file_path)
return convert_bytes(file_info.st_size)
elif os.path.isdir(file_path):
print("This is directory")
file_info = os.stat(file_path)
return convert_bytes(file_info.st_size)
def main():
if len(sys.argv) > 1:
file_path = sys.argv[1]
print("Size = ", file_size(file_path))
else:
print("Specify the path to file!")
main()
|
#!/usr/bin/env python
"""
Customised serialiser
"""
import os
import marshal
import ujson as json
ser_type = os.environ["ser_type"]
def load(ser_file_handle):
if ser_type == "marshal":
return marshal.load(ser_file_handle)
else:
return json.load(ser_file_handle)
def dump(obj, ser_file_handle):
if ser_type == "marshal":
marshal.dump(obj, ser_file_handle)
else:
json.dump(obj, ser_file_handle)
#def dumps(obj):
# return ser_obj.dumps(obj)
#
#def loads(obj):
# return ser_obj.dumps(obj)
|
from .basic_nodes import DerivedCSVProcessingNode, DerivedJSONProcessingNode, \
OriginalProcessingNode
from .view_nodes import DerivedPreviewProcessingNode
from .output_nodes import OutputToZipProcessingNode
from .report_nodes import ReportProcessingNode
ORDERED_NODE_CLASSES = [
ReportProcessingNode,
DerivedCSVProcessingNode,
DerivedJSONProcessingNode,
OutputToZipProcessingNode,
DerivedPreviewProcessingNode,
OriginalProcessingNode,
]
def collect_artifacts(artifacts, outputs, allowed_types=None):
for cls in ORDERED_NODE_CLASSES:
node = cls(artifacts, outputs)
ret = list(node.get_artifacts())
if allowed_types is not None:
ret = list(filter(lambda a: a.datahub_type in allowed_types, ret))
artifacts.extend(ret)
yield from ret
|
#! /usr/bin/python
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
raw_data = {'graph': ['Youtube', 'LiveJournal', 'Pokec', 'RMAT-19-32', 'RMAT-21-32'],
            'c1': [16.1344/16.1344, 90.5813/90.5813, 32.1934/32.1934, 12.6965/12.6965, 50.7354/50.7354],  # cache
            'c2': [16.1344/5.5074, 90.5813/48.601, 32.1934/20.7189, 12.6965/10.7273, 50.7354/42.7774],  # hash
            'c3': [16.1344/5.2948, 90.5813/47.7471, 32.1934/20.4841, 12.6965/10.6896, 50.7354/42.607],  # hub-buffer
            'c4': [16.1344/4.7289, 90.5813/45.5692, 32.1934/19.747, 12.6965/10.519, 50.7354/41.9917],
'c5': [16.1344/3.888, 90.5813/40.9439, 32.1934/17.9292, 12.6965/9.7478, 50.7354/38.9701],
'c6': [16.1344/1.4022, 90.5813/9.7264, 32.1934/3.8728, 12.6965/2.1245, 50.7354/8.6928],
'c7': [16.1344/1.4053, 90.5813/9.7447, 32.1934/3.9781, 12.6965/2.1248, 50.7354/8.7066],
'c8': [16.1344/11.203, 90.5813/116.173, 32.1934/50.658, 12.6965/27.400, 50.7354/109.523],
'c9': [16.1344/5.4649, 90.5813/51.455, 32.1934/22.1605, 12.6965/11.5924, 50.7354/46.5335]
}
df = pd.DataFrame(raw_data, columns = ['graph', 'c1', 'c2', 'c3', 'c4', 'c5', 'c6', 'c7', 'c8', 'c9'])
fuck_label=('c1', 'c2', 'c3', 'c4', 'c5', 'c6', 'c7', 'c8', 'c9')
# Setting the positions and width for the bars
pos = list(range(len(df['c1'])))
width = 0.1
print pos
cmap = plt.get_cmap('jet')
colors = cmap(np.linspace(0, 1.0, len(fuck_label)))
# Plotting the bars
fig, ax = plt.subplots(figsize=(10,5))
# Create a bar with pre_score data,
# in position pos,
plt.bar(pos,
#using df['pre_score'] data,
df['c1'],
# of width
width,
# with alpha 0.5
alpha=0.5,
# with color
color=colors[0],
# with label the first value in first_name
label=fuck_label[0])
# Create a bar with mid_score data,
# in position pos + some width buffer,
plt.bar([p + width for p in pos],
#using df['mid_score'] data,
df['c2'],
# of width
width,
# with alpha 0.5
alpha=0.5,
# with color
color=colors[1],
# with label the second value in first_name
label=fuck_label[1])
# Create a bar with post_score data,
# in position pos + some width buffer,
plt.bar([p + width*2 for p in pos],
#using df['post_score'] data,
df['c3'],
# of width
width,
# with alpha 0.5
alpha=0.5,
# with color
color=colors[2],
# with label the third value in first_name
label=fuck_label[2])
# Create a bar with post_score data,
# in position pos + some width buffer,
plt.bar([p + width*3 for p in pos],
#using df['post_score'] data,
df['c4'],
# of width
width,
# with alpha 0.5
alpha=0.5,
# with color
color=colors[3],
# with label the third value in first_name
label=fuck_label[3])
# Create a bar with post_score data,
# in position pos + some width buffer,
plt.bar([p + width*4 for p in pos],
#using df['post_score'] data,
df['c5'],
# of width
width,
# with alpha 0.5
alpha=0.5,
# with color
color=colors[4],
# with label the third value in first_name
label=fuck_label[4])
# Create a bar with post_score data,
# in position pos + some width buffer,
plt.bar([p + width*5 for p in pos],
#using df['post_score'] data,
df['c6'],
# of width
width,
# with alpha 0.5
alpha=0.5,
# with color
color=colors[5],
# with label the third value in first_name
label=fuck_label[5])
# Create a bar with post_score data,
# in position pos + some width buffer,
plt.bar([p + width*6 for p in pos],
#using df['post_score'] data,
df['c7'],
# of width
width,
# with alpha 0.5
alpha=0.5,
# with color
color=colors[6],
# with label the third value in first_name
label=fuck_label[6])
# Create a bar with post_score data,
# in position pos + some width buffer,
plt.bar([p + width*7 for p in pos],
#using df['post_score'] data,
df['c8'],
# of width
width,
# with alpha 0.5
alpha=0.5,
# with color
color=colors[7],
# with label the third value in first_name
label=fuck_label[7])
# Create a bar with post_score data,
# in position pos + some width buffer,
plt.bar([p + width*8 for p in pos],
#using df['post_score'] data,
df['c9'],
# of width
width,
# with alpha 0.5
alpha=0.5,
# with color
color=colors[8],
# with label the third value in first_name
label=fuck_label[8])
# Set the y axis label
ax.set_ylabel('Performance Speedup')
# Set the chart's title
#ax.set_title('Test Subject Scores')
# Set the position of the x ticks
ax.set_xticks([p + 4.5 * width for p in pos])
# Set the labels for the x ticks
ax.set_xticklabels(df['graph'])
# Setting the x-axis and y-axis limits
#plt.xlim(min(pos)-width, max(pos)+width*4)
#plt.ylim([0, max(df['c1'] + df['c2'] + df['c3'] + df['c4'] + df['c5'] + df['c6'] + df['c7'] + df['c8'] + df['c9'])] )
# Adding the legend and showing the plot
plt.legend(['c1', 'c2', 'c3', 'c4', 'c5', 'c6', 'c7', 'c8', 'c9'], loc='upper left')
plt.grid()
#plt.show()
plt.savefig("../pipeline-performance.pdf", bbox_inches='tight')
|
# -*- coding: utf-8 -*-
# @Time : 2018/4/5 12:22
# @Author : Andywei
# @Email : andycfa2@163.com
# @File : pandas 1.py
# @Software: PyCharm
# pandas plotting practice
import matplotlib.pyplot as plt #for displaying figures
import pandas as pd
import numpy as np
from pylab import mpl #fix garbled display of Chinese characters
import seaborn as sns
#set a font that supports Chinese characters
mpl.rcParams['font.sans-serif'] = ['Microsoft YaHei'] # set the default font: fixes plots failing to display Chinese text
mpl.rcParams['axes.unicode_minus'] = False # fixes the minus sign '-' rendering as a box in saved figures
#read a local Excel file
df = pd.read_excel('C://Users//ahuang3//Desktop//test.xlsx')
#generate random data with numpy
df1 = pd.DataFrame(np.random.rand(50,4),columns=['a','b','c','d']) #dataframe
ser1 = pd.Series(3*np.random.rand(4),index=['a','b','c','d'],name = 'series') #a single column of data (Series)
df2 = pd.DataFrame(3*np.random.rand(4,2),index=['a','b','c','d'],columns = ['x','y']) #dataframe
df3 = pd.DataFrame(np.random.randn(10, 4).cumsum(0), columns=list('ABCD'), index=np.arange(0, 100, 10))
#plotting parameter settings
# df.plot() #default form with no parameters
#bar chart
# df.plot(kind = 'bar')
#scatter plots
#single scatter plot
#df1.plot.scatter(x = 'a',y='b')
# draw different scatter groups on one chart
# ab = df1.plot.scatter(x='a',y='b',color = 'DarkBlue',label = 'Group1')
# abc = df1.plot.scatter(x='c',y='d',color = 'DarkGreen',label = 'Group2',ax=ab)
# abcd = df1.plot.scatter(x='a',y='c',color = 'DarkRed',label = 'Group3',ax=abc)
# df1.plot.scatter(x='b',y='d',color = 'DarkGray',label = 'Group4',ax=abcd)
#set the point size and grey level
# df1.plot.scatter(x='b',y='d',c = 'c',s = 50)
# df1.plot.scatter(x='a',y='b',s = df1['c']*200)
#pie charts
# ser1.plot.pie(figsize=(6,6)) #a single column
# ser1.plot.pie(labels = ['AA','BB','CC','DD'],colors = ['r','g','b','c'],autopct = '%.1f',fontsize = 10) # '%.1f' sets the number of decimal places
# df2.plot.pie(subplots = True,figsize = (12,6)) #multi-column data
#histogram
# t1 = np.random.normal(0,1,size=200)
# # t2 = np.random.normal(10,2,size = 200)
# # values = pd.Series(np.concatenate([t1,t2]))
# #
# # values.hist(bins =100,alpha =0.3,color = 'k',normed =True)
# # values.plot(kind = 'kde',style = 'k--')
# #
# # plt.xlabel('x-axis')
# # plt.ylabel('y-axis')
# # # plt.xlim(0,10) #set the x-axis range
# # # plt.ylim(0,20) #set the y-axis range
# # plt.title('Test')
# # plt.legend('Distribution')
#figure and axes setup
# fig,axes = plt.subplots(1,2)
# df3.plot(ax=axes[0])
# df2.plot(ax=axes[1])
plt.show()
|
class TleParser:
CELESTRAK = 1
# Describe a TLE structure
CELESTRAK_BP = {
0: {
'name': {
'start': 0,
'end': 23,
}
},
1: {
'line_number': {
'start': 0,
'end': 1,
},
'satellite_number': {
'start': 2,
'end': 7
},
'classification': {
'start': 7,
'end': 8,
},
'international_designator_year': {
'start': 9,
'end': 11,
},
'international_designator_number': {
'start': 11,
'end': 14,
},
'international_designator_piece': {
'start': 14,
'end': 16,
},
'epoch_year': {
'start': 18,
'end': 20,
},
'epoch_day': {
'start': 20,
'end': 32,
},
'first_derivative_mean_motion': {
'start': 34,
'end': 43,
},
'second_derivative_mean_motion': {
'start': 44,
'end': 52,
},
'drag': {
'start': 53,
'end': 62,
},
'set_number': {
'start': 63,
'end': 68,
},
'first_checksum': {
'start': 68,
'end': 69,
},
},
2: {
'inclination': {
'start': 8,
'end': 16,
},
'ascending_node': {
'start': 17,
'end': 25,
},
'eccentricity': {
'start': 26,
'end': 33,
},
'perigee_argument': {
'start': 34,
'end': 42,
},
'mean_anomaly': {
'start': 43,
'end': 51,
},
'mean_motion': {
'start': 52,
'end': 63,
},
'revolution_number': {
'start': 63,
'end': 68,
},
'second_checksum': {
'start': 68,
'end': 69,
}
}
}
def __init__(self, format=CELESTRAK):
self.format = format
if self.format == TleParser.CELESTRAK:
self.structure = TleParser.CELESTRAK_BP
def parse(self, tle):
for line in tle:
tle[line] = tle[line].decode("utf-8")
return self.explode(tle)
def explode(self, tle):
"""
Extract data from TLE into a dict
"""
blacklist = ['']
exploded = {}
for n in self.structure:
exploded["line_%i_full" % n] = tle[n].strip()
for e in self.structure[n]:
start = self.structure[n][e]['start']
end = self.structure[n][e]['end']
value = tle[n][start:end].strip()
                if value in blacklist:
                    value = None
                elif e == 'first_derivative_mean_motion':
                    value = "0%s" % value
                elif e == 'second_derivative_mean_motion':
                    value = '0'
                elif e == 'drag':
                    value = self.format_drag(value)
                elif e == 'eccentricity':
                    value = '0.%s' % value
exploded[e] = value
return exploded
def format_drag(self, raw_drag):
"""
Format a drag value from a raw value in a TLE to a readable number
"""
if raw_drag[0] == '-':
raw_drag = raw_drag[1:]
# Separate the two parts of iiiii-i
raw_drag = raw_drag.replace('+', '-')
parts = raw_drag.split('-')
ending = parts[0]
        power = 0
        if len(parts) != 1:
            power = parts[1]
        # Generate zeros depending on the size of the exponent
        zeros = ''.join(['0' for x in range(0, int(power) - 1)])
        # Concatenate values, ending up with a value like 0.000iiiii
return float('0.%s%s' % (zeros, ending))
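# A minimal usage sketch (an addition, not part of the original parser): parse() expects a
# dict mapping line index (0, 1, 2) to the raw bytes of each TLE line, as read from a
# Celestrak-style feed. The widely circulated ISS example TLE is used purely for illustration.
if __name__ == "__main__":
    sample_tle = {
        0: b"ISS (ZARYA)",
        1: b"1 25544U 98067A   08264.51782528 -.00002182  00000-0 -11606-4 0  2927",
        2: b"2 25544  51.6416 247.4627 0006703 130.5360 325.0288 15.72125391563537",
    }
    fields = TleParser().parse(sample_tle)
    print(fields["satellite_number"], fields["inclination"], fields["mean_motion"])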
|
from page.base import Base
import logging
from runlog import testLog
class Contact(Base):
"""联系人应用的所有页面操作类,此类中的d可以直接使用u2的所有方法"""
def __init__(self):
self.contact_info = self.get_data('contact.yaml')
    def click_add_bt(self):
        """Tap the 'new contact' button"""
        self.mclick(resourceId=self.contact_info['addbt'])
    def click_meun_bt(self):
        """Tap the main menu"""
        self.mclick(description=self.contact_info['meun'])
    def longclick_frist_contact(self):
        """Long-press the first contact"""
        logging.info('long click')
        self.mlong_click(className=self.contact_info['contact_list'],
                         index=3)
    def input_contact_mesg(self):
        """Enter the contact name and phone number"""
self.minput(self.contact_info['contact_name'],
text=self.contact_info['name_input'])
self.minput(self.contact_info['phonenumber'],
text=self.contact_info['phone_input'])
    def save_contact(self):
        """Tap the contact 'save' button"""
        self.mclick(resourceId=self.contact_info['saveid'])
        logging.info('save success')
    # def get_Verify_text(self):
    #     """Read the text of a widget"""
# # try:
# return self.d(resourceId=self.contact_info['saved_phone_number'])\
# .get_text()
# # except Exception as e:
# # logging.error(e)
    def contact_isempty(self):
        """Return whether the contact list is empty"""
        return self.d(text=self.contact_info['emptytext']).exists
    def new_contact(self):
        """Create a new contact:
        tap the add button - enter details - tap save"""
self.click_add_bt()
self.input_contact_mesg()
self.save_contact()
    def del_contact(self):
        """Delete a contact:
        long-press the contact - tap the delete button - confirm the deletion"""
self.longclick_frist_contact()
self.mclick(resourceId=self.contact_info['delbt'])
self.mclick(resourceId=self.contact_info['do_del'])
    def click_all_meun(self,s_meun,t_meun):
        """Walk through the first- and second-level menus
        (still needs some cleanup):
        tap the menu - tap settings - tap the submenu entry - go back - restart the app"""
        # start walking through the menus
self.click_meun_bt()
self.mclick(resourceId=self.contact_info['setting_id'])
self.mclick(text=s_meun)
logging.info('clicked meun %s'%s_meun)
t = self.findele(text=t_meun).get_text()
        if s_meun == 'My info':
            self.d.press('back')
            self.d.press('back')
        elif s_meun == 'Blocked numbers':
# self.mclick(className=self.contact_info['block_number'])
self.mapp_stop('com.android.server.telecom')
else:
self.d.press('back')
        logging.info('Third menu is %s' % t)
assert t in t_meun
|
a = input("Enter a string:")
b = input("Enter a char:")
i=0
m=0
x = len(a)
while (i<x):
if(b == a[i]):
m+=1
i+=1
print(m)
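# Equivalent one-liner (a sketch using the built-in str.count; same result for a single character):
# print(a.count(b))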
|
import random
from functools import reduce
import torch
from torch.utils import data
from torch.autograd import Variable
from util import *
from harry import HarryPotterText
class HarryPotterTextDataset(data.Dataset):
def __init__(self, root="data", num_copies=10):
self.harry = HarryPotterText(book_dir=root)
self.length = reduce((lambda x, y: x + y), self.harry.num_chapters)
self.ncopy = num_copies
# Generate dictionary
self.character_dict = self.harry.characters
# text = self.harry.get_text(0, 0)
def __len__(self):
return self.length * self.ncopy
def __getitem__(self, idx):
idx %= self.length
cum = self.harry.cum_num_chapters
bid = next(i for i, c in enumerate(cum) if c >= idx + 1) + 1
cid = idx + 1
if bid > 1:
cid -= cum[bid - 2]
text = self.harry.get_text(bid, cid)
return text
class TextCollate(object):
def __init__(self, chunk_len, characters, cuda=True):
self.chunk_len = chunk_len
self.characters = characters
self.cuda = cuda
def char_collate(self, batch):
batch_size = len(batch)
primer = torch.LongTensor(batch_size, self.chunk_len)
target = torch.LongTensor(batch_size, self.chunk_len)
for bi, text in enumerate(batch):
start_index = random.randint(0, len(text) - self.chunk_len - 2)
end_index = start_index + self.chunk_len + 1
chunk = text[start_index:end_index]
primer[bi] = char_tensor(chunk[:-1], self.characters)
target[bi] = char_tensor(chunk[1:], self.characters)
primer = Variable(primer)
target = Variable(target)
if self.cuda:
primer = primer.cuda()
target = target.cuda()
return primer, target
def word_collate(self, batch):
batch_size = len(batch)
primer = torch.LongTensor(batch_size, self.chunk_len)
target = torch.LongTensor(batch_size, self.chunk_len)
for bi, text in enumerate(batch):
start_index = random.randint(0, len(text) - self.chunk_len - 2)
end_index = start_index + self.chunk_len + 1
chunk = text[start_index:end_index]
primer[bi] = char_tensor(chunk[:-1])
target[bi] = char_tensor(chunk[1:])
primer = Variable(primer)
target = Variable(target)
if self.cuda:
primer = primer.cuda()
target = target.cuda()
return primer, target
def make_dataloader(batch_size, chunk_len, root="data", num_copies=10):
dataset = HarryPotterTextDataset(
root=root,
num_copies=num_copies
)
collate = TextCollate(chunk_len, dataset.character_dict)
dataloader = data.DataLoader(
dataset,
batch_size=batch_size,
shuffle=True,
collate_fn=collate.char_collate
)
return dataloader
def test():
dset = HarryPotterTextDataset(num_copies=1)
print(len(dset))
# print(dset[35])
# input()
# loader = make_dataloader(batch_size=2, chunk_len=100)
# print(len(loader))
# for p, t in loader:
# print('p', p)
# print('t', t)
if __name__ == "__main__":
test()
|
def b_search(arr, zero, lengthOfArr, mynum):
    while zero <= lengthOfArr:
        middle = zero + (lengthOfArr - zero) // 2
        if arr[middle] == mynum:
            return middle
        else:
            if arr[middle] < mynum:
                zero = middle + 1
            else:
                lengthOfArr = middle - 1
    return -1
arr = [ 2, 3, 4, 10, 40 ]
find_x = 10
result = b_search(arr, 0, len(arr)-1, find_x)
if result==-1:
print("Not found .")
else:
print("Index Of : ",result)
|
# Interval DP; back when grinding CSP I had only solved a few problems like this...
# Standard template, O(n^3); unsurprisingly it gets TLE.
class Solution:
def stoneGameV(self, stoneValue: List[int]) -> int:
n = len(stoneValue)
presum = [0]*(n+1)
for i in range(n):
presum[i+1] = presum[i] + stoneValue[i]
f = [[0] * n for _ in range(n)]
        # Enumerate the interval length first, then the start point, then the split point,
        # so the overall time complexity is O(n^3).
        # interval length d
        for d in range(2, n + 1):
            # start point i
            for i in range(n - d + 1):
                # end point j
j = i + d - 1
for k in range(i, j):
left = presum[k+1] - presum[i]
right = presum[j+1] - presum[k+1]
                    # State transition:
                    # per the problem, Bob discards the larger row and Alice keeps the smaller one,
                    # so the value is carried over from the smaller side.
if left > right:
f[i][j] = max(f[i][j], right + f[k+1][j])
elif left < right:
f[i][j] = max(f[i][j], left + f[i][k])
else:
                        # if the two sums are equal, take whichever side gives the larger score
f[i][j] = max(f[i][j], right + f[k+1][j], left + f[i][k])
return f[0][n-1]
# Optimized to O(n^2); pasted from an editorial, no way I would have come up with this on my own...
class Solution:
def stoneGameV(self, stoneValue: List[int]) -> int:
n = len(stoneValue)
f = [[0] * n for _ in range(n)]
maxl = [[0] * n for _ in range(n)]
maxr = [[0] * n for _ in range(n)]
for left in range(n - 1, -1, -1):
maxl[left][left] = maxr[left][left] = stoneValue[left]
total = stoneValue[left]
suml = 0
i = left - 1
for right in range(left + 1, n):
total += stoneValue[right]
while i + 1 < right and (suml + stoneValue[i + 1]) * 2 <= total:
suml += stoneValue[i + 1]
i += 1
if left <= i:
f[left][right] = max(f[left][right], maxl[left][i])
if i + 1 < right:
f[left][right] = max(f[left][right], maxr[i + 2][right])
if suml * 2 == total:
f[left][right] = max(f[left][right], maxr[i + 1][right])
maxl[left][right] = max(maxl[left][right - 1], total + f[left][right])
maxr[left][right] = max(maxr[left + 1][right], total + f[left][right])
return f[0][n - 1]
|
import uuid
from unittest.mock import patch
import pytest
import s3fs
from rubicon_ml import domain
from rubicon_ml.repository import S3Repository
from rubicon_ml.repository.utils import slugify
def test_initialization():
s3_repo = S3Repository(root_dir="s3://bucket/root")
assert s3_repo.PROTOCOL == "s3"
assert type(s3_repo.filesystem) == s3fs.core.S3FileSystem
@patch("s3fs.core.S3FileSystem.open")
def test_persist_bytes(mock_open):
bytes_data = b"test data {uuid.uuid4()}"
bytes_path = "s3://bucket/root/path/to/data"
s3_repo = S3Repository(root_dir="s3://bucket/root")
s3_repo._persist_bytes(bytes_data, bytes_path)
mock_open.assert_called_once_with(bytes_path, "wb")
@patch("s3fs.core.S3FileSystem.open")
def test_persist_domain(mock_open):
project = domain.Project(f"Test Project {uuid.uuid4()}")
project_metadata_path = f"s3://bucket/root/{slugify(project.name)}/metadata.json"
s3_repo = S3Repository(root_dir="s3://bucket/root")
s3_repo._persist_domain(project, project_metadata_path)
mock_open.assert_called_once_with(project_metadata_path, "w")
@patch("s3fs.core.S3FileSystem.open")
def test_persist_domain_throws_error(mock_open):
not_serializable = str
project = domain.Project(f"Test Project {uuid.uuid4()}", description=not_serializable)
project_metadata_path = f"s3://bucket/root/{slugify(project.name)}/metadata.json"
s3_repo = S3Repository(root_dir="s3://bucket/root")
with pytest.raises(TypeError):
s3_repo._persist_domain(project, project_metadata_path)
mock_open.assert_not_called()
|
import os, re, hmac, random, string, webapp2, logging, jinja2, hashlib, json
from google.appengine.ext import db
### template helpers
template_dir = os.path.join(os.path.dirname(__file__), 'templates')
jinja_env = jinja2.Environment(loader = jinja2.FileSystemLoader(template_dir), autoescape=True)
### Main handler
class Handler(webapp2.RequestHandler):
def write(self, *a, **kw):
self.response.out.write(*a, **kw)
def render_str(self, template, **params):
t = jinja_env.get_template(template)
return t.render(params)
def render(self, template, **kw):
self.write(self.render_str(template, **kw))
def render_json(self, obj):
self.response.headers['Content-Type'] = 'application/json; charset=UTF-8'
self.write(json.dumps(obj))
def initialize(self, *a, **kw):
webapp2.RequestHandler.initialize(self, *a, **kw)
if self.request.url.endswith('.json'):
self.format = 'json'
else:
self.format = 'html'
USER_RE = re.compile(r"^[a-zA-Z0-9_-]{3,20}$")
def valid_username(username):
return USER_RE.match(username)
PASS_RE = re.compile(r"^.{3,20}$")
def valid_password(password):
return PASS_RE.match(password)
EMAIL_RE = re.compile(r"^[\S]+@[\S]+\.[\S]+$")
def valid_email(email):
return EMAIL_RE.match(email)
SECRET = ''.join(random.sample(string.ascii_letters + string.digits, 10))
def encode_blog(obj):
if isinstance(obj, Blog):
return obj.__dict__
return obj
### Blog table
class Blog(db.Model):
subject = db.StringProperty(required = True)
content = db.TextProperty(required = True)
created = db.DateTimeProperty(auto_now_add = True)
def as_dict(self):
time_format = '%c'
d = {'subject': self.subject,
'content': self.content,
'created': self.created.strftime(time_format)}
return d
|
import pytest
from belt import get_belt
@pytest.mark.parametrize("input_argument, expected_return", [
(0, None),
(9, None),
(10, 'white'),
(48, 'white'),
(50, 'yellow'),
(101, 'orange'),
(249, 'green'),
(250, 'blue'),
(251, 'blue'),
(400, 'brown'),
(599, 'brown'),
(600, 'black'),
(788, 'black'),
(800, 'paneled'),
(999, 'paneled'),
(1000, 'red'),
(1200, 'red'),
])
def test_get_belt(input_argument, expected_return):
assert get_belt(input_argument) == expected_return
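# A sketch of a belt.get_belt implementation consistent with the cases above. Some belt
# boundaries (e.g. orange/green, blue/brown, black/paneled) are not pinned down by these
# tests, so the exact thresholds below are assumptions:
#
# BELT_SCORES = [
#     (1000, 'red'), (800, 'paneled'), (600, 'black'), (400, 'brown'),
#     (250, 'blue'), (175, 'green'), (100, 'orange'), (50, 'yellow'), (10, 'white'),
# ]
#
# def get_belt(score):
#     for threshold, belt in BELT_SCORES:
#         if score >= threshold:
#             return belt
#     return None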
|
#!/usr/bin/python
from kestrelcli import cli
if __name__ == '__main__':
cli.main()
|
'''
Created on 2017-02-28
@author: admin
'''
#!D:\python35\python.exe
#encoding=UTF-8
print ('Content-type: text/html\n')
from os.path import join, abspath
import cgi, sys
BASE_DIR = abspath('data')
form = cgi.FieldStorage()
filename = form.getvalue('filename')
if not filename:
print('Please enter a file name')
sys.exit()
text = open(join(BASE_DIR, filename)).read()
|
#<Methods of the Point class (including operator overloading)>
"""Methods can be divided into special "magic" methods, such as __init__, __str__ and __len__,
and ordinary methods. A magic method's name is wrapped in two underscores on each side.
"""
class Point:
def __init__(self, x=0, y=0):
self.x = x
self.y = y
def __str__(self):
return f"({self.x}, {self.y})"
def __add__(self, other):
return Point(self.x + other.x, self.y + other.y)
def __sub__(self,other):
return Point(self.x - other.x, self.y - other.y)
def __rmul__(self,other):
return Point(self.x * other, self.y * other)
"""def add(self, other):
return Point(self.x + other.x, self.y + other.y)
p = Point(1,2)
q = Point(3,4)
r = p.add(q)
print(r) # r = (4,6)"""
"""이렇게 생성할수도 있지만, 정수, 실수 덧셈처럼 r = p + q 의 형식으로 + 연산자를 사용할 수 있다면 직관적이라 이해하기 쉬운 장점이 있다.
파이썬에서는 이를 위한 magic 메쏘드를 제공한다. 정수, 실수의 덧셈 연산자에 Point클래스의 덧셈 연산을 '덧 입혔다'는 의미에서 이러한 기능을
'연산자 overloading'이라 한다"""
"""특별 메쏘드의 이름은 __add__ 이다. r = p + q를 하면, 실제로는 r = p.__add__(q)가 호출되어,
두 벡터의 합 벡터가 리턴되어 r에 저장된다."""
p = Point(1,2)
q = Point(3,4)
r = p + q
s = p.__add__(q)
a = p - q
b = p.__sub__(q)
print(r,s)
print(a,b)
"""이제 곱셈 연산을 생각해보자. 두 벡터의 곱은 덧셈이나 뺄셈처럼 대응되는 좌표 값을 더하거나 빼는 식으로
정의되지 않는다. 벡터의 곱셈 연산은 r = 3 * p의 형태처럼 p의 좌표 값에 모두 상수 3을 곱하는 식으로 사용된다.
즉, scalar * vector 형식으로 사용된다."""
"""scalar 값은 Point 객체가 아니기 때문에 연산에 참여하는 두 객체의 타입이 같지 않다는 문제가 발생한다.
파이썬에서는 이러한 경우에도 연산자 오버로딩 기능을 지원한다. 단, 오른쪽 객체를 기준으로 오버로딩을 해야한다.
__rmul__(right multiplication)magic 메쏘드는 * 연산자의 오른쪽에 등장하는 객체가 self가 되고
왼쪽 객체가 other가 되는 형식이다. 그래서 이름에 r이 붙었다. 이 경우에 self와 other의 타입이 달라도 된다."""
# r = 3 * p   # evaluated as p.__rmul__(3) (note: the Point is on the right of *)
r = 3 * p
print(r)
r = p * 3 # this line raises a TypeError: only __rmul__ is defined, so the Point has to be on the right of *
print(r)
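# A small sketch (not in the original): defining __mul__ as well would make the
# vector * scalar order work, so the statement above would no longer raise an error:
#     def __mul__(self, other):
#         return Point(self.x * other, self.y * other)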
|
# import all dependencies
import flask
import werkzeug
import os
import getConfig
import numpy as np
import execute
from PIL import Image
# Initialize a dict to hold the parameters read from the configuration file
gConfig = {}
# Read the configuration parameters with the get_config method
gConfig = getConfig.get_config(config_file='config.ini')
# Create a Flask web application named imgClassifierWeb
app = flask.Flask("imgClassifierWeb")
# Define a function that preprocesses the received data and calls execute to run the prediction
def CNN_predict():
global secure_filename
    # Open the file with PIL's Image and read the image data
    img = Image.open(os.path.join(app.root_path, 'predict_img/'+secure_filename))
    # Convert the image to RGB
    img = img.convert("RGB")
    # Split into the r, g, b channels and concatenate their pixel arrays
    r, g, b = img.split()
    r_arr = np.array(r)
    g_arr = np.array(g)
    b_arr = np.array(b)
    img = np.concatenate((r_arr, g_arr, b_arr))
    # Reshape to the model's input shape (1, 32, 32, 3) and normalize
    image = img.reshape([1, 32, 32, 3])/255
    # Call execute.predict to run the prediction
    predicted_class = execute.predict(image)
    print(predicted_class)
    # Return the prediction and render the result page with the template
return flask.render_template(template_name_or_list="prediction_result.html",
predicted_class=predicted_class)
"""
flask路由系统:
1、使用flask.Flask.route() 修饰器。
2、使用flask.Flask.add_url_rule()函数。
3、直接访问基于werkzeug路由系统的flask.Flask.url_map.
参考知识链接:https://www.jianshu.com/p/e69016bd8f08
1、@app.route('/index.html')
def index():
return "Hello World!"
2、def index():
return "Hello World!"
index = app.route('/index.html')(index)
app.add_url_rule:app.add_url_rule(rule,endpoint,view_func)
关于rule、ednpoint、view_func以及函数注册路由的原理可以参考:https://www.cnblogs.com/eric-nirnava/p/endpoint.html
"""
app.add_url_rule(rule="/predict/", endpoint="predict", view_func=CNN_predict)
"""
知识点:
flask.request属性
form:
一个从POST和PUT请求解析的 MultiDict(一键多值字典)。
args:
MultiDict,要操作 URL (如 ?key=value )中提交的参数可以使用 args 属性:
searchword = request.args.get('key', '')
values:
CombinedMultiDict,内容是form和args。
可以使用values替代form和args。
cookies:
顾名思义,请求的cookies,类型是dict。
stream:
在可知的mimetype下,如果进来的表单数据无法解码,会没有任何改动的保存到这个·stream·以供使用。很多时候,当请求的数据转换为string时,使用data是最好的方式。这个stream只返回数据一次。
headers:
请求头,字典类型。
data:
包含了请求的数据,并转换为字符串,除非是一个Flask无法处理的mimetype。
files:
MultiDict,带有通过POST或PUT请求上传的文件。
method:
请求方法,比如POST、GET
知识点参考链接:https://blog.csdn.net/yannanxiu/article/details/53116652
werkzeug
"""
def upload_image():
global secure_filename
    if flask.request.method == "POST":  # only handle POST requests
        img_file = flask.request.files["image_file"]  # the image to classify
        secure_filename = werkzeug.secure_filename(img_file.filename)  # sanitize the file name
        img_path = os.path.join(app.root_path, "predict_img/"+secure_filename)  # where to save the image
        img_file.save(img_path)  # save the image under the app root
        print("Image uploaded successfully.")
        return flask.redirect(flask.url_for(endpoint="predict"))
    return "Image upload failed"
# Register the /upload route (POST) for file uploads
app.add_url_rule(rule="/upload/", endpoint="upload", view_func=upload_image, methods=["POST"])
def predirect_upload():
return flask.render_template(template_name_or_list="upload_image.html")
"""
"""
app.add_url_rule(rule="/", endpoint="homepage", view_func=predirect_upload)
if __name__ == "__main__":
app.run(host="0.0.0.0", port=8008, debug=False)
|
##################################################################################
# This file scrapes a journal issue page for the date published, the article title,
# the authors, and the journal each article was published in
##################################################################################
from bs4 import BeautifulSoup
import requests
import os
import pandas as pd
import numpy as np
##################################################################################
def text_write(path, filename, df):
"""This function writes the provided dataframe to a specified file"""
filepath = os.path.join(path, filename)
df.to_csv(filepath, sep="\t")
def getSoupACS(address):
page = requests.get(address)
soup = BeautifulSoup(page.content, "html.parser")
time_tag = soup.find_all("span", class_="pub-date-value")
title_tag = soup.find_all("h5", class_="issue-item_title")
author_tag = soup.find_all("span", class_="hlFld-ContribAuthor")
article_tag = soup.find_all("span", class_="issue-item_jour-name")
article_dict = dict()
for i in range(0, 6):
if not bool(article_dict):
article_dict = {
"date_published": [time_tag[i].get_text().strip()],
"article_title": [title_tag[i].get_text().strip()],
"author": [author_tag[i].get_text().strip()],
"Journal_Published_in": [article_tag[i].get_text().strip()],
}
else:
article_dict["date_published"].append(time_tag[i].get_text().strip())
article_dict["article_title"].append(title_tag[i].get_text().strip())
article_dict["author"].append(author_tag[i].get_text().strip())
article_dict["Journal_Published_in"].append(
article_tag[i].get_text().strip()
)
article_df = pd.DataFrame.from_dict(article_dict, orient="columns")
path = input("What is the file path you wish to use:")
filename = input("What would you like to name the file:")
text_write(path, filename, article_df)
|
from charm.schemes.dabe_aw11 import Dabe
from charm.adapters.dabenc_adapt_hybrid import HybridABEncMA
from charm.toolbox.pairinggroup import PairingGroup, GT
import unittest
debug = False
class DabeTest(unittest.TestCase):
def testDabe(self):
groupObj = PairingGroup('SS512')
dabe = Dabe(groupObj)
GP = dabe.setup()
#Setup an authority
auth_attrs= ['ONE', 'TWO', 'THREE', 'FOUR']
(SK, PK) = dabe.authsetup(GP, auth_attrs)
if debug: print("Authority SK")
if debug: print(SK)
#Setup a user and give him some keys
gid, K = "bob", {}
usr_attrs = ['THREE', 'ONE', 'TWO']
for i in usr_attrs: dabe.keygen(GP, SK, i, gid, K)
if debug: print('User credential list: %s' % usr_attrs)
if debug: print("\nSecret key:")
if debug: groupObj.debug(K)
#Encrypt a random element in GT
m = groupObj.random(GT)
policy = '((one or three) and (TWO or FOUR))'
        if debug: print('Access Policy: %s' % policy)
CT = dabe.encrypt(PK, GP, m, policy)
if debug: print("\nCiphertext...")
if debug: groupObj.debug(CT)
orig_m = dabe.decrypt(GP, K, CT)
assert m == orig_m, 'FAILED Decryption!!!'
if debug: print('Successful Decryption!')
class HybridABEncMATest(unittest.TestCase):
def testHybridABEncMA(self):
groupObj = PairingGroup('SS512')
dabe = Dabe(groupObj)
hyb_abema = HybridABEncMA(dabe, groupObj)
#Setup global parameters for all new authorities
gp = hyb_abema.setup()
#Instantiate a few authorities
        #Attribute names must be globally unique in HybridABEncMA:
        #two authorities may not issue keys for the same attribute,
        #otherwise the decryption algorithm will not know which private key to use.
jhu_attributes = ['jhu.professor', 'jhu.staff', 'jhu.student']
jhmi_attributes = ['jhmi.doctor', 'jhmi.nurse', 'jhmi.staff', 'jhmi.researcher']
(jhuSK, jhuPK) = hyb_abema.authsetup(gp, jhu_attributes)
(jhmiSK, jhmiPK) = hyb_abema.authsetup(gp, jhmi_attributes)
allAuthPK = {}; allAuthPK.update(jhuPK); allAuthPK.update(jhmiPK)
#Setup a user with a few keys
bobs_gid = "20110615 bob@gmail.com cryptokey"
K = {}
hyb_abema.keygen(gp, jhuSK,'jhu.professor', bobs_gid, K)
hyb_abema.keygen(gp, jhmiSK,'jhmi.researcher', bobs_gid, K)
msg = b'Hello World, I am a sensitive record!'
size = len(msg)
policy_str = "(jhmi.doctor or (jhmi.researcher and jhu.professor))"
ct = hyb_abema.encrypt(allAuthPK, gp, msg, policy_str)
if debug:
print("Ciphertext")
print("c1 =>", ct['c1'])
print("c2 =>", ct['c2'])
decrypted_msg = hyb_abema.decrypt(gp, K, ct)
if debug: print("Result =>", decrypted_msg)
assert decrypted_msg == msg, "Failed Decryption!!!"
if debug: print("Successful Decryption!!!")
del groupObj
if __name__ == "__main__":
unittest.main()
|
from marshmallow import Schema, validate, fields
class VideoSchema(Schema):
id = fields.Integer(dump_only=True)
user_id = fields.Integer(dump_only=True)
name = fields.String(required=True, validate=validate.Length(max=250))
description = fields.String(required=True, validate=validate.Length(max=500))
url = fields.String(dump_only=True)
    cover = fields.String(dump_only=True)
message = fields.String(dump_only=True)
class UserSchema(Schema):
name = fields.String(required=True, validate=validate.Length(max=250))
email = fields.String(required=True, validate=validate.Length(max=250))
password = fields.String(
required=True, validate=validate.Length(max=100), load_only=True
)
videos = fields.Nested(VideoSchema, many=True, dump_only=True)
class AuthSchema(Schema):
access_token = fields.String(dump_only=True)
message = fields.String(dump_only=True)
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2019-01-21 09:58
from __future__ import unicode_literals
from datetime import date
from django.db import migrations
class Migration(migrations.Migration):
def calculate_age(apps, schema_editor):
Profile = apps.get_model('registration', 'Profile')
for profile in Profile.objects.all():
today = date.today()
dob = profile.date_of_birth
age = (today.year - dob.year - ((today.month, today.day) < (dob.month, dob.day)))
profile.user_age = age
profile.save()
dependencies = [
('registration', '0008_auto_20190121_0957'),
]
operations = [
migrations.RunPython(calculate_age),
]
|
import cv2
import numpy as np
import json
import os
import pandas as pd
# read images
img = cv2.imread('../0706_data/dog.jpg')
c_img = cv2.imread('../0706_data/cat.jpg')
# shape is (height, width, channels); OpenCV loads color images as BGR
print(img.shape)
# save an image
# cv2.imwrite('copy_img.jpg',img)
# cv2.imshow('dog',img)
# cv2.waitKey()
# color conversion: cv2 reads images as BGR; pixel values range from 0 to 255
# rgb_img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
# gray_img = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
# cv2.imshow('',rgb_img)
# cv2.imshow('g',gray_img)
# cv2.waitKey()
# viewing individual color channels (just good to know this exists)
# (B, G, R) = cv2.split(img)
#
# color = R
# cv2.imshow("",color)
# cv2.waitKey()
#
# zeros = np.zeros(img.shape[:2],dtype='uint8')
#
# cv2.imshow('Red',cv2.merge([zeros,zeros,R]))
# cv2.imshow('Green',cv2.merge([zeros,G,zeros]))
# cv2.imshow('Blue',cv2.merge([B,zeros,zeros]))
# cv2.waitKey(0)
# accessing pixel values
# print(img[100,200])
# cv2.imshow('',img)
# cv2.waitKey()
# cv2.destroyAllWindows()
'''Cropping (rectangular regions only)'''
# resizing
# cv2.imshow('',img)
# img = cv2.resize(img,(408,380))
# cv2.imshow('big',img)
# img = cv2.resize(img,(108,58))
# cv2.imshow('small',img)
# cv2.waitKey()
# cropping
# cv2.imshow('',img[0:150,0:180])
#
# cv2.imshow('change',img[100:150,50:100])
# h, w, c = img.shape
#
# cv2.imshow('crop',img[int(h/2-50): int(h/2+50),int(w/2-50):int(w/2+50)])
# print(int(h/2-50),int(h/2+50),int(w/2 - 50),int(w/2 +50))
# cv2.waitKey()
# drawing shapes
# line
# start point, end point, color, thickness // remember the color order is BGR
# img = cv2.line(img,(100,100),(180,150),(0,255,0),4)
# cv2.imshow('',img)
# cv2.waitKey()
# rectangle
# start point, end point, color, thickness // remember the color order is BGR
# img = cv2.rectangle(img,(35,26),(160,170),(0,255,0),3)
# cv2.imshow('',img)
# cv2.waitKey()
# circle
# # center, radius, color, thickness // -1 draws a filled circle
# img = cv2.circle(img,(200,100),30,(0,255,0),3)
# cv2.imshow('',img)
# cv2.waitKey()
# # poly
# polygons; almost never used
# pts = np.array([[35,26],[35,170],[160,170],[190,26]])
# img = cv2.polylines(img,[pts],True,(0,255,0),3)
# cv2.imshow('',img)
# cv2.waitKey()
# text: putText // origin, font type, font scale, color, thickness
# img = cv2.putText(img,'dog',(200,100),0,1,(0,25,0),2)
# cv2.imshow('',img)
# cv2.waitKey()
# pasting one image into another
# img = cv2.rectangle(img,(200,100),(275,183),(0,255,0),2)
#
#
# c_img = cv2.resize(c_img,(75,83))
# img[100:183,200:275] = c_img
# cv2.imshow('change',img)
# cv2.waitKey()
# adding images
# img = cv2.resize(img,(217,232))
# add1 = img + c_img
# add2 = cv2.addWeighted(img,float(0.8),c_img,float(0.2),5) # blend weights
# cv2.imshow('1',add1)
# cv2.imshow('2',add2)
# cv2.waitKey()
'''Frequently used operations'''
# image rotation
# height, width, c = img.shape
# img90 = cv2.rotate(img,cv2.ROTATE_90_CLOCKWISE) # 90 degrees clockwise
# img270 = cv2.rotate(img,cv2.ROTATE_90_COUNTERCLOCKWISE) # 90 degrees counterclockwise
# img180 = cv2.rotate(img,cv2.ROTATE_180)
#
#
# # rotation center
# img_r = cv2.getRotationMatrix2D((width/2,height/2),45,1)
# cv2.imshow('90',img90)
# cv2.imshow('2',img270)
# cv2.imshow('3',img180)
# cv2.imshow('4',img_r)
# cv2.waitKey()
# flipping: flip(img, 0) = vertical flip, flip(img, 1) = horizontal flip (horizontal is the one used most often)
# cv2.imshow('origin',img)
# img = cv2.flip(img,0)
# cv2.imshow('270',img)
# cv2.waitKey()
# affine transform
# height, width, channel = img.shape
# matrix = cv2.getRotationMatrix2D((width/2,height/2),45,2)
# img = cv2.warpAffine(img,matrix,(width,height))
# cv2.imshow('270',img)
# cv2.waitKey()
# image brightness and gamma
# nimg = cv2.imread('../0706_data/night.jpg')
# table = np.array([((i/255.0)**0.5) * 255 for i in np.arange(0,256)]).astype('uint8')
# gamma_img = cv2.LUT(nimg, table)
#
# val = 50 #randint(10,50)
# # numpy.full returns a new array of the given shape and dtype, filled with the given value.
# # numpy.arange returns evenly spaced values over the given interval.
# array = np.full(nimg.shape, (val,val,val),dtype=np.uint8) # adds 50 to every pixel
# all_array = np.full(nimg.shape,(30,30,30),dtype=np.uint8) # adds 30 to every pixel
# bright_img = cv2.add(nimg,array).astype('uint8')
# all_img = cv2.add(gamma_img,all_array).astype('uint8')
#
# cv2.imshow('origin',nimg)
# cv2.imshow('all',all_img)
# cv2.imshow('bright',bright_img)
# cv2.imshow('gamma',gamma_img)
# cv2.waitKey()
# # image blurring; important in practice because of privacy (anonymization)
# blu_img = cv2.blur(img,(15,15))
#
# roi = img[28:74,95:165] # the region to blur
# cv2.imshow('blurLoca',roi)
# cv2.waitKey()
#
#
# roi = cv2.blur(roi,(15,15)) #
# img[28:74,95:165] = roi
# cv2.imshow('blu',blu_img)
# cv2.imshow('s-blu',img)
# cv2.waitKey()
# # image padding; used a lot, e.g. to make an image square
# img_pad = cv2.copyMakeBorder(img,50,50,100,100,cv2.BORDER_CONSTANT, value=[0,0,0]) # top, bottom, left, right; keeps the image centered and pads with black
# cv2.imshow('img pad',img_pad)
# cv2.waitKey()
# cv2 cascade (hardly ever used)
# face_cascade = cv2.CascadeClassifier('../0706_data/haarcascade_frontalface_default.xml')
#
# img = cv2.imread('../0706_data/face.jpg')
# gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
#
# faces = face_cascade.detectMultiScale(gray, 1.3, 1)
#
# print(faces)
#
# cv2.imshow('face', img)
# cv2.waitKey()
#
# total_list = []
# for i in range(5):
# ins_dic = {}
# ins_dic[f'person{i}'] = i
# ins_dic['bbox'] = [i+5,i+10,i+15,i+30]
# total_list.append(ins_dic)
#
# with open('json_sample.json','w',encoding='utf-8') as make_file:
# json.dump(total_list,make_file,indent='\t')
#
#
# json_dir = 'json_sample.json'
# print(os.path.isfile(json_dir))
# with open(json_dir) as f:
# json_data = json.load(f)
#
# for j_data in json_data:
# print(j_data)
# print(j_data['bbox'])
|
#!/usr/bin/env python3
from ev3dev2.motor import MoveSteering, MoveTank, MediumMotor, LargeMotor, OUTPUT_A, OUTPUT_B, OUTPUT_C, OUTPUT_D
from ev3dev2.sensor.lego import TouchSensor, ColorSensor, GyroSensor
from ev3dev2.sensor import INPUT_1, INPUT_2, INPUT_3, INPUT_4
import xml.etree.ElementTree as ET
import threading
import time
from sys import stderr
gyro = GyroSensor(INPUT_1)
steering_drive = MoveSteering(OUTPUT_B, OUTPUT_C)
largeMotor_Left= LargeMotor(OUTPUT_B)
largeMotor_Right= LargeMotor(OUTPUT_C)
tank_block = MoveTank(OUTPUT_B, OUTPUT_C)
#_________________________________________________________________________________________________________________________________
def StraightGyro_target(stop, speed, rotations, target):
print("In StraightGyro_target", file=stderr)
current_degrees = largeMotor_Left.position
rotations = rotations * 360
target_rotations= current_degrees + rotations
current_gyro_reading = gyro.angle
# print("Current Gyro Reading: {}".format(current_gyro_reading))
while float(current_degrees) < target_rotations:
if stop():
break
# reading in current gyro and rotations
current_gyro_reading=gyro.angle
current_degrees = largeMotor_Left.position
#if the gyro is smaller than the target
if current_gyro_reading < target:
correction = target - current_gyro_reading # calculate full error by target - gyro
correction = correction * .25 # 1/4 of the correction (so the robot doesn't over correct)
            steering_drive.on(steering = -correction , speed = speed) # steer by the scaled correction so the robot doesn't overcorrect
#if the gyro is larger than the target
if current_gyro_reading > target:
correction = target - current_gyro_reading # calculate full error by target - gyro
correction = correction * .25 # 1/4 of the correction (so the robot doesn't over correct)
            steering_drive.on(steering = -correction , speed = speed) # steer by the scaled correction so the robot doesn't overcorrect
#if the gyro is == to the target just go straight
if current_gyro_reading == target:
steering_drive.on(steering = 0 , speed = speed)
# if the current rotations is larger than the target then break the loop which will stop the robot
if float(current_degrees) >= target_rotations:
break
if stop():
break
tank_block.off()
print('Leaving StraightGyro_target', file=stderr)
#stopProcessing=False
#StraightGyro_target(lambda:stopProcessing, speed=30, rotations=3)
|
# Generated by Django 3.1.3 on 2020-11-17 13:25
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('library', '0002_auto_20201117_1850'),
]
operations = [
migrations.AddField(
model_name='profile',
name='adhar_card',
field=models.IntegerField(default=0),
preserve_default=False,
),
]
|
from pymongo import MongoClient
# connect to database
connection = MongoClient('localhost', 27017)
db = connection.school
# handle to names collection
students = db.students1
cursor = students.find()
hws = {} # name : [{"_id": score}]
for s in cursor:
    score = float("inf")  # sentinel larger than any real score
    _id = s["_id"]
    hw = s["scores"]
    for t in hw:
        if t["type"] == "homework" and score > t["score"]:
            score = t["score"]
    students.update_one({"_id": _id}, {"$pull": {"scores": {"type": "homework", "score": score}}})
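# An equivalent sketch using Python's min() to pick the lowest homework score per student
# (same effect as the loop above, shown only for comparison):
# for s in students.find():
#     hw_scores = [t["score"] for t in s["scores"] if t["type"] == "homework"]
#     if hw_scores:
#         students.update_one(
#             {"_id": s["_id"]},
#             {"$pull": {"scores": {"type": "homework", "score": min(hw_scores)}}},
#         )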
|
import matplotlib.pyplot as plt
import numpy as np
max_iters = 10000
precision = 0.0001
gammas = np.linspace(0.001, 0.01, 10)
fct = lambda x: 4 * x**3 - 9 * x**2
def gradient(gamma):
iters = 0
cur_x = 6
previous_step_size = 1
x = []
while (previous_step_size > precision) & (iters < max_iters):
x.append(cur_x)
prev_x = cur_x
cur_x -= gamma * fct(prev_x)
previous_step_size = abs(cur_x - prev_x)
iters+=1
print("Gamma {} min {:.4f} f(min) {:.4f}".format(gamma, cur_x, fct(cur_x)))
return x
gamma = gammas[0]
fig, ax = plt.subplots(1,1)
for gamma in gammas:
x = gradient(gamma)
plt.plot(x, label = gamma)
plt.legend()
plt.show()
|
def tableMulti(base, debut, fin):
    print('part of the multiplication table for', base, ":")
n= debut
while n <= fin :
print(n, "x" , base, "=" , n * base)
n= n + 1
def table(base):
resultat = []
n = 1
while n< 11:
b = n * base
resultat.append(b)
n=n+1
return resultat
def cube(n):
return n**3
def volumeSphere(r):
return 4 * 3.1416 * cube(r) / 3
r = input("entrez la valeur du rayon : ")
print("le volume de cette sphère vaut", volumeSphere(float(r)))
|
import urllib.parse
import requests
def run():
url='https://maps.googleapis.com/maps/api/geocode/json?address=Alfredo+mendiola+3540,+CA&key=AIzaSyC5guRUsYSgt9ADNt5LCOOoHc9p48oG2io'
json_data = requests.get(url).json()
country = json_data['results'][0]['address_components'][5]['long_name']
print(country)
if __name__ == '__main__':
run()
|
from pymongo import MongoClient
from flask import Flask, render_template, request, session
from random import randint
app = Flask(__name__)
client = MongoClient('mongodb+srv://admin:<password>@cluster0-w1ulm.mongodb.net/test?retryWrites=true&w=majority')
db = client['hackathon']
playerScores = db['playerScores']
@app.route('/', methods=["GET"])
def home():
return render_template('index.html',
messages=messages.find({}),
loggedIn='username' in session,
username=session.get('username','')
)
@app.route('/login', methods=['POST'])
def login():
session['username'] = request.form['username']
return "success"
@app.route('/logout', methods=['POST'])
def logout():
session.pop('username', None)
return "success"
@app.route('/game', methods=['POST'])
def game():
    comp = randint(1,3)
    player = int(request.form['selection'])  # form values arrive as strings
    username = session['username']
    # look up the current player's record instead of iterating the collection object
    for item in playerScores.find({'username': username}):
        wins = item['wins']
        losses = item['losses']
        query = {'username': username, 'wins': wins, 'losses': losses}
        if comp == 3 and player == 1:
            update = {'username': username, 'wins': wins + 1, 'losses': losses }
        elif player == 3 and comp == 1:
            update = {'username': username, 'wins': wins, 'losses': losses + 1 }
        elif player > comp:
            update = {'username': username, 'wins': wins + 1, 'losses': losses }
        elif comp > player:
            update = {'username': username, 'wins': wins, 'losses': losses + 1 }
        else:
            update = {'username': username, 'wins': wins, 'losses': losses }
        playerScores.update_one(query, {'$set': update})  # update_one needs an update operator
    return "success"
app.run(port=3000, debug=True)
|
from selenium import webdriver
import time
# open a Chrome window (webdriver)
driver = webdriver.Chrome("./chromedriver.exe")
# go to Google Maps
driver.get("https://www.google.com/maps/")
# type "카페" (cafe) into the search box
searchbox = driver.find_element_by_css_selector("input#searchboxinput")
searchbox.send_keys("카페")
# click the search button
searchbutton = driver.find_element_by_css_selector("button#searchbox-searchbutton")
searchbutton.click()
#5. check the search results
# time.sleep(1)
# # container dl.lsnx_det
# # stores = html.select("dl.lsnx_det")
# stores = driver.find_elements_by_css_selector("dl.lsnx_det")
#
# for s in stores:
# name = s.find_element_by_css_selector("dt > a").text
# addr = s.find_element_by_css_selector("dd.addr").text
#
# try:
# tel = s.find_element_by_css_selector("dd.tel").text
# except:
#         tel = "no phone number"
#     # store name: dt > a
#     # store address: dd.addr
#     # phone number: dd.tel
#
# print(name)
# print(addr)
# print(tel)
#
#     # page buttons: div.paginate > *
# page_bar = driver.find_elements_by_css_selector("div.paginate > *")
#
# try:
# if n%5 != 0:
# page_bar[n%5+1].click()
# else:
# page_bar[6].click()
# except:
#         print("collection finished")
# break
|
from flask import Flask
from flask import request
from namegen import NameGenerator
app = Flask(__name__)
namegen = NameGenerator()
@app.route('/post', methods=['POST'])
def get_name():
sourceCode = request.form['source']
print(sourceCode)
name, attention = namegen.get_name_and_attention_for(sourceCode)
return str({'name': name, 'attention': attention})
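# Example of exercising this endpoint once the app is running (e.g. with `flask run`; the default
# port 5000 is an assumption, adjust to your setup). The form field name "source" matches the
# handler above:
#
#   curl -X POST -d "source=def add(a, b): return a + b" http://127.0.0.1:5000/post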
|
"""
интерполяция функции на таблице значений с помощью полинома Ньютона
(с учетом экстраполяции)
"""
from math import sin, pi, factorial, cos, exp
def f(x):
return exp(x)
def generate_table(start, end, step):
table = []
table.append([])
table.append([])
x = start
while(x < end + step):
#print(x)
table[0].append(x)
table[1].append(f(x))
x += step
return table
def get_table(filename):
infile = open(filename, 'r')
data = []
data.append([])
data.append([])
for line in infile:
if line:
a, b = map(float, line.split())
data[0].append(a)
data[1].append(b)
infile.close()
return data
def getCoefPolynomByConfiguration(conf, n):
newconf = []
for i in range(0, len(conf[0]) - n):
#print(conf[1][i+1], conf[1][i])
tmp = (conf[1][i+1] - conf[1][i]) / (conf[0][i+n] - conf[0][i] )
newconf.append(tmp)
return newconf
def some_combinations(iterable, r):
# combinations('ABCD', 2) --> AB AC AD BC BD CD
# combinations(range(4), 3) --> 012 013 023 123
pool = tuple(iterable)
n = len(pool)
if r > n:
return
indices = list(range(r))
yield tuple(pool[i] for i in indices)
while True:
for i in reversed(range(r)):
if indices[i] != i + n - r:
break
else:
return
indices[i] += 1
for j in range(i+1, r):
indices[j] = indices[j-1] + 1
yield tuple(pool[i] for i in indices)
def GetCombineMult(arr, m):
import itertools as it
res = 0
    # pair = it.combinations(arr, m) # the standard library function for generating all the needed combinations
pair = some_combinations(arr, m)
for i in pair:
flag = False
tmp = 1
for j in i:
tmp *= j
res += tmp
return res
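# Quick illustration (added note): GetCombineMult computes the elementary symmetric sum of
# degree m, e.g. GetCombineMult([x1, x2, x3], 2) == x1*x2 + x1*x3 + x2*x3.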
def deverative(x, n, table):
def binpoisk(x):
a = 0
b = len(table[0])
while(b - a > 1):
m = int((a + b) / 2)
if table[0][m] > x:
b = m
elif table[0][m] == x:
return m
else:
a = m
return a
def findconf():
conf = []
conf.append([])
conf.append([])
mid = binpoisk(x)
left = max(0, mid - int(n/2))
right = min(len(table[0]) - 1, left + n)
left = max(0, right - n)
for i in range(left, right + 1):
conf[0].append(table[0][i])
conf[1].append(table[1][i])
return conf
conf = findconf()
y = 0
for i in range(1, n + 1):
coef = getCoefPolynomByConfiguration(conf, i)
arr = []
for j in range(0, i):
t = x - conf[0][j]
arr.append(t)
tmp = GetCombineMult(arr, i - 1)
y += tmp * coef[0]
j = 0
for i in coef:
conf[1][j] = i
j += 1
return y
n = int(input("Введите количество узлов: "))
if n <= 0:
print("error")
else:
x = float(input("x = "))
table = generate_table(0, 5, 0.5)
    #table = get_table("der_table.txt") # use this if the table should be loaded from a file
    y = deverative(x, n - 1, table)
    print("Result ", y)
    print("Exact value ", f(x)) # only meaningful for the exponential test function
    print("Absolute error ", abs(y - f(x)))
|
import turtle, math
def square(t,length):
angle = 90 #degrees in square
for i in range(4):
t.fd(length)
t.lt(angle)
def polygon(t,length,sides):
angle = 360 / sides
for i in range(sides):
t.fd(length)
t.lt(angle)
def circle(t,radius):
sides = 100
angle = 360 / sides
circumference = 2 * math.pi * radius
length = circumference / sides
for i in range(sides):
t.fd(length)
t.lt(angle)
bob = turtle.Turtle()
def draw(t, length, n):
if n == 0:
return
angle = 50
t.fd(length*n)
t.lt(angle)
draw(t, length, n-1)
t.rt(2*angle)
draw(t, length, n-1)
t.lt(angle)
t.bk(length*n)
#execute the function
draw(bob,30,5)
turtle.mainloop()
|
class Student:
def __init__(self, name, school):
self.name = name
self.school = school
self.mark = []
def average(self):
return sum(self.mark) / len(self.mark)
class WorkingStudent(Student):
def __init__(self, name, school, salary):
super(WorkingStudent, self).__init__(name, school)
self.salary = salary
@property
def weekly_salary(self):
return self.salary * 40
melis = WorkingStudent('Melis', 'IAAU', 55)
print(melis.salary)
melis.mark.append(77)
melis.mark.append(87)
print(f'average of your marks {melis.average()} ')
print(melis.weekly_salary)
|
#!/usr/bin/env python
"""
npy.py
=======
Demonstrates sending and receiving NPY arrays and dict/json metadata over TCP socket.
To test serialization/deserialization::
npy.py --test
In one session start the server::
npy.py --server
In the other run the client::
npy.py --client
The client sends an array to the server where some processing
is done on the array after which the array is sent back to the client
together with some metadata.
* https://docs.python.org/3/howto/sockets.html
"""
import os, sys, logging, argparse, json, datetime, operator, functools
import io, struct, binascii as ba
import socket
import numpy as np
mul_ = lambda _:functools.reduce(operator.mul, _)
log = logging.getLogger(__name__)
x_ = lambda _:ba.hexlify(_)
HEADER_FORMAT, HEADER_SIZE, HEADER_BYTES = ">LLLL", 4, 16 # four big-endian unsigned long which is 4*4 bytes
def npy_serialize(arr):
"""
:param arr: ndarray
:return buf: bytes
"""
fd = io.BytesIO()
np.save( fd, arr) # write ndarray to stream
buf = fd.getbuffer()
assert type(buf) is memoryview
log.info("serialized arr %r into memoryview buf of length %d " % (arr.shape, len(buf)))
return buf
def npy_deserialize(buf):
"""
:param buf:
:return arr:
"""
fd = io.BytesIO(buf)
arr = np.load(fd)
return arr
def meta_serialize(meta):
buf = json.dumps(meta).encode()
return buf
def meta_deserialize(buf):
fd = io.BytesIO(buf)
meta = json.load(fd)
return meta
def pack_prefix(*sizes):
"""
:param arr_bytes: uint
:param meta_bytes: uint
:return bytes:
"""
assert len(sizes) == HEADER_SIZE
return struct.pack(HEADER_FORMAT, *sizes)
def unpack_prefix(data):
sizes = struct.unpack(HEADER_FORMAT, data)
assert len(sizes) == HEADER_SIZE
return sizes
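# Round-trip sanity check (illustrative): the header is four big-endian uint32 values,
# so len(pack_prefix(1, 2, 3, 0)) == HEADER_BYTES == 16 and
# unpack_prefix(pack_prefix(1, 2, 3, 0)) == (1, 2, 3, 0).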
def serialize_with_header(arr, meta):
"""
:param arr: numpy array
:param meta: metadata dict
:return buf: memoryview containing transport header followed by serialized NPY and metadata
"""
fd = io.BytesIO()
prefix = pack_prefix(0,0,0,0) # placeholder zeroes in header
fd.write(prefix)
np.save( fd, arr) # write ndarray to stream
hdr_arr_bytes = fd.tell() - len(prefix)
fd.write(meta_serialize(meta))
meta_bytes = fd.tell() - hdr_arr_bytes - len(prefix)
fd.seek(0) # rewind and fill in the header with the sizes
arr_bytes = arr.nbytes
hdr_bytes = hdr_arr_bytes - arr_bytes
fd.write(pack_prefix(hdr_bytes,arr_bytes,meta_bytes,0))
buf = fd.getbuffer()
assert type(buf) is memoryview
log.info("serialized arr %r into memoryview len(buf) %d hdr_bytes %d arr_bytes %d meta_bytes %d " % (arr.shape, len(buf), hdr_bytes, arr_bytes, meta_bytes ))
return buf
def deserialize_with_header(buf):
"""
:param fd: io.BytesIO stream
:return arr,meta:
    Note that arr_bytes and meta_bytes are not actually needed when we are sure the buffer
    is complete, unlike when reading from a network socket, where there is
    no guarantee of completeness of the bytes received so far.
"""
fd = io.BytesIO(buf)
prefix = fd.read(HEADER_BYTES)
hdr_bytes,arr_bytes,meta_bytes,zero = unpack_prefix(prefix)
assert zero == 0
arr = np.load(fd) # fortunately np.load ignores the metadata that follows the array
meta = json.load(fd)
log.info("hdr_bytes:%d arr_bytes:%d meta_bytes:%d deserialized:%r" % (hdr_bytes,arr_bytes,meta_bytes,arr.shape))
return arr, meta
def test_serialize_deserialize(arr0, meta0, dump=False):
buf0 = npy_serialize(arr0)
if dump:
print(x_(buf0))
pass
arr1 = npy_deserialize(buf0)
assert np.all(arr0 == arr1)
meta1 = meta0
buf1 = serialize_with_header(arr1,meta1)
if dump:
print(x_(buf1))
pass
arr2,meta2 = deserialize_with_header(buf1)
assert np.all(arr0 == arr2)
print("meta2:%s" % repr(meta2))
log.info("buf0:%d buf1:%d" % (len(buf0),len(buf1)))
def recv_exactly(sock, n):
"""
https://eli.thegreenplace.net/2011/08/02/length-prefix-framing-for-protocol-buffers
Raise RuntimeError if the connection closed before n bytes were read.
"""
buf = b""
while n > 0:
data = sock.recv(n)
if data == b'':
raise RuntimeError('unexpected connection close')
buf += data
n -= len(data)
return buf
def make_array(dtype="float32", shape="10,4"):
log.info("make_array %s %s " % (dtype, shape))
dtype = getattr(np, dtype, np.float32)
shape = tuple(map(int,shape.split(",")))
size = mul_(shape)
arr = np.arange(size, dtype=dtype).reshape(shape)
return arr
def npy_send(sock, arr, meta):
buf = serialize_with_header(arr, meta)
sock.sendall(buf)
def npy_recv(sock):
    hdr_bytes, arr_bytes, meta_bytes, zero = unpack_prefix(recv_exactly(sock, HEADER_BYTES))
assert zero == 0
arr = npy_deserialize(recv_exactly(sock, hdr_bytes+arr_bytes))
meta = meta_deserialize(recv_exactly(sock, meta_bytes))
log.info("hdr_bytes:%d arr_bytes:%d meta_bytes:%d arr:%s " % (hdr_bytes,arr_bytes,meta_bytes,repr(arr.shape)))
return arr, meta
def server(args):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.bind(args.addr)
sock.listen(1) # 1:connect requests
while True:
conn, addr = sock.accept()
arr, meta = npy_recv(conn)
arr *= 10 ## server does some processing on the array
meta["stamp"] = datetime.datetime.now().strftime("%c")
npy_send(conn, arr, meta)
pass
conn.close()
def client(args, arr, meta):
log.info("client")
log.info("\n%s"%arr)
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect(args.addr)
npy_send(sock, arr, meta)
arr2,meta2 = npy_recv(sock)
sock.close()
print("arr2")
print(arr2)
print("meta2")
print(meta2)
def demo():
arr = np.arange(100, dtype=np.float32)
meta = dict(hello="world",src="npy.py")
path = os.path.expandvars("/tmp/$USER/opticks/demo.npy")
fold = os.path.dirname(path)
if not os.path.isdir(fold):
os.makedirs(fold)
pass
log.info("saving to %s " % path )
np.save(path,arr)
json.dump(meta, open(path+".json","w"))
def parse_args(doc):
parser = argparse.ArgumentParser(doc)
parser.add_argument("-p","--path", default=None)
parser.add_argument("-s","--server", action="store_true", default=False)
parser.add_argument("-c","--client", action="store_true", default=False)
parser.add_argument("-t","--test", action="store_true", default=False)
parser.add_argument("--dtype", default="float32")
parser.add_argument("--shape", default="10,4")
parser.add_argument("-d","--demo", action="store_true", default=False)
args = parser.parse_args()
args.metapath = None
if not args.path is None:
args.metapath = args.path + ".json"
if not os.path.exists(args.metapath):
args.metapath = None
pass
pass
port = os.environ.get("TCP_PORT","15006")
host = os.environ.get("TCP_HOST", "127.0.0.1" )
if host == "hostname":
host = socket.gethostname()
pass
addr = (host, int(port))
args.addr = addr
logging.basicConfig(level=logging.INFO)
log.info("addr: %s" % repr(addr))
return args
if __name__ == '__main__':
args = parse_args(__doc__)
arr0 = make_array(args.dtype, args.shape) if args.path is None else np.load(args.path)
meta0 = dict(red=1,green=2,blue=3) if args.metapath is None else json.load(open(args.metapath))
if args.server:
server(args)
elif args.client:
client(args, arr0, meta0)
elif args.test:
test_serialize_deserialize(arr0, meta0)
elif args.demo:
demo()
else:
pass
pass
|
from django.db import models
from django.utils.timezone import now # current time
from django.core.exceptions import ObjectDoesNotExist
from django.contrib.auth.models import User
GENDER = {
('m','man'),
('w','woman'),
('s','secret')
}
class BlogUser(models.Model):
user = models.OneToOneField(User,on_delete=models.CASCADE,default=None, null=True)
nickname = models.CharField('昵称', max_length=100, blank=True)
mugshot = models.ImageField('头像', upload_to='upload/mugshots', blank=True)
lastLoginDate = models.DateTimeField('最后一次登陆', default=now)
icon = models.IntegerField('硬币数量',default=0)
    # lastLoginIP = models.GenericIPAddressField('last login IP address')
telephone = models.CharField('电话号码',max_length = 15,default='')
gender = models.CharField('性别',max_length=1,choices=GENDER,default='s')
def __str__(self):
return self.user.username
|
"""
剑指 Offer 52. 两个链表的第一个公共节点
输入两个链表,找出它们的第一个公共节点。
"""
class ListNode:
def __init__(self, x):
self.val = x
self.next = None
"""
思路其实很简单,大概是有两个方法,先说第一种,就是让两个链表尾端对齐,然后从头向后一起捋,就能找到了,大概时间复杂度是2n,其实就是n。
"""
def getIntersectionNode(headA: ListNode, headB: ListNode) -> ListNode:
ahead, bhead = headA, headB
    while ahead is not bhead:  # compare the nodes themselves, not their values
ahead = ahead.next if ahead else headB
bhead = bhead.next if bhead else headA
return ahead
"""
第二种就是把Y字变成8字,相当于让两个人在不同长度的两个圈内跑,一直跑到两个人都在出口,提供个思路,代码可以之后自己实现。
"""
|
# -*- coding: utf-8 -*-
BASE_INDENT_SIZE = 4
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import math
import sklearn.preprocessing as sk
import seaborn as sns
from sklearn import metrics
from sklearn.feature_selection import VarianceThreshold
from sklearn.model_selection import train_test_split
from torch.utils.data.sampler import WeightedRandomSampler
from sklearn.metrics import roc_auc_score
from sklearn.metrics import accuracy_score
from sklearn.metrics import average_precision_score
import random
from random import randint
from sklearn.model_selection import StratifiedKFold
import itertools
from itertools import cycle
from torch.autograd import Function
import os
from plot_loss_acc import plot_learning_curve
from aitl import FX, MTL, GradReverse, Discriminator
#######################################################
# DRUG, SAVE, LOAD #
#######################################################
## Note: For Paclitaxel PDX + Patient, use DRUG = 'Paclitaxel/PDXPatientCombined' ##
DRUG = 'Docetaxel'
MAX_ITER = 2
MODE = "cleaning_up_aitl_test"
GPU = True
if GPU:
device = "cuda"
else:
device = "cpu"
SAVE_RESULTS_TO = "./results/" + DRUG + "/" + MODE + "/"
SAVE_TRACE_TO = "./results/" + DRUG + "/" + MODE + "/trace/"
TARGET_DIR = 'target_3_folds'
SOURCE_DIR = 'source_3_folds'
LOAD_DATA_FROM = '../../cancer-genomics-data-preprocessing/data/split/' + DRUG + '/stratified/'
torch.manual_seed(42)
dirName = SAVE_RESULTS_TO + 'model/'
if not os.path.exists(dirName):
os.makedirs(dirName)
print("Directory ", dirName, " Created ")
else:
print("Directory ", dirName, " already exists")
dirName = SAVE_RESULTS_TO + 'test/'
if not os.path.exists(dirName):
os.makedirs(dirName)
print("Directory ", dirName, " Created ")
else:
print("Directory ", dirName, " already exists")
#######################################################
# FUNCTIONS #
#######################################################
def predict_label(XTestPatients, gen_model, map_model):
"""
Inputs:
:param XTestPatients - X_target_test
:param gen_model - current FX model
:param map_model - current MTL model
Output:
- Predicted (binary) labels (Y for input target test data)
"""
gen_model.eval()
gen_model.to(device)
map_model.eval()
map_model.to(device)
XTestPatients = XTestPatients.to(device)
F_xt_test = gen_model(XTestPatients)
_, yhatt_test = map_model(None, F_xt_test)
return yhatt_test
def evaluate_model(XTestPatients, YTestPatients, gen_model, map_model):
"""
Inputs:
:param XTestPatients - patient test data
:param YTestPatients - true class labels (binary) for patient test data
:param path_to_models - path to the saved models from training
Outputs:
- test loss
- test accuracy (AUC)
"""
XTestPatients = XTestPatients.to(device)
YTestPatients = YTestPatients.to(device)
y_predicted = predict_label(XTestPatients, gen_model, map_model)
# #LOSSES
C_loss_eval = torch.nn.BCELoss()
closs_test = C_loss_eval(y_predicted, YTestPatients)
if device == "cuda":
YTestPatients = YTestPatients.to("cpu")
yt_true_test = YTestPatients.view(-1,1)
yt_true_test = yt_true_test.cpu()
y_predicted = y_predicted.cpu()
AUC_test = roc_auc_score(yt_true_test.detach().numpy(), y_predicted.detach().numpy())
return closs_test, AUC_test
def roc_auc_score_trainval(y_true, y_predicted):
# To handle the case where we only have training samples of one class
# in our mini-batch when training since roc_auc_score 'breaks' when
# there is only one class present in y_true:
# ValueError: Only one class present in y_true. ROC AUC score is not defined in that case.
# The following code is taken from
# https://stackoverflow.com/questions/45139163/roc-auc-score-only-one-class-present-in-y-true?rq=1 #
if len(np.unique(y_true)) == 1:
return accuracy_score(y_true, np.rint(y_predicted))
return roc_auc_score(y_true, y_predicted)
#######################################################
# Hyper-Parameter Lists #
#######################################################
ls_splits = ['split1', 'split2', 'split3']
ls_ftsplits = ['ftsplit1', 'ftsplit2', 'ftsplit3']
ls_mb_size = [ {'mbS': 8, 'mbT': 16}, {'mbS': 8, 'mbT': 32}, \
{'mbS': 16, 'mbT': 8}, {'mbS': 16, 'mbT': 16}, {'mbS': 16, 'mbT': 32}, \
{'mbS': 32, 'mbT': 16}, {'mbS': 32, 'mbT': 32} ]
ls_h_dim = [1024, 512, 256, 128, 64, 32, 16]
ls_z_dim = [1024, 512, 256, 128, 64, 32, 16]
ls_lr = [0.1, 0.05, 0.01, 0.005, 0.001, 0.0005, 0.0001, 0.00005, 0.00001]
ls_epoch = [10, 15, 20, 25, 30, 35, 40, 45, 50]
ls_lam = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]
ls_dropout_gen = [0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.45, 0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85]
ls_dropout_mtl = ls_dropout_gen
ls_dropout_dg = ls_dropout_gen
ls_dropout_ds = ls_dropout_gen
ls_dropout_dr = ls_dropout_gen
skf_trainval = StratifiedKFold(n_splits=3, random_state=42)
skf_train = StratifiedKFold(n_splits=3, random_state=42)
#######################################################
# AITL Model Training Starts Here #
#######################################################
for index, mbsize in enumerate(ls_mb_size):
mbS = mbsize['mbS']
mbT = mbsize['mbT']
# random.seed(42)
for iters in range(MAX_ITER):
print("\n\n\nITERATION # {}".format(iters))
print("-----------------------------\n\n")
# Randomly selecting hyper-parameter values #
hdm = random.choice(ls_h_dim)
zdm = random.choice(ls_z_dim)
lrs = random.choice(ls_lr)
epch = random.choice(ls_epoch)
lambd1 = random.choice(ls_lam)
lambd2 = random.choice(ls_lam)
drop_gen = random.choice(ls_dropout_gen)
drop_mtl = random.choice(ls_dropout_mtl)
drop_dg = random.choice(ls_dropout_dg)
drop_ds = random.choice(ls_dropout_ds)
drop_dr = random.choice(ls_dropout_dr)
h_dim = hdm
z_dim = zdm
lr = lrs
epoch = epch
lam1 = lambd1
lam2 = lambd2
        dropout_gen = drop_gen
        dropout_mtl = drop_mtl
        dropout_dg = drop_dg
        dropout_ds = drop_ds
        dropout_dr = drop_dr
print("-- Parameters used: --")
print("h_dim: {}\nz_dim: {}\nlr: {}\nepoch: {}\nlambda1: {}\nlambda2: {}".format(h_dim,
z_dim,
lr,
epoch,
lam1,
lam2))
print("mbS: {}\nmbT: {}\ndropout_gen: {}\ndropout_mtl: {}\ndropout_dg: {}\ndropout_ds: {}\ndropout_dr: {}\n".format(mbS,
mbT,
dropout_gen,
dropout_mtl,
dropout_dg,
dropout_ds,
dropout_dr))
batch_sizes = 'mbS' + str(mbS) + '_mbT' + str(mbT)
test_results_name = 'hdim' + str(h_dim) + '_zdim' + str(z_dim) + '_lr' + str(lr) + '_epoch' + str(epoch) + '_lambda1' + str(lam1) + '_lambda2' + str(lam2) + '_dropouts' + str(dropout_gen) \
+ '_' + str(dropout_mtl) + '_' + str(dropout_dg) + '_' + str(dropout_ds) + '_' + str(dropout_dr) + '_mbS' + str(mbS) + '_mbT' + str(mbT) + '.tsv'
test_results_dir = SAVE_RESULTS_TO + 'test/' + batch_sizes + '/'
dirName = test_results_dir
if not os.path.exists(dirName):
os.makedirs(dirName)
print("Directory ", dirName, " Created ")
else:
print("Directory ", dirName, " already exists")
test_results_file = os.path.join(test_results_dir, test_results_name)
if os.path.isfile(test_results_file):
os.remove(test_results_file)
with open(test_results_file, 'a') as f:
f.write("-- Parameters --\n\n")
f.write("h_dim: {}\nz_dim: {}\nlr: {}\nepoch: {}\nlambda1: {}\nlambda2: {}\nmbS: {}\nmbT: {}\n".format(h_dim,
z_dim,
lr,
epoch,
lam1,
lam2,
mbS,
mbT))
f.write("dropout_gen: {}\ndropout_mtl: {}\ndropout_dg: {}\ndropout_ds: {}\ndropout_dr: {}\n\n".format(dropout_gen,
dropout_mtl,
dropout_dg,
dropout_ds,
dropout_dr))
AUCtest_splits_total = []
for split in ls_splits: # for each split
print("\n\nReading data for {} ...\n".format(split))
# Loading Source Data #
XTrainGDSC = pd.read_csv(LOAD_DATA_FROM + SOURCE_DIR + '/' + split + '/X_train_source.tsv',
sep='\t', index_col=0, decimal='.')
YTrainGDSC = pd.read_csv(LOAD_DATA_FROM + SOURCE_DIR + '/' + split + '/Y_logIC50train_source.tsv',
sep='\t', index_col=0, decimal='.')
XValGDSC = pd.read_csv(LOAD_DATA_FROM + SOURCE_DIR + '/' + split + '/X_val_source.tsv',
sep='\t', index_col=0, decimal='.')
YValGDSC = pd.read_csv(LOAD_DATA_FROM + SOURCE_DIR + '/' + split + '/Y_logIC50val_source.tsv',
sep='\t', index_col=0, decimal='.')
# Loading Target (Patient) Data #
XTestPatients = pd.read_csv(LOAD_DATA_FROM + TARGET_DIR + '/' + split + '/X_test_target.tsv',
sep='\t', index_col=0, decimal='.')
YTestPatients = pd.read_csv(LOAD_DATA_FROM + TARGET_DIR + '/' + split + '/Y_test_target.tsv',
sep='\t', index_col=0, decimal='.')
for ftsplit in ls_ftsplits: # target train/val splits for finetuning
model_params = 'hdim' + str(h_dim) + '_zdim' + str(z_dim) + '_lr' + str(lr) + '_epoch' + str(epoch) + '_lamb1' + str(lam1) + '_lamb2' + str(lam2) \
+ '_dropouts' + str(dropout_gen) + '_' + str(dropout_mtl) + '_' + str(dropout_dg) + '_' + str(dropout_ds) + '_' + str(dropout_dr) \
+ '_mbS' + str(mbS) + '_mbT' + str(mbT)
dirName = SAVE_TRACE_TO + batch_sizes + '/' + model_params + '/'
if not os.path.exists(dirName):
os.makedirs(dirName)
print("Directory ", dirName, " Created ")
else:
print("Directory ", dirName, " already exists")
trace_file_tsv = os.path.join(dirName, split + '_' + ftsplit + '_trace.tsv')
trace_file_txt = os.path.join(dirName, split + '_' + ftsplit + '_trace.txt')
if os.path.isfile(trace_file_tsv):
os.remove(trace_file_tsv)
if os.path.isfile(trace_file_txt):
os.remove(trace_file_txt)
with open(trace_file_txt, 'a') as f:
f.write("-- Parameters --\n\n")
f.write("h_dim: {}\nz_dim: {}\nlr: {}\nepoch: {}\nlambda1: {}\nlambda2: {}\nmbS: {}\nmbT: {}\n".format(h_dim,
z_dim,
lr,
epoch,
lam1,
lam2,
mbS,
mbT))
f.write("dropout_gen: {}\ndropout_mtl: {}\ndropout_dg: {}\ndropout_ds: {}\ndropout_dr: {}\n".format(dropout_gen,
dropout_mtl,
dropout_dg,
dropout_ds,
dropout_dr))
with open(trace_file_tsv, 'a') as f:
f.write("-- Parameters --\n\n")
f.write("h_dim: {}\nz_dim: {}\nlr: {}\nepoch: {}\nlambda1: {}\nlambda2: {}\nmbS: {}\nmbT: {}\n".format(h_dim,
z_dim,
lr,
epoch,
lam1,
lam2,
mbS,
mbT))
f.write("dropout_gen: {}\ndropout_mtl: {}\ndropout_dg: {}\ndropout_ds: {}\ndropout_dr: {}\n".format(dropout_gen,
dropout_mtl,
dropout_dg,
dropout_ds,
dropout_dr))
f.write("\n\n#\n")
# Dataframe header #
f.write("epoch\ttrain_loss1\ttrain_loss2\ttrain_losstotal\ttrain_regloss\ttrain_closs\ttrain_DGloss\ttrain_DRloss\ttrain_DSloss\ttrain_AUC\ttrain_DGauc\ttrain_DRauc\ttrain_DSauc\tval_loss1\tval_loss2\tval_losstotal\tval_regloss\tval_closs\tval_DGloss\tval_DRloss\tval_DSloss\tval_AUC\tval_DGauc\tval_DRauc\tval_DSauc\n")
print("\n\n-- Reading data for {} of {} ... --".format(ftsplit, split))
XTrainPatients = pd.read_csv(LOAD_DATA_FROM + TARGET_DIR + '/' + split + '/' + ftsplit + '/X_train_ft_target.tsv',
sep='\t', index_col=0, decimal='.')
YTrainPatients = pd.read_csv(LOAD_DATA_FROM + TARGET_DIR + '/' + split + '/' + ftsplit + '/Y_train_ft_target.tsv',
sep='\t', index_col=0, decimal='.')
XValPatients = pd.read_csv(LOAD_DATA_FROM + TARGET_DIR + '/' + split + '/' + ftsplit + '/X_val_ft_target.tsv',
sep='\t', index_col=0, decimal='.')
YValPatients = pd.read_csv(LOAD_DATA_FROM + TARGET_DIR + '/' + split + '/' + ftsplit + '/Y_val_ft_target.tsv',
sep='\t', index_col=0, decimal='.')
print("Data successfully read!")
# Temporarily combine Source training data and Target training data
# to fit standard scaler on gene expression of combined training data.
# Then, apply fitted scaler to (and transform) Source validation,
# Target validation, and Target test (e.g. normalize validation and test
# data of source and target with respect to source and target train)
XTrainCombined = pd.concat([XTrainGDSC, XTrainPatients])
scalerTrain = sk.StandardScaler()
scalerTrain.fit(XTrainCombined.values)
XTrainGDSC_N = scalerTrain.transform(XTrainGDSC.values)
XTrainPatients_N = scalerTrain.transform(XTrainPatients.values)
XValGDSC_N = scalerTrain.transform(XValGDSC.values)
XValPatients_N = scalerTrain.transform(XValPatients.values)
XTestPatients_N = scalerTrain.transform(XTestPatients.values)
TXValGDSC_N = torch.FloatTensor(XValGDSC_N)
TXValPatients_N = torch.FloatTensor(XValPatients_N)
TYValGDSC = torch.FloatTensor(YValGDSC.values)
TYValPatients = torch.FloatTensor(YValPatients.values.astype(int))
TYValPatients = TYValPatients.to(device)
TXValGDSC_N = TXValGDSC_N.to(device)
TXValPatients_N = TXValPatients_N.to(device)
TYValGDSC = TYValGDSC.to(device)
TXTestPatients_N = torch.FloatTensor(XTestPatients_N)
TYTestPatients = torch.FloatTensor(YTestPatients.values.astype(int))
TXTestPatients_N = TXTestPatients_N.to(device)
TYTestPatients = TYTestPatients.to(device)
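# Class-imbalance handling: the block below derives inverse-frequency weights per
# target class and feeds them to a WeightedRandomSampler so that each target
# mini-batch drawn by PLoader is roughly class-balanced.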
class_sample_count = np.array([len(np.where(YTrainPatients.values==t)[0]) for t in np.unique(YTrainPatients.values)])
print("\nclass_sample_count: {}\n".format(class_sample_count))
weight = 1. / class_sample_count
samples_weight = np.array([weight[t] for t in YTrainPatients.values])
# samples_weight = np.array(weight[t] for t in YTrainPatients.values)
samples_weight = torch.from_numpy(samples_weight)
samples_weight = samples_weight.reshape(-1) # Flatten out the weights so it's a 1-D tensor of weights
# print("\nsamples_weight: {}\n".format(samples_weight))
sampler = WeightedRandomSampler(samples_weight.type('torch.DoubleTensor'), len(samples_weight), replacement=True)
# Apply sampler on XTrainPatients_N
# print("\n\nsampler:\n{}\n\n".format(list(sampler)))
PDataset = torch.utils.data.TensorDataset(torch.FloatTensor(XTrainPatients_N), torch.FloatTensor(YTrainPatients.values.astype(int)))
# print("PDataset: {}\n".format(PDataset.tensors))
PLoader = torch.utils.data.DataLoader(dataset = PDataset, batch_size= mbT, shuffle=False, sampler = sampler)
# PLoader = torch.utils.data.DataLoader(dataset = PDataset, batch_size= mbT, shuffle=False)
CDataset = torch.utils.data.TensorDataset(torch.FloatTensor(XTrainGDSC_N), torch.FloatTensor(YTrainGDSC.values))
CLoader = torch.utils.data.DataLoader(dataset = CDataset, batch_size= mbS, shuffle=True)
n_sample, IE_dim = XTrainGDSC_N.shape
AUCvals = []
Gen = FX(dropout_gen, IE_dim, h_dim, z_dim)
Map = MTL(dropout_mtl, h_dim, z_dim)
DG = Discriminator(dropout_dg, h_dim, z_dim)
DS = Discriminator(dropout_ds, h_dim, z_dim)
DR = Discriminator(dropout_dr, h_dim, z_dim)
Gen.to(device)
Map.to(device)
DG.to(device)
DS.to(device)
DR.to(device)
optimizer_2 = torch.optim.Adagrad(itertools.chain(Gen.parameters(), Map.parameters(), DG.parameters(), DS.parameters(), DR.parameters()),
lr = lr)
C_loss = torch.nn.BCELoss()
R_loss = torch.nn.MSELoss()
l1 = []
l2 = []
regs = []
classif = []
aucs = []
L = [] # total loss
DG_losstr = []
DR_losstr = []
DS_losstr = []
DG_auctr = []
DR_auctr = []
DS_auctr = []
AUCtests = []
Losstest = []
for it in range(epoch):
epoch_cost1 = 0
epoch_cost2 = 0
epoch_cost1ls = []
epoch_cost2ls = []
epoch_auc = []
epoch_reg = 0
epoch_regls = []
epoch_classifls = []
epoch_DGloss = []
epoch_DRloss = []
epoch_DSloss = []
epoch_DGauc = []
epoch_DRauc = []
epoch_DSauc = []
AUCvals = []
loss2_vals = []
loss1_vals = []
totloss_vals = []
DG_lossval = []
DR_lossval = []
DS_lossval = []
reg_lossval = []
classif_lossval = []
DG_aucval = []
DR_aucval = []
DS_aucval = []
epoch_losstotal = 0
epoch_loss = []
for i, data in enumerate(zip(CLoader, cycle(PLoader))):
DataS = data[0]
DataT = data[1]
## Sending data to device = cuda/cpu
xs = DataS[0].to(device)
ys = DataS[1].view(-1,1).to(device)
xt = DataT[0].to(device)
yt = DataT[1].view(-1,1).to(device)
# Skip to next set of training batch if any of xs or xt has less
# than a certain threshold of training examples. Let such threshold
# be 5 for now
if xs.size()[0] < 5 or xt.size()[0] < 5:
continue
Gen.train()
Map.train()
DG.train()
DS.train()
DR.train()
F_xs = Gen(xs)
F_xt = Gen(xt)
yhat_xs, yhat_xt = Map(F_xs, F_xt)
_, yhat_xsB0 = Map(None, F_xs)
closs = C_loss(yhat_xt, yt)
rloss = R_loss(yhat_xs, ys)
loss1 = closs + rloss
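# Adversarial branch: binarize the source predictions (threshold at their mean),
# label every source sample 1 and every target sample 0, and concatenate the
# embeddings. DG discriminates source vs. target on all embeddings, while DS and
# DR do the same restricted to the Yst == 1 and Yst == 0 subsets respectively,
# so domain alignment is also enforced within each response group.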
t = torch.Tensor([torch.mean(yhat_xsB0)]).to(device)
yhat_sxB = (yhat_xsB0 > t).float()
Labels = torch.ones(F_xs.size(0), 1)
Labelt = torch.zeros(F_xt.size(0), 1)
Lst = torch.cat([Labels, Labelt],0).to(device)
Xst = torch.cat([F_xs, F_xt], 0).to(device)
Yst = torch.cat([yhat_sxB, yt],0).to(device)
try:
locR = (Yst==0).nonzero()[:, 0] # Proper way to obtain location indices
except ValueError:
print("Error in 'locR = (Yst==0).nonzero()[:, 0]'")
print("(Yst==0).nonzero(): {}\n\n".format((Yst==0).nonzero()))
try:
locS = (Yst).nonzero()[:, 0] # Proper way to obtain location indices
except ValueError:
print("Error in 'locS = (Yst).nonzero()[:, 0]'")
print("(Yst).nonzero(): {}\n\n".format((Yst).nonzero()))
XDS = Xst[locS].to(device)
LabDS = Lst[locS].to(device)
XDR = Xst[locR].to(device)
LabDR = Lst[locR].to(device)
yhat_DG = DG(Xst)
yhat_DS = DS(XDS)
yhat_DR = DR(XDR)
DG_loss = C_loss(yhat_DG, Lst)
DS_loss = C_loss(yhat_DS, LabDS)
DR_loss = C_loss(yhat_DR, LabDR)
loss2 = lam1*DG_loss + lam2*DS_loss + lam2*DR_loss
Loss = loss1 + loss2
optimizer_2.zero_grad()
Loss.backward()
optimizer_2.step()
epoch_cost1ls.append(loss1)
epoch_cost2ls.append(loss2)
epoch_regls.append(rloss)
epoch_classifls.append(closs)
epoch_loss.append(Loss)
epoch_DGloss.append(DG_loss)
epoch_DSloss.append(DS_loss)
epoch_DRloss.append(DR_loss)
y_true = yt.view(-1,1)
y_pred = yhat_xt
y_true = y_true.cpu()
y_pred = y_pred.cpu()
AUC = roc_auc_score_trainval(y_true.detach().numpy(), y_pred.detach().numpy())
epoch_auc.append(AUC)
y_trueDG = Lst.view(-1,1)
y_predDG = yhat_DG
y_trueDR = LabDR.view(-1,1)
y_predDR = yhat_DR
y_trueDS = LabDS.view(-1,1)
y_predDS = yhat_DS
y_trueDG = y_trueDG.cpu()
y_predDG = y_predDG.cpu()
y_trueDR = y_trueDR.cpu()
y_predDR = y_predDR.cpu()
y_trueDS = y_trueDS.cpu()
y_predDS = y_predDS.cpu()
AUCDG = roc_auc_score_trainval(y_trueDG.detach().numpy(), y_predDG.detach().numpy())
AUCDR = roc_auc_score_trainval(y_trueDR.detach().numpy(), y_predDR.detach().numpy())
AUCDS = roc_auc_score_trainval(y_trueDS.detach().numpy(), y_predDS.detach().numpy())
epoch_DGauc.append(AUCDG)
epoch_DRauc.append(AUCDR)
epoch_DSauc.append(AUCDS)
l1.append(torch.mean(torch.Tensor(epoch_cost1ls)))
l2.append(torch.mean(torch.Tensor(epoch_cost2ls)))
regs.append(torch.mean(torch.Tensor(epoch_regls)))
classif.append(torch.mean(torch.Tensor(epoch_classifls)))
aucs.append(np.mean(epoch_auc))
L.append(torch.mean(torch.FloatTensor(epoch_loss)))
DG_losstr.append(torch.mean(torch.Tensor(epoch_DGloss)))
DR_losstr.append(torch.mean(torch.Tensor(epoch_DRloss)))
DS_losstr.append(torch.mean(torch.Tensor(epoch_DSloss)))
DG_auctr.append(torch.mean(torch.Tensor(epoch_DGauc)))
DR_auctr.append(torch.mean(torch.Tensor(epoch_DRauc)))
DS_auctr.append(torch.mean(torch.Tensor(epoch_DSauc)))
with torch.no_grad():
Gen.eval()
Gen.to(device)
Map.eval()
Map.to(device)
DG.eval()
DG.to(device)
DS.eval()
DS.to(device)
DR.eval()
DR.to(device)
TXValGDSC_N = TXValGDSC_N.to(device)
TXValPatients_N = TXValPatients_N.to(device)
F_xs_val = Gen(TXValGDSC_N)
F_xt_val = Gen(TXValPatients_N)
yhats_val, yhatt_val = Map(F_xs_val, F_xt_val)
_, yhats_valB0 = Map(None, F_xs_val)
# Discriminators
t_val = torch.Tensor([torch.mean(yhats_valB0)])
t_val = t_val.to(device)
yhats_valB = (yhats_valB0 > t_val).float()
Labels_val = torch.ones(F_xs_val.size(0), 1)
Labelt_val = torch.zeros(F_xt_val.size(0), 1)
# print("\n\n-- Validation test-- \n\n")
# print("Labels_val: {}\n".format(Labels_val))
# print("Labelt_val: {}\n".format(Labelt_val))
Lst_val = torch.cat([Labels_val, Labelt_val],0)
Lst_val = Lst_val.to(device)
# print("Lst_val: {}\n".format(Lst_val))
Xst_val = torch.cat([F_xs_val, F_xt_val], 0)
Yst_val = torch.cat([yhats_valB, TYValPatients],0)
locR_val = (Yst_val==0).nonzero()[:, 0] # Proper way to obtain location indices is with '[:, 0]'
locS_val = (Yst_val).nonzero()[:, 0] # Proper way to obtain location indices is with '[:, 0]'
XDS_val = Xst_val[locS_val]
LabDS_val = Lst_val[locS_val]
XDR_val = Xst_val[locR_val]
LabDR_val = Lst_val[locR_val]
yhat_DG_val = DG(Xst_val)
yhat_DS_val = DS(XDS_val)
yhat_DR_val = DR(XDR_val)
DG_loss_val = C_loss(yhat_DG_val, Lst_val)
DS_loss_val = C_loss(yhat_DS_val, LabDS_val)
DR_loss_val = C_loss(yhat_DR_val, LabDR_val)
loss2_val = lam1*DG_loss_val + lam2*DS_loss_val + lam2*DR_loss_val
#LOSSES
closs_val = C_loss(yhatt_val, TYValPatients)
rloss_val = R_loss(yhats_val, TYValGDSC)
loss1_val = closs_val + rloss_val
yt_true_val = TYValPatients.view(-1,1)
yt_true_val = yt_true_val.cpu()
yhatt_val = yhatt_val.cpu()
AUC_val = roc_auc_score_trainval(yt_true_val.detach().numpy(), yhatt_val.detach().numpy())
AUCvals.append(AUC_val)
loss2_vals.append(loss2_val)
loss1_vals.append(loss1_val)
totloss_vals.append(loss1_val + loss2_val) # Total loss = loss 1 + loss 2
DG_lossval.append(DG_loss_val)
DS_lossval.append(DS_loss_val)
DR_lossval.append(DR_loss_val)
reg_lossval.append(rloss_val)
classif_lossval.append(closs_val)
y_trueDG_val = Lst_val.view(-1,1)
y_predDG_val = yhat_DG_val
y_trueDR_val = LabDR_val.view(-1,1)
y_predDR_val = yhat_DR_val
y_trueDS_val = LabDS_val.view(-1,1)
y_predDS_val = yhat_DS_val
y_trueDG_val = y_trueDG_val.cpu()
y_predDG_val = y_predDG_val.cpu()
y_trueDR_val = y_trueDR_val.cpu()
y_predDR_val = y_predDR_val.cpu()
y_trueDS_val = y_trueDS_val.cpu()
y_predDS_val = y_predDS_val.cpu()
AUCDG_val = roc_auc_score_trainval(y_trueDG_val.detach().numpy(), y_predDG_val.detach().numpy())
AUCDR_val = roc_auc_score_trainval(y_trueDR_val.detach().numpy(), y_predDR_val.detach().numpy())
AUCDS_val = roc_auc_score_trainval(y_trueDS_val.detach().numpy(), y_predDS_val.detach().numpy())
# print("AUC DG val: {}\n".format(AUCDG_val))
# print("AUC DR val: {}\n".format(AUCDR_val))
# print("AUC DS val: {}\n".format(AUCDS_val))
# print("y_predDG_val: {}\n".format(y_predDG_val))
# print("y_predDR_val: {}\n".format(y_predDR_val))
# print("y_predDS_val: {}\n".format(y_predDS_val))
DG_aucval.append(AUCDG_val)
DR_aucval.append(AUCDR_val)
DS_aucval.append(AUCDS_val)
# Take average across all training batches
loss1tr_mean = l1[it]
loss2tr_mean = l2[it]
rlosstr_mean = regs[it]
AUCtr_mean = aucs[it]
totlosstr_mean = L[it]
DRlosstr_mean = DR_losstr[it]
DGlosstr_mean = DG_losstr[it]
DSlosstr_mean = DS_losstr[it]
DGauctr_mean = DG_auctr[it]
DRauctr_mean = DR_auctr[it]
DSauctr_mean = DS_auctr[it]
closstr_mean = classif[it]
loss1val_mean = np.mean(np.array(loss1_vals))
loss2val_mean = np.mean(np.array(loss2_vals))
totlossval_mean = np.mean(np.array(totloss_vals))
AUCval_mean = np.mean(np.array(AUCvals))
DRlossval_mean = np.mean(np.array(DR_lossval))
DGlossval_mean = np.mean(np.array(DG_lossval))
DSlossval_mean = np.mean(np.array(DS_lossval))
reglossval_mean = np.mean(np.array(reg_lossval))
DGaucval_mean = np.mean(np.array(DG_aucval))
DRaucval_mean = np.mean(np.array(DR_aucval))
DSaucval_mean = np.mean(np.array(DS_aucval))
clossval_mean = np.mean(np.array(classif_lossval))
print("\n\nEpoch: {}".format(it))
print("(tr) loss1 mean: {}".format(loss1tr_mean))
print("(tr) loss2 mean: {}".format(loss2tr_mean))
print("(tr) total loss mean: {}".format(totlosstr_mean))
print("(tr) DG loss mean: {}".format(DGlosstr_mean))
print("(tr) DR loss mean: {}".format(DRlosstr_mean))
print("(tr) DS loss mean: {}".format(DSlosstr_mean))
print("(tr) AUC mean: {}".format(AUCtr_mean))
print("\n(val) loss1 mean: {}".format(loss1val_mean))
print("(val) loss2 mean: {}".format(loss2val_mean))
print("(val) total loss mean: {}".format(totlossval_mean))
print("(val) DG loss mean: {}".format(DGlossval_mean))
print("(val) DR loss mean: {}".format(DRlossval_mean))
print("(val) DS loss mean: {}".format(DSlossval_mean))
print("(val) AUC mean: {}".format(AUCval_mean))
# Write to file
# Take avg
with open(trace_file_txt, 'a') as f:
f.write("\nepoch: {}\ttrain_loss1: {}\ttrain_loss2: {}\ttrain_losstotal: {}\ttrain_regloss: {}\ttrain_closs\ttrain_DGloss: {}\ttrain_DRloss: {}\ttrain_DSloss: {}\ttrain_AUC: {}\ttrain_DGauc: {}\ttrain_DRauc: {}\ttrain_DSauc: {}\n \
\tval_loss1: {}\tval_loss2: {}\tval_losstotal: {}\tval_regloss: {}\tval_closs\tval_DGloss: {}\tval_DRloss: {}\tval_DSloss: {}\tval_AUC: {}\tval_DGauc: {}\tval_DRauc: {}\tval_DSauc: {}\n".format(it,
loss1tr_mean,
loss2tr_mean,
totlosstr_mean,
rlosstr_mean,
closstr_mean,
DGlosstr_mean,
DRlosstr_mean,
DSlosstr_mean,
AUCtr_mean,
DGauctr_mean,
DRauctr_mean,
DSauctr_mean,
loss1val_mean,
loss2val_mean,
totlossval_mean,
reglossval_mean,
clossval_mean,
DGlossval_mean,
DRlossval_mean,
DSlossval_mean,
AUCval_mean,
DGaucval_mean,
DRaucval_mean,
DSaucval_mean))
with open(trace_file_tsv, 'a') as f:
f.write("{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\n".format(it,
loss1tr_mean,
loss2tr_mean,
totlosstr_mean,
rlosstr_mean,
closstr_mean,
DGlosstr_mean,
DRlosstr_mean,
DSlosstr_mean,
AUCtr_mean,
DGauctr_mean,
DRauctr_mean,
DSauctr_mean,
loss1val_mean,
loss2val_mean,
totlossval_mean,
reglossval_mean,
clossval_mean,
DGlossval_mean,
DRlossval_mean,
DSlossval_mean,
AUCval_mean,
DGaucval_mean,
DRaucval_mean,
DSaucval_mean))
# Save the current model #
print("totlossval_mean: {}".format(totlossval_mean))
save_best_model_to = os.path.join(SAVE_RESULTS_TO + 'model/', split + '_' + ftsplit + '_best_model.pt')
print("==> saving current model (loss = {:0.6f}) ...".format(totlossval_mean))
## Saving multiple models in one file (Feature extractors, mapper, discriminators) ##
## https://pytorch.org/tutorials/beginner/saving_loading_models.html#saving-multiple-models-in-one-file ##
torch.save({
'Gen_state_dict': Gen.state_dict(),
'Map_state_dict': Map.state_dict(),
'DG_state_dict': DG.state_dict(),
'DS_state_dict': DS.state_dict(),
'DR_state_dict': DR.state_dict(),
'optimizer_2_state_dict': optimizer_2.state_dict(),
}, save_best_model_to)
## Evaluate model ##
print("\n\n-- Evaluation -- \n\n")
print("TXTestPatients_N shape: {}\n".format(TXTestPatients_N.size()))
print("TYTestPatients shape: {}\n".format(TYTestPatients.size()))
save_best_model_to = os.path.join(SAVE_RESULTS_TO + 'model/', split + '_' + ftsplit + '_best_model.pt')
test_loss, test_auc = evaluate_model(TXTestPatients_N, TYTestPatients, Gen, Map)
print("\n\n-- Test Results --\n\n")
print("test loss: {}".format(test_loss))
print("test auc: {}".format(test_auc))
print("\n ----------------- \n\n\n")
with open(test_results_file, 'a') as f:
f.write("-- Split {} - ftsplit {} --\n".format(split, ftsplit))
f.write("Test loss: {}\t Test AUC: {}\n\n\n".format(test_loss, test_auc))
AUCtest_splits_total.append(test_auc)
## Plot Learning Curves for 9 models trained using this specific param setting ##
print("Plotting learning curves ... ")
plot_opts = {}
plot_opts['model_params'] = 'hdim' + str(h_dim) + '_zdim' + str(z_dim) + '_lr' + str(lr) + '_epoch' + str(epoch) + '_lamb1' + str(lam1) + '_lamb2' + str(lam2) \
+ '_dropouts' + str(dropout_gen) + '_' + str(dropout_mtl) + '_' + str(dropout_dg) + '_' + str(dropout_ds) + '_' + str(dropout_dr) \
+ '_mbS' + str(mbS) + '_mbT' + str(mbT)
plot_opts['base_trace_dir'] = SAVE_TRACE_TO + batch_sizes + '/'
plot_opts['split'] = split
plot_opts['ftsplit'] = ftsplit
plot_learning_curve(plot_opts)
## Calculate (held out) test set's avg AUC across different splits
AUCtest_splits_total = np.array(AUCtest_splits_total)
avgAUC = np.mean(AUCtest_splits_total)
stdAUC = np.std(AUCtest_splits_total)
with open(test_results_file, 'a') as f:
f.write("\n\n-- Average Test AUC --\n\n")
f.write("Mean: {}\tStandard Deviation: {}\n".format(avgAUC, stdAUC))
|
n = int(input())
a = [None]*n
ans = 1
for x in range(n):
a[x] = list(input())
for x in range(n-1):
if(a[x][1] == a[x+1][0]):
ans += 1
print(ans)
|
array = [1, 2, 3, 4]
new_array = []
new_array = [None] * len(array)
for i in range(len(array)):
new_array[i] = array[len(array) - 1 - i]
print(new_array)
|
# Generated by Django 2.2.10 on 2020-02-23 03:07
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('course', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Message',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=255, verbose_name='主旨')),
('body', models.TextField(verbose_name='內容')),
('created', models.DateTimeField(auto_now_add=True, verbose_name='時間')),
('course', models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='announcements', to='course.Course')),
('recipient', models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='inbox', to=settings.AUTH_USER_MODEL)),
('sender', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='outbox', to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='MessageStatus',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('read', models.DateTimeField(auto_now_add=True, verbose_name='閱讀時間')),
('message', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='status', to='user.Message')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='+', to=settings.AUTH_USER_MODEL)),
],
),
]
|
from pyfa_ng_backend import create_app
app = create_app()
|
#!/usr/bin/env python
import pickle
import io
from lab_defs import teaching_length
from lab_mc import experiments, tutorials, null_experiment
experiments["LVT"] = tutorials["LVT"]
from print_student import get_styles, swansea_logo
from assign_students import get_students, match_students
from loadstore import load_pairs
from reportlab.platypus import SimpleDocTemplate, Paragraph, Table, TableStyle, Flowable, Spacer
from reportlab.lib.styles import getSampleStyleSheet, ParagraphStyle
from reportlab.lib.units import mm
from reportlab.lib.pagesizes import A4
from reportlab.lib.enums import TA_LEFT, TA_CENTER
from reportlab.pdfbase import pdfmetrics
from reportlab.pdfbase.ttfonts import TTFont
from colours import swansea_blue, light_grey, medium_grey, black
from pdfrw import PdfReader, PdfDict
from pdfrw.buildxobj import pagexobj
from pdfrw.toreportlab import makerl
from weeks import semester1_dates, semester2_dates
from collections import Counter
from itertools import zip_longest
from datetime import date
def get_header(semester, styles, level=2):
title1 = "Department of Physics • Experiment List"
title2 = "Level {} • {}–{} TB{}"
contents = [[[swansea_logo],
[Paragraph(title1, styles["Title"]),
Paragraph(title2.format(level, date.today().year,
date.today().year + 1,
semester),
styles["Title"])]]]
table = Table(contents, [130, (210 - 15 - 15) * mm - 130])
table_style = [('VALIGN', (0,0), (-1,-1), 'MIDDLE')]
table.setStyle(table_style)
return [table, Spacer(0, 5 * mm)]
def build_table(experiments_by_week, semester, styles):
if semester == 1:
dates = semester1_dates
experiments_by_week = experiments_by_week[:teaching_length]
elif semester == 2:
dates = semester2_dates
experiments_by_week = experiments_by_week[teaching_length:]
elif semester == "1+2":
dates = semester1_dates + semester2_dates
else:
raise ValueError("Invalid semester")
contents = [["Week"] + [e.acronym for e in sorted(experiments.values())]]
table_style = [('BACKGROUND', (0,0), (-1,0), medium_grey),
('INNERGRID', (0,0), (-1,-1), 0.25, black),
('BOX', (0,0), (-1,-1), 0.25, black),
('ALIGN', (0,0), (-1,-1), 'CENTER'),
('FONTNAME', (0,0), (-1,-1), 'Futura'),
('FONTSIZE', (0,0), (-1,-1), 11),
('VALIGN', (0,0), (-1,-1), 'MIDDLE'),
('BOTTOMPADDING', (0,0), (-1,-1), 5)]
for week_index, week in enumerate(experiments_by_week):
current_row = [dates[week_index]]
counter = Counter(week)
if week_index % 2 == 1:
table_style.append(('BACKGROUND', (0,week_index+1), (-1,week_index+1), light_grey))
for experiment_index, experiment in enumerate(sorted(experiments.values())):
c = counter[experiment]
current_row.append("{}".format(c))
if c == 0:
table_style.append(('TEXTCOLOR',
(experiment_index+1,week_index+1),
(experiment_index+1,week_index+1),
medium_grey))
contents.append(current_row)
return contents, table_style
def build_document(experiments_by_week, semester, level, filename):
buf = io.BytesIO()
output_doc = SimpleDocTemplate(
buf,
rightMargin = 15 * mm,
leftMargin = 15 * mm,
topMargin = 15 * mm,
bottomMargin = 30 * mm,
pagesize = A4,
)
styles = get_styles()
Story = get_header(semester, styles, level)
table_content, table_style = build_table(experiments_by_week, semester, styles)
table = Table(table_content)
table.setStyle(table_style)
table.hAlign = 'CENTER'
Story.append(table)
output_doc.build(Story)
with open(filename, 'wb') as f:
f.write(buf.getvalue())
if __name__ == "__main__":
pairs = load_pairs("schedule.dat")
students = get_students("students.csv")
match_students(students, pairs)
missing_pairs = list(set(range(len(pairs))) - {student.pair_number - 1 for student in students.values()})
missing_pairs.sort(reverse=True)
for pair in missing_pairs:
pairs.pop(pair)
experiments_by_week = list(zip_longest(*pairs, fillvalue=null_experiment))
build_document(experiments_by_week, "1+2", 2, "list.pdf")
|
import numpy as np
import pandas as pd
data = pd.read_csv("./data/test0822.csv", delimiter=",")
row_per_col = 50
x = data.iloc[:3113, 1:9]
x = np.array(x)
print(x.shape)
test = data.iloc[3113-row_per_col:3113, 1:9]
print(test.shape)
x_len = 50
y_len = 5
sequence_length = x_len + y_len
size = row_per_col + 5
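# Sliding-window construction: each window spans size = 55 consecutive rows;
# the first 50 rows become the LSTM input and the last 5 rows become the target.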
result = []
for i in range(len(x) - sequence_length + 1):
idk = []
idk = x[i:i+size]
result.append(idk)
result = np.array(result)
x = result[:, :50, :]
y = result[:, 50:, :]
print(x.shape)
print(y.shape)
x = x.reshape(x.shape[0] * x.shape[1], x.shape[2])
from sklearn.preprocessing import StandardScaler, MinMaxScaler
scaler = StandardScaler()
# scaler = MinMaxScaler()
scaler.fit(x)
x = scaler.transform(x)
test = scaler.transform(test)
x = x.reshape(3059, 50, 8)
test = test.reshape(1, -1, 8)
y = y.reshape(y.shape[0], y.shape[1] * y.shape[2])
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split( x, y, random_state = 66, test_size = 0.2)
print(x_train.shape)
print(y_train.shape)
from keras.models import Sequential
from keras.layers import LSTM, Dense
model = Sequential()
model.add(LSTM(50, input_shape = (50,8), activation='relu'))
# model.add(Dense(10, activation='relu'))
# model.add(Dense(512, activation='relu'))
# model.add(Dense(32, activation='relu'))
# model.add(Dense(16, activation='relu'))
model.add(Dense(40, activation='relu'))
model.compile(optimizer='adam', loss='mse', metrics=['mse'])
model.fit(x_train, y_train, epochs=300, batch_size=128, verbose=2)
pred = model.predict(x_test, batch_size=64)
print(pred)
from sklearn.metrics import r2_score
r2_y_predict = r2_score(y_test, pred)
print("x_test: ", x_test[0])
print("y_test: ",y_test[0])
print("pred: ", pred[0])
print("R2: ", r2_y_predict)
from sklearn.metrics import mean_squared_error
def RMSE(y_test, y_predict):
return np.sqrt(mean_squared_error(y_test, y_predict))
print("RMSE: ", RMSE(y_test, pred))
############### Evaluation
print("\n\n\n\n\n")
pred = model.predict(test, batch_size=2)
print(pred)
pred = pred.reshape(5, -1)
pred = np.round(pred)
print(pred)
dataframe = pd.DataFrame(pred)
dataframe.to_csv('./test0822/test0822.csv', header = False, index=False)
|
import time
import threading
from queue import Queue
class Producer(threading.Thread):
def run(self):
global queue
count=0
while True:
if queue.qsize()<1000:
for i in range(101):
count+=1
msg='生成产品'+str(count)
queue.put(msg)
print(msg)
time.sleep(1)
class Consumer(threading.Thread):
def run(self):
global queue
while True:
if queue.qsize()>100:
for i in range(10):
msg=self.name+'消费了'+queue.get()[2:]
print(msg)
#time.sleep(1)
if __name__=='__main__':
queue = Queue()
t1=Producer()
t2=Consumer()
t1.start()
#time.sleep(1)
t2.start()
t1.join()
t2.join()
|
import pygame
pygame.init()
win = pygame.display.set_mode((800,600))
pygame.display.set_caption("first game")
x = 50
y = 50
width = 40
height = 60
vol = 5
run = True
while run:
pygame.time.delay(100)
for event in pygame.event.get():
if event.type ==pygame.QUIT:
run = False
pygame.draw.rect(win,(255,0,0), (x, y, width, height))
pygame.display.update()
pygame.quit()
|
""" THIS module is for indexing the on page features of a URL
here a collection is formed and each word is treated as document.
Each document has a id from which it can be accessed. With each word
a posting list is associated which has two attributes first the id of
the URL and 2nd count of the number of times a word
appears in a given URL """
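# A hypothetical example of a stored document (illustrative shape only):
#   {"_id": "run_title", "posting": [[12, 3], [57, 1]]}
# i.e. the stem "run" appears 3 times in the URL with id 12 and once in the URL
# with id 57, counting only text extracted from <title> tags.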
from pymongo import MongoClient
from stemming.porter2 import stem
from get_text_from_tag_for_url import GetIndividualTagsText
from get_text_from_html_tag import get_html_tag_text
import re
class OnPageSummarizer:
def __init__(self):
# for connection establishing
self.client = MongoClient('localhost', 27017)
# project name
self.db1 = self.client['test_project']
# for getting text of a given url
self.get_obj = None
self.get_html_text_obj = None
self.fp = open('stems.txt', 'r').read().split('\n')
# to find the document storing the url and id mapping
self.doc = self.db1.summary.find_one({"_id": "_hashmap"})
# to get the dictionary
self.dic = self.doc['mapping']
# getting the id of URL
self.id_of_url = None
def get_dict_words(self, on_page_summary_for_given_tag):
word_stems = []
stemmed_words_list = on_page_summary_for_given_tag.lower().split()
for word in stemmed_words_list:
word_stems.append(stem(word))
key_dic = {}
for word in word_stems:
if word in self.fp:
if word in key_dic:
key_dic[word] += 1
else:
key_dic[word] = 1
return key_dic
# HERE id of url is generated before it is called
def add_to_db_posting(self, keyword, count, tag):
doc = self.db1.on_page_summary.find_one({"_id": keyword + "_" + tag})
posting_list = doc['posting']
# adding the new value at correct position in the list
posting_list.append([self.id_of_url, count])
# updating the list in database
self.db1.on_page_summary.update(
{"_id": keyword + "_" + tag},
{"posting": posting_list}
)
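# Note: the update() call above passes a plain document (no update operators),
# which replaces the matched document entirely; if other fields must survive,
# an update with {"$set": ...} would be needed instead.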
def for_title(self):
title_text = self.get_obj.get_title_text()
# converting the dictionary to key, value pairs stored in a list
key_dic = {}
key_dic = self.get_dict_words(title_text)
for word in key_dic:
self.add_to_db_posting(word, key_dic[word], "title")
def for_meta(self):
meta_text = self.get_obj.get_meta_text()
key_dic = {}
key_dic = self.get_dict_words(meta_text)
# converting the dictionary to key, value pairs stored in a list
for word in key_dic:
self.add_to_db_posting(word, key_dic[word], "meta")
def for_header(self):
header_text = self.get_obj.get_header_text()
key_dic = {}
key_dic = self.get_dict_words(header_text)
for word in key_dic:
self.add_to_db_posting(word, key_dic[word], "header")
def for_table(self):
table_text = self.get_obj.get_table_text()
key_dic = self.get_dict_words(table_text)
for word in key_dic:
self.add_to_db_posting(word, key_dic[word], "table")
def for_html(self):
html_text = self.get_html_text_obj.get_html_text()
key_dic = self.get_dict_words(html_text)
for word in key_dic:
self.add_to_db_posting(word, key_dic[word], "html")
def cur_anchor(self, url):
# Removing symbols from url
cur_text = re.sub(r'[^a-zA-Z]', r' ', url)
key_dic = self.get_dict_words(cur_text)
for word in key_dic:
self.add_to_db_posting(word, key_dic[word], "cur_a")
def for_anchor(self):
anchor = self.get_obj.get_anchor_tag()
key_dic = {}
key_dic = self.get_dict_words(anchor)
for word in key_dic:
self.add_to_db_posting(word, key_dic[word], "a")
def for_webpage_summary(self):
page_summary = self.get_obj.get_page_summary()
key_dic = {}
key_dic = self.get_dict_words(page_summary)
for word in key_dic:
self.add_to_db_posting(word, key_dic[word], 'page')
def fetch_updated_list(self):
self.doc = self.db1.summary.find_one({"_id": "_hashmap"})
# to get the dictionary
self.dic = self.doc['mapping']
def index_on_page_summary(self, src_content, url):
self.fetch_updated_list()
self.get_obj = GetIndividualTagsText(src_content, url)
self.get_html_text_obj = get_html_tag_text(src_content)
url = re.sub(r'\.', r';', url.encode('utf-8'))
try:
self.id_of_url = self.dic[url]
except KeyError as e:
print "Key Error-------"
print e
self.for_html()
self.for_title()
self.for_meta()
self.for_header()
self.for_table()
self.cur_anchor(url)
self.for_anchor()
self.for_webpage_summary()
|
import telepot
import pandas as pd
import csv
import requests
import json
import time
from collections import defaultdict
token = 'TOKEN'
chat_id = 'CHAT_ID'
api_key = 'AIzaSyAVJcQ0549l7BnK62jvf3EnITtgeMJXuww'
def gettelegram():
data=defaultdict(list)
bot = telepot.Bot(token)
count = 1
# p = 1
print("Round1")
message_id = []
sender_id = []
sender_name = []
date = []
message = []
l = bot.getUpdates()
# print(l)
url = ('https://commentanalyzer.googleapis.com/v1alpha1/comments:analyze' + '?key=' + api_key)
file_name = 'D:\hack\main\deleted_msg.txt'
file_lines = []
with open(file_name, 'r') as read_obj:
# Read all lines in the file one by one
for line in read_obj:
s = line.replace('\n', '')
msgs = s.split(' | ')
file_lines.append(msgs[2])
print(file_lines)
for i in l:
if 'message' in i.keys():
if 'text' in i['message'].keys():
if i['message']['text'] not in file_lines:
message.append(i['message']['text'])
# print(message)
if 'message_id' in i['message'].keys():
message_id.append(i['message']['message_id'])
if 'from' in i['message'].keys():
sender_id.append(i['message']['from']['id'])
sender_name.append(i['message']['from']['first_name']+" "+i['message']['from']['last_name'])
if 'date' in i['message'].keys():
date.append(i['message']['date'])
else:
continue
troll_responses = []
# print(message)
for i in range(len(message)):
data_dict = {}
data_dict['comment'] = {}
data_dict['comment']['text'] = message[i]
data_dict['languages'] = ['en']
data_dict['requestedAttributes'] = {}
data_dict['requestedAttributes']['TOXICITY'] = {}
# print(data_dict)
response = requests.post(url=url, data=json.dumps(data_dict))
# print(count)
count += 1
# time.sleep(1)
response_dict = json.loads(response.content)
troll_responses.append(response_dict)
# print(data_dict)
# print()
# print(json.dumps(response_dict, indent=2))
# print("------------------------------------------------------")
for i in range(len(troll_responses)):
val = troll_responses[i]['attributeScores']['TOXICITY']['summaryScore']['value']
# print(message[i])
# print(val)
file = open('D:\hack\main\deleted_msg.txt', 'a')
if val*100 > 60:
print(message[i])
print(message_id[i])
bot.deleteMessage((chat_id, message_id[i]))
# file.write(message[i]+'\n')
data[sender_name[i]].append(message[i])
file.write(str(sender_id[i])+' | '+sender_name[i]+' | '+message[i]+'\n')
file.close()
# time.sleep(5)
return(data)
# p += 1
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-08-28 18:32
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('user_input', '0001_initial'),
]
operations = [
migrations.RenameField(
model_name='daily_user_input_optional',
old_name='Chia_seeds_consumed_during_workout',
new_name='chia_seeds_consumed_during_workout',
),
migrations.RenameField(
model_name='daily_user_input_optional',
old_name='General_Workout_Comments',
new_name='general_Workout_Comments',
),
migrations.RenameField(
model_name='daily_user_input_optional',
old_name='Heart_Rate_Variability',
new_name='heart_Rate_Variability',
),
migrations.RenameField(
model_name='daily_user_input_optional',
old_name='List_of_processed_food_consumed_yesterday',
new_name='list_of_processed_food_consumed_yesterday',
),
migrations.RenameField(
model_name='daily_user_input_strong',
old_name='Number_of_alcohol_consumed_yesterday',
new_name='number_of_alcohol_consumed_yesterday',
),
migrations.RenameField(
model_name='inputs_changes_from_third_sources',
old_name='Other',
new_name='other',
),
migrations.RenameField(
model_name='inputs_changes_from_third_sources',
old_name='Resting_Heart_Rate',
new_name='resting_Heart_Rate',
),
migrations.RenameField(
model_name='inputs_changes_from_third_sources',
old_name='Sleep_time_excluding_awake_time',
new_name='sleep_time_excluding_awake_time',
),
migrations.RenameField(
model_name='inputs_changes_from_third_sources',
old_name='TBD',
new_name='tbd',
),
]
|
import numpy.polynomial.polynomial as poly
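# Lagrange interpolation helpers: lagrange(k, n) builds the k-th basis polynomial
# over the nodes 1..n, interp(l) fits the polynomial passing through the points
# (1, l[0]), ..., (len(l), l[-1]), OP(poly, i) is the "optimum polynomial" fitted
# to the first i values of poly, and diff(p, q) returns the fit's value at the
# first point where q disagrees with p (the first incorrect term), or False if
# they agree on the range checked.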
def lagrange(k,n):
p = poly.Polynomial([1])
for i in range(1, n+1):
if i != k:
p = p*poly.Polynomial([-i,1]) / (k-i)
return p
def interp(l):
p = poly.Polynomial([0])
for i, elem in enumerate(l):
p += lagrange(i+1, len(l)) * elem
return p
def OP(poly, i):
return interp([poly(j) for j in range(1, i+1)])
def diff(p,q):
for i in range(1, 100):
if abs(p(i) - q(i)) > 1:
return q(i)
return False
v = poly.Polynomial([0, 0, 0, 1])
u = poly.Polynomial([1, -1, 1, -1, 1, -1, 1, -1, 1, -1, 1])
sum([int(diff(u, OP(u,i))+0.1) for i in range(1,12)])
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# generic speech driver
from threading import Thread, Lock
from queue import Queue, Empty
import shlex
from subprocess import Popen
import time
class speakQueue(Queue):
def clear(self):
try:
while True:
self.get_nowait()
except Empty:
pass
class driver():
def __init__(self):
self.proc = None
self.speechThread = Thread(target=self.worker)
self.lock = Lock()
self.textQueue = speakQueue()
self.initialize()
def initialize(self):
environment = {'minVolume': 0,
'volume': '100',
'maxVolume': 200,
'minPitch': '0',
'pitch': '50',
'maxPitch': 99,
'minRate': 80,
'rate': '280',
'maxRate': 450,
'language': '',
'module': 'espeak',
'voice': 'en-us',
'command': 'espeak -a fenrirVolume -s fenrirRate -p fenrirPitch -v fenrirVoice -- "fenrirText"'
}
self.env = environment
self.minVolume = self.env['minVolume']
self.volume = self.env['volume']
self.maxVolume = self.env['maxVolume']
self.minPitch = self.env['minPitch']
self.pitch = self.env['pitch']
self.maxPitch = self.env['maxPitch']
self.minRate = self.env['minRate']
self.rate = self.env['rate']
self.maxRate = self.env['maxRate']
self.language = self.env['language']
self.voice = self.env['voice']
self.module = self.env['module']
self.speechCommand = self.env['command']
if self.speechCommand == '':
self.speechCommand = 'espeak -a fenrirVolume -s fenrirRate -p fenrirPitch -v fenrirVoice -- "fenrirText"'
self._isInitialized = True
if self._isInitialized:
self.speechThread.start()
def shutdown(self):
if not self._isInitialized:
return
self.cancel()
self.textQueue.put(-1)
def speak(self,text, queueable=True):
if not self._isInitialized:
return
if not queueable:
self.cancel()
utterance = {
'text': text,
'volume': self.volume,
'rate': self.rate,
'pitch': self.pitch,
'module': self.module,
'language': self.language,
'voice': self.voice,
}
self.textQueue.put(utterance.copy())
def cancel(self):
if not self._isInitialized:
return
self.clear_buffer()
self.lock.acquire(True)
if self.proc:
try:
self.proc.terminate()
except Exception as e:
try:
self.proc.kill()
except Exception as e:
pass
self.proc = None
self.lock.release()
def setCallback(self, callback):
print('SpeechDummyDriver: setCallback')
def clear_buffer(self):
if not self._isInitialized:
return
self.textQueue.clear()
def setVoice(self, voice):
if not self._isInitialized:
return
self.voice = str(voice)
def setPitch(self, pitch):
if not self._isInitialized:
return
self.pitch = str(self.minPitch + pitch * (self.maxPitch - self.minPitch ))
def setRate(self, rate):
if not self._isInitialized:
return
self.rate = str(self.minRate + rate * (self.maxRate - self.minRate ))
def setModule(self, module):
if not self._isInitialized:
return
self.module = str(module)
def setLanguage(self, language):
if not self._isInitialized:
return
self.language = str(language)
def setVolume(self, volume):
if not self._isInitialized:
return
self.volume = str(self.minVolume + volume * (self.maxVolume - self.minVolume ))
def worker(self):
while True:
utterance = self.textQueue.get()
if isinstance(utterance, int):
if utterance == -1:
return
else:
continue
elif not isinstance(utterance, dict):
continue
# no text means nothing to speak
if not 'text' in utterance:
continue
if not isinstance(utterance['text'],str):
continue
if utterance['text'] == '':
continue
# check for valid data fields
if not 'volume' in utterance:
utterance['volume'] = ''
if not isinstance(utterance['volume'],str):
utterance['volume'] = ''
if not 'module' in utterance:
utterance['module'] = ''
if not isinstance(utterance['module'],str):
utterance['module'] = ''
if not 'language' in utterance:
utterance['language'] = ''
if not isinstance(utterance['language'],str):
utterance['language'] = ''
if not 'voice' in utterance:
utterance['voice'] = ''
if not isinstance(utterance['voice'],str):
utterance['voice'] = ''
if not 'pitch' in utterance:
utterance['pitch'] = ''
if not isinstance(utterance['pitch'],str):
utterance['pitch'] = ''
if not 'rate' in utterance:
utterance['rate'] = ''
if not isinstance(utterance['rate'],str):
utterance['rate'] = ''
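# Substitute the fenrir*/genericSpeech* placeholders in the command template
# with the values of the current utterance before spawning the process.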
popenSpeechCommand = shlex.split(self.speechCommand)
for idx, word in enumerate(popenSpeechCommand):
word = word.replace('fenrirVolume', str(utterance['volume'] ))
word = word.replace('genericSpeechVolume', str(utterance['volume'] ))
word = word.replace('fenrirModule', str(utterance['module']))
word = word.replace('genericSpeechModule', str(utterance['module']))
word = word.replace('fenrirLanguage', str(utterance['language']))
word = word.replace('genericSpeechLanguage', str(utterance['language']))
word = word.replace('fenrirVoice', str(utterance['voice']))
word = word.replace('genericSpeechVoice', str(utterance['voice']))
word = word.replace('fenrirPitch', str(utterance['pitch']))
word = word.replace('genericSpeechPitch', str(utterance['pitch']))
word = word.replace('fenrirRate', str(utterance['rate']))
word = word.replace('genericSpeechRate', str(utterance['rate']))
word = word.replace('fenrirText', str(utterance['text']))
word = word.replace('genericSpeechText', str(utterance['text']))
popenSpeechCommand[idx] = word
try:
self.lock.acquire(True)
self.proc = Popen(popenSpeechCommand, stdin=None, stdout=None, stderr=None, shell=False)
self.lock.release()
self.proc.wait()
except Exception as e:
print(e)
self.lock.acquire(True)
self.proc = None
self.lock.release()
# create driver object
speechserver = driver()
# speak
speechserver.speak("For my frind storm, because he rulz")
# wait
time.sleep(1.6)
# stop worker
speechserver.shutdown()
|
import json
from logging import Logger
import core
from core.schema import S1
class DataCopy:
def __init__(self, log, data, data_d=None):
"""
@type log: Logger
@type data: core.Data
@type data_d: core.Data
"""
self.data = data
self.data_d = data_d
self.log = log
def run(self, gid=None):
if gid:
return self.dump_gid(gid)
else:
return self.dump_gids()
def dump_gids(self):
total = 0
c = self.data.rc.hscan(S1.destination_key_fmt('children'))
while len(c) > 1 and c[1]:
total += len(c[1])
for gid in c[1]:
self.dump_gid(gid)
# check if the next cursor is zero
if c[0] == '0' or c[0] == 0:
break
# grab next set
c = self.data.rc.hscan(S1.destination_key_fmt('children'), c[0])
# sleep 10 sec before retry
print('End of gid_set, total [{0}] GIDs.'.format(total))
self.data_d.rc.delete(S1.register_set())
print('Cleared register set.')
def dump_gid(self, gid):
print('Dumping user, GID: {0}'.format(gid))
# get child bindings for this account
children = set(self.data.get_destination_users(gid, 'children'))
if not children or (len(children) == 1 and gid in children):
if not self.data.rc.exists(S1.cache_key(gid)):
print('****** SELF CHILD + NO CACHE, SKIPPED, GID: {0}'.format(gid))
return
# just to be safe
children.add(gid)
for child in children:
self.dump_source(gid, child)
def copy_hash(self, key):
print('Copying {0}...'.format(key))
self.data_d.rc.delete(key)
d = self.data.rc.hgetall(key)
for k, v in d.iteritems():
self.data_d.rc.hset(key, k, v)
def copy_set(self, key):
print('Copying {0}...'.format(key))
self.data_d.rc.delete(key)
c = self.data.rc.sscan(key)
while len(c) > 1 and c[1]:
for record in c[1]:
self.data_d.rc.sadd(key, record)
# check if the next cursor is zero
if c[0] == '0' or c[0] == 0:
break
# grab next set
c = self.data.rc.sscan(key, c[0])
def copy_zset(self, key):
print('Copying {0}...'.format(key))
self.data_d.rc.delete(key)
c = self.data.rc.zscan(key)
while len(c) > 1 and c[1]:
for record in c[1]:
self.data_d.rc.zadd(key, *record)
# check if the next cursor is zero
if c[0] == '0' or c[0] == 0:
break
# grab next set
c = self.data.rc.zscan(key, c[0])
def dump_source(self, master_gid, gid):
print('Copying source [{0}:{1}]...'.format(master_gid, gid))
# add child gid to pollers first
self.data_d.register_gid(gid)
# add the gid from the list of child accounts
print('Linking GID: [{0}]m <-- [{1}]s'.format(master_gid, gid))
self.data_d.add_linked_account(master_gid, gid)
destinations = self.data.get_destinations(gid)
self.log.debug('{"dest": [')
c = 0
for destination in destinations:
users = self.data.get_destination_users(gid, destination)
for user in users:
if c != 0:
self.log.debug(',')
# dump destination
self.dump_destination(master_gid, gid, destination, user)
c += 1
self.log.debug('],')
# dump gid data keys
self.log.debug('"keys": [')
self.log.debug('"{0},"'.format(S1.gid_key(gid)))
self.log.debug('"{0},"'.format(S1.gid_log_key(gid)))
self.log.debug('"{0},"'.format(S1.links_key(gid)))
self.log.debug('"{0}"'.format(S1.cache_key(gid)))
self.log.debug(']}')
# copy keys
self.copy_hash(S1.gid_key(gid))
self.copy_zset(S1.gid_log_key(gid))
self.copy_hash(S1.cache_key(gid))
self.copy_set(S1.links_key(gid))
# copy tokens for all linked destinations (will overwrite some data)
links = self.data.get_linked_accounts(master_gid) or dict()
for k in links:
# copy token
p = k.split(':')
if not p[0] in self.data.provider:
continue
token = self.data.get_user_token(gid, p[0], p[1])
self.data_d.set_user_token(gid, p[0], p[1], token)
# copy user params
for p_name in S1.PROVIDER_PARAMS:
p_val = self.data.provider[p[0]].get_user_param(p[1], p_name)
if p_val:
self.data_d.provider[p[0]].set_user_param(p[1], p_name, p_val)
def dump_destination(self, master_gid, gid, destination, user):
self.log.debug('{')
self.log.debug('"dst":"{0}:{1}",'.format(destination, user))
self.log.debug('"m":"{0}","s":"{1}",'.format(master_gid, gid))
# get sources for this master gid
sources = set(self.data.get_gid_sources(gid).keys())
# get sources for this destination account
source_gid_set = set(self.data.get_bindings(destination, user))
# sources to unlink
sources_unlink = sources.intersection(source_gid_set)
self.log.debug('"src_all":{0},'.format(json.dumps(list(sources))))
self.log.debug('"src_dest":{0},'.format(json.dumps(list(source_gid_set))))
self.log.debug('"src_link":{0}'.format(json.dumps(list(sources_unlink))))
# unlink each source
for src_gid in sources_unlink:
print('Binding: [{0}] --> [{1}:{2}]'.format(gid, destination, user))
self.data_d.bind_user(master_gid, gid, destination, user)
# destination update
#up = self.data.get_destination_update(gid, destination, user)
#self.data_d.set_destination_first_use(gid, destination, user, up)
# copy first bound timestamp
#use = self.data.get_destination_first_use(gid, destination, user)
#self.data_d.set_destination_first_use(gid, destination, user, use)
# timestamps
#bound = self.data.get_destination_param(gid, destination, user, S1.bound_key())
#self.data_d.set_destination_param(gid, destination, user, S1.bound_key(), bound)
# filters
filter_data = self.data.filter.get_filter(destination, gid, user)
self.data_d.filter.set_filter(destination, gid, user, filter_data)
# message map
msg_id_map = self.data.filter.get_message_id_map(destination, user)
self.data_d.filter.set_message_id_map(destination, user, msg_id_map)
self.log.debug('}')
# remove account for this gid
# get destination accounts data (keys, avatar, etc.) for this link
acc_dump = self.data.get_linked_account(gid, destination, user)
if not acc_dump:
print('WARNING: No data for [{0}] --> [{1}:{2}]'.format(gid, destination, user))
else:
print('Copying Data: [{0}] --> [{1}:{2}]'.format(gid, destination, user))
self.data_d.link_provider_account(gid, destination, user, acc_dump)
|
# Copyright (C) 2017 Cuckoo Foundation.
# This file is part of Cuckoo Sandbox - http://www.cuckoosandbox.org
# See the file 'docs/LICENSE' for copying permission.
from lib.api.process import Process
from lib.core.ioctl import zer0m0n
def dump_memory(pid):
"""Dump process memory using zer0m0n if available, otherwise fallback."""
if zer0m0n.dumpmem(pid) is False:
Process(pid=pid).dump_memory()
|
from slack_sdk.models.dialogs import DialogBuilder  # noqa
from slack import deprecation
deprecation.show_message(__name__, "slack_sdk.models.dialogs")
|
# Copyright (C) 2010-2013 Claudio Guarnieri.
# Copyright (C) 2014-2016 Cuckoo Foundation.
# This file is part of Cuckoo Sandbox - http://www.cuckoosandbox.org
# See the file 'docs/LICENSE' for copying permission.
import logging
import os
from _winreg import HKEY_LOCAL_MACHINE, HKEY_CURRENT_USER
from lib.common.abstracts import Package
log = logging.getLogger(__name__)
class IE(Package):
"""Internet Explorer analysis package."""
PATHS = [
("ProgramFiles", "Internet Explorer", "iexplore.exe"),
]
REGKEYS = [
[
HKEY_CURRENT_USER,
"Software\\Microsoft\\Internet Explorer\\Main",
{
# "Would you like Internet Explorer as default browser?"
"Check_Associations": "no",
# "Set Up Windows Internet Explorer 8"
"DisableFirstRunCustomize": 1,
},
],
[
HKEY_CURRENT_USER,
"Software\\Microsoft\\Internet Explorer\\Security",
{
"Safety Warning Level": "Low",
"Sending_Security": "Low",
"Viewing_Security": "Low",
},
],
[
HKEY_LOCAL_MACHINE,
"Software\\Microsoft\\Internet Explorer\\Main",
{
# Disable Security Settings Check.
"DisableSecuritySettingsCheck": 1,
},
],
[
HKEY_CURRENT_USER,
"Software\\Microsoft\\Internet Explorer\\Main\\FeatureControl",
{
"FEATURE_LOCALMACHINE_LOCKDOWN": {
# "To help protect your security, Internet Explorer has
# restricted this webpage from running scripts or ActiveX
# controls that could access your computer. Click here for
# options..."
"iexplore.exe": 0,
},
"FEATURE_RESTRICT_FILEDOWNLOAD": {
# "To help protect your security, Windows Internet
# Explorer blocked this site from downloading files to
# your computer. Click here for more options..."
"iexplore.exe": 0,
},
},
],
[
HKEY_CURRENT_USER,
"Software\\Microsoft\\Windows\\CurrentVersion\\Internet Settings",
{
# "You are about to be redirected to a connection that is not secure."
"WarnOnHTTPSToHTTPRedirect": 0,
# "You are about to view pages over a secure connection."
"WarnOnZoneCrossing": 0,
},
],
[
HKEY_CURRENT_USER,
"Software\\Microsoft\\Internet Explorer\\Document Windows",
{
# Maximize the window by default.
"Maximized": "yes",
},
],
[
HKEY_CURRENT_USER,
"Software\\Microsoft\\Internet Explorer\\Download",
{
# "Internet Explorer - Security Warning"
# "The publisher could not be verified."
"CheckExeSignatures": "no",
},
],
[
HKEY_LOCAL_MACHINE,
"Software\\Microsoft\\Windows\\CurrentVersion\\Explorer",
{
# Disable SmartScreen Windows 8
"SmartScreenEnabled": "Off"
}
],
[
HKEY_CURRENT_USER,
"Software\\Microsoft\\Internet Explorer\\PhishingFilter",
{
# Disable SmartScreen Filter Windows 7
"EnabledV9": 0
}
],
]
def setup_proxy(self, proxy_host):
"""Configure Internet Explorer to route all traffic through a
proxy."""
self.init_regkeys([[
HKEY_CURRENT_USER,
"SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\Internet Settings",
{
"MigrateProxy": 1,
"ProxyEnable": 1,
"ProxyHttp1.1": 0,
"ProxyServer": "http://%s" % proxy_host,
"ProxyOverride": "<local>",
},
]])
def start(self, target):
if "proxy" in self.options:
self.setup_proxy(self.options["proxy"])
# If it's a HTML file, force an extension, or otherwise Internet
# Explorer will open it as a text file or something else non-html.
if os.path.exists(target) and not target.endswith((".htm", ".html", ".mht", ".mhtml", ".url", ".swf")):
os.rename(target, target + ".html")
target += ".html"
log.info("Submitted file is missing extension, adding .html")
iexplore = self.get_path("Internet Explorer")
return self.execute(
iexplore, args=[target], maximize=True, mode="iexplore"
)
|
import args
import z
z.getp.quick_list = False
import buy
import os
from sortedcontainers import SortedSet
import gbuy_old
import math
date = "2000-01-01"
dates = z.getp("dates")
import delstock
def process(astock, one_at_a_time = True):
global problems
try:
problems = []
print("date: {}".format( date))
df = gbuy_old.getDataFromYahoo(astock, date)
if df is None:
problems.append(astock)
print("problem dl astock: {}".format( astock))
return
lastyear = None
f = None
for idx in df.index:
cdate = str(idx.to_pydatetime()).split(" ")[0]
cyear = cdate.split("-")[0]
if cyear != lastyear:
if f is not None:
f.close()
apath = z.getPath("split/{}/{}_{}.csv".format(astock[0], astock, cyear))
# if os.path.exists(apath):
# continue
lastyear = cyear
f = open(apath, "w")
f.write("Date,Open,High,Low,Close,Adj Close,Volume\n")
try:
opend = round(df.at[idx, "Open"],3)
high = round(df.at[idx, "High"],3)
low = round(df.at[idx, "Low"],3)
closed = round(df.at[idx, "Close"],3)
adj = round(df.at[idx, "Adj Close"],3)
vol = df.at[idx, "Volume"]
except:
opend = round(df.at[idx, "Open"][0],3)
high = round(df.at[idx, "High"][0],3)
low = round(df.at[idx, "Low"][0],3)
closed = round(df.at[idx, "Close"][0],3)
adj = round(df.at[idx, "Adj Close"][0],3)
vol = df.at[idx, "Volume"][0]
if not math.isnan(opend):
f.write("{},{},{},{},{},{},{}\n".format(cdate, opend, high, low, closed, adj, vol))
try:
if cdate != dates[-1]:
print("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! MISSING TODAY {}".format(astock))
delstock.delstock(astock)
exit()
except:
pass
except Exception as e:
print ("problem with gbuy_old")
z.trace(e)
exit()
def genNeeded():
mcdic2 = z.getp("mcdic2")
needed = list()
bar = list()
for astock,items in mcdic2.items():
if items[0] >= 65 and astock not in stocks and not astock[-1].islower():
try:
info = buy.getFrom("savemeinfo2", astock)
if info["Average Vol. (3m)"] < 15000:
continue
except:
pass
needed.append(astock)
bar.append((items[0],astock))
return needed
if __name__ == '__main__':
import args
needed = z.getp("temp_needed")
stocks = z.getp("listofstocks")
print("stocks: {}".format( len(stocks)))
for astock in needed:
print("astock : {}".format( astock ))
process(astock)
stocks.append(astock)
print("stocks: {}".format( len(stocks)))
z.setp(stocks, "listofstocks")
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
#https://jalammar.github.io/visual-interactive-guide-basics-neural-networks/
##load the data
#dataset has different attributes of the houses that are sold
data=pd.read_csv('data1.csv')
#now we will classify whether the house is a good or bad buy on the
#basis of two attributes, area and bathrooms (you can take any two attributes you feel are relevant)
#we will eliminate the unwanted coloums
data=data.drop(["index","price","sq_price"],axis=1)
#get first 10 row of the dataset
data=data[0:10]
#let's assign random class to 10 row(good/bad) 1=good 2=bad
data.loc[:,("y1")]=[1,0,0,1,0,1,0,1,1,0]
#added another column for bad class y2 will be negation of y1
data.loc[:,("y2")]=data["y1"]==0 #outputs true/false
data.loc[:,("y2")]=data["y2"].astype(int) #converting true/false to 1/0
#since we have to feed this data to tensorflow we have to convert it to Martix
input_x=data.loc[:,['area','bathrooms']].as_matrix()
input_y=data.loc[:,['y1','y2']].as_matrix()
#some parameters for training process
lr=0.00001
no_epochs=2000
display_step=50
n_samples=input_y.size
#tenserflow variables
x=tf.placeholder(tf.float32,[None,2])
w=tf.Variable(tf.zeros([2,2]))
b=tf.Variable(tf.zeros([2]))
#
y_values=tf.add(tf.matmul(x,w),b)
y=tf.nn.softmax(y_values)#activation function
y_=tf.placeholder(tf.float32,[None,2])
error=tf.reduce_sum(tf.pow(y_-y,2))/(2*n_samples)
optimizer= tf.train.GradientDescentOptimizer(lr).minimize(error)#optimizer to optimize the cost
#initialising all tf variable
init =tf.initialize_all_variables()
sess=tf.Session()
sess.run(init)
for i in range(no_epochs):
sess.run(optimizer,feed_dict={x:input_x,y_:input_y})
if (i)%display_step==0:
cc=sess.run(error,feed_dict={x:input_x,y_:input_y})
print "training step:",'%d' %(i),"cost=","{:.9f}".format(cc)
print "Optimization finished"
training_cost=sess.run(error,feed_dict={x:input_x,y_:input_y})
print "Training cost=", training_cost, "W=", sess.run(w), "b=", sess.run(b), '\n'
#prediction is less accurate; we can add more hidden layers to improve it.
print sess.run(y, feed_dict={x: input_x })
# y1 y2
# [[ 0.87931693 0.12068304]
# [ 0.81913888 0.18086113]
# [ 0.9059546 0.09404533]
# [ 0.79193395 0.20806599]
# [ 0.94435722 0.0556428 ]
# [ 0.86692518 0.13307482]
# [ 0.80973089 0.19026911]
# [ 0.79369158 0.20630841]
# [ 0.7863369 0.2136631 ]
# [ 0.80384922 0.19615081]]
#it classifies all the houses as a good buy
|
# -*- coding: utf-8 -*-
"""Functions for simulations.
"""
import numpy as np
__all__ = ['shepp_logan']
def shepp_logan(shape, dtype=np.complex):
"""Generates a Shepp Logan phantom with a given shape and dtype.
Args:
shape (tuple of ints): shape, can be of length 2 or 3.
dtype (Dtype): data type.
Returns:
array.
"""
return phantom(shape, sl_amps, sl_scales, sl_offsets, sl_angles, dtype)
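# Hypothetical usage: shepp_logan((128, 128)) returns a 128x128 complex-valued
# phantom; passing a 3-tuple shape yields a 3D volume instead.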
sl_amps = [1, -0.8, -0.2, -0.2, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1]
sl_scales = [[.6900, .920, .810], # white big
[.6624, .874, .780], # gray big
[.1100, .310, .220], # right black
[.1600, .410, .280], # left black
[.2100, .250, .410], # gray center blob
[.0460, .046, .050],
[.0460, .046, .050],
[.0460, .046, .050], # left small dot
[.0230, .023, .020], # mid small dot
[.0230, .023, .020]]
sl_offsets = [[0., 0., 0],
[0., -.0184, 0],
[.22, 0., 0],
[-.22, 0., 0],
[0., .35, -.15],
[0., .1, .25],
[0., -.1, .25],
[-.08, -.605, 0],
[0., -.606, 0],
[.06, -.605, 0]]
sl_angles = [[0, 0, 0],
[0, 0, 0],
[-18, 0, 10],
[18, 0, 10],
[0, 0, 0],
[0, 0, 0],
[0, 0, 0],
[0, 0, 0],
[0, 0, 0],
[0, 0, 0]]
def phantom(shape, amps, scales, offsets, angles, dtype):
"""
Generate a cube of given shape using a list of ellipsoid
parameters.
"""
if len(shape) == 2:
ndim = 2
shape = (1, shape[-2], shape[-1])
elif len(shape) == 3:
ndim = 3
else:
raise ValueError('Incorrect dimension')
out = np.zeros(shape, dtype=dtype)
z, y, x = np.mgrid[-(shape[-3] // 2):((shape[-3] + 1) // 2),
-(shape[-2] // 2):((shape[-2] + 1) // 2),
-(shape[-1] // 2):((shape[-1] + 1) // 2)]
coords = np.stack((x.ravel() / shape[-1] * 2,
y.ravel() / shape[-2] * 2,
z.ravel() / shape[-3] * 2))
for amp, scale, offset, angle in zip(amps, scales, offsets, angles):
ellipsoid(amp, scale, offset, angle, coords, out)
if ndim == 2:
return out[0, :, :]
else:
return out
def ellipsoid(amp, scale, offset, angle, coords, out):
"""
Generate a cube containing an ellipsoid defined by its parameters.
If out is given, fills the given cube instead of creating a new
one.
"""
R = rotation_matrix(angle)
coords = (np.matmul(R, coords) - np.reshape(offset, (3, 1))) / \
np.reshape(scale, (3, 1))
r2 = np.sum(coords ** 2, axis=0).reshape(out.shape)
out[r2 <= 1] += amp
def rotation_matrix(angle):
cphi = np.cos(np.radians(angle[0]))
sphi = np.sin(np.radians(angle[0]))
ctheta = np.cos(np.radians(angle[1]))
stheta = np.sin(np.radians(angle[1]))
cpsi = np.cos(np.radians(angle[2]))
spsi = np.sin(np.radians(angle[2]))
alpha = [[cpsi * cphi - ctheta * sphi * spsi,
cpsi * sphi + ctheta * cphi * spsi,
spsi * stheta],
[-spsi * cphi - ctheta * sphi * cpsi,
-spsi * sphi + ctheta * cphi * cpsi,
cpsi * stheta],
[stheta * sphi,
-stheta * cphi,
ctheta]]
return np.array(alpha)
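# Minimal usage sketch (not part of the original module): generate a 2-D
# 256x256 phantom and display its magnitude; matplotlib is assumed to be
# available only for this illustration.
if __name__ == '__main__':
    import matplotlib.pyplot as plt
    ph = shepp_logan((256, 256))
    plt.imshow(np.abs(ph), cmap='gray')
    plt.title('Shepp-Logan phantom')
    plt.show()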
|
# Python course: fundamentals and applications
# Task 3, block 2.1. Errors and exceptions
'''
Alice has some interesting information that Bob wants to get hold of.
Alice is smart, so she keeps her information in an encrypted file.
Alice has a bad memory, so she keeps all of her passwords in plain text in a text file.
Bob managed to obtain the encrypted file with the interesting information and the password file, but he could not work out which password he needs. Help him solve this problem.
Alice encrypted her information with the simple-crypt library.
She represented the information as a string and then wrote the result of simplecrypt.encrypt to a binary file.
You need to install the simple-crypt library and, using simplecrypt.decrypt, find out which of the passwords is the key that decrypts the file with the interesting information.
The answer to this task is Alice's decrypted interesting information.
File with the information: https://stepik.org/media/attachments/lesson/24466/encrypted.bin
File with the passwords: https://stepik.org/media/attachments/lesson/24466/passwords.txt
'''
# Solution
from simplecrypt import decrypt, DecryptionException
with open(r"D:\Other\Python\encrypted.bin", "rb") as enc:
    encrypted = enc.read()
with open(r"D:\Other\Python\passwords.txt", "r") as psw:
    passwords = psw.read().splitlines()
for psw in passwords:
    try:
        message = decrypt(psw, encrypted)
        print('Password:', psw, '\nMessage:', message.decode("utf-8"))
        break
    except DecryptionException:
        pass
|
#!/usr/bin/python3
from datetime import datetime
from faker import Faker
import psycopg2
import time
import os
BATCH_SIZE = 20
UPDATE_FREQUENCY = 10
ITERATION = 100
HOSTNAME = 'localhost'
fake = Faker()
Faker.seed(datetime.now().timestamp())
def insert_user_record(connection, cursor):
print("inserting records to user table...")
pg_insert = """
INSERT INTO users
(first_name, last_name, email)
VALUES
(%s, %s, %s)
"""
for _ in range(BATCH_SIZE):
first_name = fake.first_name()
last_name = fake.last_name()
email = fake.email()
cursor.execute(pg_insert, (first_name, last_name, email))
connection.commit()
print(f"{BATCH_SIZE} records has been successfully added")
if __name__ == "__main__":
try:
connection = psycopg2.connect(
user = "postgres",
password = "postgres",
host = HOSTNAME,
port = "5432",
database = "postgres"
)
cursor = connection.cursor()
for i in range(ITERATION):
insert_user_record(connection, cursor)
time.sleep(UPDATE_FREQUENCY)
except (Exception, psycopg2.Error) as error:
print("Error connecting to PostgreSQL database", error)
connection = None
finally:
        if connection is not None:
cursor.close()
connection.close()
print("PostgreSQL connection is now closed")
|
#converts the byte read from the PLC to its respective type
from model.Measur import MS
import logging
def loadMeasur(tags):
try:
mss = {}
mss['G01'] = MS("G01",5)
mss['G02'] = MS("G02",5)
mss['G03'] = MS("G03",10)
mss['G04'] = MS("G04",10)
for ms in mss.values():
for tag in tags:
if tag.measur == ms.name:
ms.insert_tag(tag)
return mss
except Exception as e:
        print(e)
logging.error(str(e))
|
# Generated by Django 2.2.1 on 2019-07-28 14:01
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('notice', '0014_auto_20190728_2258'),
]
operations = [
migrations.AlterModelOptions(
name='notice',
options={'ordering': ['-id']},
),
]
|
def balancedBrackets(string):
# Write your code here
# Space: O(n) n = length of string
# The worst case scenario: All of the string would be brackets that are unmatched and stack would be length n.
# Time: O(n) n = length of string
# This dictionary's purpose is to make it easier to check for a matching bracket.
bracket_dict = {
'[': ']',
'(': ')',
'{': '}',
'|': '|'
}
    # This set is used to isolate the characters of the string we're interested in.
# if the char isn't in this set, we don't care about it.
possible_matches = {'[',']','(',')','{', '}', '|'}
# create a stack to keep track of unmatched brackets.
stack = []
    # iterate through the string character by character.
for char in string:
# check if the char is a bracket.
# If not, continue to next character.
if char not in possible_matches:
continue
# If our stack is empty, we don't have anything to compare it to,
# so we know we have to add it to the stack and no other action is necessary.
        if len(stack) == 0:
stack.append(char)
else:
# compare the new bracket to bracket at top of stack
# if the bracket at the top of the stack is an opening bracket AND
# char is the corresponding closing bracket, pop the bracket off the stack.
            if stack[-1] in bracket_dict and bracket_dict[stack[-1]] == char:
# if match pop bracket off stack
stack.pop()
# if not match, add bracket to stack
else:
stack.append(char)
# if there are still brackets left in the stack that means
# there wasn't a matching closing bracket for each opening bracket and
# we should return false.
# Otherwise, we return true.
if len(stack) > 0:
return False
else:
return True
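# Quick usage sketch (not part of the original solution): a few illustrative
# checks of balancedBrackets; the expected results follow the rules described
# in the comments above.
if __name__ == '__main__':
    assert balancedBrackets('([])(){}(())()()')        # every bracket is matched
    assert not balancedBrackets('()[]{}{')             # unmatched opening '{'
    assert balancedBrackets('no brackets here at all') # non-bracket text is ignored
    print('all checks passed')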
|
import face_recognition
import cv2
import os
import smtplib
from email.message import EmailMessage
import imghdr
import datetime
EMAIL_ADDRESS = os.environ.get("EMAIL_ADDRESS")
EMAIL_PASSWORD = os.environ.get("EMAIL_PASSWORD")
Time = datetime.datetime.now()
KNOWN_FACE_DIR = "Known_faces"
Known_faces_encoding = []
Known_names = []
cap = cv2.VideoCapture(0)
print("process Known Process!")
for name in os.listdir(KNOWN_FACE_DIR):
for filename in os.listdir(f"{KNOWN_FACE_DIR}/{name}"):
image = face_recognition.load_image_file(f"{KNOWN_FACE_DIR}/{name}/{filename}")
encoding = face_recognition.face_encodings(image)[0]
Known_faces_encoding.append(encoding)
Known_names.append(name)
print("Recognizing Faces...")
while True:
ret, frame = cap.read()
face_location = face_recognition.face_locations(frame)
face_encoding = face_recognition.face_encodings(frame, face_location)
for(top, right, bottom, left), face_encodings in zip(face_location, face_encoding):
result = face_recognition.compare_faces(Known_faces_encoding, face_encodings, tolerance=0.6)
name = "Unknown"
if True in result:
name = Known_names[result.index(True)]
print(name)
cv2.rectangle(frame, (left, top), (right, bottom), (0,0,0), 2)
cv2.rectangle(frame, (left, bottom + 20), (right, bottom), (0,0,0), cv2.FILLED)
cv2.putText(frame, name, (left, bottom + 18), cv2.FONT_ITALIC, 0.7, (255,255,255), 1)
cv2.imshow("Frame", frame)
if name == "Unknown":
cv2.imwrite("unknown.png",frame)
msg = EmailMessage()
msg['Subject'] = "Security Alert!!"
        msg['From'] = EMAIL_ADDRESS
        msg['To'] = "prkapadnis2001@gmail.com"
        msg.set_content(f"Hey, someone entered your house at {Time}\n\nImage attached")
with open("unknown.png", 'rb') as img:
file_name = img.name
file_data = img.read()
file_type = imghdr.what(file_name)
            msg.add_attachment(file_data, filename=file_name, maintype='image', subtype=file_type)
        with smtplib.SMTP_SSL('smtp.gmail.com', 465) as smtp:
            smtp.login(EMAIL_ADDRESS, EMAIL_PASSWORD)
smtp.send_message(msg)
print("Send!")
if cv2.waitKey(1) == ord('q'):
break
cap.release()
cv2.destroyAllWindows()
|
# Given the array nums consisting of 2n elements in the form
#
# [x1,x2,...,xn,y1,y2,...,yn].
#
# Return the array in the form [x1,y1,x2,y2,...,xn,yn].
class Solution:
    def shuffle(self, nums, n):
        # interleave the first half (the x values) with the second half (the y values)
        result = []
        for x, y in zip(nums[:n], nums[n:]):
            result.extend([x, y])
        return result
if __name__ == '__main__':
test_input1 = [1, 2, 3, 4, 4, 3, 2, 1]
test_input2 = 4
    print(Solution().shuffle(test_input1, test_input2))  # expected: [1, 4, 2, 3, 3, 2, 4, 1]
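# Alternative sketch (illustrative, not part of the original solution): the
# same interleaving expressed by zipping the two halves and flattening the
# resulting pairs with itertools.chain.
def shuffle_with_chain(nums, n):
    from itertools import chain
    # zip pairs each x_i with its y_i; chain flattens the pairs into one list
    return list(chain.from_iterable(zip(nums[:n], nums[n:])))

# shuffle_with_chain([1, 2, 3, 4, 4, 3, 2, 1], 4) -> [1, 4, 2, 3, 3, 2, 4, 1]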
|
def main():
incomes = []
number_of_months = int(input("How many months? "))
# user enters number of months they want to calculate.
for month in range(1, number_of_months + 1):
income = float(
input("Enter income for month {}: ".format(month)))
incomes.append(income)
# user can input the amount for each month
print_report(incomes)
def print_report(incomes):
print("\nIncome Report\n-----------")
total = 0
for month, income in enumerate(incomes):
total += income
# totals the amounts for each month.
print("Month {:2} - Income: ${:10.2f} \
Total: ${:10.2f}".format(month + 1, income, total))
# to test this, i used 5 moths and the amount for each month was 1000, 1500, 900, 1100, 1000.
# total was 5500
main()
|
import os
import re
import psycopg2
import logging
import datetime
import itertools
from flask import Blueprint, render_template, redirect, request, \
g, url_for, abort, config, current_app, session, flash, jsonify, Response
from models import *
from auth import *
from utils import *
from resources import *
from db import *
from NORM.exceptions import ObjectNotFound,ObjectExists
admin_pages = Blueprint('admin', __name__, template_folder='templates/admin')
@admin_pages.route('/control', methods=['GET'])
def admin():
if not g.admin:
return render_template('login.html')
try:
cmsdate = datetime.datetime.fromtimestamp(os.path.getmtime(current_app.config['CACHE_PATH']+'.sqlite'))
except Exception as exc:
logging.warn("Error getting cache time: %s", exc)
cmsdate = None
return render_template('admin.html', cmsdate=cmsdate)
@admin_pages.route('/control/cacheclear', methods=['POST'])
@check_admin
def cacheclear():
try:
os.unlink(current_app.config['CACHE_PATH']+'.sqlite')
except Exception as exc:
abort(500)
return redirect(url_for('.admin'))
@admin_pages.route('/control', methods=['POST'])
def admin_post():
try:
user = User.authenticate(g.conn, request.form['username'], request.form['password'])
if user is None:
raise ValueError
except (ObjectNotFound,ValueError) as exc:
current_app.logger.warn("Exception: %s", repr(exc))
return render_template('login.html', message='Incorrect username or password')
session['admin'] = True
session['userid'] = user['id']
session['username'] = user['username']
session['admin_level'] = user['user_type']
flash("Admin login successful")
return redirect(url_for('.admin'))
@admin_pages.route('/control/logout')
def logout():
del session['admin']
return redirect(url_for('cms.index'))
@admin_pages.route('/control/url/submit', methods=['POST'])
@check_admin
def forcecheck():
data = g.api.submit_url(request.form['url'], force=1)
if data['success']:
flash("URL submitted successfully")
else:
flash("Error submitting result")
return redirect(url_for('.admin'))
@admin_pages.route('/control/excludelist', methods=['GET'])
@check_admin
def blacklist_select():
entries = g.api.blacklist_select()
return render_template('blacklist.html',
entries = entries
)
@admin_pages.route('/control/excludelist', methods=['POST'])
@check_admin
def blacklist_post():
g.api.blacklist_insert(request.form['domain'])
return redirect(url_for('.blacklist_select'))
@admin_pages.route('/control/excludelist/delete', methods=['GET'])
@check_admin
def blacklist_delete():
g.api.blacklist_delete(request.args['domain'])
return redirect(url_for('.blacklist_select'))
@admin_pages.route('/control/user')
@check_admin
def users():
users = User.select(g.conn)
return render_template('users.html', users=users)
@admin_pages.route('/control/user/add', methods=['POST'])
@check_admin
def user_add():
f = request.form
user = User(g.conn)
user.update({
'username': f['username'],
'email': f['email'],
'user_type': f['user_type'],
})
newpass = user.random_password()
user.set_password(newpass)
user.store()
g.conn.commit()
flash("User {0} created with password {1} ".format(f['username'], newpass))
return redirect(url_for('.users'))
@admin_pages.route('/control/user/disable/<int:id>')
@check_admin
def user_disable(id):
ret = user_set_enabled(id, False)
return ret
@admin_pages.route('/control/user/enable/<int:id>')
@check_admin
def user_enable(id):
ret = user_set_enabled(id, True)
return ret
def user_set_enabled(id, value):
user = User(g.conn, id)
user['enabled'] = value
user.store()
g.conn.commit()
flash("User {0} {1}.".format(user['username'],
'enabled' if value else 'disabled'))
return redirect(url_for('.users'))
@admin_pages.route('/control/user/newpassword/<int:id>')
@check_admin
def user_generate_password(id):
user = User(g.conn, id)
newpass = user.reset_password()
user.store()
g.conn.commit()
flash("User {0} password reset to: {1}".format(user['username'], newpass))
return redirect(url_for('.users'))
#
# Search filter admin
# -------------------
#
@admin_pages.route('/control/search-filter')
@check_admin
def search_filter():
terms = SearchIgnoreTerm.select(g.conn, _orderby=['-enabled','term'])
return render_template('search_filter.html',
terms=terms)
@admin_pages.route('/control/search-filter/add', methods=['POST'])
@check_admin
def search_filter_add():
f = request.form
term = SearchIgnoreTerm(g.conn)
term.update({
'term': f['term'],
'enabled': True
})
term.store()
g.conn.commit()
return redirect(url_for('.search_filter'))
@admin_pages.route('/control/search-filter/update', methods=['POST'])
@check_admin
def search_filter_update():
f = request.form
enabled = set(make_list(f.getlist('enabled')))
terms = set(make_list(f.getlist('term')))
remove = terms - enabled
add = enabled - terms
current_app.logger.debug("Terms: %s, Enabled: %s", terms, enabled)
current_app.logger.debug("Add: %s, Remove: %s", add, remove)
for termid in remove:
term = SearchIgnoreTerm(g.conn, id=int(termid))
term['enabled'] = False
term.store()
for termid in add:
term = SearchIgnoreTerm(g.conn, id=int(termid))
term['enabled'] = True
term.store()
g.conn.commit()
return redirect(url_for('.search_filter'))
#
# Court Order admin
# ------------------
#
@admin_pages.route('/control/courtorders')
@check_admin
def courtorders():
reports = CourtJudgment.select(g.conn, _orderby='-date')
return render_template('courtorders.html', judgments=reports)
@admin_pages.route('/control/courtorders/<int:id>')
@check_admin
def courtorders_view(id):
obj = CourtJudgment(g.conn, id)
rh = obj.get_rightsholder()
return render_template('courtorders_view.html',
judgment=obj,
rh=rh,
orders=obj.get_court_orders(),
sites=obj.get_grouped_urls_with_expiry(),
groups=[(grp['id'],grp['name']) for grp in obj.get_url_groups()]
)
@admin_pages.route('/control/courtorders/review')
@admin_pages.route('/control/courtorders/review/<int:page>')
@check_admin
def courtorders_review(page=1):
offset = (page-1)*25
q = Query(g.conn,
"""
select count(distinct urls.urlid) ct from urls
inner join url_latest_status uls on uls.urlid = urls.urlid
inner join isps on isps.name = uls.network_name and regions && %s::varchar[]
and (isps.filter_level = 'No Adult' or isps.isp_type = 'mobile')
left join court_judgment_urls cu on urls.url = cu.url
where urls.status = 'ok' and uls.status = 'blocked'
and urls.url ~* '^https?://[^/]+$'
and uls.blocktype = 'COPYRIGHT' and (cu.url is null or cu.judgment_id is null)
""",
[[current_app.config['DEFAULT_REGION']]]
)
count = q.fetchone()['ct']
q.close()
q = Query(g.conn,
"""
select urls.url, array_agg(network_name) networks,
min(uls.created) created, min(uls.first_blocked) first_blocked, whois_expiry ,
case when exists(select id from court_judgment_url_flags cjuf
where cjuf.urlid = urls.urlid and cjuf.judgment_url_id is null) then true else false end as flagged
from urls
inner join url_latest_status uls on uls.urlid = urls.urlid
inner join isps on isps.name = uls.network_name and regions && %s::varchar[]
and (isps.filter_level = 'No Adult' or isps.isp_type = 'mobile')
left join court_judgment_urls cu on urls.url = cu.url
where urls.status = 'ok' and uls.status = 'blocked'
and urls.url ~* '^https?://[^/]+$'
and uls.blocktype = 'COPYRIGHT' and cu.url is null
group by urls.url, whois_expiry, urls.urlid
order by min(uls.first_blocked) limit 25 offset {0}""".format(offset),
[[current_app.config['DEFAULT_REGION']]]
)
return render_template('courtorders_review.html',
results=q,
page=page,
pagesize=25,
pagecount=get_pagecount(count, 25)
)
@admin_pages.route('/control/courtorders/edit/<int:id>')
@admin_pages.route('/control/courtorders/add')
@check_admin
def courtorders_edit(id=None):
obj = CourtJudgment(g.conn, id)
return render_template('courtorders_edit.html',
obj=obj,
powers=[ (x['id'], x['name'])
for x in
CourtPowers.select(g.conn, _orderby='name')
],
orders = obj.get_court_orders(),
order_networks = obj.get_court_order_networks(),
rightsholders=Rightsholder.get_list(g.conn)
)
@admin_pages.route('/control/courtorders/update/<int:id>', methods=['POST'])
@admin_pages.route('/control/courtorders/insert', methods=['POST'])
@check_admin
def courtorders_update(id=None):
try:
f = request.form
obj = CourtJudgment(g.conn, id)
obj.update({x: convertnull(f[x]) for x in CourtJudgment.FIELDS})
obj.store()
to_delete = [ int(x) for x in f.getlist('delete') if x ]
for order_id, network_name, url, date, expiry_date in zip(
f.getlist('order_id'),
f.getlist('network_name'),
f.getlist('applies_url'),
f.getlist('order_date'),
f.getlist('expiry_date'),
):
order = CourtOrder(g.conn, order_id or None)
if order['id'] in to_delete:
order.delete()
continue
order.update({
'network_name': network_name,
'url': url,
'judgment_id': obj['id'],
'date':convertnull(date),
'expiry_date':convertnull(expiry_date)
})
order.store()
g.conn.commit()
return redirect(url_for('.courtorders'))
except KeyError as exc:
logging.warn("Key error: %s", exc.args)
raise
@admin_pages.route('/control/courtorders/delete/<int:id>')
@check_admin
def courtorders_delete(id):
obj = CourtJudgment(g.conn, id)
obj.delete()
g.conn.commit()
return redirect(url_for('.courtorders'))
@admin_pages.route('/control/courtorders/site/add', methods=['POST'])
@check_admin
def courtorders_site_add():
f = request.form
obj = CourtJudgmentURL(g.conn)
obj.update({'url':normalize_url(f['url']),'judgment_id':f['judgment_id']})
try:
obj.store()
g.conn.commit()
except ObjectExists:
flash("This site has already been added to this court order")
g.conn.rollback()
return redirect(url_for('.courtorders_view', id=f['judgment_id']))
@admin_pages.route('/control/courtorders/site/group', methods=['POST'])
@check_admin
def courtorders_site_group():
f = request.form
if f['group_id']:
grp = CourtJudgmentURLGroup(g.conn, f['group_id'])
else:
grp = None
for site_id in f.getlist('site_id'):
obj = CourtJudgmentURL(g.conn, site_id)
if f['group_id'] == '':
obj['group_id'] = None
else:
obj['group_id'] = f['group_id']
obj.store()
g.conn.commit()
if grp:
flash("Added URL(s) to group: " + grp['name'])
else:
flash("Removed URL(s) from groups")
return redirect(url_for('.courtorders_view', id=f['judgment_id']))
@admin_pages.route('/control/courtorders/site/group/add', methods=['POST'])
@check_admin
def courtorders_group_add():
obj = CourtJudgmentURLGroup(g.conn)
obj['judgment_id'] = request.form['judgment_id']
obj['name'] = request.form['name']
obj.store()
g.conn.commit()
flash("Added URL group: "+ request.form['name'])
return redirect(url_for('.courtorders_view', id=request.form['judgment_id']))
@admin_pages.route('/control/courtorders/site/group/delete/<int:id>', methods=['GET'])
@check_admin
def courtorders_group_delete(id):
obj = CourtJudgmentURLGroup(g.conn, id=id)
obj.delete()
g.conn.commit()
flash("Deleted URL group: "+ obj['name'])
return redirect(url_for('.courtorders_view', id=obj['judgment_id']))
@admin_pages.route('/control/courtorders/site/delete/<int:id>', methods=['GET'])
@check_admin
def courtorders_site_delete(id):
obj = CourtJudgmentURL(g.conn, id=id)
obj.delete()
g.conn.commit()
flash("Removed site: "+ obj['url'])
return redirect(url_for('.courtorders_view', id=obj['judgment_id']))
@admin_pages.route('/control/courtorders/site/flag/<int:id>', methods=['GET'])
@check_admin
def courtorders_site_flag(id):
url = CourtJudgmentURL(g.conn, id=id)
judgment = url.get_court_judgment()
q = Query(g.conn, """
select isps.name, uls.status, uls.blocktype, uls.created
from urls
inner join url_latest_status uls on uls.urlid = urls.urlid
inner join isps on uls.network_name = isps.name
where urls.url = %s
and isps.regions && '{gb}'::varchar[] and (isps.isp_type = 'mobile' or isps.filter_level = 'No Adult')
order by isps.name""",
[url['url']])
flags = Query(g.conn, """
select f.*
from court_judgment_url_flag_history f
where f.judgment_url_id = %s
order by f.date_observed desc""",
[url['id']])
try:
flag = CourtJudgmentURLFlag.select_one(g.conn,
judgment_url_id = url['id'])
except ObjectNotFound:
flag = {}
return render_template('courtorders_flag.html',
url=url,
flag=flag,
judgment=judgment,
today=datetime.date.today(),
status=q,
flags=flags,
flagreasons=load_data('flagreasons')
)
@admin_pages.route('/control/courtorders/url/flag/<path:id>', methods=['GET'])
@check_admin
def courtorders_url_flag(id):
id = fix_path(id)
try:
url = Url.select_one(g.conn, url=id)
except ObjectNotFound:
abort(404)
q = Query(g.conn, """
select isps.name, uls.status, uls.blocktype, uls.created
from url_latest_status uls
inner join isps on uls.network_name = isps.name
where urlid = %s
and isps.regions && '{gb}'::varchar[] and (isps.isp_type = 'mobile' or isps.filter_level = 'No Adult')
order by isps.name""",
[url['urlid']])
flags = Query(g.conn, """
select f.*
from court_judgment_url_flag_history f
where f.urlid = %s
order by f.date_observed desc""",
[url['urlid']])
try:
flag = CourtJudgmentURLFlag.select_one(g.conn,
urlid = url['urlid'])
except ObjectNotFound:
flag = {}
return render_template('courtorders_flag.html',
url=url,
flag=flag,
judgment=None,
today=datetime.date.today(),
status=q,
flags=flags,
flagreasons=load_data('flagreasons'),
formsubmit=url_for('.courtorders_url_flag_post')
)
@admin_pages.route('/control/courtorders/site/flag', methods=['POST'])
@check_admin
def courtorders_site_flag_post():
f = request.form
url = CourtJudgmentURL(g.conn, id=f['urlid'])
if 'delete' in f:
try:
flag = CourtJudgmentURLFlag.select_one(g.conn, judgment_url_id = url['id'])
flag.delete()
judgment = url.get_court_judgment()
g.conn.commit()
flash("Url {0} unflagged".format(url['url']))
return redirect(url_for('.courtorders_view', id=judgment['id']))
except ObjectNotFound:
g.conn.rollback()
abort(404)
try:
flag = CourtJudgmentURLFlag.select_one(g.conn, judgment_url_id = url['id'])
except ObjectNotFound:
flag = CourtJudgmentURLFlag(g.conn)
flag.update({
'reason': f['reason'],
'description': f['description'],
'date_observed': f['date_observed'] or None,
'abusetype': f['abusetype'] if f['reason'] == 'domain_may_be_abusive' else None,
'judgment_url_id': f['urlid'],
'urlid': url.get_urlid(),
})
flag.store()
judgment = url.get_court_judgment()
g.conn.commit()
flash("Url {0} flagged".format(url['url']))
return redirect(url_for('.courtorders_view', id=judgment['id']))
@admin_pages.route('/control/courtorders/url/flag', methods=['POST'])
@check_admin
def courtorders_url_flag_post():
f = request.form
url = Url.select_one(g.conn, urlid=f['urlid'])
if 'delete' in f:
try:
flag = CourtJudgmentURLFlag.select_one(g.conn, urlid = url['urlid'])
flag.delete()
g.conn.commit()
flash("Url {0} unflagged".format(url['url']))
return redirect(url_for('.courtorders_url_flag', id=url['url']))
except ObjectNotFound:
g.conn.rollback()
abort(404)
try:
flag = CourtJudgmentURLFlag.select_one(g.conn, urlid = url['urlid'])
except ObjectNotFound:
flag = CourtJudgmentURLFlag(g.conn)
flag.update({
'reason': f['reason'],
'description': f['description'],
'date_observed': f['date_observed'] or None,
'abusetype': f['abusetype'] if f['reason'] == 'domain_may_be_abusive' else None,
'judgment_url_id': None,
'urlid': url['urlid'],
})
flag.store()
g.conn.commit()
flash("Url {0} flagged".format(url['url']))
return redirect(url_for('.courtorders_url_flag', id=url['url']))
@admin_pages.route('/control/courtorders/site/flag/delete/<int:id>', methods=['GET'])
@check_admin
def courtorders_site_flag_delete(id):
q = Query(g.conn,
"delete from court_judgment_url_flag_history where id = %s returning judgment_url_id, urlid ",
[id])
row = q.fetchone()
g.conn.commit()
flash("Historical flag removed")
if row['judgment_url_id']:
return redirect(url_for('.courtorders_site_flag', id=row['judgment_url_id']))
else:
url = Url.select_one(g.conn, urlid=row['urlid'])
return redirect(url_for('.courtorders_url_flag', id=url['url']))
@admin_pages.route('/control/courtorders/site/group/import', methods=['GET'])
@check_admin
def courtorders_group_import():
return render_template('courtorders_group_import.html', groups=CourtJudgmentURLGroup.select(g.conn, _orderby='name'))
@admin_pages.route('/control/courtorders/site/group/import', methods=['POST'])
@check_admin
def courtorders_group_do_import():
if 'groupfile' not in request.files:
flash('No input file supplied')
return redirect(request.url)
groupfile = request.files['groupfile']
if groupfile.filename == '':
flash('No selected file')
return redirect(request.url)
if groupfile and groupfile.filename.endswith('.csv'):
import_groupfile(groupfile)
flash("Imported successfully")
return redirect(url_for('.courtorders'))
def import_groupfile(groupfile):
import csv
reader = csv.reader(groupfile.stream)
for row in reader:
url = row[0]
if len(row) < 2:
continue
group = row[1]
if not url or not group:
continue
try:
groupobj = CourtJudgmentURLGroup.select_one(g.conn, name=group)
try:
urlobj = CourtJudgmentURL.select_one(g.conn, url=url)
except ObjectNotFound:
urlobj = None
if urlobj is None or urlobj['group_id']:
# already assigned to a group, create a new url obj and assign that to the group & same judgment
urlobj = CourtJudgmentURL(g.conn)
urlobj.update({
'judgment_id': groupobj['judgment_id'],
'group_id': groupobj['id'],
'url': url
})
else:
# assign existing url to group
urlobj['group_id'] = groupobj['id']
try:
urlobj.store()
except ObjectExists:
current_app.logger.warn("Duplicate entry: %s", urlobj.data)
g.conn.rollback()
else:
g.conn.commit()
except ObjectNotFound:
current_app.logger.warn("Group not found: %s", group)
g.conn.rollback()
## URL Admin
@admin_pages.route('/control/urls', methods=['GET'])
@check_admin
def urls():
if request.args.get('url'):
try:
status = g.api.status_url(request.args['url'], True)
except Exception:
status = None
flash("Could not locate a URL record for {0}".format(request.args['url']))
else:
status = None
return render_template('admin_urls.html', status=status, tags=load_data('tags'))
@admin_pages.route('/control/urls/check', methods=['GET'])
@check_admin
def admin_urls_check():
status = g.api.status_url(request.args['url'], request.args.get('normalize', '1') == '1')
return jsonify(**status)
@admin_pages.route('/control/urls', methods=['POST'])
@check_admin
def urls_post():
f = request.form
if 'update_status' in f:
rsp = g.api.set_status_url(f['url'], f['status'],
f.get('normalize', '0') == '1')
if rsp['success'] == True:
flash("URL Status updated")
else:
flash("Error updating URL status")
return redirect(url_for('.urls'))
if 'update_tag' in f:
if f['newtag']:
tag = f['newtag'].lower()
else:
tag = f['tag'].lower()
if not is_tag_valid(tag):
flash("Tag \"{0}\" is not valid. Tags must contain only characters a-z, 0-9 and '-'.".format(tag))
return redirect(url_for('.urls'))
q = Query(g.conn, """update urls set tags = tags || %s::varchar where url = %s and not tags && %s::varchar[]""",
[ tag, normalize_url(f['url']), [tag] ])
q.close()
g.conn.commit()
flash("URL Tags updated")
return redirect(url_for('.urls', url=normalize_url(f['url'])))
abort(400)
@admin_pages.route('/control/urls/upload')
@check_admin
def urls_upload():
return render_template('admin_url_upload.html',
tags=sorted(load_data('tags'))
)
@admin_pages.route('/control/urls/upload', methods=['POST'])
@check_admin
def urls_upload_post():
bad_tags = [ # strip empty and invalid values
x.lower() for x in request.form.getlist('tag')
if x and not is_tag_valid(x.lower())
]
if bad_tags:
flash("Invalid tags: {0}".format(", ".join(bad_tags)))
tags = make_list([ # strip empty and invalid values
x.lower() for x in request.form.getlist('tag')
if x and is_tag_valid(x.lower())
])
addcount = 0
errors = []
for _url in request.form['urls'].splitlines():
url = normalize_url(_url)
try:
result = g.api.submit_url(url, queue='none', source=request.form['source'])
if result['success']:
addcount += 1
else:
errors.append(url)
continue
for tag in tags:
q = Query(g.conn,
"""update urls set tags = tags || %s::varchar
where url = %s and not tags && %s::varchar[]""",
[ tag, url, [tag] ])
q.close()
g.conn.commit()
except Exception as v:
current_app.logger.warn("API exception: %s", str(v))
if errors:
flash("Errors submitting: {0}".format(", ".join(errors)))
flash("{0} url{1} uploaded".format(addcount, '' if addcount == 1 else 's'))
return redirect(url_for('.urls_upload'))
################
#
# URL category admin
#
################
@admin_pages.route('/control/url-category')
@check_admin
def url_categories():
return render_template('url_category.html',
categories=Category.select_with_counts(g.conn),
)
@admin_pages.route('/control/url-category/edit/<id>')
@check_admin
def url_category_edit(id):
cat = Category(g.conn, id)
return render_template('url_category_edit.html', category=cat)
@admin_pages.route('/control/url-category/update', methods=['POST'])
@check_admin
def url_category_update():
f = request.form
cat = Category(g.conn, f['id'])
cat.update({
'name': f['name'],
'display_name': f['display_name'],
})
cat.store()
g.conn.commit()
flash("Category {0} updated".format(f['name']))
return redirect(url_for('.url_categories'))
@admin_pages.route('/control/url-category/merge', methods=['POST'])
@check_admin
def url_category_merge():
f = request.form
mergelist = f.getlist('merge')
if len(mergelist) == 1:
flash("More than 1 merge category required")
return redirect(url_for('.url_categories'))
cat = Category(g.conn, id=mergelist.pop(0))
mergenames = []
for merge in mergelist:
mergecat = Category(g.conn, id=merge)
q = Query(g.conn,
"""update public.url_categories
set category_id = %s
where category_id = %s
and not exists(select 1 from public.url_categories x where x.urlid = url_categories.urlid and x.category_id = %s)""",
[cat['id'], merge, cat['id']])
q = Query(g.conn,
"""update public.url_categories set enabled = true, last_updated=now() where category_id = %s and enabled = false""",
[cat['id']])
mergenames.append(mergecat['name'])
mergecat.delete()
g.conn.commit()
flash("Merged categories {0} with {1}".format(", ".join(mergenames), cat['name']))
return redirect(url_for('.url_categories'))
@admin_pages.route('/control/url-category/delete/<id>', methods=['GET','POST']) # should be a post method
@check_admin
def url_category_delete(id):
cat = Category(g.conn, id=id)
if request.method == 'POST':
if cat['namespace'] != 'ORG':
flash("Cannot delete non-ORG category")
return redirect(url_for('.url_categories'))
cat.delete()
g.conn.commit()
flash("Category {0} deleted".format(cat['name']))
return redirect(url_for('.url_categories'))
if request.method == 'GET':
return render_template('url_category_delete_confirm.html', id=id, cat=cat)
## Tests admin
@admin_pages.route('/control/tests')
@check_admin
def tests():
tests = Test.select(g.conn, _orderby='last_run')
queues = Query(g.conn, """select *
from tests.queue_status
where queue_name like '%%.public'
order by message_count desc""",
[])
return render_template('tests.html', tests=tests, queues=queues)
@admin_pages.route('/control/tests/add')
@admin_pages.route('/control/tests/edit/<int:id>')
@check_admin
def tests_edit(id=None):
test = Test(g.conn, id=id)
if not id:
test['check_interval'] = datetime.timedelta(0)
test['repeat_interval'] = datetime.timedelta(0)
return render_template('tests_edit.html',
test=test,
isps=load_isp_data(),
countries=load_country_data(),
filters=load_data('filters'),
tags=[ x['id'] for x in Tags.select_all(g.conn, _orderby='id')]
)
@admin_pages.route('/control/tests/update', methods=['POST'])
@check_admin
def tests_update():
f = request.form
test = Test(g.conn, id=(f['id'] or None))
test.update({
'name': f['name'],
'description': f['description'],
'check_interval': "{0} {1}".format(f['check_interval_num'], f['check_interval_unit']),
'repeat_interval':
"{0} {1}".format(f['repeat_interval_num'], f['repeat_interval_unit'])
if f.get('repeat_enable') else None,
'batch_size': f['batch_size']
})
if f.get('source') == 'query':
test['filter'] = f['filter']
elif f.get('source') == 'tag':
test['tags'] = [f['tag']]
test['filter'] = None
if 'isps' in f:
test['isps'] = f.getlist('isps')
else:
test['isps'] = []
test.store()
g.conn.commit()
flash("Test case updated")
return redirect(url_for('.tests'))
@admin_pages.route('/control/tests/delete/<int:id>')
@check_admin
def tests_delete(id):
t = Test(g.conn, id)
t.delete()
g.conn.commit()
flash("Test case deleted")
return redirect(url_for('.tests'))
@admin_pages.route('/control/tests/status/<int:id>/<status>')
@check_admin
def tests_status(id, status):
t = Test(g.conn, id)
t['status'] = status.upper()
if status.upper() == 'RUNNING':
t['status_message'] = ''
if not t.get('last_run'):
t['last_run'] = datetime.datetime.now()
t.store()
g.conn.commit()
flash("Test status updated.")
return redirect(url_for('.tests'))
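# Registration sketch (not part of this module): one way the admin_pages
# blueprint could be attached to an application for local testing. The app
# created here is purely illustrative; the real project supplies its own app
# object, configuration and g.conn/g.api setup.
def create_admin_app():
    from flask import Flask
    app = Flask(__name__)
    app.register_blueprint(admin_pages)
    return app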
|
from setuptools import setup
__version__ = '1.0.0'
setup(
name='putils',
version=__version__,
description='Python2.7 utilities frequently used ',
author='Dongwon Kim',
author_email='dkim010@gmail.com',
url='https://github.com/dkim010/putils',
license='Apache-2.0',
packages=['putils'],
)
|
import requests
hello = "https://www.googleapis.com/youtube/v3/channels?part=statistics&id=UCpNooCUr-Q01rjgqxzjvztg&key=AIzaSyC4ucWTSN3s7d4KrqJ9ZOYZ-ezvzwTSGsg";
request = requests.get(hello);
reallist = ['\"','{','}','\n',']']
data = request.text;  # use the decoded text body so the str replacements below work
sdata = data;
for character in reallist:
sdata = sdata.replace(character,'');
sdata = sdata.split(',');
def get_channel_id():
channel_id = sdata[6].replace('id: ','');
channel_id = channel_id.replace(' ','');
return channel_id;
def get_viewcount():
viewcount = sdata[7].split(':');
viewcount = viewcount[2].replace(' ','');
return viewcount;
def get_subscribers():
subscribers = sdata[9].split(':');
subscribers = subscribers[1].replace(' ','');
return subscribers;
def get_videoCount():
videoCount = sdata[11].split(':');
videoCount = videoCount[1].replace(' ','');
return videoCount
channel_id = get_channel_id();
viewcount = get_viewcount();
subscribers = get_subscribers();
videoCount = get_videoCount();
info_list = [channel_id,viewcount,videoCount,subscribers];
info_list = "\n".join(info_list);
channel_id_title = 'Channel ID: ' + channel_id;
view_count_title = 'Views: ' + viewcount;
subscribers_title = 'Subscribers: ' + subscribers;
video_Count_title = 'Videos: ' + videoCount;
info_list_title = [channel_id_title,view_count_title,subscribers_title,video_Count_title];
info_list_title = '\n'.join(info_list_title);
def writetofile():
fh = open("Channel_Info.txt", "w")
fh.writelines(str(info_list_title));
fh.close()
writetofile();
print(info_list)
print('Channel ID: ' + channel_id + '\n' + 'Views: ' + viewcount + '\n' + 'Videos: ' + videoCount + '\n' + 'Subscribers: ' + subscribers);
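# Alternative sketch (not part of the original script): the same statistics
# extracted by parsing the response as JSON instead of stripping characters
# and splitting on commas. It reuses the `hello` URL defined above and assumes
# the request succeeds and returns at least one channel item.
def get_stats_via_json():
    response = requests.get(hello)
    item = response.json()["items"][0]
    stats = item["statistics"]
    return {
        "channel_id": item["id"],
        "views": stats["viewCount"],
        "subscribers": stats["subscriberCount"],
        "videos": stats["videoCount"],
    }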
|