import pandas as pd
from IPython.display import display
from ipywidgets import Label
from bqplot import *
from bqplot.market_map import MarketMap
data = pd.read_csv('data_files/country_codes.csv', index_col=[0])
country_codes = data.index.values
country_names = data['Name']
gdp_data = pd.read_csv('data_files/gdp_per_capita.csv', index_col=[0], parse_dates=True)
gdp_data = gdp_data.bfill().ffill()
col = ColorScale(scheme='Greens')
continents = data['Continent'].values
ax_c = ColorAxis(scale=col, label='GDP per Capita', visible=False)
data['GDP'] = gdp_data.iloc[-1]
market_map = MarketMap(names=country_codes, groups=continents, # Basic data which needs to be set for each map
cols=25, row_groups=3, # Properties for the visualization
ref_data=data, # Data frame used for different properties of the map
tooltip_fields=['Name', 'Continent', 'GDP'], # Columns from data frame to be displayed as tooltip
tooltip_formats=['', '', '.1f'],
scales={'color': col}, axes=[ax_c]) # Axis and scale for color data
deb_output = Label()
def selected_index_changed(change):
    deb_output.value = str(change['new'])
market_map.observe(selected_index_changed, names='selected')
# Creating the figure to be displayed as the tooltip
sc_x = DateScale()
sc_y = LinearScale()
ax_x = Axis(scale=sc_x, grid_lines='dashed', label='Date')
ax_y = Axis(scale=sc_y, orientation='vertical', grid_lines='dashed',
label='GDP', label_location='end', label_offset='-1em')
line = Lines(x=gdp_data.index.values, scales={'x': sc_x, 'y': sc_y}, colors=['orange'])
fig_tooltip = Figure(marks=[line], axes=[ax_x, ax_y], min_width=600, min_height=400)
market_map = MarketMap(names=country_codes, groups=continents,
cols=25, row_groups=3,
color=data['GDP'], scales={'color': col}, axes=[ax_c],
ref_data=data, tooltip_widget=fig_tooltip)
# Update the tooltip chart
hovered_symbol = ''
def hover_handler(self, content):
global hovered_symbol
symbol = content.get('ref_data', {}).get('Country Code', '')
    if symbol != hovered_symbol:
        hovered_symbol = symbol
        if gdp_data.get(hovered_symbol) is not None:
            line.y = gdp_data[hovered_symbol].values
            fig_tooltip.title = hovered_symbol
# Custom msg sent when a particular cell is hovered on
market_map.on_hover(hover_handler)
display(market_map)
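# Optional sketch: render the selection debug label under the map as well
# (deb_output is the Label created above; this is an assumption about layout,
# not part of the original example):
display(deb_output)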
|
From bcb0a961df77a0d7a3b2e7e58fac3e283b5ef8c4 Mon Sep 17 00:00:00 2001
From: Mohamad Safadieh <self@mhmd.sh>
Date: Wed, 5 May 2021 12:38:26 -0400
Subject: [PATCH] added sshpass_prompt, ssh_transfer_method, timeout
---
sshjail.py | 42 ++++++++++++++++++++++++++++++++++++++++++
1 file changed, 42 insertions(+)
diff --git a/sshjail.py b/sshjail.py
index 5973380..0e26c68 100644
--- a/sshjail.py
+++ b/sshjail.py
@@ -49,6 +49,17 @@
vars:
- name: ansible_password
- name: ansible_ssh_pass
+ sshpass_prompt:
+ description: Password prompt that sshpass should search for. Supported by sshpass 1.06 and up
+ default: ''
+ ini:
+ - section: 'ssh_connection'
+ key: 'sshpass_prompt'
+ env:
+ - name: ANSIBLE_SSHPASS_PROMPT
+ vars:
+ - name: ansible_sshpass_prompt
+ version_added: '2.10'
ssh_args:
description: Arguments to pass to all ssh cli tools
default: '-C -o ControlMaster=auto -o ControlPersist=60s'
@@ -247,6 +258,16 @@
vars:
- name: ansible_sftp_batch_mode
version_added: '2.7'
+ ssh_transfer_method:
+ default: smart
+ description:
+ - "Preferred method to use when transferring files over ssh"
+ - Setting to 'smart' (default) will try them in order, until one succeeds or they all fail
+ - Using 'piped' creates an ssh pipe with ``dd`` on either side to copy the data
+ choices: ['sftp', 'scp', 'piped', 'smart']
+ env: [{name: ANSIBLE_SSH_TRANSFER_METHOD}]
+ ini:
+ - {key: transfer_method, section: ssh_connection}
scp_if_ssh:
default: smart
description:
@@ -270,6 +291,27 @@
vars:
- name: ansible_ssh_use_tty
version_added: '2.7'
+ timeout:
+ default: 10
+ description:
+        - This is the default amount of time we will wait while establishing an ssh connection
+        - It also controls how long we can wait before reading from an established connection (select on the socket)
+ env:
+ - name: ANSIBLE_TIMEOUT
+ - name: ANSIBLE_SSH_TIMEOUT
+ version_added: '2.11'
+ ini:
+ - key: timeout
+ section: defaults
+ - key: timeout
+ section: ssh_connection
+ version_added: '2.11'
+ vars:
+ - name: ansible_ssh_timeout
+ version_added: '2.11'
+ cli:
+ - name: timeout
+ type: integer
'''
try:
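A minimal ansible.cfg sketch exercising the three options this patch adds (the
values are illustrative, not defaults taken from the patch; the same settings
are also reachable via ANSIBLE_SSHPASS_PROMPT, ANSIBLE_SSH_TRANSFER_METHOD and
ANSIBLE_SSH_TIMEOUT per the env entries above):

[ssh_connection]
sshpass_prompt = Password:
transfer_method = scp
timeout = 30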
|
import os
import askmanta
import json
import subprocess
from dateutil import parser as dateparser
from askmanta.environment import client
class Phase(object):
# phase name = "job name: phase i"
def __init__(self, i, spec, directive):
self.i = i
self.spec = spec
self.directive = directive
self.type = spec.get('type', 'map')
self.init = spec.get('init', [])
self.manifests = []
self.assets = spec.get('assets', [])
self.count = None
self.memory = None
self.disk = None
if 'sh' in spec:
self.executable = spec['sh']
elif 'script' in spec:
path = os.path.join(self.directive.manta_tmp, 'scripts', spec['script'])
self.executable = path
spec.setdefault('dependencies', {}).setdefault('scripts', []).append(spec['script'])
else:
            raise ValueError("phase spec must contain either 'sh' or 'script'")
platforms = spec.get('dependencies', False)
if platforms:
for platform, dependencies in platforms.items():
manifest = askmanta.manifest.platforms[platform](platform, self)
manifest.add(*dependencies)
self.manifests.append(manifest)
@property
def name(self):
return "{directive}: step {i}/{n}".format(
directive=self.directive.name, i=self.i, n=len(self.directive))
def serialize(self):
assets = self.assets
init = self.init
for manifest in self.manifests:
init = init + manifest.init
for store in self.directive.stores.values():
assets = assets + [store.archive_destination]
instruction = {
'type': self.type,
'init': ";\n".join(init),
'assets': assets,
'exec': self.executable,
}
for key in ['count', 'memory', 'disk']:
option = getattr(self, key)
if option:
instruction[key] = option
return instruction
class Directive(object):
def __init__(self, name, spec, root):
self.name = name
self.spec = spec
# local root
self.root = root
# manta root
self.manta_root = "/{account}/stor/directives/{name}".format(
account=client.account, name=name)
self.tmp = "/tmp/askmanta/{name}".format(name=self.name)
self.manta_tmp = "/var/tmp"
self.stores = {}
self.parse()
def parse(self):
self.phases = [Phase(i, spec, self) for i, spec in enumerate(self.spec)]
for store in self.stores.values():
if store.is_active:
instruction = "cd /var/tmp && tar xvzf {src}".format(
src=store.archive_asset)
for phase in self.phases:
phase.init.insert(0, instruction)
def serialize(self):
return [phase.serialize() for phase in self.phases]
def build(self):
for store in self.stores.values():
if store.is_active:
store.save()
phases_filename = os.path.join(self.tmp, 'phases.json')
json.dump(self.serialize(), open(phases_filename, 'w'), indent=4)
def stage(self):
# TODO: support for -f --fresh, which would client.rmr(base) first
client.mkdirp(self.manta_root)
# TODO: client.get_object(name), check `mtime` > self.store.ctime and if so, abort
# (current version already uploaded)
for store in self.stores.values():
if not store.is_active:
continue
            print(store.archive_destination)
client.put_object(
store.archive_destination,
file=open(store.path),
durability_level='1',
)
def submit(self, inputs):
job_id = client.create_job(self.serialize(), self.name)
client.add_job_inputs(job_id, inputs)
client.end_job_input(job_id)
return Job(id=job_id)
def run(self, inputs):
self.build()
self.stage()
return self.submit(inputs=inputs)
def local_run(self, inputs):
"""
* create a virtualenv for every step, with just the packages for that step
* emulate how Joyent pipes lines into stdin
* local runs can't really work if the phases have side-effects, but if
they don't and if the input files are local too, things should work swimmingly
"""
raise NotImplementedError()
def __len__(self):
return len(self.spec)
class File(object):
def __init__(self, path):
self.path = path
@property
def content(self):
if not hasattr(self, '_content'):
self._content = client.get_object(self.path)
return self._content
def json(self):
return json.loads(self.content)
class Job(object):
# we can initialize a job with either an id
# (for an existing job) or a directive (for
# a job that's already running)
def __init__(self, id=None, directive=None):
self.id = id
self.directive = directive
self.root = os.path.join('/', client.account, 'jobs', id)
# TODO: distinguish between live and archived jobs
#self.path = os.path.join(self.root, 'live/status')
self.path = os.path.join(self.root, 'job.json')
self.errors = []
self.outputs = []
self.is_done = None
def poll(self):
self.raw = raw = File(self.path).json()
self.name = raw['name']
self.state = raw['state']
self.is_done = raw['state'] == 'done'
self.stats = raw['stats']
current = raw['stats']['tasksDone']
total = raw['stats']['tasks']
self.cursor = (current, total)
self.ctime = dateparser.parse(self.raw['timeCreated'])
if self.stats['errors']:
try:
# TODO: distinguish between live and archived jobs
# live/err => err.txt
overview_path = os.path.join(self.root, 'live/err')
overview = File(overview_path).json()
stderr_path = overview['stderr']
stderr = File(stderr_path)
self.errors.append(stderr)
            except Exception:
pass
if self.stats['outputs']:
pass
def delete(self):
# too lazy to implement this in Python...
subprocess.call(["mrm", "-r", self.root])
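# A hypothetical end-to-end sketch of the classes above (the directive name,
# spec, and input object paths are illustrative; a spec entry needs 'sh' or
# 'script', per Phase.__init__):
#
#   directive = Directive('wordcount', spec=[{'sh': 'wc -l'}], root='.')
#   job = directive.run(inputs=['/account/stor/words.txt'])
#   job.poll()
#   print(job.cursor)  # (tasks done, total tasks)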
|
import tensorflow as tf
import numpy as np
import os
import sys
from PIL import Image, ImageOps
from utils import batch_norm, get_shape, lkrelu
class Generator(object):
def __init__(self, inputs, is_training, ochan, stddev=0.02, center=True, scale=True, reuse=None):
self._is_training = is_training
self._stddev = stddev
self._ochan = ochan
with tf.variable_scope('G', initializer=tf.truncated_normal_initializer(stddev=self._stddev), reuse=reuse):
self._center = center
self._scale = scale
self._prob = 0.5
self._inputs = inputs
self._encoder = self._build_encoder(inputs)
self._decoder = self._build_decoder(self._encoder)
def _build_encoder_layer(self, name, inputs, k, bn=True, use_dropout=False):
layer = dict()
with tf.variable_scope(name):
layer['filters'] = tf.get_variable('filters', [4, 4, get_shape(inputs)[-1], k])
layer['conv'] = tf.nn.conv2d(inputs, layer['filters'], strides=[1, 2, 2, 1], padding='SAME')
layer['bn'] = batch_norm(layer['conv'], center=self._center, scale=self._scale, training=self._is_training) if bn else layer['conv']
layer['dropout'] = tf.nn.dropout(layer['bn'], self._prob) if use_dropout else layer['bn']
layer['fmap'] = lkrelu(layer['dropout'], slope=0.2)
return layer
def _build_encoder(self, inputs):
encoder = dict()
with tf.variable_scope('encoder'):
encoder['l1'] = self._build_encoder_layer('l1', inputs, 64, bn=False)
encoder['l2'] = self._build_encoder_layer('l2', encoder['l1']['fmap'], 128)
encoder['l3'] = self._build_encoder_layer('l3', encoder['l2']['fmap'], 256)
encoder['l4'] = self._build_encoder_layer('l4', encoder['l3']['fmap'], 512)
encoder['l5'] = self._build_encoder_layer('l5', encoder['l4']['fmap'], 512)
encoder['l6'] = self._build_encoder_layer('l6', encoder['l5']['fmap'], 512)
encoder['l7'] = self._build_encoder_layer('l7', encoder['l6']['fmap'], 512)
encoder['l8'] = self._build_encoder_layer('l8', encoder['l7']['fmap'], 512)
return encoder
    def _build_decoder_layer(self, name, inputs, output_shape_from, use_dropout=False):
layer = dict()
with tf.variable_scope(name):
output_shape = tf.shape(output_shape_from)
layer['filters'] = tf.get_variable('filters', [4, 4, get_shape(output_shape_from)[-1], get_shape(inputs)[-1]])
layer['conv'] = tf.nn.conv2d_transpose(inputs, layer['filters'], output_shape=output_shape, strides=[1, 2, 2, 1], padding='SAME')
layer['bn'] = batch_norm(tf.reshape(layer['conv'], output_shape), center=self._center, scale=self._scale, training=self._is_training)
layer['dropout'] = tf.nn.dropout(layer['bn'], self._prob) if use_dropout else layer['bn']
layer['fmap'] = tf.nn.relu(layer['dropout'])
return layer
def _build_decoder(self, encoder):
decoder = dict()
with tf.variable_scope('decoder'):
decoder['dl1'] = self._build_decoder_layer('dl1', encoder['l8']['fmap'], output_shape_from=encoder['l7']['fmap'], use_dropout=True)
fmap_concat = tf.concat([decoder['dl1']['fmap'], encoder['l7']['fmap']], axis=3)
decoder['dl2'] = self._build_decoder_layer('dl2', fmap_concat, output_shape_from=encoder['l6']['fmap'], use_dropout=True)
fmap_concat = tf.concat([decoder['dl2']['fmap'], encoder['l6']['fmap']], axis=3)
decoder['dl3'] = self._build_decoder_layer('dl3', fmap_concat, output_shape_from=encoder['l5']['fmap'], use_dropout=True)
fmap_concat = tf.concat([decoder['dl3']['fmap'], encoder['l5']['fmap']], axis=3)
decoder['dl4'] = self._build_decoder_layer('dl4', fmap_concat, output_shape_from=encoder['l4']['fmap'])
fmap_concat = tf.concat([decoder['dl4']['fmap'], encoder['l4']['fmap']], axis=3)
decoder['dl5'] = self._build_decoder_layer('dl5', fmap_concat, output_shape_from=encoder['l3']['fmap'])
fmap_concat = tf.concat([decoder['dl5']['fmap'], encoder['l3']['fmap']], axis=3)
decoder['dl6'] = self._build_decoder_layer('dl6', fmap_concat, output_shape_from=encoder['l2']['fmap'])
fmap_concat = tf.concat([decoder['dl6']['fmap'], encoder['l2']['fmap']], axis=3)
decoder['dl7'] = self._build_decoder_layer('dl7', fmap_concat, output_shape_from=encoder['l1']['fmap'])
fmap_concat = tf.concat([decoder['dl7']['fmap'], encoder['l1']['fmap']], axis=3)
decoder['dl8'] = self._build_decoder_layer('dl8', fmap_concat, output_shape_from=self._inputs)
with tf.variable_scope('cl9'):
cl9 = dict()
cl9['filters'] = tf.get_variable('filters', [4, 4, get_shape(decoder['dl8']['fmap'])[-1], self._ochan])
cl9['conv'] = tf.nn.conv2d(decoder['dl8']['fmap'], cl9['filters'], strides=[1, 1, 1, 1], padding='SAME')
cl9['fmap'] = tf.nn.tanh(cl9['conv'])
decoder['cl9'] = cl9
return decoder
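# A minimal graph-mode usage sketch (TF1.x is assumed, matching the API calls
# above; the 256x256 input size is an assumption that lets the eight stride-2
# encoder layers bottom out at 1x1):
if __name__ == '__main__':
    x = tf.placeholder(tf.float32, [1, 256, 256, 3])
    training = tf.placeholder(tf.bool)
    gen = Generator(x, training, ochan=3)
    y = gen._decoder['cl9']['fmap']  # final tanh output, same spatial size as x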
|
from flask_restful import Resource
from flask import request
from flask_jwt_extended import jwt_required
from auth.password_manager import change_password, forgot_password
from exception import MyException
class PasswordChange(Resource):
@classmethod
@jwt_required()
def put(cls):
        data = request.get_json()
        required = ('email', 'old_password', 'new_password')
        if not data or not all(field in data for field in required):
            raise MyException('fields cannot be empty', status_code=400)
        return change_password(data['email'], data['old_password'], data['new_password'])
class PasswordForgot(Resource):
@classmethod
def post(cls):
        data = request.get_json()
        if not data or 'email' not in data:
            raise MyException('fields cannot be empty', status_code=400)
        return forgot_password(data['email'])
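# A hypothetical wiring sketch (the Flask app and Api objects are assumptions,
# not part of this module):
#
#   from flask import Flask
#   from flask_restful import Api
#   api = Api(Flask(__name__))
#   api.add_resource(PasswordChange, '/password/change')
#   api.add_resource(PasswordForgot, '/password/forgot')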
|
# -*- coding: utf-8 -*-
class Solution:
def validPalindrome(self, s):
def _validPalindrome(s, first, last):
while first < last:
if s[first] != s[last]:
return False
first, last = first + 1, last - 1
return True
first, last = 0, len(s) - 1
while first < last:
if s[first] != s[last]:
return _validPalindrome(s, first, last - 1) or _validPalindrome(
s, first + 1, last
)
first, last = first + 1, last - 1
return True
if __name__ == "__main__":
solution = Solution()
assert solution.validPalindrome("aba")
assert solution.validPalindrome("abca")
|
from pyrosetta import *
from pyrosetta.rosetta.core.select.residue_selector import ResidueIndexSelector
from pyrosetta.rosetta.core.simple_metrics.metrics import RMSDMetric
from pyrosetta.rosetta.core.simple_metrics.per_residue_metrics import PerResidueRMSDMetric
from pyrosetta.rosetta.core.scoring import rmsd_atoms
from pyrosetta.rosetta.core.scoring import superimpose_pose
init()
pose = pose_from_pdb('loop_swap_protocol/tev.pdb')
ky9 = pose_from_pdb('../dali_data/pdb/ky/pdb1ky9.ent.gz')
pmm = PyMOLMover()
pmm.apply(pose)
pmm.apply(ky9)
qris = ResidueIndexSelector(str(pose.pdb_info().pdb2pose('A',142))+","+str(pose.pdb_info().pdb2pose('A',154)))
sris = ResidueIndexSelector(str(ky9.pdb_info().pdb2pose('B',201))+","+str(ky9.pdb_info().pdb2pose('B',213)))
prmsd = PerResidueRMSDMetric()
prmsd.set_rmsd_type(rmsd_atoms.rmsd_protein_bb_ca)
prmsd.set_residue_selector_reference(qris)
prmsd.set_residue_selector(sris)
prmsd.set_comparison_pose(pose)
amap = prmsd.create_atom_id_map(ky9)
superimpose_pose(ky9, pose, amap)
pmm.apply(ky9)
# Make residue selectors for loop termini with overlap regions
stemp = '{}-{},{}-{}'
qs = stemp.format(qn - self.n_overlap_size, qn,
qc, qc + self.c_overlap_size)
ss = stemp.format(sn - self.n_overlap_size, sn,
sc, sc + self.c_overlap_size)
query_selector = ResidueIndexSelector(qs)
subject_selector = ResidueIndexSelector(ss)
rmsd = RMSDMetric()
prmsd.set_residue_selector_reference?
psubset = Pose(pose(1,50))
Pose?
ky9 = pose_from_pdb('loop_swap_protocol/aligned_pdbs/1KY9.pdb')
tev_inf = pose.pdb_info()
ky9_inf = ky9.pdb_info()
qloop = Pose(pose, tev_inf.pdb2pose('A',142),tev_inf.pdb2pose('A',154))
sloop = Pose(ky9, ky9_inf.pdb2pose('A',201),ky9_inf.pdb2pose('A',213))
pmm = PyMOLMover()
pmm.apply(qloop)
pmm.apply(sloop)
pmm.apply(qloop)
pmm.apply(sloop)
pmm.apply(pose)
rmsd.set_comparison_pose?
rmsd.set_comparison_pose(qloop)
rmsd.calculate(sloop)
rmsd.set_rmsd_type?
rmsd.set_rmsd_type('CA')
rmsd.set_rmsd_type('rmsd_protein_bb_ca')
rmsd_atoms?
rmsd.set_rmsd_type(rmsd_atoms.rmsd_protein_bb_ca)
rmsd.calculate(sloop)
prmsd.set_comparison_pose(qloop)
prmsd.calculate(sloop)
prmsd.set_rmsd_type(rmsd_atoms.rmsd_protein_bb_ca)
prmsd.calculate(sloop)
sloop.total_residue()
print(sloop)
print(qloop)
ris = ResidueIndexSelector('1,13')
ris = ResidueIndexSelector(str(ky9_inf.pdb2pose('A',201))+","+str(ky9_inf.pdb2pose('A',213)))
prmsd.set_residue_selector_reference(ris)
sris = ResidueIndexSelector(str(ky9_inf.pdb2pose('A',201))+","+str(ky9_inf.pdb2pose('A',213)))
qris = ResidueIndexSelector(str(tev_inf.pdb2pose('A',142))+","+str(tev_inf.pdb2pose('A',154)))
prmsd.set_residue_selector_reference(qris)
prmsd.set_residue_selector(sris)
prmsd.set_comparison_pose(pose)
amap = prmsd.create_atom_id_map(ky9)
superimpose_pose(ky9, pose, amap)
pmm.apply(ky9)
superimpose_pose(ky9, *pose, amap)
ky9_b = pose_from_pdb('../dali_data/pdb/ky/pdb1ky9.ent.gz')
print(ky9_b)
sris_b = ResidueIndexSelector(str(ky9_b.pdb_info().pdb2pose('B',201))+","+str(ky9_b.pdb_info().pdb2pose('B',213)))
prmsd.set_residue_selector(sris_b)
amap = prmsd.create_atom_id_map(ky9_b)
pmm.apply(ky9_b)
superimpose_pose(ky9_b, pose, amap)
pmm.apply(ky9_b)
ky9_b = pose_from_pdb('../dali_data/pdb/ky/pdb1ky9.ent.gz')
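# A consolidated sketch of the working superposition recipe from the session
# above (chain B of 1KY9 is the chain that mapped; residue numbers as used there):
#   qris = ResidueIndexSelector('...')   # TEV pose residues for A142/A154
#   sris = ResidueIndexSelector('...')   # 1KY9 residues for B201/B213
#   prmsd = PerResidueRMSDMetric()
#   prmsd.set_rmsd_type(rmsd_atoms.rmsd_protein_bb_ca)
#   prmsd.set_residue_selector_reference(qris)
#   prmsd.set_residue_selector(sris)
#   prmsd.set_comparison_pose(pose)
#   amap = prmsd.create_atom_id_map(ky9_b)
#   superimpose_pose(ky9_b, pose, amap)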
|
import GlobalSettings
import os
from PyQt5 import QtWidgets, Qt, uic, QtCore
from functools import partial
from CSPRparser import CSPRparser
import re
import platform
import traceback
import math
from annotation_functions import *
#global logger
logger = GlobalSettings.logger
# Class Name: genLibrary
# this class is a window that allows the user to select the settings for Generate Library
# When the user clicks Generate Library, it goes ahead and gets the Annotation Data needed
# Then the user can select the settings they want, and then hit submit.
# It creates a txt file with the data
class genLibrary(QtWidgets.QMainWindow):
def __init__(self):
try:
# qt stuff
super(genLibrary, self).__init__()
uic.loadUi(GlobalSettings.appdir + 'generate_library.ui', self)
self.setWindowTitle('Generate Library')
self.setWindowIcon(Qt.QIcon(GlobalSettings.appdir + 'cas9image.ico'))
groupbox_style = """
QGroupBox:title{subcontrol-origin: margin;
left: 10px;
padding: 0 5px 0 5px;}
QGroupBox#Step1{border: 2px solid rgb(111,181,110);
border-radius: 9px;
font: bold 14pt 'Arial';
margin-top: 10px;}"""
self.Step1.setStyleSheet(groupbox_style)
self.Step2.setStyleSheet(groupbox_style.replace("Step1", "Step2"))
self.Step3.setStyleSheet(groupbox_style.replace("Step1", "Step3"))
self.Step4.setStyleSheet(groupbox_style.replace("Step1", "Step4"))
# button connections
self.cancel_button.clicked.connect(self.cancel_function)
self.BrowseButton.clicked.connect(self.browse_function)
self.submit_button.clicked.connect(self.submit_data)
self.progressBar.setValue(0)
# variables
self.anno_data = dict()
self.kegg_nonKegg = ''
self.gen_lib_dict = dict()
self.cspr_data = dict()
self.Output = dict()
self.off_tol = .05
self.off_max_misMatch = 4
self.off_target_running = False
self.parser = CSPRparser("")
# set the numbers for the num genes combo box item
for i in range(10):
self.numGenescomboBox.addItem(str(i + 1))
# set the numbers for the minOn combo box
for i in range(19, 70):
self.minON_comboBox.addItem(str(i + 1))
#scale UI
self.scaleUI()
except Exception as e:
logger.critical("Error initializing generate library class.")
logger.critical(e)
logger.critical(traceback.format_exc())
msgBox = QtWidgets.QMessageBox()
msgBox.setStyleSheet("font: " + str(self.fontSize) + "pt 'Arial'")
msgBox.setIcon(QtWidgets.QMessageBox.Icon.Critical)
msgBox.setWindowTitle("Fatal Error")
msgBox.setText("Fatal Error:\n"+str(e)+ "\n\nFor more information on this error, look at CASPER.log in the application folder.")
msgBox.addButton(QtWidgets.QMessageBox.StandardButton.Close)
msgBox.exec()
exit(-1)
#scale UI based on current screen
def scaleUI(self):
try:
self.repaint()
QtWidgets.QApplication.processEvents()
screen = QtWidgets.QApplication.desktop().screenNumber(QtWidgets.QApplication.desktop().cursor().pos())
screen = QtWidgets.QApplication.screens()[screen]
dpi = screen.physicalDotsPerInch()
width = screen.geometry().width()
height = screen.geometry().height()
# font scaling
fontSize = 12
self.fontSize = fontSize
self.centralWidget().setStyleSheet("font: " + str(fontSize) + "pt 'Arial';")
#scale title
fontSize = 30
self.label.setStyleSheet("font: bold " + str(fontSize) + "pt 'Arial';")
self.adjustSize()
currentWidth = self.size().width()
currentHeight = self.size().height()
# window scaling
# 1920x1080 => 800x650
scaledWidth = int((width * 950) / 1920)
scaledHeight = int((height * 500) / 1080)
if scaledHeight < currentHeight:
scaledHeight = currentHeight
if scaledWidth < currentWidth:
scaledWidth = currentWidth
screen = QtWidgets.QApplication.desktop().screenNumber(QtWidgets.QApplication.desktop().cursor().pos())
centerPoint = QtWidgets.QApplication.desktop().screenGeometry(screen).center()
x = centerPoint.x()
y = centerPoint.y()
x = x - (math.ceil(scaledWidth / 2))
y = y - (math.ceil(scaledHeight / 2))
self.setGeometry(x, y, scaledWidth, scaledHeight)
self.repaint()
QtWidgets.QApplication.processEvents()
except Exception as e:
logger.critical("Error in scaleUI() in generate library.")
logger.critical(e)
logger.critical(traceback.format_exc())
msgBox = QtWidgets.QMessageBox()
msgBox.setStyleSheet("font: " + str(self.fontSize) + "pt 'Arial'")
msgBox.setIcon(QtWidgets.QMessageBox.Icon.Critical)
msgBox.setWindowTitle("Fatal Error")
msgBox.setText("Fatal Error:\n"+str(e)+ "\n\nFor more information on this error, look at CASPER.log in the application folder.")
msgBox.addButton(QtWidgets.QMessageBox.StandardButton.Close)
msgBox.exec()
exit(-1)
#center UI on current screen
def centerUI(self):
try:
self.repaint()
QtWidgets.QApplication.processEvents()
#center window on current screen
width = self.width()
height = self.height()
screen = QtWidgets.QApplication.desktop().screenNumber(QtWidgets.QApplication.desktop().cursor().pos())
centerPoint = QtWidgets.QApplication.desktop().screenGeometry(screen).center()
x = centerPoint.x()
y = centerPoint.y()
x = x - (math.ceil(width / 2))
y = y - (math.ceil(height / 2))
self.setGeometry(x, y, width, height)
self.repaint()
QtWidgets.QApplication.processEvents()
except Exception as e:
logger.critical("Error in centerUI() in generate library.")
logger.critical(e)
logger.critical(traceback.format_exc())
msgBox = QtWidgets.QMessageBox()
msgBox.setStyleSheet("font: " + str(self.fontSize) + "pt 'Arial'")
msgBox.setIcon(QtWidgets.QMessageBox.Icon.Critical)
msgBox.setWindowTitle("Fatal Error")
msgBox.setText("Fatal Error:\n"+str(e)+ "\n\nFor more information on this error, look at CASPER.log in the application folder.")
msgBox.addButton(QtWidgets.QMessageBox.StandardButton.Close)
msgBox.exec()
exit(-1)
# this function launches the window
# Parameters:
# annotation_data: a dictionary that has the data for the annotations searched for
# currently MainWindow's searches dict is passed into this
# org_file: the cspr_file that pertains to the organism that user is using at the time
# anno_type: whether the user is using KEGG or another type of annotation file
def launch(self, annotation_data, org_file, anno_type):
try:
self.cspr_file = org_file
self.db_file = org_file[:org_file.find('.')] + '_repeats.db'
self.anno_data = annotation_data
self.kegg_nonKegg = anno_type
self.process = QtCore.QProcess()
self.parser.fileName = org_file
# setting the path and file name fields
index1 = self.cspr_file.find('.')
if platform.system() == "Windows":
index2 = self.cspr_file.rfind('\\')
else:
index2 = self.cspr_file.rfind('/')
self.filename_input.setText(self.cspr_file[index2 + 1:index1] + '_lib')
if platform.system() == "Windows":
self.output_path.setText(GlobalSettings.CSPR_DB + "\\")
else:
self.output_path.setText(GlobalSettings.CSPR_DB + "/")
# depending on the type of file, build the dictionary accordingly
self.build_dict_non_kegg()
# get the gRNA data from the cspr file
self.cspr_data = self.parser.gen_lib_parser(self.gen_lib_dict, GlobalSettings.mainWindow.endoChoice.currentText())
self.get_endo_data()
#center UI
self.centerUI()
self.show()
self.activateWindow()
except Exception as e:
logger.critical("Error in launch() in generate library.")
logger.critical(e)
logger.critical(traceback.format_exc())
msgBox = QtWidgets.QMessageBox()
msgBox.setStyleSheet("font: " + str(self.fontSize) + "pt 'Arial'")
msgBox.setIcon(QtWidgets.QMessageBox.Icon.Critical)
msgBox.setWindowTitle("Fatal Error")
msgBox.setText("Fatal Error:\n"+str(e)+ "\n\nFor more information on this error, look at CASPER.log in the application folder.")
msgBox.addButton(QtWidgets.QMessageBox.StandardButton.Close)
msgBox.exec()
exit(-1)
def get_endo_data(self):
try:
f = open(GlobalSettings.appdir + "CASPERinfo")
self.endo_data = {}
while True:
                line = f.readline()
                if not line:
                    break
                if line.startswith('ENDONUCLEASES'):
while True:
line = f.readline()
line = line.replace("\n","")
                        if line == "" or line.startswith("-"):
break
line_tokened = line.split(";")
if len(line_tokened) == 10:
endo = line_tokened[0]
five_length = line_tokened[2]
seed_length = line_tokened[3]
three_length = line_tokened[4]
prime = line_tokened[5]
hsu = line_tokened[9]
self.endo_data[endo] = [int(five_length) + int(three_length) + int(seed_length), prime, "MATRIX:" + hsu]
break
f.close()
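            # Sketch of the semicolon-delimited ENDONUCLEASES entry the loop above
            # expects (only the indexed fields are named; the others are unknown here):
            #   name;?;five_length;seed_length;three_length;prime;?;?;?;hsu_matrix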
except Exception as e:
logger.critical("Error in get_endo_data() in generate library.")
logger.critical(e)
logger.critical(traceback.format_exc())
msgBox = QtWidgets.QMessageBox()
msgBox.setStyleSheet("font: " + str(self.fontSize) + "pt 'Arial'")
msgBox.setIcon(QtWidgets.QMessageBox.Icon.Critical)
msgBox.setWindowTitle("Fatal Error")
msgBox.setText("Fatal Error:\n"+str(e)+ "\n\nFor more information on this error, look at CASPER.log in the application folder.")
msgBox.addButton(QtWidgets.QMessageBox.StandardButton.Close)
msgBox.exec()
exit(-1)
# this is here in case the user clicks 'x' instead of cancel. Just calls the cancel function
def closeEvent(self, event):
try:
closeWindow = self.cancel_function()
# if the user is doing OT and does not decide to cancel it ignore the event
if closeWindow == -2:
event.ignore()
else:
event.accept()
except Exception as e:
logger.critical("Error in closeEvent() in generate library.")
logger.critical(e)
logger.critical(traceback.format_exc())
msgBox = QtWidgets.QMessageBox()
msgBox.setStyleSheet("font: " + str(self.fontSize) + "pt 'Arial'")
msgBox.setIcon(QtWidgets.QMessageBox.Icon.Critical)
msgBox.setWindowTitle("Fatal Error")
msgBox.setText("Fatal Error:\n"+str(e)+ "\n\nFor more information on this error, look at CASPER.log in the application folder.")
msgBox.addButton(QtWidgets.QMessageBox.StandardButton.Close)
msgBox.exec()
exit(-1)
# this function takes all of the cspr data and compresses it again for off-target usage
def compress_file_off(self):
try:
if platform.system() == "Windows":
file = GlobalSettings.CSPR_DB + "\\off_input.txt"
else:
file = GlobalSettings.CSPR_DB + "/off_input.txt"
f = open(file, 'w')
for gene in self.cspr_data:
for j in range(len(self.cspr_data[gene])):
loc = self.cspr_data[gene][j][0]
seq = self.cspr_data[gene][j][1]
pam = self.cspr_data[gene][j][2]
score = self.cspr_data[gene][j][3]
strand = self.cspr_data[gene][j][4]
output = str(loc) + ';' + str(seq) + ';' + str(pam) + ';' + str(score) + ';' + str(strand)
f.write(output + '\n')
f.close()
except Exception as e:
logger.critical("Error in compress_file_off() in generate library.")
logger.critical(e)
logger.critical(traceback.format_exc())
msgBox = QtWidgets.QMessageBox()
msgBox.setStyleSheet("font: " + str(self.fontSize) + "pt 'Arial'")
msgBox.setIcon(QtWidgets.QMessageBox.Icon.Critical)
msgBox.setWindowTitle("Fatal Error")
msgBox.setText("Fatal Error:\n"+str(e)+ "\n\nFor more information on this error, look at CASPER.log in the application folder.")
msgBox.addButton(QtWidgets.QMessageBox.StandardButton.Close)
msgBox.exec()
exit(-1)
# this function parses the temp_off file, which holds the off-target analysis results
# it also updates each target in the cspr_data dictionary to replace the endo with the target's results in off-target
def parse_off_file(self):
try:
if platform.system() == "Windows":
file = GlobalSettings.CSPR_DB + "\\temp_off.txt"
else:
file = GlobalSettings.CSPR_DB + "/temp_off.txt"
f = open(file, "r")
file_data = f.read().split('\n')
f.close()
scoreDict = dict()
# get the data from the file
for i in range(len(file_data)):
if file_data[i] == 'AVG OUTPUT':
continue
elif file_data[i] != '':
buffer = file_data[i].split(':')
scoreDict[buffer[0]] = buffer[1]
# update cspr_Data
for gene in self.cspr_data:
for i in range(len(self.cspr_data[gene])):
tempTuple = (self.cspr_data[gene][i][0], self.cspr_data[gene][i][1], self.cspr_data[gene][i][2], self.cspr_data[gene][i][3], self.cspr_data[gene][i][4], scoreDict[self.cspr_data[gene][i][1]])
self.cspr_data[gene][i] = tempTuple
except Exception as e:
logger.critical("Error in parse_off_file() in generate library.")
logger.critical(e)
logger.critical(traceback.format_exc())
msgBox = QtWidgets.QMessageBox()
msgBox.setStyleSheet("font: " + str(self.fontSize) + "pt 'Arial'")
msgBox.setIcon(QtWidgets.QMessageBox.Icon.Critical)
msgBox.setWindowTitle("Fatal Error")
msgBox.setText("Fatal Error:\n"+str(e)+ "\n\nFor more information on this error, look at CASPER.log in the application folder.")
msgBox.addButton(QtWidgets.QMessageBox.StandardButton.Close)
msgBox.exec()
exit(-1)
# this function runs the off_target command
# NOTE: some changes may be needed to get it to work with other OS besides windows
def get_offTarget_data(self, num_targets, minScore, spaceValue, output_file, fiveseq):
try:
self.perc = False
self.bool_temp = False
self.running = False
# when finished, parse the off file, and then generate the lib
def finished():
if self.off_target_running:
self.progressBar.setValue(100)
self.parse_off_file()
did_work = self.generate(num_targets, minScore, spaceValue, output_file, fiveseq)
self.off_target_running = False
#self.process.kill()
if did_work != -1:
self.cancel_function()
msgBox = QtWidgets.QMessageBox()
msgBox.setStyleSheet("font: " + str(self.fontSize) + "pt 'Arial'")
msgBox.setIcon(QtWidgets.QMessageBox.Icon.Information)
msgBox.setWindowTitle("Library Generated!")
msgBox.setText(
"CASPER has finished generating your library!")
msgBox.addButton(QtWidgets.QMessageBox.StandardButton.Ok)
msgBox.exec()
os.remove(GlobalSettings.CSPR_DB + '/off_input.txt')
os.remove(GlobalSettings.CSPR_DB + '/temp_off.txt')
# as off-targeting outputs things, update the off-target progress bar
def progUpdate(p):
line = str(self.process.readAllStandardOutput())
line = line[2:]
line = line[:len(line) - 1]
if platform.system() == 'Windows':
for lines in filter(None, line.split(r'\r\n')):
if (lines.find("Running Off Target Algorithm for") != -1 and self.perc == False):
self.perc = True
if (self.perc == True and self.bool_temp == False and lines.find(
"Running Off Target Algorithm for") == -1):
lines = lines[32:]
lines = lines.replace("%", "")
if (float(lines) <= 99.5):
num = float(lines)
                            self.progressBar.setValue(int(num))
else:
self.bool_temp = True
else:
for lines in filter(None, line.split(r'\n')):
if (lines.find("Running Off Target Algorithm for") != -1 and self.perc == False):
self.perc = True
if (self.perc == True and self.bool_temp == False and lines.find(
"Running Off Target Algorithm for") == -1):
lines = lines[32:]
lines = lines.replace("%", "")
if (float(lines) <= 99.5):
num = float(lines)
                            self.progressBar.setValue(int(num))
else:
self.bool_temp = True
if platform.system() == 'Windows':
app_path = GlobalSettings.appdir
exe_path = app_path + 'OffTargetFolder\\OT_Win.exe'
output_path = '"' + GlobalSettings.CSPR_DB + '\\temp_off.txt" '
data_path = '"' + GlobalSettings.CSPR_DB + "\\off_input.txt" + '" '
elif platform.system() == 'Linux':
app_path = GlobalSettings.appdir.replace('\\', '/')
exe_path = app_path + r'OffTargetFolder/OT_Lin'
output_path = '"' + GlobalSettings.CSPR_DB + '/temp_off.txt" '
data_path = '"' + GlobalSettings.CSPR_DB + "/off_input.txt" + '" '
else:
app_path = GlobalSettings.appdir.replace('\\', '/')
exe_path = app_path + r'OffTargetFolder/OT_Mac'
output_path = '"' + GlobalSettings.CSPR_DB + '/temp_off.txt" '
data_path = '"' + GlobalSettings.CSPR_DB + "/off_input.txt" + '" '
exe_path = '"' + exe_path + '" '
cspr_path = '"' + self.cspr_file + '" '
db_path = '"' + self.db_file + '" '
filename = output_path
filename = filename[:len(filename) - 1]
filename = filename[1:]
filename = filename.replace('"', '')
CASPER_info_path = '"' + app_path + 'CASPERinfo' +'" '
            num_of_mismatches = self.off_max_misMatch
tolerance = self.off_tol # create command string
endo = '"' + GlobalSettings.mainWindow.endoChoice.currentText() + '" '
detailed_output = " False "
avg_output = "True"
hsu = ' "' + self.endo_data[GlobalSettings.mainWindow.endoChoice.currentText()][2] + '"'
# set the off_target_running to true, to keep the user from closing the window while it is running
self.off_target_running = True
            cmd = exe_path + data_path + endo + cspr_path + db_path + output_path + CASPER_info_path + str(
                num_of_mismatches) + ' ' + str(tolerance) + detailed_output + avg_output + hsu
if platform.system() == 'Windows':
cmd = cmd.replace('/', '\\')
self.process.readyReadStandardOutput.connect(partial(progUpdate, self.process))
self.process.readyReadStandardError.connect(partial(progUpdate, self.process))
self.progressBar.setValue(0)
QtCore.QTimer.singleShot(100, partial(self.process.start, cmd))
self.process.finished.connect(finished)
except Exception as e:
logger.critical("Error in get_offTarget_data() in generate library.")
logger.critical(e)
logger.critical(traceback.format_exc())
msgBox = QtWidgets.QMessageBox()
msgBox.setStyleSheet("font: " + str(self.fontSize) + "pt 'Arial'")
msgBox.setIcon(QtWidgets.QMessageBox.Icon.Critical)
msgBox.setWindowTitle("Fatal Error")
msgBox.setText("Fatal Error:\n"+str(e)+ "\n\nFor more information on this error, look at CASPER.log in the application folder.")
msgBox.addButton(QtWidgets.QMessageBox.StandardButton.Close)
msgBox.exec()
exit(-1)
# submit function
# this function takes all of the input from the window, and calls the generate function
# Still need to add the checks for 5' seq, and the percentage thing
def submit_data(self):
try:
if self.off_target_running:
return
output_file = self.output_path.text() + self.filename_input.text()
minScore = int(self.minON_comboBox.currentText())
num_targets = int(self.numGenescomboBox.currentText())
fiveseq = ''
# error check for csv files
if output_file.endswith('.txt'):
output_file = output_file.replace('.txt', '.csv')
elif not output_file.endswith('.txt') and not output_file.endswith('.csv'):
output_file = output_file + '.csv'
# error checking for the space value
# if they enter nothing, default to 15 and also make sure it's actually a digit
if self.space_line_edit.text() == '':
spaceValue = 15
elif self.space_line_edit.text().isdigit():
spaceValue = int(self.space_line_edit.text())
elif not self.space_line_edit.text().isdigit():
msgBox = QtWidgets.QMessageBox()
msgBox.setStyleSheet("font: " + str(self.fontSize) + "pt 'Arial'")
msgBox.setIcon(QtWidgets.QMessageBox.Icon.Critical)
msgBox.setWindowTitle("Error")
msgBox.setText(
"Please enter integers only for space between guides.")
msgBox.addButton(QtWidgets.QMessageBox.StandardButton.Ok)
msgBox.exec()
return
# if space value is more than 200, default to 200
if spaceValue > 200:
spaceValue = 200
elif spaceValue < 0:
msgBox = QtWidgets.QMessageBox()
msgBox.setStyleSheet("font: " + str(self.fontSize) + "pt 'Arial'")
msgBox.setIcon(QtWidgets.QMessageBox.Icon.Critical)
msgBox.setWindowTitle("Error")
msgBox.setText(
"Please enter a space-value that is 0 or greater.")
msgBox.addButton(QtWidgets.QMessageBox.StandardButton.Ok)
msgBox.exec()
return
if self.find_off_Checkbox.isChecked():
self.compress_file_off()
# get the fiveprimseq data and error check it
if self.fiveprimeseq.text() != '' and self.fiveprimeseq.text().isalpha():
fiveseq = self.fiveprimeseq.text()
elif self.fiveprimeseq.text() != '' and not self.fiveprimeseq.text().isalpha():
msgBox = QtWidgets.QMessageBox()
msgBox.setStyleSheet("font: " + str(self.fontSize) + "pt 'Arial'")
msgBox.setIcon(QtWidgets.QMessageBox.Icon.Critical)
msgBox.setWindowTitle("Error")
msgBox.setText(
"Please make sure only the letters A, T, G, or C are added into 5' End specificity box.")
msgBox.addButton(QtWidgets.QMessageBox.StandardButton.Ok)
msgBox.exec()
return
# get the targeting range data, and error check it here
if not self.start_target_range.text().isdigit() or not self.end_target_range.text().isdigit():
msgBox = QtWidgets.QMessageBox()
msgBox.setStyleSheet("font: " + str(self.fontSize) + "pt 'Arial'")
msgBox.setIcon(QtWidgets.QMessageBox.Icon.Critical)
msgBox.setWindowTitle("Error")
msgBox.setText(
"Error: Please make sure that the start and end target ranges are numbers only. Please make sure that start is 0 or greater, and end is 100 or less. ")
msgBox.addButton(QtWidgets.QMessageBox.StandardButton.Ok)
msgBox.exec()
return
elif int(self.start_target_range.text()) >= int(self.end_target_range.text()):
msgBox = QtWidgets.QMessageBox()
msgBox.setStyleSheet("font: " + str(self.fontSize) + "pt 'Arial'")
msgBox.setIcon(QtWidgets.QMessageBox.Icon.Critical)
msgBox.setWindowTitle("Error")
msgBox.setText(
"Please make sure that the start number is always less than the end number")
msgBox.addButton(QtWidgets.QMessageBox.StandardButton.Ok)
msgBox.exec()
return
# if they check Off-Targeting
if self.find_off_Checkbox.isChecked():
# make sure its a digit
                if self.maxOFF_comboBox.text() == '' or (not self.maxOFF_comboBox.text().isdigit() and '.' not in self.maxOFF_comboBox.text()):
msgBox = QtWidgets.QMessageBox()
msgBox.setStyleSheet("font: " + str(self.fontSize) + "pt 'Arial'")
msgBox.setIcon(QtWidgets.QMessageBox.Icon.Critical)
msgBox.setWindowTitle("Error")
msgBox.setText(
"Please enter only numbers for Maximum Off-Target Score. It cannot be left blank")
msgBox.addButton(QtWidgets.QMessageBox.StandardButton.Ok)
msgBox.exec()
return
else:
# make sure it between 0 and .5
if not 0.0 < float(self.maxOFF_comboBox.text()) <= .5:
msgBox = QtWidgets.QMessageBox()
msgBox.setStyleSheet("font: " + str(self.fontSize) + "pt 'Arial'")
msgBox.setIcon(QtWidgets.QMessageBox.Icon.Critical)
msgBox.setWindowTitle("Error")
msgBox.setText(
"Please enter a max off-target score between 0 and 0.5!")
msgBox.addButton(QtWidgets.QMessageBox.StandardButton.Ok)
msgBox.exec()
return
# compress the data, and then run off-targeting
self.compress_file_off()
self.get_offTarget_data(num_targets, minScore, spaceValue, output_file, fiveseq)
else:
# actually call the generate function
did_work = self.generate(num_targets, minScore, spaceValue, output_file, fiveseq)
if did_work != -1:
self.cancel_function()
msgBox = QtWidgets.QMessageBox()
msgBox.setStyleSheet("font: " + str(self.fontSize) + "pt 'Arial'")
                    msgBox.setIcon(QtWidgets.QMessageBox.Icon.Information)
msgBox.setWindowTitle("Library Generated!")
msgBox.setText(
"CASPER has finished generating your library!")
msgBox.addButton(QtWidgets.QMessageBox.StandardButton.Ok)
msgBox.exec()
except Exception as e:
logger.critical("Error in submit_data() in generate library.")
logger.critical(e)
logger.critical(traceback.format_exc())
msgBox = QtWidgets.QMessageBox()
msgBox.setStyleSheet("font: " + str(self.fontSize) + "pt 'Arial'")
msgBox.setIcon(QtWidgets.QMessageBox.Icon.Critical)
msgBox.setWindowTitle("Fatal Error")
msgBox.setText("Fatal Error:\n"+str(e)+ "\n\nFor more information on this error, look at CASPER.log in the application folder.")
msgBox.addButton(QtWidgets.QMessageBox.StandardButton.Close)
msgBox.exec()
exit(-1)
# cancel function
# clears everything and hides the window
def cancel_function(self):
try:
if self.off_target_running:
msgBox = QtWidgets.QMessageBox()
msgBox.setStyleSheet("font: " + str(self.fontSize) + "pt 'Arial'")
msgBox.setIcon(QtWidgets.QMessageBox.Icon.Question)
msgBox.setWindowTitle("Off-Targeting is running")
msgBox.setText(
"Off-Targetting is running. Closing this window will cancel that process, and return to the main window. .\n Do you wish to continue?")
msgBox.addButton(QtWidgets.QMessageBox.StandardButton.Yes)
msgBox.addButton(QtWidgets.QMessageBox.StandardButton.No)
msgBox.exec()
                if (msgBox.result() == QtWidgets.QMessageBox.StandardButton.No):
return -2
else:
self.off_target_running = False
self.process.kill()
self.cspr_file = ''
self.anno_data = list()
self.filename_input.setText('')
self.output_path.setText('')
self.gen_lib_dict.clear()
self.cspr_data.clear()
self.Output.clear()
self.start_target_range.setText('0')
self.end_target_range.setText('100')
self.space_line_edit.setText('15')
self.find_off_Checkbox.setChecked(False)
self.modifyParamscheckBox.setChecked(False)
self.maxOFF_comboBox.setText('')
self.fiveprimeseq.setText('')
self.off_target_running = False
self.progressBar.setValue(0)
self.hide()
except Exception as e:
logger.critical("Error in cancel_function() in generate library.")
logger.critical(e)
logger.critical(traceback.format_exc())
msgBox = QtWidgets.QMessageBox()
msgBox.setStyleSheet("font: " + str(self.fontSize) + "pt 'Arial'")
msgBox.setIcon(QtWidgets.QMessageBox.Icon.Critical)
msgBox.setWindowTitle("Fatal Error")
msgBox.setText("Fatal Error:\n"+str(e)+ "\n\nFor more information on this error, look at CASPER.log in the application folder.")
msgBox.addButton(QtWidgets.QMessageBox.StandardButton.Close)
msgBox.exec()
exit(-1)
# browse function
# allows the user to browse for a folder
# stores their selection in the output_path line edit
def browse_function(self):
try:
if self.off_target_running:
return
# get the folder
filed = QtWidgets.QFileDialog()
mydir = QtWidgets.QFileDialog.getExistingDirectory(filed, "Open a Folder",
GlobalSettings.CSPR_DB, QtWidgets.QFileDialog.ShowDirsOnly)
            if not os.path.isdir(mydir):
                return
            # make sure to append the '/' to the folder path
            if platform.system() == "Windows":
                self.output_path.setText(mydir + "\\")
            else:
                self.output_path.setText(mydir + "/")
except Exception as e:
logger.critical("Error in browse_function() in generate library.")
logger.critical(e)
logger.critical(traceback.format_exc())
msgBox = QtWidgets.QMessageBox()
msgBox.setStyleSheet("font: " + str(self.fontSize) + "pt 'Arial'")
msgBox.setIcon(QtWidgets.QMessageBox.Icon.Critical)
msgBox.setWindowTitle("Fatal Error")
msgBox.setText("Fatal Error:\n"+str(e)+ "\n\nFor more information on this error, look at CASPER.log in the application folder.")
msgBox.addButton(QtWidgets.QMessageBox.StandardButton.Close)
msgBox.exec()
exit(-1)
# this function builds the dictionary that is used in the generate function
# this is the version that builds it from data from feature_table, gbff, or gff
# builds it exactly as Brian built it in the files given
def build_dict_non_kegg(self):
try:
for tuple in self.anno_data:
chrom = tuple[0]
feature = tuple[1]
feature_id = get_id(feature)
feature_name = get_name(feature)
feature_desc = get_description(feature)
### Order: chromosome number, gene start, gene end, dir of gene, gene description, gene name/locus tag
self.gen_lib_dict[feature_name] = [chrom,int(feature.location.start),int(feature.location.end),get_strand(feature),get_description(feature),get_name(feature)]
except Exception as e:
logger.critical("Error in build_dict_non_kegg() in generate library.")
logger.critical(e)
logger.critical(traceback.format_exc())
msgBox = QtWidgets.QMessageBox()
msgBox.setStyleSheet("font: " + str(self.fontSize) + "pt 'Arial'")
msgBox.setIcon(QtWidgets.QMessageBox.Icon.Critical)
msgBox.setWindowTitle("Fatal Error")
msgBox.setText("Fatal Error:\n"+str(e)+ "\n\nFor more information on this error, look at CASPER.log in the application folder.")
msgBox.addButton(QtWidgets.QMessageBox.StandardButton.Close)
msgBox.exec()
exit(-1)
# generate function taken from Brian's code
def generate(self,num_targets_per_gene, score_limit, space, output_file, fiveseq):
try:
deletedDict = dict()
# check and see if we need to search based on target_range
startNum = float(self.start_target_range.text())
endNum = float(self.end_target_range.text())
checkStartandEndBool = False
if startNum != 0.0 or endNum != 100.0:
if startNum >= 0.0 and endNum <= 100.0:
startNum = startNum / 100
endNum = endNum / 100
checkStartandEndBool = True
else:
msgBox = QtWidgets.QMessageBox()
msgBox.setStyleSheet("font: " + str(self.fontSize) + "pt 'Arial'")
msgBox.setIcon(QtWidgets.QMessageBox.Icon.Critical)
msgBox.setWindowTitle("Invalid Targeting Range:")
msgBox.setText(
"Please select a targeting range between 0 and 100.")
msgBox.addButton(QtWidgets.QMessageBox.StandardButton.Ok)
msgBox.exec()
return -1
for gene in self.gen_lib_dict:
target_list = self.cspr_data[gene] # Gets the gRNAs for given gene
#target_list = chrom_list[k:l+1]
# Reverse the target list if the gene is on negative strand:
if self.gen_lib_dict[gene][3] == "-":
target_list.reverse()
# Filter out the guides with low scores and long strings of T's
# also store the ones deleted if the user selects 'modify search parameters'
if self.modifyParamscheckBox.isChecked():
deletedDict[gene] = list()
for i in range(len(target_list) - 1, -1, -1): ### Start at end and move backwards through list
# check the target_range here
if int(target_list[i][3]) < int(score_limit):
if self.modifyParamscheckBox.isChecked():
deletedDict[gene].append(target_list[i])
target_list.pop(i)
# check for gRNAs with poly T regions here
elif re.search("T{5,10}", target_list[i][1]) is not None:
if self.modifyParamscheckBox.isChecked():
deletedDict[gene].append(target_list[i])
target_list.pop(i)
# check for the fiveseq
if fiveseq != '':
for i in range(len(target_list) - 1, -1, -1): ### Start at end and move backwards through list
if not target_list[i][1].startswith(fiveseq.upper()):
if self.modifyParamscheckBox.isChecked():
deletedDict[gene].append(target_list[i])
target_list.pop(i)
# check the target range here
if checkStartandEndBool:
for i in range(len(target_list) - 1, -1, -1):
totalDistance = self.gen_lib_dict[gene][2] - self.gen_lib_dict[gene][1]
target_loc = abs(int(target_list[i][0])) - int(self.gen_lib_dict[gene][1])
myRatio = target_loc / totalDistance
if not (startNum <= myRatio <= endNum):
if self.modifyParamscheckBox.isChecked():
deletedDict[gene].append(target_list[i])
target_list.pop(i)
# if the user selected off-targeting, check to see that the targets do not exceed the selected max score
if self.find_off_Checkbox.isChecked():
maxScore = float(self.maxOFF_comboBox.text())
for i in range(len(target_list) - 1, -1, -1):
if maxScore < float(target_list[i][5]):
if self.modifyParamscheckBox.isChecked():
deletedDict[gene].append(target_list[i])
target_list.pop(i)
# Now generating the targets
self.Output[gene] = list()
i = 0
vec_index = 0
prev_target = (0, "xyz", 'abc', 1, "-")
while i < num_targets_per_gene:
# select the first five targets with the score and space filter that is set in the beginning
if len(target_list) == 0 or vec_index >= len(target_list):
break
while abs(int(target_list[vec_index][0]) - int(prev_target[0])) < int(space):
if target_list[vec_index][3] > prev_target[3] and prev_target != (0,"xyz", "abc", 1, "-"):
self.Output[gene].remove(prev_target)
self.Output[gene].append(target_list[vec_index])
prev_target = target_list[vec_index]
vec_index += 1
                        # check and see if there will be an indexing error
if vec_index >= len(target_list) - 1:
vec_index = vec_index - 1
break
# Add the new target to the output and add another to i
self.Output[gene].append(target_list[vec_index])
prev_target = target_list[vec_index]
i += 1
vec_index += 1
# if the user selects modify search parameters, go through and check to see if each one has the number of targets that the user wanted
# if not, append from the deletedDict until they do
if self.modifyParamscheckBox.isChecked():
for gene in self.Output:
if len(self.Output[gene]) < num_targets_per_gene:
for i in range(len(deletedDict[gene])):
if len(self.Output[gene]) == num_targets_per_gene:
break
else:
loc = deletedDict[gene][i][0]
seq = deletedDict[gene][i][1]
pam = deletedDict[gene][i][2]
score = deletedDict[gene][i][3]
strand = deletedDict[gene][i][4] + '*'
endo = deletedDict[gene][i][5]
self.Output[gene].append((loc, seq, pam, score, strand, endo))
# Now output to the file
try:
f = open(output_file, 'w')
# if OT checked
if self.find_off_Checkbox.isChecked():
f.write('Gene Name,Sequence,On-Target Score,Off-Target Score,Location,PAM,Strand\n')
elif not self.find_off_Checkbox.isChecked():
f.write('Gene Name,Sequence,On-Target Score,Location,PAM,Strand\n')
for gene in self.Output:
i = 0
gene_name = self.gen_lib_dict[gene][-1]
for target in self.Output[gene]:
# check to see if the target did not match the user's parameters and they selected 'modify'
# if the target has an error, put 2 asterisks in front of the target sequence
if '*' in target[4]:
tag_id = "**" + gene_name + "-" + str(i + 1)
else:
tag_id = gene_name + "-" + str(i + 1)
i += 1
tag_id = tag_id.replace(',', '')
# if OT checked
if self.find_off_Checkbox.isChecked():
f.write(tag_id + ',' + target[1] + ',' + str(target[3]) + ',' + str(target[5]) + ',' + str(abs(int(target[0]))) + ',' + target[2] + ',' + target[4][0] + '\n')
# if OT not checked
elif not self.find_off_Checkbox.isChecked():
f.write(tag_id + ',' + target[1] + ',' + str(target[3]) + ',' + str(abs(int(target[0]))) + ',' + target[2] + ',' + target[4][0] + '\n')
f.close()
except PermissionError:
msgBox = QtWidgets.QMessageBox()
msgBox.setStyleSheet("font: " + str(self.fontSize) + "pt 'Arial'")
msgBox.setIcon(QtWidgets.QMessageBox.Icon.Critical)
msgBox.setWindowTitle("File Cannot Open")
msgBox.setText(
"This file cannot be opened. Please make sure that the file is not opened elsewhere and try again.")
msgBox.addButton(QtWidgets.QMessageBox.StandardButton.Ok)
msgBox.exec()
return -1
except Exception as e:
print(e)
return
except Exception as e:
logger.critical("Error in generate() in generate library.")
logger.critical(e)
logger.critical(traceback.format_exc())
msgBox = QtWidgets.QMessageBox()
msgBox.setStyleSheet("font: " + str(self.fontSize) + "pt 'Arial'")
msgBox.setIcon(QtWidgets.QMessageBox.Icon.Critical)
msgBox.setWindowTitle("Fatal Error")
msgBox.setText("Fatal Error:\n"+str(e)+ "\n\nFor more information on this error, look at CASPER.log in the application folder.")
msgBox.addButton(QtWidgets.QMessageBox.StandardButton.Close)
msgBox.exec()
exit(-1)
|
# coding=utf-8
"""
test exec *.sql with python
Desc:*
Maintainer: wangfm
CreateDate: 2016-11-09 17:56:28
"""
#cmd login
# mysql -h10.1.0.56 -uroot -pSanbu@123456 -P13306
import MySQLdb
from subprocess import Popen,PIPE
def execFileSql(*args, **kwargs):
"""
Function: execFileSql()
    Desc: Drop the database, recreate it, then load the SQL file
Args:
- :file
- :**
Return: result
Usage: execFileSql(file, **)
Maintainer: wangfm
CreateDate: 2016-11-10 15:57:02
"""
# init data
filesql = args[0]
host = kwargs['host']
usr = kwargs['usr']
passwd = kwargs['passwd']
port = int(kwargs['port'])
database = kwargs['database']
# clean database
try:
conn = MySQLdb.connect(host=host,user=usr,passwd=passwd,port=port)
cur = conn.cursor()
cur.execute('DROP DATABASE IF EXISTS %s' % database)
cur.execute('CREATE DATABASE IF NOT EXISTS %s DEFAULT CHARSET utf8 COLLATE utf8_general_ci' % database)
# CREATE DATABASE IF NOT EXISTS middle DEFAULT CHARSET utf8 COLLATE utf8_general_ci
cur.close()
conn.close()
except MySQLdb.Error,e:
print "Mysql Error %d: %s" % (e.args[0], e.args[1])
process = Popen('mysql -h%s -P%s -u%s -p%s %s' \
%(host, port, usr, passwd, database), stdout=PIPE, stdin=PIPE, shell=True)
print "1------"
print "exec sql,wait..."
output = process.communicate('source '+filesql)
print output
print "end."
def execFileSqlOnly(*args, **kwargs):
"""
Function: execFileSql()
Desc: 删除数据库,并重建,最后刷库
Args:
- :file
- :**
Return: result
Usage: execFileSql(file, **)
Maintainer: wangfm
CreateDate: 2016-11-10 15:57:02
"""
# init data
filesql = args[0]
host = kwargs['host']
usr = kwargs['usr']
passwd = kwargs['passwd']
port = int(kwargs['port'])
database = kwargs['database']
process = Popen('mysql -h%s -P%s -u%s -p%s %s' \
%(host, port, usr, passwd, database), stdout=PIPE, stdin=PIPE, shell=True)
print "1------"
print "exec sql,wait..."
output = process.communicate('source '+filesql)
print output
print "end."
def exec_test():
"""
Function: exec_test()
Desc: test
Args:
- :
Return:
Usage:
Maintainer: wangfm
"""
filesql = 'G:\\00project\\sql_lib\\middle_10.sql'
dbinfo = {'host': '10.1.0.56',
'usr': 'root',
'passwd': 'Sanbu@123456',
'port': '13306',
'database': 'middle2'}
execFileSql(filesql, **dbinfo)
if __name__ == '__main__':
exec_test()
|
from django.urls import path
from . import views
user = views.UserInfo.as_view({
    "get": "list"
})
urlpatterns = [
    path('userinfo/', user, name="user"),
]
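# A hypothetical sketch of the ViewSet these URLs assume (the model and
# serializer names are illustrative, not part of this app):
#
#   from rest_framework.viewsets import ModelViewSet
#   class UserInfo(ModelViewSet):
#       queryset = User.objects.all()
#       serializer_class = UserSerializer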
|
import numpy
import cv2
import heapq
from motion import *
grid_line_x = 7
grid_line_y = 7
m=600/(grid_line_x-1)
n=600/(grid_line_y-1)
# m=480/(grid_line_x-1)
# n=540/(grid_line_y-1)
a1=0
b1=0
a2=0
b2=0
a3=0
b3=0
a4=0
b4=0
clipcount=0
###############################
# trims contours according to the given area
#
#
#
def areacon(contours,area,sub):
count=0
#con=0
for i in range(len(contours)):
ar = cv2.contourArea(contours[i])
#print ar,area
if ar>area-sub and ar<area+sub:#detecting provision marker
contours[count]=contours[i]
count=count+1
#print count
return contours,count
# ###############################
# # detects number and sign
# #
# #
# #
# def detectCellVal(outImg,img_rgb,patch,symbol,threshold,grid_map):
# img_gray = cv2.cvtColor(img_rgb, cv2.COLOR_BGR2GRAY)
# template = cv2.imread(patch)
# temp_gray = cv2.cvtColor(template, cv2.COLOR_BGR2GRAY)
# w, h = temp_gray.shape[::-1]
# # print w,h
# res = cv2.matchTemplate(img_gray,temp_gray,cv2.TM_CCOEFF_NORMED)
# # threshold = 0.616#change this to match
# loc = numpy.where( res >= threshold)
# for pt in zip(*loc[::-1]):
# print pt
# x,y=getcoor(pt[0]+w/2,pt[1]+h/2,m,n)
# grid_map[y][x]=symbol
# cv2.rectangle(outImg, pt, (pt[0] + w, pt[1] + h), (0,0,255), 2)
# return outImg,grid_map
########################################
##############
# Image clipper
#
#
#
#
def imgclip(frame):
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)#where is gray getting used
# lower = numpy.array([0, 0, 0]) #black color mask
# upper = numpy.array([120, 120, 120])
global a1,a2,a3,a4,b1,b2,b3,b4, clipcount #new change 14 sept
clipcount=clipcount+1
lower = numpy.array([0, 0, 0]) #black color mask
upper = numpy.array([179, 56, 45])
mask = cv2.inRange(frame, lower, upper)
ret,thresh1 = cv2.threshold(mask,127,255,cv2.THRESH_BINARY)
contours, hierarchy = cv2.findContours(thresh1, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
#cv2.drawContours(frame,contours,-1,(0,255,0),3)
biggest = 0
max_area = 0
min_size = thresh1.size/4
index1 = 0
    for i in contours:
        area = cv2.contourArea(i)
        if area > 10000 and area > max_area:
            biggest = index1
            max_area = area
        index1 = index1 + 1
    # measure the perimeter of the biggest contour before approximating it
    peri = cv2.arcLength(contours[biggest], True)
    approx = cv2.approxPolyDP(contours[biggest], 0.05*peri, True)
#drawing the biggest polyline
cv2.polylines(frame, [approx], True, (0,255,0), 3)
if clipcount<=2 or (a1==0 and b1==0 and a2==0 and b2==0 and a3==0 and b3==0 and a4==0 and b4==0):#new change 14 sept
x1 = approx[0][0][0]
y1 = approx[0][0][1]
x2 = approx[1][0][0]
y2 = approx[1][0][1]
x3 = approx[3][0][0]
y3 = approx[3][0][1]
x4 = approx[2][0][0]
y4 = approx[2][0][1]
a1=x1
a2=x2
a3=x3
a4=x4
b1=y1
b2=y2
b3=y3
b4=y4
# print x1,y1,x2,y2,x3,y3,x4,y4
# x1 = 570
# y1 = 110
# x2 = 69
# y2 = 100
# x3 = 570
# y3 = 390
# x4 = 70
# y4 = 400
#points remapped from source image from camera
#to cropped image try to match x1, y1,.... to the respective near values
# A,B,C,D
# pts1 = numpy.float32([[x1,y1],[x2,y2],[x3,y3],[x4,y4]])
# pts2 = numpy.float32([[0,0],[0,480],[320,0],[320,480]])
# pts1 = numpy.float32([[a1,b1],[a2,b2],[a3,b3],[a4,b4]])
# pts2 = numpy.float32([[0,0],[0,480],[320,0],[320,480]])
pts1 = numpy.float32([[a3,b3],[a1,b1],[a4,b4],[a2,b2]])
pts2 = numpy.float32([[0,0],[0,480],[320,0],[320,480]])
persM = cv2.getPerspectiveTransform(pts1,pts2)
img = cv2.warpPerspective(frame,persM,(320,480))
return img
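# Corner-mapping note (a reading of the code above, not from the source): pts1
# orders the detected quad corners to line up with pts2 = [(0,0), (0,480),
# (320,0), (320,480)], so the warp always lands on a fixed 320x480 canvas
# regardless of the camera's viewing angle.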
###clipping ends
############
############
# Obstacle dilation
#
#
#
############################################
def obstacle(hsv):
lower = numpy.array([53 ,112, 34],numpy.uint8)
upper = numpy.array([94, 255, 255],numpy.uint8)
# lower = numpy.array([22 ,57, 208],numpy.uint8)#obstacle green
# upper = numpy.array([75, 251, 253],numpy.uint8)
# lower = numpy.array([0 ,0, 0],numpy.uint8)
# upper = numpy.array([179, 255, 88],numpy.uint8) #different black
# lower = numpy.array([0, 0, 0]) #black color mask
# upper = numpy.array([120, 120, 120])
mask = cv2.inRange(hsv,lower, upper)
contours, hierarchy = cv2.findContours(mask,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
contours=sorted(contours, key = cv2.contourArea, reverse = True)[:]
contours,length=areacon(contours,2500,2000)
contours=sorted(contours, key = cv2.contourArea, reverse = True)[:length]
cv2.fillPoly(mask,contours, (255,255,255))
# cv2.imshow('maksed',mask)
#kernel = numpy.ones((50,40),numpy.uint8)
kernel = numpy.ones((33,33),numpy.uint8)
closing = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel)
#erosion = cv2.erode(mask,kernel,iterations = 1)
dilation = cv2.dilate(closing,kernel,iterations = 1)#obstacle = cv2.dilate(closing,kernel,iterations = 1)
cv2.imshow('Obstacle dilation',dilation)
return mask, dilation
#################################
#################################
#draw grid with obstacles marked
#
#
#
def markobstacle(obb,imgg,m,n):
h,k,l=imgg.shape
widm=h/(m-1)
widn=k/(n-1)
    '''
    obb -- obstacle mask image; imgg -- image to annotate
    returns the updated grid_map and the annotated image
    '''
global grid_map
#cv2.imshow("walls in grid map",obb)
for x in range(0,m-1):
X=x*widm+(widm/2)
for y in range(0,n-1):
Y=y*widn+(widn/2)
# print
if obb[X,Y]>=200 or x==0 or x==m-2 or y==0 or y==n-2:#and frame[X,Y,1]>=70 and frame[X,Y,2]>=70: #obstacle black ,bgr value(0,0,0)
grid_map[x][y]=1
cv2.circle(imgg,(Y,X), 5, (0,50,200), -1)
continue
#print grid_map
return grid_map,imgg
##############################################
##################
# grid draw
#
#
#
def grid_draw(image,mm,nn): ## image is the input frame; mm and nn are the numbers of grid lines
    #img=cv2.imread(filename) ##getting input image
h,k,l=image.shape
widm=h/(mm-1)
widn=k/(nn-1)
for x in range(0, mm): ##drawing lines
X=x*widm
        cv2.line(image,(0,X),(k,X),(0,0,0), 2)# lines in black, BGR format
for y in range(0, nn): ##drawing lines
Y=y*widn
cv2.line(image,(Y,0),(Y,h),(0,0,0), 2)
return (image)
###################
###########################################
# calculate contours coordinates
#
#
#
#
def ccoor(contour):
M = cv2.moments(contour)
cx = int(M['m10']/M['m00'])
cy = int(M['m01']/M['m00'])
return cx,cy
###############################
# solves the grid: applies the +/- operators found in
# odd columns to the numbers in even columns,
# accumulating the result into column 5
#
def solveGrid(grid_map):
operator='+'
for i in range(0,6):
operator='+'
for j in range(0,5):
# print grid_map[i][j]
if str(grid_map[i][j])=='+' and j%2==1:
operator='+'
# grid_map[i][5]=grid_map[i][5]+grid_map[i][j]
elif str(grid_map[i][j])=='-' and j%2==1:
operator='-'
# grid_map[i][5]=grid_map[i][5]-grid_map[i][j]
elif j%2==0:
if operator=='+':
grid_map[i][5]=grid_map[i][5]+grid_map[i][j]
else:
grid_map[i][5]=grid_map[i][5]-grid_map[i][j]
return grid_map
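# Worked example (hypothetical row): [2, '+', 3, '-', 1, 0] accumulates into
# column 5 as 0 + 2 + 3 - 1 = 4, reading operators from the odd columns.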
|
from typing import List

class Solution:
def minimumTime(self, time: List[int], totalTrips: int) -> int:
        def check(cur: int) -> bool:
            cnt = 0
            for t in time:
                cnt += cur // t
                if cnt >= totalTrips:
                    return True
            return False
time.sort()
l, r = 1, time[0] * totalTrips
while l < r:
mid = (l + r) >> 1
if check(mid):
r = mid
else:
l = mid + 1
return l
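# Usage sketch: with time=[1, 2, 3] and totalTrips=5, check(3) counts
# 3//1 + 3//2 + 3//3 = 5 trips, so Solution().minimumTime([1, 2, 3], 5) -> 3.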
|
from unittest import TestCase
from Calculator import Calculator
class TestCalculator(TestCase):
def setUp(self):
self.calculator = Calculator()
def test_add(self):
result = self.calculator.add(10, 20)
expected = 30
self.assertEqual(expected, result)
|
from PIL import Image
import glob
import os
def main():
label = ['Atelectasis','Cardiomegaly','Consolidation','Edema','Effusion','Emphysema','Fibrosis','Hernia',
'Infiltration','Mass','Normal','Nodule','Pleural_Thickening','Pneumonia','Pneumothorax']
for i in label:
path = r'C:/Users/Mimi/git/tensorflow_lung/tf_files/lung_photos_png/{}/'.format(i)
output_dir = r'C:/Users/Mimi/git/tensorflow_lung/tf_files/lung_photos/{0}/'.format(i)
print(path)
for file in glob.glob(path + "*.png"):
im = Image.open(file)
file2 = file.split("\\")[-1].replace(".png", "")
#create folder if not exists
if not os.path.exists(output_dir):
os.makedirs(output_dir)
im = im.convert("RGB")
im.save(output_dir + "{}.jpg".format(file2))
if __name__ == "__main__":
main()
|
# -*- coding: utf-8 -*-
__author__ = 'Damir'
from itertools import imap
from operator import itemgetter
from helpers import get_A_B
from avl_tree import AVLTree
def calc_Y(x, a, b):
x = float(x)
a = float(a)
b = float(b)
return a*x+b
ALL_XS = list()
ALL_COORDINATES = list()
SORT_COORDINATES = list()
TREE_COUNTER = 0
def coord_processing(pol_id, icoords):
"""
записывает список координат вида
{'x1': , 'y1': , 'x2': , 'y2': , pol_id': , }
"""
local_xs = set()
    # A polygon has at least 3 coordinates
prev, next = icoords.next(), icoords.next()
while next:
p_x = prev[0]
p_y = prev[1]
n_x = next[0]
n_y = next[1]
if p_x < n_x:
c = {
'x1': p_x,
'y1': p_y,
'x2': n_x,
'y2': n_y,
}
elif p_x > n_x:
c = {
'x1': n_x,
'y1': n_y,
'x2': p_x,
'y2': p_y,
}
else:
print 'Warning, pol_id={0} has vertical line'.format(pol_id)
raise Exception()
c['a'], c['b'] = get_A_B(**c)
c['pol_id'] = pol_id
print c
try:
prev = next
next = icoords.next()
except StopIteration:
next = None
ALL_COORDINATES.append(c)
def get_coordinates():
"""
из файла читаем строки
"""
    with open(r'C:\Users\Damir\Projects\polygon.txt') as f:
for line in f.readlines():
            # fixme: carriage-return separator
pol_id, other_id, line_co = line.split('\t', 2)
print len(line_co.split(','))
icoords = imap(lambda x: x.split(), line_co.split(','))
coord_processing(pol_id, icoords)
break
def sort_coordinates():
"""
сортируем все координаты по х1
"""
global SORT_COORDINATES
SORT_COORDINATES = sorted(ALL_COORDINATES, key=itemgetter('x1'))
stopped = 1
def sort_tree_nodes(nodes, x_middle):
    # evaluate each segment at its x1 so the nodes get a sortable value
    for n in nodes:
        n['val'] = calc_Y(n['x1'], n['a'], n['b'])
    return nodes
def build_tree(nodes):
pass
def build_first_tree():
first = SORT_COORDINATES[0]
    # free the memory
SORT_COORDINATES[0] = None
first_x = float(first['x1'])
global stopped
nodes = [first, ]
next = SORT_COORDINATES[stopped]
n_x = float(next['x1'])
while n_x == first_x:
nodes.append(next)
        # free the memory
SORT_COORDINATES[stopped] = None
stopped += 1
next = SORT_COORDINATES[stopped]
n_x = float(next['x1'])
x_middle = (n_x+first_x)/2
    # sort the nodes
    nodes = sort_tree_nodes(nodes, x_middle)
    # build the tree
    ref_to_tree = build_tree(nodes)
    # the first tree
    ALL_XS.append((first_x, ref_to_tree))
    # the next X value
    ALL_XS.append((n_x, None))
if __name__ == "__main__":
pass
# a, b = linear_func(0.5, 2, 1, 1)
# y = calc_Y(a,b,3)
# print y
# get_coordinates()
# sort_coordinates()
# print 'Coordinates'
# for c in ALL_COORDINATES:
# print c
# print SORT_COORDINATES
# for s in SORT_COORDINATES:
# print s['x1']
# build_first_tree()
# print 'stoppes', stopped
|
import requests
import sys
import zeep
from requests import Session
def menu():
    run_menu = True
    menu_text = ("\n---------------------\n" +
                 "Pesquisa CEP (Webservice)" +
                 "\n---------------------\n" +
                 "(0) Sair \n" +
                 "-------------------")
    while run_menu:
        print(menu_text)
        cep = input("Digite o CEP : ")
        if len(cep) == 8:
            request = requests.get('http://api.postmon.com.br/v1/cep/{0}'.format(cep))
            get = request.json()
            print(get)  # display the lookup result
        elif cep == "0":
            run_menu = False
            sys.exit()
        else:
            print("CEP invalido")
#######################################################################
menu()
# Standalone example (unreachable while menu() exits via sys.exit)
cep = "03891-000"
request = requests.get('http://api.postmon.com.br/v1/cep/{0}'.format(cep))
get = request.json()
print(get['uf'])
# Example using basic authentication with a webservice
session = Session()
client = zeep.Client(
    wsdl='http://api.postmon.com.br/v1/cep/{0}'.format(cep)
)
##response = client.service.insert(
## numero_contribuinte = "0162150165-7",
## nome_logradouro_imovel = "RUA CARNAÚBA DOS DANTAS",
## bairro_imovel = "VILA SANTA LÚCIA",
## cep = "03891-000",
## cidade = "SÃO PAULO",
## codigo_imovel = "0162150165-7",
## tipo_contribuinte = "FISICA",
## numero_imovel = "299",
## documento_contribuinte_1 = "00044785395885",
## nome_contribuinte="KEVIN BRAGA",
## fonte_pesquisa="CIDADÃO.BETHA",
## uf = "SP"
## )
##
##print (response)
##_id = client.service.getKeys(
## u_cpf_cnpj="00035141219828"
##)
##
##update = client.service.update(
## state="1",
## u_cpf_cnpj = "00035141219828",
## sys_id=_id['sys_id'][0]
##)
##
##print(update)
##response = client.service.update(
## state="2",
## sys_id="9a2d131f0f27b6408a94e0ba42050eaf",
## u_cpf_cnpj="35141219828",
## u_nome ="KEVIN BRAGA")
##
##print (response)
|
# -*- coding: utf-8 -*-
import spidercomm as common
from bs4 import BeautifulSoup
# get all tags a from a single url
def a_links(url_seed,attrs={}):
html = common.download(url_seed)
soup = BeautifulSoup(html,'html.parser')
alinks= soup.find_all('a',attrs)
return alinks
def crawled_page(crawled_url):
html = common.download(crawled_url)
soup = BeautifulSoup(html,'html.parser')
title = soup.find('h1',{'class':'title'})
if title== None:
return "Title_Is_None",crawled_url
content = soup.find('div',{'class':'show-content'})
if content == None:
return title.text, "Content_Is_None"
return title.text,content.text
def isMultiPaged(url):
html_page1 = common.download(url % 1)
soup = BeautifulSoup(html_page1,'html.parser')
body1 = soup.find('body')
body1.script.decompose()
html_page2 = common.download(url % 2)
if html_page2 == None:
return False
soup = BeautifulSoup(html_page2,"html.parser")
body2 = soup.find('body')
#print [x.extract() for x in body2.findAll('script') ]
body2.script.decompose()
if str(body1) == str(body2):
return False
else:
return True
def getNumberOfPages(url):
    count = 1
    if isMultiPaged(url):
        while True:
            page_url = url % count  # keep the original URL template intact
            # print "url: %s" % page_url
            count += 1
            html = common.download(page_url)
            if html is None:
                break
    return count
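# Usage sketch (hypothetical template URL):
# getNumberOfPages('http://example.com/article/%d') probes pages 1, 2, ...
# until common.download returns None.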
|
preco = float(input('Digite o preço do produto:'))
desconto = preco * 0.95
print('O valor do produto com desconto é de: R${}.'.format(desconto))
|
def average(lst):
total = 0
for value in lst:
total += value
return total / len(lst)
def nestedAverage(lst):
total = 0
totalLength = 0
for nestedList in lst:
totalLength += len(nestedList)
for value in nestedList:
total += value
return total / totalLength
print(nestedAverage([[1, 3], [1, 2]]))
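# nestedAverage flattens one level: [[1, 3], [1, 2]] -> (1+3+1+2) / 4 = 1.75.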
|
import sys, os, urllib2, tweetstream, json
from datetime import datetime as dt
CONSUMER_KEY = ""
CONSUMER_SECRET = ""
ACCESS_TOKEN_KEY = ""
ACCESS_TOKEN_SECRET = ""
stream=tweetstream.SampleStream
start=dt.now()
with stream(CONSUMER_KEY, CONSUMER_SECRET, ACCESS_TOKEN_KEY, ACCESS_TOKEN_SECRET) as stream:
    while True:
        try:
            stop = False
            for tweet in stream:
                print json.dumps(tweet)
                if (dt.now() - start).seconds > 3600:
                    stop = True
                    break
            if stop:
                break
        except Exception:
            if (dt.now() - start).seconds > 3600:
                break
|
from napalm import get_network_driver
import json
def connection(host, username, password):
driver = get_network_driver('ios')
iosvl2 = driver(host, username, password)
iosvl2.open()
print("connected to :", host)
return iosvl2
def getconfig(host, username, password):
iosvl2 = connection(host, username, password)
output = iosvl2.get_config()
config_list = output.get('running').splitlines()
json_config = json.dumps(config_list, sort_keys=True, indent=4)
config_list = json.loads(json_config)
iosvl2.close()
return config_list
def getacl(file):
line = []
for item in file:
if item.strip(' ').startswith('access-list'):
line.append(item)
    if line:
        for item in line:
            print(item)
        print('ACLs configured')
else:
print('no ACL configured')
def getaccessclass(file):
line = []
for item in file:
if item.strip(' ').startswith('access-class'):
line.append(item)
if line:
for item in line:
print(item)
print('access-class configured')
else:
print('no access-class configured')
def getssh2(file):
version2 = []
ssh = []
for item in file:
if item.strip(' ') == 'ip ssh version 2':
version2 = item
if item.strip(' ') == 'transport input ssh':
ssh = item
if version2:
print('ssh 2 configured')
else:
print('no ssh 2 configured')
if ssh:
print('ssh is forced')
else:
print('ssh is not forced')
def getaaa(file):
aaa = []
authentication = []
for item in file:
if item.strip(' ') == 'aaa new-model':
aaa = item
        if item.strip(' ').startswith('aaa authentication'):
            authentication = item
if aaa:
print('aaa model created')
else:
print('no aaa model created')
if authentication:
print('aaa authentication is configured')
else:
print('aaa authentication is not configured')
def getntp(file):
ntp = []
for item in file:
if item.strip(' ').startswith('ntp server'):
ntp.append(item)
if ntp:
print('ntp is configured')
else:
print('ntp is not configured')
# on access-mode interfaces
# check cdp neighbor interfaces
def getbpdu_guard(file):
guard = []
for item in file:
if item.strip(' ') == 'spanning-tree bpduguard enable':
guard = item
if guard:
print('spanning-tree bpduguard is configured')
else:
print('spanning-tree bpduguard is not configured')
def getsnmpv3(file):
snmp = []
for item in file:
if 'v3' in item and item.strip(' ').startswith('snmp-server'):
snmp = item
if snmp:
print('SNMPv3 is configured')
else:
print('SNMPv3 is not configured')
# vlan configured
def getarp(file):
arp = []
for item in file:
        if item.strip(' ').startswith('ip arp inspection'):
            arp = item
if arp:
print('ARP inspection is configured')
else:
print('ARP inspection is not configured')
# vlan
def getdhcpsnooping(file):
dhcp = []
for item in file:
        if item.strip(' ').startswith('ip dhcp snooping vlan'):
            dhcp = item
if dhcp:
print('DHCP snooping is configured')
else:
print('DHCP snooping is not configured')
def getbackup(file):
archive = []
for item in file:
if item.strip(' ') == 'archive' or item.strip(' ').startswith('path'):
archive = item
if archive:
print('Backup is configured')
else:
print('Backup is not configured')
def check_ip(ip, user, password):
config = getconfig(ip, user, password)
getacl(config)
getaccessclass(config)
getssh2(config)
getaaa(config)
getntp(config)
getbpdu_guard(config)
getsnmpv3(config)
getarp(config)
getdhcpsnooping(config)
getbackup(config)
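# Usage sketch (hypothetical credentials and a documentation-range IP):
# check_ip('192.0.2.1', 'admin', 'secret')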
|
# Convert the number to a character list, reverse it, rebuild the signed number,
# and check whether the result exceeds INT_MAX or falls below INT_MIN
class Solution:
def reverse(self, x):
"""
:type x: int
:rtype: int
"""
sign = [1,-1][x < 0]
rst = sign * int(str(abs(x))[::-1])
return rst if -(2**31)-1 < rst < 2**31 else 0
class Solution:
def reverse(self, x):
"""
:type x: int
:rtype: int
"""
b = 2 ** 31
neg_b = -1 * b
result = 0
if x > 0:
while x > 0:
result = x % 10 + result * 10
x = x // 10
else:
x = abs(x)
while x > 0:
result = x % 10 + result * 10
x = x // 10
result *= -1
if result >= b or result < neg_b:
return 0
return result
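# Worked example: reverse(-123) strips the sign, peels digits into
# result = 3, 32, 321, restores the sign (-321) and range-checks it.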
|
import numpy as np
from .utils import *
def get_positive_input(n1=40, n2=30, mean_distance=0.8):
"""
Constructs two groups with n1 observations in first and n2 observations in second.
:param n1:
:param n2:
:param mean_distance: distance of means in two gorups
:return:
"""
y = [2 * np.random.normal(0, 1, size=n1) + 100 - mean_distance,
2 * np.random.normal(0, 1, size=n2) + 100 + mean_distance]
return y
def get_binary_input(n1=25, a1=12, n2=28, a2=20):
    x1 = np.zeros(shape=n1, dtype=int)
    x1[:a1] = 1
    x2 = np.zeros(shape=n2, dtype=int)
    x2[:a2] = 1
return [x1, x2]
def save_input(input_function, file_name_prefix, folder_name):
y = input_function()
form = "%d" if y[0].dtype == np.int else "%f"
for i in range(2):
np.savetxt(f"./{folder_name}/{file_name_prefix}_{i}.csv", y[i], fmt=form)
def get_count_input(n1=67, lambda1=11.1, n2=76, lambda2=9.9):
x1 = np.random.poisson(lam=lambda1, size=n1)
x2 = np.random.poisson(lam=lambda2, size=n2)
return [x1, x2]
def get_ordinal_input(f1=(10, 9, 20), f2=(18, 9, 7)):
    x1 = np.zeros(shape=sum(f1), dtype=int)
for i in range(1, len(f1)):
begin = sum(f1[:i])
x1[begin: (begin + f1[i])] = i
    x2 = np.zeros(shape=sum(f2), dtype=int)
for i in range(1, len(f2)):
begin = sum(f2[:i])
x2[begin: (begin + f2[i])] = i
return [x1, x2]
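# Shape sketch: f1=(10, 9, 20) yields a vector of 10 zeros, 9 ones and 20 twos.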
def get_binomial_input(n1=20, p1=0.44, n2=25, p2=0.52):
    x1 = np.zeros(shape=(n1, 2), dtype=int)
    x1[:, 0] = np.random.poisson(lam=4, size=n1)
    x1[:, 1] = np.random.binomial(n=x1[:, 0], p=p1)
    x2 = np.zeros(shape=(n2, 2), dtype=int)
    x2[:, 0] = np.random.poisson(lam=4, size=n2)
    x2[:, 1] = np.random.binomial(n=x2[:, 0], p=p2)
    return [x1, x2]
def run_all():
folder_name = "artificial_data"
mk_dir_if_not_exists(folder_name)
save_input(get_binary_input, file_name_prefix="Binary_data", folder_name=folder_name)
save_input(get_positive_input, file_name_prefix="Positive_real_data", folder_name=folder_name)
save_input(get_count_input, file_name_prefix="Count_data", folder_name=folder_name)
save_input(get_ordinal_input, file_name_prefix="Ordinal_data", folder_name=folder_name)
save_input(get_binomial_input, file_name_prefix="Binomial_data", folder_name=folder_name)
if __name__ == '__main__':
run_all()
|
from SupportClasses.WordEmbedder import WordEmbedder
import numpy as np
class SentenceEmbedder:
def __init__(self):
self.wordEmbedder = self.__initWordEmbedder()
def __initWordEmbedder(self):
return WordEmbedder()
def getVector(self, sentence):
sentenceVectors = []
for word in sentence.split(' '):
# print('Word:', word)
sentenceVectors.append(self.wordEmbedder.getVector(word))
return np.asarray(sentenceVectors)
# sentence = 'I love to eat Pizza'
# sentence = 'Build me a workflow'
# sentence = 'My file size is 3GB'
# # sentence = 'I love to eat Pizza'
# print('sentence:', sentence)
# se = SentenceEmbedder()
# print(se.getVector(sentence))
|
# Generated by Django 2.2 on 2019-04-12 15:53
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('yohbiteapp', '0009_auto_20190412_0935'),
]
operations = [
migrations.RenameField(
model_name='district',
old_name='state',
new_name='district',
),
migrations.RenameField(
model_name='local',
old_name='state',
new_name='district',
),
]
|
# Generated by Django 3.2.5 on 2021-07-26 20:34
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('clinic_app', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Patient',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('first_name', models.CharField(max_length=30)),
('last_name', models.CharField(max_length=30)),
('id_number', models.IntegerField(blank=True)),
('birth_certificate_no', models.IntegerField(blank=True)),
('gender', models.CharField(choices=[('Male', 'Male'), ('Female', 'Female')], max_length=15)),
('age', models.IntegerField()),
],
),
]
|
import random
def display_game():
user_name = input("What is your name? ")
print("Hello " + user_name)
print("Well, " + user_name + " if you want to go out alive you have to guess the number between 1-50")
random_number = random.randint(1, 50)
while True:
user_number = int(input("What is your guess? "))
if random_number == user_number:
break
elif random_number < user_number:
print("{}{}".format(user_number, " is too high"))
else:
print("{}{}".format(user_number, " is too low!"))
print ("Yes " + str(user_number) + " is my secret number! Congratulations.")
display_game()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from head import *
from db_env import *
class MssqlConnection:
    def __init__(self):
        #: server address
        self.host = db_conn_info['HOST']
        #: database name
        self.db_name = db_conn_info['DATABASE']
        #: login user
        self.user = db_conn_info['USER']
        #: login password
        self.password = db_conn_info['PASSWORD']
        #: database connection handle
        self.handler = ''
        #: connection cursor
        self.cursor = ''
        #: plant table objects
        self.plant_dict = {}
        #:
        self.plant_name2id = {}
        self.plant_id2name = {}
        #: sensor table objects
        self.sensor_dict = {}
        self.sensor_name2id = {}
        self.sensor_id2name = {}
        #: room table objects
        self.room_dict = {}
        #:
        self.room_desc2id = {}
        self.room_id2desc = {}
        #: controller ID to room number mapping
        self.controller_dict = {}
        self.load_table()
def connect(self):
"""
建立数据库连接
"""
if self.handler == '':
#log_msg = 'In connect-- host: %s, user: %s, password: %s, dbName: %s' %(self.host, self.user, self.password, self.db_name)
#log_handler.debug(log_msg)
self.handler = pyodbc.connect(driver='{SQL Server}', server=self.host, \
database=self.db_name, uid=self.user, pwd=self.password, ansi = True)#, unicode_results = True)
self.cursor = self.handler.cursor()
def close(self):
"""
关闭数据库连接
:rtype: 0 成功, -1 失败
"""
if self.cursor:
self.cursor.close()
self.cursor = ''
if self.handler:
self.handler.close()
self.handler = ''
return 0
def queryAll(self, sql_str):
"""
获取全部查询结果
:param sql_str: 待执行的SQL语句
:rtype: 查询结果
"""
self.cursor.execute(sql_str)
return self.cursor.fetchall()
def querySome(self, sql_str, maxcnt):
"""
获取前maxcnt条查询结果
:param sql_str: 待执行的SQL语句
:param maxcnt: 返回限制的行数
:rtype: 查询结果
"""
self.cursor.execute(sql_str)
return self.cursor.fetchmany(maxcnt)
def queryPage(self, sql_str, skip_cnt, page_size):
"""
获取分页查询结果
:param sql_str: 待执行的SQL语句
:param skip_cnt:
:param page_size:
:rtype:
"""
self.cursor.execute(sql_str)
self.cursor.skip(skip_cnt)
return self.cursor.fetchmany(page_size)
def count(self,sql_str):
"""
获取查询条数
:param sql_str: 待执行的SQL语句
:rtype: 查询条数
"""
self.cursor.execute(sql_str)
return self.cursor.fetchone()[0]
def executeDML(self, sql_str):
"""
执行DML语句,包括增删改
:param sql_str: 待执行的SQL语句
:rtype: 成功返回生效的数据条数, 失败返回 ERR
"""
try:
cnt = self.cursor.execute(sql_str).rowcount
self.handler.commit()
return cnt
except Exception, e:
log_msg = str(e)
# log_handler.error(log_msg)
# print log_msg
return ERR
def load_table(self):
"""
将数据库中部分表加载到内存
"""
self.connect()
sql_str = 'select room_id, room_description from tb_room'
query_list = self.queryAll(sql_str)
for i in query_list:
self.room_dict[i[0]] = i[1]
self.room_desc2id[i[1]] = i[0]
self.room_id2desc[i[0]] = i[1]
sql_str = 'select plant_id, plant_name from tb_plant'
query_list = self.queryAll(sql_str)
for i in query_list:
temp = TablePlant()
temp.plant_id = i[0]
temp.plant_name = i[1]
self.plant_dict[temp.plant_name] = temp
self.plant_name2id[i[1]] = i[0]
self.plant_id2name[i[0]] = i[1]
sql_str = 'select sensor_id, sensor_type, room_id, state from tb_sensor'
query_list = self.queryAll(sql_str)
for i in query_list:
temp = TableSensor()
temp.sensor_id = i[0]
temp.sensor_name = i[1]
temp.room_id = i[2]
temp.state = i[3]
self.sensor_dict[temp.sensor_name] = temp
self.sensor_name2id[temp.sensor_name] = temp.sensor_id
self.sensor_id2name[temp.sensor_id] = temp.sensor_name
sql_str = 'select controller_id, room_id, controller_type from tb_controller'
query_list = self.queryAll(sql_str)
        #TODO: probably buggy: each room has multiple controllers, but this keeps only one mapping per room/type
for i in query_list:
self.controller_dict[i[1]] = {i[2]: i[0]}
self.close()
@classmethod
def test_connection(cls):
try:
handler = pyodbc.connect(driver='{SQL Server}', server=db_conn_info['HOST'], \
database=db_conn_info['DATABASE'], uid=db_conn_info['USER'], pwd=db_conn_info['PASSWORD'], ansi = True)#, unicode_results = True)
cursor = handler.cursor()
cursor.close()
handler.close()
return SUC
except Exception, e:
log_msg = 'DB test connection failed'
log_handler.error(log_msg)
return FAI
def transfor_absolute_time(self, state = POLICY_RUNNING):
"""
将执行策略的相对时间转换为绝对时间
:param start_time: 实例最早有效时间
:rtype: 成功 0 , 失败 -1
"""
self.connect()
self.executeDML("delete from tb_absolute_time")
instance_list = []
sql_str = 'select distinct room_id from tb_policy_instance where state >= %d' %(state)
room_list = self.queryAll(sql_str)
for room in room_list:
sql_str = '''select top 1 policy_instance_id, policy_id, plant_id, room_id, start_time from tb_policy_instance
where room_id = %d and state >= %d order by state, start_time, policy_instance_id''' %(room[0], state)
new_instance = self.queryAll(sql_str)
temp = PolicyInstance()
temp.instance_id = new_instance[0][0]
temp.policy_id = new_instance[0][1]
temp.plant_id = new_instance[0][2]
temp.room_id = new_instance[0][3]
temp.start_time = new_instance[0][4]
instance_list.append(temp)
for i in instance_list:
sql_str = u"""
select rule_id, interval_date, hours from tb_rule
where policy_id = %d
""" %(i.policy_id)
rst = self.queryAll(sql_str)
if len(rst) == 0:
sql_str = 'update tb_policy_instance set state = %d where policy_instance_id = %d' %(POLICY_OLD, i.instance_id)
self.executeDML(sql_str)
# log_msg = 'Got one empty policy, which has been updated to OLD directly !'
# log_handler.debug(log_msg)
continue
absolute_time_list = []
first_rule = AbsoluteTime()
            # the first boundary takes effect at the instance start time
first_rule.rule_id = rst[0][0]
first_rule.instance_id = i.instance_id
first_rule.change_time = i.start_time
absolute_time_list.append(first_rule)
count = timedelta(days = 0)
for j in rst[:-1]:
count += timedelta(days = j[1], hours = j[2])
change_time = i.start_time + count
aaa = AbsoluteTime()
                #TODO: danger zone: rule_ids within a policy are not guaranteed to be consecutive when created
aaa.rule_id = j[0] + 1
aaa.instance_id = i.instance_id
aaa.change_time = change_time
absolute_time_list.append(aaa)
count += timedelta(days = rst[-1:][0][1], hours = rst[-1:][0][2])
change_time = i.start_time + count
last_rule = AbsoluteTime()
            # reuse the last rule's id so the final boundary matches the last rule
last_rule.rule_id = rst[-1:][0][0]
last_rule.instance_id = i.instance_id
last_rule.change_time = change_time
absolute_time_list.append(last_rule)
for j in absolute_time_list:
sql_str = "insert into tb_absolute_time(rule_id, policy_instance_id, change_time) values(%d, %d, '%s')" %(j.rule_id, j.instance_id, j.change_time)
try:
self.executeDML(sql_str)
except pyodbc.IntegrityError, e:
continue
sql_str = 'update tb_policy_instance set state = %d where policy_instance_id = %d' %(POLICY_RUNNING, i.instance_id)
self.executeDML(sql_str)
self.close()
# log_msg = "All policy in all room is all ready !"
# log_handler.debug(log_msg)
def transfor_room_absolute_time(self, room_id, state = POLICY_RUNNING):
"""
将数据库中策略的相对时间转换为绝对时间
:param room_id: 房间号
:param state: 更改后的策略状态
:rtype: SUC / FAI / ERR
"""
self.connect()
        # look up the newest policy instance for this room
sql_str = '''select top 1 policy_instance_id, policy_id, plant_id, room_id, start_time from tb_policy_instance
where room_id = %d and state >= %d order by state, start_time, policy_instance_id''' %(room_id, state)
new_instance = self.queryAll(sql_str)
if len(new_instance) == 0:
            # no new policy
# log_msg = 'No new policy to be executed in room: %d' %room_id
# log_handler.debug(log_msg)
return FAI
temp = PolicyInstance()
temp.instance_id = new_instance[0][0]
temp.policy_id = new_instance[0][1]
temp.plant_id = new_instance[0][2]
temp.room_id = new_instance[0][3]
temp.start_time = new_instance[0][4]
        # fetch the execution rules of the new policy
sql_str = u'''
select rule_id, interval_date, hours from tb_rule
where policy_id = %d
''' %(temp.policy_id)
rst = self.queryAll(sql_str)
if len(rst) == 0:
            # a new policy exists, but its rule list is empty
sql_str = 'update tb_policy_instance set state = %d where policy_instance_id = %d' %(POLICY_OLD, temp.instance_id)
self.executeDML(sql_str)
# log_msg = 'Got one empty policy, which has been updated to OLD directly !'
# log_handler.debug(log_msg)
return FAI
        # combine with the start time to convert the policy's relative times into absolute times
absolute_time_list = []
        # the first boundary takes effect at the instance start time
first_rule = AbsoluteTime()
first_rule.rule_id = rst[0][0]
first_rule.instance_id = temp.instance_id
first_rule.change_time = temp.start_time
absolute_time_list.append(first_rule)
count = timedelta(days = 0)
for j in rst[:-1]:
count += timedelta(days = j[1], hours = j[2])
change_time = temp.start_time + count
aaa = AbsoluteTime()
aaa.rule_id = j[0] + 1
aaa.instance_id = temp.instance_id
aaa.change_time = change_time
absolute_time_list.append(aaa)
count += timedelta(days = rst[-1:][0][1], hours = rst[-1:][0][2])
change_time = temp.start_time + count
last_rule = AbsoluteTime()
        # reuse the last rule's id so the final boundary matches the last rule
last_rule.rule_id = rst[-1:][0][0]
last_rule.instance_id = temp.instance_id
last_rule.change_time = change_time
absolute_time_list.append(last_rule)
        # delete the current absolute-time entries for this room
sql_str1 = 'select distinct policy_instance_id from vw_task where room_id = %d' %(room_id)
sql_str2 = '''select top 1 policy_instance_id from tb_policy_instance
where room_id = %d
order by state, start_time, policy_instance_id''' %(room_id)
result1 = self.queryAll(sql_str1)
# result2 = self.queryAll(sql_str2)
if len(result1) > 0:
policy_instance_id = result1[0][0]
sql_str = 'delete from tb_absolute_time where policy_instance_id = %d' %(policy_instance_id)
self.executeDML(sql_str)
# log_msg = 'Policy instance :%d delete from tb_absolute_time, for new instance being added to it !' %(policy_instance_id)
# log_handler.debug(log_msg)
        # insert the new policy's absolute-time rules
for j in absolute_time_list:
sql_str = "insert into tb_absolute_time(rule_id, policy_instance_id, change_time) values(%d, %d, '%s')" %(j.rule_id, j.instance_id, j.change_time)
try:
self.executeDML(sql_str)
except pyodbc.IntegrityError, e:
continue
# log_msg = 'One new policy instance [ID: %d]applied in room :%d, using policy id: %d' %(temp.instance_id, temp.room_id, temp.policy_id)
# log_handler.debug(log_msg)
        # set the policy instance state to running
sql_str = 'update tb_policy_instance set state = %d where policy_instance_id = %d' %(POLICY_RUNNING, absolute_time_list[0].instance_id)
self.executeDML(sql_str)
self.close()
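    # Timing sketch (hypothetical numbers, inferred from the code above): with
    # start_time = 2014-01-01 00:00 and rules lasting 2 days and 1 day, the
    # absolute boundaries become rule 1 at Jan 1 00:00, rule 2 at Jan 3 00:00,
    # and a final boundary (reusing the last rule_id) at Jan 4 00:00.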
def new_plant(self, plant_name):
"""
新建种植植物
:param plant_name: 植物名称
:rtype: 成功返回插入条数, 失败返回 ERR
"""
sql_str = 'insert into tb_plant(plant_name) values(%s)' %(plant_name)
self.connect()
result = self.executeDML(sql_str)
self.close()
return result
def insert_room(self, room_id, room_description):
"""
新建房间
:param room_id: 植物名称
:param room_description: 房间描述
:rtype: 成功返回插入条数, 失败返回 ERR
"""
sql_str = 'insert into tb_room(room_description) values(%s)' %(room_description)
self.connect()
result = self.executeDML(sql_str)
self.close()
return result
def insert_sensor(self, sensor_id, sensor_type, room_id, position = '', state = ON):
"""
插入传感器信息
:param sensor_id: 传感器ID 整型
:param sensor_type: 传感器类型 字符串
:param room_id: 房间ID 整型
:param position: 传感器位置
:param state: 传感器当前状态
:rtype: SUC 成功, FAI 失败, ERR 异常
"""
sql_insert = '''insert into tb_sensor(sensor_id, sensor_type, room_id, position, state)
values(%d, '%s', %d, '%s', %d)''' %(sensor_id, sensor_type, room_id, position, state)
        sql_update = '''update tb_sensor
                        set sensor_type = '%s',
                            room_id = %d,
                            position = '%s',
                            state = %d
                        where sensor_id = %d
                     ''' %(sensor_type, room_id, position, state, sensor_id)
try:
self.connect()
self.executeDML(sql_insert)
return SUC
except Exception,err:
try:
self.connect()
self.executeDML(sql_update)
return SUC
except Exception, e:
log_msg = 'Insert Sensor Failed'
# log_handler.error(log_msg)
# log_msg = 'Insert Sensor Failed: \n%s' %str(e)
return ERR
finally:
self.close()
def update_sensor(self, sensor_id, room_id, new_room_id, sensor_type, position = '', state = ON):
"""
更改传感器信息
"""
sql_str = '''
update tb_sensor
set sensor_type = '%s',
room_id =
'''
def insert_controller(self, controller_id, controller_type, room_id, state = OFF):
"""
插入控制器信息
:param controller_id: 控制器ID
:param controller_type: 控制器类型 字符串
:param room_id: 房间ID
:param state: 传感器当前状态
:rtype: SUC 成功, FAI 失败, ERR 异常
"""
sql_str = '''insert into tb_controller(controller_id, controller_type, room_id, state)
values(%d, '%s', %d, %d)''' %(controller_id, controller_type, room_id, state)
#TODO: update
try:
self.connect()
self.executeDML(sql_str)
self.close()
except Exception:
return FAI
return SUC
def insert_instance(self, room_id, sense_time):
try:
self.connect()
sql_str = "insert into tb_instance(room_id, sense_time) values(%d, '%s')" %(room_id, sense_time)
result = self.executeDML(sql_str)
if result != ERR:
sql_str = "select instance_id from tb_instance where room_id = %d and sense_time = '%s'" %(room_id, sense_time)
result = self.queryAll(sql_str)
self.close()
return result[0][0]
else:
return ERR
except IndexError:
# log_msg = 'cannot create instance!'
# log_handler.error(log_msg)
# print log_msg
return FAI
def insert_sensor_data(self, instance_id, sensor_id, value):
sql_str = 'insert into tb_data(instance_id, sensor_id, data) values(%d, %d, %f)' %(instance_id, sensor_id, value)
self.connect()
self.executeDML(sql_str)
self.close()
return SUC
def insert_data(self, room_id, sense_time, temperature, humidity, co2, light, \
temperature_id = -1, humidity_id = -1, co2_id = -1, light_id = -1):
"""
插入采集数据接口
:param room_id: 房间号
:param sense_time: 采集时间
:param temperature: 温度
:param humidity: 湿度
:param co2: 二氧化碳浓度
:param light: 光照
:rtype: 成功返回 1, 失败返回错误信息
"""
        if temperature_id < 0 or humidity_id < 0 or co2_id < 0 or light_id < 0:
temperature_id = self.sensor_dict['temperature'].sensor_id
humidity_id = self.sensor_dict['humidity'].sensor_id
co2_id = self.sensor_dict['co2'].sensor_id
light_id = self.sensor_dict['light'].sensor_id
self.connect()
# start_time = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
try:
sql_str = "{call sp_insert_sense_data(%d, '%s', %f, %f, %f, %f, %d, %d, %d, %d)}" \
%(room_id, sense_time, temperature, humidity, co2, light, temperature_id, humidity_id, co2_id, light_id)
# print sql_str
self.executeDML(sql_str)
except KeyError:
print 'sensor name error'
self.close()
def create_policy_instance(self, policy_id, plant_name, room_id, start_time, state = POLICY_NEW):
"""
创建新的策略实例
:param policy_id: 策略号
:param plany_name: 名称
:param room_id: 房间号
:param start_time: 开始执行时间,格式要求: 2013-12-17 15:45:00 (格式受限于SQLServer)
:rtype: 成功返回新建的实例号,失败返回-1
"""
self.connect()
try:
plant_id = self.plant_dict[plant_name].plant_id
except KeyError:
self.executeDML("insert into tb_plant(plant_name) values('%s')" %(plant_name))
self.load_table()
plant_id = self.plant_dict[plant_name].plant_id
# plant_id = self.queryAll("select plant_id from tb_plant where plant_name ='%s'" %(plant_name))[0][0]
try:
sql_str = '''insert into tb_policy_instance(policy_id, plant_id, room_id, start_time, state)
values(%d, %d, %d, '%s', %d)''' %(policy_id, plant_id, room_id, start_time, state)
self.executeDML(sql_str)
instance_id = self.queryAll('''select top 1 policy_instance_id from tb_policy_instance
where policy_id = %d order by policy_instance_id desc''' %(policy_id))[0][0]
self.close()
return instance_id
except Exception, e:
print 'in create_policy_instance: '
print e
return -1
def new_policy_instance(self, dict):
"""
新建全新养殖模式
:param dict: 封装了新建策略必须的信息的字典
:rtype: 新建结果,code: 0 成功, -1 失败
"""
policy_id = self.create_policy(dict['description'])
instance_id = self.create_policy_instance(policy_id, dict['plantName'], dict['roomId'], dict['startAt'])
        result = self.create_rule(policy_id, dict['policy'])
if policy_id >= 0 and instance_id >= 0 and result >= 0:
return {'code': 0, 'definition': 'Successful'}, policy_id
else:
            #TODO: report the exact failure point
return {'code': -1, 'definition': 'Failed'}
def update_policy_instance_state(self, room_id, state):
"""
更改策略实例状态
:param policy_instance_id: 策略号
:param state: 状态
:rtype: 【尚无】
"""
self.connect()
sql_str = 'select distinct policy_instance_id from vw_task where room_id = %d' %(room_id)
temp = self.queryAll(sql_str)
if len(temp) == 0:
log_msg = 'update policy instance state in room: %d to state: %d failed' %(room_id, state)
return FAI
policy_instance_id = temp[0][0]
sql_str = '''
update tb_policy_instance
set state = %d
where policy_instance_id = %d
''' %(state, policy_instance_id)
self.executeDML(sql_str)
self.close()
# log_msg = 'Policy instance %d updated to state %d' %(policy_instance_id, state)
# log_handler.debug(log_msg)
def delete_policy(self, policy_id):
sql_str = 'delete from tb_policy where policy_id = %d' %(policy_id)
self.connect()
self.executeDML(sql_str)
self.close()
return SUC
def create_rule(self, policy_id, rules):
"""
插入养殖模式
:param policy_id: 策略号
:param rules: 执行规则列表
:rtype: SUC
"""
self.connect()
for one_rule in rules:
sql_str = u'''insert into tb_rule(
policy_id,
interval_date,
hours,
temperature_peak,
temperature_valle,
humidity_peak,
humidity_valle,
co2_peak,
co2_valle,
reserved1_peak,
reserved1_valle
)values( %d, %d, %d, %f, %f, %f, %f, %f, %f, %f, %f)''' \
%(policy_id,\
one_rule['date'], \
one_rule['hour'], \
one_rule['temperature'][1], \
one_rule['temperature'][0],\
one_rule['humidity'][1], \
one_rule['humidity'][0],\
one_rule['co2'][1], \
one_rule['co2'][0], \
one_rule['brightness'][1], \
one_rule['brightness'][0]
)
self.executeDML(sql_str)
self.close()
return SUC
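    # Expected shape of `rules`, inferred from the keys used above (a sketch,
    # not confirmed by the source):
    # [{'date': 1, 'hour': 0, 'temperature': (20.0, 28.0), 'humidity': (40.0, 60.0),
    #   'co2': (350.0, 800.0), 'brightness': (100.0, 500.0)}, ...]
    # where each pair is (valley, peak).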
def view_controller(self, controller_id):
"""
查看控制器状态
:param controller_id:控制器ID
:rtype:返回控制器状态,整数
"""
self.connect()
sql_str = ''' select state from tb_controller where controller_id = %d''' %(controller_id)
state = self.queryAll(sql_str)
self.close()
return state[0][0]
def update_controller(self, controller_id, state):
"""
更改控制器状态
:param controller: 控制器ID
:param state: 状态
:rtype: 0【待定】
"""
self.connect()
sql_str = '''update tb_controller set state = %d where controller_id = %d''' %(state, controller_id)
self.executeDML(sql_str)
self.close()
return 0
def get_threshold(self, room_id, datetime):
"""
此函数可以获得指定房间的制定时刻的环境限定范围,主要为head.h文件内的threshold队列变量服务
:param datetime: 查询时间
:param room_id: 房间号
:rtype: 包含了两个限定范围的元组,
"""
#TODO: 这里我们假设tb_absolute_time 中,一个房间只能有一种策略在执行,且目前版本,在系统初始化时务必这样,否则将混乱
sql_str = '''
select top 2 %d, change_time,
temperature_valle, temperature_peak,
humidity_valle, humidity_peak,
co2_valle, co2_peak,
light_color, reserved1_valle, reserved1_peak, policy_instance_id
from vw_task
where change_time >= '%s' and room_id = %d
order by change_time
''' %(room_id, datetime, room_id)
self.connect()
temp = self.queryAll(sql_str)
self.close()
return temp
if __name__ == "__main__":
serverIp = db_conn_info['HOST']
dbName = db_conn_info['DATABASE']
uid = db_conn_info['USER']
pwd = db_conn_info['PASSWORD']
# conn=MssqlConnection(serverIp,dbName,uid,pwd)
conn=MssqlConnection()
conn.transfor_absolute_time()
# temp = conn.get_threshold(1, '2014-01-06 16:07:00')
# print temp
conn.test_connection()
# print temp[0][8]
# print (temp[1][0], str(temp[1][1]))
|
import openpyxl
import modules.peripherals.churches as churches
import modules.peripherals.buildings as buildings
#reads data from a .xlsx file in the correct format and returns a list of churches
def input(filename):
workbook = openpyxl.load_workbook(filename)
worksheet = workbook['Churches']
churchList = []
for row in worksheet:
if isValidChurch(row):
curChurch = churches.church(row)
churchList.append(curChurch)
buildingList = []
worksheet = workbook['Buildings']
for row in worksheet:
if isValidBuilding(row):
curBuilding = buildings.building(row)
buildingList.append(curBuilding)
retList = [churchList, buildingList]
return retList
#returns true if church is valid
def isValidChurch(input):
if type(input[4].value) == int: #assume that if female_adults input is an int then church is valid
return True
else:
return False
#returns true if building is valid
def isValidBuilding(input):
if type(input[1].value) == int: #assume that if room 1 capacity is an int then building is valid
return True
else:
return False
|
# Generated by Django 3.1.2 on 2020-11-01 16:02
import accounts.models
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('accounts', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='profile',
name='phone',
),
migrations.AddField(
model_name='profile',
name='birth_date',
field=models.DateField(blank=True, null=True, verbose_name='تاریخ تولد'),
),
migrations.AddField(
model_name='profile',
name='gender',
field=models.IntegerField(blank=True, choices=[(1, 'مرد'), (2, 'زن')], null=True, verbose_name='جنسیت'),
),
migrations.AddField(
model_name='profile',
name='mobile',
field=models.CharField(blank=True, max_length=11, null=True, verbose_name='تلفن همراه'),
),
migrations.AlterField(
model_name='profile',
name='address',
field=models.TextField(blank=True, null=True, verbose_name='آدرس'),
),
migrations.AlterField(
model_name='profile',
name='balance',
field=models.IntegerField(default=0, verbose_name='اعتبار'),
),
migrations.AlterField(
model_name='profile',
name='profile_image',
field=models.ImageField(blank=True, null=True, upload_to=accounts.models.path_and_rename, verbose_name='تصویر'),
),
migrations.AlterField(
model_name='profile',
name='user',
field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='حساب کاربری'),
),
]
|
# Reading file into a list.
menuConfigurationFile = "bankMenu.cfg"
menuConfigurationList = []
with open(menuConfigurationFile) as menuObject:
for line in menuObject:
lineWithNewLine = line.replace('\n', '')
menuConfigurationList.append(lineWithNewLine)
for line in menuConfigurationList:
print(line)
print(menuConfigurationList)
|
# -*- coding: utf-8 -*-
import time
from .common import ApiTestBase, compat_mock, compat_urllib_parse
class TagsTests(ApiTestBase):
"""Tests for TagsEndpointsMixin."""
@staticmethod
def init_all(api):
return [
{'name': 'test_tag_info', 'test': TagsTests('test_tag_info', api)},
{'name': 'test_tag_related', 'test': TagsTests('test_tag_related', api)},
{'name': 'test_tag_search', 'test': TagsTests('test_tag_search', api)},
{
'name': 'test_tag_follow_suggestions',
'test': TagsTests('test_tag_follow_suggestions', api),
},
{
'name': 'test_tags_user_following',
'test': TagsTests('test_tags_user_following', api, user_id='25025320'),
},
{
'name': 'test_tag_follow_mock',
'test': TagsTests('test_tag_follow_mock', api),
},
{
'name': 'test_tag_unfollow_mock',
'test': TagsTests('test_tag_unfollow_mock', api),
},
{'name': 'test_tag_section', 'test': TagsTests('test_tag_section', api)},
]
def test_tag_info(self):
results = self.api.tag_info('catsofinstagram')
self.assertEqual(results.get('status'), 'ok')
self.assertGreater(results.get('media_count'), 0, 'No media_count returned.')
time.sleep(self.sleep_interval)
results = self.api.tag_info(u'日本')
self.assertEqual(results.get('status'), 'ok')
self.assertGreater(results.get('media_count'), 0, 'No media_count returned.')
def test_tag_related(self):
results = self.api.tag_related('catsofinstagram')
self.assertEqual(results.get('status'), 'ok')
self.assertGreater(
len(results.get('related', [])), 0, 'No media_count returned.'
)
def test_tag_search(self):
rank_token = self.api.generate_uuid()
results = self.api.tag_search('cats', rank_token)
self.assertEqual(results.get('status'), 'ok')
self.assertGreater(len(results.get('results', [])), 0, 'No results returned.')
def test_tag_follow_suggestions(self):
results = self.api.tag_follow_suggestions()
self.assertEqual(results.get('status'), 'ok')
self.assertGreater(len(results.get('tags', [])), 0, 'No results returned.')
def test_tags_user_following(self):
results = self.api.tags_user_following(self.test_user_id)
self.assertEqual(results.get('status'), 'ok')
self.assertIn('tags', results)
self.assertGreater(len(results.get('tags', [])), 0, 'No results returned.')
@compat_mock.patch('instapi.Client._call_api')
def test_tag_follow_mock(self, call_api):
tag = 'catsofinstagram'
call_api.return_value = {'status': 'ok'}
self.api.tag_follow(tag)
call_api.assert_called_with(
'tags/follow/{hashtag!s}/'.format(
hashtag=compat_urllib_parse.quote(tag.encode('utf-8'))
),
params=self.api.authenticated_params,
)
@compat_mock.patch('instapi.Client._call_api')
def test_tag_unfollow_mock(self, call_api):
tag = 'catsofinstagram'
call_api.return_value = {'status': 'ok'}
self.api.tag_unfollow(tag)
call_api.assert_called_with(
'tags/unfollow/{hashtag!s}/'.format(
hashtag=compat_urllib_parse.quote(tag.encode('utf-8'))
),
params=self.api.authenticated_params,
)
def test_tag_section(self):
results = self.api.tag_section('catsofinstagram')
self.assertEqual(results.get('status'), 'ok')
self.assertIn('sections', results)
self.assertGreater(len(results.get('sections', [])), 0, 'No results returned.')
|
import datetime
import pytest
from ethtx.models.decoded_model import AddressInfo, Argument
from ethtx.models.objects_model import BlockMetadata, TransactionMetadata, Call, Event
from ethtx.models.semantics_model import ParameterSemantics, ContractSemantics
FAKE_TIME = datetime.datetime(2020, 12, 25, 17, 5, 55)
@pytest.fixture
def patch_datetime_now(monkeypatch):
class MyDatetime:
@classmethod
def now(cls):
return FAKE_TIME
monkeypatch.setattr(datetime, "datetime", MyDatetime)
class DecodedModelMock:
ADDRESS_INFO: AddressInfo = AddressInfo(address="address", name="name")
ARGUMENT: Argument = Argument(name="name", type="type", value=1)
class ObjectModelMock:
BLOCK_METADATA: BlockMetadata = BlockMetadata(
block_number=15,
block_hash="0x1",
timestamp=FAKE_TIME,
parent_hash="0x",
miner="miner",
gas_limit=12,
gas_used=1,
tx_count=5,
)
TRANSACTION_METADATA: TransactionMetadata = TransactionMetadata(
tx_hash="0x",
block_number=1,
gas_price=2,
from_address="0xa",
to_address="0xb",
tx_index=3,
tx_value=4,
gas_limit=5,
gas_used=1,
success=False,
)
CALL: Call = Call(
call_type="call",
from_address="0xa",
to_address="0xb",
call_value=10,
call_data="0x00000000000000000",
return_value="0xeeee",
status=True,
)
EVENT: Event = Event(contract="0x", topics=["", ""], log_index=1)
class SemanticModelMock:
PARAMETER_SEMANTICS: ParameterSemantics = ParameterSemantics(
parameter_name="name", parameter_type="type"
)
CONTRACT_SEMANTICS: ContractSemantics = ContractSemantics(
code_hash="0x", name="name"
)
|
# copy method of a dictionary
a={1:'bhavya',2:'komal',3:'khushi'}
b=a.copy()
print(b)
|
# Generated by Django 2.1.3 on 2018-11-06 14:45
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0032_auto_20181107_0225'),
]
operations = [
migrations.AddField(
model_name='employee',
name='phone',
            field=models.CharField(default='', max_length=10, null=True),
),
]
|
# -*- coding:utf-8 -*-
import math
import web
def GetRankByHashInternal(assetid,limitnum) :
html = web.GetHeader("rank")
html = html + '<div name="address" align="center">\n'
html = html + '<br/><br/>\n'
html = html + '<h2>'+ _("Rank") +'</h2>\n'
html = html + '<div class="container">\n'
count = web.collection_txs.find({"type":"RegisterTransaction"}).count()
results = web.collection_txs.find({"type":"RegisterTransaction"}).sort("height",1)
row = int(math.ceil(count / 4))
r = 0
for i in range(0, row+1) :
html = html + '<div class="row">\n'
html = html + '<div class="column column-20"></div>\n'
for j in range(0,4) :
if r >= count :
html = html + '<div class="column column-15"></div>\n'
elif assetid == results[r]['txid']:
html = html + '<div class="column column-15"><a href="/rank/' + results[r]['txid'] + '"><b>[' + web.GetAssetNameByAsset(results[r]['asset']) + ']</b></a></div>\n'
else :
html = html + '<div class="column column-15"><a href="/rank/' + results[r]['txid'] + '">[' + web.GetAssetNameByAsset(results[r]['asset']) + ']</a></div>\n'
r = r + 1
html = html + '<div class="column column-20"></div>\n'
html = html + '</div>\n'
html = html + '</div>\n'
html = html + '<br/>\n'
    if assetid is not None :
html = html + '<h4>- '+ web.GetAssetName(assetid) +' -</h4>\n'
html = html + '<div class="container">\n'
html = html + '<table width="80%" border="0" cellpadding="3" cellspacing="0" align="center">'
html = html + '<tr align="left">'
html = html + '<th>' + _('Rank') + '</th><th>' + _('Address') + '</th><th>' + _('Asset') + '</th><th>' + _('Value') + '</th><th>' + _('Transaction Counts') + '</th><th>' + _('Last Transaction Time') + '</th><th>' + _('First Transaction Time') + '</th>' + '<br/>'
html = html + '</tr>'
rank = 0
#results = collection_ads.find({"asset":"c56f33fc6ecfcd0c225c4ab356fee59390af8560be0e930faebe74a6daff7c9b"}).sort("value",-1).limit(200)
results = web.collection_ads.find({"asset":assetid}).sort("value",-1).limit(limitnum)
if results :
for result in results :
rank = rank + 1
html = html + '<tr>'
html = html + '<td>' + str(rank) + '</td>'
html = html + '<td>' + '<a href="/address/' + result['address'] + '">' + result['address'] + '</a></td>'
html = html + '<td>' + web.GetAssetName(result['asset']) + '</td>'
html = html + '<td>' + str(result['value']) + '</td>'
html = html + '<td>' + str(len(result['txid_list'])) + '</td>'
html = html + '<td>' + web.GetLocalTime(result['last_tx_time']) + '</td>'
html = html + '<td>' + web.GetLocalTime(result['first_tx_time']) + '</td>'
html = html + '</tr>'
html = html + '</table>\n'
html = html + '</div>\n'
html = html + '</div>\n'
html = html + web.GetFooter()
return html
|
from blog.models import Contact
from django.utils import timezone
from django.shortcuts import render, redirect
from blog.forms import ContactForm
def contact_new(request):
if request.method == "POST":
form = ContactForm(request.POST)
if form.is_valid():
contact = form.save()
return redirect('post_list')
else:
form = ContactForm()
return render(request, 'blog/contact_new.html', {'form': form})
|
from abc import ABC # Abstract Base Class
import copy
class Term(ABC):
"""
Evaluates to the term's value.
If there are variables (identifiers) in the term, a name-value binding shall be inputted.
"""
def eval(self, binding: dict = None):
raise NotImplementedError()
def get_term_of(self, names: set):
raise NotImplementedError()
class AtomicTerm(Term):
"""
An atomic term of a formula in a condition (e.g., in "x*2 < y + 7" the atomic terms are 2 and 7).
"""
def __init__(self, value: object):
self.value = value
self.simplifiable = True
self.abstract_terms = [{"sign": 1, "term": self, "is_id": False}]
def eval(self, binding: dict = None):
return self.value
def get_term_of(self, names: set):
return self
def __repr__(self):
return str(self.value)
class IdentifierTerm(Term):
"""
A term of a formula representing a single variable (e.g., in "x*2 < y + 7" the atomic terms are x and y).
"""
def __init__(self, name: str, getattr_func: callable):
self.name = name
self.getattr_func = getattr_func
self.simplifiable = True
self.abstract_terms = [{"sign": 1, "term": self, "is_id": True}]
def eval(self, binding: dict = None):
if not type(binding) == dict or self.name not in binding:
raise NameError("Name %s is not bound to a value" % self.name)
return self.getattr_func(binding[self.name])
def get_term_of(self, names: set):
if self.name in names:
return self
return None
def __repr__(self):
return self.name
class BinaryOperationTerm(Term):
"""
A term representing a binary operation.
"""
def __init__(self, lhs: Term, rhs: Term, binary_op: callable, is_Minus=False):
self.lhs = lhs
self.rhs = rhs
self.binary_op = binary_op
self.simplifiable = lhs.simplifiable and rhs.simplifiable
new_rhs_terms = copy.deepcopy(rhs.abstract_terms)
new_lhs_terms = copy.deepcopy(lhs.abstract_terms)
if is_Minus:
for item in new_rhs_terms:
item["sign"] = -item["sign"]
self.abstract_terms = new_lhs_terms + new_rhs_terms
def eval(self, binding: dict = None):
return self.binary_op(self.lhs.eval(binding), self.rhs.eval(binding))
def get_term_of(self, names: set):
raise NotImplementedError()
class PlusTerm(BinaryOperationTerm):
def __init__(self, lhs: Term, rhs: Term):
super().__init__(lhs, rhs, lambda x, y: x + y)
def get_term_of(self, names: set):
lhs = self.lhs.get_term_of(names)
rhs = self.rhs.get_term_of(names)
if lhs and rhs:
return PlusTerm(lhs, rhs)
return None
def __repr__(self):
return "{}+{}".format(self.lhs, self.rhs)
class MinusTerm(BinaryOperationTerm):
def __init__(self, lhs: Term, rhs: Term):
super().__init__(lhs, rhs, lambda x, y: x - y, is_Minus=True)
def get_term_of(self, names: set):
lhs = self.lhs.get_term_of(names)
rhs = self.rhs.get_term_of(names)
if lhs and rhs:
return MinusTerm(lhs, rhs)
return None
def __repr__(self):
return "{}-{}".format(self.lhs, self.rhs)
class MulTerm(BinaryOperationTerm):
def __init__(self, lhs: Term, rhs: Term):
super().__init__(lhs, rhs, lambda x, y: x * y)
self.simplifiable = False
def get_term_of(self, names: set):
lhs = self.lhs.get_term_of(names)
rhs = self.rhs.get_term_of(names)
if lhs and rhs:
return MulTerm(lhs, rhs)
return None
def __repr__(self):
return "{}*{}".format(self.lhs, self.rhs)
class DivTerm(BinaryOperationTerm):
def __init__(self, lhs: Term, rhs: Term):
super().__init__(lhs, rhs, lambda x, y: x / y)
self.simplifiable = False
def get_term_of(self, names: set):
lhs = self.lhs.get_term_of(names)
rhs = self.rhs.get_term_of(names)
if lhs and rhs:
return DivTerm(lhs, rhs)
return None
def __repr__(self):
return "{}/{}".format(self.lhs, self.rhs)
class Formula(ABC):
"""
Returns whether the parameters satisfy the formula. It evaluates to True or False.
If there are variables (identifiers) in the formula, a name-value binding shall be inputted.
"""
def eval(self, binding: dict = None):
pass
def get_formula_of(self, names: set):
pass
def simplify_formula(self, lhs_vars: set, rhs_vars: set, priorities: dict = {}):
"""
Returns a simplified formula where the lhs term consist only of lhs_vars,
and rhs term from only rhs_vars.
returns None if simplification is complicated (one term contains div for example)
"""
return None
class AtomicFormula(Formula): # RELOP: < <= > >= == !=
"""
An atomic formula containing no logic operators (e.g., A < B).
"""
def __init__(self, left_term: Term, right_term: Term, relation_op: callable):
self.left_term = left_term
self.right_term = right_term
self.relation_op = relation_op
self.seperatable_formulas = True
def eval(self, binding: dict = None):
return self.relation_op(self.left_term.eval(binding), self.right_term.eval(binding))
def __repr__(self):
return "{} {} {}".format(self.left_term, self.relation_op, self.right_term)
def simplify_formula_handler(self, lhs_vars: set, rhs_vars: set, priorities: dict = {}):
lhs_term_vars = set()
rhs_term_vars = set()
for item in self.left_term.abstract_terms:
if item["is_id"] == True:
lhs_term_vars.add(item["term"].name)
for item in self.right_term.abstract_terms:
if item["is_id"] == True:
rhs_term_vars.add(item["term"].name)
        # check if the formula is already in simple form
        if lhs_term_vars.issubset(lhs_vars) and rhs_term_vars.issubset(rhs_vars):
return self.left_term, self.right_term
# check if a possible simplification exists
if not (self.left_term.simplifiable and self.right_term.simplifiable):
return None, None
new_lhs_term, new_rhs_term = self.rearrange_terms(lhs_vars, rhs_vars)
return new_lhs_term, new_rhs_term
def rearrange_terms(self, lhs_vars, rhs_vars):
new_lhs_term = AtomicTerm(0)
new_rhs_term = AtomicTerm(0)
(new_lhs_term, new_rhs_term) = self.consume_terms(
self.left_term.abstract_terms, new_lhs_term, new_rhs_term, lhs_vars
)
(new_rhs_term, new_lhs_term) = self.consume_terms(
self.right_term.abstract_terms, new_rhs_term, new_lhs_term, rhs_vars
)
return (new_lhs_term, new_rhs_term)
def consume_terms(self, terms, same_side_term, other_side_term, curr_side_vars):
for cur_term in terms:
if cur_term["is_id"]:
if cur_term["sign"] == 1: # plus
if cur_term["term"].name in curr_side_vars:
same_side_term = PlusTerm(same_side_term, cur_term["term"])
else: # opposite side of the equation, gets opposite sign
other_side_term = MinusTerm(other_side_term, cur_term["term"])
else: # minus
if cur_term["term"].name in curr_side_vars:
same_side_term = MinusTerm(same_side_term, cur_term["term"])
else: # opposite side of the equation, gets opposite sign
other_side_term = PlusTerm(other_side_term, cur_term["term"])
else: # atomic term
if cur_term["sign"] == 1: # plus
same_side_term = PlusTerm(same_side_term, cur_term["term"])
else: # minus
same_side_term = MinusTerm(same_side_term, cur_term["term"])
return (same_side_term, other_side_term)
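    # Rearrangement sketch: simplifying "a + 2 < b - c" with lhs_vars={'a', 'c'}
    # and rhs_vars={'b'} moves -c across the relation as +c, yielding
    # "0+a+2+c < 0+b"; atomic constants always stay on their original side.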
def dismantle(self):
return (
self.left_term,
self.get_relop(),
self.right_term,
)
def get_relop(self):
"""
return the relop of the current AtomicFormula ( < <= > >= == != )
"""
return None
    def rank(self, lhs_vars: set, rhs_vars: set, priorities: dict):
        """
        Returns the priority of the current formula according to a dictionary of
        attribute priorities, e.g. {'a': 5, 'b': 10, 'c': 8}.
        The rank is computed by multiplying the attribute priorities, to best
        reflect the frequencies of attribute combinations; choosing priorities
        according to attribute frequencies maximizes the optimization benefit.
        """
simplified_lhs, simplified_rhs = self.simplify_formula_handler(lhs_vars, rhs_vars)
if simplified_lhs is None:
return -1
rank = 1
        for attr in simplified_lhs.abstract_terms:
            if attr["is_id"]:
                if attr["term"].name in priorities:
                    rank *= priorities[attr["term"].name]
                else:
                    rank += 1
        for attr in simplified_rhs.abstract_terms:
            if attr["is_id"]:
                if attr["term"].name in priorities:
                    rank *= priorities[attr["term"].name]
                else:
                    rank += 1
return rank
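    # Rank sketch (hypothetical priorities): for "a + 1 < b" with
    # priorities={'a': 5, 'b': 10}, rank starts at 1, multiplies by 5 for a and
    # by 10 for b, returning 50; attributes missing from the dict add 1 instead.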
class EqFormula(AtomicFormula):
def __init__(self, left_term: Term, right_term: Term):
super().__init__(left_term, right_term, lambda x, y: x == y)
def get_formula_of(self, names: set):
right_term = self.right_term.get_term_of(names)
left_term = self.left_term.get_term_of(names)
if left_term and right_term:
return EqFormula(left_term, right_term)
return None
def simplify_formula(self, lhs_vars: set, rhs_vars: set, priorities: dict = {}):
new_lhs_term, new_rhs_term = self.simplify_formula_handler(lhs_vars, rhs_vars, priorities)
return EqFormula(new_lhs_term, new_rhs_term) if new_lhs_term is not None else None
def __repr__(self):
return "{} == {}".format(self.left_term, self.right_term)
def get_relop(self):
return "=="
class NotEqFormula(AtomicFormula):
def __init__(self, left_term: Term, right_term: Term):
super().__init__(left_term, right_term, lambda x, y: x != y)
def get_formula_of(self, names: set):
right_term = self.right_term.get_term_of(names)
left_term = self.left_term.get_term_of(names)
if left_term and right_term:
return NotEqFormula(left_term, right_term)
return None
def simplify_formula(self, lhs_vars: set, rhs_vars: set, priorities: dict = {}):
new_lhs_term, new_rhs_term = self.simplify_formula_handler(lhs_vars, rhs_vars, priorities)
return NotEqFormula(new_lhs_term, new_rhs_term) if new_lhs_term is not None else None
def __repr__(self):
return "{} != {}".format(self.left_term, self.right_term)
def get_relop(self):
return "!="
class GreaterThanFormula(AtomicFormula):
def __init__(self, left_term: Term, right_term: Term):
super().__init__(left_term, right_term, lambda x, y: x > y)
def get_formula_of(self, names: set):
right_term = self.right_term.get_term_of(names)
left_term = self.left_term.get_term_of(names)
if left_term and right_term:
return GreaterThanFormula(left_term, right_term)
return None
def simplify_formula(self, lhs_vars: set, rhs_vars: set, priorities: dict = {}):
new_lhs_term, new_rhs_term = self.simplify_formula_handler(lhs_vars, rhs_vars, priorities)
return GreaterThanFormula(new_lhs_term, new_rhs_term) if new_lhs_term is not None else None
def __repr__(self):
return "{} > {}".format(self.left_term, self.right_term)
def get_relop(self):
return ">"
class SmallerThanFormula(AtomicFormula):
def __init__(self, left_term: Term, right_term: Term):
super().__init__(left_term, right_term, lambda x, y: x < y)
def get_formula_of(self, names: set):
right_term = self.right_term.get_term_of(names)
left_term = self.left_term.get_term_of(names)
if left_term and right_term:
return SmallerThanFormula(left_term, right_term)
return None
def simplify_formula(self, lhs_vars: set, rhs_vars: set, priorities: dict = {}):
new_lhs_term, new_rhs_term = self.simplify_formula_handler(lhs_vars, rhs_vars, priorities)
return SmallerThanFormula(new_lhs_term, new_rhs_term) if new_lhs_term is not None else None
def __repr__(self):
return "{} < {}".format(self.left_term, self.right_term)
def get_relop(self):
return "<"
class GreaterThanEqFormula(AtomicFormula):
def __init__(self, left_term: Term, right_term: Term):
super().__init__(left_term, right_term, lambda x, y: x >= y)
def get_formula_of(self, names: set):
right_term = self.right_term.get_term_of(names)
left_term = self.left_term.get_term_of(names)
if left_term and right_term:
return GreaterThanEqFormula(left_term, right_term)
return None
def simplify_formula(self, lhs_vars: set, rhs_vars: set, priorities: dict = {}):
new_lhs_term, new_rhs_term = self.simplify_formula_handler(lhs_vars, rhs_vars, priorities)
return GreaterThanEqFormula(new_lhs_term, new_rhs_term) if new_lhs_term is not None else None
def __repr__(self):
return "{} >= {}".format(self.left_term, self.right_term)
def get_relop(self):
return ">="
class SmallerThanEqFormula(AtomicFormula):
def __init__(self, left_term: Term, right_term: Term):
super().__init__(left_term, right_term, lambda x, y: x <= y)
def get_formula_of(self, names: set):
right_term = self.right_term.get_term_of(names)
left_term = self.left_term.get_term_of(names)
if left_term and right_term:
return SmallerThanEqFormula(left_term, right_term)
return None
def simplify_formula(self, lhs_vars: set, rhs_vars: set, priorities: dict = {}):
new_lhs_term, new_rhs_term = self.simplify_formula_handler(lhs_vars, rhs_vars, priorities)
return SmallerThanEqFormula(new_lhs_term, new_rhs_term) if new_lhs_term is not None else None
def __repr__(self):
return "{} <= {}".format(self.left_term, self.right_term)
def get_relop(self):
return "<="
class BinaryLogicOpFormula(Formula): # AND: A < B AND C < D
"""
A formula composed of a logic operator and two nested formulas.
"""
def __init__(self, left_formula: Formula, right_formula: Formula, binary_logic_op: callable):
self.left_formula = left_formula
self.right_formula = right_formula
self.binary_logic_op = binary_logic_op
self.seperatable_formulas = left_formula.seperatable_formulas and right_formula.seperatable_formulas
self.formula_to_sort_by = None
def eval(self, binding: dict = None):
return self.binary_logic_op(self.left_formula.eval(binding), self.right_formula.eval(binding))
def extract_atomic_formulas(self):
"""
given a BinaryLogicOpFormula, returns its atomic formulas as a list; for
example, the formula (f1 OR f2 AND f3) yields [f1, f2, f3]
"""
# preorder traversal of the formula tree, collecting only the leaves (atomic formulas)
formulas_stack = []
formulas_stack.append(self.right_formula)
formulas_stack.append(self.left_formula)
atomic_formulas = []
while formulas_stack:
curr_form = formulas_stack.pop()
if isinstance(curr_form,AtomicFormula):
atomic_formulas.append(curr_form)
else:
formulas_stack.append(curr_form.right_formula)
formulas_stack.append(curr_form.left_formula)
return atomic_formulas
def dismantle(self):
if self.formula_to_sort_by is None:
return None,None,None
return (
self.formula_to_sort_by.left_term,
self.formula_to_sort_by.get_relop(),
self.formula_to_sort_by.right_term,
)
class AndFormula(BinaryLogicOpFormula): # AND: A < B AND C < D
def __init__(self, left_formula: Formula, right_formula: Formula):
super().__init__(left_formula, right_formula, lambda x, y: x and y)
def get_formula_of(self, names: set):
right_formula = self.right_formula.get_formula_of(names)
left_formula = self.left_formula.get_formula_of(names)
if left_formula is not None and right_formula is not None:
return AndFormula(left_formula, right_formula)
if left_formula:
return left_formula
if right_formula:
return right_formula
return None
def __repr__(self):
return "{} AND {}".format(self.left_formula, self.right_formula)
def simplify_formula(self, lhs_vars: set, rhs_vars: set, priorities: dict = {}):
if not self.seperatable_formulas:
return None
# here we know the formula has the structure (f1 AND f2 AND ... AND fn);
# we need to decide which formula to simplify (according to the priorities)
atomic_formulas = self.extract_atomic_formulas()
# the default is to simplify according to the first formula (f1) unless priorities are given
atomic_formulas_ranking = [ f.rank(lhs_vars,rhs_vars,priorities) for f in atomic_formulas ]
max_rank = max(atomic_formulas_ranking)
# rank==-1 in case the formula f is not simple.
if max_rank == -1:
return None
index_of_form_to_simplify = atomic_formulas_ranking.index(max_rank)
atomic_formulas[index_of_form_to_simplify] = atomic_formulas[index_of_form_to_simplify].simplify_formula(lhs_vars,rhs_vars)
new_and_form = AndFormula(atomic_formulas[0],atomic_formulas[1])
for index in range(len(atomic_formulas)-2):
new_and_form = AndFormula(new_and_form,atomic_formulas[index+2])
new_and_form.formula_to_sort_by = atomic_formulas[index_of_form_to_simplify]
return new_and_form
class TrueFormula(Formula):
def eval(self, binding: dict = None):
return True
def __repr__(self):
return "True Formula"
|
"""
What is a thread lock?
The goal is to lock a section of code: once a thread acquires the lock, no other
code can acquire it until the lock is released.
Why do we need thread locks?
When multiple threads perform the same operation concurrently, the operation is
usually not atomic, so a thread can be interrupted partway through while another
thread runs, which can produce dirty data.
For example, suppose a thread reads a variable n (initially 1), increments it,
and then outputs n. If the thread is interrupted right after incrementing and
another thread does the same work, both threads write the same incremented
value, so n ends up 2 instead of 3.
That is why n++ must be guarded by a lock, making it atomic, with the lock
released only once the operation completes.
"""
from threading import Thread, Lock, currentThread
from time import sleep, ctime
import random
from atexit import register
lock = Lock()
def fun():
# lock.acquire()  # acquire the lock
for i in range(5):
print("Thread Name = ",currentThread().name, "i =", i)
sleep(random.randint(1,5))
# lock.release()  # release the lock
def myfun():
for i in range(3):
Thread(target=fun).start()
@register
def exit():
print("线程执行结束:",ctime())
myfun()
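
# A minimal runnable sketch of the race described in the docstring above: two
# threads each increment a shared counter; "counter += 1" is a read-modify-write
# sequence, so without the lock some updates are typically lost.
from threading import Thread, Lock

counter = 0
counter_lock = Lock()

def increment(times):
    global counter
    for _ in range(times):
        with counter_lock:  # remove this guard to observe lost updates
            counter += 1

workers = [Thread(target=increment, args=(100000,)) for _ in range(2)]
for t in workers:
    t.start()
for t in workers:
    t.join()
print("final counter:", counter)  # 200000 with the lock; often less without it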
|
inputs = [
'hotkey01',
'hotkey82',
'hotkey30',
's',
'hotkey21',
'hotkey00',
'hotkey60',
'hotkey41',
'hotkey80',
'hotkey90',
'sMineral',
'hotkey10',
'hotkey91',
'hotkey71',
'hotkey81',
'hotkey40',
'hotkey31',
'hotkey12',
'sBase',
'hotkey62',
'hotkey42',
'hotkey11',
'hotkey02',
'hotkey50',
'hotkey61',
'hotkey70',
'hotkey22',
'hotkey52',
'hotkey20',
'hotkey72',
'hotkey32',
'hotkey92',
'hotkey51'
]
players = [
'/YongHwa/',
'/ParalyzE/',
'/INnoVation/',
'/LiquidTLO/',
'/PartinG/',
'/Lilbow/',
'/sOs/',
'/WhiteRa/',
'/Super/',
'/Symbol/',
'/ByuL/',
'/iGJim/',
'/Stardust/',
'/FanTaSy/',
'/Soulkey/',
'Life/',
'/Leenock/',
'/Kane/',
'/Stats/',
'/soO/',
'/DRG/',
'/MaNa/',
'/Rain/',
'/Bbyong/',
'/Rogue/',
'/Dream/',
'/yoeFWSan/',
'/Zest/',
'/viOLet/',
'/Hydra/',
'/TYTY/',
'/MǂForGG/',
'/Classic/',
'/Bunny/',
'/Life/',
'/CMStormPolt/',
'/ForGG/',
'/Sen/',
'/LiquidSnute/',
'/Polt/',
'/Pigbaby/',
'/True/',
'/Curious/',
'/VortiX/',
'/YoDa/',
'/HuK/',
'/Welmu/',
'/Nerchio/',
'/hydra/',
'/Happy/',
'/Dear/',
'/LiquidBunny/',
'/MyuNgSiK/',
'/FireCake/',
'/JJAKJI/',
'/Cure/',
'/Maru/',
'/AxHeart/',
'/Solar/',
'/iaguz/',
'/Trap/',
'/herO/',
'/MMA/',
'/Golden/',
'/Stork/',
'/MinChul/',
'/ShoWTimE/',
'/LiquidTaeJa/',
'/Dark/',
'/Bomber/',
'/iGXiGua/',
'/HyuN/'
]
races = ['Terran', 'Protoss', 'Zerg']
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
import numpy as np
train_size = 4099
test_size = 1025
le = LabelEncoder()
le.fit(races)
X = []
y = []
for line in open('train.csv', 'r'):
train = line.split(',')
# the first comma-field is "player;race"
[player, race] = train[0].split(';')
if race == 'Terran\n':  # guard against a trailing newline on the race field
race = 'Terran'
# count how often each known input (hotkey/selection action) occurs
feature = [0 for i in range(len(inputs))]
for i in range(1, len(train), 2):
feature[inputs.index(train[i])] += 1
# normalize the counts to frequencies
normalize_term = sum(feature)
if normalize_term != 0:
feature = [x/normalize_term for x in feature]
# append the label-encoded race as an extra feature
feature.append(le.transform([race])[0])
X.append(feature)
y.append(train[0])  # the label is the raw "player;race" field
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
dtc = DecisionTreeClassifier()
dtc.fit(X_train, y_train)
print(dtc.score(X_test, y_test))
#print(X_train[0])
#print(Y_train[0])
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jun 23 18:53:54 2017
@author: root
"""
from brian2 import *
from numpy import *
from matplotlib.pyplot import *
from helpers import *
ca()
timelength = 200
Sig = zeros([timelength])
for ind in range(timelength):
if ind > 30:
Sig[ind] = 1.1
else:
Sig[ind] = 0
start_scope()
Npred = 10 # latent variable neurons
NEe = 16 # excitatory error layer neurons
NEi = 4 # inhibitory error layer neurons
tau = 10*ms
sig = TimedArray(Sig,dt = 2*ms)
eqsE = ''' dv/dt = (-v + sig(t) + I)/tau : 1 (unless refractory)
I : 1 '''
eqs = ''' dv/dt = (-v + I)/tau : 1 (unless refractory)
I : 1 '''
E = NeuronGroup(NEe+NEi, eqsE, threshold = 'v > 1', reset = 'v = 0', refractory = 5*ms)
Ee = E[:NEe]
Ei = E[NEe:]
Y = NeuronGroup(Npred, eqs, threshold = 'v > 1', reset = 'v = 0', refractory = 5*ms)
SynIE = Synapses(Ei,Ee,'w : 1',on_pre = 'v_post -= 0.3')
SynIE.connect(p=0.2)
SynIE.w = 'rand()'
SynYE = Synapses(Y,Ei, 'w : 1', on_pre = 'v_post += 0.5')
SynYE.connect(p=0.2)
SynYE.w = 'rand()'
SynEY = Synapses(Ee,Y, 'w : 1',on_pre = 'v_post += 0.5')
SynEY.connect(p=0.2)
SynEY.w = 'rand()'
SmE = SpikeMonitor(E)
SmY = SpikeMonitor(Y)
run(400*ms)
figure(), plot(SmE.t/ms,SmE.i,'.')
figure(), plot(SmY.t/ms,SmY.i,'.')
|
from operator import truediv
def representation(zone_pop, rep_req):
rep_total = 0
result = []
population_total = sum(zone_pop)
for population in zone_pop:
# rep = (population / population_total) * rep_req # Python 3
rep = truediv(population, population_total) * rep_req
# current = round(rep) or 1 # Python 3
current = round(rep) if rep % 1 > 0.5 else int(rep) or 1
rep_total += current
result.append(current)
diff = rep_total - rep_req
# for _ in range(abs(int(diff))): # Python 3
for _ in xrange(abs(int(diff))):
if diff < 0:
result[result.index(min(result))] += 1
diff += 1
else:
result[result.index(max(result))] -= 1
diff -= 1
return result
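# Hypothetical usage sketch (Python 2, matching the xrange above): apportion
# 10 representatives across three zones by population.
# >>> representation([1500, 300, 200], 10)
# [7, 2, 1]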
|
"""
__author__ ='Nijesh'
"""
import os
import pandas as pd
import numpy as np
import json
import re
from itertools import izip_longest
class ValidationError(Exception):
def __init__(self,message):
super(ValidationError, self).__init__(message)
def __repr__(self):
return self.message
class WeightValidate(object):
"""
Class for validating a weight file
"""
def __init__(self, path, filename, filetype='csv'):
"""
:param path: the file path
:param filename: the file name of the weight file
:param filetype: type of the weight file; defaults to csv
"""
self.path = path
self.filename = filename
self.full_path = os.path.join(self.path, self.filename)
self.filetype = filetype
def weightfile_nan(self):
"""
NaN validation method
:return: True if the NaN validation succeeds; raises ValidationError otherwise
"""
df = pd.read_csv(self.full_path)
weights_col = df['Weight']
result = weights_col.isnull().values.any()
index = df['Weight'].index[df['Weight'].apply(np.isnan)]
df_index = df.index.values.tolist()
nan_rows = [df_index.index(i) for i in index]
if result:
raise ValidationError("NaN Validation Failed. Weight File has Empty/NaN/Invalid entries "
"at Indices {0}".format(nan_rows))
else:
return True
@staticmethod
def get_network_struct():
"""
Static method for reading the network structure
:return: shape of the network (and bias flags) read from the DNN.txt file
"""
networkpath = os.path.abspath('../data/DNN.txt')
with open(networkpath, 'r') as jsonfile:
content = json.load(jsonfile)
list_layers = content['layers']
input_layer = list_layers[0]
output_layer = list_layers[-1]
num_input_nodes = input_layer['Input']['nodes']
num_output_layer_nodes = output_layer['Output']['nodes']
list_layer = list_layers[1:-1]
hlayers = []
hlayernodes = []
dicts ={'false':False ,'true':True}
for each in list_layer:
for key in each.keys():
match = re.match("^HL[0-9]$", key)
if match:
hlayers.append(str(match.group(0)))
hiddenbias = []
for i, layer in enumerate(hlayers):
temp = list_layer[i]
num_nodes = temp[layer]['nodes']
bias = temp[layer]['addBias'].lower()
bias = dicts[bias]
hiddenbias.append(bias)
if not bias:
hlayernodes.append(int(num_nodes))
else:
hlayernodes.append(int(num_nodes)+int(1))
input_bias = input_layer['Input']['addBias'].lower()
input_bias = dicts[input_bias]
output_bias = output_layer['Output']['addBias'].lower()
output_bias = dicts[output_bias]
if input_bias:
num_input_nodes = int(num_input_nodes)+int(1)
if output_bias:
num_output_layer_nodes = int(num_output_layer_nodes) + int(1)
shape = (int(num_input_nodes), hlayernodes, int(num_output_layer_nodes))
biases = (input_bias,hiddenbias,output_bias)
return shape,biases
def weight_count_validate(self):
"""
Weight-count validation for the weight matrices at each intermediate layer
:return: True if the validation succeeds
"""
# Node Count from Network Structure File
global hidden_diff
shape,biases = WeightValidate.get_network_struct()
input_node = shape[0]
output_node = shape[-1]
hidden_nodes = shape[1:-1][0]
df = pd.read_csv(self.full_path)
source_layer, source_node = df['Source Layer'], df['Source Node']
destination_layer, destination_node, weights = df['Destination Layer'], df['Destination Node'], df['Weight']
hiddenbias = biases[1]
if hiddenbias[0]:
input_matrix_num_weights = (input_node) * (hidden_nodes[0]-1)
else:
input_matrix_num_weights = (input_node) * (hidden_nodes[0])
output_matrix_num_weights = (hidden_nodes[-1]) * output_node
hidden_matrix_num_weights = []
for i, num in enumerate(hidden_nodes):
try:
bias_for_index_next = hiddenbias[i+1]
if bias_for_index_next:
hidden_weights = num * (hidden_nodes[i + 1]-1)
else:
hidden_weights = num * (hidden_nodes[i + 1])
hidden_matrix_num_weights.append(hidden_weights)
except IndexError:
pass
network_file_output = (input_matrix_num_weights, hidden_matrix_num_weights, output_matrix_num_weights)
# Input Node Count from Weights File
input_df_len = len(df[source_layer == 0]['Weight'])
dest = destination_layer.max()
output_df_len = len(df[destination_layer == dest]['Weight'])
hidden_df_len = []
for each in range(1, dest - 1):
hidden_df_len.append(len(df[source_layer == each]['Weight']))
weight_file_output = (input_df_len, hidden_df_len, output_df_len)
if network_file_output == weight_file_output:
return True
else:
temp =[]
weight_file = (weight_file_output[0],weight_file_output[2])
network_file = (network_file_output[0],network_file_output[2])
result = abs(np.subtract(weight_file,network_file))
hidden_layer_weight = weight_file_output[1]
hidden_layer_network = network_file_output[1]
for items in izip_longest(hidden_layer_weight,hidden_layer_network):
items = np.array(items, dtype=float)
temp.append(items)
hidden_diff = abs(np.diff(temp))
raise ValidationError("Weight Matrix Count Validation Failed \nThe Computation from Weights" \
" File didn\'t match the records in NetworkStructure File.\nThe Number of connection"
" Weights missed layerwise is {0}.\n\nThe Second element which is a list shows an"
" array with each hidden layer and a 'nan' if present, signifies layer "
" mismatch between network structure file and weight file.\nIgnore the floating values"
" in the hidden layer difference as its a NaN float array;treat it as whole number "
" file".format((result[0],hidden_diff.tolist(),result[1])))
def file_format_validate(self):
"""
Format validation against the reference weights.txt file
:return: True if the validation succeeds
"""
networkpath = os.path.abspath('../config/weights.txt')
header = ['Source Layer', 'Source Node', 'Destination Layer', 'Destination Node', 'Weight']
df = pd.read_csv(networkpath, delimiter='\t', names=header)
weight_df = pd.read_csv(self.full_path)
# Check of Dimension
bools = (df.shape[1] == weight_df.shape[1])
if not bools:
raise ValidationError(""
"Validation Failed."
"\nShape Mismatch of weights file "
"with weights.txt file during File Format validation")
# Weights are Numeric
df_format = df['Weight'].dtypes
bool_num = df_format == weight_df['Weight'].dtypes == 'float64'
if not bool_num:
raise TypeError(""
"Validation Failed."
"\nCheck the Weight file for data type mismatch")
# position of Weight Vector
pos = df.columns.get_loc("Weight") == weight_df.columns.get_loc("Weight")
if not pos:
raise ValidationError(""
"Validation Failed."
"\nPosition Mismatch found during File Format Validation")
if pos and bools and bool_num:
return True
def redundancy_check(self):
"""
Returns: False if there are no redundant rows in weights file
"""
df = pd.read_csv(self.full_path)
result = df.duplicated(['Source Layer', 'Source Node', 'Destination Layer', 'Destination Node'])
result = result.tolist()
if any(result):
raise ValidationError("Validation Failed.\n"
"Redundancy Validation Failed at Row Index{0}"
" of the generated Weights file".format(np.where(result)[0]))
else:
return False
def validate(self):
"""
Returns: True if all validations succeed. Acts as a driver.
Usage: from validate_weights import WeightValidate
weights = WeightValidate('C:\\ANN\\ANN_Source\\Data', 'weights.csv')
if weights.validate():
Handle the Boolean Here"
"""
if self.file_format_validate() and not self.redundancy_check() and self.weightfile_nan() \
and self.weight_count_validate():
return True
|
from flask import Flask, request
from multiprocessing import Process
import RPi.GPIO as GPIO
#Alexa
from flask_ask import Ask, statement
import spidev
import time
#Twilio
from twilio.twiml.messaging_response import MessagingResponse
from .notifications import send_sms
from .notifications import send_email
# for testing purposes
def default(a, b):
return a + b
def createSPI(bus, device):
spi = spidev.SpiDev()
spi.open(bus, device)
spi.max_speed_hz = 1000000
spi.mode = 0
return spi
def requestButton():
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BCM)
GPIO.setup(21, GPIO.IN, pull_up_down=GPIO.PUD_UP)
while (True):
button_state = GPIO.input(21)  # avoid shadowing the built-in input()
if button_state == False:  # pulled low when the button is pressed
print("Button Pressed")
send_sms.sendSMS(
'+19098272197', '+12058329927',
"Request to open R'Mailbox: Reply with Y/y or N/n.")
time.sleep(1)
def packageNotify():
mailNotification = 1
while (True):
atmegaSPI = createSPI(0, 0)
#SPI Command to Atmega
Send_Status = 0x10
atmegaSPI.xfer([Send_Status])
isPackage = atmegaSPI.readbytes(1)[0]
print("Package Status", isPackage)
atmegaSPI.close()
if isPackage:
print("Package detected")
if mailNotification == 1:
mailNotification = 0
print("Sending package notification")
send_sms.sendSMS(
'+19098272197', '+12058329927',
"You have received a new package! The RMailbox will keep it safe until you retrieve it."
)
send_email.sendEmail()
elif not isPackage:
print("No package")
if mailNotification != 1:
mailNotification = 1
else:
print("Unknown signal")
time.sleep(3)
app = Flask(__name__)
ask = Ask(app, '/')
@app.route("/sms", methods=['GET', 'POST'])
def sms_reply():
# Get message body of incoming message
body = request.values.get('Body', None)
# Start Response
resp = MessagingResponse()
# Determine Correct Response
if body == 'Y' or body == 'y':
atmegaSPI = createSPI(0, 0)
print('Unlocking door')
#SPI Command to Atmega
Unlock_Door = 0x20
atmegaSPI.xfer([Unlock_Door])
atmegaResponse = atmegaSPI.readbytes(1)[0]
print(hex(atmegaResponse))
atmegaSPI.close()
resp.message("Access granted to R'Mailbox.")
elif body == 'N' or body == 'n':
resp.message("Access denied to R'Mailbox.")
else:
resp.message("Please respond with Y/y or N/n")
return str(resp)
@ask.intent('IRIntent')
def isMail():
atmegaSPI = createSPI(0, 0)
#SPI Command to Atmega
Send_Status = 0x10
atmegaSPI.xfer([Send_Status])
isPackage = atmegaSPI.readbytes(1)[0]
print("Current Package Status: ", isPackage)
atmegaSPI.close()
if isPackage:
return statement('You have mail')
return statement('There is currently no mail')
if __name__ == '__main__':
try:
flaskServer = Process(target=app.run, kwargs=dict(debug=True))
requestOpen = Process(target=requestButton)
#notifications = Process(target=packageNotify)
#http://jhshi.me/2015/12/27/handle-keyboardinterrupt-in-python-multiprocessing/index.html#.XtBUiDpKj-g
#Fixes errors on keyboard interrupts
flaskServer.daemon = True
requestOpen.daemon = True
#notifications.daemon = True
flaskServer.start()
requestOpen.start()
#notifications.start()
notifications = packageNotify()
except KeyboardInterrupt:
flaskServer.terminate()
requestOpen.terminate()
#notifications.terminate()
exit()
|
# Copyright (c) 2014 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'targets': [
{
'target_name': 'target',
'product_name': 'Product',
'type': 'shared_library',
'mac_bundle': 1,
'sources': [
'<(PRODUCT_DIR)/copy.c',
],
'actions': [
{
'action_name': 'Helper',
'description': 'Helps',
'inputs': [ 'source.c' ],
'outputs': [ '<(PRODUCT_DIR)/copy.c' ],
'action': [ 'cp', '${SOURCE_ROOT}/source.c',
'<(PRODUCT_DIR)/copy.c' ],
},
],
},
],
}
|
#MINI SHOPPING CLI PROGRAM
import time
import random
print("WELCOME TO MY CART SHOP")
item = ["KITCHEN-SET","BLAZERS","WILD CRAFT SCHOOL BAGS","BEDSHEETS"]
cost = ["Rs 3600","Rs 4000","Rs 1250","900"]
x = []
l = []
print()
for i,j in zip(item,cost):
print(i,"=",j)
print()
choice = 'y'  # start with "yes" so the loop below runs at least once
count = 0
while choice == 'y' or choice == 'YES' or choice == 'yes' or choice =='Y':
print()
user_1 = input("Enter the name of the item which you would like to purchase : ")
print()
user_2 = int(input("Enter the cost of the item which you would like to purchase : {} ".format("Rs")))
print()
if user_2 in (3600, 4000, 1250, 900):  # accept only the listed prices
l.append(user_2)
else:
print()
print("WAIT........ SECONDS.")
time.sleep(1)
break
if user_1 in item:
x.append(user_1)
print()
print(user_1,"has been added to your cart.")
print()
count += 1
else:
print()
print("Item not found.Try again.")
choice = input("Do you want to add any other item to your cart : ")
while choice == 'n' or choice == 'no' or choice == 'NO' or choice == 'N':
print()
print("There are",count,"items in your cart")
print()
print("Total : {}".format("Rs"),sum(l))
print()
user_4 = input("Proceed to checkout (y/n) : ")
if user_4 == 'n':
print()
print("ABORTING IN 5 SECONDS")
time.sleep(5)
break
else:
print()
user_5 = input("Select payment method (Credit/Debit) : ")
print()
print("PROCESSING")
r = 0
while r <= 2:
print(".")
time.sleep(1)
r += 1
print()
print("CAPTCHA GENERATION")
b = 0
while b <= 3:
print(".")
time.sleep(1)
b += 1
print()
print("Enter the captcha given below.")
print()
captcha = random.randint(111111,999999)
print(captcha)
print()
user_6 = input()
if user_6 != str(captcha):  # input() returns a string; compare as strings
print("WRONG CAPTCHA. ABORTING IN 5 SECONDS.")
time.sleep(5)
break
f = 0
print()
print("AWAITING IMFORMATION.")
while f <= 5:
print(".")
time.sleep(1)
f += 1
print()
print("TRANSACTION SUCCESSFUL.")
print()
print("**************************THANK YOU FOR CHOSING MY CART SHOP*****************************")
time.sleep(15)
break
|
def print_sum():
global a,b
a=100
b=200
result = a + b
print(f"print_sum() 내부 : a = {a}, b = {b}, result = {result}")
a=10
b=20
result = a+b
print(f"print_sum() 이전 : a = {a}, b = {b}, result = {result}")
print_sum()
result = a+b
print(f"print_sum() 외부 : a = {a}, b = {b}, result = {result}")
|
# Exercise 4.1 - Book
n1 = int(input('Enter a number: '))
n2 = int(input('Enter another number: '))
if n1 > n2:
print(n1)
if n2 > n1:
print(n2)
|
import pkg_resources
def iter_bundles():
for entry in pkg_resources.iter_entry_points("trytls.bundles"):
yield entry.name
def load_bundle(name):
for entry in pkg_resources.iter_entry_points("trytls.bundles", name):
return entry.load()
return None
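# Hedged usage sketch: the two helpers above follow the standard pkg_resources
# entry-point protocol, so a distribution would advertise a bundle roughly like
# this in its setup.py (all names below are hypothetical):
#
#     entry_points={"trytls.bundles": ["example = mypkg.bundles:EXAMPLE"]}
#
# after which iter_bundles() yields "example" and load_bundle("example")
# returns the loaded mypkg.bundles.EXAMPLE object (or None if no entry matches).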
|
#Sorting
"Numpy has a function called sort() which will sort an array"
import numpy as np
arr = np.array([1,4,3,1,2,51,12,33,5])
print(np.sort(arr))
"This worls doe all data types and dimensioanl arrays"
"In a multidimensional array it will sort the elements in each array"
|
# coding=utf-8
__author__ = 'Insolia'
from bank.models import *
import csv
from transliterate import translit
from django.contrib.auth.models import Group
import random
from bank.constants import *
import string
def get_pd(leng):
a = random.sample(string.printable[:62], leng)
s = ''
for c in a:
s = s+c
return s
### execfile('Yanyshev_sucks.py')
p_out = open('meta_files/new_passwords.txt', 'w')
'''
for u in User.objects.filter(groups__name='pioner'):
pd = get_pd(8)
u.set_password(pd)
u.save()
print u.account + 'login: ' + u.username + ' password: ' + pd
info = u.account + '\n' + 'login: ' + u.username + ' password: ' + pd
p_out.write(info.encode('utf-8'))
p_out.write('\n ' + '--'*30 +' \n\n')
'''
for u in User.objects.filter(groups__name='pedsostav'):
pd = get_pd(12)
u.set_password(pd)
u.save()
print 'login: ' + u.username + ' password: ' + pd
info = '\n' + 'login: ' + u.username + ' password: ' + pd
p_out.write(info.encode('utf-8'))
p_out.write('\n ' + '--'*30 +' \n\n')
|
# dict_to_csv.py
import csv
dict_sample = {'name': 'LinuxHint', 'city': 'CA', 'education': 'Engineering'}
with open('data.csv', 'w', newline='') as f:
writer = csv.writer(f)  # use the imported csv module rather than raw writes
for key, value in dict_sample.items():
writer.writerow([key, value])
|
import matplotlib
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from imblearn.over_sampling import SMOTE
from imblearn.over_sampling import BorderlineSMOTE
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from lightgbm import LGBMClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.model_selection import train_test_split
from sklearn import preprocessing
from sklearn.metrics import accuracy_score
from xgboost import XGBClassifier
from imblearn.pipeline import Pipeline as imbpipeline
from sklearn.model_selection import KFold,StratifiedKFold
from sklearn.model_selection import cross_val_score, GridSearchCV
from sklearn.metrics import accuracy_score, roc_auc_score, roc_curve, \
classification_report
import warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)
warnings.filterwarnings("ignore", category=FutureWarning)
warnings.filterwarnings("ignore", category=UserWarning)
# to display all columns and rows:
pd.set_option('display.max_columns', None); pd.set_option('display.max_rows', None);
pd.set_option("display.float_format", lambda x : "%5f" %x)
pd.read_csv("C:/Users/Erdal/PycharmProjects/dsmlbc/datasets/churn.csv")
def load_df():
df = pd.read_csv("C:/Users/Erdal/PycharmProjects/dsmlbc/datasets/churn.csv",index_col=0)
df.drop("CustomerId",axis=1,inplace=True)
return df
df = load_df()
# Explanatory Data Analysis (EDA) & Data Visualization
df = load_df()
df.head()
df.tail()
df.shape
df.info()
df.columns
df.index
df.describe().T
df.isnull().values.any()
df.isnull().sum()
# Categorical Variable Analysis
cat_cols = [col for col in df.columns if len(df[col].value_counts())<12 and col not in "Exited"]
for i in cat_cols:
sns.countplot(x=i, data=df)
plt.show()
def cat_summary_long(data, categorical_cols, number_of_classes=10):
var_count = 0
vars_more_classes = []
for var in data:
if var in categorical_cols:
print(pd.DataFrame({var: data[var].value_counts(),
"Ratio": 100 * data[var].value_counts() / len(data)}), end="\n\n")
var_count += 1
else:
vars_more_classes.append(var)
print("{} categorical variables have been described".format(var_count), end="\n\n")
print("There are {} variables have more than {} classes".format(len(vars_more_classes), number_of_classes),
end="\n\n")
print("Variables name have more than {} classes".format(number_of_classes), end="\n")
print(vars_more_classes)
cat_summary_long(df,cat_cols,12)
# Numerical Variable Analysis
df.describe().T
df.describe([0.05, 0.10, 0.25, 0.50, 0.75, 0.80, 0.90, 0.95, 0.99]).T
num_cols = [col for col in df.columns if len(df[col].value_counts())> 12 and df[col].dtypes !="O"]
def hist_for_nums(data, numeric_cols):
col_counter = 0
data = data.copy()
for col in numeric_cols:
data[col].hist(bins =20)
plt.xlabel(col)
plt.title(col)
plt.show()
sns.boxplot(x=col,data=data)
plt.show()
col_counter += 1
print(col_counter,"variables have been plotted")
hist_for_nums(df,num_cols)
#df[Balance] == 0
# Target Analysis
df["Exited"].value_counts()
# Target Analysis Based on Categorical Variables
df.groupby("Gender")["Exited"].mean()
def target_summary_with_cat(data, target,cat_names):
for var in cat_names:
print("\t\t\t\t",var,"\n\n",pd.DataFrame({target+"_MEAN" : data.groupby(var)[target].mean(),var+"_COUNT": data[var].value_counts(),
var+"_RATIO": 100 * data[var].value_counts() / len(data)}), end = "\n\n\n")
target_summary_with_cat(df,"Exited",cat_cols)
# Target Analysis Based on Numeric Variables
df.groupby("Exited")["Age"].mean()
def target_summary_with_num(data, target, num_names):
for col in num_names:
print(pd.DataFrame({col: data.groupby(target)[col].median()}), end="\n\n\n")
target_summary_with_num(df,"Exited",num_cols)
# Analyze Numeric Variables Against Each Other
num_cols
sns.scatterplot(x = "Balance", y = "Age", data =df)
plt.show()
sns.lmplot(x = "EstimatedSalary", y = "CreditScore", data = df)
plt.show()
df.corr()
# We will use heatmap also in Feature Engineering
plt.figure(figsize=(12,10))  # set the figure size to 12 by 10
p = sns.heatmap(df.corr(), annot=True,cmap ='RdYlGn')  # seaborn has a very simple solution for heatmaps
plt.show()
# DATA PRE-PROCESSING & FEATURE ENGINEERING
# Variables with outliers
def outlier_thresholds(dataframe, variable):
quartile1 = dataframe[variable].quantile(0.25)
quartile3 = dataframe[variable].quantile(0.75)
interquartile_range = quartile3 - quartile1
up_limit = quartile3 + 1.5 * interquartile_range
low_limit = quartile1 - 1.5 * interquartile_range
return low_limit, up_limit
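# Worked example of the 1.5 * IQR rule above: with quartile1 = 30 and
# quartile3 = 40 the interquartile range is 10, so low_limit = 30 - 15 = 15
# and up_limit = 40 + 15 = 55; values outside [15, 55] count as outliers.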
df["Age"].quantile(0.99)
def has_outliers(dataframe, num_col_names, plot = False):
variable_names = []
for col in num_col_names:
low_limit, up_limit = outlier_thresholds(dataframe, col)
if dataframe[(dataframe[col] > up_limit) | (dataframe[col] < low_limit)].any(axis = None):
number_of_outliers = dataframe[(dataframe[col] > up_limit) | (dataframe[col] < low_limit)].shape[0]
print(col,":",number_of_outliers)
variable_names.append(col)
if plot:
sns.boxplot(x = dataframe[col])
plt.show()
return variable_names
outlier_list = has_outliers(df,num_cols,True)
outlier_thresholds(df,"CreditScore")
outlier_thresholds(df,"Age")
def replace_with_thresholds(dataframe, variable):
low_limit, up_limit = outlier_thresholds(dataframe, variable)
dataframe.loc[dataframe[variable] < low_limit, variable] = low_limit
dataframe.loc[dataframe[variable] > up_limit, variable] = up_limit
for i in outlier_list:
replace_with_thresholds(df,i)
# Check for remaining outliers
has_outliers(df,num_cols)
for i in df.columns:
if i not in "Exited":
print(i)
print(df.groupby("Exited")[i].describe())
# Feature Engineering
df["CreditScore<405"] = df["CreditScore"].apply(lambda x: 1 if x< 405 else 0)
df["CreditScore<405"].value_counts()
# df.loc[df["Balance"]>221532.800000,"Exited"]
df["Balance"].hist()
plt.show()
df["HasBalance"] = df["Balance"].apply(lambda x: 1 if x>0 else 0)
df.groupby("HasBalance")["Exited"].mean()
df["NEW_NUMOFPRODUCTS"] = df["NumOfProducts"] - df["HasCrCard"]
df["Balance_1"] = df["Balance"].apply(lambda x: 1 if x == 0 else x)
df["EstimatedSal/Balance_1"] = df["EstimatedSalary"]/df["Balance_1"]
df.drop("Balance_1",axis=1,inplace=True)
df.groupby(["Geography","Gender"]).agg({"Age":"mean","Exited":"mean","Gender":"count"})
df["France_Female"] = 0
df.loc[(df["Geography"]=="France")&(df["Gender"]=="Female"),"France_Female"] = 1
df["Germany_Female"] = 0
df.loc[(df["Geography"]=="Germany")&(df["Gender"]=="Female"),"Germany_Female"] = 1
df["Spain_Female"] = 0
df.loc[(df["Geography"]=="Spain")&(df["Gender"]=="Female"),"Spain_Female"] = 1
df.groupby(["Exited","Geography"]).agg({"Age":"mean"})
a = pd.DataFrame(pd.qcut(df["Age"],4,labels=[1,2,3,4]))
a.rename(columns={"Age":"Age_qcut"},inplace=True)
a.tail()
df = pd.concat([df,a],axis=1)
df.head()
df["EstimatedSalary_Qcut"] = pd.qcut(df["EstimatedSalary"],10,labels=[1,2,3,4,5,6,7,8,9,10])
df.groupby("EstimatedSalary_Qcut").agg({"Exited":["mean","count"]})
df["Surname_Count"] = df["Surname"].apply(lambda x: len(x))
df["Exited"].value_counts()
# WELLDONE
# df.loc[df["NumOfProducts"]>3,"Exited"]
def one_hot_encoder(dataframe, categorical_cols, nan_as_category=True):
original_columns = dataframe.columns.tolist()  # previously: list(dataframe.columns)
dataframe = pd.get_dummies(dataframe, columns=categorical_cols, dummy_na=nan_as_category, drop_first=True)
new_columns = [c for c in dataframe.columns if c not in original_columns]
return dataframe, new_columns
cat_cols = [col for col in df.columns if len(df[col].value_counts())<12 and col not in "Exited"]
df, new_cols_ohe = one_hot_encoder(df, cat_cols, False)
cols_need_scale = [col for col in df.columns if col not in new_cols_ohe and col not in "Exited" and df[col].dtypes != "O"]
"""plt.figure(figsize=(36,30)) # on this line I just set the size of figure to 12 by 10.
p = sns.heatmap(df.corr(), annot=True,cmap ='RdYlGn') # seaborn has very simple solution for heatmap
plt.show()"""
def robust_scaler(variable):
var_median = variable.median()
quartile1 = variable.quantile(0.25)
quartile3 = variable.quantile(0.75)
interquantile_range = quartile3 - quartile1
if int(interquantile_range) == 0:
# fall back to the 10th/90th percentiles when the IQR rounds to zero
quartile1 = variable.quantile(0.10)
quartile3 = variable.quantile(0.90)
interquantile_range = quartile3 - quartile1
z = (variable - var_median) / interquantile_range
return round(z, 3)
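# Worked example: for pd.Series([1, 2, 3, 4, 100]) the median is 3 and the
# IQR is 4 - 2 = 2, so robust_scaler returns (x - 3) / 2, i.e.
# [-1.0, -0.5, 0.0, 0.5, 48.5]; a lone outlier no longer dominates the scale.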
for col in cols_need_scale:
df[col] = robust_scaler(df[col])
df.drop(["Surname"],axis=1,inplace=True)
y = df["Exited"]
X = df.drop(["Exited"], axis=1)
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score,confusion_matrix,precision_score,recall_score,f1_score
X_train, X_test, y_train, y_test = train_test_split(X,y, test_size=0.2, random_state=11,stratify=y)
"""# import the Random Under Sampler object.
from imblearn.under_sampling import RandomUnderSampler
# create the object.
under_sampler = RandomUnderSampler()
# fit the object to the training data.
X_train, y_train = under_sampler.fit_resample(X_train,y_train)"""
"""# import the NearMiss object.
from imblearn.under_sampling import NearMiss
# create the object with auto
near = NearMiss(sampling_strategy="not minority")
# fit the object to the training data.
X_train, y_train = near.fit_resample(X_train, y_train)"""
"""# import the TomekLinks object.
from imblearn.under_sampling import TomekLinks
# instantiate the object with the right ratio strategy.
tomek_links = TomekLinks(sampling_strategy='majority')
# fit the object to the training data.
X_train, y_train = tomek_links.fit_resample(X_train, y_train)"""
"""# import the ClusterCentroids object.
from imblearn.under_sampling import ClusterCentroids
# instantiate the object with the right ratio.
cluster_centroids = ClusterCentroids(sampling_strategy="auto")
# fit the object to the training data.
X_train, y_train = cluster_centroids.fit_resample(X_train, y_train)"""
# import the EditedNearestNeighbours object.
from imblearn.under_sampling import EditedNearestNeighbours
# create the object to resample the majority class.
enn = EditedNearestNeighbours(sampling_strategy="majority",)
# fit the object to the training data.
X_train, y_train = enn.fit_resample(X_train, y_train)
"""# import the NeighbourhoodCleaningRule object.
from imblearn.under_sampling import NeighbourhoodCleaningRule
# create the object to resample the majority class.
ncr = NeighbourhoodCleaningRule(sampling_strategy="majority")
# fit the object to the training data.
X_train, y_train = ncr.fit_resample(X_train, y_train)"""
"""# import the Random Over Sampler object.
from imblearn.over_sampling import RandomOverSampler
# create the object.
over_sampler = RandomOverSampler()
# fit the object to the training data.
X_train, y_train = over_sampler.fit_resample(X_train, y_train)"""
"""# import the SMOTETomek
from imblearn.over_sampling import SMOTE
# create the object with the desired sampling strategy.
smote = SMOTE(sampling_strategy='minority')
# fit the object to our training data
X_train, y_train = smote.fit_resample(X_train, y_train)"""
"""# import the ADASYN object.
from imblearn.over_sampling import ADASYN
# create the object to resample the majority class.
adasyn = ADASYN(sampling_strategy="minority")
# fit the object to the training data.
X_train, y_train = adasyn.fit_resample(X_train, y_train)"""
"""# import the SMOTETomek.
from imblearn.combine import SMOTETomek
# create the object with the desired sampling strategy.
smotemek = SMOTETomek(sampling_strategy='auto')
# fit the object to our training data.
X_train, y_train = smotemek.fit_resample(X_train, y_train)"""
"""# import the SMOTEENN.
from imblearn.combine import SMOTEENN
# create the object with the desired samplig strategy.
smoenn = SMOTEENN(sampling_strategy='minority')
# fit the object to our training data.
X_train, y_train = smoenn.fit_resample(X_train, y_train)"""
"""
from imblearn.pipeline import Pipeline as imbpipeline
from imblearn.under_sampling import RandomUnderSampler
from imblearn.under_sampling import ClusterCentroids
X_train, X_test, y_train, y_test = train_test_split(X,
y,
test_size=0.2,
stratify=y,
random_state=11)
pipeline = imbpipeline(steps=[['RUS', ClusterCentroids(random_state=11)],
['classifier', LogisticRegression(random_state=11,
max_iter=1000)]])
stratified_kfold = StratifiedKFold(n_splits=3,
shuffle=True,
random_state=11)
param_grid = {'classifier__C': [0.001, 0.01, 0.1, 1, 10, 100, 1000]}
grid_search = GridSearchCV(estimator=pipeline,
param_grid=param_grid,
scoring='recall',
cv=stratified_kfold,
n_jobs=-1)
grid_search.fit(X_train, y_train)
cv_score = grid_search.best_score_
test_score = grid_search.score(X_test, y_test)
print(f'Cross-validation score: {cv_score}\nTest score: {test_score}')
"""
# Model Tuning
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import VotingClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import BaggingClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.naive_bayes import GaussianNB
from lightgbm import LGBMClassifier
log_clf = LogisticRegression()
rnd_clf = RandomForestClassifier()
svm_clf = SVC(gamma="auto")
tree_clf = DecisionTreeClassifier()
knn_clf= KNeighborsClassifier()
bgc_clf=BaggingClassifier()
gbc_clf=GradientBoostingClassifier()
abc_clf= AdaBoostClassifier()
lgbm_clf = LGBMClassifier(random_state = 12345)
nb_clf = GaussianNB()
xgb_clf = GradientBoostingClassifier(random_state=12345)  # note: named "xgb" but this is sklearn's GradientBoostingClassifier, not XGBoost
from imblearn.under_sampling import RandomUnderSampler
from imblearn.under_sampling import NearMiss
from imblearn.under_sampling import TomekLinks
from imblearn.under_sampling import ClusterCentroids
from imblearn.under_sampling import EditedNearestNeighbours
from imblearn.under_sampling import NeighbourhoodCleaningRule
from imblearn.over_sampling import RandomOverSampler
from imblearn.over_sampling import SMOTE
from imblearn.over_sampling import ADASYN
from imblearn.combine import SMOTETomek
from imblearn.combine import SMOTEENN
rus = RandomUnderSampler()
nm = NearMiss()
tl = TomekLinks()
cc = ClusterCentroids()
enn = EditedNearestNeighbours()
ncr = NeighbourhoodCleaningRule()
ros = RandomOverSampler()
smt = SMOTE(random_state=11)
ada = ADASYN()
smtmk = SMOTETomek()
smtenn = SMOTEENN()
stratified_kfold = StratifiedKFold(n_splits=3,
shuffle=True,
random_state=11)
param_grid = {'classifier__C': [0.001, 0.01, 0.1, 1, 10, 100, 1000]}
voting_clf = VotingClassifier(
estimators=[('lr', log_clf), ('rf', rnd_clf), ('svc', svm_clf), ('tree', tree_clf),('knn', knn_clf),('bg', bgc_clf),
('gbc', gbc_clf),('abc', abc_clf),("lgbm", lgbm_clf),("nb", nb_clf),("xgb", xgb_clf)],voting='hard')
#voting_clf.fit(X_train, y_train)
for clf in (log_clf, rnd_clf, svm_clf,tree_clf,knn_clf,bgc_clf,gbc_clf,abc_clf,lgbm_clf,nb_clf,xgb_clf,voting_clf):
for smpmthd in (rus, nm, tl, cc, enn, ncr, ros, smt, ada, smtmk, smtenn):
pipeline = imbpipeline(steps=[['UnderOverSamplingMethods', smpmthd],
['classifier', clf]])
for a in ["accuracy", "precision", "recall", "f1", "roc_auc"]:
#cross_val_score(estimator=pipeline, X_train, y_train, scoring=a, cv=stratified_kfold, n_jobs=-1).mean()
cv_results = cross_val_score(pipeline, X_train, y_train, cv=stratified_kfold, scoring=a)
"""grid_search = GridSearchCV(estimator=pipeline,
scoring=a,
param_grid=param_grid,
cv=stratified_kfold,
n_jobs=-1)"""
"""cv_score = grid_search.best_score_
test_score = grid_search.score(X_test, y_test)"""
#test_score = 1
print(clf,smpmthd,a,f'Cross-validation score: {cv_results.mean()}')
"""clf.fit(X_train,y_train)
y_pred = clf.predict(X_test)
print(clf.__class__.__name__,"Accuracy Score :", accuracy_score(y_test, y_pred))
print(clf.__class__.__name__,"Precision Score :", precision_score(y_test, y_pred))
print(clf.__class__.__name__,"Recall Score :", recall_score(y_test, y_pred))
print(clf.__class__.__name__,"F1 Score :", f1_score(y_test, y_pred))"""
"""
from numpy import mean
from sklearn.datasets import make_classification
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import RepeatedStratifiedKFold
from sklearn.tree import DecisionTreeClassifier
from imblearn.pipeline import Pipeline
from imblearn.over_sampling import SMOTE
from imblearn.over_sampling import BorderlineSMOTE
from imblearn.under_sampling import RandomUnderSampler ENN
from imblearn.over_sampling import ADASYN
steps = [('over', BorderlineSMOTE()), ('model', DecisionTreeClassifier())]
pipeline = Pipeline(steps=steps)
oversample = ADASYN()
under = RandomUnderSampler()
X_train, y_train = oversample.fit_resample(X_train, y_train)
# evaluate pipeline
cv = RepeatedStratifiedKFold(n_splits=10, n_repeats=3, random_state=1)
scores = cross_val_score(pipeline, X_test, y_test, scoring='roc_auc', cv=cv, n_jobs=-1)
print('Mean ROC AUC: %.3f' % mean(scores))
"""
"""
smote = BorderlineSMOTE()
X_train, y_train = smote.fit_resample(X_train,y_train)
X_train.head()
y_train.head()
y_train.value_counts()
log_model = LogisticRegression().fit(X_train, y_train)
log_model.intercept_
log_model.coef_
log_model.predict(X_test)[0:10]
y[0:10]
log_model.predict_proba(X_test)[0:10]
y_pred = log_model.predict(X_test)
confusion_matrix(y_test,y_pred)
accuracy_score(y_test, y_pred)
precision_score(y_test,y_pred)
recall_score(y_test,y_pred)
f1_score(y_test,y_pred)
from sklearn.svm import SVC
from sklearn.naive_bayes import GaussianNB
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import cross_val_score
import time
"""
""""
models = [GaussianNB(), DecisionTreeClassifier(), SVC()]
names = ["Naive Bayes", "Decision Tree", "SVM"]
for model, name in zip(models, names):
print(name)
start = time.time()
for score in ["accuracy", "precision", "recall","f1"]:
print(score," : ",cross_val_score(, X, y, scoring=score, cv=10).mean())
print("Duration : ",time.time() - start,"sec")
"""
"""
folds = KFold(n_splits = 5, shuffle = True, random_state = 35)
scores = []
for n_fold, (train_index, valid_index) in enumerate(folds.split(X,y)):
print('\n Fold '+ str(n_fold+1 ) +
' \n\n train ids :' + str(train_index) +
' \n\n validation ids :' + str(valid_index))
X_train, X_valid = X[train_index], X[valid_index]
y_train, y_valid = y[train_index], y[valid_index]
rf_model.fit(X_train, y_train)
y_pred = rf_model.predict(X_test)
acc_score = accuracy_score(y_test, y_pred)
scores.append(acc_score)
print('\n Accuracy score for Fold ' +str(n_fold+1) + ' --> ' + str(acc_score)+'\n')
print(scores)
print('Avg. accuracy score :' + str(np.mean(scores)))
"""
"""
cross_val_score(log_model, X_test, y_test, cv=10).mean()
print(classification_report(y, y_pred))
logit_roc_auc = roc_auc_score(y, log_model.predict(X))
fpr, tpr, thresholds = roc_curve(y, log_model.predict_proba(X)[:, 1])
plt.figure()
plt.plot(fpr, tpr, label='AUC (area = %0.2f)' % logit_roc_auc)
plt.plot([0, 1], [0, 1], 'r--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic')
plt.legend(loc="lower right")
plt.savefig('Log_ROC')
plt.show()
"""
#LR
LR = LogisticRegression()
LRparam_grid = {
'C': [0.001, 0.01, 0.1, 1, 10, 100, 1000],
'penalty': ['l1', 'l2'],
'max_iter': list(range(100,800,100)),
'solver': ['newton-cg', 'lbfgs', 'liblinear', 'sag', 'saga']
}
LR_search = GridSearchCV(LR, param_grid=LRparam_grid, refit = True, verbose = 3, cv=5)
# fitting the model for grid search
LR_search.fit(X_train , y_train)
LR_search.best_params_
# summarize
print('Mean Accuracy: %.3f' % LR_search.best_score_)
print('Config: %s' % LR_search.best_params_)
LR_tuned = LogisticRegression(**LR_search.best_params_).fit(X_train,y_train)
y_pred = LR_tuned.predict(X_test)
print("LR Accuracy Score : ",accuracy_score(y_test,y_pred))
print("LR Recall Score : ",recall_score(y_test,y_pred))
# RF
kfold = StratifiedKFold(n_splits=10)
rf_model = RandomForestClassifier(random_state=12345).fit(X_train, y_train)
cross_val_score(rf_model, X_train, y_train, cv=10).mean()
rf_params = {"n_estimators": [200, 500, 1000],
"max_features": [5, 7, 9],
"min_samples_split": [5, 8,10],
"max_depth": [3,5, None]}
rf_model = RandomForestClassifier(random_state=12345)
rf_gs_cv = GridSearchCV(rf_model,
rf_params,
cv=kfold,
n_jobs=-1,
verbose=2).fit(X_train, y_train)
rf_gs_cv.best_params_
rf_tuned = RandomForestClassifier(**rf_gs_cv.best_params_).fit(X_train,y_train)
y_pred = rf_tuned.predict(X_test)
print("RF Accuracy Score : ",accuracy_score(y_test,y_pred))
print("RF Recall Score : ",recall_score(y_test,y_pred))
# LightGBM
kfold = StratifiedKFold(n_splits=10)
lgbm = LGBMClassifier(random_state=12345)
cross_val_score(lgbm, X_train, y_train, cv=kfold,scoring="recall").mean()
# model tuning
lgbm_params = {"learning_rate": [0.01, 0.1, 0.5],
"n_estimators": [500, 1000, 1500],
"max_depth": [3, 5, 8],
"num_leaves":[30]}
gs_cv = GridSearchCV(lgbm,
lgbm_params,
cv=kfold,
n_jobs=1,
verbose=2).fit(X_train, y_train)
lgbm_tuned = LGBMClassifier(**gs_cv.best_params_).fit(X_train, y_train)
y_pred = lgbm_tuned.predict(X_test)
print("LGBM Accuracy Score : ",accuracy_score(y_test,y_pred))
print("LGBM Recall Score : ",recall_score(y_test,y_pred))
feature_imp = pd.Series(lgbm_tuned.feature_importances_,
index=X.columns).sort_values(ascending=False)
sns.barplot(x=feature_imp, y=feature_imp.index)
plt.xlabel('Feature Importance Scores')
plt.ylabel('Features')
plt.title("Feature Importance Levels")
plt.show()
#####
xgb = GradientBoostingClassifier(random_state=12345)  # "xgb" here is also sklearn's gradient boosting, not XGBoost
xgb_params = {"n_estimators": [100, 500, 1000],
"subsample" : [0.6, 0.8, 1.0],
"max_depth" : [3, 4, 5],
"learning_rate" : [0.1, 0.01, 0.05,],
"min_samples_split" : [2, 5, 10]}
xgb_cv_model = GridSearchCV(xgb,
xgb_params,
cv = 5,
n_jobs = -1,
verbose = 2).fit(X_train,y_train)
xgb_tuned = GradientBoostingClassifier(**xgb_cv_model.best_params_,random_state=12345).fit(X_train,y_train)
y_pred = xgb_tuned.predict(X_test)
print("XGB Accuracy Score : ",accuracy_score(y_test,y_pred))
print("XGB Recall Score : ",recall_score(y_test,y_pred))
"""# eda
# data prep
# feature eng.
# model
# prediction
# model tuning
# final model
# feature importance
# CV METHOD FOR ALL MODELS
models = [('LR', LogisticRegression()),
('KNN', KNeighborsClassifier()),
('CART', DecisionTreeClassifier()),
('RF', RandomForestClassifier()),
('SVM', SVC(gamma='auto')),
('XGB', GradientBoostingClassifier()),
("LightGBM", LGBMClassifier()),
("Naive Bayes",GaussianNB()),]
# evaluate each model in turn
results = []
names = []
for name, model in models:
kfold = KFold(n_splits=10, random_state=123456, shuffle=True)
cv_results = cross_val_score(model, X_test, y_test, cv=10, scoring=["accuracy","precision","recall","f1"])
results.append(cv_results)
names.append(name)
msg = "%s: %f (%f)" % (name, cv_results.mean(), cv_results.std())
print(msg)
# boxplot algorithm comparison
fig = plt.figure(figsize=(15, 10))
fig.suptitle('Algorithm Comparison')
ax = fig.add_subplot(111)
plt.boxplot(results)
ax.set_xticklabels(names)
plt.show()
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
x_train, x_test, y_train, y_test = train_test_split(X,y, test_size=0.2, random_state=0)
from imblearn.combine import SMOTETomek
smk = SMOTETomek()
x_train, y_train = smk.fit_resample(x_train, y_train)
x_test, y_test = smk.fit_resample(x_test, y_test)
""""""
x_train.columns
lgbm = LGBMClassifier(random_state=12345)
lgbm.fit(x_train,y_train)
accuracy_score(y_test,lgbm.predict(x_test))"""
"""
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import VotingClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import BaggingClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.naive_bayes import GaussianNB
from lightgbm import LGBMClassifier
log_clf = LogisticRegression()
rnd_clf = RandomForestClassifier()
svm_clf = SVC(gamma="auto")
tree_clf = DecisionTreeClassifier()
knn_clf= KNeighborsClassifier()
bgc_clf=BaggingClassifier()
gbc_clf=GradientBoostingClassifier()
abc_clf= AdaBoostClassifier()
lgbm_clf = LGBMClassifier(random_state = 12345)
nb_clf = GaussianNB()
voting_clf = VotingClassifier(
estimators=[('lr', log_clf), ('rf', rnd_clf), ('svc', svm_clf), ('tree', tree_clf),('knn', knn_clf),('bg', bgc_clf),
('gbc', gbc_clf),('abc', abc_clf),("lgbm", lgbm_clf),("nb", nb_clf)],voting='hard')
voting_clf.fit(X_train, y_train)
for clf in (log_clf, rnd_clf, svm_clf,tree_clf,knn_clf,bgc_clf,gbc_clf,abc_clf,lgbm_clf,nb_clf,voting_clf):
clf.fit(X_train,y_train)
y_pred = clf.predict(X_test)
print(clf.__class__.__name__,"Accuracy Score :", accuracy_score(y_test, y_pred))
print(clf.__class__.__name__,"Precision Score :", precision_score(y_test, y_pred))
print(clf.__class__.__name__,"Recall Score :", recall_score(y_test, y_pred))
print(clf.__class__.__name__,"F1 Score :", f1_score(y_test, y_pred))
#####
lgbm = LGBMClassifier(random_state = 12345)
lgbm_params = {"learning_rate": [0.01, 0.05, 0.1],
"n_estimators": [100, 500, 1000],
"max_depth":[3, 5, 8]}
gs_cv = GridSearchCV(lgbm,
lgbm_params,
cv = 10,
n_jobs = -1,
verbose = 2).fit(X_train,y_train)
lgbm_tuned = LGBMClassifier(**gs_cv.best_params_).fit(X_train,y_train)
y_pred = lgbm_tuned.predict(X_test)
acc_score = accuracy_score(y_test, y_pred)
print(acc_score)
#####
xgb = GradientBoostingClassifier(random_state=12345)
xgb_params = {"n_estimators": [100, 500, 1000],
"subsample" : [0.6, 0.8, 1.0],
"max_depth" : [3, 4, 5],
"learning_rate" : [0.1, 0.01, 0.05,],
"min_samples_split" : [2, 5, 10]}
xgb_cv_model = GridSearchCV(xgb,
xgb_params,
cv = 5,
n_jobs = -1,
verbose = 2).fit(X_train,y_train)
xgb_tuned = GradientBoostingClassifier(**xgb_cv_model.best_params_,random_state=12345)
xgb_tuned = xgb_tuned.fit(X_train,y_train)
y_pred = xgb_tuned.predict(X_test)
acc_score = accuracy_score(y_test, y_pred)
recall_score(y_test,y_pred)
print(acc_score)
#SVM
from sklearn import svm
classifier = svm.SVC(class_weight={0:0.60, 1:0.40},random_state=12345)
svm_tuned = classifier.fit(X_train, y_train)
y_pred = svm_tuned.predict(X_test)
acc_score = accuracy_score(y_test, y_pred)
recall_score(y_test,y_pred)
print(acc_score)
#SVM
from sklearn.metrics import confusion_matrix
from sklearn import preprocessing
encoder = preprocessing.LabelEncoder()
#Libraries to Build Ensemble Model : Random Forest Classifier
# Create the parameter grid based on the results of random search
params_grid = [{'kernel': ['rbf'], 'gamma': [1e-3, 1e-4],
'C': [1, 10, 100, 1000]},
{'kernel': ['linear'], 'C': [1, 10, 100, 1000]}]
# Performing CV to tune parameters for best SVM fit
svm_model = GridSearchCV(SVC(), params_grid, cv=5)
svm_model.fit(x_train, y_train)
# View the accuracy score
print('Best score for training data:', svm_model.best_score_,"\n")
# View the best parameters for the model found using grid search
print('Best C:',svm_model.best_estimator_.C,"\n")
print('Best Kernel:',svm_model.best_estimator_.kernel,"\n")
print('Best Gamma:',svm_model.best_estimator_.gamma,"\n")
final_model = svm_model.best_estimator_
Y_pred = final_model.predict(x_test)
Y_pred_label = list(Y_pred)
# Making the Confusion Matrix
#print(pd.crosstab(Y_test_label, Y_pred_label, rownames=['Actual Activity'], colnames=['Predicted Activity']))
print(confusion_matrix(y_test,Y_pred_label))
print("\n")
print(classification_report(y_test,Y_pred_label))
print("Training set score for SVM: %f" % final_model.score(x_train , y_train))
print("Testing set score for SVM: %f" % final_model.score(x_test , y_test ))
svm_model.score
"""
|
# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from textwrap import dedent
import pytest
from pants.backend.codegen.thrift import dependency_inference
from pants.backend.codegen.thrift.dependency_inference import (
InferThriftDependencies,
ThriftDependenciesInferenceFieldSet,
ThriftMapping,
)
from pants.backend.codegen.thrift.target_types import ThriftSourcesGeneratorTarget
from pants.backend.codegen.thrift.target_types import rules as target_types_rules
from pants.core.util_rules import stripped_source_files
from pants.engine.addresses import Address
from pants.engine.target import InferredDependencies
from pants.testutil.rule_runner import QueryRule, RuleRunner
from pants.util.frozendict import FrozenDict
@pytest.fixture
def rule_runner() -> RuleRunner:
return RuleRunner(
rules=[
*stripped_source_files.rules(),
*dependency_inference.rules(),
*target_types_rules(),
QueryRule(ThriftMapping, []),
QueryRule(InferredDependencies, [InferThriftDependencies]),
],
target_types=[ThriftSourcesGeneratorTarget],
)
def test_thrift_mapping(rule_runner: RuleRunner) -> None:
rule_runner.set_options(["--source-root-patterns=['root1', 'root2', 'root3']"])
rule_runner.write_files(
{
# Two thrift files belonging to the same target. We should use two file addresses.
"root1/thrifts/f1.thrift": "",
"root1/thrifts/f2.thrift": "",
"root1/thrifts/BUILD": "thrift_sources()",
# These thrifts would result in the same stripped file name, so they are ambiguous.
"root1/two_owners/f.thrift": "",
"root1/two_owners/BUILD": "thrift_sources()",
"root2/two_owners/f.thrift": "",
"root2/two_owners/BUILD": "thrift_sources()",
}
)
result = rule_runner.request(ThriftMapping, [])
assert result == ThriftMapping(
mapping=FrozenDict(
{
"thrifts/f1.thrift": Address("root1/thrifts", relative_file_path="f1.thrift"),
"thrifts/f2.thrift": Address("root1/thrifts", relative_file_path="f2.thrift"),
}
),
ambiguous_modules=FrozenDict(
{
"two_owners/f.thrift": (
Address("root1/two_owners", relative_file_path="f.thrift"),
Address("root2/two_owners", relative_file_path="f.thrift"),
)
}
),
)
def test_dependency_inference(rule_runner: RuleRunner, caplog) -> None:
rule_runner.set_options(["--source-root-patterns=['src/thrifts']"])
rule_runner.write_files(
{
"src/thrifts/project/f1.thrift": dedent(
"""\
include 'tests/f.thrift';
                include 'unrelated_path/foo.thrift';
"""
),
"src/thrifts/project/f2.thrift": "include 'project/f1.thrift';",
"src/thrifts/project/BUILD": "thrift_sources()",
"src/thrifts/tests/f.thrift": "",
"src/thrifts/tests/BUILD": "thrift_sources()",
# Test handling of ambiguous imports. We should warn on the ambiguous dependency, but
# not warn on the disambiguated one and should infer a dep.
"src/thrifts/ambiguous/dep.thrift": "",
"src/thrifts/ambiguous/disambiguated.thrift": "",
"src/thrifts/ambiguous/main.thrift": dedent(
"""\
include 'ambiguous/dep.thrift';
                include 'ambiguous/disambiguated.thrift';
"""
),
"src/thrifts/ambiguous/BUILD": dedent(
"""\
thrift_sources(name='dep1', sources=['dep.thrift', 'disambiguated.thrift'])
thrift_sources(name='dep2', sources=['dep.thrift', 'disambiguated.thrift'])
thrift_sources(
name='main',
sources=['main.thrift'],
dependencies=['!./disambiguated.thrift:dep2'],
)
"""
),
}
)
def run_dep_inference(address: Address) -> InferredDependencies:
tgt = rule_runner.get_target(address)
return rule_runner.request(
InferredDependencies,
[InferThriftDependencies(ThriftDependenciesInferenceFieldSet.create(tgt))],
)
assert run_dep_inference(
Address("src/thrifts/project", relative_file_path="f1.thrift")
) == InferredDependencies([Address("src/thrifts/tests", relative_file_path="f.thrift")])
assert run_dep_inference(
Address("src/thrifts/project", relative_file_path="f2.thrift")
) == InferredDependencies([Address("src/thrifts/project", relative_file_path="f1.thrift")])
caplog.clear()
assert run_dep_inference(
Address("src/thrifts/ambiguous", target_name="main", relative_file_path="main.thrift")
) == InferredDependencies(
[
Address(
"src/thrifts/ambiguous",
target_name="dep1",
relative_file_path="disambiguated.thrift",
)
]
)
assert len(caplog.records) == 1
assert (
"The target src/thrifts/ambiguous/main.thrift:main imports `ambiguous/dep.thrift`"
in caplog.text
)
assert (
"['src/thrifts/ambiguous/dep.thrift:dep1', 'src/thrifts/ambiguous/dep.thrift:dep2']"
in caplog.text
)
assert "disambiguated.thrift" not in caplog.text
|
"""
The median divides a sorted collection into two halves of equal length,
such that every element of one half is greater than or equal to every
element of the other. Simple approach: merge both arrays, sort, and take
the middle element (or the mean of the two middle elements).
"""
class Solution:
def findMedianSortedArrays(self, nums1, nums2):
"""
:type nums1: List[int]
:type nums2: List[int]
:rtype: float
"""
for num in nums2:
nums1.append(num)
nums1.sort()
if len(nums1)%2:
return nums1[len(nums1)//2]
else:
return (nums1[len(nums1)//2-1] + nums1[len(nums1)//2])/2
if __name__ == "__main__":
so = Solution()
print(so.findMedianSortedArrays([1,2], [3,4]))
|
"""Derivatives of the MSE Loss."""
from math import sqrt
from torch import einsum, eye, normal
from backpack.core.derivatives.basederivatives import BaseLossDerivatives
class MSELossDerivatives(BaseLossDerivatives):
"""Derivatives of the MSE Loss.
We only support 2D tensors.
For `X : [n, d]` and `Y : [n, d]`, if `reduce=sum`, the MSE computes
`∑ᵢ₌₁ⁿ ‖X[i,∶] − Y[i,∶]‖²`. If `reduce=mean`, the result is divided by `nd`.
"""
def _sqrt_hessian(self, module, g_inp, g_out):
"""Square-root of the hessian of the MSE for each minibatch elements.
Returns the Hessian in format `Hs = [D, N, D]`, where
`Hs[:, n, :]` is the Hessian for the `n`th element.
        Args:
module: (torch.nn.MSELoss) module
g_inp: Gradient of loss w.r.t. input
g_out: Gradient of loss w.r.t. output
Returns:
Batch of hessians, in format [D, N, D]
"""
self.check_input_dims(module)
N, D = module.input0.shape
sqrt_H = sqrt(2) * eye(D, device=module.input0.device) # [D, D]
sqrt_H = sqrt_H.unsqueeze(0).repeat(N, 1, 1) # [N, D, D]
sqrt_H = einsum("nab->anb", sqrt_H) # [D, N, D]
if module.reduction == "mean":
sqrt_H /= sqrt(module.input0.numel())
return sqrt_H
def _sqrt_hessian_sampled(self, module, g_inp, g_out, mc_samples=1):
"""A Monte-Carlo estimate of the square-root of the Hessian.
        Args:
module: (torch.nn.MSELoss) module.
g_inp: Gradient of loss w.r.t. input.
g_out: Gradient of loss w.r.t. output.
mc_samples: (int, optional) Number of MC samples to use. Default: 1.
Returns:
            Tensor of sqrt-Hessian samples with shape [mc_samples, N, D]
"""
N, D = module.input0.shape
samples = normal(0, 1, size=[mc_samples, N, D], device=module.input0.device)
samples *= sqrt(2) / sqrt(mc_samples)
if module.reduction == "mean":
samples /= sqrt(module.input0.numel())
return samples
def _sum_hessian(self, module, g_inp, g_out):
"""The Hessian, summed across the batch dimension.
Args:
module: (torch.nn.MSELoss) module
g_inp: Gradient of loss w.r.t. input
g_out: Gradient of loss w.r.t. output
Returns: a `[D, D]` tensor of the Hessian, summed across batch
"""
self.check_input_dims(module)
N, D = module.input0.shape
H = 2 * eye(D, device=module.input0.device)
if module.reduction == "sum":
H *= N
elif module.reduction == "mean":
H /= D
return H
def _make_hessian_mat_prod(self, module, g_inp, g_out):
"""Multiplication of the input Hessian with a matrix."""
def hessian_mat_prod(mat):
Hmat = 2 * mat
if module.reduction == "mean":
Hmat /= module.input0.numel()
return Hmat
return hessian_mat_prod
def check_input_dims(self, module):
"""Raises an exception if the shapes of the input are not supported."""
        if len(module.input0.shape) != 2:
raise ValueError("Only 2D inputs are currently supported for MSELoss.")
def hessian_is_psd(self):
return True
|
#coding=utf8
import sys
import os
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from PyQt4.QtWebKit import *
from graph_reco import *
class MyBrowser(QWidget):
def __init__(self, parent = None):
super(MyBrowser, self).__init__(parent)
self.createLayout()
self.createConnection()
def search(self):
address = str(self.addressBar.text())
if address:
if address.find('://') == -1:
address = 'http://' + address
url = QUrl(address)
self.webView.load(url)
def record(self):
os.system('python record1.py')
def record2(self):
os.system('python record2.py')
def graphReco(self):
absolute_path = QFileDialog.getOpenFileName(self,'Open file')
print str(absolute_path)
pred_list = graph_reco(str(absolute_path))
print pred_list
oout = open('graph_result.txt','w')
oout.write(pred_list[0].encode('utf8'))
oout.close()
self.addressBar.setText(pred_list[0])
def createLayout(self):
self.setWindowTitle("dada's browser")
self.addressBar = QLineEdit()
self.goButton = QPushButton("&GO")
        self.voiceButton = QPushButton("&REGISTER")
self.logButton = QPushButton("&LOGIN")
self.graphButton = QPushButton("&GRAPH")
bl = QHBoxLayout()
bl.addWidget(self.addressBar)
bl.addWidget(self.voiceButton)
bl.addWidget(self.logButton)
bl.addWidget(self.goButton)
bl.addWidget(self.graphButton)
self.webView = QWebView()
layout = QVBoxLayout()
layout.addLayout(bl)
layout.addWidget(self.webView)
self.setLayout(layout)
def createConnection(self):
self.connect(self.addressBar, SIGNAL('returnPressed()'), self.search)
self.connect(self.addressBar, SIGNAL('returnPressed()'), self.addressBar, SLOT('selectAll()'))
self.connect(self.goButton, SIGNAL('clicked()'), self.search)
self.connect(self.goButton, SIGNAL('clicked()'), self.addressBar, SLOT('selectAll()'))
self.connect(self.voiceButton, SIGNAL('clicked()'), self.record)
self.connect(self.logButton, SIGNAL('clicked()'), self.record2)
self.connect(self.graphButton, SIGNAL('clicked()'), self.graphReco)
app = QApplication(sys.argv)
browser = MyBrowser()
browser.show()
sys.exit(app.exec_())
|
#!/usr/bin/env python
# encoding: utf-8
# @author: Zhipeng Ye
# @contact: Zhipeng.ye19@xjtlu.edu.cn
# @file: calculate_ngram3.py
# @time: 2020-01-14 01:27
# @desc:
import os
import re
import main
import math
import traceback
import codecs
import sys
sys.stdout = codecs.getwriter('utf-8')(sys.stdout.detach())
class LanguageModelContent:
def __init__(self, possibility, words, punishment=''):
self.possibility = possibility
self.words = words
self.punishment = punishment
def __str__(self):
return self.possibility + '\t' + self.words + '\t' + self.punishment
if __name__ == "__main__":
gram_2 = {}
# with open('/Data_SSD/zhipengye/zhipengye/data/gram2/gram2_count', encoding='utf-8') as file:
with open('/Data_SSD/zhipengye/zhipengye/data/gram2/gram2_count', encoding='utf-8') as file:
for line in file:
            if line != '\n' and line != '':
try:
segments = re.split('[\t\s]+',line)
first_word = segments[0]
second_word = segments[1]
count = segments[2]
gram_2[first_word + ' '+second_word] = count
except Exception as ex:
traceback.print_exc()
print('line is :',line)
dir_list = os.listdir('/Data_SSD/zhipengye/zhipengye/data/original_data/Data/data')
# because ngrams-[00030 - 00036]-of-00394 have no invalid data
filtered_list = [dir for dir in dir_list if dir >= 'ngrams-00037-of-00394' and dir <= 'ngrams-00132-of-00394']
# filtered_list = [dir for dir in dir_list if dir >= 'ngrams-00037-of-00394' and dir <= 'ngrams-00038-of-00394']
model_list = []
for file_name in filtered_list:
with open('/Data_SSD/zhipengye/zhipengye/data/original_data/Data/data/'+file_name, encoding='utf-8') as file:
for line in file:
if re.match('^[\u4e00-\u9fa5]{1,8}[\s\t]{1,}[\u4e00-\u9fa5]{1,8}[\s\t]{1,}[\u4e00-\u9fa5]{1,8}[\s\t]{1,}\d{1,}',line):
segments = re.split('[\s\t]+', line)
first_word = segments[0]
second_word = segments[1]
third_word = segments[2]
words = first_word+' '+second_word + ' '+third_word
count = float(segments[3])
gram_2_words = first_word + ' ' +second_word
gram_2_count = gram_2.get(gram_2_words)
                    if gram_2_count is not None:
conditional_possibility = count / float(gram_2_count)
conditional_possibility = math.log10(conditional_possibility)
conditional_possibility = round(conditional_possibility, 6)
model = LanguageModelContent(str(conditional_possibility),words )
model_list.append(model)
if len(model_list) >= 1000000:
with open('/Data_SSD/zhipengye/zhipengye/data/gram3/gram3','a',encoding='utf-8') as file:
for model in model_list:
file.write(str(model) + '\n')
print('1000000 rows have been processed!')
model_list = []
with open('/Data_SSD/zhipengye/zhipengye/data/gram3/gram3','a',encoding='utf-8') as file:
for model in model_list:
file.write(str(model) + '\n')
print(str(len(model_list))+' rows have been processed!')
|
import json
import bs4
import requests
res = requests.get('https://en.wikipedia.org/wiki/List_of_hobbies')
res.raise_for_status()
soup = bs4.BeautifulSoup(res.text, "html.parser")
activity_list = []
with open('sports-list.json') as fp:
sports_list = json.load(fp)
categories = soup.select('.div-col')
categories.extend(soup.select(' ul:nth-of-type(11)'))
categories.extend(soup.select(' ul:nth-of-type(12)'))
for category in categories:
activities = category.select('li a')
for activity in activities:
if 'title' not in activity.attrs:
continue
else:
name = activity.attrs['title']
name = name.title()
activity_list.append(name)
activity_list = set(activity_list) - set(sports_list)
activity_list = list(activity_list)
activity_list.sort()
with open('activity-list.json', 'w') as fp:
json.dump(activity_list, fp, indent=4)
|
# square
def print_square(n):
    print('square')
    for i in range(n):
        print('*' * n)
# triangle
def print_triangle(n):
print()
print('triangle')
start_point = 2
triangle_height = n // 2
if n % 2 != 0:
triangle_height = (n + 1) // 2
start_point = 1
for i in range(triangle_height):
print(' ' * ((n - start_point) // 2) + '*' * start_point)
start_point += 2
# rhombus
def print_rhombus(n):
print()
print('rhombus')
start_point = 2
triangle_height = n // 2
if n % 2 != 0:
triangle_height = (n + 1) // 2
start_point = 1
for i in range(triangle_height):
print(' ' * ((n - start_point) // 2) + '*' * start_point)
if i < triangle_height - 1:
start_point += 2
for i in range(triangle_height - 1, 0, -1):
start_point -= 2
print(' ' * ((n - start_point) // 2) + '*' * start_point)
def print_figure(name, n):
if name == 'square':
print_square(n)
elif name == 'triangle':
print_triangle(n)
elif name == 'rhombus':
print_rhombus(n)
else:
print('Unknown name of the figure')
name = input('Enter name of the figure (square, triangle, rhombus): ')
n = int(input('Enter figure size: '))
print_figure(name, n)
|
from tkinter import *
from LoginPage import *
import tkinter
root = Tk()
root.title('路障跟踪与维修系统')  # "Roadblock tracking and repair system"
#root.wm_iconbitmap('x.ico')
LoginPage(root)
root.mainloop()
|
# Simple greedy solution
from typing import List
MAXN = 2**31
class Solution:
def increasingTriplet(self, nums: List[int]) -> bool:
first, second = MAXN, MAXN
for item in nums:
if item <= first:
first = item
elif item <= second:
second = item
else:
return True
return False
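# Usage sketch (added): e.g. Solution().increasingTriplet([2, 1, 5, 0, 4, 6]) -> True,
# since the subsequence 0 < 4 < 6 is strictly increasing.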
|
#from http://stackoverflow.com/a/3076636/5620182
class Shape(object):
def __new__(cls, *args, **kwargs):
if cls is Shape: # <-- required because Line's
description, args = args[0], args[1:] # __new__ method is the
if description == "It's flat": # same as Shape's
new_cls = Line
else:
raise ValueError("Invalid description: {}.".format(description))
else:
new_cls = cls
return super(Shape, cls).__new__(new_cls, *args, **kwargs)
def number_of_edges(self):
return "A shape can have many edges..."
class Line(Shape):
def number_of_edges(self):
return 1
class SomeShape(Shape):
pass
if __name__ == "__main__":
l1 = Shape("It's flat")
print(l1.number_of_edges())
l2 = Line()
print(l2.number_of_edges())
u = SomeShape()
print(u.number_of_edges())
s = Shape("Hexagon")
|
# coding=UTF-8
fila = [10, 20, 30, 40, 50]
fila.append(60)
print(fila)
print(fila.pop(0))
print(fila)
print(fila.pop(0))
print(fila)
print(fila.pop(0))
print(fila)
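# Note (added): list.pop(0) shifts every remaining element, so it is O(n).
# A hedged sketch of the same FIFO queue using collections.deque, whose
# popleft() is O(1):
from collections import deque
fila2 = deque([10, 20, 30, 40, 50])
fila2.append(60)
print(fila2.popleft())  # 10
print(fila2)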
#Stack ("Pilha" in Portuguese)
def ola():
print("Olá, ")
mundo()
def mundo():
print("mundo!")
def olamundo():
ola()
olamundo()
|
import glob
import re
import random
import numpy as np
import cv2
import os
from torch.utils.data import Dataset
from data.data_label_factory import label_factory
def read_video(filename):
frames = []
    if not os.path.isfile(filename):
        raise FileNotFoundError(filename)
cap = cv2.VideoCapture(filename)
while cap.isOpened():
ret, frame = cap.read()
if not ret:
break
frames.append(frame)
cap.release()
video = np.stack(frames)
return video
class DatasetReader(Dataset):
def __init__(self, data):
super(DatasetReader, self).__init__()
self.data = [data]
def __len__(self):
return len(self.data)
def __getitem__(self, idx):
videodata = self.data[idx]
return videodata
|
import pygame
class Grafika(object):
def menu(self):
pygame.display.set_caption('MasterMind')
self.tlo = pygame.image.load("Obrazy/kkk.png")
self.ramka = pygame.image.load("Obrazy/ramka_2.png")
size = self.screen.get_size()
self.srodek_x = int(size[0] / 2)
self.screen.blit(self.tlo, (0, 0))
pos = pygame.mouse.get_pos()
if pos[0] in range(95, 326) and pos[1] in range(170, 229):
self.screen.blit(self.ramka, (self.srodek_x - 117, 170))
if pos[0] in range(95, 326) and pos[1] in range(246, 307):
self.screen.blit(self.ramka, (self.srodek_x - 117, 246))
if pos[0] in range(95, 326) and pos[1] in range(323, 384):
self.screen.blit(self.ramka, (self.srodek_x - 117, 323))
if pos[0] in range(95, 326) and pos[1] in range(400, 462):
self.screen.blit(self.ramka, (self.srodek_x - 117, 400))
def instrukcja_2(self):
tekst_instrukcji = pygame.image.load("Obrazy/tekst_instrukcji.png")
self.screen.blit(tekst_instrukcji, (0, 0))
strzalka = pygame.image.load("Obrazy/strzalka.png")
pos = pygame.mouse.get_pos()
if pos[0] in range(21, 127) and pos[1] in range(23, 88):
self.screen.blit(strzalka, (21, 23))
def nowa_gra_2(self):
nowa_gra_2 = pygame.image.load("Obrazy/opcje.png")
self.screen.blit(nowa_gra_2, (0, 0))
pygame.display.flip()
strzalka = pygame.image.load("Obrazy/strzalka.png")
ramka_opcje = pygame.image.load("Obrazy/ramka_opcje.png")
ramka_opcje_mala = pygame.image.load("Obrazy/ramka_opcje_mala.png")
ramka_graj = pygame.image.load("Obrazy/graj.png")
pos = pygame.mouse.get_pos()
        # arrow
if pos[0] in range(21, 127) and pos[1] in range(19, 88):
self.screen.blit(strzalka, (21, 19))
        # large frame
if pos[0] in range(60, 363) and pos[1] in range(104, 165):
self.screen.blit(ramka_opcje, (60, 104))
if pos[0] in range(60, 363) and pos[1] in range(170, 220):
self.screen.blit(ramka_opcje, (60, 170))
if pos[0] in range(60, 363) and pos[1] in range(236, 286):
self.screen.blit(ramka_opcje, (60, 236))
        # small frame
if pos[0] in range(215, 261) and pos[1] in range(329, 373):
self.screen.blit(ramka_opcje_mala, (215, 329))
if pos[0] in range(277, 322) and pos[1] in range(328, 372):
self.screen.blit(ramka_opcje_mala, (277, 328))
if pos[0] in range(340, 384) and pos[1] in range(328, 372):
self.screen.blit(ramka_opcje_mala, (340, 328))
if pos[0] in range(216, 261) and pos[1] in range(404, 447):
self.screen.blit(ramka_opcje_mala, (215, 404))
if pos[0] in range(277, 322) and pos[1] in range(403, 447):
self.screen.blit(ramka_opcje_mala, (277, 403))
if pos[0] in range(340, 384) and pos[1] in range(403, 447):
self.screen.blit(ramka_opcje_mala, (340, 403))
        # play button frame
if pos[0] in range(126, 300) and pos[1] in range(545, 607):
self.screen.blit(ramka_graj, (126, 545))
def kulki_szesc(self):
k_6 = pygame.image.load("Obrazy/kulki_6.png")
self.screen.blit(k_6, (62, 650))
def kulki_siedem(self):
k_7 = pygame.image.load("Obrazy/kulki_7.png")
self.screen.blit(k_7, (40, 650))
def kulki_osiem(self):
k_8 = pygame.image.load("Obrazy/kulki_8.png")
self.screen.blit(k_8, (21, 650))
def graj_12(self):
_12 = pygame.image.load("Obrazy/12.png")
self.screen.blit(_12, (0, 0))
def graj_10(self):
_10 = pygame.image.load("Obrazy/10.png")
self.screen.blit(_10, (0, 0))
def graj_8(self):
_8 = pygame.image.load("Obrazy/8.png")
self.screen.blit(_8, (0, 0))
|
from datetime import datetime
from flask import Flask
from flask import jsonify
from flask_babel import Babel
import date_helper
import month_helper
app = Flask(__name__)
app.config.from_pyfile('mysettings.cfg')
babel = Babel(app)
@app.route("/")
def index():
return '<h2>Bienvenido al index</h2>'
@app.route("/fechas")
def get_dates():
current_date = datetime.now()
date_format = date_helper.DateFormat(current_date)
response = {
'Fecha en formato corto': date_format.short_date(),
'Fecha en formato largo': date_format.long_date(),
'Fecha entera': date_format.full_date()
}
return jsonify(response), 200
@app.route("/months")
def months():
return jsonify({'meses': month_helper.get_months()})
if __name__ == '__main__':
print(dir(date_helper))
app.run(debug=False, port=5000)
|
# -*- coding: utf-8 -*-
import sys,os,json
config = json.loads(open(os.path.dirname(os.path.dirname(__file__)).replace('\\','/')+'/config.json').read())
ip = config.get('public_disk')
sys.path.append('//%s/LocalShare/py27/Lib'%ip)
sys.path.append('//%s/LocalShare/py27/Lib/site-packages'%ip)
from flask import Flask,render_template,url_for,request,redirect,Blueprint,session,flash
from datetime import datetime, timedelta
from pymongo import MongoClient
app = Flask(__name__)
conn = MongoClient('localhost', 27017)
db = conn.TaskList
@app.route('/')
def index():
return render_template('index.html', title=u'欢迎到来到灼华俱乐部')
@app.route('/test')
def test():
dic = {}
Realistic = []
Cartoon = []
ThreeShadingTwo = []
for each in db.uemat.find({}, {'_id': 0}):
if each.get('matType') == 'Realistic':
Realistic.append(each)
if each.get('matType') == 'Cartoon':
Cartoon.append(each)
if each.get('matType') == 'ThreeShadingTwo':
ThreeShadingTwo.append(each)
dic['Realistic'] = Realistic
dic['Cartoon'] = Cartoon
dic['ThreeShadingTwo'] = ThreeShadingTwo
return render_template('test.html', title=u'欢迎到来到灼华俱乐部',dic=dic)
@app.route('/uemat')
def ueMat():
dic = {}
Realistic = []
Cartoon = []
ThreeShadingTwo = []
for each in db.uemat.find({},{'_id': 0}):
if each.get('matType') == 'Realistic':
Realistic.append(each)
if each.get('matType') == 'Cartoon':
Cartoon.append(each)
if each.get('matType') == 'ThreeShadingTwo':
ThreeShadingTwo.append(each)
dic['Realistic'] = Realistic
dic['Cartoon'] = Cartoon
dic['ThreeShadingTwo'] = ThreeShadingTwo
return render_template('ue_mat.html', title=u'欢迎到来到灼华俱乐部',dic=dic)
@app.route('/uemat/add')
def ueMatAdd():
return render_template('ue_matadd.html', title=u'欢迎到来到灼华俱乐部')
@app.route('/uemat/update')
def ueMatUpdate():
matCode = request.args.get('matCode')
dic = db.uemat.find_one({'matCode': matCode},{'_id': 0})
jsonDic = json.dumps(dic)
return render_template('ue_matupdate.html', title=u'欢迎到来到灼华俱乐部',dic=dic,jsonDic= jsonDic)
@app.route('/uemat/ad_post',methods=['POST'])
def ueMatAddPost():
action = request.args.get('action')
if action == 'add':
form_data = request.form.to_dict()
if db.uemat.find({'matCode': form_data['matCode']}).count():
return '材质模板编码已经存在'
elif form_data.get('matCn') and form_data.get('matType') and form_data.get('matCode') and form_data.get('img64'):
db.uemat.insert(form_data)
return '材质模板创建成功'
else:
return '数据不符合要求'
if action == 'update':
form_data = request.form.to_dict()
        item = db.uemat.find_one({'matCode': form_data['matCode']}, {'_id': 0})
if item:
db.uemat.update({'matCode': form_data['matCode']}, {'$set': form_data})
return '材质模板更新成功'
else:
return '材质模板更新失败'
if action == 'del':
form_data = request.form.to_dict()
db.uemat.remove({'matCode': form_data['matCode']})
return '删除成功'
@app.route('/uemat/json',methods=['get'])
def ueMatJson():
temp = []
for each in db.uemat.find({}, {'_id': 0}):
temp.append(each)
return json.dumps(temp)
@app.route('/maya/con',methods=['POST'])
def maya_task():
action = request.args.get('action')
if action == 'get':
item = db.maya.find_one({'taskStatus': 'wait'}, {'_id': 0})
if item:
db.maya.update({'TaskId': item['TaskId']},{'$set': {'taskStatus': 'cleaned', 'date': str(datetime.now()).split('.')[0]}})
print('%s is sent' % item)
return json.dumps(item)
else:
item = db.maya.find_one({'taskStatus': 'cleaned'}, {'_id': 0})
if item:
db.maya.update({'TaskId': item['TaskId']},{'$set': {'taskStatus': 'running', 'date': str(datetime.now()).split('.')[0]}})
print('%s is sent' % item)
return json.dumps(item)
else:
print('maya taskEmpty')
return json.dumps({'taskStock': 'taskEmpty'})
if action == 'post':
data = json.loads(request.data)
if db.maya.find({'TaskId': data['TaskId']}).count():
return u'任务已存在,不要重复提交'
else:
data['date'] = str(datetime.now()).split('.')[0]
db.maya.insert(data)
return u'任务提交成功'
if action == 'del':
data = request.form.to_dict()
if not db.maya.find({'TaskId': data['TaskId']}).count():
return json.dumps({'info':'task is not exist'})
else:
db.maya.remove({'TaskId': data['TaskId']})
return json.dumps({'info': 'delete success'})
if action == 'update':
data = json.loads(request.data)
        item = db.maya.find_one({'TaskId': data['TaskId']}, {'_id': 0})
if not item:
return json.dumps({'info': 'task is not exist'})
else:
db.maya.update({'TaskId': data['TaskId']}, {'$set': data['info']})
return json.dumps({'info': 'update success'})
if action == 'repost':
form_data = request.form.to_dict()
        item = db.maya.find_one({'TaskId': form_data['TaskId']}, {'_id': 0})
if not item:
return json.dumps({'info': 'task is not exist'})
else:
db.maya.update({'TaskId': form_data['TaskId']}, {'$set': {'taskStatus': 'wait'}})
return json.dumps({'info': 'update success'})
@app.route('/maya/task',methods=['GET', 'POST', 'PUT'])
def maya_web():
temp_list = []
dic={}
now = datetime.now()
for each in db.maya.find({'taskStatus':'wait'}, {'_id': 0}):
temp_list.append(each)
dic['wait'] = temp_list
temp_list = []
for each in db.maya.find({'taskStatus':'cleaned'}, {'_id': 0}):
temp_list.append(each)
dic['cleaned'] = temp_list
temp_list = []
for each in db.maya.find({'taskStatus':'running'}, {'_id': 0}):
delta = now - datetime.strptime(each['date'], "%Y-%m-%d %H:%M:%S")
if delta.seconds < 4000:
temp_list.append(each)
else:
db.maya.update({'TaskId': each['TaskId']},
{'$set': {'taskStatus': 'failed', 'date': str(datetime.now()).split('.')[0]}})
dic['running'] = temp_list
temp_list = []
for each in db.maya.find({'taskStatus':'success'}, {'_id': 0}):
delta = now - datetime.strptime(each['date'], "%Y-%m-%d %H:%M:%S")
if delta.seconds < 50000:
temp_list.append(each)
else:
db.maya.remove({'TaskId': each['TaskId']})
dic['success'] = temp_list
temp_list = []
for each in db.maya.find({'taskStatus': 'failed'}, {'_id': 0}):
temp_list.append(each)
dic['failed'] = temp_list
return render_template('task.html',title=u'欢迎到来到灼华俱乐部',dic = dic)
@app.route('/admin/ad_post',methods=['POST'])
def ad_action():
action = request.args.get('action')
dic = request.form.to_dict()
if action == 'delete':
try:
if dic:
db.col.remove({'TaskId':dic['TaskId']})
return '删除成功'
except:
return '删除出错'
if action == 'delete_fail':
try:
if dic:
db.failded.remove({'TaskId':dic['TaskId']})
return '删除成功'
except:
return '删除出错'
if action == 'commit':
try:
data = db.failded.find_one({'TaskId':dic['TaskId']},{'_id': 0 })
db.failded.remove({'TaskId':dic['TaskId']})
data['date'] = str(datetime.now()).split('.')[0]
if db.col.find({'TaskId': data['TaskId']}).count() or db.cloth.find({'TaskId': data['TaskId']}).count() or db.runing.find({'TaskId': data['TaskId']}).count():
return '记录重复'
else:
if data['info'] == 'animate_commit':
db.cleaned.insert(data)
if data['info'] == 'cloth_commit':
db.cloth.insert(data)
return '提交成功'
except:
return '提交出错'
if action == 'recommit':
try:
data = db.runing.find_one({'TaskId': dic['TaskId']}, {'_id': 0})
db.runing.remove({'TaskId': dic['TaskId']})
data['date'] = str(datetime.now()).split('.')[0]
if data['info'] == 'animate_commit':
db.cleaned.insert(data)
if data['info'] == 'cloth_commit':
db.cloth.insert(data)
return '提交成功'
except:
return '提交出错'
if action == 'ueRenderWaitDelete':
try:
if dic:
db.render_wait.remove({'TaskId':dic['TaskId']})
return '删除成功'
except:
return '删除出错'
if action == 'ueRenderFailedDelete':
try:
if dic:
db.render_failed.remove({'TaskId':dic['TaskId']})
return '删除成功'
except:
return '删除出错'
if action == 'ueRenderCommit':
try:
data = db.render_failed.find_one({'TaskId':dic['TaskId']},{'_id': 0 })
db.render_failed.remove({'TaskId':dic['TaskId']})
data['date'] = str(datetime.now()).split('.')[0]
data['info'] = 'render_wait'
if db.render_wait.find({'TaskId': data['TaskId']}).count() or db.rendering.find({'TaskId': data['TaskId']}).count():
return '记录重复'
else:
db.render_wait.insert(data)
return '提交成功'
except:
return '提交出错'
else:
return 'you will never know'
@app.route('/uepro',methods=['GET', 'POST', 'PUT'])
def ueData():
action = request.args.get('request_type')
dic = json.loads(request.data)
print(dic)
if action == 'insert':
try:
db.uepro.insert(dic)
return 'data insert success'
except:
return 'data insert failded'
if action == 'query':
temp = []
for each in db.uepro.find(dic,{'_id': 0}):
temp.append(each)
return json.dumps(temp)
if action == 'update':
try:
db.uepro.update(dic[0],dic[1])
return json.dumps(dic)+ ' update success'
except:
return json.dumps(dic)+' update failded'
if action == 'del':
try:
db.uepro.remove(dic)
return json.dumps(dic)+ ' delete success'
except:
return json.dumps(dic)+' delete failded'
@app.route('/ue/render')
def ue_render():
temp_list = []
dic = {}
now = datetime.now()
for each in db.render_wait.find({}, {'_id': 0}):
temp_list.append(each)
dic['render_wait'] = temp_list
temp_list = []
for each in db.rendering.find({}, {'_id': 0}):
delta = now - datetime.strptime(each['date'], "%Y-%m-%d %H:%M:%S")
if delta.seconds < 4000:
temp_list.append(each)
else:
db.render_failed.insert(each)
db.rendering.remove({'TaskId': each['TaskId']})
dic['rendering'] = temp_list
temp_list = []
for each in db.render_success.find({}, {'_id': 0}):
delta = now - datetime.strptime(each['date'], "%Y-%m-%d %H:%M:%S")
if delta.seconds < 50000:
temp_list.append(each)
else:
db.render_success.remove({'TaskId': each['TaskId']})
dic['render_success'] = temp_list
temp_list = []
for each in db.render_failed.find({}, {'_id': 0}):
delta = now - datetime.strptime(each['date'], "%Y-%m-%d %H:%M:%S")
if delta.seconds < 50000:
temp_list.append(each)
else:
db.render_failed.remove({'TaskId': each['TaskId']})
dic['render_failed'] = temp_list
return render_template('ue_reader.html', title=u'欢迎到来到灼华俱乐部', dic=dic)
@app.route('/ue/con',methods=['GET', 'POST', 'PUT'])
def unreal_task():
action = request.args.get('action')
if action == 'get':
item = db.uepro.find_one({'taskStatus':'wait'}, {'_id': 0})
if item:
db.uepro.update({'TaskId': item['TaskId']},{'$set': {'taskStatus': 'running','date':str(datetime.now()).split('.')[0]}})
print('%s is sent'%item)
return json.dumps(item)
else:
print('task is Empty')
return json.dumps({'taskStock':'taskEmpty'})
if action == 'post':
data = json.loads(request.data)
if db.uepro.find({'TaskId': data['TaskId']}).count():
return json.dumps({'info':'task is exist,do not repeat upload'})
else:
data['date'] = str(datetime.now()).split('.')[0]
db.uepro.insert(data)
return json.dumps({'info': 'upload success'})
if action == 'del':
form_data = request.form.to_dict()
if not db.uepro.find({'TaskId': form_data['TaskId']}).count():
return json.dumps({'info':'task is not exist'})
else:
db.uepro.remove({'TaskId': form_data['TaskId']})
return json.dumps({'info': 'delete success'})
if action == 'update':
data = json.loads(request.data)
        item = db.uepro.find_one({'TaskId': data['TaskId']}, {'_id': 0})
if not item:
return json.dumps({'info':'task is not exist'})
else:
db.uepro.update({'TaskId': data['TaskId']}, {'$set': data['info']})
return json.dumps({'info': 'update success'})
if action == 'repost':
form_data = request.form.to_dict()
        item = db.uepro.find_one({'TaskId': form_data['TaskId']}, {'_id': 0})
if not item:
return json.dumps({'info':'task is not exist'})
else:
db.uepro.update({'TaskId': form_data['TaskId']}, {'$set': {'taskStatus':'wait'}})
return json.dumps({'info': 'update success'})
@app.route('/ue/task',methods=['GET', 'POST', 'PUT'])
def unreal_web():
temp_list = []
dic ={}
now = datetime.now()
for each in db.uepro.find({'taskStatus':'wait'}, {'_id': 0}):
temp_list.append(each)
dic['wait'] = temp_list
temp_list = []
for each in db.uepro.find({'taskStatus':'running'}, {'_id': 0}):
delta = now - datetime.strptime(each['date'], "%Y-%m-%d %H:%M:%S")
if delta.seconds < 4000:
temp_list.append(each)
else:
db.uepro.update({'TaskId': each['TaskId']},
{'$set': {'taskStatus': 'failed', 'date': str(datetime.now()).split('.')[0]}})
dic['running'] = temp_list
temp_list = []
for each in db.uepro.find({'taskStatus':'success'}, {'_id': 0}):
delta = now - datetime.strptime(each['date'], "%Y-%m-%d %H:%M:%S")
if delta.seconds < 50000:
temp_list.append(each)
else:
db.uepro.remove({'TaskId': each['TaskId']})
dic['success'] = temp_list
temp_list = []
for each in db.uepro.find({'taskStatus': 'failed'}, {'_id': 0}):
temp_list.append(each)
dic['failed'] = temp_list
return render_template('ue_task.html', title=u'欢迎到来到灼华俱乐部', dic=dic)
if __name__ == '__main__':
app.run(host='0.0.0.0', port=5000)
|
from lex import lex, lex_print
from token_parser import parse, parse_print
from run import run
import sys
from time import time
import threading
from typing import List
from node import Node
import argparse
def remove_comments(lines: List[str]) -> List[str]:
'''
This function removes lines that start with a question mark.
'''
if len(lines) == 0:
return []
head, *tail = lines
if head.startswith('?'):
return remove_comments(tail)
return [head] + remove_comments(tail)
def open_and_split_file(file_name: str) -> List[List[str]]:
    '''
    This function splits the text file into lines.
    These lines then get divided into separate words.
    This function returns a 2d list with every sub list containing the words from a line.
    '''
    with open(file_name, 'r') as text_file:
        text_list = text_file.read().splitlines()
    text_list = remove_comments(text_list)
    text_list = list(map(str.split, text_list))
    return [words for words in text_list if words]
class run_all:
def __init__(self, file_name):
self.nodes = parse(lex(open_and_split_file(file_name)))
def __call__(self):
self.run_program(self.nodes)
def run_program(self, nodes: List[List[Node]]):
'''
This function runs the program.
'''
print("\nProgram variables:", run(nodes).program_state)
def increase_stack_recursion():
'''
    This function sets the recursion limit to 0x100000 (1,048,576) and the stack size to 256mb
'''
sys.setrecursionlimit(0x100000)
threading.stack_size(256000000)
def get_arguments():
'''
This function retrieves the command line arguments.
This function returns a dictionary with the command line arguments and the value of the argument
'''
parser = argparse.ArgumentParser(description="Lines interpreter")
parser.add_argument("File", help="Path to input file")
parser.add_argument("-l", dest='-l', action='store_true', default='false', help="Print lexer output when running")
parser.add_argument("-p", dest='-p', action='store_true', default='false', help="Print parser output when running")
parser.add_argument("-t", dest='-t', action='store_true', default='false', help="Print the amount of time it took to run the program")
return vars(parser.parse_args())
def main():
'''
Main function that gets called when the program starts.
    This function processes the given command line arguments and calls all the functions to execute the program.
'''
global lex
global parse
increase_stack_recursion()
arguments = get_arguments()
if arguments['-l'] == True:
lex = lex_print(lex)
if arguments['-p'] == True:
parse = parse_print(parse)
if arguments['-t'] == True:
start_time = time()
t = threading.Thread(target=run_all(arguments['File']))
t.start()
t.join()
if arguments['-t'] == True:
print("\nThe program took", round(time() - start_time, 2), "seconds to run")
main()
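# Usage sketch (added; the script name is an assumption, the flags come from
# get_arguments above):
#   python interpreter.py program.lines -l -p -t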
|
import numpy as np
import tensorflow as tf
from data.data_utils import *
class TextRNN(object):
"""文本分类,TextRNN模型"""
def __init__(self, config):
self.config = config
        # Three input placeholders
self.input_x = tf.placeholder(tf.int32, [None, self.config.max_sen_len], name='input_x')
self.input_y = tf.placeholder(tf.float32, [None, self.config.num_classes], name='input_y')
self.is_training = tf.placeholder(tf.bool, name="is_training")
self.keep_prob = tf.where(self.is_training, config.dropout_keep_prob, 1.0)
self.rnn()
def rnn(self):
        # LSTM cell
def lstm_cell():
return tf.contrib.rnn.BasicLSTMCell(self.config.hidden_dim, state_is_tuple=True)
        # GRU cell
def gru_cell():
return tf.contrib.rnn.GRUCell(self.config.hidden_dim)
        # Wrap each RNN cell with a dropout layer
def dropout():
if (self.config.rnn == 'lstm'):
cell = lstm_cell()
else:
cell = gru_cell()
return tf.contrib.rnn.DropoutWrapper(cell, output_keep_prob=self.keep_prob)
"""RNN模型"""
# 词向量映射
with tf.name_scope("embedding"):
init_embeddings = tf.random_uniform([self.config.vocab_size, self.config.embedding_dim])
embedding = tf.get_variable("embedding", initializer=init_embeddings, dtype=tf.float32, trainable=self.config.update_w2v)
self.embedding_inputs = tf.nn.embedding_lookup(embedding, self.input_x)
with tf.name_scope("rnn"):
            # Multi-layer RNN
cells = [dropout() for _ in range(self.config.num_layers)]
rnn_cell = tf.contrib.rnn.MultiRNNCell(cells, state_is_tuple=True)
_outputs, _ = tf.nn.dynamic_rnn(cell=rnn_cell, inputs=self.embedding_inputs, dtype=tf.float32)
            last = _outputs[:, -1, :]  # take the output of the last time step
with tf.name_scope("score"):
            # Fully connected layer, followed by dropout and ReLU activation
fc = tf.layers.dense(last, self.config.hidden_dim, name='fc1')
fc = tf.contrib.layers.dropout(fc, self.keep_prob)
fc = tf.nn.relu(fc)
            # Classifier
self.logits = tf.layers.dense(fc, self.config.num_classes, name='fc2')
            self.y_pred_cls = tf.argmax(tf.nn.softmax(self.logits), 1)  # predicted class
with tf.name_scope("optimize"):
            # Loss function: cross entropy
cross_entropy = tf.nn.softmax_cross_entropy_with_logits_v2(logits=self.logits, labels=self.input_y)
self.loss = tf.reduce_mean(cross_entropy)
            # Optimizer
self.optimizer = tf.train.AdamOptimizer(learning_rate=self.config.learning_rate).minimize(self.loss)
with tf.name_scope("accuracy"):
            # Accuracy
correct_pred = tf.equal(tf.argmax(self.input_y, 1), self.y_pred_cls)
self.accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
    # Yield feed-dict batches
def get_batches(self, x, y=None, batch_size=64, is_shuffle=True):
for index in batch_index(len(x), batch_size, is_shuffle=is_shuffle):
n = len(index)
feed_dict = {
self.input_x: [x[i] for i in index]
}
if y is not None:
feed_dict[self.input_y] = [y[i] for i in index]
yield feed_dict, n
    # Train on one batch
    def train_on_batch(self, sess, feed):
        feed[self.is_training] = True
        # run the optimizer too, otherwise the weights are never updated
        _, _loss, _acc = sess.run([self.optimizer, self.loss, self.accuracy], feed_dict=feed)
        return _loss, _acc
    # Validate on one batch
def val_on_batch(self, sess, feed):
feed[self.is_training]=False
_loss, _acc = sess.run([self.loss, self.accuracy], feed_dict=feed)
return _loss, _acc
    # Predict on one batch
def predict_on_batch(self, sess, feed, prob=True):
feed[self.is_training]=False
result = tf.argmax(self.logits, 1)
if prob:
            result = tf.nn.softmax(logits=self.logits, axis=1)
res = sess.run(result, feed_dict=feed)
return res
    # Predict labels for input x
def predict(self, sess, x, prob=False):
y_pred = []
for _feed, _ in self.get_batches(x, batch_size=self.config.batch_size, is_shuffle=False):
_y_pred = self.predict_on_batch(sess, _feed, prob)
y_pred += _y_pred.tolist()
return np.array(y_pred)
def evaluate(self, sess, x, y):
"""评估在某一数据集上的准确率和损失"""
num = len(x)
total_loss = 0.0
total_acc = 0.0
for _feed, _n in self.get_batches(x, y, batch_size=self.config.batch_size):
loss, acc = self.val_on_batch(sess, _feed)
total_loss += loss * _n
total_acc += acc * _n
return total_loss / num, total_acc / num
|
''' Power Function in python '''
def power_func(int_x, int_y):
    ''' Raise x to the power of y (y must be a non-negative integer) '''
    if int_y == 0:
        return 1
    return int_x * power_func(int_x, int_y - 1)
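# Usage sketch (added):
if __name__ == "__main__":
    print(power_func(2, 10))  # 1024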
|
q = int(input())
while q:
q -= 1
n, m = [int(x) for x in input().split()]
    sum1 = 0
    for x in range(1, n+1):
        temp = 1
        for y in range(1, m+1):
            temp = (temp * (x + y)) % 1000000007
        sum1 = (sum1 + temp) % 1000000007
    print(sum1)
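# A hedged alternative sketch (added, not part of the original submission):
# for a fixed x the inner product equals (x+m)! / x!, so with precomputed
# factorials and Fermat inverses each query runs in O(n+m) instead of O(n*m).
MOD = 1000000007
def fast_sum(n, m):
    fact = [1] * (n + m + 1)
    for i in range(1, n + m + 1):
        fact[i] = fact[i - 1] * i % MOD
    total = 0
    for x in range(1, n + 1):
        # (x+m)! * (x!)^(-1) mod p, inverse via Fermat's little theorem
        total = (total + fact[x + m] * pow(fact[x], MOD - 2, MOD)) % MOD
    return total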
|
import re
import os
import sys
import flask
import base64
import torchvision.transforms as transforms
from io import BytesIO
from PIL import Image
# Append pix2pix filepath to app.py
module_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'pix2pix')
if module_path not in sys.path:
sys.path.append(module_path)
from convert import convert_image
from models import Pix2PixModel
from options import TestOptions
from flask import Flask, render_template, request, send_file, jsonify
# Global Vars
# CUDA_VISIBLE_DEVICES
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" # see issue #152
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
opt = TestOptions().parse()
opt.checkpoints_dir = os.path.join('./pix2pix', opt.checkpoints_dir)
model = Pix2PixModel()
model.initialize(opt)
transformers = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5),
(0.5, 0.5, 0.5))
])
app = Flask(__name__)
# Routes
# Index
@app.route('/')
def index():
return render_template('index.html')
@app.route('/api')
def api_page():
return render_template('api.html')
# Helper function to serve PIL image
def serve_pil_image(pil_img):
img_io = BytesIO()
pil_img.save(img_io, 'PNG')
return base64.b64encode(img_io.getvalue())
# Generate pepe endpoint
@app.route('/generate', methods=['POST'])
def generate():
    global model, opt, transformers
    # Accept the image either as form data or as a JSON payload
    payload = request.form if 'img' in request.form else (request.get_json(silent=True) or {})
    if 'img' in payload:
        img_data = re.sub('^data:image/.+;base64,', '', payload['img'])
        img_data = base64.b64decode(img_data)
        img = Image.open(BytesIO(img_data)).convert('RGB')
        img = img.resize((256, 256), Image.BILINEAR)
        img = convert_image(img, model, transformers)
        return jsonify({'img': serve_pil_image(img).decode('utf-8')})
    return jsonify({'error': 'img not found'})
if __name__ == '__main__':
app.run(host='127.0.0.1', port=8000, threaded=True)
|
from classes.Action import Action
from classes.Cluster import Cluster
from classes.Scooter import Scooter
from classes.Depot import Depot
from classes.State import State
from classes.Vehicle import Vehicle
from classes.Location import Location
from classes.events.Event import Event
from classes.World import World
from classes.events.LostTrip import LostTrip
from classes.events.ScooterDeparture import ScooterDeparture
from classes.events.ScooterArrival import ScooterArrival
from classes.events.VehicleArrival import VehicleArrival
from classes.events.GenerateScooterTrips import GenerateScooterTrips
|
"""
copy:只复制深层对象的引用
deepcopy:复制深层对象的本身
"""
import copy
a = [1,2,["a", "b"]]
c = copy.copy(a) # 浅拷贝
d = copy.deepcopy(a) #深拷贝
print(c)
print(d)
a.append(5)
print("---------")
print(a)
print(c)
print(d)
a[2][1] = "x"
print("---------")
print(a)
print(c)
print(d)
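# Expected behavior (added): appending 5 changes only a, because each copy has
# its own outer list. Mutating the nested list via a[2][1] = "x" also shows up
# in the shallow copy c (shared inner list) but not in the deep copy d.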
|
#!/usr/bin/python3
""" WriteFile Module """
def write_file(filename="", text=""):
""" Function that writes a string to a
text file (UTF8) and returns the number
of characters written
"""
    with open(filename, 'w', encoding='utf-8') as f:
return f.write(text)
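# Usage sketch (added; the file name is illustrative only):
if __name__ == "__main__":
    nb_characters = write_file("my_first_file.txt", "This is a test\n")
    print(nb_characters)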
|
"""
A prime number (or a prime) is a natural number greater than 1 that has
no positive divisors other than 1 and itself.
The property of being prime is called primality. A simple but slow method of
verifying the primality of a given number is known as trial division. It
consists of testing whether n is a multiple of any integer between 2 and sqrt(n).
# The Basic
Write a program that, given a number comprised between 2 and 49, returns
if it is a prime number or not. We can assume that the computer knows
(stores) that [2, 3, 5, 7] are prime numbers.
"""
def is_prime(n: int) -> bool:
    # The stored primes are prime themselves
    if n in (2, 3, 5, 7):
        return True
    # Every composite number in [2, 49] has a prime factor <= sqrt(49) = 7
    for p in (2, 3, 5, 7):
        if n % p == 0:
            return False
    return True
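# A hedged general sketch (added) of the trial division described in the
# docstring, an assumption beyond the 2..49 exercise: test divisors up to sqrt(n).
def is_prime_general(n: int) -> bool:
    if n < 2:
        return False
    i = 2
    while i * i <= n:
        if n % i == 0:
            return False
        i += 1
    return True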
def main():
n = int(input("Enter number between 2 and 49: "))
if n < 2 or n > 49:
print("Number out of range")
return
if is_prime(n):
print(n, "is prime")
else:
print(n, "is not prime")
if __name__ == "__main__":
main()
|
#!/opt/local/bin/python2.7
import cv2
line_length = 40
def main():
cv2.namedWindow("Scanner alignment", cv2.CV_WINDOW_AUTOSIZE)
capture = cv2.VideoCapture(1)
while True:
cv2.waitKey(10)
_,img = capture.read()
img = cv2.flip(cv2.transpose(img),1)
height, width, depth = img.shape
cv2.line(img,(width/2-line_length/2,height/2),(width/2+line_length/2,height/2),(255, 255, 255),1)
cv2.line(img,(width/2,height/2-line_length/2),(width/2,height/2+line_length/2),(255, 255, 255),1)
cv2.imshow("Scanner alignment", img)
if __name__ == '__main__':
main()
|
"""
Sword Offer 45. Arrange the array into the smallest number
Given an array of non-negative integers, concatenate all the numbers into one
number and print the smallest number that can be formed.
"""
"""
First idea: split all the numbers into single digits, pull out the zeros, sort
the remaining digits ascending, then insert the zeros at position 1. But that
is too naive: for [20, 1] it returns 102, while the true minimum is 120.
Hence approach 2.
"""
def minNumber(nums):
tempList = sorted(list(map(int,list("".join(map(str,nums))))))
tempIndex = 0
while tempIndex < len(tempList):
if tempList[tempIndex]!= 0:
break
tempIndex += 1
aList = tempList[tempIndex:]
aList.insert(1,"".join(list(map(str,tempList[:tempIndex]))))
res = "".join(list(map(str,aList)))
return res
"""
Approach 2: this is really a sorting problem. For 30 and 34, if 3034 < 3430
then 30 should come before 34; use this pairwise rule as the ordering for the
whole array. The rule can be plugged into any sorting algorithm; a simple
quicksort is implemented here as practice, although the built-in sort would
also work.
"""
def aSmallerb(a,b):
if int(str(a)+str(b)) < int(str(b)+str(a)):
return True
else:
return False
def minNumber2(nums):
if len(nums) < 2:
return nums
startIndex = 0
finalIndex = len(nums) -1
tempNum = nums[0]
while startIndex < finalIndex:
while startIndex < finalIndex and aSmallerb(tempNum,nums[finalIndex]):
finalIndex -= 1
nums[startIndex] = nums[finalIndex]
while startIndex < finalIndex and not aSmallerb(tempNum,nums[startIndex]):
startIndex += 1
nums[finalIndex] = nums[startIndex]
return minNumber2(nums[:startIndex]) + [tempNum] + minNumber2(nums[startIndex+1:])
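# A hedged sketch (added) of the "built-in sort would also work" remark above,
# applying the same pairwise rule via functools.cmp_to_key; minNumber3 is a
# name introduced here for illustration:
from functools import cmp_to_key
def minNumber3(nums):
    strs = sorted(map(str, nums),
                  key=cmp_to_key(lambda a, b: (a + b > b + a) - (a + b < b + a)))
    return "".join(strs)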
if __name__ == '__main__':
res = minNumber2([13,4,5,2,1])
print("".join(list(map(str,res))))
print(res)
|
"""
=================
Metrics Utilities
=================
This module contains shared utilities for querying, parsing, and transforming
simulation data to support particular observations during the simulation.
"""
from collections import ChainMap
from string import Template
from typing import Union, List, Tuple, Dict, Callable
import numpy as np
import pandas as pd
from vivarium.framework.lookup import LookupTable
from vivarium.framework.values import Pipeline
from vivarium_public_health.utilities import to_years
_MIN_AGE = 0.
_MAX_AGE = 150.
_MIN_YEAR = 1900
_MAX_YEAR = 2100
class QueryString(str):
"""Convenience string that forms logical conjunctions using addition.
This class is meant to be used to create a logical statement for
use with pandas ``query`` functions. It hides away the management
of conjunctions and the fence posting problems that management creates.
Examples
--------
>>> from vivarium_public_health.metrics.utilities import QueryString
>>> s = QueryString('')
>>> s
''
>>> s + ''
''
>>> s + 'abc'
'abc'
>>> s += 'abc'
>>> s + 'def'
'abc and def'
"""
def __add__(self, other: Union[str, 'QueryString']) -> 'QueryString':
if self:
if other:
return QueryString(str(self) + ' and ' + str(other))
else:
return self
else:
return QueryString(other)
def __radd__(self, other: Union[str, 'QueryString']) -> 'QueryString':
return QueryString(other) + self
class SubstituteString(str):
"""Normal string plus a no-op substitute method.
Meant to be used with the OutputTemplate.
"""
def substitute(self, *_, **__) -> 'SubstituteString':
"""No-op method for consistency with OutputTemplate."""
return self
class OutputTemplate(Template):
"""Output string template that enforces standardized formatting."""
@staticmethod
def format_template_value(value):
"""Formatting helper method for substituting values into a template."""
return str(value).replace(' ', '_').lower()
@staticmethod
def get_mapping(*args, **kws):
"""Gets a consistent mapping from args passed to substitute."""
# This is copied directly from the first part of Template.substitute
if not args:
raise TypeError("descriptor 'substitute' of 'Template' object "
"needs an argument")
self, *args = args # allow the "self" keyword be passed
if len(args) > 1:
raise TypeError('Too many positional arguments')
if not args:
mapping = dict(kws)
elif kws:
mapping = ChainMap(kws, args[0])
else:
mapping = args[0]
return self, mapping
def substitute(*args, **kws) -> Union[SubstituteString, 'OutputTemplate']:
"""Substitutes provided values into the template.
Users are allowed to pass any dictionary like object whose keys match
placeholders in the template. Alternatively, they can provide
keyword arguments where the keywords are the placeholders. If both
are provided, the keywords take precedence.
Returns
-------
Union[SubstituteString, OutputTemplate]
Another output template with the provided values substituted in
to the template placeholders if any placeholders remain,
otherwise a completed ``SubstituteString``.
"""
self, mapping = OutputTemplate.get_mapping(*args, **kws)
mapping = {key: self.format_template_value(value) for key, value in mapping.items()}
try:
return SubstituteString(super(OutputTemplate, self).substitute(mapping))
except KeyError:
return OutputTemplate(super(OutputTemplate, self).safe_substitute(mapping))
def safe_substitute(*args, **kws):
"""Alias to OutputTemplate.substitute."""
self, mapping = OutputTemplate.get_mapping(*args, **kws)
return self.substitute(mapping)
def __repr__(self):
return super(OutputTemplate, self).safe_substitute()
def get_age_bins(builder) -> pd.DataFrame:
"""Retrieves age bins relevant to the current simulation.
Parameters
----------
builder
The simulation builder.
Returns
-------
DataFrame with columns ``age_group_name``, ``age_start``,
and ``age_end``.
"""
age_bins = builder.data.load('population.age_bins')
# Works based on the fact that currently only models with age_start = 0 can include fertility
age_start = builder.configuration.population.age_start
    min_bin_start = age_bins.age_start[np.digitize(age_start, age_bins.age_end).item()]
age_bins = age_bins[age_bins.age_start >= min_bin_start]
age_bins.loc[age_bins.age_start < age_start, 'age_start'] = age_start
exit_age = builder.configuration.population.exit_age
if exit_age:
age_bins = age_bins[age_bins.age_start < exit_age]
age_bins.loc[age_bins.age_end > exit_age, 'age_end'] = exit_age
return age_bins
def get_output_template(by_age: bool, by_sex: bool, by_year: bool, **_) -> OutputTemplate:
"""Gets a template string for output metrics.
The template string should be filled in using filter criteria for
measure, age, sex, and year in the observer using this function.
Parameters
----------
by_age
Whether the template should include age criteria.
by_sex
Whether the template should include sex criteria.
by_year
Whether the template should include year criteria.
Returns
-------
OutputTemplate
A template string with measure and possibly additional criteria.
"""
template = '${measure}'
if by_year:
template += '_in_${year}'
if by_sex:
template += '_among_${sex}'
if by_age:
template += '_in_age_group_${age_group}'
return OutputTemplate(template)
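# Example (added, derived from the construction above): with by_age, by_sex,
# and by_year all True, the resulting template string is
# '${measure}_in_${year}_among_${sex}_in_age_group_${age_group}'.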
def get_age_sex_filter_and_iterables(config: Dict[str, bool],
age_bins: pd.DataFrame,
in_span: bool = False) -> Tuple[QueryString,
Tuple[List[Tuple[str, pd.Series]], List[str]]]:
"""Constructs a filter and a set of iterables for age and sex.
The constructed filter and iterables are based on configuration for the
observer component.
Parameters
----------
config
A mapping with 'by_age' and 'by_sex' keys and boolean values
indicating whether the observer is binning data by the respective
categories.
age_bins
A table containing bin names and bin edges.
in_span
Whether the filter for age corresponds to living through an age
group in a time span or uses a point estimate of age at a particular
point in time.
Returns
-------
QueryString
A filter on age and sex for use with DataFrame.query
Tuple[List[Tuple[str, pd.Series]], List[str]]
Iterables for the age and sex groups partially defining the bins
for the observers.
"""
age_sex_filter = QueryString("")
if config['by_age']:
ages = list(age_bins.set_index('age_group_name').iterrows())
if in_span:
age_sex_filter += '{age_start} < age_at_span_end and age_at_span_start < {age_end}'
else:
age_sex_filter += '{age_start} <= age and age < {age_end}'
else:
ages = [('all_ages', pd.Series({'age_start': _MIN_AGE, 'age_end': _MAX_AGE}))]
if config['by_sex']:
sexes = ['Male', 'Female']
age_sex_filter += 'sex == "{sex}"'
else:
sexes = ['Both']
return age_sex_filter, (ages, sexes)
def get_time_iterable(config: Dict[str, bool],
sim_start: pd.Timestamp,
sim_end: pd.Timestamp) -> List[Tuple[str, Tuple[pd.Timestamp, pd.Timestamp]]]:
"""Constructs an iterable for time bins.
The constructed iterable are based on configuration for the observer
component.
Parameters
----------
config
A mapping with 'by_year' and a boolean value indicating whether
the observer is binning data by year.
sim_start
The time the simulation starts.
sim_end
The time the simulation ends.
Returns
-------
List[Tuple[str, Tuple[pandas.Timestamp, pandas.Timestamp]]]
Iterable for the time groups partially defining the bins
for the observers.
"""
if config['by_year']:
time_spans = [(year, (pd.Timestamp(f'1-1-{year}'), pd.Timestamp(f'1-1-{year + 1}')))
for year in range(sim_start.year, sim_end.year + 1)]
else:
time_spans = [('all_years', (pd.Timestamp(f'1-1-{_MIN_YEAR}'), pd.Timestamp(f'1-1-{_MAX_YEAR}')))]
return time_spans
def get_group_counts(pop: pd.DataFrame,
base_filter: str,
base_key: OutputTemplate,
config: Dict[str, bool],
age_bins: pd.DataFrame,
aggregate: Callable[[pd.DataFrame], Union[int, float]] = len
) -> Dict[Union[SubstituteString, OutputTemplate], Union[int, float]]:
"""Gets a count of people in a custom subgroup.
The user is responsible for providing a default filter (e.g. only alive
people, or people susceptible to a particular disease). Demographic
filters will be applied based on standardized configuration.
Parameters
----------
pop
The population dataframe to be counted. It must contain sufficient
columns for any necessary filtering (e.g. the ``age`` column if
filtering by age).
base_filter
A base filter term (e.g.: alive, susceptible to a particular disease)
formatted to work with the query method of the provided population
dataframe.
base_key
A template string with replaceable fields corresponding to the
requested filters.
config
A dict with ``by_age`` and ``by_sex`` keys and boolean values.
age_bins
A dataframe with ``age_start`` and ``age_end`` columns.
aggregate
Transformation used to produce the aggregate.
Returns
-------
Dict[Union[SubstituteString, OutputTemplate], Union[int, float]]
A dictionary of output_key, count pairs where the output key is a
string or template representing the sub groups.
"""
age_sex_filter, (ages, sexes) = get_age_sex_filter_and_iterables(config, age_bins)
base_filter += age_sex_filter
group_counts = {}
for group, age_group in ages:
start, end = age_group.age_start, age_group.age_end
for sex in sexes:
filter_kwargs = {'age_start': start, 'age_end': end, 'sex': sex, 'age_group': group}
group_key = base_key.substitute(**filter_kwargs)
group_filter = base_filter.format(**filter_kwargs)
in_group = pop.query(group_filter) if group_filter and not pop.empty else pop
group_counts[group_key] = aggregate(in_group)
return group_counts
def get_susceptible_person_time(pop, config, disease, current_year, step_size, age_bins):
base_key = get_output_template(**config).substitute(measure=f'{disease}_susceptible_person_time', year=current_year)
base_filter = QueryString(f'alive == "alive" and {disease} == "susceptible_to_{disease}"')
person_time = get_group_counts(pop, base_filter, base_key, config, age_bins,
aggregate=lambda x: len(x) * to_years(step_size))
return person_time
def get_disease_event_counts(pop, config, disease, event_time, age_bins):
base_key = get_output_template(**config).substitute(measure=f'{disease}_counts', year=event_time.year)
# Can't use query with time stamps, so filter
pop = pop.loc[pop[f'{disease}_event_time'] == event_time]
base_filter = QueryString('')
return get_group_counts(pop, base_filter, base_key, config, age_bins)
def get_prevalent_cases(pop, config, disease, event_time, age_bins):
config = config.copy()
config['by_year'] = True # This is always an annual point estimate
base_key = get_output_template(**config).substitute(measure=f'{disease}_prevalent_cases', year=event_time.year)
base_filter = QueryString(f'alive == "alive" and {disease} != "susceptible_to_{disease}"')
return get_group_counts(pop, base_filter, base_key, config, age_bins)
def get_population_counts(pop, config, event_time, age_bins):
config = config.copy()
config['by_year'] = True # This is always an annual point estimate
    base_key = get_output_template(**config).substitute(measure='population_count', year=event_time.year)
    base_filter = QueryString('alive == "alive"')
return get_group_counts(pop, base_filter, base_key, config, age_bins)
def get_person_time(pop: pd.DataFrame, config: Dict[str, bool], sim_start: pd.Timestamp,
sim_end: pd.Timestamp, age_bins: pd.DataFrame) -> Dict[str, float]:
base_key = get_output_template(**config).substitute(measure='person_time')
base_filter = QueryString("")
time_spans = get_time_iterable(config, sim_start, sim_end)
person_time = {}
for year, (t_start, t_end) in time_spans:
year_key = base_key.substitute(year=year)
lived_in_span = get_lived_in_span(pop, t_start, t_end)
person_time_in_span = get_person_time_in_span(lived_in_span, base_filter, year_key, config, age_bins)
person_time.update(person_time_in_span)
return person_time
def get_lived_in_span(pop: pd.DataFrame, t_start: pd.Timestamp, t_end: pd.Timestamp) -> pd.DataFrame:
"""Gets a subset of the population that lived in the time span.
Parameters
----------
pop
A table representing the population with columns for 'entrance_time',
'exit_time' and 'age'.
t_start
The date and time at the start of the span.
t_end
The date and time at the end of the span.
Returns
-------
pandas.DataFrame
A table representing the population who lived some amount of time
within the time span with all columns provided in the original
table and additional columns 'age_at_span_start' and 'age_at_span_end'
indicating the age of the individual at the start and end of the time
span, respectively. 'age_at_span_start' will never be lower than the
age at the simulant's entrance time and 'age_at_span_end' will never
be greater than the age at the simulant's exit time.
"""
    lived_in_span = pop.loc[(t_start < pop['exit_time']) & (pop['entrance_time'] < t_end)].copy()
span_entrance_time = lived_in_span.entrance_time.copy()
span_entrance_time.loc[t_start > span_entrance_time] = t_start
span_exit_time = lived_in_span.exit_time.copy()
span_exit_time.loc[t_end < span_exit_time] = t_end
lived_in_span.loc[:, 'age_at_span_end'] = lived_in_span.age - to_years(lived_in_span.exit_time
- span_exit_time)
lived_in_span.loc[:, 'age_at_span_start'] = lived_in_span.age - to_years(lived_in_span.exit_time
- span_entrance_time)
return lived_in_span
def get_person_time_in_span(lived_in_span: pd.DataFrame, base_filter: QueryString,
span_key: OutputTemplate, config: Dict[str, bool],
age_bins: pd.DataFrame) -> Dict[Union[SubstituteString, OutputTemplate], float]:
"""Counts the amount of person time lived in a particular time span.
Parameters
----------
lived_in_span
A table representing the subset of the population who lived in a
particular time span. Must have columns for 'age_at_span_start' and
'age_at_span_end'.
base_filter
A base filter term (e.g.: alive, susceptible to a particular disease)
formatted to work with the query method of the provided population
dataframe.
span_key
A template string with replaceable fields corresponding to the
requested filters.
config
A dict with ``by_age`` and ``by_sex`` keys and boolean values.
age_bins
A dataframe with ``age_start`` and ``age_end`` columns.
Returns
-------
Dict[Union[SubstituteString, OutputTemplate], float]
A dictionary of output_key, person_time pairs where the output key
corresponds to a particular demographic group.
"""
person_time = {}
age_sex_filter, (ages, sexes) = get_age_sex_filter_and_iterables(config, age_bins, in_span=True)
base_filter += age_sex_filter
for group, age_bin in ages:
a_start, a_end = age_bin.age_start, age_bin.age_end
for sex in sexes:
filter_kwargs = {'sex': sex, 'age_start': a_start,
'age_end': a_end, 'age_group': group}
key = span_key.substitute(**filter_kwargs)
group_filter = base_filter.format(**filter_kwargs)
in_group = lived_in_span.query(group_filter) if group_filter else lived_in_span.copy()
age_start = np.maximum(in_group.age_at_span_start, a_start)
age_end = np.minimum(in_group.age_at_span_end, a_end)
person_time[key] = (age_end - age_start).sum()
return person_time
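# A minimal, self-contained sketch of the span-clipping idea in
# get_person_time_in_span: person time inside an age bin is the overlap of
# [age_at_span_start, age_at_span_end] with [age_start, age_end). The frame
# and numbers below are illustrative only.
if __name__ == '__main__':
    _demo = pd.DataFrame({'age_at_span_start': [0.5, 4.0, 9.5],
                          'age_at_span_end': [1.5, 6.0, 10.0]})
    _bin_start, _bin_end = 1.0, 5.0
    _start = np.maximum(_demo.age_at_span_start, _bin_start)
    _end = np.minimum(_demo.age_at_span_end, _bin_end)
    # Negative overlaps are clipped here; the real code relies on the
    # age/sex filter to exclude rows that never enter the bin.
    print((_end - _start).clip(lower=0).sum())  # 0.5 + 1.0 + 0.0 = 1.5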
def get_deaths(pop: pd.DataFrame, config: Dict[str, bool], sim_start: pd.Timestamp,
sim_end: pd.Timestamp, age_bins: pd.DataFrame, causes: List[str]) -> Dict[str, int]:
"""Counts the number of deaths by cause.
Parameters
----------
pop
The population dataframe to be counted. It must contain sufficient
columns for any necessary filtering (e.g. the ``age`` column if
filtering by age).
config
A dict with ``by_age``, ``by_sex``, and ``by_year`` keys and
boolean values.
sim_start
The simulation start time.
sim_end
The simulation end time.
age_bins
A dataframe with ``age_start`` and ``age_end`` columns.
causes
List of causes present in the simulation.
Returns
-------
Dict[str, int]
A dictionary of output_key, death_count pairs where the output_key
represents a particular demographic subgroup.
"""
base_filter = QueryString('alive == "dead" and cause_of_death == "death_due_to_{cause}"')
base_key = get_output_template(**config)
pop = clean_cause_of_death(pop)
time_spans = get_time_iterable(config, sim_start, sim_end)
deaths = {}
for year, (t_start, t_end) in time_spans:
died_in_span = pop[(t_start <= pop.exit_time) & (pop.exit_time < t_end)]
for cause in causes:
cause_year_key = base_key.substitute(measure=f'death_due_to_{cause}', year=year)
cause_filter = base_filter.format(cause=cause)
group_deaths = get_group_counts(died_in_span, cause_filter, cause_year_key, config, age_bins)
deaths.update(group_deaths)
return deaths
def get_years_of_life_lost(pop: pd.DataFrame,
config: Dict[str, bool],
sim_start: pd.Timestamp,
sim_end: pd.Timestamp,
age_bins: pd.DataFrame,
life_expectancy: LookupTable,
causes: List[str]) -> Dict[str, float]:
"""Counts the years of life lost by cause.
Parameters
----------
pop
The population dataframe to be counted. It must contain sufficient
columns for any necessary filtering (e.g. the ``age`` column if
filtering by age).
config
A dict with ``by_age``, ``by_sex``, and ``by_year`` keys and
boolean values.
sim_start
The simulation start time.
sim_end
The simulation end time.
age_bins
A dataframe with ``age_start`` and ``age_end`` columns.
life_expectancy
A lookup table that takes in a pandas index and returns the life
expectancy of each individual represented by the index.
causes
List of causes present in the simulation.
Returns
-------
Dict[str, float]
A dictionary of output_key, yll_count pairs where the output_key
represents a particular demographic subgroup.
"""
base_filter = QueryString('alive == "dead" and cause_of_death == "death_due_to_{cause}"')
base_key = get_output_template(**config)
pop = clean_cause_of_death(pop)
time_spans = get_time_iterable(config, sim_start, sim_end)
years_of_life_lost = {}
for year, (t_start, t_end) in time_spans:
died_in_span = pop[(t_start <= pop.exit_time) & (pop.exit_time < t_end)]
for cause in causes:
cause_year_key = base_key.substitute(measure=f'ylls_due_to_{cause}', year=year)
cause_filter = base_filter.format(cause=cause)
group_ylls = get_group_counts(died_in_span, cause_filter, cause_year_key, config, age_bins,
aggregate=lambda subgroup: sum(life_expectancy(subgroup.index)))
years_of_life_lost.update(group_ylls)
return years_of_life_lost
def get_years_lived_with_disability(pop: pd.DataFrame,
config: Dict[str, bool],
current_year: int,
step_size: pd.Timedelta,
age_bins: pd.DataFrame,
disability_weights: Dict[str, Pipeline],
causes: List[str]) -> Dict[str, float]:
"""Counts the years lived with disability by cause in the time step.
Parameters
----------
pop
The population dataframe to be counted. It must contain sufficient
columns for any necessary filtering (e.g. the ``age`` column if
filtering by age).
config
A dict with ``by_age``, ``by_sex``, and ``by_year`` keys and
boolean values.
current_year
The current year in the simulation.
step_size
The size of the current time step.
age_bins
A dataframe with ``age_start`` and ``age_end`` columns.
disability_weights
A mapping between causes and their disability weight pipelines.
causes
List of causes present in the simulation.
Returns
-------
Dict[str, float]
A dictionary of output_key, yld_count pairs where the output_key
represents a particular demographic subgroup.
"""
base_key = get_output_template(**config).substitute(year=current_year)
base_filter = QueryString('alive == "alive"')
years_lived_with_disability = {}
for cause in causes:
cause_key = base_key.substitute(measure=f'ylds_due_to_{cause}')
def count_ylds(sub_group):
"""Counts ylds attributable to a cause in the time step."""
return sum(disability_weights[cause](sub_group.index) * to_years(step_size))
group_ylds = get_group_counts(pop, base_filter, cause_key, config, age_bins, aggregate=count_ylds)
years_lived_with_disability.update(group_ylds)
return years_lived_with_disability
def clean_cause_of_death(pop: pd.DataFrame) -> pd.DataFrame:
"""Standardizes cause of death names to all read ``death_due_to_cause``."""
def _clean(cod: str) -> str:
    if 'death' not in cod and 'dead' not in cod:
        cod = f'death_due_to_{cod}'
    return cod
pop.cause_of_death = pop.cause_of_death.apply(_clean)
return pop
def get_state_person_time(pop: pd.DataFrame, config: Dict[str, bool],
state_machine: str, state: str, current_year: Union[str, int],
step_size: pd.Timedelta, age_bins: pd.DataFrame) -> Dict[str, float]:
"""Custom person time getter that handles state column name assumptions"""
base_key = get_output_template(**config).substitute(measure=f'{state}_person_time',
year=current_year)
base_filter = QueryString(f'alive == "alive" and {state_machine} == "{state}"')
person_time = get_group_counts(pop, base_filter, base_key, config, age_bins,
aggregate=lambda x: len(x) * to_years(step_size))
return person_time
class TransitionString(str):
def __new__(cls, value):
# noinspection PyArgumentList
obj = str.__new__(cls, value.lower())
obj.from_state, obj.to_state = value.split('_TO_')
return obj
def get_transition_count(pop: pd.DataFrame, config: Dict[str, bool],
state_machine: str, transition: TransitionString,
event_time: pd.Timestamp, age_bins: pd.DataFrame) -> Dict[str, float]:
"""Counts transitions that occurred this step."""
event_this_step = ((pop[f'previous_{state_machine}'] == transition.from_state)
& (pop[state_machine] == transition.to_state))
transitioned_pop = pop.loc[event_this_step]
base_key = get_output_template(**config).substitute(measure=f'{transition}_event_count',
year=event_time.year)
base_filter = QueryString('')
transition_count = get_group_counts(transitioned_pop, base_filter, base_key, config, age_bins)
return transition_count
|
character_name = "John" # it is the variable we can create
character_age ="35"
x = 35 #you can also create a integer data type
is_Male = False #you can also create boolean values as data type
print("There once was a man name " + character_name + ",")
print("he was " + character_age + " years old.")
character_name = "Mike" #you can change the variable in the middle way
print("He really liked the name " + character_name + ", ")
print("but didn't like being " + character_age +".")
|
from django.shortcuts import render
from django.conf import settings
from django.db import IntegrityError
from rest_framework import status
from rest_framework.views import APIView
from rest_framework.permissions import AllowAny, IsAuthenticated
from rest_framework.response import Response
from .serializers import UserProfileSerializer, TweetSerializer
from .models import UserProfile, Tweet
class Followers(APIView):
serializer_class = UserProfileSerializer
def get(self, request):
    """Get the list of the user's followings."""
    user_profile = UserProfile.objects.get(user__id=request.user.id)
    serializer = self.serializer_class(user_profile)
    return Response(serializer.data)
def post(self, request, format=None):
"""Adds the follower to Users profile"""
try:
if request.user.email == request.data.get("follow_email"):
return Response({"error": ["Follow email cannot be same as User email"]},status=status.HTTP_400_BAD_REQUEST)
serializer = self.serializer_class(data=request.data, context={"user_email": request.user.email})
if serializer.is_valid():
serializer.save() #invokes create function defined in serializer class
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
except Exception as e:
print(e)
return Response({"error": ["an error occured"]},status=status.HTTP_400_BAD_REQUEST)
def put(self, request, format=None):
"""Removes the follower from Users profile"""
try:
user_profile = UserProfile.objects.get(user__id=request.user.id)
serializer = self.serializer_class(user_profile, data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
except Exception as e:
print(e)
return Response({"error": ["an error occured"]},status=status.HTTP_400_BAD_REQUEST)
class TweetHandling(APIView):
serializer_class = TweetSerializer
def get(self, request):
"""Get all the tweets created by the current logged in user"""
tweets = Tweet.objects.filter(created_by__id=request.user.id)
serializer = self.serializer_class(tweets, many=True)
return Response(serializer.data)
def post(self, request, format=None):
"""Creates a new Tweet"""
try:
serializer = self.serializer_class(data=request.data, context={"user_email": request.user.email})
if serializer.is_valid():
serializer.save() #invokes create function defined in serializer class
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
except Exception as e:
print(e)
return Response({"error": ["an error occured"]},status=status.HTTP_400_BAD_REQUEST)
def delete(self, request, format=None):
"""Deletes a Tweet"""
try:
tweet_id = request.data.get('id')
if tweet_id is None:
    return Response({"error": ["Tweet id is required"]}, status=status.HTTP_400_BAD_REQUEST)
tweet = Tweet.objects.get(id=tweet_id)
if request.user.id == tweet.created_by.id:
tweet.delete()
return Response(status=status.HTTP_202_ACCEPTED)
return Response(status=status.HTTP_403_FORBIDDEN)
except Exception as e:
print(e)
return Response({"error": ["an error occured"]},status=status.HTTP_400_BAD_REQUEST)
class AllTweetsHandling(APIView):
serializer_class = TweetSerializer
def get(self, request):
"""Get all the tweets that could be viewed by the current user(except his own tweets)"""
user_profile = UserProfile.objects.get(user__id=request.user.id)
followings = user_profile.following.all()
all_tweets = Tweet.objects.none()
for user in followings:
tweets = Tweet.objects.filter(created_by=user)
all_tweets = all_tweets | tweets
all_tweets = all_tweets.order_by('-created_time')
serializer = self.serializer_class(all_tweets, many=True)
return Response(serializer.data)
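# Note: an equivalent single-query version of AllTweetsHandling.get (a
# sketch, assuming the same models and related_name) would be:
#     all_tweets = Tweet.objects.filter(
#         created_by__in=user_profile.following.all()
#     ).order_by('-created_time')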
|
import sys
import boto3
import uuid
from cfn_keypair_provider import KeyPairProvider
from secrets import handler
def test_defaults():
request = Request("Create", "abc")
r = KeyPairProvider()
r.set_request(request, {})
assert r.is_valid_request()
assert r.get("Name") == "abc"
assert r.get("PublicKeyMaterial") is not None
request["ResourceProperties"] = {"Name": "abc"}
r.set_request(request, {})
assert not r.is_valid_request(), "PublicKeyMaterial is required"
request["ResourceProperties"] = {"PublicKeyMaterial": "abc"}
r.set_request(request, {})
assert not r.is_valid_request(), "Name is required"
def test_key_name_from_physical_resource_id():
request = Request(
"Update",
"abc",
"arn:aws:ec2:eu-central-1:245111612214:keypair/kb062b200-4b67-4eee-8933-44d76c0a199a",
)
provider = KeyPairProvider()
provider.set_request(request, {})
assert (
provider.key_name_from_physical_resource_id()
== "kb062b200-4b67-4eee-8933-44d76c0a199a"
)
request = Request("Update", "abc", "sdfasdfsdfsf")
provider = KeyPairProvider()
provider.set_request(request, {})
assert provider.key_name_from_physical_resource_id() is None
def get_finger_print(name):
ec2 = boto3.resource("ec2")
key_pair = ec2.KeyPair(name)
key_pair.load()
return key_pair.key_fingerprint
def test_create_and_public():
# create a test parameter
provider = KeyPairProvider()
name = "k%s" % uuid.uuid4()
request = Request("Create", name)
response = provider.handle(request, {})
assert response["Status"] == "SUCCESS", response["Reason"]
assert provider.is_valid_cfn_response(), response["Reason"]
assert "PhysicalResourceId" in response
physical_resource_id = response["PhysicalResourceId"]
assert "Data" in response
assert "Arn" in response["Data"]
assert "Name" in response["Data"]
assert response["Data"]["Arn"] == physical_resource_id
assert response["Data"]["Name"] == name
finger_print_1 = get_finger_print(name)
assert finger_print_1 is not None
# update the material
request = Request(
"Update", name, physical_resource_id, KeyPair().public_key_material
)
response = handler(request, {})
assert response["Status"] == "SUCCESS", response["Reason"]
assert response["Data"]["Name"] == name
finger_print_2 = get_finger_print(name)
assert finger_print_2 is not None
assert finger_print_1 != finger_print_2
assert response["Data"]["Name"] == name
# delete the parameters
request = Request("Delete", name, physical_resource_id)
response = handler(request, {})
assert response["Status"] == "SUCCESS", response["Reason"]
def test_request_duplicate_create():
# request a duplicate create
name = "k%s" % uuid.uuid4()
request = Request("Create", name)
response = handler(request, {})
assert response["Status"] == "SUCCESS", response["Reason"]
physical_resource_id = response["PhysicalResourceId"]
request = Request("Create", name)
response = handler(request, {})
assert response["Status"] == "FAILED", response["Reason"]
request = Request("Delete", name, physical_resource_id)
response = handler(request, {})
assert response["Status"] == "SUCCESS", response["Reason"]
def test_update_name():
# create a keypair
name = "k%s" % uuid.uuid4()
request = Request("Create", name)
response = handler(request, {})
assert response["Status"] == "SUCCESS", response["Reason"]
assert "PhysicalResourceId" in response
physical_resource_id = response["PhysicalResourceId"]
assert response["Data"]["Name"] == name
# update keypair name
name_2 = "k2%s" % name
request = Request("Update", name_2, physical_resource_id)
response = handler(request, {})
assert response["Status"] == "SUCCESS", response["Reason"]
assert "PhysicalResourceId" in response
assert response["Data"]["Name"] == name_2
physical_resource_id_2 = response["PhysicalResourceId"]
assert physical_resource_id != physical_resource_id_2
# delete the keypairs
request = Request("Delete", name, physical_resource_id)
response = handler(request, {})
assert response["Status"] == "SUCCESS", response["Reason"]
request = Request("Delete", name, physical_resource_id_2)
response = handler(request, {})
assert response["Status"] == "SUCCESS", response["Reason"]
def test_request_duplicate_through_update():
# update parameter name
name = "k%s" % uuid.uuid4()
request = Request("Create", name)
response = handler(request, {})
assert response["Status"] == "SUCCESS", response["Reason"]
physical_resource_id = response["PhysicalResourceId"]
name_2 = "k2%s" % name
request = Request("Create", name_2)
response = handler(request, {})
assert response["Status"] == "SUCCESS", response["Reason"]
assert "PhysicalResourceId" in response
physical_resource_id_2 = response["PhysicalResourceId"]
request = Request("Update", name, physical_resource_id_2)
response = handler(request, {})
assert response["Status"] == "FAILED", response["Reason"]
# delete the parameters
request = Request("Delete", name, physical_resource_id)
response = handler(request, {})
assert response["Status"] == "SUCCESS", response["Reason"]
request = Request("Delete", name, physical_resource_id_2)
response = handler(request, {})
assert response["Status"] == "SUCCESS", response["Reason"]
class KeyPair(object):
def __init__(self):
from cryptography.hazmat.primitives import serialization as crypto_serialization
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.hazmat.backends import (
default_backend as crypto_default_backend,
)
self.key = rsa.generate_private_key(
backend=crypto_default_backend(), public_exponent=65537, key_size=2048
)
self.private_key = self.key.private_bytes(
crypto_serialization.Encoding.PEM,
crypto_serialization.PrivateFormat.PKCS8,
crypto_serialization.NoEncryption(),
).decode("ascii")
self.public_key = (
self.key.public_key()
.public_bytes(
crypto_serialization.Encoding.OpenSSH,
crypto_serialization.PublicFormat.OpenSSH,
)
.decode("ascii")
)
@property
def public_key_material(self):
return self.public_key
class Request(dict):
    def __init__(
        self,
        request_type,
        name,
        physical_resource_id=None,
        public_key_material=None,
    ):
        # Create defaults per call: a default argument expression is evaluated
        # only once, so every Request would otherwise share one id and key pair.
        if physical_resource_id is None:
            physical_resource_id = str(uuid.uuid4())
        if public_key_material is None:
            public_key_material = KeyPair().public_key_material
self.update(
{
"RequestType": request_type,
"ResponseURL": "https://httpbin.org/put",
"StackId": "arn:aws:cloudformation:us-west-2:EXAMPLE/stack-name/guid",
"RequestId": "request-%s" % uuid.uuid4(),
"ResourceType": "Custom::KeyPair",
"LogicalResourceId": "MyKey",
"PhysicalResourceId": physical_resource_id,
"ResourceProperties": {
"Name": name,
"PublicKeyMaterial": public_key_material,
},
}
)
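# Why the None-default pattern in Request.__init__ matters: default argument
# values are evaluated once, at definition time, so every call omitting the
# argument would otherwise share one uuid and one key pair. A minimal sketch:
def _stale_default(value=str(uuid.uuid4())):
    return value
def _fresh_default(value=None):
    return str(uuid.uuid4()) if value is None else value
assert _stale_default() == _stale_default()  # same id on every call
assert _fresh_default() != _fresh_default()  # fresh id on every call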
|
# -*- coding: utf-8 -*-
"""
Created on Mon Feb 8 18:42:08 2021
@author: gabri
"""
import urllib.parse
import requests
main_api = "https://www.mapquestapi.com/directions/v2/route?"
key = "QvrROLQbeGnXmsgha3A7O4tiYI5XHBUo"
#The "while True" construct creates an endless loop.
while True:
orig = input("Starting Location: ")
if orig=="quit" or orig=="q":
break
dest = input("Destination: ")
if dest=="quit" or dest=="q":
break
url = main_api + urllib.parse.urlencode({"key": key, "from": orig, "to": dest})
print("URL: " + (url))
|
from django.urls import include, path
from . import views
urlpatterns = [
# UI Bundle
path('', views.IndexView.as_view(), name='index'),
# Check if no-spa
path('no-spa/', include('wolves.ui.urls')),
# Rest are rest api
path('', include('wolves.api.urls'))
]
|
from config import Config
from website import create_app
app = create_app(Config)

if __name__ == '__main__':
    app.run()
|
"""
Created on 2017-10-26
class: RL4SRD
@author: fengyue
"""
# !/usr/bin/python
# -*- coding:utf-8 -*-
from treelib import Tree
import copy
from utils import normalize
from utils import compute_bleu_rouge
"""
num : number of visit time
once_num : nothing
Q : value funtion calculate value of now-node
p : policy score of now-node
doc : doc episode list of now-state
"""
class node(object):
def __init__(self):
self.num = 0.0
self.Q = 0.0
self.p = 0.0
self.word = []
class search_tree(object):
def __init__(self, mcst, q_id, max_depth, l_passages, p_words_list, ref_answer, vocab):
self.tree = Tree()
self.q_id = q_id
self.tree.create_node(identifier='question_' + str(q_id), data=node())
root_node = self.tree.get_node('question_' + str(q_id))
root_node.data.num = 1.0
self.node_map = {}
self.l_passages = l_passages
self.p_words_list = p_words_list
self.ref_answer = ref_answer
self.count = 0.0
self.carpe_diem = mcst
self.max_depth = max_depth
self.vocab = vocab
self.expand(self.tree.get_node(self.tree.root))
def expand(self, leaf_node):
print '---expand: '
words_list = leaf_node.data.word
print 'word_list:'
print words_list
p_word_id, p_pred = self.carpe_diem.get_policy(words_list, self.l_passages)
print 'candidate_id: '
print p_word_id
for word in p_word_id:
#print word
#print 'self.node_map: ' + str(self.node_map)
#print 'len of self.node_map: '+ str(len(self.node_map))
self.node_map[' '.join(words_list + [str(word)])] = len(self.node_map)
#print 'a bunch of operations'
#print 'self.node_map: ' + str(self.node_map)
#print 'len of self.node_map: ' + str(len(self.node_map))
new_node = node()
new_node.word = words_list + [str(word)]
#print new_node.word
new_node.p = p_pred[p_word_id.index(word)]
new_node.Q = self.carpe_diem.value_function(words_list)[0][0]
#print new_node.p
self.tree.create_node(identifier=self.node_map[' '.join(new_node.word)], data=new_node,
parent=leaf_node.identifier)
def update(self, node_list, value):
#print '----update'
for node_id in node_list:
tmp_node = self.tree.get_node(node_id)
tmp_node.data.Q = (tmp_node.data.Q * tmp_node.data.num + value) / (tmp_node.data.num + 1)
tmp_node.data.num += 1
def search(self, start_node_id):
#print '----tree search'
tmp_node = self.tree.get_node(start_node_id)
#print tmp_node.data.num
has_visit_num = tmp_node.data.num - 1
self.count = has_visit_num
if int(self.carpe_diem.search_time - has_visit_num) > 0:
start_node_search_time = int(self.carpe_diem.search_time - has_visit_num)
#print 'start_node_search_time: '
#print start_node_search_time
else:
start_node_search_time = 0
#print 'print str(self.l_passages): '
#print str(self.l_passages - 1)
for time in range(start_node_search_time):
search_list = [start_node_id]
tmp_node = self.tree.get_node(start_node_id)
#print 'search time :'+ str(time)
while not tmp_node.is_leaf():
max_score = float("-inf")
max_id = -1
for child_id in tmp_node.fpointer:
child_node = self.tree.get_node(child_id)
#score = child_node.data.p
#print "child_node.data.p: " + str(child_node.data.p)
#print "tmp_node.data.num: " + str(tmp_node.data.num)
score = self.carpe_diem.beta * child_node.data.p * (
(tmp_node.data.num) ** 0.5 / (1 + child_node.data.num))
#print 'child_node.data.Q: '
#print child_node.data.Q
score += child_node.data.Q
#print 'score: '
#print score
#print '**************'
if score > max_score:
max_id = child_id
max_score = score
search_list.append(max_id)
tmp_node = self.tree.get_node(max_id)
#query_id_mcts = self.tree.root.split('_')[1]
#print query_id_mcts
#print 'end'
#print 'tmp_node.data.word'
#print tmp_node.data.word[-1]
#end
#if tmp_node.data.word[-1] == str(self.l_passages-1):
if tmp_node.data.word[-1] == str(self.l_passages - 1):
v = 0
pred_answer = tmp_node.data.word
print 'pred_answer: '
print pred_answer
print 'listSelectedSet'
listSelectedSet_words = []
listSelectedSet = map(eval, pred_answer)
print listSelectedSet
for idx in listSelectedSet:
listSelectedSet_words.append(self.p_words_list[idx])
print 'str123'
str123 = self.vocab.recover_from_ids(listSelectedSet_words, 0)
print str123
pred_answers = []
pred_answers.append({'question_id': [self.q_id],
'question_type': [],
'answers': [''.join(str123)],
'entity_answers': [[]],
'yesno_answers': []})
if len(self.ref_answer) > 0:
pred_dict, ref_dict = {}, {}
for pred, ref in zip(pred_answers, self.ref_answer):
question_id = ref['question_id']
if len(ref['answers']) > 0:
pred_dict[question_id] = normalize(pred['answers'])
ref_dict[question_id] = normalize(ref['answers'])
print '========compare in tree======='
print pred_dict[question_id]
print '----------------------'
print ref_dict[question_id]
bleu_rouge = compute_bleu_rouge(pred_dict, ref_dict)
else:
bleu_rouge = None
print 'bleu_rouge'
print bleu_rouge
v = bleu_rouge['Bleu-4']
else:
v = self.carpe_diem.value_function(tmp_node.data.word)[0][0]
#print 'v: '
#print v
self.update(search_list, v)
self.count += 1
if tmp_node.is_leaf() and (self.tree.depth(tmp_node) < self.max_depth) and tmp_node.data.word[-1] != str(self.l_passages-1):
self.expand(tmp_node)
###########
'''
if time % 100 == 0:
tmp_policy = self.get_ppolicy(start_node_id)
print tmp_policy.values()
print sum(tmp_policy.values())
print time
'''
#print tmp_node.data.word
#print '------finish search '
#print '===== finish all search ======'
def take_action(self, start_node_id):
#print '----take action: '
tmp_node = self.tree.get_node(start_node_id)
max_time = -1
prob = {}
for child_id in tmp_node.fpointer:
child_node = self.tree.get_node(child_id)
prob[child_node.data.word[-1]] = child_node.data.num / self.count
if child_node.data.num > max_time:
#print child_node.data.num
#print max_time
#print 'child_node.data.num > max_time'
max_time = child_node.data.num
select_word = child_node.data.word[-1]
select_word_node_id = child_node.identifier
#else:
#print 'not > max time '
#print select_word
#print select_word_node_id
#print '-----take action end'
return prob, select_word, select_word_node_id
def get_ppolicy(self, start_node_id):
tmp_node = self.tree.get_node(start_node_id)
max_time = -1
prob = {}
for child_id in tmp_node.fpointer:
child_node = self.tree.get_node(child_id)
if self.count == 0:
prob[child_node.data.word[-1]] = 0.0
else:
prob[child_node.data.word[-1]] = child_node.data.num / self.count
return prob
|
def matrixInit():
"""
The function to initialize a matrix from the user.
Returns:
Matrix: A matrix
"""
R = int(input("Enter the number of rows:"))
C = int(input("Enter the number of columns:"))
matrix = []
print("Enter the entries row wise:")
for i in range(R): # A for loop for row entries
a = []
for j in range(C): # A for loop for column entries
a.append(int(input()))
matrix.append(a)
return matrix
def isToeplitz(matrix):
"""
The function to check Toeplitz Matrix.
Parameters:
matrix (two-dimensional array): The two-dimensional array to check on.
Returns:
Boolean: A True if the matrix is toeplitz, else returns False
"""
col = len(matrix[0])  # number of columns (n)
row = len(matrix)  # number of rows (m)
# indexes, i&ii for the rows, j&jj for the columns
i = 0
ii = 0
j = 0
jj = 0
# first we check all the diagonals in the upper triangular matrix
while j < col:
val = matrix[i][j]
# assign the next val in the diagonal
ii = i + 1
jj = j + 1
while ii < row and jj < col:
# if a value differs, return immediately so we don't check the rest needlessly
if val != matrix[ii][jj]:
return False
ii += 1
jj += 1
j += 1
# now we check all the diagonals in the lower triangular matrix
# except for the main diagonal to avoid double checking
i = 1  # start one row down to skip re-checking the main diagonal
j = 0  # initialize the column index
while i < row:
val = matrix[i][j]
# assign the next val in the diagonal
ii = i + 1
jj = j + 1
while ii < row and jj < col:
# if a value differs, return immediately so we don't check the rest needlessly
if val != matrix[ii][jj]:
return False
ii += 1
jj += 1
i += 1
return True
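# A minimal alternative sketch: a matrix is Toeplitz exactly when every
# element equals its upper-left neighbour, which walks each diagonal
# implicitly in a single pass.
def isToeplitzSimple(matrix):
    return all(matrix[i][j] == matrix[i - 1][j - 1]
               for i in range(1, len(matrix))
               for j in range(1, len(matrix[0])))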
def main():
matrix = matrixInit()
is_toeplitz = isToeplitz(matrix)
print(is_toeplitz)
if __name__ == "__main__":
main()
|
x=float(input("x= "))
if((x>0)and((x%2)!=0)):
print("positive odd number")
elif((x>0)and(x%2)==0):
print("positive even number")
elif ((x<0)and((x%2)!=0)):
print("negative odd number")
elif((x<0)and((x%2)==0)):
print("negative even number")
else:
print("zero number")
|
from main import Handler
from models.user import User
from models.post import Post
from main import blog_key
class NewPost(Handler):
def get(self):
if self.user:
title = 'New Post'
self.render(
"newpost.html",
title=title
)
else:
self.redirect('/login')
def post(self):
if not self.user:
return self.redirect('/login')
subject = self.request.get('subject')
content = self.request.get('content')
creator = self.user.key().id()
if subject and content:
p = Post(
parent=blog_key(),
subject=subject,
content=content,
creator=creator
)
p.put()
self.redirect('/post/%s' % str(p.key().id()))
else:
title = 'New Post'
error = "Please enter both a subject and some content"
self.render(
"newpost.html",
title=title,
subject=subject,
content=content,
error=error,
edit=False
)
|
from peewee import *
db = PostgresqlDatabase('flashcards', user='postgres', password='', host='localhost', port=5432)
class BaseModel(Model):
class Meta:
database = db
class Flashcard(BaseModel):
front = CharField()
back = CharField()
times_correct = IntegerField()
times_missed = IntegerField()
db.connect()
from translations import translate
import random
db.drop_tables([Flashcard])
db.create_tables([Flashcard])
translate()
print("\n BIENVENIDXS! What would you like to do?")
def train():
action = input("\n * 'create' to create a new card, \n * 'view' to view your deck of cards', \n * 'train' to review your cards. ")
if action == "create" or action == "Create":
new_card = input("What Spanish word or phrase would you like to practice? Type in all lowercase. ")
new_translation = input("And its English translation? Type in all lowercase. ")
new = Flashcard(front=new_card, back=new_translation, times_correct = 0, times_missed=0)
new.save()
print(f'{new.front} / translation: {new.back}')
train()
elif action == "view" or action == "View":
for flashcard in Flashcard.select():
print(f"{flashcard.front} / translation: {flashcard.back} / times correct: {flashcard.times_correct} / times missed: {flashcard.times_missed}")
train()
elif action == "train" or action == "Train":
print("\n * Each turn, you will be presented with a word or phrase in Spanish. \n * Return its translation in English. Type in all lowercase!")
deck = list(Flashcard.select())
random.shuffle(deck)
cardstack = int(
input("\nHow many cards would you like to review? Choose up to 20. "))
while cardstack > 20 or cardstack < 1:
cardstack = int(input("\nNope! Please choose a number between 1 and 20."))
print("\nWrite 'quit' at any time to finish your review sesh.")
for i in range(0, cardstack, 1):
response = input(deck[i].front+" / translation: ")
if response == 'quit':
print("\nHasta luego!")
exit()
elif response == deck[i].back:
deck[i].times_correct = deck[i].times_correct + 1
deck[i].save()
print("\nMuy bien! \nYou've answered this correctly {} times.".format(deck[i].times_correct))
else:
deck[i].times_missed = deck[i].times_missed + 1
deck[i].save()
print("\nNope! \nYou've missed this one {} times.".format(deck[i].times_missed))
if i == (cardstack - 1):
review_again = input(
"You've reached the end of this session. Back to the main menu, y/n?")
if review_again == "Y" or review_again == "y":
train()
else:
print("Hasta luego!")
break
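# A hedged sketch of a safer numeric prompt: the int(input(...)) calls above
# raise ValueError on non-numeric input; a helper like this could guard that.
def ask_int(prompt, low=1, high=20):
    while True:
        raw = input(prompt)
        try:
            value = int(raw)
        except ValueError:
            print("Please enter a number.")
            continue
        if low <= value <= high:
            return value
        print(f"Please choose a number between {low} and {high}.")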
train()
|
import torch
from torch.autograd import Variable
from PIL import Image
from torchvision import transforms
import json
from model.gram_efficientnet import GramEfficientNet
use_gpu = torch.cuda.is_available()
net_name = 'efficientnet-b0'
image_size = GramEfficientNet.get_image_size(net_name)
img = Image.open('img.jpg')
tfms = transforms.Compose([transforms.Resize(image_size), transforms.CenterCrop(image_size),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),])
img = tfms(img).unsqueeze(0)
# Replicate the single image into a batch of 10; indexing img[i] for i > 0
# on a batch of size 1 would raise an IndexError.
img = img.repeat(10, 1, 1, 1)
print(img.size())
labels_map = json.load(open('labels_map.txt'))
labels_map = [labels_map[str(i)] for i in range(1000)]
model_ft = GramEfficientNet.from_pretrained(net_name, load_fc=False)
model_ft.eval()  # inference mode: fixed batch-norm statistics, no dropout
if use_gpu:
    model_ft = model_ft.cuda()
    img = Variable(img.cuda())
with torch.no_grad():  # no gradients needed for inference
    outputs = model_ft(img)
preds = torch.topk(outputs, k=5).indices[0].tolist()  # top-5 for the first batch item
print('-----')
for idx in preds:
label = labels_map[idx]
prob = torch.softmax(outputs, dim=1)[0, idx].item()
print('{:<75} ({:.2f}%)'.format(label, prob*100))
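# Note: torch.autograd.Variable has been a no-op wrapper since PyTorch 0.4;
# plain tensors carry autograd state, so the transfer above could simply be
#     img = img.cuda()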
|
import csv
import sys
import os
import time
import re
import json
from collections import Counter
try: # Python 3.x
from urllib.parse import quote as urlencode
from urllib.request import urlretrieve
except ImportError: # Python 2.x
from urllib import pathname2url as urlencode
from urllib import urlretrieve
try: # Python 3.x
import http.client as httplib
except ImportError: # Python 2.x
import httplib
from astropy.table import Table
import numpy as np
import pprint
pp = pprint.PrettyPrinter(indent=4)
PROPID_FILE = "proposalids_gto_ers.csv"
RESULTS_FILE = "testV230.csv"
def mastQuery(request):
"""Perform a MAST query.
Parameters
----------
request (dictionary): The MAST request json object
Returns head,content where head is the response HTTP headers, and content is the returned data"""
server='mast.stsci.edu'
# Grab Python Version
version = ".".join(map(str, sys.version_info[:3]))
# Create Http Header Variables
headers = {"Content-type": "application/x-www-form-urlencoded",
"Accept": "text/plain",
"User-agent":"python-requests/"+version}
# Encoding the request as a json string
requestString = json.dumps(request)
requestString = urlencode(requestString)
# opening the https connection
conn = httplib.HTTPSConnection(server)
# Making the query
print("Sending MAST query...")
conn.request("POST", "/api/v0/invoke", "request="+requestString, headers)
# Getting the response
resp = conn.getresponse()
head = resp.getheaders()
content = resp.read().decode('utf-8')
# Close the https connection
conn.close()
return head,content
def count_planned_obs():
mashupRequest = {"service":"Mast.Caom.Filtered.TestV230",
"format":"json",
"params":{
"columns":"COUNT_BIG(*)",
"filters":[
{"paramName":"calib_level",
"values":["-1"],
}
]}}
headers,outString = mastQuery(mashupRequest)
countData = json.loads(outString)
pp.pprint(countData)
data = countData['data']
count = data[0]['Column1']
return count
def get_planned_obs():
mashupRequest = {"service":"Mast.Caom.Filtered.TestV230",
"format":"json",
#"clearcache":"true",
"params":{
"columns":"*",
"filters":[
{"paramName":"calib_level",
"values":["-1"],
}
]}}
headers,outString = mastQuery(mashupRequest)
countData = json.loads(outString)
return countData
def write_to_csv(countData):
headers = countData['fields']
head = []
for field in headers:
head.append(field['name'])
all_rows = []
data = countData['data']
for obs in data:
row = []
for entry in obs.keys():
as_string = str(obs[entry])
row.append(as_string)
all_rows.append(row)
saveas = "testv230.csv"
saveas = os.path.abspath(saveas)
with open(saveas, 'w') as output:
    writer = csv.writer(output)
    writer.writerow(head)
    writer.writerows(all_rows)
return saveas
def collect_results():
    num = count_planned_obs()
    if num < 50000:
        results = get_planned_obs()
        as_csv = write_to_csv(results)
        print(".csv file written!")
        return as_csv
    print("Too many results ({0}) to fetch in a single request".format(num))
    return None
def extract_results(filename):
filepath = os.path.abspath(filename)
all_results = []
with open(filepath, 'r') as csvfile:
    reader = csv.DictReader(csvfile)
    for entry in reader:
        all_results.append(dict(entry))
return all_results
def generate_census(field, table):
if isinstance(field, list):
list0 = table[field[0]]
list1 = table[field[1]]
print(list0[0])
print(list1[0])
all_entries = list(zip(list0, list1))
print(all_entries[0])
else:
all_entries = table[field]
uniques = Counter(all_entries)
#uniques = list(set(all_entries))
print("Found {0} unique entries for '{1}'".format(len(uniques.keys()), field))
filename = str(field) + ".csv"
filepath = os.path.abspath(filename)
head = [field, "occurrences"]
with open(filepath, 'w') as result_file:
writer = csv.writer(result_file)
writer.writerow(head)
for entry in sorted(uniques.keys()):
row = [entry, uniques[entry]]
writer.writerow(row)
return uniques
def find_missing_entries(all_entries, found):
#all_entries = list(all_entries.keys())
found = list(found.keys())
missing = []
for entry in all_entries:
if entry in found:
continue
else:
missing.append(entry)
print("Missing ID's: {0}".format(missing))
def analyze_stats(all_results):
primer = all_results[0]
by_column = {}
for field in primer.keys():
by_column[field] = []
for entry in all_results:
for current_field in entry:
by_column[current_field].append(entry[current_field])
proposals = generate_census('proposal_id', by_column)
generate_census('proposal_pi', by_column)
all_proposals = []
proposal_file = os.path.abspath(PROPID_FILE)
with open(proposal_file, 'r') as props:
    reader = csv.reader(props)
    for row in reader:
        all_proposals.append(row[0])
find_missing_entries(all_proposals, proposals)
generate_census('target_name', by_column)
generate_census('proposal_type', by_column)
generate_census('filters', by_column)
generate_census(['s_ra', 's_dec'], by_column)
def analyze_results():
results = extract_results(RESULTS_FILE)
analyze_stats(results)
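# A sketch of how the >50,000-row case (skipped in collect_results above)
# might be paged, assuming the service honours the standard MAST
# 'pagesize'/'page' request keys; treat those keys and the page size as
# assumptions.
def get_planned_obs_page(page, pagesize=25000):
    mashupRequest = {"service": "Mast.Caom.Filtered.TestV230",
                     "format": "json",
                     "pagesize": pagesize,
                     "page": page,
                     "params": {"columns": "*",
                                "filters": [{"paramName": "calib_level",
                                             "values": ["-1"]}]}}
    headers, outString = mastQuery(mashupRequest)
    return json.loads(outString)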
if __name__ == "__main__":
collect_results()
analyze_results()
|
import subprocess
import os
import shutil
import re
import tempfile
from behave import *
import parse
use_step_matcher("cfparse")
@parse.with_pattern(r"finally\s+")
def parse_word_finally(text):
"""Type converter for "finally " (followed by one/more spaces)."""
return text.strip()
register_type(finally_=parse_word_finally)
COMMAND = ['python', '-m', 'ronto.main']
def match_line(expected, line):
m = re.search(expected, line)
return m is not None
@given(u'ronto is installed')
def step_impl(context):
command = COMMAND + ['--help']
with open(os.devnull, 'w') as FNULL:
result = subprocess.run(command, stdout=FNULL)
assert 0 == result.returncode
@when('I {:finally_?}enter "{cli}"')
def step_impl(context, finally_, cli):
rontofile = []
if hasattr(context, 'rontofile'):
rontofile = ['--file', context.rontofile ]
command = COMMAND + rontofile + cli.split()
context.command = command
try:
output = subprocess.check_output(command)
context.output = output.decode().split('\n')
except subprocess.CalledProcessError as err:
context.exitcode = err.returncode
context.output = err.output.decode().split('\n')
if hasattr(context, 'rontofile') and finally_:
# cleanup temporary rontofile after command was running "finally"
os.remove(context.rontofile)
@then('the exit code indicates an error')
def step_impl(context):
assert hasattr(context, 'exitcode')
@then(u'ronto prints "{version}"')
def step_impl(context, version):
assert context.failed is False
assert context.output[0] == version
@given('a rontofile content as')
def step_impl(context):
with tempfile.NamedTemporaryFile(mode='w', delete=False,) as f:
f.write(context.text)
context.rontofile = f.name
@then(u'ronto prints')
def step_impl(context):
if hasattr(context, 'command'):
print(f"Command run: {context.command}")
expected = context.text.split('\n')
assert hasattr(context, 'output')
print(f"length Exp: {len(expected)} Out: {len(context.output)}")
assert len(context.output) >= len(expected)
for i in range(len(expected)):
print(f"Exp at({i}):, {expected[i]}")
print(f"Out at({i}):, {context.output[i]}")
assert match_line(expected[i], context.output[i])
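# Note: match_line treats `expected` as a regular expression, so feature
# files can assert on patterns, e.g.
#     match_line(r"version \d+\.\d+", "ronto version 1.2 ready")  # True
#     match_line(r"^error", "no error here")                      # False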
|
for i in range(10):  # i runs from 0 to 9; the loop variable's name doesn't matter
    print('Hello i ', i)  # inner loops need different names; i may be reused after the loop ends
    for k in range(10):  # nested: runs 10 * 10 = 100 times in total
        print("Hello from K", k)
        for j in range(10):  # nested again: 10 * 10 * 10 = 1000 times in total
            print("Hi from j", j)
for i in range(3):
    name = input(f'You are number {i} in the queue ')
    print(name)
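# A quick check of the iteration counts claimed in the comments above:
count = 0
for i in range(10):
    for k in range(10):
        for j in range(10):
            count += 1
print(count)  # 10 * 10 * 10 = 1000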
|
from steam import get_games
# should mock this out but let's call feedparser for real
# for a feedparser mocking see the AttrDict (advanced) Bite 50's tests
games = get_games()
def test_assert_number_of_entries():
assert len(games) == 30
def test_all_list_items_are_namedtuples():
assert all(isinstance(game, tuple) for game in games)
def test_assert_all_links_contain_store():
assert all('store.steampowered.com' in game.link for game in games)
def test_title_and_url_first_entry():
first_game = games[0]
assert first_game.title == 'Midweek Madness - RiME, 33% Off'
assert first_game.link == 'http://store.steampowered.com/news/31695/'
def test_title_and_url_last_entry():
last_game = games[-1]
assert last_game.title == 'Now Available on Steam - Loco Dojo, 35% off!'
assert last_game.link == 'http://store.steampowered.com/news/31113/'
|
from spack import *
import sys, os
sys.path.append(os.path.join(os.path.dirname(__file__), '../../common'))
from scrampackage import write_scram_toolfile
class ValgrindToolfile(Package):
url = 'file://' + os.path.dirname(__file__) + '/../../common/junk.xml'
version('1.0', '68841b7dcbd130afd7d236afe8fd5b949f017615', expand=False)
depends_on('valgrind')
def install(self, spec, prefix):
values = {}
values['VER'] = spec['valgrind'].version
values['PFX'] = spec['valgrind'].prefix
fname = 'valgrind.xml'
contents = str("""
<tool name="valgrind" version="${VER}">
<client>
<environment name="VALGRIND_BASE" default="${PFX}"/>
<environment name="INCLUDE" default="$$VALGRIND_BASE/include"/>
</client>
<runtime name="PATH" value="$$VALGRIND_BASE/bin" type="path"/>
<runtime name="VALGRIND_LIB" value="$$VALGRIND_BASE/lib/valgrind"/>
<runtime name="ROOT_INCLUDE_PATH" value="$$INCLUDE" type="path"/>
<use name="root_cxxdefaults"/>
</tool>
""")
write_scram_toolfile(contents, values, fname, prefix)
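# Note: write_scram_toolfile presumably substitutes the ${...} fields via
# string.Template (an assumption); under those semantics "$$" survives as a
# literal "$", e.g.
#     Template('<env default="$$BASE" v="${VER}"/>').substitute(VER='3.15')
#     -> '<env default="$BASE" v="3.15"/>'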
|
import math
import csv
from operator import itemgetter
import random
import statistics
import copy
import simulation_data1
chargingTimeSimulation = []
unallocatedVehiclesSimulation = []
chargingTimeMinDistance = []
unallocatedVehiclesMinDistance = []
def Simulation (simulationData) :
#{
vehicles = simulationData[0]
stations = simulationData[1]
schedules = simulationData[2]
SOCr = simulationData[3][:vehicles]
SOCt = simulationData[4][:vehicles]
SOCc = simulationData[5][:vehicles]
location = simulationData[6][:vehicles]
requestTime = simulationData[7][:vehicles]
speed = simulationData[8][:vehicles]
dischargeRate = simulationData[9][:vehicles]
direction = simulationData[10][:vehicles]
stationLocation = simulationData[11][:stations]
waitTime = simulationData[12][:stations]
chargingRate = simulationData[13][:stations]
powerAvailable = simulationData[14][:stations]
busImpedance = simulationData[15][:stations]
potentialStations = []
chargingTime = []
listToCompare = []
travelTimeList = []
stationAssigned = []
for i in range(0, vehicles):
#{
potentialStations.append([])
chargingTime.append([])
listToCompare.append([])
#}
dth = []
def FindSlot(requestTime) :
#{
slot = int(requestTime/15)
return slot
#}
for i in range(0, vehicles):
#{
dth.append((SOCc[i] - SOCt[i])/dischargeRate[i])
for iteration, val in enumerate(stationLocation):
#{
if (direction[i] == 1):
#{
if ( val > location[i] and val < location[i] + dth[i] ) :
#{
potentialStations[i].append(iteration)
#}
#}
else :
#{
if ( val < location[i] and val > location[i] - dth[i]) :
#{
potentialStations[i].append(iteration)
#}
#}
#}
for index, value in enumerate(potentialStations[i]) :
#{
travelTime = (abs(stationLocation[value] - location[i]))/speed[i]
slot = FindSlot(requestTime[i] + travelTime)
waitAtStation = waitTime[value][slot]
chargeTime = (SOCr[i] - (SOCc[i] -(abs(stationLocation[value] - location[i]))*dischargeRate[i] ))/chargingRate[value]
chargingTime[i].append(waitAtStation + chargeTime)
listToCompare[i].append((i, value, requestTime[i], travelTime, waitAtStation, chargeTime))
#}
#}
tmpList = []
for i, value in enumerate(chargingTime):
#{
tmpList.append((i, len(chargingTime[i]), sorted( list(zip(chargingTime[i], potentialStations[i]))) ))
sortedChargingTime = sorted(tmpList, key= itemgetter(1))
#}
def SortChargingTime(sortedChargingTime) :
#{
tmpList = []
for i, value in enumerate(sortedChargingTime):
#{
if(sortedChargingTime[i][2] != []):
#{
sortedChargingTime[i][2].sort()
#}
#}
#}
for i, val in enumerate(sortedChargingTime) :
#{
print(sortedChargingTime[i])
#}
n = 3
m = 5
positionCount = []
for i in range(0, n) :
#{
positionCount.append([])
for j in range(0, m):
#{
positionCount[i].append(0)
#}
#}
def FindPositionCount(vehicleIndex, stationIndex, sortedChargingTime) :
#{
stationID = sortedChargingTime[vehicleIndex][2][stationIndex][1]
chargingTimeToCompare = sortedChargingTime[vehicleIndex][2][stationIndex][0]
vehicleToAllocate = sortedChargingTime[vehicleIndex][0]
for i, val in enumerate(listToCompare[vehicleToAllocate]) :
#{
if(val[1] == stationID):
#{
requestTime = val[2]
travelTime = val[3]
waitTime = val[4]
chargeTime = val[5]
#}
for i in range(0, m) :
#{
for j in range(vehicleIndex + 1, len(sortedChargingTime)) :
#{
indexToCompare = sortedChargingTime[j][0]
if (i >= len(sortedChargingTime[j][2])) :
#{
continue
#}
if(sortedChargingTime[j][2][i][1] == stationID) :
#{
for k, val in enumerate(listToCompare[indexToCompare]) :
#{
if (val[1] == stationID) :
#{
if(requestTime + travelTime < val[2] + val[3] < requestTime + travelTime + waitTime + chargeTime) :
#{
positionCount[stationIndex][i] = positionCount[stationIndex][i] + 1
#}
#}
#}
#{
positionCount[stationIndex][i] = positionCount[stationIndex][i] + 1
#}
#}
#}
#}
#}
def ClearPositionCount(positionCount) :
#{
for i in range(0, n) :
#{
for j in range(0, m):
#{
positionCount[i][j] = 0
#}
#}
#}
assignedStations = []
listForAverage = []
def UpdateWaitingTime(listIndexAllocated, vehicleAllocated, stationID, timeToCharge) :
#{
for i, val in enumerate(listToCompare[vehicleAllocated]) :
#{
if(val[1] == stationID):
#{
travelTime = val[3]
waitTime = val[4]
chargeTime = val[5]
#}
#}
for i in range (listIndexAllocated +1, len(sortedChargingTime)) :
#{
index = sortedChargingTime[i][0]
if ( stationID in [value[1] for j, value in enumerate(sortedChargingTime[i][2])] ) :
#{
indexToUpdate = [j for j, value in enumerate(sortedChargingTime[i][2]) if value[1] == stationID][0]
for iteration, val in enumerate(listToCompare[index]) :
#{
if (val[1] == stationID) :
#{
if(requestTime[vehicleAllocated] + travelTime < val[2] + val[3] < requestTime[vehicleAllocated] + travelTime + waitTime + chargeTime ) :
#{
tmpListToUpdate = list(sortedChargingTime[i][2][indexToUpdate])
tmpListToUpdate[0] = sortedChargingTime[i][2][indexToUpdate][0] + requestTime[vehicleAllocated] + travelTime + waitTime + chargeTime - val[2] - val[3]
sortedChargingTime[i][2][indexToUpdate] = tuple(tmpListToUpdate)
#}
#}
#}
#}
#}
#}
def FindFirstElementTime(sortedChargingTime, index, stationID) :
#{
timeFirstElement = 0
for i in range(index + 1, len(sortedChargingTime) - 1) :
#{
if(sortedChargingTime[i][2] != []) :
#{
if (sortedChargingTime[i][2][0][1] == stationID ) :
#{
timeFirstElement = timeFirstElement + sortedChargingTime[i][2][0][0]
#}
#}
#}
return timeFirstElement
#}
tmpList = []
for i, val in enumerate(sortedChargingTime) :
#{
if (sortedChargingTime[i][2] != []) :
#{
tmpVehicleAllocated = sortedChargingTime[i][0]
tmpStationID = sortedChargingTime[i][2][0][1]
tmpTimeToCharge = sortedChargingTime[i][2][0][0]
for j, value in enumerate(sortedChargingTime[i][2]) :
#{
tmpList = copy.deepcopy(sortedChargingTime)
diff1 = FindFirstElementTime(sortedChargingTime, i, sortedChargingTime[i][2][j][1])
UpdateWaitingTime(i, tmpVehicleAllocated, tmpStationID, tmpTimeToCharge )
diff2 = FindFirstElementTime(sortedChargingTime, i, sortedChargingTime[i][2][j][1])
tmpDiff1 = diff2 - diff1
sortedChargingTime = tmpList
for k in range(j+1, len(sortedChargingTime[i][2]) - 2 ):
#{
tmpList = copy.deepcopy(sortedChargingTime)
diff3 = FindFirstElementTime(sortedChargingTime, i, sortedChargingTime[i][2][k][1])
UpdateWaitingTime(i, tmpVehicleAllocated, sortedChargingTime[i][2][k][1], sortedChargingTime[i][2][k][0] )
diff4 = FindFirstElementTime(sortedChargingTime, i, sortedChargingTime[i][2][k][1])
sortedChargingTime = tmpList
tmpDiff2 = diff4 - diff3
diff5 = sortedChargingTime[i][2][k][0] - sortedChargingTime[i][2][j][0]
if(tmpDiff1 > diff5 and tmpDiff1 > tmpDiff2) :
#{
tmpStationID = sortedChargingTime[i][2][k][1]
tmpTimeToCharge = sortedChargingTime[i][2][k][0]
tmpDiff1 = tmpDiff2
#}
#}
#}
vehicleAllocated = sortedChargingTime[i][0]
stationID = tmpStationID
timeToCharge = tmpTimeToCharge
assignedStations.append((vehicleAllocated, timeToCharge, stationID))
UpdateWaitingTime(i, vehicleAllocated, stationID, timeToCharge)
#}
SortChargingTime(sortedChargingTime)
#}
for i, val in enumerate (assignedStations) :
#{
listForAverage.append(val[1])
#}
averageChargingTime = statistics.mean(listForAverage)
unallocatedVehicles = potentialStations.count([])
print(assignedStations)
for i, val in enumerate(sortedChargingTime) :
#{
print(sortedChargingTime[i])
#}
print(assignedStations)
print(averageChargingTime)
print(unallocatedVehicles)
chargingTimeSimulation.append(averageChargingTime)
unallocatedVehiclesSimulation.append(unallocatedVehicles)
#}
#}
def SimulationMinDistance(simulationData) :
#{
vehicles = simulationData[0]
stations = simulationData[1]
schedules = simulationData[2]
SOCr = simulationData[3][:vehicles]
SOCt = simulationData[4][:vehicles]
SOCc = simulationData[5][:vehicles]
location = simulationData[6][:vehicles]
requestTime = simulationData[7][:vehicles]
speed = simulationData[8][:vehicles]
dischargeRate = simulationData[9][:vehicles]
direction = simulationData[10][:vehicles]
stationLocation = simulationData[11][:stations]
waitTime = simulationData[12][:stations]
chargingRate = simulationData[13][:stations]
powerAvailable = simulationData[14][:stations]
busImpedance = simulationData[15][:stations]
potentialStations = []
chargingTime = []
listToCompare = []
travelTimeList = []
stationAssigned = []
stationLocation.sort()
for i in range(0, vehicles):
#{
potentialStations.append([])
chargingTime.append([])
listToCompare.append([])
travelTimeList.append([])
stationAssigned.append([])
#}
dth = []
stationLocation.sort()
for i in range (0, stations) :
#{
for j in range (0, schedules):
#{
waitTime[i].append(random.randrange(5, 10))
powerAvailable[i].append(random.randrange(50, 100))
#}
#}
dth = []
def FindSlot(requestTime) :
#{
slot = int(requestTime/15)
return slot
#}
for i in range(0, vehicles):
#{
dth.append((SOCc[i] - SOCt[i])/dischargeRate[i])
for iteration, val in enumerate(stationLocation):
#{
if (direction[i] == 1):
#{
if ( val > location[i] and val < location[i] + dth[i] ) :
#{
potentialStations[i].append(iteration)
#}
#}
else :
#{
if ( val < location[i] and val > location[i] - dth[i]) :
#{
potentialStations[i].append(iteration)
#}
#}
#}
for index, value in enumerate(potentialStations[i]) :
#{
travelTime = (abs(stationLocation[value] - location[i]))/speed[i]
slot = FindSlot(requestTime[i] + travelTime)
travelTimeList[i].append((value, travelTime))
waitAtStation = waitTime[value][slot]
chargeTime = (SOCr[i] - (SOCc[i] -(abs(stationLocation[value] - location[i]))*dischargeRate[i] ))/chargingRate[value]
chargingTime[i].append(travelTime + waitAtStation + chargeTime)
listToCompare[i].append((i, value, requestTime[i], travelTime, waitAtStation, chargeTime))
#}
#}
def UpdateWaitingTime(listIndexAllocated, vehicleAllocated, stationID, travelTime, waitingTime, chargeTime) :
#{
for i in range(listIndexAllocated + 1, len(listToCompare)) :
#{
for j, val in enumerate(listToCompare[i]) :
#{
if (val[1] == stationID) :
#{
if (requestTime[vehicleAllocated] + travelTime < val[2] + val[3] < requestTime[vehicleAllocated] + travelTime + waitingTime + chargeTime) :
#{
tmpList = list(listToCompare[i][j])
tmpList[4] = listToCompare[i][j][4] + requestTime[vehicleAllocated] + travelTime + waitingTime + chargeTime - val[2] - val[3]
listToCompare[i][j] = tuple(tmpList)
#}
#}
#}
#}
#}
AssignedStations = []
listForAverage = []
for i in range(0, vehicles) :
#{
if(travelTimeList[i] != []) :
#{
vehicleAllocated = i
stationAssigned[i].append(min(listToCompare[i], key= lambda x: x[3]))
stationID = stationAssigned[i][0][1]
travelTime = stationAssigned[i][0][3]
waitingTime = stationAssigned[i][0][4]
chargeTime = stationAssigned[i][0][5]
AssignedStations.append((i, stationID, travelTime, waitingTime, chargeTime))
UpdateWaitingTime(i, vehicleAllocated, stationID, travelTime, waitingTime, chargeTime)
#}
#}
print(potentialStations)
for i, val in enumerate (AssignedStations) :
#{
listForAverage.append(val[3] + val[4])
#}
averageChargingTime = statistics.mean(listForAverage)
unallocatedVehicles = potentialStations.count([])
print(AssignedStations)
print(averageChargingTime)
print(unallocatedVehicles)
chargingTimeMinDistance.append(averageChargingTime)
unallocatedVehiclesMinDistance.append(unallocatedVehicles)
#}
meanValues = [[], [], [], [], []]
vehicles = 200
stations = 0
for i in range(0, 10):
#{
stations = stations + 5
variableParameter = stations
for j in range(0, 100):
#{
simulationData = simulation_data1.SimulationData()
simulationData[0] = vehicles
simulationData[1] = stations
Simulation(simulationData)
SimulationMinDistance(simulationData)
#}
meanValues[0].append(variableParameter)
meanValues[1].append(statistics.mean(chargingTimeSimulation))
meanValues[2].append(statistics.mean(chargingTimeMinDistance))
meanValues[3].append(statistics.mean(unallocatedVehiclesSimulation))
meanValues[4].append(statistics.mean(unallocatedVehiclesMinDistance))
print(chargingTimeSimulation)
print(chargingTimeMinDistance)
print(unallocatedVehiclesSimulation)
print(unallocatedVehiclesMinDistance)
chargingTimeSimulation.clear()
chargingTimeMinDistance.clear()
unallocatedVehiclesSimulation.clear()
unallocatedVehiclesMinDistance.clear()
#}
print(meanValues)
with open("outputStationUniform.csv", "w") as f:
writer = csv.writer(f)
writer.writerows(meanValues)
'''
def UpdateWaitingTime(listIndexAllocated, vehicleAllocated, stationID, timeToCharge) :
#{
for i, val in enumerate(listToCompare[vehicleAllocated]) :
#{
if(val[1] == stationID):
#{
travelTime = val[3]
waitTime = val[4]
chargeTime = val[5]
#}
#}
for i in range (listIndexAllocated +1, len(sortedChargingTime)) :
#{
index = sortedChargingTime[i][0]
if (stationID in sortedChargingTime[i][3] ) :
#{
for iteration, val in enumerate(listToCompare[index]) :
#{
if (val[1] == stationID) :
#{
if(requestTime[vehicleAllocated] + travelTime < val[2] + val[3] < requestTime[vehicleAllocated] + travelTime + waitTime + chargeTime ) :
#{
sortedChargingTime[i][2][sortedChargingTime[i][3].index(stationID)] = sortedChargingTime[i][2][sortedChargingTime[i][3].index(stationID)] + (requestTime[vehicleAllocated] + travelTime + waitTime + chargeTime - val[2] - val[3] )
#}
#}
#}
#}
#}
#}
assignedStations = []
listForAverage = []
for i, val in enumerate(sortedChargingTime):
#{
if (sortedChargingTime[i][2] != []) :
#{
timeToCharge = min((sortedChargingTime[i][2]))
index = sortedChargingTime[i][2].index(min(sortedChargingTime[i][2]))
vehicleAllocated = sortedChargingTime[i][0]
stationID = sortedChargingTime[i][3][index]
assignedStations.append((vehicleAllocated, timeToCharge, stationID))
UpdateWaitingTime(i, vehicleAllocated, stationID, timeToCharge)
#}
#}
for i, val in enumerate(sortedChargingTime) :
#{
print(sortedChargingTime[i])
#}
for i, val in enumerate (assignedStations) :
#{
listForAverage.append(val[1])
#}
averageChargingTime = statistics.mean(listForAverage)
unallocatedVehicles = potentialStations.count([])
print(assignedStations)
print(averageChargingTime)
print(unallocatedVehicles)
'''
|
import json
from collections import ChainMap
file_name = 'south-park.json'
final_file = 'lines-by-character.json'
with open(file_name, "r") as read_file:
data = json.load(read_file)
all_characters = sorted({line['character'] for line in data})  # unique names, sorted
lines_by_character = []
for character in all_characters:
character_lines = []
print(f'Current character: {character}')
for line in data:
if line['character'] == character:
character_lines.append(line['line'])
lines_by_character.append({character: character_lines})
print(f'{character} done.')
# print(lines_by_character[0])
data = dict(ChainMap(*lines_by_character))
json_string = json.dumps(data, indent=4)
with open(final_file, "w") as outfile:
outfile.write(json_string)
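# An equivalent single-pass grouping sketch using collections.defaultdict,
# avoiding the characters-times-lines double loop above; same mapping shape.
from collections import defaultdict

with open(file_name, "r") as f:
    raw_lines = json.load(f)
grouped = defaultdict(list)
for entry in raw_lines:
    grouped[entry['character']].append(entry['line'])
# dict(grouped) matches the mapping serialized above (key order aside).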
|
# -*- coding: utf-8 -*-
# @Time : 2018/9/6 12:41
# @Author : WJH
# @Email : 1226778264@qq.com
# @File : spider_papers2.py
# @Software: PyCharm
import os
import re
import urllib.request
import requests
# get web context
def get_context(url):
    header = {"User-Agent": "Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/534.16 (KHTML, like Gecko) Chrome/10.0.648.133 Safari/534.16"}
    web_context = requests.get(url=url, headers=header)
    return web_context.text
web_url = 'http://openaccess.thecvf.com/ECCV2018.py'
web_context = get_context(web_url)
# find paper files
'''
(?<=href=\"): lookbehind, match only text that follows href="
.+?: match one or more characters (except newline), non-greedily
pdf: up to and including "pdf"
(?=\">pdf): lookahead, must be followed by ">pdf"
|: or (the same pattern for single-quoted hrefs)
'''
# link pattern: href="***_CVPR_2016_paper.pdf">pdf
link_list = re.findall(r"(?<=href=\").+?pdf(?=\">pdf)|(?<=href=\').+?pdf(?=\">pdf)", web_context)
# name pattern: <a href="***_CVPR_2016_paper.html">***</a>
name_list = re.findall(r"(?<=2018_paper.html\">).+(?=</a>)", web_context)
# download
# create local filefolder
local_dir = r'E:\pythonProgram\LittleProgram\data\CVPR2016'
if not os.path.exists(local_dir):
os.makedirs(local_dir)
cnt = 0  # start at 0 so the first paper is not skipped
while cnt < len(link_list):
    file_name = name_list[cnt]
    download_url = link_list[cnt]
    # replace punctuation and spaces with '_' so the title is a valid filename
    file_name = re.sub('[:\?/]+', "_", file_name).replace(' ', '_')
    file_path = os.path.join(local_dir, file_name + '.pdf')
    # download
    print('[' + str(cnt + 1) + '/' + str(len(link_list)) + '] Downloading ' + file_path)
try:
urllib.request.urlretrieve("http://openaccess.thecvf.com/" + download_url, file_path)
except Exception as e:
print('Download failed: ' + file_path)
cnt += 1
print('Finished')
|