text stringlengths 8 6.05M |
|---|
from .IPFSServices import IPFSServices |
import numpy as np
import pandas as pd
import seaborn as sns
import folium
import json
from matplotlib import pyplot as plt
from folium import plugins  # fixed: the folium submodule is "plugins", not "plugin"

# Load the Boston crime data; keep empty strings instead of NaN so the
# `Long != ''` filter below works as written.
dados = pd.read_csv("crime.csv", encoding="latin-1", keep_default_na=False)

# One representative location per reporting area.
# Fixed: the dataset's columns are capitalized (Lat, Long, STREET) — the
# marker loop below reads row["Lat"], row["Long"], row["STREET"], so the
# original lowercase selection could never line up with it.
locations = dados.groupby('REPORTING_AREA').first()
locations = locations.loc[:, ["Lat", "Long", "STREET"]]

# Crime count per reporting area (count of the first column).
area_count = dados.groupby('REPORTING_AREA').count()
area_count = area_count.iloc[:, [0]]
area_count.columns = ["Area Count"]

area_info = area_count.join(locations)
area_info = area_info[area_info.Long != '']

bostonMap = folium.Map(location=[42.3601, -71.0589], zoom_start=13,
                       tiles='CartoDB positron')

for index, row in area_info.iterrows():
    # renamed from `area_count` so the DataFrame above is not shadowed
    count = row["Area Count"]
    # normalize radius to avoid overlapping
    radius = count / 70
    # determine color by crime volume
    if count >= 1500:
        color = "#ff0000"  # red
    elif count >= 500:
        color = "#E37222"  # orange (fixed: the '#' was missing, an invalid color)
    else:
        color = "#0A8A9F"  # blue
    # add pop-up info
    popup_text = """Street: {}<br>total crime: {}""".format(row["STREET"], count)
    # plot bubbles
    folium.CircleMarker(
        location=(row["Lat"], row["Long"]),
        radius=radius,
        color=color,
        fill=True,
        popup=popup_text,
    ).add_to(bostonMap)

# add the shape of Boston County to the map
with open('Boston_Neighborhoods.geojson') as f:
    bostonArea = json.load(f)
folium.GeoJson(bostonArea).add_to(bostonMap)

# one small red marker per individual crime record
# NOTE(review): this plots every row of crime.csv — potentially very slow
# for large datasets; consider sampling.
for i, row in dados.iterrows():
    folium.CircleMarker((row.Lat, row.Long), radius=3, weight=2, color='red',
                        fill_color='red', fill_opacity=.5).add_to(bostonMap)

# Fixed: save once, at the end. The original called save() before the
# GeoJSON layer and per-crime markers were added, so they never appeared
# in map.html.
bostonMap.save('map.html')
|
"""
Bits for creating spline curves.
Class BSplineCurve:
A class that creates, maniuplates, and queries basis-spline curves.
"""
import logging
from typing import Callable, Optional
import numpy as np
from zu.analytic_curve import AnalyticCurve
class BSplineCurve(AnalyticCurve):
"""B-Spline curves are geometric curves defined on a set of points
that they pass through, and typically its traversal through those
points is smooth.
Sometimes they're referred to as NURBS curves (Non-Uniform Rational
B-Spline)
They're commonly used in vector graphics and CAD programs to
represent arbitrary curves and fonts.
B-Spline curves are constructed from basis spline functions, which
are scalar, typically smooth functions used for interpolation.
"""
def __init__(
self,
control_points: np.ndarray,
knot_vector: Optional[np.ndarray] = None,
order: int = 3,
periodic: bool = False,
cyclic_closed: bool = False,
) -> None:
"""Constructs a new B-Spline curve
:param control_points: The control_points defining the spline.
:type control_points: numpy.ndarray
:param order: The order of the spline.
:type order: int
:param knot_vector: The knot the spline is constructed on.
:type knot_vector: numpy.ndarray
"""
|
from collections import defaultdict
def search_nearby(i, j, grid, paths, n):
    """search_nearby

    Returns:
        A dictionary which format is like:
        {0: No. of Paths, 1: No. of Paths, 2: No. of Paths ...}
    Description:
        This is a recursive function.
        It starts from a coordinate (i, j) whose value is n and searches
        for n + 1 in the 4 nearest coordinates (left, right, up, down),
        counting how many maximal increasing paths of each length exist.

    NOTE(review): there is no bounds checking — the demo grid is padded
    with a border of zeros, and an index like j - 1 at j == 0 silently
    wraps to the last column; confirm callers always pass a zero-padded
    grid.
    """
    n_can_be_extended = False
    if grid[i][j - 1] == n + 1:
        paths[n + 1] += 1
        n_can_be_extended = True
        paths = search_nearby(i, j - 1, grid, paths, n + 1)
    if grid[i][j + 1] == n + 1:
        paths[n + 1] += 1
        n_can_be_extended = True
        paths = search_nearby(i, j + 1, grid, paths, n + 1)
    if grid[i - 1][j] == n + 1:
        paths[n + 1] += 1
        n_can_be_extended = True
        paths = search_nearby(i - 1, j, grid, paths, n + 1)
    if grid[i + 1][j] == n + 1:
        paths[n + 1] += 1
        n_can_be_extended = True
        paths = search_nearby(i + 1, j, grid, paths, n + 1)
    if n_can_be_extended:
        # the path of length n was extended, so it no longer counts as a
        # maximal path of that length
        paths[n] -= 1
    return paths
if __name__ == "__main__":
i, j, max_length = 2, 8, 4
paths = defaultdict()
paths = {1: 1, 2: 0, 3: 0, 4: 0}
# print(paths)
grid = [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 3, 3, 0, 2, 4, 3, 3, 2, 0],
[0, 3, 2, 4, 1, 4, 1, 2, 1, 0],
[0, 0, 4, 2, 4, 4, 1, 2, 0, 0],
[0, 0, 2, 3, 4, 0, 2, 3, 2, 0],
[0, 4, 1, 4, 3, 3, 4, 2, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
]
search_nearby(i, j, grid, paths, 1)
print(paths)
|
from sklearn.externals import joblib
import pandas as pd
from sklearn.ensemble import RandomForestClassifier
import json
if __name__ == '__main__':
    # Load the feature log, run the persisted classifier, and print each
    # (actual, predicted) label pair.
    with open('log.json') as json_file:
        data = json.load(json_file)
    data = pd.DataFrame.from_dict(data)
    print(data)
    x = data.drop(columns=["label"])
    y = data["label"]
    with open("final_model.joblib", 'rb') as model:
        classifier = joblib.load(model)
    y_pred = classifier.predict(x)
    # Fixed: `for i, j in y, y_pred:` iterated over the 2-tuple
    # (y, y_pred) and tried to unpack each whole series — it never paired
    # elements. zip() walks the two sequences in lockstep.
    for actual, predicted in zip(y, y_pred):
        print(actual, predicted)
|
__all__ = [
'UpdateGraphsData',
]
from gim.core.tasks.repository import RepositoryJob
class UpdateGraphsData(RepositoryJob):
    """Background job that refreshes a repository's graphs data."""
    queue_name = 'update-graphs-data'

    def run(self, queue):
        """Run the base repository job, then recompute the repository's
        per-day issues/PRs graph data."""
        super(UpdateGraphsData, self).run(queue)
        # Local import — NOTE(review): presumably done to avoid a circular
        # import at module load time; confirm.
        from .limpyd_models import GraphData
        graph, _ = GraphData.get_or_connect(repository_id=self.repository.pk)
        graph.reset_issues_and_prs_by_day()
|
# !/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright: Fabien Rosso
# Version 0.1.1 - 19 Avril 2016
# Version 0.1.2 - 29 Avril 2016
from __future__ import unicode_literals
from __future__ import print_function
import os
import psutil
def ListDirectory(path):
    """Recursively list every file under `path`.

    (Translated from the original French docstring: "builds a list of all
    files in directory `path` and its subdirectories".)

    :param path: root directory to walk
    :return: list of full file paths, in os.walk order
    """
    fichier = []
    for root, dirs, files in os.walk(path):
        # collect full paths for the files of this directory level
        fichier.extend(os.path.join(root, name) for name in files)
    return fichier
def pythonExist(pythonProcess):
    """Return the pid of a running `python` process whose first script
    argument equals `pythonProcess`, or None when no such process exists.

    :param pythonProcess: script path/name to look for in the command line
    :return: int pid, or None
    """
    pid = None
    for proc in psutil.process_iter():
        try:
            pinfo = proc.as_dict(attrs=['pid', 'name'])
            cmdline = proc.cmdline()
        except psutil.Error:
            # Fixed: the bare `except: pass` also swallowed programming
            # errors; only psutil races (process exited, access denied)
            # are expected here.
            continue
        # Fixed: guard the cmdline index — an interpreter started with no
        # script previously raised an unhandled IndexError.
        if pinfo['name'] == 'python' and len(cmdline) > 1 and cmdline[1] == pythonProcess:
            pid = proc.pid
    # Same contract as the original: implicit None when nothing matched.
    return pid
|
# HackerRank "Diagonal Difference": read an n x n matrix from stdin and
# print |sum(primary diagonal) - sum(secondary diagonal)|.
# Fixed: modernized from Python 2 (raw_input / xrange / print statement),
# which does not run on a Python 3 interpreter.
n = int(input().strip())
x = 0  # primary-diagonal sum
y = 0  # secondary-diagonal sum
for a_i in range(n):
    a_temp = list(map(int, input().strip().split()))
    x += a_temp[a_i]
    y += a_temp[n - a_i - 1]
print(abs(x - y))
|
# Generated by Django 2.1.3 on 2019-03-28 13:51
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated migration: makes acciones.latitud/longitud/monto
    # optional (blank + null) and pins CASCADE deletion on the localidad FK.

    dependencies = [
        ('core', '0006_auto_20190328_0409'),
    ]

    operations = [
        migrations.AlterField(
            model_name='acciones',
            name='latitud',
            field=models.DecimalField(blank=True, decimal_places=3, max_digits=8, null=True),
        ),
        migrations.AlterField(
            model_name='acciones',
            name='localidad_id',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Localidad'),
        ),
        migrations.AlterField(
            model_name='acciones',
            name='longitud',
            field=models.DecimalField(blank=True, decimal_places=3, max_digits=8, null=True),
        ),
        migrations.AlterField(
            model_name='acciones',
            name='monto',
            field=models.IntegerField(blank=True, default=0, null=True),
        ),
    ]
|
from collections import OrderedDict
from urllib.parse import urlencode
'''
"params": "query=Materiais de Constru%C3%A7%C3%A3o&page=0&highlightPreTag=__ais-highlight__&highlightPostTag=__/ais-highlight__&filters=hide_prices:false AND available:true AND (campus_city:"Aracaju" OR campus_virtual:true) AND (campus_state_abbr:"SE" OR campus_virtual:true) AND offered_price<=10000&facets=["level","simplified_kind","simplified_level","simplified_shift","searchable_enrollment_semester","great_area","benefits"]&tagFilters="
'''
# Build the Algolia-style search query string.
# Fixed: the original defined `query_param` as a plain dict and then
# immediately rebuilt the identical mapping as an OrderedDict — the first
# definition was dead code and has been removed.
query_param = OrderedDict(
    [
        ("query", "Materiais de Constru%C3%A7%C3%A3o"),
        ("page", "0"),
        ("highlightPreTag", "__ais-highlight__"),
        ("highlightPostTag", "__/ais-highlight__"),
        ("filters", 'hide_prices:false AND available:true AND (campus_city:"Aracaju" OR campus_virtual:true) AND (campus_state_abbr:"SE" OR campus_virtual:true) AND offered_price<=10000'),
        ("facets", '["level","simplified_kind","simplified_level","simplified_shift","searchable_enrollment_semester","great_area","benefits"]'),
        ("tagFilters", ""),
    ]
)
# urlencode escapes spaces as '+'; the target API expects '%20'.
print(urlencode(query_param).replace("+", "%20"))
|
from matrix import Matrix
def main():
    """Build two 3x3 matrices, print each, then print their sum."""
    first = Matrix([[1, 2, 3], [2, 4, 4], [4, 4, 4]])
    second = Matrix([[1, 2, 3], [2, 40, 4], [4, 4, 4]])
    print(first)
    print(second)
    print(second.add(first))


if __name__ == '__main__':
    main()
|
class Edge:
    """Directed graph edge from `startVertex` to `endVertex`."""

    def __init__(self, startVertex, endVertex):
        # fixed: dropped the un-Pythonic trailing semicolons
        self.startVertex = startVertex
        self.endVertex = endVertex
|
from django.db import models
class BlockModel(models.Model):
    """Django model persisting one blockchain block.

    All values are stored as strings; block_index is unique so each chain
    position is recorded at most once.
    """
    block_index = models.CharField(max_length=255, unique=True)
    block_timestamp = models.CharField(max_length=255)
    block_id = models.CharField(max_length=255)
    block_hash = models.CharField(max_length=255)
    block_previous_hash = models.CharField(max_length=255)
    block_wallet_id = models.CharField(max_length=255)
    block_chain_id = models.CharField(max_length=255)

    def __str__(self):
        # Human-readable identifier (e.g. for the Django admin).
        return f"{str(self.block_id)} - {str(self.block_wallet_id)}"
import http.client, urllib.request, urllib.parse, urllib.error, base64
from PIL import Image
import numpy as np
from aip import AipImageClassify
from aip import AipOcr
import platform
import ssl
def get_file_content(filePath):
    """Return the raw bytes of the file at `filePath`."""
    with open(filePath, 'rb') as file_handle:
        data = file_handle.read()
    return data
def image_caption(image_path):
    """Ask the Azure Computer Vision API to describe an image.

    :param image_path: path of the image file to analyze
    :return: dict with a single key 'caption' holding the top caption text
    """
    import json  # local import: this module's header does not import json

    headers = {
        # Request headers
        'Content-Type': 'application/octet-stream',
        # NOTE(review): hard-coded subscription key checked into source —
        # move to configuration / environment variables.
        'Ocp-Apim-Subscription-Key': 'c0da4e6ed24f48d5b3e5893899e83777',
    }
    params = urllib.parse.urlencode({
        # Request parameters
        'visualFeatures': 'Description',
        'language': 'en',
    })
    # NOTE(review): TLS certificate verification is disabled here —
    # acceptable for a demo, unsafe in production.
    conn = http.client.HTTPSConnection('api.cognitive.azure.cn', context=ssl._create_unverified_context())
    conn.request("POST", "/vision/v1.0/analyze?%s" % params, get_file_content(image_path), headers)
    response = conn.getresponse()
    data = response.read()
    conn.close()
    # Fixed: parse the JSON response with json.loads instead of eval() —
    # eval() would execute arbitrary expressions from the network response.
    dict_result = json.loads(data)
    result = {}
    result['caption'] = dict_result['description']['captions'][0]['text']
    return result
def object_detection(image_path):
    """Locate the main object (Baidu objectDetect), classify the scene
    (Azure), and return a spoken-style warning naming the object and the
    part of the frame it occupies (center or one of four quadrant anchors).
    """
    import json  # local import: this module's header does not import json

    headers = {
        # Request headers
        'Content-Type': 'application/octet-stream',
        # NOTE(review): hard-coded subscription key — move to configuration.
        'Ocp-Apim-Subscription-Key': 'c0da4e6ed24f48d5b3e5893899e83777',
    }
    params = urllib.parse.urlencode({
        # Request parameters
        'visualFeatures': 'Categories',
        'language': 'en',
    })
    # Baidu image-classify client (removed the unused obj_list local).
    APP_ID = '11099177'
    API_KEY = '537mYO15okLxK5kEIX9ons8N'
    SECRET_KEY = 'GoNqKxvwUNdOSLDxIg2O1kteSRygYl2q'
    client = AipImageClassify(APP_ID, API_KEY, SECRET_KEY)
    image = get_file_content(image_path)
    result_temp = client.objectDetect(image)
    im = Image.open(image_path)
    left = result_temp['result']['left']
    top = result_temp['result']['top']
    width = result_temp['result']['width']
    height = result_temp['result']['height']
    # (removed the unused `region = im.crop(...)` — its result was never read)
    # NOTE(review): TLS certificate verification is disabled here.
    conn = http.client.HTTPSConnection('api.cognitive.azure.cn', context=ssl._create_unverified_context())
    conn.request("POST", "/vision/v1.0/analyze?%s" % params, image, headers)
    response = conn.getresponse()
    data = response.read()
    conn.close()
    # Fixed: json.loads instead of eval() on the network response.
    dict_result = json.loads(data)
    key = dict_result['categories'][0]['name']
    if key == 'people_crowd' or key == 'portrait':
        key = 'people'
    if len(key.split('_')) > 1:
        key = key.split('_')[1]
    if key == '':
        return 'Regional security, please go straight.'
    # Five anchor points: the image centre plus the centres of the four
    # quadrants.
    middle = [im.size[0] / 2, im.size[1] / 2]
    left_up = [middle[0] / 2, middle[1] / 2]
    left_down = [left_up[0], left_up[1] + middle[1]]
    right_up = [left_up[0] + middle[0], left_up[1]]
    right_down = [right_up[0], left_down[1]]
    points = [middle, left_up, left_down, right_up, right_down]
    # Fixed: the bounding-box centre is left + width/2; the original
    # computed (left + width)/2, which drifts toward the origin for boxes
    # away from the top-left corner.
    our_middle = [left + width / 2, top + height / 2]
    dis = [(our_middle[0] - p[0]) ** 2 + (our_middle[1] - p[1]) ** 2
           for p in points]
    nearest = int(np.argmin(dis))  # renamed from `id`, which shadows the builtin
    if nearest == 0:
        tip = 'There is a ' + key + ' in front of you. Please be careful'
    elif nearest == 1:
        tip = 'Please note that there is a ' + key + ' in left front'
    elif nearest == 2:
        tip = 'On the left there is a ' + key + ', please be careful'
    elif nearest == 3:
        tip = 'There is a ' + key + ' in front of the right, please be careful'
    else:
        tip = 'On the right there is a ' + key + ', please be careful'
    return tip
def word_detection(image_path):
    """OCR the image with Baidu's general OCR and return the recognized
    text, one detected word block per line, or 'No any word.' when the
    image contains no text.
    """
    APP_ID = '10673557'
    # NOTE(review): credentials are hard-coded in source — move to
    # configuration / environment variables.
    API_KEY = 'LMXCGq3dCDmNS9xXGB9HrzuG'
    SECRET_KEY = 'jyqeWRBZ5S3cmAPO7LTk8taBo6GdB55s'
    client = AipOcr(APP_ID, API_KEY, SECRET_KEY)
    image = get_file_content(image_path)
    result_temp = client.basicGeneral(image)
    words = result_temp['words_result']
    result = ''
    for word in words:
        result += word['words'] + '\n'
    if result == '':
        return 'No any word.'
    return result
#print(object_detection('F:/IMG_20130328_174347.jpg'))
#print(image_caption('F:/IMG_20130328_174347.jpg'))
#print(word_detection('F:/IMG_20130328_174347.jpg'))
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
This sample code is for reference only; for production calls, consider encrypting the request body as well.
Note: the authentication scheme used here does not apply to some products (speech recognition, face recognition, etc.).
Check the API documentation of the product you call: this sample applies when authentication is based on an Access Key
(Access Key ID (AK) plus Secret Access Key (SK)) and the final authorization string has the form
bce-auth-v{version}/{accessKeyId}/{timestamp}/{expirationPeriodInSeconds}/{signedHeaders}/{signature}.
Some products require extra credentials beyond AK/SK signing (for example the Baidu feed ads API needs the ads account
information in the request body). Always consult the product documentation.
'''
import hashlib
import hmac
import urllib.parse
import time

# 1. AK/SK, host, method, absolute URI path and querystring
# NOTE(review): credentials belong in configuration, not in source.
AK = "c02376d42a05434c98*************"
SK = "08ead63a52f84ad****************"
host = "iotdm.gz.baidubce.com"
method = "GET"
query = ""
deviceName = "wu_t_shadow"
URI = "/v3/iot/management/device/"

# 2. x-bce-date timestamp (UTC, ISO-8601)
x_bce_date = time.gmtime()
x_bce_date = time.strftime('%Y-%m-%dT%H:%M:%SZ', x_bce_date)
#x_bce_date = "2020-03-20T13:33:41Z"

# 3. headers and signedHeaders
# NOTE(review): signedHeaders declares x-bce-date but no x-bce-date header
# is ever added to `header` — confirm against the BCE signing spec.
header = {
    "Host": host,
    "Content-type": "application/json; charset=utf-8"
}
signedHeaders = "content-type;host;x-bce-date"

# 4. authorization string prefix
authStringPrefix = "bce-auth-v1" + "/" + AK + "/" + x_bce_date + "/" + "1800"

# 5. build the CanonicalRequest
# 5.1 CanonicalURI
CanonicalURI = urllib.parse.quote(URI)  # urllib.parse.quote on Python 3 (urllib.quote on legacy Python 2)
# 5.2 CanonicalQueryString
CanonicalQueryString = query  # a complex querystring would need extra canonicalization
# 5.3 CanonicalHeaders: "key:value" pairs, lowercased keys, URL-encoded, sorted
result = []
for key, value in header.items():
    tempStr = str(urllib.parse.quote(key.lower(), safe="")) + ":" + str(urllib.parse.quote(value, safe=""))
    result.append(tempStr)
result.sort()
CanonicalHeaders = "\n".join(result)
print("--------CanonicalHeaders---------" + CanonicalHeaders)
# 5.4 assemble the CanonicalRequest
# NOTE(review): this appends deviceName after the already-quoted URI and
# uses a literal "host:..." instead of the CanonicalHeaders computed above
# — looks nonstandard; verify against the BCE V1 signing specification.
CanonicalRequest = method + "\n" + CanonicalURI + deviceName + "\n" + CanonicalQueryString + "\n" + "host:" + host

# 6. derive the signingKey: HMAC-SHA256 of the prefix keyed by the SK
signingKey = hmac.new(SK.encode('utf-8'), authStringPrefix.encode('utf-8'), hashlib.sha256)
print("---------signingKey------" + signingKey.hexdigest())

# 7. compute the Signature: HMAC-SHA256 of the CanonicalRequest keyed by
# the hex signingKey
Signature = hmac.new((signingKey.hexdigest()).encode('utf-8'), CanonicalRequest.encode('utf-8'), hashlib.sha256)
print(signingKey.hexdigest().encode('utf-8'))
print(CanonicalRequest.encode('utf-8'))
print("---------Signature------" + Signature.hexdigest())

# 8. build the Authorization header from prefix, signed headers, signature
header['Authorization'] = authStringPrefix + "/host/" + Signature.hexdigest()

# 9. send the API request and print the response
# NOTE(review): the URL uses plain http:// even though the request is
# signed — confirm https is intended for production.
print(header)
import requests
import json
body = {
    "name": "QQQQQQ"
}
url = "http://" + host + URI + deviceName
print(url)
r = requests.get(url, headers=header, data=json.dumps(body))
print(r.text)
|
class TreeNode:
    """Binary tree node holding a value and left/right child links."""

    def __init__(self, x):
        self.val = x
        self.left = None
        self.right = None
class Solution:
    """Counts the "visible" nodes of a binary tree.

    A node is visible when no ancestor on the path from the root down to
    it has a strictly greater value.
    """

    def number_visible_nodes(self, root):
        """Return the number of visible nodes in the tree rooted at `root`."""
        return self.count_visible_nodes(root, float('-inf'))

    def count_visible_nodes(self, root, max_so_far):
        """Recursively count visible nodes.

        `max_so_far` is the largest value seen on the path from the tree
        root down to (but not including) `root`; a node counts when its
        value is >= that maximum.
        """
        if root is None:
            return 0
        visible = 1 if root.val >= max_so_far else 0
        path_max = max(max_so_far, root.val)
        return (visible
                + self.count_visible_nodes(root.left, path_max)
                + self.count_visible_nodes(root.right, path_max))
if __name__ == "__main__":
root = TreeNode(5)
root.left = TreeNode(3)
root.right = TreeNode(10)
root.left.left = TreeNode(20)
root.left.right = TreeNode(21)
root.right.left = TreeNode(1)
obj = Solution()
nodes_count = obj.number_visible_nodes(root)
print(nodes_count)
|
#!/usr/bin/env python3
"""
PEA simulateur - Draw cotation
Copyright (c) 2020-2021 Nicolas Beguier
Licensed under the MIT License
Written by Nicolas BEGUIER (nicolas_beguier@hotmail.com)
"""
from pathlib import Path
import sys
import matplotlib.pyplot as plt
# Debug
# from pdb import set_trace as st
COTATION_DIR = sys.argv[1]  # cotation directory, read from the command line at import time

def main():
    """Plot the value series of every ISIN found in the cotation files.

    Each regular file in COTATION_DIR is one month of data; each line is a
    semicolon-separated record whose first field is the ISIN and whose
    third field is the value plotted.
    """
    data = dict()
    cotation_path = Path(COTATION_DIR)
    count = 0
    for month_path in sorted(cotation_path.iterdir()):
        if not month_path.is_file():
            continue
        month_file = month_path.open()
        for isin_line in month_file.readlines():
            isin = isin_line.split(';')[0]
            # The x axis is the file ordinal, not the record's date field.
            date = count  # isin_line.split(';')[1]
            # val = float(isin_line.split(';')[5])
            val = float(isin_line.split(';')[2])
            if isin not in data:
                data[isin] = [[], []]
            data[isin][0].append(date)
            data[isin][1].append(val)
        count += 1
    for isin in data:
        plt.plot(data[isin][0], data[isin][1], label=f'Generated isin {isin}')
    plt.xlabel('date')
    plt.ylabel('value')
    # NOTE(review): `isin` here is whatever value the loop above ended on,
    # so the title names only the last series — confirm this is intended.
    plt.title(f'Finance of {isin}')
    plt.legend()
    plt.show()

if __name__ == '__main__':
    main()
|
# Demonstrates list.index with and without a start offset: '지구' occurs
# at positions 3 and 9, so searching again from index 5 finds the second
# occurrence.
solarsys = ['태양','수성','금성','지구','화성','목성','토성','천왕성','해왕성','지구']
planet = '지구'
for start in (0, 5):
    pos = solarsys.index(planet, start)
    print('%s은(는) 태양계에서 %d번째에 위치하고 있습니다.'%(planet,pos))
|
from flask import Flask, render_template, request, jsonify
import TestScript
import os
import werkzeug
import cv2
import io
import base64
import time
from PIL import Image
path = "C:/Classnotes/Dress-Virtual-Trialroom/static/"
fn = []
app = Flask(__name__)
def get_response_image(image_path):
    """Return the raw bytes of the image at `image_path`."""
    with open(image_path, "rb") as image_file:
        return image_file.read()
def model_run():
    """Run the virtual try-on pipeline on the four uploaded images and
    return the rendered output image as raw bytes.

    Relies on module state: `fn` must already hold the four saved
    filenames in upload order (front, bent, back, shirt) and `path` is the
    static directory.
    """
    frontloc = path + fn[0]
    bentloc = path + fn[1]
    backloc = path + fn[2]
    shirtloc = path + fn[3]
    TestScript.start(frontloc, bentloc, backloc, shirtloc)
    # TestScript.start is expected to write its result to out.jpg in the
    # static dir — presumed from the read below; TODO confirm.
    encoded = get_response_image(path + "out.jpg")
    return encoded
def deleting_files():
    """Delete every entry inside the module-level static directory `path`."""
    for entry in os.listdir(path):
        os.remove(os.path.join(path, entry))
@app.route("/api/android",methods=['GET', 'POST'])
def api_call():
incoming_files = list(request.files)
print("Files recieved :",len(incoming_files))
os.chdir(path)
it = 1
for file in incoming_files:
image = request.files[file]
filename = werkzeug.utils.secure_filename(image.filename)
print(filename)
print()
abc = filename.split(".")
fn.append(str(it) + "." + abc[-1])
image.save(fn[it-1])
print("File saved ",it)
it = it + 1
cypher = model_run()
deleting_files()
return cypher
@app.route("/", methods=['GET', 'POST'])
def home():
return "HEllo World"
if __name__ == "__main__":
app.run(host= '0.0.0.0')
|
class clusterClass:
    """Plain container for one detected cluster.

    Attributes: res (detection result), box (bounding box), id (cluster
    identifier), car_flag (car/non-car flag), processed (0 until the
    cluster has been handled).
    """
    def __init__(self, res, box, id, car_flag):
        self.res = res
        self.box = box
        self.id = id
        self.car_flag = car_flag
        # starts unprocessed; downstream code flips this when handled
        self.processed = 0
|
# Interval DP
class Solution:
    """LeetCode 664 "Strange Printer": minimum printer passes to print s."""

    def strangePrinter(self, s: str) -> int:
        """Return the minimum number of printing turns needed for `s`.

        f[i][j] is the minimum number of turns to print s[i..j]; the
        answer is f[0][n-1].
        """
        n = len(s)
        if n == 0:
            # Guard: the DP below indexes f[0][n-1], which would fail on
            # an empty string.
            return 0
        # Fixed: the original used math.inf but never imported math,
        # raising NameError at runtime; float("inf") is equivalent.
        INF = float("inf")
        f = [[INF] * n for _ in range(n)]
        for i in range(n):
            f[i][i] = 1
        for d in range(2, n + 1):
            for l in range(n - d + 1):
                r = l + d - 1
                if s[l] == s[r]:
                    # Equal endpoints can be printed in the same pass.
                    f[l][r] = min(f[l + 1][r], f[l][r - 1])
                else:
                    # Try every split point and keep the cheapest.
                    for k in range(l, r):
                        f[l][r] = min(f[l][r], f[l][k] + f[k + 1][r])
        return f[0][n - 1]
# class Solution {
# public:
# int strangePrinter(string s) {
# int n = s.size();
# vector<vector<int>> dp(n, vector<int>(n, INT_MAX));
# for(int i = n-1; i >= 0; --i)
# {
# dp[i][i] = 1;
# for(int j = i + 1; j < n; ++j)
# {
# if(s[i] == s[j])
# dp[i][j] = dp[i][j-1];
# else //枚举区间内所有的可能性,取最优
# for(int k = i; k < j; ++k)
# dp[i][j] = min(dp[i][j], dp[i][k] + dp[k+1][j]);
# }
# }
# return dp[0][n-1];
# }
# };
|
# -*- coding: utf-8 -*-
# Задача на программирование повышенной сложности: огромное число Фибоначчи по модулю
# Даны целые числа 1≤n≤1018 и 2≤m≤105, необходимо найти остаток от деления n-го числа Фибоначчи на m.
# Sample Input:
# 47905881698199969 76940
# Sample Output:
# 13794
import sys
from functools import lru_cache
@lru_cache(maxsize=1024)
def fbmodl(n, m):
    """Return the n-th Fibonacci number modulo m via fast doubling.

    Accepts n and m as ints or numeric strings; recursion depth is
    O(log n), so n up to 10**18 is fine.
    """
    n, m = int(n), int(m)
    if n <= 3:
        # F(0..3) = 0, 1, 1, 2
        return (0, 1, 1, 2)[n] % m
    half = n // 2
    if n % 2:
        # odd index: F(2k+1) = F(k)^2 + F(k+1)^2
        f_k = fbmodl(half, m)
        f_k1 = fbmodl(half + 1, m)
        return (f_k * f_k + f_k1 * f_k1) % m
    # even index: F(2k) = (2*F(k-1) + F(k)) * F(k)
    f_km1 = fbmodl(half - 1, m)
    f_k = fbmodl(half, m)
    return ((2 * f_km1 + f_k) * f_k) % m
# Read "n m" from stdin and print F(n) mod m.
# Fixed: split() instead of split(' ') so any run of whitespace (and the
# trailing newline) is handled correctly.
n, m = sys.stdin.readline().split()
print(fbmodl(n, m))
from ..config import BaseProposalCreatorConfig
import json
from grant.proposal.models import Proposal, ProposalRevision
from grant.utils.enums import ProposalChange
from ..test_data import test_team
# Milestone fixture A: four milestones, 25% payout each, none immediate.
test_milestones_a = [
    {
        "title": "first milestone a",
        "content": "content a",
        "daysEstimated": "30",
        "payoutPercent": "25",
        "immediatePayout": False
    },
    {
        "title": "second milestone a",
        "content": "content a",
        "daysEstimated": "10",
        "payoutPercent": "25",
        "immediatePayout": False
    },
    {
        "title": "third milestone a",
        "content": "content a",
        "daysEstimated": "20",
        "payoutPercent": "25",
        "immediatePayout": False
    },
    {
        "title": "fourth milestone a",
        "content": "content a",
        "daysEstimated": "30",
        "payoutPercent": "25",
        "immediatePayout": False
    }
]
# Proposal A: four-milestone fixture, target 200.
test_proposal_a = {
    "team": test_team,
    "content": "## My Proposal A",
    "title": "Give Me Money A",
    "brief": "$$$ A",
    "milestones": test_milestones_a,
    "target": "200",
    "payoutAddress": "123",
}
# Milestone fixture B: two milestones; the first pays out immediately.
test_milestones_b = [
    {
        "title": "first milestone b",
        "content": "content b",
        "daysEstimated": "30",
        "payoutPercent": "25",
        "immediatePayout": True
    },
    {
        "title": "second milestone b",
        "content": "content b",
        "daysEstimated": "40",
        "payoutPercent": "75",
        "immediatePayout": False
    }
]
# Proposal B: two-milestone fixture, target 100.
test_proposal_b = {
    "team": test_team,
    "content": "## My Proposal B",
    "title": "Give Me Money B",
    "brief": "$$$ B",
    "milestones": test_milestones_b,
    "target": "100",
    "payoutAddress": "123",
}
# Proposal C: same milestones/target as B; differs in content/title/brief.
test_proposal_c = {
    "team": test_team,
    "content": "## My Proposal C",
    "title": "Give Me Money C",
    "brief": "$$$ C",
    "milestones": test_milestones_b,
    "target": "100",
    "payoutAddress": "123",
}
# Proposal D: identical to B except the target is 200.
test_proposal_d = {
    "team": test_team,
    "content": "## My Proposal B",
    "title": "Give Me Money B",
    "brief": "$$$ B",
    "milestones": test_milestones_b,
    "target": "200",
    "payoutAddress": "123",
}
class TestProposalMethods(BaseProposalCreatorConfig):
    """Integration tests for ProposalRevision change detection between the
    module-level proposal fixtures (a/b/c/d)."""

    def init_proposal(self, proposal_data):
        # Create an empty draft through the API, then populate it with
        # `proposal_data`; returns the new proposal's id.
        self.login_default_user()
        resp = self.app.post(
            "/api/v1/proposals/drafts"
        )
        self.assertStatus(resp, 201)
        proposal_id = resp.json["proposalId"]
        resp = self.app.put(
            f"/api/v1/proposals/{proposal_id}",
            data=json.dumps(proposal_data),
            content_type='application/json'
        )
        self.assert200(resp)
        return proposal_id

    def validate_changes(self, changes, expected_change, expected_milestone_index=None):
        # Assert that `changes` contains the expected change record,
        # optionally scoped to a specific milestone index.
        if expected_milestone_index is not None:
            change = {"type": expected_change, "milestone_index": expected_milestone_index}
        else:
            change = {"type": expected_change}
        self.assertTrue(change in changes)

    def test_calculate_milestone_changes_no_changes(self):
        # Identical proposals must produce no milestone changes.
        old_proposal_id = self.init_proposal(test_proposal_a)
        new_proposal_id = self.init_proposal(test_proposal_a)
        old_proposal = Proposal.query.get(old_proposal_id)
        new_proposal = Proposal.query.get(new_proposal_id)
        changes = ProposalRevision.calculate_milestone_changes(old_proposal.milestones, new_proposal.milestones)
        self.assertEqual(len(changes), 0)

    def test_calculate_milestone_changes_a_to_b(self):
        old_proposal_id = self.init_proposal(test_proposal_a)
        new_proposal_id = self.init_proposal(test_proposal_b)
        old_proposal = Proposal.query.get(old_proposal_id)
        new_proposal = Proposal.query.get(new_proposal_id)
        changes = ProposalRevision.calculate_milestone_changes(old_proposal.milestones, new_proposal.milestones)
        print(changes)
        # going from milestones a to b, there should be 9 total changes
        self.assertEqual(len(changes), 9)
        # the following change types should be detected
        self.validate_changes(changes, ProposalChange.MILESTONE_EDIT_TITLE, 0)
        self.validate_changes(changes, ProposalChange.MILESTONE_EDIT_CONTENT, 0)
        self.validate_changes(changes, ProposalChange.MILESTONE_EDIT_IMMEDIATE_PAYOUT, 0)
        self.validate_changes(changes, ProposalChange.MILESTONE_EDIT_TITLE, 1)
        self.validate_changes(changes, ProposalChange.MILESTONE_EDIT_CONTENT, 1)
        self.validate_changes(changes, ProposalChange.MILESTONE_EDIT_DAYS, 1)
        self.validate_changes(changes, ProposalChange.MILESTONE_EDIT_PERCENT, 1)
        self.validate_changes(changes, ProposalChange.MILESTONE_REMOVE, 2)
        self.validate_changes(changes, ProposalChange.MILESTONE_REMOVE, 3)

    def test_calculate_milestone_changes_b_to_a(self):
        old_proposal_id = self.init_proposal(test_proposal_b)
        new_proposal_id = self.init_proposal(test_proposal_a)
        old_proposal = Proposal.query.get(old_proposal_id)
        new_proposal = Proposal.query.get(new_proposal_id)
        changes = ProposalRevision.calculate_milestone_changes(old_proposal.milestones, new_proposal.milestones)
        print(changes)
        # going from milestones b to a, there should be 9 total changes
        self.assertEqual(len(changes), 9)
        # the following change types should be detected
        self.validate_changes(changes, ProposalChange.MILESTONE_EDIT_TITLE, 0)
        self.validate_changes(changes, ProposalChange.MILESTONE_EDIT_CONTENT, 0)
        self.validate_changes(changes, ProposalChange.MILESTONE_EDIT_IMMEDIATE_PAYOUT, 0)
        self.validate_changes(changes, ProposalChange.MILESTONE_EDIT_TITLE, 1)
        self.validate_changes(changes, ProposalChange.MILESTONE_EDIT_CONTENT, 1)
        self.validate_changes(changes, ProposalChange.MILESTONE_EDIT_DAYS, 1)
        self.validate_changes(changes, ProposalChange.MILESTONE_EDIT_PERCENT, 1)
        self.validate_changes(changes, ProposalChange.MILESTONE_ADD, 2)
        self.validate_changes(changes, ProposalChange.MILESTONE_ADD, 3)

    def test_calculate_proposal_changes_c_to_d(self):
        old_proposal_id = self.init_proposal(test_proposal_c)
        new_proposal_id = self.init_proposal(test_proposal_d)
        old_proposal = Proposal.query.get(old_proposal_id)
        new_proposal = Proposal.query.get(new_proposal_id)
        changes = ProposalRevision.calculate_proposal_changes(old_proposal, new_proposal)
        print(changes)
        # going from proposal c to d, there should be 4 total changes
        self.assertEqual(len(changes), 4)
        # the following change types should be detected
        self.validate_changes(changes, ProposalChange.PROPOSAL_EDIT_CONTENT)
        self.validate_changes(changes, ProposalChange.PROPOSAL_EDIT_TITLE)
        self.validate_changes(changes, ProposalChange.PROPOSAL_EDIT_BRIEF)
        self.validate_changes(changes, ProposalChange.PROPOSAL_EDIT_TARGET)

    def test_calculate_proposal_changes_d_to_a(self):
        old_proposal_id = self.init_proposal(test_proposal_d)
        new_proposal_id = self.init_proposal(test_proposal_a)
        old_proposal = Proposal.query.get(old_proposal_id)
        new_proposal = Proposal.query.get(new_proposal_id)
        changes = ProposalRevision.calculate_proposal_changes(old_proposal, new_proposal)
        print(changes)
        # going from proposal d to a, there should be 12 total changes
        # (comment fixed: it previously said 4 while asserting 12 below)
        self.assertEqual(len(changes), 12)
        # the following proposal change types should be detected
        self.validate_changes(changes, ProposalChange.PROPOSAL_EDIT_CONTENT)
        self.validate_changes(changes, ProposalChange.PROPOSAL_EDIT_TITLE)
        self.validate_changes(changes, ProposalChange.PROPOSAL_EDIT_BRIEF)
        # the following milestone change types should be detected
        self.validate_changes(changes, ProposalChange.MILESTONE_EDIT_TITLE, 0)
        self.validate_changes(changes, ProposalChange.MILESTONE_EDIT_CONTENT, 0)
        self.validate_changes(changes, ProposalChange.MILESTONE_EDIT_IMMEDIATE_PAYOUT, 0)
        self.validate_changes(changes, ProposalChange.MILESTONE_EDIT_TITLE, 1)
        self.validate_changes(changes, ProposalChange.MILESTONE_EDIT_CONTENT, 1)
        self.validate_changes(changes, ProposalChange.MILESTONE_EDIT_DAYS, 1)
        self.validate_changes(changes, ProposalChange.MILESTONE_EDIT_PERCENT, 1)
        self.validate_changes(changes, ProposalChange.MILESTONE_ADD, 2)
        self.validate_changes(changes, ProposalChange.MILESTONE_ADD, 3)
|
from app import api
from hello import handlers as hello_handlers

# Route table: '/' and '/test' each get their own HelloResource instance;
# '/whatever' is served by the variable-path test resource.
api.add_route('/', hello_handlers.HelloResource())
api.add_route('/test', hello_handlers.HelloResource())
api.add_route('/whatever', hello_handlers.TestVariableResource())
|
"""
z_algo.py
Name: Wirmantono
Contains function for Z algorithm, a linear time pattern matching algorithm
"""
def z_algo(input_str):
    """
    Z-algorithm performs prefix matching in linear time.

    Returns z_values where z_values[k] is the length of the longest
    substring starting at k that matches a prefix of input_str
    (z_values[0] is defined as len(input_str)); [] for an empty input.

    The following implementation of the algorithm is based on lecture
    slides provided by the Unit Coordinator for the Algorithms unit at my
    institution. Details were omitted to prevent academic integrity issues.

    Time complexity: O(N), N as length of input_str
    Space complexity: O(N) input space
                      O(N) auxiliary space
    """
    def compare(input_str, i, j):
        """
        Performs character by character comparison.
        Compares the suffix of input_str starting at i against the prefix
        starting at j and returns the number of matching characters.

        Time complexity: Worst&Best case: O(m); m as length of input_str
        Space complexity: O(m) input space
        """
        n = len(input_str)
        matches = 0
        while i < n:
            prefix_char = input_str[j]
            suffix_char = input_str[i]
            if prefix_char == suffix_char:
                matches += 1
                i += 1
                j += 1
            else:
                break
        return matches

    # initialize values; [left, right) tracks the rightmost Z-box found
    n = len(input_str)
    left = 0
    right = 0
    if n > 0:
        z_values = [0] * n
        z_values[0] = n  # by convention Z[0] is the whole string length
    else:
        return []

    # base case: compute Z[1] explicitly (skipped for 1-char input)
    if (n > 1):
        value_z1 = compare(input_str, 1, 0)
        z_values[1] = value_z1
        if (value_z1 > 0):
            right = value_z1 + 1
            left = 1

    # general case
    for k in range(1, n):
        if k > (right - 1):  # case 1: k lies outside the current Z-box
            value_zk = compare(input_str, k, 0)
            z_values[k] = value_zk
            if (value_zk > 0):
                # calculate position of mismatch
                right = value_zk + k
                left = k
            # else no update is necessary
        else:  # case 2: k lies inside the Z-box — reuse earlier values
            # get previous z values
            prev_z = z_values[k - left]  # z_values[k - left + 1]
            remainder = right - k
            if (prev_z < remainder):  # case 2a: fully inside, copy as-is
                z_values[k] = prev_z
            else:
                if (prev_z > remainder):  # case 2b: clipped at the box edge
                    z_values[k] = remainder
                else:  # case 2c: may extend past the box edge
                    # explicit comparison from right
                    right_compare = compare(input_str, right, right - k)
                    z_values[k] = prev_z + right_compare
                    left = k
                    right = k + right_compare
    return z_values
def bitwise_z_algo(pattern, text):
    """
    Creates the first bitvector in len(pattern).
    Concept: run the Z algorithm for only text[0:len(pattern)], comparing
    text against pattern.

    :param pattern: pattern string
    :param text: text string
    :return: integer bitvector for text[0:len(pattern)]; bit (m - i - 1)
             is cleared when z_values[i] == m - i (a full suffix match)

    Time complexity: Worst&Best case: O(m); m as length of pattern
    Space complexity: O(m + n); m as length of pattern, n as length of text
                      O(m) auxiliary space

    The algorithm performs pattern matching with the Z algorithm for
    pattern, which is later used by a customised Z algorithm to compute
    the bit vector for the first m characters of text.
    """
    def compare(pattern, text, i, j):
        """
        Performs character by character comparison.
        Compares the suffix of text starting at i against the prefix of
        pattern starting at j and returns the number of matches.

        Time complexity: Worst&Best case: O(m); m as length of pattern
        Space complexity: O(pattern + text) input space
                          O(pattern) auxiliary space
        """
        m = len(pattern)
        matches = 0
        while i < m:
            prefix_char = pattern[j]
            suffix_char = text[i]
            if prefix_char == suffix_char:
                matches += 1
                i += 1
                j += 1
            else:
                break
        return matches

    n = len(text)
    m = len(pattern)
    # [left, right) tracks the rightmost Z-box found so far
    left = 0
    right = 0
    z_values = [0] * m
    # all m bits set (2**m - 1) means "no full suffix match found yet"
    result = 2 ** m - 1
    z_patt = z_algo(pattern)

    # base case
    # if text is shorter than pattern no full match is possible
    if m > n:
        return result
    if n > 0:
        value_z0 = compare(pattern, text, 0, 0)
        z_values[0] = value_z0
        if value_z0 > 0:
            right = value_z0
            left = 0
    for k in range(1, m):
        if k > right - 1:  # case 1: k lies outside the current Z-box
            value_zk = compare(pattern, text, k, 0)
            z_values[k] = value_zk
            if value_zk > 0:
                # calculate position of mismatch
                right = value_zk + k
                left = k
            # else no update is necessary
        else:  # case 2: inside the Z-box — reuse pattern's own Z-values
            # get previous z values
            prev_z = z_patt[k - left]
            remainder = right - k
            if prev_z < remainder:  # case 2a: fully inside, copy as-is
                z_values[k] = prev_z
            else:
                if prev_z > remainder:  # case 2b: clipped at the box edge
                    z_values[k] = remainder
                else:  # case 2c: may extend past the box edge
                    # explicit comparison from right
                    right_compare = compare(pattern, text, right, right - k)
                    z_values[k] = prev_z + right_compare
                    left = k
                    right = k + right_compare
    """
    Based on the computation of z_values above, compute the bitvector
    Z_values that is equal to the length of the suffix means that the bitvector
    for the position will be 0.
    """
    for i in range(m):
        if z_values[i] == (m - i):
            result -= 2 ** (m - i - 1)
    return result
|
from django.utils.encoding import force_text
from wagtail.admin import compare
def page_revision_diff(revision_a, revision_b):
    """
    Build a serializable diff between two Wagtail page revisions.

    Each comparison object produced by the page's edit handler is reduced
    to a dict of plain values so the result can be rendered or JSON-encoded.

    :param revision_a: the older page revision
    :param revision_b: the newer page revision
    :return: list of dicts, one per changed field/relation:
             {'label': ..., 'diff': ...} for fields, or
             {'label': ..., 'children': [...]} for child relations
    """
    comparison = [
        comp(revision_a, revision_b)
        for comp in revision_b.get_edit_handler().get_comparison()
    ]
    # Keep only the comparisons that actually differ between revisions.
    comparison = [comp for comp in comparison if comp.has_changed()]
    result = []
    for comp in comparison:
        item = {
            'label': force_text(comp.field_label()),
        }
        if isinstance(comp, compare.StreamFieldComparison):
            item['diff'] = comp.htmldiff()
        elif comp.is_field:
            item['diff'] = comp.htmldiff()
        elif comp.is_child_relation:
            # BUG FIX: is_child_relation is a class attribute on wagtail
            # comparison objects (like is_field above), not a method --
            # calling it raised TypeError for child-relation comparisons.
            item['children'] = []
            for child_comp in comp.get_child_comparisons():
                item_child = {
                    'move': child_comp.get_position_change(),
                    'operation': '',
                    'fields': []
                }
                # BUG FIX: is_addition()/is_deletion() are methods on
                # ChildObjectComparison; the bare attributes were always
                # truthy, marking every child as a deletion.
                if child_comp.is_addition():
                    item_child['operation'] = 'addition'
                if child_comp.is_deletion():
                    item_child['operation'] = 'deletion'
                for child_field_comp in child_comp.get_field_comparisons():
                    # BUG FIX: field_label()/htmldiff() must be called;
                    # the original stored bound methods in the dict,
                    # which cannot be rendered or serialized.
                    item_child['fields'].append({
                        'label': child_field_comp.field_label(),
                        'htmldiff': child_field_comp.htmldiff()
                    })
                item['children'].append(item_child)
        result.append(item)
    return result
|
import numpy as np
import sys
from Gravity.functions import (mPrismCart, mpoinCart, mHollowSphere,
update_progress)
from Gravity.plotting import plot_gravity, plot_hollow_sphere
"""
v 1.0
Gravity modelling
with point mass and prism models
Author: I. Vasconcelos 2016
Translated to python: S. Schneider 2018
"""
def model_gravity(object='all', scal=1, plotting=True):
    """
    Model the gravity tensor components on a horizontal observation grid.

    object:   'prisma', 'point' or 'all' --- which source model(s) to run
    scal:     scale factor for the grid extent and observation height
    plotting: if True, plot the resulting components

    Returns the tuple (Vxx, Vxy, Vxz, Vyy, Vyz, Vzz, P, Vz) for 'prisma'
    or 'point'; for 'all' both models are only plotted and None is returned.
    """
    nstart = -1E3
    nend = 1E3
    nstep = 2E1
    # small offset (1E-3) keeps observation points off the singular axes
    x = scal * np.arange(nstart, nend+nstep, nstep) + 1E-3
    y = scal * np.arange(nstart, nend+nstep, nstep) + 1E-3
    z = (scal)**1.5*1.1E2
    mass = 7.1430e+012
    # result grids, suffix 1 = prism model, suffix 2 = point-mass model
    Vxx1 = np.zeros((x.size, y.size))
    Vxy1 = np.zeros((x.size, y.size))
    Vxz1 = np.zeros((x.size, y.size))
    Vyy1 = np.zeros((x.size, y.size))
    Vyz1 = np.zeros((x.size, y.size))
    Vzz1 = np.zeros((x.size, y.size))
    Vz1 = np.zeros((x.size, y.size))
    P1 = np.zeros((x.size, y.size))
    Vxx2 = np.zeros((x.size, y.size))
    Vxy2 = np.zeros((x.size, y.size))
    Vxz2 = np.zeros((x.size, y.size))
    Vyy2 = np.zeros((x.size, y.size))
    Vyz2 = np.zeros((x.size, y.size))
    Vzz2 = np.zeros((x.size, y.size))
    Vz2 = np.zeros((x.size, y.size))
    P2 = np.zeros((x.size, y.size))
    update_progress(0)
    for j, v in enumerate(y):
        for i, w in enumerate(x):
            if object in ['prisma', 'all']:
                V = mPrismCart(x[i], y[j], z, mass)
                Vxx1[i, j], Vxy1[i, j], Vxz1[i, j], Vyy1[i, j] = V[:4]
                Vyz1[i, j], Vzz1[i, j], P1[i, j], Vz1[i, j] = V[4:]
            if object in ['point', 'all']:
                V = mpoinCart(x[i], y[j], z, mass)
                Vxx2[i, j], Vxy2[i, j], Vxz2[i, j], Vyy2[i, j] = V[:4]
                Vyz2[i, j], Vzz2[i, j], P2[i, j], Vz2[i, j] = V[4:]
        # BUG FIX: the progress bar tracks the outer loop over y, so the
        # denominator must be len(y); the original divided by len(x),
        # which only coincided because the grid is square.
        update_progress(j/float(len(y)))
    sys.stdout.write("\n")
    if plotting:
        if object in ['prisma', 'all']:
            title = 'Prism-Mass Model | height = %f m' % z
            plot_gravity(Vxx1, Vxy1, Vxz1, Vyy1, Vyz1, Vzz1, P1, Vz1, title)
        if object in ['point', 'all']:
            title = 'Point-Mass Model | height = %f m' % z
            plot_gravity(Vxx2, Vxy2, Vxz2, Vyy2, Vyz2, Vzz2, P2, Vz2, title)
    if object == 'prisma':
        return Vxx1, Vxy1, Vxz1, Vyy1, Vyz1, Vzz1, P1, Vz1
    elif object == 'point':
        return Vxx2, Vxy2, Vxz2, Vyy2, Vyz2, Vzz2, P2, Vz2
def hollow_sphere(a=3, b=6):
    """Compute and plot the gravity of a hollow sphere with inner
    radius a and outer radius b (sampled at N=250 points)."""
    radii, grav = mHollowSphere(a, b, N=250)
    plot_hollow_sphere(a, b, radii, grav)
    return
|
from django.db import models
# Create your models here.
# Minimal record of an LED hub: a free-form description plus a numeric id.
class Led(models.Model):
    # free-form description of the hub (max 256 characters)
    hub_information = models.CharField(max_length=256)
    # numeric hub identifier; defaults to 0 when not supplied
    hub_id = models.IntegerField(default=0)
|
from django.urls import path, re_path
from . import views
# URL routes for this app.  Fixed-prefix views come first; the final
# empty-pattern re_path acts as a catch-all.
urlpatterns = [
    path('addhosts/', views.add_hosts, name='add_hosts'),
    path('addmodules/', views.add_modules, name='add_modules'),
    path('tasks/', views.tasks, name='tasks'),
    path('result/', views.result, name='result'),
    # pk_id and flag are captured as digit-only groups and passed to the view
    re_path('^del_arg/(?P<pk_id>[0-9]+)/(?P<flag>[0-9]+)/$', views.del_arg, name='del_arg'),
    # NOTE(review): an empty pattern matches every path, so this entry must
    # stay last; use r'^$' instead if only the root URL should hit host_info.
    re_path('', views.host_info, name="host_info"),
]
|
import unittest
from collections import OrderedDict
import sys, os
myPath = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, myPath + '/../src')
from bio_formators import clean_format_sequence, format_to_string, format_gc_content
class TestBioFormators(unittest.TestCase):
    """Unit tests for the bio_formators helper functions."""
    def test_clean_sequence(self):
        # Arrange
        sequence = ">TCAGg"
        # Act
        clean_seq = clean_format_sequence(sequence)
        # Assert: header marker stripped and sequence upper-cased
        self.assertEqual(clean_seq, "TCAGG")
    def test_format_to_string_when_message_is_dict(self):
        # Arrange: ordered dict so the expected line order is deterministic
        key_values = OrderedDict()
        key_values['1'] = "ATGC"
        key_values['2'] = "AAA"
        # Act
        string = format_to_string(key_values)
        # Assert
        self.assertEqual(string, "1 ATGC\n2 AAA")
    def test_format_to_string_when_message_is_str(self):
        # Arrange
        message = "ATG"
        # Act
        string = format_to_string(message)
        # Assert
        self.assertEqual(string, "ATG")
    def test_format_gc_content(self):
        # BUG FIX: the method name had a trailing-'r' typo
        # ("test_format_gc_contentr"); renamed for consistency.
        # Arrange
        gc = 0.5
        # Act
        string = format_gc_content(gc)
        # Assert
        self.assertEqual(string, "GC = 50.00%")
def main():
    """Entry point: delegate to unittest's command-line test runner."""
    unittest.main()
# Allow running this test module directly as a script.
if __name__ == '__main__':
    main()
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2017-11-23 16:27:40
# @Author : ayyll (upyh@foxmail.com)
# @Link : http://ayyll.com
# @Version : V-1.0.0
import os
import sys
#获取当前目录下的文件
# Replace hard tabs with four spaces in every .c / .py file of the
# current working directory, rewriting each file in place.
file_arr = os.listdir(os.getcwd())
for fname in file_arr:
    # only C and Python source files are touched
    if fname.endswith(('.c', '.py')):
        print('now we are repairing the ' + fname + '...')
        # read the whole file, then overwrite it with tabs expanded;
        # `with` guarantees the handle is closed even on error
        with open(fname, 'r') as file_obj:
            all_the_text = file_obj.read()
        ans = all_the_text.replace('\t', '    ')
        with open(fname, 'w') as file_obj:
            file_obj.write(ans)
print('Formatting Successful!')
# Windows-only pause; on other platforms this prints a harmless error
os.system("pause")
|
import os
import sys
import shutil
import time
import saved_metrics
sys.path.insert(0, 'scripts')
sys.path.insert(0, os.path.join("tools", "families"))
import experiments as exp
import fam
import sequence_model
import ete3
def generate_scheduler_commands_file(datadir, subst_model, cores, output_dir):
  """
  Write the scheduler command file for a dicotree run.

  One line is written per gene family:
    <family> 1 1 <dicotree_script> <alignment> <subst_model> <family_output_dir> 20

  datadir:     dataset directory
  subst_model: substitution model name
  cores:       unused here; kept for a uniform call signature
  output_dir:  run output directory
  Returns the path of the generated commands file.
  """
  # (removed unused locals results_dir and family_dir from the original)
  scheduler_commands_file = os.path.join(output_dir, "commands.txt")
  with open(scheduler_commands_file, "w") as writer:
    for family in fam.get_families_list(datadir):
      dicotree_dir = os.path.join(output_dir, "dico_" + family)
      command = [
          family,
          "1",  # NOTE(review): scheduler bookkeeping fields -- confirm meaning
          "1",
          exp.dicotree_script,
          fam.get_alignment(datadir, family),
          subst_model,
          dicotree_dir,
          "20",
      ]
      writer.write(" ".join(command) + "\n")
  return scheduler_commands_file
def extract_dicotree_trees(datadir, output_dir, subst_model):
  """Copy the trees produced by a dicotree run into the per-family
  gene-tree locations of the dataset directory."""
  families_dir = os.path.join(datadir, "families")
  for family in os.listdir(families_dir):
    # per-family output directory created by the dicotree script
    dicotree_dir = os.path.join(output_dir, "dico_" + family)
    dicoasteroid = fam.build_gene_tree_path(datadir, subst_model, family, "dicotree-asteroid-nobl")
    dicoasteroidbl = fam.build_gene_tree_path(datadir, subst_model, family, "dicotree-asteroid-bl")
    # NOTE(review): dicoastral is currently unused because the astral copy
    # below is commented out; kept for when that step is re-enabled.
    dicoastral = fam.build_gene_tree_path(datadir, subst_model, family, "dicotree-astral")
    dicomrptnt = fam.build_gene_tree_path(datadir, subst_model, family, "dicotree-mrp-tnt")
    shutil.copyfile(os.path.join(dicotree_dir, "final_asteroid.newick"), dicoasteroid)
    shutil.copyfile(os.path.join(dicotree_dir, "final_asteroid-bl.newick"), dicoasteroidbl)
    shutil.copyfile(os.path.join(dicotree_dir, "final_mrp_tnt.newick"), dicomrptnt)
    #shutil.copyfile(os.path.join(dicotree_dir, "final_astral.newick"), dicoastral)
def run_dicotree_on_families(datadir, subst_model, cores):
  """Run the dicotree pipeline on every family of the dataset.

  Clears and recreates the run directory, schedules one dicotree job per
  family, records runtime metrics, and copies the resulting trees back
  into the dataset layout.
  """
  output_dir = fam.get_run_dir(datadir, subst_model, "dicotree_run")
  # start from a clean run directory (True ignores a missing directory)
  shutil.rmtree(output_dir, True)
  os.makedirs(output_dir)
  scheduler_commands_file = generate_scheduler_commands_file(datadir, subst_model, cores, output_dir)
  start = time.time()
  exp.run_with_scheduler(exp.python(), scheduler_commands_file, "fork", cores, output_dir, "logs.txt")
  # wall-clock runtime of the whole scheduled run
  saved_metrics.save_metrics(datadir, fam.get_run_name("dicotree", subst_model), (time.time() - start), "runtimes")
  # "seqtimes": runtime scaled by the load-balance factor of the run
  lb = fam.get_lb_from_run(output_dir)
  saved_metrics.save_metrics(datadir, fam.get_run_name("dicotree", subst_model), (time.time() - start) * lb, "seqtimes")
  extract_dicotree_trees(datadir, output_dir, subst_model)
if __name__ == "__main__":
    # Expect exactly three positional arguments: datadir subst_model cores.
    max_args_number = 4
    if len(sys.argv) < max_args_number:
        # BUG FIX: removed the misleading "Cluster can be either..." line
        # (this script takes no cluster argument) and exit non-zero so the
        # shell sees the usage error.
        print("Syntax error: python " + os.path.basename(__file__) + " datadir subst_model cores.")
        sys.exit(1)
    datadir = sys.argv[1]
    subst_model = sys.argv[2]
    cores = int(sys.argv[3])
    run_dicotree_on_families(datadir, subst_model, cores)
|
import plotly.express as px
import csv
# Scatter-plot student marks vs days present from a CSV file.
# NOTE(review): the backslash in the path is a literal character here
# (\S is not a recognized escape), but a raw string or forward slash
# would be safer.  Also confirm plotly accepts a csv.DictReader directly;
# a pandas DataFrame is the usual input.
with open("csv files\Student Marks vs Days Present.csv",encoding = "utf-8") as csv_file:
    df = csv.DictReader(csv_file)
    # px.scatter consumes the reader while the file is still open;
    # the x/y names must match the CSV header exactly.
    fig = px.scatter(df,x = "Marks In Percentage",y = "Days Present")
    fig.show()
#!/usr/bin/env python
# -*- Mode: Python; coding: utf-8; indent-tabs-mode: nil; tab-width: 4 -*-
#
# # Authors informations
#
# @author: HUC Stéphane
# @email: <devs@stephane-huc.net>
# @url: http://stephane-huc.net
#
# @license : BSD "Simplified" 2 clauses
#
''' Tools needed for Class PixUP '''
import pprint
import os
import sys
class Tools(object):
    '''Tools for PixUP: helpers for config handling, file I/O and
    PHP-style array searches.  NOTE: this is Python 2 code (print
    statements, cmp(), ConfigParser, StringIO).'''
    def __init__(self, init):
        # copy every key of the init dict onto the instance as an attribute
        for i in init:
            setattr(self, i, init[i])
        self.pp = pprint.PrettyPrinter(indent=4)
    @staticmethod
    def array_search(haystack, needle):
        ''' Equivalent PHP array_search() '''
        # index of the first occurrence; raises IndexError if absent
        return [k for k, v in enumerate(haystack) if v == needle][0]
    def array_search_by_label(self, haystack, needle):
        ''' Search k if label exists and comp with needle '''
        # returns the position (in haystack['index']) whose 'label' equals
        # needle; on a miss, logs diagnostics and implicitly returns None
        try:
            return [k for k, v in enumerate(haystack['index'])
                    if cmp(needle, haystack[v]['label']) == 0][0]
        except IndexError as iee:
            print 'Error Index => %s ' % iee
            self.pp.pprint(haystack)
            print 'needle: %s ' % needle
            self.log.debug('haystack: \n' + str(self.pp.pformat(haystack)))
            self.log.debug('needle: %s ' % needle)
    def array_search_by_label2(self, haystack, needle):
        ''' Search v if label exists and comp with needle '''
        # same as array_search_by_label but returns the key name itself
        try:
            return [v for k, v in enumerate(haystack['index'])
                    if cmp(needle, haystack[v]['label']) == 0][0]
        except IndexError as iee:
            print 'Error Index => %s ' % iee
            self.pp.pprint(haystack)
            print 'needle: %s ' % needle
            self.log.debug('haystack: \n' + str(self.pp.pformat(haystack)))
            self.log.debug('needle: %s ' % needle)
    def build_cfg_file(self):
        '''Copy file config, and fill-it!'''
        import shutil
        # destination directory of the user config file
        dst = os.path.dirname(self.config['file'])
        # bundled template shipped with the application
        src = os.path.join(self.dir, self.dirs[0], 'pixup.conf')
        if not os.path.isdir(dst):
            os.makedirs(dst)
        if os.path.isfile(src):
            shutil.copyfile(src, self.config['file'])
    @staticmethod
    def display_historic(init):
        '''Display historic of images uploaded'''
        from modules import browser
        viewer = browser.Browser(init)
        viewer.view()
    def get_cfg_file(self):
        '''Get infos into config file'''
        # hand-rolled "key=value" parser; see get_config() for the
        # ConfigParser-based equivalent
        haystack = dict()
        try:
            if os.path.getsize(self.config['file']) > 0:
                dict_cfg = open(self.config['file']).readlines()
                for val in dict_cfg:
                    # NOTE(review): '^#' is matched as a literal substring,
                    # not a regex, so comment lines are likely never removed
                    # -- confirm intent
                    if '^#' in val:
                        dict_cfg.remove(val)
                    if "=" in val:
                        key = val.split('=')
                        key[1] = key[1].strip()
                        # booleans are stored as the strings true/false
                        if key[1] == "true":
                            haystack[key[0]] = True
                        elif key[1] == "false":
                            haystack[key[0]] = False
                        else:
                            if key[0] == 'mini_width':
                                haystack[key[0]] = int(key[1])
                            elif key[0] == 'service_defaut':
                                # normalize service names: drop quotes/dashes
                                for string in ['"','-','_']:
                                    key[1] = key[1].replace(string,'').lower()
                                haystack[key[0]] = key[1]
                            else:
                                haystack[key[0]] = key[1].strip('"')
                del(dict_cfg)
                return haystack
        except IOError as ioe:
            self.log.exception('Error to read document: %s' % ioe)
    def get_config(self):
        '''Get infos from file config'''
        # ConfigParser needs a [section] header, so a fake one is prepended
        from ConfigParser import ConfigParser
        from StringIO import StringIO
        haystack = dict()
        section = 'Main'
        parser = ConfigParser()
        with open(self.config['file'], 'r') as stream:
            fakefile = StringIO("["+section+"]\n" + stream.read())
        parser.readfp(fakefile)
        options = parser.options(section)
        for option in options:
            val = parser.get(section, option)
            if option == 'mini_width':
                haystack[option] = parser.getint(section, option)
            elif option == 'service_defaut':
                # normalize service names: drop quotes/dashes/spaces
                for string in ['"','-','_',' ']:
                    val = val.replace(string,'')
                haystack[option] = val.lower()
            else:
                if val == 'true':
                    haystack[option] = True
                elif val == 'false':
                    haystack[option] = False
                else:
                    haystack[option] = val.strip('"')
        return haystack
    @staticmethod
    def in_array(haystack, needle):
        ''' Equivalent PHP in_array() '''
        if needle in haystack:
            return True
        else:
            return False
    @staticmethod
    def read_file(rfile):
        '''Read file'''
        # NOTE(review): this is a @staticmethod, so the `self.log` reference
        # in the except branch raises NameError when an IOError occurs.
        try:
            fil = open(rfile, 'r')
            if fil:
                strings = fil.read()
                fil.close()
                return strings
        except IOError as ioe:
            self.log.exception('Error to read document: %s' % ioe)
            return False
    @staticmethod
    def rebuild_dict(haystack):
        ''' Rebuild dictionnary to remove index if active is false '''
        # keep only entries whose 'active' flag is truthy in the index list
        haystack['key'] = haystack['index']
        haystack['index'] = list()
        for name in haystack['key']:
            if haystack[name]['active']:
                haystack['index'].append(name)
        del haystack['key']
        return haystack
    def sed_lines(self, filename, pattern=None, replace=None):
        '''Edit file to replace lines'''
        # sed-like in-place rewrite: behavior depends on which known file
        # is being edited (the log file vs the config file)
        try:
            import re
            with open(filename, 'r') as rfile:
                lines = rfile.readlines()
            with open(filename, 'w') as wfile:
                for line in lines:
                    if filename == self.config['vars']['logname']:
                        # log file: substitute the given pattern at line start
                        if pattern in line:
                            wfile.write(re.sub(r'^' + pattern, replace, line))
                        else:
                            wfile.write(line)
                    if filename == self.config['file']:
                        # config file: keep comments, refill empty "key=" lines
                        if '#' in line:
                            wfile.write(line)
                        if "=" in line:
                            key = line.split('=')
                            pattern = key[0] + '=$'
                            if self.config['vars'][key[0]]:
                                val = self.config['vars'][key[0]]
                                if isinstance(val, bool):
                                    text = key[0] + '=true'
                                elif isinstance(val, int):
                                    text = key[0] + '=' + str(val)
                                else:
                                    text = key[0] + '="' + val + '"'
                                wfile.write(re.sub(r'^' + pattern, text, line))
            return True
        except IOError as ioe:
            self.log.exception('Error to write document: %s' % ioe)
            return False
    @staticmethod
    def write_file(text, wfile):
        ''' Write file '''
        # NOTE(review): same @staticmethod/self.log mismatch as read_file:
        # the except branch raises NameError instead of logging.
        try:
            fil = open(wfile, 'w')
            if fil:
                fil.write(text)
                fil.close()
                return True
        except IOError as ioe:
            self.log.exception('Error to write document: %s' % ioe)
            return False
|
#!/usr/bin/env /proj/sot/ska/bin/python
#########################################################################################
# #
# recover_compgradkodak.py: recover compgradkodak_fits from beginning #
# #
# author: t. isobe (tisobe@cfa.harvard.edu) #
# #
# last update: Apr 06, 2017 #
# #
#########################################################################################
import os
import sys
import re
import string
import random
import math
import time
import numpy
import astropy.io.fits as pyfits
#
#--- from ska
#
from Ska.Shell import getenv, bash
ascdsenv = getenv('source /home/ascds/.ascrc -r release; source /home/mta/bin/reset_param ', shell='tcsh')
#
#--- add the path to mta python code directory
#
sys.path.append('/data/mta/Script/Python_script2.7/')
sys.path.append('./')
import mta_common_functions as mcf
import convertTimeFormat as tcnv
import recover_suppl_functions as rsf
#
#--- define column names of compgradkodak.fits (header part only)
#
# column names (header part) of compgradkodak.fits; each gets an _avg and
# a _dev column in the output table
gradkodak = ['hrmaavg', 'hrmacav', 'hrmaxgrd', 'hrmaradgrd', 'obaavg', 'obaconeavg', \
             'obaaxgrd', 'obadiagrad', 'fwblkhdt', 'aftblkhdt', 'mzobacone', 'pzobacone', \
             'hrmarange', 'tfterange', 'hrmastrutrnge', 'scstrutrnge']
#
#--- temp writing file name
#
# unix time stamp makes the scratch file name unique per run
rtail = int(time.time())
zspace = '/tmp/zspace' + str(rtail)
hpass = '/data/mta/Script/Month/Config/house_keeping/loginfile'
#------------------------------------------------------------------------
#-- create_compgradkodak_fits: recover compgradkodak_fits from beginning
#------------------------------------------------------------------------
def create_compgradkodak_fits(start_year, start_yday, stop_year, stop_yday, pfits=''):
    """
    recover compgradkodak_fits from beginning
    input:  start_year  --- the year of the starting date
            start_yday  --- the ydate of the starting date
            stop_year   --- the year of the stopping date
            stop_yday   --- the ydate of the stopping date
                            if start_year is not given (e.g. ""), the entire
                            mission period (from 1999:203) is re-computed
            pfits       --- the previous fits file; the new data will be appended
                            if this is not given, the new result will be
                            written on compgradkodak.fits
    output: compgradkodak.fits
    """
#
#--- find this year
#
    if start_year == '':
        today = time.localtime()
        start_year = 1999
        start_yday = 203
        stop_year  = today.tm_year
        stop_yday  = today.tm_yday
#
#--- create an empty list for each data
#--- NOTE: Python 2 `exec` statements create one <name>_avg / <name>_dev
#--- list per gradkodak column in the local namespace
#
    otime = []
    for ent in gradkodak:
        exec "%s_avg = []" % (ent)
        exec "%s_dev = []" % (ent)
#
#--- dom start from yday 203 of 1999
#
    chk = 0
    dom = set_start_dom(start_year, start_yday)
#
#--- go though one day at a time from 1999.07.21 to today
#
    for year in range(start_year, (stop_year+1)):
        if tcnv.isLeapYear(year) == 1:
            yend = 367
        else:
            yend = 366
        for yday in range(1, yend):
            if year == start_year and yday < start_yday:
                continue
            if year == stop_year and yday >= stop_yday:
                chk = 1
                break
            print "DATE: " + str(year) + '<-->' + str(yday)
            dom += 1
            # zero-pad the ydate to three digits for the time strings
            cyday = str(int(yday))
            if yday < 10:
                cyday = '00' + cyday
            elif yday < 100:
                cyday = '0' + cyday
#
#--- set time span for a day
#
            start = str(year) + ':' + cyday + ':00:00:00'
            stop  = str(year) + ':' + cyday + ':23:59:59'
#
#--- run dataseeker and extract data. return a list of lists of each data
#--- NOTE(review): the bare excepts silently skip any day whose extraction
#--- or averaging fails
#
            try:
                e_data = extract_data(start, stop)
            except:
                continue
#
#--- compute daily avg and std for each column of compgradkodak
#
            try:
                d_data = convert_avg(e_data)
            except:
                continue
            print "DOM: " + str(dom)
            otime.append(dom)
#
#--- first sep data are avg and from sep+1 to the end are std
#
            sep = len(gradkodak)
            for k in range(0, sep):
                data = d_data[k]
                dev  = d_data[k+sep]
                exec "%s_avg.append(%s)" % (gradkodak[k], str(data))
                exec "%s_dev.append(%s)" % (gradkodak[k], str(dev))
        if chk == 1:
            break
#
#--- create fits file: one time column plus an _avg and _dev column
#--- per gradkodak entry
#
    col_save = []
    col = pyfits.Column(name="time", format='F', array=otime)
    col_save.append(col)
    for nam in gradkodak:
        mname = nam + '_avg'
        dname = nam + '_dev'
        exec "marray = numpy.array(%s_avg)" % (nam)
        exec "darray = numpy.array(%s_dev)" % (nam)
        col = pyfits.Column(name=mname, format='F', array=marray)
        col_save.append(col)
        col = pyfits.Column(name=dname, format='F', array=darray)
        col_save.append(col)
    cols = pyfits.ColDefs(col_save)
    tbhdu = pyfits.BinTableHDU.from_columns(cols)
    if pfits == "":
        # fresh run: back up any existing fits file, then write a new one
        if os.path.isfile('./compgradkodak.fits'):
            os.system(' mv compgradkodak.fits compgradkodak.fits~')
        tbhdu.writeto('compgradkodak.fits')
    else:
        # append mode: merge the new rows onto the previous fits file
        tbhdu.writeto('add_part.fits')
        rsf.fits_append(pfits, 'add_part.fits', 'temp_out.fits')
        if os.path.isfile('./compgradkodak.fits'):
            os.system(' mv compgradkodak.fits compgradkodak.fits~')
        os.system('mv -f temp_out.fits compgradkodak.fits')
#------------------------------------------------------------------------
#-- set_start_dom: finding starting dom date ---
#------------------------------------------------------------------------
def set_start_dom(start_year, start_yday):
    """
    finding starting dom date
    input:  start_year  --- the year that the new dataset starts
            start_yday  --- the ydate that the new dataset starts
    output: dom         --- dom of the starting date: the number of days
                            counted from 1999:203 (not the formal DOM
                            definition, but what this script uses)
    """
    import calendar
    dom = 0
    # (removed the unused variable `don = 202` from the original)
    for year in range(1999, (start_year + 1)):
        # range(1, yend) walks every ydate: 366 days in a leap year
        if calendar.isleap(year):
            yend = 367
        else:
            yend = 366
        for yday in range(1, yend):
            if year == 1999 and yday < 203:
                continue                # mission data start at 1999:203
            if year == start_year and yday >= start_yday:
                return dom              # reached the requested date
            dom += 1
    return dom
#------------------------------------------------------------------------
#-- convert_avg: compute avg and std of each column entry in gradkodak for the given data set
#------------------------------------------------------------------------
def convert_avg(input_list):
    """
    compute avg and std of each column entry in gradkodak for the given data set
    input:  input_list  --- a list of lists. each sub-list contains dataseeker output for "_avg"
                            each entry is a dictionary which contains avg, std, min, max
    output: a list of avg and std of each columns of gradkodak. first len(gradkodak)
            are avg of the columns and the next len(gradkodak) are std.
    """
#
#--- open the list of lists
#--- NOTE: this positional unpack must exactly match the msid order
#--- produced by extract_data()
#
    (_4rt575t_avg, _4rt700t_avg, _4rt701t_avg, _4rt702t_avg, _4rt703t_avg, _4rt704t_avg,\
     _4rt705t_avg, _4rt706t_avg, _4rt707t_avg, _4rt708t_avg, _4rt709t_avg, _4rt710t_avg,\
     _4rt711t_avg, \
     ohrthr02_avg, ohrthr03_avg, ohrthr04_avg, ohrthr05_avg, ohrthr06_avg, ohrthr07_avg, \
     ohrthr08_avg, ohrthr09_avg, ohrthr10_avg, ohrthr11_avg, ohrthr12_avg, ohrthr13_avg, \
     ohrthr14_avg, ohrthr15_avg, ohrthr17_avg, ohrthr21_avg, ohrthr22_avg, ohrthr23_avg, \
     ohrthr24_avg, ohrthr25_avg, ohrthr26_avg, ohrthr27_avg, ohrthr28_avg, ohrthr29_avg, \
     ohrthr30_avg, ohrthr31_avg, ohrthr33_avg, ohrthr34_avg, ohrthr35_avg, ohrthr36_avg, \
     ohrthr37_avg, ohrthr39_avg, ohrthr40_avg, ohrthr42_avg, ohrthr44_avg, ohrthr45_avg, \
     ohrthr46_avg, ohrthr47_avg, ohrthr49_avg, ohrthr50_avg, ohrthr51_avg, ohrthr52_avg, \
     ohrthr53_avg, ohrthr54_avg, ohrthr55_avg, ohrthr56_avg, ohrthr57_avg, ohrthr58_avg, \
     ohrthr60_avg, ohrthr61_avg, \
     oobthr02_avg, oobthr03_avg, oobthr04_avg, oobthr05_avg, oobthr06_avg, oobthr07_avg, \
     oobthr08_avg, oobthr09_avg, oobthr10_avg, oobthr11_avg, oobthr12_avg, oobthr13_avg, \
     oobthr14_avg, oobthr15_avg, oobthr17_avg, oobthr18_avg, oobthr19_avg, oobthr20_avg, \
     oobthr21_avg, oobthr22_avg, oobthr23_avg, oobthr24_avg, oobthr25_avg, oobthr26_avg, \
     oobthr27_avg, oobthr28_avg, oobthr29_avg, oobthr30_avg, oobthr31_avg, oobthr33_avg, \
     oobthr34_avg, oobthr35_avg, oobthr36_avg, oobthr37_avg, oobthr38_avg, oobthr39_avg, \
     oobthr40_avg, oobthr41_avg, oobthr42_avg, oobthr43_avg, oobthr44_avg, oobthr45_avg, \
     oobthr48_avg, oobthr49_avg, oobthr50_avg, oobthr51_avg, oobthr52_avg, oobthr53_avg, \
     oobthr54_avg, oobthr55_avg, oobthr56_avg, oobthr57_avg, oobthr58_avg, oobthr59_avg, \
     oobthr60_avg, oobthr61_avg, oobthr62_avg, oobthr63_avg \
    ) = input_list
#
#--- get avg and std of the enter entries of the given list
#--- (each *_list below names the msid numbers combined for that quantity;
#--- Python 2 list-concatenation of range() results)
#
    hrmarange_list = range(2,14) + range(21,27) + [28, 29,30,33, 36, 37] + range(44,49) + range(49, 54) +[55, 56]
    [hrmaavg, hrmadev] = get_avg_std('ohrthr', hrmarange_list, input_list)
#------------
    hrmacs_list = range(6, 16) + [17, 25, 26, 29, 30, 31] + range(33, 38) + [39, 40] + range(50, 59) + [60, 61]
    [hrmacavg, hrmacdev] = get_avg_std('ohrthr', hrmacs_list, input_list)
#-----------
    hrmaxgrad_list1 = [10, 11, 34, 35, 55, 56]
    [hrmaxgrd1, xxx] = get_avg_std('ohrthr', hrmaxgrad_list1, input_list)
    hrmaxgrad_list2 = [12, 13, 35, 37, 57, 58]
    [hrmaxgrd2, xxx] = get_avg_std('ohrthr', hrmaxgrad_list2, input_list)
    hrmaxgrd = hrmaxgrd1 - hrmaxgrd2
#
#--- compute std separately for the case two outputs are add/subtracted after computed
#
    dev_list = hrmaxgrad_list1 + hrmaxgrad_list2
    [z, hrmaxgrd_dev] = get_avg_std('ohrthr', dev_list, input_list)
#-----------
    hrmarad1grd_list = [8, 31, 33, 52]
    [hrmarad1grd, xxx] = get_avg_std('ohrthr', hrmarad1grd_list, input_list)
    hrmarad2grd_list = [9, 53, 54]
    [hrmarad2grd, xxx] = get_avg_std('ohrthr', hrmarad2grd_list, input_list)
    hrmaradgrd = hrmarad1grd - hrmarad2grd
    dev_list = hrmarad1grd_list + hrmarad2grd_list
    [z, hrmaradgrd_dev] = get_avg_std('ohrthr', dev_list, input_list)
#-----------
    obaavg_list = range(8, 33) + range(33, 42) + [44, 45]
    [obaavg, obadev] = get_avg_std('oobthr', obaavg_list, input_list)
#-----------
    obacone_list = range(8, 16) + range(17, 31) + range(57, 62)
    [obaconeavg, obacone_dev] = get_avg_std('oobthr', obacone_list, input_list)
#-----------
#
#--- for the case two different header entries are needed;
#
    fwblkhd_list1 = [62, 63]
    fwblkhd_list2 = [700, 712]
    [fwblkhdt, fwblkhdt_dev] = get_avg_std('oobthr', fwblkhd_list1, input_list, '_4rt', fwblkhd_list2, tail='t' )
    aftblkhdt_list = [31, 33, 34]
    [aftblkhdt, aftblkhdt_dev] = get_avg_std('oobthr', aftblkhdt_list, input_list)
    obaaxgrd = fwblkhdt - aftblkhdt
    s_list = fwblkhd_list1 + aftblkhdt_list
    [z, obaaxgrd_dev] = get_avg_std('oobthr', s_list, input_list, '_4rt', fwblkhd_list2, tail='t' )
#-----------
    mzoba_list1 = [8, 19, 25, 31, 57, 60]
    mzoba_list2 = [575]
    [mzobacone, mzobacone_dev] = get_avg_std('oobthr', mzoba_list1, input_list, '_4rt', mzoba_list2, tail='t')
    pzoba_list = [13, 22, 23, 28, 29, 61]
    [pzobacone, pzobacone_dev] = get_avg_std('oobthr', pzoba_list, input_list)
    obadiagrad = mzobacone - pzobacone
    d_list = mzoba_list1 + pzoba_list
    [z, obadiagrad_dev] = get_avg_std('oobthr', d_list, input_list, '_4rt', mzoba_list2, tail='t')
#-----------
#
#--- compute the range of the data
#
    clist = range(2,14) + range(21,28) + [29,30,33, 36, 37, 42] + range(45,54) +[55, 56]
    [hrmarange, hrmarange_dev] = get_range('ohrthr', clist, input_list)
    clist = range(2, 8)
    [hrmastrutrnge, hrmastrutrnge_dev] = get_range('oobthr', clist, input_list)
    clist = range(42, 45)
    [tfterange, tfterange_dev] = get_range('oobthr', clist, input_list)
    clist = range(49, 55)
    [scstrutrnge, scstrutrnge_dev] = get_range('oobthr', clist, input_list)
#
#--- replace any non-numeric / NaN result with the sentinel -99.0; the
#--- output order must match the gradkodak column order (avgs then devs)
#
    out = []
    for val in [hrmaavg, hrmacavg, hrmaxgrd, hrmaradgrd, obaavg, obaconeavg, obaaxgrd, obadiagrad, fwblkhdt, \
                aftblkhdt, mzobacone, pzobacone, hrmarange, tfterange, hrmastrutrnge, scstrutrnge, \
                hrmadev, hrmacdev, hrmaxgrd_dev, hrmaradgrd_dev, obadev, obacone_dev, obaaxgrd_dev, obadiagrad_dev, fwblkhdt_dev, \
                aftblkhdt_dev, mzobacone_dev, pzobacone_dev, hrmarange_dev, tfterange_dev, hrmastrutrnge_dev, scstrutrnge_dev]:
        if mcf.chkNumeric(val) == False or str(val) == 'nan':
            val = -99.0
        out.append(val)
    return out
#-------------------------------------------------------------------------------------------
#-- get_avg_std: compute avg and std the given data list ---
#-------------------------------------------------------------------------------------------
def get_avg_std(mhead, clist, input_list, mhead2 ='', clist2='', tail=''):
    """
    compute avg and std the given data list
    input:  mhead       --- msid head part, e.g., 'ohrthr', 'oobthr', '_4rt'
            clist       --- a list of numbers of the data sets e.g. [3, 4, 5] for ohrthr03_avg, ohrthr04_avg...
            input_list  --- a list of the lists of the data
            mhead2      --- the second head part. default: "" means there is no second entry
            clist2      --- the second list of numbers. default: ""
            tail        --- the tail indicator. this is needed for _4rt case (e.g. _4rt701t_avg and "t" is the tail)
    output: [avg, std]  --- average and std of the combined entries of the given list.
    """
    # positional unpack must exactly match the msid order of extract_data();
    # the names are then resolved dynamically via Python 2 `exec` below
    (_4rt575t_avg, _4rt700t_avg, _4rt701t_avg, _4rt702t_avg, _4rt703t_avg, _4rt704t_avg,\
     _4rt705t_avg, _4rt706t_avg, _4rt707t_avg, _4rt708t_avg, _4rt709t_avg, _4rt710t_avg,\
     _4rt711t_avg, \
     ohrthr02_avg, ohrthr03_avg, ohrthr04_avg, ohrthr05_avg, ohrthr06_avg, ohrthr07_avg, \
     ohrthr08_avg, ohrthr09_avg, ohrthr10_avg, ohrthr11_avg, ohrthr12_avg, ohrthr13_avg, \
     ohrthr14_avg, ohrthr15_avg, ohrthr17_avg, ohrthr21_avg, ohrthr22_avg, ohrthr23_avg, \
     ohrthr24_avg, ohrthr25_avg, ohrthr26_avg, ohrthr27_avg, ohrthr28_avg, ohrthr29_avg, \
     ohrthr30_avg, ohrthr31_avg, ohrthr33_avg, ohrthr34_avg, ohrthr35_avg, ohrthr36_avg, \
     ohrthr37_avg, ohrthr39_avg, ohrthr40_avg, ohrthr42_avg, ohrthr44_avg, ohrthr45_avg, \
     ohrthr46_avg, ohrthr47_avg, ohrthr49_avg, ohrthr50_avg, ohrthr51_avg, ohrthr52_avg, \
     ohrthr53_avg, ohrthr54_avg, ohrthr55_avg, ohrthr56_avg, ohrthr57_avg, ohrthr58_avg, \
     ohrthr60_avg, ohrthr61_avg, \
     oobthr02_avg, oobthr03_avg, oobthr04_avg, oobthr05_avg, oobthr06_avg, oobthr07_avg, \
     oobthr08_avg, oobthr09_avg, oobthr10_avg, oobthr11_avg, oobthr12_avg, oobthr13_avg, \
     oobthr14_avg, oobthr15_avg, oobthr17_avg, oobthr18_avg, oobthr19_avg, oobthr20_avg, \
     oobthr21_avg, oobthr22_avg, oobthr23_avg, oobthr24_avg, oobthr25_avg, oobthr26_avg, \
     oobthr27_avg, oobthr28_avg, oobthr29_avg, oobthr30_avg, oobthr31_avg, oobthr33_avg, \
     oobthr34_avg, oobthr35_avg, oobthr36_avg, oobthr37_avg, oobthr38_avg, oobthr39_avg, \
     oobthr40_avg, oobthr41_avg, oobthr42_avg, oobthr43_avg, oobthr44_avg, oobthr45_avg, \
     oobthr48_avg, oobthr49_avg, oobthr50_avg, oobthr51_avg, oobthr52_avg, oobthr53_avg, \
     oobthr54_avg, oobthr55_avg, oobthr56_avg, oobthr57_avg, oobthr58_avg, oobthr59_avg, \
     oobthr60_avg, oobthr61_avg, oobthr62_avg, oobthr63_avg \
    ) = input_list
#
#--- save values in the list
#
    alist = []
    for k in clist:
        # zero-pad single-digit msid numbers (e.g. 3 -> '03')
        ck = str(k)
        if k < 10:
            ck = '0' + ck
#
#--- the data are save in a dictionary form which has avg, std, min, and max
#--- (msids missing from the unpack above simply contribute nothing)
#
        try:
            exec 'dlist = %s%s%s_avg' % (mhead, ck, tail)
        except:
            dlist = []
        try:
            alist = alist + list(dlist)
        except:
            pass
#
#--- for the case when the different mhead entry are requested
#
    if clist2 != '':
        for k in clist2:
            ck = str(k)
            if k < 10:
                ck = '0' + ck
            try:
                exec 'dlist = %s%s%s_avg' % (mhead2, ck, tail)
            except:
                dlist= []
            try:
                alist = alist + list(dlist)
            except:
                pass
#
#--- make sure that all entries are float values
#--- (blank and non-numeric entries are silently dropped)
#
    blist = []
    for ent in alist:
        if ent == "":
            continue
        try:
            blist.append(float(ent))
        except:
            pass
    avg = numpy.mean(blist)
    std = numpy.std(blist)
    return [avg, std]
#-------------------------------------------------------------------------------------------
#-- get_range: get avg and std of data range for given group --
#-------------------------------------------------------------------------------------------
def get_range(mshead, clist, input_list):
    """
    get avg and std of data range for given group
    input:  mshead      --- the head part of the data group. e.g., ohrthr
            clist       --- the list of each data, e.g., 03 for ohrthr03_avg
            input_list  --- a list of lists of data
    output: [avg, std]  --- NOTE: despite the name, the first element
                            returned is the overall (max - min) range of the
                            group, not the average (see the commented-out
                            return at the bottom).
    """
#
#--- open the data
#--- positional unpack must exactly match the msid order of extract_data()
#
    (_4rt575t_avg, _4rt700t_avg, _4rt701t_avg, _4rt702t_avg, _4rt703t_avg, _4rt704t_avg,\
     _4rt705t_avg, _4rt706t_avg, _4rt707t_avg, _4rt708t_avg, _4rt709t_avg, _4rt710t_avg,\
     _4rt711t_avg, \
     ohrthr02_avg, ohrthr03_avg, ohrthr04_avg, ohrthr05_avg, ohrthr06_avg, ohrthr07_avg, \
     ohrthr08_avg, ohrthr09_avg, ohrthr10_avg, ohrthr11_avg, ohrthr12_avg, ohrthr13_avg, \
     ohrthr14_avg, ohrthr15_avg, ohrthr17_avg, ohrthr21_avg, ohrthr22_avg, ohrthr23_avg, \
     ohrthr24_avg, ohrthr25_avg, ohrthr26_avg, ohrthr27_avg, ohrthr28_avg, ohrthr29_avg, \
     ohrthr30_avg, ohrthr31_avg, ohrthr33_avg, ohrthr34_avg, ohrthr35_avg, ohrthr36_avg, \
     ohrthr37_avg, ohrthr39_avg, ohrthr40_avg, ohrthr42_avg, ohrthr44_avg, ohrthr45_avg, \
     ohrthr46_avg, ohrthr47_avg, ohrthr49_avg, ohrthr50_avg, ohrthr51_avg, ohrthr52_avg, \
     ohrthr53_avg, ohrthr54_avg, ohrthr55_avg, ohrthr56_avg, ohrthr57_avg, ohrthr58_avg, \
     ohrthr60_avg, ohrthr61_avg, \
     oobthr02_avg, oobthr03_avg, oobthr04_avg, oobthr05_avg, oobthr06_avg, oobthr07_avg, \
     oobthr08_avg, oobthr09_avg, oobthr10_avg, oobthr11_avg, oobthr12_avg, oobthr13_avg, \
     oobthr14_avg, oobthr15_avg, oobthr17_avg, oobthr18_avg, oobthr19_avg, oobthr20_avg, \
     oobthr21_avg, oobthr22_avg, oobthr23_avg, oobthr24_avg, oobthr25_avg, oobthr26_avg, \
     oobthr27_avg, oobthr28_avg, oobthr29_avg, oobthr30_avg, oobthr31_avg, oobthr33_avg, \
     oobthr34_avg, oobthr35_avg, oobthr36_avg, oobthr37_avg, oobthr38_avg, oobthr39_avg, \
     oobthr40_avg, oobthr41_avg, oobthr42_avg, oobthr43_avg, oobthr44_avg, oobthr45_avg, \
     oobthr48_avg, oobthr49_avg, oobthr50_avg, oobthr51_avg, oobthr52_avg, oobthr53_avg, \
     oobthr54_avg, oobthr55_avg, oobthr56_avg, oobthr57_avg, oobthr58_avg, oobthr59_avg, \
     oobthr60_avg, oobthr61_avg, oobthr62_avg, oobthr63_avg \
    ) = input_list
    range_list = []
    min_list = []
    max_list = []
#
#--- find min and max of each data period (usually a day) of each group
#
    for n in clist:
        # zero-pad single-digit msid numbers (e.g. 3 -> '03')
        cn = str(n)
        if n < 10:
            cn = '0' + cn
#
#--- finding min and max
#--- zeros and missing msids are mapped to the sentinel -999.0
#
        try:
            exec 'vlist = %s%s_avg' % (mshead, cn)
            try:
                val1 = min(vlist)
                val2 = max(vlist)
            except:
                val1 = 0.0
                val2 = 0.0
            val1 = float(val1)
            val2 = float(val2)
            if val1 == 0.0:
                val1 = -999.0
            if val2 == 0.0:
                val2 = -999.0
        except:
            val1 = -999.0
            val2 = -999.0
#
#--- get the interval and save
#
        if val1 != -999.0 and val2 != -999.0:
            diff = val2 - val1
            range_list.append(diff)
            min_list.append(val1)
            max_list.append(val2)
#
#--- find the avg and std of the period
#
    avg = numpy.mean(range_list)
    std = numpy.std(range_list)
    rng = max(max_list) - min(min_list)
    #return [avg, std]
    return [rng, std]
#-------------------------------------------------------------------------------------------
#-- extract_data: extract needed data for a given time period from dataseeker --
#-------------------------------------------------------------------------------------------
def extract_data(start, stop):
    """
    extract needed data for a given time period from dataseeker
    input:  start   --- starting time in <yyyy>:<ddd>:<hh>:<mm>:<ss>
            stop    --- stopping time in <yyyy>:<ddd>:<hh>:<mm>:<ss>
    output: results --- a list of lists of dataseeker output
    """
#
#--- the msids fall into three distinct instrument groups
#
    obfwdbulkhead =\
    ('_4rt575t_avg', '_4rt700t_avg', '_4rt701t_avg', '_4rt702t_avg', '_4rt703t_avg', '_4rt704t_avg',\
     '_4rt705t_avg', '_4rt706t_avg', '_4rt707t_avg', '_4rt708t_avg', '_4rt709t_avg', '_4rt710t_avg',\
     '_4rt711t_avg')
    hrmaheaters = \
    ('ohrthr02_avg', 'ohrthr03_avg', 'ohrthr04_avg', 'ohrthr05_avg', 'ohrthr06_avg', 'ohrthr07_avg', \
     'ohrthr08_avg', 'ohrthr09_avg', 'ohrthr10_avg', 'ohrthr11_avg', 'ohrthr12_avg', 'ohrthr13_avg', \
     'ohrthr14_avg', 'ohrthr15_avg', 'ohrthr17_avg', 'ohrthr21_avg', 'ohrthr22_avg', 'ohrthr23_avg', \
     'ohrthr24_avg', 'ohrthr25_avg', 'ohrthr26_avg', 'ohrthr27_avg', 'ohrthr28_avg', 'ohrthr29_avg', \
     'ohrthr30_avg', 'ohrthr31_avg', 'ohrthr33_avg', 'ohrthr34_avg', 'ohrthr35_avg', 'ohrthr36_avg', \
     'ohrthr37_avg', 'ohrthr39_avg', 'ohrthr40_avg', 'ohrthr42_avg', 'ohrthr44_avg', 'ohrthr45_avg', \
     'ohrthr46_avg', 'ohrthr47_avg', 'ohrthr49_avg', 'ohrthr50_avg', 'ohrthr51_avg', 'ohrthr52_avg', \
     'ohrthr53_avg', 'ohrthr54_avg', 'ohrthr55_avg', 'ohrthr56_avg', 'ohrthr57_avg', 'ohrthr58_avg', \
     'ohrthr60_avg', 'ohrthr61_avg')
    obaheaters = \
    ('oobthr02_avg', 'oobthr03_avg', 'oobthr04_avg', 'oobthr05_avg', 'oobthr06_avg', 'oobthr07_avg', \
     'oobthr08_avg', 'oobthr09_avg', 'oobthr10_avg', 'oobthr11_avg', 'oobthr12_avg', 'oobthr13_avg', \
     'oobthr14_avg', 'oobthr15_avg', 'oobthr17_avg', 'oobthr18_avg', 'oobthr19_avg', 'oobthr20_avg', \
     'oobthr21_avg', 'oobthr22_avg', 'oobthr23_avg', 'oobthr24_avg', 'oobthr25_avg', 'oobthr26_avg', \
     'oobthr27_avg', 'oobthr28_avg', 'oobthr29_avg', 'oobthr30_avg', 'oobthr31_avg', 'oobthr33_avg', \
     'oobthr34_avg', 'oobthr35_avg', 'oobthr36_avg', 'oobthr37_avg', 'oobthr38_avg', 'oobthr39_avg', \
     'oobthr40_avg', 'oobthr41_avg', 'oobthr42_avg', 'oobthr43_avg', 'oobthr44_avg', 'oobthr45_avg', \
     'oobthr48_avg', 'oobthr49_avg', 'oobthr50_avg', 'oobthr51_avg', 'oobthr52_avg', 'oobthr53_avg', \
     'oobthr54_avg', 'oobthr55_avg', 'oobthr56_avg', 'oobthr57_avg', 'oobthr58_avg', 'oobthr59_avg', \
     'oobthr60_avg', 'oobthr61_avg', 'oobthr62_avg', 'oobthr63_avg')
#
#--- query dataseeker once per group and concatenate the per-group lists
#
    results = []
    for group in (obfwdbulkhead, hrmaheaters, obaheaters):
        results = results + runDataseeker(start, stop, group)
    return results
#-------------------------------------------------------------------------------------------
#-- runDataseeker: extract data using dataseeker --
#-------------------------------------------------------------------------------------------
def runDataseeker(start, stop, col_list):
    """
    extract data using dataseeker
    input:  start     --- starting time in <yyyy>:<ddd>:<hh>:<mm>:<ss>
            stop      --- stopping time in <yyyy>:<ddd>:<hh>:<mm>:<ss>
            col_list  --- a list of column names
    output: data_list --- a list of data arrays; dataseeker returns 5 min averages
    """
    clen = len(col_list)
    step = int(clen / 3)
#
#--- query in three batches so each dataseeker command line stays manageable
#
    bounds = [(0, step), (step, 2 * step), (2 * step, clen)]
    data_list = []
    for (head, tail) in bounds:
        batch = col_list[head:tail]
#
#--- build the comma-separated column spec; oobthr msids need the full path
#
        fixed = []
        for col in batch:
            if col[0] != '_':
                col = '_' + col
            if re.search('oobthr', col) is not None:
                col = 'mtatel..obaheaters_avg.' + col
            fixed.append(col)
        cols = ','.join(fixed)
#
#--- dataseeker needs a param directory and a dummy input file to exist
#
        if not os.path.exists('./param'):
            os.system('mkdir param')
        if not os.path.exists('./test'):
            os.system('touch test')
        cmd = ' /home/ascds/DS.release/bin/dataseeker.pl '
        cmd = cmd + 'infile=test outfile=./ztemp.fits search_crit="columns=' + cols + ' timestart=' + str(start)
        cmd = cmd + ' timestop=' + str(stop) +'" loginFile='+ hpass + ' clobber="yes"'
        run_ascds(cmd)
#
#--- collect the requested columns from the fits output, then drop the file
#
        hdulist = pyfits.open('ztemp.fits')
        tbdata = hdulist[1].data
        for col in batch:
            if col[0] == '_':
                col = col[1:]
            data_list.append(tbdata[col])
        mcf.rm_file('./ztemp.fits')
    return data_list
#-------------------------------------------------------------------------------------------
#-- run_ascds: run the command in ascds environment --
#-------------------------------------------------------------------------------------------
def run_ascds(cmd, clean =0):
    """
    run the command in ascds environment
    input:  cmd   --- command line
            clean --- if 1, it also resets parameters       default: 0
    output: side effects of the command itself
    """
    if clean == 1:
        acmd = '/usr/bin/env PERL5LIB="" source /home/mta/bin/reset_param ;' + cmd
    else:
        acmd = '/usr/bin/env PERL5LIB="" ' + cmd
#
#--- attempt the command up to twice; failures are deliberately ignored
#--- (best-effort call, matching the historical behavior)
#
    for attempt in range(2):
        try:
            bash(acmd, env=ascdsenv)
            break
        except:
            pass
#-------------------------------------------------------------------------------------------
if __name__ == "__main__":
    # Command line: <start_year> <start_yday> <stop_year> <stop_yday> [<fits>]
    # Year/day arguments are coerced via float() so "2020.0"-style input works.
    if len(sys.argv) >= 5:
        start_year = sys.argv[1]
        start_year = int(float(start_year))
        start_yday = sys.argv[2]
        start_yday = int(float(start_yday))
        stop_year = sys.argv[3]
        stop_year = int(float(stop_year))
        stop_yday = sys.argv[4]
        stop_yday = int(float(stop_yday))
        pfits = ''
        if len(sys.argv) >= 6:
            pfits = sys.argv[5]    #--- existing table fits file to be appended
    else:
        # No (or too few) arguments: pass empty strings so the main function
        # falls back to its own default period handling.
        start_year = ''
        start_yday = ''
        stop_year = ''
        stop_yday = ''
        pfits = ''
    create_compgradkodak_fits(start_year, start_yday, stop_year, stop_yday, pfits)
|
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies simple actions when using an explicit build target of 'all'.
"""
import TestGyp
test = TestGyp.TestGyp()
# Generate build files; the -G flag folds every target into one xcode-ninja
# workspace so the explicit 'all' build covers the sub-gyps too.
test.run_gyp('all.gyp',
             '-G', 'xcode_ninja_target_pattern=^all_targets$',
             chdir='src')
test.relocate('src', 'relocate/src')
# Build all.
test.build('all.gyp', chdir='relocate/src')
if test.format=='xcode':
  chdir = 'relocate/src/dir1'
else:
  chdir = 'relocate/src'
# Output is as expected.
file_content = 'Hello from emit.py\n'
test.built_file_must_match('out2.txt', file_content, chdir=chdir)
# Neither the out.txt action nor the shared library may be built by 'all'.
test.built_file_must_not_exist('out.txt', chdir='relocate/src')
test.built_file_must_not_exist('foolib1',
                               type=test.SHARED_LIB,
                               chdir=chdir)
# xcode-ninja doesn't generate separate workspaces for sub-gyps by design
if test.format == 'xcode-ninja':
  test.pass_test()
# TODO(mmoss) Make consistent with msvs, with 'dir1' before 'out/Default'?
if test.format in ('make', 'ninja', 'cmake'):
  chdir='relocate/src'
else:
  chdir='relocate/src/dir1'
# Build the action explicitly.
test.build('actions.gyp', 'action1_target', chdir=chdir)
# Check that things got run.
file_content = 'Hello from emit.py\n'
test.built_file_must_exist('out.txt', chdir=chdir)
# Build the shared library explicitly.
test.build('actions.gyp', 'foolib1', chdir=chdir)
test.built_file_must_exist('foolib1',
                           type=test.SHARED_LIB,
                           chdir=chdir,
                           subdir='dir1')
test.pass_test()
|
# Find the minimum path sum from top left to bottom right of an N x N matrix by
# only moving to the right and down.
myMatrix = []

def findMinPathSum():
    """Read a matrix and return the minimal top-left -> bottom-right path sum.

    Moves are restricted to right and down.  The dynamic program runs in
    place on the global ``myMatrix`` filled by readMatrix().
    """
    global myMatrix
    n = readMatrix()
    for r in range(n):
        for c in range(n):
            if r == 0 and c == 0:
                continue  # start cell keeps its own value
            if c == 0:
                # First column: only reachable from the cell above.
                myMatrix[r][c] += myMatrix[r - 1][c]
            elif r == 0:
                # First row: only reachable from the cell to the left.
                myMatrix[r][c] += myMatrix[r][c - 1]
            else:
                # Interior cell: take the cheaper of the two predecessors.
                myMatrix[r][c] += min(myMatrix[r - 1][c], myMatrix[r][c - 1])
    # The bottom right corner holds the answer.
    return myMatrix[n - 1][n - 1]
def readMatrix():
    """Prompt for the matrix order and contents; fill the global myMatrix.

    Returns the matrix order (int).

    NOTE(review): input() cannot return a string containing newlines, so the
    split('\\n') below only ever yields one row -- multi-row matrices cannot
    actually be entered this way; confirm the intended input format.
    """
    global myMatrix
    matrixSize = int(input('Enter the order of the square matrix: '))
    # Pre-size the matrix; each row is replaced wholesale in the loop below.
    myMatrix = [[0] * matrixSize for row in range(0, matrixSize)]
    matrixText = input('Enter the matrix itself: ')
    setRow = matrixText.split('\n')
    curRow = 0
    for row in setRow:
        # Rows are comma-separated integers.
        setNumStr = row.split(',')
        setNum = [int(numStr) for numStr in setNumStr]
        myMatrix[curRow] = setNum
        curRow += 1
    return matrixSize
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'Hanzhiyun'
def normalize(name):
    """Return *name* with its first letter upper-cased and the rest lower-cased.

    Bug fix: the original raised IndexError on the empty string; an empty
    input is now returned unchanged.
    """
    if not name:
        return name
    return name[0].upper() + name[1:].lower()
# Equivalent for non-empty input: name.capitalize() (built-in; upper-cases the
# first character and lower-cases the rest).
# Test: map normalize over a list of mixed-case names.
L1 = ['adam', 'LISA', 'barT']
L2 = list(map(normalize, L1))
print(L2)
|
from django.contrib import admin
# import에 include 추가
from django.urls import path, include
urlpatterns = [
    path('admin/', admin.site.urls),
    # Added route: delegate all /test1/ URLs to the test1 app's urlconf.
    path('test1/', include('test1.urls')),
]
|
from django.contrib import admin
from .models import Organization, UserProfile, MailingList
@admin.register(Organization)
class OrganizationAdmin(admin.ModelAdmin):
    """Default admin for Organization (no customisations)."""


@admin.register(UserProfile)
class UserProfileAdmin(admin.ModelAdmin):
    """Default admin for UserProfile (no customisations)."""


@admin.register(MailingList)
class MailingListAdmin(admin.ModelAdmin):
    """Default admin for MailingList (no customisations)."""
|
"""Plot words on sheet."""
from matplotlib import pyplot as plt
from matplotlib.patches import Rectangle
class PlotObjects(object):
    """Draw one object's bounding rectangle onto a page plot on construction."""

    def __init__(self, page_plot, object_in, colour):
        """Store the bounds from object_in (indices 1-4) and plot immediately.

        object_in carries (top, right, bottom, left) at indices 1 through 4.
        """
        self.page_plot = page_plot
        self.object_top = object_in[1]
        self.object_right = object_in[2]
        self.object_bottom = object_in[3]
        self.object_left = object_in[4]
        self.colour = colour
        self.plot_object()

    def plot_object(self):
        """Add an unfilled rectangle outlining the object to page_plot."""
        width = self.object_right - self.object_left
        height = self.object_top - self.object_bottom
        outline = Rectangle((self.object_left, self.object_bottom),
                            width, height,
                            fill = None, edgecolor = self.colour, alpha = 1)
        self.page_plot.add_patch(outline)
|
from tkinter import *
import math, random
import sqlite3
import sys
import django
from time import sleep
django.setup()
# Conductor est un daemon
from threading import Thread
from components.mission import Mission
import signal
polling_interval = 1
def bddCreation():
    """Create the SQLite schema: drone, station, mission, mission_drone.

    Operates on the module-level ``conn``/``cursor``.  The original
    PostgreSQL enum DDL is kept below for reference (not valid SQLite):

    CREATE TYPE droneStatus AS ENUM('busy','free','maintenance');
    CREATE TYPE stationStatus AS ENUM('working','empty');
    CREATE TYPE deliveryStatus AS ENUM('not assigned','in progress','cancelled');
    """
    conn.commit()
    cursor.execute("""
    CREATE TABLE IF NOT EXISTS drone(
    id INTEGER PRIMARY KEY AUTOINCREMENT UNIQUE,
    status TEXT
    )
    """)
    cursor.execute("""
    CREATE TABLE IF NOT EXISTS station(
    id INTEGER PRIMARY KEY AUTOINCREMENT UNIQUE,
    status TEXT,
    xCoord FLOAT,
    yCoord FLOAT,
    batterieNumber INTEGER ,
    anticipatedBatterieNumber INTEGER
    )
    """)
    cursor.execute("""
    CREATE TABLE IF NOT EXISTS mission(
    id INTEGER PRIMARY KEY AUTOINCREMENT UNIQUE,
    status TEXT, -- BUG FIX: comma was missing, so SQLite folded xStockCoord
                 -- into this column's type name and the column was never created
    xStockCoord FLOAT,
    yStockCoord FLOAT,
    xFinalCoord FLOAT,
    yFinalCoord FLOAT,
    creationDate DATETIME
    )
    """)
    cursor.execute("""
    CREATE TABLE IF NOT EXISTS mission_drone(
    drone_id INTEGER,
    mission_id INTEGER,
    FOREIGN KEY(drone_id) REFERENCES drone(id),
    FOREIGN KEY(mission_id) REFERENCES mission(id)
    )
    """)
    conn.commit()
def random2DCoords():
    """Return a random [x, y] inside the window, keeping a half-RAYON margin."""
    x = random.uniform(RAYON / 2, LARGEUR - RAYON / 2)
    y = random.uniform(RAYON / 2, HAUTEUR - RAYON / 2)
    return [x, y]
def createDrones(number):
    """Insert *number* drones with status "free" and commit.

    Bug fix: the original started at i = 1 with ``while i < number`` and so
    inserted only number - 1 rows.
    """
    for _ in range(number):
        cursor.execute("""
        INSERT INTO drone(status) VALUES("free")""")
    conn.commit()
def deplacement():
    """Move the ball one step and bounce it off the window edges.

    NOTE(review): X, Y, DX, DY, RAYON, LARGEUR, HAUTEUR, Canevas, Balle and
    Mafenetre are only defined inside the commented-out setup string below;
    calling this as-is raises NameError -- confirm the setup is meant to be
    re-enabled.
    """
    global X, Y, DX, DY, RAYON, LARGEUR, HAUTEUR
    # bounce off the right edge
    if X + RAYON + DX > LARGEUR:
        X = 2 * (LARGEUR - RAYON) - X
        DX = -DX
    # bounce off the left edge
    if X - RAYON + DX < 0:
        X = 2 * RAYON - X
        DX = -DX
    # bounce off the bottom edge
    if Y + RAYON + DY > HAUTEUR:
        Y = 2 * (HAUTEUR - RAYON) - Y
        DY = -DY
    # bounce off the top edge
    if Y - RAYON + DY < 0:
        Y = 2 * RAYON - Y
        DY = -DY
    X = X + DX
    Y = Y + DY
    # redraw the ball at its new position
    Canevas.coords(Balle, X - RAYON, Y - RAYON, X + RAYON, Y + RAYON)
    # schedule the next move in 50 ms
    Mafenetre.after(50, deplacement)
"""
# Fenetre
LARGEUR = 480
HAUTEUR = 320
# Drone
RAYON = 5 # rayon des drones
NUMBER_DRONE = 10
# Station
COTE = 5
NUMBER_STATION = 5
# direction initiale alatoire
vitesse = random.uniform(1.8, 2) * 5
angle = random.uniform(0, 2 * math.pi)
DX = vitesse * math.cos(angle)
DY = vitesse * math.sin(angle)
conn = sqlite3.connect(':memory:')
cursor = conn.cursor()
# Cration de la fentre principale
Mafenetre = Tk()
Mafenetre.title("Animation Balle")
# Cration d'un widget Canvas
Canevas = Canvas(Mafenetre, height=HAUTEUR, width=LARGEUR, bg='white')
Canevas.pack(padx=15, pady=15)
bddCreation()
createDrones(5)
# Cration d'un objet graphique
Balle = Canevas.create_oval(X - RAYON, Y - RAYON, X + RAYON, Y + RAYON, width=1, fill='green')
deplacement()
Mafenetre.mainloop()
conn.close()
"""
# Function run when the daemon thread is launched.
def run():
    """Poll the mission manager and print any new missions until killed."""
    print("Processing... press q key to exit")
    # processing here
    while True:
        if mission_mgr.flag_has_new_mission:
            # NOTE(review): no parentheses -- if get_missions is a method
            # rather than a property this binds the method object; confirm.
            missions = mission_mgr.get_missions
            print(missions)
        sleep(polling_interval)
# Bug fix: signal_sigint_handler was referenced below but never defined in
# this file, so startup raised NameError.  Define a minimal handler that
# terminates the process on Ctrl-C (the worker thread dies with it).
def signal_sigint_handler(signum, frame):
    """SIGINT handler: exit the main process."""
    sys.exit(0)

mission_mgr = Mission()
processing_th = Thread(target=run)
processing_th.start()
signal.signal(signal.SIGINT, signal_sigint_handler)
processing_th.join()
|
import pickle

import numpy as np
import pandas as pd
from sklearn.naive_bayes import MultinomialNB
from sklearn.svm import SVC
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import Pipeline
from tqdm import tqdm

from mypipeline import MultinomialNaiveBayesLogProbs, CleanTable, test_pipeline
results_files = ["{}_results.pypkl".format(m) for m in ['lr', 'rf', 'xgb'] ]
results = []
for f in results_files:
with open(f, 'rb') as in_file:
results.append(pickle.load(in_file))
lr_results, rf_results, xgb_results = results
lr_acc = [np.mean(j) for i,j in lr_results]
rf_acc = [np.mean(j) for i,j in rf_results]
xgb_acc = [np.mean(j) for i,j in xgb_results]
lr_var = [np.var(j) for i,j in lr_results]
rf_var = [np.var(j) for i,j in rf_results]
xgb_var = [np.var(j) for i,j in xgb_results]
lr_mask = np.argsort(lr_acc)[::-1][:7]
rf_mask = np.argsort(rf_acc)[::-1][:7]
xgb_mask = np.argsort(xgb_acc)[::-1][:7]
lr_models = []
for i in lr_mask:
print lr_results[i]
print np.mean(lr_results[i][1])
print ""
lr_models.append(lr_results[i])
rf_models = []
for i in rf_mask:
rf_models.append(rf_results[i])
xgb_models = []
for i in rf_mask:
xgb_models.append(xgb_results[i])
def test_pipeline(df, nlp_pipeline, y_column='speaker'):
    # NOTE(review): this redefines (shadows) the test_pipeline imported from
    # mypipeline at the top of the file -- confirm which one is intended.
    #
    # Run one round of 10-fold stratified CV of nlp_pipeline on df,
    # predicting y_column.  Prints and returns the per-fold accuracies.
    label = df[y_column].copy()
    X = df.drop(y_column, axis=1).copy()
    rskf = StratifiedKFold(n_splits=10, shuffle=True)
    accs = []
    for train_index, test_index in tqdm(rskf.split(X, label), total=10):
        X_train, X_test = X.iloc[train_index].copy(), X.iloc[test_index].copy()
        y_train, y_test = label[train_index], label[test_index]
        nlp_pipeline.fit(X_train.reset_index(), y_train)
        accs.append((nlp_pipeline.predict(X_test.reset_index()) == y_test).mean())
    print "avg accuracies:", np.mean(accs)
    return accs
# Re-score the short-listed LR configurations: 3 independent rounds of
# 10-fold CV each; the final score is the mean accuracy over all folds.
lr_model_score = []
df = pd.read_pickle("talks_norm_pos.pdpkl")
for params, _ in lr_models:
    accs = []
    lr_pipe = Pipeline(steps=[('log_probs', MultinomialNaiveBayesLogProbs()),
                     ('clean', CleanTable()), ('lr', LogisticRegression())])
    for _ in xrange(3):
        # params: (text_column, count_weighting, min_df, C)
        a = test_pipeline(df, lr_pipe.set_params(log_probs__text_column = params[0],
                                    log_probs__count_weighting = params[1],
                                    log_probs__min_df = params[2],
                                    lr__C = params[3]))
        accs += a
    lr_model_score.append(np.mean(accs))
with open("final_grid_lr.pypkl", 'w') as f:
    pickle.dump(zip(lr_models, lr_model_score) , f)
from sklearn.ensemble import RandomForestClassifier
# Re-score the short-listed RF configurations with 3 repeated CV rounds each.
rf_model_score = []
df = pd.read_pickle("talks_norm_pos.pdpkl")
for params, _ in rf_models:
    accs = []
    rf_pipe = Pipeline(steps=[('log_probs', MultinomialNaiveBayesLogProbs()),
                     ('clean', CleanTable()), ('rf', RandomForestClassifier())])
    for _ in xrange(3):
        # BUG FIX: the original referenced undefined names (text_column,
        # count_weighting, min_df, n_estimators, max_depth) instead of
        # unpacking the grid parameters as the LR loop above does.
        # Assumed param order: (text_column, count_weighting, min_df,
        # n_estimators, max_depth) -- TODO confirm against the tuning script.
        a = test_pipeline(df, rf_pipe.set_params(log_probs__text_column = params[0],
                                    log_probs__count_weighting = params[1],
                                    log_probs__min_df = params[2],
                                    rf__n_estimators = params[3],
                                    rf__max_depth = params[4]))
        accs += a
    rf_model_score.append(np.mean(accs))
with open("final_grid_rf.pypkl", 'w') as f:
    pickle.dump(zip(rf_models, rf_model_score) , f)
from xgboost import XGBClassifier
# Re-score the short-listed XGB configurations with 3 repeated CV rounds each.
xgb_model_score = []
df = pd.read_pickle("talks_norm_pos.pdpkl")
for params, _ in xgb_models:
    accs = []
    # NOTE: the XGB step keeps the historical step name 'rf' so the rf__*
    # parameter keys below continue to resolve.
    xgb_pipe = Pipeline(steps=[('log_probs', MultinomialNaiveBayesLogProbs()),
                     ('clean', CleanTable()), ('rf', XGBClassifier())])
    for _ in xrange(3):
        # BUG FIX: the original referenced undefined names instead of
        # unpacking the grid parameters.  Assumed param order mirrors the RF
        # grid: (text_column, count_weighting, min_df, n_estimators,
        # max_depth) -- TODO confirm against the tuning script.
        a = test_pipeline(df, xgb_pipe.set_params(log_probs__text_column = params[0],
                                     log_probs__count_weighting = params[1],
                                     log_probs__min_df = params[2],
                                     rf__n_estimators = params[3],
                                     rf__max_depth = params[4]))
        accs += a
    xgb_model_score.append(np.mean(accs))
with open("final_grid_xgb.pypkl", 'w') as f:
    pickle.dump(zip(xgb_models, xgb_model_score) , f)
|
#!/usr/bin/python
"""
bulkhover.py 1.1
This is a command-line script to import and export DNS records for a single
domain into or out of a hover account.
Usage:
bulkhover.py [options] (import|export) <domain> <dnsfile>
bulkhover.py (-h | --help)
bulkhover.py --version
Options:
-h --help Show this screen
--version Show version
-c --conf=<conf> Path to conf
-u --username=<user> Your hover username
-p --password=<pass> Your hover password
-f --flush Delete all existing records before importing
Examples:
The DNS file should have one record per line, in the format:
{name} {type} {content}
For example:
www A 127.0.0.1
@ MX 10 example.com
Since the script output is in the same format as its input, you can use shell
pipelines to do complex operations.
Copy all DNS records from one domain to another:
bulkhover.py -c my.conf export example.com - | ./bulkhover.py -c my.conf -f import other.com -
Copy only MX records from one domain to another:
./bulkhover.py -c my.conf export foo.com - | awk '$2 == "MX" {print $0}' | ./bulkhover.py -c my.conf import bar.com -
To avoid passing your username and password in the command-line, you can use
a conf file that contains them instead:
[hover]
username=YOUR_USERNAME
password=YOUR_PASSWORD
"""
import ConfigParser
import docopt
import requests
import sys
class HoverException(Exception):
    """Raised for any failed Hover API interaction (login, HTTP, API error)."""
    pass
class HoverAPI(object):
    """Minimal cookie-authenticated client for the hover.com DNS API."""
    def __init__(self, username, password):
        # Log in and capture the "hoverauth" session cookie for later calls.
        # Raises HoverException when login fails or no cookie is returned.
        params = {"username": username, "password": password}
        r = requests.post("https://www.hover.com/api/login", params=params)
        if not r.ok or "hoverauth" not in r.cookies:
            raise HoverException(r)
        self.cookies = {"hoverauth": r.cookies["hoverauth"]}
    def call(self, method, resource, data=None):
        # Issue an authenticated request; return the decoded JSON body, or
        # None for an empty response body.  Raises HoverException on an HTTP
        # error or when the API body does not report succeeded == True.
        url = "https://www.hover.com/api/{0}".format(resource)
        r = requests.request(method, url, data=data, cookies=self.cookies)
        if not r.ok:
            raise HoverException(r)
        if r.content:
            body = r.json()
            if "succeeded" not in body or body["succeeded"] is not True:
                raise HoverException(body)
            return body
def import_dns(username, password, domain, filename, flush=False):
    # Create DNS records for *domain* from *filename* ("-" means stdin).
    # With flush=True every existing record is deleted first.
    # Raises HoverException("Authentication failed") when login fails.
    try:
        client = HoverAPI(username, password)
    except HoverException as e:
        raise HoverException("Authentication failed")
    if flush:
        records = client.call("get", "domains/{0}/dns".format(domain))["domains"][0]["entries"]
        for record in records:
            client.call("delete", "dns/{0}".format(record["id"]))
            print "Deleted {name} {type} {content}".format(**record)
    # NOTE(review): domain_id is never used below -- left as-is.
    domain_id = client.call("get", "domains/{0}".format(domain))["domain"]["id"]
    if filename == "-": filename = "/dev/stdin"
    with open(filename, "r") as f:
        for line in f:
            # Each line: {name} {type} {content}; content may contain spaces,
            # hence the maxsplit of 2.
            parts = line.strip().split(" ", 2)
            record = {"name": parts[0], "type": parts[1], "content": parts[2]}
            client.call("post", "domains/{0}/dns".format(domain), record)
            print "Created {name} {type} {content}".format(**record)
def export_dns(username, password, domain, filename):
    """Write every DNS record of *domain* to *filename* ("-" means stdout).

    Output format per record: "{name} {type} {content}\n".
    Raises HoverException("Authentication failed") when login fails.
    """
    try:
        client = HoverAPI(username, password)
    except HoverException as e:
        raise HoverException("Authentication failed")
    dns = client.call("get", "domains/{0}/dns".format(domain))
    records = dns["domains"][0]["entries"]
    target = "/dev/stdout" if filename == "-" else filename
    with open(target, "w") as f:
        f.writelines("{name} {type} {content}\n".format(**record)
                     for record in records)
def main(args):
def get_conf(filename):
config = ConfigParser.ConfigParser()
config.read(filename)
items = dict(config.items("hover"))
return items["username"], items["password"]
if args["--conf"] is None:
if not all((args["--username"], args["--password"])):
print("You must specifiy either a conf file, or a username and password")
return 1
else:
username, password = args["--username"], args["--password"]
else:
username, password = get_conf(args["--conf"])
try:
if args["import"]:
import_dns(username, password, args["<domain>"], args["<dnsfile>"], args["--flush"])
elif args["export"]:
export_dns(username, password, args["<domain>"], args["<dnsfile>"])
except HoverException as e:
print "Unable to update DNS: {0}".format(e)
return 1
if __name__ == "__main__":
    # The first line of the module docstring doubles as the docopt version
    # string ("bulkhover.py 1.1").
    version = __doc__.strip().split("\n")[0]
    args = docopt.docopt(__doc__, version=version)
    status = main(args)
    sys.exit(status)
#!/usr/bin/env python
#
# Copyright (c) 2019 Opticks Team. All Rights Reserved.
#
# This file is part of Opticks
# (see https://bitbucket.org/simoncblyth/opticks).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
See http://localhost/env_notes/graphics/ggeoview/issues/stratification/
"""
import os, logging
import numpy as np
from env.python.utils import *
from env.numerics.npy.types import *
import env.numerics.npy.PropLib as PropLib
log = logging.getLogger(__name__)
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from env.numerics.npy.ana import Evt, Selection, Rat, theta, scatter3d
# Column indices into a recpost row: position (X, Y, Z) and time (W).
X,Y,Z,W = 0,1,2,3
np.set_printoptions(suppress=True, precision=3)
# Ratio of the lengths of two sequences (numerator, denominator).
rat_ = lambda n,d:float(len(n))/float(len(d))
if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    # Load event tag "1"; one unfiltered selection and one filtered by the
    # "BT SA" sequence label (semantics defined by the ana module).
    e = Evt(tag="1")
    a = Selection(e)
    s = Selection(e,"BT SA")
    # Record point 1 of the filtered selection: keep z position and time.
    i = s.recpost(1)
    z = i[:,Z]
    t = i[:,W]
    #fig = plt.figure()
    #scatter3d(fig, i)
    #fig.show()
    #p0a = a.recpost(0)
    #p0 = s.recpost(0)
    #p1 = s.recpost(1)
    #p2 = s.recpost(2)
|
"""ImageCropper module; imported by ImageOperate aggregate class."""
import statistics
import ImageColumnCropOperators
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from PIL import Image
pd.options.mode.chained_assignment = None
class ImageCropper(object):
    """
    Cropping function; removes black image borders and outputs cropped image without Moody's header.
    Attributes:
        final_rotate: Incoming straightened image object from the ImageRotater class.
        cropped_array: Cropped body portion of image as an array not including Moody's header.
        page_number_array: Cropped header portion of image as an array.
    NOTE(review): page_number_array is documented above but never assigned in
    this class -- confirm whether it was removed or is set elsewhere.
    """
    def __init__(self, rotated_image):
        # rotated_image is expected to expose .final_rotate (see docstring).
        self.final_rotate = rotated_image.final_rotate
        self.luminance_img = ImageColumnCropOperators.convert_image_luminance(self.final_rotate)
        self.cropped_array = self.rotate_crop_refine()
        self.final_cropped_array = self.trim_sides()
    def rotate_crop_refine(self):
        """Loop to identify crop points; examines image vertically, rotates 90°, repeats.

        Collects four crop points (two per orientation) by scanning rolling
        luminance means for sufficiently 'white' regions, relaxing the
        threshold / split size until a point is found.
        """
        crop_pts = []
        for index in range(0, 2):
            # Rotate 90° for second treatment.
            if index == 1:
                self.luminance_img = np.rot90(self.luminance_img, k = 3)
            rolling_mean = ImageColumnCropOperators.convert_rolling_mean(self.luminance_img, 1, 20, 0)
            # Take the median dimension value of range of values with 'whiteness' greater than .65.
            white_score = .65
            split_val = 300
            split_list = ImageColumnCropOperators.split_array(rolling_mean, split_val)
            crop_pts_iteration = 0
            # The while loop always exits after one pass of the for loop (both
            # branches below break); it is kept as a safety net.
            while crop_pts_iteration < 2:
                for split_range in split_list:
                    white_points = list(split_range.index[split_range['values'] > white_score])
                    if len(white_points) > 0:
                        # NOTE(review): when len(white_points) < 350 and
                        # max(white_points) equals exactly half the length,
                        # neither branch assigns white_range and the previous
                        # value is reused -- confirm this edge case is intended.
                        if len(white_points) < 350:
                            if max(white_points) < (int(len(rolling_mean)) / 2):
                                white_range = int(min(white_points))
                            elif max(white_points) > (int(len(rolling_mean)) / 2):
                                white_range = int(max(white_points))
                        else:
                            white_range = int(statistics.median(white_points))
                        crop_pts.append(white_range)
                        crop_pts_iteration += 1
                        # Reset the search parameters for the opposite edge.
                        white_score = .70
                        split_val = 300
                        if crop_pts_iteration == 2:
                            break
                    else:
                        # Nothing white enough found: widen the split or relax
                        # the whiteness threshold, then re-split.
                        if split_range['values'].mean() < .5 and split_val < 700:
                            split_val += 100
                        else:
                            white_score -= .02
                        split_list = ImageColumnCropOperators.split_array(rolling_mean, split_val)
                if crop_pts_iteration in [0, 2]:
                    break
                elif crop_pts_iteration == 1:
                    # Only one edge found: search the ranges in reverse order.
                    split_list = split_list[::-1]
                    break
        # Rotate image back to upright; crop image on 'white_range' values.
        edge_trimmed_array = np.rot90(self.luminance_img, k = 1)
        cropped_array = edge_trimmed_array[crop_pts[0] : crop_pts[1],
                                           crop_pts[2] : crop_pts[3]]
        return cropped_array
    def trim_sides(self):
        """Trim extra white space from edges of page text; standardise images."""
        def build_array(array):
            """
            Construct dataframe noting decreasing brightness.
            This indicates approach of the edge of the page text.
            """
            rolling_mean_vertical_array = ImageColumnCropOperators.convert_rolling_mean(array, 0, 10, 0)
            split_value = int(len(rolling_mean_vertical_array) / 2)
            split_list = ImageColumnCropOperators.split_array(rolling_mean_vertical_array, split_value)
            iteration = 1
            array_var_list = []
            for array in split_list:
                # plt.plot(array)
                # plt.show()
                # Second half is scanned from the far edge inwards.
                if iteration == 2:
                    array = array.iloc[::-1]
                # Successive shifts give per-row brightness deltas and a
                # binary "is decreasing" flag (plus its one-step lookahead).
                array_shift = pd.DataFrame(array['values'].shift(-1))
                array_shift.columns = ['values']
                array['values2'] = array_shift['values']
                array['distance'] = array['values2'] - array['values']
                array['distance_binary'] = [0 if value >= 0 else 1 for value in array['distance']]
                array_shift2 = pd.DataFrame(array['distance_binary'].shift(-1))
                array_shift2.columns = ['distance']
                array['distance_binary_shift'] = array_shift2['distance']
                array['index'] = array.index
                array = array[['index', 'distance', 'distance_binary', 'distance_binary_shift']]
                array['distance'].fillna(0, inplace=True)
                array['distance_binary'].fillna(0, inplace=True)
                array['distance_binary_shift'].fillna(0, inplace=True)
                array['distance_binary'] = array['distance_binary'].astype(int)
                array['distance_binary_shift'] = array['distance_binary_shift'].astype(int)
                array = array.values.tolist()
                array_var_list.append(array)
                iteration += 1
            return (rolling_mean_vertical_array, array_var_list)
        def descending_continuity(input_data, run_type):
            """
            Evaluate movement from black edge-of-page to edge-of-text.
            Crop page in at edges of text w/ ~45px (or less) of padding.
            """
            rolling_mean_vertical_array = input_data[0]
            array_var_list = input_data[1]
            iteration = 1
            crop_points_inner = []
            for array in array_var_list:
                continuity = 0
                if run_type == 'original':
                    # Accumulate consecutive brightness drops; a run totalling
                    # >= .013 marks the text edge (distance_list leaks out of
                    # the loop intentionally and is read below).
                    for distance_list in array:
                        if distance_list[2] == 1 and distance_list[3] == 1:
                            continuity += -(distance_list[1])
                            if continuity >= .013:
                                break
                        else:
                            continuity = 0
                # if run_type == 'second_pass':
                #     for distance_list in array:
                #         if distance_list[2] == 1 and distance_list[3] == 1:
                #             continuity += -(distance_list[1])
                #             if continuity >= .04:
                #                 break
                #         else:
                #             continuity = 0
                if iteration == 1:
                    # Leading edge: pad outwards by up to 45 px.
                    if int(distance_list[0]) <= 45:
                        crop_points_inner.append(0)
                    else:
                        crop_points_inner.append(int(distance_list[0]) - 45)
                elif iteration == 2:
                    # Trailing edge: pad outwards by up to 45 px.
                    if len(rolling_mean_vertical_array) - int(distance_list[0]) <= 45:
                        crop_points_inner.append(len(rolling_mean_vertical_array))
                    else:
                        crop_points_inner.append(int(distance_list[0]) + 45)
                iteration += 1
            return crop_points_inner
        # Run the edge search on the upright and 90°-rotated arrays to get
        # [left, right, top, bottom] crop points.
        crop_points = []
        array_list = [self.cropped_array, np.rot90(self.cropped_array, k = 1)]
        for array in array_list:
            array_data = build_array(array)
            array_crop_pts = descending_continuity(array_data, 'original')
            for point in array_crop_pts:
                crop_points.append(point)
        trimmed_image = self.cropped_array[crop_points[2] : crop_points[3],
                                           crop_points[0] : crop_points[1]]
        # Image.fromarray(trimmed_image).show()
        # # Account for incomplete trimming on darker images.
        # array_data = build_array(trimmed_image)
        # array_crop_pts = descending_continuity(array_data, 'second_pass')
        #
        # if array_crop_pts[0] > 15:
        #     if array_crop_pts[0] > 100:
        #         array_crop_pts[0] = 100
        #     crop_points[0] = crop_points[0] + array_crop_pts[0]
        # elif (abs(array_crop_pts[1] - crop_points[1]) > 15 and
        #       abs(array_crop_pts[1] - crop_points[1]) < 100):
        #
        #     difference = abs(array_crop_pts[1] - crop_points[1])
        #     if difference > 20:
        #         difference = 20
        #     crop_points[1] = crop_points[1] - difference
        #
        # trimmed_image_out = self.cropped_array[crop_points[2] : crop_points[3],
        #                                        crop_points[0] : crop_points[1]]
        return trimmed_image
|
# Read a number and print its odd digits, each followed by a space.
digits = [int(ch) for ch in input()]
for digit in digits:
    if digit % 2 != 0:
        print(digit, end=" ")
|
# Exercise 8.3 - Book
def areaQuadrado (lado):
    """Return the area of a square with side length *lado*."""
    return lado ** 2
# Demo: area of a square with side 9 (prints 81).
a = areaQuadrado(9)
print(a)
|
# ------------------------------------------------------------------------------
# Gnn like cnn Pytorch Implementation
# paper:
# https://arxiv.org/abs/1603.09065
# Written by Haiyang Liu (haiyangliu1997@gmail.com)
# ------------------------------------------------------------------------------
from collections import OrderedDict
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data as data
import torch.utils.model_zoo as model_zoo
from torch.autograd import Variable
from torch.nn import init
# import torch_geometric.transforms as T
# from torch_geometric.nn import GCNConv,GatedGraphConv
# from torch_geometric.data import Data
# All tensors built in the helper functions below are moved to this device.
device = torch.device('cuda')
# Per-node index lists: entry i starts with node i itself followed by its
# connected nodes (appears to encode an 18-keypoint skeleton -- TODO confirm).
edge_matrix = [[0,1,14,15],[1,0,2,5],[2,1,3,4,8],[3,2,4],[4,2,3],
               [5,1,6,7,11],[6,5,7],[7,5,6],[8,2,9,10],[9,8,10],[10,8,9],
               [11,5,12,13],[12,11,13],[13,11,12],[14,0,15,16],[15,0,14,17],[16,14],[17,15]]
#edge_matrix = edge_matrix.to(device)
# Node index pairs, one per limb/PAF connection, used by get_input_paf.
edge_matrix_paf = [[1,8],[8,9],[9,10],[1,11],[11,12],[12,13],
                   [1,2],[2,3],[3,4],[2,16],[1,5],[5,6],[6,7],
                   [5,17],[0,1],[0,14],[0,15],[14,16],[15,17]]
#edge_matrix_paf = edge_matrix_paf.to(device)
#edge_matrix_paf = edge_matrix_paf.to(device)
def get_input(i,gnn_input):
    """Select the channels listed in edge_matrix[i] from gnn_input (dim 1).

    Returns (selected tensor, number of selected channels).
    """
    neighbours = edge_matrix[i]
    idx = torch.tensor(neighbours).to(device)
    #print(idx)
    gathered = torch.index_select(gnn_input, 1, idx)
    return gathered, len(neighbours)
def get_input_paf(i,gnn_input):
    """Select the two channels listed in edge_matrix_paf[i] plus channels
    2*i+19 and 2*i+20 from gnn_input (dim 1), concatenated in that order.
    """
    endpoint_idx = torch.tensor(edge_matrix_paf[i]).to(device)
    paf_idx = torch.tensor([2 * i + 19, 2 * i + 20]).to(device)
    endpoint_part = torch.index_select(gnn_input, 1, endpoint_idx)
    paf_part = torch.index_select(gnn_input, 1, paf_idx)
    return torch.cat([endpoint_part, paf_part], 1)
class Model_GNN(nn.Module):
    '''Per-keypoint GNN refinement of the 19-channel heatmap output.

    One small convolutional branch per keypoint node (18 branches).  Each
    branch consumes the node's neighbour channels (gathered by the
    module-level ``get_input``) and produces a single refined channel.
    '''

    # Input channel count of each of the 18 node branches, i.e. the number
    # of neighbour channels ``get_input`` gathers for that node (values
    # copied verbatim from the original per-branch definitions).
    _BRANCH_IN_CHANNELS = [4, 4, 5, 3, 3, 5, 3, 3, 4, 3, 3, 4, 3, 3, 4, 4, 2, 2]

    @staticmethod
    def _make_branch(in_channels):
        """Build one node branch: three 7x7 convs, a 1x1 expansion, a 1x1 head."""
        return nn.ModuleList([
            nn.Conv2d(in_channels=in_channels, out_channels=64,
                      kernel_size=(7, 7), stride=1, padding=3),
            nn.Conv2d(in_channels=64, out_channels=128,
                      kernel_size=(7, 7), stride=1, padding=3),
            nn.Conv2d(in_channels=128, out_channels=128,
                      kernel_size=(7, 7), stride=1, padding=3),
            nn.Conv2d(in_channels=128, out_channels=512,
                      kernel_size=(1, 1), stride=1, padding=0),
            nn.Conv2d(in_channels=512, out_channels=1,
                      kernel_size=(1, 1), stride=1, padding=0),
        ])

    def __init__(self, Gnn_layers, use_gpu):
        super().__init__()
        # Keep the original attribute names (gnn_0 .. gnn_17) and their
        # registration order so state_dict keys stay compatible with any
        # previously trained checkpoint.
        for node in range(18):
            setattr(self, 'gnn_%d' % node,
                    self._make_branch(self._BRANCH_IN_CHANNELS[node]))
        self.gnn_actfs = nn.ModuleList([nn.ReLU() for _ in range(7)])
        self.use_gpu = use_gpu
        self._initialize_weights_norm()

    def forward(self, out1, cnn_output, gnn_interations):
        """Refine ``cnn_output`` (heatmaps) node by node.

        ``out1`` and ``gnn_interations`` are accepted for interface
        compatibility but are not used by the current implementation.
        Returns a tensor with the same shape as ``cnn_output`` whose first
        18 channels are replaced by the branch outputs.
        """
        gnn_output = cnn_output.clone()
        gnn_output_final = cnn_output.clone()
        for n in range(18):  # one branch per keypoint node
            branch_input, _length = get_input(n, gnn_output)
            branch = getattr(self, 'gnn_%d' % n)
            for idx, g_layer in enumerate(branch):
                # NOTE(review): each branch has only 5 layers so ``idx``
                # never reaches 6 — ReLU is therefore applied after EVERY
                # layer, including the head.  The init code's comment
                # suggests the head was meant to skip ReLU; confirm intent
                # before changing, as a change alters trained behaviour.
                if idx == 6:
                    branch_input = g_layer(branch_input)
                else:
                    branch_input = self.gnn_actfs[idx](g_layer(branch_input))
            gnn_output_final[:, n:n + 1, :, :] = branch_input
        return gnn_output_final

    def _initialize_weights_norm(self):
        # Zero-initialise every conv weight of every branch (indices 0-4
        # cover all five layers; biases keep their default init).
        # NOTE(review): with zero weights each branch initially outputs only
        # its biases — presumably so the residual added in Model_Total
        # starts near identity; confirm.
        for node in range(18):
            branch = getattr(self, 'gnn_%d' % node)
            for i in range(5):
                init.zeros_(branch[i].weight)
class Model_GNN_paf(nn.Module):
    '''GNN refinement of the 38-channel PAF output.

    A single shared 7-layer convolutional branch is applied once per limb
    (19 passes); each pass rewrites that limb's two PAF channels.
    '''

    def __init__(self, Gnn_layers, use_gpu):
        super().__init__()
        self.gnn_layers = nn.ModuleList([
            nn.Conv2d(in_channels=4, out_channels=16,
                      kernel_size=(5, 5), stride=1, padding=2),
            nn.Conv2d(in_channels=16, out_channels=16,
                      kernel_size=(5, 5), stride=1, padding=2),
            nn.Conv2d(in_channels=16, out_channels=32,
                      kernel_size=(5, 5), stride=1, padding=2),
            nn.Conv2d(in_channels=32, out_channels=32,
                      kernel_size=(5, 5), stride=1, padding=2),
            nn.Conv2d(in_channels=32, out_channels=32,
                      kernel_size=(5, 5), stride=1, padding=2),
            nn.Conv2d(in_channels=32, out_channels=128,
                      kernel_size=(1, 1), stride=1, padding=0),
            nn.Conv2d(in_channels=128, out_channels=2,
                      kernel_size=(1, 1), stride=1, padding=0),
        ])
        self.gnn_actfs = nn.ModuleList([nn.ReLU() for _ in range(7)])
        self.use_gpu = use_gpu
        self._initialize_weights_norm()

    def forward(self, cnn_output, gnn_interations):
        """Refine the PAF channels of ``cnn_output``.

        ``cnn_output`` is expected to be the concatenation of 19 heatmap
        channels followed by 38 PAF channels (see Model_Total); the return
        value holds the 38 refined PAF channels.
        """
        gnn_output = cnn_output.clone()
        # Output buffer: a copy of the first 38 channels; every channel is
        # overwritten by the loop below.
        index_s = torch.tensor(list(range(38)))
        index_s = index_s.to(device)
        gnn_output_final = torch.index_select(gnn_output, 1, index_s)
        for n in range(19):  # one pass per limb / PAF pair
            branch_input = get_input_paf(n, gnn_output)
            # Bug fix: Tensor.to() is not in-place — the original discarded
            # the result (a no-op whenever the tensor is already on device).
            branch_input = branch_input.to(device)
            for idx, g_layer in enumerate(self.gnn_layers):
                if idx == 6:
                    branch_input = g_layer(branch_input)  # head: no ReLU
                else:
                    branch_input = self.gnn_actfs[idx](g_layer(branch_input))
            # Write both refined channels of this limb in one assignment.
            gnn_output_final[:, 2 * n:2 * n + 2, :, :] = branch_input[:, 0:2, :, :]
        return gnn_output_final

    def _initialize_weights_norm(self):
        # All seven convs get N(0, 0.01) weights; biases keep defaults.
        for i in range(7):
            init.normal_(self.gnn_layers[i].weight, std=0.01)
def make_stages(cfg_dict):
    """Build a CPM stage from a list of single-entry layer dicts.

    Each dict maps a layer name to its spec: pools are
    ``[kernel, stride, padding]``; convs are
    ``[in_ch, out_ch, kernel, stride, padding]``.  Every conv except the
    final one is followed by an in-place ReLU.
    """
    layers = []
    for entry in cfg_dict[:-1]:
        for name, spec in entry.items():
            if 'pool' in name:
                layers.append(nn.MaxPool2d(kernel_size=spec[0], stride=spec[1],
                                           padding=spec[2]))
            else:
                layers.append(nn.Conv2d(in_channels=spec[0], out_channels=spec[1],
                                        kernel_size=spec[2], stride=spec[3],
                                        padding=spec[4]))
                layers.append(nn.ReLU(inplace=True))
    # Final entry: a conv with no trailing activation (prediction head).
    last_name = list(cfg_dict[-1].keys())[0]
    spec = cfg_dict[-1][last_name]
    layers.append(nn.Conv2d(in_channels=spec[0], out_channels=spec[1],
                            kernel_size=spec[2], stride=spec[3], padding=spec[4]))
    return nn.Sequential(*layers)
def make_vgg19_block(block):
    """Build a VGG19 backbone block from a list of single-entry layer dicts.

    Pool specs are ``[kernel, stride, padding]``; conv specs are
    ``[in_ch, out_ch, kernel, stride, padding]``.  Unlike ``make_stages``,
    every conv — including the last — is followed by an in-place ReLU.
    """
    layers = []
    for entry in block:
        for name, spec in entry.items():
            if 'pool' in name:
                layers.append(nn.MaxPool2d(kernel_size=spec[0], stride=spec[1],
                                           padding=spec[2]))
            else:
                layers.append(nn.Conv2d(in_channels=spec[0], out_channels=spec[1],
                                        kernel_size=spec[2], stride=spec[3],
                                        padding=spec[4]))
                layers.append(nn.ReLU(inplace=True))
    return nn.Sequential(*layers)
def get_model(trunk='vgg19'):
    """Build the block configuration for the whole network and instantiate it.

    Returns a dict mapping block names ('block0', 'blockN_1', 'blockN_2')
    to nn.Sequential modules.  Branch 1 predicts PAFs (38 ch), branch 2
    heatmaps (19 ch); stages 2-6 consume 185 = 38 + 19 + 128 channels.
    """
    blocks = {}
    # block0 is the preprocessing / backbone stage
    if trunk == 'vgg19':
        block0 = [{'conv1_1': [3, 64, 3, 1, 1]},
                  {'conv1_2': [64, 64, 3, 1, 1]},
                  {'pool1_stage1': [2, 2, 0]},
                  {'conv2_1': [64, 128, 3, 1, 1]},
                  {'conv2_2': [128, 128, 3, 1, 1]},
                  {'pool2_stage1': [2, 2, 0]},
                  {'conv3_1': [128, 256, 3, 1, 1]},
                  {'conv3_2': [256, 256, 3, 1, 1]},
                  {'conv3_3': [256, 256, 3, 1, 1]},
                  {'conv3_4': [256, 256, 3, 1, 1]},
                  {'pool3_stage1': [2, 2, 0]},
                  {'conv4_1': [256, 512, 3, 1, 1]},
                  {'conv4_2': [512, 512, 3, 1, 1]},
                  {'conv4_3_CPM': [512, 256, 3, 1, 1]},
                  {'conv4_4_CPM': [256, 128, 3, 1, 1]}]
    elif trunk == 'mobilenet':
        block0 = [{'conv_bn': [3, 32, 2]},       # out: 3, 32, 184, 184
                  {'conv_dw1': [32, 64, 1]},     # out: 32, 64, 184, 184
                  {'conv_dw2': [64, 128, 2]},    # out: 64, 128, 92, 92
                  {'conv_dw3': [128, 128, 1]},   # out: 128, 256, 92, 92
                  {'conv_dw4': [128, 256, 2]},   # out: 256, 256, 46, 46
                  {'conv4_3_CPM': [256, 256, 1, 3, 1]},
                  {'conv4_4_CPM': [256, 128, 1, 3, 1]}]
    # Stage 1 (input: 128 backbone channels)
    blocks['block1_1'] = [{'conv5_1_CPM_L1': [128, 128, 3, 1, 1]},
                          {'conv5_2_CPM_L1': [128, 128, 3, 1, 1]},
                          {'conv5_3_CPM_L1': [128, 128, 3, 1, 1]},
                          {'conv5_4_CPM_L1': [128, 512, 1, 1, 0]},
                          {'conv5_5_CPM_L1': [512, 38, 1, 1, 0]}]
    blocks['block1_2'] = [{'conv5_1_CPM_L2': [128, 128, 3, 1, 1]},
                          {'conv5_2_CPM_L2': [128, 128, 3, 1, 1]},
                          {'conv5_3_CPM_L2': [128, 128, 3, 1, 1]},
                          {'conv5_4_CPM_L2': [128, 512, 1, 1, 0]},
                          {'conv5_5_CPM_L2': [512, 19, 1, 1, 0]}]
    # Refinement stages 2-6: identical layout, only the head width differs.
    for stage in range(2, 7):
        def _refine(tag, out_ch):
            cfg = [{'Mconv1_stage%d_%s' % (stage, tag): [185, 128, 7, 1, 3]}]
            for k in range(2, 6):
                cfg.append({'Mconv%d_stage%d_%s' % (k, stage, tag): [128, 128, 7, 1, 3]})
            cfg.append({'Mconv6_stage%d_%s' % (stage, tag): [128, 128, 1, 1, 0]})
            cfg.append({'Mconv7_stage%d_%s' % (stage, tag): [128, out_ch, 1, 1, 0]})
            return cfg
        blocks['block%d_1' % stage] = _refine('L1', 38)
        blocks['block%d_2' % stage] = _refine('L2', 19)
    models_dict = {}
    if trunk == 'vgg19':
        print("Bulding VGG19")
        models_dict['block0'] = make_vgg19_block(block0)
    for name, cfg in blocks.items():
        models_dict[name] = make_stages(list(cfg))
    return models_dict
def use_vgg(model, model_path, trunk, weight_path):
    """Initialise the early layers of *model* with VGG weights.

    First tries to load *weight_path*; on any failure it falls back to
    downloading the ImageNet-pretrained weights for *trunk* into
    *model_path* and copying them in positionally.
    """
    try:
        old_weights = torch.load(weight_path)
        vgg_keys = old_weights.keys()
        weights_load = {}
        # Positional copy: the i-th tensor of the saved dict is assigned to
        # the i-th key of model.state_dict() (weight+bias pairs per layer).
        for i in range(len(vgg_keys)):
            weights_load[list(model.state_dict().keys())[i]
                         ] = old_weights[list(vgg_keys)[i]]
        state = model.state_dict()
        state.update(weights_load)
        model.load_state_dict(state)
        print('success load old weights and epoch num:')
    except Exception:
        # Bug fix: was a bare ``except:`` which also swallowed
        # KeyboardInterrupt/SystemExit.
        model_urls = {
            'vgg16': 'https://download.pytorch.org/models/vgg16-397923af.pth',
            'ssd': 'https://s3.amazonaws.com/amdegroot-models/vgg16_reducedfc.pth',
            'vgg19': 'https://download.pytorch.org/models/vgg19-dcbb9e9d.pth'}
        number_weight = {
            'vgg16': 18,
            'ssd': 18,
            'vgg19': 20}
        url = model_urls[trunk]
        if trunk == 'ssd':
            # Bug fix: ``urllib.urlretrieve`` does not exist on Python 3
            # (this file already relies on Python-3-only ``super()`` calls).
            from urllib.request import urlretrieve
            urlretrieve('https://s3.amazonaws.com/amdegroot-models/ssd300_mAP_77.43_v2.pth',
                        os.path.join(model_path, 'ssd.pth'))
            vgg_state_dict = torch.load(os.path.join(model_path, 'ssd.pth'))
            print('loading SSD')
        else:
            vgg_state_dict = model_zoo.load_url(url, model_dir=model_path)
        vgg_keys = vgg_state_dict.keys()
        # load weights of vgg: positional weight+bias copy as above.
        weights_load = {}
        for i in range(number_weight[trunk]):
            weights_load[list(model.state_dict().keys())[i]
                         ] = vgg_state_dict[list(vgg_keys)[i]]
        state = model.state_dict()
        state.update(weights_load)
        model.load_state_dict(state)
        print('load imagenet pretrained model: {}'.format(model_path))
class Model_CNN(nn.Module):
    """Six-stage CPM/OpenPose-style CNN.

    ``model_dict`` supplies the sub-networks: 'block0' is the shared feature
    extractor; 'blockN_1' / 'blockN_2' (N = 1..6) are the PAF and heatmap
    branches of stage N.
    """

    def __init__(self, model_dict):
        super(Model_CNN, self).__init__()
        # Attribute names and registration order are kept identical to the
        # original so state_dict keys and the RNG consumption order inside
        # _initialize_weights_norm are unchanged.
        self.model0 = model_dict['block0']
        for stage in range(1, 7):
            setattr(self, 'model%d_1' % stage, model_dict['block%d_1' % stage])
        for stage in range(1, 7):
            setattr(self, 'model%d_2' % stage, model_dict['block%d_2' % stage])
        self._initialize_weights_norm()

    def forward(self, x):
        """Run all six stages.

        Returns ``(features, (paf, heatmap), saved_for_loss)`` where
        ``saved_for_loss`` holds both branch outputs of every stage in order.
        """
        features = self.model0(x)
        saved_for_loss = []
        stage_input = features
        branch_paf = branch_heat = None
        for stage in range(1, 7):
            branch_paf = getattr(self, 'model%d_1' % stage)(stage_input)
            branch_heat = getattr(self, 'model%d_2' % stage)(stage_input)
            saved_for_loss.append(branch_paf)
            saved_for_loss.append(branch_heat)
            if stage < 6:
                # Each refinement stage sees both predictions plus features.
                stage_input = torch.cat([branch_paf, branch_heat, features], 1)
        return features, (branch_paf, branch_heat), saved_for_loss

    def _initialize_weights_norm(self):
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                init.normal_(m.weight, std=0.01)
                if m.bias is not None:  # mobilenet conv2d doesn't add bias
                    init.constant_(m.bias, 0.0)
        # Re-initialise the prediction heads (the last layer of each branch,
        # which has no ReLU) in the exact same order as the original code.
        init.normal_(self.model1_1[8].weight, std=0.01)
        init.normal_(self.model1_2[8].weight, std=0.01)
        for branch in (self.model2_1, self.model3_1, self.model4_1,
                       self.model5_1, self.model6_1,
                       self.model2_2, self.model3_2, self.model4_2,
                       self.model5_2, self.model6_2):
            init.normal_(branch[12].weight, std=0.01)
class Model_Total(nn.Module):
    """CNN backbone plus optional GNN refinement of heatmaps and PAFs."""

    def __init__(self, model_dict, gnn_layers, use_gpu):
        super().__init__()
        self.cnn = Model_CNN(model_dict)
        self.gnn = Model_GNN(gnn_layers, use_gpu)
        self.gnn_paf = Model_GNN_paf(gnn_layers, use_gpu)

    def forward(self, input, gnn_interations, use_gnn):
        """Run the CNN, then (optionally) the GNN refinement heads.

        Returns ``((paf, heatmap), saved_for_loss)``.
        """
        # Idiom fix: invoke submodules via __call__ instead of .forward so
        # registered forward hooks are honoured.
        out1, x_loss, saved_for_loss = self.cnn(input)
        x_heatmap = x_loss[1].clone()
        x_paf = x_loss[0].clone()
        x_paf_input = torch.cat([x_heatmap, x_paf], 1)
        if use_gnn:
            y = self.gnn(out1, x_heatmap, gnn_interations)
            out7_2 = y + x_heatmap  # residual refinement of the heatmaps
            z = self.gnn_paf(x_paf_input, gnn_interations)
            # NOTE(review): ``z`` is computed but never used — the returned
            # PAF is the raw CNN prediction.  Confirm whether out7_1 should
            # incorporate z (e.g. z + x_paf) before "fixing" this.
            out7_1 = x_paf
            saved_for_loss.append(out7_1)
            saved_for_loss.append(out7_2)
            x_loss_gnn = (out7_1, out7_2)
            return x_loss_gnn, saved_for_loss
        else:
            return x_loss, saved_for_loss
|
# -*- coding: utf-8 -*-
'''
Implements all code related to the model and the user entities.
'''
import psycopg2
from psycopg2 import pool
from psycopg2.extras import DictCursor
import inject
from model.registry import Registry
import logging
# Configure root logging once for the whole module (DEBUG level).
logging.basicConfig(format='%(asctime)s, %(stack_info)s, %(thread)s, %(message)s')
logging.getLogger().setLevel(logging.DEBUG)
import cProfile
def do_cprofile(func):
    """Decorator that profiles each call to *func* with cProfile.

    The wrapped function's return value is passed through unchanged; the
    profile statistics are printed to stdout after every call (even if the
    call raises).
    """
    import functools

    @functools.wraps(func)  # fix: preserve __name__/__doc__ of the wrapped func
    def profiled_func(*args, **kwargs):
        profile = cProfile.Profile()
        try:
            profile.enable()
            result = func(*args, **kwargs)
            profile.disable()
            return result
        finally:
            profile.print_stats()
    return profiled_func
import traceback
def do_traceback(func):
    """Decorator that logs the full call stack each time *func* is invoked.

    Fixes: adds ``functools.wraps`` so the wrapper keeps the original
    function's metadata, and drops the redundant ``logging.basicConfig`` /
    ``setLevel`` calls — reconfiguring global logging as a side effect of
    decoration duplicated the module-level setup.
    """
    import functools

    @functools.wraps(func)
    def tracebacked_func(*args, **kwargs):
        for line in traceback.format_stack():
            logging.info(line.strip())
        return func(*args, **kwargs)
    return tracebacked_func
class Connection:
    """Small facade over a psycopg2 ThreadedConnectionPool.

    Connection parameters (host, database, user, password) are read from a
    :class:`Registry` instance.
    """

    logging = logging.getLogger(__name__)

    @classmethod
    def readOnly(cls, conn):
        # NOTE(review): despite the name, this switches the connection to
        # AUTOCOMMIT — it does not make it read-only. Confirm intent.
        conn.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)

    def __init__(self, registry=None):
        """Create a pool of 1-50 connections using settings from *registry*.

        Bug fix: the default used to be ``registry=Registry()``, which is
        evaluated once at class-definition time and silently shared by every
        instance created without an explicit registry; a fresh Registry is
        now created per instance.
        """
        if registry is None:
            registry = Registry()
        self.logging.setLevel(logging.DEBUG)
        self.host = registry.get('host')
        self.database = registry.get('database')
        self.user = registry.get('user')
        self.password = registry.get('password')
        # DictCursor gives mapping-style access to result rows.
        self.pool = psycopg2.pool.ThreadedConnectionPool(
            1, 50,
            host=self.host, database=self.database,
            user=self.user, password=self.password,
            cursor_factory=DictCursor)

    def get(self):
        """Borrow a connection from the pool."""
        self.logging.debug('obteniendo conexion a la base')
        return self.pool.getconn()

    def put(self, conn):
        """Return *conn* to the pool."""
        self.logging.debug('retornando la conexion al pool')
        self.pool.putconn(conn)

    def __del__(self):
        """Close every pooled connection when this wrapper is collected."""
        self.logging.debug('cerrando todas las conexiones a la base')
        self.pool.closeall()
|
import praw
import time
import datetime
import re
import os
import urllib
import requests
import json
from goog import getQuotes
from collections import OrderedDict
__author__ = '/u/spookyyz'
__version__ = '0.2'
user_agent = 'Stock Quotr 0.2 by /u/spookyyz'
# Authenticate against reddit; credentials come from the environment.
r = praw.Reddit(user_agent=user_agent)
r.login(os.environ['REDDIT_USER'], os.environ['REDDIT_PASS'])
cache = []  # ids of comments already replied to (in-memory only)
ticker_symbols = []  # module-level scratch state, rebuilt inside run_bot()
ticker_found = 0
symbol = ""
START_TIME = time.time()  # bot start time; older comments are ignored
class ticker_post(object):
    """Builds a reddit markdown table of stock quotes for a list of symbols."""

    def yql_name(self, symbol):
        """Look up company names for a comma-separated symbol string via YQL.

        Returns a list of markdown links, one per symbol.
        """
        print(symbol)
        url = "https://query.yahooapis.com/v1/public/yql"
        query = 'select Name from yahoo.finance.quotes where symbol in ("%s")' % symbol
        payload = {'q': query, 'diagnostics': 'false', 'env': 'store://datatables.org/alltableswithkeys', 'format': 'json'}
        try:
            r = requests.get(url, params=payload)
        except Exception as e:
            # Bug fix: the original ``except e:`` raised a NameError itself,
            # and execution then fell through to use an unbound ``r``.  We
            # log and re-raise so the caller sees the real network failure.
            print("YAHOO LOOKUP ERROR: " + str(e))
            raise
        json_response = json.loads(r.text)
        json_quote = json_response["query"]["results"]["quote"]
        company_names = []
        print("YQL: " + str(json_quote))
        for item in json_quote:
            try:
                company_names.append('[' + str(item['Name']) + ']' + '(http://finance.yahoo.com/q?s=' + symbol + ')')  # multiple symbols
            except (TypeError, KeyError):
                # Single symbol: json_quote is a dict, so iterating yields
                # its keys (strings) and item['Name'] raises TypeError.
                company_names.append('[' + str(json_quote['Name']) + ']' + '(http://finance.yahoo.com/q?s=' + symbol + ')')  # only 1 symbol
        return company_names

    def __init__(self, symbol):
        """*symbol* is a list of ticker strings without the leading '$'."""
        self.current_price = []
        self.time_of_quote = []
        self.symbol = []
        self.price_change = []
        self.price_change_percent = []
        self.spacer = []
        self.price_info = []
        self.split_symbols = ', '.join(symbol)
        self.company_names = self.yql_name(self.split_symbols)
        data_g = getQuotes(symbol)
        for info in data_g:
            # gathering google data, one column per symbol
            self.current_price.append(info['LastTradePrice'])
            self.time_of_quote.append("^^as ^^of ^^" + info['LastTradeDateTimeLong'].replace(" ", " ^^"))
            self.symbol.append(info['StockSymbol'])
            self.price_change.append(info['Change'])
            self.price_change_percent.append(info['ChangePercentage'])
            self.price_info.append("**$" + info['LastTradePrice'] + "** *^" + info['Change'] + "* ^*(" + info['ChangePercentage'] + "%)*")
            self.spacer.append('---')

    def post_data(self):
        """Assemble and return the reply body (markdown table), line by line."""
        header_line = "| " + ' | '.join(self.symbol) + " |\r\n"
        spacer_line = "| " + '|'.join(self.spacer) + " |\r\n"
        company_name_line = "| " + ' | '.join(self.company_names) + " |\r\n"
        price_line = "| " + ' | '.join(self.price_info) + " |\r\n"
        date_line = "| " + ' | '.join(self.time_of_quote) + " |\r\n"
        info_line = "\r\n^(_Quotr Bot v%s created by /u/spookyyz ) ^|| ^(Feel free to message me with any ideas or problems_)" % __version__
        self.post = "" + header_line + spacer_line + company_name_line + "" + price_line + "" + date_line + "" + info_line
        print(self.post)
        return self.post
def run_bot():
    """Scan recent /r/investing comments for $TICKER mentions and reply.

    Uses module-level globals: `r` (praw session), `cache` (replied comment
    ids), `START_TIME`.  Python 2 code (print statements, `except X,e`).
    """
    subreddit = r.get_subreddit('investing')
    for comment in subreddit.get_comments(limit=25): #iterate through 25 most recent comments for symbols
        ticker_symbols = []
        ticker_found=0
        comment_text = comment.body
        # Matches "$" followed by 1-6 capital letters / dots, e.g. $BRK.B
        pattern = re.compile('\$[A-Z\.]{1,6}')
        comment_utcunix = datetime.datetime.utcfromtimestamp(comment.created) - datetime.timedelta(hours=8) #offset from comment time as seen by the server to UTC
        start_utcunix = datetime.datetime.utcfromtimestamp(START_TIME)
        if (comment.id not in cache and comment_utcunix > start_utcunix): #ignore previously grabbed comments
            for symbol in re.findall(pattern, comment_text): #check for symbol against regex in comment text for non-cached comments
                ticker_symbols.append(symbol[1:])
                ticker_found=1
            # De-duplicate while preserving first-seen order.
            ticker_symbols = list(OrderedDict.fromkeys(ticker_symbols))
            if (ticker_found):
                try:
                    cache.append(comment.id)
                    text_to_post = ticker_post(ticker_symbols)
                    post_text = text_to_post.post_data()
                    print "Attempting to reply to " + comment.id + "."
                    comment.reply(post_text)
                    print "Replied to " + comment.id + " successfully. Sleeping for 5."
                    time.sleep(5)
                except Exception,e:
                    # On failure, un-cache so the comment is retried later.
                    print ("Error posting (possible throttling) response to %s" % comment.id)
                    print "ERROR: " + str(e)
                    cache.pop()
                    print cache
                    time.sleep(5)
        del ticker_symbols[:]
# Main loop: poll the subreddit every 20 seconds, forever.
while True:
    run_bot()
    time.sleep(20)
|
N=int(input("N="))
sum=int
if(N>0):
sum=0
for i in range(N,2*N+1):
x=i**2
sum=sum+x
print(sum) |
import pandas as pd
import numpy as np
import matplotlib.pyplot as mpl
import copy
import time
# Plot define
# NOTE(review): mpl.show(block=True) at import time, before anything is
# plotted, opens an empty (blocking) figure window -- presumably unintended;
# confirm whether this line should run at all.
mpl.show(block=True)
# Absolute path
path_to_dataset = ''
# Base name of the input CSV (without extension).
csv_name = 'NYPD_Motor_Vehicle_Collisions'
def compute_total_weeks(dataframe):
    """Return the number of ISO weeks spanned by the 'Datetime' column.

    Every year is assumed to hold exactly 52 weeks; timestamps whose ISO
    week number belongs to the neighbouring calendar year (e.g. Dec 31
    falling in week 1) are first folded into that neighbouring year.
    """
    earliest = min(dataframe['Datetime'])
    latest = max(dataframe['Datetime'])
    print('Minimum timestamp is {}'.format(earliest))
    print('Maximum timestamp is {}'.format(latest))

    def _week_and_year(ts):
        # ISO (week, year) pair, with the year shifted when the ISO week
        # number spills over a calendar-year boundary.
        week, year = ts.week, ts.year
        if ts.month == 12 and week == 1:
            year += 1
        elif ts.month == 1 and week == 52:
            year -= 1
        return week, year

    first_week, first_year = _week_and_year(earliest)
    last_week, last_year = _week_and_year(latest)
    # Whole years in between, plus the partial first and last years.
    total_weeks = (last_year - first_year - 1) * 52 \
        + (52 - first_week + 1) + last_week
    print('There are {} weeks into the dataset'.format(total_weeks))
    return total_weeks
if __name__ == "__main__":
    # Pipeline: load the NYPD collisions CSV, then (1) lethal accidents per
    # week, (2) accidents per contributing factor, (3) weekly accidents per
    # borough.  Each step writes a figure to ./figures and a CSV to ./output.
    accidents_ny = pd.read_csv('{}{}.csv'.format(path_to_dataset,csv_name), \
        delimiter=',')
    #print(accidents_ny.head())
    print('The available attributes are:\n' + '\n'.join(accidents_ny.columns.values))
    '''#Convert the ZIP CODE to integer. The Nan are set to -1
    print('Correcting the Zip code...')
    s_t = time.time()
    accidents_ny['ZIP CODE'] = accidents_ny['ZIP CODE'].fillna(-1).astype(int)
    print('Required time... {} s'.format(time.time() - s_t))'''
    #Create a new column Datetime that joins the date and the time ones.
    print('Converting to datetime...')
    s_t = time.time()
    accidents_ny['Datetime'] = accidents_ny['DATE'] + ' ' + accidents_ny['TIME']
    del accidents_ny['DATE']
    del accidents_ny['TIME']
    #Convert to timestamp the data column
    accidents_ny['Datetime'] = pd.to_datetime(accidents_ny['Datetime'],\
        format="%m/%d/%Y %H:%M")
    print('Required time... {} s'.format(time.time() - s_t))
    #print(accidents_ny.head())
    ############################################################################
    ############################################################################
    ############################### FATAL ACCIDENTS ############################
    ############################################################################
    ############################################################################
    #Create a dataframe containing exactly the lethal accidents
    print('First step... how many lethal accidents per week are there?')
    s_t = time.time()
    accidents_ny_lethal = \
        accidents_ny[['NUMBER OF PERSONS KILLED','Datetime']].copy()
    accidents_ny_lethal = \
        accidents_ny_lethal[accidents_ny_lethal['NUMBER OF PERSONS KILLED']>0]
    #Group by week (and year)
    accidents_ny_lethal.index = accidents_ny_lethal['Datetime']
    times = pd.DatetimeIndex(accidents_ny_lethal['Datetime'])
    # Remove the columns after it is become the index
    del accidents_ny_lethal['Datetime']
    #Define the year correction to deal with particular week numbers
    #For instance, the first of January may be in the 52th week of previous year)
    # NOTE(review): relies on true division followed by astype(int32) to
    # produce -1/0/+1 per row; behaviour differs between Python 2 and 3
    # division semantics -- confirm the intended interpreter.
    times_year_correction = ((times.month/12)*(52-times.week)/51)-\
        ((12-times.month)/11*(times.week/52))
    times_year_correction = times_year_correction.astype(np.int32)
    accidents_ny_lethal_g = accidents_ny_lethal.\
        groupby([times.year+times_year_correction,times.week]).agg(['count','sum'])
    accidents_ny_lethal_g.columns=['Fatal Accidents','Deaths']
    print(accidents_ny_lethal_g.head())
    #Plot the graph
    accidents_ny_lethal_g['Fatal Accidents'].plot(figsize=(15,10))
    mpl.savefig('./figures/{}_lethal_accidents_per_week.pdf'.format(csv_name))
    #Save on a csv
    accidents_ny_lethal_g.to_csv('./output/'+\
        '{}_lethal_accidents_per_week.csv'.format(csv_name), sep=',',\
        index_label=['Year','Week'])
    print('Required time... {} s'.format(time.time() - s_t))
    ############################################################################
    ############################################################################
    ############################# CONTRIBUTING FACTORS #########################
    ############################################################################
    ############################################################################
    print('Second step... how many lethal accidents per contributing factors are there?')
    print('Observation: only the contributing factor of the first vehicle '+\
        'is used, when specified.')
    s_t = time.time()
    accidents_ny_cf = accidents_ny[['CONTRIBUTING FACTOR VEHICLE 1',\
        'NUMBER OF PERSONS KILLED']].copy()
    accidents_ny_cf['CONTRIBUTING FACTOR VEHICLE 1'] = \
        accidents_ny_cf['CONTRIBUTING FACTOR VEHICLE 1'].fillna('Unspecified')
    accidents_ny_cf_g = accidents_ny_cf.groupby('CONTRIBUTING FACTOR VEHICLE 1').\
        agg(['count','sum'])
    accidents_ny_cf_g.columns = ['Total Accidents', 'Total Deaths']
    accidents_ny_cf_g['Average Deaths'] = \
        (accidents_ny_cf_g['Total Deaths'].astype(np.float32))/\
        accidents_ny_cf_g['Total Accidents']
    print(accidents_ny_cf_g.head())
    #Plot the graph (without unspecified field)
    accidents_ny_cf_g = accidents_ny_cf_g.reset_index()
    accidents_ny_cf_g = accidents_ny_cf_g[accidents_ny_cf_g\
        ['CONTRIBUTING FACTOR VEHICLE 1'] != 'Unspecified']
    accidents_ny_cf_g.index = accidents_ny_cf_g['CONTRIBUTING FACTOR VEHICLE 1']
    del accidents_ny_cf_g['CONTRIBUTING FACTOR VEHICLE 1']
    accidents_ny_cf_g['Total Accidents'].plot(figsize=(15,10), kind='bar')
    mpl.savefig('./figures/{}_total_accidents_per_contributing_factor.pdf'.\
        format(csv_name))
    # Clear the current figure
    mpl.clf()
    accidents_ny_cf_g['Average Deaths'].plot(figsize=(15,10), kind='bar')
    mpl.savefig('./figures/{}_average_deaths_per_contributing_factor.pdf'.\
        format(csv_name))
    #Save on a csv
    accidents_ny_cf_g.to_csv('./output/'+\
        '{}__total_accidents_per_contributing_factor.csv'.format(csv_name),\
        sep=',',index_label=['Contributing Factor'])
    print('Required time... {} s'.format(time.time() - s_t))
    ############################################################################
    ############################################################################
    ################ WEEKLY AVERAGE NUMBER OF ACCIDENTS PER BOROUGH ############
    ############################################################################
    ############################################################################
    print('Third step... how many accidents are there per borough weekly in average?')
    s_t = time.time()
    accidents_ny_borough = \
        accidents_ny[['NUMBER OF PERSONS KILLED', 'Datetime', 'BOROUGH']]\
        .copy().fillna('Unknown')
    times = pd.DatetimeIndex(accidents_ny_borough['Datetime'])
    # Convert the column 'NUMBER OF PERSONS KILLED' into 0-1's value column.
    accidents_ny_borough['NUMBER OF PERSONS KILLED'] = \
        accidents_ny_borough['NUMBER OF PERSONS KILLED'].astype(bool)
    accidents_ny_borough['NUMBER OF PERSONS KILLED'] = \
        accidents_ny_borough['NUMBER OF PERSONS KILLED'].astype(np.int32)
    #Define the year correction to deal with particular week numbers
    #For instance, the first of January may be in the 52th week of previous year)
    times_year_correction = ((times.month/12)*(52-times.week)/51)-\
        ((12-times.month)/11*(times.week/52))
    times_year_correction = times_year_correction.astype(np.int32)
    # Group by week and borough
    accidents_ny_borough_g = accidents_ny_borough.groupby(['BOROUGH',times.year+\
        times_year_correction,times.week])['NUMBER OF PERSONS KILLED'].\
        agg(['count','sum'])
    accidents_ny_borough_g.columns = ['Total accidents', 'Lethal accidents']
    print(accidents_ny_borough_g.head())
    #Plot a graph per week
    mpl.clf()
    # Unstack the borough.
    aa = accidents_ny_borough_g.unstack(0)
    aa['Total accidents'].plot(figsize=(1500,100), kind='bar')
    mpl.savefig('./figures/{}_total_accidents_weekly_per_borough.pdf'.\
        format(csv_name))
    #Save on a csv
    accidents_ny_borough_g.to_csv('./output/'+\
        '{}__weekly_accidents_per_borough.csv'.format(csv_name), sep=',',\
        index_label=['Borough', 'Year', 'Week'])
    # Compute the averages per borough by simply summing the lethal accidents and,
    # then, by dividing by the total number of weeks
    print('Computing the total number of weeks...')
    total_weeks = compute_total_weeks(accidents_ny)
    accidents_ny_borough_weekly_g = accidents_ny_borough_g.reset_index()
    accidents_ny_borough_weekly_g = accidents_ny_borough_weekly_g.groupby('BOROUGH')\
        ['Lethal accidents'].agg(['sum'])
    accidents_ny_borough_weekly_g.columns = ['Lethal accidents']
    accidents_ny_borough_weekly_g['Weekly lethal accidents'] = \
        accidents_ny_borough_weekly_g['Lethal accidents'] / total_weeks
    print(accidents_ny_borough_weekly_g)
    #Plot a graph
    mpl.clf()
    accidents_ny_borough_weekly_g['Weekly lethal accidents'].plot(figsize=(15,10),\
        kind='bar')
    mpl.savefig('./figures/{}_average_lethal_accidents_weekly_per_borough.pdf'.\
        format(csv_name))
    #Save on a csv
    accidents_ny_borough_weekly_g.to_csv('./output/'+\
        '{}_average_weekly_lethal_accidents_per_borough.csv'.format(csv_name),\
        sep=',',index_label=['Borough'])
    print('Required time... {} s'.format(time.time() - s_t))
|
import json
import os
import time

from flask import Flask, request
from flask_restful import Api, Resource, reqparse
# Fixed: the original passed `_name_` (single underscores, a NameError) to
# Flask, and called os.path.dirname() with no argument (a TypeError) without
# ever importing `os`.
app = Flask(__name__)
api = Api(app)
# Directory containing this module.
APP_ROOT = os.path.dirname(os.path.abspath(__file__))
parser = reqparse.RequestParser()
parser.add_argument('info')
class Hello(Resource):
    """REST resource deriving an age from a '-'-separated date string."""

    def post(self):
        """Parse the 'info' form field and return {"birthday", "Age"}.

        NOTE(review): the field order is ambiguous -- x[0] and x[1] are
        compared against the current year/month while x[2] is treated as the
        birth year; confirm the expected input format with the API's callers
        before changing the arithmetic.
        """
        args = parser.parse_args()
        name = args['info']
        x = name.split('-')
        age = 0
        if(time.gmtime()[1] - int(x[1]) >= 0):
            if(time.gmtime()[0] - int(x[0]) >= 0):
                age = age + 1
        age = age + (time.gmtime().tm_year - int(x[2]))
        if(age >= 0):
            return {"birthday": name, "Age": age}
        else:
            # Fixed: the original returned a *set* literal here, which is not
            # JSON-serializable and crashed the response; return a dict so
            # flask-restful can encode it.
            return {"message": "you are not born yet."}
api.add_resource(Hello, '/timestamp')

# Fixed: the original compared `_name_` with '_main_' (single underscores),
# a NameError that prevented the development server from ever starting.
if __name__ == '__main__':
    app.run(host='0.0.0.0', port=5500)
|
import sqlite3
from flask import Flask
from flask import render_template, request
from flask import jsonify, flash
app = Flask(__name__)
# NOTE(review): hard-coded secret key -- fine for local development only.
app.secret_key = "888"
def db_connection():
    """Open the CTA SQLite database and return a (cursor, connection) pair."""
    connection = sqlite3.connect('../Data/CTA_Data.db')
    cursor = connection.cursor()
    return (cursor, connection)
@app.route('/', methods= ['GET', 'POST'])
def getdata(pick=None):
    """Render the ridership page, optionally filtered by *pick*.

    pick: optional dict with 'year', 'station' and 'direction' keys; when
    omitted an all-empty filter is used (matching the original defaults).
    Returns the rendered 'output.html' template.
    """
    # Fixed: the original used a mutable dict as the default argument and
    # mutated it below ('%'-wrapping the year), so the shared default
    # accumulated extra '%' characters across requests.  Build a fresh dict
    # per call and copy the caller's dict so it is never mutated either.
    if pick is None:
        pick = {'year': '', 'station': '', 'direction': ''}
    else:
        pick = dict(pick)
    c, db = db_connection()
    # RETRIEVE ALL STATIONS
    sql = "SELECT DISTINCT [stationname] FROM Ridership r ORDER BY [stationname];"
    c.execute(sql)
    station_data = [i[0] for i in c.fetchall()]
    # RUN QUERY (parameterized via the named-placeholder dict `pick`)
    sql = """SELECT r.station_id, strftime('%m-%d-%Y', r.date, 'unixepoch') As ride_date, s.station_descriptive_name, r.rides, s.direction_id
        FROM Ridership r
        INNER JOIN Stations s ON r.station_id = s.map_id
        WHERE strftime('%m-%d-%Y', r.date, 'unixepoch') LIKE :year
        AND r.stationname = :station
        AND s.direction_id = :direction
        """
    pick['year'] = '%' + pick['year'] + '%'
    c.execute(sql, pick)
    data = [{'station_id': row[0], 'date': row[1], 'station_name': row[2],
             'rides': row[3], 'direction': row[4]} for row in c.fetchall()]
    c.close()
    db.close()
    return render_template('output.html', stations=station_data, cta_data=data)
@app.route('/data', methods=['POST'])
def data():
    """Handle the filter form POST and delegate rendering to getdata()."""
    selection = {
        'year': request.form['year'],
        'station': request.form['station'],
        'direction': request.form['direction'],
    }
    return getdata(pick=selection)
# Run the development server when executed directly.
if __name__ == '__main__':
    app.run(debug=True)
|
import csv
class data_logger():
    """Append-style CSV logger for sensor test runs.

    Each public method writes one row per call.  On the first call
    (``iteration == 0``) the file is (re)created and a header row is written;
    subsequent calls append.  ``data`` is a flat sequence whose values match
    the method's field list positionally.
    """

    @staticmethod
    def _log_row(name_of_file, fieldnames, data, iteration):
        """Write one CSV row, creating the file (with header) on iteration 0."""
        mode = "w" if iteration == 0 else "a"
        # newline='' is required by the csv module to avoid doubled line
        # endings on Windows.  (The original also duplicated this whole
        # writer block in every method; it is factored out here.)
        with open(name_of_file, mode, newline='') as csvfile:
            writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
            if iteration == 0:
                writer.writeheader()
            writer.writerow(dict(zip(fieldnames, data)))

    def twoPressurTransducers(self, data, iteration, fileName='youForgotToNameYourFile',
                              save_path='C:/Users/bob/Desktop/imu_presure/tests/test_files/'):
        """Log two pressure-transducer channels: data = [vp0, vn0, p0, vp1, vn1, p1]."""
        fieldnames = ['Voltage_P0', 'Voltage_N0', 'Pressure0',
                      'Voltage_P1', 'Voltage_N1', 'Pressure1']
        self._log_row(save_path + fileName + '.csv', fieldnames, data, iteration)

    def a3_g3_t2(self, data, iteration, fileName='youForgotToNameYourFile',
                 save_path='C:/Users/bob/Desktop/imu_presure/tests/test_files/'):
        """Log 3-axis accel, 3-axis gyro, two transducers and a timestamp."""
        fieldnames = ['xAcc', 'yAcc', 'zAcc',
                      'xGyr', 'yGyr', 'zGyr',
                      'vp0', 'vn0', 'p0',
                      'vp1', 'vn1', 'p1',
                      'datetime']
        self._log_row(save_path + fileName + '.csv', fieldnames, data, iteration)

    def tca3_r_t2(self, data, iteration, fileName='youForgotToNameYourFile',
                  save_path='C:/Users/bob/Desktop/imu_presure/tests/test_files/'):
        """Log accel, a 3x3 rotation matrix, two transducers and a timestamp."""
        fieldnames = ['xAcc', 'yAcc', 'zAcc',
                      'R[00]', 'R[01]', 'R[02]',
                      'R[10]', 'R[11]', 'R[12]',
                      'R[20]', 'R[21]', 'R[22]',
                      'vp0', 'vn0', 'p0',
                      'vp1', 'vn1', 'p1',
                      'datetime']
        self._log_row(save_path + fileName + '.csv', fieldnames, data, iteration)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# ==================================================
# @Time : 2019-06-20 10:14
# @Author : ryuchen
# @File : ResultManager.py
# @Desc :
# ==================================================
import os
import json
import errno
import socket
import logging
import threading
import gevent.pool
import gevent.server
import gevent.socket
from lib.common.config import config
from cuckoo.common.abstracts import ProtocolHandler
from lib.exceptions.critical import CuckooCriticalError
from lib.exceptions.operation import CuckooOperationalError
from cuckoo.common.files import open_exclusive
from lib.utils.utils import Singleton
from cuckoo.core.log import task_log_start, task_log_stop
from cuckoo.misc import cwd
log = logging.getLogger(__name__)
# Maximum line length to read for netlog messages, to avoid memory exhaustion
MAX_NETLOG_LINE = 4 * 1024
# Maximum number of bytes to buffer for a single connection
BUFSIZE = 16 * 1024
# Directories in which analysis-related files will be stored; also acts as
# whitelist
RESULT_UPLOADABLE = ("files", "shots", "buffer", "extracted", "memory")
RESULT_DIRECTORIES = RESULT_UPLOADABLE + ("reports", "logs")
# Prevent malicious clients from using potentially dangerous filenames
# E.g. C API confusion by using null, or using the colon on NTFS (Alternate
# Data Streams); XXX: just replace illegal chars?
# NOTE(review): this is a *bytes* literal while the sanitizer below receives
# str paths -- see netlog_sanitize_fname for how it is decoded.
BANNED_PATH_CHARS = b"\x00:"
def netlog_sanitize_fname(path):
    """Validate agent-provided path for result files.

    Raises CuckooOperationalError when the directory component is not in the
    RESULT_UPLOADABLE whitelist; banned characters (NUL, ':') found in the
    file name are masked with 'X'.
    """
    path = path.replace("\\", "/")
    dir_part, name = os.path.split(path)
    if dir_part not in RESULT_UPLOADABLE:
        raise CuckooOperationalError("Netlog client requested banned path: %r" % path)
    # Fixed: BANNED_PATH_CHARS is a bytes literal, and iterating bytes on
    # Python 3 yields ints -- so the original `c in name` membership test and
    # `path.replace(c, "X")` both raised TypeError for str paths.  Decode to
    # one-character strings first.
    banned = BANNED_PATH_CHARS.decode("latin-1")
    if any(c in name for c in banned):
        for c in banned:
            path = path.replace(c, "X")
    return path
class HandlerContext(object):
    """Holds context for protocol handlers.
    Can safely be cancelled from another thread, though in practice this will
    not occur often -- usually the connection between VM and the ResultServer
    will be reset during shutdown."""
    def __init__(self, task_id, storagepath, sock):
        self.task_id = task_id
        # Protocol command name ("BSON"/"FILE"/"LOG"), set after negotiation.
        self.command = None
        # The path where artifacts will be stored
        self.storagepath = storagepath
        self.sock = sock
        # NOTE(review): initialised as str, but read() returns whatever
        # sock.recv() yields (bytes on Python 3); confirm which Python this
        # module targets before touching the buffering logic below.
        self.buf = ""
    def __repr__(self):
        return "<Context for %s>" % self.command
    def cancel(self):
        """Cancel this context; gevent might complain about this with an
        exception later on."""
        try:
            self.sock.shutdown(socket.SHUT_RD)
        except socket.error:
            pass
    def read(self):
        """Read up to 16 KiB; returns "" on EOF, bad fd, or connection reset."""
        try:
            return self.sock.recv(16384)
        except socket.error as e:
            if e.errno == errno.EBADF:
                return ""
            if e.errno != errno.ECONNRESET:
                raise
            log.debug("Task #%s had connection reset for %r", self.task_id, self)
            return ""
    def drain_buffer(self):
        """Drain buffer and end buffering"""
        buf, self.buf = self.buf, None
        return buf
    def read_newline(self):
        """Read until the next newline character, but never more than
        `MAX_NETLOG_LINE`."""
        while True:
            pos = self.buf.find("\n")
            if pos < 0:
                if len(self.buf) >= MAX_NETLOG_LINE:
                    raise CuckooOperationalError("Received overly long line")
                buf = self.read()
                if buf == "":
                    raise EOFError
                self.buf += buf
                continue
            # Split off the line; keep the remainder buffered.
            line, self.buf = self.buf[:pos], self.buf[pos + 1 :]
            return line
    def copy_to_fd(self, fd, max_size=None):
        """Stream the remaining socket data into *fd*, optionally capped."""
        if max_size:
            fd = WriteLimiter(fd, max_size)
        # Flush anything already buffered before switching to raw reads.
        fd.write(self.drain_buffer())
        while True:
            buf = self.read()
            if buf == "":
                break
            fd.write(buf)
        fd.flush()
class WriteLimiter(object):
    """File-object wrapper that stops writing once a size budget is spent."""

    def __init__(self, fd, remain):
        self.fd = fd          # underlying file object
        self.remain = remain  # amount still allowed to be written
        self.warned = False   # truncation warning emitted yet?

    def write(self, buf):
        """Write as much of *buf* as the budget allows; warn once on overflow."""
        size = len(buf)
        allowed = size if size < self.remain else self.remain
        if allowed:
            self.fd.write(buf[:allowed])
            self.remain -= allowed
        if size and allowed != size:
            if not self.warned:
                log.warning(
                    "Uploaded file length larger than upload_max_size, "
                    "stopping upload."
                )
                self.fd.write("... (truncated)")
                self.warned = True

    def flush(self):
        self.fd.flush()
class FileUpload(ProtocolHandler):
    """Receives one uploaded artifact over the netlog socket and stores it
    under the task's storage path, logging metadata to files.json."""
    def init(self):
        self.upload_max_size = config("cuckoo:resultserver:upload_max_size")
        self.storagepath = self.handler.storagepath
        self.fd = None
        self.filelog = os.path.join(self.handler.storagepath, "files.json")
    def handle(self):
        # Read until newline for file path, e.g.,
        # shots/0001.jpg or files/9498687557/libcurl-4.dll.bin
        self.handler.sock.settimeout(30)
        dump_path = netlog_sanitize_fname(self.handler.read_newline())
        if self.version and self.version >= 2:
            # NB: filepath is only used as metadata
            filepath = self.handler.read_newline()
            pids = map(int, self.handler.read_newline().split())
        else:
            filepath, pids = None, []
        log.debug("Task #%s: File upload for %r", self.task_id, dump_path)
        # NOTE(review): read_newline() returns str here (HandlerContext.buf
        # starts as ""), so .decode("utf-8") would fail on Python 3; likewise
        # json.dumps() below cannot serialize a py3 `map` object and the
        # file is opened in binary mode for a text print().  This block looks
        # like an incomplete py2->py3 port -- confirm the target interpreter
        # before changing it.
        file_path = os.path.join(self.storagepath, dump_path.decode("utf-8"))
        try:
            # open_exclusive fails if the file already exists (anti-overwrite).
            self.fd = open_exclusive(file_path)
        except OSError as e:
            if e.errno == errno.EEXIST:
                raise CuckooOperationalError(
                    "Analyzer for task #%s tried to "
                    "overwrite an existing file" % self.task_id
                )
            raise
        # Append-writes are atomic
        with open(self.filelog, "a+b") as f:
            print(
                json.dumps({"path": dump_path, "filepath": filepath, "pids": pids}),
                file=f,
            )
        self.handler.sock.settimeout(None)
        try:
            return self.handler.copy_to_fd(self.fd, self.upload_max_size)
        finally:
            log.debug("Task #%s uploaded file length: %s", self.task_id, self.fd.tell())
class LogHandler(ProtocolHandler):
    """The live analysis log. Can only be opened once in a single session."""
    def init(self):
        self.logpath = os.path.join(self.handler.storagepath, "analysis.log")
        try:
            # bufsize=1 -> line-buffered, so the live log is readable as it grows.
            self.fd = open_exclusive(self.logpath, bufsize=1)
        except OSError:
            # A second LOG connection for the same task is refused, not fatal.
            log.error(
                "Task #%s: attempted to reopen live log analysis.log.", self.task_id
            )
            return
        log.debug("Task #%s: live log analysis.log initialized.", self.task_id)
    def handle(self):
        # Stream the socket into the log file; no-op if init() refused to open.
        if self.fd:
            return self.handler.copy_to_fd(self.fd)
class BsonStore(ProtocolHandler):
    """Stores a raw BSON behaviour stream as logs/<pid>.bson."""
    def init(self):
        # We cheat a little bit through the "version" variable, but that's
        # acceptable and backwards compatible (for now). Backwards compatible
        # in the sense that newer Cuckoo Monitor binaries work with older
        # versions of Cuckoo, the other way around doesn't apply here.
        # (Here "version" actually carries the monitored PID.)
        if self.version is None:
            log.warning(
                "Agent is sending BSON files without PID parameter, "
                "you should probably update it"
            )
            self.fd = None
            return
        self.fd = open(
            os.path.join(self.handler.storagepath, "logs", "%d.bson" % self.version),
            "wb",
        )
    def handle(self):
        """Read a BSON stream, attempting at least basic validation, and
        log failures."""
        log.debug("Task #%s is sending a BSON stream", self.task_id)
        if self.fd:
            return self.handler.copy_to_fd(self.fd)
class GeventResultServerWorker(gevent.server.StreamServer):
    """The new ResultServer, providing a huge performance boost as well as
    implementing a new dropped file storage format avoiding small fd limits.
    The old ResultServer would start a new thread per socket, greatly impacting
    the overall performance of Cuckoo Sandbox. The new ResultServer uses
    so-called Greenlets, low overhead green-threads by Gevent, imposing much
    less kernel overhead.
    Furthermore, instead of writing each dropped file to its own location (in
    $CWD/storage/analyses/<task_id>/files/<partial_hash>_filename.ext) it's
    capable of storing all dropped files in a streamable container format. This
    is one of various steps to start being able to use less fd's in Cuckoo.
    """
    # Protocol name -> handler class used by negotiate_protocol().
    commands = {"BSON": BsonStore, "FILE": FileUpload, "LOG": LogHandler}
    # Guards self.tasks and self.handlers across greenlets/threads.
    task_mgmt_lock = threading.Lock()
    def __init__(self, *args, **kwargs):
        super(GeventResultServerWorker, self).__init__(*args, **kwargs)
        # Store IP address to task_id mapping
        self.tasks = {}
        # Store running handlers for task_id
        self.handlers = {}
    def do_run(self):
        # Blocks, serving connections until the server is stopped.
        self.serve_forever()
    def add_task(self, task_id, ipaddr):
        """Start accepting results from the VM at *ipaddr* for *task_id*."""
        with self.task_mgmt_lock:
            self.tasks[ipaddr] = task_id
            log.debug("Now tracking machine %s for task #%s", ipaddr, task_id)
    def del_task(self, task_id, ipaddr):
        """Delete ResultServer state and abort pending RequestHandlers. Since
        we're about to shutdown the VM, any remaining open connections can
        be considered a bug from the VM side, since all connections should
        have been closed after the analyzer signalled completion."""
        with self.task_mgmt_lock:
            if self.tasks.pop(ipaddr, None) is None:
                log.warning(
                    "ResultServer did not have a task with ID %s and IP %s",
                    task_id,
                    ipaddr,
                )
            else:
                log.debug("Stopped tracking machine %s for task #%s", ipaddr, task_id)
            ctxs = self.handlers.pop(task_id, set())
            for ctx in ctxs:
                log.debug("Cancel %s for task %r", ctx, task_id)
                ctx.cancel()
    def handle(self, sock, addr):
        """Handle the incoming connection.
        Gevent will close the socket when the function returns."""
        ipaddr = addr[0]
        with self.task_mgmt_lock:
            task_id = self.tasks.get(ipaddr)
        if not task_id:
            log.warning("ResultServer did not have a task for IP %s", ipaddr)
            return
        storagepath = cwd(analysis=task_id)
        ctx = HandlerContext(task_id, storagepath, sock)
        task_log_start(task_id)
        try:
            try:
                protocol = self.negotiate_protocol(task_id, ctx)
            except EOFError:
                return
            # Registering the context allows us to abort the handler by
            # shutting down its socket when the task is deleted; this should
            # prevent lingering sockets
            with self.task_mgmt_lock:
                # NOTE: the task may have been cancelled during the negotation
                # protocol and a different task for that IP address may have
                # been registered
                if self.tasks.get(ipaddr) != task_id:
                    log.warning(
                        "Task #%s for IP %s was cancelled during " "negotiation",
                        task_id,
                        ipaddr,
                    )
                    return
                s = self.handlers.setdefault(task_id, set())
                s.add(ctx)
            try:
                with protocol:
                    protocol.handle()
            except CuckooOperationalError as e:
                log.error(e)
            finally:
                with self.task_mgmt_lock:
                    s.discard(ctx)
                ctx.cancel()
                if ctx.buf:
                    # This is usually not a good sign
                    log.warning(
                        "Task #%s with protocol %s has unprocessed "
                        "data before getting disconnected",
                        task_id,
                        protocol,
                    )
        finally:
            task_log_stop(task_id)
    def negotiate_protocol(self, task_id, ctx):
        """Read the first line ("<COMMAND> [version]") and return the matching
        protocol handler instance, or None for unknown commands."""
        header = ctx.read_newline()
        if " " in header:
            command, version = header.split()
            version = int(version)
        else:
            command, version = header, None
        klass = self.commands.get(command)
        if not klass:
            log.warning(
                "Task #%s: unknown netlog protocol requested (%r), "
                "terminating connection.",
                task_id,
                command,
            )
            return
        ctx.command = command
        return klass(task_id, ctx, version)
class ResultServer(object, metaclass=Singleton):
    """Manager for the ResultServer worker and task state.

    Fixed: the original declared ``__metaclass__ = Singleton``, which is
    Python 2 syntax and is silently ignored on Python 3 -- and this module
    already relies on Python-3-only constructs (``print(..., file=f)``).
    Declaring the metaclass in the class header restores the intended
    singleton behaviour.
    """

    def __init__(self):
        """Bind the configured result socket and start the worker thread.

        Raises CuckooCriticalError when the ip/port cannot be bound.
        """
        ip = config("cuckoo:resultserver:ip")
        port = config("cuckoo:resultserver:port")
        pool_size = config("cuckoo:resultserver:pool_size")
        sock = gevent.socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        try:
            sock.bind((ip, port))
        except (OSError, socket.error) as e:
            if e.errno == errno.EADDRINUSE:
                raise CuckooCriticalError(
                    "Cannot bind ResultServer on port %d "
                    "because it was in use, bailing." % port
                )
            elif e.errno == errno.EADDRNOTAVAIL:
                raise CuckooCriticalError(
                    "Unable to bind ResultServer on %s:%s %s. This "
                    "usually happens when you start Cuckoo without "
                    "bringing up the virtual interface associated with "
                    "the ResultServer IP address. Please refer to "
                    "https://cuckoo.sh/docs/faq/#troubles-problem "
                    "for more information." % (ip, port, e)
                )
            else:
                raise CuckooCriticalError(
                    "Unable to bind ResultServer on %s:%s: %s" % (ip, port, e)
                )
        # We allow user to specify port 0 to get a random port, report it back
        # here
        _, self.port = sock.getsockname()
        sock.listen(128)
        # The gevent server runs on a daemon thread so it never blocks shutdown.
        self.thread = threading.Thread(
            target=self.create_server, args=(sock, pool_size)
        )
        self.thread.daemon = True
        self.thread.start()

    def add_task(self, task, machine):
        """Register a task/machine with the ResultServer."""
        self.instance.add_task(task.id, machine.ip)

    def del_task(self, task, machine):
        """Delete running task and cancel existing handlers."""
        self.instance.del_task(task.id, machine.ip)

    def create_server(self, sock, pool_size):
        """Build and run the gevent worker, optionally bounded by a pool."""
        if pool_size:
            pool = gevent.pool.Pool(pool_size)
        else:
            pool = "default"
        self.instance = GeventResultServerWorker(sock, spawn=pool)
        self.instance.do_run()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import configparser
import os
import re
import sys
from operator import methodcaller
import pexpect
from funcy import (
autocurry,
compose,
count_by,
filter,
lmap,
lmapcat,
map,
mapcat,
merge,
partial,
rcompose,
re_all,
re_find,
re_test,
select,
)
# CLI prompt terminator and pager marker for the Huawei BRAS shells below.
prompter = "]"
pager = "---- More ----"
# Echo every telnet session to stdout for debugging.
logfile = sys.stdout
# Credentials are read from the operator's config file at import time.
conf = configparser.ConfigParser()
conf.read(os.path.expanduser('~/.weihu/config.ini'))
username = conf.get('bras', 'username')
password = conf.get('bras', 'password')
def telnet(ip):
    """Open an authenticated telnet session to the BRAS at *ip*.

    Logs in with the module-level credentials, enters system view ('sys')
    and returns the pexpect child positioned at the config prompt.
    """
    child = pexpect.spawn('telnet {ip}'.format(ip=ip), encoding='ISO-8859-1')
    child.logfile = logfile
    child.expect('Username:')
    child.sendline(username)
    child.expect('Password:')
    child.sendline(password)
    child.expect('>')
    child.sendline('sys')
    child.expect(prompter)
    return child
def close(child):
    """Leave system view (Ctrl-Z), quit the remote shell and close *child*."""
    child.sendcontrol('z')
    child.expect('>')
    child.sendline('q')
    child.close()
def do_some(child, cmd):
    """Run *cmd* on the session and return its full output as one string.

    Pages through '---- More ----' prompts by sending spaces; strips the
    terminal control sequence and the echoed command line.
    """
    child.sendline(cmd)
    rslt = []
    while True:
        index = child.expect([prompter, pager], timeout=120)
        rslt.append(child.before)
        if index == 0:
            # Back at the prompt: command finished.
            break
        else:
            # Pager hit: request the next page.
            child.send(' ')
            continue
    return ''.join(rslt).replace('\x1b[42D', '').replace(cmd + '\r\n', '', 1)
def get_bingfa(ip):
    """Collect the max-online-users figure per BSU slot from the BRAS at *ip*.

    Returns ('success', [(slot, users, date), ...], ip) or ('fail', None, ip)
    on telnet EOF/timeout.
    """
    def _get_users(child, slot):
        # One (slot, peak user count, date-of-peak) triple per BSU slot.
        record = do_some(child, 'disp max-online slot {s}'.format(s=slot))
        users = re_find(r'Max online users since startup\s+:\s+(\d+)', record)
        users = int(users or 0)
        date = re_find(r'Time of max online users\s+:\s+(\d{4}-\d{2}-\d{2})',
                       record)
        return (slot, users, date)
    try:
        child = telnet(ip)
        rslt = do_some(child, 'disp dev | in BSU')
        # Extract the slot numbers of all BSU boards.
        ff = compose(partial(select, bool), partial(map, r'(\d+)\s+BSU'))
        slots = ff(rslt.split('\r\n'))
        maxUsers = lmap(partial(_get_users, child), slots)
        close(child)
    except (pexpect.EOF, pexpect.TIMEOUT) as e:
        return ('fail', None, ip)
    return ('success', maxUsers, ip)
def get_vlan_users(ip, inf):
    """Count online access users per VLAN on Eth-Trunk *inf* sub-interfaces.

    Returns ('success', {vlan: count, ...}, ip) or ('fail', None, ip).
    """
    def _get_users(child, i):
        # VLAN ids (the digits before '/') of every user on sub-interface i.
        rslt = do_some(
            child, 'disp access-user interface {i} | in /'.format(i=i))
        users = re_all(r'(\d+)/', rslt)
        return users
    try:
        child = telnet(ip)
        infs = do_some(
            child, 'disp cu interface | in Eth-Trunk{inf}\.'.format(inf=inf))
        infs = re_all(r'interface (\S+)', infs)
        rslt = lmapcat(partial(_get_users, child), infs)
        close(child)
        # Tally occurrences per VLAN id.
        rslt = count_by(int, rslt)
    except (pexpect.EOF, pexpect.TIMEOUT) as e:
        return ('fail', None, ip)
    return ('success', rslt, ip)
def get_ip_pool(ip):
    """List the (start, end) address sections of the '163.js' domain pools.

    Returns ('success', [(start_ip, end_ip), ...], ip) or ('fail', None, ip).
    """
    def _get_sections(child, name):
        # All configured 'section <n> <start> <end>' ranges of one pool.
        rslt = do_some(
            child, 'disp cu configuration ip-pool {name}'.format(name=name))
        sections = re_all(r'section \d+ (\S+) (\S+)', rslt)
        return sections
    try:
        child = telnet(ip)
        rslt = do_some(child, 'disp domain 163.js | in pool-name')
        poolNames = re_all(r'pool-name\s+:\s(\S+)', rslt)
        ips = lmapcat(partial(_get_sections, child), poolNames)
        close(child)
    except (pexpect.EOF, pexpect.TIMEOUT) as e:
        return ('fail', None, ip)
    return ('success', ips, ip)
def get_itv_online(ip):
    """Sum the online user counts of the 'vod' and 'itv' domains.

    Returns ('success', total, ip) or ('fail', None, ip).
    """
    try:
        child = telnet(ip)
        rslt = do_some(child,
                       'disp access-user online-total-number domain vod')
        count = re_find(r'total users\s+:\s+(\d+)', rslt, flags=re.I)
        count = int(count) if count else 0
        rslt = do_some(child,
                       'disp access-user online-total-number domain itv')
        count1 = re_find(r'total users\s+:\s+(\d+)', rslt, flags=re.I)
        count1 = int(count1) if count1 else 0
        close(child)
    except (pexpect.EOF, pexpect.TIMEOUT):
        return ('fail', None, ip)
    return ('success', count + count1, ip)
def get_vlans_of_port(ip, port):
    """Return the set of user VLAN ids configured on *port*'s Eth-Trunk.

    Looks up the Eth-Trunk that *port* belongs to, then collects every
    'user-vlan' statement of that trunk's sub-interfaces.
    """
    try:
        child = telnet(ip)
        rslt = do_some(child, f'disp cu interface {port}')
        eth_trunk = re_find(r'eth-trunk \d+', rslt).replace(' ', '')
        rslt = do_some(child, 'disp cu interface filter user-vlan')
        close(child)
    except Exception as e:
        # NOTE(review): `raise e` re-raises but resets nothing useful; the
        # other helpers return ('fail', ...) instead -- confirm which error
        # contract the callers expect.
        raise e
    # Split into per-interface sections, keep the matching trunk's ones, and
    # expand each 'user-vlan' line into the VLAN ids it covers.
    rslt = rcompose(
        methodcaller('split', '#'),
        autocurry(filter)(lambda x: re_test(eth_trunk, x, re.I)),
        autocurry(mapcat)(lambda x: x.split('\r\n')),
        autocurry(filter)('user-vlan'),
        autocurry(map)(lambda x: x.strip()),
        autocurry(map)(lambda x: _item_to_vlans(x)))(rslt)
    return merge(set(), *rslt)
def _item_to_vlans(item):
    """Expand one user-vlan config line into an iterable of VLAN ids.

    A "qinq <lo> <hi>" line yields the inclusive range; any other line
    yields a single-element list with its last whitespace-separated token.
    """
    if re_test(r'qinq \d+ \d+', item, re.I):
        lo, hi = re_find(r'qinq (\d+) (\d+)', item, re.I)
        return range(int(lo), int(hi) + 1)
    return [int(item.split()[-1])]
|
#!/usr/local/bin/python3.8
"""Demonstrate string indexing and slicing on a user-supplied message."""
print('Hello, Nabeel')
users_input = input('Enter a message: ')
# Guard: indexing [0]/[-1] on an empty string raises IndexError.
if not users_input:
    print('No message entered.')
else:
    # First character
    print('First character:', users_input[0])
    # Last character
    print('Last character:', users_input[-1])
    # Middle character
    print('Middle character:', users_input[int(len(users_input) / 2)])
    # Even index characters
    print('Even index characters:', users_input[::2])
    # Odd index characters
    print('Odd index characters:', users_input[1::2])
    # Reversed message
    print('Reversed message:', users_input[::-1])
import torch
from torch import optim
import math
def _check_param_device(param, old_param_device):
if old_param_device is None:
old_param_device = param.get_device() if param.is_cuda else -1
else:
warn = False
if param.is_cuda: # Check if in same GPU
warn = (param.get_device() != old_param_device)
else: # Check if in CPU
warn = (old_param_device != -1)
if warn:
raise TypeError('Found two parameters on different devices, '
'this is currently not supported.')
return old_param_device
def parameters_grad_to_vector(parameters):
    """Flatten the .grad of every parameter into one concatenated vector.

    All parameters must live on the same device (enforced by
    _check_param_device); parameters with no gradient will raise, since
    .view is called on their (None) grad.
    """
    prev_device = None
    flat_grads = []
    for p in parameters:
        prev_device = _check_param_device(p, prev_device)
        flat_grads.append(p.grad.view(-1))
    return torch.cat(flat_grads)
class VJP_Adam(optim.Optimizer):
    """Adam variant whose descent direction mixes the ordinary gradient
    with an externally supplied vector-Jacobian product (VJP):

        direction = alpha_grad * grad - alpha_vjp * vjp

    Apart from that substitution, the update follows torch.optim.Adam
    (first/second moment EMAs, bias correction, optional AMSGrad).
    """
    def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8,
                 weight_decay=0, amsgrad=False, alpha_vjp=0.0, alpha_grad=1.0):
        # alpha_grad / alpha_vjp weight the gradient vs. the VJP term in
        # the effective direction used by step().
        if not 0.0 <= lr:
            raise ValueError("Invalid learning rate: {}".format(lr))
        if not 0.0 <= eps:
            raise ValueError("Invalid epsilon value: {}".format(eps))
        # if not 0.0 <= betas[0] < 1.0:
        #     raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
        # if not 0.0 <= betas[1] < 1.0:
        #     raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
        defaults = dict(lr=lr, betas=betas, eps=eps,
                        weight_decay=weight_decay, amsgrad=amsgrad,
                        alpha_vjp=alpha_vjp, alpha_grad=alpha_grad)
        super(VJP_Adam, self).__init__(params, defaults)
    def __setstate__(self, state):
        # Checkpoints saved before the amsgrad option existed default it off.
        super(VJP_Adam, self).__setstate__(state)
        for group in self.param_groups:
            group.setdefault('amsgrad', False)
    def step(self, vjps, closure=None):
        """Performs a single optimization step.
        Arguments:
            vjps: sequence of tensors, one per parameter and shaped like it,
                combined with the gradient as alpha_grad*grad - alpha_vjp*vjp.
                NOTE(review): indexed by position within each param group, so
                with multiple groups the index restarts per group — confirm
                the intended vjps layout.
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            loss = closure()
        for group in self.param_groups:
            for index, p in enumerate(group['params']):
                vjp = vjps[index]
                if p.grad is None:
                    continue
                # vjp_new = p.grad.data.norm(2) * vjp / (vjp.norm(2) + 1e-5)
                # Effective descent direction: gradient minus weighted VJP.
                grad = group['alpha_grad'] * p.grad.data - vjp * group['alpha_vjp']
                if grad.is_sparse:
                    raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')
                amsgrad = group['amsgrad']
                state = self.state[p]
                # State initialization
                if len(state) == 0:
                    state['step'] = 0
                    # Exponential moving average of gradient values
                    state['exp_avg'] = torch.zeros_like(p.data)
                    # Exponential moving average of squared gradient values
                    state['exp_avg_sq'] = torch.zeros_like(p.data)
                    if amsgrad:
                        # Maintains max of all exp. moving avg. of sq. grad. values
                        state['max_exp_avg_sq'] = torch.zeros_like(p.data)
                exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
                if amsgrad:
                    max_exp_avg_sq = state['max_exp_avg_sq']
                beta1, beta2 = group['betas']
                state['step'] += 1
                bias_correction1 = 1 - beta1 ** state['step']
                bias_correction2 = 1 - beta2 ** state['step']
                if group['weight_decay'] != 0:
                    grad = grad.add(group['weight_decay'], p.data)
                # Decay the first and second moment running average coefficient
                exp_avg.mul_(beta1).add_(1 - beta1, grad)
                exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)
                if amsgrad:
                    # Maintains the maximum of all 2nd moment running avg. till now
                    torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq)
                    # Use the max. for normalizing running avg. of gradient
                    denom = (max_exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(group['eps'])
                else:
                    denom = (exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(group['eps'])
                step_size = group['lr'] / bias_correction1
                # if GLOB:
                #     import ipdb; ipdb.set_trace()
                # NOTE(review): the (scalar, tensor) overloads of add_/
                # addcmul_/addcdiv_ used here are the legacy (pre-1.5) torch
                # calling convention.
                p.data.addcdiv_(-step_size, exp_avg, denom)
                # p.data = p.data + group['alpha'] * vjp
        return loss
|
# coding=utf-8
# matplotlib背景透明示例图
# python 3.5
import numpy as np
import matplotlib.pyplot as plt
from pylab import mpl
# import scipy.stats as stats
# 设置中文字体
mpl.rcParams['font.sans-serif'] = ['SimHei']
fig, ax = plt.subplots()
font = {'family': 'Times New Roman',
# 'weight' : 'bold',
'size': 12}
plt.rc('font', **font)
def autolabel(rects):
    """Write each bar's height (1 decimal place) just above the bar.

    Uses the module-level `ax` axes object.
    """
    # attach some text labels
    for rect in rects:
        height = rect.get_height()
        # place the label centered over the bar, slightly above its top
        ax.text(rect.get_x() + rect.get_width() / 2, 0.03 + height, '%.1f' % height, ha='center', va='bottom')
# Data: one row per algorithm, columns = (freeway, urban) average delay.
testData = [[20.73, 15.24],
            [9.27, 9.53],
            [11.99, 9],
            [6.6, 6.8],
            [4, 4]]
N = 2
width = 0.75
ind = np.arange(width, width * 7 * N, width * 7)
# BUG FIX: `print` and `ind` on separate lines were a Python 2 leftover; in
# Python 3 that printed a blank line and evaluated `ind` as a no-op.
print(ind)
# plt.rc('font',family='Times New Roman')
rectsTest1 = ax.bar(ind, (testData[0][0], testData[0][1]), width, color="#0000FF",
                    edgecolor='black', hatch="/")
rectsTest2 = ax.bar(ind + width, (testData[1][0], testData[1][1]), width, color='#1E90FF',
                    edgecolor='black', hatch="-")
rectsTest3 = ax.bar(ind + 2 * width, (testData[2][0], testData[2][1]), width, color='#82CEFF',
                    edgecolor='black', hatch="x")
rectsTest4 = ax.bar(ind + 3 * width, (testData[3][0], testData[3][1]), width, color='#00BFBF',
                    edgecolor='black', hatch="|")
rectsTest5 = ax.bar(ind + 4 * width, (testData[4][0], testData[4][1]), width, color='#00CC66',
                    edgecolor='black', hatch="+")
ax.set_xlim(0, 9.6)
ax.set_ylim(0, 30)
ax.set_ylabel("Average delay (ms)", **font)
ax.yaxis.grid(True)
ax.yaxis.grid(alpha=0.7, linestyle=':')
ax.set_xticks(ind + width * 2)
ax.set_xticklabels(('Freeway scenario', 'Urban scenario'), **font)
ax.set_yticklabels((0, 5, 10, 15, 20, 25, 30), **font)
# Legend for the five algorithms.
legend = ax.legend((rectsTest1, rectsTest2, rectsTest3, rectsTest4, rectsTest5),
                   ('The proposed GTB algorithm', 'UMB', 'Weighted p-Persistence protocol', 'Two-hop flooding',
                    'One-hop flooding'), markerscale=100)
frame = legend.get_frame()
frame.set_alpha(1)
# Annotate every bar with its numeric value.
autolabel(rectsTest1)
autolabel(rectsTest2)
autolabel(rectsTest3)
autolabel(rectsTest4)
autolabel(rectsTest5)
plt.show()
|
from sklearn.model_selection import train_test_split
import pandas as pd
import os
import args
import bert
from bert import run_classifier
from bert import tokenization
def data_processor():
    """Load (or build) the train/test sentiment splits and convert them to
    BERT input features.

    Returns:
        (train_features, test_features) as produced by
        bert.run_classifier.convert_examples_to_features.
    """
    if os.path.exists("sentiment_data/train.csv"):
        # Reuse cached splits from a previous run (dev split is not reloaded
        # here because only train/test features are returned).
        x_train = pd.read_csv("sentiment_data/train.csv")
        x_test = pd.read_csv("sentiment_data/test.csv")
    else:
        data = pd.read_csv("sentiment_data/simplifyweibo_4_moods.csv")
        # Map the numeric mood labels to their names.
        data["label"] = data["label"].replace(0, "happy").replace(1, "angry").replace(2, "disgust").replace(3, "sad")
        # 60/40 split, then the 40% is split again into test and dev.
        x_train,x_test, y_train, y_test = train_test_split(data,data["label"],test_size=0.4, random_state=0)
        x_test, x_dev, y_test, y_dev = train_test_split(x_test, y_test, test_size=0.4, random_state=0)
        x_train.to_csv("sentiment_data/train.csv", index = False)
        x_test.to_csv("sentiment_data/test.csv", index=False)
        x_dev.to_csv("sentiment_data/dev.csv", index=False)
    # Wrap each row as a single-sentence InputExample (no text_b).
    train_InputExamples = x_train.apply(lambda x: run_classifier.InputExample(guid=None,
                                                                             # Globally unique ID for bookkeeping, unused in this example
                                                                             text_a=x[args.DATA_COLUMN],
                                                                             text_b=None,
                                                                             label=x[args.LABEL_COLUMN]), axis=1)
    test_InputExamples = x_test.apply(lambda x: run_classifier.InputExample(guid=None,
                                                                            text_a=x[args.DATA_COLUMN],
                                                                            text_b=None,
                                                                            label=x[args.LABEL_COLUMN]), axis=1)
    # Build the WordPiece tokenizer from the configured vocab file.
    tokenizer = tokenization.FullTokenizer(vocab_file=args.vocab_file, do_lower_case=True)
    # Convert the examples into fixed-length feature tensors.
    train_features = bert.run_classifier.convert_examples_to_features(train_InputExamples, args.label_list, args.MAX_SEQ_LENGTH,
                                                                      tokenizer)
    test_features = bert.run_classifier.convert_examples_to_features(test_InputExamples, args.label_list, args.MAX_SEQ_LENGTH,
                                                                     tokenizer)
    return train_features, test_features
if __name__ == "__main__":
data_processor() |
from typing import List, Tuple
from pathlib import Path
from collections import deque
from itertools import repeat
"""
Part 1: find boxes with similar ids
Tasks:
- count the boxes that have exactly 2 repeated letters in their id
- do the same for 3 repeats
Hint: multiple occurences of repeats count only once
Hint: if an id contains 2s and 3s repeats it counts towards both totals
"""
data_folder = Path('')
with open(data_folder / 'box_ids.txt') as f:
ids = [line.strip() for line in f]
def two_and_threes(codes: List[str]) -> int:
    """Checksum over box ids.

    Counts how many ids contain some letter exactly twice and how many
    contain some letter exactly three times (each id counts at most once
    per category, but may count in both), then returns the product.
    """
    have_two = 0
    have_three = 0
    for code in codes:
        # Per-letter occurrence tally for this id.
        tally = {}
        for ch in code:
            tally[ch] = tally.get(ch, 0) + 1
        freqs = set(tally.values())
        if 2 in freqs:
            have_two += 1
        if 3 in freqs:
            have_three += 1
    return have_two * have_three
# part 1 answer
print(two_and_threes(ids))
"""
Part 2: identify the boxes that contain prototype fabric
Tasks:
- Identify box ids which differs by just one letter
- Return the common letters
Hint: only two boxes are the correct ones
"""
test_ids = [
'abcde',
'fghij',
'klmno',
'pqrst',
'fguij',
'axcye',
'wvxyz'
]
def identify(ids: List[str]) -> Tuple[Tuple[str], str]:
    """
    Takes a list of box ids and return the position of
    the two that differ by one letter.

    Returns ((id_a, id_b), common_letters) for the first qualifying pair.
    NOTE(review): recursion depth grows with the number of ids consumed
    from the queue, and popleft() raises IndexError if no pair exists —
    assumes the input is known to contain exactly one matching pair.
    """
    ids_queue = deque(ids)
    prototype_ids = ()
    current_id = []
    current_id = ids_queue.popleft()
    def match(box_id):
        nonlocal ids
        nonlocal prototype_ids
        nonlocal current_id
        nonlocal ids_queue
        # difference counter
        diff_count = 0
        # compare the current id versus all the boxes ids
        for id in ids:
            for c1, c2 in zip(box_id, id):
                if c1 != c2:
                    diff_count += 1
                else:
                    diff_count += 0
            if diff_count == 1:
                prototype_ids += (box_id, id)
                # the commented gives the right answer but changes
                # sequence order so the answer is not valid
                # equal_letters = ''.join(set(box_id).intersection(id))
                # drop the single differing letter, preserving order
                equal_letters = box_id.replace(list(set(box_id).difference(id))[0], '')
                return prototype_ids, equal_letters
            else:
                # onto the next id
                pass
            # reset the counter for the next id
            diff_count = 0
        # process the next id if not succesful
        return match(ids_queue.popleft())
    return match(current_id)
print(identify(ids))
|
import os
import re
import wikipedia as wiki
from urllib2 import urlopen
from bs4 import BeautifulSoup
from nltk.corpus import stopwords
from math import log
def tokenize(review, remove_stopwords=True):
    """Split *review* into lowercase word tokens.

    Non-letter characters are replaced by spaces before splitting.  When
    remove_stopwords is true (the default), English stopwords from NLTK
    are filtered out.  Returns a list of words.
    """
    letters_only = re.sub("[^a-zA-Z]"," ", review)
    words = letters_only.lower().split()
    if remove_stopwords:
        stops = set(stopwords.words("english"))
        words = [w for w in words if w not in stops]
    return words
def ensure_dir(dir):
    """Create directory *dir* (and any missing parents) if needed.

    Uses try/except rather than an exists() pre-check to avoid the race
    where another process creates the directory between the check and
    the makedirs call.
    """
    try:
        os.makedirs(dir)
    except OSError:
        # Re-raise unless the directory now exists (already present or
        # created concurrently).
        if not os.path.isdir(dir):
            raise
def get_keyword_from_url_topic(url_topic):
    """Scrape a topic index page and return its article keywords.

    Topic includes: Earth Science, Life Science, Physical Science,
    Biology, Chemistry and Physics.  For each <h3> heading, the last
    path segment of its first link is split on '-' to recover the
    keyword words.
    """
    lst_url = []
    html = urlopen(url_topic).read()
    soup = BeautifulSoup(html, 'html.parser')
    for tag_h3 in soup.find_all('h3'):
        # e.g. ".../some-topic-name/" -> "some topic name"
        url_res = ' '.join(tag_h3.li.a.get('href').strip('/').split('/')[-1].split('-'))
        lst_url.append(url_res)
    return lst_url
def get_save_wiki_docs(keywords, save_folder = 'data/wiki_data/'):
ensure_dir(save_folder)
n_total = len(keywords)
for i, kw in enumerate(keywords):
kw = kw.lower()
print i, n_total, i * 1.0 / n_total, kw
try:
content = wiki.page(kw).content.encode('ascii', 'ignore')
except wiki.exceptions.DisambiguationError as e:
print 'DisambiguationError', kw
except:
print 'Error', kw
if not content:
continue
with open(os.path.join(save_folder, '_'.join(kw.split()) + '.txt'), 'w') as f:
f.write(content)
def get_docstf_idf(dir_data):
    """ indexing wiki pages:
        returns {document1:{word1:tf, word2:tf ...}, ....},
        {word1: idf, word2:idf, ...}"""
    docs_tf = {}
    idf = {}
    vocab = set()
    for fname in os.listdir(dir_data):
        term_count = {}
        total_w = 0
        path = os.path.join(dir_data, fname)
        for line in open(path):
            for word in tokenize(line):
                vocab.add(word)
                term_count.setdefault(word, 0)
                term_count[word] += 1
                total_w += 1
        # Normalize raw counts to term frequencies.
        # (.items() works in both Python 2 and 3, unlike .iteritems().)
        for k, v in term_count.items():
            term_count[k] = 1. * v / total_w
        docs_tf[fname] = term_count
    for w in list(vocab):
        docs_with_w = 0
        for doc_tf in docs_tf.values():
            if w in doc_tf:
                docs_with_w += 1
        # BUG FIX: force float division.  Under Python 2,
        # len(docs_tf)/docs_with_w truncated (e.g. 3 docs, 2 hits ->
        # log(1) == 0), systematically zeroing/underestimating idf.
        idf[w] = log(1. * len(docs_tf) / docs_with_w)
    return docs_tf, idf
def get_docs_importance_for_question(question, dosc_tf, word_idf, max_docs = None):
    """Score every document by the summed tf-idf of the question's words.

    Returns a list of (doc_name, score) pairs sorted by score, highest
    first; truncated to max_docs entries when max_docs is given.
    """
    question_words = set(tokenize(question))
    # score each article against the question vocabulary
    scores = []
    for doc, doc_tf in dosc_tf.iteritems():
        score = 0
        for w in question_words:
            if w in doc_tf:
                score += doc_tf[w] * word_idf[w]
        scores.append((doc, score))
    # best-matching documents first
    scores.sort(key=lambda pair: pair[1], reverse=True)
    if max_docs:
        return scores[:max_docs]
    else:
        return scores
|
# Dictionary GUI: look up meaning, synonyms and antonyms of a word.
# import required libraries
from tkinter import *
# BUG FIX: PyDictionary lives in the PyDictionary package, not in pip.
from PyDictionary import PyDictionary
# Create the dictionary client
dictionary = PyDictionary()
root = Tk()
# Set geometry (was misspelled `geomentry`, which raised AttributeError)
root.geometry("400x400")
def lookup_word():
    """Fill the meaning/synonym/antonym labels for the entered word."""
    # (renamed from `dict`, which shadowed the builtin)
    meanings = dictionary.meaning(word.get())
    # PyDictionary.meaning returns {part_of_speech: [definitions...]} or
    # None; the original indexed ['None'], which always raised KeyError.
    if meanings:
        first_pos = list(meanings.keys())[0]
        meaning.config(text=meanings[first_pos][0])
    else:
        meaning.config(text="No meaning found")
    synonym.config(text=dictionary.synonym(word.get()))
    antonym.config(text=dictionary.antonym(word.get()))
# Add labels, button and frames
# BUG FIX: the option is `fg`, not `ffg`.
Label(root, text="Dictionary",font=("Helvetica 20 bold"), fg="Green").pack(pady=10)
# Frame 1: word entry
frame = Frame(root)
# BUG FIX: the option is `text`, not `txt`.
Label(frame, text="Type word", font=("Helvetica 15 bold")).pack(side=LEFT)
word = Entry(frame, font=("Helvetica 15 bold"))
word.pack()
frame.pack(pady=10)
# Frame 2: meaning
frame1 = Frame(root)
Label(frame1, text="Meaning:-", font=("Helvetica 10 bold")).pack(side=LEFT)
meaning = Label(frame1, text="", font=("Helvetica 10"))
meaning.pack()
frame1.pack(pady=10)
# Frame 3: synonyms
frame2 = Frame(root)
Label(frame2, text="Synonyms:- ", font=("Helvetica 10 bold")).pack(side=LEFT)
synonym = Label(frame2, text="", font=("Helvetica 10 bold"))
synonym.pack()
frame2.pack(pady=10)
# Frame 4: antonym — BUG FIX: the caption label was created but never packed.
frame3 = Frame(root)
Label(frame3, text="Antonym:-", font=("Helvetica 10 bold")).pack(side=LEFT)
antonym = Label(frame3, text="", font=("Helvetica 10"))
antonym.pack()
frame3.pack(pady=10)
Button(root, text="Submit", font=("Helvetica 15 bold"), command=lookup_word).pack()
# Run the Tk event loop
root.mainloop()
|
import json
class MsgConvert():
    """Placeholder converter between message objects and JSON.

    Both conversion methods are unimplemented stubs (the `json` import
    above is presumably intended for them — TODO implement).
    """
    def __init__(self):
        pass
    def msg_json(self):
        # TODO: serialize a message to JSON
        pass
    def json_msg(self):
        # TODO: parse JSON back into a message
        pass
if __name__ == '__main__':
app = MsgConvert()
|
# Generated by Django 3.0.3 on 2020-02-23 02:59
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial migration: creates the Meetups table with a FK to the user model.

    NOTE(review): field names here ('City', 'MOderator', ...) use unusual
    capitalization, but this is a generated, likely already-applied
    migration — renaming would require a follow-up migration, so it is
    left untouched.
    """
    initial = True
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='Meetups',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('City', models.CharField(max_length=100)),
                ('Venue', models.TextField()),
                ('Time', models.TextField()),
                ('Theme', models.TextField()),
                ('MOderator', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
|
import sys
import nltk
import sklearn
# Report interpreter and library versions.
print("Configurations")
# BUG FIX: `print (sys.version) + str("\n")` added a string to print()'s
# None return value (TypeError in Python 3); `print str(...)` was Python 2
# syntax.  Concatenate inside the call instead.
print(sys.version + "\n")
print(str(sys.version_info) + "\n")
print(sys.path)
print('The nltk version is {}.'.format(nltk.__version__))
print('The scikit-learn version is {}.'.format(sklearn.__version__))
|
# A small demo of Python dict basics: lookup, insertion, and update.
dictionary = {
    "word" : "Meaning",
    "python" : "fast programing language",
    "C" : "Speed",
    "HTML" : "Not programing language"
}
# Look up a value by key.
print(dictionary["python"])
# Insert a brand-new key/value pair.
dictionary["machine language"] = 10101
print(dictionary)
# Overwrite the values of existing keys.
dictionary["HTML"] = "Coding Language"
dictionary["C"] = "Fastest executing language"
print(dictionary)
|
from flask import Flask, request
from sqlalchemy.orm import create_session
import sqlite3
import random
from flask import Flask
from flask import request, url_for
from sqlalchemy.orm import sessionmaker
import sqlalchemy
from data import db_session
import db
import sqlite3
app = Flask(__name__)
db_session.global_init("db/users.db")
log = ''
pas = ''
@app.route('/', methods=['POST', 'GET'])
@app.route('/index')
@app.route('/vhh', methods=['POST', 'GET'])
def vhh():
    """Registration page.

    GET renders the login/password form; POST stores the submitted
    credentials in the module-level globals `log`/`pas` and shows a link
    to the quiz page.
    NOTE(review): globals are shared across all clients — concurrent
    users overwrite each other's credentials.
    """
    if request.method == 'GET':
        return f'''<!doctype html>
<html lang="en">
<head>
<title>Пример формы</title>
</head>
<body>
<h1>Форма для регистрации</h1>
<div>
<form class="login_form" method="post">
<input type="text" class="form-control" id="log" placeholder="Введите логин" name="log">
<input type="password" class="form-control" id="pas" placeholder="Введите пароль" name="pas">
<button type="submit" class="btn btn-primary">Записаться</button>
</form>
</div>
</body>
</html>'''
    elif request.method == 'POST':
        global log, pas
        log = request.form['log']
        pas = request.form['pas']
        return f'''<!doctype html>
<html lang="en">
<head>
<title>Пример формы</title>
</head>
<body>
<div>
<form class="login_form" method="post">
<input value="Перейти к решению" type="button" onclick="location.href='http://127.0.0.1:8080/re'" />
</form>
</div>
</body>
</html>'''
@app.route('/re', methods=['POST', 'GET'])
def forma_vopr():
    """Quiz page: GET renders 10 random questions, POST grades the answers.

    NOTE(review): SQL statements are built with f-strings.  The
    interpolated values here are internal (random ints / a row id), but
    parameterized queries would be safer.  Also, list_word/list_ans are
    regenerated on the POST request, so the answers are graded against a
    *new* random question set — confirm this is intended.
    """
    global log, pas
    g = 0
    d = []          # ids of the questions already chosen
    list_word = []  # question texts
    list_ans = []   # expected answers
    con = sqlite3.connect('db/vopro.db')
    cur = con.cursor()
    result = cur.execute(f"""SELECT * FROM vopro""").fetchall()
    for el in result:
        g = el[0]   # after the loop g holds the id of the last row
    # Draw 10 distinct random question ids in [1, g].
    while len(d) != 10:
        r = random.randint(1, int(g))
        print(g)
        if r not in d:
            d.append(r)
            con = sqlite3.connect('db/vopro.db')
            cur = con.cursor()
            result = cur.execute(f"""SELECT * FROM vopro
            WHERE id = {r} """).fetchall()
            for el in result:
                list_word.append(el[1])
                list_ans.append(el[2])
    if request.method == 'GET':
        return f'''<!doctype html>
<html lang="en">
<head>
<title>Вопрос</title>
</head>
<body>
<h1>Вопросы</h1>
<div>
<form class="login_form" method="post">
<h8>{list_word[0]}</h8>
<label for="about">Ваш ответ</label>
<textarea class="form-control" id="about_0" rows="1" name="about_0"></textarea>
<h8>{list_word[1]}</h8>
<label for="about">Ваш ответ</label>
<textarea class="form-control" id="about_1" rows="1" name="about_1"></textarea>
<h8>{list_word[2]}</h8>
<label for="about">Ваш ответ</label>
<textarea class="form-control" id="about_2" rows="1" name="about_2"></textarea>
<h8>{list_word[3]}</h8>
<label for="about">Ваш ответ</label>
<textarea class="form-control" id="about_3" rows="1" name="about_3"></textarea>
<h8>{list_word[4]}</h8>
<label for="about">Ваш ответ</label>
<textarea class="form-control" id="about_4" rows="1" name="about_4"></textarea>
<h8>{list_word[5]}</h8>
<label for="about">Ваш ответ</label>
<textarea class="form-control" id="about_5" rows="1" name="about_5"></textarea>
<h8>{list_word[6]}</h8>
<label for="about">Ваш ответ</label>
<textarea class="form-control" id="about_6" rows="1" name="about_6"></textarea>
<h8>{list_word[7]}</h8>
<label for="about">Ваш ответ</label>
<textarea class="form-control" id="about_7" rows="1" name="about_7"></textarea>
<h8>{list_word[8]}</h8>
<label for="about">Ваш ответ</label>
<textarea class="form-control" id="about_8" rows="1" name="about_8"></textarea>
<h8>{list_word[9]}</h8>
<label for="about">Ваш ответ</label>
<textarea class="form-control" id="about_9" rows="1" name="about_9"></textarea>
<button type="submit" class="btn btn-primary">ответить</button>
</form>
</div>
</body>
</html>'''
    elif request.method == 'POST':
        g = 0
        con = sqlite3.connect('db/vopro.db')
        cur = con.cursor()
        result = cur.execute(f"""SELECT * FROM vopro""").fetchall()
        for el in result:
            g = el[0]
        g = int(g) + 1
        # Find the row id of the user with the credentials stored at login.
        con = sqlite3.connect('db/users.db')
        cur = con.cursor()
        resultss = cur.execute(f"""SELECT * FROM users""").fetchall()
        tt = 0
        print(resultss)
        for el in resultss:
            if el[1] == str(log):
                print(log)
                if el[2] == str(pas):
                    tt = el[0]
                    break
            else:
                print('вас нет в базе')
        # Collect the ten submitted answers in question order.
        list_about = [request.form['about_0'], request.form['about_1'], request.form['about_2'],
                      request.form['about_3'], request.form['about_4'], request.form['about_5'],
                      request.form['about_6'], request.form['about_7'], request.form['about_8'],
                      request.form['about_9']]
        print(list_about)
        tr = 0  # correct answers
        fl = 0  # wrong answers
        for i in range(0, 10):
            if list_about[i] == list_ans[i]:
                con = sqlite3.connect('db/users.db')
                cur = con.cursor()
                cur.execute(f"""UPDATE users SET correct_answer = correct_answer + 1
                WHERE id = {tt}""")
                con.commit()
                con.close()
                tr += 1
            else:
                con = sqlite3.connect('db/users.db')
                cur = con.cursor()
                cur.execute(f"""UPDATE users SET wrong_answer = wrong_answer + 1
                WHERE id = {tt}""")
                con.commit()
                con.close()
                fl += 1
        return f'Верно {tr}'
if __name__ == '__main__':
app.run(port=8080, host='127.0.0.1')
|
# -*- coding: utf-8 -*-
import sys
from PyQt4 import QtGui,QtCore
class Example(QtGui.QWidget):
    """Demo window with a tooltip, a quit button and a close confirmation."""
    def __init__(self):
        super(Example,self).__init__()
        self.initUI()
    def initUI(self):
        """Build the widgets, set window geometry/icon and show the window."""
        QtGui.QToolTip.setFont(QtGui.QFont('SansSerif',10))
        self.setToolTip('This is a <b>QWidget</b> widget')
        btn=QtGui.QPushButton('Button',self)
        btn.setToolTip('This is a <b> QPushBtn</b> button')
        btn.resize(btn.sizeHint())
        btn.move(50,50)
        # Clicking the button quits the application event loop.
        btn.clicked.connect(QtCore.QCoreApplication.instance().quit)
        self.setGeometry(300,300,250,150)
        self.setWindowTitle('Icon')
        self.setWindowIcon(QtGui.QIcon('web.png'))
        self.show()
    def closeEvent(self, QCloseEvent):
        """Ask for confirmation before letting the window close."""
        # BUG FIX: QMessageBox.question(parent, title, text, buttons,
        # default) — the original call omitted parent and title, which
        # raises TypeError when the window is closed.
        replay = QtGui.QMessageBox.question(
            self, 'Message', 'Are you sure to quit?',
            QtGui.QMessageBox.Yes | QtGui.QMessageBox.No,
            QtGui.QMessageBox.No)
        if replay == QtGui.QMessageBox.Yes:
            QCloseEvent.accept()
        else:
            QCloseEvent.ignore()
def main():
    """Create the Qt application, show the example window, run the loop."""
    app = QtGui.QApplication(sys.argv)
    window = Example()
    sys.exit(app.exec_())
if __name__ == '__main__':
main() |
import numpy as np
import h5py
import mcubes
from mayavi import mlab
#from itertools import count
from mayavi.api import OffScreenEngine
import trimesh
input_model_file = 'CA1_ssmall_model.lm'
input_morpho_file = "CA1_ssmall.h5"
# input_morpho_file = "CA1_small.h5"
# input_morpho_file = "CA1.h5"
output_file = 'morph_dend.png'
# Load the particle lattice from the LM model file.
with h5py.File(input_model_file,'r') as f:
    particles = f['Model']['Diffusion']['Lattice'][:,:,:,:]
# Load membrane / mitochondrion meshes and the voxel pitch from the
# morphology file.
with h5py.File(input_morpho_file,'r') as f:
    memb_v = f['membrane vertices'][()]
    memb_f = f['membrane faces'][()]
    PSD_ids = f['PSD ids in membrane faces'][()]
    mito_v = f['mitochondrion vertices'][()]
    mito_f = f['mitochondrion faces'][()]
    pitch = f['unit length per voxel (um)'][()]
# Convert vertex coordinates from um to voxel units.
memb_v = memb_v / pitch
mito_v = mito_v / pitch
# Plot surface mesh
mlab.figure(bgcolor=(1.0,1.0,1.0), size=(400,741))
mlab.view(-180, 90, 700, [120.0, 120.0, 120.0 ])
mlab.triangular_mesh(mito_v[:,0] , mito_v[:,1] , mito_v[:,2] , mito_f, color=(1.0,1.0,0.6), opacity=0.6)
mlab.triangular_mesh(memb_v[:,0], memb_v[:,1], memb_v[:,2], memb_f, color=(0.7,0.7,0.7) , opacity=0.3)
mlab.triangular_mesh(memb_v[:,0], memb_v[:,1], memb_v[:,2], memb_f[PSD_ids,:], color=(1,0,0) , opacity=0.3)
# Plot intracellular molecules: collect flat voxel indices per species id.
Ca = []
NR = []
PMCA = []
NCX = []
for i in range(particles.shape[3]):
    Ca.extend( np.flatnonzero(particles[:,:,:,i] == 1 ).tolist() )
    PMCA.extend( np.flatnonzero(particles[:,:,:,i] == 23).tolist() )
    NR.extend( np.flatnonzero(particles[:,:,:,i] == 27).tolist() )
    NCX.extend( np.flatnonzero(particles[:,:,:,i] == 25).tolist() )
# Convert flat indices back to (x, y, z) voxel coordinates.
Ca = np.unravel_index(Ca, particles[:,:,:,0].shape )
NR = np.unravel_index(NR, particles[:,:,:,0].shape )
PMCA = np.unravel_index(PMCA, particles[:,:,:,0].shape )
NCX = np.unravel_index(NCX, particles[:,:,:,0].shape )
print('Num Ca : ', Ca[0].shape[0])
print('Num PMCA : ', PMCA[0].shape[0])
print('Num NMDAR : ', NR[0].shape[0])
# plot_points1 = mlab.points3d(Ca[0], Ca[1], Ca[2], color=(0,0,1), scale_factor=2.0,line_width=0.1)
plot_points2 = mlab.points3d(NR[0], NR[1], NR[2], color=(1,0,0), scale_factor=2.0,line_width=0.1)
plot_points3 = mlab.points3d(PMCA[0], PMCA[1], PMCA[2], color=(0,1,0), scale_factor=2.0,line_width=0.1)
# Draw a scale bar at half the x extent.
xvnum,yvnum,zvnum = particles[:,:,:,0].shape
Zoff = 0
# NOTE(review): under Python 3 this is true division, so xvnum becomes a
# float; mlab accepts float coordinates, but use // if an integer index
# was intended.
xvnum = xvnum / 2
mlab.plot3d( [xvnum,xvnum],[0,100],[Zoff,Zoff],color=(0.7,0.7,0.7),tube_radius=2.5)
mlab.text3d( xvnum, 100, Zoff-30, '2.0 um', scale=15,color=(0.2,0.2,0.2))
# 0.02 um x 100
mlab.savefig(output_file)
mlab.show()
|
"""
What I will learn
Modules
More built-in Python functions
Module search path
Python Standard Library
#Modules
Python modules are files that have a .py extension.
They can implement a set of attributes (variables), methods(functions), and classes(types).
A module can be included in another Python program by using the import statement followed by the module name.
import time
time.method_name()
time.attribute_name
"""
import time
print(time.asctime())
print(time.timezone)
"""
Tue Aug 31 15:09:08 2021
0
#Modules
if you want single method
---import module_name---
module_name.method_name()
from module_name import method_name
method_name()
"""
print('------------------------------------')
from time import asctime
print(asctime())
"""
0
Tue Aug 31 15:12:43 2021
from module_name import method_name
from module_name import method_name1, method_nameN
"""
# print('------------------------------------')
# from time import asctime, sleep
# print(asctime())
# sleep(3)
# print(asctime())
# print('------------------------------------')
# sleep(10)
# print(asctime())
"""
Tue Aug 31 15:17:10 2021
------------------------------------
Tue Aug 31 15:17:10 2021
Tue Aug 31 15:17:13 2021
------------------------------------
Tue Aug 31 15:17:23 2021
#Modules
sleep()
time.sleep()
#Don't do this!
from time import *
"""
# print('------------------------------------')
# from time import *
# print(timezone)
# print(asctime())
# sleep(3)
# print(asctime())
"""
0
Tue Aug 31 15:21:57 2021
Tue Aug 31 15:22:00 2021
#Module Search Path
sys.path - Returns the search path for modules
import sys
sys.path
"""
print('------------------------------------')
import sys
for path in sys.path:
print(path)
"""
/Users/alisariboga/Desktop/python_course /11_modules
/Library/Frameworks/Python.framework/Versions/3.9/lib/python39.zip
/Library/Frameworks/Python.framework/Versions/3.9/lib/python3.9
/Library/Frameworks/Python.framework/Versions/3.9/lib/python3.9/lib-dynload
/Library/Frameworks/Python.framework/Versions/3.9/lib/python3.9/site-packages
#PYTHONPATH Environment Variable
Mac/Linux:
PYTHONPATH = path1:pathN
Windows:
PYTHONPATH=path1;pathN
"""
"""
import say_hi
Traceback (most recent call last):
File "/Users/alisariboga/Desktop/python_course /11_modules/modules.py", line 111, in <module>
import say_hi
ModuleNotFoundError: No module named 'say_hi'
#Python Standard Library
#https://docs.python.org/3/library/
Python is distributed with a large library of modules
Check the Python standard library before writing any of your own code!
Just a few modules
CSV
logging
urllib.request
json
"""
print('------------------------------------')
import sys
file_name = 'test.txt'
try:
    with open (file_name) as test_file:
        for line in test_file:
            print(line)
except OSError:
    # BUG FIX: a bare `except:` also swallowed KeyboardInterrupt and
    # SystemExit; only I/O failures should trigger the fallback.
    print('Could not open {}.'.format(file_name))
    sys.exit(1)
"""
Deneme 123
Deneme 123
"""
# print('------------------------------------')
# def say_hi():
# print('Hi!')
# import say_hi
# say_hi.say_hi()
# print('------------------------------------')
# def say_hi():
# print('Hi!')
# print('Hello from say_hi2.py!')
# import say_hi2
# say_hi2.say_hi()
print('------------------------------------')
def say_hi():
    """Print a short greeting."""
    print('Hi!')
def main():
    """Entry point: announce the module, then greet."""
    print('Hello from say_hi3.py!')
    say_hi()
if __name__ == '__main__':
    main()
"""
Hello from say_hi3.py!
Hi!
"""
|
from django.shortcuts import render, redirect, reverse
from .models import *
from django.contrib import messages
# Create your views here.
def landing(request):
    # Render the public landing page (login/registration form).
    return render(request, 'login/landing.html')
def process(request):
    """Handle the landing form POST: register or log in a user.

    On validation errors, flash them and return to the landing page; on
    success, store the user id in the session and redirect onward.
    """
    print 'entered process'
    if 'register' in request.POST:
        print 'register'
        errors = User.objects.validate_registration(request.POST)
        if 'error' in errors:
            for error in errors['error']:
                messages.add_message(request, messages.INFO, error)
            return redirect(reverse('login:landing'))
        if 'success' in errors:
            user = User.objects.register_user(request.POST)
            request.session['login_id']=user.id
            return redirect(reverse('dashboard:dashboard')) #this takes us to the dashboard app
    if 'login' in request.POST:
        print 'login'
        errors = User.objects.validate_login(request.POST)
        if 'error' in errors:
            for error in errors['error']:
                messages.add_message(request, messages.INFO, error)
            return redirect(reverse('login:landing'))
        if 'success' in errors:
            user_id = errors['success']
            request.session['login_id']=user_id
            # NOTE(review): registration redirects to 'dashboard:dashboard'
            # but login redirects to 'login:dashboard' — confirm intended.
            return redirect(reverse('login:dashboard'))
    # Falls through (returns None) when neither branch matched.
    pass #might be good to return someone to hell in this case
def dashboard(request):
    """Hand the dashboard off to the dashboard app.

    NOTE(review): everything after the redirect below was unreachable dead
    code (it also read the never-set session key 'user_id' instead of the
    'login_id' set by process()).  The dead code has been removed, so
    observable behavior is unchanged; if a login check is wanted here it
    must be placed *before* this redirect.
    """
    return redirect(reverse('dashboard:dashboard'))
def logout(request):
    """Drop the session login marker and return to the landing page."""
    print 'entered logout'
    # NOTE(review): raises KeyError if 'login_id' is not in the session
    # (e.g. logout hit twice) — confirm middleware/flow prevents that.
    del request.session['login_id']
    return redirect(reverse('login:landing'))
|
"""
This code computes the order of two sibling nodes in a dependency subtree,
where the left and the right siblings are defined on the source dependency tree.
Each node is represented by the continuous vector of its dependency link to its
parent node.
"""
__docformat__ = 'restructedtext en'
import os
import sys
import timeit
import numpy
import theano
import theano.tensor as T
from mlp import MLP
from logistic_sgd import LogisticRegression
from lookuptable import LookupTable
class SiblingsOrderPredictor(object):
    """Multi-Layer Perceptron Class with Lookup Table Input

    Predicts the order of two sibling nodes: a lookup-table layer embeds
    the input feature indices, and an MLP projection layer classifies the
    concatenated embeddings.
    """
    def __init__(self, rng, input, feature_size, emb_mat, hidden_sizes=None):
        """Initialize the parameters for the multilayer perceptron
        :type rng: numpy.random.RandomState
        :param rng: a random number generator used to initialize weights
        :type input: theano.tensor.TensorType
        :param input: symbolic variable that describes the input of the
        architecture (one minibatch)
        :type feature_size: int
        :param feature_size: number of embedded features per example (the
        MLP input width is feature_size * embedding dimension)
        :type emb_mat: theano.tensor.TensorType
        :param emb_mat: embedding matrix, must be pre-initialized (random or from w2v)
        :type hidden_sizes: list of int, optional
        :param hidden_sizes: MLP hidden layer sizes; defaults to a single
        hidden layer with the embedding dimension
        """
        # Since we are dealing with a one lookup table layer LR, we need lookup table before LR
        self.lookupTableLayer = LookupTable(
            rng=rng, input=input, emb_mat=emb_mat
        )
        # The projection layer, i.e., an MLP layer, gets as input the output units
        # of the lookup table layer
        emb_size = emb_mat.get_value().shape[1]
        if hidden_sizes is None:
            hidden_sizes = [emb_size]  # default: 1 hidden layer, same dimension as the input
        self.projectionLayer = MLP(
            rng=rng,
            input=self.lookupTableLayer.output,
            n_in=feature_size * emb_size,
            n_hiddens=hidden_sizes
        )
        # L1 norm ; one regularization option is to enforce L1 norm to
        # be small
        self.L1 = self.projectionLayer.L1
        # square of L2 norm ; one regularization option is to enforce
        # square of L2 norm to be small
        self.L2_sqr = self.projectionLayer.L2_sqr
        # prediction of the sibling pair order is given by the prediction of the
        # model, computed in the multilayer perceptron projection layer
        self.y_pred = self.projectionLayer.y_pred
        # same holds for the function computing the errors
        self.errors = self.projectionLayer.errors
        # and for the function computing the cross-entropy
        self.cross_entropy = self.projectionLayer.cross_entropy
        # the parameters of the model are the parameters of the two layer it is
        # made out of
        self.params = self.lookupTableLayer.params + self.projectionLayer.params
        # keep track of model input
        self.input = input
|
#!/usr/bin/python
# NOTE: this is a Python 2 script (print statements below).
from datetime import datetime
startTime = datetime.now()  # wall-clock start, reported at the end
import sys
import getopt
import numpy as np
# default parameters
kmer = 15       # k-mer size (replaced by a *string* when given via -k)
coverage = 10   # coverage threshold (string when given via -c); int() applied later
file_name = ""  # input fastq path, required via -f
argv = sys.argv[1:]
try:
    opts, args = getopt.getopt(argv, "hk:f:c:")
except getopt.GetoptError:
    # bad flags: show one-line usage and exit with a non-zero status
    print 'knorm.py -k <kmer_size>[15] -c <coverage>[10] -f <inputfile>'
    sys.exit(2)
for opt, arg in opts:
    if opt == '-h':
        # -h prints the full help text and exits
        print "#--- Read Normalization Script ---#\n"
        print "Usage:"
        print "knorm.py -k <kmer_size>[15] -c <coverage>[10] -f <inputfile>\n"
        print "Goals:"
        print "1) Input fastq reads"
        print "2) K-merize and filter out reads based on desired coverage"
        print "3) Save reads with coverage less than or equal to coverage threshold in new file"
        print ""
        print "Output:"
        print "FILENAME_k_KMER#_cov_COVERAGE#_norm.fastq"
        sys.exit()
    elif opt in ("-k"):
        kmer = arg
    elif opt in ("-f"):
        file_name = arg
    elif opt in ("-c"):
        coverage = arg
print "Input file: ", file_name
print "Kmer size: ", kmer
print "Desired coverage: %sx" % (coverage)
###############################################################################
# Progress bar is not my own work from:
# https://gist.github.com/vladignatyev/06860ec2040cb497f0f3
#
def progress(count, total, suffix=''):
    """Draw an in-place text progress bar on stdout.

    count/total give the fraction done; suffix is appended after the
    percentage. The trailing '\r' rewinds the cursor so the bar updates
    in place on the next call.
    """
    width = 60
    done = int(round(width * count / float(total)))
    pct = round(100.0 * count / float(total), 1)
    meter = '=' * done + '-' * (width - done)
    sys.stdout.write('[%s] %s%s ...%s\r' % (meter, pct, '%', suffix))
    sys.stdout.flush()
##################################################
# Importing file for line count
in_file = file_name
fh1 = open(in_file, 'r')
# Count number of lines in a file
num_lines = sum(1 for line in fh1)
fh1.close
if num_lines >= 100000:
print "Your file has %s number of lines..." % (num_lines)
print "This may take a while to process..."
print "...so be patience..."
print " \n"
# re-open input file for processing
fh2 = open(in_file, 'r')
# open output file for reads to be kept
out_file = str("%s_k_%s_cov_%s_norm.fastq" % (file_name[:-6], kmer, coverage))
fh_out = open(out_file, 'w')
# Init dictionary for storing kmers
kmer_dic = {}
# Init array for storing kmer occurence for each read
cov_array = []
# Iteration counter
count = 1
# variables for outputing read data
read_name = ""
read_seq = ""
read_plus = "+"
read_quality = ""
print "K-merizing the reads..."
for line in fh2:
line = line.strip('\n')
progress(count, num_lines, suffix='done')
# save read header
if count % 4 == 1:
read_name = line
count += 1
# save sequence string and kmerize sequence
elif count % 4 == 2:
read_seq = line
line_length = len(line)
# Starting kmer parsing 0 to length of line minus kmer size
for kmer_start_index in range(int(line_length) - int(kmer) + 1):
# range for kmer
kmer_end_index = kmer_start_index + int(kmer)
# collect khmer for this iteraton
kmer_string = line[kmer_start_index: kmer_end_index]
# check for kmer in dictionary and ++ if not present add to dic and equal 1
kmer_dic[kmer_string] = kmer_dic.get(kmer_string, 0) + 1
# append the coverage for each k-mer onto the coverage array
cov_array.append(kmer_dic[kmer_string])
count += 1
# add counter for plus line
elif count % 4 == 3:
count += 1
# save quality line
elif count % 4 == 0:
read_quality = line
# determine if median of coverage array is <= coverage
if np.median(cov_array) <= int(coverage):
# write header, seq, +, quality score to file
fh_out.write("%s\n%s\n%s\n%s\n" % (read_name, read_seq, read_plus, read_quality))
cov_array = []
else:
cov_array = [] # clear array
count += 1
# close out files
fh2.close
fh_out.close
print "\nFinish! \nDone in:", datetime.now() - startTime
print "Normalized reads were stored in :", out_file
|
#
# Sane, easy and straightforward logging for Python
#
# vim:set ts=8:
#
# This Works is placed under the terms of the Copyright Less License,
# see file COPYRIGHT.CLL. USE AT OWN RISK, ABSOLUTELY NO WARRANTY.
#
# Sorry, builtin logging in Python is far too complex.
# There should be no need to take care of logging yourself.
# This means, too, do not use `{}` in logging at all!
# Just plain dump, what's given, and let the caller sort things out.
#
# This is a hopefully sane logging handler:
# - Allows to specify the logging level in the environment
# - Allows to specify the logging filename in the environment as well
# - Use UTC timestamp on all systems to make it easy to compare logs
#
# In __main__:
#
# import pytino.log as log
# #log.setup('name', log.ALL) # for full debugging
# log.setup('name', log.ERROR) # default is log.INFO
# log.warn('hello', 'world') # not shown for log.ERROR
#
# log.twisted() # if you use twisted
# log.asyncio() # if you use asyncio (makes logging using async)
#
# In modules:
#
# import pytino.log as log
# __LOGLEVEL__ = log.ERROR
# log.warn(*['suppressed', 'by','__LOGLEVEL__'])
# log.err('this', 'is', 'shown', 'if',loglevel='ERROR',_or_='below')
# Note: Sequence of KWs cannot be maintained!
#
# Also you can do:
#
# from pytino.log import setup as logsetup
#
# log = logsetup('name').info # logsetup('name',logsetup.ALL) for ALL etc.
# log('whatever')
# ignore the "as", it is to hide away the globals
from __future__ import absolute_import as _absolute_import
from __future__ import print_function as _print_function
import os as _os
import sys as _sys
import time as _time
import logging as _logging
# Marker: modules carrying __LOGWRAPPER__ = logging are skipped when the
# caller's stack frame is located (see _removeWrapperFrames below).
__LOGWRAPPER__ = _logging
# Handle on this very module, so functions below can rebind module globals.
__module__ = _sys.modules[__name__]
# Define some constants
_SANEFORMAT = '%(asctime)s %(levelname)s %(name)s %(module)s:%(lineno)s %(funcName)s %(message)s'
_FULLFORMAT = '%(asctime)s %(levelname)s %(name)s %(pathname)s:%(lineno)s %(funcName)s %(message)s'
_SANEDATE = '%Y%m%d-%H%M%S'
# Set some global runtime variables
__DEBUGGING__ = __name__ == '__main__'  # extra debug behavior when run directly
_disabled = False  # True while logging through this module is switched off
_level = None      # last level explicitly set via level()
# missing environment test
def __setup__(): # Do not pollute globals()
    '''
    This only runs once when this module is first included. You cannot call it.
    It sets up everything properly and calls logging.basicConfig() for you.
    If you want something else, you can call logging.basicConfig() before.
    Environment:
    PYTHON_LOG_LEVEL: either a number (level) or 'DEBUG', 'INFO', etc.
    PYTHON_LOG_FILE: an additional file to log to.
    PYTHON_LOG_FORMAT: set your own logging format
    PYTHON_LOG_DEBUG: use full name to filenames in log and further debugging
    There is a special level 'NONE', which disables logger via this module.
    '''
    # ensure this is called only once
    __module__.__setup__ = lambda: None
    if hasattr(_logging, '__LOGWRAPPER__') and _logging.__LOGWRAPPER__ == _logging: return
    #
    # Below only runs once:
    #
    _logging.addLevelName(NONE, 'NONE') # sad: this does not define logging.NONE
    _logging.addLevelName(ALL, 'ALL') # sad: this does not define logging.ALL
    _logging.ALL = ALL # WTF? addLevelName() forgets this?
    _logging.NONE = NONE # sad: this only works with this module here
    # Now fix some of the most obvious fatal design errors in logging
    # WTF? Blanks in a column of traditionally blank separated fields?
    # Patch in some underscore, and write it uppercase as other levels.
    if getattr(_logging, '_levelToName', None) and getattr(_logging, '_nameToLevel', None):
        # modern logging: separate level<->name mappings
        _logging.getLevelName = lambda level: _logging._levelToName.get(level, _logging._nameToLevel.get(level, ("LEVEL_%s" % level)))
    else:
        # old logging: single bidirectional _levelNames mapping
        _logging.getLevelName = lambda level: _logging._levelNames.get(level, ("LEVEL_%s" % level))
    # possibly needed in setup(): the unpatched currentframe
    __module__._o_curframe_ = _logging.currentframe
    # Eliminate stackframes from (this and possibly other) wrapper modules
    _logging.__LOGWRAPPER__ = _logging
    _logging.currentframe = _removeWrapperFrames(_logging.currentframe)
    _logging.Logger._log = _ignoreNoLoggingException(_logging.Logger._log)
    if _os.getenv('PYTHON_LOG_DEBUG'):
        __module__.__DEBUGGING__ = True
    _logging.basicConfig(datefmt=_SANEDATE, format=_os.getenv('PYTHON_LOG_FORMAT') or __module__.__DEBUGGING__ and _FULLFORMAT or _SANEFORMAT)
    # Why isn't there an ENV var which let us overwrite the level?
    # Why has this to be done by yourself, parsing options or even more crappy?
    lvl = None
    env = _os.getenv('PYTHON_LOG_LEVEL')
    if env:
        # accept a number, or a symbolic name defined on this module
        try: lvl = int(env)
        except: lvl = getattr(__module__, env, None)
    if not isinstance(lvl, int):
        lvl = INFO
    level(lvl)
    # BUG: Timezone is missing in timestamps by default (the ISO8601 isn't ISO8601 compliant)
    # Logging shall always be done in UTC, to be able to compare server times
    _logging.Formatter.converter = _time.gmtime # stackoverflow.com/questions/6321160
    # Why isn't this the default since Python 0.0?
    # Why does only work for Python 2.7 and above?
    try: _logging.captureWarnings(True)
    except: pass
    env = _os.getenv('PYTHON_LOG_FILE')
    if env:
        _logging.getLogger().addHandler(_logging.FileHandler(env,'a'))
    # inject everything into everything
    # such that even if you get some .info as logging routine,
    # you can still can switch over to .warn etc.
    names = { name:getattr(__module__,name) for name in __module__.__dict__ if not name.startswith('_') }
    for a in names:
        o = getattr(__module__, a)
        if callable(o):
            for k,v in names.items():
                setattr(o, k, v)
# barely tested
def level(level=None):
    '''
    Get or set the logging level to something else.
    If set to 0 (NONE), logging is entirely disabled.
    If it is lower than a module's __LOGLEVEL__,
    the latter wins on the module scale.
    Returns the effective level, or NONE while disabled.
    '''
    if level == NONE or level == 'NONE':
        # disable output entirely; 99999 is above any real logging level
        __module__._disabled = True
        level = 99999
    elif level:
        __module__._disabled = False
    if level:
        # Are we really in Java here? Pretty looks like it: WTF!
        _logging.getLogger().setLevel(level)
        __module__._level = level
    # and/or idiom: NONE (0) when disabled or when no level was ever set
    return not __module__._disabled and __module__._level or NONE
def levelName(level=None):
    '''Translate a numeric level into its symbolic name; with no argument,
    the currently effective level is used.'''
    chosen = __module__.level() if level is None else level
    return _logging.getLevelName(chosen)
def disabled():
    """Tell whether logging through this module is currently switched off."""
    return bool(__module__._disabled)
# untested
def formatter(form=None):
    '''
    Change the output format.
    Perhaps use in combination of xlog(.., extra={..})
    If new formatter is None or not given, switch back to sane format.
    '''
    if form is None:
        form = _logging.Formatter(_SANEFORMAT, _SANEDATE)
    # BUG FIX: logging.Logger has no setFormatter() (the author marked this
    # "untested") -- formatters belong to handlers, so apply the formatter
    # to every handler attached to the root logger instead.
    for handler in _logging.getLogger().handlers:
        handler.setFormatter(form)
def setup(name=None, level=None):
    '''
    From main run:
    log.setup(__file__)
    Optional argument "level" defaults to logging.INFO.
    The default value is only used if not overridden in environment.
    If 0 (NONE), logging (via this module) is entirely disabled.
    Returns this module, so the result can be used as a logger object.
    '''
    if name is None:
        # try to deduce the correct name from the caller of setup()
        # (_o_curframe_ is the unpatched logging.currentframe saved in __setup__)
        name = __module__._o_curframe_().f_code.co_filename
    # Zap the 'root' in favor of the set name (why is there no .setName()?)
    _logging.getLogger().name = name
    __module__.level(level)
    # returns the logging module itself
    return __module__
# Only used internally, to allow to hack a module's __LOGLEVEL__;
# raised by the patched currentframe, caught here and in Logger._log.
class _NoLoggingException(Exception): pass
# Not tested with kw yet
def xlog(__LOGLEVEL__, s, *args, **kw):
    '''
    Use this, if you cannot use log() use xlog() instead of logging.log()!
    It is a wrapper around logging.log() with the same arguments:
    exc_info = True: print exception information
    exc_info = sys.exc_info(): print that info
    stack_info = True: print stack info
    extra = { .. }: Extra values which can be used in logformat
    For more see:
    https://docs.python.org/3/library/logging.html#logging.log
    '''
    if __module__._disabled: return
    # print(__LOGLEVEL__, s, args, kw)
    # module.__LOGLEVEL__ support hacked in here.
    # This probably only works with modules,
    # which use pytino.log, of course.
    try:
        # trailing whitespace is stripped so log lines stay single-line
        _logging.log(__LOGLEVEL__, str(s).rstrip(' \t\n\r'), *args, **kw)
    # if __module__.__DEBUGGING__: print('oh, baby, try')
    except _NoLoggingException:
        # suppressed by some module's __LOGLEVEL__: silently do nothing
        # if __module__.__DEBUGGING__: print('eye to eye')
        pass
# XXX TODO XXX tested, but incomplete (missing escapes)
def log(level, *args, **kw):
    '''
    Do some logging without thinking about anything.
    Every positional and keyword argument is stringified and dumped into
    one log line; values whose str() raises are logged as a note instead.
    '''
    if __module__._disabled: return
    def safe(make):
        # run one stringification, capturing any failure as text
        try:
            return make()
        except Exception as e:
            return '(exception ' + str(e) + ')'
    pieces = [safe(lambda v=v: str(v)) for v in args]
    pieces += [safe(lambda k=k, v=v: str(k) + '=' + str(v)) for k, v in kw.items()]
    # XXX TODO XXX
    # Here be Dragons:
    # We need some standard escaping here
    # to allow easy parsing with 3rd party tools
    # in future
    xlog(level, "%s", ' '.join(pieces).rstrip(' \t\n\r'))
# This could be improved by re-implementing logging.Logger.findCaller()
# However I do not like that, as this is very likely to change.
# Here we just wrap the currentframe, which should be relatively safe.
def _removeWrapperFrames(currentframe, same=True):
    """
    ignore the stack for modules, which have a property
    __LOGWRAPPER__ = logging
    This is totally safe, because who else would do this?
    Also in modules using this logger, you can set
    __LOGLEVEL__ = N
    to skip all output below this minimal global level,
    such that full debugging needs to be enabled on the module level with
    modulename.__LOGLEVEL__ = 0
    for the case this is needed.
    (However this comes with a performance penalty at low debug levels.)
    """
    def wrap(*args, **kw):
        c = currentframe(*args, **kw)
        p = c  # previously visited candidate frame
        f = c  # cursor walking outward toward the stack root
        l = 0  # last __LOGLEVEL__ seen in a wrapper frame's locals
        while not f is None:
            if not f.f_globals.get('__LOGWRAPPER__', None) is _logging:
                # first frame not belonging to a wrapper module: the real caller
                # NOTE(review): suppression fires only when a positive local
                # __LOGLEVEL__ was seen below AND the caller module's global
                # __LOGLEVEL__ exceeds it -- confirm intended semantics.
                if f.f_globals.get('__LOGLEVEL__', 0) > l > 0:
                    # if __module__.__DEBUGGING__: print('hush hush')
                    raise _NoLoggingException()
                if same: c = f
                # if __module__.__DEBUGGING__ and c.f_code: print('@DEBUG@log@', c.f_code.co_filename, file=_sys.stderr)
                return c
            l = f.f_locals.get('__LOGLEVEL__', l)
            p = c
            c = f
            f = f.f_back
        # whole stack was wrapper frames: fall back to an earlier candidate
        if f is None and not same:
            c = p
        return c
    return wrap
# This can be used as a decorator
def _ignoreNoLoggingException(_log):
    '''
    Wrap a logging function so that _NoLoggingException turns the call
    into a silent no-op instead of propagating.
    '''
    def wrap(target, lvl, msg, *args, **kw):
        # normalize the message the same way xlog() does
        message = str(msg).rstrip(' \t\n\r')
        try:
            _log(target, lvl, message, *args, **kw)
        except _NoLoggingException:
            return None
    return wrap
# Currently only tested with Python2
def twisted(*args, **kw):
    '''
    Enable this module for twisted logging,
    including patches to improve the output.
    Please beware as twisted is not prepared for this,
    because twisted assumes, logging is nonblocking,
    but python standard logging might block,
    depending on the configuration.
    You must not have touched twisted.logging before,
    else it might get the wrong currentframe().
    Currently no workaround is known by me, see my (Tino's) comment at
    https://stackoverflow.com/a/2493725
    Any extra arguments are forwarded to setup(). Returns this module.
    '''
    import twisted.logger as t0
    import twisted.logger._legacy as t1
    import twisted.logger._logger as t2
    import twisted.logger._observer as t3
    import twisted.logger._stdlib as ts # must be: t4=_stdlib
    import twisted.python.log as p0
    import twisted.python.threadable as p1
    if hasattr(t0, '__LOGWRAPPER__'): return # looks like already patched
    # Ignore all those in logging
    for a in (t0, t1, t2, t3, ts, p0, p1):
        a.__LOGWRAPPER__ = _logging
    # Patch in our stackframe hack
    ts.currentframe = _removeWrapperFrames(ts.currentframe, same=True)
    if args or kw:
        setup(*args, **kw)
    ll("Twisted logging enabled")
    return __module__
def asyncio(*args, **kw):
    '''
    Enable this module for asyncio logging.
    NOT YET READY (logging is blocking)
    Any extra arguments are forwarded to setup(). Returns this module.
    '''
    if args or kw:
        setup(*args, **kw)
    ll("AsyncIO logging enabled")
    return __module__
NONE = 0  # pseudo-level: disables logging via this module entirely
ALL = 1   # pseudo-level: below DEBUG, logs everything
DEBUG = _logging.DEBUG
INFO = _logging.INFO
WARNING = _logging.WARNING
ERROR = _logging.ERROR
FATAL = _logging.FATAL
# And here some convenience wrappers:
# Note that they dump their keywords,
# if you need stackframes etc. use xlog()!
def ll (*args, **kw): log(ALL, *args, **kw)
def debug(*args, **kw): log(DEBUG, *args, **kw)
def info (*args, **kw): __module__.log(__module__.INFO, *args, **kw)
def warn (*args, **kw): __module__.log(__module__.WARNING, *args, **kw)
def err (*args, **kw): __module__.log(__module__.ERROR, *args, **kw)
def fatal(*args, **kw): __module__.log(__module__.FATAL, *args, **kw)
# Do all the patching stuff, once for a lifetime.
__setup__()
def _test():
    '''Smoke test: exercises setup()/level()/levelName()/disabled() through
    the attributes that __setup__() injected onto every public function.'''
    sep = lambda: print('---------------------------------------------------------')
    # reveal me
    # well, this reveals the innermost function here
    # which is "xlog", but this is for testing only anyway.
    __module__.__LOGWRAPPER__ = None
    log = fatal
    sep()
    print(log.__dict__)
    sep()
    log.setup()
    print(log.level(), log.levelName(), log.disabled())
    log("this can be seen")
    sep()
    log.setup('test', NONE)
    print(log.level(), log.levelName(), log.disabled())
    log("this cannot be seen")
    sep()
if __name__=='__main__':
    _test()
|
import time
import threading
from pixels import Pixels, pixels
from alexa_led_pattern import AlexaLedPattern
from google_home_led_pattern import GoogleHomeLedPattern
from flask import Flask
from flask_admin import Admin
# Flask application plus a flask-admin panel served alongside it.
app = Flask(__name__)
app.config['FLASK_ADMIN_SWATCH'] = 'cerulean'  # flask-admin bootswatch theme
admin = Admin(app, name='microblog', template_mode='bootstrap3')
@app.route('/')
def root():
    """Landing page handler: greet visitors at the site root."""
    greeting = 'Hello Ferguson Maker Space!'
    return greeting
def flaskThread():
    """Serve the Flask app on every interface, port 80 (blocking call;
    meant to run in a background thread)."""
    app.run(host="0.0.0.0", port=80, threaded=True)
if __name__ == '__main__':
    # serve the web UI in the background; LED demo loop owns the main thread
    threading.Thread(target=flaskThread).start()
    pixels.pattern = GoogleHomeLedPattern(show=pixels.show)
    #pixels.pattern = AlexaLedPattern(show=pixels.show)
    # cycle the LED ring through its demo states until Ctrl-C
    while True:
        try:
            pixels.wakeup()
            time.sleep(3)
            pixels.think()
            time.sleep(3)
            pixels.speak()
            time.sleep(6)
            pixels.off()
            time.sleep(3)
        except KeyboardInterrupt:
            break
    # turn the LEDs off before exiting
    pixels.off()
    time.sleep(1)
|
# Coffee Machine Class
from menu import Menu, MenuItem
from coffee_maker import CoffeeMaker
from money_machine import MoneyMachine
# Available drinks.
# NOTE(review): MenuItem positional argument meanings (100, 16, 1 and the
# price) are assumed from context -- confirm against menu.MenuItem.
latte = MenuItem('latte', 100, 16, 1, 3.50)
espresso = MenuItem('espresso', 100, 16, 1, 2.50)
cappuccino = MenuItem('cappuccino', 100, 16, 1, 3.25)
# Dictionary dispatch replaces the previous chain of non-exclusive `if`s.
menu_by_name = {'latte': latte, 'espresso': espresso, 'cappuccino': cappuccino}
coffeeMaker = CoffeeMaker()
payment = MoneyMachine()
# BUG FIX: prompt previously read "(espresso/latte/cappuccino/: " with a
# stray "/" instead of the closing parenthesis.
choice = input('What would you like? (espresso/latte/cappuccino): ')
drink = menu_by_name.get(choice)
if drink is None:
    # Robustness: an unrecognized choice previously fell through with the
    # raw string and crashed later on drink.cost.
    print('Sorry, we do not serve', choice)
else:
    resources = coffeeMaker.is_resource_sufficient(drink)
    if resources:
        print('We can totally make your drink!')
        print('This drink costs', drink.cost, '. Please enter your payment.')
        if payment.make_payment(drink.cost):
            print('------- REPORT -------')
            coffeeMaker.report()
            coffeeMaker.make_coffee(drink)
    else:
        print(resources)
        print('Uh Oh! We are all out of resources!')
        print('Come back tomorrow!')
|
# Demonstrate Python's numeric types and explicit conversions.
x = 100         # integer literal
print(type(x))
y = 2.0         # float literal
print(type(y))
z = int(y)      # float -> int conversion (truncates toward zero)
print(type(z))
a = int("123")  # string -> int parsing
print(a)
#!/usr/bin/env python
import argparse
import numpy
import os
import sys
from pickleExptLogs import readPickledFile
from expsiftUtils import *
from plotCompare import plotClusterBarComparisonDirs
from plotCompare import getRateMbpsFromPropValSet
from plotCompare import getNClassesFromPropValSet
from plotCompare import sortRateValSets
from plotCompare import sortNClassesValSets
from plotCompare import getSysConfLabel
# Command-line interface for the burst-length comparison plots.
parser = argparse.ArgumentParser(description='Plot burst_len comparison graphs')
parser.add_argument('expt_dirs', nargs='+', help='Experiment directories')
parser.add_argument('plotfile_prefix', help='Filename prefix for output graphs')
parser.add_argument('-r', dest='recursive', action='store_true',
                    help='Recursively look for experiment directories under '
                    'each specified directory')
def getAvgBurstLenPkt(directory):
    """Average burst length in packets across all ports, read from the
    pickled sniffer summary under `directory`."""
    summary_file = os.path.join(
        directory, 'pickled/burstlen_pkt_summary.txt')
    summary = readPickledFile(summary_file)
    # summary maps port -> stats tuple; index 0 is the average burst length
    per_port = [summary[port][0] for port in summary.keys()]
    return numpy.average(per_port)
def getAvgBurstLenUsec(directory):
    """Average burst length in usecs across all ports (the pickled summary
    stores nsecs, hence the division by 1000)."""
    summary_file = os.path.join(
        directory, 'pickled/burstlen_nsec_summary.txt')
    summary = readPickledFile(summary_file)
    # summary maps port -> stats tuple; index 0 is the average burst length
    per_port = [summary[port][0] for port in summary.keys()]
    return numpy.average(per_port) / 1000.0
def getPc99BurstLenUsec(directory):
    """Average of the per-port 99th-percentile burst lengths, in usecs
    (the pickled summary stores nsecs)."""
    summary_file = os.path.join(
        directory, 'pickled/burstlen_nsec_summary.txt')
    summary = readPickledFile(summary_file)
    # summary maps port -> stats tuple; index 1 is the pc99 burst length
    per_port = [summary[port][1] for port in summary.keys()]
    return numpy.average(per_port) / 1000.0
# Returns the burstlen comparison summary graph
def plotBurstLenComparisonDirs(dir2props_dict, fn_get_datapoint,
                               yLabel, layout=None):
    """Build a cluster-bar comparison plot: one subplot per rate, one
    cluster per class count, datapoints computed by fn_get_datapoint
    from each experiment directory."""
    return plotClusterBarComparisonDirs(
        dir2props_dict,
        subplot_props = ['rate_mbps'],
        cluster_props = ['nclasses'],
        trial_props = ['run'],
        fn_sort_subplots = sortRateValSets,
        fn_sort_clusters = sortNClassesValSets,
        fn_sort_majorgroups = lambda majorgroups: majorgroups,
        fn_get_subplot_title = (lambda rate_val_set:
                                'Rate: %s Gbps' %
                                (getRateMbpsFromPropValSet(rate_val_set) / 1000)),
        fn_get_cluster_label = (lambda nclasses_val_set:
                                str(getNClassesFromPropValSet(nclasses_val_set))),
        fn_get_majorgroup_label = getSysConfLabel,
        fn_get_datapoint = fn_get_datapoint,
        xLabel = 'Number of classes',
        yLabel = yLabel,
        layout = layout)
# Returns the "avg burstlen in pkts" comparison summary graph
def plotAvgBurstLenPktComparisonDirs(dir2props_dict = None, layout = None):
    # BUG FIX: the default was a mutable dict literal ({}); use None and
    # substitute a fresh dict inside. Call-compatible with the old form.
    if dir2props_dict is None:
        dir2props_dict = {}
    return plotBurstLenComparisonDirs(
        dir2props_dict,
        fn_get_datapoint = getAvgBurstLenPkt,
        yLabel = 'Avg. burst length (packets)',
        layout = layout)
# Returns the "avg burstlen in usecs" comparison summary graph
def plotAvgBurstLenUsecComparisonDirs(dir2props_dict = None, layout = None):
    # BUG FIX: the default was a mutable dict literal ({}); use None and
    # substitute a fresh dict inside. Call-compatible with the old form.
    if dir2props_dict is None:
        dir2props_dict = {}
    return plotBurstLenComparisonDirs(
        dir2props_dict,
        fn_get_datapoint = getAvgBurstLenUsec,
        yLabel = 'Avg. burst length (usecs)',
        layout = layout)
# Returns the "avg of pc99 burstlen in usecs" comparison summary graph
def plotPc99BurstLenUsecComparisonDirs(dir2props_dict = None, layout = None):
    # BUG FIX: the default was a mutable dict literal ({}); use None and
    # substitute a fresh dict inside. Call-compatible with the old form.
    if dir2props_dict is None:
        dir2props_dict = {}
    return plotBurstLenComparisonDirs(
        dir2props_dict,
        fn_get_datapoint = getPc99BurstLenUsec,
        yLabel = '99th perc. burst length (usecs)',
        layout = layout)
# Returns the "burstlen in usecs" comparison summary graph
def plotBurstLenUsecComparisonDirs(dir2props_dict = None, layout = None):
    # BUG FIX: the default was a mutable dict literal ({}); use None and
    # substitute a fresh dict inside. Call-compatible with the old form.
    if dir2props_dict is None:
        dir2props_dict = {}
    # Plot burstlen_usec avg comparison graph; only its layout is reused
    _, _, _, _, layout = plotAvgBurstLenUsecComparisonDirs(dir2props_dict)
    # Plot burstlen_usec pc99 comparison graph in the same layout
    return plotPc99BurstLenUsecComparisonDirs(dir2props_dict, layout)
def main(argv):
    '''Entry point: collect experiment directories (optionally walking
    recursively) and save the burst-length comparison plots.
    NOTE: Python 2 (print statement below).'''
    # Parse flags
    args = parser.parse_args()
    # Generate the list of experiment directories to compare
    expt_dirs = []
    if args.recursive:
        for directory in args.expt_dirs:
            directory = os.path.abspath(directory)
            for (path, dirs, files) in os.walk(directory, followlinks=True):
                # a directory counts as an experiment iff it carries an
                # expsift_tags file
                if os.path.exists(os.path.join(path, 'expsift_tags')):
                    #print 'Found experiment directory:', path
                    expt_dirs.append(path)
        print 'Found %d experiment directories to compare' % len(expt_dirs)
    else:
        expt_dirs = args.expt_dirs
    # Check if any experiment directories were found or not
    if len(expt_dirs) == 0:
        return
    # Read the properties for each directory from the expsift tags files
    dir2props_dict = getDir2PropsDict(expt_dirs)
    # Plot burstlen_pkt comparison graph
    _, _, _, _, burstlen_pkt_plot_layout = (
        plotAvgBurstLenPktComparisonDirs(dir2props_dict))
    burstlen_pkt_plot_layout.save(args.plotfile_prefix +
                                  'compare_burstlen_pkt.png')
    # Plot burstlen_usec comparison graph
    _, _, _, _, burstlen_usec_plot_layout = (
        plotBurstLenUsecComparisonDirs(dir2props_dict))
    burstlen_usec_plot_layout.save(args.plotfile_prefix +
                                   'compare_burstlen_usec.png')
if __name__ == '__main__':
    main(sys.argv)
|
# RachelPotterP2.py
# A program that takes a list of numbers and returns the sum of numbers in the list
def list_sum(num_list):
    """Return the sum of the numbers in num_list (0 for an empty list)."""
    return sum(num_list)
# Let's test it!
# get_list() reads a comma-separated list of numbers from the user,
# feeds it through list_sum(), and prints the result.
def get_list():
    """Prompt for comma-separated numbers and print their sum."""
    raw = input("Enter a list of numbers to be added, separated by a comma: ")
    numbers = [float(token) for token in raw.split(",")]
    print(list_sum(numbers))  # call our adding function and print the return
get_list()
# I pledge my honor that I have abided by the Stevens Honor System
# Rachel Potter
|
####
# prototyping anomaly detection using python numpy and scipy
####
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
from scipy.optimize import curve_fit
import scipy
import datetime
import matplotlib.dates as mdates
# loading data from the arctan csv (header row is skipped via [1:] below)
data = np.genfromtxt('ArcTan_Data.csv',dtype=None, delimiter=',')
#time,course,speed,x(m),Y(m) data
X= data[1:,1:5].astype(float)
#bearing data
Y=data[1:,5].astype(float)
#data for the last leg (rows 68..144)
Ymin=Y[68:145]
#date data from the csv
date=data[1:,0]
#time in seconds
ti=np.linspace(0,145,145)
#variable to hold bi to be calculated
y_fit=np.zeros(Ymin.size)
#converting the string date data to python dates
for i in range (0,Y.size):
    date[i]=datetime.datetime.strptime(date[i], "%d/%m/%Y %H:%M:%S")
#converting the dates to numbers that we can use in computations
dates=mdates.datestr2num(date)
#dates for the last leg
datesmin=dates[68:145]
# obtaining the time elapsed starting from zero, then dividing by 10000 to
# normalize it for curve fitting since fitting doesnt work well with large numbers
# NOTE(review): timedelta.seconds only counts seconds within a day (whole
# days are dropped); total_seconds() may be the intended value -- confirm.
for i in range (0,Y.size):
    diff=datetime.timedelta(dates[i]-dates[0])
    ti[i]=diff.seconds
    ti[i]=ti[i]/10000
#Arctan function used to fit a curve to the data
def func2(x, B, P, Q):
    """Bearing model used for curve fitting: the angle (in degrees) of a
    point that starts on heading B (degrees) and drifts linearly with x
    at rates P (sine component) and Q (cosine component)."""
    sin_b = np.sin(np.radians(B))
    cos_b = np.cos(np.radians(B))
    return np.degrees(np.arctan2(sin_b + P * x, cos_b + Q * x))
#finding the fitting parameters to use .ti(for the last leg)
#fitpars2 are the fitting parameters obtained by the curve_fit using equation func2
# covmat2 is the covariance matrix of the fit
# maxfev = maximum number of function evaluations allowed; larger is better here
# The diagonals of the covariance matrix are variances
fitpars2, covmat2 = curve_fit(func2,ti[68:145],Ymin,maxfev=100000000)
#plotting dates against bearing for the last leg
plt.figure(1)
plt.plot(datesmin,Ymin,'.',color='red')
#finding the bearing bi using the fitting parameters obtained by curve_fit equation.y_fit=bi
y_fit=func2(ti[68:145], fitpars2[0],fitpars2[1],fitpars2[2])
#plotting bi calculated for the last leg against dates
plt.plot(datesmin,y_fit,'r-',color='green')
# rotate the x-axis date labels so they remain readable
locs, labels = plt.xticks()
plt.setp(labels, rotation=45)
#obtaining the sum of the squared errors for the leg
wholelegerros=np.sum((Ymin-y_fit)**2)
#showing the plots
plt.show()
|
""""
A list is a collection of more than one variable
They need not be of the same type
[]--declare a list
"""
x=["John Doe",20,"john@gmail.com","Nairobi", True]
print(x)
dishes=["Ugali","Samaki wa Kupakwa","Boilo","Nyama"]
colors=['Blue','white','grey']
combined=[dishes,colors,["Monday","Tuesday"]]
print(dishes)
print(colors)
print(combined)
#accessing items of nested lists
print(combined[2][1])
print("Length of list",len(combined))
print("Length of list",len(dishes))
#Concatination of lists
print("Concatinated lits",dishes + colors)
#del dishes[2]
print("sliced list",dishes[0:2])
#POP()....removes the last element of the list
print("POP function on Colors list:-",colors.pop())
#Reverse() reverse the order of the elements
list1=[1,2,3,4,5]
list1.reverse()
print("Reverse function on colors list:-",list1)
colors.reverse()
print(colors)
#Append function- Adds elements ion a list
colors.append('brown')
print(colors)
#extend() adds the new elements into an existing
list2=["Jon","May","lil"]
another_list=[1,3,4,5,6]
list2.extend(another_list)
print(list2)
#Insert into a list
list2.insert(1,"kim")
print("Combined list", list2)
#COunt
u=list2.count("Jon")
print(u)
#len
print(len(list2))
|
import requests
import random
import logging
import re
"""
Derpiboooru API accessing for parsing, function parameters are explicitly strings
Changed from urllib2 to requests (3rd party)
Changes are being applied to the derpibooru API
2015-05-31: Minor changes to return non-200 HTTP status codes as if to back off
2015-06-12: More changes to the derpibooru API, but no errors raised on parsing image page URLs.
2015-07-05: Added small function to limit how many tags it can show
2015-07-23: stats_string now returns a tuple with two messages, first one being the image info(Excluding the tags), and the second containing the tags. This has been marked as a major change
2015-08-03: randimg searches for anything specified and returns a random image with the tag or tag combination
2015-08-08: randimg requires two arguments, first is the search string, second is the boolean for unfiltering
2015-08-25: Added in uploaded and updated time and shorter way of tag checking for questionable/explicit/grimdark
"""
# Tags that encode a content rating on derpibooru (see rating_iterate).
system_tags = ["explicit", "grimdark", "grotesque", "questionable", "safe", "semi-grimdark", "suggestive"]
logging.basicConfig(filename='logs/derpi.log',level=logging.WARNING)
# NOTE(review): templist appears unused in this module.
templist = ""
def rating_iterate(taglist):
    """Extract the content-rating tags from a comma-separated tag list,
    returning them comma-joined and title-cased."""
    ratings = [tag for tag in taglist.split(", ") if tag in system_tags]
    return ", ".join(ratings).title()
def split_taglist(tag_str):
    """Clip an over-long tag list to roughly 200 characters.

    Short lists are returned untouched; long ones are cut at the last word
    boundary before 200 chars, with '...' and the total tag count appended.
    """
    limit = 200
    ellipsis = "..."
    total_tags = len(tag_str.split(", "))
    if len(tag_str) <= limit:
        return tag_str
    clipped = tag_str[:limit].rsplit(' ', 1)[0]
    return clipped + ellipsis + " (" + str(total_tags) + " total tags)"
def derpitimestamp(time_string): #Returns in Y-m-d H:M format
    """Collapse a derpibooru ISO-style timestamp into 'Y-m-d H:M'."""
    ts_re = re.compile("(?P<year>[0-9]*-)?(?P<month>[0-9]*-)(?P<day>[0-9]*)?(T)?(?P<hour>[0-9]*:)?(?P<minute>[0-9]*:)?(?P<second>[0-9]*.)?(?P<millisecond>[0-9]*.)")
    parts = re.match(ts_re, time_string)
    fields = (
        parts.group("year").rstrip("-"),
        parts.group("month").rstrip("-"),
        parts.group("day"),
        parts.group("hour").rstrip(":"),
        parts.group("minute").rstrip(":"),
    )
    return "%s-%s-%s %s:%s" % fields
def randimg(t, apikey):
    """Search derpibooru for `t` (or 'cute' when t is None) and return the
    URL of a random matching image, or a diagnostic message string.

    apikey: optional API key; when given, the key's filters apply.
    """
    if t is None:
        q = "cute"
    else:
        q = t.replace(" ", "+")
        # NOTE(review): this replace is a no-op because the previous line
        # already turned every space into '+' (so ", " no longer occurs);
        # the two replaces were likely meant to run in the other order.
        q = q.replace(", ", ",")
    if apikey is not None:
        r = requests.get("https://derpibooru.org/search.json?key={key}&q={t}".format(t=q, key=apikey))
    else:
        r = requests.get("https://derpibooru.org/search.json?q={t}".format(t=q))
    if r.status_code != 200: #Back off in events of a non-200 status code
        return "Status returned {status}".format(status=r.status_code)
    jso = r.json()
    if jso['total'] == 0:
        # BUG FIX: previously called `room.message(...)` on an undefined
        # name `room` (NameError); return the message like the other paths.
        return "That tag or tag combination does not exist"
    dat = random.choice(jso['search'])
    iid = int(dat['id'])
    return "https://derpibooru.org/{id} (Tag/Tag combination has {n} images)".format(id=iid, n=str(jso['total']))
def tagsearch(tag):
    """Return the total image count for a tag query as a string, None when
    nothing matches, or an error message on a non-200 HTTP status."""
    query = tag.replace(" ", "+").replace(", ", ",")
    r = requests.get("https://derpibooru.org/search.json?q={t}".format(t=query))
    if r.status_code != 200: #Back off in events of a non-200 status code
        return "Status returned {status}".format(status=r.status_code)
    payload = r.json()
    if payload['total'] == 0:
        return None
    return str(payload['total'])
def tagsp(tag): #Returns URL of spoiler image, returns None (or null) if no url is present
    """Look up a tag's spoiler-image URL; log and return None when the tag
    has no spoiler image, or return an error message on non-200 status."""
    slug = tag.replace(" ", "+").replace(":", "-colon-")
    r = requests.get("https://derpibooru.org/tags/" + slug + ".json")
    if r.status_code != 200:
        return "Status returned {status}".format(status=r.status_code)
    spoiler_uri = r.json()['tag']['spoiler_image_uri']
    if spoiler_uri is None:
        logging.warning('No spoiler image for tag "{tag}"'.format(tag=tag))
        return None
    return "http:" + spoiler_uri
def rating(num_id):
    """Return the URL of the 'rating'-sized representation of image
    `num_id`, or an error message on a non-200 HTTP status."""
    r = requests.get('https://derpibooru.org/' + num_id + '.json')
    if r.status_code != 200:
        return "Server returned {status}".format(status=r.status_code)
    return str("http:" + r.json()['representations']['rating'])
def fetch_info(numid):
    """Return the parsed JSON metadata for image *numid*, or None on a non-200 reply."""
    r = requests.get('https://derpibooru.org/images/{num}.json'.format(num=numid))
    if r.status_code != 200:
        # Bug fix: the original logged the literal text "{status}" because
        # .format() was never called; log the real status code (lazily).
        logging.warning("Server returned %s", r.status_code)
        return None
    return r.json()
_score = lambda img_info: int(img_info['score'])
_upv = lambda img_info: int(img_info['upvotes'])
_dwv = lambda img_info: int(img_info['downvotes'])
_faves = lambda img_info: int(img_info['faves'])
_cmts = lambda img_info: int(img_info['comment_count'])
_uled = lambda img_info: img_info['uploader']
_tags = lambda img_info: img_info['tags']
_format = lambda img_info: img_info['original_format']
_created_time = lambda img_info: img_info['created_at']
_updated_time = lambda img_info: img_info['updated_at']
def stats_string(numid):
    """Build the (stats line, tag line) string pair for image *numid*.

    Returns None when fetch_info saw a non-200 HTTP status code.
    """
    img_info = fetch_info(numid)
    if img_info is None:  # Return None if fetch_info saw a non-200 HTTP status
        return None
    # Fix: the original also computed derpitimestamp(_updated_time(...)) into
    # an ``upd_time`` local that was never used — dead code removed.
    uled_time = derpitimestamp(_created_time(img_info))
    rating = rating_iterate(_tags(img_info))
    summary = "\002({rating})\017 https://derpibooru.org/{num} | \002Uploaded at\017: {uledtime} UTC by {uled} | \002Score\017: {score} ({upv} up / {dwv} down) with {faves} faves | \002Comment count\017: {cmts} ".format(
        rating=rating,
        uledtime=uled_time,
        score=_score(img_info),
        upv=_upv(img_info),
        dwv=_dwv(img_info),
        faves=_faves(img_info),
        cmts=_cmts(img_info),
        uled=_uled(img_info),
        num=numid
    )
    tag_line = "Image #{n} tags: {tlist}".format(
        n=numid,
        tlist=split_taglist(_tags(img_info))
    )
    return (summary, tag_line)
|
from tda import PD, PWGK, PL, PSSK
import tda
import numpy as np
import os
import random
def n_mmd(mat_gram, unbias=True):
    """Compute the n-scaled MMD statistic from a joint Gram matrix.

    The first half of the rows/columns is sample X, the second half is
    sample Y.  With ``unbias=True`` the diagonal terms are dropped and the
    result is divided by (n - 1); otherwise everything is kept and the
    result is divided by n.
    """
    n_total = mat_gram.shape[0]
    n = n_total // 2
    block_xx = mat_gram[:n, :n]
    block_yy = mat_gram[n:, n:]
    block_xy = mat_gram[:n, n:]
    total_xx = np.sum(block_xx)
    total_yy = np.sum(block_yy)
    total_xy = np.sum(block_xy)
    if unbias:
        # Remove the diagonal contributions of each block.
        total_xx -= np.trace(block_xx)
        total_yy -= np.trace(block_yy)
        total_xy -= np.trace(block_xy)
        return (total_xx + total_yy - 2 * total_xy) / (n - 1)
    return (total_xx + total_yy - 2 * total_xy) / n
def hist_wchi(mat_gram, num_hist=int(1e+4)):
    """Sample the estimated null distribution of the MMD statistic.

    Centers the Gram matrix, estimates its eigenvalues, and draws
    ``num_hist`` samples from the corresponding weighted chi-square
    distribution.  Returns the samples sorted in descending order.

    NOTE: consumes ``np.random`` draws — results depend on the global
    NumPy RNG state.
    """
    n = len(mat_gram)
    # centered Gram matrix
    mat_center = np.empty((n, n))
    vec_gram = sum(mat_gram)
    val_total = sum(vec_gram)
    # Fill the lower triangle and mirror it, so only n(n+1)/2 entries are
    # computed (the centered matrix is symmetric).
    for i in range(n):
        for j in range(i + 1):
            mat_center[i, j] = (mat_gram[i, j]
                                - ((vec_gram[i] + vec_gram[j]) / n)
                                + (val_total / (n ** 2)))
            mat_center[j, i] = mat_center[i, j]
    # estimated eigenvalues (largest first; the smallest one is dropped)
    vec_nu = np.sort(np.linalg.eigh(mat_center)[0])[::-1][0: - 1]
    vec_lambda = vec_nu / (n - 1)
    sum_lambda = sum(vec_lambda)
    # histogram of the null distribution (weighted chi square)
    vec_hist = np.empty(num_hist)
    for i in range(num_hist):
        # chi-square(1) variates scaled by 2: square of N(0, sqrt(2)) draws
        vec_z = np.random.normal(0, np.sqrt(2), n - 1) ** 2
        vec_hist[i] = np.inner(vec_lambda, vec_z) - 2 * sum_lambda
    return np.sort(vec_hist)[::-1]
def extract_submat(mat_gram, num_m=None):
    """Randomly pick ``num_m`` indices from each half of the joint Gram
    matrix (X half, then Y half) and return the corresponding
    (2*num_m x 2*num_m) sub-Gram-matrix as a float array.

    NOTE: consumes ``random`` draws — results depend on the global RNG state.
    """
    n_total = mat_gram.shape[0]
    n = int(n_total / 2)
    if num_m is None:
        num_m = n - 1
    d = int(2 * num_m)
    # Sample without replacement, first from the X half then from the Y
    # half, keeping the call order stable so seeded runs are reproducible.
    idx_x = random.sample(range(0, n), num_m)
    idx_y = random.sample(range(n, n_total), num_m)
    idx_xy = idx_x + idx_y
    mat = np.empty((d, d))
    mat[:, :] = mat_gram[np.ix_(idx_xy, idx_xy)]
    return mat
def two_sample_test(mat_gram, alpha=0.05, num_m=None, num_test=1000):
    """Run the subsampled MMD two-sample test.

    Draws the null histogram once, then for each of ``num_test`` trials
    subsamples the Gram matrix, computes the MMD statistic, and records
    its p-value against the null.  Returns (p-values, rejection rate at
    level ``alpha``).
    """
    null_hist = hist_wchi(mat_gram)
    p_values = np.empty(num_test)
    for trial in range(num_test):
        sub_gram = extract_submat(mat_gram, num_m)
        mmd_value = n_mmd(sub_gram)
        p_values[trial] = len(np.where(null_hist > mmd_value)[0]) / len(null_hist)
    reject_rate = len(np.where(p_values < alpha)[0]) / num_test
    return p_values, reject_rate
# --- experiment configuration -------------------------------------------
# Which kernel families to evaluate in the run below.
pwgk = True
landscape = True
pssk = True
# Persistence-diagram degree, test level, sample count, subsample size.
CONST_PD = 1
CONST_ALPHA = 0.05
CONST_IID = 100
CONST_M = 20
# When True, a sample is compared against itself (type-I error check).
type_one = False
# Data family for this run: "lattice" or "matern".
main = "matern"
name_dir = "../data/" + main
# type of P and Q
if main == "lattice":
    list_list_pcd = [["square_014", "gauss_010"], ["square_017", "gauss_010"],
                     ["square_020", "gauss_010"]]
    CONST_PCD = 2
    CONST_SIDE = 20
    name_parameter = "pcd%s_side%s_iid%s" % (CONST_PCD, CONST_SIDE, CONST_IID)
else:  # matern
    list_list_pcd = [["type_0", "type_1"], ["type_0", "type_2"],
                     ["type_1", "type_2"]]
    CONST_PCD = 2
    CONST_LAMBDA = 100
    CONST_WIDTH = 1
    CONST_DISTANCE = 0.05
    name_parameter = "pcd%s_lambda%s_width%s_distance%s_iid%s" % (
        CONST_PCD, CONST_LAMBDA, CONST_WIDTH,
        str(CONST_DISTANCE).replace(".", ""), CONST_IID)
# Run the two-sample experiment for every (P, Q) pair of point-cloud types.
# For each pair: build/load the linear Gram matrix per kernel, optionally
# restrict to a single sample (type-I check), lift it to the chosen RKHS,
# and report the mean p-value and rejection rate.
for list_name_pcd in list_list_pcd:
    # make directory to save the Gram matrix
    name_dir_gram = "%s/%s_pd%s_%s_vs_%s" % (
        name_dir, name_parameter, CONST_PD, list_name_pcd[0],
        list_name_pcd[1])
    tda.os_mkdir(name_dir_gram)
    # compute/import parameters
    temp_v = tda.import_parameters(name_dir, name_parameter, list_name_pcd,
                                   CONST_IID, CONST_PD)
    min_birth, max_death, med_pers, max_pers, med_sigma = temp_v
    # import diagrams of P and Q
    list_diagram_xy = []
    for name_pcd in list_name_pcd:
        name_data = "%s_%s" % (name_parameter, name_pcd)
        diagram = PD(name_dir, name_data, CONST_IID, CONST_PD)
        list_diagram_xy.extend(diagram.data)
    print(main, list_name_pcd)
    if pwgk:
        # PWGK: sweep bandwidth scalings x weight functions.
        for name_large in ["", "_small", "_large"]:
            print("===============")
            for name_weight in ["one", "linear", "arctan"]:
                # define a kernel and a weight function
                if name_large == "_small":
                    sigma = med_sigma / 10
                elif name_large == "_large":
                    sigma = med_sigma * 10
                else:
                    sigma = med_sigma
                func_kernel = tda.function_kernel("Gaussian", sigma=sigma)
                func_weight = tda.function_weight(
                    name_weight, arc_c=med_pers, arc_p=5, lin_el=max_death)
                # compute/import the (k,w)-linear Gram matrix
                name_linear_pwgk = "%s/gram_mat_pwgk_%s%s_Linear.txt" % (
                    name_dir_gram, name_weight, name_large)
                if not os.path.exists(name_linear_pwgk):
                    # NOTE(review): this rebinds the module-level ``pwgk``
                    # feature flag to a PWGK object; the outer ``if pwgk:``
                    # keeps passing only because the object is truthy.
                    pwgk = PWGK(list_diagram_xy, func_kernel, func_weight,
                                sigma=sigma, name_rkhs="Linear",
                                approx=True)
                    mat_linear_pwgk = pwgk.gram_matrix()
                    np.savetxt(name_linear_pwgk, mat_linear_pwgk)
                else:
                    mat_linear_pwgk = np.loadtxt(name_linear_pwgk)
                # check type I error
                if type_one:
                    # Keep only the second sample's block: Q vs Q.
                    mat_linear_pwgk = mat_linear_pwgk[CONST_IID:2 * CONST_IID,
                                                      CONST_IID:2 * CONST_IID]
                else:
                    pass
                # define the Gram matrix on persistence diagrams
                name_rkhs = ["Linear", "Gaussian"][1]
                mat_gram_pwgk = tda.matrix_gram(
                    mat_linear_pwgk, name_rkhs)[0]
                # test result
                vec_p, num_reject = two_sample_test(
                    mat_gram_pwgk, CONST_ALPHA, CONST_M)
                print(name_weight, name_large)
                print(np.mean(vec_p), num_reject)
    if landscape:
        # compute/import the linear Gram matrix
        name_linear_landscape = "%s/gram_mat_landscape_Linear.txt" % (
            name_dir_gram)
        if not os.path.exists(name_linear_landscape):
            # NOTE(review): rebinds the ``landscape`` flag (see note above
            # for ``pwgk``); still truthy, so later iterations keep running.
            landscape = PL(list_diagram_xy, name_rkhs="Linear")
            mat_linear_land = landscape.gram_matrix()
            np.savetxt(name_linear_landscape, mat_linear_land)
        else:
            mat_linear_land = np.loadtxt(name_linear_landscape)
        # check type I error
        if type_one:
            mat_linear_land = mat_linear_land[CONST_IID:2 * CONST_IID,
                                              CONST_IID:2 * CONST_IID]
        else:
            pass
        # define the Gram matrix on persistence diagrams
        name_rkhs = ["Linear", "Gaussian"][0]
        mat_gram_landscape = tda.matrix_gram(
            mat_linear_land, name_rkhs)[0]
        # test result
        vec_p, num_reject = two_sample_test(
            mat_gram_landscape, CONST_ALPHA, CONST_M)
        print("===============")
        print("landscape")
        print(np.mean(vec_p), num_reject)
    if pssk:
        # PSSK: sweep the same three bandwidth scalings.
        for name_large in ["", "_small", "_large"]:
            if name_large == "_small":
                sigma_pssk = med_sigma / 10
            elif name_large == "_large":
                sigma_pssk = med_sigma * 10
            else:
                sigma_pssk = med_sigma
            sigma_pssk /= np.sqrt(2)
            # compute/import the linear Gram matrix
            name_linear_pssk = "%s/gram_mat_pssk%s_Linear.txt" % (
                name_dir_gram, name_large)
            if not os.path.exists(name_linear_pssk):
                # NOTE(review): rebinds the ``pssk`` flag (same pattern as
                # ``pwgk`` above).
                pssk = PSSK(list_diagram_xy, sigma_pssk, name_rkhs="Linear")
                mat_linear_pssk = pssk.gram_matrix()
                np.savetxt(name_linear_pssk, mat_linear_pssk)
            else:
                mat_linear_pssk = np.loadtxt(name_linear_pssk)
            # check type I error
            if type_one:
                mat_linear_pssk = mat_linear_pssk[CONST_IID:2 * CONST_IID,
                                                  CONST_IID:2 * CONST_IID]
            else:
                pass
            # define the Gram matrix on persistence diagrams
            name_rkhs = ["Linear", "Gaussian"][0]
            mat_gram_pssk = tda.matrix_gram(
                mat_linear_pssk, name_rkhs)[0]
            # test result
            vec_p, num_reject = two_sample_test(
                mat_gram_pssk, CONST_ALPHA, CONST_M)
            print("pssk", name_large)
            print(np.mean(vec_p), num_reject)
|
from __future__ import absolute_import
import warnings
from .cuhk01 import CUHK01
from .cuhk03 import CUHK03
from .dukemtmc import DukeMTMC
from .market1501 import Market1501
from .viper import VIPeR
from .veri776 import Veri776
from .vehicleid import VehicleID
# Registry mapping dataset name -> dataset class, used by init_dataset().
# NOTE(review): CUHK01, VIPeR, Veri776 and VehicleID are imported above but
# not registered here — confirm whether they should be added to the factory.
__factory = {
    'market1501': Market1501,
    'cuhk03': CUHK03,
    'dukemtmc': DukeMTMC,
}
def get_names():
    """Return the registered dataset names (a live dict keys view)."""
    return __factory.keys()
def init_dataset(name, *args, **kwargs):
    """Instantiate the dataset class registered under *name*.

    Extra positional and keyword arguments are forwarded to the dataset
    constructor.

    Raises:
        KeyError: if *name* is not a registered dataset.
    """
    # Idiom fix: membership on the dict itself — ``name not in __factory`` —
    # avoids the redundant .keys() call and is the Pythonic form.
    if name not in __factory:
        raise KeyError("Unknown datasets: {}".format(name))
    return __factory[name](*args, **kwargs)
|
import torch.nn as nn
from deep_depth_transfer.data.cameras_calibration import CamerasCalibration
from .inverse_depth_smoothness_loss import InverseDepthSmoothnessLoss
from .pose_loss import PoseLoss
from .pose_metric import PoseMetric
from .spatial_photometric_consistency_loss import SpatialPhotometricConsistencyLoss
from .temporal_photometric_consistency_loss import TemporalPhotometricConsistencyLoss
class UnsupervisedCriterion(nn.Module):
    """Combined unsupervised depth/pose criterion for a stereo pair across
    two consecutive frames.

    Aggregates spatial (left/right) and temporal (current/next) photometric
    consistency losses, plus optional inverse-depth smoothness and pose
    losses, and stores their sum under the ``"loss"`` key.
    """

    def __init__(self,
                 cameras_calibration: CamerasCalibration,
                 lambda_position=0.01,
                 lambda_angle=0.1,
                 lambda_s=0.85,
                 lambda_smoothness=1.0,
                 smooth_loss=True,
                 pose_loss=True
                 ):
        super(UnsupervisedCriterion, self).__init__()
        self._spatial_consistency_loss = SpatialPhotometricConsistencyLoss(
            lambda_s,
            cameras_calibration.left_camera_matrix,
            cameras_calibration.right_camera_matrix,
            cameras_calibration.transform_from_left_to_right,
        )
        self._temporal_consistency_loss = TemporalPhotometricConsistencyLoss(
            cameras_calibration.left_camera_matrix,
            cameras_calibration.right_camera_matrix,
            lambda_s,
        )
        # Optional terms: a None attribute disables the term in forward().
        if smooth_loss:
            self._inverse_depth_smoothness_loss = InverseDepthSmoothnessLoss(
                lambda_smoothness,
            )
        else:
            self._inverse_depth_smoothness_loss = None
        if pose_loss:
            self._pose_loss = PoseLoss(
                lambda_position,
                lambda_angle,
                cameras_calibration.transform_from_left_to_right
            )
        else:
            self._pose_loss = None
        self._pose_metric = PoseMetric()
        self._cameras_calibration = cameras_calibration

    def get_cameras_calibration(self):
        """Return the CamerasCalibration this criterion was built with."""
        return self._cameras_calibration

    def forward(self, images, depths, transformations):
        """Compute all loss terms and their total.

        Each argument is a 4-tuple ordered (left current, left next,
        right current, right next).  Returns a dict of named loss terms
        with their sum under ``"loss"``.
        """
        left_current_image, left_next_image, right_current_image, right_next_image = images
        left_current_depth, left_next_depth, right_current_depth, right_next_depth = depths
        left_current_transform, left_next_transform, right_current_transform, right_next_transform = transformations
        losses = {}
        current_spatial_loss = self._spatial_consistency_loss(
            left_current_image,
            right_current_image,
            left_current_depth,
            right_current_depth
        )
        next_spatial_loss = self._spatial_consistency_loss(
            left_next_image,
            right_next_image,
            left_next_depth,
            right_next_depth
        )
        losses["spatial_loss"] = (current_spatial_loss + next_spatial_loss) / 2.
        if self._inverse_depth_smoothness_loss is not None:
            smoothness_losses = [self._inverse_depth_smoothness_loss(x, y) for x, y in zip(depths, images)]
            losses["smooth_loss"] = sum(smoothness_losses) / len(smoothness_losses)
        left_temporal_loss = self._temporal_consistency_loss(
            left_current_image,
            left_next_image,
            left_current_depth,
            left_next_depth,
            left_current_transform[1],
            left_current_transform[0],
            left_next_transform[1],
            left_next_transform[0]
        )
        right_temporal_loss = self._temporal_consistency_loss(
            right_current_image,
            right_next_image,
            right_current_depth,
            right_next_depth,
            right_current_transform[1],
            right_current_transform[0],
            right_next_transform[1],
            right_next_transform[0]
        )
        losses["temporal_loss"] = (left_temporal_loss + right_temporal_loss) / 2.
        if self._pose_loss is not None:
            current_pose_loss = self._pose_loss(
                left_current_transform[1],
                left_current_transform[0],
                right_current_transform[1],
                right_current_transform[0],
            )
            next_pose_loss = self._pose_loss(
                left_next_transform[1],
                left_next_transform[0],
                right_next_transform[1],
                right_next_transform[0],
            )
            # NOTE(review): key kept as "poss_loss" (sic) so downstream
            # consumers/loggers keep working; likely meant "pose_loss".
            losses["poss_loss"] = (current_pose_loss + next_pose_loss) / 2.
        # Bug fix: the original set losses["loss"] = 0 and then accumulated
        # while iterating losses.values().  Because "loss" was already in the
        # dict, the running total was added to itself when the iteration
        # reached it, doubling the reported total.  Sum the components
        # first, then store the total.
        losses["loss"] = sum(losses.values())
        return losses
|
#Write the python program to find the greatest number among the three numbers.
#Solution:
def greatest(num1, num2, num3):
    """Return the greatest of the three numbers."""
    # Idiom fix: the chained >= comparisons re-implement the built-in max();
    # max() returns the same result for every input, including ties.
    return max(num1, num2, num3)
# Read the three candidate numbers, then report the winner.
prompts = ("Enter the first number:", "Enter the second number:", "Enter the third number:")
num1, num2, num3 = (int(input(p)) for p in prompts)
largest = greatest(num1, num2, num3)
print(largest, "is the largest number among the given input numbers.")
'''Output:
Enter the first number:20
Enter the second number:10
Enter the third number:15
20 is the largest number among the given input numbers.
Process finished with exit code 0''' |
def multiples(s1, s2, s3):
    """Return every number in [1, s3) divisible by both *s1* and *s2*.

    Bug fix: the original used ``xrange``, which only exists in Python 2
    and raises NameError on Python 3; ``range`` has identical semantics
    here (and still works on Python 2).
    """
    return [a for a in range(1, s3) if not (a % s1 or a % s2)]
|
# Membership-test demo: report whether 5 occurs in x.
x = [1, 2, 3, 4]
# Idiom fix: printing the boolean expression directly replaces the
# if/else that printed True/False by hand — the output is identical.
print(5 in x)
|
import webbrowser
def sad_run():
    """Interactive menu for a sad mood: open a helpful link per choice,
    looping until the user picks option 5 (back to main menu)."""
    # Option number -> URL to open in the default browser.
    links = {
        1: 'https://www.youtube.com/watch?v=GOK1tKFFIQI',
        2: 'https://www.bustle.com/p/12-podcasts-to-help-anxiety-depression-whether-you-want-to-laugh-cry-find-a-way-to-unwind-15909570',
        3: 'https://fmovies.wtf/filter?sort=imdb%3Adesc&type%5B%5D=movie&subtitle%5B%5D=1',
        4: 'https://reedsy.com/discovery/blog/best-books-to-read-in-a-lifetime',
    }
    while True:
        print('1. Understand why you are sad')
        print('2. A list of great podcasts you might like')
        print('3. Here is a list of best hollywood movies of all time to boost your mood')
        print('4. Try reading these books might change your opinion ')
        print('5. Back to main menu ')
        opt = int(input())
        if opt == 5:
            return
        if opt in links:
            webbrowser.open(links[opt], new=0, autoraise=True)
        else:
            print('wrong option')
|
import torch
from torch import nn
class LinearAggregator(nn.Module):
    """Concatenate per-head logits and mix them with one linear layer.

    ``forward`` expects a list/tuple of ``n_heads`` tensors, each with
    ``num_labels`` values in its last dimension.
    """

    def __init__(self, num_labels, n_heads):
        super(LinearAggregator, self).__init__()
        # Maps the concatenated heads back down to num_labels outputs.
        self.aggregator = nn.Linear(num_labels * n_heads, num_labels)

    def forward(self, x):
        # Idiom fix: dim=-1 selects the last dimension directly; no need to
        # compute len(x[0].shape) - 1 by hand (equivalent for any rank).
        return self.aggregator(torch.cat(x, dim=-1))
|
import urllib.request
import re
# The function takes three parameters (1 - url, 2 - path and filename created
# when downloading, 3 - parsing-engine flag (True - parse with lxml, False - without libraries))
def GetScriptTag(url, pathName, lib):
    """Download *url* to *pathName* and return the page's <script> entries.

    Each entry is the script's ``src`` URL when the attribute is present,
    otherwise the inline script body.  Returns False when the download or
    the file write fails.
    """
    # open the page and grab the whole contents
    try:
        f = urllib.request.urlopen(url)
        text = f.read()
    except urllib.error.HTTPError:
        return False
    except urllib.error.URLError:
        return False
    # write the downloaded contents to disk
    try:
        f = open(pathName, 'wb')
        f.write(text)
        f.close()
    except IOError:
        return False
    if lib:
        # parse the page data using the lxml library
        from lxml import html
        # pull out every <script> tag with its attributes
        arScripts = html.fromstring(text).xpath('//script')
        for i in range(0, len(arScripts)):
            # read the contents of the src attribute
            srcVal = arScripts[i].attrib.get("src")
            # when src is empty, fall back to the tag's inline content
            arScripts[i] = srcVal if srcVal else arScripts[i].text_content()
    else:
        # parse the page data without external libraries
        # grab the content between "<script" and "</script"
        arScripts = re.findall(r'<script(.*?)</script', str(text), re.DOTALL)
        for i in range(0, len(arScripts)):
            # look for the "src" substring
            srcPos = arScripts[i].find('src')
            if srcPos == -1:
                # take everything after the end of the opening tag
                arScripts[i] = arScripts[i][arScripts[i].find('>') + 1:]
            else:
                # take the whole contents of the src attribute
                arScripts[i] = arScripts[i][srcPos:]
                # strip the quotes and "\\"
                arDistr = arScripts[i].split(('"' if bool(arScripts[i].find('"') + 1) else "'"))
                arScripts[i] = arDistr[1].replace("\\", "") if (len(arDistr) == 3) else ""
    return arScripts
print(GetScriptTag('http://sen.mcart.ru/test.php', '/home/python_tests/files/index.html', True)) |
# coding: utf-8
# In[1]:
import cv2
import numpy as np
import imutils
import argparse
# In[2]:
# Load the sample image and show it; waitKey(0) blocks until a key press.
img = cv2.imread('./datasets/flower3.jpg')
cv2.imshow("Original_Image", img)
cv2.waitKey(0)
# In[3]:
#Splitting RGB components of an image
#Individual channel investigation helps in understanding edge detection and thresholding
(B, G, R) = cv2.split(img) # as RGB image is stored in reverse channel order
#Showing individual channels in grayscale format
cv2.imshow("Red_Component", R)
cv2.imshow('Blue_Component', B)
cv2.imshow('Green_Component', G)
cv2.waitKey(0)
# In[4]:
#Merging the channels back
merged = cv2.merge([B, G, R])
cv2.imshow("Merged_Image", merged)
cv2.waitKey(0)
# In[5]:
#Merging the channels in reverse order - changes the color component of original image
cv2.imshow("Merged_Image2", cv2.merge([R, G, B]))
cv2.waitKey(0)
# In[8]:
#Visualize each channel in its corresponding color
# A zero plane the size of one channel; merged into the unused BGR slots.
zeros = np.zeros(img.shape[:2], dtype='uint8')
cv2.imshow("Red", cv2.merge([zeros, zeros, R]))
cv2.imshow("Green", cv2.merge([zeros, G, zeros]))
cv2.imshow("Blue", cv2.merge([B, zeros, zeros]))
cv2.waitKey(0)
# In[ ]:
|
#!/usr/bin/python
import sys, re
import argparse
from dependency_input import Dependency
from operator import itemgetter
import numpy as np
def smart_open(fname, mode='r'):
    """Open *fname*, transparently using gzip when the name ends in .gz."""
    if not fname.endswith('.gz'):
        return open(fname, mode)
    import gzip
    # Using max compression (9) by default seems to be slow.
    # Let's try using the fastest.
    return gzip.open(fname, mode, 1)
# NOTE: Python-2-only script — it uses the ``print >> sys.stderr`` statement
# syntax, which is a SyntaxError on Python 3.
if __name__ == "__main__":
    # CLI: sample_size dim_size trainmap_name (all positional).
    argparser = argparse.ArgumentParser()
    argparser.add_argument("argv", metavar='A', nargs='+')
    args = argparser.parse_args()
    sampleSize = int(args.argv[0])
    dimSize = int(args.argv[1])
    trainmapName = args.argv[2]
    print >> sys.stderr, str(sampleSize), str(dimSize), trainmapName
    # Progress markers: a dot every dotCutoff lines, a newline every
    # dotCutoff * dpl lines.
    dotCutoff = 100000
    dpl = 100
    nlCutoff = dotCutoff * dpl
    lCount = 0
    np.random.seed(1234)
    print >> sys.stderr, "Loading training instances"
    # Row 0 is a header (sample count, dimension); instances start at row 1.
    fp = np.memmap(trainmapName, mode='w+', dtype='int32', shape=(sampleSize+1, dimSize))
    fp[0,0] = sampleSize
    fp[0,1] = dimSize
    lCount = 0
    for line in sys.stdin:
        line = line.decode('utf-8').strip()
        tokens = line.split()
        try:
            # First token is the label (stored last); the rest are features.
            fp[lCount+1,:-1] = [int(t) for t in tokens[1:]]
            fp[lCount+1,-1] = int(tokens[0])
        except KeyError:
            # NOTE(review): int()/slice assignment raise ValueError, not
            # KeyError — malformed lines are probably not skipped as
            # intended; confirm which exception was meant here.
            continue
        # trace progress
        lCount += 1
        if lCount % dotCutoff == 0:
            sys.stderr.write('.')
        if lCount % nlCutoff == 0:
            print >> sys.stderr, "%d" % lCount
    print >> sys.stderr, "[%d instances in total]" % lCount
    print >> sys.stderr, "Shuffling training instances started ..."
    # Shuffle only the data rows; row 0 stays the header.
    np.random.shuffle(fp[1:])
    print >> sys.stderr, "Shuffling training instances done, now dumping ..."
    fp.flush()
    del fp
    print >> sys.stderr, "COMPLETED!"
|
# -*- coding: utf-8 -*-
from django.forms import DateTimeInput
from django.utils.translation import gettext as _
class BootstrapDateTimePickerInput(DateTimeInput):
    """DateTimeInput rendered through the bootstrap-datetimepicker template,
    wiring the input element to its picker widget via ``data-target``."""

    template_name = 'widgets/bootstrap_datetimepicker.html'

    def get_context(self, name, value, attrs):
        """Inject the picker id and bootstrap classes into the widget context."""
        picker_id = 'datetimepicker_{name}'.format(name=name)
        attrs = dict() if attrs is None else attrs
        attrs.update({
            'placeholder': _('Birthdate'),
            'data-target': '#{id}'.format(id=picker_id),
            'class': 'form-control datetimepicker-input',
        })
        context = super().get_context(name, value, attrs)
        context['widget']['datetimepicker_id'] = picker_id
        return context
|
from django.db import models
# Create your models here.
class User(models.Model):
    """Minimal user record: a display name plus an integer status flag."""
    # Display name for this user (capped at 15 characters).
    name = models.CharField(max_length=15, default="DefaultUserName")
    # NOTE(review): integer status code defaulting to 0 — the meaning of the
    # values is not visible here; confirm the full set against callers.
    status = models.IntegerField(default=0)
    def __str__(self):
        # Used by the Django admin and shell representations.
        return str(self.name)
|
import os, sys, re
from bs4 import BeautifulSoup
# NOTE: Python-2-only script (uses the ``print`` statement).
# Pass 1: for each article, extract the <s> sentences from every
# summaryNXMLm.txt file into ROUGE reference files.
for articleNumber in range(1,60):
    filename = './temp/summary'+str(articleNumber)+'XML'
    filenumber = 1
    print filename+str(filenumber)+'.txt'
    # Consume consecutive summary files until one is missing.
    while os.path.exists(filename+str(filenumber)+'.txt'):
        completeFile = open(filename+str(filenumber)+'.txt', 'r').read()
        summaryFile = open('./SummarizationRouge/reference/news'+str(articleNumber)+'_reference'+str(filenumber)+'.txt', 'w')
        soup = BeautifulSoup(completeFile)
        for node in soup.findAll('s'):
            # One sentence per output line, newlines stripped.
            text = node.text.replace('\n','')
            text = text.strip()
            summaryFile.write(text+'\n')
        summaryFile.close()
        filenumber += 1
# Pass 2: walk every "d<number>..." folder under argv[1] and build a raw
# and a normalized (lowercased, punctuation-stripped) sentence file each.
articleNumber = 1
all_folders = [f for f in os.listdir(sys.argv[1]) if re.match(r'd[0-9]+.*', f)]
for folder in all_folders:
    raw = open('./temp/combinedRaw'+str(articleNumber) + '.txt', 'w')
    processed = open('./temp/combinedProcessed' + str(articleNumber) + '.txt', 'w')
    for filename in os.listdir(sys.argv[1]+'/'+folder):
        data = open(sys.argv[1]+'/'+folder+'/'+filename, 'r').read()
        soup = BeautifulSoup(data)
        for node in soup.findAll('text'):
            for text in node.findAll('s'):
                raw.write(text.text+'\n')
                # Normalize: ASCII-only, no newlines/periods, collapse
                # punctuation and repeated spaces, lowercase.
                text = text.text.encode('ascii', 'ignore')
                text = text.decode('utf-8')
                text = text.replace('\n', '')
                text = text.replace('.', '')
                text = re.sub("[^\w\s]|_","", text)
                text = re.sub(' +',' ',text)
                text = text.lower()
                processed.write(text+'\n')
    raw.close()
    processed.close()
    articleNumber += 1
|
import math
import random
def check(n):
    """Classify *n* as "Prime", "Composite", or neither.

    Trial-divides by every integer up to sqrt(n).
    """
    if n < 2:
        # 1 is a unit and 0/negatives are neither prime nor composite.
        # Bug fixes: the original misspelled the message ("Nither") and
        # fell through to "Prime" for n < 1.
        return "Neither prime nor composite"
    for i in range(2, int(math.sqrt(n)) + 1):
        if n % i == 0:
            return "Composite"
    return "Prime"
# Pick a random number in [100, 1000] and report its classification.
x = random.randint(100, 1000)
print("The number {} is {}".format(x, check(x)))
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.