#!/usr/bin/python3
"""Experimental script comparing performance of pairing heap and smooth heap
as priority queue in Dijkstra's algorithm. Algorithm is run on randomly generated
10-regular graphs of variable size.
Results are stored as .csv files in ../data folder and plots of results in ../plots"""
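# Note: the heap implementations imported below are assumed to expose make_heap(),
# insert(node) -> (comparisons, links), delete_min() -> (node, comparisons, links),
# decrease_key(node, delta) -> (comparisons, links) and pointer_updates(), as used in the main loop.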
import networkx as nx
import random
import matplotlib.pyplot as plt
import psutil
import csv
import os, sys, inspect
# ensuring imports from parent directory work
current_dir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parent_dir = os.path.dirname(current_dir)
sys.path.insert(0, parent_dir)
from node import Node
from pairing_heap import PairingHeap
COUNT_TYPE_BOTH = 0
COUNT_TYPE_LINKS = -1
COUNT_TYPE_COMPS = -2
TYPES = {21: "Pairing", 22: "Smooth", 23: "Slim", 25: "Pairing Lazy", 27: "Pairing Slim", 28: "Pairing Smooth"}
MAX_TYPE_KEY = max(TYPES.keys())
FIG_LABELS = ["comparisons", "links"]
# colours from https://xkcd.com/color/rgb/
COLOURS = {21:'xkcd:fire engine red', 22:'xkcd:sea green', 23:'xkcd:electric blue',
25:"xkcd:mauve", 27: "xkcd:pink", 28: "xkcd:orange"}
SHADE_COLOURS = {21:'xkcd:fire engine red', 22:'xkcd:sea green', 23:'xkcd:electric blue',
25:"xkcd:mauve", 27: "xkcd:pink", 28: "xkcd:orange"}
NUMBER_TESTS = 10 # number of tests to run
TEST_SIZE = 500
EDGE_PROBABILITY = 0.05
WEIGHT_RANGE = 10000
def plot_avg_counts_links(avgCounts):
MARKERS_LINK = {21:"o", 12:"D", 22:"D", 23: "X", 25: "*", 27: "<", 28: "d"}
plt.figure('avg number of operations in Dijkstra\'s algorithm')
deviations = [ 10+round(TEST_SIZE*20*factor * EDGE_PROBABILITY) for factor in range(1, 21, 1)]
for k in TYPES.keys():
avgLinks = [acounts[k] for acounts in avgCounts[1]]
maxLinks = [acounts[k] for acounts in avgCounts[3]]
minLinks = [acounts[k] for acounts in avgCounts[5]]
plt.plot(deviations, avgLinks, color=COLOURS[k], linestyle="--", marker=MARKERS_LINK[k], markerfacecolor=COLOURS[k], markersize=9, markeredgewidth=1, markeredgecolor='black', label=TYPES[k] + " links")
plt.fill_between(deviations, minLinks, maxLinks, color=SHADE_COLOURS[k], alpha=.3)
plt.xlabel('Graph size', fontsize=39)
plt.ylabel('Avg. number of links / size', fontsize=39)
plt.xticks(fontsize=30)
plt.yticks(fontsize=30)
plt.rc('legend', fontsize=39) # using a size in points
plt.legend()
plt.grid(True)
figure = plt.gcf() # get current figure
figure.set_size_inches(16, 18) # set figure's size manually to full screen
plt.savefig(r'C:\Users\Admin\PycharmProjects\smooth-heap-pub\plots\paper-dijkstra2-links.svg', bbox_inches='tight') # bbox_inches removes extra white spaces
plt.legend(loc='best')
plt.show()
def plot_avg_counts_comps(avgCounts):
MARKERS_COMP = {21:"o", 12:"d", 22:"^", 23:"p", 25:"s", 27: ".", 28: ">"}#https://matplotlib.org/3.1.1/api/markers_api.html
plt.figure('avg number of comparisons in Dijkstra\'s algorithm')
deviations = [ 10+round(TEST_SIZE*20*factor * EDGE_PROBABILITY) for factor in range(1, 21, 1)]
for k in TYPES.keys():
avgComps = [acounts[k] for acounts in avgCounts[0]]
maxComps = [acounts[k] for acounts in avgCounts[2]]
minComps = [acounts[k] for acounts in avgCounts[4]]
plt.plot(deviations, avgComps, color=COLOURS[k], linestyle="-", marker=MARKERS_COMP[k], markerfacecolor=COLOURS[k], markersize=9, markeredgewidth=1, markeredgecolor='black', label=TYPES[k] + " comparisons")
plt.fill_between(deviations, minComps, maxComps, color=SHADE_COLOURS[k], alpha=.3)
plt.xlabel('Graph size', fontsize=39)
plt.ylabel('Avg. number of comparisons / size', fontsize=39)
plt.xticks(fontsize=30)
plt.yticks(fontsize=30)
plt.rc('legend',fontsize=30) # using a size in points
plt.legend()
plt.grid(True)
figure = plt.gcf() # get current figure
figure.set_size_inches(16, 18) # set figure's size manually to full screen
plt.savefig(r'C:\Users\Admin\PycharmProjects\smooth-heap-pub\plots\paper-dijkstra2-comps.svg', bbox_inches='tight') # bbox_inches removes extra white spaces
plt.legend(loc='best')
plt.show()
def plot_pointer_updates(avgCounts):
"""generates and saves plot of results"""
# colours from https://xkcd.com/color/rgb/
MARKERS_POINTERS = {21:"o", 12:"d", 22:"^", 23:"p", 25:"s", 27: ".", 28: ">"}#https://matplotlib.org/3.1.1/api/markers_api.html
deviations = [factor * EDGE_PROBABILITY for factor in range(1, 21, 1)]
plt.figure('avg number of pointer updates in Dijkstra\'s algorithm')
for k in TYPES.keys():
avgPointers = [acounts[k] for acounts in avgCounts[0]]
maxPointers = [acounts[k] for acounts in avgCounts[1]]
minPointers = [acounts[k] for acounts in avgCounts[2]]
plt.plot(deviations, avgPointers, color=COLOURS[k], linestyle="--", marker=MARKERS_POINTERS[k], markerfacecolor=COLOURS[k], markersize=9, markeredgewidth=1, markeredgecolor='black', label=TYPES[k] + " pointer updates")
plt.fill_between(deviations, minPointers, maxPointers, color=SHADE_COLOURS[k], alpha=.3)
plt.xlabel('Edge probability', fontsize=39)
plt.ylabel('Avg. number of pointer updates / size', fontsize=39)
plt.xticks(fontsize=30)
plt.yticks(fontsize=30)
plt.rc('legend',fontsize=30) # using a size in points
plt.legend()
plt.grid(True)
figure = plt.gcf() # get current figure
figure.set_size_inches(16, 18) # set figure's size manually to full screen
plt.savefig(r'C:\Users\Admin\PycharmProjects\smooth-heap-pub\plots\pointer-updates-dijkstra2.svg', bbox_inches='tight') # bbox_inches removes extra white spaces
plt.legend(loc='best')
plt.show()
def export_results(xs, results, countType, heapTypes, filename="dijkstra2-lazy"):
# parse data as randomness parameter; counts per heap type
if countType == COUNT_TYPE_BOTH:
with open(r"C:\Users\Admin\PycharmProjects\smooth-heap-pub\data\\" + filename + '-comps.csv', 'w', newline='') as csvfile:
csvwriter = csv.writer(csvfile, delimiter=';', quotechar='|', quoting=csv.QUOTE_MINIMAL)
csvwriter.writerow(["randomness parameter value"] + [name for name in TYPES.values()])
csvwriter.writerow(["randomness parameter value"] + [name for name in TYPES.keys()])
for i in range(len(results[0])):
row = [xs[i]] + [results[0][i][k] for k in TYPES.keys()]
csvwriter.writerow(row)
with open(r"C:\Users\Admin\PycharmProjects\smooth-heap-pub\data\\" + filename + '-links.csv', 'w', newline='') as csvfile:
csvwriter = csv.writer(csvfile, delimiter=';', quotechar='|', quoting=csv.QUOTE_MINIMAL)
csvwriter.writerow(["randomness parameter value"] + [name for name in TYPES.values()])
csvwriter.writerow(["randomness parameter value"] + [name for name in TYPES.keys()])
for i in range(len(results[1])):
row = [xs[i]] + [results[1][i][k] for k in TYPES.keys()]
csvwriter.writerow(row)
else:
fn = r"C:\Users\Admin\PycharmProjects\smooth-heap-pub\data\\" + filename + '-links.csv' if countType == COUNT_TYPE_LINKS else r"C:\Users\Admin\PycharmProjects\smooth-heap-pub\data\\" + filename + '-comps.csv'
with open(fn, 'w', newline='') as csvfile:
csvwriter = csv.writer(csvfile, delimiter=';', quotechar='|', quoting=csv.QUOTE_MINIMAL)
csvwriter.writerow(["randomness parameter value"] + [name for name in TYPES.values()])
csvwriter.writerow(["randomness parameter value"] + [name for name in TYPES.keys()])
for i in range(len(results)):
row = [xs[i]]+[results[i][k] for k in TYPES.keys()]
csvwriter.writerow(row)
if __name__ == "__main__":
testOutputCount = []
avgLinksPerSize = []
avgCompsPerSize = []
avgPointersPerSize = []
maxLinksPerSize = []
maxCompsPerSize = []
maxPointersPerSize = []
minLinksPerSize = []
minCompsPerSize = []
minPointersPerSize = []
xs = [factor * EDGE_PROBABILITY for factor in range(1, 21, 1)]
for x in xs:
avgCountsLinks = [0 for _ in range(MAX_TYPE_KEY + 1)]
avgCountsComps = [0 for _ in range(MAX_TYPE_KEY + 1)]
avgCountsPointers = [0 for _ in range(MAX_TYPE_KEY + 1)]
maxCountsLinks = [0 for _ in range(MAX_TYPE_KEY + 1)]
maxCountsComps: list[int] = [0 for _ in range(MAX_TYPE_KEY + 1)]
maxCountsPointers = [0 for _ in range(MAX_TYPE_KEY + 1)]
minCountsLinks = [1000000000000 for _ in range(MAX_TYPE_KEY + 1)]
minCountsComps = [1000000000000 for _ in range(MAX_TYPE_KEY + 1)]
minCountsPointers = [1000000000000 for _ in range(MAX_TYPE_KEY + 1)]
for _ in range(NUMBER_TESTS):
# some nice graph generators here: https://networkx.github.io/documentation/stable/reference/generators.html
# initializing input
graph = nx.random_regular_graph(10,10+round(TEST_SIZE*x*20))
for (u, v) in graph.edges():
graph.edges[u, v]['w'] = random.randint(1, WEIGHT_RANGE)
for heapType in TYPES.keys():
for v in graph.nodes():
graph.nodes[v]['v'] = False # "visited" marker
linkCount = 0
compCount = 0
vertex2qnode = {}
dist = [888888888 for _ in range(len(graph.nodes()))]
prev = [None for _ in range(len(graph.nodes()))]
heap = PairingHeap(heapType, COUNT_TYPE_BOTH)
heap.make_heap()
# Dijkstra's algorithm
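# Sketch of the loop below: every vertex is inserted with its tentative distance
# (source = 0, all others at a large sentinel value); the minimum is then repeatedly
# extracted and its unvisited neighbours are relaxed via decrease_key, which here takes
# the *amount* by which the key shrinks (dist[v] - alt) rather than the new key itself.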
dist[0] = 0
for idx, v in enumerate(graph.nodes()):
qnode = Node(dist[v])
qnode.vertex = v
vertex2qnode[v] = qnode
(cc, lc) = heap.insert(qnode)
linkCount += lc
compCount += cc
for s in range(len(graph.nodes())):
(minNode, cc, lc) = heap.delete_min()
linkCount += lc
compCount += cc
if minNode is None:
raise Exception(
"delete-min on heap of type {} returned None with {} nodes removed".format(TYPES[heapType],
s))
u = minNode.vertex
uk = minNode.key
graph.nodes[u]['v'] = True # minNode has been visited
for idx, v in enumerate(graph.neighbors(u)):
alt = uk + graph.edges[u, v]['w']
if alt < dist[v] and not graph.nodes[v]['v']:
(cc, lc) = heap.decrease_key(vertex2qnode[v], dist[v] - alt)
linkCount += lc
compCount += cc
dist[v] = alt
prev[v] = u
pointers = heap.pointer_updates()
# tracking avg. results
TSIZE = 10+round(TEST_SIZE*x*20)
avgCountsLinks[heapType] += (linkCount / NUMBER_TESTS)/TEST_SIZE
avgCountsComps[heapType] += (compCount / NUMBER_TESTS)/TEST_SIZE
avgCountsPointers[heapType] += (pointers / NUMBER_TESTS)/TEST_SIZE
maxCountsLinks[heapType] = max(maxCountsLinks[heapType], linkCount/TEST_SIZE)
maxCountsComps[heapType] = max(maxCountsComps[heapType], compCount/TEST_SIZE)
maxCountsPointers[heapType] = max(maxCountsPointers[heapType], pointers/TEST_SIZE)
minCountsLinks[heapType] = min(minCountsLinks[heapType], linkCount/TEST_SIZE)
minCountsComps[heapType] = min(minCountsComps[heapType], compCount/TEST_SIZE)
minCountsPointers[heapType] = min(minCountsPointers[heapType], pointers/TEST_SIZE)
for heapType in TYPES.keys():
pid = os.getpid()
py = psutil.Process(pid)
memoryUse = py.memory_info()[0] / 2. ** 30 # memory use in GB
print(
"[{}] \t avgComp: {} \t avgLink: {} \t RAM: {} \t |V|={} \t |E|={}".format(
TYPES[heapType], avgCountsComps[heapType], avgCountsLinks[heapType], memoryUse, len(graph.nodes()), len(graph.edges())))
avgLinksPerSize += [avgCountsLinks]
avgCompsPerSize += [avgCountsComps]
avgPointersPerSize += [avgCountsPointers]
maxLinksPerSize += [maxCountsLinks]
maxCompsPerSize += [maxCountsComps]
maxPointersPerSize += [maxCountsPointers]
minLinksPerSize += [minCountsLinks]
minCompsPerSize += [minCountsComps]
minPointersPerSize += [minCountsPointers]
plot_avg_counts_links(
[avgCompsPerSize, avgLinksPerSize, maxCompsPerSize, maxLinksPerSize, minCompsPerSize, minLinksPerSize])
plot_avg_counts_comps(
[avgCompsPerSize, avgLinksPerSize, maxCompsPerSize, maxLinksPerSize, minCompsPerSize, minLinksPerSize])
plot_pointer_updates([avgPointersPerSize, maxPointersPerSize, minPointersPerSize])
export_results(xs, [avgCompsPerSize, avgLinksPerSize], COUNT_TYPE_BOTH, TYPES, "dijkstra2-lazy")
|
from direct.distributed.DistributedObjectAI import DistributedObjectAI
from direct.directnotify import DirectNotifyGlobal
class PiratesTutorialManagerAI(DistributedObjectAI):
notify = DirectNotifyGlobal.directNotify.newCategory('PiratesTutorialManagerAI')
def __init__(self, air):
DistributedObjectAI.__init__(self, air)
def requestTutorial(self): # Receive the client's request, allocate a zone for it and send it back via the enterTutorial field.
self.sendUpdate('enterTutorial', [
self.air.allocateZone()])
|
# -*- coding: utf-8 -*-
__author__ = 'lish'
from insertBSheet import insertbooksheet as insertbs
import urllib2,bs4,re
import urllib2
import urllib,random
import re,json,os
import sys,time
import ConfigParser
import requests,MySQLdb
import GenerateSheetCover as gsc
import sys
reload(sys)
sys.setdefaultencoding('utf8')
base_path=os.path.split( os.path.realpath( sys.argv[0] ) )[0]
def linkSQL(host,user,passwd,db):
global cursor,conn
conn=MySQLdb.connect(host=host,user=user,passwd=passwd,db=db,charset="utf8")
cursor = conn.cursor()
return conn
def updateBookSheet(sheetconts):
try:
# print sheetconts
insql='insert into con_booksheet(sheet_id,sheet_name,content_cnt,sheet_brief,create_time,online_status,creator_user_id,sheet_type,initial_uv) values (%s,%s,0,%s,now(),1,0,1,0)'
n = cursor.executemany(insql,sheetconts)
conn.commit()
except Exception, e:
raise e
def updateBookSheetCont(contentids,sheetid):
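# Rough flow: make sure every source_bid in contentids exists in con_book (triggering the
# remote update API for any that are missing), then replace this sheet's rows in
# con_booksheet_content with the matching book_ids.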
try:
update_book_url="http://readapi.imread.com/api/book/update?source_bid="
if len(contentids)==1:
contentids=contentids+contentids
nsql='select source_bid from con_book where source_bid in '+str(tuple(contentids)).replace("',)","')")
n = cursor.execute(nsql)
nbids=[]
for row in cursor.fetchall():
nbids.append(str(row[0]))
for check in contentids:
if check not in nbids:
url=update_book_url+str(check)
r = requests.get(url)
print check,r
bids=[]
bidslist=[]
i=0
sql='select book_id from con_book where source_bid in '+str(tuple(contentids))
n = cursor.execute(sql)
for row in cursor.fetchall():
create_time=str(time.strftime('%Y-%m-%d %H:%M:',time.localtime(time.time())))+str(random.randint(10,59))
bidslist.append(tuple([sheetid,str(row[0]),create_time]))
# print bidslist,'???'
desql="delete from ebook_con.con_booksheet_content where sheet_id="+str(sheetid)+" and content_type='1'"
n = cursor.execute(desql)
conn.commit()
insql="insert into ebook_con.con_booksheet_content(sheet_id,content_id,content_type,create_time) values(%s,%s,1,%s)"
n = cursor.executemany(insql,bidslist)
conn.commit()
except Exception,e :
print e
return None
def booklist():
host="100.98.73.21"
# host="rdsljqv187pt04s68726.mysql.rds.aliyuncs.com"
user="ebook"
passwd="4titbrVcvnP6LSFA"
conn=linkSQL(host,user,passwd,'ebook_con')
# tt=insertbs("192.168.0.34","ebook","ebook%$amRead",'ebook_con')
headers={
'Host':"www.cmread.com",
'User-Agent':"Mozilla/5.0 (Macintosh; Intel Mac OS X 10.11; rv:47.0) Gecko/20100101 Firefox/47.0",
'Accept':"application/json, text/javascript, */*; q=0.01",
'Content-Type':"application/x-www-form-urlencoded",
'X-Requested-With':"XMLHttpRequest",
# 'Referer':"http://www.cmread.com/u/bookSheetSquare",
'Connection':"keep-alive"
}
mgurl='http://www.cmread.com/u/bookSheetSquare'
req=urllib2.Request(mgurl,headers = headers)
mgcont = urllib2.urlopen(req).read()
soup=bs4.BeautifulSoup(mgcont,'lxml')
MostsharedCont=soup.findAll('div',{'id':"main_down_div_1"})[0]
sheetconts=[]
sheetids=['124413']
for para in MostsharedCont.findAll('a',{'style':"cursor:pointer;"}):
sheetid=re.findall('/u/bookSheet/(\d+)',para['href'])[0]
title= para.h1.findAll('div')[0].text
brief= para.p['title']
sheetids+=[sheetid]
sheetconts+=[(sheetid,title,brief)]
#check which book sheets are newly added
sql ='select sheet_id from con_booksheet where sheet_id in '+str(sheetids).replace('[','(').replace(']',')')
n = cursor.execute(sql)
isExistsheetids=[]
for row in cursor.fetchall():
isExistsheetids.append(str(row[0]))
# print isExistsheetids
newsheetids=list(set(sheetids)-set(isExistsheetids))
print 'Book sheets to be updated:',newsheetids
#drop book sheets that already exist
newsheetconts=[]
for sheetcont in sheetconts:
if sheetcont[0] in newsheetids:
newsheetconts+=[sheetcont]
# print newsheetconts
#update the con_booksheet table
updateBookSheet(newsheetconts)
for sheetid in sheetids:
if sheetid is not None and sheetid != '':
bids=[]
for page in range(1,10):
url = 'http://wap.cmread.com/hbc/f/sheetDetail?page='+str(page)+'&std='+str(sheetid)
# print url
content = urllib2.urlopen(url).read()
tbids=re.findall('/hbc/cover_file/\d+/(\d+)/\d+/cover', content)
# print tbids
try:
if tbids[0] not in bids :
bids+=tbids
else:
break
except Exception,e :
print e
updateBookSheetCont(bids,sheetid)
gsc.mergepicture(sheetid)
print bids
if __name__ == '__main__':
booklist()
|
import tensorflow as tf
import cv2
import skvideo.io
import skimage.transform
import numpy as np
import datetime
from cv2 import VideoWriter, VideoWriter_fourcc
import os
import glob
import shutil
"""
Author: CS6670 Group
Code structure inspired from carpedm20/DCGAN-tensorflow, GV1028/videogan
"""
def clear_genvideos(path):
for i in glob.glob(path):
os.remove(i)
'''
def save_gen(generated_images, n_ex = 36, epoch = 0, iter = 0):
for i in range(generated_images.shape[0]):
cv2.imwrite('/root/code/Video_Generation/gen_images/image_' + str(epoch) + '_' + str(iter) + '_' + str(i) + '.jpg', generated_images[i, :, :, :])
'''
def process_and_write_image(images,name):
images = np.array(images)
# print(images.shape)
images = (images + 1)*127.5
img = images[0,0,:,:]
# print(img.shape)
cv2.imwrite("./genvideos/" + name + ".jpg", img)
def read_and_process_video(files,size,nof):
print("Processesing Videos")
print((datetime.datetime.now())) #added for keeping track of time
#TODO: Pass in the 64,64,3 thing
videos = np.zeros((size,nof,64,64,3))
counter = 0
for file in files:
print(file)
vid = skvideo.io.vreader(file)
curr_frames = []
i = 0
for frame in vid:
frame = skimage.transform.resize(frame,[64,64])
curr_frames.append(frame)
i = i + 1
if i >= nof:
break
curr_frames = np.array(curr_frames)
curr_frames = curr_frames*255.0
curr_frames = curr_frames/127.5 - 1
print("Shape of frames: {0}".format(curr_frames.shape))
#TODO: This should rely on the passed in (32,64,64,3) thing imo
videos[counter,:,:,:,:] = curr_frames[0:nof]
counter = counter + 1
return videos.astype('float32')
def process_and_write_video(videos,name):
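# Expects generated videos scaled to [-1, 1]; reshapes them to (N, 32, 64, 64, 3) and
# rescales each frame back to [0, 255] before writing a 24 FPS AVI per clip to ./genvideos/.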
videos =np.array(videos)
width = 64
height = 64
FPS = 24
fourcc = VideoWriter_fourcc(*'MP42')
video = VideoWriter('./genvideos/'+name+'.avi', fourcc, float(FPS), (width, height))
videos = np.reshape(videos,[-1,32,64,64,3])
for i in range(videos.shape[0]):
vid = videos[i,:,:,:,:]
vid = (vid + 1)*127.5
for j in range(vid.shape[0]):
frame = vid[j,:,:,:]
video.write(frame.astype(np.uint8)) # VideoWriter expects uint8 frames
video.release()
|
USERS = (
{'username': 'Timmy', 'password': 'password'},
{'username': 'Johny', 'password': 'Hf7FAbf6'},
{'username': 'Alice', 'password': 'alice'},
{'username': 'Roger', 'password': 'pass'},
{'username': 'Simon', 'password': 'says'},
{'username': 'Admin', 'password': 'ads78adsg7dasga'}
)
class Validator(object): # Validator class is hidden in this kata
@staticmethod
def login(username, password):
for user in USERS:
if user['username'] == username and user['password'] == password:
return 'Successfully Logged in!'
return 'Wrong username or password!'
# solution is below this line
def validate(username, password):
return Validator().login(username, password)
|
import math
from typing import Any, Callable, Dict, List, Optional, Tuple, Type, Union
import PIL.Image
import torch
from torch.utils._pytree import tree_flatten, tree_unflatten, TreeSpec
from torchvision import transforms as _transforms, tv_tensors
from torchvision.transforms import _functional_tensor as _FT
from torchvision.transforms.v2 import AutoAugmentPolicy, functional as F, InterpolationMode, Transform
from torchvision.transforms.v2.functional._geometry import _check_interpolation
from torchvision.transforms.v2.functional._meta import get_size
from torchvision.transforms.v2.functional._utils import _FillType, _FillTypeJIT
from ._utils import _get_fill, _setup_fill_arg, check_type, is_pure_tensor
ImageOrVideo = Union[torch.Tensor, PIL.Image.Image, tv_tensors.Image, tv_tensors.Video]
class _AutoAugmentBase(Transform):
def __init__(
self,
*,
interpolation: Union[InterpolationMode, int] = InterpolationMode.NEAREST,
fill: Union[_FillType, Dict[Union[Type, str], _FillType]] = None,
) -> None:
super().__init__()
self.interpolation = _check_interpolation(interpolation)
self.fill = fill
self._fill = _setup_fill_arg(fill)
def _extract_params_for_v1_transform(self) -> Dict[str, Any]:
params = super()._extract_params_for_v1_transform()
if isinstance(params["fill"], dict):
raise ValueError(f"{type(self).__name__}() can not be scripted for when `fill` is a dictionary.")
return params
def _get_random_item(self, dct: Dict[str, Tuple[Callable, bool]]) -> Tuple[str, Tuple[Callable, bool]]:
keys = tuple(dct.keys())
key = keys[int(torch.randint(len(keys), ()))]
return key, dct[key]
def _flatten_and_extract_image_or_video(
self,
inputs: Any,
unsupported_types: Tuple[Type, ...] = (tv_tensors.BoundingBoxes, tv_tensors.Mask),
) -> Tuple[Tuple[List[Any], TreeSpec, int], ImageOrVideo]:
flat_inputs, spec = tree_flatten(inputs if len(inputs) > 1 else inputs[0])
needs_transform_list = self._needs_transform_list(flat_inputs)
image_or_videos = []
for idx, (inpt, needs_transform) in enumerate(zip(flat_inputs, needs_transform_list)):
if needs_transform and check_type(
inpt,
(
tv_tensors.Image,
PIL.Image.Image,
is_pure_tensor,
tv_tensors.Video,
),
):
image_or_videos.append((idx, inpt))
elif isinstance(inpt, unsupported_types):
raise TypeError(f"Inputs of type {type(inpt).__name__} are not supported by {type(self).__name__}()")
if not image_or_videos:
raise TypeError("Found no image in the sample.")
if len(image_or_videos) > 1:
raise TypeError(
f"Auto augment transformations are only properly defined for a single image or video, "
f"but found {len(image_or_videos)}."
)
idx, image_or_video = image_or_videos[0]
return (flat_inputs, spec, idx), image_or_video
def _unflatten_and_insert_image_or_video(
self,
flat_inputs_with_spec: Tuple[List[Any], TreeSpec, int],
image_or_video: ImageOrVideo,
) -> Any:
flat_inputs, spec, idx = flat_inputs_with_spec
flat_inputs[idx] = image_or_video
return tree_unflatten(flat_inputs, spec)
def _apply_image_or_video_transform(
self,
image: ImageOrVideo,
transform_id: str,
magnitude: float,
interpolation: Union[InterpolationMode, int],
fill: Dict[Union[Type, str], _FillTypeJIT],
) -> ImageOrVideo:
fill_ = _get_fill(fill, type(image))
if transform_id == "Identity":
return image
elif transform_id == "ShearX":
# magnitude should be arctan(magnitude)
# official autoaug: (1, level, 0, 0, 1, 0)
# https://github.com/tensorflow/models/blob/dd02069717128186b88afa8d857ce57d17957f03/research/autoaugment/augmentation_transforms.py#L290
# compared to
# torchvision: (1, tan(level), 0, 0, 1, 0)
# https://github.com/pytorch/vision/blob/0c2373d0bba3499e95776e7936e207d8a1676e65/torchvision/transforms/functional.py#L976
return F.affine(
image,
angle=0.0,
translate=[0, 0],
scale=1.0,
shear=[math.degrees(math.atan(magnitude)), 0.0],
interpolation=interpolation,
fill=fill_,
center=[0, 0],
)
elif transform_id == "ShearY":
# magnitude should be arctan(magnitude)
# See above
return F.affine(
image,
angle=0.0,
translate=[0, 0],
scale=1.0,
shear=[0.0, math.degrees(math.atan(magnitude))],
interpolation=interpolation,
fill=fill_,
center=[0, 0],
)
elif transform_id == "TranslateX":
return F.affine(
image,
angle=0.0,
translate=[int(magnitude), 0],
scale=1.0,
interpolation=interpolation,
shear=[0.0, 0.0],
fill=fill_,
)
elif transform_id == "TranslateY":
return F.affine(
image,
angle=0.0,
translate=[0, int(magnitude)],
scale=1.0,
interpolation=interpolation,
shear=[0.0, 0.0],
fill=fill_,
)
elif transform_id == "Rotate":
return F.rotate(image, angle=magnitude, interpolation=interpolation, fill=fill_)
elif transform_id == "Brightness":
return F.adjust_brightness(image, brightness_factor=1.0 + magnitude)
elif transform_id == "Color":
return F.adjust_saturation(image, saturation_factor=1.0 + magnitude)
elif transform_id == "Contrast":
return F.adjust_contrast(image, contrast_factor=1.0 + magnitude)
elif transform_id == "Sharpness":
return F.adjust_sharpness(image, sharpness_factor=1.0 + magnitude)
elif transform_id == "Posterize":
return F.posterize(image, bits=int(magnitude))
elif transform_id == "Solarize":
bound = _FT._max_value(image.dtype) if isinstance(image, torch.Tensor) else 255.0
return F.solarize(image, threshold=bound * magnitude)
elif transform_id == "AutoContrast":
return F.autocontrast(image)
elif transform_id == "Equalize":
return F.equalize(image)
elif transform_id == "Invert":
return F.invert(image)
else:
raise ValueError(f"No transform available for {transform_id}")
class AutoAugment(_AutoAugmentBase):
r"""[BETA] AutoAugment data augmentation method based on
`"AutoAugment: Learning Augmentation Strategies from Data" <https://arxiv.org/pdf/1805.09501.pdf>`_.
.. v2betastatus:: AutoAugment transform
This transformation works on images and videos only.
If the input is :class:`torch.Tensor`, it should be of type ``torch.uint8``, and it is expected
to have [..., 1 or 3, H, W] shape, where ... means an arbitrary number of leading dimensions.
If img is PIL Image, it is expected to be in mode "L" or "RGB".
Args:
policy (AutoAugmentPolicy, optional): Desired policy enum defined by
:class:`torchvision.transforms.autoaugment.AutoAugmentPolicy`. Default is ``AutoAugmentPolicy.IMAGENET``.
interpolation (InterpolationMode, optional): Desired interpolation enum defined by
:class:`torchvision.transforms.InterpolationMode`. Default is ``InterpolationMode.NEAREST``.
If input is Tensor, only ``InterpolationMode.NEAREST``, ``InterpolationMode.BILINEAR`` are supported.
fill (sequence or number, optional): Pixel fill value for the area outside the transformed
image. If given a number, the value is used for all bands respectively.
"""
_v1_transform_cls = _transforms.AutoAugment
_AUGMENTATION_SPACE = {
"ShearX": (lambda num_bins, height, width: torch.linspace(0.0, 0.3, num_bins), True),
"ShearY": (lambda num_bins, height, width: torch.linspace(0.0, 0.3, num_bins), True),
"TranslateX": (
lambda num_bins, height, width: torch.linspace(0.0, 150.0 / 331.0 * width, num_bins),
True,
),
"TranslateY": (
lambda num_bins, height, width: torch.linspace(0.0, 150.0 / 331.0 * height, num_bins),
True,
),
"Rotate": (lambda num_bins, height, width: torch.linspace(0.0, 30.0, num_bins), True),
"Brightness": (lambda num_bins, height, width: torch.linspace(0.0, 0.9, num_bins), True),
"Color": (lambda num_bins, height, width: torch.linspace(0.0, 0.9, num_bins), True),
"Contrast": (lambda num_bins, height, width: torch.linspace(0.0, 0.9, num_bins), True),
"Sharpness": (lambda num_bins, height, width: torch.linspace(0.0, 0.9, num_bins), True),
"Posterize": (
lambda num_bins, height, width: (8 - (torch.arange(num_bins) / ((num_bins - 1) / 4))).round().int(),
False,
),
"Solarize": (lambda num_bins, height, width: torch.linspace(1.0, 0.0, num_bins), False),
"AutoContrast": (lambda num_bins, height, width: None, False),
"Equalize": (lambda num_bins, height, width: None, False),
"Invert": (lambda num_bins, height, width: None, False),
}
def __init__(
self,
policy: AutoAugmentPolicy = AutoAugmentPolicy.IMAGENET,
interpolation: Union[InterpolationMode, int] = InterpolationMode.NEAREST,
fill: Union[_FillType, Dict[Union[Type, str], _FillType]] = None,
) -> None:
super().__init__(interpolation=interpolation, fill=fill)
self.policy = policy
self._policies = self._get_policies(policy)
def _get_policies(
self, policy: AutoAugmentPolicy
) -> List[Tuple[Tuple[str, float, Optional[int]], Tuple[str, float, Optional[int]]]]:
if policy == AutoAugmentPolicy.IMAGENET:
return [
(("Posterize", 0.4, 8), ("Rotate", 0.6, 9)),
(("Solarize", 0.6, 5), ("AutoContrast", 0.6, None)),
(("Equalize", 0.8, None), ("Equalize", 0.6, None)),
(("Posterize", 0.6, 7), ("Posterize", 0.6, 6)),
(("Equalize", 0.4, None), ("Solarize", 0.2, 4)),
(("Equalize", 0.4, None), ("Rotate", 0.8, 8)),
(("Solarize", 0.6, 3), ("Equalize", 0.6, None)),
(("Posterize", 0.8, 5), ("Equalize", 1.0, None)),
(("Rotate", 0.2, 3), ("Solarize", 0.6, 8)),
(("Equalize", 0.6, None), ("Posterize", 0.4, 6)),
(("Rotate", 0.8, 8), ("Color", 0.4, 0)),
(("Rotate", 0.4, 9), ("Equalize", 0.6, None)),
(("Equalize", 0.0, None), ("Equalize", 0.8, None)),
(("Invert", 0.6, None), ("Equalize", 1.0, None)),
(("Color", 0.6, 4), ("Contrast", 1.0, 8)),
(("Rotate", 0.8, 8), ("Color", 1.0, 2)),
(("Color", 0.8, 8), ("Solarize", 0.8, 7)),
(("Sharpness", 0.4, 7), ("Invert", 0.6, None)),
(("ShearX", 0.6, 5), ("Equalize", 1.0, None)),
(("Color", 0.4, 0), ("Equalize", 0.6, None)),
(("Equalize", 0.4, None), ("Solarize", 0.2, 4)),
(("Solarize", 0.6, 5), ("AutoContrast", 0.6, None)),
(("Invert", 0.6, None), ("Equalize", 1.0, None)),
(("Color", 0.6, 4), ("Contrast", 1.0, 8)),
(("Equalize", 0.8, None), ("Equalize", 0.6, None)),
]
elif policy == AutoAugmentPolicy.CIFAR10:
return [
(("Invert", 0.1, None), ("Contrast", 0.2, 6)),
(("Rotate", 0.7, 2), ("TranslateX", 0.3, 9)),
(("Sharpness", 0.8, 1), ("Sharpness", 0.9, 3)),
(("ShearY", 0.5, 8), ("TranslateY", 0.7, 9)),
(("AutoContrast", 0.5, None), ("Equalize", 0.9, None)),
(("ShearY", 0.2, 7), ("Posterize", 0.3, 7)),
(("Color", 0.4, 3), ("Brightness", 0.6, 7)),
(("Sharpness", 0.3, 9), ("Brightness", 0.7, 9)),
(("Equalize", 0.6, None), ("Equalize", 0.5, None)),
(("Contrast", 0.6, 7), ("Sharpness", 0.6, 5)),
(("Color", 0.7, 7), ("TranslateX", 0.5, 8)),
(("Equalize", 0.3, None), ("AutoContrast", 0.4, None)),
(("TranslateY", 0.4, 3), ("Sharpness", 0.2, 6)),
(("Brightness", 0.9, 6), ("Color", 0.2, 8)),
(("Solarize", 0.5, 2), ("Invert", 0.0, None)),
(("Equalize", 0.2, None), ("AutoContrast", 0.6, None)),
(("Equalize", 0.2, None), ("Equalize", 0.6, None)),
(("Color", 0.9, 9), ("Equalize", 0.6, None)),
(("AutoContrast", 0.8, None), ("Solarize", 0.2, 8)),
(("Brightness", 0.1, 3), ("Color", 0.7, 0)),
(("Solarize", 0.4, 5), ("AutoContrast", 0.9, None)),
(("TranslateY", 0.9, 9), ("TranslateY", 0.7, 9)),
(("AutoContrast", 0.9, None), ("Solarize", 0.8, 3)),
(("Equalize", 0.8, None), ("Invert", 0.1, None)),
(("TranslateY", 0.7, 9), ("AutoContrast", 0.9, None)),
]
elif policy == AutoAugmentPolicy.SVHN:
return [
(("ShearX", 0.9, 4), ("Invert", 0.2, None)),
(("ShearY", 0.9, 8), ("Invert", 0.7, None)),
(("Equalize", 0.6, None), ("Solarize", 0.6, 6)),
(("Invert", 0.9, None), ("Equalize", 0.6, None)),
(("Equalize", 0.6, None), ("Rotate", 0.9, 3)),
(("ShearX", 0.9, 4), ("AutoContrast", 0.8, None)),
(("ShearY", 0.9, 8), ("Invert", 0.4, None)),
(("ShearY", 0.9, 5), ("Solarize", 0.2, 6)),
(("Invert", 0.9, None), ("AutoContrast", 0.8, None)),
(("Equalize", 0.6, None), ("Rotate", 0.9, 3)),
(("ShearX", 0.9, 4), ("Solarize", 0.3, 3)),
(("ShearY", 0.8, 8), ("Invert", 0.7, None)),
(("Equalize", 0.9, None), ("TranslateY", 0.6, 6)),
(("Invert", 0.9, None), ("Equalize", 0.6, None)),
(("Contrast", 0.3, 3), ("Rotate", 0.8, 4)),
(("Invert", 0.8, None), ("TranslateY", 0.0, 2)),
(("ShearY", 0.7, 6), ("Solarize", 0.4, 8)),
(("Invert", 0.6, None), ("Rotate", 0.8, 4)),
(("ShearY", 0.3, 7), ("TranslateX", 0.9, 3)),
(("ShearX", 0.1, 6), ("Invert", 0.6, None)),
(("Solarize", 0.7, 2), ("TranslateY", 0.6, 7)),
(("ShearY", 0.8, 4), ("Invert", 0.8, None)),
(("ShearX", 0.7, 9), ("TranslateY", 0.8, 3)),
(("ShearY", 0.8, 5), ("AutoContrast", 0.7, None)),
(("ShearX", 0.7, 2), ("Invert", 0.1, None)),
]
else:
raise ValueError(f"The provided policy {policy} is not recognized.")
def forward(self, *inputs: Any) -> Any:
flat_inputs_with_spec, image_or_video = self._flatten_and_extract_image_or_video(inputs)
height, width = get_size(image_or_video)
policy = self._policies[int(torch.randint(len(self._policies), ()))]
for transform_id, probability, magnitude_idx in policy:
if not torch.rand(()) <= probability:
continue
magnitudes_fn, signed = self._AUGMENTATION_SPACE[transform_id]
magnitudes = magnitudes_fn(10, height, width)
if magnitudes is not None:
magnitude = float(magnitudes[magnitude_idx])
if signed and torch.rand(()) <= 0.5:
magnitude *= -1
else:
magnitude = 0.0
image_or_video = self._apply_image_or_video_transform(
image_or_video, transform_id, magnitude, interpolation=self.interpolation, fill=self._fill
)
return self._unflatten_and_insert_image_or_video(flat_inputs_with_spec, image_or_video)
class RandAugment(_AutoAugmentBase):
r"""[BETA] RandAugment data augmentation method based on
`"RandAugment: Practical automated data augmentation with a reduced search space"
<https://arxiv.org/abs/1909.13719>`_.
.. v2betastatus:: RandAugment transform
This transformation works on images and videos only.
If the input is :class:`torch.Tensor`, it should be of type ``torch.uint8``, and it is expected
to have [..., 1 or 3, H, W] shape, where ... means an arbitrary number of leading dimensions.
If img is PIL Image, it is expected to be in mode "L" or "RGB".
Args:
num_ops (int, optional): Number of augmentation transformations to apply sequentially.
magnitude (int, optional): Magnitude for all the transformations.
num_magnitude_bins (int, optional): The number of different magnitude values.
interpolation (InterpolationMode, optional): Desired interpolation enum defined by
:class:`torchvision.transforms.InterpolationMode`. Default is ``InterpolationMode.NEAREST``.
If input is Tensor, only ``InterpolationMode.NEAREST``, ``InterpolationMode.BILINEAR`` are supported.
fill (sequence or number, optional): Pixel fill value for the area outside the transformed
image. If given a number, the value is used for all bands respectively.
"""
_v1_transform_cls = _transforms.RandAugment
_AUGMENTATION_SPACE = {
"Identity": (lambda num_bins, height, width: None, False),
"ShearX": (lambda num_bins, height, width: torch.linspace(0.0, 0.3, num_bins), True),
"ShearY": (lambda num_bins, height, width: torch.linspace(0.0, 0.3, num_bins), True),
"TranslateX": (
lambda num_bins, height, width: torch.linspace(0.0, 150.0 / 331.0 * width, num_bins),
True,
),
"TranslateY": (
lambda num_bins, height, width: torch.linspace(0.0, 150.0 / 331.0 * height, num_bins),
True,
),
"Rotate": (lambda num_bins, height, width: torch.linspace(0.0, 30.0, num_bins), True),
"Brightness": (lambda num_bins, height, width: torch.linspace(0.0, 0.9, num_bins), True),
"Color": (lambda num_bins, height, width: torch.linspace(0.0, 0.9, num_bins), True),
"Contrast": (lambda num_bins, height, width: torch.linspace(0.0, 0.9, num_bins), True),
"Sharpness": (lambda num_bins, height, width: torch.linspace(0.0, 0.9, num_bins), True),
"Posterize": (
lambda num_bins, height, width: (8 - (torch.arange(num_bins) / ((num_bins - 1) / 4))).round().int(),
False,
),
"Solarize": (lambda num_bins, height, width: torch.linspace(1.0, 0.0, num_bins), False),
"AutoContrast": (lambda num_bins, height, width: None, False),
"Equalize": (lambda num_bins, height, width: None, False),
}
def __init__(
self,
num_ops: int = 2,
magnitude: int = 9,
num_magnitude_bins: int = 31,
interpolation: Union[InterpolationMode, int] = InterpolationMode.NEAREST,
fill: Union[_FillType, Dict[Union[Type, str], _FillType]] = None,
) -> None:
super().__init__(interpolation=interpolation, fill=fill)
self.num_ops = num_ops
self.magnitude = magnitude
self.num_magnitude_bins = num_magnitude_bins
def forward(self, *inputs: Any) -> Any:
flat_inputs_with_spec, image_or_video = self._flatten_and_extract_image_or_video(inputs)
height, width = get_size(image_or_video)
for _ in range(self.num_ops):
transform_id, (magnitudes_fn, signed) = self._get_random_item(self._AUGMENTATION_SPACE)
magnitudes = magnitudes_fn(self.num_magnitude_bins, height, width)
if magnitudes is not None:
magnitude = float(magnitudes[self.magnitude])
if signed and torch.rand(()) <= 0.5:
magnitude *= -1
else:
magnitude = 0.0
image_or_video = self._apply_image_or_video_transform(
image_or_video, transform_id, magnitude, interpolation=self.interpolation, fill=self._fill
)
return self._unflatten_and_insert_image_or_video(flat_inputs_with_spec, image_or_video)
class TrivialAugmentWide(_AutoAugmentBase):
r"""[BETA] Dataset-independent data-augmentation with TrivialAugment Wide, as described in
`"TrivialAugment: Tuning-free Yet State-of-the-Art Data Augmentation" <https://arxiv.org/abs/2103.10158>`_.
.. v2betastatus:: TrivialAugmentWide transform
This transformation works on images and videos only.
If the input is :class:`torch.Tensor`, it should be of type ``torch.uint8``, and it is expected
to have [..., 1 or 3, H, W] shape, where ... means an arbitrary number of leading dimensions.
If img is PIL Image, it is expected to be in mode "L" or "RGB".
Args:
num_magnitude_bins (int, optional): The number of different magnitude values.
interpolation (InterpolationMode, optional): Desired interpolation enum defined by
:class:`torchvision.transforms.InterpolationMode`. Default is ``InterpolationMode.NEAREST``.
If input is Tensor, only ``InterpolationMode.NEAREST``, ``InterpolationMode.BILINEAR`` are supported.
fill (sequence or number, optional): Pixel fill value for the area outside the transformed
image. If given a number, the value is used for all bands respectively.
"""
_v1_transform_cls = _transforms.TrivialAugmentWide
_AUGMENTATION_SPACE = {
"Identity": (lambda num_bins, height, width: None, False),
"ShearX": (lambda num_bins, height, width: torch.linspace(0.0, 0.99, num_bins), True),
"ShearY": (lambda num_bins, height, width: torch.linspace(0.0, 0.99, num_bins), True),
"TranslateX": (lambda num_bins, height, width: torch.linspace(0.0, 32.0, num_bins), True),
"TranslateY": (lambda num_bins, height, width: torch.linspace(0.0, 32.0, num_bins), True),
"Rotate": (lambda num_bins, height, width: torch.linspace(0.0, 135.0, num_bins), True),
"Brightness": (lambda num_bins, height, width: torch.linspace(0.0, 0.99, num_bins), True),
"Color": (lambda num_bins, height, width: torch.linspace(0.0, 0.99, num_bins), True),
"Contrast": (lambda num_bins, height, width: torch.linspace(0.0, 0.99, num_bins), True),
"Sharpness": (lambda num_bins, height, width: torch.linspace(0.0, 0.99, num_bins), True),
"Posterize": (
lambda num_bins, height, width: (8 - (torch.arange(num_bins) / ((num_bins - 1) / 6))).round().int(),
False,
),
"Solarize": (lambda num_bins, height, width: torch.linspace(1.0, 0.0, num_bins), False),
"AutoContrast": (lambda num_bins, height, width: None, False),
"Equalize": (lambda num_bins, height, width: None, False),
}
def __init__(
self,
num_magnitude_bins: int = 31,
interpolation: Union[InterpolationMode, int] = InterpolationMode.NEAREST,
fill: Union[_FillType, Dict[Union[Type, str], _FillType]] = None,
):
super().__init__(interpolation=interpolation, fill=fill)
self.num_magnitude_bins = num_magnitude_bins
def forward(self, *inputs: Any) -> Any:
flat_inputs_with_spec, image_or_video = self._flatten_and_extract_image_or_video(inputs)
height, width = get_size(image_or_video)
transform_id, (magnitudes_fn, signed) = self._get_random_item(self._AUGMENTATION_SPACE)
magnitudes = magnitudes_fn(self.num_magnitude_bins, height, width)
if magnitudes is not None:
magnitude = float(magnitudes[int(torch.randint(self.num_magnitude_bins, ()))])
if signed and torch.rand(()) <= 0.5:
magnitude *= -1
else:
magnitude = 0.0
image_or_video = self._apply_image_or_video_transform(
image_or_video, transform_id, magnitude, interpolation=self.interpolation, fill=self._fill
)
return self._unflatten_and_insert_image_or_video(flat_inputs_with_spec, image_or_video)
class AugMix(_AutoAugmentBase):
r"""[BETA] AugMix data augmentation method based on
`"AugMix: A Simple Data Processing Method to Improve Robustness and Uncertainty" <https://arxiv.org/abs/1912.02781>`_.
.. v2betastatus:: AugMix transform
This transformation works on images and videos only.
If the input is :class:`torch.Tensor`, it should be of type ``torch.uint8``, and it is expected
to have [..., 1 or 3, H, W] shape, where ... means an arbitrary number of leading dimensions.
If img is PIL Image, it is expected to be in mode "L" or "RGB".
Args:
severity (int, optional): The severity of base augmentation operators. Default is ``3``.
mixture_width (int, optional): The number of augmentation chains. Default is ``3``.
chain_depth (int, optional): The depth of augmentation chains. A negative value denotes stochastic depth sampled from the interval [1, 3].
Default is ``-1``.
alpha (float, optional): The hyperparameter for the probability distributions. Default is ``1.0``.
all_ops (bool, optional): Use all operations (including brightness, contrast, color and sharpness). Default is ``True``.
interpolation (InterpolationMode, optional): Desired interpolation enum defined by
:class:`torchvision.transforms.InterpolationMode`. Default is ``InterpolationMode.NEAREST``.
If input is Tensor, only ``InterpolationMode.NEAREST``, ``InterpolationMode.BILINEAR`` are supported.
fill (sequence or number, optional): Pixel fill value for the area outside the transformed
image. If given a number, the value is used for all bands respectively.
"""
_v1_transform_cls = _transforms.AugMix
_PARTIAL_AUGMENTATION_SPACE = {
"ShearX": (lambda num_bins, height, width: torch.linspace(0.0, 0.3, num_bins), True),
"ShearY": (lambda num_bins, height, width: torch.linspace(0.0, 0.3, num_bins), True),
"TranslateX": (lambda num_bins, height, width: torch.linspace(0.0, width / 3.0, num_bins), True),
"TranslateY": (lambda num_bins, height, width: torch.linspace(0.0, height / 3.0, num_bins), True),
"Rotate": (lambda num_bins, height, width: torch.linspace(0.0, 30.0, num_bins), True),
"Posterize": (
lambda num_bins, height, width: (4 - (torch.arange(num_bins) / ((num_bins - 1) / 4))).round().int(),
False,
),
"Solarize": (lambda num_bins, height, width: torch.linspace(1.0, 0.0, num_bins), False),
"AutoContrast": (lambda num_bins, height, width: None, False),
"Equalize": (lambda num_bins, height, width: None, False),
}
_AUGMENTATION_SPACE: Dict[str, Tuple[Callable[[int, int, int], Optional[torch.Tensor]], bool]] = {
**_PARTIAL_AUGMENTATION_SPACE,
"Brightness": (lambda num_bins, height, width: torch.linspace(0.0, 0.9, num_bins), True),
"Color": (lambda num_bins, height, width: torch.linspace(0.0, 0.9, num_bins), True),
"Contrast": (lambda num_bins, height, width: torch.linspace(0.0, 0.9, num_bins), True),
"Sharpness": (lambda num_bins, height, width: torch.linspace(0.0, 0.9, num_bins), True),
}
def __init__(
self,
severity: int = 3,
mixture_width: int = 3,
chain_depth: int = -1,
alpha: float = 1.0,
all_ops: bool = True,
interpolation: Union[InterpolationMode, int] = InterpolationMode.BILINEAR,
fill: Union[_FillType, Dict[Union[Type, str], _FillType]] = None,
) -> None:
super().__init__(interpolation=interpolation, fill=fill)
self._PARAMETER_MAX = 10
if not (1 <= severity <= self._PARAMETER_MAX):
raise ValueError(f"The severity must be between [1, {self._PARAMETER_MAX}]. Got {severity} instead.")
self.severity = severity
self.mixture_width = mixture_width
self.chain_depth = chain_depth
self.alpha = alpha
self.all_ops = all_ops
def _sample_dirichlet(self, params: torch.Tensor) -> torch.Tensor:
# Must be on a separate method so that we can overwrite it in tests.
return torch._sample_dirichlet(params)
def forward(self, *inputs: Any) -> Any:
flat_inputs_with_spec, orig_image_or_video = self._flatten_and_extract_image_or_video(inputs)
height, width = get_size(orig_image_or_video)
if isinstance(orig_image_or_video, torch.Tensor):
image_or_video = orig_image_or_video
else: # isinstance(inpt, PIL.Image.Image):
image_or_video = F.pil_to_tensor(orig_image_or_video)
augmentation_space = self._AUGMENTATION_SPACE if self.all_ops else self._PARTIAL_AUGMENTATION_SPACE
orig_dims = list(image_or_video.shape)
expected_ndim = 5 if isinstance(orig_image_or_video, tv_tensors.Video) else 4
batch = image_or_video.reshape([1] * max(expected_ndim - image_or_video.ndim, 0) + orig_dims)
batch_dims = [batch.size(0)] + [1] * (batch.ndim - 1)
# Sample the beta weights for combining the original and augmented image or video. To get Beta, we use a
# Dirichlet with 2 parameters. The 1st column stores the weights of the original and the 2nd the ones of
# augmented image or video.
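# i.e. the mix computed below is mix = m[:, 0] * batch + m[:, 1] * sum_i(w_i * aug_i),
# where the w_i come from the second Dirichlet sample (combined_weights).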
m = self._sample_dirichlet(
torch.tensor([self.alpha, self.alpha], device=batch.device).expand(batch_dims[0], -1)
)
# Sample the mixing weights and combine them with the ones sampled from Beta for the augmented images or videos.
combined_weights = self._sample_dirichlet(
torch.tensor([self.alpha] * self.mixture_width, device=batch.device).expand(batch_dims[0], -1)
) * m[:, 1].reshape([batch_dims[0], -1])
mix = m[:, 0].reshape(batch_dims) * batch
for i in range(self.mixture_width):
aug = batch
depth = self.chain_depth if self.chain_depth > 0 else int(torch.randint(low=1, high=4, size=(1,)).item())
for _ in range(depth):
transform_id, (magnitudes_fn, signed) = self._get_random_item(augmentation_space)
magnitudes = magnitudes_fn(self._PARAMETER_MAX, height, width)
if magnitudes is not None:
magnitude = float(magnitudes[int(torch.randint(self.severity, ()))])
if signed and torch.rand(()) <= 0.5:
magnitude *= -1
else:
magnitude = 0.0
aug = self._apply_image_or_video_transform(
aug, transform_id, magnitude, interpolation=self.interpolation, fill=self._fill
)
mix.add_(combined_weights[:, i].reshape(batch_dims) * aug)
mix = mix.reshape(orig_dims).to(dtype=image_or_video.dtype)
if isinstance(orig_image_or_video, (tv_tensors.Image, tv_tensors.Video)):
mix = tv_tensors.wrap(mix, like=orig_image_or_video)
elif isinstance(orig_image_or_video, PIL.Image.Image):
mix = F.to_pil_image(mix)
return self._unflatten_and_insert_image_or_video(flat_inputs_with_spec, mix)
|
import serial
import serial.tools.list_ports
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import tkinter as tk
from tkinter import filedialog
root = tk.Tk()
root.withdraw()
file_path = filedialog.askopenfilename(initialdir="D:\\Documents\\GitHub\\dpf-line-cutter\\code\\launch archive\\2021-10-24 StAlbansMarman")
csv = pd.read_csv(file_path)
csv = np.array(csv)
pressure = csv[:, 2]
pressure = np.append(pressure[0:12000],pressure[140000:150000])
plt.figure()
plt.plot(pressure)
plt.show()
# pressure = 44330.76 * (1.0 - pow(pressure / 101451, 1.0 / 5.25588))
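# (the commented-out line above appears to be the standard barometric formula converting
# pressure in Pa to altitude in m, with 101451 Pa used as the reference sea-level pressure)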
# pressure = pressure[108500:111200]
print(pressure.shape)
# with open("D:\\Documents\\GitHub\\dpf-line-cutter\\code\\v3\\line_cutter_v3\\simdata.h", 'w') as f:
with open("out.csv", 'w') as f:
f.write(f"int p[{len(pressure)}]=" + '{')
for p in pressure:
f.write(f"{int(p)}")
f.write(",")
f.write("};")
# np.savetxt("out.csv", pressure, delimiter=',', fmt="%f")
# plt.figure()
# plt.plot(pressure)
# plt.show()
# ports = serial.tools.list_ports.comports(include_links=False)
# port = None
# for port in ports :
# print('Found device at port '+ port.device)
# if ports == [] or port is None:
# print("No devices found!")
# ser = serial.Serial(port.device)
# if ser.isOpen():
# ser.close()
# ser = serial.Serial(port.device, 9600, timeout=1)
# ser.flushInput()
# ser.flushOutput()
# print('Connected to ' + ser.name)
|
# -*-coding: utf8 -*-
"""
dynamic_group comes from http://djangosnippets.org/snippets/2511/
"""
from importlib import import_module
from itertools import groupby, chain
from datetime import datetime
import time
import re
from pytimeago.english import english as english_ago
from pytimeago.english_short import english_short as english_short_ago
from django import template
from django.conf import settings
from django.template import TemplateSyntaxError
from django.utils.safestring import mark_safe
from gim.core.diffutils import split_patch_into_hunks, parse_hunk, extract_hunk_header_starts as extract, hunk_as_lines, get_encoded_hunk_shas_from_patch, convert_diff
register = template.Library()
class DynamicRegroupNode(template.Node):
def __init__(self, target, parser, expression, var_name):
self.target = target
self.expression = template.Variable(expression)
self.var_name = var_name
self.parser = parser
def render(self, context):
obj_list = self.target.resolve(context, True)
if obj_list is None:
# target variable wasn't found in context; fail silently.
context[self.var_name] = []
return ''
# List of dictionaries in the format:
# {'grouper': 'key', 'list': [list of contents]}.
"""
Try to resolve the filter expression from the template context.
If the variable doesn't exist, accept the value that passed to the
template tag and convert it to a string
"""
try:
exp = self.expression.resolve(context)
except template.VariableDoesNotExist:
exp = str(self.expression)
filter_exp = self.parser.compile_filter(exp)
context[self.var_name] = [
{'grouper': key, 'list': list(val)}
for key, val in
groupby(obj_list, lambda v, f=filter_exp.resolve: f(v, True))
]
return ''
@register.tag
def dynamic_regroup(parser, token):
firstbits = token.contents.split(None, 3)
if len(firstbits) != 4:
raise TemplateSyntaxError("'dynamic_regroup' tag takes five arguments")
target = parser.compile_filter(firstbits[1])
if firstbits[2] != 'by':
raise TemplateSyntaxError("second argument to 'dynamic_regroup' tag must be 'by'")
lastbits_reversed = firstbits[3][::-1].split(None, 2)
if lastbits_reversed[1][::-1] != 'as':
raise TemplateSyntaxError("next-to-last argument to 'dynamic_regroup' tag must"
" be 'as'")
"""
Django expects the value of `expression` to be an attribute available on
your objects. The value you pass to the template tag gets converted into a
FilterExpression object from the literal.
Sometimes we need the attribute to group on to be dynamic. So, instead
of converting the value to a FilterExpression here, we're going to pass the
value as-is and convert it in the Node.
"""
expression = lastbits_reversed[2][::-1]
var_name = lastbits_reversed[0][::-1]
"""
We also need to hand the parser to the node in order to convert the value
for `expression` to a FilterExpression.
"""
return DynamicRegroupNode(target, parser, expression, var_name)
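# Example template usage (sketch, `issues`/`group_field` are illustrative names):
#   {% dynamic_regroup issues by group_field as grouped %}
# `grouped` then holds {'grouper': ..., 'list': [...]} dicts like Django's builtin {% regroup %},
# except that `group_field` may itself be a template variable resolved at render time.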
@register.assignment_tag(takes_context=True)
def attributes_for_list(context, items, attribute, none_if_missing=False):
"""
Take a list of items (or something that can be iterated) and for each one,
return the given attribute, in a list. If the attribute is not found for an
item, no entry for this item will be returned, except if none_if_missing is
True, in which case None will be returned.
"""
if not items:
return []
final_list = []
for item in items:
if isinstance(item, dict):
if none_if_missing or attribute in item:
final_list.append(item.get(attribute, None))
else:
if none_if_missing or hasattr(item, attribute):
final_list.append(getattr(item, attribute, None))
return final_list
@register.filter
def dict_item(dikt, key):
"""
Custom template tag used like so:
{{ dictionary|dict_item:var }}
where dictionary is a dictionary and key is a variable representing
one of its keys
"""
try:
return dikt.__getitem__(key)
except Exception:
return ''
@register.filter
def attr(obj, attr):
"""
Custom template tag used like so:
{{ object|attr:var }}
where object is an object with attributes and attr is a variable representing
one of its attributes
"""
try:
result = getattr(obj, attr)
if callable(result):
return result()
return result
except Exception:
return ''
@register.filter
def ago(date, short=False):
method = english_short_ago if short else english_ago
try:
return method(time.mktime(datetime.now().timetuple()) - time.mktime(date.timetuple()))
except Exception:
return ''
register.filter('ago', ago)
@register.filter
def avatar_size(avatar_url, size):
if not avatar_url:
from gim.core.models import GithubUser
avatar_url = GithubUser.get_default_avatar()
if not size:
return avatar_url
if '?' in avatar_url:
return avatar_url + '&s=%s' % size
return avatar_url + '?s=%s' % size
class NoSpacesNode(template.Node):
"""
"""
spaces = re.compile('\s')
def __init__(self, nodelist):
self.nodelist = nodelist
def render(self, context):
return self.spaces.sub('', self.nodelist.render(context).strip())
@register.tag
def nospaces(parser, token):
"""
Removes all spaces in the templatetag
Example usage::
<div class="{% nospaces %}
{% if foo %}
foo
{% else %}
bar
{% endif %}
{% endnospaces %}">
This example would return this HTML::
<div class="foo">
"""
nodelist = parser.parse(('endnospaces',))
parser.delete_first_token()
return NoSpacesNode(nodelist)
@register.filter
def tolist(value):
return [value]
@register.filter
def append(alist, item):
return alist + [item]
def _parse_diff(diff, reduce=False, hunk_shas=None, hunk_shas_reviewed=None, hunks=None):
if not diff or diff == 'u\n':
diff = u'@@ -1,0 +1,0 @@ EMPTY DIFF\n- %s was not able to retrieve this diff :(' % settings.BRAND_SHORT_NAME
hunks = None # force resplit
if not diff.startswith(u'@@'):
diff = u'@@ -1,0 +1,0 @@\n' + diff
hunks = None # force resplit
hunks = split_patch_into_hunks(diff) if hunks is None else hunks
results = []
position = 0
if reduce:
hunks = hunks[-1:]
# parse each hunk
for hunk_index, hunk in enumerate(hunks):
hunk = hunk_as_lines(hunk)
hunk_sha = None
is_reviewed = None
if hunk_shas:
hunk_sha = hunk_shas[hunk_index]
is_reviewed = hunk_shas_reviewed and hunk_shas_reviewed.get(hunk_sha, False)
result = parse_hunk(hunk, hunk_sha, position, is_reviewed)
position += len(result)
if reduce:
result = result[0:1] + result[1:][-12:]
results.append(result)
# as a list, not an iterable because the list is consumed twice!
return list(chain.from_iterable(results))
@register.filter
def parse_diff(diff):
return _parse_diff(diff=diff)
@register.filter
def parse_diff_and_reduce(diff):
return _parse_diff(diff=diff, reduce=True)
@register.filter
def parse_diff_for_file(file):
hunks = getattr(file, 'hunks', None)
if hunks is None:
hunks = split_patch_into_hunks(file.patch)
if hunks is not None:
hunks = [hunk_as_lines(hunk) for hunk in hunks]
if hunks and file.hunk_shas and len(hunks) != len(file.hunk_shas):
# In the past, hunk_shas was saved on the file without taking into account that two hunks
# could be exactly the same and therefore share the same hash. As it was computed using a
# dict, one entry went missing. In that case, we recompute the hunk shas.
from gim.core.models.files import FileMixin
if isinstance(file, FileMixin) and not file._meta.abstract:
file.hunk_shas = None
file.patch_cha = None
file.save()
else:
file.hunk_shas = get_encoded_hunk_shas_from_patch(file.patch)
return _parse_diff(
diff=file.patch,
hunk_shas=file.hunk_shas,
hunk_shas_reviewed=getattr(file, 'reviewed_hunks_locally', {}),
hunks=hunks
)
@register.filter
def apply_diff_modes(diff, diff_modes):
return convert_diff(diff, diff_modes)
@register.filter
def count_comments_by_hunk_position(diff, entry_points_by_position):
if not entry_points_by_position:
return {}
hunks_by_position = {
line[4]: {
'count': 0,
'last_position': None
}
for line
in diff
if line[0] == 'comment' and line[4] != 'last-position'
}
if not hunks_by_position:
return {}
hunks_positions = sorted(hunks_by_position.keys())
for index, position in enumerate(hunks_positions):
try:
next_pos = hunks_positions[index + 1]
except IndexError:
hunks_by_position[position]['last_position'] = diff[-1][4]
else:
hunks_by_position[position]['last_position'] = next_pos - 1
all_hunks = list(hunks_by_position.items())
for entry_point_position, entry_point in entry_points_by_position.iteritems():
for hunk_position, hunk in all_hunks:
if hunk_position < entry_point_position <= hunk['last_position']:
hunk['count'] += len(entry_point.comments.all()) # .all is now cached for the use in the template
break
return {
position: hunk['count']
for position, hunk
in hunks_by_position.items()
}
@register.filter
def short_sha(sha, length=8):
return sha[:length]
@register.simple_tag(takes_context=True)
def import_debug(context):
import debug
return ""
@register.filter
def model(obj):
return obj._meta.object_name
@register.filter
def copy_fks_from(to_obj, from_obj):
"""
To avoid requests for objects we may already have in `from_obj` that may be
needed on `to_obj`, copy them between the two.
Then return the "filtered" object.
"""
# import debug
if not to_obj or not from_obj:
return None
def copy_fk(field, to_obj, from_obj):
id_attr = '%s_id' % field
cache_attr = '_%s_cache' % field
if not hasattr(to_obj, id_attr) or not hasattr(from_obj, id_attr):
return False
if getattr(to_obj, id_attr) != getattr(from_obj, id_attr):
return False
if not hasattr(from_obj, cache_attr):
return False
setattr(to_obj, cache_attr, getattr(from_obj, cache_attr))
return True
copy_fk('issue', to_obj, from_obj)
if not copy_fk('repository', to_obj, from_obj) and hasattr(from_obj, '_issue_cache'):
copy_fk('repository', to_obj, from_obj._issue_cache)
return to_obj
@register.filter
def map_attr(objs, attr):
return [getattr(obj, attr) for obj in objs]
@register.filter
def map_dict_item(dicts, dict_item):
return [dikt.get(dict_item) for dikt in dicts]
@register.filter
def strip(string_value):
return string_value.strip()
@register.filter
def replace(string, filter):
return string.replace(*filter.split(':'))
@register.filter
def group_by_filter_key(group_by_object):
from gim.core import models as core_models
if isinstance(group_by_object, core_models.LabelType):
return 'label_type:%d' % group_by_object.id
return group_by_object
@register.filter
def group_by_filter_value(grouper, group_field):
from gim.core import models as core_models
if grouper is None:
return '' if group_field == 'label_type_grouper' else '__none__'
if group_field == 'is_pull_request':
return 'yes' if grouper else 'no'
if group_field in {'state', 'githubnotification__reason', 'githubnotification__repository'}:
return grouper
if group_field == 'githubnotification__unread':
return 'unread' if grouper else 'read'
if isinstance(grouper, core_models.Milestone):
return grouper.number
if isinstance(grouper, core_models.GithubUser):
return grouper.username
if isinstance(grouper, core_models.Label):
return grouper.name
if isinstance(grouper, core_models.Project):
return grouper.number
if isinstance(grouper, core_models.Column):
return '%s:%s' % (grouper.project.number, grouper.position)
return ''
@register.filter
def concat(str1, str2):
return "%s%s" % (str1, str2)
@register.filter
def filter_truthy(list, attribute):
result = []
for entry in list:
try:
value = getattr(entry, attribute)
except AttributeError:
try:
value = entry.get(attribute)
except AttributeError:
continue
if value:
result.append(entry)
return result
@register.filter
def filter_falsy(list, attribute):
result = []
for entry in list:
try:
value = getattr(entry, attribute)
except AttributeError:
try:
value = entry.get(attribute)
except AttributeError:
value = False
if not value:
result.append(entry)
return result
@register.filter
def split(string, separator):
return string.split(separator)
@register.filter
def format_int_or_float(value):
try:
return (u'%.2f' % value).rstrip('0').rstrip('.')
except Exception:
return ''
@register.filter
def filter_status_ready(queryset):
return [obj for obj in queryset.all() if obj.status_ready]
@register.filter
def set_in_dict(value, key):
return {key: value}
@register.filter(name='sum')
def _sum(values):
return sum(values)
@register.filter
def values(dict_like):
# useful with counters: in a template we cannot pass `()` to call `.values()`, and
# looking up "values" on a counter returns 0 (an empty entry) instead of raising
# KeyError, so django never falls back to calling the method; this filter calls it explicitly
return dict_like.values()
@register.filter
def get_absolute_url_for_issue(obj, issue):
return obj.get_absolute_url_for_issue(issue)
@register.filter
def user_can_add_pr_review(issue, user):
if not issue:
return False
return issue.user_can_add_pr_review(user)
@register.filter
def extract_hunk_header_starts(header_text):
return '[%s,%s]' % extract(header_text)
@register.filter
def multiply(val1, val2):
return val1 * val2
@register.filter
def substract(val1, val2):
return val1 - val2
@register.assignment_tag(takes_context=True)
def user_or_account_name(context, account, capitalize_you=False, you_word=None, prefix_you=None, suffix_you=None, prefix_other=None, suffix_other=None):
if not you_word:
you_word = 'you'
return mark_safe(
(prefix_you or '') + (you_word.capitalize() if capitalize_you else you_word) + (suffix_you or '')
if context.get('user', None) == account
else (prefix_other or '') + account.username + (suffix_other or '')
)
@register.assignment_tag(takes_context=True)
def get_subscriptions_limit_management_data(context, account=None):
if not settings.SUBSCRIPTIONS_LIMIT_MANAGEMENT_FUNC:
return {}
if isinstance(settings.SUBSCRIPTIONS_LIMIT_MANAGEMENT_FUNC, basestring):
try:
module_name, method_name = settings.SUBSCRIPTIONS_LIMIT_MANAGEMENT_FUNC.rsplit('.', 1)
settings.SUBSCRIPTIONS_LIMIT_MANAGEMENT_FUNC = getattr(import_module(module_name), method_name)
except Exception:
settings.SUBSCRIPTIONS_LIMIT_MANAGEMENT_FUNC = None
return {}
if not context.get('user'):
return {}
return settings.SUBSCRIPTIONS_LIMIT_MANAGEMENT_FUNC(context, account)
@register.filter
def negate(val):
return not val
@register.filter
def validate_diff_modes(diff_modes, request=None):
from gim.core.diffutils import DIFF_MODES
if not diff_modes and request:
diff_modes = request.GET.get('diff_modes')
if diff_modes and isinstance(diff_modes, basestring):
diff_modes = set(diff_modes)
if diff_modes not in DIFF_MODES.ALLOWED:
diff_modes = DIFF_MODES.DEFAULT
return diff_modes
@register.filter(name='in')
def op_in(what, where):
return what in where
@register.filter(name='and')
def op_and(first, second):
return first and second
@register.filter
def equal(first, second):
return first == second
@register.filter(name='max')
def get_max(first, second):
return max(first, second)
|
import os
import json
import codecs
from optparse import OptionParser
import pandas as pd
from ..util import file_handling as fh, defines
import data_splitting as ds
def make_label_metaindex():
input_filename = os.path.join('.', 'codes.json')
with codecs.open(input_filename, 'r') as input_file:
codes = json.load(input_file)
label_index = {}
for question in codes.keys():
for mapping in codes[question]:
orig = mapping[0]
mine = int(mapping[1])
if mine not in label_index.keys():
label_index[mine] = {}
label_index[mine][question] = orig
return label_index
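# Illustration of the index built above (hypothetical codes.json content, not the real file):
# {"Q1": [["agree", "1"], ["disagree", "2"]]} would give
# label_index == {1: {"Q1": "agree"}, 2: {"Q1": "disagree"}},
# i.e. a reverse index from my integer code back to the original code, per question.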
def get_dataset_labels(dataset):
input_dir = defines.data_raw_labels_dir
input_filename = fh.make_filename(input_dir, fh.get_basename(dataset), 'csv')
label_data = pd.read_csv(input_filename, header=0, index_col=0)
return label_data
def get_labels(datasets):
all_labels = []
for f in datasets:
labels = get_dataset_labels(f)
all_labels.append(labels)
return pd.concat(all_labels, axis=0)
def get_powerset_labels(datasets):
powerset_df = pd.DataFrame(columns=['powerset_index'])
all_labels = get_labels(datasets)
powerset_index = []
for i in all_labels.index:
key = str(list(all_labels.loc[i, :]))
if key not in powerset_index:
powerset_index.append(key)
index = powerset_index.index(key)
powerset_df.loc[i] = index
return powerset_df, powerset_index
def output_label_counts(datasets):
label_dict = {}
all_labels = get_labels(datasets)
n, p = all_labels.shape
for i in all_labels.index:
key = str(list(all_labels.loc[i, :]))
label_dict[key] = label_dict.get(key, 0) + 1
gt1 = 0
gt2 = 0
for k in label_dict:
if label_dict[k] > 1:
gt1 += 1
if label_dict[k] > 2:
gt2 += 1
print "total keys = ", len(label_dict.keys())
print "greater than 1 =", gt1
print "greater than 2 =", gt2
def get_groups(group_file):
groups = []
lines = fh.read_text(group_file)
for line in lines:
if len(line) > 0:
groups.append(line.split())
return groups
def main():
#labels = get_labels(['Republican-Dislikes'])
#train, dev, test = ds.get_all_splits(0, None)
#df = pd.DataFrame(columns=labels.columns)
#df.loc['n_train'] = labels.loc[train, :].sum(axis=0)
#df.loc['n_test'] = labels.loc[test, :].sum(axis=0)
#df.to_csv('/Users/dcard/Desktop/temp/counts.csv')
usage = "%prog group_file"
parser = OptionParser(usage=usage)
(options, args) = parser.parse_args()
group_filename = args[0]
group_string = fh.read_text(group_filename)[0]
group = [g for g in group_string.split()]
print group
output_label_counts(group)
if __name__ == '__main__':
main()
|
# -*- coding: utf-8 -*-
from robot.libraries.BuiltIn import BuiltIn
from robot.api import logger
class HttpClientListener(object):
ROBOT_LISTENER_API_VERSION = 3
def __init__(self, requests_lib):
self._requests_lib = requests_lib
def end_test(self, data, result):
self._requests_lib.delete_all_sessions()
class HttpClient(object):
ROBOT_LIBRARY_SCOPE = 'GLOBAL'
ADD_CSRF_METHODS = {'post_request', 'patch_request', 'put_request'}
def __init__(self):
self._bi = BuiltIn()
self._requests = self._bi.get_library_instance('RequestsLibrary')
self.ROBOT_LIBRARY_LISTENER = HttpClientListener(self._requests)
def _set_variable(self, response):
self._bi.set_test_variable('${last_response}', response)
return response
def _get_csrf_token(self, alias):
try:
return self._requests._cache.get_connection(alias).cookies['csrftoken']
except KeyError:
logger.warn('No CSRF token found')
return ''
def post_protected_request(self, *args, **kw):
return self._run_request('post_request', *args, **kw)
def _run_request(self, method_name, *args, **kw):
alias = self._bi.get_variable_value('${session}')
method = getattr(self._requests, method_name)
headers = kw.setdefault('headers', {})
if not kw.get('files'):
headers.setdefault('content-type', 'application/x-www-form-urlencoded')
if method_name in self.ADD_CSRF_METHODS:
headers['x-csrftoken'] = self._get_csrf_token(alias)
response = method(alias, *args, **kw)
self._set_variable(response)
return response
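# Hedged usage sketch (not part of the original library): in a Robot Framework suite this
# class would be imported as a library and its methods exposed as keywords, e.g. assuming
# a ${session} suite variable and a RequestsLibrary session already created:
#
#   Library    HttpClient
#   ...
#   Post Protected Request    /api/login    data=${payload}
#
# post_protected_request() then resolves the session alias, copies the csrftoken cookie
# into the x-csrftoken header (see ADD_CSRF_METHODS) and stores the response in ${last_response}.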
|
# created by Ryan Spies
# rspies@lynkertech.com
# 2/23/2016
# Python 2.7
# Description: create a new ColdStateFiles directory by copying the old directory contents
# and replacing the params_previous.xml with a new file using the moduleparfile
# Also copy the original statesI.txt to the new directory
# The script also renames the original ColdStateFiles directory to "ColdStateFiles_previous"
# and renames the new directory to "ColdStateFiles"
# Must be run from the main "/Config" directory
import os
import shutil
import zipfile
maindir = os.getcwd()
########################### User Input ########################################
# original ColdStatesFiles directory
cold_dir = maindir + os.sep +'ColdStateFiles'
# new ColdStatesFiles directory (e.g. ColdStatesFiles_updated)
new_cold_dir = maindir + os.sep + 'updated_ColdStateFiles'
# directory with the calibrated parameter mods (sub_directories: SAC_SMA, SNOW17, UH, Lag_K)
param_dir = maindir + os.sep +'updated_ModuleParFiles' + os.sep
######################### End User Input ######################################
for basin in os.listdir(cold_dir):
print basin
basin_dir = cold_dir + os.sep + basin
# check if the new coldstatefiles directory contains the basin sub-directory
# if the directory doesn't exist copy structure from the original ColdStateFiles directory
new_basin_dir = new_cold_dir + os.sep + basin
if os.path.exists(new_cold_dir + os.sep + basin) is False:
shutil.copytree(basin_dir,new_basin_dir)
else:
print 'Directory already exists: ' + new_basin_dir
# loop through files in each zip file
for zip_file in os.listdir(basin_dir):
# grab the model component from zip file name -> use to locate mods and name files below
model_name = ((zip_file.split())[0]).rstrip() + '.xml'
model = model_name.split('_')[0]
# locate the corresponding ModuleParFile from the exported mods directory
add_file = param_dir + model + os.sep + model_name #+ model + os.sep + model_name
if os.path.isfile(add_file) is True:
# create new zip file to overwrite any existing zip files
new_open_zip = zipfile.ZipFile(new_basin_dir + os.sep + zip_file,'w')
open_zip = zipfile.ZipFile(basin_dir + os.sep + zip_file,'r')
# add the .xml moduleparfile to the new zip file and rename 'params_previous.xml'
new_open_zip.write(add_file, arcname = 'params_previous.xml')
# extract the 'statesI.txt' from the original ColdStateFiles directory to the working dir (new_basin_dir)
zip_files = open_zip.namelist()
if 'statesI.txt' in zip_files:
open_zip.extract('statesI.txt',new_basin_dir)
# add the extracted 'statesI.txt' to the new zip file
new_open_zip.write(new_basin_dir + os.sep +'statesI.txt', arcname = 'statesI.txt')
# remove the extracted copy of 'statesI.txt' from the working dir (new_basin_dir)
os.remove(new_basin_dir + os.sep +'statesI.txt')
else:
print 'statesI.txt not found in zip file: ' + basin + os.sep + model_name
# close files
open_zip.close()
new_open_zip.close()
else:
print 'ModuleParFile does not exist: ' + basin + os.sep + model_name
# rename directories to use the updated directory
#shutil.move(cold_dir, maindir + os.sep + 'ColdStateFiles_previous')
#shutil.move(new_cold_dir, maindir + os.sep + 'ColdStateFiles')
print 'Updated files to "ColdStateFiles" directory and renamed previous version "ColdStateFiles_previous"'
print 'Script Completed!'
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Download files for udemy-dl."""
from __future__ import unicode_literals
from __future__ import print_function
import os
import subprocess
import sys
import colorlog
import requests
try:
from urllib2 import urlopen
except ImportError:
from urllib.request import urlopen
logger = colorlog.getLogger('udemy_dl.download')
# User Agent String
USER_AGENT = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.10; rv:39.0) Gecko/20100101 Firefox/39.0'
class DLException(Exception):
"""Raise if some lectured failed to download."""
pass
def download(link, filename, update_progress, downloader='aria2c'):
"""Download files to given destination file-name."""
if 'youtube.com/' in link:
youtube_dl(link, filename)
else:
try:
if 'aria2c' in downloader:
aria2c_dl(link, filename)
elif 'axel' in downloader:
axel_dl(link, filename)
elif 'httpie' in downloader:
httpie_dl(link, filename)
elif 'curl' in downloader:
curl_dl(link, filename)
elif 'ffmpeg' in downloader: # only for hls
ffmpeg_dl(link, filename)
else:
urlopen_dl(link, filename, update_progress)
except OSError as exc:
if not os.path.exists(filename):
logger.critical('%s not found. Downloading with builtin downloader', downloader)
urlopen_dl(link, filename, update_progress)
else:
logger.critical('Failed to download: %s', exc)
download_status = 'failed'
return download_status
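# Hedged usage sketch (not part of the original module): a minimal call with a simple
# progress callback; the URL and filename below are placeholders only.
#
#   def update_progress(progress, label, width):
#       print('{0}: {1:.0%}'.format(label, progress))
#
#   download('https://example.com/lecture.mp4', 'lecture.mp4',
#            update_progress, downloader='aria2c')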
def httpie_dl(link, filename):
"""Use httpie as the downloader."""
command = ['http', '--continue', '--download', link, '-o', filename]
subprocess.call(command)
def axel_dl(link, filename):
"""Use axel as the downloader."""
command = ['axel', '-U', USER_AGENT, link, '-o', filename]
subprocess.call(command)
def curl_dl(link, filename):
"""Use curl as the downloader."""
command = ['curl', '-C', '-', link, '-o', filename]
cert_path = requests.certs.where()
if cert_path:
command.extend(['--cacert', cert_path])
else:
command.extend(['--insecure'])
subprocess.call(command)
def aria2c_dl(link, filename):
"""Use aria2c as the downloader."""
command = ['aria2c', '--continue', '--file-allocation=none', '--auto-file-renaming=false', '-k', '1M', '-x', '4', '-U', USER_AGENT, link, '-o', filename]
subprocess.call(command)
def ffmpeg_dl(link, filename):
"""Download m3u8/hls videos."""
command = ['ffmpeg', '-i', link, '-bsf:a', 'aac_adtstoasc', '-vcodec', 'copy', '-c', 'copy', '-crf', '50', '-f', 'mp4', filename]
subprocess.call(command)
def dl_progress(num_blocks, block_size, total_size):
"""Show a decent download progress indication."""
progress = int(num_blocks * block_size * 100 / total_size)
if num_blocks != 0:
sys.stdout.write(4 * '\b')
sys.stdout.write('{0:3d}%'.format((progress)))
def youtube_dl(link, filename):
"""Use youtube-dl as the downloader if videos are in youtube.com."""
try:
subprocess.call(['youtube-dl', '-o', filename, link])
except OSError:
raise DLException('Install youtube-dl to download this lecture')
def urlopen_dl(link, filename, update_progress):
"""Download file with urlopen, source https://gist.github.com/medigeek/3176958."""
response = urlopen(link)
with open(filename, 'wb') as file:
meta = response.info()
if sys.version_info[0] == 2:
file_size = int(meta.getheaders("Content-Length")[0])
else:
file_size = int(response.getheader("Content-Length").strip())
file_size_dl = 0
block_size = 8192
while True:
buffer = response.read(block_size)
if not buffer:
break
file_size_dl += len(buffer)
file.write(buffer)
progress = float(file_size_dl) / file_size
update_progress(progress, 'Downloading', 30)
file.close()
|
import numpy as np
import cv2
from FaceRecognizer import _find_vectors_distance
def hist(img, histSize, channels, mask, ranges):
return cv2.calcHist([img], histSize=histSize, channels=channels, mask=mask, ranges=ranges)
def sliding_window(img, window_size):
vectors = []
img_shape = img.shape
for row_ind in range(window_size, img_shape[0]-window_size):
vectors.append(_find_vectors_distance(img[row_ind - window_size], img[row_ind + window_size]))
return vectors
def scaler(img, area):
vectors = []
for row_ind, col_ind in zip(range(0, img.shape[0] - area, area), range(0, img.shape[1] - area, area)):
values = []
for offset in range(area):
values.append(img[row_ind + offset][col_ind + offset])
vectors.append(np.mean(values))
return vectors
def dft(image, p):
s = [p, p]
image = np.fft.fft2(image, s=s)
return image
def find_gradient_distance(row_1, row_2):
return abs(np.gradient(row_1) - np.gradient(row_2))
|
import os
import sys
import hydra
import time
import datetime
import subprocess
import regex as re
import logging
logger = logging.getLogger(__name__)
def check_instance_preemptible(instance_name):
output = subprocess.run(
f"gcloud compute instances describe {instance_name}", shell=True, check=True, stdout=subprocess.PIPE
)
status = re.findall(r"(?<=preemptible: ).*", output.stdout.decode("utf-8"))[0]
logger.info(f"{instance_name} is {status}.")
return 0
def check_instance_status(instance_name):
output = subprocess.run(
f"gcloud compute instances describe {instance_name}", shell=True, check=True, stdout=subprocess.PIPE
)
status = re.findall(r"(?<=status: ).*", output.stdout.decode("utf-8"))[0]
logger.info(f"{instance_name} is {status}.")
return status
def start_instance(instance_name):
output = subprocess.run(
f"gcloud compute instances start {instance_name}", shell=True, check=True, stdout=subprocess.PIPE
)
logger.info(output.stdout.decode("utf-8"))
return 0
def stop_instance(instance_name):
output = subprocess.run(
f"gcloud compute instances stop {instance_name}", shell=True, check=True, stdout=subprocess.PIPE
)
logger.info(output.stdout.decode("utf-8"))
return 0
def main_loop(cfg):
# report preemptible
check_instance_preemptible(cfg.preempt.instance_name)
last_time = datetime.datetime.strptime(cfg.preempt.start_time, '%Y-%m-%d %H:%M')
elapsed_time = last_time - last_time
logger.info(f"Start time: {last_time}")
while elapsed_time.total_seconds() < cfg.preempt.task_duration * 3600:
status = check_instance_status(cfg.preempt.instance_name)
logger.info(f"Checking Status: {status}")
# wait 30s if it is stopping
if status == "STOPPING":
time.sleep(30)
start_instance(cfg.preempt.instance_name)
logger.info(f"Starting Instance {cfg.preempt.instance_name}")
elif status == "TERMINATED":
start_instance(cfg.preempt.instance_name)
logger.info(f"Starting Instance {cfg.preempt.instance_name}")
elif status == "RUNNING":
pass
time.sleep(cfg.preempt.loop_interval)
elapsed_time = datetime.datetime.now() - last_time
# print(elapsed_time.total_seconds())
stop_instance(cfg.preempt.instance_name)
logger.info("The job finished!")
@hydra.main(config_path='conf/config.yaml')
def main(cfg=None):
print(cfg.pretty())
main_loop(cfg)
if __name__ == "__main__":
main()
|
from pymongo import MongoClient
c = MongoClient(host='localhost', port=27017, replicaset="foo")
db = c.my_db
print "connectiong to db"
def resetReplicaSet():
cl = MongoClient(host='localhost', port=27017)
config = cl.admin.command("replSetGetConfig")['config']
statusMembers = cl.admin.command("replSetGetStatus")['members']
configMembers = config["members"]
deadMembers = []
for m in statusMembers:
if m["health"] < 1:
deadMembers.append(m["_id"])
configMembers = [m for m in configMembers if m["_id"] not in deadMembers]
config["members"] = configMembers
config["version"] += 1
cl.admin.command("replSetReconfig", config, force=True)
n = db.nodes
nrs = db.nodesRes
for m in deadMembers:
old = n.find_one_and_delete({"replica_id": m})
nrs.delete_one({"_id": old["_id"]})
def insertNodeReplicaSet(value):
config = c.admin.command("replSetGetConfig")['config']
members = sorted(config['members'], key=lambda m: m['_id'])
config["version"] += 1
priority = 0.5
votes = 1
id = freeID(members)
if len(config['members']) >= 7:
priority = 0
votes = 0
member = {
"votes": votes,
"priority": priority,
"host": value['ip'] + ":27017",
"_id": id
}
value['replica_id'] = id
config['members'].append(member)
c.admin.command("replSetReconfig", config)
return value
def removeNodeReplicaSet(value):
config = c.admin.command("replSetGetConfig")['config']
members = config['members']
config["version"] += 1
id = value["replica_id"]
new_members = []
non_voting = []
voting = 0
to_remove = None
# remove old node from list and take all non voting members
for f in members:
if f["_id"] != id:
new_members.append(f)
if f['votes'] != 0:
voting += 1
else:
non_voting.append(f)
else:
to_remove = f
# if old node was a voting one and there are non voting nodes, promote one.
if to_remove['votes'] != 0 and len(non_voting) > 0:
newV = non_voting[0]
new_members.remove(newV)
newV["priority"] = 1
newV["votes"] = 1
new_members.append(newV)
config['members'] = new_members
c.admin.command("replSetReconfig", config, force=True)
def freeID(l):
if len(l) == 255:
raise NoMoreNodes("Reached the maximum number of nodes")
last = l[-1]["_id"]
if last < 255:
return last + 1
id = 0
# find free id
for m in l:
if m['_id'] > id:
break
else:
id += 1
return id
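# Behaviour sketch for freeID (illustrative, assuming the list is sorted by _id):
#   ids [0, 1, 3]           -> returns 4  (last id < 255, so just append)
#   ids [0, 1, 3, ..., 255] -> returns 2  (last id is 255, so scan for the first gap)
# NoMoreNodes is raised only once the member list already holds 255 entries.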
class NoMoreNodes(Exception):
pass
|
# coding=UTF-8
class Ponto(object):
def __init__(self, x, y):
self.__x = x
self.__y = y
def get_x(self):
return self.__x
def get_y(self):
return self.__y
def set_x(self, x):
if x > 0:
self.__x = x
def set_y(self, y):
if y > 0:
self.__y = y
x = property(fget=get_x, fset=set_x)
y = property(fget=get_y, fset=set_y)
p = Ponto(2, 3)
#print(p.__x)
#print(p._Ponto__x)
#print(p._Ponto__y)
#print(p.get_x())
#p.set_x(10)
p.x = -1000
print(p.x)
print(p.y)
|
class Color:
colors = {
"blue": "blue",
"red": "red",
"black": "black",
"green": "green",
"orange": "orange",
"silver": "silver"
}
def get_color(self):
return self.colors
|
#!/bin/python3
import sys
n = int(3)
a = list(map(int, [3, 2, 1]))
# Write Your Code Here
numberOfSwaps = 0
for i in range(n):
for j in range(i, n - 1):
if a[i] > a[j + 1]:
numberOfSwaps += 1
a[i], a[j + 1] = a[j + 1], a[i]
print('Array is sorted in {} swaps.'.format(numberOfSwaps))
print('First Element: {}'.format(a[0]))
print('Last Element: {}'.format(a[-1]))
|
# -*- coding: utf-8 -*-
"""
Created on Thu Sep 19 14:20:28 2019
The actual work of creating the wavelet and conducting the 2d convolution
@author: matthewmorriss
"""
def conv2_mexh(patch,a,dx):
import numpy as np
from scipy import signal
#Generate the Mexican hat wavelet kernel at wavelet scale a. The kernel must be large enough for the wavelet to decay to ~0 at the edges. The Mexican hat is proportional to the second derivative of a Gaussian
[X,Y] = np.meshgrid(np.arange(-8*a,8*a),np.arange(-8*a,8*a))
psi = (1/a)*(2 - (X/a)**2 - (Y/a)**2)*(np.exp(-((X/a)**2 + (Y/a)**2)/2))
# TO PLOT PSI UNCOMMENT
# from matplotlib import cm
# ax = plt.axes(projection = "3d")
# ax.plot_surface(X, Y, psi,cmap=cm.coolwarm)
# C =
# TRYING TO FIGURE OUT THE MOST EFFICIENT 2D CONVOLUTION
# start = time.time()
# C = (dx**2)*signal.convolve2d(patch, psi,'same')
# end = time.time()
# print(end-start)
#
# print()
# print()
# start = time.time()
C = (dx**2)*signal.fftconvolve(patch,psi,'same')
# end = time.time()
# print(end-start)
# start = time.time()
# C1 = scipy.ndimage.convolve(patch, psi, mode = 'constant', cval = 0)
# end = time.time()
# print(end-start)
# ADAM BOOTH'S SOLUTION, DIDN'T WORK
# C2 = np.real(np.fft.ifft2(np.fft.fft2(patch) * np.fft.fft2(psi, s = patch.shape)))
# C = (dx**2)*signal.fftconvolve(patch,psi,'same')
#convolve patch with the psi using the 2d convolution, multiplying by dx^2 to approximate the double integral. "same" crops C to the same size as the patch
return(C)
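# Hedged usage sketch (synthetic data, not part of the original script): for a 256x256
# patch on a 1 m grid, the scale-8 coefficients keep the patch's shape because
# fftconvolve is called with mode 'same':
#
#   import numpy as np
#   patch = np.random.rand(256, 256)
#   C = conv2_mexh(patch, 8, 1.0)
#   assert C.shape == patch.shape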
|
from django.conf.urls.defaults import *
urlpatterns = patterns('mulan.views',
(r'^contacts', 'Contacts'),
(r'^menu(?:/(-?\d+))?', 'Menu'),
(r'^business(\+)?', 'BusinessLunch'),
(r'^vacancies', 'Vacancies'),
(r'^delivery_success', 'DeliverySuccess'),
(r'^delivery', 'Delivery'),
(r'^upload', 'Upload'),
(r'^new_prices([+-]?\d+).txt', 'NewPricesTxt'),
(r'^price_increase', 'PriceIncrease'),
(r'^stats/xls', 'StatsXls'),
(r'^$', 'MainPage'),
(r'^.*$', 'MainPageRedirect'),
)
|
from datetime import date
class AwakenHistory:
PURPLE = '\033[95m'
CYAN = '\033[96m'
DARKCYAN = '\033[36m'
BLUE = '\033[94m'
GREEN = '\033[92m'
# YELLOW = '\033[93m'
YELLOW = ''
RED = '\033[91m'
# BOLD = '\033[1m'
BOLD = ''
UNDERLINE = '\033[4m'
# END = '\033[0m'
END = ''
NAME_LENGTH = 16
STAR_LENGTH = 4
DAY_LENGTH = 10
DAYS_LENGTH = 6
FOOTER_LENGTH = 46
name: str
awaken: int
day: date
days: int
def __init__(self, name: str, day: date, days: int, awaken=7):
self.name = name
self.awaken = awaken
self.day = day
self.days = days
def print(self):
name = self.name.ljust(self.NAME_LENGTH).title()
awaken = str(self.awaken).center(self.STAR_LENGTH)
day = str(self.day).ljust(self.DAY_LENGTH)
days = str(self.days).rjust(self.DAYS_LENGTH)
start = ""
end = ""
print(start + f"{awaken} {name} {day} {days}" + end)
@staticmethod
def printHeader(element: str):
name = "HERO".ljust(AwakenHistory.NAME_LENGTH)
star = "STAR".ljust(AwakenHistory.STAR_LENGTH)
day = "DATE".ljust(AwakenHistory.DAY_LENGTH)
days = "DAYS".rjust(AwakenHistory.DAYS_LENGTH)
print(AwakenHistory.BOLD + element.upper() + AwakenHistory.END)
print(AwakenHistory.BOLD + f"{star} {name} {day} {days}" + AwakenHistory.END)
@staticmethod
def printFooter():
print("".center(AwakenHistory.FOOTER_LENGTH, "-"))
|
import base64
import logging
import os
import httpx
from fastapi import FastAPI, UploadFile, File, HTTPException
from prometheus_client import Counter, REGISTRY
from pydantic import BaseModel
from prometheus_fastapi_instrumentator import Instrumentator
app = FastAPI(
docs_url="/doc",
)
logger = logging.getLogger(__name__)
TENSORFLOW_URL = "http://{host}:{port}/v1/models/{model}:predict".format(
host=os.environ.get("TENSORFLOW_HOST", "localhost"),
port=os.environ.get("TENSORFLOW_API_REST_PORT", 8501),
model=os.environ.get("MODEL_NAME", "resnet"),
)
os.environ.setdefault('ENABLE_METRICS', 'true')
# Fix double registered cause by async
registered = {metric.name: metric for metric in REGISTRY.collect()}
prometheus = Instrumentator(should_respect_env_var=True)
if 'http_requests' not in registered:
prometheus.instrument(app)
prometheus.expose(app, should_gzip=True, endpoint='/metrics')
if 'file_type' not in registered:
FILETYPE_COUNT = Counter(
"file_type_total",
"Number of different content-types uploaded.",
labelnames=("file_type",)
)
else:
FILETYPE_COUNT = REGISTRY._names_to_collectors['file_type']
# End fix
class MessageError(BaseModel):
detail: str
@app.post(
"/api/v1/resnet",
responses={
200: {
"content": {
"application/json": {
"schema": {
"title": "Response",
"type": "object",
"properties": {
"class": {
"title": "TensorFlow response", "type": "int",
}
},
},
"examples": {
"valid": {
"summary": "Response",
"value": {
"class": 100,
}
},
},
},
},
"description": "Return the JSON with resnet result.",
},
400: {
"model": MessageError,
"content": {
"application/json": {
"examples": {
"invalid": {
"summary": "Invalid content type",
"value": {
"detail": "Invalid content type",
},
}
},
},
},
"description": "Invalid file.",
},
500: {
"model": MessageError,
"content": {
"application/json": {
"examples": {
"invalid": {
"summary": "Invalid response from TensorFlow",
"value": {
"detail": "Error processing file",
},
},
"connection error": {
"summary": "Cannot connect to TensorFlow",
"value": {
"detail": "Cannot connect to TensorFlow",
},
}
},
},
},
"description": "Error sending/receiving data to/from TensorFlow.",
}
},
)
async def resnet(img: UploadFile = File(...)):
counter = FILETYPE_COUNT.labels(file_type=img.content_type)
if hasattr(counter, 'inc'):
counter.inc()
if not img.content_type.startswith("image/"):
logger.debug("Invalid file", extra={
"Content-Type": img.content_type,
"Filename": img.filename
})
raise HTTPException(status_code=400, detail="Invalid content type")
logger.debug("Read file", extra={
"Content-Type": img.content_type,
"Filename": img.filename
})
contents = await img.read()
jpeg_bytes = base64.b64encode(contents).decode("utf-8")
predict_request = '{"instances" : [{"b64": "%s"}]}' % jpeg_bytes
try:
async with httpx.AsyncClient() as client:
response = await client.post(TENSORFLOW_URL, data=predict_request)
except httpx.HTTPError:
logger.exception("Cannot connect to TensorFlow", extra={
"url": TENSORFLOW_URL,
})
raise HTTPException(status_code=500, detail="Cannot connect to TensorFlow")
logger.info("Response from TensorFlow", extra={
"raw": response.text,
"time": response.elapsed.total_seconds(),
"status": response.status_code,
})
tf_data = response.json()
if response.status_code != 200:
error = tf_data.get("error", "Error processing file")
raise HTTPException(status_code=500, detail=error)
predictions = tf_data.get("predictions")
if not isinstance(predictions, list) or not predictions:
raise HTTPException(status_code=500, detail="No predictions received")
if not (isinstance(predictions[0], dict) and isinstance(predictions[0].get("classes"), int)):
raise HTTPException(status_code=500, detail="Unexpected response from TensorFlow")
classes = predictions[0].get("classes")
return {
"class": classes
}
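# Hedged usage sketch (not part of the original app): once the server is running on port
# 8000, the endpoint can be exercised with httpx; the image path is a placeholder.
#
#   import httpx
#   with open("cat.jpg", "rb") as f:
#       r = httpx.post("http://localhost:8000/api/v1/resnet",
#                      files={"img": ("cat.jpg", f, "image/jpeg")})
#   print(r.status_code, r.json())   # e.g. 200 {'class': 285}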
if __name__ == "__main__":
import uvicorn
reload = "--reload" in os.environ.get("API_ARGS", "")
uvicorn.run("main:app", host="0.0.0.0", port=8000, reload=reload)
|
#!/usr/bin/python3
# ^^ note the python directive on the first line
# COMP 9414 agent initiation file
# requires the host is running before the agent
# designed for python 3.6
# typical initiation would be (file in working directory, port = 31415)
# python3 agent.py -p 31415
# created by Leo Hoare
# with slight modifications by Alan Blair
import sys
import socket
import copy
# declaring visible grid to agent
view = [['' for _ in range(5)] for _ in range(5)]
pickable = ['a', 'k', 'o', '$', ' ']
mapping_table = {(0, 1, '^'): ['r', 'f'], (0, 1, '>'): ['f'], (0, 1, 'v'): ['l', 'f'], (0, 1, '<'): ['l', 'l', 'f'],
(0, -1, '^'): ['l', 'f'], (0, -1, '>'): ['l', 'l', 'f'], (0, -1, 'v'): ['r', 'f'], (0, -1, '<'): ['f'],
(1, 0, '^'): ['l', 'l', 'f'], (1, 0, '>'): ['r', 'f'], (1, 0, 'v'): ['f'], (1, 0, '<'): ['l', 'f'],
(-1, 0, '^'): ['f'], (-1, 0, '>'): ['l', 'f'], (-1, 0, 'v'): ['l', 'l', 'f'], (-1, 0, '<'): ['r', 'f']}
get_direction = {(0, 1): '>', (0, -1): '<', (1, 0): 'v', (-1, 0): '^'}
class Node:
def __init__(self, value, point):
self.value = value
self.point = point
self.parent = None
self.visited = False
self.H = 0
self.G = 0
def move_cost(self, other):
return 0 if self.value == ' ' else 1
class Agent:
def __init__(self):
self.inventory = {'a':False, 'k':False, '$':False, 'r':False, 'o':0}
self.axe_location = []
self.key_location = []
self.stepping_stone = []
self.water_location = []
self.tree_location = []
self.gold_location = []
self.door_location = []
self.unvisited = [] # use to stored all the walkable but unvisited cells
self.on_water = False
self.agent_x = 80
self.agent_y = 80
self.initial_x = 80
self.initial_y = 80
self.temp_x = 0
self.temp_y = 0
self.direction = '^' # Always consider the agent direction is '^'
self.grid = [[ '?' for i in range(160)] for j in range(160)] # create a 2d list that store all the Node objects
for x in range(len(self.grid)):
for y in range(len(self.grid[x])):
self.grid[x][y] = Node(self.grid[x][y], (x, y))
self.pending_move = [] # list to store the pending moves
# Helper function
def rotate(self, view, time): # rotate 2d list clockwise
for _ in range(time):
temp = zip(*view[::-1]) # return a list of tuples
view = [list(elem) for elem in temp] # convert list of tuples to list of lists
return view
def print_list(self, input_list):
print('\n'.join(map(''.join, input_list)))
# get the cell in front of the agent
def get_front_tail(self): # get the grid cell in front of the agent
if self.direction == '^':
x = self.agent_x - 1
y = self.agent_y
elif self.direction == '>':
x = self.agent_x
y = self.agent_y + 1
elif self.direction == 'v':
x = self.agent_x + 1
y = self.agent_y
else:
x = self.agent_x
y = self.agent_y - 1
return self.grid[x][y]
#######################################################################################
########## Line 95 to Line 170, Update the self.grid list from view and from move
#######################################################################################
def update_from_view(self, view, on_water):
# Rotate the view based on which direction the agent is facing
if self.direction == '>':
view = self.rotate(view, 1)
if self.direction == 'v':
view = self.rotate(view, 2)
if self.direction == '<':
view = self.rotate(view, 3)
self.grid[self.agent_x][self.agent_y].visited = True
self.grid[self.agent_x][self.agent_y].value = self.direction
# Iterate through the view and update the internal map
for i in range(5):
for j in range(5):
x = self.agent_x - (2 - i)
y = self.agent_y + (j - 2)
self.grid[x][y].value = view[i][j]
# store all adjacent cells which the agent can actually walk through
if (i == 1 and j == 2) or (i == 2 and j == 1) or (i == 2 and j == 3) or (i == 3 and j == 2):
if (x, y) not in self.unvisited and self.grid[x][y].visited == False:
if view[i][j] in pickable and on_water == False:
self.unvisited.append((x, y))
if on_water:
if view[i][j] == '~':
self.unvisited.append((x, y))
if view[i][j] in pickable:
self.unvisited.insert(0, (x, y))
if view[i][j] == 'a' and self.grid[x][y] not in self.axe_location:
self.axe_location.append(self.grid[x][y])
if view[i][j] == 'k' and self.grid[x][y] not in self.key_location:
self.key_location.append(self.grid[x][y])
if view[i][j] == '-' and self.grid[x][y] not in self.door_location:
self.door_location.append(self.grid[x][y])
if view[i][j] == 'o' and self.grid[x][y] not in self.stepping_stone:
self.stepping_stone.append(self.grid[x][y])
if view[i][j] == '~' and self.grid[x][y] not in self.water_location:
self.water_location.append(self.grid[x][y])
if view[i][j] == 'T' and self.grid[x][y] not in self.tree_location:
self.tree_location.append(self.grid[x][y])
if view[i][j] == '$' and self.grid[x][y] not in self.gold_location:
self.gold_location.append(self.grid[x][y])
print('At this stage, the agent direction is: ' + self.direction)
print("At this moment, the agent coordinate is: ({0}, {1})".format(self.agent_x, self.agent_y))
print('The unvisited list is: {0}'.format(self.unvisited))
print('The on_water variable is: {0}'.format(self.on_water))
def update_inventory(self, x, y):
if self.grid[x][y].value == 'a':
if self.grid[x][y] in self.axe_location:
self.axe_location.remove(self.grid[x][y])
self.inventory['a'] = True
if self.grid[x][y].value == 'k':
if self.grid[x][y] in self.key_location:
self.key_location.remove(self.grid[x][y])
self.inventory['k'] = True
if self.grid[x][y].value == 'o':
if self.grid[x][y] in self.stepping_stone:
self.stepping_stone.remove(self.grid[x][y])
self.inventory['o'] += 1
if self.grid[x][y].value == '$':
if self.grid[x][y] in self.gold_location:
self.gold_location.remove(self.grid[x][y])
self.inventory['$'] = True
if self.grid[x][y].value == 'T':
if self.grid[x][y] in self.tree_location:
self.tree_location.remove(self.grid[x][y])
self.inventory['r'] = True
#######################################################################################
############ Line 176 to Line 237, A* algorithm #############################
#######################################################################################
def children(self, node, on_water, grid):
x, y = node.point
result = []
for r, c in [(x - 1, y), (x, y + 1), (x + 1, y), (x, y - 1)]:
if r >= 0 and r < len(grid[0]) and c >= 0 and c < len(grid):
if grid[r][c].value in pickable and on_water == False:
result.append(grid[r][c])
if on_water and grid[r][c].value == '~':
result.append(grid[r][c])
return result
def manhattan(self, a, b):
return abs(a.point[0] - b.point[0]) + abs(a.point[1] - b.point[1])
def clean_up(self, grid):
for i in range(len(grid)):
for j in range(len(grid[i])):
grid[i][j].G = 0
grid[i][j].H = 0
grid[i][j].parent = None
# this A star algorithm is adapted from https://gist.github.com/jamiees2/5531924
# with slight modifications to serve our purpose
def aStar(self, start, goal, grid): # each grid element is a node object
self.clean_up(grid)
openset = set() # The open set
closedset = set() # The closed set
current = start # Current point is the starting point
openset.add(current) # Add the starting point to the open set
while openset: # While the open set is not empty
current = min(openset, key=lambda o:o.G + o.H)
if current.point == goal.point:
path = []
while current.parent:
current.visited = True
path.append(current.point)
current = current.parent
path.append(current.point)
current.visited = True
self.clean_up(grid)
return path[::-1]
openset.remove(current) # Remove the item from the open set
closedset.add(current) # Add it to the closed set
for node in self.children(current, self.on_water, grid):
if node in closedset: # If it is already in the closed set, skip it
continue
if node in openset: # Otherwise if it is already in the open set
new_g = current.G + current.move_cost(node)
if node.G > new_g:
node.G = new_g
node.parent = current
else:
node.G = current.G + current.move_cost(node)
node.H = self.manhattan(node, goal)
node.parent = current # Set the parent to our current item
openset.add(node) # Add it to the set
self.clean_up(grid)
return None # return None if no path is found
# Helper function: given a list of tools, return the subset that the agent can actually reach
def reachable_tools(self, tool_list):
result = []
for element in tool_list:
x, y = element.point
if self.grid[x - 1][y].visited or self.grid[x][y - 1].visited or self.grid[x][y + 1].visited or self.grid[x + 1][y].visited:
result.append(element)
# tool_list.remove(element)
return result
def near_the_tool(self, node, on_water, raft=False):
x, y = node.point
for i, j in [(x - 1, y), (x, y - 1), (x, y + 1), (x + 1, y)]:
if on_water:
if self.grid[i][j].value == '~':
return self.grid[i][j]
if not on_water:
if self.grid[i][j].value == ' ' and self.grid[i][j].visited:
return self.grid[i][j]
# Flood Fill algorithm adapted from http://inventwithpython.com/blogstatic/floodfill/recursivefloodfill.py
# with slight modifications to serve our purpose
def floodFill(self, world, x, y, oldChar, newChar):
worldWidth = len(world)
worldHeight = len(world[0])
if oldChar == None:
oldChar = world[x][y].value
if world[x][y].value != oldChar:
return
world[x][y].value = newChar
if x > 0: # left
self.floodFill(world, x-1, y, oldChar, newChar)
if y > 0: # up
self.floodFill(world, x, y-1, oldChar, newChar)
if x < worldWidth-1: # right
self.floodFill(world, x+1, y, oldChar, newChar)
if y < worldHeight-1: # down
self.floodFill(world, x, y+1, oldChar, newChar)
# if the self.unvisited list is not empty, there are still some nodes the agent hasn't been to.
# pop the last element of the list; if this node is adjacent to the agent, just call path_to_actions with the correct path
# if this node is not adjacent to the agent, do an A* search to get the path coordinates the agent needs to follow, then call path_to_actions to get a series of moves
def take_action(self):
if self.inventory['$']:
path = self.aStar(self.grid[self.agent_x][self.agent_y], self.grid[80][80], self.grid)
return self.path_to_actions(path)
if len(self.unvisited) != 0:
start = (self.agent_x, self.agent_y)
end = self.unvisited.pop()
if abs(start[0] - end[0]) + abs(start[1] - end[1]) == 1:
return self.path_to_actions([start, end])
path = self.aStar(self.grid[start[0]][start[1]], self.grid[end[0]][end[1]], self.grid)
if not path and self.on_water:
node = self.near_the_tool(self.grid[end[0]][end[1]], self.on_water)
path = self.aStar(self.grid[self.agent_x][self.agent_y], node, self.grid)
path.append(end)
self.on_water = False
self.inventory['r'] = False
return self.path_to_actions(path)
if not path and self.on_water == False:
while True:
end = self.unvisited.pop()
path = self.aStar(self.grid[self.agent_x][self.agent_y], self.grid[end[0]][end[1]], self.grid)
if not path:
continue
return self.path_to_actions(path)
return self.path_to_actions(path)
# else when the self.unvisited list is empty
# that means the agent has visited every node that it can reach
# Then it should use tools to cut trees and unlock doors
if self.inventory['a'] and self.inventory['r'] == False:
reachable_tree = self.reachable_tools(self.tree_location)
while len(reachable_tree) != 0:
tree = reachable_tree.pop()
node = self.near_the_tool(tree, self.on_water)
print(tree.point)
print(node.point)
print(self.agent_x, self.agent_y)
print(self.on_water)
path = self.aStar(self.grid[self.agent_x][self.agent_y], node, self.grid)
path.append(tree.point)
moves = self.path_to_actions(path)
moves.insert(-1, 'c')
if tree in self.tree_location:
self.tree_location.remove(tree)
return moves
if self.inventory['k']:
reachable_door = self.reachable_tools(self.door_location)
while len(reachable_door) != 0:
door = reachable_door.pop()
node = None
node = self.near_the_tool(door, self.on_water)
# print('Node is {0}'.format(node.point))
# print(door.point)
# print(self.agent_x, self.agent_y)
if not node:
node = self.near_the_tool(door, False)
path = self.aStar(self.grid[self.agent_x][self.agent_y], node, self.grid)
if not path:
if self.on_water == False:
self.temp_x, self.temp_y = self.agent_x, self.agent_y
nearest = None
distance = 160
for i in self.water_location:
t = abs(i.point[0] - self.agent_x) + abs(i.point[1] - self.agent_y)
if t < distance:
distance = t
nearest = i
act = None
if distance == 1:
act = self.path_to_actions([(self.agent_x, self.agent_y), nearest.point])
else:
nearest_land = self.near_the_tool(nearest, self.on_water)
p = self.aStar(self.grid[self.agent_x][self.agent_y], nearest_land, self.grid)
p.append(nearest.point)
act = self.path_to_actions(p)
self.on_water = True
print(self.grid[self.agent_x][self.agent_y].value)
if self.grid[self.agent_x][self.agent_y].value == '~':
self.on_water = True
return act
else:
adjecent_water = None
print(self.agent_x, self.agent_y)
for m, n in [(self.agent_x - 1, self.agent_y), (self.agent_x, self.agent_y - 1), (self.agent_x, self.agent_y + 1), (self.agent_x + 1, self.agent_y)]:
if m >= 0 and m < len(self.grid) and n >= 0 and n < len(self.grid[0]) and self.grid[m][n].value == '~':
adjecent_water = self.grid[m][n]
break
print(adjecent_water.value)
print(adjecent_water.point)
c = self.my_copy()
self.floodFill(c, self.temp_x, self.temp_y, ' ', '#')
self.floodFill(c, adjecent_water.point[0], adjecent_water.point[1], '~', ' ')
self.on_water = False
print(self.agent_x, self.agent_y)
print(node.point)
print(c[81][95].value)
print(c[82][96].value)
path = self.aStar(c[self.agent_x][self.agent_y], node, c)
path.append(door.point)
moves = self.path_to_actions(path)
moves.insert(-1, 'u')
if door in self.door_location:
self.door_location.remove(door)
return moves
path.append(door.point)
moves = self.path_to_actions(path)
moves.insert(-1, 'u')
return moves
# At this stage, the agent would already have cut all the trees and unlocked all the doors, depending on the inventory
# Now, the agent must use a raft or stepping stones to explore further
if self.inventory['r'] and self.on_water == False:
reachable_water = self.reachable_tools(self.water_location)
water = reachable_water.pop()
node = self.near_the_tool(water, self.on_water)
path = self.aStar(self.grid[self.agent_x][self.agent_y], node, self.grid)
path.append(water.point)
moves = self.path_to_actions(path)
self.on_water = True
return moves
# convert a list of coordinate tuples to a list of actions
def path_to_actions(self, path):
actions = []
for i in range(len(path) - 1):
abs_x = path[i + 1][0] - path[i][0]
abs_y = path[i + 1][1] - path[i][1]
actions += mapping_table[(abs_x, abs_y, self.direction)]
self.direction = get_direction[(abs_x, abs_y)]
self.agent_x += abs_x
self.agent_y += abs_y
self.update_inventory(self.agent_x, self.agent_y)
self.grid[self.agent_x][self.agent_y].visited = True
if self.grid[self.agent_x][self.agent_y].value == '~':
self.on_water = True
if self.grid[self.agent_x][self.agent_y].value == ' ':
self.on_water = False
if (self.agent_x, self.agent_y) in self.unvisited:
self.unvisited.remove((self.agent_x, self.agent_y))
print('After the moves, the agent coordinates is ({0} {1})'.format(self.agent_x, self.agent_y))
print('The value stored at this place is: {0}'.format(self.grid[self.agent_x][self.agent_y].value))
print('The inventory is: {0}'.format(self.inventory))
return actions
def my_copy(self):
result = [['?' for _ in range(160)] for _ in range(160)]
for i in range(len(self.grid)):
for j in range(len(self.grid[i])):
node = Node(self.grid[i][j].value, self.grid[i][j].point)
result[i][j] = node
return result
############################################################################################
######################## Above are the code for Node and Agent class #######################
agent = Agent()
actions = []
# function to get an action from the AI or the user
def get_action(view):
global actions
if len(actions) == 0:
agent.update_from_view(view, agent.on_water)
actions = agent.take_action()
print('The actions to take are: {0}'.format(actions), '\n')
return actions.pop(0)
else:
temp = actions.pop(0)
print('The action to take is: {0}'.format(list(temp)), '\n')
return temp
# while 1:
# inp = input("Enter Action(s): ")
# # return actions.pop(0)
# return inp
# while 1:
# inp = input("Enter Action(s): ")
# inp.strip()
# final_string = ''
# for char in inp:
# if char in ['f','l','r','c','u','b','F','L','R','C','U','B']:
# final_string += char
# if final_string:
# agent.update_from_move(final_string[0])
# return final_string[0]
# helper function to print the grid
def print_grid(view):
print('+-----+')
for ln in view:
print("|"+str(ln[0])+str(ln[1])+str(ln[2])+str(ln[3])+str(ln[4])+"|")
print('+-----+')
if __name__ == "__main__":
# checks for correct amount of arguments
if len(sys.argv) != 3:
print("Usage Python3 "+sys.argv[0]+" -p port \n")
sys.exit(1)
port = int(sys.argv[2])
# checking for valid port number
if not 1025 <= port <= 65535:
print('Incorrect port number')
sys.exit()
# creates TCP socket
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
# tries to connect to host
# requires host is running before agent
sock.connect(('localhost',port))
except (ConnectionRefusedError):
print('Connection refused, check host is running')
sys.exit()
# navigates through grid with input stream of data
i=0
j=0
while 1:
data=sock.recv(100)
if not data:
exit()
for ch in data:
if (i==2 and j==2):
view[i][j] = '^'
view[i][j+1] = chr(ch)
j+=1
else:
view[i][j] = chr(ch)
j+=1
if j>4:
j=0
i=(i+1)%5
if j==0 and i==0:
print_grid(view) # COMMENT THIS OUT ON SUBMISSION
action = get_action(view) # gets new actions
sock.send(action.encode('utf-8'))
sock.close()
|
from django.shortcuts import render
from django.http import HttpResponse
# Create your views here.
def index(request):
my_dict={'insert_me': 'INSERTED FROM VIEW','second_key':'AGAIN FROM VIEW'}
return render(request,'first_app/Index.html',my_dict)
def home(request):
return HttpResponse("<h1>WELCOME TO THE HOMEPAGE</h1>")
|
from Language import LanguageInstance
__author__ = 'Ritwik'
PYTHON_EXTENSION = "py"
PYTHON_EXEC_COM = "python"
class PythonInstance(LanguageInstance):
def __init__(self, working_directory):
LanguageInstance.__init__(self, working_directory, PYTHON_EXTENSION, "", PYTHON_EXEC_COM)
def run_std_io(self, pr_text, pr_input, pr_time, working_directory=""):
# Get working directory - either provided or default
if working_directory == "":
working_directory = self.default_working_directory
# Get info from setup
self.pr_info = self.program_setup(pr_text, working_directory)
self.pr_info['command'] = self.exec_command
self.pr_info['input'] = pr_input
# timer_thread = time_program
self.pr_info['output'] = None
pr_thread = self.run_command(self.pr_info['command'], pr_input)
pr_thread.start()
pr_thread.join(pr_time)
if pr_thread.is_alive():
print 'Thread running too long. Terminating thread.'
self.pr_info['instance'].terminate()
pr_thread.join()
self.pr_info = pr_thread.result_queue.get()
return self.pr_info
py_instances = []
for i in range(2):
py_instances.append(PythonInstance("."))
pr_info = py_instances[0].run_std_io("from time import sleep\nx = raw_input()\nprint x", "1", 2)
pr_info_2 = py_instances[1].run_std_io("x = raw_input()\nprint 2*x", "6", 2)
for i in range(len(py_instances)):
del py_instances[0]
print pr_info['output']
print pr_info_2['output']
|
# Requires api from setup.py and avg polarity from sentiment.py #
# Updates status #
import setup, sentiment as snt
setup.api.update_status("Test tweet for a demo")
|
import unittest
from katas.kyu_8.beginner_lost_without_a_map import maps
class MapsTestCase(unittest.TestCase):
def test_equal_1(self):
self.assertEqual(maps([1, 2, 3]), [2, 4, 6])
def test_equal_2(self):
self.assertEqual(maps([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]),
[0, 2, 4, 6, 8, 10, 12, 14, 16, 18])
def test_equal_3(self):
self.assertEqual(maps([]), [])
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 31 15:13:57 2017
@author: mulugetasemework
"""
"""
Created on Mon Aug 28 17:34:06 2017
@author: mulugetasemework
This code does geometric transformations to "corrupt" data and increase
the size of the training and test sets. It is called by "processDataAndStup.py"
"""
import cv2
import numpy as np
import matplotlib.pyplot as plt
from skimage.transform import warp
plt.close("all")
fig, ax = plt.subplots()
import matplotlib.pylab as pylab
params = {'legend.fontsize': 'small',
'figure.figsize': (15, 5),
'axes.labelsize': 'x-large',
'axes.titlesize':'x-large',
'xtick.labelsize':'small',
'ytick.labelsize':'small' }
plt.gca().xaxis.set_major_locator(plt.NullLocator())
plt.gca().yaxis.set_major_locator(plt.NullLocator())
plt.setp( ax.get_xticklabels(), visible=False)
plt.setp( ax.get_yticklabels(), visible=False)
trX,trY = 1, 2
pylab.rcParams.update(params)
def TransformInputsDef(inputMatrix,in_labels,imageSize1,translateImage,rotateImage,rotationAngle,affineOrNot,perspectiveOrNot,WarpOrNot,keepDataSize):
train_labels = in_labels
plt.subplot(231),plt.imshow(inputMatrix[-1],cmap='gray' ),plt.title('Input')
if keepDataSize==1:
d = list()
else:
d = list(inputMatrix)
if translateImage==1:
print("--- Translating " + str(len(inputMatrix)) + " images")
if keepDataSize==0:
train_labels = np.vstack([train_labels, train_labels])
for im in range(len(inputMatrix)):
img = inputMatrix[im]
M = np.float32([[1,0,trX],[0,1,trY]])
dst = cv2.warpAffine(img,M,(imageSize1,imageSize1))
d.append(dst)
plt.subplot(232),plt.imshow(dst,cmap='gray' ),plt.title('Translated', fontsize=7)
plt.show()
if rotateImage==1:
print("---- rotating " + str(len(d)) + " images by: " + str(rotationAngle) + " degrees")
if keepDataSize==0:
train_labels = np.vstack([train_labels, train_labels])
d2=list()
for im in range(len(d)):
img = d[im]
M = cv2.getRotationMatrix2D((imageSize1/2,imageSize1/2),rotationAngle,1)
dst = cv2.warpAffine(img,M,(imageSize1,imageSize1))
if keepDataSize==1:
d2.append(dst)
else:
d.append(dst)
if keepDataSize==1:
d=d2
plt.subplot(233),plt.imshow(dst,cmap='gray' ),plt.title('Rotated', fontsize=7)
plt.show()
if affineOrNot==1:
print("----- Affine transforming " + str(len(d)) + " images...")
if keepDataSize==0:
train_labels = np.vstack([train_labels, train_labels])
d2=list()
for im in range(len(d)):
img = d[im]
shift1 = 1
pts1 = np.float32([[shift1,shift1],[shift1*4,shift1],[shift1,shift1*4]])
pts2 = np.float32([[shift1/5,shift1*2],[shift1*4,shift1],[shift1*2,shift1*5]])
M = cv2.getAffineTransform(pts1,pts2)
dst = cv2.warpAffine(img,M,(imageSize1,imageSize1))
if keepDataSize==1:
d2.append(dst)
else:
d.append(dst)
if keepDataSize==1:
d=d2
plt.subplot(234),plt.imshow(dst,cmap='gray' ),plt.title('Affine-Transformed', fontsize=7)
plt.show()
if perspectiveOrNot==1:
print("------ Perspective transforming " + str(len(d)) + " images...")
if keepDataSize==0:
train_labels = np.vstack([train_labels, train_labels])
d2=list()
for im in range(len(d)):
img = d[im]
pts1 = np.float32([[2,3],[imageSize1+1,4],[2,imageSize1+2],[imageSize1+3,imageSize1+4]])
pts2 = np.float32([[0,0],[imageSize1-2,0],[0,imageSize1-2],[imageSize1-2,imageSize1-2]])
M = cv2.getPerspectiveTransform(pts1,pts2)
dst = cv2.warpPerspective(img,M,(imageSize1-2,imageSize1-2))
dst = np.resize(dst,[imageSize1,imageSize1])
if keepDataSize==1:
d2.append(dst)
else:
d.append(dst)
if keepDataSize==1:
d=d2
plt.subplot(235),plt.imshow(dst,cmap='gray' ),plt.title('Perspective-Transformed', fontsize=7)
plt.show()
if WarpOrNot==1:
print("------- Warping " + str(len(d)) + " images...")
if keepDataSize==0:
train_labels = np.vstack([train_labels, train_labels])
d2=list()
for im in range(len(d)):
img = d[im]
img *= 1/np.max(img)
matrix = np.array([[1, 0, 0], [0, 1, -5], [0, 0, 1]])
dst = warp(img, matrix)
if keepDataSize==1:
d2.append(dst)
else:
d.append(dst)
if keepDataSize==1:
d=d2
plt.subplot(236),plt.imshow(dst,cmap='gray' ),plt.title('Warped', fontsize=7)
plt.show()
print("training label size :" + str(train_labels.shape))
print("traiing data length :" + str(len(d)))
return d,train_labels
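# Hedged usage sketch (synthetic 28x28 data, not part of the original script): with
# keepDataSize=0 every enabled transform doubles both the image list and the label stack,
# while keepDataSize=1 replaces the images instead of appending, keeping the size fixed.
#
#   imgs = [np.random.rand(28, 28).astype(np.float32) for _ in range(4)]
#   labels = np.arange(4).reshape(-1, 1)
#   d, lab = TransformInputsDef(imgs, labels, 28, 1, 1, 15, 0, 0, 0, keepDataSize=0)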
|
class Solution(object):
def maxArea(self, height):
"""
:type height: List[int]
:rtype: int
"""
# initial area, left and right pointers
area, left, right = 0, 0, len(height) - 1
while left < right:
new_area = (right - left) * min(height[left], height[right])
area = max(area, new_area)
if height[left] <= height[right]:
# we want to keep the bigger number
left += 1
else:
right -= 1
return area
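# Quick check (illustrative): for the classic example used at the bottom of this file the
# two-pointer scan returns 49, i.e. the container between heights 8 (index 1) and 7 (index 8):
#   Solution().maxArea([1, 8, 6, 2, 5, 4, 8, 3, 7])  # -> 49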
def maxArea_v2(self, height):
"""
NOTE: This doesn't work for all cases:
For example: [1, 2, 1]
:type height: List[int]
:rtype: int
"""
# set the area = 0
# for each h in height
# calculate the area with current width
max_index = height.index(max(height))
print(f"max index-> {max_index}")
l_area, r_area = 0, 0
unit = 1
# right side of the highest vertical line
for i in range(max_index+1, len(height)):
h = height[i]
area = h * unit
print("h: ", h)
print("area:",area)
r_area = max(r_area, area)
unit += 1
print(r_area)
unit = 1
# left side
for i in range(max_index-1, -1, -1):
h = height[i]
area = h * unit
l_area = max(l_area, area)
unit += 1
print(f"left h: {h}")
return r_area if r_area >= l_area else l_area
obj = Solution()
height = [1, 8, 6, 2, 5, 4, 8, 3, 7]
h_1 = [1, 5, 3, 8, 2, 9]
area = obj.maxArea(height)
print(area)
|
import setuptools
setuptools.setup(
name="dmoj-tool-dessertion",
version="0.1.7",
author="Desertion",
author_email="73731354pi@gmail.com",
description="CLI submission to DMOJ",
# scripts=['bin/dmoj-tool'],
packages=['dmoj_tool'],
package_dir={'dmoj-tool':'dmoj_tool'},
entry_points={
'console_scripts':[
'dmoj-tool = dmoj_tool.cli:main',
]
},
include_package_data=True,
install_requires=['beautifulsoup4','bs4','certifi','chardet','idna','requests','soupsieve','urllib3'],
python_requires='>=3.6'
)
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
import taggit.managers
class Migration(migrations.Migration):
dependencies = [
('taggit', '0002_auto_20150616_2121'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Blog',
fields=[
('id', models.AutoField(verbose_name='ID', primary_key=True, serialize=False, auto_created=True)),
('title', models.CharField(unique_for_date='posted', verbose_name='Загаловак', max_length=100)),
('description', models.TextField(verbose_name='Скарочаны змест')),
('content', models.TextField(verbose_name='Змест')),
('posted', models.DateTimeField(db_index=True, verbose_name='Апублікавана', auto_now_add=True)),
('is_commentable', models.BooleanField(default=True, verbose_name='Дазвол каментавання')),
('tags', taggit.managers.TaggableManager(verbose_name='Тэгі', help_text='A comma-separated list of tags.', blank=True, through='taggit.TaggedItem', to='taggit.Tag')),
('user', models.ForeignKey(editable=False, to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'артыкул блогу',
'verbose_name_plural': 'артыкулы блогу',
'ordering': ['-posted'],
},
),
]
|
import time
class Timer:
def __init__(self):
self.elapsed_time = 0
self.start_time = 0
pass
def start(self):
if self.start_time > 0:
pass
self.start_time = time.time()
def end(self):
if self.elapsed_time > 0:
pass
self.elapsed_time = time.time() - self.start_time
|
import numpy as np
import matplotlib.pyplot as plt
import time
# Load the data:
X_train = np.load('data/q2xTrain.npy')
y_train = np.load('data/q2yTrain.npy')
X_test = np.load('data/q2xTest.npy')
y_test = np.load('data/q2yTest.npy')
def construct_polynomial(X_vec, degree):
X = np.ones((X_vec.shape[0], 1))
for i in range(1, degree + 1):
X = np.append(X, np.reshape(X_vec**i, (X_vec.shape[0], 1)), axis=1)
return X
############################## (a) ############################################
# Define the cost function:
def cost_fuction(X, theta, y, lambda_ = 0):
regularization = 0.5 * lambda_ * (theta.T.dot(theta))
return 0.5 * np.sum((np.dot(X, theta) - y) ** 2) + np.asscalar(regularization)
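# In math terms: J(theta) = 0.5 * ||X @ theta - y||^2 + 0.5 * lambda * theta.T @ theta,
# i.e. ordinary least squares plus an optional L2 (ridge) penalty; lambda_ defaults to 0.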
def batchGD(X, theta, y, learning_rate = 0.001, num_iter = 10000):
print("***************Batch GD********************")
e = 0
e_new = 1
tolerance = 1e-10
i = 0
error_list = []
while abs(e - e_new) > tolerance and i <= num_iter:
i += 1
e = e_new
grad = np.dot(X.T, np.dot(X, theta) - y)
theta = theta - learning_rate * grad
e_new = cost_fuction(X, theta, y)
error_list.append(e_new)
print("It takes number of iteration # {} with final cost {}".format(i, e_new))
print("The final result for theta is ", theta)
return error_list
def SGD(X, theta, y, learning_rate = 0.001, num_iter = 10000):
print("***************Stochastic GD********************")
e = 0
e_new = 1
tolerance = 1e-10
j = 0
error_list = []
while abs(e - e_new) > tolerance and j <= num_iter:
j += 1
e = e_new
e_new = 0
for i in range(n):
X_i = X[i,:].reshape(1, X.shape[1])
y_i = y[i].reshape(1,1)
prediction_i = np.dot(X_i, theta)
grad = np.dot(X_i.T, prediction_i - y_i)
theta = theta - learning_rate * grad
e_new += cost_fuction(X_i, theta, y_i)
error_list.append(e_new)
print("It takes number of iteration # {} with final cost {}".format(j, e_new))
print("The final result for theta is ", theta)
return error_list
def newton(X, theta, y, num_iter = 15):
print("***************Newton's method********************")
H = X.T.dot(X)
e = 0
e_new = 1
tolerance = 1e-10
i = 0
error_list = []
while abs(e - e_new) > tolerance and i <= num_iter:
i += 1
e = e_new
grad = np.dot(X.T, np.dot(X, theta) - y)
theta = theta - np.linalg.inv(H).dot(grad)
e_new = cost_fuction(X, theta, y)
# print("Iteration # {} with cost {}".format(i, e_new))
error_list.append(e_new)
print("It takes number of iteration # {} with final cost {}".format(i, e_new))
print("The final result for theta is ", theta)
return error_list, theta
n = X_train.shape[0]
# Create the data matrix of (n,j):
X = construct_polynomial(X_train, 1)
theta = np.zeros((2, 1))
y = np.reshape(y_train, (n, 1))
# print(X, X.shape)
# print(theta, theta.shape)
# print(y, y.shape)
start_time1 = time.time()
error_list1 = batchGD(X, theta, y)
time_GD = time.time() - start_time1
start_time2 = time.time()
error_list2 = SGD(X, theta, y)
time_SGD = time.time() - start_time2
start_time3 = time.time()
error_list3, theta_newton = newton(X, theta, y)
time_newton = time.time() - start_time3
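# Optional sanity check (added for illustration, not part of the original assignment):
# for this quadratic least-squares objective a single Newton step from theta = 0 lands
# exactly on the normal-equation solution, so the two results should agree.
theta_closed_form = np.linalg.solve(X.T.dot(X), X.T.dot(y))
print("Newton matches the normal equations:", np.allclose(theta_newton, theta_closed_form))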
plt.figure(figsize=(10,6))
plt.plot(range(1, len(error_list1) + 1), error_list1, color='red', label='Batch GD')
plt.plot(range(1, len(error_list2) + 1), error_list2, color='green', label='Stochastic GD')
plt.plot(range(1, len(error_list3) + 1), error_list3, color='blue', label="Newton's method")
plt.legend()
plt.title("Learning curves for batch GD, stochastic GD and Newton's method")
plt.xlabel("#Iteration")
plt.ylabel("Error")
plt.show()
# ############################## (b) ############################################
ytest = np.reshape(y_test, (X_test.shape[0], 1))
M = range(0, 10)
ERMS_list = []
ERMS_list_test = []
for i in M:
X = construct_polynomial(X_train, i)
testX = construct_polynomial(X_test, i)
theta = np.zeros((i + 1, 1))
e, theta = newton(X, theta, y)
print("The theta for the {}-degree is: {}".format(i, theta))
e_test = cost_fuction(testX, theta, ytest)
ERMS = np.sqrt(2 * e[-1] / X.shape[0])
ERMS_list.append(ERMS)
ERMS_test = np.sqrt(2 * e_test / testX.shape[0])
ERMS_list_test.append(ERMS_test)
plt.figure(figsize=(10,6))
train, = plt.plot(M, ERMS_list, '--o', color='blue', label='Training')
test, = plt.plot(M, ERMS_list_test, '--o', color='red', label='Test')
plt.legend(handles=[train, test])
plt.xlabel("M")
plt.ylabel("ERMS")
plt.show()
############################## (c) ############################################
def newton_regularization(X, theta, y, lambda_, num_iter = 15):
print("***************Newton's method********************")
H = X.T.dot(X)
e = 0
e_new = 1
tolerance = 1e-10
i = 0
error_list = []
while abs(e - e_new) > tolerance and i <= num_iter:
i += 1
e = e_new
grad = np.dot(X.T, np.dot(X, theta) - y) + lambda_ * theta
theta = theta - np.linalg.inv(np.add(H, lambda_ * np.identity(X.shape[1]))).dot(grad)
e_new = cost_fuction(X, theta, y, lambda_)
# print("Iteration # {} with cost {}".format(i, e_new))
error_list.append(e_new)
print("The final result for theta is ", theta)
return error_list, theta
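# Note (illustrative, follows from the update rule above): with theta initialised to zero,
# a single regularized Newton step already reproduces the ridge-regression closed form
#     theta = (X^T X + lambda * I)^{-1} X^T y,
# so the loop mainly confirms convergence.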
lambda_list = [0, 1e-8, 1e-7, 1e-6, 1e-5, 1e-4, 1e-3, 1e-2, 0.1, 1]
M = 9
ERMS_list = []
ERMS_list_test = []
X = construct_polynomial(X_train, M)
testX = construct_polynomial(X_test, M)
for l in lambda_list:
theta = np.zeros((M+1, 1))
e, theta = newton_regularization(X, theta, y, l)
# print("The theta for the regularization with lambda equal to {} is: {}".format(l, theta))
e_test = cost_fuction(testX, theta, ytest, lambda_ = l)
ERMS = np.sqrt(2 * e[-1] / X.shape[0])
ERMS_list.append(ERMS)
ERMS_test = np.sqrt(2 * e_test / testX.shape[0])
ERMS_list_test.append(ERMS_test)
lambda_list[0] = 1e-9  # replace the 0 entry so np.log10 is defined for the plot below
plt.figure(figsize=(10,6))
train2, = plt.plot(np.log10(lambda_list), ERMS_list, '--o', color='blue', label='Training')
test2, = plt.plot(np.log10(lambda_list), ERMS_list_test, '--o', color='red', label='Test')
plt.legend(handles=[train2, test2])
plt.xlabel("log_10 of lambda")
plt.ylabel("ERMS")
plt.show()
|
###
### Copyright (C) 2018-2019 Intel Corporation
###
### SPDX-License-Identifier: BSD-3-Clause
###
from ....lib import *
from ..util import *
spec = load_test_spec("vpp", "scale")
@slash.requires(have_ffmpeg)
@slash.requires(have_ffmpeg_qsv_accel)
@slash.requires(*have_ffmpeg_filter("vpp_qsv"))
@slash.requires(using_compatible_driver)
@slash.parametrize(*gen_vpp_scale_parameters(spec))
@platform_tags(VPP_PLATFORMS)
def test_default(case, scale_width, scale_height):
params = spec[case].copy()
params.update(
mformat = mapformat(params["format"]),
scale_width = scale_width, scale_height = scale_height)
params["scaled"] = get_media()._test_artifact(
"{}_scaled_{scale_width}x{scale_height}_{format}"
".yuv".format(case, **params))
if params["mformat"] is None:
slash.skip_test("{format} format not supported".format(**params))
call(
"ffmpeg -init_hw_device qsv=qsv:hw -hwaccel qsv -filter_hw_device qsv"
" -v debug -f rawvideo -pix_fmt {mformat} -s:v {width}x{height} -i {source}"
" -vf 'format=nv12,hwupload=extra_hw_frames=16"
",vpp_qsv=w={scale_width}:h={scale_height},hwdownload,format=nv12'"
" -pix_fmt {mformat} -an -vframes {frames} -y {scaled}".format(**params))
check_filesize(
params["scaled"], params["scale_width"], params["scale_height"],
params["frames"], params["format"])
fmtref = format_value(params["reference"], case = case, **params)
ssim = calculate_ssim(
fmtref, params["scaled"],
params["scale_width"], params["scale_height"],
params["frames"], params["format"])
get_media()._set_test_details(ssim = ssim)
assert 1.0 >= ssim[0] >= 0.97
assert 1.0 >= ssim[1] >= 0.97
assert 1.0 >= ssim[2] >= 0.97
|
# Copyright Ramón Vila Ferreres - 2021
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE
# FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import requests
import pprint as pp
import logging
from urllib.parse import urlencode
from typing import *
from selenium import webdriver
from bs4 import BeautifulSoup
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.support.ui import WebDriverWait, Select
from selenium.webdriver.support import expected_conditions as ec
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.common.by import By
logging.basicConfig(format= '%(name)s - %(levelname)s - %(message)s', level= logging.INFO)
logger = logging.getLogger("CalendarExtractor")
class CalendarExtractor:
""" Requests and extracts raw calendar data straight from SIES """
    def __init__(self, configuration: dict, username: str, password: str):
        self.configuration = configuration
        self.credentials: Dict[str, str] = { "username": username, "password": password }
self.options = webdriver.ChromeOptions()
self.options.add_argument("--headless")
self.driver= webdriver.Chrome(
executable_path=self.configuration["extractor"]["params"]["CHROMEDRIVER_PATH"],
chrome_options=self.options)
self.wait = WebDriverWait(self.driver, 20)
self.cookies = self.__extract_user_cookies()
def __get_element(self, xpath: str, clickable: bool = False) -> Any:
if clickable:
# Wait until the element becomes clickable
return self.wait.until(ec.element_to_be_clickable((By.XPATH, xpath)))
else:
# Wait until the element exists
return self.wait.until(ec.presence_of_element_located((By.XPATH, xpath)))
def __click(self, xpath):
self.__get_element(xpath, clickable=True).click()
def __extract_user_cookies(self):
""" Dumps user cookies using Selenium Chromedriver (headless mode) """
self.driver.get(self.configuration["extractor"]["params"]["LOGIN_URL"])
# Username
username_textarea = self.__get_element(self.configuration["extractor"]["xpaths"]["username_input"])
username_textarea.clear()
username_textarea.send_keys(self.credentials["username"])
# Password
password_textarea = self.__get_element(self.configuration["extractor"]["xpaths"]["password_input"])
password_textarea.clear()
password_textarea.send_keys(self.credentials["password"])
# Submit the form
self.__click(self.configuration["extractor"]["xpaths"]["login_button"])
user_cookies = {
'JSESSIONID': self.driver.get_cookie("JSESSIONID")["value"],
'oam.Flash.RENDERMAP.TOKEN': self.driver.get_cookie("oam.Flash.RENDERMAP.TOKEN")["value"]
}
logger.info("Got user cookies!")
self.driver.close()
return user_cookies
def __get_calendar_source(self):
""" Extracts calendar's page raw HTML """
logger.info("Requesting calendar source using dumped cookies")
# Actually perform the request
request = requests.get( self.configuration["extractor"]["params"]["BASE_CALENDAR_URL"], cookies= self.cookies )
self.calendar_page_source = request.text
logger.info("Calendar HTML succesfully dumped")
def __extract_state_parameters(self):
""" Parses dumped raw HTML to extract state parameters """
logger.info("Extracting state parameters")
page_soup = BeautifulSoup(self.calendar_page_source, "html.parser")
# Find the submit form
javax_form = page_soup.select('form[action="/serviciosacademicos/web/expedientes/calendario.xhtml"]')[0]
# Extract form parameters and attbs
javax_faces_form = page_soup.find("div", {"class": "card-body"})
javax_faces_source = javax_faces_form.find("div")
javax_faces_source_submit, javax_faces_viewstate = javax_form.find_all("input")
self.state = {
"javax.faces.source": javax_faces_source["id"],
"javax.faces.source_SUBMIT": javax_faces_source_submit["name"],
"javax.faces.viewstate": javax_faces_viewstate["value"]
}
logger.info("State parameters extracted")
def __request_calendar_events(self):
""" Requests the calendar data to UniOvi servers """
payload= urlencode({
# JSF state parameters
"javax.faces.source": self.state["javax.faces.source"],
"javax.faces.partial.execute": self.state["javax.faces.source"],
"javax.faces.partial.render": self.state["javax.faces.source"],
"javax.faces.ViewState": self.state["javax.faces.viewstate"],
# TODO: More reversing needed here! (why is this mandatory?)
self.state["javax.faces.source"]: self.state["javax.faces.source"],
# TODO: refactor this to make it user-adjustable
# Start and end times are just Unix timestamps (adjusted to GMT +2)
self.state["javax.faces.source"] + "_start": "1630886400000",
self.state["javax.faces.source"] + "_end": "1652054400000",
# Form-related parameters
self.state["javax.faces.source_SUBMIT"]: 1,
"javax.faces.partial.ajax": "true"
})
logger.info("Requesting calendar events to SIES server")
r = requests.post(
self.configuration["extractor"]["params"]["BASE_CALENDAR_URL"],
data= payload,
headers= { 'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8' },
cookies= self.cookies
)
logger.info("Successfuly dumped calendar data")
return r.text
def get_calendar_data(self):
""" Logs into sies.uniovi.es and dumps the (raw) user calendar """
self.__get_calendar_source()
self.__extract_state_parameters()
return self.__request_calendar_events()
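# Usage sketch (illustrative; the nested configuration layout and the placeholder
# credentials below are assumptions inferred from the lookups above, not part of the
# original module):
#
#   config = {
#       "extractor": {
#           "params": {"CHROMEDRIVER_PATH": "/usr/bin/chromedriver",
#                      "LOGIN_URL": "...", "BASE_CALENDAR_URL": "..."},
#           "xpaths": {"username_input": "...", "password_input": "...", "login_button": "..."},
#       }
#   }
#   extractor = CalendarExtractor(config, "username", "password")
#   raw_calendar = extractor.get_calendar_data()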
|
# -*- coding: utf-8 -*-
from base.models import AppOptions, Author, OauthAccount, Post, Taxonomy
from google.appengine.ext import ndb
from custom.utils import rst2html, md2html
def to_slug(string):
"""缩略名
:param string: str
:return: str
"""
import re, urllib
ret = re.sub(r'([_\+=!?.\'"]|\s)+', '-', string)
return urllib.quote(ret.strip('-').lower().encode('utf-8'))
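# For example (illustrative): to_slug("Hello World") returns "hello-world"; characters
# outside the replaced set are percent-encoded by urllib.quote.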
class ArticleMapper(object):
@classmethod
def save_by_form(cls, f, author):
url_safe_key = f.cleaned_data.get('key', None)
if url_safe_key:
post = ndb.Key(urlsafe=url_safe_key).get()
else:
post = Post()
post.title = f.cleaned_data['subject']
post.slug = to_slug(post.title)
post.raw_content = f.cleaned_data['message']
post.format_text = f.cleaned_data['format_text']
if 'rst' == post.format_text:
post.content = rst2html(post.raw_content)
elif 'md' == post.format_text:
post.content = md2html(post.raw_content)
post.author = author
if post.categories:
old_categories = Taxonomy.query(Taxonomy.slug.IN([c.slug for c in post.categories])).fetch()
for cate in old_categories:
cate.count -= 1
ndb.put_multi(old_categories)
categories = Taxonomy.query(Taxonomy.slug.IN(f.cleaned_data['category'])).fetch()
for cate in categories:
cate.count += 1
ndb.put_multi(categories)
post.categories = categories
post.put()
return post
@classmethod
def get_by_key(cls, string):
return ndb.Key(urlsafe=string).get()
@classmethod
def fetch_categories(cls, **kwargs):
return Taxonomy.query(Taxonomy.taxonomy == 'category').fetch(**kwargs)
@classmethod
def save_category(cls, category_name, slug=None):
category = Taxonomy(name=category_name)
category.slug = slug or to_slug(category_name)
category.put()
return category
class AppOptionsMapper(object):
"""docstring for AppOptionsMapper"""
@classmethod
def save(cls, name, value, auto_load='yes'):
opt = AppOptions()
opt.name = name
opt.value = value
opt.autoload = auto_load
opt.put()
return opt
class AuthorMapper(object):
"""docstring for AppOptionsMapper"""
@classmethod
def save(cls, name, passwd, nicename):
a = Author()
a.user_login = name
a.user_pass = passwd
a.display_name = nicename
a.put()
return a
@classmethod
def get_author(cls, name):
"""获取博客作者
Args:
name: string, 作者登录名
Returns:
object, Author
"""
return Author.query(Author.user_login == name).get()
@classmethod
def has_bound(cls, aid, atype):
return OauthAccount.query(OauthAccount.account_id == aid,
OauthAccount.account_type == atype).count() > 0
@classmethod
def get_bound_author(cls, aid, atype):
oa = OauthAccount.query(OauthAccount.account_id == aid,
OauthAccount.account_type == atype).get()
if oa is None:
return None
return Author.query(
Author.user_login == oa.bound_author.user_login).get()
@classmethod
def bind_account(cls, aid, atype, author, token=None):
account = OauthAccount()
from django.utils.six import string_types
if isinstance(author, string_types):
author = ndb.Key(urlsafe = author).get()
account.bound_author = author
if token:
account.bound_token = token
account.account_id = aid
account.account_type = atype
return account.put()
|
from lasagne.layers import Layer
import theano.tensor as T
import lasagne
class TensorDotLayer(Layer):
def __init__(self, incoming, n_filters, axis, W=lasagne.init.Normal(),
**kwargs):
super(TensorDotLayer, self).__init__(incoming, **kwargs)
self.axis = axis
axis_length = incoming.output_shape[self.axis]
self.n_filters = n_filters
self.W = self.add_param(W, (self.n_filters, axis_length), name='W',
regularizable=True, trainable=True)
def get_output_shape_for(self, input_shape):
""" input_shapes[0] should be examples x time x chans
input_shapes[1] should be examples x time x chans x filters
"""
out_shape = list(input_shape)
out_shape[self.axis] = self.n_filters
return tuple(out_shape)
def get_output_for(self, input, **kwargs):
# 1 for W is axis where weight entries are for one weight..
out = T.tensordot(input, self.W, axes=(self.axis,1))
n_dims = len(self.output_shape)
        reshuffle_arr = list(range(self.axis)) + [n_dims - 1] + list(range(self.axis, n_dims - 1))
out = out.dimshuffle(*reshuffle_arr)
return out
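# Minimal usage sketch (not part of the original module): mix the channel axis of a
# (batch, time, channels) input down to n_filters learned combinations.
if __name__ == '__main__':
    l_in = lasagne.layers.InputLayer(shape=(None, 200, 22))
    l_mix = TensorDotLayer(l_in, n_filters=8, axis=2)
    print(l_mix.output_shape)  # expected: (None, 200, 8)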
|
#import urllib2
import csv
import sys
import re
from datetime import datetime
import time
import pandas as pd
import configparser
import hashlib
import os
import rdflib
import logging
logging.getLogger().disabled = True
if sys.version_info[0] == 2:
    reload(sys)  # Python 2 only: restores sys.setdefaultencoding, which site.py removes
    sys.setdefaultencoding('utf8')
whyis = rdflib.Namespace('http://vocab.rpi.edu/whyis/')
np = rdflib.Namespace("http://www.nanopub.org/nschema#")
prov = rdflib.Namespace("http://www.w3.org/ns/prov#")
dc = rdflib.Namespace("http://purl.org/dc/terms/")
sio = rdflib.Namespace("http://semanticscience.org/resource/")
setl = rdflib.Namespace("http://purl.org/twc/vocab/setl/")
pv = rdflib.Namespace("http://purl.org/net/provenance/ns#")
skos = rdflib.Namespace("http://www.w3.org/2008/05/skos#")
rdfs = rdflib.RDFS
rdf = rdflib.RDF
owl = rdflib.OWL
xsd = rdflib.XSD
def parseString(input_string, delim) :
my_list = input_string.split(delim)
for i in range(0,len(my_list)) :
my_list[i] = my_list[i].strip()
return my_list
def codeMapper(input_word) :
unitVal = input_word
for unit_label in unit_label_list :
if (unit_label == input_word) :
unit_index = unit_label_list.index(unit_label)
unitVal = unit_uri_list[unit_index]
for unit_code in unit_code_list :
if (unit_code == input_word) :
unit_index = unit_code_list.index(unit_code)
unitVal = unit_uri_list[unit_index]
return unitVal
def convertImplicitToKGEntry(*args) :
if (args[0][:2] == "??") :
if (studyRef is not None ) :
if (args[0]==studyRef) :
return "<" + prefixes[kb] + args[0][2:] + ">"
if (len(args) == 2) :
return "<" + prefixes[kb] + args[0][2:] + "-" + args[1] + ">"
else :
return "<" + prefixes[kb] + args[0][2:] + ">"
    elif ('http:' not in args[0]) and ('https:' not in args[0]) :
# Check for entry in column list
for item in explicit_entry_list :
if args[0] == item.Column :
if (len(args) == 2) :
return "<" + prefixes[kb] + args[0].replace(" ","_").replace(",","").replace("(","").replace(")","").replace("/","-").replace("\\","-") + "-" + args[1] + ">"
else :
return "<" + prefixes[kb] + args[0].replace(" ","_").replace(",","").replace("(","").replace(")","").replace("/","-").replace("\\","-") + ">"
#return '"' + args[0] + "\"^^xsd:string"
return args[0]
else :
return args[0]
def checkImplicit(input_word) :
try:
if (input_word[:2] == "??") :
return True
else :
return False
except Exception as e:
print("Something went wrong in checkImplicit()" + str(e))
sys.exit(1)
def isfloat(term):
try:
float(term)
return True
except ValueError:
return False
def isURI(term):
try:
if any(c in term for c in ("http://","https://")) :
return True
else:
return False
except ValueError:
return False
def isSchemaVar(term) :
for entry in explicit_entry_list :
if term == entry[1] :
return True
return False
def assignVID(implicit_entry_tuples,timeline_tuple,a_tuple,column, npubIdentifier) :
v_id = npubIdentifier
for v_tuple in implicit_entry_tuples : # referenced in implicit list
if v_tuple["Column"] == a_tuple[column]:
v_id = hashlib.md5((str(v_tuple) + str(npubIdentifier)).encode("utf-8")).hexdigest()
    if v_id == npubIdentifier : # not found in the implicit list; maybe it's referenced in the timeline
for t_tuple in timeline_tuple:
if t_tuple["Column"] == a_tuple[column]:
#print("Got here")
v_id = hashlib.md5((str(t_tuple) + str(npubIdentifier)).encode("utf-8")).hexdigest()
if v_id == npubIdentifier : # if it's not in implicit list or timeline
print("Warning, " + column + " ID assigned to nanopub ID:" + a_tuple[column])
return v_id
def assignTerm(col_headers, column, implicit_entry_tuples, a_tuple, row, v_id) :
termURI = None
for v_tuple in implicit_entry_tuples : # referenced in implicit list
if v_tuple["Column"] == a_tuple[column]:
if "Template" in v_tuple :
template_term = extractTemplate(col_headers,row,v_tuple["Template"])
termURI = "<" + prefixes[kb] + template_term + ">"
if termURI is None :
termURI = convertImplicitToKGEntry(a_tuple[column],v_id)
return termURI
'''def processPrefixes(output_file,query_file):
if 'prefixes' in config['Prefixes']:
prefix_fn = config['Prefixes']['prefixes']
else:
prefix_fn="prefixes.txt"
prefix_file = open(prefix_fn,"r")
prefixes = prefix_file.readlines()
for prefix in prefixes :
#print(prefix.find(">"))
output_file.write(prefix)
query_file.write(prefix[1:prefix.find(">")+1])
query_file.write("\n")
prefix_file.close()
output_file.write("\n")'''
def checkTemplate(term) :
if "{" in term and "}" in term:
return True
return False
def extractTemplate(col_headers,row,term) :
while checkTemplate(term) :
open_index = term.find("{")
close_index = term.find("}")
key = term[open_index+1:close_index]
term = term[:open_index] + str(row[col_headers.index(key)+1]) + term[close_index+1:]
return term
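# For example (illustrative): with col_headers = ["subject", "age"] and a pandas
# itertuples() row of (0, "S1", 42), extractTemplate(col_headers, row, "visit-{age}")
# returns "visit-42" (row[0] is the index, hence the +1 offset above).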
def extractExplicitTerm(col_headers,row,term) : # need to write this function
while checkTemplate(term) :
open_index = term.find("{")
close_index = term.find("}")
key = term[open_index+1:close_index]
if isSchemaVar(key) :
for entry in explicit_entry_list :
if entry.Column == key :
if pd.notnull(entry.Template) :
term = extractTemplate(col_headers,row,entry.Template)
else :
typeString = ""
if pd.notnull(entry.Attribute) :
typeString += str(entry.Attribute)
if pd.notnull(entry.Entity) :
typeString += str(entry.Entity)
if pd.notnull(entry.Label) :
typeString += str(entry.Label)
if pd.notnull(entry.Unit) :
typeString += str(entry.Unit)
if pd.notnull(entry.Time) :
typeString += str(entry.Time)
if pd.notnull(entry.inRelationTo) :
typeString += str(entry.inRelationTo)
if pd.notnull(entry.wasGeneratedBy) :
typeString += str(entry.wasGeneratedBy)
if pd.notnull(entry.wasDerivedFrom) :
typeString += str(entry.wasDerivedFrom)
identifierKey = hashlib.md5((str(row[col_headers.index(key)+1])+typeString).encode("utf-8")).hexdigest()
term = entry.Column.replace(" ","_").replace(",","").replace("(","").replace(")","").replace("/","-").replace("\\","-") + "-" + identifierKey
#return extractTemplate(col_headers,row,entry.Template)
else : # What does it mean for a template reference to not be a schema variable?
print("Warning: Template reference " + term + " is not be a schema variable")
term = term[:open_index] + str(row[col_headers.index(key)+1]) + term[close_index+1:] # Needs updating probably, at least checking
return term
def writeClassAttributeOrEntity(item, term, input_tuple, assertionString, whereString, swrlString) :
if (pd.notnull(item.Entity)) and (pd.isnull(item.Attribute)) :
if ',' in item.Entity :
entities = parseString(item.Entity,',')
for entity in entities :
assertionString += " ;\n <" + rdfs.subClassOf + "> " + codeMapper(entity)
whereString += codeMapper(entity) + " "
swrlString += codeMapper(entity) + "(" + term + ") ^ "
if entities.index(entity) + 1 != len(entities) :
whereString += ", "
else :
assertionString += " ;\n <" + rdfs.subClassOf + "> " + codeMapper(item.Entity)
whereString += codeMapper(item.Entity) + " "
swrlString += codeMapper(item.Entity) + "(" + term + ") ^ "
input_tuple["Entity"]=codeMapper(item.Entity)
if (input_tuple["Entity"] == "hasco:Study") :
global studyRef
studyRef = item.Column
input_tuple["Study"] = item.Column
elif (pd.isnull(item.Entity)) and (pd.notnull(item.Attribute)) :
if ',' in item.Attribute :
attributes = parseString(item.Attribute,',')
for attribute in attributes :
assertionString += " ;\n <" + rdfs.subClassOf + "> " + codeMapper(attribute)
whereString += codeMapper(attribute) + " "
swrlString += codeMapper(attribute) + "(" + term + ") ^ "
if attributes.index(attribute) + 1 != len(attributes) :
whereString += ", "
else :
assertionString += " ;\n <" + rdfs.subClassOf + "> " + codeMapper(item.Attribute)
whereString += codeMapper(item.Attribute) + " "
swrlString += codeMapper(item.Attribute) + "(" + term + ") ^ "
input_tuple["Attribute"]=codeMapper(item.Attribute)
else :
print("Warning: Entry not assigned an Entity or Attribute value, or was assigned both.")
input_tuple["Attribute"]=codeMapper("sio:Attribute")
assertionString += " ;\n <" + rdfs.subClassOf + "> sio:Attribute"
whereString += "sio:Attribute "
swrlString += "sio:Attribute(" + term + ") ^ "
return [input_tuple, assertionString, whereString, swrlString]
def writeClassAttributeOf(item, term, input_tuple, assertionString, whereString, swrlString) :
if (pd.notnull(item.attributeOf)) :
if checkTemplate(item.attributeOf) :
open_index = item.attributeOf.find("{")
close_index = item.attributeOf.find("}")
key = item.attributeOf[open_index+1:close_index]
assertionString += " ;\n <" + rdfs.subClassOf + "> \n [ <" + rdf.type + "> <" + owl.Restriction + "> ;\n <" + owl.allValuesFrom + "> " + convertImplicitToKGEntry(key) + " ;\n <" + owl.onProperty + "> <" + properties_tuple["attributeOf"] + "> ]"
#assertionString += " ;\n <" + properties_tuple["attributeOf"] + "> " + convertImplicitToKGEntry(key)
whereString += " ;\n <" + properties_tuple["attributeOf"] + "> ?" + key.lower() + "_E"
swrlString += properties_tuple["attributeOf"] + "(" + term + " , " + [key,key[1:] + "_V"][checkImplicit(key)] + ") ^ "
else :
assertionString += " ;\n <" + rdfs.subClassOf + "> \n [ <" + rdf.type + "> <" + owl.Restriction + "> ;\n <" + owl.allValuesFrom + "> " + convertImplicitToKGEntry(item.attributeOf) + " ;\n <" + owl.onProperty + "> <" + properties_tuple["attributeOf"] + "> ]"
#assertionString += " ;\n <" + properties_tuple["attributeOf"] + "> " + convertImplicitToKGEntry(item.attributeOf)
whereString += " ;\n <" + properties_tuple["attributeOf"] + "> " + [item.attributeOf + " ",item.attributeOf[1:] + "_V "][checkImplicit(item.attributeOf)]
swrlString += properties_tuple["attributeOf"] + "(" + term + " , " + [item.attributeOf,item.attributeOf[1:] + "_V"][checkImplicit(item.attributeOf)] + ") ^ "
input_tuple["isAttributeOf"]=item.attributeOf
return [input_tuple, assertionString, whereString, swrlString]
def writeClassUnit(item, term, input_tuple, assertionString, whereString, swrlString) :
if (pd.notnull(item.Unit)) :
if checkTemplate(item.Unit) :
open_index = item.Unit.find("{")
close_index = item.Unit.find("}")
key = item.Unit[open_index+1:close_index]
assertionString += " ;\n <" + rdfs.subClassOf + "> \n [ <" + rdf.type + "> <" + owl.Restriction + "> ;\n <" + owl.hasValue + "> " + convertImplicitToKGEntry(key) + " ;\n <" + owl.onProperty + "> <" + properties_tuple["Unit"] + "> ]"
#assertionString += " ;\n <" + properties_tuple["Unit"] + "> " + convertImplicitToKGEntry(key)
whereString += " ;\n <" + properties_tuple["Unit"] + "> ?" + key.lower() + "_E"
swrlString += properties_tuple["Unit"] + "(" + term + " , " + [key,key[1:] + "_V"][checkImplicit(key)] + ") ^ "
input_tuple["Unit"] = key
else :
assertionString += " ;\n <" + rdfs.subClassOf + "> \n [ <" + rdf.type + "> <" + owl.Restriction + "> ;\n <" + owl.hasValue + "> " + str(codeMapper(item.Unit)) + " ;\n <" + owl.onProperty + "> <" + properties_tuple["Unit"] + "> ]"
#assertionString += " ;\n <" + properties_tuple["Unit"] + "> " + str(codeMapper(item.Unit))
whereString += " ;\n <" + properties_tuple["Unit"] + "> " + str(codeMapper(item.Unit))
swrlString += properties_tuple["Unit"] + "(" + term + " , " + str(codeMapper(item.Unit)) + ") ^ "
input_tuple["Unit"] = codeMapper(item.Unit)
# Incorporate item.Format here
return [input_tuple, assertionString, whereString, swrlString]
def writeClassTime(item, term, input_tuple, assertionString, whereString, swrlString) :
if (pd.notnull(item.Time)) :
if checkTemplate(item.Time) :
open_index = item.Time.find("{")
close_index = item.Time.find("}")
key = item.Time[open_index+1:close_index]
assertionString += " ;\n <" + rdfs.subClassOf + "> \n [ <" + rdf.type + "> <" + owl.Restriction + "> ;\n <" + owl.onProperty + "> <" + properties_tuple["Time"] + "> ;\n <" + owl.someValuesFrom + "> " + convertImplicitToKGEntry(key) + " ]"
#assertionString += " ;\n <" + properties_tuple["Time"] + "> " + convertImplicitToKGEntry(key)
whereString += " ;\n <" + properties_tuple["Time"] + "> ?" + key.lower() + "_E"
swrlString += properties_tuple["Time"] + "(" + term + " , " + [key,key[1:] + "_V"][checkImplicit(key)] + ") ^ "
else :
assertionString += " ;\n <" + rdfs.subClassOf + "> \n [ <" + rdf.type + "> <" + owl.Restriction + "> ;\n <" + owl.onProperty + "> <" + properties_tuple["Time"] + "> ;\n <" + owl.someValuesFrom + "> " + convertImplicitToKGEntry(item.Time) + " ]"
#assertionString += " ;\n <" + properties_tuple["Time"] + "> " + convertImplicitToKGEntry(item.Time)
whereString += " ;\n <" + properties_tuple["Time"] + "> " + [item.Time + " ",item.Time[1:] + "_V "][checkImplicit(item.Time)]
swrlString += properties_tuple["Time"] + "(" + term + " , " + [item.Time + " ",item.Time[1:] + "_V "][checkImplicit(item.Time)] + ") ^ "
input_tuple["Time"]=item.Time
return [input_tuple, assertionString, whereString, swrlString]
def writeClassRelation(item, term, input_tuple, assertionString, whereString, swrlString) :
if (pd.notnull(item.inRelationTo)) :
input_tuple["inRelationTo"]=item.inRelationTo
key = item.inRelationTo
if checkTemplate(item.inRelationTo) :
open_index = item.inRelationTo.find("{")
close_index = item.inRelationTo.find("}")
key = item.inRelationTo[open_index+1:close_index]
# If there is a value in the Relation column but not the Role column ...
if (pd.notnull(item.Relation)) and (pd.isnull(item.Role)) :
assertionString += " ;\n " + item.Relation + " " + convertImplicitToKGEntry(key)
if(isSchemaVar(key)):
whereString += " ;\n " + item.Relation + " ?" + key.lower() + "_E "
swrlString += item.Relation + "(" + term + " , " + "?" + key.lower() + "_E) ^ "
else :
whereString += " ;\n " + item.Relation + " " + [key + " ",key[1:] + "_V "][checkImplicit(key)]
swrlString += item.Relation + "(" + term + " , " + [key,key[1:] + "_V"][checkImplicit(key)] + ") ^ "
input_tuple["Relation"]=item.Relation
# If there is a value in the Role column but not the Relation column ...
elif (pd.isnull(item.Relation)) and (pd.notnull(item.Role)) :
assertionString += " ;\n <" + rdfs.subClassOf + "> \n [ <" + rdf.type + "> <" + owl.Restriction + "> ;\n <" + owl.onProperty + "> <" + properties_tuple["Role"] + "> ;\n <" + owl.someValuesFrom + "> [ <" + rdf.type + "> <" + owl.Class + "> ;\n <" + owl.intersectionOf + "> ( \n [ <" + rdf.type + "> <" + owl.Restriction + "> ;\n <" + owl.allValuesFrom + "> " + [key,convertImplicitToKGEntry(key)][checkImplicit(key)] + " ;\n <" + owl.onProperty + "> <" + properties_tuple["inRelationTo"] + "> ] <" + item.Role + "> ) ] ]"
#assertionString += " ;\n <" + properties_tuple["Role"] + "> [ <" + rdf.type + "> " + item.Role + " ;\n <" + properties_tuple["inRelationTo"] + "> " + convertImplicitToKGEntry(key) + " ]"
whereString += " ;\n <" + properties_tuple["Role"] + "> [ <" + rdf.type + "> " + item.Role + " ;\n <" + properties_tuple["inRelationTo"] + "> " + [key + " ",key[1:] + "_V "][checkImplicit(key)] + " ]"
swrlString += "" # add appropriate swrl term
input_tuple["Role"]=item.Role
# If there is a value in the Role and Relation columns ...
elif (pd.notnull(item.Relation)) and (pd.notnull(item.Role)) :
input_tuple["Relation"]=item.Relation
input_tuple["Role"]=item.Role
assertionString += " ;\n <" + rdfs.subClassOf + "> \n [ <" + rdf.type + "> <" + owl.Restriction + "> ;\n <" + owl.onProperty + "> <" + properties_tuple["Role"] + "> ;\n <" + owl.someValuesFrom + "> [ <" + rdf.type + "> <" + owl.Class + "> ;\n <" + owl.intersectionOf + "> ( \n [ <" + rdf.type + "> <" + owl.Restriction + "> ;\n <" + owl.allValuesFrom + "> " + [key,convertImplicitToKGEntry(key)][checkImplicit(key)] + " ;\n <" + owl.onProperty + "> <" + item.Relation + "> ] <" + item.Role + "> ) ] ]"
#assertionString += " ;\n <" + properties_tuple["inRelationTo"] + "> " + convertImplicitToKGEntry(key)
if(isSchemaVar(key)):
whereString += " ;\n <" + properties_tuple["inRelationTo"] + "> ?" + key.lower() + "_E "
swrlString += "" # add appropriate swrl term
else :
whereString += " ;\n <" + properties_tuple["inRelationTo"] + "> " + [key + " ",key[1:] + "_V "][checkImplicit(key)]
swrlString += "" # add appropriate swrl term
elif (pd.isnull(item.Relation)) and (pd.isnull(item.Role)) :
assertionString += " ;\n <" + rdfs.subClassOf + "> \n [ <" + rdf.type + "> <" + owl.Restriction + "> ;\n <" + owl.allValuesFrom + "> " + convertImplicitToKGEntry(key) + " ;\n <" + owl.onProperty + "> <" + properties_tuple["inRelationTo"] + "> ]"
#assertionString += " ;\n <" + properties_tuple["inRelationTo"] + "> " + convertImplicitToKGEntry(key)
if(isSchemaVar(key)):
whereString += " ;\n <" + properties_tuple["inRelationTo"] + "> ?" + key.lower() + "_E "
swrlString += properties_tuple["inRelationTo"] + "(" + term + " , " + "?" + key.lower() + "_E) ^ "
else :
whereString += " ;\n <" + properties_tuple["inRelationTo"] + "> " + [key + " ",key[1:] + "_V "][checkImplicit(key)]
swrlString += properties_tuple["inRelationTo"] + "(" + term + " , " + [key,key[1:] + "_V"][checkImplicit(key)] + ") ^ "
elif (pd.notnull(item.Role)) : # if there is a role, but no in relation to
input_tuple["Role"]=item.Role
assertionString += " ;\n <" + rdfs.subClassOf + "> \n [ <" + rdf.type + "> <" + owl.Restriction + "> ;\n <" + owl.onProperty + "> <" + properties_tuple["Role"] + "> ;\n <" + owl.someValuesFrom + "> [ <" + rdf.type + "> <" + item.Role + "> ] ]"
#assertionString += " ;\n <" + properties_tuple["Role"] + "> [ <" + rdf.type + "> " + item.Role + " ]"
whereString += " ;\n <" + properties_tuple["Role"] + "> [ <" + rdf.type + "> " + item.Role + " ]"
swrlString += "" # add appropriate swrl term
return [input_tuple, assertionString, whereString, swrlString]
def writeClassWasDerivedFrom(item, term, input_tuple, provenanceString, whereString, swrlString) :
if pd.notnull(item.wasDerivedFrom) :
if ',' in item.wasDerivedFrom :
derivatives = parseString(item.wasDerivedFrom,',')
for derivative in derivatives :
provenanceString += " ;\n <" + rdfs.subClassOf + "> \n [ <" + rdf.type + "> <" + owl.Restriction + "> ;\n <" + owl.someValuesFrom + "> " + convertImplicitToKGEntry(derivative) + " ;\n <" + owl.onProperty + "> <" + properties_tuple["wasDerivedFrom"] + "> ]"
#provenanceString += " ;\n <" + properties_tuple["wasDerivedFrom"] + "> " + convertImplicitToKGEntry(derivative)
input_tuple["wasDerivedFrom"]=derivative
if(isSchemaVar(derivative)):
whereString += " ;\n <" + properties_tuple["wasDerivedFrom"] + "> ?" + derivative.lower() + "_E "
swrlString += properties_tuple["wasDerivedFrom"] + "(" + term + " , " + "?" + derivative.lower() + "_E) ^ "
elif checkTemplate(derivative) :
open_index = derivative.find("{")
close_index = derivative.find("}")
key = derivative[open_index+1:close_index]
print(convertImplicitToKGEntry(key))
provenanceString += " ;\n <" + rdfs.subClassOf + "> \n [ <" + rdf.type + "> <" + owl.Restriction + "> ;\n <" + owl.someValuesFrom + "> " + convertImplicitToKGEntry(key) + " ;\n <" + owl.onProperty + "> <" + properties_tuple["wasDerivedFrom"] + "> ]"
#provenanceString += " ;\n <" + properties_tuple["wasDerivedFrom"] + "> " + convertImplicitToKGEntry(key)
whereString += " ;\n <" + properties_tuple["wasDerivedFrom"] + "> ?" + key.lower() + "_E"
swrlString += properties_tuple["wasDerivedFrom"] + "(" + term + " , " + [key,key[1:] + "_V"][checkImplicit(key)] + ") ^ "
else :
whereString += " ;\n <" + properties_tuple["wasDerivedFrom"] + "> " + [derivative + " ",derivative[1:] + "_V "][checkImplicit(derivative)]
swrlString += properties_tuple["wasDerivedFrom"] + "(" + term + " , " + [derivative,derivative[1:] + "_V"][checkImplicit(derivative)] + ") ^ "
else :
provenanceString += " ;\n <" + rdfs.subClassOf + "> \n [ <" + rdf.type + "> <" + owl.Restriction + "> ;\n <" + owl.someValuesFrom + "> " + convertImplicitToKGEntry(item.wasDerivedFrom) + " ;\n <" + owl.onProperty + "> <" + properties_tuple["wasDerivedFrom"] + "> ]"
#provenanceString += " ;\n <" + properties_tuple["wasDerivedFrom"] + "> " + convertImplicitToKGEntry(item.wasDerivedFrom)
input_tuple["wasDerivedFrom"]=item.wasDerivedFrom
if(isSchemaVar(item.wasDerivedFrom)):
whereString += " ;\n <" + properties_tuple["wasDerivedFrom"] + "> ?" + item.wasDerivedFrom.lower() + "_E "
swrlString += properties_tuple["wasDerivedFrom"] + "(" + term + " , " + "?" + item.wasDerivedFrom.lower() + "_E) ^ "
elif checkTemplate(item.wasDerivedFrom) :
open_index = item.wasDerivedFrom.find("{")
close_index = item.wasDerivedFrom.find("}")
key = item.wasDerivedFrom[open_index+1:close_index]
provenanceString += " ;\n <" + rdfs.subClassOf + "> \n [ <" + rdf.type + "> <" + owl.Restriction + "> ;\n <" + owl.someValuesFrom + "> " + convertImplicitToKGEntry(key) + " ;\n <" + owl.onProperty + "> <" + properties_tuple["wasDerivedFrom"] + "> ]"
#provenanceString += " ;\n <" + properties_tuple["wasDerivedFrom"] + "> " + convertImplicitToKGEntry(key)
whereString += " ;\n <" + properties_tuple["wasDerivedFrom"] + "> ?" + key.lower() + "_E"
swrlString += properties_tuple["wasDerivedFrom"] + "(" + term + " , " + [key,key[1:] + "_V"][checkImplicit(key)] + ") ^ "
else :
whereString += " ;\n <" + properties_tuple["wasDerivedFrom"] + "> " + [item.wasDerivedFrom + " ",item.wasDerivedFrom[1:] + "_V "][checkImplicit(item.wasDerivedFrom)]
swrlString += properties_tuple["wasDerivedFrom"] + "(" + term + " , " + [item.wasDerivedFrom,item.wasDerivedFrom[1:] + "_V"][checkImplicit(item.wasDerivedFrom)] + ") ^ "
return [input_tuple, provenanceString, whereString, swrlString]
def writeClassWasGeneratedBy(item, term, input_tuple, provenanceString, whereString, swrlString) :
if pd.notnull(item.wasGeneratedBy) :
if ',' in item.wasGeneratedBy :
generators = parseString(item.wasGeneratedBy,',')
for generator in generators :
provenanceString += " ;\n <" + properties_tuple["wasGeneratedBy"] + "> " + convertImplicitToKGEntry(generator)
input_tuple["wasGeneratedBy"]=generator
if(isSchemaVar(generator)):
whereString += " ;\n <" + properties_tuple["wasGeneratedBy"] + "> ?" + generator.lower() + "_E "
swrlString += properties_tuple["wasGeneratedBy"] + "(" + term + " , " + "?" + generator.lower() + "_E) ^ "
elif checkTemplate(generator) :
open_index = generator.find("{")
close_index = generator.find("}")
key = generator[open_index+1:close_index]
                    provenanceString += " ;\n <" + properties_tuple["wasGeneratedBy"] + "> " + convertImplicitToKGEntry(key)
whereString += " ;\n <" + properties_tuple["wasGeneratedBy"] + "> ?" + key.lower() + "_E"
swrlString += properties_tuple["wasGeneratedBy"] + "(" + term + " , " + [key,key[1:] + "_V"][checkImplicit(key)] + ") ^ "
else :
whereString += " ;\n <" + properties_tuple["wasGeneratedBy"] + "> " + [generator + " ",generator[1:] + "_V "][checkImplicit(generator)]
swrlString += properties_tuple["wasGeneratedBy"] + "(" + term + " , " + [generator,generator[1:] + "_V"][checkImplicit(generator)] + ") ^ "
else :
provenanceString += " ;\n <" + properties_tuple["wasGeneratedBy"] + "> " + convertImplicitToKGEntry(item.wasGeneratedBy)
input_tuple["wasGeneratedBy"]=item.wasGeneratedBy
if(isSchemaVar(item.wasGeneratedBy)):
whereString += " ;\n <" + properties_tuple["wasGeneratedBy"] + "> ?" + item.wasGeneratedBy.lower() + "_E "
swrlString += properties_tuple["wasGeneratedBy"] + "(" + term + " , " + "?" + item.wasGeneratedBy.lower() + "_E) ^ "
elif checkTemplate(item.wasGeneratedBy) :
open_index = item.wasGeneratedBy.find("{")
close_index = item.wasGeneratedBy.find("}")
key = item.wasGeneratedBy[open_index+1:close_index]
                provenanceString += " ;\n <" + properties_tuple["wasGeneratedBy"] + "> " + convertImplicitToKGEntry(key)
whereString += " ;\n <" + properties_tuple["wasGeneratedBy"] + "> ?" + key.lower() + "_E"
swrlString += properties_tuple["wasGeneratedBy"] + "(" + term + " , " + [key,key[1:] + "_V"][checkImplicit(key)] + ") ^ "
else :
whereString += " ;\n <" + properties_tuple["wasGeneratedBy"] + "> " + [item.wasGeneratedBy + " ",item.wasGeneratedBy[1:] + "_V "][checkImplicit(item.wasGeneratedBy)]
swrlString += properties_tuple["wasGeneratedBy"] + "(" + term + " , " + [item.wasGeneratedBy,item.wasGeneratedBy[1:] + "_V"][checkImplicit(item.wasGeneratedBy)] + ") ^ "
return [input_tuple, provenanceString, whereString, swrlString]
def writeImplicitEntryTuples(implicit_entry_list, timeline_tuple, output_file, query_file, swrl_file, dm_fn) :
implicit_entry_tuples = []
assertionString = ''
provenanceString = ''
whereString = '\n'
swrlString = ''
datasetIdentifier = hashlib.md5(dm_fn.encode('utf-8')).hexdigest()
if nanopublication_option == "enabled" :
output_file.write("<" + prefixes[kb] + "head-implicit_entry-" + datasetIdentifier + "> { ")
output_file.write("\n <" + prefixes[kb] + "nanoPub-implicit_entry-" + datasetIdentifier + "> <" + rdf.type + "> <" + np.Nanopublication + ">")
output_file.write(" ;\n <" + np.hasAssertion + "> <" + prefixes[kb] + "assertion-implicit_entry-" + datasetIdentifier + ">")
output_file.write(" ;\n <" + np.hasProvenance + "> <" + prefixes[kb] + "provenance-implicit_entry-" + datasetIdentifier + ">")
output_file.write(" ;\n <" + np.hasPublicationInfo + "> <" + prefixes[kb] + "pubInfo-implicit_entry-" + datasetIdentifier + ">")
output_file.write(" .\n}\n\n")
col_headers=list(pd.read_csv(dm_fn).columns.values)
for item in implicit_entry_list :
implicit_tuple = {}
if "Template" in col_headers and pd.notnull(item.Template) :
implicit_tuple["Template"]=item.Template
assertionString += "\n <" + prefixes[kb] + item.Column[2:] + "> <" + rdf.type + "> owl:Class"
term_implicit = item.Column[1:] + "_V"
whereString += " " + term_implicit + " <" + rdf.type + "> "
implicit_tuple["Column"]=item.Column
if (hasattr(item,"Label") and pd.notnull(item.Label)) :
implicit_tuple["Label"]=item.Label
if ',' in item.Label :
labels = parseString(item.Label,',')
for label in labels :
assertionString += " ;\n <" + properties_tuple["Label"] + "> \"" + label + "\"^^xsd:string"
else :
assertionString += " ;\n <" + properties_tuple["Label"] + "> \"" + item.Label + "\"^^xsd:string"
else :
assertionString += " ;\n <" + properties_tuple["Label"] + "> \"" + item.Column[2:] + "\"^^xsd:string"
implicit_tuple["Label"]=item.Column[2:]
if (hasattr(item,"Comment") and pd.notnull(item.Comment)) :
assertionString += " ;\n <" + properties_tuple["Comment"] + "> \"" + item.Comment + "\"^^xsd:string"
implicit_tuple["Comment"]=item.Comment
[implicit_tuple, assertionString, whereString, swrlString] = writeClassAttributeOrEntity(item, term_implicit, implicit_tuple, assertionString, whereString, swrlString)
[implicit_tuple, assertionString, whereString, swrlString] = writeClassAttributeOf(item, term_implicit, implicit_tuple, assertionString, whereString, swrlString)
[implicit_tuple, assertionString, whereString, swrlString] = writeClassUnit(item, term_implicit, implicit_tuple, assertionString, whereString, swrlString)
[implicit_tuple, assertionString, whereString, swrlString] = writeClassTime(item, term_implicit, implicit_tuple, assertionString, whereString, swrlString)
[implicit_tuple, assertionString, whereString, swrlString] = writeClassRelation(item, term_implicit, implicit_tuple, assertionString, whereString, swrlString)
assertionString += " .\n"
provenanceString += "\n <" + prefixes[kb] + item.Column[2:] + ">"
provenanceString +="\n <" + prov.generatedAtTime + "> \"" + "{:4d}-{:02d}-{:02d}".format(datetime.utcnow().year,datetime.utcnow().month,datetime.utcnow().day) + "T" + "{:02d}:{:02d}:{:02d}".format(datetime.utcnow().hour,datetime.utcnow().minute,datetime.utcnow().second) + "Z\"^^xsd:dateTime"
[implicit_tuple, provenanceString, whereString, swrlString] = writeClassWasGeneratedBy(item, term_implicit, implicit_tuple, provenanceString, whereString, swrlString)
[implicit_tuple, provenanceString, whereString, swrlString] = writeClassWasDerivedFrom(item, term_implicit, implicit_tuple, provenanceString, whereString, swrlString)
provenanceString += " .\n"
whereString += ".\n\n"
implicit_entry_tuples.append(implicit_tuple)
if timeline_tuple != {}:
for key in timeline_tuple :
assertionString += "\n " + convertImplicitToKGEntry(key) + " <" + rdf.type + "> owl:Class "
for timeEntry in timeline_tuple[key] :
if 'Type' in timeEntry :
assertionString += " ;\n rdfs:subClassOf " + timeEntry['Type']
if 'Label' in timeEntry :
assertionString += " ;\n <" + properties_tuple["Label"] + "> \"" + timeEntry['Label'] + "\"^^xsd:string"
if 'Start' in timeEntry and 'End' in timeEntry and timeEntry['Start'] == timeEntry['End']:
assertionString += " ;\n <" + properties_tuple["Value"] + "> " + str(timeEntry['Start']) # rewrite this as a restriction
if 'Start' in timeEntry :
if 'Unit' in timeEntry :
assertionString += " ;\n <" + rdfs.subClassOf + ">\n [ <" + rdf.type + "> <" + owl.Restriction + "> ;\n <" + owl.allValuesFrom + ">\n [ <" + rdf.type + "> <" + owl.Class + "> ;\n <" + owl.intersectionOf + "> ( [ <" + rdf.type + "> <" + owl.Restriction + "> ;\n <" + owl.hasValue + "> " + str(timeEntry['Start']) +" ;\n <" + owl.onProperty + "> <" + properties_tuple["Value"] + "> ] " + str(codeMapper(timeEntry['Unit'])) + " ) ] ;\n <" + owl.onProperty + "> <" + properties_tuple["Start"] + "> ] "
else : # update restriction that gets generated if unit is not specified
assertionString += " ;\n <" + properties_tuple["Start"] + "> [ <" + properties_tuple["Value"] + "> " + str(timeEntry['Start']) + " ]"
if 'End' in timeEntry :
if 'Unit' in timeEntry :
assertionString += " ;\n <" + rdfs.subClassOf + ">\n [ <" + rdf.type + "> <" + owl.Restriction + "> ;\n <" + owl.allValuesFrom + ">\n [ <" + rdf.type + "> <" + owl.Class + "> ;\n <" + owl.intersectionOf + "> ( [ <" + rdf.type + "> <" + owl.Restriction + "> ;\n <" + owl.hasValue + "> " + str(timeEntry['End']) +" ;\n <" + owl.onProperty + "> <" + properties_tuple["Value"] + "> ] " + str(codeMapper(timeEntry['Unit'])) + " ) ] ;\n <" + owl.onProperty + "> <" + properties_tuple["End"] + "> ] "
else : # update restriction that gets generated if unit is not specified
assertionString += " ;\n <" + properties_tuple["End"] + "> [ <" + properties_tuple["Value"] + "> " + str(timeEntry['End']) + " ]"
if 'Unit' in timeEntry :
assertionString += " ;\n <" + rdfs.subClassOf + "> \n [ <" + rdf.type + "> <" + owl.Restriction + "> ;\n <" + owl.hasValue + "> " + str(codeMapper(timeEntry['Unit'])) + " ;\n <" + owl.onProperty + "> <" + properties_tuple["Unit"] + "> ]"
#assertionString += " ;\n <" + properties_tuple["Unit"] + "> " + timeEntry['Unit']
if 'inRelationTo' in timeEntry :
assertionString += " ;\n <" + properties_tuple["inRelationTo"] + "> " + convertImplicitToKGEntry(timeEntry['inRelationTo'])
assertionString += " .\n"
provenanceString += "\n " + convertImplicitToKGEntry(key) + " <" + prov.generatedAtTime + "> \"" + "{:4d}-{:02d}-{:02d}".format(datetime.utcnow().year,datetime.utcnow().month,datetime.utcnow().day) + "T" + "{:02d}:{:02d}:{:02d}".format(datetime.utcnow().hour,datetime.utcnow().minute,datetime.utcnow().second) + "Z\"^^xsd:dateTime .\n"
if nanopublication_option == "enabled" :
output_file.write("<" + prefixes[kb] + "assertion-implicit_entry-" + datasetIdentifier + "> {" + assertionString + "\n}\n\n")
output_file.write("<" + prefixes[kb] + "provenance-implicit_entry-" + datasetIdentifier + "> {")
provenanceString = "\n <" + prefixes[kb] + "assertion-implicit_entry-" + datasetIdentifier + "> <" + prov.generatedAtTime + "> \"" + "{:4d}-{:02d}-{:02d}".format(datetime.utcnow().year,datetime.utcnow().month,datetime.utcnow().day) + "T" + "{:02d}:{:02d}:{:02d}".format(datetime.utcnow().hour,datetime.utcnow().minute,datetime.utcnow().second) + "Z\"^^xsd:dateTime .\n" + provenanceString
output_file.write(provenanceString + "\n}\n\n")
output_file.write("<" + prefixes[kb] + "pubInfo-implicit_entry-" + datasetIdentifier + "> {\n <" + prefixes[kb] + "nanoPub-implicit_entry-" + datasetIdentifier + "> <" + prov.generatedAtTime + "> \"" + "{:4d}-{:02d}-{:02d}".format(datetime.utcnow().year,datetime.utcnow().month,datetime.utcnow().day) + "T" + "{:02d}:{:02d}:{:02d}".format(datetime.utcnow().hour,datetime.utcnow().minute,datetime.utcnow().second) + "Z\"^^xsd:dateTime .\n}\n\n")
else :
output_file.write(assertionString + "\n")
output_file.write(provenanceString + "\n")
whereString += "}"
query_file.write(whereString)
swrl_file.write(swrlString[:-2])
return implicit_entry_tuples
def writeExplicitEntryTuples(explicit_entry_list, output_file, query_file, swrl_file, dm_fn) :
explicit_entry_tuples = []
assertionString = ''
provenanceString = ''
publicationInfoString = ''
selectString = "SELECT DISTINCT "
whereString = "WHERE {\n"
swrlString = ""
datasetIdentifier = hashlib.md5(dm_fn.encode('utf-8')).hexdigest()
if nanopublication_option == "enabled" :
output_file.write("<" + prefixes[kb] + "head-explicit_entry-" + datasetIdentifier + "> { ")
output_file.write("\n <" + prefixes[kb] + "nanoPub-explicit_entry-" + datasetIdentifier + "> <" + rdf.type + "> <" + np.Nanopublication + ">")
output_file.write(" ;\n <" + np.hasAssertion + "> <" + prefixes[kb] + "assertion-explicit_entry-" + datasetIdentifier + ">")
output_file.write(" ;\n <" + np.hasProvenance + "> <" + prefixes[kb] + "provenance-explicit_entry-" + datasetIdentifier + ">")
output_file.write(" ;\n <" + np.hasPublicationInfo + "> <" + prefixes[kb] + "pubInfo-explicit_entry-" + datasetIdentifier + ">")
output_file.write(" .\n}\n\n")
col_headers=list(pd.read_csv(dm_fn).columns.values)
for item in explicit_entry_list :
explicit_entry_tuple = {}
if "Template" in col_headers and pd.notnull(item.Template) :
explicit_entry_tuple["Template"]=item.Template
term = item.Column.replace(" ","_").replace(",","").replace("(","").replace(")","").replace("/","-").replace("\\","-")
assertionString += "\n <" + prefixes[kb] + term + "> <" + rdf.type + "> owl:Class"
selectString += "?" + term.lower() + " "
whereString += " ?" + term.lower() + "_E <" + rdf.type + "> "
term_expl = "?" + term.lower() + "_E"
#print(item.Column
explicit_entry_tuple["Column"]=item.Column
[explicit_entry_tuple, assertionString, whereString, swrlString] = writeClassAttributeOrEntity(item, term_expl, explicit_entry_tuple, assertionString, whereString, swrlString)
[explicit_entry_tuple, assertionString, whereString, swrlString] = writeClassAttributeOf(item, term_expl, explicit_entry_tuple, assertionString, whereString, swrlString)
[explicit_entry_tuple, assertionString, whereString, swrlString] = writeClassUnit(item, term_expl, explicit_entry_tuple, assertionString, whereString, swrlString)
[explicit_entry_tuple, assertionString, whereString, swrlString] = writeClassTime(item, term_expl, explicit_entry_tuple, assertionString, whereString, swrlString)
[explicit_entry_tuple, assertionString, whereString, swrlString] = writeClassRelation(item, term_expl, explicit_entry_tuple, assertionString, whereString, swrlString)
if "Label" in col_headers and (pd.notnull(item.Label)) :
if ',' in item.Label :
labels = parseString(item.Label,',')
for label in labels :
assertionString += " ;\n <" + properties_tuple["Label"] + "> \"" + label + "\"^^xsd:string"
else :
assertionString += " ;\n <" + properties_tuple["Label"] + "> \"" + item.Label + "\"^^xsd:string"
explicit_entry_tuple["Label"]=item.Label
if "Comment" in col_headers and (pd.notnull(item.Comment)) :
assertionString += " ;\n <" + properties_tuple["Comment"] + "> \"" + item.Comment + "\"^^xsd:string"
explicit_entry_tuple["Comment"]=item.Comment
if "Format" in col_headers and (pd.notnull(item.Format)) :
explicit_entry_tuple["Format"]=item.Format
assertionString += " .\n"
provenanceString += "\n <" + prefixes[kb] + term + ">"
provenanceString += "\n <" + prov.generatedAtTime + "> \"" + "{:4d}-{:02d}-{:02d}".format(datetime.utcnow().year,datetime.utcnow().month,datetime.utcnow().day) + "T" + "{:02d}:{:02d}:{:02d}".format(datetime.utcnow().hour,datetime.utcnow().minute,datetime.utcnow().second) + "Z\"^^xsd:dateTime"
[explicit_entry_tuple, provenanceString, whereString, swrlString] = writeClassWasGeneratedBy(item, term_expl, explicit_entry_tuple, provenanceString, whereString, swrlString)
[explicit_entry_tuple, provenanceString, whereString, swrlString] = writeClassWasDerivedFrom(item, term_expl, explicit_entry_tuple, provenanceString, whereString, swrlString)
provenanceString += " .\n"
whereString += " ;\n <" + properties_tuple["Value"] + "> ?" + term.lower() + " .\n\n"
if "hasPosition" in col_headers and pd.notnull(item.hasPosition) :
publicationInfoString += "\n <" + prefixes[kb] + term + "> hasco:hasPosition \"" + str(item.hasPosition) + "\"^^xsd:integer ."
explicit_entry_tuple["hasPosition"]=item.hasPosition
explicit_entry_tuples.append(explicit_entry_tuple)
if nanopublication_option == "enabled" :
output_file.write("<" + prefixes[kb] + "assertion-explicit_entry-" + datasetIdentifier + "> {" + assertionString + "\n}\n\n")
output_file.write("<" + prefixes[kb] + "provenance-explicit_entry-" + datasetIdentifier + "> {")
provenanceString = "\n <" + prefixes[kb] + "assertion-explicit_entry-" + datasetIdentifier + "> <" + prov.generatedAtTime + "> \"" + "{:4d}-{:02d}-{:02d}".format(datetime.utcnow().year,datetime.utcnow().month,datetime.utcnow().day) + "T" + "{:02d}:{:02d}:{:02d}".format(datetime.utcnow().hour,datetime.utcnow().minute,datetime.utcnow().second) + "Z\"^^xsd:dateTime .\n" + provenanceString
output_file.write(provenanceString + "\n}\n\n")
output_file.write("<" + prefixes[kb] + "pubInfo-explicit_entry-" + datasetIdentifier + "> {\n <" + prefixes[kb] + "nanoPub-explicit_entry-" + datasetIdentifier + "> <" + prov.generatedAtTime + "> \"" + "{:4d}-{:02d}-{:02d}".format(datetime.utcnow().year,datetime.utcnow().month,datetime.utcnow().day) + "T" + "{:02d}:{:02d}:{:02d}".format(datetime.utcnow().hour,datetime.utcnow().minute,datetime.utcnow().second) + "Z\"^^xsd:dateTime .")
output_file.write(publicationInfoString + "\n}\n\n")
else :
output_file.write(assertionString + "\n")
output_file.write(provenanceString + "\n")
query_file.write(selectString)
query_file.write(whereString)
swrl_file.write(swrlString)
return explicit_entry_tuples
def writeImplicitEntry(assertionString, provenanceString,publicationInfoString, explicit_entry_tuples, implicit_entry_tuples, timeline_tuple, vref_list, v_column, index, row, col_headers) :
try :
#col_headers=list(pd.read_csv(dm_fn).columns.values)
if timeline_tuple != {} :
if v_column in timeline_tuple :
v_id = hashlib.md5((str(timeline_tuple[v_column]) + str(index)).encode("utf-8")).hexdigest()
assertionString += "\n " + convertImplicitToKGEntry(v_column, v_id) + " <" + rdf.type + "> " + convertImplicitToKGEntry(v_column)
for timeEntry in timeline_tuple[v_column] :
if 'Type' in timeEntry :
assertionString += " ;\n <" + rdf.type + "> " + timeEntry['Type']
if 'Label' in timeEntry :
assertionString += " ;\n <" + properties_tuple["Label"] + "> \"" + timeEntry['Label'] + "\"^^xsd:string"
if 'Start' in timeEntry and 'End' in timeEntry and timeEntry['Start'] == timeEntry['End']:
assertionString += " ;\n <" + properties_tuple["Value"] + "> " + str(timeEntry['Start'])
if 'Start' in timeEntry :
assertionString += " ;\n <" + properties_tuple["Start"] + "> [ <" + properties_tuple["Value"] + "> " + str(timeEntry['Start']) + " ]"
if 'End' in timeEntry :
assertionString += " ;\n <" + properties_tuple["End"] + "> [ <" + properties_tuple["Value"] + "> " + str(timeEntry['End']) + " ]"
if 'Unit' in timeEntry :
assertionString += " ;\n <" + rdfs.subClassOf + "> \n [ <" + rdf.type + "> <" + owl.Restriction + "> ;\n <" + owl.hasValue + "> " + str(codeMapper(timeEntry['Unit'])) + " ;\n <" + owl.onProperty + "> <" + properties_tuple["Unit"] + "> ]"
#assertionString += " ;\n <" + properties_tuple["Unit"] + "> " + timeEntry['Unit']
if 'inRelationTo' in timeEntry :
assertionString += " ;\n <" + properties_tuple["inRelationTo"] + "> " + convertImplicitToKGEntry(timeEntry['inRelationTo'], v_id)
if checkImplicit(timeEntry['inRelationTo']) and timeEntry['inRelationTo'] not in vref_list :
vref_list.append(timeEntry['inRelationTo'])
assertionString += " .\n"
for v_tuple in implicit_entry_tuples :
if (v_tuple["Column"] == v_column) :
if "Study" in v_tuple :
continue
else :
v_id = hashlib.md5((str(v_tuple) + str(index)).encode("utf-8")).hexdigest()
if "Template" in v_tuple :
template_term = extractTemplate(col_headers,row,v_tuple["Template"])
termURI = "<" + prefixes[kb] + template_term + ">"
else :
termURI = "<" + prefixes[kb] + v_tuple["Column"][2:] + "-" + v_id + ">"
assertionString += "\n " + termURI + " <" + rdf.type + "> <" + prefixes[kb] + v_tuple["Column"][2:] + ">"
if "Entity" in v_tuple :
if ',' in v_tuple["Entity"] :
entities = parseString(v_tuple["Entity"],',')
for entity in entities :
assertionString += " ;\n <" + rdf.type + "> " + entity
else :
assertionString += " ;\n <" + rdf.type + "> " + v_tuple["Entity"]
if "Attribute" in v_tuple :
if ',' in v_tuple["Attribute"] :
attributes = parseString(v_tuple["Attribute"],',')
for attribute in attributes :
assertionString += " ;\n <" + rdf.type + "> " + attribute
else :
assertionString += " ;\n <" + rdf.type + "> " + v_tuple["Attribute"]
# Need to get the right ID uri if we put this in.. Commenting out identifier for now
#if "Subject" in v_tuple :
# assertionString += " ;\n sio:hasIdentifier <" + prefixes[kb] + v_tuple["Subject"] + "-" + v_id + ">" #should be actual ID
if "Label" in v_tuple :
if ',' in v_tuple["Label"] :
labels = parseString(v_tuple["Label"],',')
for label in labels :
assertionString += " ;\n <" + properties_tuple["Label"] + "> \"" + label + "\"^^xsd:string"
else :
assertionString += " ;\n <" + properties_tuple["Label"] + "> \"" + v_tuple["Label"] + "\"^^xsd:string"
if "Time" in v_tuple :
if checkImplicit(v_tuple["Time"]) :
for vr_tuple in implicit_entry_tuples :
if (vr_tuple["Column"] == v_tuple["Time"]) :
timeID = hashlib.md5((str(vr_tuple) + str(index)).encode("utf-8")).hexdigest()
assertionString += " ;\n <" + properties_tuple["Time"] + "> " + convertImplicitToKGEntry(v_tuple["Time"], timeID)
if v_tuple["Time"] not in vref_list :
vref_list.append(v_tuple["Time"])
else :
assertionString += " ;\n <" + properties_tuple["Time"] + "> " + convertImplicitToKGEntry(v_tuple["Time"], v_id) #should be actual ID
if "inRelationTo" in v_tuple :
relationToID = None
for vr_tuple in implicit_entry_tuples :
if (vr_tuple["Column"] == v_tuple["inRelationTo"]) :
relationToID = hashlib.md5((str(vr_tuple) + str(index)).encode("utf-8")).hexdigest()
if ("Role" in v_tuple) and ("Relation" not in v_tuple) :
assertionString += " ;\n <" + properties_tuple["Role"] + "> [ <" + rdf.type + "> " + v_tuple["Role"] + " ;\n <" + properties_tuple["inRelationTo"] + "> " + convertImplicitToKGEntry(v_tuple["inRelationTo"], relationToID) + " ]"
elif ("Role" not in v_tuple) and ("Relation" in v_tuple) :
assertionString += " ;\n " + v_tuple["Relation"] + " " + convertImplicitToKGEntry(v_tuple["inRelationTo"],relationToID)
elif ("Role" not in v_tuple) and ("Relation" not in v_tuple) :
assertionString += " ;\n <" + properties_tuple["inRelationTo"] + "> " + convertImplicitToKGEntry(v_tuple["inRelationTo"],relationToID)
elif "Role" in v_tuple :
assertionString += " ;\n <" + properties_tuple["Role"] + "> [ <" + rdf.type + "> " + v_tuple["Role"] + " ]"
assertionString += " .\n"
provenanceString += "\n " + termURI + " <" + prov.generatedAtTime + "> \"" + "{:4d}-{:02d}-{:02d}".format(datetime.utcnow().year,datetime.utcnow().month,datetime.utcnow().day) + "T" + "{:02d}:{:02d}:{:02d}".format(datetime.utcnow().hour,datetime.utcnow().minute,datetime.utcnow().second) + "Z\"^^xsd:dateTime"
if "wasGeneratedBy" in v_tuple :
if ',' in v_tuple["wasGeneratedBy"] :
generatedByTerms = parseString(v_tuple["wasGeneratedBy"],',')
for generatedByTerm in generatedByTerms :
provenanceString += " ;\n <" + properties_tuple["wasGeneratedBy"] + "> " + convertImplicitToKGEntry(generatedByTerm,v_id)
if checkImplicit(generatedByTerm) and generatedByTerm not in vref_list :
vref_list.append(generatedByTerm)
else :
provenanceString += " ;\n <" + properties_tuple["wasGeneratedBy"] + "> " + convertImplicitToKGEntry(v_tuple["wasGeneratedBy"],v_id)
if checkImplicit(v_tuple["wasGeneratedBy"]) and v_tuple["wasGeneratedBy"] not in vref_list :
vref_list.append(v_tuple["wasGeneratedBy"]);
if "wasDerivedFrom" in v_tuple :
if ',' in v_tuple["wasDerivedFrom"] :
derivedFromTerms = parseString(v_tuple["wasDerivedFrom"],',')
for derivedFromTerm in derivedFromTerms :
provenanceString += " ;\n <" + properties_tuple["wasDerivedFrom"] + "> " + convertImplicitToKGEntry(derivedFromTerm,v_id)
if checkImplicit(derivedFromTerm) and derivedFromTerm not in vref_list :
vref_list.append(derivedFromTerm);
else :
provenanceString += " ;\n <" + properties_tuple["wasDerivedFrom"] + "> " + convertImplicitToKGEntry(v_tuple["wasDerivedFrom"],v_id)
if checkImplicit(v_tuple["wasDerivedFrom"]) and v_tuple["wasDerivedFrom"] not in vref_list :
vref_list.append(v_tuple["wasDerivedFrom"]);
#if "wasGeneratedBy" in v_tuple or "wasDerivedFrom" in v_tuple :
provenanceString += " .\n"
return [assertionString,provenanceString,publicationInfoString,vref_list]
except Exception as e :
print("Warning: Unable to create implicit entry: " + str(e))
return [assertionString,provenanceString,publicationInfoString,vref_list]
def processInfosheet(output_file, dm_fn, cb_fn, cmap_fn, timeline_fn):
infosheet_tuple = {}
if 'infosheet' in config['Source Files'] :
infosheet_fn = config['Source Files']['infosheet']
try :
infosheet_file = pd.read_csv(infosheet_fn, dtype=object)
except Exception as e :
print("Warning: Collection metadata will not be written to the output file.\nThe specified Infosheet file does not exist or is unreadable: " + str(e))
return [dm_fn, cb_fn, cmap_fn, timeline_fn]
for row in infosheet_file.itertuples() :
if(pd.notnull(row.Value)):
infosheet_tuple[row.Attribute]=row.Value
# If SDD files included in Infosheet, they override the config declarations
if "Dictionary Mapping" in infosheet_tuple :
dm_fn = infosheet_tuple["Dictionary Mapping"]
if "Codebook" in infosheet_tuple :
cb_fn = infosheet_tuple["Codebook"]
if "Code Mapping" in infosheet_tuple :
cmap_fn = infosheet_tuple["Code Mapping"]
if "Timeline" in infosheet_tuple :
timeline_fn = infosheet_tuple["Timeline"]
datasetIdentifier = hashlib.md5(dm_fn.encode('utf-8')).hexdigest()
if nanopublication_option == "enabled" :
output_file.write("<" + prefixes[kb] + "head-collection_metadata-" + datasetIdentifier + "> { ")
output_file.write("\n <" + prefixes[kb] + "nanoPub-collection_metadata-" + datasetIdentifier + "> <" + rdf.type + "> <" + np.Nanopublication + ">")
output_file.write(" ;\n <" + np.hasAssertion + "> <" + prefixes[kb] + "assertion-collection_metadata-" + datasetIdentifier + ">")
output_file.write(" ;\n <" + np.hasProvenance + "> <" + prefixes[kb] + "provenance-collection_metadata-" + datasetIdentifier + ">")
output_file.write(" ;\n <" + np.hasPublicationInfo + "> <" + prefixes[kb] + "pubInfo-collection_metadata-" + datasetIdentifier + ">")
output_file.write(" .\n}\n\n")
assertionString = "<" + prefixes[kb] + "collection-" + datasetIdentifier + ">"
provenanceString = " <" + prefixes[kb] + "collection-" + datasetIdentifier + "> <http://www.w3.org/ns/prov#generatedAtTime> \"" + "{:4d}-{:02d}-{:02d}".format(datetime.utcnow().year,datetime.utcnow().month,datetime.utcnow().day) + "T" + "{:02d}:{:02d}:{:02d}".format(datetime.utcnow().hour,datetime.utcnow().minute,datetime.utcnow().second) + "Z\"^^xsd:dateTime"
if "Type" in infosheet_tuple :
assertionString += " <" + rdf.type + "> " + [infosheet_tuple["Type"],"<" + infosheet_tuple["Type"] + ">"][isURI(infosheet_tuple["Type"])]
else :
assertionString += " <" + rdf.type + "> <http://purl.org/dc/dcmitype/Collection>"
#print("Warning: The Infosheet file is missing the Type value declaration")
#sys.exit(1)
if "Title" in infosheet_tuple :
assertionString += " ;\n <http://purl.org/dc/terms/title> \"" + infosheet_tuple["Title"] + "\"^^xsd:string"
if "Alternative Title" in infosheet_tuple :
if ',' in infosheet_tuple["Alternative Title"] :
alt_titles = parseString(infosheet_tuple["Alternative Title"],',')
for alt_title in alt_titles :
assertionString += " ;\n <http://purl.org/dc/terms/alternative> \"" + alt_title + "\"^^xsd:string"
else :
assertionString += " ;\n <http://purl.org/dc/terms/alternative> \"" + infosheet_tuple["Alternative Title"] + "\"^^xsd:string"
if "Comment" in infosheet_tuple :
assertionString += " ;\n <http://www.w3.org/2000/01/rdf-schema#comment> \"" + infosheet_tuple["Comment"] + "\"^^xsd:string"
if "Description" in infosheet_tuple :
assertionString += " ;\n <http://purl.org/dc/terms/description> \"" + infosheet_tuple["Description"] + "\"^^xsd:string"
if "Date Created" in infosheet_tuple :
provenanceString += " ;\n <http://purl.org/dc/terms/created> \"" + infosheet_tuple["Date Created"] + "\"^^xsd:date"
if "Creators" in infosheet_tuple :
if ',' in infosheet_tuple["Creators"] :
creators = parseString(infosheet_tuple["Creators"],',')
for creator in creators :
provenanceString += " ;\n <http://purl.org/dc/terms/creator> " + ["\"" + creator + "\"^^xsd:string","<" + creator + ">"][isURI(creator)]
else :
provenanceString += " ;\n <http://purl.org/dc/terms/creator> " + ["\"" + infosheet_tuple["Creators"] + "\"^^xsd:string","<" + infosheet_tuple["Creators"] + ">"][isURI(infosheet_tuple["Creators"])]
if "Contributors" in infosheet_tuple :
if ',' in infosheet_tuple["Contributors"] :
contributors = parseString(infosheet_tuple["Contributors"],',')
for contributor in contributors :
provenanceString += " ;\n <http://purl.org/dc/terms/contributor> " + ["\"" + contributor + "\"^^xsd:string","<" + contributor + ">"][isURI(contributor)]
else :
provenanceString += " ;\n <http://purl.org/dc/terms/contributor> " + ["\"" + infosheet_tuple["Contributors"] + "\"^^xsd:string","<" + infosheet_tuple["Contributors"] + ">"][isURI(infosheet_tuple["Contributors"])]
if "Publisher" in infosheet_tuple :
if ',' in infosheet_tuple["Publisher"] :
publishers = parseString(infosheet_tuple["Publisher"],',')
for publisher in publishers :
provenanceString += " ;\n <http://purl.org/dc/terms/publisher> " + ["\"" + publisher + "\"^^xsd:string","<" + publisher + ">"][isURI(publisher)]
else :
provenanceString += " ;\n <http://purl.org/dc/terms/publisher> " + ["\"" + infosheet_tuple["Publisher"] + "\"^^xsd:string","<" + infosheet_tuple["Publisher"] + ">"][isURI(infosheet_tuple["Publisher"])]
if "Date of Issue" in infosheet_tuple :
provenanceString += " ;\n <http://purl.org/dc/terms/issued> \"" + infosheet_tuple["Date of Issue"] + "\"^^xsd:date"
if "Link" in infosheet_tuple :
assertionString += " ;\n <http://xmlns.com/foaf/0.1/page> <" + infosheet_tuple["Link"] + ">"
if "Identifier" in infosheet_tuple :
assertionString += " ;\n <http://semanticscience.org/resource/hasIdentifier> \n [ <" + rdf.type + "> <http://semanticscience.org/resource/Identifier> ; \n <http://semanticscience.org/resource/hasValue> \"" + infosheet_tuple["Identifier"] + "\"^^xsd:string ]"
if "Keywords" in infosheet_tuple :
if ',' in infosheet_tuple["Keywords"] :
keywords = parseString(infosheet_tuple["Keywords"],',')
for keyword in keywords :
assertionString += " ;\n <http://www.w3.org/ns/dcat#keyword> \"" + keyword + "\"^^xsd:string"
else :
assertionString += " ;\n <http://www.w3.org/ns/dcat#keyword> \"" + infosheet_tuple["Keywords"] + "\"^^xsd:string"
if "License" in infosheet_tuple :
if ',' in infosheet_tuple["License"] :
licenses = parseString(infosheet_tuple["License"],',')
for license in licenses :
assertionString += " ;\n <http://purl.org/dc/terms/license> " + ["\"" + license + "\"^^xsd:string","<" + license + ">"][isURI(license)]
else :
assertionString += " ;\n <http://purl.org/dc/terms/license> " + ["\"" + infosheet_tuple["License"] + "\"^^xsd:string","<" + infosheet_tuple["License"] + ">"][isURI(infosheet_tuple["License"])]
if "Rights" in infosheet_tuple :
if ',' in infosheet_tuple["Rights"] :
rights = parseString(infosheet_tuple["Rights"],',')
for right in rights :
assertionString += " ;\n <http://purl.org/dc/terms/rights> " + ["\"" + right + "\"^^xsd:string","<" + right + ">"][isURI(right)]
else :
assertionString += " ;\n <http://purl.org/dc/terms/rights> " + ["\"" + infosheet_tuple["Rights"] + "\"^^xsd:string","<" + infosheet_tuple["Rights"] + ">"][isURI(infosheet_tuple["Rights"])]
if "Language" in infosheet_tuple :
assertionString += " ;\n <http://purl.org/dc/terms/language> \"" + infosheet_tuple["Language"] + "\"^^xsd:string"
if "Version" in infosheet_tuple :
provenanceString += " ;\n <http://purl.org/pav/version> " + ["\"" + infosheet_tuple["Version"] + "\"^^xsd:string","<" + infosheet_tuple["Version"] + ">"][isURI(infosheet_tuple["Version"])]
provenanceString += " ;\n <http://www.w3.org/2002/07/owl#versionInfo> " + ["\"" + infosheet_tuple["Version"] + "\"^^xsd:string","<" + infosheet_tuple["Version"] + ">"][isURI(infosheet_tuple["Version"])]
if "Previous Version" in infosheet_tuple :
provenanceString += " ;\n <http://purl.org/pav/previousVersion> " + ["\"" + infosheet_tuple["Previous Version"] + "\"^^xsd:string","<" + infosheet_tuple["Previous Version"] + ">"][isURI(infosheet_tuple["Previous Version"])]
if "Version Of" in infosheet_tuple :
provenanceString += " ;\n <http://purl.org/dc/terms/isVersionOf> " + ["\"" + infosheet_tuple["Version Of"] + "\"^^xsd:string","<" + infosheet_tuple["Version Of"] + ">"][isURI(infosheet_tuple["Version Of"])]
if "Standards" in infosheet_tuple :
if ',' in infosheet_tuple["Standards"] :
standards = parseString(infosheet_tuple["Standards"],',')
for standard in standards :
assertionString += " ;\n <http://purl.org/dc/terms/conformsTo> " + ["\"" + standard + "\"^^xsd:string","<" + standard + ">"][isURI(standard)]
else :
assertionString += " ;\n <http://purl.org/dc/terms/conformsTo> " + ["\"" + infosheet_tuple["Standards"] + "\"^^xsd:string","<" + infosheet_tuple["Standards"] + ">"][isURI(infosheet_tuple["Standards"])]
if "Source" in infosheet_tuple :
if ',' in infosheet_tuple["Source"] :
sources = parseString(infosheet_tuple["Source"],',')
for source in sources :
provenanceString += " ;\n <http://purl.org/dc/terms/source> \"" + source + "\"^^xsd:string"
else :
provenanceString += " ;\n <http://purl.org/dc/terms/source> " + ["\"" + infosheet_tuple["Source"] + "\"^^xsd:string","<" + infosheet_tuple["Source"] + ">"][isURI(infosheet_tuple["Source"])]
if "File Format" in infosheet_tuple :
assertionString += " ;\n <http://purl.org/dc/terms/format> \"" + infosheet_tuple["File Format"] + "\"^^xsd:string"
if "Documentation" in infosheet_tuple : # currently encoded as URI, should confirm that it really is one
provenanceString += " ;\n <http://www.w3.org/ns/dcat#landingPage> <" + infosheet_tuple["Documentation"] + ">"
if "Imports" in infosheet_tuple :
if ',' in infosheet_tuple["Imports"] :
imports = parseString(infosheet_tuple["Imports"],',')
for imp in imports :
assertionString += " ;\n <http://www.w3.org/2002/07/owl#imports> " + [imp,"<" + imp + ">"][isURI(imp)]
else :
assertionString += " ;\n <http://www.w3.org/2002/07/owl#imports> " + [infosheet_tuple["Imports"],"<" + infosheet_tuple["Imports"] + ">"][isURI(infosheet_tuple["Imports"])]
assertionString += " .\n"
provenanceString += " .\n"
if nanopublication_option == "enabled" :
output_file.write("<" + prefixes[kb] + "assertion-collection_metadata-" + datasetIdentifier + "> {\n " + assertionString + "\n}\n\n")
output_file.write("<" + prefixes[kb] + "provenance-collection_metadata-" + datasetIdentifier + "> {\n <" + prefixes[kb] + "assertion-collection_metadata-" + datasetIdentifier + "> <http://www.w3.org/ns/prov#generatedAtTime> \"" + "{:4d}-{:02d}-{:02d}".format(datetime.utcnow().year,datetime.utcnow().month,datetime.utcnow().day) + "T" + "{:02d}:{:02d}:{:02d}".format(datetime.utcnow().hour,datetime.utcnow().minute,datetime.utcnow().second) + "Z\"^^xsd:dateTime .\n" + provenanceString + "\n}\n\n")
output_file.write("<" + prefixes[kb] + "pubInfo-collection_metadata-" + datasetIdentifier + "> {")
publicationInfoString = "\n <" + prefixes[kb] + "nanoPub-collection_metadata-" + datasetIdentifier + "> <http://www.w3.org/ns/prov#generatedAtTime> \"" + "{:4d}-{:02d}-{:02d}".format(datetime.utcnow().year,datetime.utcnow().month,datetime.utcnow().day) + "T" + "{:02d}:{:02d}:{:02d}".format(datetime.utcnow().hour,datetime.utcnow().minute,datetime.utcnow().second) + "Z\"^^xsd:dateTime .\n"
output_file.write(publicationInfoString + "\n}\n\n")
else :
output_file.write(assertionString +"\n\n")
output_file.write(provenanceString + "\n")
return [dm_fn, cb_fn, cmap_fn, timeline_fn]
def processPrefixes(output_file,query_file):
prefixes = {}
if 'prefixes' in config['Prefixes']:
prefix_fn = config['Prefixes']['prefixes']
else:
prefix_fn="prefixes.csv"
try:
prefix_file = pd.read_csv(prefix_fn, dtype=object)
for row in prefix_file.itertuples() :
prefixes[row.prefix] = row.url
for prefix in prefixes :
#print(prefix.find(">"))
output_file.write("@prefix " + prefix + ": <" + prefixes[prefix] + "> .\n")
query_file.write("prefix " + prefix + ": <" + prefixes[prefix] + "> \n")
query_file.write("\n")
output_file.write("\n")
except Exception as e :
print("Warning: Something went wrong when trying to read the prefixes file: " + str(e))
return prefixes
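# Illustrative layout of the prefixes CSV read by processPrefixes() above (the example rows
# are assumptions, not from the source; the column headers must be exactly 'prefix' and 'url'):
#
#   prefix,url
#   kb,http://example.org/kb/
#   sio,http://semanticscience.org/resource/
#   prov,http://www.w3.org/ns/prov#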
def processCodeMappings(cmap_fn):
unit_code_list = []
unit_uri_list = []
unit_label_list = []
if cmap_fn is not None :
try :
code_mappings_reader = pd.read_csv(cmap_fn)
#Using itertuples on a data frame makes the column heads case-sensitive
for code_row in code_mappings_reader.itertuples() :
if pd.notnull(code_row.code):
unit_code_list.append(code_row.code)
if pd.notnull(code_row.uri):
unit_uri_list.append(code_row.uri)
if pd.notnull(code_row.label):
unit_label_list.append(code_row.label)
except Exception as e :
print("Warning: Something went wrong when trying to read the Code Mappings file: " + str(e))
return [unit_code_list,unit_uri_list,unit_label_list]
def processProperties():
properties_tuple = {'Comment': rdfs.comment, 'attributeOf': sio.isAttributeOf, 'Attribute': rdf.type, 'Definition' : skos.definition, 'Value' : sio.hasValue, 'wasDerivedFrom': prov.wasDerivedFrom, 'Label': rdfs.label, 'inRelationTo': sio.inRelationTo, 'Role': sio.hasRole, 'Start' : sio.hasStartTime, 'End' : sio.hasEndTime, 'Time': sio.existsAt, 'Entity': rdf.type, 'Unit': sio.hasUnit, 'wasGeneratedBy': prov.wasGeneratedBy}
if 'properties' in config['Source Files'] :
properties_fn = config['Source Files']['properties']
try :
properties_file = pd.read_csv(properties_fn, dtype=object)
except Exception as e :
print("Warning: The specified Properties file does not exist or is unreadable: " + str(e))
return properties_tuple
for row in properties_file.itertuples() :
if(hasattr(row,"Property") and pd.notnull(row.Property)):
if(("http://" in row.Property) or ("https://" in row.Property)) :
properties_tuple[row.Column]=row.Property
elif(":" in row.Property) :
terms = row.Property.split(":")
properties_tuple[row.Column]=rdflib.term.URIRef(prefixes[terms[0]]+terms[1])
elif("." in row.Property) :
terms = row.Property.split(".")
properties_tuple[row.Column]=rdflib.term.URIRef(prefixes[terms[0]]+terms[1])
return properties_tuple
def processTimeline(timeline_fn):
timeline_tuple = {}
if timeline_fn is not None :
try :
timeline_file = pd.read_csv(timeline_fn, dtype=object)
try :
inner_tuple_list = []
row_num=0
for row in timeline_file.itertuples():
if (pd.notnull(row.Name) and row.Name not in timeline_tuple) :
inner_tuple_list=[]
inner_tuple = {}
inner_tuple["Type"]=row.Type
if(hasattr(row,"Label") and pd.notnull(row.Label)):
inner_tuple["Label"]=row.Label
if(pd.notnull(row.Start)) :
inner_tuple["Start"]=row.Start
if(pd.notnull(row.End)) :
inner_tuple["End"]=row.End
if(hasattr(row,"Unit") and pd.notnull(row.Unit)) :
inner_tuple["Unit"]=row.Unit
if(hasattr(row,"inRelationTo") and pd.notnull(row.inRelationTo)) :
inner_tuple["inRelationTo"]=row.inRelationTo
inner_tuple_list.append(inner_tuple)
timeline_tuple[row.Name]=inner_tuple_list
row_num += 1
except Exception as e :
print("Warning: Unable to process Timeline file: " + str(e))
except Exception as e :
print("Warning: The specified Timeline file does not exist: " + str(e))
#sys.exit(1)
return timeline_tuple
def processDictionaryMapping(dm_fn):
try :
dm_file = pd.read_csv(dm_fn, dtype=object)
except Exception as e:
print("Current directory: " + os.getcwd() + "/ - " + str(os.path.isfile(dm_fn)) )
print("Error processing the DM file \"" + dm_fn + "\": " + str(e))
sys.exit(1)
try:
# Set implicit and explicit entries
for row in dm_file.itertuples() :
if (pd.isnull(row.Column)) :
print("Error: Every row in the DM must have a non-empty 'Column' value")
sys.exit(1)
if row.Column.startswith("??") :
implicit_entry_list.append(row)
else :
explicit_entry_list.append(row)
except Exception as e :
print("Something went wrong when trying to read the DM: " + str(e))
sys.exit(1)
return [explicit_entry_list,implicit_entry_list]
def processCodebook(cb_fn):
cb_tuple = {}
if cb_fn is not None :
try :
cb_file = pd.read_csv(cb_fn, dtype=object)
except Exception as e:
print("Error processing the Codebook file: " + str(e))
sys.exit(1)
try :
inner_tuple_list = []
row_num=0
for row in cb_file.itertuples():
if (pd.notnull(row.Column) and row.Column not in cb_tuple) :
inner_tuple_list=[]
inner_tuple = {}
inner_tuple["Code"]=row.Code
if(hasattr(row,"Label") and pd.notnull(row.Label)):
inner_tuple["Label"]=row.Label
if(hasattr(row,"Class") and pd.notnull(row.Class)) :
inner_tuple["Class"]=row.Class
if (hasattr(row,"Resource") and pd.notnull(row.Resource)) : # "Resource" in row and
inner_tuple["Resource"]=row.Resource
if (hasattr(row,"Comment") and pd.notnull(row.Comment)) :
inner_tuple["Comment"]=row.Comment
if (hasattr(row,"Definition") and pd.notnull(row.Definition)) :
inner_tuple["Definition"]=row.Definition
inner_tuple_list.append(inner_tuple)
cb_tuple[row.Column]=inner_tuple_list
row_num += 1
except Exception as e :
print("Warning: Unable to process Codebook file: " + str(e))
return cb_tuple
def processData(data_fn, output_file, query_file, swrl_file, cb_tuple, timeline_tuple, explicit_entry_tuples, implicit_entry_tuples):
xsd_datatype_list = ["anyURI", "base64Binary", "boolean", "date", "dateTime", "decimal", "double", "duration", "float", "hexBinary", "gDay", "gMonth", "gMonthDay", "gYear", "gYearMonth", "NOTATION", "QName", "string", "time" ]
if data_fn != None :
try :
data_file = pd.read_csv(data_fn, dtype=object)
except Exception as e :
print("Error: The specified Data file does not exist: " + str(e))
sys.exit(1)
try :
# ensure that there is a column annotated as the sio:Identifier or hasco:originalID in the data file:
# TODO make sure this is getting the first available ID property for the _subject_ (and not anything else)
col_headers=list(data_file.columns.values)
#id_index=None
try :
for a_tuple in explicit_entry_tuples :
if "Attribute" in a_tuple :
if ((a_tuple["Attribute"] == "hasco:originalID") or (a_tuple["Attribute"] == "sio:Identifier")) :
if(a_tuple["Column"] in col_headers) :
#print(a_tuple["Column"])
#id_index = col_headers.index(a_tuple["Column"])# + 1
#print(id_index)
for v_tuple in implicit_entry_tuples :
if "isAttributeOf" in a_tuple :
if (a_tuple["isAttributeOf"] == v_tuple["Column"]) :
v_tuple["Subject"]=a_tuple["Column"].replace(" ","_").replace(",","").replace("(","").replace(")","").replace("/","-").replace("\\","-")
except Exception as e :
print("Error processing column headers: " + str(e))
for row in data_file.itertuples() :
#print(row)
assertionString = ''
provenanceString = ''
publicationInfoString = ''
id_string=''
for term in row[1:] :
if term is not None:
id_string+=str(term)
npubIdentifier = hashlib.md5(id_string.encode("utf-8")).hexdigest()
try:
if nanopublication_option == "enabled" :
output_file.write("<" + prefixes[kb] + "head-" + npubIdentifier + "> {")
output_file.write("\n <" + prefixes[kb] + "nanoPub-" + npubIdentifier + ">")
output_file.write("\n <" + rdf.type + "> <" + np.Nanopublication + ">")
output_file.write(" ;\n <" + np.hasAssertion + "> <" + prefixes[kb] + "assertion-" + npubIdentifier + ">")
output_file.write(" ;\n <" + np.hasProvenance + "> <" + prefixes[kb] + "provenance-" + npubIdentifier + ">")
output_file.write(" ;\n <" + np.hasPublicationInfo + "> <" + prefixes[kb] + "pubInfo-" + npubIdentifier + ">")
output_file.write(" .\n}\n\n")# Nanopublication head
vref_list = []
for a_tuple in explicit_entry_tuples :
#print(a_tuple["Column"])
#print(col_headers)
#print("\n")
if (a_tuple["Column"] in col_headers ) :
typeString = ""
if "Attribute" in a_tuple :
typeString += str(a_tuple["Attribute"])
if "Entity" in a_tuple :
typeString += str(a_tuple["Entity"])
if "Label" in a_tuple :
typeString += str(a_tuple["Label"])
if "Unit" in a_tuple :
typeString += str(a_tuple["Unit"])
if "Time" in a_tuple :
typeString += str(a_tuple["Time"])
if "inRelationTo" in a_tuple :
typeString += str(a_tuple["inRelationTo"])
if "wasGeneratedBy" in a_tuple :
typeString += str(a_tuple["wasGeneratedBy"])
if "wasDerivedFrom" in a_tuple :
typeString += str(a_tuple["wasDerivedFrom"])
identifierString = hashlib.md5((str(row[col_headers.index(a_tuple["Column"])+1])+typeString).encode("utf-8")).hexdigest()
try :
if "Template" in a_tuple :
template_term = extractTemplate(col_headers,row,a_tuple["Template"])
termURI = "<" + prefixes[kb] + template_term + ">"
else :
termURI = "<" + prefixes[kb] + a_tuple["Column"].replace(" ","_").replace(",","").replace("(","").replace(")","").replace("/","-").replace("\\","-") + "-" + identifierString + ">"
try :
#print(termURI)
#print("\n\n")
assertionString += "\n " + termURI + "\n <" + rdf.type + "> <" + prefixes[kb] + a_tuple["Column"].replace(" ","_").replace(",","").replace("(","").replace(")","").replace("/","-").replace("\\","-") + ">"
if "Attribute" in a_tuple :
if ',' in a_tuple["Attribute"] :
attributes = parseString(a_tuple["Attribute"],',')
for attribute in attributes :
assertionString += " ;\n <" + properties_tuple["Attribute"] + "> " + attribute
else :
assertionString += " ;\n <" + properties_tuple["Attribute"] + "> " + a_tuple["Attribute"]
if "Entity" in a_tuple :
if ',' in a_tuple["Entity"] :
entities = parseString(a_tuple["Entity"],',')
for entity in entities :
assertionString += " ;\n <" + properties_tuple["Entity"] + "> " + entity
else :
assertionString += " ;\n <" + properties_tuple["Entity"] + "> " + a_tuple["Entity"]
if "isAttributeOf" in a_tuple :
if checkImplicit(a_tuple["isAttributeOf"]) :
v_id = assignVID(implicit_entry_tuples,timeline_tuple,a_tuple,"isAttributeOf", npubIdentifier)
vTermURI = assignTerm(col_headers, "isAttributeOf", implicit_entry_tuples, a_tuple, row, v_id)
assertionString += " ;\n <" + properties_tuple["attributeOf"] + "> " + vTermURI
if a_tuple["isAttributeOf"] not in vref_list :
vref_list.append(a_tuple["isAttributeOf"])
elif checkTemplate(a_tuple["isAttributeOf"]):
assertionString += " ;\n <" + properties_tuple["attributeOf"] + "> <" + prefixes[kb] + str(extractExplicitTerm(col_headers,row,a_tuple["isAttributeOf"])) + ">"
else :
assertionString += " ;\n <" + properties_tuple["attributeOf"] + "> " + convertImplicitToKGEntry(a_tuple["isAttributeOf"],identifierString)
if "Unit" in a_tuple :
if checkImplicit(a_tuple["Unit"]) :
v_id = assignVID(implicit_entry_tuples,timeline_tuple,a_tuple,"Unit", npubIdentifier)
vTermURI = assignTerm(col_headers, "Unit", implicit_entry_tuples, a_tuple, row, v_id)
assertionString += " ;\n <" + properties_tuple["Unit"] + "> " + vTermURI
if a_tuple["Unit"] not in vref_list :
vref_list.append(a_tuple["Unit"])
elif checkTemplate(a_tuple["Unit"]):
assertionString += " ;\n <" + properties_tuple["Unit"] + "> <" + prefixes[kb] + str(extractExplicitTerm(col_headers,row,a_tuple["Unit"])) + ">"
else :
assertionString += " ;\n <" + properties_tuple["Unit"] + "> " + a_tuple["Unit"]
if "Time" in a_tuple :
if checkImplicit(a_tuple["Time"]) :
foundBool = False
for v_tuple in implicit_entry_tuples : # referenced in implicit list
if v_tuple["Column"] == a_tuple["Time"]:
foundBool = True
if(foundBool) :
v_id = assignVID(implicit_entry_tuples,timeline_tuple,a_tuple,"Time", npubIdentifier)
vTermURI = assignTerm(col_headers, "Time", implicit_entry_tuples, a_tuple, row, v_id)
assertionString += " ;\n <" + properties_tuple["Time"] + "> " + vTermURI
else : # Check timeline
for t_tuple in timeline_tuple :
if t_tuple == a_tuple["Time"] :
vTermURI = convertImplicitToKGEntry(t_tuple)
assertionString += " ;\n <" + properties_tuple["Time"] + "> [ rdf:type " + vTermURI + " ] "
#if t_tuple["Column"] == a_tuple["Time"]:
#if a_tuple["Time"] not in vref_list :
# vref_list.append(a_tuple["Time"])
elif checkTemplate(a_tuple["Time"]):
assertionString += " ;\n <" + properties_tuple["Time"] + "> <" + prefixes[kb] + str(extractExplicitTerm(col_headers,row,a_tuple["Time"])) + ">"
else :
assertionString += " ;\n <" + properties_tuple["Time"] + "> " + convertImplicitToKGEntry(a_tuple["Time"], identifierString)
if "Label" in a_tuple :
if ',' in a_tuple["Label"] :
labels = parseString(a_tuple["Label"],',')
for label in labels :
assertionString += " ;\n <" + properties_tuple["Label"] + "> \"" + label + "\"^^xsd:string"
else :
assertionString += " ;\n <" + properties_tuple["Label"] + "> \"" + a_tuple["Label"] + "\"^^xsd:string"
if "Comment" in a_tuple :
assertionString += " ;\n <" + properties_tuple["Comment"] + "> \"" + a_tuple["Comment"] + "\"^^xsd:string"
if "inRelationTo" in a_tuple :
if checkImplicit(a_tuple["inRelationTo"]) :
v_id = assignVID(implicit_entry_tuples,timeline_tuple,a_tuple,"inRelationTo", npubIdentifier)
vTermURI = assignTerm(col_headers, "inRelationTo", implicit_entry_tuples, a_tuple, row, v_id)
if a_tuple["inRelationTo"] not in vref_list :
vref_list.append(a_tuple["inRelationTo"])
if "Relation" in a_tuple :
assertionString += " ;\n " + a_tuple["Relation"] + " " + vTermURI
elif "Role" in a_tuple :
assertionString += " ;\n <" + properties_tuple["Role"] + "> [ <" + rdf.type + "> " + a_tuple["Role"] + " ;\n <" + properties_tuple["inRelationTo"] + "> " + vTermURI + " ]"
else :
assertionString += " ;\n <" + properties_tuple["inRelationTo"] + "> " + vTermURI
elif checkTemplate(a_tuple["inRelationTo"]):
if "Relation" in a_tuple :
assertionString += " ;\n " + a_tuple["Relation"] + " <" + prefixes[kb] + str(extractExplicitTerm(col_headers,row,a_tuple["inRelationTo"])) + ">"
elif "Role" in a_tuple :
assertionString += " ;\n <" + properties_tuple["Role"] + "> [ <" + rdf.type + "> " + a_tuple["Role"] + " ;\n <" + properties_tuple["inRelationTo"] + "> <" + prefixes[kb] + str(extractExplicitTerm(col_headers,row,a_tuple["inRelationTo"])) + "> ]"
else:
assertionString += " ;\n <" + properties_tuple["inRelationTo"] + "> <" + prefixes[kb] + str(extractExplicitTerm(col_headers,row,a_tuple["inRelationTo"])) + ">"
else:
if "Relation" in a_tuple :
assertionString += " ;\n " + a_tuple["Relation"] + " " + convertImplicitToKGEntry(a_tuple["inRelationTo"], identifierString)
elif "Role" in a_tuple :
assertionString += " ;\n <" + properties_tuple["Role"] + "> [ <" + rdf.type + "> " + a_tuple["Role"] + " ;\n <" + properties_tuple["inRelationTo"] + "> " + convertImplicitToKGEntry(a_tuple["inRelationTo"],identifierString) + " ]"
else :
assertionString += " ;\n <" + properties_tuple["inRelationTo"] + "> " + convertImplicitToKGEntry(a_tuple["inRelationTo"], identifierString)
except Exception as e:
print("Error writing initial assertion elements: ")
if hasattr(e, 'message'):
print(e.message)
else:
print(e)
try :
if row[col_headers.index(a_tuple["Column"])+1] != "" :
#print(row[col_headers.index(a_tuple["Column"])])
if cb_tuple != {} :
if a_tuple["Column"] in cb_tuple :
#print(a_tuple["Column"])
for tuple_row in cb_tuple[a_tuple["Column"]] :
#print(tuple_row)
if ("Code" in tuple_row) and (str(tuple_row['Code']) == str(row[col_headers.index(a_tuple["Column"])+1]) ):
#print(tuple_row['Code'])
if ("Class" in tuple_row) and (tuple_row['Class'] != "") :
if ',' in tuple_row['Class'] :
classTerms = parseString(tuple_row['Class'],',')
for classTerm in classTerms :
assertionString += " ;\n <" + rdf.type + "> " + convertImplicitToKGEntry(codeMapper(classTerm))
else :
assertionString += " ;\n <" + rdf.type + "> "+ convertImplicitToKGEntry(codeMapper(tuple_row['Class']))
if ("Resource" in tuple_row) and (tuple_row['Resource'] != "") :
if ',' in tuple_row['Resource'] :
classTerms = parseString(tuple_row['Resource'],',')
for classTerm in classTerms :
assertionString += " ;\n <" + rdf.type + "> " + convertImplicitToKGEntry(codeMapper(classTerm))
else :
assertionString += " ;\n <" + rdf.type + "> " + convertImplicitToKGEntry(codeMapper(tuple_row['Resource']))
if ("Label" in tuple_row) and (tuple_row['Label'] != "") :
assertionString += " ;\n <" + properties_tuple["Label"] + "> \"" + tuple_row['Label'] + "\"^^xsd:string"
if ("Comment" in tuple_row) and (tuple_row['Comment'] != "") :
assertionString += " ;\n <" + properties_tuple["Comment"] + "> \"" + tuple_row['Comment'] + "\"^^xsd:string"
if ("Definition" in tuple_row) and (tuple_row['Definition'] != "") :
assertionString += " ;\n <" + properties_tuple["Definition"] + "> \"" + tuple_row['Definition'] + "\"^^xsd:string"
#print(str(row[col_headers.index(a_tuple["Column"])]))
try :
if str(row[col_headers.index(a_tuple["Column"])+1]) == "nan" :
pass
# Check if Format was populated in the DM row of the current data point; missing ("nan")
# values were skipped above. (A standalone sketch of this datatype fallback order follows
# processData below.)
elif ("Format" in a_tuple) and (a_tuple['Format'] != "") :
# Check if an xsd prefix is included in the populated Format cell
if("xsd:" in a_tuple['Format']):
assertionString += " ;\n <" + properties_tuple["Value"] + "> \"" + str(row[col_headers.index(a_tuple["Column"])+1]) + "\"^^" + a_tuple['Format']
# If the Format cell is populated, but the xsd prefix isn't specified, do a string match over the set of primitive xsd types
elif a_tuple['Format'] in xsd_datatype_list :
assertionString += " ;\n <" + properties_tuple["Value"] + "> \"" + str(row[col_headers.index(a_tuple["Column"])+1]) + "\"^^xsd:" + a_tuple['Format']
# If the Format cell isn't populated, check is the data value is an integer
elif str(row[col_headers.index(a_tuple["Column"])+1]).isdigit() :
assertionString += " ;\n <" + properties_tuple["Value"] + "> \"" + str(row[col_headers.index(a_tuple["Column"])+1]) + "\"^^xsd:integer"
# Next check if it is a float
elif isfloat(str(row[col_headers.index(a_tuple["Column"])+1])) :
assertionString += " ;\n <" + properties_tuple["Value"] + "> \"" + str(row[col_headers.index(a_tuple["Column"])+1]) + "\"^^xsd:float"
# By default, assign 'xsd:string' as the datatype
else :
assertionString += " ;\n <" + properties_tuple["Value"] + "> \"" + str(row[col_headers.index(a_tuple["Column"])+1]).replace("\"","'") + "\"^^xsd:string"
except Exception as e :
print("Warning: unable to write value to assertion string:", row[col_headers.index(a_tuple["Column"])+1] + ": " + str(e))
assertionString += " .\n"
except Exception as e:
print("Error writing data value to assertion string:", row[col_headers.index(a_tuple["Column"])+1], ": " + str(e))
try :
provenanceString += "\n " + termURI + "\n <" + prov.generatedAtTime + "> \"" + "{:4d}-{:02d}-{:02d}".format(datetime.utcnow().year,datetime.utcnow().month,datetime.utcnow().day) + "T" + "{:02d}:{:02d}:{:02d}".format(datetime.utcnow().hour,datetime.utcnow().minute,datetime.utcnow().second) + "Z\"^^xsd:dateTime"
if "wasDerivedFrom" in a_tuple :
v_id = assignVID(implicit_entry_tuples,timeline_tuple,a_tuple,"wasDerivedFrom", npubIdentifier)
if ',' in a_tuple["wasDerivedFrom"] :
derivedFromTerms = parseString(a_tuple["wasDerivedFrom"],',')
for derivedFromTerm in derivedFromTerms :
if checkImplicit(derivedFromTerm) :
provenanceString += " ;\n <" + properties_tuple["wasDerivedFrom"] + "> " + convertImplicitToKGEntry(derivedFromTerm, v_id)
if derivedFromTerm not in vref_list :
vref_list.append(derivedFromTerm)
elif checkTemplate(derivedFromTerm):
provenanceString += " ;\n <" + properties_tuple["wasDerivedFrom"] + "> <" + prefixes[kb] + str(extractExplicitTerm(col_headers,row,derivedFromTerm)) + ">"
else :
provenanceString += " ;\n <" + properties_tuple["wasDerivedFrom"] + "> " + convertImplicitToKGEntry(derivedFromTerm, identifierString)
elif checkImplicit(a_tuple["wasDerivedFrom"]) :
vTermURI = assignTerm(col_headers, "wasDerivedFrom", implicit_entry_tuples, a_tuple, row, v_id)
provenanceString += " ;\n <" + properties_tuple["wasDerivedFrom"] + "> " + vTermURI
if a_tuple["wasDerivedFrom"] not in vref_list :
vref_list.append(a_tuple["wasDerivedFrom"])
elif checkTemplate(a_tuple["wasDerivedFrom"]):
provenanceString += " ;\n <" + properties_tuple["wasDerivedFrom"] + "> <" + prefixes[kb] + str(extractExplicitTerm(col_headers,row,a_tuple["wasDerivedFrom"])) + ">"
else :
provenanceString += " ;\n <" + properties_tuple["wasDerivedFrom"] + "> " + convertImplicitToKGEntry(a_tuple["wasDerivedFrom"], identifierString)
if "wasGeneratedBy" in a_tuple :
v_id = assignVID(implicit_entry_tuples,timeline_tuple,a_tuple,"wasGeneratedBy", npubIdentifier)
if ',' in a_tuple["wasGeneratedBy"] :
generatedByTerms = parseString(a_tuple["wasGeneratedBy"],',')
for generatedByTerm in generatedByTerms :
if checkImplicit(generatedByTerm) :
provenanceString += " ;\n <" + properties_tuple["wasGeneratedBy"] + "> " + convertImplicitToKGEntry(generatedByTerm, v_id)
if generatedByTerm not in vref_list :
vref_list.append(generatedByTerm)
elif checkTemplate(generatedByTerm):
provenanceString += " ;\n <" + properties_tuple["wasGeneratedBy"] + "> <" + prefixes[kb] + str(extractExplicitTerm(col_headers,row,generatedByTerm)) + ">"
else:
provenanceString += " ;\n <" + properties_tuple["wasGeneratedBy"] + "> " + convertImplicitToKGEntry(generatedByTerm, identifierString)
elif checkImplicit(a_tuple["wasGeneratedBy"]) :
vTermURI = assignTerm(col_headers, "wasGeneratedBy", implicit_entry_tuples, a_tuple, row, v_id)
provenanceString += " ;\n <" + properties_tuple["wasGeneratedBy"] + "> " + vTermURI
if a_tuple["wasGeneratedBy"] not in vref_list :
vref_list.append(a_tuple["wasGeneratedBy"])
elif checkTemplate(a_tuple["wasGeneratedBy"]):
provenanceString += " ;\n <" + properties_tuple["wasGeneratedBy"] + "> <" + prefixes[kb] + str(extractExplicitTerm(col_headers,row,a_tuple["wasGeneratedBy"])) + ">"
else :
provenanceString += " ;\n <" + properties_tuple["wasGeneratedBy"] + "> " + convertImplicitToKGEntry(a_tuple["wasGeneratedBy"], identifierString)
provenanceString += " .\n"
if "hasPosition" in a_tuple :
publicationInfoString += "\n " + termURI + "\n hasco:hasPosition \"" + str(a_tuple["hasPosition"]) + "\"^^xsd:integer ."
except Exception as e:
print("Error writing provenance or publication info: " + str(e))
except Exception as e:
print("Unable to process tuple" + a_tuple.__str__() + ": " + str(e))
try:
for vref in vref_list :
[assertionString,provenanceString,publicationInfoString,vref_list] = writeImplicitEntry(assertionString,provenanceString,publicationInfoString,explicit_entry_tuples, implicit_entry_tuples, timeline_tuple, vref_list, vref, npubIdentifier, row, col_headers)
except Exception as e:
print("Warning: Something went wrong writing implicit entries: " + str(e))
except Exception as e:
print("Error: Something went wrong when processing explicit tuples: " + str(e))
sys.exit(1)
if nanopublication_option == "enabled" :
output_file.write("<" + prefixes[kb] + "assertion-" + npubIdentifier + "> {" + assertionString + "\n}\n\n")
output_file.write("<" + prefixes[kb] + "provenance-" + npubIdentifier + "> {")
provenanceString = "\n <" + prefixes[kb] + "assertion-" + npubIdentifier + "> <" + prov.generatedAtTime + "> \"" + "{:4d}-{:02d}-{:02d}".format(datetime.utcnow().year,datetime.utcnow().month,datetime.utcnow().day) + "T" + "{:02d}:{:02d}:{:02d}".format(datetime.utcnow().hour,datetime.utcnow().minute,datetime.utcnow().second) + "Z\"^^xsd:dateTime .\n" + provenanceString
output_file.write(provenanceString + "\n}\n\n")
output_file.write("<" + prefixes[kb] + "pubInfo-" + npubIdentifier + "> {")
publicationInfoString = "\n <" + prefixes[kb] + "nanoPub-" + npubIdentifier + "> <" + prov.generatedAtTime + "> \"" + "{:4d}-{:02d}-{:02d}".format(datetime.utcnow().year,datetime.utcnow().month,datetime.utcnow().day) + "T" + "{:02d}:{:02d}:{:02d}".format(datetime.utcnow().hour,datetime.utcnow().minute,datetime.utcnow().second) + "Z\"^^xsd:dateTime .\n" + publicationInfoString
output_file.write(publicationInfoString + "\n}\n\n")
else :
output_file.write(assertionString + "\n")
output_file.write(provenanceString + "\n")
except Exception as e :
print("Warning: Unable to process Data file: " + str(e))
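# A minimal, self-contained sketch (the helper name and signature are illustrative and are
# not used by this script) of the datatype fallback order applied when writing data values
# in processData() above: an explicit Format wins, then integer, then float, then xsd:string.
def _guess_xsd_datatype_sketch(value, fmt=None) :
    if fmt not in (None, "") :
        # mirror of the "xsd:" prefix handling for the Format cell
        return fmt if fmt.startswith("xsd:") else "xsd:" + fmt
    if str(value).isdigit() :
        return "xsd:integer"
    try :
        float(value)
        return "xsd:float"
    except (TypeError, ValueError) :
        return "xsd:string"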
def main():
if 'dictionary' in config['Source Files'] :
dm_fn = config['Source Files']['dictionary']
else :
print("Error: Dictionary Mapping file is not specified in the configuration file.")
sys.exit(1)
if 'codebook' in config['Source Files'] :
cb_fn = config['Source Files']['codebook']
else :
cb_fn = None
if 'timeline' in config['Source Files'] :
timeline_fn = config['Source Files']['timeline']
else :
timeline_fn = None
if 'data_file' in config['Source Files'] :
data_fn = config['Source Files']['data_file']
else :
data_fn = None
global nanopublication_option
if 'nanopublication' in config['Prefixes'] :
nanopublication_option = config['Prefixes']['nanopublication']
else :
nanopublication_option = "enabled"
if 'out_file' in config['Output Files']:
out_fn = config['Output Files']['out_file']
else:
if nanopublication_option == "enabled" :
out_fn = "out.trig"
else :
out_fn = "out.ttl"
if 'query_file' in config['Output Files'] :
query_fn = config['Output Files']['query_file']
else :
query_fn = "queryQ"
if 'swrl_file' in config['Output Files'] :
swrl_fn = config['Output Files']['swrl_file']
else :
swrl_fn = "swrlModel"
output_file = open(out_fn,"w")
query_file = open(query_fn,"w")
swrl_file = open(swrl_fn,"w")
global prefixes
prefixes = processPrefixes(output_file,query_file)
global properties_tuple
properties_tuple = processProperties()
global cmap_fn
[dm_fn, cb_fn, cmap_fn, timeline_fn] = processInfosheet(output_file, dm_fn, cb_fn, cmap_fn, timeline_fn)
global explicit_entry_list
global implicit_entry_list
[explicit_entry_list,implicit_entry_list] = processDictionaryMapping(dm_fn)
cb_tuple = processCodebook(cb_fn)
timeline_tuple = processTimeline(timeline_fn)
explicit_entry_tuples = writeExplicitEntryTuples(explicit_entry_list, output_file, query_file, swrl_file, dm_fn)
implicit_entry_tuples = writeImplicitEntryTuples(implicit_entry_list, timeline_tuple, output_file, query_file, swrl_file, dm_fn)
processData(data_fn, output_file, query_file, swrl_file, cb_tuple, timeline_tuple, explicit_entry_tuples, implicit_entry_tuples)
output_file.close()
query_file.close()
swrl_file.close()
# Global Scope
# Used to prevent the creation of multiple URIs for hasco:Study, will need to address this in the future
studyRef = None
properties_tuple = {}
prefixes = {}
# Need to implement input flags rather than ordering
if (len(sys.argv) < 2) :
print("Usage: python sdd2rdf.py <configuration_file>")
sys.exit(1)
#file setup and configuration
config = configparser.ConfigParser()
try:
config.read(sys.argv[1])
except Exception as e :
print("Error: Unable to open configuration file:" + str(e))
sys.exit(1)
#unspecified parameters in the config file should set the corresponding read string to ""
if 'base_uri' in config['Prefixes']:
kb = config['Prefixes']['base_uri'] #+ ":" # may want to check if colon already exists in the specified base uri
else:
kb=":"
if 'code_mappings' in config['Source Files'] :
cmap_fn = config['Source Files']['code_mappings']
else :
cmap_fn = None
[unit_code_list,unit_uri_list,unit_label_list] = processCodeMappings(cmap_fn) #must be global at the moment for code mapper to work..
explicit_entry_list = []
implicit_entry_list = []
if __name__ == "__main__":
main()
|
# here are some tricks in Python ...
class A:
psss = 0
a = A()
print(a.__class__)
class B(A):
def __init__(self):
print("Inside the B")
b = B()
print(b.__class__)
# note:-
# ' object.__class__ ' returns the object's class (use __class__.__name__ for just the class name)
print(isinstance(b, B)) # isinstance() returns True if b is an instance of B (or of a subclass of B)
print(issubclass(B, A)) # issubclass() returns True if B is a subclass of A
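# A small extra check (same objects as above; expected output noted in the comments):
print(type(b) is b.__class__)  # True - type(obj) and obj.__class__ agree for these classes
print(b.__class__.__name__)    # 'B'  - __name__ gives the class name as a string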
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 6 12:42:44 2020
@author: catiefinkenbiner
"""
import numpy as np
import scipy as sp
from scipy import stats
## Step 4: Use the prediction to create conditional copula-generated values
def main(tsP,xday_stats,H_scale,O_scale):
# Observed P statistics
Pmu = np.mean(tsP)
Psig = np.std(tsP)
# Correlation Coefficients
xday_rho1 = xday_stats[2,0] ; xday_rho2 = xday_stats[2,1] ; xday_rho3 = xday_stats[2,2]
# Calculate Sigma_bar
sigma_bar = [[1 - xday_rho1*xday_rho1, xday_rho3 - xday_rho1*xday_rho2],
[xday_rho3 - xday_rho1*xday_rho2, 1 - xday_rho2*xday_rho2]]
series = []
for i in np.arange(len(tsP)):
if tsP[i] > 0:
a = (tsP[i] - Pmu) / Psig
H2mu_bar = xday_rho1 * a
O18mu_bar = xday_rho2 * a
mu_bar = np.array([H2mu_bar,O18mu_bar])
X = sp.stats.multivariate_normal.rvs(mean= mu_bar,cov= sigma_bar)
X2 = sp.stats.norm.cdf(X[0],loc=0,scale=1)
X3 = sp.stats.norm.cdf(X[1],loc=0,scale=1)
index = int(np.floor(X2 * len(H_scale)))
newH = H_scale[index]
index = int(np.floor(X3 * len(O_scale)))
newO = O_scale[index]
series.append([newH,newO])
else:
series.append([np.nan,np.nan])
return series
if __name__ == '__main__':
    # NOTE: the original call referenced undefined module-level names (tsP, xday_stats,
    # H_scale, O_scale); see the illustrative driver below for one way to supply them.
    pass
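# A minimal, illustrative driver (all input values below are assumptions, not from the
# original study): tsP is a 1-D precipitation series, row 2 of xday_stats holds the three
# correlation coefficients read in main(), and H_scale / O_scale are sorted candidate
# isotope values sampled via the CDF lookup above.
def _example_run():
    tsP = np.array([0.0, 1.2, 0.0, 3.4, 0.8])
    xday_stats = np.zeros((3, 3))
    xday_stats[2] = [0.5, 0.4, 0.3]
    H_scale = np.linspace(-150.0, -20.0, 100)
    O_scale = np.linspace(-20.0, -2.0, 100)
    return main(tsP, xday_stats, H_scale, O_scale)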
|
from django.contrib.auth.models import User, Group
from rest_framework import serializers
from mainApi.models import UserProfile, AllMedHistory
from mainApi.models import AllEvent, UserEvent, Like, Comment, UserCreatedEvent
from django.db.models.signals import post_save
from django.dispatch import receiver
from rest_framework_nested.relations import NestedHyperlinkedRelatedField
from rest_framework.serializers import HyperlinkedModelSerializer
class CommentSerializer(serializers.ModelSerializer):
first_name = serializers.CharField(source='user.first_name')
last_name = serializers.CharField(source='user.last_name')
class Meta:
model = Comment
fields = ('user_id','first_name','last_name','text', 'commentTime')
class LikeSerializer(serializers.ModelSerializer):
first_name = serializers.CharField(source='user.first_name')
last_name = serializers.CharField(source='user.last_name')
class Meta:
model = Like
fields = ('user_id','first_name','last_name')
class AllEventSerializer(serializers.ModelSerializer):
class Meta:
model = AllEvent
fields = ('eventType', 'timelineDescription', 'publicFeedDescription', 'personalFeedDescription')
class UserEventSerializer(serializers.ModelSerializer):
url = serializers.HyperlinkedIdentityField(view_name='user-event-detail')
# allEvent = AllEventSerializer(read_only=True, required=False)
allEvent = serializers.HyperlinkedIdentityField(view_name='allevent-detail')
eventComments = serializers.HyperlinkedIdentityField(view_name='comment-list')
eventLikes = serializers.HyperlinkedIdentityField(view_name='like-list')
class Meta:
model = UserEvent
fields = ('url', 'id', 'allEvent', 'completed','dateCompleted','dateShared', 'ifShared','eventComments', 'eventLikes')
class AllMedHistorySerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = AllMedHistory
fields = ('url', 'id', 'historyType','description')
class UserCreatedEventSerializer(serializers.HyperlinkedModelSerializer):
# I may want to nest this serializer into UserEvent, but then AllEvent would not have to be required
userEventForCreatedEvent = UserEventSerializer()
url = serializers.HyperlinkedIdentityField(view_name='created-event-detail')
class Meta:
model = UserCreatedEvent
fields = ('url', 'id', 'eventType', 'withWhom', 'eventDate', 'description', 'userEventForCreatedEvent')
def create(self, validated_data):
userEventForCreatedEvent_data = validated_data.pop('userEventForCreatedEvent')
createdEvent = UserCreatedEvent.objects.create(**validated_data)
UserEvent.objects.create(createdEvent=createdEvent, **userEventForCreatedEvent_data)
return createdEvent
def update(self, instance, validated_data):
pass
class UserProfileSerializer(serializers.ModelSerializer):
friends = serializers.HyperlinkedIdentityField(view_name='user-friend-list')
class Meta:
model = UserProfile
fields = ('birthDate', 'gender', 'completionPercentage', 'profilePicture', 'friends')
class UserSerializer(serializers.HyperlinkedModelSerializer):
'''
Used when creating or updating a new User/Profile
'''
userProfile = UserProfileSerializer()
allMedHistories = serializers.HyperlinkedIdentityField(view_name='allmedhistory-list')
userCreatedEvents = serializers.HyperlinkedIdentityField(view_name='created-event-list')
timeline = serializers.HyperlinkedIdentityField(view_name='user-timeline')
privateFeed = serializers.HyperlinkedIdentityField(view_name='user-private-feed')
friendFeed = serializers.HyperlinkedIdentityField(view_name='user-friend-feed')
publicFeed = serializers.HyperlinkedIdentityField(view_name='user-public-feed')
friendProfileFeed = serializers.HyperlinkedIdentityField(view_name='user-friend-profile-feed')
class Meta:
model = User
fields = ('url','id','username', 'first_name', 'last_name', 'email', 'password', 'userProfile', 'allMedHistories', 'userCreatedEvents', 'timeline', 'privateFeed', 'friendFeed', 'publicFeed', 'friendProfileFeed')
def create(self, validated_data):
userProfile_data = validated_data.pop('userProfile')
user = User.objects.create(**validated_data)
UserProfile.objects.create(user=user, **userProfile_data)
return user
def update(self, instance, validated_data):
userProfile_data = validated_data.pop('userProfile')
userProfile = instance.userProfile
instance.username = validated_data.get('username', instance.username)
instance.first_name = validated_data.get('first_name', instance.first_name)
instance.last_name = validated_data.get('last_name', instance.last_name)
instance.password = validated_data.get('password', instance.password)
instance.email = validated_data.get('email', instance.email)
instance.save()
#need to add profilePicture
userProfile.birthDate = userProfile_data.get('birthDate', userProfile.birthDate)
userProfile.gender = userProfile_data.get('gender', userProfile.gender)
userProfile.save()
return instance
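# Illustrative nested payload for UserSerializer.create() above (all field values are
# assumptions, not project data). Note that the password is stored exactly as passed in;
# no hashing (e.g. set_password) is applied here.
# {
#     "username": "jdoe",
#     "first_name": "Jane",
#     "last_name": "Doe",
#     "email": "jdoe@example.com",
#     "password": "plaintext-as-written",
#     "userProfile": {"birthDate": "1990-01-01", "gender": "F"}
# }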
class FriendListSerializer(serializers.ModelSerializer):
friends = serializers.HyperlinkedRelatedField(many=True, read_only=True, view_name='user-detail')
class Meta:
model = UserProfile
fields = ('friends',)
class buildOutUserEventSerializer(serializers.ModelSerializer):
class Meta:
model = AllEvent
fields = ('id', 'name', 'eventType', 'gender','timelineDescription', 'publicFeedDescription', 'personalFeedDescription')
#class PrivateField(serializers.Field):
# def get_attribute(self, obj):
# # We pass the object instance onto `to_representation`,
# # not just the field attribute.
# return obj
#
# def to_representation(self, obj):
# # for read functionality
# if obj.created_by != self.context['request'].user:
# return ""
# else:
# return obj.private_field1
#
# def to_internal_value(self, data):
# pass
# for write functionality
# check if data is valid and if not raise ValidationError
|
from pyparsing import Or, Group, ZeroOrMore, Literal
from .lexical_items import number, HYPHEN_MINUS, identifier, LEFT_PARENTHESIS, RIGHT_PARENTHESIS, \
COMMA, modulereference, FULL_STOP, valuereference, RIGHT_CURLY_BRACKET, LEFT_CURLY_BRACKET, \
cstring
NULL = Literal("NULL")
# 19.1
SignedNumber = Or([
number,
HYPHEN_MINUS + number
]).setParseAction(lambda toks: ''.join(toks))
# 14.6
ExternalValueReference = modulereference + FULL_STOP + valuereference
# 14.1
DefinedValue = Or([
ExternalValueReference,
valuereference,
# ParameterizedValue
])
# 19.1
NamedNumber = Or([
Group(identifier + LEFT_PARENTHESIS + SignedNumber + RIGHT_PARENTHESIS),
Group(identifier + LEFT_PARENTHESIS + DefinedValue + RIGHT_PARENTHESIS)
])
# 19.1
NamedNumberList = Or([
NamedNumber + ZeroOrMore(COMMA + NamedNumber),
# NamedNumber,
# NamedNumberList + COMMA + NamedNumber # ORIGINAL
])
# 41.8
TableRow = number
# 41.8
TableColumn = number
# 41.8
Tuple = LEFT_CURLY_BRACKET + TableColumn + COMMA + TableRow + RIGHT_CURLY_BRACKET
# 41.8
Cell = number
# 41.8
Row = number
# 41.8
Plane = number
# 41.8
Group = number  # note: this rebinds 'Group', shadowing pyparsing.Group imported above (which is only used earlier in this module)
# 41.8
Quadruple = LEFT_CURLY_BRACKET + Group + COMMA + Plane + COMMA + Row + COMMA + Cell + RIGHT_CURLY_BRACKET
# 41.8
CharsDefn = Or([
cstring,
Quadruple,
Tuple,
DefinedValue
])
# 41.8
CharSyms = Or([
CharsDefn + ZeroOrMore(COMMA + CharsDefn),
# CharsDefn,
# CharSyms + COMMA + CharsDefn # ORIGINAL
])
# 41.8
CharacterStringList = LEFT_CURLY_BRACKET + CharSyms + RIGHT_CURLY_BRACKET
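# A minimal, self-contained sketch of the "item + ZeroOrMore(COMMA + item)" list pattern
# used above for NamedNumberList and CharSyms, built from plain pyparsing tokens
# (hypothetical stand-ins for this package's lexical_items):
if __name__ == "__main__":
    from pyparsing import Word, alphas, nums, Suppress, Group as PGroup, ZeroOrMore as PZeroOrMore
    ident = Word(alphas)
    num = Word(nums)
    named = PGroup(ident + Suppress("(") + num + Suppress(")"))
    named_list = named + PZeroOrMore(Suppress(",") + named)
    print(named_list.parseString("red(0), green(1)"))  # -> [['red', '0'], ['green', '1']]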
|
from re import compile, finditer
REGEX = compile(r'(?P<chunk>[a-zA-Z]+)(?:_|-|$)')
def to_camel_case(text):
result = []
for i, a in enumerate(finditer(REGEX, text)):
current = a.group('chunk')
if not i and current[0].islower():
result.append(current.lower())
else:
result.append(current.title())
return ''.join(result)
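# Quick illustrative checks (example inputs are assumptions, not from the original;
# expected outputs noted in the comments):
if __name__ == "__main__":
    print(to_camel_case("the_stealth_warrior"))   # -> theStealthWarrior
    print(to_camel_case("The-Stealth-Warrior"))   # -> TheStealthWarrior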
|
"""
Unit and regression test for the maxsmi package.
"""
# Import package, test suite, and other packages as needed
# import maxsmi
import pytest
import sys
import torch
from maxsmi.utils_evaluation import evaluation_results
def test_maxsmi_imported():
"""Sample test, will always pass so long as import statement worked"""
assert "maxsmi" in sys.modules
####################
@pytest.mark.parametrize(
"pred_output, true_output, cuda_available, solution",
[
(torch.zeros(2), torch.zeros(2), False, (0, 0, 1)),
(torch.zeros(2), torch.zeros(2), True, (0, 0, 1)),
],
)
def test_evaluation_results(pred_output, true_output, cuda_available, solution):
results = evaluation_results(true_output, pred_output, cuda_available)
assert solution == results
|
from typing import List

class Solution:
def findDifference(self, nums1: List[int], nums2: List[int]) -> List[List[int]]:
res = [[], []]
for i in range(len(nums1)):
if nums1[i] not in nums2 and nums1[i] not in res[0]:
res[0].append(nums1[i])
for i in range(len(nums2)):
if nums2[i] not in nums1 and nums2[i] not in res[1]:
res[1].append(nums2[i])
return res
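# Illustrative check (example input is an assumption): order within each sub-list follows
# first appearance in the corresponding input list.
if __name__ == "__main__":
    print(Solution().findDifference([1, 2, 3], [2, 4, 6]))  # -> [[1, 3], [4, 6]]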
|
'''
Created on Nov 9, 2016
@author: micro
'''
import numpy as np
import cv2
def find_marker(image):
#convert the image to grayscale, blur it, and detect edges (in that order)
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
gray = cv2.GaussianBlur(gray, (5,5), 0)
edged = cv2.Canny(gray, 35, 125)
#find the contours in the edged image and keep the largest one;
(_,cnts,_) = cv2.findContours(edged.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE) # OpenCV 3.x return signature (image, contours, hierarchy)
c = max(cnts, key = cv2.contourArea)
#compute the bounding box of the paper region and return it
return cv2.minAreaRect(c)
def distance_to_camera(knownWidth, focalLength, perWidth):
#compute and return the distance from the marker to the camera
return (knownWidth*focalLength) / perWidth
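# Worked example of the similar-triangles relation used above (the numbers are assumptions):
# calibrating with a marker of known width W = 11 in, photographed at a known distance
# D = 24 in with a perceived width P = 200 px, gives focalLength = (P * D) / W ~ 436.4 px;
# the same marker later measured at P' = 100 px is then (W * focalLength) / P' = 48 in away.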
KNOWN_DISTANCE = 24.0
KNOWN_WIDTH = 11.0
image = cv2.imread("C:/python27/image-2ft.jpg")
marker = find_marker(image)
focalLength = (marker[1][0]*KNOWN_DISTANCE) / KNOWN_WIDTH
cap = cv2.VideoCapture(0)
while True:
_, frame = cap.read()
marker = find_marker(frame)
inches = distance_to_camera(KNOWN_WIDTH, focalLength, marker[1][0])
#draw a bounding box around the image and display it
box = np.int0(cv2.boxPoints(marker))
cv2.drawContours(frame, [box], -1, (0,255,0), 2)
cv2.putText(frame, "%.2fft" % (inches / 12),
(frame.shape[1] - 200, frame.shape[0] - 20),
cv2.FONT_HERSHEY_SIMPLEX, 2.0, (0,255,0), 3)
cv2.imshow("Distance calculation", frame)
k = cv2.waitKey(5) & 0xFF
if k == 27: # exit on ESC; the original blocked on waitKey(0) every frame and tested an unreachable key code
break
cv2.destroyAllWindows()
cap.release()
|
# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from pants.backend.terraform.dependencies import TerraformInitRequest, TerraformInitResponse
from pants.backend.terraform.target_types import TerraformDeploymentFieldSet
from pants.backend.terraform.tool import TerraformProcess
from pants.core.goals.check import CheckRequest, CheckResult, CheckResults
from pants.engine.internals.selectors import Get, MultiGet
from pants.engine.process import FallibleProcessResult
from pants.engine.rules import collect_rules, rule
from pants.engine.unions import UnionRule
from pants.option.option_types import SkipOption
from pants.option.subsystem import Subsystem
from pants.util.strutil import pluralize
class TerraformValidateSubsystem(Subsystem):
options_scope = "terraform-validate"
name = "`terraform validate`"
help = """Terraform validate options."""
skip = SkipOption("check")
class TerraformCheckRequest(CheckRequest):
field_set_type = TerraformDeploymentFieldSet
tool_name = TerraformValidateSubsystem.options_scope
@rule
async def terraform_check(
request: TerraformCheckRequest, subsystem: TerraformValidateSubsystem
) -> CheckResults:
if subsystem.skip:
return CheckResults([], checker_name=request.tool_name)
initialised_terraforms = await MultiGet(
Get(
TerraformInitResponse,
TerraformInitRequest(
deployment.root_module, deployment.backend_config, deployment.dependencies
),
)
for deployment in request.field_sets
)
results = await MultiGet(
Get(
FallibleProcessResult,
TerraformProcess(
args=("validate",),
input_digest=deployment.sources_and_deps,
output_files=tuple(deployment.terraform_files),
description=f"Run `terraform validate` on {pluralize(len(deployment.terraform_files), 'file')}.",
chdir=deployment.chdir,
),
)
for deployment in initialised_terraforms
)
check_results = []
for deployment, result, field_set in zip(initialised_terraforms, results, request.field_sets):
check_results.append(
CheckResult.from_fallible_process_result(
result, partition_description=f"`terraform validate` on `{field_set.address}`"
)
)
return CheckResults(check_results, checker_name=request.tool_name)
def rules():
return (
*collect_rules(),
UnionRule(CheckRequest, TerraformCheckRequest),
)
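# A hedged sketch of exposing these rules from a plugin's register.py (the module path is
# an assumption, not necessarily where this file lives in the Pants source tree):
#
#   from pants.backend.terraform.goals import check as terraform_check
#
#   def rules():
#       return [*terraform_check.rules()]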
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os
import shelve
import requests
from oauthlib.oauth2 import TokenExpiredError
from requests_oauthlib import OAuth2Session
from six.moves.urllib.parse import urljoin
from pymonzo.api_objects import MonzoAccount, MonzoBalance, MonzoTransaction
from pymonzo.exceptions import MonzoAPIException
API_URL = 'https://api.monzo.com/'
PYMONZO_REDIRECT_URI = 'https://github.com/pawelad/pymonzo'
MONZO_ACCESS_TOKEN_ENV = 'MONZO_ACCESS_TOKEN'
MONZO_AUTH_CODE_ENV = 'MONZO_AUTH_CODE'
MONZO_CLIENT_ID_ENV = 'MONZO_CLIENT_ID'
MONZO_CLIENT_SECRET_ENV = 'MONZO_CLIENT_SECRET'
TOKEN_FILE_NAME = '.pymonzo-token'
TOKEN_FILE_PATH = os.path.join(os.path.expanduser('~'), TOKEN_FILE_NAME)
class MonzoAPI(object):
"""
Base class that smartly wraps official Monzo API.
Official docs:
https://monzo.com/docs/
"""
default_account_id = None
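    # Illustrative usage (placeholder credentials, not real values):
    #   api = MonzoAPI(access_token="...")   # or client_id / client_secret / auth_code
    #   api.whoami(); api.accounts()         # methods used further down in this class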
def __init__(self, access_token=None, client_id=None, client_secret=None,
auth_code=None):
"""
We need Monzo access token to work with the API, which we try to get
in multiple ways detailed below. Basically you need to either pass
it directly, pass your client ID, client secret and OAuth 2 auth code
or have the token already saved on the disk from previous OAuth 2
authorization.
We then create an OAuth authorised session and get the default
account ID if there's only one available.
:param access_token: your Monzo access token
:type access_token: str
:param client_id: your Monzo client ID
:type client_id: str
:param client_secret: your Monzo client secret
:type client_secret: str
:param auth_code: your Monzo OAuth 2 auth code
:type auth_code: str
"""
# If no values are passed, try to get them from environment variables
self._access_token = (
access_token or os.environ.get(MONZO_ACCESS_TOKEN_ENV)
)
self._client_id = (
client_id or os.environ.get(MONZO_CLIENT_ID_ENV)
)
self._client_secret = (
client_secret or os.environ.get(MONZO_CLIENT_SECRET_ENV)
)
self._auth_code = (
auth_code or os.environ.get(MONZO_AUTH_CODE_ENV)
)
# We try to get the access token from:
# a) explicitly passed 'access_token'
if access_token:
self._token = {
'access_token': self._access_token,
'token_type': 'Bearer',
}
# b) explicitly passed 'client_id', 'client_secret' and 'auth_code'
elif all([client_id, client_secret, auth_code]):
self._token = self._get_oauth_token()
# c) token file saved on the disk
elif os.path.isfile(TOKEN_FILE_PATH):
with shelve.open(TOKEN_FILE_PATH) as f:
self._token = f['token']
# d) 'access_token' saved as a environment variable
elif self._access_token:
self._token = {
'access_token': self._access_token,
'token_type': 'Bearer',
}
# e) 'client_id', 'client_secret' and 'auth_code' saved as
# environment variables
elif all([self._client_id, self._client_secret, self._auth_code]):
self._token = self._get_oauth_token()
else:
raise ValueError(
"You need to pass (or set as environment variables) either "
"the access token or all of client ID, client secret "
"and authentication code. For more info see "
"https://github.com/pawelad/pymonzo#authentication"
)
# Create a session with the acquired token
self._session = OAuth2Session(
client_id=self._client_id,
token=self._token,
)
# Make sure that we're authenticated
if not self.whoami().get('authenticated'):
raise MonzoAPIException("You're not authenticated")
# Set the default account ID if there is only one account available
accounts = self.accounts()
if len(accounts) == 1:
    self.default_account_id = accounts[0].id
@staticmethod
def _save_token_on_disk(token):
"""Helper function that saves passed token on disk"""
with shelve.open(TOKEN_FILE_PATH) as f:
f['token'] = token
def _get_oauth_token(self):
"""
Get Monzo access token via OAuth2 `authorization code` grant type.
Official docs:
https://monzo.com/docs/#acquire-an-access-token
:returns: OAuth 2 access token
:rtype: dict
"""
url = urljoin(API_URL, '/oauth2/token')
oauth = OAuth2Session(
client_id=self._client_id,
redirect_uri=PYMONZO_REDIRECT_URI,
)
token = oauth.fetch_token(
token_url=url,
code=self._auth_code,
client_secret=self._client_secret,
)
self._save_token_on_disk(token)
return token
def _refresh_oauth_token(self):
"""
Refresh Monzo OAuth 2 token.
Official docs:
https://monzo.com/docs/#refreshing-access
:returns: OAuth 2 access token
:rtype: dict
"""
url = urljoin(API_URL, '/oauth2/token')
data = {
'grant_type': 'refresh_token',
'client_id': self._client_id,
'client_secret': self._client_secret,
'refresh_token': self._token['refresh_token'],
}
token_response = requests.post(url, data=data)
token = token_response.json()
self._save_token_on_disk(token)
return token
def _get_response(self, method, endpoint, params=None):
"""
Helper method for making API requests, mainly for catching errors
in one place.
:param method: valid HTTP method
:type method: str
:param endpoint: API endpoint
:type endpoint: str
:param params: extra parameters passed with the request
:type params: dict
:returns: API response
:rtype: Response
"""
url = urljoin(API_URL, endpoint)
try:
response = getattr(self._session, method)(url, params=params)
except TokenExpiredError:
# For some reason 'requests-oauthlib' automatic token refreshing
# doesn't work so we do it here semi-manually
self._token = self._refresh_oauth_token()
self._session = OAuth2Session(
client_id=self._client_id,
token=self._token,
)
response = getattr(self._session, method)(url, params=params)
if response.status_code != requests.codes.ok:
raise MonzoAPIException(
"Something wrong happened: {}".format(response.json())
)
return response
def whoami(self):
"""
Get information about the access token.
Official docs:
https://monzo.com/docs/#authenticating-requests
:returns: access token details
:rtype: dict
"""
endpoint = '/ping/whoami'
response = self._get_response(
method='get', endpoint=endpoint,
)
return response.json()
def accounts(self):
"""
Returns a list of accounts owned by the currently authorised user.
Official docs:
https://monzo.com/docs/#list-accounts
:returns: list of Monzo accounts
:rtype: list of MonzoAccount
"""
endpoint = '/accounts'
response = self._get_response(
method='get', endpoint=endpoint,
)
accounts = response.json()['accounts']
return [MonzoAccount(data=account) for account in accounts]
def balance(self, account_id=None):
"""
Returns balance information for a specific account.
Official docs:
https://monzo.com/docs/#read-balance
:param account_id: Monzo account ID
:type account_id: str
:raises: ValueError
:returns: Monzo balance instance
:rtype: MonzoBalance
"""
if not account_id and not self.default_account_id:
raise ValueError("You need to pass account ID")
elif not account_id and self.default_account_id:
account_id = self.default_account_id
endpoint = '/balance'
response = self._get_response(
method='get', endpoint=endpoint,
params={
'account_id': account_id,
},
)
return MonzoBalance(data=response.json())
def _raw_transactions(self, account_id=None, reverse=True, limit=None):
if not account_id and not self.default_account_id:
raise ValueError("You need to pass account ID")
elif not account_id and self.default_account_id:
account_id = self.default_account_id
endpoint = '/transactions'
response = self._get_response(
method='get', endpoint=endpoint,
params={
'account_id': account_id,
},
)
# The API does not allow reversing the list or limiting it, so to do
# the basic query of 'get the latest transaction' we need to always get
# all transactions and do the reversing and slicing in Python
# I sent Monzo an email; we'll see how they respond
transactions = response.json()['transactions']
if reverse:
transactions.reverse()
if limit:
transactions = transactions[:limit]
return transactions
def transactions(self, account_id=None, reverse=True, limit=None):
"""
Returns a list of transactions on the user's account.
Official docs:
https://monzo.com/docs/#list-transactions
:param account_id: Monzo account ID
:type account_id: str
:param reverse: whether transactions should be in descending order
:type reverse: bool
:param limit: how many transactions should be returned; None for all
:type limit: int
:returns: list of Monzo transactions
:rtype: list of MonzoTransaction
"""
transactions = self._raw_transactions(account_id=account_id, reverse=reverse, limit=limit)
return [MonzoTransaction(data=t) for t in transactions]
def transaction(self, transaction_id, expand_merchant=False):
"""
Returns an individual transaction, fetched by its id.
Official docs:
https://monzo.com/docs/#retrieve-transaction
:param transaction_id: Monzo transaction ID
:type transaction_id: str
:param expand_merchant: whether merchant data should be included
:type expand_merchant: bool
:returns: Monzo transaction details
:rtype: MonzoTransaction
"""
endpoint = '/transactions/{}'.format(transaction_id)
data = dict()
if expand_merchant:
data['expand[]'] = 'merchant'
response = self._get_response(
method='get', endpoint=endpoint, params=data,
)
return MonzoTransaction(data=response.json()['transaction'])
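# Minimal usage sketch (illustrative only, not part of the library). It assumes
# a valid access token is available via the MONZO_ACCESS_TOKEN environment
# variable or a previously saved ~/.pymonzo-token file; otherwise MonzoAPI()
# raises ValueError as documented above.
if __name__ == '__main__':
    monzo = MonzoAPI()
    for account in monzo.accounts():
        print(account.id)
    # Balance and the latest transaction for the default account (set
    # automatically only when exactly one account exists).
    print(monzo.balance())
    print(monzo.transactions(limit=1))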
|
from flask_wtf import Form
from wtforms import StringField, PasswordField, TextAreaField
from wtforms.validators import (DataRequired)
class LoginForm(Form):
value = StringField('********', validators=[DataRequired()])
|
#! /usr/bin/env python
from __future__ import division
from numpy import *
from numpy.random import normal
from scipy.stats import norm,betai
from scipy.special import betaln
from pylab import where
from scipy import weave
def lprob2sigma(lprob):
""" translates a log_e(probability) to units of Gaussian sigmas
"""
if (lprob>-36.):
sigma = norm.ppf(1.-0.5*exp(1.*lprob))
else:
# the asymptotic approximation below is good to ~5e-2; one Newton step refines it to ~5e-5
sigma = sqrt( log(2./pi) - 2.*log(8.2) - 2.*lprob )
f = 0.5*log(2./pi) - 0.5*sigma**2 - log(sigma) - lprob
df = - sigma - 1./sigma
sigma = sigma - f/df
return float(sigma)
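# Worked example (illustrative): a two-sided probability of 0.05 corresponds to
# about 1.96 Gaussian sigmas, so lprob2sigma(log(0.05)) returns ~1.96; the
# asymptotic branch above is only used for probabilities below exp(-36).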
def peak2sigma(psdpeak,n0):
""" translates a psd peak height into a multi-trial NULL-hypothesis probability
NOTE: dstarr replaces '0' with 0.000001 to avoid floating-point accuracy bugs
that otherwise occur.
"""
# Student's-T
prob0 = betai( 0.5*n0-2.,0.5,(n0-1.)/(n0-1.+2.*psdpeak) )
if (0.5*n0-2.<=0.000001):
lprob0=0.
elif ( (n0-1.)/(n0-1.+2.*psdpeak) <=0.000001 ):
lprob0=-999.
elif (prob0==0):
lprob0=(0.5*n0-2.)*log( (n0-1.)/(n0-1.+2.*psdpeak)
) - log(0.5*n0-2.) - betaln(0.5*n0-2.,0.5)
else: lprob0=log(prob0)
# ballpark number of independent frequencies
# (Horne and Baliunas, eq. 13)
horne = long(-6.362+1.193*n0+0.00098*n0**2.)
if (horne <= 0): horne=5
if (lprob0>log(1.e-4) and prob0>0):
# trials correction, monitoring numerical precision
lprob = log( 1. - exp( horne*log(1-prob0) ) )
elif (lprob0+log(horne)>log(1.e-4) and prob0>0):
lprob = log( 1. - exp( -horne*prob0 ) )
else:
lprob = log(horne) + lprob0
sigma = lprob2sigma(lprob)
return sigma
def get_peak_width(psd,imax):
pmax = psd[imax]
w = 1.  # fallback in case the loop below never runs (e.g. pmax <= 0)
i = 0
while ( (psd[imax-i:imax+1+i]>(pmax/2.)).sum()/(1.+2*i)==1 ):
w = 1.+2*i
i+=1
return w
# New as of 20101120: (with run_lomb14.py changes):
def lomb(time, signal, error, f1, df, numf, fit_mean=True, fit_slope=False, subtract_mean=True):
"""
C version of lomb_scargle
Inputs:
time: time vector
signal: data vector
error: uncertainty on signal
f1: first (lowest) frequency of the grid
df: frequency step
numf: number of frequencies to consider
Output:
psd: power spectrum on frequency grid: f1, f1+df, ..., f1+(numf-1)*df
"""
numt = len(time)
wth = (1./error).astype('float64')
s0 = dot(wth,wth)
wth /= sqrt(s0)
if (fit_mean==True):
subtract_mean=True
if (fit_slope==True):
fit_mean=True
subtract_mean=True
cn = (signal*wth).astype('float64')
if (subtract_mean==True):
cn -= dot(cn,wth)*wth
tt = 2*pi*time.astype('float64')
sinx0, cosx0 = sin(df*tt), cos(df*tt)
sinx, cosx = sin(f1*tt)*wth, cos(f1*tt)*wth
if (fit_slope==True):
tt *= wth
tt -= dot(tt,wth)*wth
tt /= tt.max()
s1 = dot(tt,tt)
cn -= dot(tt,cn)*tt/s1
numf = int(numf)
psd = empty(numf,dtype='float64')
if (subtract_mean==False):
vcn = 1./s0
else:
vcn = var(cn)
fit_mean = int(fit_mean)
lomb_scargle_support = """
inline double SQR(double a) {
return (a == 0.0 ? 0.0 : a*a);
}
inline void update_sincos (long int numt, double *sinx0_ptr, double *cosx0_ptr, double *sinx_ptr, double *cosx_ptr) {
double tmp,*sinx0 = sinx0_ptr, *cosx0 = cosx0_ptr, *sinx = sinx_ptr, *cosx = cosx_ptr;
for (unsigned long i=0;i<numt;i++,sinx0++,cosx0++,sinx++,cosx++) {
tmp = *sinx;
*sinx = *cosx0*tmp + *sinx0**cosx;
*cosx = -*sinx0*tmp + *cosx0**cosx;
}
}
"""
lomb_scargle_codeB = """
inline double lomb_scargle(double *cn_ptr, double *wt_ptr, double *sinx_ptr, double *cosx_ptr, long int numt, int fit_mean) {
double ts1=0.,tc1=0.,s2=0.,c2=0.,sh=0.,ch=0.,tc2,ts2;
double omtau;
double tmp,px,cosomtau,sinomtau,cos2omtau,sin2omtau;
double *wt = wt_ptr, *cn = cn_ptr;
double *sinx = sinx_ptr, *cosx = cosx_ptr;
double norm_sin,norm_cos,cn0=0.;
for (unsigned long i=0;i<numt;i++, wt++, cn++, sinx++, cosx++) {
ts1 += *sinx**wt;
s2 += *cosx*(*sinx);
tc1 += *cosx**wt;
c2 += SQR(*cosx);
sh += *sinx*(*cn);
ch += *cosx*(*cn);
}
s2 *= 2.; c2 = 2*c2 - 1.;
omtau = atan2(s2,c2)/2;
sinomtau = sin(omtau);
cosomtau = cos(omtau);
sin2omtau = 2.*sinomtau*cosomtau;
cos2omtau = 2.*SQR(cosomtau) - 1.;
tmp = c2*cos2omtau + s2*sin2omtau;
tc2 = 0.5*(1.+tmp);
ts2 = 0.5*(1.-tmp);
tmp = ts1;
ts1 = cosomtau*tmp - sinomtau*tc1;
tc1 = sinomtau*tmp + cosomtau*tc1;
tmp = sh;
sh = cosomtau*tmp - sinomtau*ch;
ch = sinomtau*tmp + cosomtau*ch;
norm_sin = sh/ts2;
norm_cos = ch/tc2;
if (fit_mean) {
cn0 = ( norm_sin*ts1 + norm_cos*tc1 ) / ( SQR(ts1)/ts2 + SQR(tc1)/tc2 - 1. );
norm_sin -= cn0*ts1/ts2;
norm_cos -= cn0*tc1/tc2;
}
px = SQR(norm_sin)*ts2 + SQR(norm_cos)*tc2 - SQR(cn0);
if (tc2<=0 || ts2<=0) px = 0.;
return px;
}
"""
lomb_scargle_codeA = """
inline double lomb_scargle_linear(double *tt_ptr, double *cn_ptr, double *wt_ptr, double *sinx_ptr, double *cosx_ptr, double s1, long int numt) {
double ts1=0.,tc1=0.,s2=0.,c2=0.,sh=0.,ch=0.,ts=0.,tc=0.,tc2,ts2;
double omtau;
double tmp,px,cosomtau,sinomtau,cos2omtau,sin2omtau;
double *wt = wt_ptr, *cn = cn_ptr, *tt = tt_ptr;
double *sinx = sinx_ptr, *cosx = cosx_ptr;
double cn0,cn1;
for (unsigned long i=0;i<numt;i++, wt++, tt++, cn++, sinx++, cosx++) {
ts1 += *sinx**wt;
s2 += *cosx*(*sinx);
tc1 += *cosx**wt;
c2 += SQR(*cosx);
sh += *sinx*(*cn);
ch += *cosx*(*cn);
ts += *sinx*(*tt);
tc += *cosx*(*tt);
}
s2 *= 2.; c2 = 2*c2 - 1.;
omtau = atan2(s2,c2)/2;
sinomtau = sin(omtau);
cosomtau = cos(omtau);
sin2omtau = 2.*sinomtau*cosomtau;
cos2omtau = 2.*SQR(cosomtau) - 1.;
tmp = c2*cos2omtau + s2*sin2omtau;
tc2 = 0.5*(1.+tmp);
ts2 = 0.5*(1.-tmp);
tmp = ts1;
ts1 = cosomtau*tmp - sinomtau*tc1;
tc1 = sinomtau*tmp + cosomtau*tc1;
tmp = ts;
ts = cosomtau*tmp - sinomtau*tc;
tc = sinomtau*tmp + cosomtau*tc;
tmp = sh;
sh = cosomtau*tmp - sinomtau*ch;
ch = sinomtau*tmp + cosomtau*ch;
tmp = 2*tc*tc1*ts*ts1 + (s1*ts2 - SQR(ts))*SQR(tc1) + (s1*tc2 - SQR(tc))*SQR(ts1) - ((s1*tc2 - SQR(tc))*ts2 - tc2*SQR(ts));
cn0 = ch*(tc*ts*ts1 + (s1*ts2 - SQR(ts))*tc1) + sh*(tc*ts*tc1 + (s1*tc2 - SQR(tc))*ts1);
cn0 /= tmp;
cn1 = ch*(ts*tc1*ts1 - tc*SQR(ts1) + tc*ts2) + sh*(tc*tc1*ts1 - ts*SQR(tc1) + ts*tc2);
cn1 /= tmp;
px = SQR(sh-cn0*ts1-cn1*ts)/ts2 + SQR(ch-cn0*tc1-cn1*tc)/tc2 - SQR(cn0) - s1*SQR(cn1);
if (tc2<=0 || ts2<=0) px = 0.;
return px;
}
"""
lomb_code_A = """
for (unsigned long j=0;j<numf;j++,psd++) {
*psd = lomb_scargle_linear(tt,cn,wth,sinx,cosx,s1,numt);
update_sincos (numt, sinx0, cosx0, sinx, cosx);
}
"""
lomb_code_B = """
for (unsigned long j=0;j<numf;j++,psd++) {
*psd = lomb_scargle(cn,wth,sinx,cosx,numt,fit_mean);
update_sincos (numt, sinx0, cosx0, sinx, cosx);
}
"""
if (fit_slope==True):
weave.inline(lomb_code_A,\
['cn','wth','tt','numt','numf','psd','s1','sinx0','cosx0','sinx','cosx'],\
support_code = lomb_scargle_support+lomb_scargle_codeA,force=0)
else:
weave.inline(lomb_code_B,\
['cn','wth','numt','numf','psd','sinx0','cosx0','sinx','cosx','fit_mean'],\
support_code = lomb_scargle_support+lomb_scargle_codeB,force=0)
return 0.5*psd/vcn;
def lomb__pre20101120(time, signal, wt, df, numf ):
"""
C version of lomb_scargle
Constructs a periodogram from frequency df to frequency numf * df
Inputs:
time: time vector
signal: data vector
wt: weights vector = 1/uncertainty^2
df: frequency step
numf: number of frequencies to consider
Output:
psd: power spectrum on frequency grid
"""
numt = len(time)
wt = wt.astype('float64')
s0 = wt.sum()
cn = signal.astype('float64') - ( signal*wt ).sum() / s0
var = ( cn**2*wt ).sum()/(numt-1.)
s0 = array([s0]).astype('float64')
# make times manageable (Scargle periodogram is time-shift invariant)
tt = 2*pi*( time.astype('float64')-time.min() )
#df = array([df],dtype='float64')
df = float64(df)
numf = int(numf)
psd = empty(numf,dtype='float64')
#numf = array([numf],dtype='float64')
# work space
sinx0,cosx0,sinx,cosx = empty((4,numt),dtype='float64')
lomb_scargle = """
inline double SQR(double a) {
return (a == 0.0 ? 0.0 : a*a);
}
inline void initialize_sincos (long int numt, double *tt_ptr, double *sinx0_ptr, double *cosx0_ptr, double *sinx_ptr, double *cosx_ptr, double df) {
double *sinx0 = sinx0_ptr, *cosx0 = cosx0_ptr, *tt = tt_ptr;
double *sinx = sinx_ptr, *cosx = cosx_ptr;
for (unsigned long i=0;i<numt;i++,tt++,sinx0++,cosx0++,sinx++,cosx++) {
*sinx0 = sin(*tt*df);
*cosx0 = cos(*tt*df);
*sinx = *sinx0; *cosx = *cosx0;
}
}
inline void update_sincos (long int numt, double *sinx0_ptr, double *cosx0_ptr, double *sinx_ptr, double *cosx_ptr) {
double tmp;
double *sinx0 = sinx0_ptr, *cosx0 = cosx0_ptr, *sinx = sinx_ptr, *cosx = cosx_ptr;
for (unsigned long i=0;i<numt;i++,sinx0++,cosx0++,sinx++,cosx++) {
tmp = *sinx;
*sinx = *cosx0*tmp + *sinx0**cosx;
*cosx = -*sinx0*tmp + *cosx0**cosx;
}
}
inline double lomb_scargle(double *cn_ptr, double *wt_ptr, double *sinx_ptr, double *cosx_ptr, double s0, long int numt) {
double ts1=0.,tc1=0.,s2=0.,c2=0.,sh=0.,ch=0.,tc2,ts2;
double omtau;
double tmp,px,cosomtau,sinomtau,cos2omtau,sin2omtau;
double *wt = wt_ptr, *cn = cn_ptr;
double *sinx = sinx_ptr, *cosx = cosx_ptr;
double norm,norm_sin,norm_cos,cn0;
for (unsigned long i=0;i<numt;i++, wt++, cn++, sinx++, cosx++) {
ts1 += tmp = *sinx**wt;
s2 += *cosx*tmp;
tc1 += tmp = *cosx**wt;
c2 += *cosx*tmp;
tmp = *cn**wt;
sh += *sinx*tmp;
ch += *cosx*tmp;
}
s2 *= 2.; c2 = 2*c2 - s0;
omtau = atan2(s2,c2)/2;
sinomtau = sin(omtau);
cosomtau = cos(omtau);
sin2omtau = 2.*sinomtau*cosomtau;
cos2omtau = 2.*SQR(cosomtau) - 1.;
tmp = c2*cos2omtau + s2*sin2omtau;
tc2 = 0.5*(s0+tmp);
ts2 = 0.5*(s0-tmp);
tmp = ts1;
ts1 = cosomtau*tmp - sinomtau*tc1;
tc1 = sinomtau*tmp + cosomtau*tc1;
tmp = sh;
sh = cosomtau*tmp - sinomtau*ch;
ch = sinomtau*tmp + cosomtau*ch;
norm_sin = sh/ts2;
norm_cos = ch/tc2;
cn0 = ( norm_sin*ts1 + norm_cos*tc1 ) / ( SQR(ts1)/ts2 + SQR(tc1)/tc2 - s0 );
norm_sin -= cn0*ts1/ts2;
norm_cos -= cn0*tc1/tc2;
px = SQR(norm_sin)*ts2 + SQR(norm_cos)*tc2 - s0*SQR(cn0);
if (tc2<=0 || ts2<=0) px = 0.;
return px;
}
"""
lomb_code = """
initialize_sincos (numt,tt,sinx0,cosx0,sinx,cosx,df);
*psd = lomb_scargle(cn,wt,sinx,cosx,*s0,numt);
psd++;
for (unsigned long j=1;j<numf;j++,psd++) {
update_sincos (numt, sinx0, cosx0, sinx, cosx);
*psd = lomb_scargle(cn,wt,sinx,cosx,*s0,numt);
}
"""
weave.inline(lomb_code,\
['cn','wt','tt','numt','numf','psd','s0','df','sinx0','cosx0','sinx','cosx'],\
support_code = lomb_scargle)
#import pdb; pdb.set_trace()
#print
return 0.5*psd/var;
def lomb__numpy20100913efficent(time,signal,wt=[],freqin=[]):
    """ numpy implementation of lomb_scargle
requires frequency grid as input, returns psd
Nat made this numpy-optimal version on 20100911.
"""
numt = len(time)
wt = wt.astype('float64')
freqin = freqin.astype('float64')
s0 = wt.sum()
cn = signal.astype('float64') - (signal*wt).sum()/s0
var = ( cn**2*wt ).sum()/(numt-1.)
tt = 2*pi*( time.astype('float64')-time.min() )
numf = len(freqin)
ts1 = zeros(numf,'float64'); tc1 = zeros(numf,'float64')
s2 = zeros(numf,'float64'); c2 = zeros(numf,'float64')
sh = zeros(numf,'float64'); ch = zeros(numf,'float64')
for i in xrange(numt):
x = freqin * tt[i]
sinx, cosx = sin(x), cos(x)
tmp = wt[i]*sinx;
ts1 += tmp;
s2 += tmp*cosx;
tmp = wt[i]*cosx
tc1 += tmp
c2 += tmp*cosx;
tmp = cn[i]*wt[i]
sh += tmp*sinx; ch += tmp*cosx
s2 *= 2.
c2 = 2.*c2 - s0
omtau = 0.5*arctan2(s2,c2)
sinomtau = sin(omtau); cosomtau = cos(omtau)
tmp = ts1;
ts1 = cosomtau*tmp - sinomtau*tc1;
tc1 = sinomtau*tmp + cosomtau*tc1;
tmp = sh;
sh = cosomtau*tmp - sinomtau*ch;
ch = sinomtau*tmp + cosomtau*ch;
tmp = c2*cos(2.*omtau) + s2*sin(2.*omtau)
tc2 = 0.5*(s0+tmp)
ts2 = 0.5*(s0-tmp)
norm_sin = sh/ts2;
norm_cos = ch/tc2;
cn0 = ( norm_sin*ts1 + norm_cos*tc1 ) / ( ts1**2/ts2 + tc1**2/tc2 - s0 );
norm_sin -= cn0*ts1/ts2;
norm_cos -= cn0*tc1/tc2;
return 0.5/var *(norm_sin**2*ts2 + norm_cos**2*tc2 - s0*cn0**2)
### OBSOLETE: 20100912: Nat replaced this with a more optimal numpy version and an even more optimal C/weave version.
def lomb__old_pre20100912(time, signal, delta_time=[], signal_err=[], freqin=[], fap=0.01, multiple=0, noise=0, verbosity=2, use_bayes=False, num_freq_max=10000):
#
# NAME:
# lomb
#
# PURPOSE:
# Compute the lomb-scargle periodogram of an unevenly sampled
# lightcurve
#
# CATEGORY:
# time series analysis
#
# CALLING SEQUENCE:
# psd, freq = scargle(time,signal)
#
# INPUTS:
# time: The times at which the time series was measured
# signal: the corresponding count rates
#
# OPTIONAL INPUTS:
# delta_time: exposure times (bin widths) centered around time
# signal_err: 1-sigma uncertainty on signal vector
# freqin: frequencies for which the PSD values are desired
# fap : false alarm probability desired
# (see Scargle et al., p. 840, and signi
# keyword). Default equal to 0.01 (99% significance)
# noise: PSD normalization, default assumes (chi^2/nu)^0.5 for a linear fit
# multiple: number of Gaussian noise simulations for the FAP
# power level. Default equal to 0 (i.e., no simulations).
#
# OUTPUTS:
# psd: the psd-values corresponding to omega
# freq: frequency of PSD
#
# OPTIONAL OUTPUTS:
# signi : peak of the PSD
# simsigni : PSD peak corresponding to the given
# false alarm probabilities fap according to Gaussian
# noise simulations
# psdpeak: array with the maximum peak for each simulation
#
#
# KEYWORD PARAMETERS:
# verbosity: print out debugging information if set
#
# MODIFICATION HISTORY:
# Version 1.0, 1997, Joern Wilms IAAT
# Version 1.1, 1998.09.23, JW: Do not normalize if variance is 0
# (for computation of LSP of window function...)
# Version 1.2, 1999.01.07, JW: force numf to be int
# Version 1.3, 1999.08.05, JW: added omega keyword
# Version 1.4, 1999.08
# KP: significance levels
# JW: pmin,pmax keywords
# Version 1.5, 1999.08.27, JW: compute the significance levels
# from the horne number of independent frequencies, and not from
# numf
# Version 1.6, 2000.07.27, SS and SB: added fast algorithm and FAP
# according to white noise lc simulations.
# Version 1.7, 2000.07.28 JW: added debug keyword, sped up
# simulations by factor of four (use /slow to get old
# behavior of the simulations)
# Version 2.0 2004.09.01, Thomas Kornack rewritten in Python
# Version 2.1 2008.04.11, Nat Butler added error propagation and allowed
# for non-zero and variable time bins, altered output signi to be trials
# significance of periodogram peak
# Version 2.2 2009.11.17, Nat Butler added Bayesian term for width of periodogram
# (Laplace approximation to posterior). This now searches over a linear
# frequency grid, with a preset maximum number of bins.
#
if verbosity>1: print('Starting Lomb (standard)...')
signal = atleast_1d(signal).astype(double)
time = atleast_1d(time).astype(double)
n0 = len(time)
# if data error not given, assume all are unity
if (signal_err==[]):
wt = ones(n0,dtype=float)
else:
wt = 1./atleast_1d(signal_err).astype(double)**2;
wt[signal_err<=0] = 1.
# if delta_time not given, assume 0
do_sync=True
if (delta_time==[]):
do_sync=False
delta_time = zeros(n0, dtype=float)
else:
delta_time = atleast_1d(delta_time).astype(double)
# make times manageable (Scargle periodogram is time-shift invariant)
tt = time-min(time)
ii = tt.argsort()
tt = tt[ii]; cn = signal[ii]; wt=wt[ii];
s0 = sum(wt)
msignal = sum( cn*wt ) / s0
cn -= msignal
# defaults
renorm=1
if noise == 0:
renorm=0
noise = sqrt( sum( cn**2*wt )/(n0-1) )
# make times manageable (Scargle periodogram is time-shift invariant)
tt = time-min(time)
tt.sort()
max_tt = tt[-1]
# min.freq is 1/T, go a bit past that
# initial max. freq guess: approx. to Nyquist frequency
df = 0.1/max_tt
fmin = 0.5/max_tt
fmax = n0*fmin
# refine the maximum frequency to be a bit higher
dt = tt[1:] - tt[:-1]
g=where(dt>0)
if (len(g[0])>0):
dt_min = dt[g].min()
fmax = 0.5/dt_min
# if omega is not given, compute it
if (freqin==[]):
numf = long( ceil( (fmax-fmin)/df ) )
if (numf>num_freq_max):
if (verbosity>1): print ("Warning: shrinking num_freq %d -> %d (num_freq_max)") % (numf,num_freq_max)
numf = long(num_freq_max)
#fmax = fmin + numf*df
df = (fmax - fmin)/numf
freqin = fmax - df*arange(numf,dtype=float)
om = 2.*pi*freqin
else:
om = freqin*2*pi
numf = len(om)
# Bayes term in periodogram gets messy at frequencies lower than this
om0 = pi/max_tt
if (numf==0): multiple = 0
if verbosity>1: print('Setting up periodogram...')
# Periodogram
# Ref.: W.H. Press and G.B. Rybicki, 1989, ApJ 338, 277
# finite bins leads to sinc function; sinc factors drop out if delta_time = const.
# sinc(x) = sin(x*pi)/(x*pi)
if (multiple > 0):
if verbosity>1: print('Looping...')
sisi=zeros([n0,numf], dtype=float)
coco=zeros([n0,numf], dtype=float)
# Eq. (6); s2, c2
ts1 = zeros(numf, dtype=float)
tc1 = zeros(numf, dtype=float)
s1 = zeros(numf, dtype=float)
s2 = zeros(numf, dtype=float)
c2 = zeros(numf, dtype=float)
# Eq. (5); sh and ch
sh = zeros(numf, dtype=float)
ch = zeros(numf, dtype=float)
bayes_term = zeros(numf, dtype=float)
sync_func = lambda x: 1.
if (do_sync):
sync_func = lambda x: (1.e-99 + sin(pi*x))/(1.e-99 + pi*x)
for i in range(numf):
x = ( om[i]*tt ) % (2*pi)
synct = sync_func(freqin[i]*delta_time)
sinom = sin(x)*synct
cosom = cos(x)*synct
ts1[i] = sum( sinom*wt )
tc1[i] = sum (cosom*wt )
s1[i] = sum( synct**2*wt )
s2[i] = 2.*sum( sinom*cosom*wt )
c2[i] = sum( (cosom**2-sinom**2)*wt )
sh[i] = sum( cn*sinom*wt )
ch[i] = sum( cn*cosom*wt )
if (multiple > 0):
sisi[:,i]=sinom*wt
coco[:,i]=cosom*wt
# cleanup
sinom = 0.
cosom = 0.
synct = 0.
# Eq. (2): Definition -> tan(2omtau)
# --- tan(2omtau) = s2 / c2
omtau = arctan2(s2,c2)/2
# cos(tau), sin(tau)
cosomtau = cos(omtau)
sinomtau = sin(omtau)
tmp = 1.*ts1;
ts1 = cosomtau*tmp - sinomtau*tc1;
tc1 = sinomtau*tmp + cosomtau*tc1;
tmp = 1.*sh;
sh = cosomtau*tmp - sinomtau*ch;
ch = sinomtau*tmp + cosomtau*ch;
# Eq. (7); sum(cos(t-tau)**2) and sum(sin(t-tau)**2)
tmp = c2*cos(2.*omtau) + s2*sin(2.*omtau)
tc2 = 0.5*(s1+tmp) # sum(cos(t-tau)**2)
ts2 = 0.5*(s1-tmp) # sum(sin(t-tau)**2)
norm_sin = sh/ts2;
norm_cos = ch/tc2;
cn0 = ( norm_sin*ts1 + norm_cos*tc1 ) / ( ts1**2/ts2 + tc1**2/tc2 - s0 );
norm_sin -= cn0*ts1/ts2;
norm_cos -= cn0*tc1/tc2;
#amplitude = sqrt(norm_sin**2+norm_cos**2)
#damplitude = sqrt(norm_sin**2/ts2+norm_cos**2/tc2)/amplitude*noise
bayes_term = -0.5*log( s0*ts2*tc2 - tc1**2*ts2 - ts1**2*tc2 ) + 1.5*log(s0) - 0.5*log(4.) - log(freqin) + (log(freqin)).mean()
# Eq. (3), modified
px = norm_sin**2*ts2 + norm_cos**2*tc2 - cn0**2*s0
# be careful here
wh = (tc2<=0) | (ts2<=0)
px[wh] = 0.
# clean up
tmp = 0.
omtau = 0.
s2 = 0.
c2 = 0.
if multiple <=0 :
ts1 = 0.
tc1 = 0.
tc2 = 0.
ts2 = 0.
# correct normalization
psd = atleast_1d( 0.5*px/(noise**2) )
if (use_bayes):
g=where(om<om0)
#bayes_term[g]=0.
psd += bayes_term
signi = 0.
if (numf>0):
if (use_bayes):
j0 = psd.argmax()
signi = peak2sigma( (psd-bayes_term)[j0],n0)
else:
signi = peak2sigma(psd.max(),n0)
# --- RUN SIMULATIONS for multiple > 0
simsigni=[]
psdpeak=[]
if multiple > 0:
if verbosity>1: print('Running Simulations...')
if (multiple*fap < 10):
print('WARNING: Number of iterations (multiple keyword) not large enough for false alarm probability requested (need multiple*FAP > 10 )')
psdpeak = zeros(multiple, dtype=float)
for m in range(multiple):
if ((m+1)%100 == 0) and (verbosity>0):
print "...working on %ith simulation. (%.2f Done)" % (m,m/multiple)
# Gaussian noise simulation
cn = normal(loc=0.0,scale=1.,size=n0)/sqrt(wt)
msignal = sum( cn*wt ) / s0
cn = cn-msignal # force OBSERVED count rate to zero
if (renorm==0): noise = sqrt( sum( cn**2*wt )/(n0-1) )
# Eq. (5); sh and ch
for i in range(numf):
sh[i]=sum(cn*sisi[:,i])
ch[i]=sum(cn*coco[:,i])
# Eq. (3) ; computing the periodogram for each simulation
tmp = sh;
sh = cosomtau*tmp - sinomtau*ch;
ch = sinomtau*tmp + cosomtau*ch;
norm_sin = sh/ts2;
norm_cos = ch/tc2;
cn0 = ( norm_sin*ts1 + norm_cos*tc1 ) / ( ts1*ts1/ts2 + tc1*tc1/tc2 - s0 );
norm_sin -= cn0*ts1/ts2;
norm_cos -= cn0*tc1/tc2;
# Eq. (3), modified
px = norm_sin**2*ts2 + norm_cos**2*tc2 - s0*cn0**2
# be careful here
px[wh] = 0.
psdpeak[m] = 0.5*px.max()/(noise**2)
# False Alarm Probability according to simulations
if len(psdpeak) != 0:
psdpeak.sort()
psd0 = psdpeak[ long((1-fap)*(multiple-1)) ]
simsigni = peak2sigma(psd0,n0)
freq = om/(2.*pi)
if verbosity>1: print('Done...')
return (psd,freq,signi,simsigni,psdpeak)
if __name__ == '__main__':
from numpy.random import normal
from scipy.stats import betai
#print('Testing Lomb-Scargle Periodogram with Gaussian noise...')
#freq = 10. # Hz - Sample frequency
#time = 10. #seconds
#noisetime = arange(0,time,1./freq, dtype=float)
#N = len(noisetime)
#dnoisetime=0*noisetime + 1./freq
#noisedata = sin(noisetime*2*pi)*1. + normal(loc=0,scale=1,size=N)
#dnoisedata = noisedata*0.+1.
file='vosource_9026.dat'
#file='00331_3.dat'
#file='07914_9.dat'
mfile=open(file,'r')
fileList = mfile.readlines()
N = fileList.__len__()
noisedata = zeros(N,dtype=float)
dnoisedata = zeros(N,dtype=float)
noisetime = zeros(N,dtype=float)
dnoisetime = zeros(N,dtype=float)
i=0
for line in fileList:
(a,b,c) = line.split()
noisetime[i]=float(a)
noisedata[i]=float(b)
dnoisedata[i]=float(c)
i = i+1
noisetime = noisetime - noisetime[0]
mfile.close()
# get a careful estimate of the typical time between observations
time = sort(noisetime)  # sorted copy; leaves noisetime (and its pairing with noisedata) untouched
dt = median( time[1:]-time[:-1] )
maxlogx = log(0.5/dt) # max frequency is ~ the sampling rate
minlogx = log(0.5/(time[-1]-time[0])) #min frequency is 0.5/T
# sample the PSD with 1% fractional precision
M=long(ceil( (maxlogx-minlogx)*100. ))
frequencies = exp(maxlogx-arange(M, dtype=float) / (M-1.) * (maxlogx-minlogx))
fap = 0.01 # we want to see what psd peak this false alarm probability would correspond to
# set multiple >0 to get a Monte Carlo significance estimate for the peak (warning: this is slow)
multiple = 0 # should be >~10/fap
psd, freqs, signi, sim_signi, peak_sort = lomb(noisetime,noisedata,delta_time=dnoisedata,
signal_err=dnoisedata,freqin=frequencies,fap=fap,multiple=multiple)
#peak location
imax = psd.argmax()
freq_max = freqs[imax]
mpsd=max(psd)
print ("Peak=%.2f @ %.2f Hz, significance estimate: %.1f-sigma (T-test)") % (mpsd,freq_max,signi)
if (len(peak_sort)>0):
psd0 = peak_sort[ long((1-fap)*(multiple-1)) ]
print ("Expected peak %.2f for False Alarm of %.2e") % (psd0,fap)
Prob0 = betai( 0.5*N-2.,0.5,(N-1.)/(N-1.+2.*psd0) )
Nindep = log(1-fap)/log(1-Prob0)
horne = long(-6.362+1.193*N+0.00098*N**2.)
if (horne <= 0): horne=5
print ("Estimated number of independent trials: %.2f (horne=%d)") % (Nindep,horne)
nover = sum( peak_sort>=mpsd )
print ("Fraction of simulations with peak greater than observed value: %d/%d") % (nover,multiple)
"""
import Gnuplot
import time
plotobj = Gnuplot.Gnuplot()
plotobj.xlabel('Period (s)')
plotobj.ylabel('LS Periodogram')
plotobj('set logscale x')
plotobj('set logscale y')
plotobj.plot(Gnuplot.Data(1./freqs,psd, with = 'l 4 0'))
time.sleep(30)
"""
|
# Generated by Django 2.2.4 on 2019-09-16 08:31
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('whiskydatabase', '0029_auto_20190914_1232'),
]
operations = [
migrations.AlterField(
model_name='personalwhiskynote',
name='creamy',
field=models.IntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='personalwhiskynote',
name='flora',
field=models.IntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='personalwhiskynote',
name='fruity',
field=models.IntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='personalwhiskynote',
name='malty',
field=models.IntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='personalwhiskynote',
name='nutty',
field=models.IntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='personalwhiskynote',
name='peaty',
field=models.IntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='personalwhiskynote',
name='smoky',
field=models.IntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='personalwhiskynote',
name='spicy',
field=models.IntegerField(blank=True, null=True),
),
]
|
print('doesn\'t')
print('"si,"le dijo.')
|
#!/usr/bin/env python
print(" ")
print(" ")
print("MMMMM MMMMM AAAA CCCCCCCCCC ")
print("MMM MM MM MMM AAA AAA CCC ")
print("MMM MM MM MMM AAA AAA CCC ")
print("MMM MM MMM AAA@@@@@@AAA CCC H A N G E R.")
print("MMM MMM AAA AAA CCC ")
print("MMM MMM AAA AAA CCC ")
print("MMM MMM AAA AAA CCCCCCCCCC ")
print(" -Robin ")
print(" ")
import subprocess
import optparse
def get_argument():
parse = optparse.OptionParser() # taking input from user
parse.add_option("-i", "--interface", dest="interface", help="Interface 'wlan0/eth0' whose MAC address should be changed")
parse.add_option("-m", "--mac", dest="new_mac", help="New mac address")
(option, argument) = parse.parse_args()
if not option.interface:
parse.error("[-] Please enter the specific interface or use --help for more info")
if not option.new_mac:
parse.error("[-] Please enter the new MAC or use --help for more info")
return option
def change_mac(interface,new_mac):
print("################# R O B I N ####################")
print("Changing MAC address of " + interface + " to " + new_mac)
print("################# R O B I N ####################")
subprocess.call(["ifconfig", interface, "down"])
subprocess.call(["ifconfig", interface, "hw", "ether", new_mac])
subprocess.call(["ifconfig", interface, "up"])
subprocess.call(["ifconfig", interface])
option = get_argument()
change_mac(option.interface, option.new_mac) #calling function
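# Example invocation (illustrative; the script name is hypothetical). Root
# privileges and the legacy net-tools `ifconfig` binary are required:
#   python mac_changer.py --interface wlan0 --mac 00:11:22:33:44:55
# Optional hardening sketch (an assumption, not part of the original script):
# validate the MAC address format before shelling out to ifconfig.
import re
def is_valid_mac(mac):
    """Return True if mac looks like aa:bb:cc:dd:ee:ff."""
    return re.match(r"^([0-9A-Fa-f]{2}:){5}[0-9A-Fa-f]{2}$", mac) is not None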
|
from rest_framework.permissions import IsAuthenticated
from rest_framework.views import APIView
from rest_framework import serializers
from rest_framework.response import Response
from rest_framework import status
from .services import *
from .permissions import *
from utils.serializer_validator import validate_serializer
from .models import Iip
class AddIipApi(APIView):
permission_classes = [ExpertPermission, ]
class RequestSerializer(serializers.Serializer):
IIP = serializers.FloatField(required=False)
mining_industry = serializers.FloatField(required=False)
manufacturing_processing_industry = serializers.FloatField(required=False)
gas_electricity_industry = serializers.FloatField(required=False)
waste_treatment_water_supply = serializers.FloatField(required=False)
mineral_exploitation = serializers.FloatField(required=False)
food = serializers.FloatField(required=False)
cigarette = serializers.FloatField(required=False)
textile = serializers.FloatField(required=False)
costume = serializers.FloatField(required=False)
leather_product = serializers.FloatField(required=False)
paper_product = serializers.FloatField(required=False)
chemical_product = serializers.FloatField(required=False)
plastic_product = serializers.FloatField(required=False)
non_metalic_mineral_product = serializers.FloatField(required=False)
prefabricated_metal_product = serializers.FloatField(required=False)
electrical_product = serializers.FloatField(required=False)
motor_vehicle = serializers.FloatField(required=False)
furniture = serializers.FloatField(required=False)
other_manufacturing_processing = serializers.FloatField(required=False)
water_supply = serializers.FloatField(required=False)
gas_electricity = serializers.FloatField(required=False)
other_products = serializers.FloatField(required=False)
base_period = serializers.CharField(max_length=50, required=True)
year = serializers.IntegerField(required=True)
month = serializers.IntegerField(required=True)
class ResponseSerializer(serializers.ModelSerializer):
class Meta:
model = Iip
fields = '__all__'
def post(self, request):
request_serializer = self.RequestSerializer(data=request.data)
validate_serializer(request_serializer)
self.check_permissions(request=request)
creator = request.user
organization = creator.client
iip = add_iip(data=request_serializer.validated_data, creator=creator, organization=organization)
response_serializer = self.ResponseSerializer(iip)
return Response({
'iip': response_serializer.data
}, status=status.HTTP_200_OK)
class UpdateIipApi(APIView):
permission_classes = [OwnerPermission, ]
class RequestSerializer(serializers.Serializer):
IIP = serializers.FloatField(required=False)
mining_industry = serializers.FloatField(required=False)
manufacturing_processing_industry = serializers.FloatField(required=False)
gas_electricity_industry = serializers.FloatField(required=False)
waste_treatment_water_supply = serializers.FloatField(required=False)
mineral_exploitation = serializers.FloatField(required=False)
food = serializers.FloatField(required=False)
cigarette = serializers.FloatField(required=False)
textile = serializers.FloatField(required=False)
costume = serializers.FloatField(required=False)
leather_product = serializers.FloatField(required=False)
paper_product = serializers.FloatField(required=False)
chemical_product = serializers.FloatField(required=False)
plastic_product = serializers.FloatField(required=False)
non_metalic_mineral_product = serializers.FloatField(required=False)
prefabricated_metal_product = serializers.FloatField(required=False)
electrical_product = serializers.FloatField(required=False)
motor_vehicle = serializers.FloatField(required=False)
furniture = serializers.FloatField(required=False)
other_manufacturing_processing = serializers.FloatField(required=False)
water_supply = serializers.FloatField(required=False)
gas_electricity = serializers.FloatField(required=False)
other_products = serializers.FloatField(required=False)
base_period = serializers.CharField(max_length=50, required=False)
class ResponseSerializer(serializers.ModelSerializer):
class Meta:
model = Iip
fields = '__all__'
def put(self, request, iip_id):
request_serializer = self.RequestSerializer(data=request.data)
validate_serializer(request_serializer)
iip = get_iip_by(raise_exception=False, id=iip_id).first()
self.check_object_permissions(request=request, obj=iip)
iip = update_iip(iip=iip, **request_serializer.validated_data)
response_serializer = self.ResponseSerializer(iip)
return Response({
'iip': response_serializer.data
}, status=status.HTTP_200_OK)
class DeleteIipApi(APIView):
permission_classes = [OwnerPermission, ]
class ResponseSerializer(serializers.ModelSerializer):
class Meta:
model = Iip
fields = '__all__'
def delete(self, request, iip_id):
iip = get_iip_by(raise_exception=False,id=iip_id).first()
self.check_object_permissions(request=request, obj=iip)
iip = delete_iip(iip)
response_serializer = self.ResponseSerializer(iip)
return Response({
'iip': response_serializer.data
}, status=status.HTTP_200_OK)
class IipListApi(APIView):
permission_classes = [IsAuthenticated, ]
class ResponseSerializer(serializers.ModelSerializer):
class Meta:
model = Iip
fields = '__all__'
def get(self, request, start, end):
start_year = int(str(start)[0:4])
start_month = int(str(start)[4:])
end_year = int(str(end)[0:4])
end_month = int(str(end)[4:])
user = request.user
if start_year != end_year:
iips = set(Iip.objects.filter(organization=user.client, month__gte=start_month, year=start_year) | Iip.objects.filter(organization=user.client, year__gt=start_year, year__lt=end_year) | Iip.objects.filter(organization=user.client, month__lte=end_month, year=end_year))
else:
iips = set(Iip.objects.filter(organization=user.client, month__gte=start_month, month__lte=end_month, year=start_year))
response_serializer = self.ResponseSerializer(iips, many=True)
return Response({
'iips': response_serializer.data
}, status=status.HTTP_200_OK)
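# Routing sketch (assumed, not taken from the project): in a real project this
# would live in a separate urls.py; the URL prefixes below are illustrative only.
from django.urls import path
urlpatterns = [
    path('iips/add/', AddIipApi.as_view()),
    path('iips/<int:iip_id>/update/', UpdateIipApi.as_view()),
    path('iips/<int:iip_id>/delete/', DeleteIipApi.as_view()),
    path('iips/list/<int:start>/<int:end>/', IipListApi.as_view()),
]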
|
import time
import pandas as pd
from random import random, randint
from kafka import KafkaConsumer, KafkaProducer
class UserLocationProducer():
MALL_GPS_LOC = (28.457523, 77.026344)
LATTITUDE = 28.457523
LONGITUDE = 77.026344
# approximate radius of earth in km
R = 6371.0087714150598
@staticmethod
def add_tuple(loc1, loc2):
"""
Method to add two locations (latitude, longitude)
:param loc1:
:param loc2:
:return:
"""
return loc1[0] + loc2[0], loc1[1] + loc2[1]
@staticmethod
def sub_tuple(loc1, loc2):
"""
Method to subtract two locations (latitude, longitude)
:param loc1:
:param loc2:
:return:
"""
return loc1[0] - loc2[0], loc1[1] - loc2[1]
@staticmethod
def calculate_distance(loc1, loc2):
"""
Function to return distance between two co-ordinates in KM
:param loc1:
:param loc2:
:return:
"""
from math import sin, cos, sqrt, atan2, radians
# Convert into radian
lat1 = radians(loc1[0])
lon1 = radians(loc1[1])
lat2 = radians(loc2[0])
lon2 = radians(loc2[1])
# Take difference
dlon = lon2 - lon1
dlat = lat2 - lat1
# Calculate the great-circle distance with the haversine formula
a = sin(dlat / 2) ** 2 + cos(lat1) * cos(lat2) * sin(dlon / 2) ** 2
c = 2 * atan2(sqrt(a), sqrt(1 - a))
return round(UserLocationProducer.R * c, 2)
def get_current_location(self, gps_loc):
"""
Get the current location randomly by add/sub delta to current location
:param gps_loc:
:return:
"""
loc1 = self.MALL_GPS_LOC
loc2 = gps_loc
if randint(0, 1) == 1:
return loc1[0] + loc2[0], loc1[1] + loc2[1]
else:
return loc1[0] - loc2[0], loc1[1] - loc2[1]
def generate_data(self):
"""
Main method to generate GPS data for the customers
"""
df = pd.read_csv('customers.csv')
df.columns = [col.lower() for col in df.columns]
for index, row in df.head(10).iterrows():
# Generate random delta
gps_loc = (random() * random() * 0.1, random() * random() * 0.1)
# Add or remove delta
current_loc = self.get_current_location(gps_loc)
distance = self.calculate_distance(self.MALL_GPS_LOC, current_loc)
print(f"Customer Id : {row['customerid']} | Location : {current_loc} | Distance : {distance} KM")
# Push the location to Kafka Topic
producer = KafkaProducer(bootstrap_servers='localhost:9092', acks=1)
producer.send(topic='send-locations', key=str(row['customerid']).encode('utf-8'), value=str(current_loc).encode('utf-8'))  # kafka-python expects bytes unless serializers are configured
time.sleep(10)
def __init__(self):
self.generate_data()
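# Sanity-check sketch (illustrative, does not touch Kafka): the haversine helper
# should report ~0 km for identical points and roughly 0.5 km for a ~0.0045
# degree latitude offset near the mall location.
if __name__ == '__main__':
    mall = UserLocationProducer.MALL_GPS_LOC
    print(UserLocationProducer.calculate_distance(mall, mall))                 # 0.0
    print(UserLocationProducer.calculate_distance(mall, (28.462, 77.026344)))  # ~0.5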
|
# Definition for a binary tree node
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
# @param A : head node of linked list
# @return the root node in the tree
def sortedListToBST(self, A):
def add_node(tree_root, val):
if val < tree_root.val:
if not tree_root.left:
tree_root.left = TreeNode(val)
else:
add_node(tree_root.left, val)
else:
if not tree_root.right:
tree_root.right = TreeNode(val)
else:
add_node(tree_root.right, val)
def pick_nums(low, high):
if low <= high:
med = (low + high) // 2
add_node(root, vals[med])
pick_nums(low, med - 1)
pick_nums(med + 1, high)
if not A:
return None
vals = []
current = A
while current:
vals.append(current.val)
current = current.next
total_low = 0
total_high = len(vals) - 1
middle = total_high // 2
root = TreeNode(vals[middle])
pick_nums(total_low, middle - 1)
pick_nums(middle + 1, total_high)
return root
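# Small illustrative harness (an assumption; the TreeNode/ListNode classes below
# mirror the commented definitions above). Builds 1->2->3->4->5 and converts it.
if __name__ == '__main__':
    class ListNode:
        def __init__(self, x):
            self.val = x
            self.next = None
    class TreeNode:
        def __init__(self, x):
            self.val = x
            self.left = None
            self.right = None
    head = None
    for v in reversed([1, 2, 3, 4, 5]):
        node = ListNode(v)
        node.next = head
        head = node
    root = Solution().sortedListToBST(head)
    print(root.val)  # 3 -- the middle element becomes the root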
|
# -*- coding: utf-8 -*-
'''
Copyright of DasPy:
Author - Xujun Han (Forschungszentrum Jülich, Germany)
x.han@fz-juelich.de, xujunhan@gmail.com
DasPy was funded by:
1. Forschungszentrum Jülich, Agrosphere (IBG 3), Jülich, Germany
2. Cold and Arid Regions Environmental and Engineering Research Institute, Chinese Academy of Sciences, Lanzhou, PR China
3. Centre for High-Performance Scientific Computing in Terrestrial Systems: HPSC TerrSys, Geoverbund ABC/J, Jülich, Germany
Please include the following references related to DasPy:
1. Han, X., Li, X., He, G., Kumbhar, P., Montzka, C., Kollet, S., Miyoshi, T., Rosolem, R., Zhang, Y., Vereecken, H., and Franssen, H. J. H.:
DasPy 1.0 : the Open Source Multivariate Land Data Assimilation Framework in combination with the Community Land Model 4.5, Geosci. Model Dev. Discuss., 8, 7395-7444, 2015.
2. Han, X., Franssen, H. J. H., Rosolem, R., Jin, R., Li, X., and Vereecken, H.:
Correction of systematic model forcing bias of CLM using assimilation of cosmic-ray Neutrons and land surface temperature: a study in the Heihe Catchment, China, Hydrology and Earth System Sciences, 19, 615-629, 2015a.
3. Han, X., Franssen, H. J. H., Montzka, C., and Vereecken, H.:
Soil moisture and soil properties estimation in the Community Land Model with synthetic brightness temperature observations, Water Resour Res, 50, 6081-6105, 2014a.
4. Han, X., Franssen, H. J. H., Li, X., Zhang, Y. L., Montzka, C., and Vereecken, H.:
Joint Assimilation of Surface Temperature and L-Band Microwave Brightness Temperature in Land Data Assimilation, Vadose Zone J, 12, 0, 2013.
'''
from mpi4py import MPI
import multiprocessing, shutil
from DAS_Assim_Common import *
from DAS_Assim import *
from DAS_Misc import *
from DAS_Driver_Common import *
from DAS_Utilities import *
# Data Assimilation, Parameter Estimation, Bias Estimation
def DAS_Driver(mpi4py_comm, mpi4py_null, mpi4py_rank, mpi4py_size, mpi4py_name, Model_Driver,Do_DA_Flag, Def_Par_Sensitivity, Def_Par_Correlation, Def_Par_Optimized, Dim_Soil_Par, Dim_Veg_Par, Dim_PFT_Par, Dim_Hard_Par, Soil_Texture_Layer_Opt_Num, Observation_Box, LAI_Year_String, MODIS_LAI_Data_ID,\
Num_of_Days_Monthly, Start_Year, Start_Month, Start_Day, Start_Hour, Start_Minute, End_Year, End_Month, End_Day, End_Hour, End_Minute, Datetime_Start, Datetime_Start_Init, \
Datetime_End, Datetime_End_Init, Datetime_Initial, UTC_Zone, CLM_NA, NAvalue, Assim_Algorithm_Name, Station_XY, Station_XY_Index, dtime,\
NSLOTS, Feedback_Assim, Parameter_Optimization, Parameter_Regularization, Def_CDF_Matching, Bias_Estimation_Option_Model, Bias_Estimation_Option_Obs, Post_Inflation_Alpha, Def_Snow_Effects, N0, nlyr,\
Sub_Block_Ratio_Row, Sub_Block_Ratio_Col, Sub_Block_Index_Row_Mat_Vector, Sub_Block_Index_Col_Mat_Vector, Row_Offset, Col_Offset,
Row_Numbers_SubBlock_Array, Col_Numbers_SubBlock_Array, Sub_Block_Row_Start_Array, Sub_Block_Row_End_Array, Sub_Block_Col_Start_Array, Sub_Block_Col_End_Array,
Observation_Time_File_Path, Def_CESM_Multi_Instance, Constant_File_Name_Header, finidat_initial_CLM, finidat_initial_PFCLM, Def_PP, DAS_Fortran_Lib, Normal_Score_Trans, PDAF_Assim_Framework, PDAF_Filter_Type, \
Def_ParFor, Def_Region, Def_Initial, Irrig_Scheduling, Irrigation_Hours, Def_SpinUp, Def_First_Run, Def_Print, CLM_Flag, Def_ReBEL, Def_Localization, \
Num_Local_Obs_State, Num_Local_Obs_Par, Num_Local_Obs_Bias, eps, msw_infl, Def_Multiresolution, Def_Write_Initial, Ensemble_Number, Ensemble_Number_Predict, Call_Gstat_Flag, Write_DA_File_Flag, Use_Mask_Flag, Def_Figure_Output,\
Forcing_File_Path_Home, Soil_Layer_Num, Snow_Layer_Num, ParFlow_Layer_Num, maxpft, numrad, Density_of_liquid_water, Density_of_ice, Freezing_temperature_of_fresh_water, Plot_Analysis, Def_Debug, Initial_Perturbation, \
Weather_Forecast_Days, PicHeight, PicWidth, RegionName, Row_Numbers, Col_Numbers, Row_Numbers_String, Col_Numbers_String, Grid_Resolution_CEA_String, xllcenter, yllcenter, MODEL_X_Left, MODEL_X_Right, MODEL_Y_Lower, MODEL_Y_Upper,MODEL_CEA_X, MODEL_CEA_Y, Z_Resolution, Proj_String, \
Grid_Resolution_CEA, Grid_Resolution_GEO, mksrf_edgee, mksrf_edgew, mksrf_edges, mksrf_edgen, ntasks_CLM, rootpe_CLM, nthreads_CLM, omp_get_num_procs_ParFor, Low_Ratio_Par, High_Ratio_Par, Low_Ratio_Par_Uniform, High_Ratio_Par_Uniform, \
Soil_Par_Sens_Array, Veg_Par_Sens_Array, PFT_Par_Sens_Array, Hard_Par_Sens_Array, Region_Name, Run_Dir_Home, Model_Path, Hydraulic_File_Name, Mask_File, Observation_Path, DAS_Data_Path, DasPy_Path, DAS_Output_Path, DAS_Depends_Path,
octave, r, plt, cm, colors, inset_axes, fm, legend):
#print DasPy_Path+"ObsModel/COSMOS/COSMIC_Py.py"
COSMIC_Py = imp.load_source("COSMIC_Py",DasPy_Path+"ObsModel/COSMOS/COSMIC_Py.py")
memory_profiler = []
COSMIC = imp.load_dynamic("COSMIC",DasPy_Path+"ObsModel/COSMOS/COSMIC.so")
num_processors = multiprocessing.cpu_count()
start = time.time()
UTM_Zone = int(round(mksrf_edgee/15.0)) # The Difference Between the UTM Time and the Local Time
diskless_flag = True
persist_flag = True
############################################ PFCLM ####################################################
COUP_OAS_PFL = False # whether to run coupled ParFlow or not
if Model_Driver == "PFCLM":
COUP_OAS_PFL = True
CESM_Init_Flag = 1 # whether this is the first time ccsm_driver is run
############################################# PFCLM ###################################################
gelmna_threshold = 1.0 # Threshold to Stop Parameter Optimization (No Stop)
#gelmna_threshold = 1.08 # Threshold to Stop Parameter Optimization
if mpi4py_rank == 0:
if Def_Region == -1:
forcing_file_name = Forcing_File_Path_Home +"_Ens1/"+ Start_Year + '_' + Start_Month + '_' + Start_Day + '_tair.nc'
if not os.path.exists(forcing_file_name):
print "Forcing file",forcing_file_name,"does not exist!!!"
print "Please Change the Start Date and Time."
os.abort()
else:
forcing_file_name = Forcing_File_Path_Home +"/"+ Start_Year + '_' + Start_Month + '_' + Start_Day + '_tair.nc'
if not os.path.exists(forcing_file_name):
print "Forcing file",forcing_file_name,"does not exist!!!"
print "Please Change the Start Date and Time."
if Def_SpinUp != 1: # for a multi-year spin-up we can reuse one year of forcing to simulate multiple years
os.abort()
PP_Port = 23335 + int(numpy.random.uniform(50*Def_Region,100*Def_Region))
active_nodes_server = []
else:
forcing_file_name = None
PP_Port = None
active_nodes_server = None
if Def_PP == 2:
mpi4py_comm.barrier()
mpi4py_comm.Barrier()
PP_Port = mpi4py_comm.bcast(PP_Port)
active_nodes_server = mpi4py_comm.bcast(active_nodes_server)
forcing_file_name = mpi4py_comm.bcast(forcing_file_name)
if mpi4py_rank == 0:
print "mpi4py_rank",mpi4py_rank,"PP_Port",PP_Port
job_server_node_array, active_nodes_server, PROCS_PER_NODE, PP_Port, PP_Servers_Per_Node = Start_ppserver(mpi4py_comm, mpi4py_rank, mpi4py_name, DAS_Output_Path, Ensemble_Number, DAS_Depends_Path, active_nodes_server, Def_Region, NSLOTS, Def_Print, DasPy_Path, Def_PP, Def_CESM_Multi_Instance, PP_Port)
if mpi4py_rank == 0:
restart_pp_server = (len(job_server_node_array) < 1)
print "-------------- restart_pp_server",restart_pp_server
else:
restart_pp_server = None
if Def_PP == 2:
mpi4py_comm.barrier()
mpi4py_comm.Barrier()
restart_pp_server = mpi4py_comm.bcast(restart_pp_server)
while Def_PP and restart_pp_server and Ensemble_Number > 1:
job_server_node_array = Stop_ppserver(mpi4py_rank, Def_PP, DAS_Depends_Path, job_server_node_array, NSLOTS, DasPy_Path, active_nodes_server, PP_Servers_Per_Node)
job_server_node_array, active_nodes_server, PROCS_PER_NODE, PP_Port, PP_Servers_Per_Node = Start_ppserver(mpi4py_comm, mpi4py_rank, mpi4py_name, DAS_Output_Path, Ensemble_Number, DAS_Depends_Path, active_nodes_server, Def_Region, NSLOTS, Def_Print, DasPy_Path, Def_PP, Def_CESM_Multi_Instance, PP_Port)
if Def_PP == 2:
mpi4py_comm.barrier()
mpi4py_comm.Barrier()
if mpi4py_rank == 0:
print "job_server_node_array",job_server_node_array
print "active_nodes_server",active_nodes_server
if mpi4py_rank == 0:
print "mpi4py_comm,mpi4py_rank,mpi4py_size,mpi4py_name",mpi4py_comm,mpi4py_rank,mpi4py_size,mpi4py_name
# MPI Split into Ensemble_Number Groups
mpi4py_comm_split = []
mpipy_comm_decomposition = 1
if Def_PP == 2:
if mpi4py_rank == 0:
mpipy_comm_decomposition = mpi4py_size/Ensemble_Number
else:
mpipy_comm_decomposition = None
mpipy_comm_decomposition = mpi4py_comm.bcast(mpipy_comm_decomposition)
if Ensemble_Number > 1:
color = mpi4py_rank/mpipy_comm_decomposition
key = mpi4py_rank
else:
color = 0
key = 0
mpi4py_comm_split = mpi4py_comm.Split(color, key)
mpi4py_comm.barrier()
mpi4py_comm.Barrier()
if mpi4py_rank == 0:
print "mpipy_comm_decomposition,mpi4py_comm_split",mpipy_comm_decomposition,mpi4py_comm_split
if Def_PP == 2:
print "mpi4py_comm_split.py2f()",mpi4py_comm_split.py2f(),"mpi4py_comm_split.Get_size()",mpi4py_comm_split.Get_size()
if Def_PP == 2:
ntasks_CLM[:] = mpi4py_comm_split.Get_size()
###################################################################
if Def_Region >= 9:
Grid_Resolution_GEO_Global = 360.0/43200.0
Resolution_Name = "1KM"
else:
Grid_Resolution_GEO_Global = 360.0/8640.0
Resolution_Name = "5KM"
# Path for DA
if mpi4py_rank == 0:
# Decrease the float bit
MODEL_CEA_X = MODEL_CEA_X - MODEL_X_Left
MODEL_CEA_Y = MODEL_CEA_Y - MODEL_Y_Lower
MODEL_X_Left = numpy.min(MODEL_CEA_X)
MODEL_X_Right = numpy.max(MODEL_CEA_X)
MODEL_Y_Lower = numpy.min(MODEL_CEA_Y)
MODEL_Y_Upper = numpy.max(MODEL_CEA_Y)
MODEL_X_Right = MODEL_X_Right - MODEL_X_Left
MODEL_X_Left = MODEL_X_Left - MODEL_X_Left
MODEL_Y_Upper = MODEL_Y_Upper - MODEL_Y_Lower
MODEL_Y_Lower = MODEL_Y_Lower - MODEL_Y_Lower
r.assign('MODEL_X_Left', MODEL_X_Left)
r.assign('MODEL_X_Right', MODEL_X_Right)
r.assign('MODEL_Y_Lower', MODEL_Y_Lower)
r.assign('MODEL_Y_Upper', MODEL_Y_Upper)
print "MODEL_X_Left,MODEL_X_Right,MODEL_Y_Lower,MODEL_Y_Upper"
print MODEL_X_Left,MODEL_X_Right,MODEL_Y_Lower,MODEL_Y_Upper
Run_Dir_Multi_Instance = []
Run_Dir_Array = []
Forcing_File_Path_Array = []
Forcing_File_Path_Array_Par = []
if Ensemble_Number == 1:
if not os.path.exists(Run_Dir_Home):
os.makedirs(Run_Dir_Home)
Run_Dir_Multi_Instance = Run_Dir_Home+"/"
Run_Dir_Array.append(Run_Dir_Home+"/")
Forcing_File_Path_Array.append(Forcing_File_Path_Home)
Forcing_File_Path_Array_Par.append(Forcing_File_Path_Home)
else:
Run_Dir_Multi_Instance = Run_Dir_Home+"_Ens/"
for Ens_Index in range(Ensemble_Number):
Run_Dir_Temp = Run_Dir_Home+"_Ens"+str(Ens_Index+1)+"/"
if Def_CESM_Multi_Instance == 1:
Run_Dir_Temp = Run_Dir_Multi_Instance
Forcing_File_Path_Temp = Forcing_File_Path_Home+"_Ens"+str(Ens_Index+1)+"/"
if not os.path.exists(Run_Dir_Temp):
os.makedirs(Run_Dir_Temp)
Run_Dir_Array.append(Run_Dir_Temp)
Forcing_File_Path_Array.append(Forcing_File_Path_Temp)
Forcing_File_Path_Array_Par.append(Forcing_File_Path_Home)
if not os.path.exists(Run_Dir_Multi_Instance):
os.makedirs(Run_Dir_Multi_Instance)
else:
Run_Dir_Multi_Instance = None
Run_Dir_Array = None
Forcing_File_Path_Array = None
Forcing_File_Path_Array_Par = None
if Def_PP == 2:
Run_Dir_Multi_Instance = mpi4py_comm.bcast(Run_Dir_Multi_Instance)
Run_Dir_Array = mpi4py_comm.bcast(Run_Dir_Array)
Forcing_File_Path_Array = mpi4py_comm.bcast(Forcing_File_Path_Array)
Forcing_File_Path_Array_Par = mpi4py_comm.bcast(Forcing_File_Path_Array_Par)
######################################################### NC Files
NC_FileName_Assimilation_2_Constant = DAS_Output_Path+"Analysis/"+Region_Name+"/Assimilation_2_Constant.nc"
NC_FileName_Assimilation_2_Diagnostic = DAS_Output_Path+"Analysis/"+Region_Name+"/Assimilation_2_Diagnostic.nc"
NC_FileName_Assimilation_2_Initial = DAS_Output_Path+"Analysis/"+Region_Name+"/Assimilation_2_Initial.nc"
NC_FileName_Assimilation_2_Initial_Copy = DAS_Output_Path+"Analysis/"+Region_Name+"/Assimilation_2_Initial_Copy.nc"
NC_FileName_Assimilation_2_Bias = DAS_Output_Path+"Analysis/"+Region_Name+"/Assimilation_2_Bias.nc"
NC_FileName_Assimilation_2_Bias_Copy = DAS_Output_Path+"Analysis/"+Region_Name+"/Assimilation_2_Bias_Copy.nc"
NC_FileName_Assimilation_2_Bias_Monthly = DAS_Output_Path+"Analysis/"+Region_Name+"/Assimilation_2_Bias_Monthly.nc"
NC_FileName_Assimilation_2_Bias_Monthly_Copy = DAS_Output_Path+"Analysis/"+Region_Name+"/Assimilation_2_Bias_Monthly_Copy.nc"
NC_FileName_Estimated_Bias = DAS_Output_Path+"Analysis/"+Region_Name+"/Estimated_Bias.nc"
NC_FileName_Assimilation_2_Parameter = DAS_Output_Path+"Analysis/"+Region_Name+"/Assimilation_2_Parameter.nc"
NC_FileName_Assimilation_2_Parameter_Copy = DAS_Output_Path+"Analysis/"+Region_Name+"/Assimilation_2_Parameter_Copy.nc"
NC_FileName_Assimilation_2_Parameter_Obs_Dim = DAS_Output_Path+"Analysis/"+Region_Name+"/Assimilation_2_Parameter_Obs_Dim.nc"
NC_FileName_Assimilation_2_Parameter_Monthly = DAS_Output_Path+"Analysis/"+Region_Name+"/Assimilation_2_Parameter_Monthly.nc"
NC_FileName_Assimilation_2_Parameter_Monthly_Copy = DAS_Output_Path+"Analysis/"+Region_Name+"/Assimilation_2_Parameter_Monthly_Copy.nc"
NC_FileName_Optimized_Parameter = DAS_Output_Path+"Analysis/"+Region_Name+"/Optimized_Parameter.nc"
NC_FileName_Soil_Moisture_Difference = DAS_Output_Path+"Analysis/"+Region_Name+"/Soil_Moisture_Difference.nc"
NC_FileName_Parameter_Space_Single = DAS_Output_Path+"Analysis/"+Region_Name+"/Parameter_Space_Single.nc"
DAS_File_Name_List = [NC_FileName_Assimilation_2_Constant, NC_FileName_Assimilation_2_Diagnostic,
NC_FileName_Assimilation_2_Initial, NC_FileName_Assimilation_2_Initial_Copy,
NC_FileName_Assimilation_2_Bias, NC_FileName_Assimilation_2_Bias_Copy,
NC_FileName_Assimilation_2_Bias_Monthly, NC_FileName_Assimilation_2_Bias_Monthly_Copy,
NC_FileName_Estimated_Bias,
NC_FileName_Assimilation_2_Parameter, NC_FileName_Assimilation_2_Parameter_Copy, NC_FileName_Assimilation_2_Parameter_Obs_Dim,
NC_FileName_Assimilation_2_Parameter_Monthly, NC_FileName_Assimilation_2_Parameter_Monthly_Copy,
NC_FileName_Optimized_Parameter, NC_FileName_Soil_Moisture_Difference, NC_FileName_Parameter_Space_Single]
########################################################################################
Soil_Thickness = numpy.asarray([0.01751282, 0.02757897, 0.04547003, 0.07496741, 0.1236004, 0.2037826, 0.3359806, 0.5539384, 0.91329, 1.50576, 2.48258, 4.093082, 6.748351, 11.12615, 13.85115])
Variable_List = ["Soil_Moisture","Surface_Temperature","Vegetation_Temperature","Canopy_Water","Albedo_BSA_Band_vis","Albedo_BSA_Band_nir","Albedo_WSA_Band_vis",
"Albedo_WSA_Band_nir","Emissivity","Snow_Depth","Snow_Cover_Fraction","Snow_Water_Equivalent","LAI","Sensible_Heat","Irrigation_Scheduling"]
Dim_CLM_State = len(Variable_List)
Variable_Assimilation_Flag = numpy.zeros(Dim_CLM_State,dtype=numpy.float32)
Initial_Perturbation_SM_Flag = numpy.array([0 for i in range(15)]) # whether to perturb the initial data
Initial_Perturbation_ST_Flag = numpy.array([0 for i in range(15)]) # whether to perturb the initial data
################################################################################ CLM Input File Names
finidat_initial_CLM_Copy = finidat_initial_CLM
fndepdat_name = "fndep_clm_hist_simyr1849-2006_1.9x2.5_c100428.nc"
fatmgrid_name = "griddata_"+Row_Numbers_String+"x"+Col_Numbers_String+".nc"
fatmlndfrc_name = "domain.lnd_"+Row_Numbers_String+"x"+Col_Numbers_String+"_"+Region_Name+".nc"
#fatmlndfrc_name = "fracdata_"+Row_Numbers_String+"x"+Col_Numbers_String+"_"+Region_Name+".nc"
fsurdat_name = "surfdata_"+Row_Numbers_String+"x"+Col_Numbers_String+"_"+Region_Name+".nc"
fglcmask_name = "glcmaskdata_"+Row_Numbers_String+"x"+Col_Numbers_String+"_Gland20km.nc"
flndtopo_name = "topodata_"+Row_Numbers_String+"x"+Col_Numbers_String+"_"+Region_Name+".nc"
fsnowoptics_name = "snicar_optics_5bnd_c090915.nc"
fsnowaging_name = "snicar_drdt_bst_fit_60_c070416.nc"
fpftcon_name = "pft-physiology.c130503.nc"
domain_name = "domain.lnd_"+Row_Numbers_String+"x"+Col_Numbers_String+"_"+Region_Name+".nc"
rdirc_name = "rdirc_0.5x0.5_simyr2000_c101124.nc"
popd_streams_name = "clmforc.Li_2012_hdm_0.5x0.5_AVHRR_simyr1850-2010_c130401.nc"
light_streams_name = "clmforc.Li_2012_climo1995-2011.T62.lnfm_c130327.nc"
CLM_File_Name_List = [fndepdat_name, fatmgrid_name, fatmlndfrc_name, fsurdat_name, fglcmask_name, flndtopo_name,
fsnowoptics_name, fsnowaging_name, fpftcon_name, domain_name, rdirc_name, popd_streams_name, light_streams_name]
##################DA Parameters
Dim_Observation_Quantity = 4
# Irrigation Parameters
irrig_nsteps_per_day = 1
PFT_Num = 1
PFT_Type_Index = 4
irrig_nsteps_per_day = 3600.0 / dtime * Irrigation_Hours * PFT_Num
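# Number of model time steps over which irrigation is applied: steps per hour (3600/dtime, with
# dtime assumed to be the CLM time step in seconds) times Irrigation_Hours, scaled by PFT_Num.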
N_Steps = []
SensorType = []
SensorVariable = []
SensorQuantity = []
Variable_ID = []
QC_ID = []
SensorResolution = []
Observation_File_Name = []
Soil_Layer_Index_DA = 0
Def_First_Run_RTM = 1 # whether the RTM is being called for the first time
if mpi4py_rank == 0:
##### Some Index Variables
column_len = []
pft_len = []
finidat_name_string = []
if Do_DA_Flag:
finidat_name_string = Run_Dir_Home+"_Ens" + str(1) +"/"+ finidat_initial_CLM
print '============================= Open the Model Initial File and Read the Index Data ==========================================='
#------------------------------------------- Read the CLM Initial File
print "Open Initial File:", finidat_name_string
try:
CLM_Initial_File = netCDF4.Dataset(finidat_name_string, 'r')
column_len = len(CLM_Initial_File.dimensions['column'])
pft_len = len(CLM_Initial_File.dimensions['pft'])
CLM_Initial_File.close()
except:
print finidat_name_string,"does not exist!"
os.abort()
else:
finidat_name_string = None
column_len = None
pft_len = None
if Def_PP == 2:
mpi4py_comm.barrier()
mpi4py_comm.Barrier()
finidat_name_string = mpi4py_comm.bcast(finidat_name_string)
column_len = mpi4py_comm.bcast(column_len)
pft_len = mpi4py_comm.bcast(pft_len)
if mpi4py_rank == 0:
##### Some Index Variables
#############################################################
if not os.path.exists(DasPy_Path+"Analysis/DAS_Temp/"+Region_Name):
os.makedirs(DasPy_Path+"Analysis/DAS_Temp/"+Region_Name)
####################################################################################
Land_Mask_Data = numpy.ones((Row_Numbers,Col_Numbers),dtype=numpy.float32)
Teta_Residual = numpy.ones((Soil_Layer_Num,Row_Numbers,Col_Numbers),dtype=numpy.float32)
Teta_Saturated = numpy.ones((Soil_Layer_Num,Row_Numbers,Col_Numbers),dtype=numpy.float32)
Teta_Field_Capacity = numpy.ones((Soil_Layer_Num,Row_Numbers,Col_Numbers),dtype=numpy.float32)
Teta_Wilting_Point = numpy.ones((Soil_Layer_Num,Row_Numbers,Col_Numbers),dtype=numpy.float32)
sucsat = numpy.ones((Soil_Layer_Num,Row_Numbers,Col_Numbers),dtype=numpy.float32)
bsw = numpy.ones((Soil_Layer_Num,Row_Numbers,Col_Numbers),dtype=numpy.float32)
watdry = numpy.ones((Soil_Layer_Num,Row_Numbers,Col_Numbers),dtype=numpy.float32)
watopt = numpy.ones((Soil_Layer_Num,Row_Numbers,Col_Numbers),dtype=numpy.float32)
watfc = numpy.ones((Soil_Layer_Num,Row_Numbers,Col_Numbers),dtype=numpy.float32)
smpso = []
smpsc = []
Sand_Top_Region = numpy.ones((Row_Numbers,Col_Numbers),dtype=numpy.float32)
Clay_Top_Region = numpy.ones((Row_Numbers,Col_Numbers),dtype=numpy.float32)
Organic_Top_Region = numpy.ones((Row_Numbers,Col_Numbers),dtype=numpy.float32)
Bulk_Density_Top_Region = numpy.ones((Row_Numbers,Col_Numbers),dtype=numpy.float32)
Sand_Sub_Region = numpy.ones((Row_Numbers,Col_Numbers),dtype=numpy.float32)
Clay_Sub_Region = numpy.ones((Row_Numbers,Col_Numbers),dtype=numpy.float32)
Organic_Sub_Region = numpy.ones((Row_Numbers,Col_Numbers),dtype=numpy.float32)
Bulk_Density_Sub_Region = numpy.ones((Row_Numbers,Col_Numbers),dtype=numpy.float32)
DEM_Data = numpy.ones((Row_Numbers,Col_Numbers),dtype=numpy.float32)
PCT_PFT = numpy.zeros((maxpft, Row_Numbers, Col_Numbers),dtype=numpy.float32)
STD_ELEV = numpy.ones((Row_Numbers,Col_Numbers),dtype=numpy.float32) # standard deviation of the elevation within a grid cell
topo_slope = numpy.ones((Row_Numbers,Col_Numbers),dtype=numpy.float32)
PCT_LAKE = numpy.ones((Row_Numbers,Col_Numbers),dtype=numpy.float32)
Irrigation_Grid_Flag = numpy.zeros((Row_Numbers,Col_Numbers),dtype=numpy.bool)
micro_sigma = numpy.zeros((Row_Numbers, Col_Numbers),dtype=numpy.float32)
# Calculate the Vegetation Fraction
PCT_Veg = numpy.zeros((Row_Numbers, Col_Numbers),dtype=numpy.float32)
PCT_PFT_High = numpy.zeros((Row_Numbers, Col_Numbers),dtype=numpy.float32)
PCT_PFT_Low = numpy.zeros((Row_Numbers, Col_Numbers),dtype=numpy.float32)
PCT_PFT_WATER = numpy.zeros((Row_Numbers, Col_Numbers),dtype=numpy.float32)
PFT_Dominant_Index = numpy.zeros((Row_Numbers, Col_Numbers),dtype=numpy.float32)
Crop_Sum = numpy.zeros((Row_Numbers, Col_Numbers),dtype=numpy.float32)
Bare_Grid_Index = numpy.zeros((Row_Numbers, Col_Numbers),dtype=numpy.float32)
print "Open Hydraulic_File_Name",Hydraulic_File_Name
try:
Hydraulic_File = netCDF4.Dataset(Hydraulic_File_Name, 'r')
Teta_Residual = Hydraulic_File.variables['RES'][:,:,:]
#Teta_Saturated = Hydraulic_File.variables['SAT'][:,:,:]
#dominik: 09/07/2016
Teta_Saturated = Hydraulic_File.variables['WATSAT'][:,:,:]
Teta_Field_Capacity = Hydraulic_File.variables['FC'][:,:,:]
Teta_Wilting_Point = Hydraulic_File.variables['WP'][:,:,:]
sucsat = Hydraulic_File.variables["sucsat"][:,:,:]
bsw = Hydraulic_File.variables["bsw"][:,:,:]
watdry = Hydraulic_File.variables["watdry"][:,:,:]
watopt = Hydraulic_File.variables["watopt"][:,:,:]
watfc = Hydraulic_File.variables["watfc"][:,:,:]
Hydraulic_File.close()
except:
print "Open Hydraulic_File_Name",Hydraulic_File_Name,"Failed!!"
os.abort()
# Make sure the minimum soil moisture is larger than 0.05, because we assume the maximum bias is 0.05
Teta_Residual[numpy.where(Teta_Residual < 0.05)] = 0.05
pft_physiology_file_name = DAS_Data_Path + "SysModel/CLM/inputdata/lnd/clm2/pftdata/"+fpftcon_name
pft_physiology_file = netCDF4.Dataset(pft_physiology_file_name, "r")
smpso = pft_physiology_file.variables["smpso"][:]
smpsc = pft_physiology_file.variables["smpsc"][:]
#smpso = numpy.zeros(len(pft_physiology_file.dimensions['pft']))
#smpso[:] = -50000.0
#print z0mr,PFT_Height_Top[1:17]
pft_physiology_file.close()
# Create Plot Folder
if not os.path.exists(DasPy_Path+"Analysis/DAS_Temp/"+Region_Name):
os.makedirs(DasPy_Path+"Analysis/DAS_Temp/"+Region_Name)
if Use_Mask_Flag:
# find the model grids which need to be assimilated.
# Read the Mask Grid
print "Read the Land Water Mask Data"
mkdatadomain_NC_FileName_In = DAS_Data_Path+"SysModel/CLM/tools/" + fatmlndfrc_name
print "mkdatadomain_NC_FileName_In",mkdatadomain_NC_FileName_In
mkdatadomain_NC_File_In = netCDF4.Dataset(mkdatadomain_NC_FileName_In, 'r+')
Land_Mask_Data = numpy.flipud(mkdatadomain_NC_File_In.variables['mask'][:,:])
mkdatadomain_NC_File_In.close()
Land_Mask_Data[numpy.where(Land_Mask_Data == 0.0)] = NAvalue
Data = numpy.ma.masked_where(Land_Mask_Data == NAvalue, Land_Mask_Data)
fig1 = plt.figure(figsize=(15, 10), dpi=80)
ax = fig1.add_subplot(1, 1, 1)
im1 = ax.imshow(Data, cmap=cm.jet)
ax.set_title('Land_Mask_Data')
plt.grid(True)
plt.savefig(DasPy_Path+"Analysis/DAS_Temp/"+Region_Name+"/Land_Mask_Data.png")
plt.close('all')
del Data
####################################################################
Corner_Row_Index = 0
Corner_Col_Index = 0
if Def_Region == -2:
Grid_Resolution_GEO_Global = 360.0/432000.0*5.0
Resolution_Name = "500m"
elif Def_Region == -1:
Grid_Resolution_GEO_Global = 360.0/432000.0
Resolution_Name = "100m"
elif Def_Region <= 8:
Grid_Resolution_GEO_Global = 360.0/43200.0
Resolution_Name = "1KM"
elif Def_Region >= 9:
Grid_Resolution_GEO_Global = 360.0/8640.0
Resolution_Name = "5KM"
# Sand_Top_Region, Clay_Top_Region, Organic_Top_Region, Bulk_Density_Top_Region, Sand_Sub_Region, Clay_Sub_Region, Organic_Sub_Region, Bulk_Density_Sub_Region \
# = Read_Soil_Texture(Def_Region, DAS_Data_Path, Resolution_Name, Region_Name, Row_Numbers, Col_Numbers, Corner_Row_Index, Corner_Col_Index)
print "*******************************Read CLM mksurfdata"
mksurfdata_NC_FileName_In = DAS_Data_Path+"SysModel/CLM/tools/" + fsurdat_name
print "mksurfdata_NC_FileName_In",mksurfdata_NC_FileName_In
print "************************Open*******************",mksurfdata_NC_FileName_In
mksurfdata_NC_File_In = netCDF4.Dataset(mksurfdata_NC_FileName_In, 'r')
#print mksurfdata_NC_File_In.variables
STD_ELEV = numpy.flipud(mksurfdata_NC_File_In.variables['STD_ELEV'][::]) # standard deviation of the elevation within a grid cell
topo_slope = numpy.flipud(mksurfdata_NC_File_In.variables['SLOPE'][::]) # mean topographic slope
DEM_Data = numpy.flipud(mksurfdata_NC_File_In.variables['TOPO'][::]) # mean elevation on land
# check for near zero slopes, set minimum value
topo_slope[numpy.where(topo_slope < 0.2)] = 0.2
for Pft_index in range(maxpft):
#print numpy.shape(mksurfdata_NC_File_In.variables['PCT_PFT'])
PCT_PFT[Pft_index,::] = numpy.flipud(mksurfdata_NC_File_In.variables['PCT_PFT'][Pft_index,:,:])
PCT_LAKE = numpy.flipud(mksurfdata_NC_File_In.variables['PCT_LAKE'][::])
mksurfdata_NC_File_In.close()
#################################
###########################
# microtopographic parameter, units are meters
minslope=0.05
slopemax=0.4
maxslope=(slopemax - minslope)/(slopemax)
# try smooth function of slope
slopebeta=3.0
slopemax=0.4
slope0=slopemax**(-1.0/slopebeta)
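# Smooth decreasing function of slope: micro_sigma = (topo_slope + slope0)**(-slopebeta), with
# slope0 chosen so that a zero slope gives exactly slopemax (0.4 m) of microtopographic relief.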
micro_sigma = (topo_slope + slope0)**(-slopebeta)
# Calculate the Vegetation Fraction
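# PFT index 0 is bare ground, so summing indices 1..maxpft-1 gives the vegetated fraction;
# indices 1-8 are tree PFTs (high vegetation) and 9..maxpft-1 shrubs/grasses/crops (low
# vegetation), following the usual CLM PFT ordering.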
PCT_Veg = numpy.sum(PCT_PFT[1:maxpft,:,:],axis=0)
PCT_PFT_High = numpy.sum(PCT_PFT[1:9,:,:],axis=0) / 100.0
PCT_PFT_Low = numpy.sum(PCT_PFT[9:maxpft,:,:],axis=0) / 100.0
#PCT_PFT_WATER = PCT_LAKE / 100.0
PFT_Dominant_Index = numpy.argmax(PCT_PFT,axis=0)
numpy.savetxt("PFT_Dominant_Index_"+Region_Name+".txt",PFT_Dominant_Index)
w,h = plt.figaspect(float(Row_Numbers)/Col_Numbers)
fig1 = plt.figure(figsize=(w,h))
ax1 = fig1.add_subplot(1,1,1)
im1 = ax1.imshow(PFT_Dominant_Index, cmap=cm.jet, interpolation='bilinear')
plt.colorbar(im1)
if Def_Figure_Output:
plt.savefig("DataBase/PFT_Dominant_Index_"+Region_Name+".png")
#plt.show()
Bare_Grid_Index = numpy.where(PFT_Dominant_Index == 0)
#Bare_Grid_Index = numpy.where(PCT_PFT[0,:,:] == 100)
print "numpy.size(Bare_Grid_Index)",numpy.size(Bare_Grid_Index)
for Soil_Layer_Index in range(10):
watopt[Soil_Layer_Index,:,:][Bare_Grid_Index] = watfc[Soil_Layer_Index,:,:][Bare_Grid_Index]
watdry[Soil_Layer_Index,:,:][Bare_Grid_Index] = Teta_Residual[Soil_Layer_Index,:,:][Bare_Grid_Index]
#watopt[Soil_Layer_Index,:,:] = Teta_Saturated[Soil_Layer_Index,:,:]
#watdry[Soil_Layer_Index,:,:] = Teta_Residual[Soil_Layer_Index,:,:]
print "************************"
print "numpy.max(watopt),numpy.min(watopt),numpy.max(watdry),numpy.min(watdry)"
print numpy.max(watopt),numpy.min(watopt),numpy.max(watdry),numpy.min(watdry)
# w,h = plt.figaspect(float(Row_Numbers)/Col_Numbers)
# fig1 = plt.figure(figsize=(w,h))
# ax1 = fig1.add_subplot(1,1,1)
# im1 = ax1.imshow(MONTHLY_LAI[6,:,:], cmap=cm.jet, interpolation='bilinear')
# plt.colorbar(im1)
# if Def_Figure_Output:
# plt.savefig("SysModel/CLM/Surfdata_Figure/"+Region_Name+"_PFT/"+"MONTHLY_LAI.png")
# plt.show()
##############################################################################
# COSMOS Circle Mask
Mask_X_COSMOS = MODEL_CEA_X
Mask_Y_COSMOS = MODEL_CEA_Y
COSMOS_Circle_Plot = numpy.zeros((Row_Numbers,Col_Numbers),dtype=numpy.float32)
COSMOS_Circle_Array = []
COSMOS_Circle_Index_Array = []
COSMOS_Circle_Num_Array = []
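# Build a circular footprint mask for each COSMOS (cosmic-ray neutron probe) station: grid cells
# whose CEA-projected distance to the station is within 300 m are flagged, and COSMOS_Circle_Num
# stores each flagged cell's distance expressed in grid-cell units.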
#for Station_Index in range(numpy.size(Station_XY)/2):
for Station_Index in range(numpy.size(Station_XY)/2-12):
COSMOS_Circle = numpy.zeros((Row_Numbers,Col_Numbers),dtype=numpy.bool)
COSMOS_Circle[::] = False
print "Station_"+str(Station_Index+1),Station_XY[Station_Index][0],Station_XY[Station_Index][1]
r.assign('X_Coordiates',Station_XY[Station_Index][0])
r.assign('Y_Coordiates',Station_XY[Station_Index][1])
r('xy <- cbind(X_Coordiates,Y_Coordiates)')
print r['xy']
print "========================== GEO to CEA"
r('xy_cea <- project(xy,"+proj=cea +lon_0=0 +lat_ts=30 +x_0=0 +y_0=0 +a=6371228 +b=6371228 +units=m +ellps=WGS84 +no_defs")')
print 'x,y',r['xy_cea'][0][0],r['xy_cea'][0][1]
ii = r['xy_cea'][0][0]
jj = r['xy_cea'][0][1]
dist = numpy.sqrt(abs(ii - Mask_X_COSMOS) ** 2 + abs(jj - Mask_Y_COSMOS) ** 2)
COSMOS_Circle_Index = numpy.where(dist <= 300)
COSMOS_Circle[COSMOS_Circle_Index] = True
COSMOS_Circle_Array.append(COSMOS_Circle)
COSMOS_Circle_Index_Array.append(COSMOS_Circle_Index)
#print COSMOS_Circle_Index,numpy.size(COSMOS_Circle_Index)
COSMOS_Circle_Num = numpy.zeros_like(COSMOS_Circle_Index)
COSMOS_Circle_Num = numpy.floor(dist[COSMOS_Circle_Index] / Grid_Resolution_CEA)
#print COSMOS_Circle_Num
COSMOS_Circle_Num_Array.append(COSMOS_Circle_Num)
COSMOS_Circle_Plot[COSMOS_Circle] = 1.0
if Plot_Analysis:
fig1 = plt.figure(figsize=(15, 10), dpi=80)
ax = fig1.add_subplot(1, 1, 1)
im1 = ax.imshow(COSMOS_Circle_Plot, cmap=cm.jet)
ax.set_title('COSMOS_Circle_Plot')
plt.grid(True)
plt.savefig(DasPy_Path+"Analysis/DAS_Temp/"+Region_Name+"/COSMOS_Circle_Plot.png")
plt.close('all')
##########################################################
LONGXY_Mat = numpy.zeros((Row_Numbers, Col_Numbers), dtype=numpy.float32)
LATIXY_Mat = numpy.zeros((Row_Numbers, Col_Numbers), dtype=numpy.float32)
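# Cell-centre coordinates: the domain edges (mksrf_edgew/e/s/n) are offset inward by half a grid
# cell and broadcast into full 2-D longitude/latitude matrices.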
longitudes = numpy.linspace(mksrf_edgew+Grid_Resolution_GEO[0]/2.0,mksrf_edgee-Grid_Resolution_GEO[0]/2.0,Col_Numbers)
latitudes = numpy.linspace(mksrf_edges+Grid_Resolution_GEO[1]/2.0,mksrf_edgen-Grid_Resolution_GEO[1]/2.0,Row_Numbers)
LONGXY_Row = longitudes
LATIXY_Col = latitudes
for row in range(Row_Numbers):
LONGXY_Mat[row,:] = LONGXY_Row
for col in range(Col_Numbers):
LATIXY_Mat[:,col] = LATIXY_Col
#print LATIXY_Col
Mask_Index = numpy.zeros((Dim_CLM_State, Row_Numbers, Col_Numbers), dtype=numpy.bool)
Mask_Index[:,:,:] = False
#------------------------------------------- Data Assimilation Flags
print "Soil Moisture Products: SMAP(10km), AMSR-E(25km), SMOS(40km), ASCAT(12.5km,25km), MODIS(1km), ASAR(120m), PALSAR(60m)"
print "Soil Temperature Products: MODIS Terra and Aqua(1km)"
############################################################## For Bias Estimation #######################################
Bias_Remove_Start_Time_Array = ['' for i in range(Dim_CLM_State)]
# Flag to check whether the Observation Bias of each observation type and each observation ensemble has been perturbed
Observation_Bias_Initialization_Flag = numpy.zeros((Dim_CLM_State,Dim_Observation_Quantity,Ensemble_Number),dtype=numpy.float32)
Model_Bias_Optimized = numpy.zeros((Ensemble_Number, Dim_CLM_State, numpy.size(Station_XY)/2), dtype=numpy.float32)
Observation_Bias_Optimized = numpy.zeros((Ensemble_Number, Dim_CLM_State, Dim_Observation_Quantity, numpy.size(Station_XY)/2), dtype=numpy.float32)
# Bias Estimation Range and Standard Deviation Definition
Model_Bias_Range = numpy.zeros((Dim_CLM_State,2),dtype=numpy.float32)
Observation_Bias_Range = numpy.zeros((Dim_CLM_State,Dim_Observation_Quantity,2),dtype=numpy.float32)
Model_Bias_Range_STD = numpy.zeros((Dim_CLM_State,2),dtype=numpy.float32)
Observation_Bias_Range_STD = numpy.zeros((Dim_CLM_State,Dim_Observation_Quantity,2),dtype=numpy.float32)
Model_Bias_STD = numpy.zeros(Dim_CLM_State,dtype=numpy.float32)
Observation_Bias_STD = numpy.zeros((Dim_CLM_State,Dim_Observation_Quantity),dtype=numpy.float32)
# Model State Ensemble Inflation STD
Model_State_Inflation_Range = numpy.zeros((Dim_CLM_State,2),dtype=numpy.float32)
Model_State_Inflation_Range_STD = numpy.zeros(Dim_CLM_State,dtype=numpy.float32)
########### Simulate Model Error
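# Additive_Noise_SM_Par parameterises the additive soil-moisture model error: the first column
# appears to be the noise magnitude for each of the 10 soil layers (decreasing with depth) and
# the remaining 10x10 block the inter-layer correlation matrix (unit diagonal).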
Additive_Noise_SM_Par = numpy.zeros((10,11),dtype=numpy.float32)
Additive_Noise_SM_Par[::] = numpy.array([[1.00E-3, 1.0, 0.7, 0.7, 0.6, 0.6, 0.6, 0.6, 0.4, 0.4, 0.4],
[7.00E-4, 0.7, 1.0, 0.7, 0.7, 0.6, 0.6, 0.6, 0.6, 0.4, 0.4],
[5.00E-4, 0.7, 0.7, 1.0, 0.7, 0.7, 0.6, 0.6, 0.6, 0.6, 0.4],
[3.00E-4, 0.6, 0.7, 0.7, 1.0, 0.7, 0.7, 0.6, 0.6, 0.6, 0.6],
[2.00E-5, 0.6, 0.6, 0.7, 0.7, 1.0, 0.7, 0.7, 0.6, 0.6, 0.6],
[2.00E-5, 0.6, 0.6, 0.6, 0.7, 0.7, 1.0, 0.7, 0.7, 0.6, 0.6],
[2.00E-5, 0.6, 0.6, 0.6, 0.6, 0.7, 0.7, 1.0, 0.7, 0.7, 0.6],
[1.50E-6, 0.4, 0.6, 0.6, 0.6, 0.6, 0.7, 0.7, 1.0, 0.7, 0.7],
[1.50E-6, 0.4, 0.4, 0.6, 0.6, 0.6, 0.6, 0.7, 0.7, 1.0, 0.7],
[5.00E-8, 0.4, 0.4, 0.4, 0.6, 0.6, 0.6, 0.6, 0.7, 0.7, 1.0]])
#print numpy.shape(Additive_Noise_SM_Par)
Additive_Noise_SM = numpy.zeros((Ensemble_Number,Soil_Layer_Num-5),dtype=numpy.float32)
Additive_Noise_ST = numpy.zeros((Ensemble_Number,2),dtype=numpy.float32)
Irrigation_Grid_Flag_Array = []
cols1d_ixy = numpy.zeros(column_len, dtype=numpy.integer)
cols1d_jxy = numpy.zeros(column_len, dtype=numpy.integer)
cols1d_ityplun = numpy.zeros(column_len, dtype=numpy.integer)
pfts1d_ixy = numpy.zeros(pft_len, dtype=numpy.integer)
pfts1d_jxy = numpy.zeros(pft_len, dtype=numpy.integer)
pfts1d_itypveg = numpy.zeros(pft_len, dtype=numpy.integer)
pfts1d_ci = numpy.zeros(pft_len, dtype=numpy.integer)
pfts1d_ityplun = numpy.zeros(pft_len, dtype=numpy.integer)
##### Some Index Variables
if Do_DA_Flag:
finidat_name_string = Run_Dir_Home+"_Ens" + str(1) +"/"+ finidat_initial_CLM
print '============================= Open the Model Initial File and Read the Index Data ==========================================='
#------------------------------------------- Read the CLM Initial File
print "Open Initial File:", finidat_name_string
try:
CLM_Initial_File = netCDF4.Dataset(finidat_name_string, 'r')
cols1d_ixy[:] = CLM_Initial_File.variables['cols1d_ixy'][:]
cols1d_jxy[:] = CLM_Initial_File.variables['cols1d_jxy'][:]
cols1d_ityplun[:] = CLM_Initial_File.variables['cols1d_ityplun'][:]
#numpy.savetxt('cols1d_ixy',cols1d_ixy)
#numpy.savetxt('cols1d_jxy',cols1d_jxy)
pfts1d_ixy[:] = CLM_Initial_File.variables['pfts1d_ixy'][:]
pfts1d_jxy[:] = CLM_Initial_File.variables['pfts1d_jxy'][:]
pfts1d_itypveg[:] = CLM_Initial_File.variables['pfts1d_itypveg'][:]
pfts1d_ci[:] = CLM_Initial_File.variables['pfts1d_ci'][:]
pfts1d_ityplun[:] = CLM_Initial_File.variables['pfts1d_ityplun'][:]
CLM_Initial_File.close()
except:
print finidat_name_string,"does not exist!"
os.abort()
Analysis_Variable_Name = ['' for i in range(Dim_CLM_State)]
Soil_Sand_Clay_Sum = numpy.zeros((Soil_Texture_Layer_Opt_Num, Row_Numbers, Col_Numbers), dtype=numpy.float32)
print "################ Go to CLM"
Parameter_Soil_Optimized = numpy.zeros((Ensemble_Number, Dim_Soil_Par, numpy.size(Station_XY)/2), dtype=numpy.float32)
Parameter_PFT_Optimized = numpy.zeros((Ensemble_Number, Dim_PFT_Par, numpy.size(Station_XY)/2), dtype=numpy.float32)
Parameter_Hard_Optimized = numpy.zeros((Ensemble_Number, Dim_Hard_Par, numpy.size(Station_XY)/2), dtype=numpy.float32)
Parameter_Soil_PSRF = numpy.zeros((Dim_Soil_Par, numpy.size(Station_XY)/2), dtype=numpy.float32)
Parameter_PFT_PSRF = numpy.zeros((Dim_PFT_Par, numpy.size(Station_XY)/2), dtype=numpy.float32)
Parameter_Hard_PSRF = numpy.zeros((Dim_Hard_Par, numpy.size(Station_XY)/2), dtype=numpy.float32)
Parameter_Optimization_First_Flag = True
Mean_Index_Prop_Grid_Array_Sys = numpy.zeros((Dim_CLM_State, Row_Numbers, Col_Numbers),dtype=numpy.float32)
Model_Variance = numpy.zeros((Dim_CLM_State, Row_Numbers, Col_Numbers),dtype=numpy.float32)
Mask = numpy.zeros((Dim_CLM_State, 3, Row_Numbers, Col_Numbers),dtype=numpy.float32)
Mask_X = numpy.zeros((Row_Numbers, Col_Numbers),dtype=numpy.float32)
Mask_Y = numpy.zeros((Row_Numbers, Col_Numbers),dtype=numpy.float32)
Mask_X[::] = MODEL_CEA_X
Mask_Y[::] = MODEL_CEA_Y
###################################### CMEM Matrix
Clay_Fraction = []
Sand_Fraction = []
Soil_Density = numpy.zeros((Row_Numbers, Col_Numbers),dtype=numpy.float32)
CMEM_Work_Path_Array = []
Clay_Mat = []
Sand_Mat = []
ECOCVL_Mat = numpy.zeros((Row_Numbers, Col_Numbers), dtype=numpy.float32)
ECOCVH_Mat = numpy.zeros((Row_Numbers, Col_Numbers), dtype=numpy.float32)
ECOTVL_Mat = numpy.zeros((Row_Numbers, Col_Numbers), dtype=numpy.float32)
ECOTVH_Mat = numpy.zeros((Row_Numbers, Col_Numbers), dtype=numpy.float32)
ECOWAT_Mat = numpy.zeros((Row_Numbers, Col_Numbers), dtype=numpy.float32)
# Folder to Save Ensemble Mean
Mean_Dir = Run_Dir_Home+"_Ens_Mean"
if not os.path.exists(Mean_Dir):
os.makedirs(Mean_Dir)
if not os.path.exists(DAS_Output_Path+"Analysis/"+Region_Name):
os.makedirs(DAS_Output_Path+"Analysis/"+Region_Name)
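# The domain is processed in Sub_Block_Ratio_Row x Sub_Block_Ratio_Col sub-blocks; create one
# output folder per block.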
for Block_Index in range(Sub_Block_Ratio_Row*Sub_Block_Ratio_Col):
if not os.path.exists(DAS_Output_Path+"Analysis/"+Region_Name+"/Block_"+str(Block_Index+1)):
os.makedirs(DAS_Output_Path+"Analysis/"+Region_Name+"/Block_"+str(Block_Index+1))
NC_File_In = netCDF4.Dataset(DAS_Data_Path + "SysModel/CLM/tools/"+fsurdat_name, 'r')
File_Format1 = NC_File_In.file_format
NC_File_In.close()
NC_File_In = netCDF4.Dataset(DAS_Data_Path + "SysModel/CLM/tools/"+fatmlndfrc_name, 'r')
File_Format2 = NC_File_In.file_format
NC_File_In.close()
NC_File_In = netCDF4.Dataset(DAS_Data_Path + "SysModel/CLM/inputdata/lnd/clm2/pftdata/"+fpftcon_name, 'r')
File_Format3 = NC_File_In.file_format
NC_File_In.close()
NC_File_In = netCDF4.Dataset(DAS_Data_Path + "SysModel/CLM/inputdata/lnd/clm2/rtmdata/"+rdirc_name, 'r')
File_Format4 = NC_File_In.file_format
NC_File_In.close()
NC_File_In = netCDF4.Dataset(DAS_Data_Path + "SysModel/CLM/inputdata/lnd/clm2/ndepdata/"+fndepdat_name, 'r')
File_Format5 = NC_File_In.file_format
NC_File_In.close()
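# Convert the CLM inputs to netCDF-4 once, on the first run, when all five files are still in a
# netCDF-3 format (classic or 64-bit offset).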
if Def_First_Run == 1 and ((File_Format1 == 'NETCDF3_CLASSIC') or (File_Format1 == 'NETCDF3_64BIT')) and \
((File_Format2 == 'NETCDF3_CLASSIC') or (File_Format2 == 'NETCDF3_64BIT')) and \
((File_Format3 == 'NETCDF3_CLASSIC') or (File_Format3 == 'NETCDF3_64BIT')) and \
((File_Format4 == 'NETCDF3_CLASSIC') or (File_Format4 == 'NETCDF3_64BIT')) and \
((File_Format5 == 'NETCDF3_CLASSIC') or (File_Format5 == 'NETCDF3_64BIT')):
print "Convert netCDF3 input to netCDF4 for CLM"
subprocess.call(DAS_Depends_Path+"bin/nccopy -k 3 "+DAS_Data_Path + "SysModel/CLM/tools/"+fatmlndfrc_name+" "+DAS_Data_Path + "SysModel/CLM/inputdata/"+fatmlndfrc_name,shell=True)
# os.remove(DAS_Data_Path + "SysModel/CLM/tools/"+fatmlndfrc_name)
subprocess.call(DAS_Depends_Path+"bin/nccopy -d 4 "+DAS_Data_Path + "SysModel/CLM/inputdata/"+fatmlndfrc_name+" "+DAS_Data_Path + "SysModel/CLM/tools/"+fatmlndfrc_name,shell=True)
subprocess.call(DAS_Depends_Path+"bin/nccopy -k 3 "+DAS_Data_Path + "SysModel/CLM/tools/"+fsurdat_name+" "+DAS_Data_Path + "SysModel/CLM/inputdata/"+fsurdat_name,shell=True)
# os.remove(DAS_Data_Path + "SysModel/CLM/tools/"+fsurdat_name)
subprocess.call(DAS_Depends_Path+"bin/nccopy -d 4 "+DAS_Data_Path + "SysModel/CLM/inputdata/"+fsurdat_name+" "+DAS_Data_Path + "SysModel/CLM/tools/"+fsurdat_name,shell=True)
subprocess.call(DAS_Depends_Path+"bin/nccopy -k 3 "+DAS_Data_Path + "SysModel/CLM/inputdata/lnd/clm2/pftdata/"+fpftcon_name+" "+DAS_Data_Path + "SysModel/CLM/inputdata/"+fpftcon_name,shell=True)
# os.remove(DAS_Data_Path + "SysModel/CLM/inputdata/lnd/clm2/pftdata/"+fpftcon_name)
subprocess.call(DAS_Depends_Path+"bin/nccopy -d 4 "+DAS_Data_Path + "SysModel/CLM/inputdata/"+fpftcon_name+" "+DAS_Data_Path + "SysModel/CLM/inputdata/lnd/clm2/pftdata/"+fpftcon_name,shell=True)
subprocess.call(DAS_Depends_Path+"bin/nccopy -k 3 "+DAS_Data_Path + "SysModel/CLM/inputdata/lnd/clm2/rtmdata/"+rdirc_name+" "+DAS_Data_Path + "SysModel/CLM/inputdata/"+rdirc_name,shell=True)
# os.remove(DAS_Data_Path + "SysModel/CLM/inputdata/lnd/clm2/rtmdata/"+rdirc_name)
subprocess.call(DAS_Depends_Path+"bin/nccopy -d 4 "+DAS_Data_Path + "SysModel/CLM/inputdata/"+rdirc_name+" "+DAS_Data_Path + "SysModel/CLM/inputdata/lnd/clm2/rtmdata/"+rdirc_name,shell=True)
subprocess.call(DAS_Depends_Path+"bin/nccopy -k 3 "+DAS_Data_Path + "SysModel/CLM/inputdata/lnd/clm2/ndepdata/"+fndepdat_name+" "+DAS_Data_Path + "SysModel/CLM/inputdata/"+fndepdat_name,shell=True)
# os.remove(DAS_Data_Path + "SysModel/CLM/inputdata/lnd/clm2/ndepdata/"+fndepdat_name)
subprocess.call(DAS_Depends_Path+"bin/nccopy -d 4 "+DAS_Data_Path + "SysModel/CLM/inputdata/"+fndepdat_name+" "+DAS_Data_Path + "SysModel/CLM/inputdata/lnd/clm2/ndepdata/"+fndepdat_name,shell=True)
if (Def_First_Run == -1) and Ensemble_Number > 1:
copyLargeFile(NC_FileName_Assimilation_2_Initial_Copy, NC_FileName_Assimilation_2_Initial)
copyLargeFile(NC_FileName_Assimilation_2_Parameter_Copy, NC_FileName_Assimilation_2_Parameter)
copyLargeFile(NC_FileName_Assimilation_2_Parameter_Monthly_Copy, NC_FileName_Assimilation_2_Parameter_Monthly)
copyLargeFile(NC_FileName_Assimilation_2_Bias_Copy, NC_FileName_Assimilation_2_Bias)
copyLargeFile(NC_FileName_Assimilation_2_Bias_Monthly_Copy, NC_FileName_Assimilation_2_Bias_Monthly)
if Def_First_Run == 1:
print "**************** Prepare Initial netCDF file"
if os.path.exists(NC_FileName_Assimilation_2_Constant):
os.remove(NC_FileName_Assimilation_2_Constant)
print 'Write NetCDF File:',NC_FileName_Assimilation_2_Constant
NC_File_Out_Assimilation_2_Constant = netCDF4.Dataset(NC_FileName_Assimilation_2_Constant, 'w', diskless=True, persist=True, format='NETCDF4')
# Define the dimensions of the NetCDF file
NC_File_Out_Assimilation_2_Constant.createDimension('lon', Col_Numbers)
NC_File_Out_Assimilation_2_Constant.createDimension('lat', Row_Numbers)
NC_File_Out_Assimilation_2_Constant.createDimension('Soil_Layer_Num', Soil_Layer_Num)
NC_File_Out_Assimilation_2_Constant.createDimension('ParFlow_Layer_Num', ParFlow_Layer_Num)
NC_File_Out_Assimilation_2_Constant.createDimension('Ensemble_Number', Ensemble_Number)
NC_File_Out_Assimilation_2_Constant.createDimension('Dim_CLM_State', Dim_CLM_State)
NC_File_Out_Assimilation_2_Constant.createDimension('maxpft', maxpft)
NC_File_Out_Assimilation_2_Constant.createVariable('Land_Mask_Data','f4',('lat','lon',),zlib=True,least_significant_digit=None)
NC_File_Out_Assimilation_2_Constant.variables['Land_Mask_Data'][:,:] = Land_Mask_Data
NC_File_Out_Assimilation_2_Constant.createVariable('PCT_LAKE','f4',('lat','lon',),zlib=True,least_significant_digit=None)
NC_File_Out_Assimilation_2_Constant.variables['PCT_LAKE'][:,:] = PCT_LAKE
NC_File_Out_Assimilation_2_Constant.createVariable('PCT_Veg','f4',('lat','lon',),zlib=True,least_significant_digit=None)
NC_File_Out_Assimilation_2_Constant.variables['PCT_Veg'][:,:] = PCT_Veg
NC_File_Out_Assimilation_2_Constant.createVariable('PCT_PFT','f4',('maxpft','lat','lon',),zlib=True,least_significant_digit=None)
NC_File_Out_Assimilation_2_Constant.variables['PCT_PFT'][:,:,:] = PCT_PFT
#print "numpy.mean(numpy.sum(PCT_PFT[:,:,:],axis=0))",numpy.mean(numpy.sum(PCT_PFT[:,:,:],axis=0))
NC_File_Out_Assimilation_2_Constant.createVariable('STD_ELEV','f4',('lat','lon',),zlib=True,least_significant_digit=None)
NC_File_Out_Assimilation_2_Constant.variables['STD_ELEV'][:,:] = STD_ELEV
NC_File_Out_Assimilation_2_Constant.createVariable('DEM_Data','f4',('lat','lon',),zlib=True,least_significant_digit=None)
NC_File_Out_Assimilation_2_Constant.variables['DEM_Data'][:,:] = DEM_Data
NC_File_Out_Assimilation_2_Constant.createVariable('Bulk_Density_Top_Region','f4',('lat','lon',),zlib=True,least_significant_digit=None)
NC_File_Out_Assimilation_2_Constant.variables['Bulk_Density_Top_Region'][:,:] = Bulk_Density_Top_Region
NC_File_Out_Assimilation_2_Constant.createVariable('Bulk_Density_Sub_Region','f4',('lat','lon',),zlib=True,least_significant_digit=None)
NC_File_Out_Assimilation_2_Constant.variables['Bulk_Density_Sub_Region'][:,:] = Bulk_Density_Sub_Region
NC_File_Out_Assimilation_2_Constant.createVariable('Teta_Residual','f4',('Soil_Layer_Num','lat','lon',),zlib=True,least_significant_digit=None)
NC_File_Out_Assimilation_2_Constant.variables['Teta_Residual'][:,:,:] = Teta_Residual
NC_File_Out_Assimilation_2_Constant.createVariable('Teta_Saturated','f4',('Soil_Layer_Num','lat','lon',),zlib=True,least_significant_digit=None)
NC_File_Out_Assimilation_2_Constant.variables['Teta_Saturated'][:,:,:] = Teta_Saturated
NC_File_Out_Assimilation_2_Constant.createVariable('Teta_Field_Capacity','f4',('Soil_Layer_Num','lat','lon',),zlib=True,least_significant_digit=None)
NC_File_Out_Assimilation_2_Constant.variables['Teta_Field_Capacity'][:,:,:] = Teta_Field_Capacity
NC_File_Out_Assimilation_2_Constant.createVariable('Teta_Wilting_Point','f4',('Soil_Layer_Num','lat','lon',),zlib=True,least_significant_digit=None)
NC_File_Out_Assimilation_2_Constant.variables['Teta_Wilting_Point'][:,:,:] = Teta_Wilting_Point
NC_File_Out_Assimilation_2_Constant.createVariable('watopt','f4',('Soil_Layer_Num','lat','lon',),zlib=True,least_significant_digit=None)
NC_File_Out_Assimilation_2_Constant.variables['watopt'][:,:,:] = watopt
NC_File_Out_Assimilation_2_Constant.createVariable('watdry','f4',('Soil_Layer_Num','lat','lon',),zlib=True,least_significant_digit=None)
NC_File_Out_Assimilation_2_Constant.variables['watdry'][:,:,:] = watdry
NC_File_Out_Assimilation_2_Constant.createVariable('watfc','f4',('Soil_Layer_Num','lat','lon',),zlib=True,least_significant_digit=None)
NC_File_Out_Assimilation_2_Constant.variables['watfc'][:,:,:] = watfc
NC_File_Out_Assimilation_2_Constant.createVariable('PFT_Dominant_Index','f4',('lat','lon',),zlib=True,least_significant_digit=None)
NC_File_Out_Assimilation_2_Constant.variables['PFT_Dominant_Index'][:,:] = PFT_Dominant_Index
del Land_Mask_Data,PCT_LAKE,PCT_Veg,PCT_PFT,STD_ELEV,DEM_Data
del Bulk_Density_Top_Region,Bulk_Density_Sub_Region,Teta_Residual,Teta_Saturated,Teta_Field_Capacity,Teta_Wilting_Point,watopt,watdry,watfc
NC_File_Out_Assimilation_2_Constant.createVariable('CLM_Soil_Layer_Thickness','f4',('Soil_Layer_Num','lat','lon',),zlib=True,least_significant_digit=None)
NC_File_Out_Assimilation_2_Constant.createVariable('CLM_Soil_Layer_Thickness_Cumsum','f4',('Soil_Layer_Num','lat','lon',),zlib=True,least_significant_digit=None)
NC_File_Out_Assimilation_2_Constant.createVariable('Soil_Layer_Thickness_Ratio_Moisture','f4',('Soil_Layer_Num','lat','lon',),zlib=True,least_significant_digit=None)
NC_File_Out_Assimilation_2_Constant.createVariable('Soil_Layer_Thickness_Ratio_Temperature','f4',('Soil_Layer_Num','lat','lon',),zlib=True,least_significant_digit=None)
NC_File_Out_Assimilation_2_Constant.sync()
NC_File_Out_Assimilation_2_Constant.close()
NC_File_Out_Assimilation_2_Constant = netCDF4.Dataset(NC_FileName_Assimilation_2_Constant, 'r+', format='NETCDF4')
# Meters
NC_File_Out_Assimilation_2_Constant.variables['CLM_Soil_Layer_Thickness'][0, :, :] = Soil_Thickness[0]
NC_File_Out_Assimilation_2_Constant.variables['CLM_Soil_Layer_Thickness'][1, :, :] = Soil_Thickness[1]
NC_File_Out_Assimilation_2_Constant.variables['CLM_Soil_Layer_Thickness'][2, :, :] = Soil_Thickness[2]
NC_File_Out_Assimilation_2_Constant.variables['CLM_Soil_Layer_Thickness'][3, :, :] = Soil_Thickness[3]
NC_File_Out_Assimilation_2_Constant.variables['CLM_Soil_Layer_Thickness'][4, :, :] = Soil_Thickness[4]
NC_File_Out_Assimilation_2_Constant.variables['CLM_Soil_Layer_Thickness'][5, :, :] = Soil_Thickness[5]
NC_File_Out_Assimilation_2_Constant.variables['CLM_Soil_Layer_Thickness'][6, :, :] = Soil_Thickness[6]
NC_File_Out_Assimilation_2_Constant.variables['CLM_Soil_Layer_Thickness'][7, :, :] = Soil_Thickness[7]
NC_File_Out_Assimilation_2_Constant.variables['CLM_Soil_Layer_Thickness'][8, :, :] = Soil_Thickness[8]
NC_File_Out_Assimilation_2_Constant.variables['CLM_Soil_Layer_Thickness'][9, :, :] = Soil_Thickness[9]
#NC_File_Out_Assimilation_2_Constant.variables['CLM_Soil_Layer_Thickness'][10, :, :] = Soil_Thickness[10]
#NC_File_Out_Assimilation_2_Constant.variables['CLM_Soil_Layer_Thickness'][11, :, :] = Soil_Thickness[11]
#NC_File_Out_Assimilation_2_Constant.variables['CLM_Soil_Layer_Thickness'][12, :, :] = Soil_Thickness[12]
#NC_File_Out_Assimilation_2_Constant.variables['CLM_Soil_Layer_Thickness'][13, :, :] = Soil_Thickness[13]
#NC_File_Out_Assimilation_2_Constant.variables['CLM_Soil_Layer_Thickness'][14, :, :] = Soil_Thickness[14]
NC_File_Out_Assimilation_2_Constant.variables['CLM_Soil_Layer_Thickness_Cumsum'][:,:,:] = numpy.cumsum(numpy.asarray(NC_File_Out_Assimilation_2_Constant.variables['CLM_Soil_Layer_Thickness'][:,:,:]), axis=0)
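# The two Ratio arrays below are vertical weighting profiles that decay with layer thickness
# (exp(-dz) for moisture and, after the normalisation that follows, dz[0]/dz[i] for temperature);
# presumably used to taper analysis increments with depth.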
for Soil_Layer_Index in range(Soil_Layer_Num):
#NC_File_Out_Assimilation_2_Constant.variables['Soil_Layer_Thickness_Ratio_Moisture'][Soil_Layer_Index, :, :] = numpy.exp(-1.0*NC_File_Out_Assimilation_2_Constant.variables['CLM_Soil_Layer_Thickness_Cumsum'][Soil_Layer_Index,:,:])
NC_File_Out_Assimilation_2_Constant.variables['Soil_Layer_Thickness_Ratio_Moisture'][Soil_Layer_Index, :, :] = numpy.exp(-1.0*NC_File_Out_Assimilation_2_Constant.variables['CLM_Soil_Layer_Thickness'][Soil_Layer_Index,:,:])
for Soil_Layer_Index in range(Soil_Layer_Num):
#NC_File_Out_Assimilation_2_Constant.variables['Soil_Layer_Thickness_Ratio_Temperature'][Soil_Layer_Index, :, :] = numpy.exp(-1.0*numpy.log(NC_File_Out_Assimilation_2_Constant.variables['CLM_Soil_Layer_Thickness_Cumsum'][Soil_Layer_Index,:,:]))
NC_File_Out_Assimilation_2_Constant.variables['Soil_Layer_Thickness_Ratio_Temperature'][Soil_Layer_Index, :, :] = numpy.exp(-1.0*numpy.log(NC_File_Out_Assimilation_2_Constant.variables['CLM_Soil_Layer_Thickness'][Soil_Layer_Index,:,:]))
Ratio_Temp = NC_File_Out_Assimilation_2_Constant.variables['Soil_Layer_Thickness_Ratio_Temperature'][0, :, :]
for Soil_Layer_Index in range(Soil_Layer_Num):
NC_File_Out_Assimilation_2_Constant.variables['Soil_Layer_Thickness_Ratio_Temperature'][Soil_Layer_Index, :, :] = \
numpy.asarray(NC_File_Out_Assimilation_2_Constant.variables['Soil_Layer_Thickness_Ratio_Temperature'][Soil_Layer_Index, :, :]) / Ratio_Temp
NC_File_Out_Assimilation_2_Constant.sync()
NC_File_Out_Assimilation_2_Constant.close()
print "**************** Prepare Initial netCDF file"
if os.path.exists(NC_FileName_Assimilation_2_Diagnostic):
os.remove(NC_FileName_Assimilation_2_Diagnostic)
print 'Write NetCDF File:',NC_FileName_Assimilation_2_Diagnostic
NC_File_Out_Assimilation_2_Diagnostic = netCDF4.Dataset(NC_FileName_Assimilation_2_Diagnostic, 'w', diskless=True, persist=True, format='NETCDF4')
# Define the dimensions of the NetCDF file
NC_File_Out_Assimilation_2_Diagnostic.createDimension('lon', Col_Numbers)
NC_File_Out_Assimilation_2_Diagnostic.createDimension('lat', Row_Numbers)
NC_File_Out_Assimilation_2_Diagnostic.createDimension('Soil_Layer_Num', Soil_Layer_Num)
NC_File_Out_Assimilation_2_Diagnostic.createDimension('ParFlow_Layer_Num', ParFlow_Layer_Num)
NC_File_Out_Assimilation_2_Diagnostic.createDimension('Ensemble_Number', Ensemble_Number)
NC_File_Out_Assimilation_2_Diagnostic.createDimension('Dim_CLM_State', Dim_CLM_State)
NC_File_Out_Assimilation_2_Diagnostic.createVariable('Initial_SM_Noise','f4',('Ensemble_Number','lat','lon',),zlib=True,least_significant_digit=None)
NC_File_Out_Assimilation_2_Diagnostic.variables['Initial_SM_Noise'][:,:,:] = 0.0
NC_File_Out_Assimilation_2_Diagnostic.createVariable('Initial_ST_Noise','f4',('Ensemble_Number','lat','lon',),zlib=True,least_significant_digit=None)
NC_File_Out_Assimilation_2_Diagnostic.variables['Initial_ST_Noise'][:,:,:] = 0.0
NC_File_Out_Assimilation_2_Diagnostic.createVariable('Mask_Index','i4',('Dim_CLM_State','lat','lon',),zlib=True,least_significant_digit=None)
NC_File_Out_Assimilation_2_Diagnostic.createVariable('CLM_Soil_Temperature_Ratio_Ensemble_Mat','f4',('Soil_Layer_Num','lat','lon',),zlib=True,least_significant_digit=None)
NC_File_Out_Assimilation_2_Diagnostic.createVariable('CLM_Soil_Moisture_Ratio_Ensemble_Mat_MultiScale','f4',('Soil_Layer_Num','lat','lon','Ensemble_Number',),zlib=True,least_significant_digit=None)
NC_File_Out_Assimilation_2_Diagnostic.createVariable('CLM_Soil_Moisture_Ratio_Ensemble_Mat','f4',('Soil_Layer_Num','lat','lon',),zlib=True,least_significant_digit=None)
NC_File_Out_Assimilation_2_Diagnostic.variables['CLM_Soil_Moisture_Ratio_Ensemble_Mat'][:,:,:] = 1.0
NC_File_Out_Assimilation_2_Diagnostic.variables['CLM_Soil_Temperature_Ratio_Ensemble_Mat'][:,:,:] = 1.0
NC_File_Out_Assimilation_2_Diagnostic.variables['CLM_Soil_Moisture_Ratio_Ensemble_Mat_MultiScale'][:,:,:] = 1.0
NC_File_Out_Assimilation_2_Diagnostic.createVariable('Analysis_Grid_Array','f4',('Ensemble_Number','Dim_CLM_State','lat','lon',),zlib=True,least_significant_digit=None)
NC_File_Out_Assimilation_2_Diagnostic.createVariable('Innovation_State','f4',('Ensemble_Number','Dim_CLM_State','lat','lon',),zlib=True,least_significant_digit=None)
NC_File_Out_Assimilation_2_Diagnostic.createVariable('Increments_State','f4',('Ensemble_Number','Dim_CLM_State','lat','lon',),zlib=True,least_significant_digit=None)
NC_File_Out_Assimilation_2_Diagnostic.createVariable('Observation','f4',('Dim_CLM_State','lat','lon',),zlib=True,least_significant_digit=None)
NC_File_Out_Assimilation_2_Diagnostic.createVariable('CLM_2m_Air_Temperature_Ensemble_Mat','f4',('lat','lon','Ensemble_Number',),zlib=True,least_significant_digit=None)
NC_File_Out_Assimilation_2_Diagnostic.createVariable('CLM_Air_Pressure_Ensemble_Mat','f4',('lat','lon','Ensemble_Number',),zlib=True,least_significant_digit=None)
NC_File_Out_Assimilation_2_Diagnostic.sync()
NC_File_Out_Assimilation_2_Diagnostic.close()
print "**************** Prepare Initial netCDF file"
if os.path.exists(NC_FileName_Assimilation_2_Initial):
os.remove(NC_FileName_Assimilation_2_Initial)
if os.path.exists(NC_FileName_Assimilation_2_Initial_Copy):
os.remove(NC_FileName_Assimilation_2_Initial_Copy)
print 'Write NetCDF File:',NC_FileName_Assimilation_2_Initial
NC_File_Out_Assimilation_2_Initial = netCDF4.Dataset(NC_FileName_Assimilation_2_Initial, 'w', diskless=True, persist=True, format='NETCDF4')
# Define the dimensions of the NetCDF file
NC_File_Out_Assimilation_2_Initial.createDimension('lon', Col_Numbers)
NC_File_Out_Assimilation_2_Initial.createDimension('lat', Row_Numbers)
NC_File_Out_Assimilation_2_Initial.createDimension('Soil_Layer_Num', Soil_Layer_Num)
NC_File_Out_Assimilation_2_Initial.createDimension('Ensemble_Number', Ensemble_Number)
NC_File_Out_Assimilation_2_Initial.createDimension('Dim_CLM_State', Dim_CLM_State)
NC_File_Out_Assimilation_2_Initial.createVariable('Prop_Grid_Array_Sys_Parallel','f4',('Dim_CLM_State','lat','lon',),zlib=True,least_significant_digit=None)
NC_File_Out_Assimilation_2_Initial.createVariable('Prop_Grid_Array_H_Trans_Paralle','f4',('Dim_CLM_State','lat','lon',),zlib=True,least_significant_digit=None)
NC_File_Out_Assimilation_2_Initial.createVariable('CLM_Soil_Moisture_Ensemble_Mat_Parallel','f4',('Soil_Layer_Num','lat','lon',),zlib=True,least_significant_digit=None)
NC_File_Out_Assimilation_2_Initial.createVariable('CLM_Soil_Temperature_Ensemble_Mat_Parallel','f4',('Soil_Layer_Num','lat','lon',),zlib=True,least_significant_digit=None)
NC_File_Out_Assimilation_2_Initial.createVariable('Prop_Grid_Array_Sys','f4',('Ensemble_Number','Dim_CLM_State','lat','lon',),zlib=True,least_significant_digit=None)
NC_File_Out_Assimilation_2_Initial.createVariable('Prop_Grid_Array_H_Trans','f4',('Ensemble_Number','Dim_CLM_State','lat','lon',),zlib=True,least_significant_digit=None)
NC_File_Out_Assimilation_2_Initial.createVariable('CLM_Soil_Moisture_Ensemble_Mat','f4',('Soil_Layer_Num','lat','lon','Ensemble_Number',),zlib=True,least_significant_digit=None)
NC_File_Out_Assimilation_2_Initial.createVariable('CLM_Soil_Temperature_Ensemble_Mat','f4',('Soil_Layer_Num','lat','lon','Ensemble_Number',),zlib=True,least_significant_digit=None)
NC_File_Out_Assimilation_2_Initial.createVariable('CLM_Vegetation_Temperature_Ensemble_Mat','f4',('lat','lon','Ensemble_Number',),zlib=True,least_significant_digit=None)
NC_File_Out_Assimilation_2_Initial.createVariable('CLM_Ground_Temperature_Ensemble_Mat','f4',('lat','lon','Ensemble_Number',),zlib=True,least_significant_digit=None)
NC_File_Out_Assimilation_2_Initial.createVariable('CLM_Snow_Depth_Ensemble_Mat','f4',('lat','lon','Ensemble_Number',),zlib=True,least_significant_digit=None)
NC_File_Out_Assimilation_2_Initial.createVariable('CLM_Snow_Water_Ensemble_Mat','f4',('lat','lon','Ensemble_Number',),zlib=True,least_significant_digit=None)
NC_File_Out_Assimilation_2_Initial.createVariable('CLM_INT_SNOW_Ensemble_Mat','f4',('lat','lon','Ensemble_Number',),zlib=True,least_significant_digit=None)
NC_File_Out_Assimilation_2_Initial.createVariable('CLM_FH2OSFC_Ensemble_Mat','f4',('lat','lon','Ensemble_Number',),zlib=True,least_significant_digit=None)
#onset freezing degree days counters
NC_File_Out_Assimilation_2_Initial.createVariable('CLM_Onset_Freezing_Degree_Days_Counter_Ensemble_Mat','f4',('lat','lon','Ensemble_Number',),zlib=True,least_significant_digit=None)
NC_File_Out_Assimilation_2_Initial.createVariable('Prop_Grid_Array_Sys_parm_infl','f4',('Dim_CLM_State','lat','lon',),zlib=True,least_significant_digit=None)
NC_File_Out_Assimilation_2_Initial.createVariable('CLM_Soil_Moisture_parm_infl','f4',('Soil_Layer_Num','lat','lon',),zlib=True,least_significant_digit=None)
NC_File_Out_Assimilation_2_Initial.createVariable('CLM_Soil_Temperature_parm_infl','f4',('Soil_Layer_Num','lat','lon',),zlib=True,least_significant_digit=None)
NC_File_Out_Assimilation_2_Initial.createVariable('CLM_Vegetation_Temperature_parm_infl','f4',('lat','lon',),zlib=True,least_significant_digit=None)
NC_File_Out_Assimilation_2_Initial.createVariable('CLM_Ground_Temperature_parm_infl','f4',('lat','lon',),zlib=True,least_significant_digit=None)
NC_File_Out_Assimilation_2_Initial.createVariable('CLM_Surface_Temperature_parm_infl','f4',('lat','lon',),zlib=True,least_significant_digit=None)
NC_File_Out_Assimilation_2_Initial.sync()
NC_File_Out_Assimilation_2_Initial.close()
NC_File_Out_Assimilation_2_Initial = netCDF4.Dataset(NC_FileName_Assimilation_2_Initial, 'r+', format='NETCDF4')
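# Initialise the covariance-inflation fields (parm_infl) of the state variables with the
# absolute value of msw_infl[0]; msw_infl[1] is used further below for the parameter fields.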
NC_File_Out_Assimilation_2_Initial.variables['Prop_Grid_Array_Sys_parm_infl'][:,:,:] = numpy.abs(msw_infl[0])
NC_File_Out_Assimilation_2_Initial.variables['CLM_Soil_Moisture_parm_infl'][:,:,:] = numpy.abs(msw_infl[0])
NC_File_Out_Assimilation_2_Initial.variables['CLM_Soil_Temperature_parm_infl'][:,:,:] = numpy.abs(msw_infl[0])
NC_File_Out_Assimilation_2_Initial.variables['CLM_Vegetation_Temperature_parm_infl'][:,:] = numpy.abs(msw_infl[0])
NC_File_Out_Assimilation_2_Initial.variables['CLM_Ground_Temperature_parm_infl'][:,:] = numpy.abs(msw_infl[0])
NC_File_Out_Assimilation_2_Initial.variables['CLM_Surface_Temperature_parm_infl'][:,:] = numpy.abs(msw_infl[0])
NC_File_Out_Assimilation_2_Initial.sync()
NC_File_Out_Assimilation_2_Initial.close()
print "**************** Prepare Parameter netCDF file"
if os.path.exists(NC_FileName_Assimilation_2_Parameter):
os.remove(NC_FileName_Assimilation_2_Parameter)
if os.path.exists(NC_FileName_Assimilation_2_Parameter_Copy):
os.remove(NC_FileName_Assimilation_2_Parameter_Copy)
if os.path.exists(NC_FileName_Assimilation_2_Parameter_Obs_Dim):
os.remove(NC_FileName_Assimilation_2_Parameter_Obs_Dim)
if os.path.exists(NC_FileName_Assimilation_2_Parameter_Monthly):
os.remove(NC_FileName_Assimilation_2_Parameter_Monthly)
if os.path.exists(NC_FileName_Assimilation_2_Parameter_Monthly_Copy):
os.remove(NC_FileName_Assimilation_2_Parameter_Monthly_Copy)
print 'Write NetCDF File:',NC_FileName_Assimilation_2_Parameter
NC_File_Out_Assimilation_2_Parameter = netCDF4.Dataset(NC_FileName_Assimilation_2_Parameter, 'w', diskless=True, persist=True, format='NETCDF4')
# Define the dimensions of the NetCDF file
NC_File_Out_Assimilation_2_Parameter.createDimension('lon', Col_Numbers)
NC_File_Out_Assimilation_2_Parameter.createDimension('lat', Row_Numbers)
NC_File_Out_Assimilation_2_Parameter.createDimension('Soil_Layer_Num', Soil_Layer_Num)
NC_File_Out_Assimilation_2_Parameter.createDimension('Ensemble_Number', Ensemble_Number)
NC_File_Out_Assimilation_2_Parameter.createDimension('Ensemble_Number_Predict', Ensemble_Number_Predict)
NC_File_Out_Assimilation_2_Parameter.createDimension('Dim_CLM_State', Dim_CLM_State)
NC_File_Out_Assimilation_2_Parameter.createDimension('Dim_Soil_Par', Dim_Soil_Par)
NC_File_Out_Assimilation_2_Parameter.createDimension('Dim_PFT_Par', Dim_PFT_Par)
NC_File_Out_Assimilation_2_Parameter.createDimension('maxpft', maxpft)
NC_File_Out_Assimilation_2_Parameter.createVariable('Parameter_Soil_Space_Ensemble','f4',('Ensemble_Number','Dim_Soil_Par','lat','lon',),zlib=True,least_significant_digit=None)
NC_File_Out_Assimilation_2_Parameter.variables['Parameter_Soil_Space_Ensemble'][:,:,:,:] = 0.0
NC_File_Out_Assimilation_2_Parameter.createVariable('Parameter_Soil_Space_parm_infl','f4',('Dim_Soil_Par','lat','lon',),zlib=True,least_significant_digit=None)
NC_File_Out_Assimilation_2_Parameter.variables['Parameter_Soil_Space_parm_infl'][:,:,:] = numpy.abs(msw_infl[1])
NC_File_Out_Assimilation_2_Parameter.createVariable('Parameter_PFT_Space_Ensemble','f4',('Ensemble_Number','Dim_PFT_Par','lat','lon',),zlib=True,least_significant_digit=None)
NC_File_Out_Assimilation_2_Parameter.variables['Parameter_PFT_Space_Ensemble'][:,:,:,:] = 0.0
NC_File_Out_Assimilation_2_Parameter.createVariable('Parameter_PFT_Space_parm_infl','f4',('Dim_PFT_Par','lat','lon',),zlib=True,least_significant_digit=None)
NC_File_Out_Assimilation_2_Parameter.variables['Parameter_PFT_Space_parm_infl'][:,:,:] = numpy.abs(msw_infl[1])
NC_File_Out_Assimilation_2_Parameter.sync()
NC_File_Out_Assimilation_2_Parameter.close()
print 'Write NetCDF File:',NC_FileName_Assimilation_2_Parameter_Obs_Dim
NC_File_Out_Assimilation_2_Parameter_Obs_Dim = netCDF4.Dataset(NC_FileName_Assimilation_2_Parameter_Obs_Dim, 'w', diskless=True, persist=True, format='NETCDF4')
# Define the dimensions of the NetCDF file
NC_File_Out_Assimilation_2_Parameter_Obs_Dim.createDimension('lon', Col_Numbers)
NC_File_Out_Assimilation_2_Parameter_Obs_Dim.createDimension('lat', Row_Numbers)
NC_File_Out_Assimilation_2_Parameter_Obs_Dim.createDimension('Soil_Layer_Num', Soil_Layer_Num)
NC_File_Out_Assimilation_2_Parameter_Obs_Dim.createDimension('ParFlow_Layer_Num', ParFlow_Layer_Num)
NC_File_Out_Assimilation_2_Parameter_Obs_Dim.createDimension('Ensemble_Number', Ensemble_Number)
NC_File_Out_Assimilation_2_Parameter_Obs_Dim.createDimension('Ensemble_Number_Predict', Ensemble_Number_Predict)
NC_File_Out_Assimilation_2_Parameter_Obs_Dim.createDimension('Dim_CLM_State', Dim_CLM_State)
NC_File_Out_Assimilation_2_Parameter_Obs_Dim.createDimension('Dim_Soil_Par', Dim_Soil_Par)
NC_File_Out_Assimilation_2_Parameter_Obs_Dim.createDimension('Dim_Veg_Par', Dim_Veg_Par)
NC_File_Out_Assimilation_2_Parameter_Obs_Dim.createDimension('Dim_PFT_Par', Dim_PFT_Par)
NC_File_Out_Assimilation_2_Parameter_Obs_Dim.createDimension('Dim_Hard_Par', Dim_Hard_Par)
NC_File_Out_Assimilation_2_Parameter_Obs_Dim.createDimension('maxpft', maxpft)
NC_File_Out_Assimilation_2_Parameter_Obs_Dim.createVariable('Parameter_Soil_Space_Ensemble_Obs_Dim','f4',('Ensemble_Number','Dim_Soil_Par','lat','lon',),zlib=True,least_significant_digit=None)
NC_File_Out_Assimilation_2_Parameter_Obs_Dim.variables['Parameter_Soil_Space_Ensemble_Obs_Dim'][:,:,:,:] = 0.0
NC_File_Out_Assimilation_2_Parameter_Obs_Dim.createVariable('Parameter_Veg_Space_Ensemble_Obs_Dim','f4',('Ensemble_Number','Dim_Veg_Par','maxpft',),zlib=True,least_significant_digit=None)
NC_File_Out_Assimilation_2_Parameter_Obs_Dim.variables['Parameter_Veg_Space_Ensemble_Obs_Dim'][:,:,:] = 0.0
NC_File_Out_Assimilation_2_Parameter_Obs_Dim.createVariable('Parameter_Hard_Space_Ensemble_Obs_Dim','f4',('Ensemble_Number','Dim_Hard_Par','lat','lon',),zlib=True,least_significant_digit=None)
NC_File_Out_Assimilation_2_Parameter_Obs_Dim.variables['Parameter_Hard_Space_Ensemble_Obs_Dim'][:,:,:,:] = 0.0
NC_File_Out_Assimilation_2_Parameter_Obs_Dim.createVariable('Parameter_Veg_Space_Ensemble_Matrix_Obs_Dim','f4',('Ensemble_Number','Dim_Veg_Par','lat','lon',),zlib=True,least_significant_digit=None)
NC_File_Out_Assimilation_2_Parameter_Obs_Dim.variables['Parameter_Veg_Space_Ensemble_Matrix_Obs_Dim'][:,:,:,:] = 0.0
NC_File_Out_Assimilation_2_Parameter_Obs_Dim.createVariable('Parameter_PFT_Space_Ensemble_Obs_Dim','f4',('Ensemble_Number','Dim_PFT_Par','lat','lon',),zlib=True,least_significant_digit=None)
NC_File_Out_Assimilation_2_Parameter_Obs_Dim.variables['Parameter_PFT_Space_Ensemble_Obs_Dim'][:,:,:,:] = 0.0
NC_File_Out_Assimilation_2_Parameter_Obs_Dim.sync()
NC_File_Out_Assimilation_2_Parameter_Obs_Dim.close()
if Parameter_Optimization:
if os.path.exists(NC_FileName_Optimized_Parameter):
os.remove(NC_FileName_Optimized_Parameter)
print 'Write NetCDF File:',NC_FileName_Optimized_Parameter
NC_File_Out_Optimized_Parameter = netCDF4.Dataset(NC_FileName_Optimized_Parameter, 'w', diskless=True, persist=True, format='NETCDF4')
# Define the dimensions of the NetCDF file
NC_File_Out_Optimized_Parameter.createDimension('lon', Col_Numbers)
NC_File_Out_Optimized_Parameter.createDimension('lat', Row_Numbers)
NC_File_Out_Optimized_Parameter.createDimension('Soil_Layer_Num', Soil_Layer_Num)
NC_File_Out_Optimized_Parameter.createDimension('Ensemble_Number', Ensemble_Number)
NC_File_Out_Optimized_Parameter.createDimension('Dim_Soil_Par', Dim_Soil_Par)
NC_File_Out_Optimized_Parameter.createDimension('Dim_PFT_Par', Dim_PFT_Par)
NC_File_Out_Optimized_Parameter.createDimension('maxpft', maxpft)
NC_File_Out_Optimized_Parameter.createDimension('Station_Dim',numpy.size(Station_XY)/2)
NC_File_Out_Optimized_Parameter.createDimension('time_soil', None)
NC_File_Out_Optimized_Parameter.createDimension('time_pft', None)
NC_File_Out_Optimized_Parameter.createVariable('Parameter_Soil_Optimized','f4',('time_soil', 'Ensemble_Number','Dim_Soil_Par','Station_Dim',),zlib=True)
NC_File_Out_Optimized_Parameter.createVariable('Parameter_PFT_Optimized','f4',('time_pft', 'Ensemble_Number','Dim_PFT_Par','Station_Dim',),zlib=True)
NC_File_Out_Optimized_Parameter.sync()
NC_File_Out_Optimized_Parameter.close()
Dim_ParFlow_Par = 1
# Get the Perturbed Parameter Space
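# Parameter_Space_Function returns, for each parameter group (soil, vegetation, PFT, "hard"),
# its admissible range and the standard deviations used to perturb it across the ensemble.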
if Ensemble_Number > 1 or Def_Par_Optimized:
Parameter_Range_Soil, Parameter_Range_Veg, Parameter_Range_PFT, Parameter_Range_Hard, Par_Index_Increment_Soil_Par, Par_Soil_Uniform_STD, Par_Veg_Uniform_STD, Par_PFT_Uniform_STD, Par_Hard_Uniform_STD = \
Parameter_Space_Function(Model_Driver, Def_Print, Def_PP,active_nodes_server, job_server_node_array, NC_FileName_Assimilation_2_Parameter, NC_FileName_Parameter_Space_Single, Def_Debug, Def_Region, Def_First_Run, Def_Par_Optimized,
Dim_Soil_Par, Dim_Veg_Par, Dim_PFT_Par, Dim_Hard_Par, Dim_ParFlow_Par, ParFlow_Layer_Num, Start_Month, maxpft, Soil_Texture_Layer_Opt_Num, Row_Numbers, Col_Numbers, Ensemble_Number, Ensemble_Number_Predict, \
Parameter_Optimization, Row_Numbers_String, Col_Numbers_String, Soil_Sand_Clay_Sum, Soil_Par_Sens_Array, Veg_Par_Sens_Array, PFT_Par_Sens_Array, Hard_Par_Sens_Array, PFT_Dominant_Index, topo_slope,\
fsurdat_name, fpftcon_name, NC_FileName_Assimilation_2_Constant, DasPy_Path,
DAS_Data_Path, DAS_Output_Path, Region_Name, Datetime_Start, Datetime_Initial, Low_Ratio_Par, High_Ratio_Par, Low_Ratio_Par_Uniform, High_Ratio_Par_Uniform,
DAS_Depends_Path, Def_ParFor, omp_get_num_procs_ParFor, r)
if Def_First_Run == 1:
Optimized_Parameter_Index = numpy.zeros(4,dtype=numpy.integer)
Bias_Record_Index = numpy.zeros(2,dtype=numpy.integer)
Soil_Moisture_Diff_Index = 0
else:
Optimized_Parameter_Index = numpy.zeros(4,dtype=numpy.integer)
Bias_Record_Index = numpy.zeros(2,dtype=numpy.integer)
if Parameter_Optimization:
NC_File_Out_Optimized_Parameter = netCDF4.Dataset(NC_FileName_Optimized_Parameter, 'r')
Optimized_Parameter_Index[0] = len(NC_File_Out_Optimized_Parameter.dimensions['time_soil']) - 1
Optimized_Parameter_Index[2] = len(NC_File_Out_Optimized_Parameter.dimensions['time_pft']) - 1
NC_File_Out_Optimized_Parameter.close()
if (numpy.size(numpy.where(Bias_Estimation_Option_Model == 1)) >= 1) or (numpy.size(numpy.where(Bias_Estimation_Option_Obs == 1)) >= 1):
NC_File_Out_Estimated_Bias = netCDF4.Dataset(NC_FileName_Estimated_Bias, 'r')
Bias_Record_Index[0] = len(NC_File_Out_Estimated_Bias.dimensions['time']) - 1
Bias_Record_Index[1] = len(NC_File_Out_Estimated_Bias.dimensions['time']) - 1
NC_File_Out_Estimated_Bias.close()
# dominik 18/07/2016
#NC_File_Out_Soil_Moisture_Difference = netCDF4.Dataset(NC_FileName_Soil_Moisture_Difference, 'r')
#if len(NC_File_Out_Soil_Moisture_Difference.dimensions['time']) >= 1:
# Soil_Moisture_Diff_Index = len(NC_File_Out_Soil_Moisture_Difference.dimensions['time']) - 1
#NC_File_Out_Soil_Moisture_Difference.close()
NC_File_Out_Assimilation_2_Constant = netCDF4.Dataset(NC_FileName_Assimilation_2_Constant, 'r')
CLM_Soil_Layer_Thickness = numpy.asarray(NC_File_Out_Assimilation_2_Constant.variables['CLM_Soil_Layer_Thickness'][:,:,:])
NC_File_Out_Assimilation_2_Constant.close()
Analysis_Grid = numpy.zeros((Dim_CLM_State, Row_Numbers, Col_Numbers),dtype=numpy.float32)
Localization_Map_Mask = numpy.zeros((Dim_CLM_State, Row_Numbers, Col_Numbers),dtype=numpy.float32)
ObsModel_Mat_Masked = numpy.zeros((Row_Numbers, Col_Numbers),dtype=numpy.float32)
else:
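# Non-root MPI ranks only allocate matching (uninitialised) buffers here; their contents are
# filled in place by the collective broadcasts below.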
LONGXY_Mat = numpy.empty((Row_Numbers, Col_Numbers), dtype=numpy.float32)
LATIXY_Mat = numpy.empty((Row_Numbers, Col_Numbers), dtype=numpy.float32)
Mean_Index_Prop_Grid_Array_Sys = numpy.empty((Dim_CLM_State, Row_Numbers, Col_Numbers),dtype=numpy.float32)
Model_Variance = numpy.empty((Dim_CLM_State, Row_Numbers, Col_Numbers),dtype=numpy.float32)
Mask = numpy.empty((Dim_CLM_State, 3, Row_Numbers, Col_Numbers),dtype=numpy.float32)
Mask_Index = numpy.empty((Dim_CLM_State, Row_Numbers, Col_Numbers), dtype=numpy.bool)
Mask_X = numpy.empty((Row_Numbers, Col_Numbers),dtype=numpy.float32)
Mask_Y = numpy.empty((Row_Numbers, Col_Numbers),dtype=numpy.float32)
Mean_Dir = None
Bias_Remove_Start_Time_Array = None
Observation_Bias_Initialization_Flag = numpy.empty((Dim_CLM_State, Dim_Observation_Quantity, Ensemble_Number), dtype=numpy.float32)
Observation_Bias_Optimized = numpy.empty((Ensemble_Number, Dim_CLM_State, Dim_Observation_Quantity, numpy.size(Station_XY) / 2), dtype=numpy.float32)
Model_Bias_Range = numpy.empty((Dim_CLM_State, 2), dtype=numpy.float32)
Observation_Bias_Range = numpy.empty((Dim_CLM_State, Dim_Observation_Quantity, 2), dtype=numpy.float32)
Model_Bias_Range_STD = numpy.empty((Dim_CLM_State, 2), dtype=numpy.float32)
Observation_Bias_Range_STD = numpy.empty((Dim_CLM_State, Dim_Observation_Quantity, 2), dtype=numpy.float32)
Model_Bias_STD = numpy.empty(Dim_CLM_State, dtype=numpy.float32)
Observation_Bias_STD = numpy.empty((Dim_CLM_State,Dim_Observation_Quantity), dtype=numpy.float32)
Model_State_Inflation_Range = numpy.empty((Dim_CLM_State,2), dtype=numpy.float32)
Model_State_Inflation_Range_STD = numpy.empty(Dim_CLM_State, dtype=numpy.float32)
Additive_Noise_SM_Par = numpy.empty((10,11), dtype=numpy.float32)
Additive_Noise_SM = numpy.empty((Ensemble_Number, Soil_Layer_Num - 5), dtype=numpy.float32)
Additive_Noise_ST = numpy.empty((Ensemble_Number, 2), dtype=numpy.float32)
Irrigation_Grid_Flag_Array = None
cols1d_ixy = numpy.empty(column_len, dtype=numpy.integer)
cols1d_jxy = numpy.empty(column_len, dtype=numpy.integer)
cols1d_ityplun = numpy.empty(column_len, dtype=numpy.integer)
pfts1d_ixy = numpy.empty(pft_len, dtype=numpy.integer)
pfts1d_jxy = numpy.empty(pft_len, dtype=numpy.integer)
pfts1d_itypveg = numpy.empty(pft_len, dtype=numpy.integer)
pfts1d_ci = numpy.empty(pft_len, dtype=numpy.integer)
pfts1d_ityplun = numpy.empty(pft_len, dtype=numpy.integer)
PCT_PFT_High = numpy.empty((Row_Numbers, Col_Numbers), dtype=numpy.float32)
PCT_PFT_Low = numpy.empty((Row_Numbers, Col_Numbers), dtype=numpy.float32)
PCT_PFT_WATER = numpy.empty((Row_Numbers, Col_Numbers), dtype=numpy.float32)
ECOCVL_Mat = numpy.empty((Row_Numbers, Col_Numbers), dtype=numpy.float32)
ECOCVH_Mat = numpy.empty((Row_Numbers, Col_Numbers), dtype=numpy.float32)
ECOTVL_Mat = numpy.empty((Row_Numbers, Col_Numbers), dtype=numpy.float32)
ECOTVH_Mat = numpy.empty((Row_Numbers, Col_Numbers), dtype=numpy.float32)
ECOWAT_Mat = numpy.empty((Row_Numbers, Col_Numbers), dtype=numpy.float32)
Soil_Density = numpy.empty((Row_Numbers, Col_Numbers),dtype=numpy.float32)
CMEM_Work_Path_Array = None
Analysis_Variable_Name = None
Soil_Sand_Clay_Sum = numpy.empty((Soil_Texture_Layer_Opt_Num, Row_Numbers, Col_Numbers), dtype=numpy.float32)
Parameter_Range_Soil = numpy.empty((2, Dim_Soil_Par),dtype=numpy.float32)
Parameter_Range_Veg = numpy.empty((2,Dim_Veg_Par),dtype=numpy.float32)
Parameter_Range_PFT = numpy.empty((2,Dim_PFT_Par),dtype=numpy.float32)
Parameter_Range_Hard = numpy.empty((2, Dim_Hard_Par),dtype=numpy.float32)
Par_Index_Increment_Soil_Par = numpy.empty((2, Dim_Soil_Par),dtype=numpy.float32)
Par_Soil_Uniform_STD = numpy.empty(Dim_Soil_Par,dtype=numpy.float32)
Par_Veg_Uniform_STD = numpy.empty(Dim_Veg_Par,dtype=numpy.float32)
Par_PFT_Uniform_STD = numpy.empty(Dim_PFT_Par,dtype=numpy.float32)
Par_Hard_Uniform_STD = numpy.empty(Dim_Hard_Par,dtype=numpy.float32)
Optimized_Parameter_Index = numpy.empty(4,dtype=numpy.integer)
Bias_Record_Index = numpy.empty(2,dtype=numpy.integer)
Soil_Moisture_Diff_Index = None
COSMOS_Circle_Array = None
COSMOS_Circle_Index_Array = None
COSMOS_Circle_Num_Array = None
CLM_Soil_Layer_Thickness = numpy.empty((Soil_Layer_Num, Row_Numbers, Col_Numbers),dtype=numpy.float32)
Analysis_Grid = numpy.empty((Dim_CLM_State, Row_Numbers, Col_Numbers),dtype=numpy.float32)
Localization_Map_Mask = numpy.empty((Dim_CLM_State, Row_Numbers, Col_Numbers),dtype=numpy.float32)
ObsModel_Mat_Masked = numpy.empty((Row_Numbers, Col_Numbers),dtype=numpy.float32)
Two_Step_Bias_Estimation_Flag = 0 # whether the bias estimation has been done
Two_Step_Bias_Estimation_Active = False # whether it is in the process of bias estimation step
Def_First_Run_Bias = 1
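# When Def_PP == 2, the arrays initialized above only carry meaningful values on rank 0; the block
# below synchronizes them across MPI ranks. Upper-case Bcast fills existing numpy buffers in place
# (passed as [array, MPI datatype]), whereas lower-case bcast pickles generic Python objects and
# returns the broadcast value, so its result has to be assigned on the receiving ranks.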
if Def_PP == 2:
Mean_Dir = mpi4py_comm.bcast(Mean_Dir)
Bias_Remove_Start_Time_Array = mpi4py_comm.bcast(Bias_Remove_Start_Time_Array)
mpi4py_comm.Bcast([Mean_Index_Prop_Grid_Array_Sys,MPI.FLOAT])
mpi4py_comm.Bcast([Model_Variance,MPI.FLOAT])
mpi4py_comm.Bcast([Mask,MPI.FLOAT])
mpi4py_comm.Bcast([Mask_Index,MPI.BOOL])
mpi4py_comm.Bcast([Mask_X,MPI.FLOAT])
mpi4py_comm.Bcast([Mask_Y,MPI.FLOAT])
mpi4py_comm.Bcast([LONGXY_Mat,MPI.FLOAT])
mpi4py_comm.Bcast([LATIXY_Mat,MPI.FLOAT])
mpi4py_comm.Bcast([Observation_Bias_Initialization_Flag,MPI.FLOAT])
mpi4py_comm.Bcast([Observation_Bias_Optimized,MPI.FLOAT])
mpi4py_comm.Bcast([Model_Bias_Range,MPI.FLOAT])
mpi4py_comm.Bcast([Observation_Bias_Range,MPI.FLOAT])
mpi4py_comm.Bcast([Model_Bias_Range_STD,MPI.FLOAT])
mpi4py_comm.Bcast([Observation_Bias_Range_STD,MPI.FLOAT])
mpi4py_comm.Bcast([Model_Bias_STD,MPI.FLOAT])
mpi4py_comm.Bcast([Observation_Bias_STD,MPI.FLOAT])
mpi4py_comm.Bcast([Model_State_Inflation_Range,MPI.FLOAT])
mpi4py_comm.Bcast([Model_State_Inflation_Range_STD,MPI.FLOAT])
mpi4py_comm.Bcast([Additive_Noise_SM_Par,MPI.FLOAT])
mpi4py_comm.Bcast([Additive_Noise_SM,MPI.FLOAT])
mpi4py_comm.Bcast([Additive_Noise_ST,MPI.FLOAT])
Irrigation_Grid_Flag_Array = mpi4py_comm.bcast(Irrigation_Grid_Flag_Array)
mpi4py_comm.Bcast([cols1d_ixy,MPI.INT])
mpi4py_comm.Bcast([cols1d_jxy,MPI.INT])
mpi4py_comm.Bcast([cols1d_ityplun,MPI.INT])
mpi4py_comm.Bcast([pfts1d_ixy,MPI.INT])
mpi4py_comm.Bcast([pfts1d_jxy,MPI.INT])
mpi4py_comm.Bcast([pfts1d_itypveg,MPI.INT])
mpi4py_comm.Bcast([pfts1d_ci,MPI.INT])
mpi4py_comm.Bcast([pfts1d_ityplun,MPI.INT])
mpi4py_comm.Bcast([PCT_PFT_High,MPI.FLOAT])
mpi4py_comm.Bcast([PCT_PFT_Low,MPI.FLOAT])
mpi4py_comm.Bcast([PCT_PFT_WATER,MPI.FLOAT])
mpi4py_comm.Bcast([ECOCVL_Mat,MPI.FLOAT])
mpi4py_comm.Bcast([ECOCVH_Mat,MPI.FLOAT])
mpi4py_comm.Bcast([ECOTVL_Mat,MPI.FLOAT])
mpi4py_comm.Bcast([ECOTVH_Mat,MPI.FLOAT])
mpi4py_comm.Bcast([ECOWAT_Mat,MPI.FLOAT])
mpi4py_comm.Bcast([Soil_Density,MPI.FLOAT])
CMEM_Work_Path_Array = mpi4py_comm.bcast(CMEM_Work_Path_Array)
Analysis_Variable_Name = mpi4py_comm.bcast(Analysis_Variable_Name)
mpi4py_comm.Bcast([Soil_Sand_Clay_Sum,MPI.FLOAT])
mpi4py_comm.Bcast([Parameter_Range_Soil,MPI.FLOAT])
mpi4py_comm.Bcast([Parameter_Range_Veg,MPI.FLOAT])
mpi4py_comm.Bcast([Parameter_Range_PFT,MPI.FLOAT])
mpi4py_comm.Bcast([Parameter_Range_Hard,MPI.FLOAT])
mpi4py_comm.Bcast([Par_Index_Increment_Soil_Par,MPI.FLOAT])
mpi4py_comm.Bcast([Par_Soil_Uniform_STD,MPI.FLOAT])
mpi4py_comm.Bcast([Par_Veg_Uniform_STD,MPI.FLOAT])
mpi4py_comm.Bcast([Par_PFT_Uniform_STD,MPI.FLOAT])
mpi4py_comm.Bcast([Par_Hard_Uniform_STD,MPI.FLOAT])
mpi4py_comm.Bcast([Optimized_Parameter_Index,MPI.INT])
mpi4py_comm.Bcast([Bias_Record_Index,MPI.INT])
# dominik: 18/07/2016
#Soil_Moisture_Diff_Index = mpi4py_comm.bcast(Soil_Moisture_Diff_Index)
mpi4py_comm.Bcast([CLM_Soil_Layer_Thickness,MPI.FLOAT])
mpi4py_comm.Bcast([Analysis_Grid,MPI.FLOAT])
mpi4py_comm.Bcast([Localization_Map_Mask,MPI.FLOAT])
mpi4py_comm.Bcast([ObsModel_Mat_Masked,MPI.FLOAT])
COSMOS_Circle_Array = mpi4py_comm.bcast(COSMOS_Circle_Array)
COSMOS_Circle_Index_Array = mpi4py_comm.bcast(COSMOS_Circle_Index_Array)
COSMOS_Circle_Num_Array = mpi4py_comm.bcast(COSMOS_Circle_Num_Array)
if Def_PP == 2:
mpi4py_comm.barrier()
mpi4py_comm.Barrier()
#================================================= Do Data Assimilation
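# Assimilation loop outline: rank 0 reads the observation time table and broadcasts its lines to
# all ranks; the while-loop below then steps through the table, gathers all observations sharing
# the same stop time, runs the model ensemble forward to that time, and performs parameter and/or
# state updates before advancing the model clock.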
if Do_DA_Flag:
if mpi4py_rank == 0:
#---------------------------------------------- Read Observation Time -----------------------------
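# Format of the observation time file (whitespace-separated, one header line): columns 0-5 hold
# sensor type, sensor variable, quantity, variable ID, QC ID and sensor resolution; columns 6-10
# hold the observation stop time (year, month, day, hour, minute); column 11 holds the
# observation file name.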
Observation_Time_File = open(Observation_Time_File_Path + '/Observation_Time_empty.txt', 'r')
#Observation_Time_File = open(Observation_Time_File_Path + '/Observation_Time_TB_an_2010_2015.txt', 'r')
#Observation_Time_File = open(Observation_Time_File_Path + '/SMAPCDF_Observation_Time_TB_an_2010_2016_Oct_Mbidgee.txt', 'r')
#Observation_Time_File = open(Observation_Time_File_Path + '/smap_masked.txt','r')
#Observation_Time_File = open(Observation_Time_File_Path + '/SMOS_Gabrielle_short_SMAPclim.txt','r')
#LAI study
#Observation_Time_File = open(Observation_Time_File_Path + '/LAI_dyn.txt', 'r')
Observation_Time_File_Header = Observation_Time_File.readline()
print "Observation_Time_File_Header",Observation_Time_File_Header
Observation_Time_Lines = Observation_Time_File.readlines()
else:
Observation_Time_Lines = None
if Def_PP == 2:
Observation_Time_Lines = mpi4py_comm.bcast(Observation_Time_Lines)
#print len(Observation_Time_Lines)
Observation_Index = 0
Forward = True
while Observation_Index < len(Observation_Time_Lines):
if mpi4py_rank == 0:
#print Observation_Index,len(Observation_Time_Lines)
# Find all observations valid at the same model time
Variable_Assimilation_Flag = numpy.zeros(Dim_CLM_State)
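# The Forward loop collects consecutive table lines whose rounded stop times are identical, so
# that all observations valid at the same model time are assimilated together; it stops when the
# next line has a different stop time or the end of the table is reached.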
while Forward:
print "+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++"
print "+++++++++++++++ Find Synchronous Observation ++++++++++++++++++++++++++++++++++++"
print "+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++"
Observation_Time_Line = Observation_Time_Lines[Observation_Index]
Observation_Time_Line_Split = string.split(Observation_Time_Line)
print Observation_Time_Line_Split
# Model Stop Time
Stop_Year = Observation_Time_Line_Split[6].zfill(4)
Stop_Month = Observation_Time_Line_Split[7].zfill(2)
Stop_Day = Observation_Time_Line_Split[8].zfill(2)
Stop_Hour = Observation_Time_Line_Split[9].zfill(2)
Stop_Minute = Observation_Time_Line_Split[10].zfill(2)
Datetime_Stop_First = datetime.datetime(string.atoi(Stop_Year), string.atoi(Stop_Month), string.atoi(Stop_Day), string.atoi(Stop_Hour), 00)
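# Observation minutes are rounded onto the hourly model grid: minutes <= 30 round down to the
# full hour, minutes > 30 round up to the next hour, with day/month/year rollover handled
# explicitly below.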
if string.atoi(Stop_Minute) >= 60 or string.atoi(Stop_Minute) < 0:
sys.exit('The Observation is Wrong in ' + Stop_Year + ' ' + Stop_Month + ' ' + Stop_Day + ' ' + Stop_Hour + ' ' + Stop_Minute)
elif string.atoi(Stop_Minute) <= 30:
Datetime_Stop = datetime.datetime(string.atoi(Stop_Year), string.atoi(Stop_Month), string.atoi(Stop_Day), string.atoi(Stop_Hour), 00)
else:
if (string.atoi(Stop_Hour) + 1 < 24):
Datetime_Stop = datetime.datetime(string.atoi(Stop_Year), string.atoi(Stop_Month), string.atoi(Stop_Day), string.atoi(Stop_Hour) + 1, 00)
elif (string.atoi(Stop_Hour) + 1 == 24) and (string.atoi(Stop_Day) + 1 <= Num_of_Days_Monthly[string.atoi(Stop_Month) - 1]):
Datetime_Stop = datetime.datetime(string.atoi(Stop_Year), string.atoi(Stop_Month), string.atoi(Stop_Day) + 1, 00, 00)
elif (string.atoi(Stop_Hour) + 1 == 24) and (string.atoi(Stop_Day) + 1 > Num_of_Days_Monthly[string.atoi(Stop_Month) - 1]) and (string.atoi(Stop_Month) + 1 <= 12):
Datetime_Stop = datetime.datetime(string.atoi(Stop_Year), string.atoi(Stop_Month) + 1, 01, 00, 00)
elif (string.atoi(Stop_Hour) + 1 == 24) and (string.atoi(Stop_Day) + 1 > Num_of_Days_Monthly[string.atoi(Stop_Month) - 1]) and (string.atoi(Stop_Month) + 1 > 12):
Datetime_Stop = datetime.datetime(string.atoi(Stop_Year) + 1, 01, 01, 00, 00)
#Datetime_Stop_Init = datetime.datetime(Datetime_Stop.year-1,12,31,23,00)
Datetime_Stop_Init = datetime.datetime(Datetime_Stop.year, Datetime_Stop.month, Datetime_Stop.day, 00, 00)
print "Datetime_Start, Datetime_Stop, Datetime_Stop_Init, (Datetime_Stop - Datetime_Stop_Init).seconds"
print Datetime_Start, Datetime_Stop, Datetime_Stop_Init, (Datetime_Stop - Datetime_Stop_Init).seconds
# CESM 1.1.1 has a calendar bug, so 02-29 is skipped for data assimilation
if (calendar.isleap(string.atoi(Stop_Year)) and (datetime.datetime(string.atoi(Stop_Year),string.atoi(Stop_Month),string.atoi(Stop_Day)) == datetime.datetime(string.atoi(Stop_Year),2,29))) or (Datetime_Stop <= Datetime_Start):
Observation_Index = Observation_Index + 1
if Observation_Index >= len(Observation_Time_Lines):
Forward = False
break
else:
continue
print "Observation_Index",Observation_Index,"len(Observation_Time_Lines)",len(Observation_Time_Lines)
#sys.exit()
# Determine the sensor type
SensorType.append(Observation_Time_Line_Split[0]) # MODIS AMSR-E SMOS ASCAT ASAR
SensorVariable.append(Observation_Time_Line_Split[1])
SensorQuantity.append(Observation_Time_Line_Split[2])
Variable_ID.append(Observation_Time_Line_Split[3])
QC_ID.append(Observation_Time_Line_Split[4])
SensorResolution.append(string.atof(Observation_Time_Line_Split[5]))
Observation_File_Name.append(Observation_Time_Line_Split[11])
SensorVariable_Temp = Observation_Time_Line_Split[1]
DA_Flag = 0 # if DA_Flag=1, Do Data Assimilation
print SensorVariable_Temp,"Variable_List.index(SensorVariable_Temp)",Variable_List.index(SensorVariable_Temp)
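# Albedo observations switch on four state flags (indices 4-7); every other sensor variable maps
# to its own position in Variable_List.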
if SensorVariable_Temp == 'Albedo':
Variable_Assimilation_Flag[4] = 1
Variable_Assimilation_Flag[5] = 1
Variable_Assimilation_Flag[6] = 1
Variable_Assimilation_Flag[7] = 1
elif Variable_List.index(SensorVariable_Temp) >= 0:
Variable_Assimilation_Flag[Variable_List.index(SensorVariable_Temp)] = 1
else:
print "Wrong SensorVariable_Temp Specification",SensorVariable_Temp
# Record the last bias-removal time of each observation type
if Def_First_Run_Bias or (Bias_Remove_Start_Time_Array[Variable_List.index(SensorVariable_Temp)] == ''):
Bias_Remove_Start_Time_Array[Variable_List.index(SensorVariable_Temp)] = Datetime_Start
print ""
print "-----Bias_Remove_Start_Time_Array",Bias_Remove_Start_Time_Array
print ""
if Def_Print:
print "------------Variable_Assimilation_Flag",Variable_Assimilation_Flag
if numpy.sum(Variable_Assimilation_Flag) > 0:
DA_Flag = 1
if Def_Print:
print "len(SensorType),len(SensorVariable),len(SensorQuantity),len(Variable_ID)",len(SensorType),len(SensorVariable),len(SensorQuantity),len(Variable_ID)
if Observation_Index + 1 >= len(Observation_Time_Lines):
Forward = False
break
else:
Observation_Index = Observation_Index + 1
Observation_Time_Line = Observation_Time_Lines[Observation_Index]
Observation_Time_Line_Split = string.split(Observation_Time_Line)
#print Observation_Time_Line_Split
Stop_Year = Observation_Time_Line_Split[6].zfill(4)
Stop_Month = Observation_Time_Line_Split[7].zfill(2)
Stop_Day = Observation_Time_Line_Split[8].zfill(2)
Stop_Hour = Observation_Time_Line_Split[9].zfill(2)
Stop_Minute = Observation_Time_Line_Split[10].zfill(2)
Datetime_Stop_Second = datetime.datetime(string.atoi(Stop_Year), string.atoi(Stop_Month), string.atoi(Stop_Day), string.atoi(Stop_Hour), 00)
#print Datetime_Stop_First,Datetime_Stop_Second
if Datetime_Stop_First == Datetime_Stop_Second:
Forward = True
else:
Observation_Index = Observation_Index - 1
Observation_Time_Line = Observation_Time_Lines[Observation_Index]
Observation_Time_Line_Split = string.split(Observation_Time_Line)
#print Observation_Time_Line_Split
Stop_Year = Observation_Time_Line_Split[6].zfill(4)
Stop_Month = Observation_Time_Line_Split[7].zfill(2)
Stop_Day = Observation_Time_Line_Split[8].zfill(2)
Stop_Hour = Observation_Time_Line_Split[9].zfill(2)
Stop_Minute = Observation_Time_Line_Split[10].zfill(2)
break
else:
Variable_Assimilation_Flag = None
Stop_Year = None
Stop_Month = None
Stop_Day = None
Stop_Hour = None
Stop_Minute = None
Datetime_Stop = None
Datetime_Stop_Init = None
Observation_Index = None
SensorType = None
SensorVariable = None
SensorQuantity = None
Variable_ID = None
QC_ID = None
SensorResolution = None
Observation_File_Name = None
SensorVariable_Temp = None
DA_Flag = None
if Def_PP == 2:
mpi4py_comm.barrier()
mpi4py_comm.Barrier()
Variable_Assimilation_Flag = mpi4py_comm.bcast(Variable_Assimilation_Flag)
Stop_Year = mpi4py_comm.bcast(Stop_Year)
Stop_Month = mpi4py_comm.bcast(Stop_Month)
Stop_Day = mpi4py_comm.bcast(Stop_Day)
Stop_Hour = mpi4py_comm.bcast(Stop_Hour)
Stop_Minute = mpi4py_comm.bcast(Stop_Minute)
Datetime_Stop = mpi4py_comm.bcast(Datetime_Stop)
Datetime_Stop_Init = mpi4py_comm.bcast(Datetime_Stop_Init)
Observation_Index = mpi4py_comm.bcast(Observation_Index)
SensorType = mpi4py_comm.bcast(SensorType)
SensorVariable = mpi4py_comm.bcast(SensorVariable)
SensorQuantity = mpi4py_comm.bcast(SensorQuantity)
Variable_ID = mpi4py_comm.bcast(Variable_ID)
QC_ID = mpi4py_comm.bcast(QC_ID)
SensorResolution = mpi4py_comm.bcast(SensorResolution)
Observation_File_Name = mpi4py_comm.bcast(Observation_File_Name)
Dim_Obs_Type = len(SensorType)
if mpi4py_rank == 0:
print "******************************************* Dim_Obs_Type",Dim_Obs_Type
Month_String = "-" + Stop_Month.zfill(2)
Day_String = "-" + Stop_Day.zfill(2)
Hour_String = "-" + Stop_Hour.zfill(2)
DateString_Plot = Stop_Year + Month_String + Day_String + Hour_String
if mpi4py_rank == 0:
print "#######################Initial file is",finidat_initial_CLM
stop_tod_string = str((Datetime_Stop - Datetime_Stop_Init).seconds).zfill(5)
history_file_name = Region_Name + '.clm2.h0.' + Stop_Year + '-' + Stop_Month + '-' + Stop_Day + '-' + stop_tod_string + '.nc'
finidat_name = Region_Name + '.clm2.r.' + Stop_Year + '-' + Stop_Month + '-' + Stop_Day + '-' + stop_tod_string + '.nc'
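# Observation-related arrays are allocated as zeros on rank 0 and as uninitialized buffers on the
# other ranks; the Bcast calls further below overwrite the remote buffers with rank 0's values.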
if mpi4py_rank == 0:
print "*************************************************Prepare Observation Matrix***************************************************************"
Observation_Matrix = numpy.zeros((len(SensorType), Row_Numbers, Col_Numbers),dtype=numpy.float32)
Observation_Variance = numpy.zeros((len(SensorType), Row_Numbers, Col_Numbers),dtype=numpy.float32)
Observation_Latitude = numpy.zeros((len(SensorType), Row_Numbers, Col_Numbers),dtype=numpy.float32)
Observation_Longitude = numpy.zeros((len(SensorType), Row_Numbers, Col_Numbers),dtype=numpy.float32)
Observation_View_Zenith_Angle = numpy.zeros((len(SensorType), Row_Numbers, Col_Numbers),dtype=numpy.float32)
Observation_View_Time = numpy.zeros((len(SensorType), Row_Numbers, Col_Numbers),dtype=numpy.float32)
Observation_NLons = numpy.zeros(len(SensorType),dtype=numpy.float32)
Observation_NLats = numpy.zeros(len(SensorType),dtype=numpy.float32)
Observation_X_Left = numpy.zeros(len(SensorType),dtype=numpy.float32)
Observation_X_Right = numpy.zeros(len(SensorType),dtype=numpy.float32)
Observation_Y_Lower = numpy.zeros(len(SensorType),dtype=numpy.float32)
Observation_Y_Upper = numpy.zeros(len(SensorType),dtype=numpy.float32)
Observation_Misc = numpy.zeros((len(SensorType), 10, Row_Numbers, Col_Numbers),dtype=numpy.float32)
Observation_Corelation_Par = numpy.zeros((len(SensorType), 5, 2),dtype=numpy.float32)
else:
Observation_Matrix = numpy.empty((len(SensorType), Row_Numbers, Col_Numbers),dtype=numpy.float32)
Observation_Variance = numpy.empty((len(SensorType), Row_Numbers, Col_Numbers),dtype=numpy.float32)
Observation_Latitude = numpy.empty((len(SensorType), Row_Numbers, Col_Numbers),dtype=numpy.float32)
Observation_Longitude = numpy.empty((len(SensorType), Row_Numbers, Col_Numbers),dtype=numpy.float32)
Observation_View_Zenith_Angle = numpy.empty((len(SensorType), Row_Numbers, Col_Numbers),dtype=numpy.float32)
Observation_View_Time = numpy.empty((len(SensorType), Row_Numbers, Col_Numbers),dtype=numpy.float32)
Observation_NLons = numpy.empty(len(SensorType),dtype=numpy.float32)
Observation_NLats = numpy.empty(len(SensorType),dtype=numpy.float32)
Observation_X_Left = numpy.empty(len(SensorType),dtype=numpy.float32)
Observation_X_Right = numpy.empty(len(SensorType),dtype=numpy.float32)
Observation_Y_Lower = numpy.empty(len(SensorType),dtype=numpy.float32)
Observation_Y_Upper = numpy.empty(len(SensorType),dtype=numpy.float32)
Observation_Misc = numpy.empty((len(SensorType), 10, Row_Numbers, Col_Numbers),dtype=numpy.float32)
Observation_Corelation_Par = numpy.empty((len(SensorType), 5, 2),dtype=numpy.float32)
if Def_PP == 2:
mpi4py_comm.Bcast([Observation_Matrix,MPI.FLOAT])
mpi4py_comm.Bcast([Observation_Variance,MPI.FLOAT])
mpi4py_comm.Bcast([Observation_Latitude,MPI.FLOAT])
mpi4py_comm.Bcast([Observation_Longitude,MPI.FLOAT])
mpi4py_comm.Bcast([Observation_View_Zenith_Angle,MPI.FLOAT])
mpi4py_comm.Bcast([Observation_View_Time,MPI.FLOAT])
mpi4py_comm.Bcast([Observation_NLons,MPI.FLOAT])
mpi4py_comm.Bcast([Observation_NLats,MPI.FLOAT])
mpi4py_comm.Bcast([Observation_X_Left,MPI.FLOAT])
mpi4py_comm.Bcast([Observation_X_Right,MPI.FLOAT])
mpi4py_comm.Bcast([Observation_Y_Lower,MPI.FLOAT])
mpi4py_comm.Bcast([Observation_Y_Upper,MPI.FLOAT])
mpi4py_comm.Bcast([Observation_Misc,MPI.FLOAT])
mpi4py_comm.Bcast([Observation_Corelation_Par,MPI.FLOAT])
if Def_PP == 2:
mpi4py_comm.barrier()
mpi4py_comm.Barrier()
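# Only observations strictly after Datetime_Start and no later than Datetime_End are assimilated;
# otherwise the loop is left and the remaining period is simulated without assimilation.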
if Datetime_Stop > Datetime_Start and Datetime_Stop <= Datetime_End and Dim_Obs_Type > 0:
if mpi4py_rank == 0:
print "Datetime_Start, Datetime_Stop",Datetime_Start, Datetime_Stop
os.chdir(DasPy_Path)
if Datetime_Start != Datetime_Stop: # If there are several observations at the same time, CLM only needs to be run once.
if mpi4py_rank == 0:
print "*************************************************Start Online Forcing Perturbation***************************************************************"
# Forcing_Perturbation_Online(Def_PP,Ensemble_Number,DasPy_Path,Forcing_File_Path_Home,Row_Numbers,Col_Numbers,Grid_Resolution_GEO,
# mksrf_edgee, mksrf_edgew, mksrf_edges, mksrf_edgen, LATIXY_Mat, LONGXY_Mat, Forcepert_ntrmdt,
# Datetime_Start, Datetime_Stop, active_nodes_server, job_server_node_array, Def_Print)
if (numpy.size(numpy.where(Bias_Estimation_Option_Model == 1)) >= 1) or (numpy.size(numpy.where(Bias_Estimation_Option_Obs == 1)) >= 1): # Model/Observation Bias Estimation
if Variable_Assimilation_Flag[Variable_List.index("Soil_Moisture")] or Variable_Assimilation_Flag[Variable_List.index("Surface_Temperature")] or Variable_Assimilation_Flag[Variable_List.index("Vegetation_Temperature")] or Variable_Assimilation_Flag[Variable_List.index("Latent_Heat")]\
or Variable_Assimilation_Flag[Variable_List.index("Latent_Heat_Daily")] or Variable_Assimilation_Flag[Variable_List.index("Sensible_Heat")]:
Two_Step_Bias_Estimation_Active = True # If True, read the ensemble mean in read_history and use the bias ensembles to generate the model ensembles
# Only parameter estimation or bias estimation can be active; both cannot be activated at the same time
if mpi4py_rank == 0:
print "===================================== Call DAS_Driver_Common"
Observation_Matrix, Observation_Longitude, Observation_Latitude, Observation_Variance, Observation_NLons, Observation_NLats, \
Observation_X_Left, Observation_X_Right, Observation_Y_Lower, Observation_Y_Upper, Observation_Misc, Observation_View_Zenith_Angle, Observation_View_Time, Observation_Corelation_Par, \
Analysis_Variable_Name, Constant_File_Name, Def_Par_Optimized, Soil_Layer_Index_DA,\
Mask, Mask_Index, Model_Variance, Def_First_Run_RTM, ECOCVL_Mat, ECOCVH_Mat, ECOTVL_Mat, ECOTVH_Mat, ECOWAT_Mat, \
Mean_Index_Prop_Grid_Array_Sys, Model_State_Inflation_Range, Model_State_Inflation_Range_STD, Model_Bias_Range, Observation_Bias_Range, Model_Bias_Range_STD, Observation_Bias_Range_STD, Model_Bias_STD, Observation_Bias_STD, Observation_Bias_Initialization_Flag, job_server_node_array, active_nodes_server = \
DAS_Driver_Common(mpi4py_comm, mpi4py_null, mpi4py_rank, mpi4py_name, mpi4py_comm_split, mpipy_comm_decomposition, Model_Driver, PDAF_Assim_Framework, PDAF_Filter_Type, Def_PP, Def_CESM_Multi_Instance, Def_Par_Optimized, Def_Par_Sensitivity, Def_Par_Correlation, Do_DA_Flag, CLM_NA, NAvalue, finidat_initial_CLM, finidat_initial_CLM_Copy, Def_ParFor, Def_Region,
Def_Initial, Irrig_Scheduling, Irrigation_Hours, Def_Print, Region_Name, Run_Dir_Home, Run_Dir_Multi_Instance, Run_Dir_Array, Model_Path, CLM_Flag, num_processors,
Start_Year, Start_Month, Start_Day, Start_Hour, Stop_Year, Stop_Month, Stop_Day, Stop_Hour, Datetime_Initial, Datetime_Start, Datetime_Start_Init, Datetime_Stop, Datetime_Stop_Init, Datetime_End,
DAS_Data_Path, DasPy_Path, Forcing_File_Path_Array, dtime, N_Steps, Ensemble_Number, Row_Numbers, Col_Numbers, Ensemble_Number_Predict,
Row_Numbers_String, Col_Numbers_String, Mask_Index, DAS_Depends_Path, maxpft, ntasks_CLM, rootpe_CLM, nthreads_CLM, Weather_Forecast_Days, Two_Step_Bias_Estimation_Flag, Two_Step_Bias_Estimation_Active, Mean_Dir,
PP_Port, NSLOTS, Low_Ratio_Par, High_Ratio_Par, Soil_Texture_Layer_Opt_Num, Def_Snow_Effects, Dim_Observation_Quantity,
Soil_Thickness, Soil_Layer_Num, Snow_Layer_Num, ParFlow_Layer_Num, Density_of_liquid_water, Freezing_temperature_of_fresh_water, Initial_Perturbation, Initial_Perturbation_SM_Flag, diskless_flag, persist_flag,
CLM_File_Name_List, Parameter_Range_Soil, Model_State_Inflation_Range, Model_State_Inflation_Range_STD, Model_Bias_Range, Observation_Bias_Range, Model_Bias_Range_STD, Observation_Bias_Range_STD, Model_Bias_STD, Observation_Bias_STD, Observation_Bias_Initialization_Flag,
stop_tod_string, history_file_name, finidat_name, Observation_Matrix, Observation_Longitude, Observation_Latitude, Observation_Variance, \
COSMOS_Circle_Index_Array, COSMOS_Circle_Num_Array, COSMOS_Circle_Array, Observation_NLons, Observation_NLats, Observation_X_Left, Observation_X_Right, \
Observation_Y_Lower, Observation_Y_Upper, Observation_Misc, Observation_View_Zenith_Angle, Observation_View_Time, Observation_Corelation_Par, \
Initial_Perturbation_ST_Flag, Def_First_Run, Def_SpinUp, active_nodes_server, job_server_node_array, PP_Servers_Per_Node,
Forcing_File_Path_Home, SensorType, SensorVariable, SensorQuantity, SensorResolution, Variable_ID, QC_ID, Observation_File_Name, Dim_Obs_Type,
Write_DA_File_Flag, Use_Mask_Flag, Mask_File, Def_ReBEL, Def_Localization, Call_Gstat_Flag, Plot_Analysis, Num_Local_Obs_State,
Observation_Path, Grid_Resolution_CEA, Grid_Resolution_GEO, mksrf_edgee, mksrf_edges, mksrf_edgew, mksrf_edgen,
LATIXY_Mat, LONGXY_Mat, MODEL_CEA_X, MODEL_CEA_Y, MODEL_X_Left, MODEL_X_Right, MODEL_Y_Lower,MODEL_Y_Upper, LAI_Year_String, Month_String,
DAS_Output_Path, Dim_CLM_State, Parameter_Optimization, Def_Debug, PCT_PFT_High, PCT_PFT_Low, PCT_PFT_WATER, Soil_Layer_Index_DA,
ECOCVL_Mat, ECOCVH_Mat, ECOTVL_Mat, ECOTVH_Mat, ECOWAT_Mat, column_len, pft_len, cols1d_jxy, cols1d_ixy, pfts1d_jxy, pfts1d_ixy,
Constant_File_Name_Header, finidat_name_string, Feedback_Assim, numrad,
Density_of_ice, omp_get_num_procs_ParFor, Model_Variance, Additive_Noise_SM_Par, Additive_Noise_SM, Additive_Noise_ST,
Variable_List, Variable_Assimilation_Flag, Analysis_Variable_Name, Mask, Mask_X, Mask_Y, N0, nlyr, Station_XY, Station_XY_Index, Def_First_Run_RTM, Soil_Density, CMEM_Work_Path_Array,
DAS_File_Name_List, COUP_OAS_PFL, CESM_Init_Flag,
MODIS_LAI_Data_ID, Bias_Estimation_Option_Model, Bias_Estimation_Option_Obs, PFT_Par_Sens_Array, UTC_Zone, plt, cm, colors, DateString_Plot, octave, r, COSMIC_Py, [], memory_profiler, COSMIC, Observation_Time_File_Path)
if mpi4py_rank == 0:
###################################################################
Weather_Forecast_Days_State = copy.copy(Weather_Forecast_Days)
Weather_Forecast_Days_Par = 0
# Record how many iterations we have for each parameter
Soil_Par_Accum_Dim = 0
Veg_Par_Accum_Dim = 0
PFT_Par_Accum_Dim = 0
Hard_Par_Accum_Dim = 0
for Observation_Matrix_Index in range(Dim_Obs_Type):
print "Read",str(Observation_Matrix_Index+1)+"th","Observation Matrix!"
SensorType_Sub = SensorType[Observation_Matrix_Index]
SensorVariable_Sub = SensorVariable[Observation_Matrix_Index]
SensorQuantity_Sub = SensorQuantity[Observation_Matrix_Index]
SensorResolution_Sub = SensorResolution[Observation_Matrix_Index]
Variable_ID_Sub = Variable_ID[Observation_Matrix_Index]
QC_ID_Sub = QC_ID[Observation_Matrix_Index]
print "SensorType_Sub,SensorVariable_Sub,SensorQuantity_Sub,SensorResolution_Sub,Variable_ID_Sub,QC_ID_Sub"
print SensorType_Sub,SensorVariable_Sub,SensorQuantity_Sub,SensorResolution_Sub,Variable_ID_Sub,QC_ID_Sub
if SensorVariable_Sub == "Irrigation_Scheduling":
print "Skip to next observation because of irrigation"
continue
if SensorVariable_Sub != "Albedo":
Prop_Grid_Array_Sys_Index = Variable_List.index(SensorVariable_Sub)
else:
Prop_Grid_Array_Sys_Index = Variable_List.index(Variable_ID_Sub)
print "SensorVariable_Sub,Prop_Grid_Array_Sys_Index",SensorVariable_Sub,Prop_Grid_Array_Sys_Index
print ""
if SensorQuantity_Sub == "K":
if SensorVariable_Sub == "Soil_Moisture":
SensorQuantity_Index = 0
if SensorVariable_Sub == "Surface_Temperature" or SensorVariable_Sub == "Vegetation_Temperature":
SensorQuantity_Index = 3
if SensorQuantity_Sub == "DB":
SensorQuantity_Index = 1
if SensorQuantity_Sub == "Neutron_Count":
SensorQuantity_Index = 2
if SensorQuantity_Sub == "m3/m3":
SensorQuantity_Index = 3
print "SensorQuantity_Sub,SensorQuantity_Index",SensorQuantity_Sub,SensorQuantity_Index
print ""
print "Mask the Observation where the Model State is invalid"
Observation_Matrix[Observation_Matrix_Index,:,:][Mask_Index[Prop_Grid_Array_Sys_Index,::]] = -9999.0
if SensorType_Sub == "COSMOS" or SensorType_Sub == "InSitu":
Def_Localization_Original = Def_Localization
Def_Localization = 1.0
if Variable_Assimilation_Flag[Variable_List.index("Soil_Moisture")]:
Soil_Par_Sens = Soil_Par_Sens_Array[0]
Veg_Par_Sens = Veg_Par_Sens_Array[0]
PFT_Par_Sens = PFT_Par_Sens_Array[0]
Hard_Par_Sens = Hard_Par_Sens_Array[0]
elif Variable_Assimilation_Flag[Variable_List.index("Surface_Temperature")]:
Soil_Par_Sens = Soil_Par_Sens_Array[1]
Veg_Par_Sens = Veg_Par_Sens_Array[1]
PFT_Par_Sens = PFT_Par_Sens_Array[1]
Hard_Par_Sens = Hard_Par_Sens_Array[1]
else:
Soil_Par_Sens = numpy.zeros(5, dtype=numpy.bool)
Veg_Par_Sens = numpy.zeros(16, dtype=numpy.bool)
PFT_Par_Sens = numpy.zeros(3, dtype=numpy.bool)
Hard_Par_Sens = numpy.zeros(2, dtype=numpy.bool)
Soil_Par_Sens_Dim = numpy.size(numpy.where(Soil_Par_Sens == True))
Veg_Par_Sens_Dim = numpy.size(numpy.where(Veg_Par_Sens == True))
PFT_Par_Sens_Dim = numpy.size(numpy.where(PFT_Par_Sens == True))
Hard_Par_Sens_Dim = numpy.size(numpy.where(Hard_Par_Sens == True))
print "++++++++++++++++++++++ Soil_Par_Sens_Dim",Soil_Par_Sens_Dim,"+++++++++++++++++++++++ PFT_Par_Sens_Dim",PFT_Par_Sens_Dim
Par_Soil_Uniform_STD_Sub = Par_Soil_Uniform_STD[Soil_Par_Sens]
Par_PFT_Uniform_STD_Sub = Par_PFT_Uniform_STD[PFT_Par_Sens]
print "Par_Soil_Uniform_STD_Sub",Par_Soil_Uniform_STD_Sub,"Par_PFT_Uniform_STD_Sub",Par_PFT_Uniform_STD_Sub
##################################################################################################################
NC_File_Out_Assimilation_2_Initial = netCDF4.Dataset(NC_FileName_Assimilation_2_Initial, 'r')
Mask_Index_Vector = ~Mask_Index[Prop_Grid_Array_Sys_Index,:,:].flatten()
Model_Variance_Sub = Model_Variance[Prop_Grid_Array_Sys_Index, :, :]
Prop_Grid_Array_Sys_Sub = NC_File_Out_Assimilation_2_Initial.variables['Prop_Grid_Array_Sys'][:, Prop_Grid_Array_Sys_Index, :, :]
Prop_Grid_Array_H_Trans_Sub = NC_File_Out_Assimilation_2_Initial.variables['Prop_Grid_Array_H_Trans'][:, Prop_Grid_Array_Sys_Index, :, :]
Mask_Sub = Mask[Prop_Grid_Array_Sys_Index, :, :, :]
Mask_Index_Sub = Mask_Index[Prop_Grid_Array_Sys_Index, :, :]
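# E0_SysModel_Mask and E0_ObsModel_Mask are (number of unmasked cells x Ensemble_Number) matrices
# holding, per ensemble member, the model state and the observation-operator equivalent over the
# valid grid cells. Their per-cell ensemble variance is used below to decide whether the spread
# has collapsed and the states need to be re-perturbed; note that the same column-mean variance
# is applied to every soil layer when setting the perturbation flags.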
E0_SysModel_Mask = numpy.zeros((numpy.size(numpy.where(Mask_Index_Vector==True)),Ensemble_Number),dtype=numpy.float32)
E0_ObsModel_Mask = numpy.zeros((numpy.size(numpy.where(Mask_Index_Vector==True)),Ensemble_Number),dtype=numpy.float32)
for Ens_Index in range(Ensemble_Number):
E0_SysModel_Mask[:,Ens_Index] = Prop_Grid_Array_Sys_Sub[Ens_Index, :,:][~Mask_Index_Sub].flatten()
E0_ObsModel_Mask[:,Ens_Index] = Prop_Grid_Array_H_Trans_Sub[Ens_Index, :,:][~Mask_Index_Sub].flatten()
SysModel_Variance_Value = numpy.var(E0_SysModel_Mask,axis=1)
Mean_SysModel_Variance_Value = numpy.mean(SysModel_Variance_Value)
ObsModel_Variance_Value = numpy.var(E0_ObsModel_Mask,axis=1)
Mean_ObsModel_Variance_Value = numpy.mean(ObsModel_Variance_Value)
for Soil_Layer_Index in range(Soil_Layer_Num):
Mean_SysModel_Variance_Value_Layer = numpy.mean(numpy.var(E0_SysModel_Mask,axis=1))
if SensorVariable_Sub == "Soil_Moisture":
if numpy.sqrt(Mean_SysModel_Variance_Value_Layer) < 0.02:
Initial_Perturbation_SM_Flag[Soil_Layer_Index] = 1
else:
Initial_Perturbation_SM_Flag[Soil_Layer_Index] = 0
#print Initial_Perturbation_SM_Flag[0]
elif SensorVariable_Sub == "Surface_Temperature":
if numpy.sqrt(Mean_SysModel_Variance_Value_Layer) < 1.0:
Initial_Perturbation_ST_Flag[Soil_Layer_Index] = 1
else:
Initial_Perturbation_ST_Flag[Soil_Layer_Index] = 0
if Write_DA_File_Flag:
numpy.savetxt(DasPy_Path+"Analysis/DAS_Temp/E0_SysModel_Mask.txt", E0_SysModel_Mask)
numpy.savetxt(DasPy_Path+"Analysis/DAS_Temp/E0_ObsModel_Mask.txt", E0_ObsModel_Mask)
if Def_Print:
print "******************************************************************************"
print "SysModel Mean Variance is:", Mean_SysModel_Variance_Value
print "Max SysModel Variance is:", numpy.max(SysModel_Variance_Value),"Median SysModel Variance is:", numpy.median(SysModel_Variance_Value),"Min SysModel Variance is:", numpy.min(SysModel_Variance_Value)
print "ObsModel Mean Variance is:", Mean_ObsModel_Variance_Value
print "Max ObsModel Variance is:", numpy.max(ObsModel_Variance_Value),"Median ObsModel Variance is:", numpy.median(ObsModel_Variance_Value),"Min ObsModel Variance is:", numpy.min(ObsModel_Variance_Value)
print "******************************************************************************"
SysModel_Mat = numpy.zeros((Row_Numbers,Col_Numbers))
SysModel_Mat_Col = SysModel_Mat.flatten()
SysModel_Mat_Col[Mask_Index_Vector] = numpy.mean(E0_SysModel_Mask,axis=1)
SysModel_Mat = numpy.reshape(SysModel_Mat_Col, (Row_Numbers, -1))
SysModel_Mat = numpy.ma.masked_where(Mask_Index[Prop_Grid_Array_Sys_Index,:,:], SysModel_Mat)
ObsModel_Variance_Mat = numpy.zeros((Row_Numbers,Col_Numbers))
ObsModel_Variance_Mat_Col = ObsModel_Variance_Mat.flatten()
ObsModel_Variance_Mat_Col[Mask_Index_Vector] = ObsModel_Variance_Value
ObsModel_Variance_Mat = numpy.reshape(ObsModel_Variance_Mat_Col, (Row_Numbers, -1))
ObsModel_Variance_Mat = numpy.ma.masked_where(Mask_Index[Prop_Grid_Array_Sys_Index,:,:], ObsModel_Variance_Mat)
ObsModel_Mat = numpy.zeros((Row_Numbers,Col_Numbers))
ObsModel_Mat_Col = ObsModel_Mat.flatten()
ObsModel_Mat_Col[Mask_Index_Vector] = numpy.mean(E0_ObsModel_Mask,axis=1)
ObsModel_Mat = numpy.reshape(ObsModel_Mat_Col, (Row_Numbers, -1))
ObsModel_Mat = numpy.ma.masked_where(Mask_Index[Prop_Grid_Array_Sys_Index,:,:], ObsModel_Mat)
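# Optional diagnostic figure: a 2x2 panel with the ensemble-mean model state, the ensemble-mean
# observation-operator output, the observation matrix and the observation-operator variance,
# saved under Analysis/DAS_Temp/<Region_Name>/.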
if Plot_Analysis:
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import matplotlib.colors as colors
from mpl_toolkits.axes_grid.inset_locator import inset_axes
w, h = plt.figaspect(float(Row_Numbers) / Col_Numbers)
SysModel_Mat_Masked = numpy.ma.masked_values(SysModel_Mat, NAvalue)
ObsModel_Mat_Masked = numpy.ma.masked_values(ObsModel_Mat, NAvalue)
Observation_Matrix_Masked = numpy.ma.masked_values(Observation_Matrix[Observation_Matrix_Index,:,:],NAvalue)
ObsModel_Variance_Mat_Masked = numpy.ma.masked_values(ObsModel_Variance_Mat,NAvalue)
Variable_Min = numpy.zeros(Dim_Obs_Type)
Variable_Max = numpy.zeros(Dim_Obs_Type)
Variable_Min[Observation_Matrix_Index] = numpy.min(Observation_Matrix_Masked)
Variable_Max[Observation_Matrix_Index] = numpy.max(Observation_Matrix_Masked)
print "Variable_Min_Obs",Variable_Min[Observation_Matrix_Index],"Variable_Max_Obs",Variable_Max[Observation_Matrix_Index]
Variable_Min = numpy.min(ObsModel_Variance_Mat_Masked)
Variable_Max = numpy.max(ObsModel_Variance_Mat_Masked)
if Variable_Min != Variable_Max and (not numpy.isnan(Variable_Min)) and (not numpy.isnan(Variable_Max)):
fig1 = plt.figure(figsize=(w*2, h*2), dpi=80)
fig1.suptitle(DateString_Plot, fontsize=16)
Variable_Min = numpy.min(SysModel_Mat_Masked)
Variable_Max = numpy.max(SysModel_Mat_Masked)
ticks = numpy.arange(Variable_Min, Variable_Max, (Variable_Max - Variable_Min) / 5.0)
color_boun_list = []
color_bound = [Variable_Min, Variable_Max, (Variable_Max - Variable_Min) / 100.0]
for i in range(int((color_bound[1] - color_bound[0]) / color_bound[2])):
color_bound[0] += color_bound[2]
color_boun_list.append(color_bound[0])
ax = fig1.add_subplot(2, 2, 1)
im1 = ax.imshow(SysModel_Mat_Masked, cmap=cm.jet, norm=colors.BoundaryNorm(color_boun_list, ncolors=300))
plt.colorbar(im1, ticks=ticks, orientation='horizontal')
ax.set_title('SysModel_Value')
plt.grid(True)
Variable_Min = numpy.min(ObsModel_Mat_Masked)
Variable_Max = numpy.max(ObsModel_Mat_Masked)
ticks = numpy.arange(Variable_Min, Variable_Max, (Variable_Max - Variable_Min) / 5.0)
color_boun_list = []
color_bound = [Variable_Min, Variable_Max, (Variable_Max - Variable_Min) / 100.0]
for i in range(int((color_bound[1] - color_bound[0]) / color_bound[2])):
color_bound[0] += color_bound[2]
color_boun_list.append(color_bound[0])
ax = fig1.add_subplot(2, 2, 2)
im1 = ax.imshow(ObsModel_Mat_Masked, cmap=cm.jet, norm=colors.BoundaryNorm(color_boun_list, ncolors=300))
plt.colorbar(im1, ticks=ticks, orientation='horizontal')
ax.set_title('ObsModel_Value')
plt.grid(True)
ax = fig1.add_subplot(2, 2, 3)
im1 = ax.imshow(Observation_Matrix_Masked, cmap=cm.jet, norm=colors.BoundaryNorm(color_boun_list, ncolors=300))
plt.colorbar(im1, ticks=ticks, orientation='horizontal')
ax.set_title('Observation_Matrix')
plt.grid(True)
Variable_Min = numpy.min(ObsModel_Variance_Mat_Masked)
Variable_Max = numpy.max(ObsModel_Variance_Mat_Masked)
print "Variable_Max,Variable_Min",Variable_Max,Variable_Min
ticks = numpy.arange(Variable_Min, Variable_Max, (Variable_Max - Variable_Min) / 5.0)
color_boun_list = []
color_bound = [Variable_Min, Variable_Max, (Variable_Max - Variable_Min) / 100.0]
for i in range(int((color_bound[1] - color_bound[0]) / color_bound[2])):
color_bound[0] += color_bound[2]
color_boun_list.append(color_bound[0])
ax = fig1.add_subplot(2, 2, 4)
im1 = ax.imshow(ObsModel_Variance_Mat_Masked, cmap=cm.jet, norm=colors.BoundaryNorm(color_boun_list, ncolors=300))
plt.colorbar(im1, ticks=ticks, orientation='horizontal')
ax.set_title('ObsModel_Variance_Value')
plt.grid(True)
plt.savefig(DasPy_Path+"Analysis/DAS_Temp/"+Region_Name+"/SysModel_ObsModel_Observation_"+str(Observation_Matrix_Index)+"_"+DateString_Plot+".png")
plt.close('all')
#os.abort()
if Def_Print >= 2:
print "numpy.mean(Prop_Grid_Array_Sys[:, Prop_Grid_Array_Sys_Index, :, :],axis=0)",numpy.shape(numpy.mean(NC_File_Out_Assimilation_2_Initial.variables['Prop_Grid_Array_Sys'][:, Prop_Grid_Array_Sys_Index, :, :],axis=0))
Model_State = numpy.mean(NC_File_Out_Assimilation_2_Initial.variables['Prop_Grid_Array_Sys'][:, Prop_Grid_Array_Sys_Index, :, :],axis=0)[~Mask_Index[Prop_Grid_Array_Sys_Index,::]].flatten()
NC_File_Out_Assimilation_2_Initial.close()
print "******************** Prepare the Input For Block_Assim"
NC_FileName_Block_Assim_Common = DAS_Output_Path+"Analysis/"+Region_Name+"/Block_Assim_Common.nc"
if os.path.exists(NC_FileName_Block_Assim_Common):
os.remove(NC_FileName_Block_Assim_Common)
if Def_Print:
print 'Write NetCDF File:',NC_FileName_Block_Assim_Common
NC_File_Block_Assim_Common = netCDF4.Dataset(NC_FileName_Block_Assim_Common, 'w', diskless=True, persist=True, format='NETCDF4')
# Define the dimensions of the NetCDF file
NC_File_Block_Assim_Common.createDimension('Ensemble_Number', Ensemble_Number)
NC_File_Block_Assim_Common.createDimension('lon', Col_Numbers)
NC_File_Block_Assim_Common.createDimension('lat', Row_Numbers)
NC_File_Block_Assim_Common.createDimension('Dim_CLM_State', Dim_CLM_State)
NC_File_Block_Assim_Common.createDimension('Dim_Obs_Type', Dim_Obs_Type)
NC_File_Block_Assim_Common.createDimension('Mask_Dim', 3)
NC_File_Block_Assim_Common.createVariable('Mask_Sub','f4',('Mask_Dim','lat','lon',),zlib=True)
NC_File_Block_Assim_Common.variables['Mask_Sub'][:,:,:] = Mask_Sub
NC_File_Block_Assim_Common.createVariable('Mask_Index_Sub','i4',('lat','lon',),zlib=True)
NC_File_Block_Assim_Common.variables['Mask_Index_Sub'][:,:] = Mask_Index_Sub
NC_File_Block_Assim_Common.createVariable('Model_Variance','f4',('Dim_CLM_State','lat','lon',),zlib=True)
NC_File_Block_Assim_Common.variables['Model_Variance'][:,:,:] = Model_Variance
NC_File_Block_Assim_Common.createVariable('Observation_Variance','f4',('Dim_Obs_Type','lat','lon',),zlib=True)
NC_File_Block_Assim_Common.variables['Observation_Variance'][:,:,:] = Observation_Variance
NC_File_Block_Assim_Common.createVariable('Observation_Latitude','f4',('Dim_Obs_Type','lat','lon',),zlib=True)
NC_File_Block_Assim_Common.variables['Observation_Latitude'][:,:,:] = Observation_Latitude
NC_File_Block_Assim_Common.createVariable('Observation_Longitude','f4',('Dim_Obs_Type','lat','lon',),zlib=True)
NC_File_Block_Assim_Common.variables['Observation_Longitude'][:,:,:] = Observation_Longitude
NC_File_Block_Assim_Common.createVariable('Observation_Matrix','f4',('Dim_Obs_Type','lat','lon',),zlib=True)
NC_File_Block_Assim_Common.variables['Observation_Matrix'][:,:,:] = Observation_Matrix
NC_File_Block_Assim_Common.sync()
NC_File_Block_Assim_Common.close()
if Parameter_Optimization:
if Parameter_Optimization_First_Flag and Def_First_Run:
NC_File_Out_Assimilation_2_Parameter = netCDF4.Dataset(NC_FileName_Assimilation_2_Parameter, 'r')
for Station_Index in range(numpy.size(Station_XY)/2):
Parameter_Soil_Optimized[:,:,Station_Index] = NC_File_Out_Assimilation_2_Parameter.variables['Parameter_Soil_Space_Ensemble'][:,:,Station_XY_Index[Station_Index][1],Station_XY_Index[Station_Index][0]]
Parameter_PFT_Optimized[:,:,Station_Index] = NC_File_Out_Assimilation_2_Parameter.variables['Parameter_PFT_Space_Ensemble'][:,:,Station_XY_Index[Station_Index][1],Station_XY_Index[Station_Index][0]]
NC_File_Out_Optimized_Parameter = netCDF4.Dataset(NC_FileName_Optimized_Parameter, 'a')
NC_File_Out_Optimized_Parameter.variables['Parameter_Soil_Optimized'][Optimized_Parameter_Index[0],:,:,:] = Parameter_Soil_Optimized
NC_File_Out_Optimized_Parameter.variables['Parameter_PFT_Optimized'][Optimized_Parameter_Index[2],:,:,:] = Parameter_PFT_Optimized
NC_File_Out_Optimized_Parameter.sync()
NC_File_Out_Optimized_Parameter.close()
NC_File_Out_Assimilation_2_Parameter.close()
Parameter_Optimization_First_Flag = False
print "****************************Optimizing the Parameters When Soil_Moisture_DA_Flag or Surface_Temperature_DA_Flag or Vegetation_Temperature_DA_Flag is True!!***********8"
if Variable_Assimilation_Flag[Variable_List.index("Soil_Moisture")]:
Soil_Par_Sens = Soil_Par_Sens_Array[0]
Veg_Par_Sens = Veg_Par_Sens_Array[0]
PFT_Par_Sens = PFT_Par_Sens_Array[0]
Hard_Par_Sens = Hard_Par_Sens_Array[0]
elif Variable_Assimilation_Flag[Variable_List.index("Surface_Temperature")]:
Soil_Par_Sens = Soil_Par_Sens_Array[1]
Veg_Par_Sens = Veg_Par_Sens_Array[1]
PFT_Par_Sens = PFT_Par_Sens_Array[1]
Hard_Par_Sens = Hard_Par_Sens_Array[1]
else:
Soil_Par_Sens = numpy.zeros(5, dtype=numpy.bool)
Veg_Par_Sens = numpy.zeros(16, dtype=numpy.bool)
PFT_Par_Sens = numpy.zeros(3, dtype=numpy.bool)
Hard_Par_Sens = numpy.zeros(2, dtype=numpy.bool)
Soil_Par_Sens_Dim = numpy.size(numpy.where(Soil_Par_Sens == True))
Veg_Par_Sens_Dim = numpy.size(numpy.where(Veg_Par_Sens == True))
PFT_Par_Sens_Dim = numpy.size(numpy.where(PFT_Par_Sens == True))
Hard_Par_Sens_Dim = numpy.size(numpy.where(Hard_Par_Sens == True))
if Soil_Par_Sens_Dim >= 1 or PFT_Par_Sens_Dim >= 1:
print "*************************************************Start Parameter Optimization***************************************************************"
Normal_Score_Trans_Par = 0 # No normal-score transformation for parameter estimation
Def_Par_Optimized, Initial_Perturbation_SM_Flag, Initial_Perturbation_ST_Flag, job_server_node_array, active_nodes_server, Optimized_Parameter_Index = \
Parameter_Update(mpi4py_comm, mpi4py_rank, mpi4py_name, gelmna_threshold, Optimized_Parameter_Index, Model_Driver, NSLOTS, Def_PP, Def_First_Run, Def_Print, Feedback_Assim, Def_Par_Optimized, Parameter_Optimization, Parameter_Regularization, Par_Soil_Uniform_STD_Sub, [], Par_PFT_Uniform_STD_Sub, [], Dim_Soil_Par, Dim_Veg_Par, Dim_PFT_Par, Dim_Hard_Par, \
SensorQuantity_Sub, SensorType_Sub, SensorVariable_Sub, SensorResolution_Sub, Variable_ID_Sub, QC_ID_Sub, Variable_List, maxpft, \
Row_Numbers, Col_Numbers, Ensemble_Number, Ensemble_Number_Predict, Dim_Obs_Type, Observation_Matrix, Observation_Longitude, Observation_Latitude, job_server_node_array, active_nodes_server, ntasks_CLM, \
Mask, Mask_Index, NAvalue, COSMOS_Circle_Array, COSMOS_Circle_Index_Array, COSMOS_Circle_Num_Array, Soil_Texture_Layer_Opt_Num, Soil_Sand_Clay_Sum, Parameter_Range_Soil, Parameter_Range_Veg, Parameter_Range_PFT, Parameter_Range_Hard, Par_Index_Increment_Soil_Par, DasPy_Path, \
Variable_Assimilation_Flag, DAS_Depends_Path, Def_ParFor, omp_get_num_procs_ParFor, Def_CDF_Matching, Normal_Score_Trans_Par, PDAF_Assim_Framework, PDAF_Filter_Type, PP_Servers_Per_Node, Def_CESM_Multi_Instance, PP_Port, \
Plot_Analysis, Soil_Layer_Index_DA, Initial_Perturbation_SM_Flag, Initial_Perturbation_ST_Flag, Post_Inflation_Alpha[1], \
Soil_Par_Sens_Array, Veg_Par_Sens_Array, PFT_Par_Sens_Array, Hard_Par_Sens_Array, Datetime_Start, Datetime_Initial, Low_Ratio_Par, High_Ratio_Par, Low_Ratio_Par_Uniform, High_Ratio_Par_Uniform, Write_DA_File_Flag, r, Observation_Box, Def_Region, Dim_CLM_State, Num_Local_Obs_Par, Model_Variance, DateString_Plot,
Def_Multiresolution, Def_ReBEL, Def_Localization, Assim_Algorithm_Name, eps[1], msw_infl[1], Region_Name, Call_Gstat_Flag, MODEL_X_Left, MODEL_X_Right, MODEL_Y_Lower, MODEL_Y_Upper,Proj_String, MODEL_CEA_X, MODEL_CEA_Y, Z_Resolution,
dtime, Irrigation_Hours, column_len, Weather_Forecast_Days, Datetime_End, Hydraulic_File_Name, fpftcon_name, Run_Dir_Array, \
Model_State_Inflation_Range, Model_State_Inflation_Range_STD, Model_Bias_Range, Observation_Bias_Range, Model_Bias_Range_STD, Observation_Bias_Range_STD, Model_Bias_STD, Observation_Bias_STD, Dim_Observation_Quantity,
Snow_Layer_Num, Def_Write_Initial, cols1d_ixy, cols1d_jxy, cols1d_ityplun, pfts1d_ityplun, Freezing_temperature_of_fresh_water, Density_of_ice, N0, nlyr,
Sub_Block_Ratio_Row, Sub_Block_Ratio_Col, Sub_Block_Index_Row_Mat_Vector, Sub_Block_Index_Col_Mat_Vector, Row_Offset, Col_Offset,
Row_Numbers_SubBlock_Array, Col_Numbers_SubBlock_Array, Sub_Block_Row_Start_Array, Sub_Block_Row_End_Array, Sub_Block_Col_Start_Array, Sub_Block_Col_End_Array,
diskless_flag, persist_flag, Irrig_Scheduling, Run_Dir_Home, Start_Month, Stop_Year, Stop_Month, Stop_Day, Stop_Hour, UTC_Zone, finidat_name, Density_of_liquid_water, Irrigation_Grid_Flag_Array,
mksrf_edgee, mksrf_edges, mksrf_edgew, mksrf_edgen, Datetime_Stop, Datetime_Stop_Init, CLM_NA,
Observation_Variance, Observation_NLons, Observation_NLats, Observation_X_Left, Observation_X_Right, Observation_Y_Lower, Observation_Y_Upper, Observation_Corelation_Par, octave, Station_XY, Station_XY_Index, Soil_Layer_Num, Analysis_Variable_Name,
Analysis_Grid, Localization_Map_Mask, ObsModel_Mat, ObsModel_Variance_Mat, Mask_Sub, Mask_Index_Sub, Mask_Index_Vector, Observation_Matrix_Index, Prop_Grid_Array_Sys_Index, Model_State,
SensorQuantity_Index, E0_ObsModel_Mask, Soil_Par_Sens, Veg_Par_Sens, PFT_Par_Sens, Hard_Par_Sens, Soil_Par_Accum_Dim, Veg_Par_Accum_Dim, PFT_Par_Accum_Dim, Hard_Par_Accum_Dim, ParFlow_Layer_Num,
Forcing_File_Path_Home, Observation_Path, DAS_Data_Path, Grid_Resolution_CEA, Grid_Resolution_GEO, NC_FileName_Assimilation_2_Constant, NC_FileName_Assimilation_2_Diagnostic, NC_FileName_Assimilation_2_Initial, NC_FileName_Assimilation_2_Initial_Copy, \
NC_FileName_Assimilation_2_Bias, NC_FileName_Assimilation_2_Bias_Copy, NC_FileName_Assimilation_2_Bias_Monthly, NC_FileName_Assimilation_2_Bias_Monthly_Copy,
NC_FileName_Assimilation_2_Parameter, NC_FileName_Assimilation_2_Parameter_Copy, NC_FileName_Assimilation_2_Parameter_Obs_Dim, NC_FileName_Assimilation_2_Parameter_Monthly, NC_FileName_Assimilation_2_Parameter_Monthly_Copy, NC_FileName_Parameter_Space_Single, DAS_Output_Path, \
COSMIC_Py, [], memory_profiler, COSMIC, Observation_Time_File_Path)
NC_File_Out_Assimilation_2_Parameter = netCDF4.Dataset(NC_FileName_Assimilation_2_Parameter, 'r')
for Station_Index in range(numpy.size(Station_XY)/2):
if (numpy.size(numpy.where(numpy.asarray(Soil_Par_Sens_Array) == True)) >= 1):
Parameter_Soil_Optimized[:,:,Station_Index] = NC_File_Out_Assimilation_2_Parameter.variables['Parameter_Soil_Space_Ensemble'][:,:,Station_XY_Index[Station_Index][1],Station_XY_Index[Station_Index][0]]
if (numpy.size(numpy.where(numpy.asarray(PFT_Par_Sens_Array) == True)) >= 1):
Parameter_PFT_Optimized[:,:,Station_Index] = NC_File_Out_Assimilation_2_Parameter.variables['Parameter_PFT_Space_Ensemble'][:,:,Station_XY_Index[Station_Index][1],Station_XY_Index[Station_Index][0]]
NC_File_Out_Optimized_Parameter = netCDF4.Dataset(NC_FileName_Optimized_Parameter, 'a')
if (numpy.size(numpy.where(numpy.asarray(Soil_Par_Sens_Array) == True)) >= 1):
NC_File_Out_Optimized_Parameter.variables['Parameter_Soil_Optimized'][Optimized_Parameter_Index[0],:,:,:] = Parameter_Soil_Optimized
if (numpy.size(numpy.where(numpy.asarray(PFT_Par_Sens_Array) == True)) >= 1):
NC_File_Out_Optimized_Parameter.variables['Parameter_PFT_Optimized'][Optimized_Parameter_Index[2],:,:,:] = Parameter_PFT_Optimized
NC_File_Out_Optimized_Parameter.sync()
NC_File_Out_Optimized_Parameter.close()
NC_File_Out_Assimilation_2_Parameter.close()
#print "Parameter_Soil_Space_Ensemble[:,13,Station_XY_Index[0][1],Station_XY_Index[0][0]]",Parameter_Soil_Space_Ensemble[:,13,Station_XY_Index[0][1],Station_XY_Index[0][0]]
#print "Parameter_Soil_Optimized_Array",Parameter_Soil_Optimized_Array[0][:,13,0],Parameter_Soil_Optimized_Array[-1][:,13,0]
if Plot_Analysis:
print "################################################# Plot Parameter Results"
Plot_Parameters(Def_Print, fm, legend, plt, cm, colors, r, Def_Region, DasPy_Path, Region_Name, Row_Numbers, Col_Numbers, DAS_Data_Path, Row_Numbers_String, Col_Numbers_String, Dim_Soil_Par, Dim_Veg_Par, Start_Month, DateString_Plot, Variable_Assimilation_Flag, Mask_Index, PFT_Dominant_Index,
Parameter_Range_Veg, Parameter_Range_PFT, Parameter_Range_Hard, Ensemble_Number, Soil_Par_Sens_Array, Veg_Par_Sens_Array, PFT_Par_Sens_Array, Hard_Par_Sens_Array, Station_XY, Station_XY_Index, NC_FileName_Assimilation_2_Parameter, NC_FileName_Optimized_Parameter, NC_FileName_Parameter_Space_Single)
#os.abort()
print "*************************************************Start Data Assimilation***************************************************************"
print ""
Analysis_Grid, Initial_Perturbation_SM_Flag, Initial_Perturbation_ST_Flag, job_server_node_array, active_nodes_server = \
Assimilation_Update(mpi4py_comm, mpi4py_rank, mpi4py_name, Model_Driver, NSLOTS, finidat_initial_CLM, Def_ParFor, Def_Region, Def_Initial, Irrig_Scheduling, Irrigation_Hours, Def_First_Run, Def_Print, Region_Name, Run_Dir_Home, Model_Path, CLM_Flag, Def_PP, job_server_node_array, active_nodes_server,
Start_Year,Start_Month,Start_Day,Stop_Year,Stop_Month,Stop_Day,Stop_Hour, UTC_Zone, Datetime_Start,Datetime_Start_Init,Datetime_Stop,Datetime_Stop_Init, Datetime_End, Datetime_Initial, Weather_Forecast_Days, Density_of_liquid_water, Density_of_ice, Freezing_temperature_of_fresh_water, N0, nlyr,
DAS_Data_Path, DAS_Depends_Path, DasPy_Path, Def_Multiresolution, Def_ReBEL, Def_Localization, Num_Local_Obs_State, eps[0], msw_infl[0], Plot_Analysis, Def_Figure_Output, DateString_Plot,
Def_Write_Initial,DA_Flag, Write_DA_File_Flag, Mask, Mask_Index, COSMOS_Circle_Array, COSMOS_Circle_Index_Array, COSMOS_Circle_Num_Array, Call_Gstat_Flag, mksrf_edgee, mksrf_edges, mksrf_edgew, mksrf_edgen, Station_XY_Index, Station_XY, Observation_Box,
Variable_Assimilation_Flag, Row_Numbers, Col_Numbers, Row_Numbers_String, Col_Numbers_String, Model_Variance, Initial_Perturbation_SM_Flag, Initial_Perturbation_ST_Flag, Normal_Score_Trans, PDAF_Assim_Framework, PDAF_Filter_Type, PP_Servers_Per_Node, Def_CESM_Multi_Instance, PP_Port,
Z_Resolution, MODEL_X_Left, MODEL_X_Right, MODEL_Y_Lower, MODEL_Y_Upper,Proj_String, MODEL_CEA_X, MODEL_CEA_Y, Hydraulic_File_Name, Assim_Algorithm_Name, Low_Ratio_Par, High_Ratio_Par, Post_Inflation_Alpha[0], irrig_nsteps_per_day, PFT_Num,
Sub_Block_Ratio_Row, Sub_Block_Ratio_Col, Sub_Block_Index_Row_Mat_Vector, Sub_Block_Index_Col_Mat_Vector, Row_Offset, Col_Offset, fpftcon_name, Crop_Sum,
Model_State_Inflation_Range, Model_State_Inflation_Range_STD, Model_Bias_Range, Observation_Bias_Range, Model_Bias_Range_STD, Observation_Bias_Range_STD, Model_Bias_STD, Observation_Bias_STD, Dim_Observation_Quantity,
Row_Numbers_SubBlock_Array, Col_Numbers_SubBlock_Array, Sub_Block_Row_Start_Array, Sub_Block_Row_End_Array, Sub_Block_Col_Start_Array, Sub_Block_Col_End_Array, finidat_name,
Ensemble_Number, Ensemble_Number_Predict, Soil_Layer_Num, Snow_Layer_Num, maxpft, Forcing_File_Path_Home, dtime, Observation_Path, Dim_CLM_State, Dim_Obs_Type, CLM_NA, NAvalue, Variable_List, ntasks_CLM, rootpe_CLM, nthreads_CLM, omp_get_num_procs_ParFor,
Grid_Resolution_CEA, Grid_Resolution_GEO, SensorQuantity_Sub, SensorType_Sub, SensorVariable_Sub, SensorResolution_Sub, Variable_ID_Sub, QC_ID_Sub,Analysis_Variable_Name, Soil_Layer_Index_DA,
Observation_Matrix, Observation_Variance, Observation_Latitude, Observation_Longitude, Observation_NLons, Observation_NLats, Observation_X_Left, Observation_X_Right, Observation_Y_Lower, Observation_Y_Upper, Observation_Corelation_Par,
octave, r, Def_CDF_Matching, numrad, cols1d_ixy, cols1d_jxy, pfts1d_ixy, pfts1d_jxy, cols1d_ityplun, pfts1d_ityplun, column_len, pft_len, pfts1d_itypveg, pfts1d_ci,
diskless_flag, persist_flag, Forcing_File_Path_Home, Forcing_File_Path_Array, history_file_name, Constant_File_Name, Run_Dir_Array, Feedback_Assim,
Dim_Soil_Par, Dim_Veg_Par, Dim_PFT_Par, Dim_Hard_Par, Soil_Texture_Layer_Opt_Num, Soil_Sand_Clay_Sum, Parameter_Range_Soil, Parameter_Range_Veg, Parameter_Range_PFT, Parameter_Range_Hard, Parameter_Regularization, Par_Soil_Uniform_STD_Sub, [], Par_PFT_Uniform_STD_Sub, [], \
Analysis_Grid, Localization_Map_Mask, ObsModel_Mat, ObsModel_Variance_Mat, Prop_Grid_Array_Sys_Index, Observation_Matrix_Index, Mask_Sub, Mask_Index_Sub,
SensorQuantity_Index, Soil_Par_Sens_Dim, Veg_Par_Sens_Dim, PFT_Par_Sens_Dim, Hard_Par_Sens_Dim, ParFlow_Layer_Num,
NC_FileName_Assimilation_2_Constant, NC_FileName_Assimilation_2_Diagnostic, NC_FileName_Assimilation_2_Initial, NC_FileName_Assimilation_2_Bias, NC_FileName_Assimilation_2_Parameter, NC_FileName_Assimilation_2_Initial_Copy, NC_FileName_Assimilation_2_Bias_Copy,
NC_FileName_Assimilation_2_Bias_Monthly, NC_FileName_Assimilation_2_Bias_Monthly_Copy, NC_FileName_Assimilation_2_Parameter_Monthly, NC_FileName_Assimilation_2_Parameter_Monthly_Copy,
NC_FileName_Parameter_Space_Single, DAS_Output_Path, COSMIC_Py, [], memory_profiler, COSMIC, finidat_name_string, Observation_Time_File_Path)
if Plot_Analysis:
print "###################################### Plot the Updated Model States"
Plot_States(octave, fm, legend, plt, cm, colors, Def_Region, Region_Name, Plot_Analysis, DasPy_Path, Soil_Layer_Num, Ensemble_Number, Row_Numbers, Col_Numbers, Dim_Obs_Type, Observation_Matrix, NAvalue, Def_Print,
Prop_Grid_Array_Sys_Index, Observation_Matrix_Index, SensorType_Sub, SensorVariable_Sub, SensorQuantity_Sub, SensorResolution_Sub, Variable_ID_Sub, QC_ID_Sub, Variable_List,
Mean_Index_Prop_Grid_Array_Sys, Mask_Index, Analysis_Grid, Analysis_Variable_Name, DateString_Plot, Variable_Assimilation_Flag,
NC_FileName_Assimilation_2_Constant, NC_FileName_Assimilation_2_Diagnostic, NC_FileName_Assimilation_2_Initial, NC_FileName_Assimilation_2_Initial_Copy, ObsModel_Mat_Masked, Observation_File_Name)
##########################################
if SensorType_Sub == "COSMOS" or SensorType_Sub == "InSitu":
Def_Localization = Def_Localization_Original
# MPI
if Def_PP == 2:
mpi4py_comm.barrier()
mpi4py_comm.Barrier()
if Parameter_Optimization:
Def_Par_Optimized = 1
#raw_input("Press Enter to continue...")
Def_First_Run = 0
Def_First_Run_Bias = 0
Two_Step_Bias_Estimation_Flag = 0
# don't init cesm mpi again
CESM_Init_Flag = 0
else:
if mpi4py_rank == 0:
print "************************ Datetime_Stop > Datetime_End, Skip to Simulation *****************"
break
if Def_PP == 2:
mpi4py_comm.barrier()
mpi4py_comm.Barrier()
if mpi4py_rank == 0:
if (Datetime_Stop > Datetime_Start):
print "############################### Delete the Histroy Ensembles to Save Space in Daily Step"
# Folder to Save Ensemble Mean
Mean_Dir = Run_Dir_Home+"_Ens_Mean"
if not os.path.exists(Mean_Dir):
os.makedirs(Mean_Dir)
#Datetime_Start_Mean = datetime.datetime(Datetime_Start.year,Datetime_Start.month,Datetime_Start.day,0,0)
#Datetime_Stop_Mean = datetime.datetime((Datetime_Stop-datetime.timedelta(days=1)).year,(Datetime_Stop-datetime.timedelta(days=1)).month,(Datetime_Stop-datetime.timedelta(days=1)).day,23,0)
Datetime_Start_Mean = Datetime_Start
Datetime_Stop_Mean = Datetime_Stop
stop_tod_string_final = str((Datetime_Stop - Datetime_Stop_Init).seconds).zfill(5)
print "-------------------- Remove the old Initial Files" # Because of the Get_Ens_Mean cannot do it
Datetime_Start_Temp = Datetime_Start-datetime.timedelta(hours=1)
Datetime_Start_Temp_Init_Mean = datetime.datetime(Datetime_Start_Temp.year, Datetime_Start_Temp.month, Datetime_Start_Temp.day, 00, 00)
stop_tod_string = str((Datetime_Start_Temp - Datetime_Start_Temp_Init_Mean).seconds).zfill(5)
restart_file_name_last_step = Region_Name + '.clm2.r.' + str(Datetime_Start_Temp.year) + '-' + str(Datetime_Start_Temp.month).zfill(2) + '-' + str(Datetime_Start_Temp.day).zfill(2) + '-' + stop_tod_string + '.nc'
Command_String = "rm -irdf "+Run_Dir_Home+"*/"+restart_file_name_last_step
print Command_String
if (restart_file_name_last_step != finidat_initial_CLM_Copy):
subprocess.call(Command_String,shell=True)
if Def_PP == 2:
mpi4py_comm.barrier()
mpi4py_comm.Barrier()
#print Parameter_Range_PFT
# After the assimilation, use the newly written restart file as the initial file and advance the model clock by one hour
Def_Initial = 1
finidat_initial_CLM = finidat_name
Datetime_Start = Datetime_Stop + datetime.timedelta(hours=1)
Datetime_Start_Init = datetime.datetime(Datetime_Start.year,Datetime_Start.month,Datetime_Start.day,00,00)
Start_Year = str(Datetime_Start.year).zfill(4)
Start_Month = str(Datetime_Start.month).zfill(2)
Start_Day = str(Datetime_Start.day).zfill(2)
Start_Hour = str(Datetime_Start.hour).zfill(2)
Constant_File_Name_Header = Region_Name + ".clm2.h0."+Start_Year+"-"+Start_Month+"-"+Start_Day+"-"+str((Datetime_Start - Datetime_Start_Init).seconds).zfill(5)+".nc"
if mpi4py_rank == 0:
print "============================================================================================================================"
print "Constant_File_Name_Header",Constant_File_Name_Header
print "============================================================================================================================"
# We only read the column and pft index during the first run
Def_Read_Index = 0
Def_First_Run = 0
CESM_Init_Flag = 0
#
SensorType = []
SensorVariable = []
SensorQuantity = []
Variable_ID = []
QC_ID = []
SensorResolution = []
Observation_File_Name = []
#------------------------------------------- Data Assimilation Flags
Variable_Assimilation_Flag = numpy.zeros(Dim_CLM_State)
if mpi4py_rank == 0:
del Observation_Matrix, Observation_Longitude, Observation_Latitude, Observation_Variance, Observation_NLons, Observation_NLats
del Observation_X_Left, Observation_X_Right, Observation_Y_Lower, Observation_Y_Upper, Observation_Misc, Observation_View_Zenith_Angle, Observation_View_Time, Observation_Corelation_Par
plt.close('all')
gc.collect()
del gc.garbage[:]
r('gc(TRUE)')
if Def_PP == 2:
mpi4py_comm.barrier()
mpi4py_comm.Barrier()
Observation_Index = Observation_Index + 1
end = time.time()
if mpi4py_rank == 0:
print 'Time Is: ', (end - start) / 3600.0, 'Hours'
if mpi4py_rank == 0:
Observation_Time_File.close()
if Datetime_Start > Datetime_End:
Datetime_Stop = Datetime_Start
Stop_Year = Start_Year
Stop_Month = Start_Month
Stop_Day = Start_Day
Stop_Hour = Start_Hour
if Datetime_Start < Datetime_End:
Stop_Year = End_Year
Stop_Month = End_Month
Stop_Day = End_Day
Stop_Hour = End_Hour
Datetime_Stop = Datetime_End
Datetime_Stop_Init = Datetime_End_Init
if mpi4py_rank == 0:
print "**************** Drive CLM from",Datetime_Start,"to",Datetime_Stop,"after data assimilation"
Do_DA_Flag = 0
if mpi4py_rank == 0:
if Def_PP and Ensemble_Number > 1:
print "********************************************** Using PP to Accelerate Prepare_Model_Operator"
Job_Num_Per_Node = int(numpy.ceil(float(Ensemble_Number) / len(active_nodes_server)))
print "The following submits",Job_Num_Per_Node,"jobs on each node and then retrieves the results"
if Job_Num_Per_Node == 0:
Job_Num_Per_Node = 1
job_server_node_results = []
Ens_Index = 0
Job_Num_Per_Node_Index = 0
while Job_Num_Per_Node_Index < Job_Num_Per_Node and Ens_Index < Ensemble_Number:
for Node_Index in range(len(active_nodes_server)):
job_server_node = job_server_node_array[numpy.min([Job_Num_Per_Node_Index+Node_Index*len(job_server_node_array)/len(active_nodes_server),len(job_server_node_array)-1])]
#job_server_node = job_server_node_array[Node_Index]
if Ens_Index > Ensemble_Number - 1:
break
job_server_node_results.append(job_server_node.submit(Prepare_Model_Operator, args=(Ens_Index, Model_Driver, Def_CESM_Multi_Instance, Def_Par_Sensitivity, Def_Par_Correlation, Def_Par_Optimized, Do_DA_Flag, Def_Debug, CLM_NA, NAvalue, finidat_initial_CLM, Def_ParFor, Def_Region, Def_Initial, \
Irrig_Scheduling, Irrigation_Hours, Def_SpinUp, Def_First_Run, Def_Print, Region_Name, Run_Dir_Home, Run_Dir_Multi_Instance, Run_Dir_Array, Model_Path, CLM_Flag, num_processors,
CLM_File_Name_List, Parameter_Range_Soil,
Start_Year, Start_Month, Start_Day, Stop_Year, Stop_Month, Stop_Day, Datetime_Start, Datetime_Start_Init, Datetime_Stop, Datetime_Stop_Init, Datetime_End, Datetime_Initial,
DAS_Data_Path, DasPy_Path, DAS_Output_Path, Forcing_File_Path_Array, dtime, Variable_Assimilation_Flag, Variable_List,\
Def_PP, N_Steps, Ensemble_Number, Ensemble_Number_Predict, Row_Numbers, Col_Numbers, Row_Numbers_String, Col_Numbers_String,
DAS_Depends_Path, maxpft, ntasks_CLM, rootpe_CLM, nthreads_CLM, Weather_Forecast_Days, Variable_Assimilation_Flag[Variable_List.index("Irrigation_Scheduling")],\
omp_get_num_procs_ParFor, Low_Ratio_Par, High_Ratio_Par, Soil_Texture_Layer_Opt_Num, Def_Snow_Effects, PFT_Par_Sens_Array,\
Soil_Thickness, Soil_Layer_Num, Snow_Layer_Num, Density_of_liquid_water, Initial_Perturbation, Initial_Perturbation_SM_Flag, Initial_Perturbation_ST_Flag, \
NC_FileName_Assimilation_2_Constant, NC_FileName_Assimilation_2_Diagnostic, NC_FileName_Assimilation_2_Initial, NC_FileName_Assimilation_2_Bias, NC_FileName_Assimilation_2_Parameter, NC_FileName_Parameter_Space_Single,),
depfuncs=(Run_CLM, Call_CLM_3D, Write_datm_atm_in, Write_datm_streams_txt_rad, Write_datm_streams_txt_prec, Write_datm_streams_txt_tair, Write_presaero_stream_txt, Write_lnd_in, Write_rof_in, Write_Config_Files, Write_drv_in, Write_seq_maps),
modules=("numpy", "netCDF4", "sys", "os", "re", "unittest", "time", "datetime", "shutil", "fnmatch", "subprocess", "string", "socket", "signal", "gc", "imp", "getpass", "calendar", "glob","scipy.stats","scipy.signal",'scipy.weave',), group='Prepare_Model_Operator'))
Ens_Index = Ens_Index + 1
Job_Num_Per_Node_Index = Job_Num_Per_Node_Index + 1
for job_server_node in job_server_node_array:
job_server_node.wait()
if Def_Print >= 2:
job_server_node.print_stats()
if Def_Print:
if len(job_server_node_results) > 0:
for job in job_server_node_results:
job_index = job_server_node_results.index(job)
if job_index > (Ensemble_Number - 1):
break
print "Results of ",job_index,"is", job()
else:
print "*************************************************** Run Prepare_Model_Operator Sequentially"
for Ens_Index in range(Ensemble_Number):
Prepare_Model_Operator(Ens_Index, Model_Driver, Def_CESM_Multi_Instance, Def_Par_Sensitivity, Def_Par_Correlation, Def_Par_Optimized, Do_DA_Flag, Def_Debug, CLM_NA, NAvalue, finidat_initial_CLM, Def_ParFor, Def_Region, Def_Initial, \
Irrig_Scheduling, Irrigation_Hours, Def_SpinUp, Def_First_Run, Def_Print, Region_Name, Run_Dir_Home, Run_Dir_Multi_Instance, Run_Dir_Array, Model_Path, CLM_Flag, num_processors,
CLM_File_Name_List, Parameter_Range_Soil,
Start_Year, Start_Month, Start_Day, Stop_Year, Stop_Month, Stop_Day, Datetime_Start, Datetime_Start_Init, Datetime_Stop, Datetime_Stop_Init, Datetime_End, Datetime_Initial,
DAS_Data_Path, DasPy_Path, DAS_Output_Path, Forcing_File_Path_Array, dtime, Variable_Assimilation_Flag, Variable_List,\
Def_PP, N_Steps, Ensemble_Number, Ensemble_Number_Predict, Row_Numbers, Col_Numbers, Row_Numbers_String, Col_Numbers_String,
DAS_Depends_Path, maxpft, ntasks_CLM, rootpe_CLM, nthreads_CLM, Weather_Forecast_Days, Variable_Assimilation_Flag[Variable_List.index("Irrigation_Scheduling")],\
omp_get_num_procs_ParFor, Low_Ratio_Par, High_Ratio_Par, Soil_Texture_Layer_Opt_Num, Def_Snow_Effects, PFT_Par_Sens_Array,\
Soil_Thickness, Soil_Layer_Num, Snow_Layer_Num, Density_of_liquid_water, Initial_Perturbation, Initial_Perturbation_SM_Flag, Initial_Perturbation_ST_Flag, \
NC_FileName_Assimilation_2_Constant, NC_FileName_Assimilation_2_Diagnostic, NC_FileName_Assimilation_2_Initial, NC_FileName_Assimilation_2_Bias, NC_FileName_Assimilation_2_Parameter, NC_FileName_Parameter_Space_Single)
if Def_PP == 2:
mpi4py_comm.barrier()
mpi4py_comm.Barrier()
if True:
if Def_PP == 1:
print "********************************************** Using PP to Accelerate Call_Model_Operator"
Job_Num_Per_Node = int(numpy.ceil(float(Ensemble_Number) / len(active_nodes_server)))
print "The following submits",Job_Num_Per_Node,"jobs on each node and then retrieves the results"
if Job_Num_Per_Node == 0:
Job_Num_Per_Node = 1
job_server_node_results = []
Ens_Index = 0
Job_Num_Per_Node_Index = 0
while Job_Num_Per_Node_Index < Job_Num_Per_Node and Ens_Index < Ensemble_Number:
for Node_Index in range(len(active_nodes_server)):
job_server_node = job_server_node_array[numpy.min([Job_Num_Per_Node_Index+Node_Index*len(job_server_node_array)/len(active_nodes_server),len(job_server_node_array)-1])]
#job_server_node = job_server_node_array[Node_Index]
if Ens_Index > Ensemble_Number - 1:
break
job_server_node_results.append(job_server_node.submit(Call_Model_Operator, args=(Ens_Index, Model_Driver, Def_CESM_Multi_Instance, Def_Par_Sensitivity, Def_Par_Correlation, Def_Par_Optimized, Do_DA_Flag, Def_Debug, CLM_NA, NAvalue, finidat_initial_CLM, Def_ParFor, Def_Region, Def_Initial, \
Irrig_Scheduling, Irrigation_Hours, Def_SpinUp, Def_First_Run, Def_Print, Region_Name, Run_Dir_Home, Run_Dir_Multi_Instance, Run_Dir_Array, Model_Path, CLM_Flag, num_processors,
CLM_File_Name_List, Start_Year, Start_Month, Start_Day, Stop_Year, Stop_Month, Stop_Day, Datetime_Start, Datetime_Start_Init, Datetime_Stop, Datetime_Stop_Init, Datetime_End, Datetime_Initial, DAS_Data_Path, DasPy_Path, Forcing_File_Path_Array, dtime,\
Def_PP, N_Steps, Ensemble_Number, Ensemble_Number_Predict, Row_Numbers, Col_Numbers, Row_Numbers_String, Col_Numbers_String, DAS_Depends_Path, maxpft, ntasks_CLM, rootpe_CLM, nthreads_CLM, Weather_Forecast_Days, Variable_Assimilation_Flag[Variable_List.index("Irrigation_Scheduling")],\
Low_Ratio_Par, High_Ratio_Par, Soil_Texture_Layer_Opt_Num, Def_Snow_Effects, PFT_Par_Sens_Array,\
Soil_Thickness, Soil_Layer_Num, Snow_Layer_Num, Density_of_liquid_water, Initial_Perturbation, Initial_Perturbation_SM_Flag, Initial_Perturbation_ST_Flag, NC_FileName_Assimilation_2_Constant, NC_FileName_Assimilation_2_Diagnostic,
NC_FileName_Assimilation_2_Initial, NC_FileName_Assimilation_2_Bias, NC_FileName_Parameter_Space_Single, COUP_OAS_PFL, CESM_Init_Flag, mpi4py_comm_split, mpi4py_null),
depfuncs=(Run_CLM, Call_CLM_3D, Write_datm_atm_in, Write_datm_streams_txt_rad, Write_datm_streams_txt_prec, Write_datm_streams_txt_tair, Write_presaero_stream_txt, Write_lnd_in, Write_rof_in, Write_Config_Files, Write_drv_in, Write_seq_maps),
modules=("numpy", "netCDF4", "sys", "os", "re", "unittest", "time", "datetime", "shutil", "fnmatch", "subprocess", "string", "socket", "signal", "gc", "imp", "getpass", "calendar", "glob","scipy.stats","scipy.signal",'scipy.weave',), group='Call_Model_Operator'))
Ens_Index = Ens_Index + 1
Job_Num_Per_Node_Index = Job_Num_Per_Node_Index + 1
for job_server_node in job_server_node_array:
job_server_node.wait()
if Def_Print >= 2:
job_server_node.print_stats()
if Def_Print:
if len(job_server_node_results) > 0:
for job in job_server_node_results:
job_index = job_server_node_results.index(job)
if job_index > (Ensemble_Number - 1):
break
print "Results of ",job_index,"is", job()
elif Def_PP == 2:
if mpi4py_rank == 0:
print "********************************************** Using Mpi4Py to Accelerate Call_Model_Operator"
Ens_Index = mpi4py_rank/mpipy_comm_decomposition
if Def_Print:
print "mpi4py_rank",mpi4py_rank,"Ens_Index",Ens_Index
if Ens_Index < Ensemble_Number:
Call_Model_Operator(Ens_Index, Model_Driver, Def_CESM_Multi_Instance, Def_Par_Sensitivity, Def_Par_Correlation, Def_Par_Optimized, Do_DA_Flag, Def_Debug, CLM_NA, NAvalue, finidat_initial_CLM, Def_ParFor, Def_Region, Def_Initial, \
Irrig_Scheduling, Irrigation_Hours, Def_SpinUp, Def_First_Run, Def_Print, Region_Name, Run_Dir_Home, Run_Dir_Multi_Instance, Run_Dir_Array, Model_Path, CLM_Flag, num_processors,
CLM_File_Name_List, Start_Year, Start_Month, Start_Day, Stop_Year, Stop_Month, Stop_Day, Datetime_Start, Datetime_Start_Init, Datetime_Stop, Datetime_Stop_Init, Datetime_End, Datetime_Initial, DAS_Data_Path, DasPy_Path, Forcing_File_Path_Array, dtime,\
Def_PP, N_Steps, Ensemble_Number, Ensemble_Number_Predict, Row_Numbers, Col_Numbers, Row_Numbers_String, Col_Numbers_String, DAS_Depends_Path, maxpft, ntasks_CLM, rootpe_CLM, nthreads_CLM, Weather_Forecast_Days, Variable_Assimilation_Flag[Variable_List.index("Irrigation_Scheduling")],\
Low_Ratio_Par, High_Ratio_Par, Soil_Texture_Layer_Opt_Num, Def_Snow_Effects, PFT_Par_Sens_Array,\
Soil_Thickness, Soil_Layer_Num, Snow_Layer_Num, Density_of_liquid_water, Initial_Perturbation, Initial_Perturbation_SM_Flag, Initial_Perturbation_ST_Flag, NC_FileName_Assimilation_2_Constant, NC_FileName_Assimilation_2_Diagnostic,
NC_FileName_Assimilation_2_Initial, NC_FileName_Assimilation_2_Bias, NC_FileName_Parameter_Space_Single,COUP_OAS_PFL, CESM_Init_Flag, mpi4py_comm_split, mpi4py_null)
mpi4py_comm.barrier()
mpi4py_comm.Barrier()
else:
print "*************************************************** Run DAS Sequentially"
for Ens_Index in range(Ensemble_Number):
Call_Model_Operator(Ens_Index, Model_Driver, Def_CESM_Multi_Instance, Def_Par_Sensitivity, Def_Par_Correlation, Def_Par_Optimized, Do_DA_Flag, Def_Debug, CLM_NA, NAvalue, finidat_initial_CLM, Def_ParFor, Def_Region, Def_Initial, \
Irrig_Scheduling, Irrigation_Hours, Def_SpinUp, Def_First_Run, Def_Print, Region_Name, Run_Dir_Home, Run_Dir_Multi_Instance, Run_Dir_Array, Model_Path, CLM_Flag, num_processors,
CLM_File_Name_List, Start_Year, Start_Month, Start_Day, Stop_Year, Stop_Month, Stop_Day, Datetime_Start, Datetime_Start_Init, Datetime_Stop, Datetime_Stop_Init, Datetime_End, Datetime_Initial, DAS_Data_Path, DasPy_Path, Forcing_File_Path_Array, dtime,\
Def_PP, N_Steps, Ensemble_Number, Ensemble_Number_Predict, Row_Numbers, Col_Numbers, Row_Numbers_String, Col_Numbers_String, DAS_Depends_Path, maxpft, ntasks_CLM, rootpe_CLM, nthreads_CLM, Weather_Forecast_Days, Variable_Assimilation_Flag[Variable_List.index("Irrigation_Scheduling")],\
Low_Ratio_Par, High_Ratio_Par, Soil_Texture_Layer_Opt_Num, Def_Snow_Effects, PFT_Par_Sens_Array,\
Soil_Thickness, Soil_Layer_Num, Snow_Layer_Num, Density_of_liquid_water, Initial_Perturbation, Initial_Perturbation_SM_Flag, Initial_Perturbation_ST_Flag, NC_FileName_Assimilation_2_Constant, NC_FileName_Assimilation_2_Diagnostic,
NC_FileName_Assimilation_2_Initial, NC_FileName_Assimilation_2_Bias, NC_FileName_Parameter_Space_Single, COUP_OAS_PFL, CESM_Init_Flag, mpi4py_comm_split, mpi4py_null)
#Datetime_Start_Mean = datetime.datetime(Datetime_Start.year,Datetime_Start.month,Datetime_Start.day,0,0)
stop_tod_string_final = str((Datetime_Stop - Datetime_Stop_Init).seconds).zfill(5)
Datetime_Start_Mean = Datetime_Start
Datetime_Stop_Mean = Datetime_Stop
end = time.time()
if mpi4py_rank == 0:
print 'Time Is: ', (end - start) / 3600.0, 'Hours'
|
void preorder(node * root) {
    if (root == NULL)
        return;
    cout << root -> data << " ";
    preorder(root -> left);
    preorder(root -> right);
}
|
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_VisualizerMainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(772, 800)
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.verticalLayout = QtWidgets.QVBoxLayout(self.centralwidget)
self.verticalLayout.setObjectName("verticalLayout")
self.frame_2 = QtWidgets.QFrame(self.centralwidget)
self.frame_2.setMaximumSize(QtCore.QSize(16777215, 100))
self.frame_2.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.frame_2.setFrameShadow(QtWidgets.QFrame.Raised)
self.frame_2.setObjectName("frame_2")
self.gridLayout = QtWidgets.QGridLayout(self.frame_2)
self.gridLayout.setObjectName("gridLayout")
self.label = QtWidgets.QLabel(self.frame_2)
self.label.setMaximumSize(QtCore.QSize(16777215, 50))
font = QtGui.QFont()
font.setPointSize(14)
font.setBold(True)
font.setWeight(75)
self.label.setFont(font)
self.label.setAlignment(QtCore.Qt.AlignCenter)
self.label.setObjectName("label")
self.gridLayout.addWidget(self.label, 0, 0, 1, 1)
self.verticalLayout.addWidget(self.frame_2)
self.frame = PlotCanvas(self.centralwidget)
self.frame.setMinimumSize(QtCore.QSize(0, 500))
self.frame.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.frame.setFrameShadow(QtWidgets.QFrame.Raised)
self.frame.setObjectName("frame")
self.verticalLayout.addWidget(self.frame)
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 772, 26))
self.menubar.setObjectName("menubar")
MainWindow.setMenuBar(self.menubar)
self.statusbar = QtWidgets.QStatusBar(MainWindow)
self.statusbar.setObjectName("statusbar")
MainWindow.setStatusBar(self.statusbar)
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
self.label.setText(_translate("MainWindow", "Visualizing the signal sending process"))
from PlotCanvas import PlotCanvas
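# Hedged usage sketch (not part of the generated file): a pyuic-style Ui class is applied to a
# plain QMainWindow as below; it assumes the PlotCanvas module imported above provides a
# QFrame-compatible widget.
if __name__ == "__main__":
    import sys
    app = QtWidgets.QApplication(sys.argv)
    window = QtWidgets.QMainWindow()
    ui = Ui_VisualizerMainWindow()
    ui.setupUi(window)  # builds the label, the PlotCanvas frame and the menu/status bars
    window.show()
    sys.exit(app.exec_())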
|
import threading
import time
def run(n):
print("task:",n)
time.sleep(2)
print("task-end",n)
# t1 = threading.Thread(target=run,args=('t1',))
# t2 = threading.Thread(target=run,args=('t2',))
# t1.start()
# t2.start()
thread_list = []
start_time = time.time()
for i in range(50):
t = threading.Thread(target=run,args=("t-%s"%i,))
t.setDaemon(True)  # mark this thread as a daemon thread
t.start()
# do not join here, so the remaining threads can still start without blocking; collect them in a list instead
thread_list.append(t)
# for res in thread_list:
# # print(threading.active_count())
# res.join()
print("-------ALL Threads Has been Finished--------",threading.current_thread())
time.sleep(1.9)
end_time = time.time()
print("cost",end_time-start_time)
# after the main thread starts the child threads, the child threads run in parallel with the main thread
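# Hedged sketch: the commented-out join loop above is the deterministic alternative to sleeping
# for a fixed 1.9 s -- start non-daemon workers and wait for each one with join(). run_and_wait
# is an illustrative helper name, not part of the original script.
def run_and_wait(num_threads=50):
    workers = []
    for i in range(num_threads):
        t = threading.Thread(target=run, args=("t-%s" % i,))
        t.start()
        workers.append(t)
    for t in workers:
        t.join()  # block until every worker has finished
    print("all %d workers joined" % num_threads)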
|
from django.shortcuts import render, get_object_or_404
from django.views.generic import ListView, DetailView, CreateView, UpdateView, DeleteView
from .models import Material
from django.contrib.auth.models import User
from django.contrib.auth.mixins import LoginRequiredMixin, UserPassesTestMixin
# Create your views here.
def home(request):
context = {
"materials":Material.objects.all()
}
return render(request,"blog/home.html",context)
class PostListView(ListView):
model = Material
template_name = 'blog/home.html' #<app>/<model>_<viewtype>.html
context_object_name = 'materials'
ordering = ['-date_added']
paginate_by = 2
class UserPostListView(ListView):
model = Material
template_name = 'blog/user_posts.html' #<app>/<model>_<viewtype>.html
context_object_name = 'materials'
paginate_by = 2
def get_queryset(self):
user = get_object_or_404(User,username =self.kwargs.get('username'))
return Material.objects.filter(author = user).order_by('-date_added')
class PostDetailView(DetailView):
model = Material
class PostCreateView(LoginRequiredMixin, CreateView):
model = Material
fields =['material_name','purchase_price','stock_price']
def form_valid(self,form):
form.instance.author = self.request.user
return super().form_valid(form)
class PostUpdateView(LoginRequiredMixin, UserPassesTestMixin, UpdateView):
model = Material
fields =['material_name','purchase_price','stock_price']
def form_valid(self,form):
form.instance.author = self.request.user
return super().form_valid(form)
def test_func(self):
material = self.get_object()
if self.request.user == material.author:
return True
return False
class PostDeleteView(LoginRequiredMixin, UserPassesTestMixin, DeleteView):
model = Material
success_url = '/'
def test_func(self):
material = self.get_object()
if self.request.user == material.author:
return True
return False
def about(request):
return render(request,"blog/about.html",{"title":"here is title"})
|
from datetime import datetime, timedelta
import json
import inspect
# https://docs.python.org/3/library/datetime.html#strftime-strptime-behavior
def GetCurrentQuarterStartDate() -> datetime:
return FindQuarterDates(datetime.now())[0]
def GetCurrentQuarterEndDate() -> datetime:
return FindQuarterDates(datetime.now())[1]
def FindQuarterDates(dt: datetime) -> (datetime, datetime):
quarter = (dt.month - 1) // 3 + 1
sDate = datetime(dt.year, 3 * quarter - 2, 1)
eDate = datetime(dt.year, int(3 * quarter + 1), 1) + timedelta(days=-1)
return sDate, eDate
def DateTimeToISO8601(dt: datetime) -> str:
return dt.strftime("%Y-%m-%dT%H:%M:%SZ")
def DateTimeAddSeconds(dt: datetime, secs: int) -> datetime:
return dt + timedelta(seconds=secs)
def CurrentDateTimeAddSeconds(secs: int) -> datetime:
return DateTimeAddSeconds(datetime.now(), secs)
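# Worked example (hedged, illustrative dates only) of the quarter arithmetic in FindQuarterDates:
# for 2021-07-14, quarter = (7 - 1) // 3 + 1 = 3, so the start date is datetime(2021, 7, 1) and
# the end date is datetime(2021, 10, 1) - timedelta(days=1) = 2021-09-30, i.e.
# FindQuarterDates(datetime(2021, 7, 14)) == (datetime(2021, 7, 1), datetime(2021, 9, 30))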
# JSONEncoder override
class ObjectEncoder(json.JSONEncoder):
def default(self, obj):
if hasattr(obj, "to_json"):
return self.default(obj.to_json())
elif hasattr(obj, "__dict__"):
d = dict(
(key, value)
for key, value in inspect.getmembers(obj)
if not key.startswith("__")
and not inspect.isabstract(value)
and not inspect.isbuiltin(value)
and not inspect.isfunction(value)
and not inspect.isgenerator(value)
and not inspect.isgeneratorfunction(value)
and not inspect.ismethod(value)
and not inspect.ismethoddescriptor(value)
and not inspect.isroutine(value)
)
return self.default(d)
return obj
def ExportModelToJSON(modelObj):
return json.dumps(modelObj, cls=ObjectEncoder)
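# Hedged usage sketch: ObjectEncoder walks plain attribute values via __dict__/getmembers, so a
# simple value object serializes without a custom to_json. The Quarter class below is illustrative
# only and not part of the original module.
if __name__ == "__main__":
    class Quarter(object):
        def __init__(self):
            self.start = DateTimeToISO8601(GetCurrentQuarterStartDate())
            self.end = DateTimeToISO8601(GetCurrentQuarterEndDate())
    print(ExportModelToJSON(Quarter()))  # e.g. {"end": "...", "start": "..."} (key order may vary)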
|
# activate theano on gpu
from __future__ import print_function
import os;
os.environ['THEANO_FLAGS'] = "device=gpu";
import theano;
theano.config.floatX = 'float32';
import numpy as np;
import sys, os;
import gzip;
from six.moves import cPickle;
from vae_conv import conv_variational_autoencoder;
from keras import backend as K;
from scipy.stats import norm;
# define parameters;
# no of trajectory files and frames in each file;
n_traj = 1;
f_traj = 110*10000;
# fraction of train, test and pred data separation;
sep_train = 0.8;
sep_test = 0.9;
sep_pred = 1;
# choice to flatten data: "0" for NO & "1" for YES;
choice = 0;
# row and column dimension for each frame;
row = 28;
col =28;
# padding: use this in case of dimension mismatch for the encoders;
# pad_row and pad_col are row or colums to be added;
pad_row = 0;
pad_col = 0;
# define parameters for variational autoencoder - convolutional;
channels = 1;
batch_size = 1000;
conv_layers = 3;
# feature_maps is number of filters
feature_maps = [128,128,128,128];
filter_shapes = [(3,3),(3,3),(3,3),(3,3)];
strides = [(1,1),(2,2),(1,1),(1,1)];
dense_layers = 1;
dense_neurons = [128];
dense_dropouts = [0];
latent_dim = 3;
epochs = 1;
nb_start = 0;
nb_end = 50;
# loading section;
nb_select = 10;
load_step = 10;
load_start = nb_select;
load_end = nb_end+1;
# number of digits for decoding;
n_dec = 10;
# which image to pick to decode;
pick = 400;
# figure with 10x10 digits for generator images;
n_d = 10;
n1 = 0;
# end define parameters;
# opening file;
# load data for labelling;
label = np.loadtxt("/home/odb/dl/keras/1FME-0/data/1FME-0_cont-mat.dat");
# open pickled file;
#with gzip.open('./aligned_fs-peptide_coor.pkl.gz', 'rb') as f3:
# (X) = cPickle.load(f3)
#x_raw = X;
#print "dataset dimension:", np.shape(x_raw);
# open dat file;
path_data_array = "/home/odb/dl/keras/1FME-0/data/1FME-0_cont-mat.array";
# read the large .dat file line by line and store it in an array
nf = n_traj*f_traj;
q = row*col;
j_count = 0;
k_count = 0;
samples = (nf);
row_num = (nf)*row;
column_num = (col);
array_f_int = np.zeros(shape=(row_num,column_num));
with open(path_data_array) as infile:
for line in infile:
array_f_string = line.split();
array_f_array = np.array(list(array_f_string), dtype='|S4');
array_f_float = array_f_array.astype(np.float);
array_f_int[j_count] = array_f_float;
if j_count == k_count:
print('Frames read:', (j_count/row))
k_count = k_count + 10000*row;
j_count = j_count + 1;
if j_count == (row_num):
break;
print(('Initial matrix array dimension:'), np.shape(array_f_int))
array_f = np.reshape(array_f_int, (samples, row, col));
print(('Final matrix array dimension:'), np.shape(array_f))
x_raw = array_f[0:];
print("Dataset dimension:", np.shape(x_raw))
##########################################################################################################
##########################################################################################################
##########################################################################################################
# process of input data;
# padding;
row_dim_array = row + pad_row;
col_dim_array = col + pad_col;
# reshape data according to the choice of flatteing;
if choice == 0:
new_shape = (len(x_raw),row_dim_array,col_dim_array)
if choice == 1:
new_shape = (len(x_raw),row_dim_array*col_dim_array)
add_zero = np.zeros(new_shape,dtype = x_raw.dtype);
if choice == 0:
add_zero[0:x_raw.shape[0],0:x_raw.shape[1],0:x_raw.shape[2]] = x_raw
if choice == 1:
add_zero[0:x_raw.shape[0],0:x_raw.shape[1]] = x_raw
x_raw = add_zero;
# determine size for training, testing & prediction data;
sep_1 = int(x_raw.shape[0]*sep_train);
sep_2 = int(x_raw.shape[0]*sep_test);
sep_3 = int(x_raw.shape[0]*sep_pred);
x_train_raw = x_raw[:sep_1];
x_test_raw = x_raw[sep_1:sep_2];
x_pred_raw = x_raw[sep_2:sep_3];
print("Shape to load:", "train:", np.shape(x_train_raw), "test:", np.shape(x_test_raw), "prediction:", np.shape(x_pred_raw))
# start variational autoencoder - convolutional;
# create directories;
path_1 = "./fig"
path_2 = "./imgs"
path_3 = "./hist"
path_4 = "./model"
if not os.path.exists(path_1):
    os.mkdir(path_1, 0o755);
if not os.path.exists(path_2):
    os.mkdir(path_2, 0o755);
if not os.path.exists(path_3):
    os.mkdir(path_3, 0o755);
if not os.path.exists(path_4):
    os.mkdir(path_4, 0o755);
print("Completed directories creation or if already exist - then checked")
# load data;
print("Loading data")
# normalizing input image matrix;
X_train = x_train_raw.astype('float32') / np.amax(x_train_raw);
X_test = x_test_raw.astype('float32') / np.amax(x_test_raw);
X_pred = x_pred_raw.astype('float32') / np.amax(x_pred_raw);
print("Shape of data loaded:", "train:", np.shape(X_train), "test:", np.shape(X_test))
# reshape to 4d tensors;
image_size = X_train.shape[-2:];
if K.image_dim_ordering() == 'th' or K.image_dim_ordering() == 'channels_first':
tensor_shape = (1,image_size[0],image_size[1])
else:
tensor_shape = (image_size[0],image_size[1],1)
X_train = X_train.reshape((X_train.shape[0],) + tensor_shape);
X_test = X_test.reshape((X_test.shape[0],) + tensor_shape);
print("Reshaped data:", "train:", np.shape(X_train), "test:", np.shape(X_test))
# build autoencoder;
print("Building variational autoencoder")
# set up parameter;
feature_maps = feature_maps[0:conv_layers];
filter_shapes = filter_shapes[0:conv_layers];
strides = strides[0:conv_layers];
autoencoder = conv_variational_autoencoder(image_size,channels,conv_layers,feature_maps,
filter_shapes,strides,dense_layers,dense_neurons,dense_dropouts,latent_dim);
# load data to analyze;
conv_full_train = X_train[0:];
conv_full_test = X_test[0:];
conv_full_pred = X_pred[0:];
label = label[:len(x_raw)];
y_train_0 = label[:sep_1,0];
y_train_2 = label[:sep_1,2];
y_test_0 = label[sep_1:sep_2,0];
y_test_2 = label[sep_1:sep_2,2];
y_pred_0 = label[sep_2:sep_3,0];
y_pred_2 = label[sep_2:sep_3,2];
# pixel size of decoded figure;
row_dim = row_dim_array-pad_row;
col_dim = col_dim_array-pad_col;
# for generator images (for latent space = nD);
z_axis = np.arange(latent_dim-2);
# print "plot starts";
for load in range(load_start, load_end, load_step):
print("**********************************************loading", load)
# loading model;
autoencoder.load("./model/model_%i" %load);
####################################################################
print("Decode image for train data")
# decode images;
decoded_imgs_full = autoencoder.decode(conv_full_train);
# save decoded array to file;
np.savetxt('./imgs/decoded_train_%i.out' %load, np.reshape(decoded_imgs_full[:, 0:row_dim, 0:col_dim, :],
(len(decoded_imgs_full), (row_dim*col_dim))), fmt='%f');
# plot decoded images;
import matplotlib.pyplot as plt;
plt.switch_backend('agg');
plt.figure(figsize=(20, 4));
for i in range (n_dec):
# display original;
ax = plt.subplot(2, n_dec, i + 1);
plt.imshow(conv_full_train[i+pick, 0:row_dim, 0:col_dim, :].reshape(row_dim, col_dim));
np.savetxt('./imgs/original_imgs_train_%i_%i.out' %(i,load),
(conv_full_train[i+pick, 0:row_dim, 0:col_dim, :].reshape(row_dim, col_dim)));
plt.colorbar(orientation='vertical');
ax.get_xaxis().set_visible(False);
ax.get_yaxis().set_visible(False);
# display reconstruction;
ax = plt.subplot(2, n_dec, i + 1 + n_dec);
plt.imshow(decoded_imgs_full[i+pick, 0:row_dim, 0:col_dim, :].reshape(row_dim, col_dim));
np.savetxt('./imgs/decoded_imgs_train_%i_%i.out' %(i,load),
(decoded_imgs_full[i+pick, 0:row_dim, 0:col_dim, :].reshape(row_dim, col_dim)));
plt.colorbar(orientation='vertical');
ax.get_xaxis().set_visible(False);
ax.get_yaxis().set_visible(False);
plt.savefig('./fig/decoded_train_%i.png' %load, dpi=600);
plt.clf();
####################################################################
print("Decode image for test data")
# decode images;
decoded_imgs_full = autoencoder.decode(conv_full_test);
# save decoded array to file;
np.savetxt('./imgs/decoded_test_%i.out' %load, np.reshape(decoded_imgs_full[:, 0:row_dim, 0:col_dim, :],
(len(decoded_imgs_full), (row_dim*col_dim))), fmt='%f');
# plot decoded images;
import matplotlib.pyplot as plt;
plt.figure(figsize=(20, 4));
for i in range (n_dec):
# display original;
ax = plt.subplot(2, n_dec, i + 1);
plt.imshow(conv_full_train[i+pick, 0:row_dim, 0:col_dim, :].reshape(row_dim, col_dim));
np.savetxt('./imgs/original_imgs_test_%i_%i.out' %(i,load),
(conv_full_train[i+pick, 0:row_dim, 0:col_dim, :].reshape(row_dim, col_dim)));
plt.colorbar(orientation='vertical');
ax.get_xaxis().set_visible(False);
ax.get_yaxis().set_visible(False);
# display reconstruction;
ax = plt.subplot(2, n_dec, i + 1 + n_dec);
plt.imshow(decoded_imgs_full[i+pick, 0:row_dim, 0:col_dim, :].reshape(row_dim, col_dim));
np.savetxt('./imgs/decoded_imgs_test_%i_%i.out' %(i,load),
(decoded_imgs_full[i+pick, 0:row_dim, 0:col_dim, :].reshape(row_dim, col_dim)));
plt.colorbar(orientation='vertical');
ax.get_xaxis().set_visible(False);
ax.get_yaxis().set_visible(False);
plt.savefig('./fig/decoded_test_%i.png' %load, dpi=600);
plt.clf();
####################################################################
print("Encode image for train data")
# encode images;
# project inputs on the latent space;
x_pred_encoded = autoencoder.return_embeddings(conv_full_train);
# save encoded array to file ;
np.savetxt('./imgs/encoded_train_%i.out' %load, x_pred_encoded, fmt='%f');
# plot 1:
Dmax = y_train_2;
[n,s] = np.histogram(Dmax, 11);
d = np.digitize(Dmax, s);
#[n,s] = np.histogram(-np.log10(Dmax), 11);
#d = np.digitize(-np.log10(Dmax), s);
from matplotlib import cm;
import matplotlib as mpl;
cmi = plt.get_cmap('jet');
cNorm = mpl.colors.Normalize(vmin=min(Dmax), vmax=max(Dmax));
#cNorm = mpl.colors.Normalize(vmin=140, vmax=240);
scalarMap = mpl.cm.ScalarMappable(norm=cNorm, cmap=cmi);
import numpy as np;
from mpl_toolkits.mplot3d import Axes3D;
import matplotlib.pyplot as plt;
fig = plt.figure();
ax = fig.add_subplot(111, projection='3d');
# scatter3D requires a 1D array for x, y, and z;
# ravel() flattens each embedding coordinate column to a 1D array;
p = ax.scatter3D(np.ravel(x_pred_encoded[:, 0]),
np.ravel(x_pred_encoded[:, 1]),
np.ravel(x_pred_encoded[:, 2]),
marker='o', c=scalarMap.to_rgba(Dmax));
ax.set_xlim3d(np.amin(np.ravel(x_pred_encoded[:, 0])), np.amax(np.ravel(x_pred_encoded[:, 0])));
ax.set_ylim3d(np.amin(np.ravel(x_pred_encoded[:, 1])), np.amax(np.ravel(x_pred_encoded[:, 1])));
ax.set_zlim3d(np.amin(np.ravel(x_pred_encoded[:, 2])), np.amax(np.ravel(x_pred_encoded[:, 2])));
ax.set_xlabel('VAE 0');
ax.set_ylabel('VAE 1');
ax.set_zlabel('VAE 2');
scalarMap.set_array(Dmax);
fig.colorbar(scalarMap);
plt.savefig('./fig/encoded_train_%i.png' %load, dpi=600);
plt.clf();
####################################################################
print("Encode image for test data")
# encode images;
# project inputs on the latent space;
x_pred_encoded = autoencoder.return_embeddings(conv_full_test);
# save encoded array to file ;
np.savetxt('./imgs/encoded_test_%i.out' %load, x_pred_encoded, fmt='%f');
# plot 1:
Dmax = y_test_2;
[n,s] = np.histogram(Dmax, 11);
d = np.digitize(Dmax, s);
#[n,s] = np.histogram(-np.log10(Dmax), 11);
#d = np.digitize(-np.log10(Dmax), s);
from matplotlib import cm;
import matplotlib as mpl;
cmi = plt.get_cmap('jet');
cNorm = mpl.colors.Normalize(vmin=min(Dmax), vmax=max(Dmax));
#cNorm = mpl.colors.Normalize(vmin=140, vmax=240);
scalarMap = mpl.cm.ScalarMappable(norm=cNorm, cmap=cmi);
import numpy as np;
from mpl_toolkits.mplot3d import Axes3D;
import matplotlib.pyplot as plt;
fig = plt.figure();
ax = fig.add_subplot(111, projection='3d');
# scatter3D requires a 1D array for x, y, and z;
# ravel() flattens each embedding coordinate column to a 1D array;
p = ax.scatter3D(np.ravel(x_pred_encoded[:, 0]),
np.ravel(x_pred_encoded[:, 1]),
np.ravel(x_pred_encoded[:, 2]),
marker='o', c=scalarMap.to_rgba(Dmax));
ax.set_xlim3d(np.amin(np.ravel(x_pred_encoded[:, 0])), np.amax(np.ravel(x_pred_encoded[:, 0])));
ax.set_ylim3d(np.amin(np.ravel(x_pred_encoded[:, 1])), np.amax(np.ravel(x_pred_encoded[:, 1])));
ax.set_zlim3d(np.amin(np.ravel(x_pred_encoded[:, 2])), np.amax(np.ravel(x_pred_encoded[:, 2])));
ax.set_xlabel('VAE 0');
ax.set_ylabel('VAE 1');
ax.set_zlabel('VAE 2');
scalarMap.set_array(Dmax);
fig.colorbar(scalarMap);
plt.savefig('./fig/encoded_test_%i.png' %load, dpi=600);
plt.clf();
####################################################################
print("Generate image")
# building generator;
# build a digit generator that can sample from the learned distribution;
# display a 2D manifold of the digits;
# linearly spaced coordinates on the unit square were transformed through the inverse CDF (ppf) of the Gaussian
# to produce values of the latent variables z, since the prior of the latent space is Gaussian;
figure = np.zeros((row_dim * n_d, col_dim * n_d));
grid_x = norm.ppf(np.linspace(0.05, 0.95, n_d));
grid_y = norm.ppf(np.linspace(0.05, 0.95, n_d));
for i, yi in enumerate(grid_x):
for j, xi in enumerate(grid_y):
n1 = n1 + 1;
z_sample = np.append([xi, yi], [z_axis]);
z_sample = np.tile(z_sample, batch_size).reshape(batch_size, latent_dim);
x_decoded = autoencoder.generate(z_sample);
digit = x_decoded[0, 0:row_dim, 0:col_dim, :].reshape(row_dim, col_dim);
# saving generated array to file;
# np.savetxt('./generated/digit_%i.out' %n1, digit, fmt='%f');
figure[i * row_dim: (i + 1) * row_dim,
j * col_dim: (j + 1) * col_dim] = digit;
plt.figure(figsize=(10, 10));
plt.imshow(figure);
plt.savefig('./fig/generated_%i.png' %load, dpi=600);
plt.clf();
|
#!/usr/bin/python
import math
total = 0
for n in range(1, 101):
for r in range(n):
if math.factorial(n) // math.factorial(r) // math.factorial(n - r) > 1000000:
total += 1
print(total)
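# Hedged cross-check (requires Python 3.8+ for math.comb): counting with math.comb gives the same
# total; including r == n changes nothing because C(n, n) = 1 never exceeds one million.
assert total == sum(1 for n in range(1, 101) for r in range(n + 1) if math.comb(n, r) > 1000000)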
|
from flask import Blueprint, url_for
import app.adapters.repository as repo
import app.utilities as util
# Configure Blueprint.
services_blueprint = Blueprint(
'services_bp', __name__)
def get_genres_and_urls():
genres_name = util.get_genres(repo.repo_instance)
genre_url = dict()
for genre in genres_name:
genre_url[genre] = url_for('movie_bp.movie_by_genres', genre=genre)
return genre_url
def get_actors_and_urls():
    # NOTE: this mirrors get_genres_and_urls verbatim; util.get_genres and the
    # 'movie_bp.movie_by_genres' endpoint look like copy-paste placeholders for
    # actor-specific equivalents.
    actors = util.get_genres(repo.repo_instance)
    actor_url = dict()
    for actor in actors:
        actor_url[actor] = url_for('movie_bp.movie_by_genres', actor=actor)
    return actor_url
def get_director_and_urls():
    # NOTE: same copy-paste pattern as above, but for directors.
    directors = util.get_genres(repo.repo_instance)
    director_url = dict()
    for director in directors:
        director_url[director] = url_for('movie_bp.movie_by_genres', director=director)
    return director_url
# def get_selected_movies(quantity=3):
# articles = util.get_movies_random(quantity, repo.repository_instance)
#
# for article in articles:
# article['hyperlink'] = url_for('news_bp.articles_by_date', date=article['date'].informant())
# return articles
|
#https://gist.github.com/stared/dfb4dfaf6d9a8501cd1cc8b8cb806d2e
import keras
from keras.datasets import mnist
from keras.utils import to_categorical
from keras.models import Sequential
from keras.layers import Flatten, Dense, Activation
import numpy as np
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot as plt
#from IPython.display import clear_output
# data loading
(X_train, y_train), (X_test, y_test) = mnist.load_data()
# data preprocessing
Y_train = to_categorical(y_train)
Y_test = to_categorical(y_test)
X_train = X_train.reshape(-1, 28, 28, 1).astype('float32') / 255.
X_test = X_test.reshape(-1, 28, 28, 1).astype('float32') / 255.
# just logistic regression, to keep it simple and fast
model = Sequential()
model.add(Flatten(input_shape=(28, 28, 1)))
model.add(Dense(10))
model.add(Activation('softmax'))
model.compile(optimizer='rmsprop',
loss='categorical_crossentropy',
metrics=['accuracy'])
# in this static viewer it is not obvious,
# but this plot grows step by step
history = model.fit(X_train, Y_train,
epochs=3,
validation_data=(X_test, Y_test),
verbose=1)
print(history.history.keys())
plt.figure(1)
# summarize history for accuracy
plt.subplot(211)
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper left')
# summarize history for loss
plt.subplot(212)
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper left')
#plt.show()
plt.savefig('visualize.png')
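# Note (hedged): on newer Keras/TensorFlow releases the history keys are 'accuracy' and
# 'val_accuracy' rather than 'acc'/'val_acc'; a lookup that tolerates both would be:
# acc_key = 'acc' if 'acc' in history.history else 'accuracy'
# plt.plot(history.history[acc_key]); plt.plot(history.history['val_' + acc_key])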
|
import requests
def check(pin):
    try:
        response = requests.get('http://localhost:8000')
    except:
        response = None
    # drive the pin high when the local server answers with HTTP 200, low otherwise
    state = bool(response and response.status_code == 200)
    try:
        import RPi.GPIO as GPIO
        GPIO.setmode(GPIO.BCM)
        GPIO.setup(pin, GPIO.OUT)
        GPIO.output(pin, state)
    except:
        pass
if __name__ == '__main__':
check(18)
|
# SQLite and Python
# Anatoli Penev
# 15.04.2018
# SQLite integration in Python
import sqlite3
import sys
# connect to the database
conn = sqlite3.connect("C:\\sqlite\\EAL.db")
cursor = conn.cursor()
# create a table
cursor.execute("""CREATE TABLE customer(
idCust integer NOT NULL,
name text,
email text,
address text,
city text)
""")
conn.commit()
# insert data in the table
customer = [('1', 'Per', 'pda@eal.dk', 'MyStreet 1', 'Odense'),
('2', 'Artur', 'at@hotmail.com', 'Allstreet 741', 'Vilnius'),
('3', 'Alice', 'al@gmail.com', 'Topstreet 56', 'London')]
cursor.executemany("INSERT INTO customer VALUES (?,?,?,?,?)", customer)
conn.commit()
# retrieve data from the table
cursor.execute('''SELECT idCust, name, email, address, city FROM customer''')
for row in cursor:
print('{0}, {1}, {2}, {3}, {4}'.format(row[0], row[1], row[2], row[3], row[4]))
# updating data in the table
email = 'alice@gmail.com'
idCust = 3
cursor.execute('''UPDATE customer SET email = ? WHERE idCust = ? ''',
               (email, idCust))
conn.commit()
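# Hedged follow-up sketch: re-read the updated row to confirm the change, then close the connection.
cursor.execute('''SELECT email FROM customer WHERE idCust = ?''', (idCust,))
print(cursor.fetchone())  # expected after the UPDATE above: ('alice@gmail.com',)
conn.close()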
|
# Copyright 2023 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from pants.backend.javascript.subsystems import nodejs
from pants.backend.openapi.lint.openapi_format import rules as openapi_format_rules
from pants.backend.openapi.lint.openapi_format import skip_field
def rules():
return (*nodejs.rules(), *openapi_format_rules.rules(), *skip_field.rules())
|
from unittest import TestCase
import msal
from settings import settings
from office365.graph_client import GraphClient
def get_token():
"""
Acquire token via MSAL ROPC flow!
"""
authority_url = 'https://login.microsoftonline.com/{0}'.format(settings['tenant'])
app = msal.PublicClientApplication(
authority=authority_url,
client_id=settings.get('client_credentials').get('client_id')
)
result = app.acquire_token_by_username_password(username=settings.get('user_credentials').get('username'),
password=settings.get('user_credentials').get('password'),
scopes=["https://graph.microsoft.com/.default"])
return result
class GraphTestCase(TestCase):
"""Microsoft Graph specific test case base class"""
client = None # type: GraphClient
@classmethod
def setUpClass(cls):
cls.client = GraphClient(get_token)
|
from rest_framework import viewsets, status
from .models import Notification
from .serializers import NotificationSerializer
from rest_framework.decorators import detail_route, list_route
from rest_framework.response import Response
from annoying.functions import get_object_or_None
class NotificationViewSet(viewsets.ModelViewSet):
queryset = Notification.objects.all()
serializer_class = NotificationSerializer
permission_classes = []
@list_route(methods=['POST'])
def is_read(self, request):
pk = request.data.get('id', None)
notification = get_object_or_None(Notification, id=pk)
if notification:
    notification.is_read = True
    notification.save()  # persist the flag; without save() the change is lost
    return Response(status=status.HTTP_200_OK)
return Response(status=status.HTTP_404_NOT_FOUND)
|
from itm import ITM
class Sim_Com(ITM):
def __init__(self, k, bits, crupt, sid, pid, channels, pump, poly, importargs):
self.crupt = crupt
self.ssid = sid[0]
self.committer = sid[1]
self.receiver = sid[2]
self.table = {}
self.revtable = {}
self.receiver_random = None
self.receiver_state = 1
handlers = {
channels['p2a'] : self.party_msg,
channels['f2a'] : self.func_msg,
channels['z2a'] : self.env_msg,
}
ITM.__init__(self, k, bits, sid, pid, channels, handlers, poly, pump, importargs)
def is_dishonest(self, sid, pid):
return (sid,pid) in self.crupt
def is_honest(self, sid, pid):
return not self.is_dishonest(sid,pid)
def hash(self, s):
if s not in self.table:
self.table[s] = self.sample(self.k)
self.revtable[self.table[s]] = s
return self.table[s]
def env_msg(self, m):
msg = m.msg
imp = m.imp
if msg[0] == 'A2F':
t,msg,iprime = msg
if msg[0] == 'ro':
self.write('a2f', ('ro', self.hash(msg[1])))
else:
self.pump.write('')
#elif isdishonest(self.sid, self.committer):
elif self.is_dishonest(self.sid, self.committer):
if msg[0] == 'A2P':
_,to,msg = msg
assert to == (self.sid, self.committer)
if msg[0] == 'commit':
# env gives some hash value
if msg[1] in self.revtable:
self.write('a2p', (to, ('commit', self.revtable[msg[1]])))
else:
b = self.sample(1)
self.write('a2p', (to, ('commit', b)))
else:
self.pump.write('')
else:
self.pump.write('')
def party_msg(self, m):
msg = m.msg
imp = m.imp
fro,msg = msg
print('adv party message', m)
if self.is_dishonest(self.sid, self.receiver) and fro == (self.sid, self.receiver):
if msg == 'commit' and self.receiver_state == 1:
self.receiver_random = self.sample(self.k)
#self.write('a2z', ('P2A', (fro, ((self.sid, 'F_ro'),('send', self.receiver_random)))))
self.write('a2z', ('P2A', (fro, ('send', self.receiver_random))))
self.receiver_state = 2
elif msg[0] == 'open' and self.receiver_state == 2 :
bit = msg[1]
#self.write('a2z', ('P2A', (fro, ((self.sid,'F_ro'),('send', (self.sample(self.k), bit))))))
self.write('a2z', ('P2A', (fro, ('send', (self.sample(self.k), bit)))))
self.receiver_state = 3
else:
self.pump.write('')
else:
self.pump.write('')
def func_msg(self, m):
self.pump.write('')
|
"""
We are going to define a simple form with an action and two fields
coming from a Zope interface.
We put our example in a separate file, since the configure.zcml of
zeam.form needs to be loaded in order to be able to create the fields,
which is not the case when the tests are collected.
Let's grok our example:
>>> from zeam.form.ztk.testing import grok
>>> grok('zeam.form.ztk.ftests.forms.ztkform_fixture')
We can now lookup our form by the name of its class:
>>> from zope.publisher.browser import TestRequest
>>> request = TestRequest()
>>> from zeam.form.ztk.ftests.forms.ztkform_fixture import Person
>>> context = Person()
>>> from zope import component
>>> form = component.getMultiAdapter(
... (context, request), name='personform')
>>> form
<zeam.form.ztk.ftests.forms.ztkform_fixture.PersonForm object at ...>
>>> len(form.actions)
1
>>> len(form.fields)
2
Integration test
----------------
Let's try to take a browser and submit that form:
>>> root = getRootFolder()
>>> root['person'] = context
>>> from zope.testbrowser.wsgi import Browser
>>> browser = Browser()
>>> browser.handleErrors = False
We can access the form, fill it and submit it:
>>> browser.open('http://localhost/person/personform')
>>> namefield = browser.getControl('Person name')
>>> namefield
<Control name='form.field.name' type='text'>
>>> namefield.value = 'Arthur Sanderman'
>>> agefield = browser.getControl('Person age')
>>> agefield
<Control name='form.field.age' type='number'>
>>> agefield.value = '42'
>>> action = browser.getControl('Send')
>>> action
<SubmitControl name='form.action.send' type='submit'>
>>> action.click()
>>> 'We sent Arthur Sanderman, age 42' in browser.contents
True
"""
|
# Copyright 2017 The Forseti Security Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Set up the gcloud environment and Forseti prerequisites.
This has been tested with python 2.7.
"""
import argparse
from environment import gcloud_env
def run():
"""Run the steps for the gcloud setup."""
parser = argparse.ArgumentParser()
parser.add_argument('--no-cloudshell',
action='store_true',
help='Bypass Cloud Shell requirement')
parser.add_argument('--no-iam-check',
action='store_true',
help='Bypass IAM check for user running script')
parser.add_argument('--branch',
help='Which Forseti branch to deploy')
group = parser.add_argument_group(title='regions')
group.add_argument('--gcs-location',
help='The GCS bucket location')
group.add_argument('--cloudsql-region',
help='The Cloud SQL region')
network = parser.add_argument_group(title='network')
network.add_argument('--network-host-project-id',
help='The project id that is hosting the network '
'resources.')
network.add_argument('--vpc-name',
help='The VPC name where Forseti VM will run.')
network.add_argument('--subnetwork-name',
help='The subnetwork name where Forseti VM will run.')
email_params = parser.add_argument_group(title='email')
email_params.add_argument('--sendgrid-api-key',
help='Sendgrid API key')
email_params.add_argument('--notification-recipient-email',
help='Notification recipient email')
email_params.add_argument('--gsuite-superadmin-email',
help='G Suite super admin email')
args = vars(parser.parse_args())
forseti_setup = gcloud_env.ForsetiGcpSetup(**args)
forseti_setup.run_setup()
if __name__ == '__main__':
run()
|
import socket
from gosnu.consumer import Consumer
from gosnu.producer import Producer
class Connection():
def __init__(self, ip, port=8081):
self.ip = ip
self.port = port
self.tcp_client = None
def connect(self):
# Initialize a TCP client socket using SOCK_STREAM
self.tcp_client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Establish connection to TCP server and exchange data
self.tcp_client.connect((self.ip, self.port))
def __enter__(self):
return self
def __exit__(self, *args):
self.tcp_client.close()
def Producer(self):
return Producer(self.ip)
def Consumer(self, callback=print):
return Consumer(self.tcp_client, callback=callback)
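# Hedged usage sketch (host and port are placeholders; gosnu's Producer/Consumer behaviour is
# assumed, so this stays commented out):
# with Connection("127.0.0.1", 8081) as conn:
#     conn.connect()
#     consumer = conn.Consumer(callback=print)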
|
from virtualscada.vs import removeRows
from virtualscada.vs import removeValues
from virtualscada.vs import fillValuesMLPFForward
|
#!/usr/bin/env python3
# Copyright (c) 2021 Mahdi Biparva, mahdi.biparva@gmail.com
# miTorch: Medical Imaging with PyTorch
# Deep Learning Package for 3D medical imaging in PyTorch
# Implemented by Mahdi Biparva, April 2021
# Brain Imaging Lab, Sunnybrook Research Institute (SRI)
import torch
import numpy as np
from torch.utils.data import DataLoader, ConcatDataset
import torch.utils.data
import data.transforms_mitorch as tf
import torchvision.transforms as torch_tf
from torch.utils.data import random_split
from .build import build_dataset
from .build_transformations import build_transformations
from data.VolSet import collate_fn as collate_fn_vol
from data.NeuroSegSets import collate_fn as collate_fn_pat
import os
# noinspection PyUnresolvedReferences
def ds_worker_init_fn(worker_id):
# set numpy seed number for each worker separately
assert torch.utils.data.get_worker_info().id == worker_id
seed = torch.utils.data.get_worker_info().seed
# needed for numpy random seed to be between 0 < seed < 2**32 - 1
seed = seed if seed < 2**32 else seed % 2**32
assert 0 < seed < 2 ** 32
np.random.seed(seed)
# noinspection PyTypeChecker
class DataContainer:
def __init__(self, mode, cfg):
self.cfg = cfg
self.dataset, self.dataloader, self.sampler = None, None, None
self.mode = mode
self.dataset_name, self.dl_params = self.init_dl_params()
self.create_dataset()
self.create_dataloader()
def init_dl_params(self):
collate_fn = collate_fn_pat if self.cfg.NVT.ENABLE else collate_fn_vol
if self.mode == 'train':
dataset_name = self.cfg.TRAIN.DATASET
batch_size = self.cfg.TRAIN.BATCH_SIZE
shuffle = self.cfg.TRAIN.SHUFFLE
drop_last = True
elif self.mode == 'valid':
dataset_name = self.cfg.TRAIN.DATASET
batch_size = self.cfg.VALID.BATCH_SIZE
shuffle = False
drop_last = False
elif self.mode == 'test':
dataset_name = self.cfg.TEST.DATASET
batch_size = self.cfg.TEST.BATCH_SIZE
shuffle = False
drop_last = False
else:
raise NotImplementedError
return dataset_name, {
'batch_size': batch_size,
'shuffle': shuffle,
'drop_last': drop_last,
'collate_fn': collate_fn,
}
def create_transform_single(self):
# --- BODY ---
if self.mode == 'train':
transformations_body = [
# tf.ToTensorImageVolume(),
# tf.RandomOrientationTo('RPI'),
# tf.RandomOrientationTo('RPI', prand=True),
# tf.RandomResampleTomm(target_spacing=(1, 1, 1)),
# tf.RandomResampleTomm(target_spacing=(1, 1, 1), target_spacing_scale=(0.2, 0.2, 0.2), prand=True),
tf.RandomCropImageVolumeConditional(self.cfg.DATA.CROP_SIZE, prand=True,
num_attemps=self.cfg.NVT.RANDOM_CROP_NUM_ATTEMPS,
threshold=self.cfg.NVT.RANDOM_CROP_THRESHOLD),
# tf.ResizeImageVolume(self.cfg.DATA.MAX_SIDE_SIZE, min_side=self.cfg.DATA.MIN_SIDE),
# tf.PadToSizeVolume(self.cfg.DATA.MAX_SIDE_SIZE, padding_mode=self.cfg.DATA.PADDING_MODE),
# tf.CenterCropImageVolume(self.cfg.DATA.CROP_SIZE),
# tf.RandomCropImageVolume(self.cfg.DATA.CROP_SIZE),
# tf.RandomResizedCropImageVolume(self.cfg.DATA.CROP_SIZE,
# scale=self.cfg.DATA.CROP_SCALE,
# uni_scale=self.cfg.DATA.UNI_SCALE),
# tf.RandomFlipImageVolume(dim=-1),
# tf.RandomBrightness(value=0.1, prand=True, channel_wise=True),
# tf.RandomContrast(value=0.1, prand=True, channel_wise=True),
# tf.RandomGamma(value=0.1, prand=True, channel_wise=True),
# tf.LogCorrection(inverse=(False, True)[1], channel_wise=True),
# tf.SigmoidCorrection(inverse=(False, True)[1], channel_wise=True),
# tf.HistEqual(num_bins=256, channel_wise=True),
# tf.AdditiveNoise(sigma=0.1, noise_type=('gaussian', 'rician', 'rayleigh')[0], randomize_type=False,
# out_of_bound_mode=('normalize', 'clamp')[1], prand=True, channel_wise=True),
]
elif self.mode in ('valid', 'test'):
transformations_body = [
# tf.ToTensorImageVolume(),
# tf.RandomOrientationTo('RPI'),
# tf.RandomOrientationTo('RPI', prand=True),
# tf.RandomResampleTomm(target_spacing=(1, 1, 1)),
tf.RandomCropImageVolumeConditional(self.cfg.DATA.CROP_SIZE, prand=True,
num_attemps=self.cfg.NVT.RANDOM_CROP_NUM_ATTEMPS,
threshold=self.cfg.NVT.RANDOM_CROP_THRESHOLD),
# tf.ResizeImageVolume(self.cfg.DATA.MAX_SIDE_SIZE, min_side=self.cfg.DATA.MIN_SIDE),
# tf.PadToSizeVolume(self.cfg.DATA.MAX_SIDE_SIZE, padding_mode=self.cfg.DATA.PADDING_MODE),
# tf.HistEqual(num_bins=256, channel_wise=True),
]
else:
raise NotImplementedError
# --- TAIL ---
transformations_tail = [
# tf.NormalizeMinMaxVolume(max_div=True, inplace=True),
# tf.NormalizeMeanStdVolume(
# mean=self.cfg.DATA.MEAN,
# std=self.cfg.DATA.STD,
# inplace=True
# ),
]
return torch_tf.Compose(
transformations_body + transformations_tail
)
def create_transform_hpo_brain(self):
if self.mode == 'train':
transformations_body = [
tf.ToTensorImageVolume(),
(
tf.RandomOrientationTo('RPI'),
tf.RandomOrientationTo('RPI', prand=True)
)[self.cfg.DATA.EXP.HEAD_ORI],
(
tf.RandomResampleTomm(target_spacing=(1, 1, 1)),
tf.RandomResampleTomm(target_spacing=(1, 1, 1), target_spacing_scale=(0.2, 0.2, 0.2), prand=True),
)[self.cfg.DATA.EXP.HEAD_RES],
tf.ResizeImageVolume(self.cfg.DATA.MAX_SIDE_SIZE, min_side=False),
tf.PadToSizeVolume(self.cfg.DATA.MAX_SIDE_SIZE, padding_mode=self.cfg.DATA.PADDING_MODE),
] + (
[],
[tf.CenterCropImageVolume(self.cfg.DATA.CROP_SIZE)],
[tf.RandomCropImageVolume(self.cfg.DATA.CROP_SIZE)],
[tf.RandomResizedCropImageVolume(self.cfg.DATA.CROP_SIZE, scale=self.cfg.DATA.CROP_SCALE)],
)[self.cfg.DATA.EXP.BODY_CRO] + (
[],
[tf.RandomFlipImageVolume(dim=-1)],
)[self.cfg.DATA.EXP.BODY_FLI] + (
[],
[[
tf.RandomBrightness(value=0.25, prand=True, channel_wise=True),
tf.RandomContrast(value=0.25, prand=True, channel_wise=True),
tf.RandomGamma(value=2.0, prand=True, channel_wise=True),
tf.LogCorrection(inverse=(False, True)[0], channel_wise=True),
tf.SigmoidCorrection(inverse=(False, True)[0], channel_wise=True),
tf.HistEqual(num_bins=512, channel_wise=True),
tf.AdditiveNoise(sigma=0.5, noise_type=('gaussian', 'rician', 'rayleigh')[0], randomize_type=False,
out_of_bound_mode=('normalize', 'clamp')[1], prand=True, channel_wise=True),
tf.AdditiveNoise(sigma=0.5, noise_type=('gaussian', 'rician', 'rayleigh')[1], randomize_type=False,
out_of_bound_mode=('normalize', 'clamp')[1], prand=True, channel_wise=True),
tf.AdditiveNoise(sigma=0.5, noise_type=('gaussian', 'rician', 'rayleigh')[2], randomize_type=False,
out_of_bound_mode=('normalize', 'clamp')[1], prand=True, channel_wise=True),
][self.cfg.DATA.EXP.INTENSITY_SEL]]
)[self.cfg.DATA.EXP.INTENSITY]
elif self.mode in ('valid', 'test'):
transformations_body = [
tf.ToTensorImageVolume(),
tf.RandomOrientationTo('RPI'),
tf.RandomResampleTomm(target_spacing=(1, 1, 1)),
tf.ResizeImageVolume(self.cfg.DATA.MAX_SIDE_SIZE, min_side=self.cfg.DATA.MIN_SIDE),
# tf.PadToSizeVolume(self.cfg.DATA.MAX_SIDE_SIZE, padding_mode=self.cfg.DATA.PADDING_MODE),
# tf.HistEqual(num_bins=256, channel_wise=True),
]
else:
raise NotImplementedError
# --- TAIL ---
transformations_tail = [
tf.NormalizeMinMaxVolume(max_div=True, inplace=True),
tf.NormalizeMeanStdVolume(
mean=self.cfg.DATA.MEAN,
std=self.cfg.DATA.STD,
inplace=True
),
]
return torch_tf.Compose(
transformations_body + transformations_tail
)
def create_transform_hpo(self):
if self.mode == 'train':
transformations_body = [
tf.RandomCropImageVolumeConditional(self.cfg.DATA.CROP_SIZE, prand=True,
num_attemps=self.cfg.NVT.RANDOM_CROP_NUM_ATTEMPS,
threshold=self.cfg.NVT.RANDOM_CROP_THRESHOLD),
] + (
[],
[tf.RandomFlipImageVolume(dim=-1)],
[tf.RandomFlipImageVolume(dim=0)],
[tf.RandomFlipImageVolume(dim=1)],
[tf.RandomFlipImageVolume(dim=2)],
)[self.cfg.DATA.EXP.BODY_FLI] + (
[],
[[
tf.RandomBrightness(value=0.1, prand=True, channel_wise=True),
tf.RandomContrast(value=0.1, prand=True, channel_wise=True),
tf.RandomGamma(value=0.1, prand=True, channel_wise=True),
tf.LogCorrection(inverse=False, channel_wise=True),
tf.LogCorrection(inverse=True, channel_wise=True),
tf.SigmoidCorrection(inverse=False, channel_wise=True),
tf.SigmoidCorrection(inverse=True, channel_wise=True),
tf.HistEqual(num_bins=128, channel_wise=True),
tf.HistEqual(num_bins=256, channel_wise=True),
tf.HistEqual(num_bins=512, channel_wise=True),
tf.AdditiveNoise(sigma=0.1, noise_type='gaussian', randomize_type=False,
out_of_bound_mode=('normalize', 'clamp')[1], prand=True, channel_wise=True),
tf.AdditiveNoise(sigma=0.1, noise_type='rician', randomize_type=False,
out_of_bound_mode=('normalize', 'clamp')[1], prand=True, channel_wise=True),
tf.AdditiveNoise(sigma=0.1, noise_type='rayleigh', randomize_type=False,
out_of_bound_mode=('normalize', 'clamp')[1], prand=True, channel_wise=True),
][self.cfg.DATA.EXP.INTENSITY_SEL]]
)[self.cfg.DATA.EXP.INTENSITY]
elif self.mode in ('valid', 'test'):
transformations_body = [
tf.RandomCropImageVolumeConditional(self.cfg.DATA.CROP_SIZE, prand=True,
num_attemps=self.cfg.NVT.RANDOM_CROP_NUM_ATTEMPS,
threshold=self.cfg.NVT.RANDOM_CROP_THRESHOLD),
]
else:
raise NotImplementedError
# --- TAIL ---
transformations_tail = (
[],
[
tf.NormalizeMeanStdVolume(
mean=self.cfg.DATA.MEAN,
std=self.cfg.DATA.STD,
inplace=True
),
],
[
tf.NormalizeMinMaxVolume(max_div=True, inplace=True),
tf.NormalizeMeanStdVolume(
mean=self.cfg.DATA.MEAN,
std=self.cfg.DATA.STD,
inplace=True
),
]
)[self.cfg.DATA.EXP.TAIL_NORM]
if self.cfg.DATA.EXP.TAIL_NORM == 1:
    # assign (rather than merely annotate) the statistics used without MinMax normalization
    self.cfg.DATA.MEAN = [256.42889404296875, 380.6856689453125]  # without MinMax
    self.cfg.DATA.STD = [64.1461410522461, 78.29484558105469]
return torch_tf.Compose(
transformations_body + transformations_tail
)
def create_transform(self):
CHOOSE_BEST_TRANSFORMS = (False, True)[1]
CHOOSE_HPO_TRANSFORMS = (False, True)[0]
if CHOOSE_BEST_TRANSFORMS:
transformations = build_transformations(self.dataset_name, self.cfg, self.mode)()
else:
if CHOOSE_HPO_TRANSFORMS:
transformations = self.create_transform_hpo()
else:
transformations = self.create_transform_single()
self.save_transformations_str(transformations)
return transformations
def data_split_pa_ind(self):
with open(os.path.join(self.cfg.PROJECT.DATASET_DIR, 'wmh_validation_subjs.txt'), 'r') as fh:
ind_list = fh.readlines()
ind_list = [i.strip() for i in ind_list]
ind_list_index = list()
non_ind_list_index = list()
for i, s in enumerate(self.dataset.sample_path_list):
s_name = s.rpartition('/')[-1]
if s_name in ind_list:
ind_list_index.append(i)
else:
non_ind_list_index.append(i)
if self.mode == 'train':
self.dataset = torch.utils.data.Subset(self.dataset, non_ind_list_index)
elif self.mode == 'valid':
self.dataset = torch.utils.data.Subset(self.dataset, ind_list_index)
elif self.mode == 'test':
raise NotImplementedError('undefined in this function')
def data_split(self):
torch.manual_seed(self.cfg.RNG_SEED)
n_tst = int(len(self.dataset) * self.cfg.PROJECT.TSR)
n_traval = len(self.dataset) - n_tst
n_tra = int(n_traval * self.cfg.PROJECT.TVSR)
n_val = n_traval - n_tra
tra, val, tst = random_split(
self.dataset,
(
n_tra,
n_val,
n_tst,
)
)
if self.mode == 'train':
self.dataset = tra
elif self.mode == 'valid':
self.dataset = val
elif self.mode == 'test':
self.dataset = tst
def create_dataset(self):
transformations = self.create_transform()
self.dataset = build_dataset(self.dataset_name, self.cfg, self.mode, transformations)
if self.cfg.TRAIN and self.cfg.TRAIN.DATASET == 'SRIBIL' and self.cfg.PROJECT.PA_INDICES:
self.data_split_pa_ind()
else:
self.data_split()
if self.mode == 'train' and self.cfg.NVT.ENABLE and self.cfg.NVT.REPEAT_DATASET > 1:
self.dataset = ConcatDataset([self.dataset]*self.cfg.NVT.REPEAT_DATASET)
if self.cfg.DDP:
try: # torch 1.5.0 on mist has issue with seed, remove it later
self.sampler = torch.utils.data.distributed.DistributedSampler(
self.dataset,
num_replicas=self.cfg.DDP_CFG.WORLD_SIZE,
rank=self.cfg.DDP_CFG.RANK,
shuffle=self.dl_params['shuffle'],
seed=self.cfg.RNG_SEED,
)
except Exception:
self.sampler = torch.utils.data.distributed.DistributedSampler(
self.dataset,
num_replicas=self.cfg.DDP_CFG.WORLD_SIZE,
rank=self.cfg.DDP_CFG.RANK,
shuffle=self.dl_params['shuffle'],
)
finally:
self.dl_params['shuffle'] = False
def create_dataloader(self):
self.dataloader = DataLoader(self.dataset,
sampler=self.sampler,
num_workers=self.cfg.DATA_LOADER.NUM_WORKERS,
pin_memory=self.cfg.DATA_LOADER.PIN_MEMORY,
worker_init_fn=ds_worker_init_fn,
** self.dl_params
)
def save_transformations_str(self, transformations):
self.cfg.__setitem__(f'transformations_{self.mode}'.upper(), transformations.__str__())
|
from cap_res_prob import CapResProb
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import time
np.random.seed(0)
# Small example:
n = 20 # Number of nodes.
m = 50 # Number of edges.
K = 10 # Number of scenarios.
# Large example:
# n = 2000 # Number of nodes.
# m = 5000 # Number of edges.
# K = 1000 # Number of scenarios.
# Data generation.
A_complete = np.zeros((n,int(n*(n-1)*0.5)))
count = 0
for i in range(n-1):
for j in range(i+1,n):
A_complete[i,count] = 1
A_complete[j,count] = -1
count += 1
edges = np.random.permutation(n*(n-1))[0:m]
A = np.hstack([A_complete, -A_complete])[:,edges] # Adjacency matrix.
p = np.random.rand(m,1)*2 # Prices on edges.
S = -A.dot(np.random.rand(m,K))*5 # Source vectors.
c = np.ones(m)*5 # Edge capacities.
# Algorithm parameters.
max_iter = 100
ep = 1e-2
mu = 0.05
prob = CapResProb(A, S, p, c)
mos_start_time = time.time()
J_mos, F_mos, Pi_mos = prob.solve_cvx(solver = 'MOSEK')
print('Mosek run time = %d' % (time.time() - mos_start_time))
start_time = time.time()
F_admm, Pi_admm, U, L = prob.solve_admm(solver='MOSEK', mu=mu)
print("ADMM run time = %d" % (time.time() - start_time))
print('J_star = %f' % J_mos)
print('L[0] = %f' % L[0])
print('U[0] = %f' % U[0])
L_best = [L[0]]
U_best = [U[0]]
for i in range(len(L)-1):
L_best.append(max(L_best[i], L[i+1]))
U_best.append(min(U_best[i], U[i+1]))
plt.figure()
plt.subplot(211)
plt.plot(U_best, linewidth = 2.0)
plt.plot(L_best, linewidth = 2.0)
plt.legend(["U(l)", "L(l)"], loc = 4)
plt.subplot(212)
plt.semilogy((np.array(U_best)-np.array(L_best))/np.array(L_best), linewidth = 2.0)
plt.semilogy((np.array(U_best)-J_mos)/J_mos, linewidth = 2.0)
plt.legend(["rel. gap", "rel. subopt."], loc = 1)
plt.savefig("bounds.pdf")
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, with_statement
from revolver import contextmanager as ctx
from revolver import directory as dir
from revolver import package, file, core
def install():
package.ensure(["curl", "git-core"])
if not dir.exists(".php-build"):
core.run("git clone git://github.com/CHH/php-build .php-build")
with ctx.cd(".php-build"):
core.run("git pull")
dir.create("versions")
dir.create("tmp")
_ensure_autoload(".bashrc")
_ensure_autoload(".zshrc")
def ensure():
if not dir.exists(".php-build"):
install()
def _ensure_autoload(filename):
file.append(filename, 'export PATH="$HOME/.php-build/bin:$PATH"')
|
# Copyright 2022 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
from pants.engine.target import (
COMMON_TARGET_FIELDS,
Dependencies,
MultipleSourcesField,
SingleSourceField,
Target,
TargetFilesGenerator,
generate_multiple_sources_field_help_message,
)
SWIFT_FILE_EXTENSIONS = (".swift",)
class SwiftDependenciesField(Dependencies):
pass
class SwiftSourceField(SingleSourceField):
expected_file_extensions = SWIFT_FILE_EXTENSIONS
class SwiftGeneratorSourcesField(MultipleSourcesField):
expected_file_extensions = SWIFT_FILE_EXTENSIONS
# -----------------------------------------------------------------------------------------------
# `swift_source` and `swift_sources` targets
# -----------------------------------------------------------------------------------------------
class SwiftSourceTarget(Target):
alias = "swift_source"
core_fields = (
*COMMON_TARGET_FIELDS,
SwiftDependenciesField,
SwiftSourceField,
)
help = "A single Swift source file."
class SwiftSourcesGeneratorSourcesField(SwiftGeneratorSourcesField):
default = tuple(f"*{ext}" for ext in SWIFT_FILE_EXTENSIONS)
help = generate_multiple_sources_field_help_message(
"Example: `sources=['utils.swift', 'subdir/*.swift', '!ignore_me.swift']`"
)
class SwiftSourcesGeneratorTarget(TargetFilesGenerator):
alias = "swift_sources"
core_fields = (
*COMMON_TARGET_FIELDS,
SwiftSourcesGeneratorSourcesField,
)
generated_target_cls = SwiftSourceTarget
copied_fields = COMMON_TARGET_FIELDS
moved_fields = (SwiftDependenciesField,)
help = "Generate a `swift_source` target for each file in the `sources` field."
|
from collections import OrderedDict
absolute_path = r'D:\__Alexzander_files__\__computer_science__\python_stuff\andrew_packages\programming_problems\reply_challenge'
class TestCase:
def __init__(self, index, total_teams, total_logs, teams_stats_list):
self.index = index
self.total_teams = total_teams
self.total_logs = total_logs
self.teams_stats_list = teams_stats_list
def __str__(self):
content = '=' * 50 + '\n'
content += f'Test case[{self.index}] =>\n'
content += f'\ttotal teams involved: {self.total_teams}\n'
content += f'\ttotal logs read: {self.total_logs}\n'
content += '\t' + '~' * 50 + '\n'
content += f'\tTeams stats for this test case:\n'
for ts in self.teams_stats_list:
content += f'{ts}'
content += '\n' + '=' * 50 + '\n'
return content
class TeamStats:
def __init__(self, timestamp, team_id, problem_id, input_id, scored):
self.timestamp = timestamp
self.team_id = team_id
self.problem_id = problem_id
self.input_id = input_id
self.scored = scored
def __str__(self):
content = '\t\t' + '~' * 50 + '\n'
content += f'\t\tTeam[{self.team_id}] with =>\n'
content += f'\t\t\ttimestamp: {self.timestamp}\n'
content += f'\t\t\tproblem id: {self.problem_id}\n'
content += f'\t\t\tinput id: {self.input_id}\n'
content += f'\t\t\tscored: {self.scored}\n'
content += '\t\t' + '~' * 50 + '\n'
return content
def ReadFromFile(filename: str):
with open(filename, 'r', encoding='utf-8') as input_file:
test_cases = list()
T = int(input_file.readline().strip())
T_index = 0
teams_stats = list()
line = tuple(map(int, input_file.readline().split()))
if len(line) == 2:
N, L = line
while line:
if len(line) == 5:
timestamp, team_id, problem_id, input_id, scored = line
teams_stats.append(TeamStats(timestamp, team_id, problem_id, input_id, scored))
elif len(line) == 2:
if teams_stats:
test_cases.append(TestCase(T_index, N, L, teams_stats))
teams_stats = list()
T_index += 1
N, L = line
line = tuple(map(int, input_file.readline().split()))
T_index += 1
test_cases.append(TestCase(T_index, N, L, teams_stats))
return test_cases
def ComputeProblem(test_cases_list: list):
test_cases_results = list()
for testcase in test_cases_list:
teams = OrderedDict()
absent_teams = [1, 2, 3, 4, 5]
for teamstats in testcase.teams_stats_list:
# update the actual team
id = teamstats.team_id
if id in absent_teams:
absent_teams.remove(id)
if id in teams:
if teamstats.scored == 1:
# we update the stats
teams[id]['score'] += teamstats.problem_id * 100 * teamstats.input_id
teams[id]['penalty_time'] += teamstats.timestamp
else:
teams[id] = OrderedDict()
if teamstats.scored == 1:
teams[id]['score'] = teamstats.problem_id * 100 * teamstats.input_id
teams[id]['penalty_time'] = teamstats.timestamp
else:
teams[id]['score'] = 0
teams[id]['penalty_time'] = 0
for abst_team in absent_teams:
teams[abst_team] = OrderedDict()
teams[abst_team]['score'] = 0
teams[abst_team]['penalty_time'] = 0
test_cases_results.append(teams)
return test_cases_results
def Transform(final_results: list):
results = list()
for testcase_dict in final_results:
testcase_result = list()
for teamid in testcase_dict:
new_format = OrderedDict()
new_format['team'] = teamid
new_format['score'] = testcase_dict[teamid]['score']
new_format['penalty_time'] = testcase_dict[teamid]['penalty_time']
testcase_result.append(new_format)
results.append(testcase_result)
return results
def DisplayTeamsResults(teams_dict: dict):
for teamid in teams_dict:
sc = teams_dict[teamid]["score"]
pen = teams_dict[teamid]["penalty_time"]
print(f'Team {teamid} with {sc} points and {pen} penalty time.')
def DisplayDictionaries(final_results: list):
print('\n[')
for testcase_dict in final_results:
print('\t{')
for teamid in testcase_dict:
print(f'\t\t{teamid}: {dict(testcase_dict[teamid])}')
print('\t}')
print(']\n')
def DisplayResults(final_results: list):
print('\n[')
for testcase_list in final_results:
print('\t[')
for team_dict in testcase_list:
print(f'\t\t{dict(team_dict)}')
print('\t]')
print(']\n')
def SortFinalResults(final_results: list):
sorted_results = []
for testcase_list in final_results:
testcase_sorted = sorted(testcase_list, key=lambda dictionary: (-dictionary['score'], dictionary['penalty_time']))
sorted_results.append(testcase_sorted)
return sorted_results
def WriteInFile(final_results: list, absolute_path: str):
with open(absolute_path, 'w', encoding='utf-8') as results_file:
for index, testcase in enumerate(final_results):
results_file.write(f'Case #{index + 1}: ')
for team in testcase[:len(testcase) - 1]:
results_file.write(f'{team["team"]} ')
results_file.write(f'{testcase[len(testcase) - 1]["team"]}')
results_file.write('\n')
if __name__ == '__main__':
input_files_list = [
r'\input1.txt',
r'\input2.txt',
r'\input3.txt',
r'\myinput.txt'
]
for index, file in enumerate(input_files_list):
test_cases = ReadFromFile(absolute_path + file)
results = ComputeProblem(test_cases)
results = Transform(results)
results = SortFinalResults(results)
DisplayResults(results)
WriteInFile(results, absolute_path + rf'\input{index + 1}_results.txt')
print('=' * 100)
|
class SymbolTable:
def __init__(self):
self.symbol_table = {
'scope_0': {
'name': 'scope_0',
'parent': None,
'rules': [],
}
}
self.current_scope = 'scope_0'
def insert(self, symbol):
self.symbol_table[self.current_scope]['rules'].append(symbol)
def start_scope(self, scope):
self.current_scope = scope
def end_scope(self):
    parent = self.symbol_table[self.current_scope]['parent']
    if parent is not None:
        self.current_scope = parent
def add_scope(self):
scope = 'scope_{}'.format(len(self.symbol_table))
self.symbol_table[scope] = {
'name': scope,
'parent': self.current_scope,
'rules': [],
}
self.start_scope(scope)
def find_scope(self, symbol):
scope = self.current_scope
while scope != None:
for rule in self.symbol_table[scope]['rules']:
if rule.formule == symbol:
return scope
scope = self.symbol_table[scope]['parent']
return scope
def lookup_formule_by_line(self, symbol, line):
scope = self.find_scope(symbol)
while scope != None:
for rule in self.symbol_table[scope]['rules']:
if rule.line == line:
return rule.formule
scope = self.symbol_table[scope]['parent']
return None
def get_rule(self, symbol):
scope = self.current_scope
while scope != None:
for rule in self.symbol_table[scope]['rules']:
if rule.formule.toString() == symbol:
return rule
scope = self.symbol_table[scope]['parent']
return None
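# A minimal usage sketch (added for illustration; assumes a simple rule object with
# `formule` and `line` attributes, mirroring how find_scope and lookup use them):
if __name__ == '__main__':
    class Rule:
        def __init__(self, formule, line):
            self.formule = formule
            self.line = line

    table = SymbolTable()
    table.insert(Rule('p -> q', 1))       # stored in scope_0
    table.add_scope()                     # opens scope_1 with parent scope_0
    table.insert(Rule('q -> r', 2))
    print(table.find_scope('q -> r'))     # 'scope_1'
    print(table.find_scope('p -> q'))     # 'scope_0', found via the parent chain
    table.end_scope()                     # back to scope_0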
|
import speech_recognition as sr
import struct
import base64
import wave
import matplotlib.pyplot as plt
import numpy as np
from scipy.fft import fft, ifft
r = sr.Recognizer()
myframerate = 88000
mychannel = 1
mysampleWidth = 2
duration = 7 # edit duration of sound here
myframes = duration * myframerate
if __name__ == '__main__':
encoded = input('Enter Base64 data!\n')
decoded = base64.standard_b64decode(encoded)
i = 0
obj = wave.open('sound_44KHz.wav', 'wb')
obj.setnchannels(mychannel)
obj.setsampwidth(mysampleWidth)
obj.setframerate(myframerate)
obj.setnframes(int(myframes))
voice_sample = []
for i in range(0, (len(decoded)//7)*7, 2):
temp1 = ((decoded[i + 1] << 8) | decoded[i])
# if(temp1 >= 4096):
# temp1 = 0
# else:
# temp1 = temp1 * 5
voice_sample.append(temp1)
data = struct.pack('<H', temp1)  # unsigned 16-bit sample, matching the sample width of 2 bytes
obj.writeframesraw(data)
obj.close()
harvard = sr.AudioFile('sound_new_encoding.wav')
with harvard as source:
audio = r.record(source)
print(r.recognize_google(audio))
voice_samples_np = np.array(voice_sample)
frequencies = fft(voice_samples_np)
time_values = np.linspace(0, 7, len(voice_sample))
print(frequencies)
# plt.plot(time_values, frequencies, 'ro')
# plt.show()
|
from jousting.round.phase import Phase
from jousting.util.dice import D6, roll
from jousting.util.rps import SHIELD
class TasteOfTheLance(Phase):
def do_taste_of_the_lance(self):
p1 = self._controller.get_p1()
p2 = self._controller.get_p2()
if not p1.get_failed_to_start() and not p2.get_failed_to_start():
p1_tactical_modifier = 1 if p1.get_won_rps() and p1.get_tactical_card() != SHIELD else 0
p2_tactical_modifier = 1 if p2.get_won_rps() and p2.get_tactical_card() != SHIELD else 0
self.determine_strike_modifier(p1, p1_tactical_modifier)
self.determine_strike_modifier(p2, p2_tactical_modifier)
self.strike_roll(p1, p2)
if not p2.get_unhorsed():
self.strike_roll(p2, p1)
def determine_strike_modifier(self, player, tactical=0):
if player.get_current_position() <= 12:
position = 0
elif player.get_current_position() <= 15:
position = 1
else:
position = 2
total_modifier = position + tactical - player.get_bruises()
total_modifier = max(total_modifier, 0)
player.set_strike_modifier(total_modifier)
# 1-2: Glancing blow. Effectively a miss. No points scored
# 3-4: Light blow. Score 2 points
# 5-6: Heavy blow. Score 3 points. Roll for effects
def strike_roll(self, player, opponent):
modifier = player.get_strike_modifier()
strike_roll = roll(D6, 1, modifier)
if (not opponent.get_accept_heavy_blows()) and strike_roll > 4:
strike_roll = 4
if 3 <= strike_roll < 5:
player.add_points(2)
elif strike_roll >= 5:
player.add_points(3)
self.heavy_blow_roll(opponent)
# 1-3: No effect
# 4-5: Bruise opponent
# 6: Unhorse opponent. Win joust
def heavy_blow_roll(self, opponent):
heavy_roll = roll(D6)
if 4 <= heavy_roll < 6:
opponent.add_bruise()
elif heavy_roll == 6:
opponent.set_unhorsed(True)
# 1-3: Lance not broken
# 4-6: Lance broken. Score 1 point
def lance_break_roll(self, player):
break_roll = roll(D6)
if break_roll >= 4:
player.add_points(1)
|
import numpy as np
def bootstrap_idx(dataset_size, n_bootstraps=150):
"""
Obtains indices for bootstrapping
:param dataset_size: size of the dataset
:param n_bootstraps: number of bootstraps to run
:return:
"""
data_idx = np.random.choice(np.arange(dataset_size), size=(n_bootstraps, dataset_size), replace=True)
return data_idx
def bootstrap_data(particles, weights, hist_range, n_bootstraps, percentile=5):
"""
Calculates the bootstrap for the error bars in a histogram
Example usage to plot error bars of a histogram
```
delta05, delta95, vals_ = bootstrap_data(data['posterior_particles'],
data['posterior_weights'],
hist_range,
10000,
percentile=1)
fig = plt.figure()
ax = fig.add_subplot(111)
probs, _, _ = ax.hist(data['posterior_particles'],
bins=hist_range,
density=True,
weights=data['posterior_weights'],
alpha=0.7)
ebars = np.stack([probs+delta05, probs+delta95])
eb = ax.errorbar(vals_, probs, yerr=ebars, color='k', linestyle='none')
```
:param particles: the particles
:param weights: the weights of each particle
:param hist_range: the range of the histogram
:param n_bootstraps: the number of bootstraps to use
in the estimation process
:param percentile: the CI percentile to return
:return:
"""
particles = np.array(particles)
weights = np.array(weights)
hist_probs, vals = np.histogram(particles, bins=hist_range, density=True, weights=weights)
prob_estimates = []
for idx in bootstrap_idx(particles.shape[0], n_bootstraps):
probs, vals = np.histogram(particles[idx], bins=hist_range, density=True, weights=weights[idx])
prob_estimates.append(probs)
prob_estimates = np.stack(prob_estimates)
deltas = hist_probs - prob_estimates
delta_bottom = np.percentile(deltas, percentile, axis=0)
delta_top = np.percentile(deltas, 100 - percentile, axis=0)
vals = (vals + 0.5)[:-1]
return delta_bottom, delta_top, vals
def empirical_distribution(particles, weights, histbin_range, return_numpy=False):
"""
Calculates the estimated distribution from particles and weights
:param particles:
:param weights:
:param histbin_range:
:return:
"""
hist_range = np.arange(-histbin_range - 2, histbin_range + 2) + 0.5  # shift by 0.5 so the bins are centered on integers
# Handle the case where the particles have multiple dimensions.
# Treat each dimension as independent and return one distribution per dimension.
if len(particles.shape) > 1:
estimated_dists = []
for i in range(particles.shape[1]):
estimated_dists.append(
empirical_distribution(
particles[:, i], weights, histbin_range, return_numpy))
if len(estimated_dists) == 1: return estimated_dists[0]
return estimated_dists
probs, vals = np.histogram(particles, bins=hist_range, density=True,
weights=weights)
if return_numpy:
return probs, (vals + 0.5)[:-1]
estimated_dist = dict(zip((vals + 0.5)[:-1], probs)) # add 0.5 to shift back
return estimated_dist
def average_estimated_distributions(estimated_distributions):
n_estimated_distributions = len(estimated_distributions['prob_estimates'])
prob_estimates = np.stack(estimated_distributions['prob_estimates'])
avg_prob = np.mean(prob_estimates, axis=0)
stderr_prob = np.std(prob_estimates, axis=0)/np.sqrt(n_estimated_distributions)
return estimated_distributions['support'][0], avg_prob, stderr_prob
if __name__ == '__main__':
# bootstrap for calculating the confidence interval around the mean
X = np.array([30, 37, 36, 43, 42, 43, 43, 46, 41, 42])
X_bar = X.mean()
bootstrapped_means = []
for idx in bootstrap_idx(X.shape[0], n_bootstraps=100000):
bootstrapped_means.append(X[idx].mean())
bootstrapped_means = np.array(bootstrapped_means)
deltas = X_bar - bootstrapped_means
delta_1 = np.percentile(deltas, 10, axis=0)
delta_9 = np.percentile(deltas, 90, axis=0)
print((X_bar + delta_1, X_bar + delta_9))
|
# -*- coding:utf-8 -*-
import requests
from bs4 import BeautifulSoup
import os
import time
import re
import random
import json
import threading
se = requests.session()
class Pixiv():
def __init__(self):
self.base_url = 'https://accounts.pixiv.net/login?lang=zh&source=pc&view_type=page&ref=wwwtop_accounts_index'
self.login_url = 'https://accounts.pixiv.net/api/login?lang=zh'
self.notify_url = '/notify_all.php'
self.notify_work_url = 'https://www.pixiv.net/rpc/notify.php'
self.return_to = 'https://www.pixiv.net'
self.headers = {
'Referer': 'https://accounts.pixiv.net/login?lang=zh&source=pc&view_type=page&ref=wwwtop_accounts_index',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) '
'AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36'
}
self.notify_headers = {
'Referer': 'https://www.pixiv.net/',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) ' \
'AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36',
"Content-Length": "0",
'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8'
}
self.notify_suffix = 'op=notify&tt='
def login(self, id, pswd):
post_key = BeautifulSoup(get(se, self.base_url, headers = self.headers).text, 'lxml').find('input')['value']
data = {
'pixiv_id': id,
'password': pswd,
'return_to': self.return_to,
'post_key': post_key
}
resp_text = post(se, self.login_url, data = data, headers = self.headers).text
resp = json.loads(resp_text, 'utf-8')
if 'success' not in resp['body']:
for x in resp['body']['validation_errors']:
return [x, resp['body']['validation_errors'][x]]
# return resp['body']['validation_errors']
else:
self.pid = id
self.pswd = pswd
cookies = requests.utils.dict_from_cookiejar(se.cookies)
cookies['login_ever'] = 'yes'
cookies['user_language'] = 'zh'
cookies['__utmc'] = '235335808'
cookies['__utma'] = '235335808.186197117.1487139067.1503166340.1503195157.86'
cookies['__utmb'] = '235335808.512.9.1503200678674'
cookies['__utmz'] = '235335808.1502737260.45.7.utmcsr=google|utmccn=(organic)|utmcmd=organic|utmctr=(not%20provided)'
se.cookies = requests.utils.cookiejar_from_dict(cookies, cookiejar = None, overwrite = True)
return None
def getServer(self):
ss = requests.session()
ss.cookies = se.cookies
return ss
def check_msg(self):
ss = se#self.getServer()
main_page_html = get(ss, self.return_to, timeout = 5).text
main_page = BeautifulSoup(main_page_html, 'lxml')
post_key = main_page.find('input', attrs = {'name': 'tt'})['value']
notify_msg = self.notify_suffix + post_key
rmsg = post(ss, self.notify_work_url,
headers = self.notify_headers, data = notify_msg
)
if rmsg == None:
return ""
return rmsg.text
def check_priv(self):
ss = se
# main_page_html = get(ss, self.return_to, timeout = 3).text
# main_page = BeautifulSoup(main_page_html, 'lxml')
# post_key = main_page.find('input', attrs = {'name': 'tt'})['value']
rpriv = get(ss, self.return_to + '/rpc/index.php?mode=latest_message_threads2&num=5',#'&tt=' + post_key,
headers = create_header(self.return_to)
)
if rpriv == None:
return ""
return rpriv.text
def create_header(url):
return {
'Referer': url,
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) '
'AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36'
}
mutex = threading.Lock()
def get(ss, *args, **kwargs):
mutex.acquire()
while 1:
try:
r = ss.get(*args, **kwargs)
break
except Exception, e:
print e.message
mutex.release()
return r
def post(ss, *args, **kwargs):
mutex.acquire()
while 1:
try:
r = ss.post(*args, **kwargs)
break
except Exception, e:
print e.message
mutex.release()
return r
pixiv = Pixiv()
if __name__ == "__main__":
import cache
dat = cache.config.read(('pixiv_id', 'password'))
# import testform
pixiv.login(dat['pixiv_id'], dat['password'])
# testform.CheckMessageThread().start()
print pixiv.check_msg()
|
# -*- coding: utf-8 -*-
import unittest
import time
class TestGenerator(unittest.TestCase):
def setUp(self):
pass
def simple_generator(self):
yield 2
for i in range(10):
yield i
def test_simple(self):
for i in self.simple_generator():
pass
# print(i)
LAG_SEC = 4
def lag_sec(now):
    'welcome to the future'
    now = (now // LAG_SEC + 1) * LAG_SEC  # round up to the next LAG_SEC boundary
    return now
times = int(time.time())
print(times)
print(lag_sec(times))
|
import xlrd
import traceback
import json
import sys
def convert_jet():
input_filename = 'JetCategories.xlsx'
output_filename = 'jet_categories.json'
# output_filename1 = 'pricefalls_categories1.json'
xls_categories = get_xls_data(input_filename)
categories_list = get_categories_list(xls_categories)
categories_dict_list = get_categories_dict_list(categories_list)
print('categories_dict_list = ',len(categories_dict_list),' records')
correct_list = create_correct_list(categories_dict_list)
print('correct_list = ',len(correct_list),' records')
# for dict in correct_list:
# print(dict)
with open(output_filename, 'w') as output_file:
json.dump(correct_list, output_file, sort_keys=False)
# with open(output_filename1, 'w', encoding='utf-8') as output_file:
# json.dump(categories_dict_list, output_file, sort_keys=False)
# print(json_list)
def get_xls_data(filename):
xls_data = []
try:
xls_file = xlrd.open_workbook(filename)
sheet = xls_file.sheet_by_index(0)
for row_num in range(0, sheet.nrows):
xls_data.append(sheet.row_values(row_num))
except Exception as e:
print(str(e))
finally:
return xls_data
def get_categories_list(xls_categories):
categories_list = []
for row in xls_categories:
try:
if row[0] != '' and row[0] != 'CategoryID':
category_list = row[2].split('/')
category_list.reverse()
category_id = int(row[0])
parent_category_id = int(row[3])
category_dict = {'category_list': category_list,
'category_id': category_id,
'parent_category_id': parent_category_id
}
categories_list.append(category_dict)
except Exception:
traceback.print_exc()
return categories_list
def get_categories_dict_list(categories_list):
only_categories_list = []
for dict in categories_list:
only_categories_list.append(dict['category_list'])
categories_dict_list = []
counter = 0
for dict in categories_list:
counter += 1
category_list = dict['category_list']
if len(category_list) == 1:
parent = None
else:
# parent = get_parent(only_categories_list, category_list)
parent = dict['parent_category_id']
category = dict['category_id']
# try:
# name = category_list[-1].decode('utf-8')
# except Exception:
name = category_list[-1]
category_dict = {'model': 'category_map.jetcategories',
# 'pk': category,
'pk': counter,
'fields': {
'name': name,
'level': len(category_list) - 1,
'category_id': category,
'parentId': parent,
'tree': get_tree(category_list)
}
}
categories_dict_list.append(category_dict)
# print(category_list,' - ',category_dict)
return categories_dict_list
def get_tree(category_list):
tree = category_list[0]
if len(category_list) > 1:
for i in range(1,len(category_list)):
tree += '/' + category_list[i]
return tree
def get_parent(categories_list,category_list):
parent = None
if category_list[:-1] in categories_list:
parent = categories_list.index(category_list[:-1]) + 1
return parent
def create_correct_list(categories_dict_list):
correct_list = []
counter = 0
for dict in categories_dict_list:
if dict['fields']['level'] == 0:
counter += 1
counter1 = counter
parent = None
category_dict = change_dict(dict, counter, parent, 0)
correct_list.append(category_dict)
for dict1 in categories_dict_list:
if dict1['fields']['level'] == 1 and dict1['fields']['parentId'] == dict['fields']['category_id']:
counter += 1
counter2 = counter
parent = counter1
category_dict = change_dict(dict1, counter, parent, 1)
correct_list.append(category_dict)
for dict2 in categories_dict_list:
if dict2['fields']['level'] == 2 and dict2['fields']['parentId'] == dict1['fields']['category_id']:
counter += 1
counter3 = counter
parent = counter2
category_dict = change_dict(dict2, counter, parent, 2)
correct_list.append(category_dict)
for dict3 in categories_dict_list:
if dict3['fields']['level'] == 3 and dict3['fields']['parentId'] == dict2['fields']['category_id']:
counter += 1
parent = counter3
category_dict = change_dict(dict3, counter, parent, 3)
correct_list.append(category_dict)
return correct_list
def change_dict(dict, counter, parent, level):
category_dict = {'model': 'category_map.jetcategories',
'pk': counter,
'fields': {
'name': dict['fields']['name'],
'level': level,
'category_id': dict['fields']['category_id'],
'parentId': parent,
'tree': dict['fields']['tree']
}
}
return category_dict
|
# -*- encoding:utf-8 -*-
# __author__=='Gan'
# Given a positive integer, output its complement number.
# The complement strategy is to flip the bits of its binary representation.
# Note:
# The given integer is guaranteed to fit within the range of a 32-bit signed integer.
# You could assume no leading zero bit in the integer’s binary representation.
# Example 1:
# Input: 5
# Output: 2
# Explanation: The binary representation of 5 is 101 (no leading zero bits),
# and its complement is 010. So you need to output 2.
# Example 2:
# Input: 1
# Output: 0
# Explanation: The binary representation of 1 is 1 (no leading zero bits),
# and its complement is 0. So you need to output 0.
# 149 / 149 test cases passed.
# Status: Accepted
# Runtime: 51 ms
# Your runtime beats 4.98 % of python submissions.
class Solution(object):
def findComplement(self, num):
"""
:type num: int
:rtype: int
"""
complement = {'1': '0', '0': '1'}
return int(''.join(complement[x] for x in bin(num)[2:]), 2)
# 149 / 149 test cases passed.
# Status: Accepted
# Runtime: 39 ms
# Your runtime beats 18.41 % of python submissions.
class Solution(object):
def findComplement(self, num):
"""
:type num: int
:rtype: int
"""
i = 1
while i <= num:
i <<= 1
return (i - 1) ^ num # Because there is no leading zero.
if __name__ == '__main__':
print(Solution().findComplement(5))
print(Solution().findComplement(0))
|
import arcpy
arcpy.env.overwriteOutput = True
folder = arcpy.GetParameterAsText(0)
datapoints = arcpy.GetParameterAsText(1)
pieceofplace = arcpy.GetParameterAsText(2)
nameDataBase = arcpy.GetParameterAsText(3)
arcpy.CreateFileGDB_management(folder, nameDataBase + '.gdb')
arcpy.AddMessage('Created new File GDB: {}.gdb'.format(nameDataBase))
arcpy.env.workspace = folder + "\\" + nameDataBase + '.gdb'
amenities = ['school', 'hospital', 'place_of_worship']
place = arcpy.GetParameterAsText(4)
arcpy.MakeFeatureLayer_management(pieceofplace, 'zoneclip', '"NAME" = ' + "'"+place + "'")
arcpy.Clip_analysis(datapoints, 'zoneclip', 'clipshp')
arcpy.AddMessage('Features clipped to the selected area ({})'.format(place))
for i in amenities:
arcpy.MakeFeatureLayer_management('clipshp', 'clip', '"amenity" = ' + "'" + i + "'")
arcpy.CopyFeatures_management('clip', 'zones_' + i)
arcpy.AddField_management('zones_' + i, 'source', 'TEXT')
arcpy.AddField_management('zones_' + i, 'GID', 'DOUBLE')
with arcpy.da.UpdateCursor('zones_' + i, ['source', 'GID', 'id']) as cursor:
for row in cursor:
row[1] = row[2]
row[0] = "OpenStreetMap"
cursor.updateRow(row)
arcpy.AddMessage('Created feature class for amenity ' + i)
arcpy.Delete_management('clipshp')
|
s=input('Enter:')
temp=''
for i in s:
temp=i+temp
if temp==s:
print('Palindrome')
else:
print('No')
|
def saludar():
print('Hola')
from tkinter import *
ventana = Tk()
boton = Button(ventana, text='Púlsame', command=saludar)
boton.pack()
ventana.mainloop()  # keep the window open and process events
|
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class OpenPensionCrawlerItem(scrapy.Item):
# The fields for our item
file_name = scrapy.Field()
page_url = scrapy.Field()
|
from __future__ import print_function
import os
# TODO: Add theano if statement check
# activate theano on gpu
#os.environ['THEANO_FLAGS'] = "device=gpu"
#import theano
#theano.config.floatX = 'float32'
import numpy as np
import sys
import gzip
from six.moves import cPickle
from vae_conv import conv_variational_autoencoder
from keras import backend as K
from scipy.stats import norm
# For plotting purposes
import matplotlib.pyplot as plt
from matplotlib import cm
import matplotlib as mpl
from mpl_toolkits.mplot3d import Axes3D
class CVAE(object):
def __init__(self, path="./", n_traj=1, f_traj=10, sep_train=0.8, sep_test=0.9,
sep_pred=1, choice=0, row=21, col=21, pad_row=1, pad_col = 1,
channels=1, batch_size=1000, conv_layers=4, feature_maps=[64,64,64,64],
filter_shapes=[(3,3),(3,3),(3,3),(3,3)], strides=[(1,1),(2,2),(1,1),(1,1)],
dense_layers=1, dense_neurons=[128], dense_dropouts=[0], latent_dim=3,
epochs=1, nb_start=0, nb_end=50, nb_select=10, load_step=10,
n_dec=10, pick=400, n_d=10, n1=0):
"""
Builds Keras CVAE model and provides API to use model.
"""
# TODO: Add path variable to allow output to any directory. Default to "./".
# TODO: Add exception handling for each input and add doc string.
# TODO: Add automatic scaling for odd dimension matrix
# TODO: Add automatic n_traj and f_traj calculation
if not os.path.exists(path):
raise Exception("Path: " + str(path) + " does not exist!")
# Define parameters (For training and loading)
self.path = path
# No of trajectory files and frames in each file
self.n_traj = n_traj
self.f_traj = f_traj
# Fraction of train, test and pred data separation
self.sep_train = sep_train
self.sep_test = sep_test
self.sep_pred = sep_pred
# Choice to flatten data: "0" for NO & "1" for YES
self.choice = choice
# Row and column dimension for each frame
self.row = row
self.col = col
# Padding: use this in case of a dimension mismatch for the encoders
# pad_row and pad_col are the number of rows/columns to be added
# TODO: Consider handling automatically (if shape is odd then pad = 1, else pad = 0)
self.pad_row = pad_row
self.pad_col = pad_col
# Define parameters for variational autoencoder - convolutional
self.channels = channels
self.batch_size = batch_size
self.conv_layers = conv_layers
self.feature_maps = feature_maps
self.filter_shapes = filter_shapes
self.strides = strides
self.dense_layers = dense_layers
self.dense_neurons = dense_neurons
self.dense_dropouts = dense_dropouts
self.latent_dim = latent_dim
self.epochs = epochs
self.nb_start = nb_start
self.nb_end = nb_end
# Define parameters for loading section
self.nb_select = nb_select
self.load_step = load_step
self.load_start = self.nb_select
self.load_end = self.nb_end+1
# Number of digits for decoding
self.n_dec = n_dec
# What image to pick for to decode
self.pick = pick
# Figure with 10x10 digits for generator images
self.n_d = n_d
self.n1 = n1
# End define parameters
# TODO: Put at first instance of outputting data
self.build_directories()
# Other class attributes
def load_contact_matrix(self, dat_path, array_path):
"""
dat_path : str
Path of cont-mat.dat file.
EX) dat_path="./../native-contact/data/cont-mat.dat"
array_path : str
Path of cont-mat.array file
EX) array_path="./../native-contact/data/cont-mat.array"
"""
# Load data for labelling
self.label = np.loadtxt(dat_path)
# Open dat file
self.path_data_array = array_path
self.read_data()
self.process_input_data()
print("Data was successfully read, loaded, and processed")
# Not implemented
# Open pickled file
#with gzip.open('./aligned_fs-peptide_coor.pkl.gz', 'rb') as f3:
# (X) = cPickle.load(f3)
#x_raw = X
#print("Dataset dimension:", np.shape(x_raw))
def read_data(self):
"""
Internal method.
"""
# Read dat type large file line by line to save in array
nf = self.n_traj * self.f_traj
q = self.row * self.col
j_count = 0
k_count = 0
samples = (nf)
row_num = (nf) * self.row
column_num = (self.col)
array_f_int = np.zeros(shape=(row_num, column_num))
with open(self.path_data_array) as infile:
for line in infile:
array_f_string = line.split()
array_f_array = np.array(list(array_f_string), dtype='|S4')
array_f_float = array_f_array.astype(np.float)
array_f_int[j_count] = array_f_float
if j_count == k_count:
print("Frames read:", (j_count/self.row))
k_count = k_count + 10000 * self.row
j_count = j_count + 1
if j_count == (row_num):
break
print("Initial matrix array dimension:", np.shape(array_f_int))
array_f = np.reshape(array_f_int, (samples, self.row, self.col))
print("Final matrix array dimension:", np.shape(array_f))
x_raw = array_f[0:]
print("Dataset dimension:", np.shape(x_raw))
self.x_raw = x_raw
def process_input_data(self):
"""
Internal method.
"""
# Process of input data
# TODO: Add if expression as instructed in __init__ to automate padding.
# Padding
row_dim_array = self.row + self.pad_row
col_dim_array = self.col + self.pad_col
# Reshape data according to the choice of flatteing
if self.choice == 0:
new_shape = (len(self.x_raw), row_dim_array, col_dim_array)
if self.choice == 1:
new_shape = (len(self.x_raw), row_dim_array * col_dim_array)
add_zero = np.zeros(new_shape, dtype = self.x_raw.dtype)
if self.choice == 0:
add_zero[0:self.x_raw.shape[0], 0:self.x_raw.shape[1], 0:self.x_raw.shape[2]] = self.x_raw
if self.choice == 1:
add_zero[0:self.x_raw.shape[0], 0:self.x_raw.shape[1]] = self.x_raw
self.x_raw = add_zero
# Determine size for training, testing & prediction data
sep_1 = int(self.x_raw.shape[0] * self.sep_train)
sep_2 = int(self.x_raw.shape[0] * self.sep_test)
sep_3 = int(self.x_raw.shape[0] * self.sep_pred)
x_train_raw = self.x_raw[:sep_1]
x_test_raw = self.x_raw[sep_1:sep_2]
x_pred_raw = self.x_raw[sep_2:sep_3]
print("Shape to load:", "Train:", np.shape(x_train_raw), "Test:", np.shape(x_test_raw), "Prediction:", np.shape(x_pred_raw))
# load data
print("Loading data")
# Normalizing input image matrix
if len(x_train_raw) != 0:
X_train = x_train_raw.astype('float32') / np.amax(x_train_raw)
else:
X_train = x_train_raw.astype('float32')
if len(x_test_raw) != 0:
X_test = x_test_raw.astype('float32') / np.amax(x_test_raw)
else:
X_test = x_test_raw.astype('float32')
if len(x_pred_raw) != 0:
X_pred = x_pred_raw.astype('float32') / np.amax(x_pred_raw)
else:
X_pred = x_pred_raw.astype('float32')
print("Shape of data loaded:", "Train:", np.shape(X_train), "Test:", np.shape(X_test), "Prediction:", np.shape(X_pred))
# TODO: Reshape prediction shape X_pred ? Ask Sindhu
# Reshape to 4d tensors
image_size = X_train.shape[-2:]
if K.image_dim_ordering() == 'th' or K.image_dim_ordering() == 'channels_first':
tensor_shape = (1,image_size[0],image_size[1])
else:
tensor_shape = (image_size[0],image_size[1],1)
# OLD
#X_train = X_train.reshape((X_train.shape[0],) + tensor_shape)
#X_test = X_test.reshape((X_test.shape[0],) + tensor_shape)
#print("Reshaped data:", "Train:", np.shape(X_train), "Test:", np.shape(X_test))
#
#NEW A. Brace 7/16/2018, comment: Ask Sindhu why he didn't reshape X_pred.
X_train = X_train.reshape((X_train.shape[0],) + tensor_shape)
X_test = X_test.reshape((X_test.shape[0],) + tensor_shape)
X_pred = X_pred.reshape((X_pred.shape[0],) + tensor_shape)
print("Reshaped data:", "Train:", np.shape(X_train), "Test:", np.shape(X_test), "Prediction:", np.shape(X_pred))
self.X_train = X_train
self.X_test = X_test
self.X_pred = X_pred
self.sep_1 = sep_1
self.sep_2 = sep_2
self.sep_3 = sep_3
self.image_size = image_size
def build_directories(self):
"""
Internal method.
"""
# Create directories
self.path = self.path + "/cvae"
path_1 = self.path + "/fig"
path_2 = self.path + "/imgs"
path_3 = self.path + "/hist"
path_4 = self.path + "/model"
if not os.path.exists(self.path):
    os.mkdir(self.path, 0o755)
if not os.path.exists(path_1):
    os.mkdir(path_1, 0o755)
if not os.path.exists(path_2):
    os.mkdir(path_2, 0o755)
if not os.path.exists(path_3):
    os.mkdir(path_3, 0o755)
if not os.path.exists(path_4):
    os.mkdir(path_4, 0o755)
print("Completed directories creation or if already exist - then checked")
def compile(self):
"""
Builds autoencoder.
"""
print("Building convolutional variational autoencoder")
# set up parameter
self.feature_maps = self.feature_maps[0:self.conv_layers]
self.filter_shapes = self.filter_shapes[0:self.conv_layers]
self.strides = self.strides[0:self.conv_layers]
self.autoencoder = conv_variational_autoencoder(self.image_size,
self.channels,
self.conv_layers,
self.feature_maps,
self.filter_shapes,
self.strides,
self.dense_layers,
self.dense_neurons,
self.dense_dropouts,
self.latent_dim)
def train(self):
"""
Train, save & load.
"""
for i in range (self.nb_start, self.nb_end):
# Load model
if i == 0:
print("Skipping - no previous saved file to load")
else:
self.autoencoder.load(self.path + "/model/model_%i" %i)
# Train model
self.autoencoder.train(self.X_train[0:], self.batch_size, epochs=self.epochs,
validation_data=(self.X_test[0:], self.X_test[0:]),
checkpoint=False, filepath=self.path + "/savedweights.dat")
# Save model
self.autoencoder.save(filepath=self.path + "/model/model_%i" %(i+1))
# Save loss over train & validation
np.savetxt(self.path + '/hist/history.losses_%i' %(i+1), self.autoencoder.history.losses, delimiter=',')
np.savetxt(self.path + '/hist/history.val_losses_%i' %(i+1), self.autoencoder.history.val_losses, delimiter=',')
print("Completed %i epochs" % ((i+1) * self.epochs))
def history(self):
"""
Call method after training.
Compile loss value.
Saves history in "cvae/hist/hist_tot".
Plot loss value.
Plot train & validation loss.
Saves figures in "cvae/fig/history.png".
"""
# TODO: Add exception if "./hist/history.losses_%i" does not exist (inside for loop).
hist = np.zeros(((self.nb_end - self.nb_start), 3))
for i in range ((self.nb_start + 1), (self.nb_end + 1)):
hist_loss = np.loadtxt(self.path + "/hist/history.losses_%i" %i)
hist_val_loss = np.loadtxt(self.path + "/hist/history.val_losses_%i" %i)
tmp = np.array([i, hist_loss, hist_val_loss])
hist[i-1] = tmp
np.savetxt(self.path + '/hist/hist_tot', hist, delimiter=' ')
plt.switch_backend('agg')
plt.semilogx(hist[:, 0], hist[:, 1], color="blue", linewidth=1.5, linestyle="-", label="train_loss")
plt.semilogx(hist[:, 0], hist[:, 2], color="red", linewidth=3.5, linestyle=":", label="test_loss")
plt.legend(loc='upper right')
# plt.ylim(np.amin(hist[:,1:3]/np.amax(hist[:, 1:3])),np.amax(hist[:,1:3]/np.amax(hist[:, 1:3])))
plt.savefig(self.path + '/fig/history.png', dpi=600)
plt.clf()
def analyze(self, data_set, model_selection):
"""
data_set : str
'train', 'test', or 'pred'
model_selection : int
select file number
EX) Want "/model/model_2", then select model_selection = 2
Generates plots (.png files) stored in "/cvae/fig/".
Save evaluated data in '/imgs/decoded_train_%i.out'.
"""
# TODO: Add exception handling for input data
#Load, encode, decode, save both encode and decode
# 1) Select data set
# Load data to analyze
data = np.array([])
if data_set == 'train':
data = self.X_train[0:]
elif data_set == 'test':
data = self.X_test[0:]
elif data_set == 'pred':
data = self.X_pred[0:]
print("Loading", model_selection)
# TODO: Add exception handling checking that the file exists
# 2) Loading model
self.load_weights(self.path + "/model/model_%i" %model_selection)
print("Decode image for train data")
# 3) Decode images
decoded_imgs_full = self.decode(data)
# 4) Save decoded array to file
np.savetxt(self.path + '/imgs/decoded_train_%i.out' %model_selection,
np.reshape(decoded_imgs_full[:, 0:self.row, 0:self.col, :],
(len(decoded_imgs_full), (self.row *self.col))), fmt='%f')
print("Encode image for train data")
# Encode images
# 5) Project inputs on the latent space
x_pred_encoded = self.encode(data)
# 6) Save encoded array to file
np.savetxt(self.path + '/imgs/encoded_train_%i.out' %model_selection, x_pred_encoded, fmt='%f')
def load_weights(self, weight_path):
"""
weight_path : str
"""
self.autoencoder.load(weight_path)
def decode(self, data):
return self.autoencoder.decode(data)
def encode(self, data):
return self.autoencoder.return_embeddings(data)
def decode_pred(self):
return self.autoencoder.decode(self.X_pred)
def encode_pred(self):
return self.autoencoder.return_embeddings(self.X_pred)
def summary(self):
self.autoencoder.summary()
def analyze_all(self):
"""
Generates plots (.png files) stored in "/cvae/fig/".
"""
# TODO: Break up this function into several smaller plotting functions
#Load, encode, decode, save both encode and decode
# 1) Select data set (just one line)
# Load data to analyze
conv_full_train = self.X_train[0:]
conv_full_test = self.X_test[0:]
conv_full_pred = self.X_pred[0:]
label = self.label[:len(self.x_raw)]
y_train_0 = label[:self.sep_1, 0]
y_train_2 = label[:self.sep_1, 2]
y_test_0 = label[self.sep_1:self.sep_2, 0]
y_test_2 = label[self.sep_1:self.sep_2, 2]
y_pred_0 = label[self.sep_2:self.sep_3, 0]
y_pred_2 = label[self.sep_2:self.sep_3, 2]
# For generator images (for latent space = nD)
z_axis = np.arange(self.latent_dim - 2)
for load in range(self.load_start, self.load_end, self.load_step):
# need
#########################
print("Loading", load)
# TODO: Add exception handling checking that the file exists
# 2) Loading model
self.autoencoder.load(self.path + "/model/model_%i" %load)
print("Decode image for train data")
# 3) Decode images
decoded_imgs_full = self.autoencoder.decode(conv_full_train)
# Save decoded array to file
np.savetxt(self.path + '/imgs/decoded_train_%i.out' %load,
np.reshape(decoded_imgs_full[:, 0:self.row, 0:self.col, :],
(len(decoded_imgs_full), (self.row *self.col))), fmt='%f')
###########################
# Plot decoded images
plt.switch_backend('agg')
plt.figure(figsize=(20, 4))
for i in range (self.n_dec):
# Display original
ax = plt.subplot(2, self.n_dec, i + 1)
plt.imshow(conv_full_train[i + self.pick, 0:self.row , 0:self.col, :].reshape(self.row, self.col))
np.savetxt(self.path + '/imgs/original_imgs_train_%i_%i.out' %(i, load),
(conv_full_train[i + self.pick, 0:self.row , 0:self.col, :].reshape(self.row, self.col)))
plt.colorbar(orientation='vertical')
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
# Display reconstruction
ax = plt.subplot(2, self.n_dec, i + 1 + self.n_dec)
plt.imshow(decoded_imgs_full[i + self.pick, 0:self.row, 0:self.col, :].reshape(self.row, self.col))
np.savetxt(self.path + '/imgs/decoded_imgs_train_%i_%i.out' %(i, load),
(decoded_imgs_full[i + self.pick, 0:self.row, 0:self.col, :].reshape(self.row, self.col)))
plt.colorbar(orientation='vertical')
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
plt.savefig(self.path + '/fig/decoded_train_%i.png' %load, dpi=600)
plt.clf()
print("Decode image for test data")
# Decode images
decoded_imgs_full = self.autoencoder.decode(conv_full_test)
# Save decoded array to file
np.savetxt(self.path + '/imgs/decoded_test_%i.out' %load,
np.reshape(decoded_imgs_full[:, 0:self.row, 0:self.col, :],
(len(decoded_imgs_full), (self.row * self.col))), fmt='%f')
# Plot decoded images
plt.figure(figsize=(20, 4))
for i in range (self.n_dec):
# Display original
ax = plt.subplot(2, self.n_dec, i + 1)
plt.imshow(conv_full_train[i + self.pick, 0:self.row, 0:self.col, :].reshape(self.row, self.col))
np.savetxt(self.path + '/imgs/original_imgs_test_%i_%i.out' %(i,load),
(conv_full_train[i + self.pick, 0:self.row, 0:self.col, :].reshape(self.row, self.col)))
plt.colorbar(orientation='vertical')
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
# Display reconstruction
ax = plt.subplot(2, self.n_dec, i + 1 + self.n_dec)
plt.imshow(decoded_imgs_full[i + self.pick, 0:self.row, 0:self.col, :].reshape(self.row, self.col))
np.savetxt(self.path + '/imgs/decoded_imgs_test_%i_%i.out' %(i, load),
(decoded_imgs_full[i+self.pick, 0:self.row, 0:self.col, :].reshape(self.row, self.col)))
plt.colorbar(orientation='vertical')
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
plt.savefig(self.path + '/fig/decoded_test_%i.png' %load, dpi=600)
plt.clf()
print("Encode image for train data")
# Encode images
# 4) Project inputs on the latent space
x_pred_encoded = self.autoencoder.return_embeddings(conv_full_train)
# 5) Save encoded array to file
np.savetxt(self.path + '/imgs/encoded_train_%i.out' %load, x_pred_encoded, fmt='%f')
# PLOT in another subclass
# Plot 1:
Dmax = y_train_2
[n,s] = np.histogram(Dmax, 11)
d = np.digitize(Dmax, s)
#[n,s] = np.histogram(-np.log10(Dmax), 11)
#d = np.digitize(-np.log10(Dmax), s)
cmi = plt.get_cmap('jet')
cNorm = mpl.colors.Normalize(vmin=min(Dmax), vmax=max(Dmax))
#cNorm = mpl.colors.Normalize(vmin=140, vmax=240)
scalarMap = mpl.cm.ScalarMappable(norm=cNorm, cmap=cmi)
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
# scatter3D requires a 1D array for x, y, and z
# ravel() converts the 100x100 array into a 1x10000 array
p = ax.scatter3D(np.ravel(x_pred_encoded[:, 0]),
np.ravel(x_pred_encoded[:, 1]),
np.ravel(x_pred_encoded[:, 2]),
marker='o', c=scalarMap.to_rgba(Dmax))
ax.set_xlim3d(np.amin(np.ravel(x_pred_encoded[:, 0])), np.amax(np.ravel(x_pred_encoded[:, 0])))
ax.set_ylim3d(np.amin(np.ravel(x_pred_encoded[:, 1])), np.amax(np.ravel(x_pred_encoded[:, 1])))
ax.set_zlim3d(np.amin(np.ravel(x_pred_encoded[:, 2])), np.amax(np.ravel(x_pred_encoded[:, 2])))
ax.set_xlabel('VAE 0')
ax.set_ylabel('VAE 1')
ax.set_zlabel('VAE 2')
scalarMap.set_array(Dmax)
fig.colorbar(scalarMap)
plt.savefig(self.path + '/fig/encoded_train_%i.png' %load, dpi=600)
plt.clf()
print("Encode image for test data")
# Encode images
# Project inputs on the latent space
x_pred_encoded = self.autoencoder.return_embeddings(conv_full_test)
# Save encoded array to file
np.savetxt(self.path + '/imgs/encoded_test_%i.out' %load, x_pred_encoded, fmt='%f')
# Plot 2:
Dmax = y_test_2
[n,s] = np.histogram(Dmax, 11)
d = np.digitize(Dmax, s)
#[n,s] = np.histogram(-np.log10(Dmax), 11)
#d = np.digitize(-np.log10(Dmax), s)
cmi = plt.get_cmap('jet')
cNorm = mpl.colors.Normalize(vmin=min(Dmax), vmax=max(Dmax))
#cNorm = mpl.colors.Normalize(vmin=140, vmax=240)
scalarMap = mpl.cm.ScalarMappable(norm=cNorm, cmap=cmi)
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
# scatter3D requires a 1D array for x, y, and z
# ravel() converts the 100x100 array into a 1x10000 array
p = ax.scatter3D(np.ravel(x_pred_encoded[:, 0]),
np.ravel(x_pred_encoded[:, 1]),
np.ravel(x_pred_encoded[:, 2]),
marker='o', c=scalarMap.to_rgba(Dmax))
ax.set_xlim3d(np.amin(np.ravel(x_pred_encoded[:, 0])), np.amax(np.ravel(x_pred_encoded[:, 0])))
ax.set_ylim3d(np.amin(np.ravel(x_pred_encoded[:, 1])), np.amax(np.ravel(x_pred_encoded[:, 1])))
ax.set_zlim3d(np.amin(np.ravel(x_pred_encoded[:, 2])), np.amax(np.ravel(x_pred_encoded[:, 2])))
ax.set_xlabel('VAE 0')
ax.set_ylabel('VAE 1')
ax.set_zlabel('VAE 2')
scalarMap.set_array(Dmax)
fig.colorbar(scalarMap)
plt.savefig(self.path + '/fig/encoded_test_%i.png' %load, dpi=600)
plt.clf()
print("Generate image")
# Building generator
# Build a digit generator that can sample from the learned distribution
# Display a 2D manifold of the digits
# Linearly spaced coordinates on the unit square were transformed through the inverse CDF (ppf) of the Gaussian
# to produce values of the latent variables z, since the prior of the latent space is Gaussian
figure = np.zeros((self.row * self.n_d, self.col * self.n_d))
grid_x = norm.ppf(np.linspace(0.05, 0.95, self.n_d))
grid_y = norm.ppf(np.linspace(0.05, 0.95, self.n_d))
for i, yi in enumerate(grid_x):
for j, xi in enumerate(grid_y):
self.n1 = self.n1 + 1
z_sample = np.append([xi, yi], [z_axis])
z_sample = np.tile(z_sample, self.batch_size).reshape(self.batch_size, self.latent_dim)
x_decoded = self.autoencoder.generate(z_sample)
digit = x_decoded[0, 0:self.row, 0:self.col, :].reshape(self.row , self.col)
# Saving generated array to file
# np.savetxt('./generated/digit_%i.out' %self.n1, digit, fmt='%f')
figure[i * self.row: (i + 1) * self.row,
j * self.col: (j + 1) * self.col] = digit
plt.figure(figsize=(10, 10))
plt.imshow(figure)
plt.savefig(self.path + '/fig/generated_%i.png' %load, dpi=600)
plt.clf()
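# A minimal, hypothetical driver for the CVAE class above (added for illustration;
# the cont-mat paths are the examples from load_contact_matrix's docstring and all
# hyperparameters are left at their defaults):
if __name__ == '__main__':
    cvae = CVAE(path="./")
    cvae.load_contact_matrix(dat_path="./../native-contact/data/cont-mat.dat",
                             array_path="./../native-contact/data/cont-mat.array")
    cvae.compile()
    cvae.train()
    cvae.history()
    cvae.analyze(data_set='train', model_selection=cvae.nb_end)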
|
#!/usr/bin/python3
from PIL import ImageFile
import sys
WHITE = (255, 255, 255)
BLACK = (0, 0, 0)
BACKG = (30, 30, 30)
GREEN = (0, 128, 0)
THRESHOLD = 150
def distance(pixel, color):
distance = 0
for i in range(3):
distance += pow(color[i] - pixel[i], 2)
return distance
def is_green(pixel):
distance_to_green = distance(pixel, GREEN)
if distance_to_green < THRESHOLD:
return True
return False
with open(sys.argv[1], "rb") as fd:
p = ImageFile.Parser()
p.feed(fd.read())
image = p.close()
img2 = image.copy()
w, h = image.size
y_start = 0xFFFF
y_end = 0
x_start = 0xFFFF
x_end = 0
for x in range(0, w):
for y in range(0, h):
if is_green(image.getpixel((x, y))):
img2.putpixel((x, y), BLACK)
x_start = min(x, x_start)
y_start = min(y, y_start)
x_end = max(x, x_end)
y_end = max(y, y_end)
else:
img2.putpixel((x, y), WHITE)
print("Box is {}/{} to {}/{}".format(x_start, y_start, x_end, y_end))
img2 = img2.crop(box=(x_start, y_start, x_end+1, y_end))
img2.save(sys.argv[1].replace(".png", "_clean.png"))
|
# Version without using the matrix form #
import tensorflow as tf
tf.compat.v1.enable_eager_execution()  # imperative mode: operations run immediately instead of building a graph
# seed the random number generator
tf.compat.v1.set_random_seed(0)  # set_random_seed makes the random value generation functions return the same values on every run
x1 = [73., 93., 89., 96., 73.]
x2 = [80., 88., 91., 98., 66.]
x3 = [75., 93., 90., 100., 70.]
Y = [152., 185., 180., 196., 142.]
# random weights
# initialize the weights (one scalar each)
w1 = tf.Variable(tf.random.normal([1]))  # w1, w2, w3 are each defined the same way
w2 = tf.Variable(tf.random.normal([1]))
w3 = tf.Variable(tf.random.normal([1]))
b = tf.Variable(tf.random.normal([1]))
learning_rate = 0.000001
for i in range(1000 + 1):
# tf.GradientTape() to record the gradient of the cost function
with tf.GradientTape() as tape:  # record the computation so the gradient of the cost can be taken
hypothesis = w1 * x1 + w2 * x2 + w3 * x3 + b
cost = tf.reduce_mean(tf.square(hypothesis - Y))
# the variables are recorded on the tape
# call tape.gradient to obtain the gradients with respect to w1, w2, w3 and b
w1_grad, w2_grad, w3_grad, b_grad = tape.gradient(cost, [w1, w2, w3, b])
# update w1,w2,w3 and b
# w -= learning_rate * w_grad
w1.assign_sub(learning_rate * w1_grad)  # each variable has to be updated individually
w2.assign_sub(learning_rate * w2_grad)
w3.assign_sub(learning_rate * w3_grad)
b.assign_sub(learning_rate * b_grad)
if i % 50 == 0:
print("{:5} | {:12.4f}".format(i, cost.numpy()))
|
from objects import *
import pygame
from homescene import HomeScene
from game import App
class WinnerScene:
def __init__(self, post_game):
self._running = True
self.size = self.width, self.height = 400, 800
self.post_game = post_game
self.center = (self.width/2, self.height/2)
self._display_surf = pygame.display.set_mode(self.size, pygame.HWSURFACE)
self.image_surface = load_img("background/background.png")[0]
self.next_scene = None
def on_init(self):
self.txWinner = Text((self.center[0], self.center[1] - 120),"Winner: " + self.post_game.winner, 40, color=(232,215,42))
self.txScore = Text((self.center[0], self.center[1] - 50),"%d - %d" % (self.post_game.score1.count, self.post_game.score2.count), 36, color=(232,215,42))
btnback_pos = (self.center[0]+ 20, self.center[1])
btnagain_pos = (self.center[0] - 100, self.center[1])
self.btnBack = Button(
btnback_pos,
100, 50,
Text((btnback_pos[0] , btnback_pos[1] ), "Back", 25, color=(232,215,42)),
color=(83,35,222))
self.btnAgain = Button(
btnagain_pos,
100, 50,
Text((btnagain_pos[0] , btnagain_pos[1] ), "Again", 25, color=(232,215,42)),
color=(83,35,222))
# make center
self.txWinner.pos = (self.txWinner.pos[0] - self.txWinner.get_rect().w / 2, self.txWinner.pos[1])
self.txScore.pos = (self.txScore.pos[0] - self.txScore.get_rect().w / 2, self.txScore.pos[1])
self.btnBack.set_callback(
lambda: HomeScene()
)
self.btnAgain.set_callback(
lambda: App(self.post_game.p2b)
)
def on_event(self, events):
for event in events:
if event.type == pygame.MOUSEBUTTONDOWN:
a = self.btnBack.check_click(event.pos)
b = self.btnAgain.check_click(event.pos)
self.next_scene = a or b
def on_loop(self):
if self.next_scene:
self._running = False
def on_render(self):
self._display_surf.blit(self.image_surface, (0,0))
self.txWinner.draw(self._display_surf)
self.txScore.draw(self._display_surf)
self.btnBack.draw(self._display_surf)
self.btnAgain.draw(self._display_surf)
pygame.display.flip()
def on_cleanup(self):
if self.next_scene:
self.next_scene.on_execute()
def on_execute(self):
if self.on_init() == False:
self._running = False
while self._running:
self.on_event(pygame.event.get())
self.on_loop()
self.on_render()
self.on_cleanup()
|
import urllib.request, urllib.parse, urllib.error
from bs4 import BeautifulSoup
import xml.etree.ElementTree as ET
import ssl
import requests
# Ignore SSL certificate errors
ctx = ssl.create_default_context()
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
url = input('Enter URL - ')
if len(url)<1:
url = 'http://py4e-data.dr-chuck.net/comments_521898.xml'
# html = urllib.request.urlopen(url, context=ctx).read()
html = requests.get(url).text
tree = ET.fromstring(html)
sum = 0
count = 0
for num in tree.findall('.//count'):
# print(num.text)
sum = sum + int(num.text)
count = count + 1
print('Retrieving',url)
print('Count:',count)
print('Sum:',sum)
|
import cv2
import numpy as np
# Global Variables
res = [1280, 720]
eps = 1.5
face_cascade = cv2.CascadeClassifier('/Users/jeremy.meyer/opencv/data/haarcascades/haarcascade_frontalface_default.xml')
# Draws bounding box and text from coordinates.
def bbox(img, x1, y1, x2, y2, base_color=(255, 0, 0), text='Human Detected'):
x_adj = 12*len(text)
y_adj = 17
cv2.rectangle(img, (x1, y1), (x2, y2), base_color, 2)
if (y1 > 20):
cv2.rectangle(img, (x1, y1 - y_adj), (x1 + x_adj, y1 - 1), np.array(base_color) / 5, -1)
cv2.putText(img, text, (x1, y1 - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.5, base_color)
else:
cv2.rectangle(img, (x1, y2 + y_adj), (x1 + x_adj, y2 + 1), np.array(base_color) / 5, -1)
cv2.putText(img, text, (x1, y2 + y_adj - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.5, base_color)
def readvideo(filepath, fps, playback_multiplier=1, res=[640, 360]):
cap = cv2.VideoCapture(filepath)
cap.set(3, res[0])
cap.set(4, res[1])
while cap.isOpened():
ret, frame = cap.read()
if ret:
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
# clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
# cl1 = clahe.apply(gray)
# faces1 = face_cascade.detectMultiScale(cl1)
histeq = cv2.equalizeHist(gray)
faces1 = face_cascade.detectMultiScale(histeq)
faces2 = face_cascade.detectMultiScale(gray)
# grouped = cv2.groupRectangles(list(faces), 1, eps=eps)
# Draws bboxes on original calculated faces in blue
for (x, y, w, h) in faces1:
bbox(histeq, x, y, x + w, y + h, (255, 175, 0))
for (x, y, w, h) in faces2:
bbox(gray, x, y, x + w, y + h, (255, 175, 0))
# Draws bboxes on combined rectangles in red
# for (x, y, w, h) in grouped[0]:
# bbox(cl1, x, y, x + w, y + h, (0, 0, 255), "Human (Averaged)")
# bbox(gray, x, y, x + w, y + h, (255, 175, 0))
cv2.imshow('Hist-Eq', histeq)
cv2.imshow('BW', gray)
if cv2.waitKey(1) == ord('q'):
break
else:
break
cv2.destroyAllWindows()
cap.release()
#readvideo('testVid.mp4', 15, 10)
readvideo('jeremy-vids/positive/test1.mp4', 15, 12)
|
__author__ = "Narwhale"
# Exercise: use an anonymous function to merge t1 and t2 with zip(), then turn each k, v pair into a dict; also tests list comprehensions
# t1 = (('a'),('b'))
# t2 = (('c'),('d'))
# res = lambda t1,t2:[{k:v} for k,v in (zip(t1,t2))]
# # res = lambda t1,t2:[dict(zip(t1,t2))]
# print(res(t1,t2))
################################################
# What is an anonymous function, and what are its benefits?
# It lets us create a function without explicitly defining (and naming) one via def, which saves the trouble of naming it and cuts down on code
###################################################
# Exercise: 'and' and 'or' (short-circuit evaluation)
#1 and 2 ---------->2
#1 or 2------------>1
#################################################
# *args packs extra positional arguments into a tuple
# **kwargs packs extra keyword arguments into a dict
# Inside the function both can be used like ordinary values
# def func( *args, **kwargs):
# print('args is', args)
# print('kwargs is', kwargs)
# return args
#
#
# f = func('hello',12,3,445,6,a=2,e=54)
# # args is ('hello', 12, 3, 445, 6)
# # kwargs is {'a': 2, 'e': 54}
# print(f[0]) --------->hello
#########################################
# Generators
# x = (i%2 for i in range(10))
# print(x) ---------------> generator object
###########################################
# The relationship between Unicode and UTF-8
# UTF-8 is the most widely used encoding (implementation) of Unicode on the internet
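# A minimal added illustration (not from the original notes): a Python str holds Unicode
# code points, and .encode('utf-8') yields its UTF-8 byte representation.
# s = '中国'
# data = s.encode('utf-8')      # b'\xe4\xb8\xad\xe5\x9b\xbd' -- three bytes per character here
# print(data.decode('utf-8'))   # 中国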
###################################################
# What is wrong with defining a function as def func(a, b=[]): pass ?
# The default value of b is evaluated once, when the function is defined: b refers to a single [] object
# If a call mutates b, that shared default object changes too, so it is no longer the empty list from definition time
# Rule of thumb: default parameter values should be immutable objects
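# A minimal added sketch of the pitfall and the usual fix (a None sentinel):
# def bad(x, acc=[]):
#     acc.append(x)
#     return acc
# print(bad(1))    # [1]
# print(bad(2))    # [1, 2] -- the same default list is reused across calls
#
# def good(x, acc=None):
#     if acc is None:
#         acc = []
#     acc.append(x)
#     return acc
# print(good(1))   # [1]
# print(good(2))   # [2]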
#####################################################
# Print the 9x9 multiplication table
# for i in range(1,10):
# print('\n')
# for j in range(1,10):
# print('%s*%s=%s'%(i,j,i*j),end=' | ')
#
#########################################################
# Python standard library modules (examples)
# os, sys, datetime, re (match is a function in re, not a separate module)
#######################################################
# GIL (Global Interpreter Lock): when multiple threads run in one CPython process, only one thread executes Python bytecode at any given moment
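# A rough added demo (uncomment to run): CPU-bound work does not get faster with two
# threads under CPython because of the GIL; timings are indicative only.
# import threading, time
# def burn():
#     n = 0
#     for _ in range(10_000_000):
#         n += 1
# start = time.perf_counter()
# burn(); burn()
# print('sequential :', time.perf_counter() - start)
# start = time.perf_counter()
# threads = [threading.Thread(target=burn) for _ in range(2)]
# for t in threads: t.start()
# for t in threads: t.join()
# print('two threads:', time.perf_counter() - start)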
#########################################################
# Singleton pattern
# class Single_instance(object):
# __instance = None
#
# def __init__(self):
# pass
#
# def __new__(cls, *args, **kwargs):
# if cls.__instance is None:
# cls.__instance = object.__new__(cls)  # object.__new__() does not accept extra arguments
# return cls.__instance
#
# a = Single_instance()
# b = Single_instance()
# print(id(a),id(b))
###############################################
# File operations
# Pass encoding= explicitly when writing and when reading text files, otherwise encoding errors are easy to hit
# with open('f.txt','w',encoding='utf-8') as f:
# f.write('你好')
# f = open('f.txt','r',encoding='utf-8')
# for line in f:
# print(line)
# # Whether reading or writing, an opened file must be closed; the 'with' block above does this automatically
# f.close()
#############################################################
# Match the text inside <div class="name">中国</div>, where the class value may vary
# import re
# a = '<div class="name">中国</div>'
# res = re.compile('<div class=".*?">(.*?)</div>')  # non-greedy, so the group captures only the tag's text
# result = res.findall(a)
# print(result)
###########################################################
# A table student has columns id, name, score, city
# name may contain duplicates; remove the duplicate rows
#select distinct name from student
############################################################
# Common Linux commands
#ls、pwd、rm、mkdir、tree、cp、touch、echo、more、mv
#########################################################
# Lists are a mutable type (the id stays the same after append)
# a = [1,2,3,4,5,6,7]
# print(id(a))
# a.append(8)
# print(id(a))
#
# # Strings are an immutable type (slicing creates a new object, so the id changes)
# s = 'shdkjhdfoisd'
# print(id(s))
# s = s[:4]
# print(id(s))
###################################################
# g = lambda x,y:x*y
# print(g(1,4))
#################################################
# Sort a dict by key in ascending order
# dict1 = {"name":"zs","age":23,"city":"广州","tel":"1376487434"}
# res = dict1.items()
# print(res)
# s = sorted(res,key=lambda x:x[0])
# print(s)
# # dict1 = dict(s)
# # dict comprehension
# dict2 = { k:v for k,v in s}
# print(dict2)
###################################################
# Regex: filter out English letters and digits, output only the Chinese text
#import re
# a = "not 404 found 张三 99 深圳"
# a = a.replace(' ','')
# print(a)
# re_object = re.compile('\d+|[a-zA-Z]+')
# x = re_object.findall(a)
# print(x)
# res = re.findall('\d+|[a-zA-Z]+',a)
# print(res)
# Split the string into a list of tokens
# list_a = a.split()
# Remove the tokens matched above
# for i in res:
# list_a.remove(i)
#
# print(list_a)
# Join back into a single string
# a = ' '.join(list_a)
# print(a)
###############################################
# Regex substitution
# import re
# a = "张明 98分"
# res = re.sub(r'\d+','100',a)
# print(res)
############################################
# a = '你好'
# # Encode the str into bytes
# a = a.encode()
# print(a)
##############################################
# With a space in the list, all() is True because ' ' is a non-empty string and counts as content
# a = [1,23,4,5,6,' ']
# c = [1,23,4,5,6,'']
# # print(any(a))
# print(all(a))
# print(all(c))
# # Why do the two lists behave differently under all()? ' ' is a non-empty (truthy) string, while '' is empty and falsy
################################################
# # copy vs deepcopy
# import copy
# # Immutable types
# # For immutable objects, copy and deepcopy both return the same object, just like plain assignment; rebinding to a different number changes the id accordingly
# a = 1
# c = copy.copy(a)
# d = copy.deepcopy(a)
# print(id(c))
# print(id(d))
# d = 2
# Mutable types
# copy and deepcopy each allocate a new object with an address different from the original; mutating the original list does not change their addresses
# A shallow copy does not duplicate nested (child) objects, it shares them with the original; deepcopy copies the child objects as well
# f = [1,2,3]
# f1 = copy.copy(f)
# f2 = copy.deepcopy(f)
#
# print(id(f1))
# f.append(3)
# print(id(f1))
# print(id(f2))
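# A small added example: the shallow/deep difference only shows up with nested objects.
# import copy
# outer = [[1, 2], [3, 4]]
# shallow = copy.copy(outer)
# deep = copy.deepcopy(outer)
# outer[0].append(99)
# print(shallow[0])   # [1, 2, 99] -- the inner list is shared with the original
# print(deep[0])      # [1, 2]     -- deepcopy duplicated the inner list too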
############################################
# foo = [-5,8,0,9,-4,-20,-2,8,2,-4]
# # The key function is what matters here
# # Sort ascending
# res1 = sorted(foo,key=lambda x:x)
# print(res1)
# # Positives (and zero) from small to large, then negatives from large to small
# res2 = sorted(foo,key=lambda x:(x<0,abs(x)))
# print(res2)
##############################################
# foo = [{"name":"zs","age":19},{"name":"ll","age":54},{"name":"wa","age":17},{"name":"df","age":23}]
# # Sort according to the supplied key function
# res1 = sorted(foo,key=lambda x:x['name'])
# print(res1)
# res2 = sorted(foo,key=lambda x:x['age'])
# print(res2)
######################################
# import re
# s = "info:xiaoZhang 33 shangdong"
#
# res = re.split(' |:',s)
# print(res)
#########################################
# Recursively compute 1+2+3+...+10
# def get_sum(num):
# if num:
# num = num + get_sum(num-1)
# return num
#
# result = get_sum(10)
# print(result)
#######################################
#json
# import json
# dict1 = {'a':1,'e':12}
# res1 = json.dumps(dict1)
# print(res1,type(res1))
#
# res2 = json.loads(res1)
# print(res2,type(res2))
#########################################
# Python garbage collection mechanisms
# reference counting
# mark-and-sweep (handles reference cycles)
# generational collection
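# A small added illustration: sys.getrefcount exposes reference counts, and the gc
# module controls the generational collector.
# import sys, gc
# a = []
# b = a
# print(sys.getrefcount(a))   # at least 3: a, b, plus the call's own temporary reference
# print(gc.get_count())       # pending objects per generation (gen0, gen1, gen2)
# print(gc.collect())         # force a full collection; returns the number of unreachable objects found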
##########################################
|