repo_name stringlengths 6 97 | path stringlengths 3 341 | text stringlengths 8 1.02M |
|---|---|---|
prolidorpenrvapiroldon/cppclasshelper-sublime-text-plugin | method_generator/exceptions.py | class ClassValidationException(Exception):
pass |
prolidorpenrvapiroldon/cppclasshelper-sublime-text-plugin | method_generator/klass/klass.py | <reponame>prolidorpenrvapiroldon/cppclasshelper-sublime-text-plugin
class Klass:
    """Model of a single parsed C++ class: name, methods, optional template and namespace."""

    def __init__(self, name):
        # Name is fixed at construction; exposed via the read-only `name` property.
        self._name = name
        self._methods = []
        # Both stay None for a plain, non-templated class in the global namespace.
        self._template = None
        self._namespace = None

    @property
    def name(self):
        """Class identifier as parsed from the header."""
        return self._name

    @property
    def methods(self):
        """Collected Method objects, in insertion order."""
        return self._methods

    def add_method(self, method):
        """Register one parsed method on this class."""
        self._methods.append(method)

    @property
    def template(self):
        """Template object for the class, or None when not templated."""
        return self._template

    @template.setter
    def template(self, template):
        self._template = template

    @property
    def namespace(self):
        """Qualified namespace string (e.g. 'sf::sd'), or None."""
        return self._namespace

    @namespace.setter
    def namespace(self, namespace):
        self._namespace = namespace
|
prolidorpenrvapiroldon/cppclasshelper-sublime-text-plugin | tests/test_parser.py | <reponame>prolidorpenrvapiroldon/cppclasshelper-sublime-text-plugin
from method_generator.parser import ClassParser
from method_generator.tokenizer import ClassTokenizer
from method_generator.exceptions import ClassValidationException
import os
def read_files_as_tokenizers():
    """Tokenize and parse both fixture headers; return the parsed Klass objects.

    Files that fail class validation are skipped (the error is printed).
    """
    fixture_names = ["tests/files/User.hpp", "tests/files/Window.hpp"]
    parsed = []
    for name in fixture_names:
        with open(os.path.abspath(name), 'r') as handle:
            source = handle.read()
        try:
            tokens = ClassTokenizer().tokenize(source)
            parsed.append(ClassParser().parse(tokens))
        except ClassValidationException as error:
            print(str(error))
    return parsed
# tests = read_files_as_tokenizers()
# test_method = tests[0].methods[1]
# print(str(test_method))
# a = 1
class TestClassParser():
    """Parser-level checks against the User.hpp / Window.hpp fixtures."""

    def test_has_namespace(self):
        parsed = read_files_as_tokenizers()
        assert parsed[0].namespace == "sf::sd"
        assert parsed[1].namespace == "OS"

    def test_name_parsing(self):
        parsed = read_files_as_tokenizers()
        assert parsed[0].name == "User"
        assert parsed[1].name == "Window"

    def test_template_parsing(self):
        parsed = read_files_as_tokenizers()
        # User.hpp is templated, Window.hpp is not.
        assert str(parsed[0].template) == "template <class T, typename D>"
        assert parsed[1].template is None
class TestMethodParser:
    # Exercises MethodParser end-to-end via the User.hpp fixture: argument
    # counts, const/pure-virtual flags, templates and rendered definitions.
    def test_method_parsing(self):
        test_files = read_files_as_tokenizers()
        methods_0 = test_files[0].methods
        assert len(methods_0) == 14
        # argument counts per method (index order follows the header)
        assert len(methods_0[0].arguments) == 0
        assert len(methods_0[1].arguments) == 2
        assert len(methods_0[2].arguments) == 0
        assert len(methods_0[3].arguments) == 1
        assert methods_0[3].arguments[0] == "std::string name"
        assert len(methods_0[4].arguments) == 0
        assert methods_0[4].is_const == True
        assert methods_0[5].is_pure_virtual == True
        assert len(methods_0[6].arguments) == 0
        assert methods_0[6].return_type == "void"
        assert str(methods_0[10].template) == "template <typename T>"
        # testing full output from methods
        assert str(methods_0[0]) == "template <class T, typename D> sf::sd::User<T, D>::User() {}"
        assert str(methods_0[1]) == "template <class T, typename D> sf::sd::User<T, D>::User(std::string name, int skillLevel) {}"
        assert str(methods_0[2]) == "template <class T, typename D> sf::sd::User<T, D>::~User() {}"
        assert str(methods_0[3]) == "template <class T, typename D> void sf::sd::User<T, D>::setName(std::string name) {}"
        assert str(methods_0[4]) == "template <class T, typename D> std::string sf::sd::User<T, D>::getName() const {}"
        assert str(methods_0[6]) == "template <class T, typename D> void sf::sd::User<T, D>::move() {}"
        assert str(methods_0[7]) == "template <class T, typename D> GameState* sf::sd::User<T, D>::getGameState() {}"
        assert str(methods_0[8]) == "template <class T, typename D> GameRef& sf::sd::User<T, D>::getGameRef() {}"
        assert str(methods_0[9]) == "template <class T, typename D> SuperPower* sf::sd::User<T, D>::getSuperPower() {}"
        assert str(methods_0[10]) == "template <class T, typename D> template <typename T> T sf::sd::User<T, D>::getEnemy() {}"
        assert str(methods_0[11]) == "template <class T, typename D> template <typename T> T sf::sd::User<T, D>::getSomethingElse() {}"
        assert str(methods_0[12]) == "template <class T, typename D> void sf::sd::User<T, D>::foo() {}"
        assert str(methods_0[13]) == "template <class T, typename D> void sf::sd::User<T, D>::stop() {}"
        # rendering options: newline after template / after method, and the
        # cursor placeholder between the braces
        methods_0[13].add_option("newline_after_template", True)
        methods_0[13].add_option("newline_after_method", True)
        methods_0[13].add_option("place_cursor_between_brackets", True)
        assert str(methods_0[13]) == "template <class T, typename D>\nvoid sf::sd::User<T, D>::stop() \n{\n\t\n}"
        methods_0[13].add_option("newline_after_template", False)
        methods_0[13].add_option("newline_after_method", True)
        methods_0[13].add_option("place_cursor_between_brackets", True)
        assert str(methods_0[13]) == "template <class T, typename D> void sf::sd::User<T, D>::stop() \n{\n\t\n}"
        methods_0[13].add_option("newline_after_template", False)
        methods_0[13].add_option("newline_after_method", False)
        methods_0[13].add_option("place_cursor_between_brackets", True)
        assert str(methods_0[13]) == "template <class T, typename D> void sf::sd::User<T, D>::stop() {\n\t\n}"
        methods_0[13].add_option("newline_after_template", False)
        methods_0[13].add_option("newline_after_method", False)
        methods_0[13].add_option("place_cursor_between_brackets", False)
        assert str(methods_0[13]) == "template <class T, typename D> void sf::sd::User<T, D>::stop() {}" |
prolidorpenrvapiroldon/cppclasshelper-sublime-text-plugin | tests/test_tokenizer.py | from method_generator.tokenizer import ClassTokenizer
from method_generator.exceptions import ClassValidationException
import os
def read_files_as_tokenizers():
    """Read both fixture headers and return their tokenized representations.

    Files that fail class validation are skipped (the error is printed).
    """
    fixture_names = ["tests/files/User.hpp", "tests/files/Window.hpp"]
    tokenized = []
    for name in fixture_names:
        with open(os.path.abspath(name), 'r') as handle:
            source = handle.read()
        try:
            tokenized.append(ClassTokenizer().tokenize(source))
        except ClassValidationException as error:
            print(str(error))
    return tokenized
# tests = read_files_as_tokenizers()
# a = 1
class TestClassTokenizer:
    """Checks the tokenizer extracts the class name from each fixture."""

    def test_name_parsing(self):
        tokenized = read_files_as_tokenizers()
        assert tokenized[0]["name"] == "User"
        assert tokenized[1]["name"] == "Window"
class TestTemplateTokenizer:
    """Checks template-parameter extraction (User.hpp is templated, Window.hpp is not)."""

    def test_has_template(self):
        tokenized = read_files_as_tokenizers()
        user_template = tokenized[0]["template"]
        assert user_template[0]["typename"] == "class"
        assert user_template[0]["datatype"] == "T"
        assert user_template[1]["typename"] == "typename"
        assert user_template[1]["datatype"] == "D"
        assert tokenized[1]["template"] is None
class TestNamespaceTokenizer:
    """Checks namespace extraction, including a nested ('sf::sd') namespace."""

    def test_has_namespace(self):
        tokenized = read_files_as_tokenizers()
        assert tokenized[0]["namespace"] == "sf::sd"
        assert tokenized[1]["namespace"] == "OS"
class TestMethodTokenizer:
    # Validates the raw tokenizer dicts for both fixtures: method names,
    # return types, const / pure-virtual flags and per-method templates.
    def test_check_method_names(self):
        test_files = read_files_as_tokenizers()
        # testing first class methods
        methods_file_0 = test_files[0]["methods"]
        assert len(methods_file_0) == 14
        # constructors and the destructor carry no return type
        assert methods_file_0[0]["name"] == "User"
        assert methods_file_0[0]["return_type"] == None
        assert methods_file_0[0]["is_pure_virtual"] == False
        assert methods_file_0[1]["name"] == "User"
        assert methods_file_0[1]["return_type"] == None
        assert methods_file_0[1]["is_pure_virtual"] == False
        assert methods_file_0[2]["name"] == "~User"
        assert methods_file_0[2]["return_type"] == None
        assert methods_file_0[2]["is_pure_virtual"] == False
        assert methods_file_0[3]["name"] == "setName"
        assert methods_file_0[3]["return_type"] == "void"
        assert methods_file_0[3]["is_pure_virtual"] == False
        assert methods_file_0[4]["name"] == "getName"
        assert methods_file_0[4]["return_type"] == "std::string"
        assert methods_file_0[4]["is_pure_virtual"] == False
        assert methods_file_0[4]["is_const"] == True
        assert methods_file_0[5]["name"] == "play"
        assert methods_file_0[5]["return_type"] == "void"
        assert methods_file_0[5]["is_pure_virtual"] == True
        # a method with its own template parameter
        assert methods_file_0[10]["name"] == "getEnemy"
        assert methods_file_0[10]["return_type"] == "T"
        assert methods_file_0[10]["template"][0]["typename"] == "typename"
        assert methods_file_0[10]["template"][0]["datatype"] == "T"
        # testing second class methods
        methods_file_1 = test_files[1]["methods"]
        assert len(methods_file_1) == 3
        assert methods_file_1[2]["return_type"] == "int"
        assert methods_file_1[2]["name"] == "getSize"
        assert methods_file_1[2]["is_pure_virtual"] == False
        assert methods_file_1[2]["is_const"] == False
|
prolidorpenrvapiroldon/cppclasshelper-sublime-text-plugin | method_generator/klass/datatype.py | class Datatype:
"""
represents a datatype
"""
    # Holds the textual name of a datatype (e.g. "int", "std::string", "T").
    def __init__(self, name):
        self._name = name
    @property
    def name(self):
        # Read access to the datatype name.
        return self._name
    @name.setter
    def name(self, name):
        self._name = name
    def __str__(self):
        """
        renders datatype name
        :return: str
        """
        return self._name |
prolidorpenrvapiroldon/cppclasshelper-sublime-text-plugin | method_generator/klass/__init__.py | from .datatype import Datatype
from .template import Template
from .klass import Klass
from .method import Method
from .template_type import TemplateType
|
prolidorpenrvapiroldon/cppclasshelper-sublime-text-plugin | method_generator/parser.py | <gh_stars>1-10
from .klass import *
class ClassParser:
    """Builds a Klass instance out of a tokenized class dictionary."""

    @classmethod
    def parse(cls, input_obj):
        """
        create python classes from tokenized JSON object
        :param input_obj: dict
        :return: Klass
        """
        result = Klass(input_obj["name"])
        result.namespace = input_obj["namespace"]
        tokenized_methods = input_obj["methods"]
        if tokenized_methods:
            for parsed_method in cls._parse_methods(tokenized_methods):
                # each method keeps a back-reference to its owning class
                parsed_method.related_class = result
                result.add_method(parsed_method)
        result.template = TemplateParser().parse(input_obj["template"])
        return result

    @classmethod
    def _parse_methods(cls, input_methods):
        """Lazily convert each tokenized method dict into a Method object."""
        for tokenized in input_methods:
            yield MethodParser().parse(tokenized)
class MethodParser:
    """Builds a single Method object out of a tokenized method dictionary."""

    @classmethod
    def parse(cls, input_method):
        """
        parses a single method into method object
        :param input_method: dict
        :return: Method
        """
        parsed = Method()
        parsed.name = input_method["name"]
        # a method may carry its own template, independent of the class template
        parsed.template = TemplateParser().parse(input_method["template"])
        parsed.is_const = input_method["is_const"]
        parsed.is_pure_virtual = input_method["is_pure_virtual"]
        for argument in input_method["arguments"]:
            parsed.add_argument(argument)
        parsed.return_type = input_method["return_type"]
        return parsed
class TemplateParser:
    @classmethod
    def parse(cls, input_obj):
        """
        parses template method or class into Template object
        :param input_obj: dict
        :return: Template
        """
        # No template tokens -> the class/method is not templated (returns None).
        if input_obj is None:
            return
        template = Template()
        for template_type in input_obj:
            # each entry carries the keyword ("class"/"typename") and the placeholder name
            template_type_class = TemplateType(template_type["typename"], template_type["datatype"])
            template.add_template_type(template_type_class)
        return template |
prolidorpenrvapiroldon/cppclasshelper-sublime-text-plugin | method_generator/klass/method.py | <filename>method_generator/klass/method.py
class Method:
    """
    represents a C++ class method
    """
    def __init__(self):
        # Method identifier; set via the `name` setter after construction.
        self._name = None
        # Argument declarations stored as plain strings, e.g. "std::string name".
        self._arguments = []
        # None for constructors/destructors.
        self._return_type = None
        # Template object when the method itself is templated, else None.
        self._template = None
        # Back-reference to the owning Klass (set by the parser).
        self._class = None
        self._is_const = False
        self._is_pure_virtual = False
        # Rendering flags consumed by __str__.
        self._options = {
            "newline_after_template": False,
            "newline_after_method": False,
            "place_cursor_between_brackets": False
        }
    @property
    def name(self):
        return self._name
    @name.setter
    def name(self, name):
        self._name = name
    @property
    def arguments(self):
        return self._arguments
    def add_argument(self, argument):
        # Arguments are stringified on insertion; order is preserved.
        self._arguments.append(str(argument))
    @property
    def return_type(self):
        return self._return_type
    @return_type.setter
    def return_type(self, return_type):
        self._return_type = return_type
    @property
    def template(self):
        return self._template
    @template.setter
    def template(self, template):
        self._template = template
    @property
    def related_class(self):
        return self._class
    @related_class.setter
    def related_class(self, related_clas):
        self._class = related_clas
    @property
    def is_const(self):
        return self._is_const
    @is_const.setter
    def is_const(self, is_const):
        self._is_const = is_const
    @property
    def is_pure_virtual(self):
        return self._is_pure_virtual
    @is_pure_virtual.setter
    def is_pure_virtual(self, is_pure_virtual):
        self._is_pure_virtual = is_pure_virtual
    @property
    def options(self):
        return self._options
    def add_option(self, option, value):
        # Set one rendering flag; unknown keys are simply added.
        self._options[option] = value
def __str__(self):
"""
renders full method definition with all components
:return: str
"""
# creating empty method string to fill
method = ""
# check if class has template
# place newline after each template if option is set
if self._class.template is not None:
method += str(self._class.template)
if self._options["newline_after_template"]:
method += "\n"
else:
method += ""
# check if method has template
# place newline after method if option is set
if self._template is not None:
if self._class.template:
method += " "
method += str(self.template)
if self._options["newline_after_template"]:
method += "\n"
else:
method += ""
# make sure to leave space between the definition components
if self._return_type:
if not self.options["newline_after_template"]:
method += " "
method += self._return_type + " "
else:
if self.options["newline_after_template"]:
method += ""
else:
method += " "
# insert 2 colons if class has namespace
if self._class.namespace is not None:
method += self._class.namespace + "::"
method += self._class.name
# render class templates; merging the template types
if self._class.template is not None:
class_template_types = []
for template_type in self._class.template.template_types:
class_template_types.append(str(template_type.name))
method += "<{}>".format(', '.join(class_template_types))
# connecting again the definition components
method += "::"
method += self._name
# create list comprehension from method arguments
# make sure to strip every argument
if self._arguments:
self._arguments = [x.strip() for x in self._arguments]
self._arguments = ', '.join(self._arguments)
else:
self._arguments = ""
# insert arguments
method += "({}) ".format(self._arguments)
# check if method is const
if self._is_const:
method += "const "
# place newline after if option is set
if self._options["newline_after_method"]:
method += "\n"
# place newline and tab after brackets, so cursor is placed between the brackets
if self._options["place_cursor_between_brackets"]:
method += "{\n\t\n}"
else:
method += "{}"
# strip to only make sure, there are no spaces at begin and end of definition
return method.strip() |
prolidorpenrvapiroldon/cppclasshelper-sublime-text-plugin | tests/test_generator.py | <filename>tests/test_generator.py<gh_stars>1-10
from method_generator.generator import Generator
from method_generator.exceptions import ClassValidationException
import os
def read_files_as_method_generators():
    """Run the Generator on both fixture headers.

    Returns one dict per file with the generated method 'definitions' and
    display 'names'; files failing validation are skipped (error printed).
    """
    fixture_paths = [
        os.path.abspath("tests/files/User.hpp"),
        os.path.abspath("tests/files/Window.hpp"),
    ]
    results = []
    for path in fixture_paths:
        with open(path, 'r') as handle:
            source = handle.read()
        try:
            generator = Generator(source)
            results.append({
                'definitions': generator.generate_method_list(generator.DEFINITIONS),
                'names': generator.generate_method_list(generator.NAMES),
            })
        except ClassValidationException as error:
            print(str(error))
    return results
class TestGenerator:
    """End-to-end checks on the Generator output lists."""

    def test_has_correct_list_items(self):
        results = read_files_as_method_generators()
        user, window = results[0], results[1]
        # User.hpp declares 14 methods; the pure-virtual one is skipped.
        assert len(user['definitions']) == 13
        assert len(window['definitions']) == 3
        assert len(user['names']) == 13
        assert len(window['names']) == 3
        # names and definitions are built pairwise, so lengths must match
        assert len(user['names']) == len(user['definitions'])
        assert len(window['names']) == len(window['definitions'])
|
prolidorpenrvapiroldon/cppclasshelper-sublime-text-plugin | method_generator/klass/template_type.py | from .datatype import Datatype
class TemplateType(Datatype):
    """
    represents a template type
    """
    # Pairs a template keyword ("class"/"typename") with the placeholder name
    # inherited from Datatype (stored as self._name).
    def __init__(self, typename, name):
        super(TemplateType, self).__init__(name)
        self._typename = typename
    @property
    def typename(self):
        # The C++ keyword used to introduce the parameter.
        return self._typename
    @typename.setter
    def typename(self, typename):
        self._typename = typename
    def __str__(self):
        """
        renders template type with datatype and placeholder
        :return: str
        """
        return "{typename} {datatype}".format(typename=self._typename, datatype=self._name) |
prolidorpenrvapiroldon/cppclasshelper-sublime-text-plugin | template.py | import os, sublime
class Template:
    """Loads a text template from disk and renders it via str.format."""

    def __init__(self, name):
        # Raw (later: rendered) template text; None until load() is called.
        self._template = None
        # Logical template name.
        self._name = name
        # Last file object used by load(); closed after loading.
        self.file = None

    def load(self, filename):
        """Read the template file into memory.

        Bug fix: uses a context manager so the handle is closed even when
        read() raises (the original leaked the open file on error).
        """
        with open(filename) as handle:
            self.file = handle
            self._template = handle.read()

    def render(self, **values):
        """
        :values key, value for replacing template vars

        Substitutes {placeholders} in place; repeated calls operate on the
        already-rendered text.
        """
        self._template = self._template.format(**values)

    @property
    def template(self):
        return self._template
|
prolidorpenrvapiroldon/cppclasshelper-sublime-text-plugin | method_generator/__init__.py | <filename>method_generator/__init__.py
from .klass.klass import Klass
from .klass.datatype import Datatype
from .klass.method import Method
from .klass.template import Template
from .klass.template_type import TemplateType
|
prolidorpenrvapiroldon/cppclasshelper-sublime-text-plugin | method_generator/klass/template.py | <reponame>prolidorpenrvapiroldon/cppclasshelper-sublime-text-plugin<filename>method_generator/klass/template.py<gh_stars>1-10
class Template:
    """
    represents a C++ class or method template

    Collects template parameters and renders them as one
    "template <...>" clause.
    """

    def __init__(self):
        self._template_types = []

    @property
    def template_types(self):
        """Collected template parameters, in insertion order."""
        return self._template_types

    def add_template_type(self, template_type):
        """Append one template parameter (anything with a sensible str())."""
        self._template_types.append(template_type)

    def __str__(self):
        """
        renders a class or method template
        :return: str
        """
        rendered = ', '.join(str(entry) for entry in self._template_types)
        return "template <{content}>".format(content=rendered)
prolidorpenrvapiroldon/cppclasshelper-sublime-text-plugin | tests/class_generator.py | import sys, os, json
sys.path.append(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
from method_generator import klass as c
def test():
    # Intentionally empty placeholder function.
    pass
class ClassGenerator:
    # Builds Klass/Method objects from a JSON fixture file, pairing each
    # generated method with its "expected" rendered string for comparison.
    def __init__(self, filename):
        self.filename = filename
        self.klass = None
        with open(self.filename, 'r') as file:
            self.data = json.load(file)
    def generate(self):
        # Build the class, attach its methods, then its template (if any).
        self.klass = c.Klass(self.data["name"])
        generated_methods = self._generate_methods(self.klass)
        self.klass = generated_methods["class"]
        if self.data["template"]:
            self.klass.template = self._create_class_template()
        # Returns the list of {"method", "expected"} pairs, not the Klass.
        return generated_methods["obj"]
    def _create_class_template(self):
        # Convert the JSON template spec into a Template of TemplateTypes.
        template = c.Template()
        for tpl in self.data["template"]:
            template.add_template_type(c.TemplateType(tpl["typename"], tpl["datatype"]))
        return template
    def _generate_methods(self, klass):
        # Build every Method described in the JSON and register it on the class.
        obj = []
        for method in self.data["methods"]:
            m = c.Method()
            m.name = method["name"]
            m.return_type = method["return_type"]
            m.related_class = klass
            if method["template"]:
                template = c.Template()
                for tpl in method["template"]:
                    template.add_template_type(c.TemplateType(tpl["typename"], tpl["datatype"]))
                m.template = template
            if method["arguments"]:
                # NOTE(review): c.Argument is not exported by the klass
                # package's __init__ in this snapshot — confirm it exists,
                # otherwise this branch raises AttributeError.
                for arg in method["arguments"]:
                    a = c.Argument()
                    a.identifier = arg["identifier"]
                    a.datatype = arg["datatype"]
                    m.add_argument(a)
            klass.add_method(m)
            obj.append({
                "method": m,
                "expected": method["expected"]
            })
        return {
            "class": klass,
            "obj": obj
        }
|
prolidorpenrvapiroldon/cppclasshelper-sublime-text-plugin | method_generator/generator.py | <reponame>prolidorpenrvapiroldon/cppclasshelper-sublime-text-plugin
from .tokenizer import *
from .parser import *
class Generator:
    """
    generates all class method definitions
    """

    # Return-mode flags for generate_method_list().
    DEFINITIONS = 1
    NAMES = 2

    def __init__(self, source_code):
        self._source_code = source_code

    def _generate_method_display_name(self, method):
        """Build 'return_type name' for display; methods without a return
        type (constructors/destructors) get just their name."""
        if method.return_type is None:
            method.return_type = ""
        return "{} {}".format(method.return_type, method.name).strip()

    def generate_method_list(self, return_mode):
        """
        generates a list of methods
        only includes the method names
        :param return_mode:
        :return: list
        """
        tokens = ClassTokenizer().tokenize(self._source_code)
        parsed_class = ClassParser().parse(tokens)
        names = []
        definitions = []
        for method in parsed_class.methods:
            # pure virtual methods have no out-of-class definition to generate
            if method.is_pure_virtual:
                continue
            names.append(self._generate_method_display_name(method))
            definitions.append(method)
        if return_mode == self.DEFINITIONS:
            return definitions
        elif return_mode == self.NAMES:
            return names
|
Prabhav55223/Flipkart-PORTRAY | app.py | <reponame>Prabhav55223/Flipkart-PORTRAY<filename>app.py
# -----------------------------------------------------------------PORTRAY----------------------------------------------------------------------#
# AUTHORS
# <NAME> -> <EMAIL>
# <NAME> -> <EMAIL>
# TEAM ILLUMINATI
#__________________________________________________________________APPLICATION_______________________________________________________________
import numpy as np
import pandas as pd
from flask import Flask, request, jsonify, render_template
import pickle
import yake
'''
Setting up FLASK INSTANCE
'''
app = Flask(__name__)
app.config['TEMPLATES_AUTO_RELOAD'] = True
from main import predictor
from trends import articles
@app.route('/')
def home():
    '''
    Home Page

    Renders the static landing page template.
    '''
    return render_template('index.html')
@app.route('/documentation', methods=["GET"])
def documentation():
    # Static documentation page.
    return render_template('docs.html')
def _fill_keywords(df, extractor):
    """Populate df['Keywords'] with up to two YAKE keywords per description.

    Uses .loc for the assignment to avoid pandas chained indexing (the
    original df["Keywords"][i] pattern may silently write to a copy on
    modern pandas). Empty descriptions and empty extractions yield 'None'.
    """
    for i in range(len(df)):
        description = df["Description"][i]
        if description == '':
            df.loc[i, "Keywords"] = 'None'
            continue
        keywords = extractor.extract_keywords(description)
        if len(keywords) == 0:
            df.loc[i, "Keywords"] = 'None'
        elif len(keywords) == 1:
            df.loc[i, "Keywords"] = str(keywords[0][0])
        else:
            df.loc[i, "Keywords"] = str(keywords[0][0]) + ', ' + str(keywords[1][0])
@app.route('/predict',methods=['POST'])
def predict():
    '''
    PREDICTOR

    Reads (choice, query, count) from the submitted form, runs the ranking
    model, then annotates the pickled top/bottom result frames with YAKE
    keywords before rendering them.
    '''
    int_features = [x for x in request.form.values()]
    choice = int_features[0]
    query = int_features[1]
    count = int(int_features[2])
    predictor(choice, query, count)
    top = pd.read_pickle('static/Sample_Results/TOP.pkl')
    bottom = pd.read_pickle('static/Sample_Results/BOTTOM.pkl')
    '''
    Using Custom Keyword Extractor to Load Keywords.
    '''
    # Default before extraction; overwritten row by row by _fill_keywords.
    top["Keywords"] = 'None'
    bottom["Keywords"] = 'None'
    # Single-word English keywords, at most two per description.
    custom_kw_extractor = yake.KeywordExtractor(
        lan="en",
        n=1,
        dedupLim=0.9,
        dedupFunc='seqm',
        windowsSize=1,
        top=2,
        features=None,
    )
    # Same extraction applied to both frames (previously two duplicated loops).
    _fill_keywords(top, custom_kw_extractor)
    _fill_keywords(bottom, custom_kw_extractor)
    return render_template('ecommresults.html', details = choice + ' - ' + query, top = top, bottom = bottom)
@app.route('/results',methods=['POST'])
def results():
    # NOTE(review): predict() above calls predictor(choice, query, count)
    # with three arguments but only two are passed here — confirm the
    # expected signature. The bare `return` also yields None, which Flask
    # rejects as a view response; this route looks unfinished.
    predictor('NORDSTROM', 'jeans')
    return
@app.route('/analyse',methods=['POST'])
def analyse():
    '''
    Analysis for articles.

    Reads (website, product) from the form, runs the trend analysis, and
    renders the five best articles plus the matching products.
    '''
    form_values = [value for value in request.form.values()]
    website, product = form_values[0], form_values[1]
    articles(website, product)
    top_articles = pd.read_pickle('static/Sample_Results/top5articles.pkl')
    top_products = pd.read_pickle('static/Sample_Results/top5prods.pkl')
    return render_template('inner-page.html', details = website + ' - ' + product, arts = top_articles, prods = top_products)
if __name__ == "__main__":
    app.jinja_env.auto_reload = True
    app.config['TEMPLATES_AUTO_RELOAD'] = True
    # NOTE(review): debug=True combined with host='0.0.0.0' exposes the
    # interactive debugger to the network — disable debug outside local dev.
    app.run(debug=True, host='0.0.0.0') |
Prabhav55223/Flipkart-PORTRAY | trends.py | # -----------------------------------------------------------------PORTRAY----------------------------------------------------------------------#
# AUTHORS
# <NAME> -> <EMAIL>
# <NAME> -> <EMAIL>
# TEAM ILLUMINATI
#____________________________________________________________________TREND ANALYSIS________________________________________________________________
''' Importing Libraries '''
import nltk
import os
from datetime import datetime
import warnings
import gc
import sys
import urllib.request
import pandas as pd
import time
import json
import glob
import pickle
import random
from pathlib import Path
import editdistance
import string
from sklearn.preprocessing import MinMaxScaler
import io
import itertools
import networkx as nx
import re
import networkx
from rake_nltk import Rake
from nltk.tokenize import word_tokenize, sent_tokenize
import numpy as np
import numpy as np
import pandas as pd
import itertools
from tqdm import tqdm
# Dictionary for classifying products into categories: category -> trigger words.
apparel= {
    'tshirt' : ['tee', 'tshirt', 'T-shirt',' t shirt', 'teeshirt'],
    # 'shoes' appeared twice in the original list; deduplicated.
    'footwear' : ['shoes', 'sandal', 'sandals', 'footwear', 'slipper', 'Croc', 'Sandals'],
    'jewellery' : ['jewellery'],
    # bug fix: this was a single 'jeans, denim, ripped, levis' string, so
    # per-word membership tests could never match; now one word per entry.
    'jeans': ['jeans', 'denim', 'ripped', 'levis'],
    'dress' : ['dress', 'dresses', 'sundress', 'sundresses'],
    'skirt': ['skirt', 'skirts', 'maxi'],
    'bag': ['bag', 'bags', 'purse', 'purses', 'wallet', 'wallets']
}
def normal(tagged):
    """Replace '.' with ' ' in every token of (word, tag) pairs; tags unchanged."""
    cleaned = []
    for word, tag in tagged:
        cleaned.append((word.replace('.', ' '), tag))
    return cleaned
def filter_for_tags(tagged, tags=['NNP']):
    """Semantic Filter Based on POS: keep only (word, tag) pairs whose tag is allowed.

    NOTE(review): redefined later in this module with tags=['NN', 'JJ'];
    that later definition is the one in effect at runtime.
    """
    kept = []
    for item in tagged:
        if item[1] in tags:
            kept.append(item)
    return kept
def unique_ever(iterable, key=None):
    """Yield elements in order, skipping anything already seen.

    Bug fix: the key=None branch previously pre-built
    [x for x in iterable if x not in seen] while `seen` was still empty,
    so duplicates were never filtered out. Both branches now test
    membership lazily, element by element (itertools unique_everseen
    semantics).
    """
    seen = set()
    seen_add = seen.add
    if key is None:
        for element in iterable:
            if element not in seen:
                seen_add(element)
                yield element
    else:
        for element in iterable:
            k = key(element)
            if k not in seen:
                seen_add(k)
                yield element
def build_graph(nodes):
    """Return a networkx graph instance.

    :param nodes: List of hashables that represent the nodes of a graph.

    Every pair of nodes gets an edge weighted by their Levenshtein distance.
    """
    gr = nx.Graph()
    gr.add_nodes_from(nodes)
    for first, second in itertools.combinations(nodes, 2):
        distance = editdistance.eval(first, second)
        gr.add_edge(first, second, weight=distance)
    return gr
def extract_key_phrases(text):
    # Extract candidate key phrases: POS-tag the text, keep the filtered tags
    # (NOTE: filter_for_tags is redefined later in this module, so NN/JJ are
    # kept at runtime), rank words with PageRank over a Levenshtein-weighted
    # graph, and finally merge adjacent keywords back into two-word phrases.
    word_tokens = nltk.word_tokenize(text)
    tagged = nltk.pos_tag(word_tokens)
    textlist = [x[0] for x in tagged]
    tagged = filter_for_tags(tagged)
    tagged = normal(tagged)
    unique_word_set =unique_ever([x[0] for x in tagged])
    word_set_list = list(unique_word_set)
    graph = build_graph(word_set_list)
    calculated_page_rank = nx.pagerank(graph, weight='weight')
    # words sorted by descending PageRank score; keep only the top 50
    keyphrases = sorted(calculated_page_rank, key=calculated_page_rank.get,
                        reverse=True)
    one_third = 50  # NOTE(review): unused; name suggests an older "top third" cutoff
    keyphrases = keyphrases[0:50]
    modified_key_phrases = set([])
    dealt_with = set([])
    # scan adjacent token pairs; join neighbouring keywords into one phrase
    i = 0
    j = 1
    while j < len(textlist):
        first = textlist[i]
        second = textlist[j]
        if first in keyphrases and second in keyphrases:
            keyphrase = first + ' ' + second
            modified_key_phrases.add(keyphrase)
            dealt_with.add(first)
            dealt_with.add(second)
        else:
            if first in keyphrases and first not in dealt_with:
                modified_key_phrases.add(first)
            # the final token can only be emitted standalone here
            if j == len(textlist) - 1 and second in keyphrases and \
                    second not in dealt_with:
                modified_key_phrases.add(second)
        i = i + 1
        j = j + 1
    return list(modified_key_phrases)
def todf(vogue):
    ''' Function to convert the extracted pickle file to a suitable dataframe
    vogue: dictionary of the pickle file
    df1: temporary dataframe
    df: final dataframe of articles in suitable format'''
    df1=pd.DataFrame.from_dict(vogue)
    # append the column labels as an extra data row, renumber the columns,
    # then transpose so each original column becomes one article row
    row=[]
    for entry in df1.columns:
        row.append(entry)
    df1.loc[len(df1)]= row
    cols=[]
    for i in range(len(df1.loc[0])):
        cols.append(str(i))
    df1.columns= cols
    df= df1.T
    # assumes the pickle stores fields in exactly this order — TODO confirm
    df.columns= ['Date', 'Tags', 'Text', 'Links', 'Title']
    # the two swaps below reorder columns to: Title, Date, Text, Links, Tags
    cols = list(df.columns)
    a, b = cols.index('Date'), cols.index('Title')
    cols[b], cols[a] = cols[a], cols[b]
    df = df[cols]
    cols = list(df.columns)
    a, b = cols.index('Date'), cols.index('Tags')
    cols[b], cols[a] = cols[a], cols[b]
    df = df[cols]
    return df
def convert_date(text):
    """Parse an article date string such as '01 January 2020' into a datetime."""
    return datetime.strptime(text, '%d %B %Y')
def dscore(df):
    """Sort articles chronologically and add a positional recency score.

    'Date' strings are parsed in place; DScore is the row's rank after
    sorting by date, so older articles receive lower scores.
    """
    df['Date'] = df['Date'].apply(lambda text: datetime.strptime(text, '%d %B %Y'))
    df = df.sort_values(by='Date').reset_index(drop=True)
    df["DScore"] = pd.Series(range(len(df)))
    return df
def endorse(text, title, tags):
    """Endorsement score: number of key phrases extracted from the article's
    combined text, title and subheading.

    text: variable for text of the articles
    title: variable for title of the articles
    tags: variable for subheading of the articles
    """
    combined = text + title + tags
    return len(extract_key_phrases(combined))
def escore(df):
    ''' Function to apply endorsement score '''
    # One EScore per row, computed from the article's text/title/tags.
    df['EScore'] = df.apply(
        lambda row: endorse(row['Text'], row['Title'], row['Tags']), axis=1)
    return df
scaler = MinMaxScaler(feature_range = (0,10))
def normalize(df):
    ''' Function to normalize the scores obtained to a
    range between 0 and 10 '''
    # Uses the module-level MinMaxScaler; both columns rescaled together.
    score_columns = ['DScore', 'EScore']
    scaled = scaler.fit_transform(df[score_columns].values)
    df[score_columns] = pd.DataFrame(scaled, columns=score_columns, index=df.index)
    return df
def filter_for_tags(tagged, tags=['NN', 'JJ']):
    # NOTE(review): this redefines filter_for_tags from earlier in the module
    # (default tags=['NNP']); from here on extract_key_phrases keeps
    # nouns/adjectives instead of proper nouns — confirm this is intentional.
    """Semantic Filter Based on POS."""
    return [item for item in tagged if item[1] in tags]
def clean(rev):
    """Basic cleaning of each string: strip everything except word chars,
    whitespace and apostrophes, then flatten newlines to spaces."""
    cleaned = []
    for text in rev:
        text = re.sub(r'[^\w\s\']','',text)
        cleaned.append(text.replace("\n", " "))
    return cleaned
def category(df):
    ''' Function to allot category to an article
    tshirt: number of times the article mentions words related to tshirt
    footwear: number of times the article mentions words related to footwear
    jewellery: number of times the article mentions words related to jewellery
    skirt: number of times the article mentions words related to skirt
    bag: number of times the article mentions words related to bag'''
    # NOTE(review): assigns via df['col'][i] (chained indexing), which modern
    # pandas may route to a copy — and it assumes the tshirt/footwear/.../
    # Category columns already exist on df. Verify against the caller.
    for i in range(len(df)):
        tshirt=0
        footwear=0
        jewellery=0
        dress=0
        skirt=0
        bag=0
        # first pass: count category trigger words in the cleaned, lowercased title
        res = df['Title'][i].split()
        res = [word.lower() for word in res]
        df['Title'][i]=clean(res)
        for word in df['Title'][i]:
            if word in apparel['footwear']:
                footwear+=1
            if word in apparel['tshirt']:
                tshirt+=1
            if word in apparel['jewellery']:
                jewellery+=1
            if word in apparel['dress']:
                dress+=1
            if word in apparel['skirt']:
                skirt+=1
            if word in apparel['bag']:
                bag+=1
        # title gave no signal (all counts equal): fall back to key phrases
        # extracted from the article body
        if footwear== tshirt== jewellery==dress==skirt==bag:
            res=extract_key_phrases(df['Text'][i])
            res= list(res)
            new= []
            for word in res:
                small= word.split()
                new.append(small)
            tshirt=0
            footwear=0
            jewellery=0
            dress=0
            skirt=0
            bag=0
            for li in new:
                for word in li:
                    if word in apparel['footwear']:
                        footwear+=1
                    if word in apparel['tshirt']:
                        tshirt+=1
                    if word in apparel['jewellery']:
                        jewellery+=1
                    if word in apparel['dress']:
                        dress+=1
                    if word in apparel['skirt']:
                        skirt+=1
                    if word in apparel['bag']:
                        bag+=1
            df['tshirt'][i]=tshirt
            df['footwear'][i]=footwear
            df['jewellery'][i]=jewellery
            df['dress'][i]= dress
            df['skirt'][i]= skirt
            df['bag'][i]= bag
        else:
            df['tshirt'][i]=tshirt
            df['footwear'][i]=footwear
            df['jewellery'][i]=jewellery
            df['dress'][i]= dress
            df['skirt'][i]= skirt
            df['bag'][i]= bag
    # second pass: the column with the maximum count wins; ties resolve in
    # the fixed order tshirt > footwear > bag > skirt > dress > jewellery
    for index, row in df.iterrows():
        val = max([row["tshirt"], row["footwear"], row["bag"], row["skirt"], row["dress"], row["jewellery"]])
        if (row['tshirt']==val):
            df["Category"][index] = "tshirt"
        elif (row["footwear"]==val):
            df["Category"][index] = "footwear"
        elif (row["bag"]==val):
            df["Category"][index] = "bag"
        elif (row["skirt"]==val):
            df["Category"][index] = "skirt"
        elif (row["dress"]==val):
            df["Category"][index] = "dress"
        else:
            df["Category"][index] = "jewellery"
    return df
def clean_text(text):
    """Lowercase a string and split it on single spaces; non-strings yield [].

    Bug fix: the original bare `except:` swallowed every exception
    (including KeyboardInterrupt); only the "not a string" case — an
    AttributeError from .lower()/.split() — is expected here.
    """
    try:
        return text.lower().split(" ")
    except AttributeError:
        return []
def clean_list(str):
    """Split each entry on spaces and hyphens and return the flat word list.

    NOTE(review): the parameter name shadows the built-in `str`; kept as-is
    for interface compatibility with existing callers.
    """
    words = []
    for entry in str:
        for word in entry.replace('-', ' ').split(' '):
            words.append(word)
    return words
def similarity(nord, res_dict):
    '''Score each product in *nord* against article keywords (in place).

    For every product row, counts keyword matches between every article's
    keyword list and the product's tokenised Name/Description, then averages
    that count with article date/endorsement scores into nord['AScore'].

    nord     : DataFrame of products from a website; 'Name'/'Description'
               must already be tokenised lists and 'AScore' must exist.
    res_dict : dict with parallel lists 'text' (keyword lists per article),
               'dscore' (date scores) and 'escore' (endorsement scores).
    tscore   : match count accumulated over ALL articles.
    dscore   : date score of the article.
    escore   : endorsements score of the article.
    '''
    for i in range(len(nord)):
        tscore = 0
        dscore = 0
        escore = 0
        for j in range(len(res_dict['text'])):
            for word in res_dict['text'][j]:
                if word in nord['Name'][i]:
                    tscore += 1
                if word in nord['Description'][i]:
                    tscore += 1
            # NOTE(review): dscore/escore are overwritten on every article, so
            # only the LAST article's scores survive — confirm whether a sum
            # or average over articles was intended.
            dscore = res_dict['dscore'][j]
            escore = res_dict['escore'][j]
        nord['AScore'][i] = (tscore + dscore + escore) / 3
def normalize2(nord):
    """Rescale the 'AScore' column in place via the module-level scaler and return *nord*."""
    cols = ['AScore']
    raw = nord[cols].values
    scaled = scaler.fit_transform(raw)
    nord[cols] = pd.DataFrame(scaled, columns=cols, index=nord.index)
    return nord
def sortbyarticles(df):
    """Return the five best articles by the mean of date and endorsement scores.

    Adds a temporary 'Scores' column ((DScore + EScore) / 2) to *df*, keeps the
    top five rows sorted descending, strips 'Tags'/'DScore'/'EScore' from the
    result, and removes the temporary column from *df* itself.  The returned
    frame retains the 'Scores' column.
    """
    ranked = df
    combined = [(ranked["DScore"][i] + ranked["EScore"][i]) / 2
                for i in range(len(ranked))]
    ranked["Scores"] = pd.Series(combined)
    ranked = ranked.sort_values(by="Scores", ascending=False).reset_index(drop=True)[:5]
    del ranked["Tags"], ranked["DScore"], ranked["EScore"], df["Scores"]
    return ranked
def backtonormal(liste):
    """Join the tokenised product-name list back into a single space-separated string."""
    return " ".join(liste)
def articles(website, product):
    '''Driver: rate products of one category against the latest article trends.

    website : user-inputted fashion-article website key; selects the pickled
              article dump static/PKL/<website>_articles.pkl.
    product : user-inputted product category (e.g. 'tshirt'); selects the
              pre-scraped NORDSTROM CSV and which articles are relevant.

    For demonstration purposes, products from http://nordstrom.com are ranked.
    Side effects: writes top5articles.pkl and top5prods.pkl under
    static/Sample_Results/.
    '''
    nord = pd.read_csv('static/CSV/NORDSTROM_' + product + '.csv')
    file = 'static/PKL/' + website + '_articles.pkl'
    with open(file, 'rb') as f:
        vogue = pickle.load(f)
    # Build the article frame and score it by date and endorsements
    # (todf/dscore/escore are defined elsewhere in this module).
    df = todf(vogue)
    df = dscore(df)
    df = escore(df)
    res1 = sortbyarticles(df)
    res1.to_pickle('static/Sample_Results/top5articles.pkl')
    # Per-category keyword-match counters, filled in by category().
    df['tshirt'] = ""
    df['footwear'] = ""
    df['jewellery'] = ""
    df['dress'] = ""
    df['skirt'] = ""
    df['bag'] = ""
    df["Category"] = ""
    df = category(df)
    # Tokenise article texts and product name/description for matching.
    df['Text'] = df.Text.apply(extract_key_phrases)
    df['Text'] = df.Text.apply(clean_list)
    nord['Name'] = nord.Name.apply(clean_text)
    nord['Name'] = nord.Name.apply(clean_list)
    nord['Description'] = nord.Description.apply(clean_text)
    nord['Description'] = nord.Description.apply(clean_list)
    # Select the requested category's articles and gather their data.
    res_dict = {'text': [], 'dscore': [], 'escore': []}
    relevant = df.loc[df['Category'] == product].reset_index()
    for i in range(len(relevant)):
        small = []
        for word in relevant['Text'][i]:
            small.append(word)
        res_dict['text'].append(small)
        res_dict['dscore'].append(relevant['DScore'][i])
        res_dict['escore'].append(relevant['EScore'][i])
    nord['AScore'] = ""
    similarity(nord, res_dict)
    result = normalize2(nord)
    result = result.sort_values(by = "AScore", ascending = False)
    result = result[:5]
    result = result.reset_index(drop = True)
    result["Name"] = result.Name.apply(backtonormal)
    del result["Rating"], result["Number of Ratings"], result["Reviews"], result["Current Views"], result["Description"], result["Discount"]
    result.to_pickle('static/Sample_Results/top5prods.pkl')
    return
Prabhav55223/Flipkart-PORTRAY | main.py | # -----------------------------------------------------------------PORTRAY----------------------------------------------------------------------#
# AUTHORS
# <NAME> -> <EMAIL>
# <NAME> -> <EMAIL>
# TEAM ILLUMINATI
# ----------------------------------------------------------------IMAGE ANALYSER----------------------------------------------------------------#
'''DISABLE WARNINGS'''
import os
import warnings
import gc
import sys
import yake
import urllib.request
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import pandas as pd
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
import time
import json
import glob
import pickle
import random
from pathlib import Path
import pickle
import cv2
import editdistance
import string
from sklearn.preprocessing import MinMaxScaler
import io
import itertools
import networkx as nx
import nltk
import re
import networkx
from rake_nltk import Rake
from nltk.tokenize import word_tokenize, sent_tokenize
import numpy as np
import numpy as np
import pandas as pd
import itertools
from tqdm import tqdm
from imgaug import augmenters as iaa
from sklearn.model_selection import StratifiedKFold, KFold
import mrcnn
from mrcnn.config import Config
from mrcnn import utils
import mrcnn.model as modellib
from mrcnn import visualize
import keras.layers
from mrcnn.model import log
from mrcnn.model import log, BatchNorm
# Path to the pre-trained COCO weights file (the fashion fine-tuned
# checkpoint below is what actually gets loaded at inference time).
COCO_WEIGHTS_PATH = 'mask_rcnn_coco.h5'
warnings.filterwarnings("ignore", category=DeprecationWarning)
# ------------------------------------------------------------SETTING UP THE IMAGE MODEL AND LOADING WEIGHTS------------------------------------------------------------
'''SET CONFIGURATIONS FOR MODEL'''
NUM_CATS = 46      # number of clothing categories in the label set (background excluded)
IMAGE_SIZE = 512   # square side length all input images are resized to
class FashionConfig(Config):
    """Mask R-CNN configuration for the fashion segmentation model."""
    NAME = "fashion"
    NUM_CLASSES = NUM_CATS + 1  # +1 for the background class
    GPU_COUNT = 1
    IMAGES_PER_GPU = 4
    BACKBONE = 'resnet50'
    IMAGE_MIN_DIM = IMAGE_SIZE
    IMAGE_MAX_DIM = IMAGE_SIZE
    IMAGE_RESIZE_MODE = 'none'  # images are pre-resized by resize_image()
    RPN_ANCHOR_SCALES = (16, 32, 64, 128, 256)
    STEPS_PER_EPOCH = 1000
    VALIDATION_STEPS = 200
config = FashionConfig()
'''LOAD LABELS FOR IMAGE SEGMENTATION'''
# label_descriptions.json maps category ids to human-readable names.
with open("label_descriptions.json") as f:
    label_descriptions = json.load(f)
label_names = [x['name'] for x in label_descriptions['categories']]
'''Helper Functions For Image Analysis'''
def resize_image(image_path):
    """Load an image from disk, convert BGR->RGB, and resize to the model input size."""
    loaded = cv2.imread(image_path)
    rgb = cv2.cvtColor(loaded, cv2.COLOR_BGR2RGB)
    return cv2.resize(rgb, (IMAGE_SIZE, IMAGE_SIZE), interpolation=cv2.INTER_AREA)
def to_rle(bits):
    """Run-length encode a flat binary mask.

    Returns [start_pos, run_length, start_pos, run_length, ...] for each run
    of truthy values, with positions counted over the whole sequence.
    """
    encoded = []
    offset = 0
    for value, run in itertools.groupby(bits):
        length = len(list(run))
        if value:
            encoded += [offset, length]
        offset += length
    return encoded
def refine_masks(masks, rois):
    """Make overlapping instance masks disjoint and tighten their boxes (in place).

    masks : H x W x N boolean array of per-instance masks.
    rois  : N x 4 array of [y1, x1, y2, x2] boxes, rewritten to fit the masks.

    Masks are processed in increasing-area order; each later (larger) mask
    has pixels already claimed by smaller masks removed, so smaller masks
    win overlaps.  Returns the modified (masks, rois).
    """
    areas = np.sum(masks.reshape(-1, masks.shape[-1]), axis=0)
    mask_index = np.argsort(areas)
    union_mask = np.zeros(masks.shape[:-1], dtype=bool)
    for m in mask_index:
        # Remove pixels already claimed by a previously processed mask.
        masks[:, :, m] = np.logical_and(masks[:, :, m], np.logical_not(union_mask))
        union_mask = np.logical_or(masks[:, :, m], union_mask)
    for m in range(masks.shape[-1]):
        # Recompute a tight bounding box for whatever pixels remain.
        mask_pos = np.where(masks[:, :, m]==True)
        if np.any(mask_pos):
            y1, x1 = np.min(mask_pos, axis=1)
            y2, x2 = np.max(mask_pos, axis=1)
            rois[m, :] = [y1, x1, y2, x2]
    return masks, rois
# Training-time augmentation pipeline (not applied at inference).
augmentation = iaa.Sequential([
    iaa.Fliplr(0.5) # only horizontal flip here
])
'''Model Setup And Download'''
# Fine-tuned fashion checkpoint (epoch 8) loaded below.
model_path = 'mask_rcnn_fashion_0008.h5'
class InferenceConfig(FashionConfig):
    """Inference-time overrides: one image per batch on a single GPU."""
    GPU_COUNT = 1
    IMAGES_PER_GPU = 1
inference_config = InferenceConfig()
# Build the Mask R-CNN graph in inference mode and load the trained weights.
model = modellib.MaskRCNN(mode='inference',
                          config=inference_config,
                          model_dir='../Mask_RCNN/')
assert model_path != '', "Provide path to trained weights"
print("Loading weights from ", model_path)
model.load_weights(model_path, by_name=True)
print("MODEL LOADED")
print()
'''Main Functions for Image Analysis -> Download, Save and Run Predictions'''
def main(df):
    '''
    Extract clothing features from each product image using the loaded Mask R-CNN.

    df -> ranked products DataFrame; must contain 'Image_Link' and 'Name'.
    feature_list -> per-product list of detected label names, attached to *df*
    as a new 'Feature' column.  Images are downloaded into static/Images/
    first.  Products whose image cannot be processed fall back to the feature
    [1] (later mapped to label_names[1]).
    '''
    feature_list = []
    missing_count = 0
    os.chdir('static/Images/')
    # Download every product image, named after its product.
    for i in range(len(df)):
        image_url = df["Image_Link"][i]
        save_name = df["Name"][i] + '.jpg'
        urllib.request.urlretrieve(image_url, save_name)
    for i in tqdm(range(len(df))):
        labels = []
        path = df["Name"][i] + '.jpg'
        try:
            image = resize_image(path)
            result = model.detect([image])[0]
        except:
            # Unreadable/missing image: record the fallback feature.
            print(df["Name"][i])
            feature_list.append([1])
            continue
        if result['masks'].size > 0:
            masks, _ = refine_masks(result['masks'], result['rois'])
            for m in range(masks.shape[-1]):
                mask = masks[:, :, m].ravel(order='F')
                rle = to_rle(mask)
                # class_ids are 1-based; label indices are 0-based.
                label = result['class_ids'][m] - 1
                labels.append(label)
            feature_list.append(list(set(labels)))
        else:
            feature_list.append([1])
            missing_count += 1
    # Map numeric label ids to human-readable names.
    for i in range(len(feature_list)):
        for j in range(len(feature_list[i])):
            feature_list[i][j] = label_names[feature_list[i][j]]
    df["Feature"] = pd.Series(feature_list)
    os.chdir('..')
    os.chdir('..')
    return df
def cleanresults(df):
    """Drop ranking-only columns from *df* (mutated in place) and return it."""
    for col in ("Discount", "Rating", "Number of Ratings", "Reviews", "Current Views"):
        del df[col]
    return df
print("SETUP COMPLETE")
print()
# --------------------------------------------------------------DATA SCRAPER--------------------------------------------------------------------------#
# Setting up the Chome Instance
options = webdriver.ChromeOptions()
options.add_argument('start-maximized')
options.add_argument('disable-infobars')
options.add_argument('--disable-extensions')
class DataCollectionEcomm:
    '''
    The main class instance for Data Scraping.
    All inputs are provided through a unique pkl file generated by the code previously for each website. The PKL file contains XPaths for each element.
    DataFrame Description:
    NAME | BRAND | PRICE | DISCOUNT | IMAGE LINK | RATING | NUMBER OF RATINGS | REVIEWS | CURRENT VIEWS | DESCRIPTION
    '''
    def __init__(self, base_site, search, path, query = ['T-Shirt']):
        # base_site : landing-page URL of the target site
        # search    : selectors for the search box / result list
        # path      : XPaths for the fields of a single product page
        # query     : list of search terms to collect products for
        # NOTE(review): ['T-Shirt'] is a mutable default argument; safe here
        # only because it is never mutated.
        self.browser = self.genrateBroswer()
        self.links = []
        self.base_site = base_site
        self.path = path
        self.search = search
        self.query = query
        self.df = pd.DataFrame(columns=["Name", "Brand", "Price", "Discount", "Image_Link", "Rating", "Number of Ratings", "Reviews", "Current Views", "Description"])
    def getalllinkstoproduct(self, query):
        '''
        Gathers Links to all related Products.
        Types *query* into the search box, walks the numbered result
        selectors until one is missing, and returns the collected hrefs.
        '''
        self.browser.find_element_by_xpath(self.search["search_box"]).click()
        self.browser.implicitly_wait(5)
        self.browser.find_element_by_xpath(self.search["search_input"]).send_keys(query)
        self.browser.implicitly_wait(10)
        self.browser.find_element_by_xpath(self.search["search_input"]).send_keys(Keys.ENTER)
        temps = []
        for i in range(1,1000):
            lis =self.browser.find_elements_by_css_selector(self.search["product_selector"] + str(i) + self.search["product_selector_no"])
            if (not lis):
                break
            temps.append(lis[0].get_attribute('href'))
        # Return to the landing page for the next query.
        self.browser.get(self.base_site)
        self.browser.implicitly_wait(5)
        return temps
    def genrateBroswer(self):
        '''
        Generates Browser Instance.
        (Method name spelling kept as-is: callers use this exact name.)
        '''
        self.browser = webdriver.Chrome(options=options)
        return self.browser
    def getproductdata(self):
        '''
        Uses selectors from pkl file to extract data.
        Scrapes the currently open product page and appends one row to
        self.df.  Missing fields fall back to placeholder values rather
        than aborting the scrape.
        '''
        self.browser.implicitly_wait(3)
        Product_Name = self.browser.find_element_by_xpath(self.path["p_name"]).text
        try:
            Product_Brand = self.browser.find_element_by_xpath(self.path["p_brand"]).text
        except:
            Product_Brand = Product_Name
        try:
            Product_Price = self.browser.find_element_by_xpath(self.path["p_price"]).text
        except:
            Product_Price = "Out Of Stock"
        try:
            Product_Disc = self.browser.find_element_by_xpath(self.path["p_disc"]).text[:3]
            print(1)
        except:
            Product_Disc = 'NULL'
        try:
            Product_Image = self.browser.find_element_by_xpath(self.path["p_img"]).get_attribute("src")
        except:
            Product_Image = self.browser.find_element_by_xpath(self.path["p_img2"]).get_attribute("src")
        '''
        Using EC for dynamic websites.
        Comment out in case of static.
        '''
        # Scroll until the review section appears (lazy-loaded on dynamic pages).
        for second in range(0,50):
            self.browser.execute_script("window.scrollBy(0,300)", "")
            time.sleep(5)
            try:
                self.browser.find_element_by_id(self.path["p_rev"])
                break
            except:
                continue
        Product_Reviews = []
        try:
            Product_Rating = self.browser.find_element_by_xpath(self.path["p_rat"]).text
        except:
            Product_Rating = "None"
            print("Help - STOP")
        try:
            Product_NumRatings = self.browser.find_element_by_xpath(self.path["p_numrat"]).text
        except:
            Product_NumRatings = "Zero"
            print("Help - STOP")
        try:
            Curr_Views = self.browser.find_element_by_xpath(self.path["p_curr"]).text
        except:
            Curr_Views = "0"
            print('Help')
        try:
            Product_Desc = self.browser.find_element_by_xpath("//*[@id='product-page-selling-statement']").text
        except:
            Product_Desc = ""
            print("Help")
        # Each review becomes [subject, text, star-width-in-px].
        reviews = self.browser.find_elements_by_class_name("_2k-Kq")
        for x in reviews:
            subject = x.find_element_by_class_name("_3P2YP").text
            text = x.find_element_by_class_name("_2wSBV").text
            stars = x.find_element_by_class_name("_3tZR1").value_of_css_property('width')[:-2]
            Product_Reviews.append([subject, text, stars])
        self.df = self.df.append({'Name': Product_Name, 'Brand': Product_Brand, "Price": Product_Price, "Discount": Product_Disc, "Image_Link": Product_Image, "Rating": Product_Rating, "Number of Ratings": Product_NumRatings, "Reviews": Product_Reviews, "Current Views": Curr_Views, "Description": Product_Desc}, ignore_index=True)
    def helper(self, link):
        # Navigate the shared browser to *link*.
        self.browser.get(link)
    def main_1(self):
        """Phase 1: collect and deduplicate product links for every query."""
        self.browser.get(self.base_site)
        self.browser.delete_all_cookies()
        temp = []
        time.sleep(10)
        for i in self.query:
            link = self.getalllinkstoproduct(i)
            temp += link
        link_set = set(temp)
        self.links = list(link_set)
        return self.links
    def main_2(self):
        """Phase 2: visit each collected link and scrape its product data."""
        for i in tqdm(range(len(self.links))):
            self.helper(self.links[i])
            time.sleep(5)
            self.getproductdata()
'''FOR SHEIN'''
# 1. Comment out:
# for second in range(0,50):
# self.browser.execute_script("window.scrollBy(0,300)", "")
# time.sleep(5)
# try:
# self.browser.find_element_by_id(self.path["p_rev"])
# break
# except:
# continue
# 2. Change:
# self.browser.find_element_by_xpath(self.search["search_input"]).send_keys(query)
# self.browser.implicitly_wait(10)
# self.browser.find_element_by_xpath(self.search["search_input"]).send_keys(Keys.ENTER)
# 3. Change:
# reviews = self.browser.find_elements_by_class_name('common-reviews__list-item-detail')
# for i in range(len(reviews)):
# subject = ''
# text = reviews[i].find_element_by_class_name("rate-des").text
# stars = Product_Rating
# Product_Reviews.append([subject, text, stars])
# --------------------------------------------------------------Review Weights-------------------------------------------------------------------#
class WeightingReviews:
    """Derive a weight per review from keywords extracted over all reviews.

    Keywords come from two sources: RAKE phrases and a TextRank-style
    PageRank over a Levenshtein-distance word graph.  Each review's weight
    is the percentage of keywords it hits, plus a base constant.
    """
    def __init__(self, df):
        self.df = df
        self.k = 0.3  # base weight added to every review; floor for empty reviews
    def setup_environment(self):
        """Download required resources."""
        nltk.download('punkt')
        nltk.download('averaged_perceptron_tagger')
        print('Completed resource downloads.')
    def filter_for_tags(self, tagged, tags=['NN', 'JJ', 'NNP']):
        """Semantic filter based on POS: keep nouns, adjectives, proper nouns."""
        return [item for item in tagged if item[1] in tags]
    def normal(self, tagged):
        # Replace periods inside tokens so they do not merge graph nodes.
        return [(item[0].replace('.', ' '), item[1]) for item in tagged]
    def unique_ever(self, iterable, key=None):
        '''
        Extracts only unique nodes for graph.
        Yields elements in first-seen order, optionally deduplicated by *key*.
        '''
        seen = set()
        seen_add = seen.add
        if key is None:
            for element in [x for x in iterable if x not in seen]:
                seen_add(element)
                yield element
        else:
            for element in iterable:
                k = key(element)
                if k not in seen:
                    seen_add(k)
                    yield element
    def build_graph(self, nodes):
        """Return a networkx graph instance.
        nodes-> List of hashables that represent the nodes of a graph.
        Every pair of nodes is connected, weighted by their Levenshtein distance.
        """
        gr = nx.Graph()
        gr.add_nodes_from(nodes)
        nodePairs = list(itertools.combinations(nodes, 2))
        for pair in nodePairs:
            firstString = pair[0]
            secondString = pair[1]
            levDistance = editdistance.eval(firstString, secondString)
            gr.add_edge(firstString, secondString, weight=levDistance)
        return gr
    def extract_key_phrases(self, text):
        '''
        Main function to extract key phrases by building a Levenshtein-distance graph.
        text-> Text to run on
        Returns a set of top-ranked words, with adjacent keywords merged into
        two-word phrases.
        '''
        word_tokens = nltk.word_tokenize(text)
        tagged = nltk.pos_tag(word_tokens)
        textlist = [x[0] for x in tagged]
        tagged = self.filter_for_tags(tagged)
        tagged = self.normal(tagged)
        unique_word_set = self.unique_ever([x[0] for x in tagged])
        word_set_list = list(unique_word_set)
        graph = self.build_graph(word_set_list)
        calculated_page_rank = nx.pagerank(graph, weight='weight')
        keyphrases = sorted(calculated_page_rank, key=calculated_page_rank.get,
                            reverse=True)
        # NOTE(review): name suggests len//3 was once used; now a fixed, unused cap.
        one_third = 50
        keyphrases = keyphrases[0:50]
        modified_key_phrases = set([])
        dealt_with = set([])
        i = 0
        j = 1
        # Merge keywords that are adjacent in the original token order into bigrams.
        while j < len(textlist):
            first = textlist[i]
            second = textlist[j]
            if first in keyphrases and second in keyphrases:
                keyphrase = first + ' ' + second
                modified_key_phrases.add(keyphrase)
                dealt_with.add(first)
                dealt_with.add(second)
            else:
                if first in keyphrases and first not in dealt_with:
                    modified_key_phrases.add(first)
                if j == len(textlist) - 1 and second in keyphrases and \
                        second not in dealt_with:
                    modified_key_phrases.add(second)
            i = i + 1
            j = j + 1
        return modified_key_phrases
    def raking(self, text):
        '''
        Using Python Module RAKE to supplement TextRank.
        Returns (score, phrase) pairs ranked by RAKE.
        '''
        r = Rake(min_length=1, max_length=3)
        r.extract_keywords_from_text(text)
        ans = r.get_ranked_phrases_with_scores()
        return ans
    def calcweight(self, text, final):
        '''
        Calculating weights based on frequency of keywords/phrases.
        final-> Final chosen keywords.
        Returns the percentage of tokens of *text* that appear in *final*,
        relative to len(final).
        '''
        count = 0
        words = word_tokenize(text)
        for i in words:
            if i in final:
                count += 1
        weight = (count/len(final)) * 100
        return weight
    def main_weights(self):
        """Compute one weight per review and attach them as df['Weights']."""
        text = ""
        # NOTE(review): Reviews are dicts here (see PreProcessEcomm.reviewtodict),
        # so this concatenates the review-text KEYS, with no separating space.
        for i in self.df["Reviews"]:
            for j in i:
                text = text + "" + j
        pattern = '[0-9]'
        text = re.sub(pattern, ' ', text)
        result_rake = self.raking(text)
        final = []
        for i in result_rake:
            if (i[0] > 8):  # keep only high-scoring RAKE phrases
                lis = nltk.word_tokenize(i[1])
                final += lis
        result_textrank = self.extract_key_phrases(text)
        final += result_textrank
        resulting = []
        for i in self.df["Reviews"]:
            lis = []
            if (not i):
                # No reviews for this product: fall back to the base weight.
                lis.append(self.k)
                resulting.append(lis)
                continue
            for text, score in i.items():
                weight_factor = self.calcweight(text, final)
                a = weight_factor + self.k
                lis.append(a)
            resulting.append(lis)
        self.df["Weights"] = pd.Series(resulting)
        return self.df
# --------------------------------------------------------------Pre Processor-------------------------------------------------------------------#
class PreProcessEcomm:
    '''
    PreProcess class. It utilises multiple helper functions to clean data. As data extracted
    from different websites have a different format, it brings them to the same format.
    '''
    def __init__(self, df):
        self.df = df
    def simplify(self, rev):
        # Normalise a Reviews cell to a flat list of review strings.
        # Fresh scrapes hold [subject, text, stars] triples; CSV round-trips
        # hold one stringified list that is split back apart on ']'.
        reviews = []
        if (type(rev) != str):
            for i in rev:
                text = i[0] + i[1] + ' ' + i[2]
                reviews.append(text)
            return reviews
        temp = rev.split(']')
        reviews = []
        for i in temp:
            if i != '':
                reviews.append(i)
        return reviews
    def clean(self, rev):
        # Strip punctuation and newlines from every review string.
        lis = []
        for i in rev:
            i = re.sub(r'[^\w\s]','',i)
            i = i.replace("\n", " ")
            lis.append(i)
        return lis
    def clean2(self, rev):
        # Same cleaning for a single string; non-strings become "".
        try:
            i = re.sub(r'[^\w\s]','',rev)
            i = i.replace("\n", " ")
            return i
        except:
            return ""
    def reviewtodict(self):
        # Convert each review list into {review_text: star_score}; the last
        # two characters of each entry are parsed as the score (0 on failure).
        lis = []
        for i in self.df["Reviews"]:
            a = {}
            for j in i:
                try:
                    score = int(j[-2:])
                    text = j[:len(j) - 2]
                    a[text] = score
                except:
                    score = 0
                    text = j[:len(j) - 2]
                    a[text] = score
            lis.append(a)
        self.df["Reviews"] = pd.Series(lis)
        return
    def ratings(self, s):
        # Parse the leading three characters as a float rating; fallback 0.
        x = s[:3]
        try:
            return float(x)
        except:
            return 0
    def num_ratings(self, s):
        # First integer found in the string, else 0.
        try:
            x = re.findall(r'\d+', s)
            return int(x[0])
        except:
            return 0
    def curr_views(self, s):
        # First integer found in the string, else 0.
        try:
            x = re.findall(r'\d+', s)[0]
            ans = int(x)
            return ans
        except:
            return 0
    def price(self, s):
        # Extract a $/£/€-prefixed price; 'Out Of Stock' -> 0; otherwise assume
        # the first char is a currency symbol and parse the next few chars.
        try:
            x = re.findall('[\$\£\€](\d+(?:\.\d{1,2})?)', s)
            return float(x[0])
        except:
            if (s == 'Out Of Stock'):
                return 0
            s = s[1:]
            return float(s[:4])
    def discount(self, s):
        # Discount percentage as int; 0/None pass through as 0.
        if (s == 0):
            return 0
        elif (s == None):
            return 0
        else:
            return int(re.findall(r'\d+', s)[0])
    def main_pre(self):
        """Run the full cleaning pipeline over self.df and return it."""
        self.df['Reviews']= self.df.Reviews.apply(self.simplify)
        self.df['Reviews']= self.df.Reviews.apply(self.clean)
        self.df['Discount'] = self.df['Discount'].fillna(0)
        self.df["Rating"] = self.df["Rating"].apply(self.ratings)
        self.df["Number of Ratings"] = self.df["Number of Ratings"].apply(self.num_ratings)
        self.df["Current Views"] = self.df["Current Views"].apply(self.curr_views)
        self.df["Price"] = self.df["Price"].apply(self.price)
        self.df["Discount"] = self.df["Discount"].apply(self.discount)
        self.df['Description'] = self.df.Description.apply(self.clean2)
        self.reviewtodict()
        return self.df
# --------------------------------------------------------------PORTRAY - ECOMMERCE--------------------------------------------------------------#
# Shared scaler: maps ranking scores onto a 0-10 range.
scaler = MinMaxScaler(feature_range = (0,10))
class PORTRAY_E:
    '''
    PORTRAY RANKING ALGORITHM
    It utilises :
    1. Weights of the Reviews
    2. Star Rating
    3. Number of Views and Number of ratings.
    4. Price and Discount
    To rank the products.
    '''
    def __init__(self, df):
        self.df = df
    def normalize(self):
        """Rescale RSCORE onto the module-level scaler's 0-10 range (in place)."""
        column_names_to_normalize = ['RSCORE']
        x = self.df[column_names_to_normalize].values
        x_scaled = scaler.fit_transform(x)
        df_temp = pd.DataFrame(x_scaled, columns=column_names_to_normalize, index=self.df.index)
        self.df[column_names_to_normalize] = df_temp
    def r1score(self):
        """Popularity component: rating scaled by view volume, normalised by its mean."""
        r1scores = []
        for i in range(len(self.df)):
            rating = self.df["Rating"][i]
            views = self.df["Current Views"][i]
            # NOTE(review): reads "Current Views" twice; "Number of Ratings"
            # may have been intended for one of the two — confirm before changing.
            count = self.df["Current Views"][i]
            factor = (views + count) / 2
            r1 = factor * rating
            r1scores.append(r1)
        self.df["R1SCORE"] = pd.Series(r1scores)
        mean_dist = self.df["R1SCORE"].mean()
        self.df["R1SCORE"] = self.df["R1SCORE"] / mean_dist
        return
    def r2score(self):
        """Review component: sum of star-score x review-weight, scaled down by 10."""
        r2scores = []
        for i in range(len(self.df)):
            currdict = self.df["Reviews"][i]
            weights = self.df["Weights"][i]
            if (not currdict):
                # No reviews: fall back to the base weight stored by WeightingReviews.
                r2scores.append(weights[0])
                continue
            j = 0
            r2 = 0
            for key, val in currdict.items():
                r2 = r2 + (val * weights[j])
                j += 1
            r2scores.append(r2 / 10)
        self.df["R2SCORE"] = pd.Series(r2scores)
        return
    def rscore(self):
        """Average R1/R2 into RSCORE and drop the intermediate columns."""
        rscores = []
        for i in range(len(self.df)):
            r = (self.df["R1SCORE"][i] + self.df["R2SCORE"][i]) / 2
            rscores.append(r)
        self.df["RSCORE"] = pd.Series(rscores)
        del self.df["R1SCORE"], self.df["R2SCORE"]
        del self.df["Weights"]
        return
    def price_discount(self):
        """Nudge RSCORE for price/discount outliers.

        Bug fix: the original computed ``self.df["RSCORE"][i] + 0.5`` (and two
        ``- 0.5`` cases) without assigning the result, so the adjustments
        never took effect.  They are now applied via ``.loc`` augmented
        assignment.
        """
        P_mean = self.df["Price"].mean()
        total = 0
        count = 0
        for i in self.df["Discount"]:
            if i != 0:
                total = total + i
                count += 1
        if count == 0:
            D_mean = 0  # no discounted products at all
        else:
            D_mean = total / count
        # Expensive + well-rated products get a small boost; cheap + poorly
        # rated ones a small penalty.
        for i in range(len(self.df)):
            if (self.df["Price"][i] >= 2 * P_mean and self.df["RSCORE"][i] > 5.00):
                self.df.loc[i, "RSCORE"] += 0.5
            elif (self.df["Price"][i] <= 0.5 * P_mean and self.df["RSCORE"][i] < 3.00):
                self.df.loc[i, "RSCORE"] -= 0.5
        # Heavily discounted but low-scoring products are penalised.
        for i in range(len(self.df)):
            if (self.df["Discount"][i] >= 1.5 * D_mean and self.df["RSCORE"][i] < 5.00):
                self.df.loc[i, "RSCORE"] -= 0.5
    def results(self, n, m):
        """Run the full pipeline and return (top n, bottom m) product frames."""
        self.r1score()
        self.r2score()
        self.rscore()
        self.price_discount()
        self.normalize()
        self.df = self.df.sort_values(by='RSCORE', ascending=False)
        self.df = self.df.reset_index(drop=True)
        TOP_PRO = self.df[:n]
        TOP_PRO = TOP_PRO.reset_index(drop=True)
        BOT_PRO = self.df[-m:]
        BOT_PRO = BOT_PRO.reset_index(drop=True)
        return TOP_PRO, BOT_PRO
# --------------------------------------------------------------CORE CODE--------------------------------------------------------------#
def predictor(choice, query, count):
    '''
    A P I FUNCTION
    1. Takes Input from Website
    2. Runs only if data previously scraped.
    3. Collects Data
    4. Calculates Score
    5. Performs Image Analysis
    6. Performs feature extraction
    7. Return results.

    choice : website key ('NORDSTROM' or 'SHEIN')
    query  : product category to rank
    count  : how many top/bottom products to keep

    Side effects: writes TOP.pkl and BOTTOM.pkl under static/Sample_Results/.
    '''
    m = {}
    m['NORDSTROM'] = 'NORDSTROM'
    m['SHEIN'] = 'SHEIN'
    choice = [choice]
    file = ''
    print()
    if (len(choice) == 1):
        file = m[choice[0]]
        util = pickle.load(open('static/PKL/' + file + '.pkl', "rb"))
        util[1] = [query]
        for i in util[1]:
            try:
                # Reuse previously scraped data when available.
                df = pd.read_csv('static/CSV/' + file + '_' + i + '.csv')
                break
            except:
                print('WE WILL HAVE TO SCRAPE THIS DATA.')
                print()
                scraper = DataCollectionEcomm(util[0], util[2], util[3], util[1])
                links = scraper.main_1()
                scraper.main_2()
                df = scraper.df
                df.to_csv('static/CSV/' + file + '_' + i + '.csv', index=False)
                break
        pre = PreProcessEcomm(df)
        df = pre.main_pre()
        wgt = WeightingReviews(df)
        df = wgt.main_weights()
        alg = PORTRAY_E(df)
        top, bottom = alg.results(count, count)
    else:
        # NOTE(review): unreachable — `choice = [choice]` above always yields a
        # single-element list.  Kept (with fixes) for future multi-site use.
        queries = [item for item in input("Enter the Queries: ").split()]
        DATA = pd.DataFrame(columns=["Name", "Brand", "Price", "Discount", "Image_Link", "Rating", "Number of Ratings", "Reviews", "Current Views", "Description"])
        for i in choice:
            file = m[i]
            util = pickle.load(open('static/PKL/' + file + '.pkl', "rb"))
            util[1] = queries
            for i in util[1]:
                try:
                    df = pd.read_csv('static/CSV/' + file + '_' + i + '.csv')
                    # NOTE(review): DataFrame.append returns a NEW frame; these
                    # calls discard their result, so DATA stays empty — confirm
                    # intent before relying on this branch.
                    DATA.append(df)
                    break  # bug fix: was `reak` (a NameError silently caught below)
                except:
                    scraper = DataCollectionEcomm(util[0], util[2], util[3], util[1])
                    links = scraper.main_1()
                    scraper.main_2()
                    df = scraper.df
                    DATA.append(df)
                    # bug fix: was index='False', a truthy string that still wrote the index
                    df.to_csv('static/CSV/' + file + '_' + i + '.csv', index=False)
                    break
            pre = PreProcessEcomm(df)
            df = pre.main_pre()
            DATA.append(df)
        wgt = WeightingReviews(DATA)
        df = wgt.main_weights()
        alg = PORTRAY_E(DATA)
        top, bottom = alg.results(count, count)
    # Image analysis + feature extraction on the ranked extremes.
    df_top = main(top)
    df_bottom = main(bottom)
    df_top = cleanresults(df_top)
    df_bottom = cleanresults(df_bottom)
    df_top.to_pickle('static/Sample_Results/TOP.pkl')
    df_bottom.to_pickle('static/Sample_Results/BOTTOM.pkl')
    print('Results are saved')
    print('Terminating')
'''--------------------------------------------------------------------E N D-----------------------------------------------------------------'''
|
lgw2/lgw2.github.io | _teaching/csci127-summer-2020/readings/activities/etch-a-sketch.py | import turtle
import random
# Etch-a-sketch: click inside the "Change Color" square to recolor the pen;
# drag-and-release moves the pencil turtle.
window = turtle.Screen()
square = turtle.Turtle()
square.speed(0)
square.hideturtle()
square.up()
square.goto(-200, 200)
square.down()
# Draw the 50x50 color-change button; it spans x in [-200,-150], y in [150,200].
for i in range(4):
    square.forward(50)
    square.right(90)
square.up()
square.goto(-205, 205)
square.write("Change Color")
pencil = turtle.Turtle()
pencil.shape("circle")
def drawing_controls(x, y):
    """Screen-click handler: clicks inside the button pick a random pen color."""
    if (-200 <= x <= -150) and (150 <= y <= 200):
        pencil.color(random.random(), random.random(), random.random())
window.onclick(drawing_controls)
pencil.onrelease(pencil.goto)
|
lgw2/lgw2.github.io | _teaching/csci127-summer-2020/readings/activities/nested-lists.py | populations = [["California", 38332521],
["Texas", 26448193],
["New York", 19651127],
["Florida", 19552860],
["Illinois", 12882135],
["Pennsylvania", 12773801],
["Ohio", 11570808],
["Georgia", 9992167],
["Michigan", 9895622],
["North Carolina", 9848060]]
|
lgw2/lgw2.github.io | _teaching/csci127-summer-2020/labs/lab11.py | <gh_stars>0
import matplotlib.pyplot as plt
import pandas as pd
# -------------------------------------------------
# CSCI 127, Lab 11 |
# June 23, 2020 |
# Your Name |
# -------------------------------------------------
##### your code here
##### do not change anything below this line
def main():
    """Lab driver: load billionaires.csv and report the data-set shape, total
    wealth by year, number of women, and save an age histogram.

    Relies on student-implemented helpers defined above: get_data_shape,
    compute_total_wealth, compute_num_women, plot_histogram.
    """
    print("This program calculates some information from billionaires.csv.\n")
    data = pd.read_csv('billionaires.csv')
    rows, columns = get_data_shape(data)
    total_wealth_by_year = compute_total_wealth(data)
    num_women = compute_num_women(data)
    plot_histogram(data, "age_histogram.png")
    print("The data set has {} rows and {} columns.".format(rows, columns))
    print("\nThe total wealth of billionaires (in billions) by year was:")
    print(total_wealth_by_year)
    print("\nThe number of female billionaires in the data set is {}.".format(num_women))
main()
|
lgw2/lgw2.github.io | _teaching/csci127-summer-2020/readings/activities/date.py | <filename>_teaching/csci127-summer-2020/readings/activities/date.py
class Date:
    """A simple calendar date holding a month name, day number and year."""

    def __init__(self, month, day, year):
        """Record the three components of the date."""
        self.month = month
        self.day = day
        self.year = year

    def get_month(self):
        """Reader: the month component."""
        return self.month

    def get_day(self):
        """Reader: the day component."""
        return self.day

    def get_year(self):
        """Reader: the year component."""
        return self.year

    def set_day(self, day):
        """Writer: replace the day component."""
        self.day = day
# -------------------------------
# Demo: create an instance of Date with the value March 6, 2017
today = Date("March", 6, 2017)
print("Date:", today.get_month(), today.get_day(), today.get_year())
# Update the instance to be one day later (no month rollover handling)
day = today.get_day()
today.set_day(day + 1)
print("Date:", today.get_month(), today.get_day(), today.get_year())
|
lgw2/lgw2.github.io | _teaching/csci127-summer-2020/labs/lab9.py | <gh_stars>0
import numpy as np
import random
# -------------------------------------------------
# CSCI 127, Lab 9
# June 18, 2020
# Your Name
# -------------------------------------------------
class Die:
    """An n-sided die."""

    def __init__(self, sides):
        """Remember how many faces this die has."""
        self.sides = sides

    def roll(self):
        """Return a uniform random face value from 1 to the face count, inclusive."""
        return random.randint(1, self.sides)
# -------------------------------------------------
class Yahtzee:
    """Tracks one hand of five dice rolls."""

    def __init__(self, sides):
        """Create storage for five rolls of dice with the given side count."""
        self.rolls = np.zeros(5, dtype=np.int16)
        self.sides = sides

    def roll_dice(self):
        """Fill all five slots with fresh independent rolls."""
        for slot in range(len(self.rolls)):
            self.rolls[slot] = Die(self.sides).roll()

    def count_outcomes(self):
        """Return an array where index v holds how many dice currently show value v."""
        counts = np.zeros(self.sides + 1, dtype=np.int16)
        for value in self.rolls:
            counts[value] += 1
        return counts
# -------------------------------------------------
# do not change anything above this line
# your methods here
# do not change anything below this line
# -------------------------------------------------
def main(how_many):
    """Simulate *how_many* Yahtzee rolls and report how often each hand type occurs.

    Relies on student-implemented predicate methods on Yahtzee:
    is_it_high_roll, is_it_three_of_a_kind, is_it_large_straight.
    """
    high_rolls = 0
    three_of_a_kinds = 0
    large_straights = 0
    game = Yahtzee(6) # 6-sided dice
    for i in range(how_many):
        game.roll_dice()
        if game.is_it_high_roll():
            high_rolls += 1
        elif game.is_it_three_of_a_kind():
            three_of_a_kinds += 1
        elif game.is_it_large_straight():
            large_straights += 1
    print("Number of Rolls:", how_many)
    print("---------------------")
    print("Number of High Rolls:", high_rolls)
    print("Percent:", "{:.2f}%\n".format(high_rolls * 100 / how_many))
    print("Number of Three of a Kinds:", three_of_a_kinds)
    print("Percent:", "{:.2f}%\n".format(three_of_a_kinds * 100 / how_many))
    print("Number of Large Straights:", large_straights)
    print("Percent:", "{:.2f}%".format(large_straights * 100 / how_many))
# -------------------------------------------------
main(20000)
|
lgw2/lgw2.github.io | _teaching/csci127-summer-2020/readings/activities/demo.py | def alternate_case (sentence, start = 0):
    """Return *sentence* with characters before index *start* unchanged and
    the rest in alternating UPPER/lower case.

    NOTE(review): indentation reconstructed — the single toggle executes every
    iteration (matches the demo outputs printed below the definition).
    """
    result = ""
    upper = True
    for i in range(len(sentence)):
        if i < start:
            result = result + sentence[i]
        elif upper:
            result = result + sentence[i].upper()
        else:
            result = result + sentence[i].lower()
        upper = not upper
    return result
# Demo: alternation from the first character, and from later start offsets.
print(alternate_case("abcdefghij"))
print(alternate_case("abcdefghij", 2))
print(alternate_case("abcdefghij", 3))
print(alternate_case("ABCDEFGHIJ"))
print(alternate_case("ABCDEFGHIJ", 3))
print(alternate_case("ABCDEFGHIJ", 4))
|
lgw2/lgw2.github.io | _teaching/csci127-summer-2020/readings/activities/racing-turtles.py | import turtle
import random
# Two-turtle race: each turtle advances a random distance ten times;
# whichever finishes further right wins.
window = turtle.Screen()
racer_1 = turtle.Turtle()
racer_1.up()
racer_1.shape("turtle")
racer_1.color(random.random(), random.random(), random.random())
racer_1.goto(-200, 100)
racer_1.down()
racer_1.stamp()
racer_2 = turtle.Turtle()
racer_2.up()
racer_2.shape("turtle")
racer_2.color(random.random(), random.random(), random.random())
racer_2.goto(-200, 0)
racer_2.down()
racer_2.stamp()
for i in range(10):
    racer_1.forward(random.randint(1, 40))
    racer_1.dot()
    racer_2.forward(random.randint(1, 40))
    racer_2.dot()
# NOTE(review): a tie (equal x) is reported as a win for racer #2.
if racer_1.xcor() > racer_2.xcor():
    print("Turtle racer #1 wins!")
else:
    print("Turtle racer #2 wins!")
window.exitonclick()
|
lgw2/lgw2.github.io | _teaching/csci127-summer-2020/readings/bridger_snowfall.py | <gh_stars>0
import pandas as pd
import matplotlib.pyplot as plt
# Create Data --------------------------
# http://www.onthesnow.com/montana/bridger-bowl/historical-snowfall.html
# Three parallel lists: index i describes the season starting in years[i].
years = [2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017, 2018, 2019] # bridger bowl year
total_snowfall = [253, 304, 388, 265, 283, 209, 194, 271, 177, 186] # inches
largest_snowfall = [19, 16, 19, 25, 20, 14, 13, 20, 15, 21] # inches
# Your code here -----------------------
lgw2/lgw2.github.io | _teaching/csci127-summer-2020/readings/snow_report.py | <filename>_teaching/csci127-summer-2020/readings/snow_report.py<gh_stars>0
import matplotlib.pyplot as plt
# Three parallel lists: index i describes the same season in each list.
years = [2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017, 2018] # bridger bowl year
total_snowfall = [253, 304, 388, 265, 283, 209, 194, 271, 177] # inches
largest_snowfall = [19, 16, 19, 25, 20, 14, 13, 20, 15] # inches
# your code here
|
lgw2/lgw2.github.io | _teaching/csci127-summer-2020/labs/lab10.py | <reponame>lgw2/lgw2.github.io
import numpy as np
import matplotlib.pyplot as plt
# -------------------------------------------------
# CSCI 127, Lab 10 |
# June 19, 2020 |
# Your Name |
# -------------------------------------------------
def read_file(file_name):
    # TODO(lab): parse the CSV named file_name and return two parallel
    # sequences (college_names, college_enrollments); main() unpacks both.
    pass
# -------------------------------------------------
def main(file_name):
    # NOTE(review): read_file is still a stub returning None, so this
    # unpacking fails until the lab's read_file is implemented.
    college_names, college_enrollments = read_file(file_name)
# -------------------------------------------------
main("spring-2020.csv")
|
lgw2/lgw2.github.io | _teaching/csci127-summer-2020/readings/activities/diceSimulator.py | <reponame>lgw2/lgw2.github.io
# --------------------------------------
# Simulate rolling 2 dice and plot the results.
# --------------------------------------
import numpy as np
import matplotlib.pyplot as plt
# --------------------------------------
def generate_rolls(how_many):
    """Return a NumPy int array of *how_many* simulated two-die totals (2-12)."""
    return np.array(
        [np.random.randint(1, 7) + np.random.randint(1, 7)
         for _ in range(how_many)],
        dtype=int,
    )
# --------------------------------------
def main():
    """Simulate ten two-die rolls and display them as a histogram."""
    rolls = generate_rolls(10)
    plt.hist(rolls, bins=np.arange(2, 14), facecolor='g', align='left') # generate histogram
    plt.xticks(np.arange(2, 13))
    plt.xlabel('Value')
    plt.ylabel('Occurrences')
    plt.title('Histogram of Dice Rolls')
    plt.grid(True)
    plt.show()
# --------------------------------------
main()
|
lgw2/lgw2.github.io | _teaching/csci127-summer-2020/labs/lab3.py | <reponame>lgw2/lgw2.github.io
# --------------------------------------
# CSCI 127, Lab 3 |
# May 28, 2020 |
# --------------------------------------
# Calculate the length of a string |
# using three techniques. |
# --------------------------------------
def length_built_in(sentence):
    """Return the length of *sentence* using the built-in len()."""
    return len(sentence)


def length_iterative(sentence):
    """Return the length of *sentence* by counting characters in a loop."""
    count = 0
    for _ in sentence:
        count += 1
    return count


def length_recursive(sentence):
    """Return the length of *sentence* by recursing on its tail."""
    if sentence == "":
        return 0
    return 1 + length_recursive(sentence[1:])
# --------------------------------------
def main():
    """Repeatedly read sentences and report their length three ways."""
    answer = "yes"
    while (answer == "yes") or (answer == "y"):
        sentence = input("Please enter a sentence: ")
        sentence = sentence.lower()
        print()
        print("Calculating length of the sentence using ...")
        print("---------------------------------------")
        print("Built-in function =", length_built_in(sentence))
        print("Iteration =", length_iterative(sentence))
        print("Recursion =", length_recursive(sentence))
        print()
        answer = input("Would you like to continue: ").lower()
        print()
# --------------------------------------
main()
|
lgw2/lgw2.github.io | _teaching/csci127-summer-2020/readings/lesson1.py | import pandas as pd
import matplotlib.pyplot as plt
import sys # to determine Python version number
import matplotlib # to determine Matplotlib version number
print('Python version ' + sys.version)
print('Pandas version ' + pd.__version__)
print('Matplotlib version ' + matplotlib.__version__)
print()
# Create Data --------------------------
# http://www.onthesnow.com/montana/bridger-bowl/historical-snowfall.html
years = [2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017, 2018] # bridger bowl year
total_snowfall = [253, 304, 388, 265, 283, 209, 194, 271, 177] # inches
largest_snowfall = [19, 16, 19, 25, 20, 14, 13, 20, 15] # inches
# zip the three parallel lists into one (year, total, largest) tuple per row
BridgerDataSet = list(zip(years, total_snowfall, largest_snowfall))
print("BridgerDataSet:", BridgerDataSet, "\n")
data = pd.DataFrame(data = BridgerDataSet, columns=["Year", "Total", "Largest"])
print("Bridger DataFrame")
print("-----------------")
print(data)
# Round-trip the data through a CSV file to demonstrate pandas I/O.
data.to_csv('bridger.csv',index=False,header=False)
# Get Data -----------------------------
bridger = pd.read_csv('bridger.csv', names=['Year', 'Total', 'Largest'])
print("\nBridger DataFrame after reading csv file")
print("----------------------------------------")
print(bridger)
# Prepare Data -------------------------
if (bridger.Total.dtype == 'int64'):
    print("\nTotal snowfall is of type int64")
else:
    print("\nTotal Snowfall is of type", bridger.Total.dtype)
# Analyze Data -------------------------
sorted_data = bridger.sort_values(['Total'], ascending=False)
print("\nSorted Bridger Data Set")
print("-----------------------")
print(sorted_data)
print("\nThe least total snowfall was", bridger['Total'].min())
# Display Data -------------------------
bridger.plot(x="Year", y="Total", kind="bar", color="yellow")
plt.xlabel("Year")
plt.ylabel("Total Snowfall")
plt.show()
|
class Card:
    """A single playing card carrying a blackjack point value."""

    def __init__(self, rank, suit):
        """Remember rank and suit and compute the blackjack value."""
        self.rank = rank
        self.suit = suit
        self.value = self.assign_value(rank)

    def get_suit(self):
        """Return the card's suit."""
        return self.suit

    def get_rank(self):
        """Return the card's rank."""
        return self.rank

    def get_value(self):
        """Return the card's blackjack value."""
        return self.value

    def assign_value(self, rank):
        """Placeholder for the lab: report the value logic is unfinished."""
        print("The assignValue method needs to be completed")
        return -1
# -----------------------
def evaluate(hand):
    """Return the combined blackjack value of every card in *hand*."""
    return sum(card.get_value() for card in hand)
# -----------------------
def process_hand(hand):
    # TODO(lab): score the hand (see evaluate above) and report the result.
    print("The processHand function needs to be completed")
# -----------------------
def main():
    """Build one card of each rank and score several sample blackjack hands."""
    ace = Card("ace", "spades")
    king = Card("king", "diamonds")
    queen = Card("queen", "hearts")
    jack = Card("jack", "clubs")
    ten = Card("ten", "spades")
    nine = Card("nine", "hearts")
    eight = Card("eight", "diamonds")
    seven = Card("seven", "clubs")
    six = Card("six", "spades")
    five = Card("five", "hearts")
    four = Card("four", "diamonds")
    three = Card("three", "clubs")
    two = Card("two", "spades")
    process_hand([ace, king])
    process_hand([queen, ace])
    process_hand([ace, jack])
    process_hand([ten, ace])
    process_hand([two, three, four, five, six, seven])
    process_hand([eight, nine, two])
# -----------------------
main()
|
lgw2/lgw2.github.io | _teaching/csci127-summer-2020/labs/lab7.py | <reponame>lgw2/lgw2.github.io
# -----------------------------------------------------
# CSCI 127, Lab 7
# June 10, 2020
# Your Name
# -----------------------------------------------------
# -----------------------------------------------------
# Do not change anything below this line
# -----------------------------------------------------
def print_directory(contacts):
    """Print a header, one entry per contact (via its print_entry), and a footer."""
    print("My Contacts")
    print("-----------")
    for person in contacts:
        person.print_entry()
    print("-----------\n")
# -----------------------------------------------------
def main():
    """Exercise the student-written Contact class (defined above this block)."""
    champ = Contact("???", "Bobcat", "406-994-0000")
    president = Contact("Waded", "Cruzado", "406-994-CATS")
    professor = Contact("John", "Paxton", "406-994-4780")
    contacts = [champ, president, professor]
    print_directory(contacts)
    champ.set_first_name("Champ")
    president.set_title("President")
    professor.set_title("Professor")
    print_directory(contacts)
    print("The area code for cell number", champ.get_cell_number(), "is", \
          champ.get_area_code())
# -----------------------------------------------------
main()
|
class Card:
    """One playing card identified by its rank and suit."""

    def __init__(self, rank, suit):
        """Remember this card's rank and suit."""
        self.rank = rank
        self.suit = suit

    def __str__(self):
        """Describe the card, e.g. 'The ace of spades'."""
        return "The " + self.rank + " of " + self.suit

    def get_suit(self):
        """Return the card's suit."""
        return self.suit

    def get_rank(self):
        """Return the card's rank."""
        return self.rank
# -----------------------------------------------------
class Deck:
    """An ordered collection of the 52 standard playing cards."""

    RANKS = ["two", "three", "four", "five", "six", "seven", "eight",
             "nine", "ten", "jack", "queen", "king", "ace"]
    SUITS = ["clubs", "diamonds", "hearts", "spades"]

    def __init__(self):
        """Build the deck rank-major: all four suits of 'two' first, etc."""
        self.cards = [Card(rank, suit)
                      for rank in self.RANKS
                      for suit in self.SUITS]

    def print_deck(self):
        """Print every card in the deck, numbered from 1."""
        print("Deck of Cards")
        print("-------------")
        for number, card in enumerate(self.cards, start=1):
            print(number, card)
        print()
# -----------------------------------------------------
cards = Deck()
cards.print_deck()
# cards.shuffle()
print("After shuffling...\n")
# cards.print_deck()
|
lgw2/lgw2.github.io | _teaching/csci127-summer-2020/readings/lesson2.py | import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
how_many = 1000 # number of submissions
file_name = "jbd.csv" # file to write to and then read from
# Create Data --------------------------
names = ['HughJ', 'Ryan', 'Justin', 'Sam', 'Michael', 'Grace',
         'Courtney', 'Chris', 'Kyle', 'Tyler', 'HughO']
# np.random.seed(2017)
# Draw a random student name for each of the how_many submissions.
random_names = list(map((lambda x: names[x]),
                        np.random.randint(0, len(names), how_many)))
print("First 5 names:", random_names[:5], "\n")
scores = np.random.randint(0, 101, how_many)
print("First 5 scores:", scores[:5], "\n")
cs_data_set = list(zip(random_names,scores))
print("First 5 Zipped Names and Scores")
print("-----------------------------")
print(cs_data_set[:5])
data_frame = pd.DataFrame(data = cs_data_set, columns=['Names', 'Scores'])
print("\nFirst 5 Items in Data Frame")
print("---------------------------")
print(data_frame[:5])
# Round-trip through a CSV file to demonstrate pandas I/O.
data_frame.to_csv(file_name,index=False,header=True)
# Access Data --------------------------
jbd_data_frame = pd.read_csv(file_name)
print("\nFirst 5 Items in Data Frame From File")
print("-------------------------------------")
print(jbd_data_frame[:5])
print("\nData Frame Info")
print("---------------")
print(jbd_data_frame.info())
print("\nData Frame Head")
print("---------------")
print(jbd_data_frame.head())
print("\nData Frame Tail")
print("---------------")
print(jbd_data_frame.tail(3))
# Manipulate Data ----------------------
print("\nUnique Names in Data Frame")
print("--------------------------")
for name in jbd_data_frame['Names'].unique():
    print(name)
# Group rows by student so per-student totals can be computed.
jbd_object = jbd_data_frame.groupby('Names')
print("\nDescribe Data Frame")
print("-------------------")
print(jbd_object.describe())
condensed_data_frame = jbd_object.sum()
print("\nFirst 5 Items in Condensed Data Frame After Sum")
print("-----------------------------------------------")
print(condensed_data_frame[:5])
condensed_data_frame = condensed_data_frame.sort_values(['Scores'])
print("\nFirst 5 Items in Condensed Data Frame After Sort")
print("------------------------------------------------")
print(condensed_data_frame[:5])
# Present Data -------------------------
condensed_data_frame['Scores'].plot.bar(color="turquoise")
plt.xlabel("Student")
plt.ylabel("Points Earned")
plt.title("Joy and Beauty of Data Dashboard")
plt.show()
|
lgw2/lgw2.github.io | _teaching/csci127-summer-2019/labs/lab6.py | # --------------------------------------
# CSCI 127, Lab 7 |
# February 28, 2019 |
# Your Name |
# --------------------------------------
# The missing functions go here.
# --------------------------------------
def main():
    """Translate sample sentences using the student helpers
    create_dictionary and translate (to be written above this block)."""
    dictionary = create_dictionary("ascii-codes.csv")
    sentence = "Buck lived at a big house in the sun-kissed Santa Clara Valley. <NAME>'s place, it was called!"
    translate(sentence, dictionary, "output-1.txt")
    sentence = "Bozeman, MT 59717"
    translate(sentence, dictionary, "output-2.txt")
    sentence = "The value is ~$25.00"
    translate(sentence, dictionary, "output-3.txt")
# --------------------------------------
main()
|
lgw2/lgw2.github.io | _teaching/csci127-summer-2020/readings/activities/loop.py |
# 'continue' skips multiples of 7 but keeps the loop running.
print("Loop with continue\n")
for i in range(2000, 2017):
    if i % 7 == 0:
        continue
    print(i)
# 'break' stops the loop entirely at the first multiple of 7.
print("\nLoop with break\n")
for i in range(2000, 2017):
    if i % 7 == 0:
        break
    print(i)
print("\nDemonstration finished")
|
lgw2/lgw2.github.io | _teaching/csci127-summer-2020/readings/activities/card.py | <reponame>lgw2/lgw2.github.io
class Card:
    """Card class for representing and manipulating one playing card"""
    def __init__(self, rank, suit):
        """A constructor method that sets the suit and rank"""
        self.suit = suit
        self.rank = rank
        self.value = self.assign_value(rank)
    def get_suit(self):
        """A reader method that returns the suit of the card"""
        return self.suit
    def get_rank(self):
        """A reader method that returns the rank of the card"""
        return self.rank
    def get_value(self):
        """ A reader method that returns the blackjack value of the card"""
        return self.value
    def assign_value(self, rank):
        """A helper function to determine the blackjack value of a rank"""
        # TODO(lab): map rank to its blackjack value; -1 marks "unimplemented".
        print("The assignValue method needs to be completed")
        return -1
|
lgw2/lgw2.github.io | _teaching/csci127-summer-2020/readings/planets.py | <gh_stars>0
import pandas as pd
import matplotlib.pyplot as plt
# read in the data
# (assumes planets.csv is in the working directory — TODO confirm columns)
planets = pd.read_csv("planets.csv")
|
lgw2/lgw2.github.io | _teaching/csci127-summer-2020/assignments/weather.py | <reponame>lgw2/lgw2.github.io
# -----------------------------------------+
# CSCI 127, Joy and Beauty of Data |
# Program 3: Weather CSV Library |
# Your Name(, Your Partner's Name) |
# Last Modified: ??, 2019 |
# -----------------------------------------+
# Provide a brief overview of the program. |
# -----------------------------------------+
# -----------------------------------------+
# Do not change anything below this line |
# with the exception of code related to |
# option 4. |
# -----------------------------------------+
# -----------------------------------------+
# menu |
# -----------------------------------------+
# Prints a menu of options for the user. |
# -----------------------------------------+
def menu():
    """Print the numbered list of program options, padded by blank lines."""
    options = (
        "1. Identify coldest temperature.",
        "2. Identify average temperature for a given location.",
        "3. Identify all recording station locations by state.",
        "4. Something interesting, non-trivial and not a variation of the above options.",
        "5. Quit.",
    )
    print()
    for option in options:
        print(option)
    print()
# -----------------------------------------+
# main |
# -----------------------------------------+
# Repeatedly query the user for options. |
# -----------------------------------------+
def main():
    """Repeatedly query the user for options.

    Options 1-3 call coldest_temperature, average_temperature and
    all_stations_by_state, the student-written functions for this program.
    """
    input_file = "weather.csv"
    choice = 0
    while (choice != 5):
        menu()
        choice = int(input("Enter your choice: "))
        print()
        if (choice == 1):
            coldest_temperature(input_file)
        elif (choice == 2):
            location = input("Enter desired location (e.g. Miles City, MT): ")
            average_temperature(input_file, location)
        elif (choice == 3):
            state = input("Enter name of state (e.g. Montana): ")
            all_stations_by_state(input_file, state)
        elif (choice == 4):
            pass
        elif (choice != 5):
            print("That is not a valid option. Please try again.")
    print("Goodbye!")
# -----------------------------------------+
main()
|
lgw2/lgw2.github.io | _teaching/csci127-summer-2020/readings/quiz3.py | <filename>_teaching/csci127-summer-2020/readings/quiz3.py
import matplotlib.pyplot as plt
import pandas as pd
# Load the quiz dataset; analysis is to be added below.
data = pd.read_csv('billionaires.csv')
# your code here
|
lgw2/lgw2.github.io | _teaching/csci127-summer-2020/readings/activities/draw_square.py | <reponame>lgw2/lgw2.github.io<gh_stars>0
import turtle
def drawSquare(t, sz):
    """Make turtle *t* draw a square with side length *sz*."""
    for i in range(4):
        t.forward(sz)
        t.left(90)
wn = turtle.Screen() # Set up the window and its attributes
wn.bgcolor("lightgreen")
alex = turtle.Turtle() # create alex
drawSquare(alex, 50) # Call the function to draw the square passing the actual turtle and the actual side size
# Move without drawing, then draw a larger square elsewhere.
alex.penup()
alex.goto(100, 100)
alex.pendown()
drawSquare(alex, 200)
wn.exitonclick()
|
lgw2/lgw2.github.io | _teaching/csci127-summer-2019/labs/lab3.py | # --------------------------------------
# CSCI 127, Lab 3 |
# June 13, 2019 |
# Partner names |
# --------------------------------------
# Calculate how many z's are in a |
# sentence using three techniques. |
# --------------------------------------
def count_built_in(sentence):
    """Count z's with the built-in str.count (input is lower-cased by main)."""
    return sentence.count("z")


def count_iterative(sentence):
    """Count z's by examining each character in a loop."""
    total = 0
    for character in sentence:
        if character == "z":
            total += 1
    return total


def count_recursive(sentence):
    """Count z's by recursing on the tail of the string."""
    if sentence == "":
        return 0
    return (1 if sentence[0] == "z" else 0) + count_recursive(sentence[1:])
# --------------------------------------
def main():
    """Repeatedly read sentences and count their z's three ways."""
    answer = "yes"
    while (answer == "yes") or (answer == "y"):
        sentence = input("Please enter a sentence: ")
        sentence = sentence.lower()
        print()
        print("Calculating the number of z's using ...")
        print("---------------------------------------")
        print("Built-in function =", count_built_in(sentence))
        print("Iteration =", count_iterative(sentence))
        print("Recursion =", count_recursive(sentence))
        print()
        answer = input("Would you like to continue: ").lower()
        print()
# --------------------------------------
main()
|
lgw2/lgw2.github.io | _teaching/csci127-summer-2020/labs/taxes.py | # ---------------------------------------
# CSCI 127, Joy and Beauty of Data |
# Lab 2: Tax Calculator |
# Your Name |
# Date: |
# ---------------------------------------
# Calculate the amount of tax owed by an|
# unmarried taxpayer in tax year 2020. |
# ---------------------------------------
# The missing Python function goes here.
# ---------------------------------------
def process(income):
    """Print the 2020 tax owed on *income* for an unmarried filer.

    Relies on unmarried_individual_tax, the lab function to be written above.
    """
    print("The 2020 taxable income is ${:.2f}".format(income))
    tax_owed = unmarried_individual_tax(income)
    print("An unmarried individual owes ${:.2f}\n".format(tax_owed))
#---------------------------------------
def main():
    """Run the seven published test incomes through process()."""
    process(5000) # test case 1
    process(20000) # test case 2
    process(50000) # test case 3
    process(100000) # test case 4
    process(200000) # test case 5
    process(400000) # test case 6
    process(600000) # test case 7
# ---------------------------------------
main()
|
lgw2/lgw2.github.io | _teaching/csci127-summer-2020/readings/graph.py | <filename>_teaching/csci127-summer-2020/readings/graph.py<gh_stars>0
# --------------------------------------
# CSCI 127: Joy and Beauty of Data
# MatPlotLib Introductory Example
# --------------------------------------
import matplotlib.pyplot as plt
import numpy as np
import math
# --------------------------------------
def plot_line(x1, y1, x2, y2):
    """Plot a gold line segment from (x1, y1) to (x2, y2)."""
    x = [x1, x2]
    y = [y1, y2]
    plt.plot(x, y, "gold")
# --------------------------------------
def plot_sine_wave(start_x, stop_x, amplitude):
    """Plot amplitude * sin(x) over [start_x, stop_x] using 1000 samples."""
    x_array = np.linspace(start_x, stop_x, 1000)
    y_array = amplitude * np.sin(x_array)
    plt.plot(x_array, y_array)
# --------------------------------------
def main(graph_min, graph_max):
    """Draw both diagonals of the viewport plus a centred sine wave."""
    plt.xlim(graph_min, graph_max)
    plt.ylim(graph_min, graph_max)
    plot_line(graph_min, graph_min, graph_max, graph_max)
    x_array = np.array([graph_min, graph_max])
    y_array = np.array([graph_max, graph_min])
    plt.plot(x_array, y_array, "blue")
    # Sine wave spans the middle half of the x-range at quarter-height amplitude.
    plot_sine_wave(graph_min // 2, graph_max // 2, graph_max // 4)
    plt.show()
# --------------------------------------
main(-100, 100)
|
lgw2/lgw2.github.io | _teaching/csci127-summer-2020/readings/activities/raven.py | import string
# ---------------------------
def keep_letters(filename):
    """Return every ASCII letter from *filename*, lower-cased, with all
    other characters (digits, punctuation, whitespace) removed.
    """
    kept = []
    # 'with' guarantees the file is closed even if reading raises,
    # unlike the previous open()/close() pair.
    with open(filename, "r") as file:
        for line in file:
            for letter in line.lower():
                if letter in string.ascii_lowercase:
                    kept.append(letter)
    # join once instead of repeated string concatenation
    return "".join(kept)
# ---------------------------
# Strip raven.txt down to its lower-cased letters and show the result.
text = keep_letters("raven.txt")
print(text)
|
lgw2/lgw2.github.io | _teaching/csci127-summer-2020/readings/dungeons.py | # Object Oriented Programming - Inheritance Demo
# <NAME> and <NAME>
import random
# ------------------------------------------------
class Character:
    """Common base class for any type of character"""
    def __init__(self, name, age, height, weight):
        """Constructor for a new Character"""
        self.race = "Generic Character"
        # NOTE(review): randrange(8, 18) yields 8-17 inclusive; if 18 should
        # be reachable, randint(8, 18) was likely intended — confirm.
        self.intelligence = random.randrange(8, 18)
        self.strength = random.randrange(8, 18)
        self.dexterity = random.randrange(8, 18)
        self.wisdom = random.randrange(8, 18)
        self.charisma = random.randrange(8, 18)
        self.name = name
        self.age = age
        self.height = height
        self.weight = weight
    def get_intelligence(self):
        """Reader method to return character's intelligence"""
        return(self.intelligence)
    def get_strength(self):
        """Reader method to return character's strength"""
        return(self.strength)
    def get_dexterity(self):
        """Reader method to return character's dexterity"""
        return(self.dexterity)
    def get_wisdom(self):
        """Reader method to return character's wisdom"""
        return(self.wisdom)
    def get_charisma(self):
        """Reader method to return character's charisma"""
        return(self.charisma)
    def set_intelligence(self, intelligence):
        """Writer method to set character's intelligence"""
        self.intelligence = intelligence
    def set_strength(self, strength):
        """Writer method to set character's strength"""
        self.strength = strength
    def set_dexterity(self, dexterity):
        """Writer method to set character's dexterity"""
        self.dexterity = dexterity
    def set_wisdom(self, wisdom):
        """Writer method to set character's wisdom"""
        self.wisdom = wisdom
    def set_charisma(self, charisma):
        """Writer method to set character's charisma"""
        self.charisma = charisma
    def __str__(self):
        """Override the print method for a Character"""
        answer = "Name: " + self.name + "\n"
        answer += "Race: " + self.race + "\n"
        answer += "Age: " + str(self.age) + "\n"
        answer += "Height: " + str(self.height) + " inches\n"
        answer += "Weight: " + str(self.weight) + " lbs\n"
        answer += "Intelligence: " + str(self.intelligence) + "\n"
        answer += "Strength: " + str(self.strength) + "\n"
        answer += "Dexterity: " + str(self.dexterity) + "\n"
        answer += "Wisdom: " + str(self.wisdom) + "\n"
        answer += "Charisma: " + str(self.charisma) + "\n"
        return answer
# ------------------------------------------------
class Human(Character):
    """Define Human to be a subclass of Character"""
    def __init__(self, name, age, height, weight):
        """Constructor for a new Human"""
        # Humans keep the base stats unchanged; only the race label differs.
        Character.__init__(self, name, age, height, weight)
        self.race = "Human"
# ------------------------------------------------
class Orc(Character):
    """Define Orc to be a subclass of Character"""
    def __init__(self, name, age, height, weight):
        """Constructor for a new Orc"""
        Character.__init__(self, name, age, height, weight)
        self.race = "Orc"
        # Orcs trade charisma and intelligence (-2 each) for strength (+2).
        self.set_charisma(self.get_charisma() - 2)
        self.set_intelligence(self.get_intelligence() - 2)
        self.set_strength(self.get_strength() + 2)
# ------------------------------------------------
def main():
    """Prompt for character details and print the generated character."""
    name = input("Enter name: ") # e.g. "Borug"
    name = name.capitalize()
    age = input("Enter age: ") # e.g. 53
    height = input("Enter height in inches: ") # e.g. 60
    weight = input("Enter weight in pounds: ") # e.g. 175
    race = input("Enter race (human or orc): ") # e.g. "halforc"
    race = race.lower()
    print()
    # Any race other than the two supported ones is rejected.
    if (race =="orc"):
        player = Orc(name, age, height, weight)
        print(player)
    elif (race == "human"):
        player = Human(name, age, height, weight)
        print(player)
    else:
        print("Illegal character type.")
# ------------------------------------------------
main()
|
lgw2/lgw2.github.io | _teaching/csci127-summer-2020/readings/activities/madlib.py | # --------------------------------
# Joy and Beauty of Data
# Authors: <NAME> and <NAME>
# --------------------------------
# MadLib Exercise
# --------------------------------
# Get User Input for MadLib Story
# Get User Input for MadLib Story (all values are used verbatim below)
adjective_1 = input("Enter an adjective: ")
name_1 = input("Enter a name: ")
number = input("Enter a number: ")
name_2 = input("Enter another name: ")
town_1 = input("Enter a town name: ")
town_2 = input("Enter a different town name: ")
adjective_2 = input("Enter an adjective: ")
# Print Story
print()
print("A new and "+ adjective_1 + " movie is coming soon!")
print("It's about " + name_1 + " and the " + number + " Python programmers. ")
print(name_1 + " is a computing wizard whose talent threatens " + name_2 + ".")
print(name_1 + " is forced to flee from "+ town_1 + " and hides in nearby "+ town_2 + ".")
print("But " + name_2 + " finds " + name_1 + " and casts a " + adjective_2 + " spell.")
|
lgw2/lgw2.github.io | _teaching/csci127-summer-2020/readings/activities/key-events.py |
import turtle
window = turtle.Screen()
drawer = turtle.Turtle()
drawer.speed(0)
# Each handler points the turtle in a compass direction and advances 50px.
def east():
    drawer.setheading(0)
    drawer.forward(50)
def north():
    drawer.setheading(90)
    drawer.forward(50)
def west():
    drawer.setheading(180)
    drawer.forward(50)
def south():
    drawer.setheading(270)
    drawer.forward(50)
# Bind the arrow keys, then start listening for key events.
window.onkey(east, "Right")
window.onkey(north, "Up")
window.onkey(west, "Left")
window.onkey(south, "Down")
window.listen()
window.exitonclick()
|
lgw2/lgw2.github.io | _teaching/csci127-summer-2020/readings/activities/blackjack.py | <gh_stars>0
import card
# -----------------------
def evaluate(hand):
    """Return the total blackjack value of the cards in *hand*."""
    result = 0
    for one_card in hand:
        result += one_card.get_value()
    return result
# -----------------------
def process_hand(hand):
    # TODO(lab): score the hand (see evaluate above) and report the result.
    print("The processHand function needs to be completed")
# -----------------------
def main():
    """Build one card of each rank and score several sample blackjack hands."""
    ace = card.Card("ace", "spades")
    king = card.Card("king", "diamonds")
    queen = card.Card("queen", "hearts")
    jack = card.Card("jack", "clubs")
    ten = card.Card("ten", "spades")
    nine = card.Card("nine", "hearts")
    eight = card.Card("eight", "diamonds")
    seven = card.Card("seven", "clubs")
    six = card.Card("six", "spades")
    five = card.Card("five", "hearts")
    four = card.Card("four", "diamonds")
    three = card.Card("three", "clubs")
    two = card.Card("two", "spades")
    process_hand([ace, king])
    process_hand([queen, ace])
    process_hand([ace, jack])
    process_hand([ten, ace])
    process_hand([two, three, four, five, six, seven])
    process_hand([eight, nine, two])
# -----------------------
main()
|
lgw2/lgw2.github.io | _teaching/csci127-summer-2020/readings/activities/raven_words_updated.py | f = open('raven.txt', 'r')
# create an empty dictionary mapping word -> occurrence count
count = {}
for line in f:
    for word in line.split():
        # remove punctuation
        # NOTE(review): str.translate with a deletion table would do this
        # in one pass instead of 14 chained replace calls.
        word = word.replace('_', '').replace('"', '').replace(',', '').replace('.', '')
        word = word.replace('-', '').replace('?', '').replace('!', '').replace("'", "")
        word = word.replace('(', '').replace(')', '').replace(':', '').replace('[', '')
        word = word.replace(']', '').replace(';', '')
        # ignore case
        word = word.lower()
        # add to dictionary
        if word in count:
            count[word] = count[word] + 1
        else:
            count[word] = 1
f.close()
# create an ordered list of keys
keys = list(count.keys())
keys.sort()
out = open('word_counts.txt', 'w')
# iterate over keys and write "word count" lines to the output file
for key in keys:
    out.write(key + ' ' + str(count[key]) + '\n')
out.close()
|
lgw2/lgw2.github.io | _teaching/csci127-summer-2020/assignments/program5.py | import numpy as np
# ---------------------------------------
# CSCI 127, Joy and Beauty of Data
# Program 5: Eight Puzzle
# Your Name
# ---------------------------------------
# A brief overview of the program.
# ---------------------------------------
class EightPuzzle:
    def __init__(self):
        # The goal layout: tiles 1-8 in order with the blank (a space) last.
        self.solution = np.array([1,2,3,4,5,6,7,8," "])
        self.solution = self.solution.reshape(3,3)
    def __str__(self):
        """Render self.puzzle as a 3x3 ASCII grid.

        NOTE(review): self.puzzle is only set by puzzle_1/puzzle_2, so one
        of them must be called before printing.
        """
        separator = "+-+-+-+\n"
        answer = separator
        for row in range(3):
            for col in range(3):
                answer += "|" + str(self.puzzle[row][col])
            answer += "|\n"
            answer += separator
        return answer
    def puzzle_1(self):
        # Already-solved configuration; blank at row 2, column 2.
        self.puzzle = np.array([1,2,3,4,5,6,7,8," "])
        self.puzzle = self.puzzle.reshape(3,3)
        self.blank_x = 2
        self.blank_y = 2
    def puzzle_2(self):
        # Scrambled configuration; blank at row 2, column 1.
        self.puzzle = np.array([4,1,3,7,2,5,8," ", 6])
        self.puzzle = self.puzzle.reshape(3,3)
        self.blank_x = 2
        self.blank_y = 1
    def swap_positions(self, x1, y1, x2, y2):
        """Swap the tiles at (x1, y1) and (x2, y2) in place."""
        self.puzzle[x1][y1], self.puzzle[x2][y2] = \
            self.puzzle[x2][y2], self.puzzle[x1][y1]
# ---------------------------------------
# Do not change anything above this line
# ---------------------------------------
# Your solution goes here ...
# ---------------------------------------
# Do not change anything below this line
# ---------------------------------------
def solve(puzzle):
    """Move the blank until the puzzle is solved, printing each state.

    Relies on is_puzzle_solved() and move_blank(), the student-written part.
    """
    steps = 0
    print("Puzzle:\n")
    print(puzzle)
    while not puzzle.is_puzzle_solved():
        puzzle.move_blank()
        print(puzzle)
        steps += 1
    print("Congratulations - you solved the puzzle in", steps, "steps!\n")
def main():
    """Solve both supplied starting configurations."""
    puzzle = EightPuzzle()
    puzzle.puzzle_1()
    solve(puzzle)
    puzzle.puzzle_2()
    solve(puzzle)
# ---------------------------------------
main()
|
lgw2/lgw2.github.io | _teaching/csci127-summer-2020/readings/activities/ml.py | # --------------------------------------
# Simple Machine Learning Demo
# --------------------------------------
from sklearn import datasets, svm
from sklearn.tree import DecisionTreeClassifier
# --------------------------------------
def learn(classifier, dataset):
    """Train *classifier* on the first half of *dataset* and report its
    accuracy on the held-out second half.

    Returns the fraction of correctly classified held-out samples.
    """
    n = len(dataset.data)
    # First half trains, second half tests.
    classifier.fit(dataset.data[:n // 2], dataset.target[:n // 2])
    predicted_answers = classifier.predict(dataset.data[n // 2:])
    correct_answers = dataset.target[n // 2:]
    correct = 0
    for predicted, target in zip(predicted_answers, correct_answers):
        if predicted == target:
            correct += 1
    # Bug fix: divide by the size of the *test* set (n - n // 2), not the
    # training set; for odd n the old n // 2 denominator overstated accuracy.
    accuracy = correct / len(correct_answers)
    print("Percentage correct = {:.2f}\n".format(100 * accuracy))
    return accuracy
# --------------------------------------
def dataset_demonstration(dataset):
    """Print the dataset's feature rows and class labels with their sizes."""
    print("Dataset Data")
    print(dataset.data)
    print("Length:", len(dataset.data))
    print("Dataset Classifications")
    print(dataset.target)
    print("Length:", len(dataset.target))
# --------------------------------------
def main():
    """Compare an SVM against decision trees of depth 1-10 on the digits set."""
    digits = datasets.load_digits()
    dataset_demonstration(digits)
    classifier = svm.SVC(gamma=0.001, C=100.0)
    print("\nSupport Vector Machine")
    learn(classifier, digits)
    for depth in range(1, 11):
        classifier = DecisionTreeClassifier(max_depth = depth)
        print("Decision Tree of Depth", depth)
        learn(classifier, digits)
main()
|
lgw2/lgw2.github.io | _teaching/csci127-summer-2020/readings/activities/draw_tree.py | <filename>_teaching/csci127-summer-2020/readings/activities/draw_tree.py
import turtle
def tree(branchLen,t):
    """Recursively draw a binary tree: branches shrink by 15 per level and
    recursion stops once a branch is 5 or shorter."""
    if branchLen > 5:
        t.forward(branchLen)
        t.right(20)
        tree(branchLen-15,t)
        t.left(40)
        tree(branchLen-15,t)
        t.right(20)
        # Retrace so the turtle ends where this branch began.
        t.backward(branchLen)
def main():
    """Draw a green tree rooted 100px below the window centre."""
    t = turtle.Turtle()
    myWin = turtle.Screen()
    t.up()
    t.goto(0, -100)
    t.down()
    t.color("green")
    tree(75,t)
    myWin.exitonclick()
main()
|
lgw2/lgw2.github.io | _teaching/csci127-summer-2020/readings/activities/montana.py | import matplotlib.pyplot as plt
import numpy as np
# --------------------------------------
def read_file(file_name):
    """Read whitespace-separated 'year population' census rows.

    Returns [years, populations] as NumPy int arrays in file order.
    """
    years = []
    populations = []
    # 'with' closes the file even if a line fails to parse.
    with open(file_name, "r") as census_file:
        for one_line in census_file:
            values = one_line.split()
            years.append(int(values[0]))
            populations.append(int(values[1]))
    # The original inserted at index 0 (O(n^2)) and then reversed each list —
    # a double reversal; appending preserves file order directly.
    return [np.array(years), np.array(populations)]
# --------------------------------------
def main():
    """Main function to produce graphs"""
    years, populations = read_file("montana.txt")
    plt.figure("Historical Montana Populations") # Graph 1
    # Red markers over a black connecting line.
    plt.plot(years, populations, "ro")
    plt.plot(years, populations, "black")
    plt.xlabel("Year")
    plt.ylabel("Population")
    plt.show()
# --------------------------------------
main()
|
lgw2/lgw2.github.io | _teaching/csci127-summer-2019/labs/lab5.py | # --------------------------------------
# CSCI 127, Lab 6 |
# June 16, 2019 |
# Your Name |
# --------------------------------------
# The missing functions go here.
# --------------------------------------
def main(file_name):
    """Report earthquake statistics using the student helpers (to be written
    above): average_magnitude, earthquake_locations, count_earthquakes."""
    magnitude = average_magnitude(file_name)
    print("The average earthquake magnitude is {:.2f}\n".format(magnitude))
    earthquake_locations(file_name)
    lower_bound = float(input("Enter a lower bound for the magnitude: "))
    upper_bound = float(input("Enter an upper bound for the magnitude: "))
    how_many = count_earthquakes(file_name, lower_bound, upper_bound)
    print("Number of recorded earthquakes between {:.2f} and {:.2f} = {:d}".format(lower_bound, upper_bound, how_many))
# --------------------------------------
main("earthquakes.csv")
|
lgw2/lgw2.github.io | _teaching/csci127-summer-2020/labs/etch-a-sketch.py | # -----------------------------------------+
# Your name |
# CSCI 127, Lab 1 |
# Last Updated: June 6, 2019 |
# -----------------------------------------|
# Modify an etch-a-sketch program. |
# -----------------------------------------+
import turtle
import random
window = turtle.Screen()
pencil = turtle.Turtle()
square = turtle.Turtle()
# ---------------------------------
def draw_square(square):
    """Draw a filled 50x50 square (the color-change button) at (-200, 200)."""
    pen = square
    pen.up()
    pen.goto(-200, 200)
    pen.down()
    pen.begin_fill()
    for _side in range(4):
        pen.forward(50)
        pen.right(90)
    pen.end_fill()
# ---------------------------------
def drawing_controls(x, y):
    """Click handler: recolor the pencil randomly when the click lands
    inside the 50x50 button square drawn at the top-left corner."""
    inside_button = (-200 <= x <= -150) and (150 <= y <= 200)
    if not inside_button:
        return
    # Pick a fresh random RGB color for the drawing pencil.
    pencil.color(random.random(), random.random(), random.random())
# ---------------------------------
def main():
    """Set up the etch-a-sketch: label and draw the color button, then
    wire up the click/drag event handlers."""
    pencil.shape("circle")
    # Caption the color-change button.
    label = turtle.Turtle()
    label.hideturtle()
    label.up()
    label.goto(-205, 205)
    label.write("Change Color")
    # Draw the button itself as fast as possible, with the pen hidden.
    square.speed(0)
    square.hideturtle()
    draw_square(square)
    # Clicking the window may recolor the pencil; releasing a drag moves it.
    window.onclick(drawing_controls)
    pencil.onrelease(pencil.goto)
# ---------------------------------
main()
|
lgw2/lgw2.github.io | _teaching/csci127-summer-2019/assignments/cribbage.py | <gh_stars>0
# -----------------------------------------+
# Your name |
# CSCI 127, Program 2 |
# Last Updated: ???, 2019 |
# -----------------------------------------|
# A simplified Cribbage scoring system. |
# -----------------------------------------+
# -----------------------------------------+
# Do not change anything below this line. |
# -----------------------------------------+
def process_hands(cribbage_input, cards_in_hand):
    """Read one hand per line from the open file and score each hand.

    Each line holds cards_in_hand cards as "<rank> <suit>" token pairs.
    Relies on print_hand and evaluate_hand, defined above.
    """
    number = 1
    for hand in cribbage_input:
        hand = hand.split()
        hand_as_list = []
        for i in range(cards_in_hand):
            # Take the next (rank, suit) pair off the front of the line.
            hand_as_list.append([hand[0].capitalize(), hand[1].capitalize()])
            hand = hand[2:]
        print_hand(hand_as_list, number)
        evaluate_hand(hand_as_list)
        number += 1
# -----------------------------------------+
def main():
    """Open the data file and score every 5-card hand it contains."""
    # "with" guarantees the file is closed even if scoring raises
    # (the original open/close pair leaked the handle on exception).
    with open("cribbage.txt", "r") as cribbage_file:
        process_hands(cribbage_file, 5)
# -----------------------------------------+
main()
|
lgw2/lgw2.github.io | _teaching/csci127-summer-2020/assignments/pokedex.py | import string
# ---------------------------------------
# CSCI 127, Joy and Beauty of Data |
# Program 4: Pokedex |
# Your Name(, Your Partner's Name) |
# Last Modified: ??, 2019 |
# ---------------------------------------
# A brief overview of the program.
# ---------------------------------------
# Your solution goes here ...
# ---------------------------------------
# Do not change anything below this line
# ---------------------------------------
def create_pokedex(filename):
    """Build a list of Pokemon objects from a CSV-style data file.

    Each line: number,name,combat_points,type1[,type2,...]
    Relies on the student-defined Pokemon class above.
    """
    pokedex = []
    # "with" closes the file even if a malformed line raises (the original
    # open/close pair leaked the handle on exception); also avoid shadowing
    # the name "file".
    with open(filename, "r") as data_file:
        for pokemon in data_file:
            pokelist = pokemon.strip().split(",")
            number = int(pokelist[0])
            name = pokelist[1]
            combat_points = int(pokelist[2])  # hit points
            types = pokelist[3:]  # every remaining field is a type name
            pokedex += [Pokemon(name, number, combat_points, types)]
    return pokedex
# ---------------------------------------
def get_choice(low, high, message):
    """Prompt with *message* until the user types an integer in [low, high];
    return that integer."""
    while True:
        response = input(message)
        # Reject anything that contains a non-digit character.
        if any(ch not in string.digits for ch in response):
            print("That is not a number, try again.")
            continue
        value = int(response)
        if low <= value <= high:
            return value
        print("That is not a valid choice, try again.")
# ---------------------------------------
def main():
    """Menu loop: load the pokedex, then dispatch on the user's choice.

    Relies on the student-defined functions print_menu, print_pokedex,
    lookup_by_name, lookup_by_number, total_by_type and average_hit_points.
    """
    pokedex = create_pokedex("pokedex.txt")
    choice = 0
    while choice != 6:  # option 6 quits the loop
        print_menu()
        choice = get_choice(1, 6, "Enter a menu option: ")
        if choice == 1:
            print_pokedex(pokedex)
        elif choice == 2:
            name = input("Enter a Pokemon name: ").lower()
            lookup_by_name(pokedex, name)
        elif choice == 3:
            number = get_choice(1, 1000, "Enter a Pokemon number: ")
            lookup_by_number(pokedex, number)
        elif choice == 4:
            pokemon_type = input("Enter a Pokemon type: ").lower()
            total_by_type(pokedex, pokemon_type)
        elif choice == 5:
            average_hit_points(pokedex)
        elif choice == 6:
            print("Thank you. Goodbye!")
        print()
# ---------------------------------------
main()
|
lgw2/lgw2.github.io | _teaching/csci127-summer-2020/labs/lab8.py | <reponame>lgw2/lgw2.github.io<gh_stars>0
# -----------------------------------------------------
# CSCI 127, Lab 8 |
# June 12, 2020 |
# Your Name |
# -----------------------------------------------------
# Your solution goes here. Do not change anything below.
# -----------------------------------------------------
def main():
    """Exercise the student-defined Queue class.

    Expects Queue to provide: a constructor taking a name, enqueue,
    dequeue (returning the removed item), is_empty, __str__, and
    in-place addition (numbers += 15) as an alias for enqueue.
    """
    numbers = Queue("Numbers")
    print("Enqueue 1, 2, 3, 4, 5")
    print("---------------------")
    for number in range(1, 6):
        numbers.enqueue(number)
    print(numbers)
    print("\nDequeue one item")
    print("----------------")
    numbers.dequeue()
    print(numbers)
    print("\nDeque all items")
    print("---------------")
    while not numbers.is_empty():
        print("Item dequeued:", numbers.dequeue())
    print(numbers)
    # Enqueue 10, 11, 12, 13, 14
    for number in range(10, 15):
        numbers.enqueue(number)
    # Enqueue 15 (via the += operator overload)
    numbers += 15
    print("\n10, 11, 12, 13, 14, 15 enqueued")
    print("-------------------------------")
    print(numbers)
# -----------------------------------------------------
main()
|
lgw2/lgw2.github.io | _teaching/csci127-summer-2020/readings/activities/np_practice.py | <filename>_teaching/csci127-summer-2020/readings/activities/np_practice.py
# Create a one-dimensional array called v with 10 integers.
# Each integer should be a random number between 1 and 100.
# Create a new array which consists of the odd indices of
# previously created array v.
# Create a new array in backwards ordering from v.
# Create a two-dimensional array called m with 25 integers
# in a 5 by 5 matrix. Each integer should be a random number
# between 1 and 100.
# Create a new array from m, in which the elements of each row
# are in reverse order.
# Create another array from m, where the rows are in reverse order.
# Create another array from m, where columns and rows are in reverse order.
# Create another array from m, where the first and last row and
# the first and last column are cut off.
|
lgw2/lgw2.github.io | _teaching/csci127-summer-2020/readings/activities/convert.py | <gh_stars>0
def convert(base_10_number, new_base):
    """Return base_10_number written in new_base (2-36) as a string."""
    digit_chars = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
    # Peel digits off the low end, building the string right to left,
    # until a single leading digit remains.
    encoded = ""
    remaining = base_10_number
    while remaining >= new_base:
        encoded = digit_chars[remaining % new_base] + encoded
        remaining = remaining // new_base
    return digit_chars[remaining] + encoded
print(convert(100, 10))
print(convert(100, 2))
print(convert(100, 36))
# print(unconvert("100", 10))
# print(unconvert("1100100", 2))
# print(unconvert("2S", 36))
|
lgw2/lgw2.github.io | _teaching/csci127-summer-2020/labs/lab4.py | <filename>_teaching/csci127-summer-2020/labs/lab4.py
# --------------------------------------
# CSCI 127, Lab 4
# May 29, 2020
# Your Name
# --------------------------------------
def process_season(season, games_played, points_earned):
    """Print the header for one season; listing the records is the TODO."""
    print("Season: " + str(season) + ", Games Played: " + str(games_played) +
        ", Points earned: " + str(points_earned))
    print("Possible Win-Tie-Loss Records")
    print("-----------------------------")
    # TODO: enumerate every win/tie/loss combination consistent with
    # games_played and points_earned (presumably 3 points per win and
    # 1 per tie -- confirm with the lab sheet).
    pass
    print()
# --------------------------------------
def process_seasons(seasons):
    """TODO: call process_season for each [games, points] pair in seasons."""
    pass
# --------------------------------------
def main():
    """Run the lab on a fixed set of soccer seasons."""
    # Each entry is [games-played, points-earned] for one season.
    seasons_to_process = [[1, 3], [1, 1], [1, 0], [20, 30]]
    process_seasons(seasons_to_process)
# --------------------------------------
main()
|
lgw2/lgw2.github.io | _teaching/csci127-summer-2020/readings/activities/house.py | import turtle
# One pen per shape; all hidden so only the drawings show.
square = turtle.Turtle()
square.hideturtle()
rectangle = turtle.Turtle()
rectangle.hideturtle()
triangle = turtle.Turtle()
triangle.hideturtle()
# House body: a 100x100 square with its top-left corner at (-50, 50).
square.up()
square.goto(-50, 50)
square.down()
for side in range(4):
    square.forward(100)
    square.right(90)
# Roof: a filled red triangle sitting on top of the square.
triangle.up()
triangle.color("red")
triangle.goto(-50, 50)
triangle.down()
triangle.begin_fill()
triangle.goto(0, 100)
triangle.goto(50, 50)
triangle.goto(-50, 50)
triangle.end_fill()
# Door: a filled 20x40 rectangle at the bottom of the house.
rectangle.up()
rectangle.goto(-10, -10)
rectangle.down()
rectangle.begin_fill()
for half in range(2):
    rectangle.forward(20)
    rectangle.right(90)
    rectangle.forward(40)
    rectangle.right(90)
rectangle.end_fill()
# Two 20x20 windows near the top of the house.
square.up()
square.goto(-30, 30)
square.down()
for side in range(4):
    square.forward(20)
    square.right(90)
square.up()
square.goto(10, 30)
square.down()
for side in range(4):
    square.forward(20)
    square.right(90)
|
lgw2/lgw2.github.io | _teaching/csci127-summer-2020/readings/activities/raven_words.py | <filename>_teaching/csci127-summer-2020/readings/activities/raven_words.py<gh_stars>0
# Activity skeleton: read "The Raven" and normalize each word in turn.
f = open('raven.txt', 'r')
for line in f:
    for word in line.split():
        # remove punctuation
        word = word.replace('_', '').replace('"', '').replace(',', '').replace('.', '')
        word = word.replace('-', '').replace('?', '').replace('!', '').replace("'", "")
        word = word.replace('(', '').replace(')', '').replace(':', '').replace('[', '')
        word = word.replace(']', '').replace(';', '')
        # ignore case
        word = word.lower()
        # NOTE(review): the cleaned word is currently discarded -- the
        # counting/collection step of the activity goes here.
f.close()
|
lgw2/lgw2.github.io | _teaching/csci127-summer-2019/labs/lab10.py | import numpy as np
import matplotlib.pyplot as plt
# -------------------------------------------------
# CSCI 127, Lab 10 |
# July 1, 2019 |
# Your Name |
# -------------------------------------------------
def read_file(file_name):
    """TODO: read the CSV and return (college_names, college_enrollments)."""
    pass
# -------------------------------------------------
def main(file_name):
    """Entry point: load the enrollment data for later plotting."""
    # NOTE(review): read_file is still a stub returning None, so this
    # unpack raises TypeError until it returns the two sequences.
    college_names, college_enrollments = read_file(file_name)
# -------------------------------------------------
main("fall-2018.csv")
|
lgw2/lgw2.github.io | _teaching/csci127-summer-2020/readings/np_tutorial.py | <reponame>lgw2/lgw2.github.io
import numpy as np
# NumPy Tutorial
# --- 1-D float array and its basic attributes ---
a = np.array([2.0, 3.3, 5.8, 7.3])
print("a =\n", a)
print("a.size =", a.size)
print("a.ndim =", a.ndim)
print("a.shape =", a.shape)
print("a.dtype =", a.dtype)
print()
# --- 2-D array of strings ---
b = np.array([["naif", "brittany", "cooper", "joseph"],
              ["logan", "jacob", "ben", "doug"],
              ["blake", "natalia", "mack", "coleton"]])
print("b =\n", b)
print("b.size =", b.size)
print("b.ndim =", b.ndim)
print("b.shape =", b.shape)
print("b.dtype =", b.dtype)
print()
# --- arange followed by reshape ---
c = np.arange(1, 40, 2)
print("c =\n", c)
c = c.reshape(4, 5)
print("c =\n", c)
print("c.size =", c.size)
print("c.ndim =", c.ndim)
print("c.shape =", c.shape)
print("c.dtype =", c.dtype)
print()
c += 1  # this is called broadcasting
print("c = \n", c)
print()
# --- explicit dtype ---
d = np.array([1, 2, 3], dtype="float16")
print("d =\n", d)
print("d.type =", d.dtype)
print()
# --- array factories: zeros, ones, linspace, random ---
e = np.zeros(4)
print("e =\n", e)
print()
f = np.ones((4), dtype="int16")
print("f =\n", f)
print()
g = np.linspace(10, 20, 21)
print("g =\n", g)
print()
h = np.random.random(10)
print("h =\n", h)
print()
i = np.random.randint(10, 20, size=(2, 5))
print("i =\n", i)
print()
# --- iterating a 2-D array: by index ... ---
print("i =")
for row in range(i.shape[0]):
    line = ""
    for col in range(i.shape[1]):
        line += str(i[row][col]) + " "
    print(line)
print()
# --- ... and directly over the rows ---
print("i =")
for row in i:
    line = ""
    for item in row:
        line += str(item) + " "
    print(line)
print()
print("i.sum() =", i.sum())  # Method examples
print("i.min() =", i.min())
print("i.max() =", i.max())
print("i.mean() =", i.mean())
print("np.sqrt(i) =", np.sqrt(i))  # Universal function example
print()
# --- 3-D array and multi-axis slicing ---
j = np.arange(1, 28).reshape((3,3,3))
print("j =\n", j)
print()
print("j[1:2, :2, 1:] =\n", j[1:2, :2, 1:])  # Slicing
|
lgw2/lgw2.github.io | _teaching/csci127-summer-2020/readings/activities/alternating.py | <reponame>lgw2/lgw2.github.io
def alternating(list_of_ints):
    """Activity stub -- to be implemented.

    NOTE(review): the intended behavior is not shown here; presumably it
    decides something about alternation in the list (see the example calls
    below) -- confirm against the activity description.
    """
    pass
print(alternating([1, 2, 3, 4]))
print(alternating([10, 11, 1, 12]))
print(alternating([10, 21, 22, -5, 100, 101, 2]))
|
Tom-SW1/Politics-War-Formulas | content/spy_estimate/code/example.py | <filename>content/spy_estimate/code/example.py<gh_stars>0
import requests
#get_nation = string ; tactician, arcane, covert = boolean
def get_spy_count(get_nation, tactician, arcane, covert):
    """Estimate a nation's spy count by binary-searching the public
    espionage-odds endpoint for the smallest value reported as
    'Greater than 50%'.

    NOTE(review): 'min' and 'max' shadow the builtins of the same name.
    NOTE(review): if the search range is exhausted while the last probe
    returned 'Greater than 50%' without the break ever firing, 'spy' is
    referenced before assignment (NameError) -- confirm the search always
    terminates through the break in that case.
    """
    safety = 1 #do not touch (changing will break)
    odds = 50 #do not touch (changing will break)
    min = 0
    max = 60
    while min <= max:
        median = int(round((min + max) / 2, 0))
        # Probe the odds of an op against ourselves using 'median' spies.
        spies = requests.get(f'https://politicsandwar.com/war/espionage_get_odds.php?id1={get_nation}&id2={get_nation}&id3=0&id4=1&id5={median}').text
        if spies == 'Greater than 50%':
            # If median-1 also succeeds, keep searching lower.
            check = requests.get(f'https://politicsandwar.com/war/espionage_get_odds.php?id1={get_nation}&id2={get_nation}&id3=0&id4=1&id5={median - 1}').text
            if check == 'Greater than 50%':
                max = median - 1
            else:
                spy = median
                break
        else:
            min = median + 1
    if spies == 'Greater than 50%':
        # Invert the odds formula to recover the spy-count estimate.
        spy = ((safety * 25) + (spy * 100) - odds) / (3 * (odds - (safety * 25)))
        # Adjust for war-policy modifiers -- presumably tactician/covert
        # raise and arcane lowers the estimate; confirm the intent.
        if tactician == True or covert == True:
            spy = spy / 0.75
        elif arcane == True:
            spy = spy * 0.75
        return spy
    else:
        return 60
print(get_spy_count('193160', False, False, False)) |
satorumpen/node-pdfium-native | third_party/pdfium/build/gyp_pdfium.py | <reponame>satorumpen/node-pdfium-native
# Copyright 2014 PDFium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
# Chain-load the real gyp_pdfium script that lives next to this wrapper.
path = os.path.abspath(os.path.split(__file__)[0])
# NOTE(review): execfile exists only in Python 2; a Python 3 port would
# need exec(open(...).read()) instead.
execfile(os.path.join(path, 'gyp_pdfium'))
|
satorumpen/node-pdfium-native | binding.gyp | <gh_stars>10-100
{
'includes': [
'./common.gypi'
],
'target_defaults': {
'defines' : [
'PNG_PREFIX',
'PNGPREFIX_H',
'PNG_USE_READ_MACROS',
],
# 'include_dirs': [
# '<(DEPTH)/third_party/pdfium',
# '<(DEPTH)/third_party/pdfium/third_party/freetype/include',
# ],
'conditions': [
['OS=="linux"', {
'conditions': [
['target_arch=="x64"', {
'defines' : [ '_FX_CPU_=_FX_X64_', ],
'cflags': [ '-fPIC', ],
}],
['target_arch=="ia32"', {
'defines' : [ '_FX_CPU_=_FX_X86_', ],
}],
],
}]
],
'msvs_disabled_warnings': [
4005, 4018, 4146, 4333, 4345, 4267
]
},
'targets': [
{
'target_name': 'node_pdfium',
'dependencies' : [
'fx_lpng',
'./third_party/pdfium/pdfium.gyp:pdfium'
],
'sources': [
# is like "ls -1 src/*.cc", but gyp does not support direct patterns on
# sources
'<!@(["python", "tools/getSourceFiles.py", "src", "cc"])'
]
},
{
'target_name': 'fx_lpng',
'type': 'static_library',
'dependencies': [
'third_party/pdfium/pdfium.gyp:fxcodec',
],
'include_dirs': [
'third_party/pdfium/core/src/fxcodec/fx_zlib/include/',
],
'sources': [
'third_party/fx_lpng/include/fx_png.h',
'third_party/fx_lpng/src/fx_png.c',
'third_party/fx_lpng/src/fx_pngerror.c',
'third_party/fx_lpng/src/fx_pngget.c',
'third_party/fx_lpng/src/fx_pngmem.c',
'third_party/fx_lpng/src/fx_pngpread.c',
'third_party/fx_lpng/src/fx_pngread.c',
'third_party/fx_lpng/src/fx_pngrio.c',
'third_party/fx_lpng/src/fx_pngrtran.c',
'third_party/fx_lpng/src/fx_pngrutil.c',
'third_party/fx_lpng/src/fx_pngset.c',
'third_party/fx_lpng/src/fx_pngtrans.c',
'third_party/fx_lpng/src/fx_pngwio.c',
'third_party/fx_lpng/src/fx_pngwrite.c',
'third_party/fx_lpng/src/fx_pngwtran.c',
'third_party/fx_lpng/src/fx_pngwutil.c',
]
}
]
}
|
satorumpen/node-pdfium-native | third_party/pdfium/testing/gtest.gyp | # Copyright 2014 PDFium authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
# Original code from V8, original license was:
# Copyright 2014 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
# This file is used only by the standalone PDFium build. Under a chromium
# checkout, the src/testing/gtest.gyp file is used instead.
{
'targets': [
{
'target_name': 'gtest',
'toolsets': ['host', 'target'],
'type': 'static_library',
'sources': [
'gtest/include/gtest/gtest-death-test.h',
'gtest/include/gtest/gtest-message.h',
'gtest/include/gtest/gtest-param-test.h',
'gtest/include/gtest/gtest-printers.h',
'gtest/include/gtest/gtest-spi.h',
'gtest/include/gtest/gtest-test-part.h',
'gtest/include/gtest/gtest-typed-test.h',
'gtest/include/gtest/gtest.h',
'gtest/include/gtest/gtest_pred_impl.h',
'gtest/include/gtest/internal/gtest-death-test-internal.h',
'gtest/include/gtest/internal/gtest-filepath.h',
'gtest/include/gtest/internal/gtest-internal.h',
'gtest/include/gtest/internal/gtest-linked_ptr.h',
'gtest/include/gtest/internal/gtest-param-util-generated.h',
'gtest/include/gtest/internal/gtest-param-util.h',
'gtest/include/gtest/internal/gtest-port.h',
'gtest/include/gtest/internal/gtest-string.h',
'gtest/include/gtest/internal/gtest-tuple.h',
'gtest/include/gtest/internal/gtest-type-util.h',
'gtest/src/gtest-all.cc',
'gtest/src/gtest-death-test.cc',
'gtest/src/gtest-filepath.cc',
'gtest/src/gtest-internal-inl.h',
'gtest/src/gtest-port.cc',
'gtest/src/gtest-printers.cc',
'gtest/src/gtest-test-part.cc',
'gtest/src/gtest-typed-test.cc',
'gtest/src/gtest.cc',
'gtest-support.h',
],
'sources!': [
'gtest/src/gtest-all.cc', # Not needed by our build.
],
'include_dirs': [
'gtest',
'gtest/include',
],
'dependencies': [
'gtest_prod',
],
'defines': [
# In order to allow regex matches in gtest to be shared between Windows
# and other systems, we tell gtest to always use it's internal engine.
'GTEST_HAS_POSIX_RE=0',
# Unit tests don't require C++11, yet.
'GTEST_LANG_CXX11=0',
],
'all_dependent_settings': {
'defines': [
'GTEST_HAS_POSIX_RE=0',
'GTEST_LANG_CXX11=0',
],
},
'conditions': [
['os_posix == 1', {
'defines': [
# gtest isn't able to figure out when RTTI is disabled for gcc
# versions older than 4.3.2, and assumes it's enabled. Our Mac
# and Linux builds disable RTTI, and cannot guarantee that the
# compiler will be 4.3.2. or newer. The Mac, for example, uses
# 4.2.1 as that is the latest available on that platform. gtest
# must be instructed that RTTI is disabled here, and for any
# direct dependents that might include gtest headers.
'GTEST_HAS_RTTI=0',
],
'direct_dependent_settings': {
'defines': [
'GTEST_HAS_RTTI=0',
],
},
}],
['OS=="android"', {
'defines': [
'GTEST_HAS_CLONE=0',
],
'direct_dependent_settings': {
'defines': [
'GTEST_HAS_CLONE=0',
],
},
}],
['OS=="android"', {
# We want gtest features that use tr1::tuple, but we currently
# don't support the variadic templates used by libstdc++'s
# implementation. gtest supports this scenario by providing its
# own implementation but we must opt in to it.
'defines': [
'GTEST_USE_OWN_TR1_TUPLE=1',
# GTEST_USE_OWN_TR1_TUPLE only works if GTEST_HAS_TR1_TUPLE is set.
# gtest r625 made it so that GTEST_HAS_TR1_TUPLE is set to 0
# automatically on android, so it has to be set explicitly here.
'GTEST_HAS_TR1_TUPLE=1',
],
'direct_dependent_settings': {
'defines': [
'GTEST_USE_OWN_TR1_TUPLE=1',
'GTEST_HAS_TR1_TUPLE=1',
],
},
}],
],
'direct_dependent_settings': {
'defines': [
'UNIT_TEST',
],
'include_dirs': [
'gtest/include', # So that gtest headers can find themselves.
],
'target_conditions': [
['_type=="executable"', {
'test': 1,
'conditions': [
['OS=="mac"', {
'run_as': {
              'action': ['${BUILT_PRODUCTS_DIR}/${PRODUCT_NAME}'],
},
}],
['OS=="win"', {
'run_as': {
              'action': ['$(TargetPath)', '--gtest_print_time'],
},
}],
],
}],
],
'msvs_disabled_warnings': [4800],
},
},
{
'target_name': 'gtest_main',
'type': 'static_library',
'dependencies': [
'gtest',
],
'sources': [
'gtest/src/gtest_main.cc',
],
},
{
'target_name': 'gtest_prod',
'toolsets': ['host', 'target'],
'type': 'none',
'sources': [
'gtest/include/gtest/gtest_prod.h',
],
},
],
}
|
supro200/cisco-sdwan-query-tool | sdnetsql.py | <reponame>supro200/cisco-sdwan-query-tool
import json
import csv
import getpass
import argparse
import sys
import pandas as pd
import numpy as np
import requests
from datetime import datetime
from tqdm import tqdm # progress bar
from colorama import init, Fore, Style # colored screen output
from sshtunnel import SSHTunnelForwarder # ssh tunnel to jump host
from rest_api_lib import rest_api_lib # lib to make queries to vManage
from pathlib import Path # OS-agnostic file handling
# Separate directories for unprocessed source data and results - CSV and HTML
RAW_OUTPUT_DIR = "raw_data/"
REPORT_DIR = "reports/"
HELP_STRING = 'Usage examples:\n' \
'- Interface State:\n' \
'python sdnetsql.py -q "select deviceId,vdevice-host-name,ifname,ip-address,port-type,if-admin-status,if-oper-status from interfaces af-type=ipv4" -u usera -c customera --html\n' \
' - All active BFD sessions from all devices\n' \
'python sdnetsql.py -q "select * from bfd_sessions where state = up" -u usera -c customera --html\n' \
' - Get OMP sessions state:\n' \
'python sdnetsql.py -q "select * from omp_peers" -u usera -c customera --html\n' \
'- Query only specific device:\n' \
'python sdnetsql.py -q "select vdevice-host-name,ifname,ip-address,port-type,if-admin-status,if-oper-status from interfaces where vdevice-host-name=jc7003edge01 and af-type=ipv4" -u usera -c customera --html'
# max lines for screen output
SCREEN_ROW_COUNT = 30
class CustomParser(argparse.ArgumentParser):
    """Argument parser that appends the usage examples to --help output and
    prints a friendly message (instead of argparse's default) on errors."""

    def print_help(self):
        """Show the standard argparse help followed by the usage examples."""
        super().print_help()
        print(HELP_STRING)

    def error(self, message):
        """Report a CLI error, point the user at --help, and exit with 2."""
        print("error: %s\n" % message)
        print("Use --help or -h for help")
        exit(2)
# -------------------------------------------------------------------------------------------
def parse_args(args=sys.argv[1:]):
    """Parse the CLI arguments and return the argparse Namespace.

    NOTE: the default args value (sys.argv[1:]) is captured once at import
    time, which is fine for a one-shot CLI but surprising if the module is
    reused.
    """
    parser = CustomParser()
    # Drop argparse's default "positional/optional" groups so we can show
    # our own "required"/"optional" grouping in the help output.
    parser._action_groups.pop()
    required = parser.add_argument_group("required arguments")
    optional = parser.add_argument_group("optional arguments")
    # Required arguments
    required.add_argument(
        "-q", "--query", help="Query, see usage examples", type=str, required=True
    )
    required.add_argument(
        "-u",
        "--user",
        help="Username to connect to network devices",
        type=str,
        required=True,
    )
    required.add_argument(
        "-c", "--customer", help="Customer name", type=str, required=True
    )
    # Optional arguments
    optional.add_argument(
        "--no-connect",
        "-nc",
        default=False,
        action="store_true",
        help="Run without connecting to network devices, uses the output previously collected. Impoves query processing speed",
    )
    # NOTE(review): store_true with default=True means --screen-output can
    # never be turned off from the CLI -- confirm this is intended.
    optional.add_argument(
        "--screen-output",
        "--sc",
        default=True,
        required=False,
        action="store_true",
        help="Prints report to screen. CVS reports are always generated",
    )
    optional.add_argument(
        "--screen-lines",
        default=30,
        type=int,
        required=False,
        help="Number of lines to output for each device",
    )
    optional.add_argument(
        "--html-output",
        "-html",
        default=False,
        action="store_true",
        help="Prints report to HTML. CVS reports are always generated",
    )
    optional.add_argument(
        "--report-dir",
        "-dir",
        help="Specify directory for reports. If not specified, a new directory will be built: YYYY-MM-DD",
    )
    optional.add_argument(
        "--password",
        "-p",
        help="Password. If not specified, ",
    )
    return parser.parse_args(args)
# -------------------------------------------------------------------------------------------
def command_analysis(text):
    """
    Parse a simplified SQL "select ... from ... where ..." string.

    :param text: SQL string, for example:
        select first_name,last_name from students where id = 5
        select * from students where first_name = "Mike" or "Andrew" and last_name = "Brown"
    :return: Dictionary with keys "fields", "source" and "conditions", e.g.
        {'fields': ['first_name', 'last_name', 'id'],
         'source': 'students',
         'conditions': [{'cond_field': 'id', 'cond_value': '5'}]}

    Conditions are split on the literal substrings "and" / "or", so names or
    values that merely contain those substrings (e.g. "sandbox", "north")
    are mis-parsed -- a known limitation of the original grammar.
    Written by <NAME>, McKinnon Secondary College, 07K. 2019.
    """
    # TODO: implement sort by in SQL parser
    fields = []
    source = ""
    conditions = []
    conditions_list = []
    command = text.split()
    if command[0] == "select":
        # Selected fields: a single name or a comma-separated list.
        fields.extend(command[1].split(","))
        if command[2] == "from":
            source = command[3]
        else:
            print("Error: 'from' not found!")
        # The "where" clause is optional: a short command has no command[4]
        # and raises IndexError (narrowed from the original bare except,
        # which also hid KeyboardInterrupt etc.).
        try:
            if command[4] == "where":
                # Re-join the tail and split it into individual conditions.
                condition_text = " ".join(command[5:])
                for element in condition_text.split("and"):
                    condition_dic = {}
                    val = element.split("=")
                    condition_dic["cond_field"] = val[0].strip()
                    conditions_list.append(val[0].strip())
                    if "or" in val[1]:
                        # Multiple alternatives: collect each non-blank value.
                        condition_dic["cond_value"] = [
                            value.strip()
                            for value in val[1].split("or")
                            if value != " "
                        ]
                    else:
                        condition_dic["cond_value"] = val[1].strip()
                    conditions.append(condition_dic)
        except IndexError:
            # No "where" clause, or a malformed condition without "=" --
            # same best-effort behavior as before.
            pass
    else:
        print("Invalid Format or Command!")
    if "*" in fields:
        # "*" wins: all fields are returned anyway, ignore the rest.
        fields = ["*"]
    else:
        # Make sure every filtered field is also selected, then remove
        # duplicates while preserving first-seen order.
        fields.extend(conditions_list)
        deduped = []
        for item in fields:
            if item not in deduped:
                deduped.append(item)
        fields = deduped
    return {"fields": fields, "source": source, "conditions": conditions}
# -------------------------------------------------------------------------------------------
def get_file_path(customer, custom_report_dir, api_mount, file_type):
    """
    Build the output file path (without extension) for a report or raw dump.

    :param customer: customer name, used as a per-customer subdirectory
    :param custom_report_dir: optional subdirectory for a set of reports
                              ("" or None for no subdirectory)
    :param api_mount: vManage API mount point; "/" is replaced by "_"
    :param file_type: "report" for processed reports, anything else for
                      raw output
    :return: full path with filename (the caller appends the extension)

    Side effect: creates the destination directory if it does not exist.
    """
    # Accept None as "no subdirectory" and normalize a bare name so it
    # ends with "/" (the original also left a dead local, report_tag_dir).
    custom_report_dir = custom_report_dir or ""
    if custom_report_dir and "/" not in custom_report_dir:
        custom_report_dir += "/"
    safe_mount = api_mount.replace("/", "_")
    if file_type == "report":
        target_dir = REPORT_DIR + customer + "/" + custom_report_dir
    else:
        target_dir = RAW_OUTPUT_DIR + customer + "/"
    # Create the destination directory if it does not exist.
    Path(target_dir).mkdir(parents=True, exist_ok=True)
    return target_dir + safe_mount
# -------------------------------------------------------------------------------------------
def print_to_csv_file(headers, content, file_name):
    """
    Write a header row plus data rows to a CSV file.

    :param headers: CSV headers - list of column names
    :param content: CSV body - list of rows (each row a list of values)
    :param file_name: output file name - string
    :return: None; errors are reported to stdout, not raised (best effort)
    """
    try:
        # "with" guarantees the handle is closed even on a write error.
        with open(file_name, "w", newline="") as out_csv:
            csvwriter = csv.writer(out_csv, delimiter=",")
            csvwriter.writerow(headers)
            # writerows emits every data row in a single call.
            csvwriter.writerows(content)
    except Exception as e:
        # Deliberately broad: a failed report write must not kill the run.
        print("Error while opening file", e)
# -------------------------------------------------------------------------------------------
def process_csv_files(
    join_dataframes, common_column, fields_to_select, sort_by, filter, file1, file2, result_file
):
    """
    Load one or two raw CSV files, optionally join them, select columns,
    apply the parsed "where" conditions, sort, and write the result CSV.

    :param join_dataframes: if True, merge file1 and file2 on common_column
    :param common_column: pair [left_column, right_column] used for the merge
    :param fields_to_select: list of column names, or ["*"] for all columns
    :param sort_by: column name to sort the result by (ascending)
    :param filter: list of {"cond_field": ..., "cond_value": str or list}
                   dictionaries produced by command_analysis
    :param file1: first input CSV path
    :param file2: second input CSV path (ignored unless joining)
    :param result_file: output CSV path
    """
    if join_dataframes:
        left = pd.read_csv(file1)
        right = pd.read_csv(file2)
        result_pd = pd.merge(
            left, right, left_on=common_column[0], right_on=common_column[1]
        )
        if fields_to_select[0] != "*":
            result_pd = result_pd.filter(fields_to_select)
    else:
        # "join_dataframes": false in source_definition.json -- single file.
        single = pd.read_csv(file1)
        result_pd = single if fields_to_select[0] == "*" else single.filter(fields_to_select)
    if filter:
        for filter_item in filter:
            try:
                # An OR clause arrives as a list of values; build a single
                # regex alternation for str.contains, e.g. "up|down".
                # https://stackoverflow.com/questions/19169649/using-str-contains-in-pandas-with-dataframes
                if isinstance(filter_item["cond_value"], list):
                    condition_string = "|".join(filter_item["cond_value"])
                else:
                    condition_string = filter_item["cond_value"]
                result_pd = result_pd[
                    result_pd[filter_item["cond_field"]]
                    .astype(str)
                    .str.contains(condition_string, na=False)
                ]
            except Exception:
                # Best effort: an unknown column or bad pattern leaves the
                # results unfiltered (narrowed from the original bare except).
                pass
    # sort ascending and write the final CSV
    result_pd.sort_values(sort_by, inplace=True, ascending=True)
    result_pd.to_csv(result_file, index=False)
# -------------------------------------------------------------------------------------------
def get_vedges_details(customer, api_response_data, query_condition):
    """
    Parse the vManage "all devices" JSON response, dump vEdge details to a
    CSV file, and return the list of vEdge deviceIds to query.

    All other device types (vBond, vSmart, vManage) are excluded.

    :param customer: string used to build the CSV output directory
    :param api_response_data: JSON "data" list from the vManage devices API
    :param query_condition: parsed "where" conditions; host-name / site-id
        entries are consumed (removed) here and used to narrow the result
    :return: list of deviceId values of the matching vEdge devices
    """
    device_ids = []
    # Device list to query - can come from the CLI filter, or all devices
    device_list = []
    found_device_id_in_filter = False
    # handle site-id or hostname in the query string
    devices_to_query = {}
    values_to_query = []
    # If a single deviceId is already in the 'where' filter, only that
    # device is queried; a list of deviceIds (an 'or' clause) is left alone.
    # Iterate over a snapshot: host-name / site-id items are removed from
    # query_condition inside the loop, and removing from the live list
    # being iterated used to skip the element that followed a removal.
    for item in list(query_condition):
        if "deviceId" in item["cond_field"] and not (isinstance(item["cond_value"], list)):
            # Querying particular devices
            device_list.append(item["cond_value"])
            # already found deviceId - no need to get device list later
            found_device_id_in_filter = True
        elif "host-name" in item["cond_field"]:
            devices_to_query["type"] = "host-name"
            # a list of host names from an 'or' clause is used as-is,
            # otherwise the single value is appended
            if (isinstance(item["cond_value"], list)):
                values_to_query = item["cond_value"]
            else:
                values_to_query.append(item["cond_value"])
            query_condition.remove(item)
        elif "site-id" in item["cond_field"]:
            devices_to_query["type"] = "site-id"
            if (isinstance(item["cond_value"], list)):
                values_to_query = item["cond_value"]
            else:
                values_to_query.append(item["cond_value"])
            query_condition.remove(item)
    devices_to_query["device_list"] = values_to_query
    if found_device_id_in_filter:
        # NOTE(review): this returns the (empty) device_ids even though
        # device_list was populated above -- confirm whether device_list
        # should be returned here instead.
        return device_ids
    # CSV headers: the keys of the first vEdge record found.
    csv_headers = []
    found = False
    for element in api_response_data:
        for key, value in element.items():
            if element["device-type"] == "vedge":
                csv_headers.append(key)
                found = True
        if found:
            # found a vEdge device, got headers, no need to look further
            break
    # CSV data rows: one per vEdge device.
    csv_data = []
    for element in api_response_data:
        # Skip everything that is not a vEdge.  (The original reused a
        # sticky 'found' flag here, which also emitted empty rows and
        # stale deviceIds for every non-vEdge record after the first vEdge.)
        if element["device-type"] != "vedge":
            continue
        csv_row = list(element.values())
        device_id = element["deviceId"]
        # If specific devices were requested, match on host-name or site-id.
        if "type" in devices_to_query:
            # host-name matching is by substring: any requested hostname
            # contained in the vEdge hostname received from vManage.
            if devices_to_query["type"] == "host-name" and \
                    next((s for s in devices_to_query["device_list"] if s in element["host-name"]), None):
                device_ids.append(device_id)
            elif devices_to_query["type"] == "site-id" and element["site-id"] in devices_to_query["device_list"]:
                device_ids.append(device_id)
        else:
            device_ids.append(device_id)
        csv_data.append(csv_row)
    # Dump data
    print_to_csv_file(
        csv_headers, csv_data, get_file_path(customer, "", "vedges", "raw_output") + ".csv")
    return device_ids
# -------------------------------------------------------------------------------------------
def run_api_query_and_save_to_csv(customer, sdwan_controller, api_query, device_list, no_connect):
    """
    Run an API query against each device and dump the combined result to CSV.

    :param customer: customer name, used to build the output file path
    :param sdwan_controller: vManage REST API connection object
    :param api_query: API mount point; the device ID is appended per request
    :param device_list: list of device IDs to query
    :param no_connect: if True, skip API calls and reuse the previously saved CSV
    :return: number of rows collected (0 when nothing could be read)
    """
    rows_list = []
    skipped_devices = []
    # If Do Not Connect flag is set, do not make API queries
    # The script uses the output .csv files previously collected
    if no_connect:
        try:
            df = pd.read_csv(get_file_path(customer, "", api_query.split("?")[0], "raw_output") + ".csv")
        except FileNotFoundError:
            # no such file
            print("Could not read CSV file: ", get_file_path(customer, "", api_query.split("?")[0], "raw_output") + ".csv")
            print("Try to remove no-connect option")
            return 0
        return len(df.index)
    print(">>> Making API request to", api_query)
    # Initialise progress bar
    pbar = tqdm(total=len(device_list), unit="dev")
    pbar.set_description("Processed devices")
    for device in device_list:
        response = json.loads(sdwan_controller.get_request(api_query + device))
        pbar.set_description("Processing %s" % device)
        pbar.update(1)
        try:
            response_data = response["data"]
        except (KeyError, TypeError):
            # no "data" key (or unexpected payload shape) returned - skip the device
            # (was a bare except:, which also hid unrelated programming errors)
            skipped_devices.append(device)
            continue
        # tag every record with the device it came from
        for element in response_data:
            element["deviceId"] = device
            rows_list.append(element)
    # Got list of dicts, convert it to Dataframe
    df = pd.DataFrame(rows_list)
    # replace NaN with empty strings
    df = df.replace(np.nan, "", regex=True)
    # If query contains device_id put this column as first and replace it with default index
    if "deviceId" in api_query and not df.empty:
        # Rearrange columns so deviceId comes first
        cols_to_order = ["deviceId"]
        new_columns = cols_to_order + (df.columns.drop(cols_to_order).tolist())
        df = df[new_columns]
        df.set_index("deviceId", inplace=True)
    # Dump dataframe to CSV, don't include anything after ? in the filename
    df.to_csv(get_file_path(customer, "", api_query.split("?")[0], "raw_output") + ".csv")
    if len(skipped_devices) > 0:
        # bug fix: message read "devices and reachable"
        print(Fore.RED + "\n>>> Check if these devices are reachable, couldn't get data from: ", skipped_devices)
        print(Style.RESET_ALL)
    return len(df.index)
# -------------------------------------------------------------------------------------------
def save_report_to_html(csv_file, html_file):
    """
    Render a CSV report as a styled HTML page.
    :param csv_file: input CSV file
    :param html_file: output HTML file, created in the same directory
    :return:
    """
    # load the report, keeping CSV columns as-is (no index column)
    report = pd.read_csv(csv_file, index_col=False)
    # raw table markup produced by pandas
    table_html = report.to_html(index=False, na_rep=" ")
    # swap in the custom table and header styling
    table_html = table_html.replace(
        '<table border="1" class="dataframe">',
        '<table style = "border:1px solid; border-color: white" class="hoverTable">',
    )
    table_html = table_html.replace(
        "<th>", '<th style = "background-color: #5abfdf" align="left">'
    )
    # prepend the shared stylesheet link and write the page out
    page = '<link rel="stylesheet" href="../../html_css/style.css">' + table_html
    with open(html_file, "w") as f:
        f.write(page)
    print("\nHTML Report saved as: " + str(Path(html_file).resolve()))
# -------------------------------------------------------------------------------------------
def stop_ssh_tunnel(ssh_tunnel):
    """Close the SSH tunnel if one was created and is still active."""
    if not ssh_tunnel:
        # no tunnel was ever created (direct connection) - nothing to do
        return
    if ssh_tunnel.is_active:
        print("Closing SSH tunnel connection...")
        ssh_tunnel.stop()
    print("")
# -------------------------------------------------------------------------------------------
def main():
    """Command-line entry point.

    Parses the SQL-like query from the CLI, resolves the customer's vManage
    host (optionally tunnelling through a jump host), collects vEdge data over
    the REST API and produces CSV/HTML reports.
    """
    # Added for using with sandbox, comment the line below for using in production
    requests.packages.urllib3.disable_warnings()
    # init colorama
    init()
    # Check CLI arguments
    options = parse_args()
    # Parse query from CLI input
    query_processed = command_analysis(options.query)
    # Analyse query
    source = query_processed["source"]
    if query_processed["conditions"]:
        query_condition = query_processed["conditions"]
    else:
        query_condition = ""
    fields_to_select = query_processed["fields"]
    with open("datasources.json", "r") as f:
        source_definitions = json.load(f)
    with open("customers.json", "r") as f:
        customers_definitions = json.load(f)
    # map the query source onto its REST API mount point
    for item in source_definitions:
        if item["data_source"] == source:
            api_query = item["api_mount"]
    # Get customer name from CLI
    customer_name = options.customer
    vmanage_host = ""
    jump_host = ""
    # Get vManage and Jump Host details from customer definitions
    for item in customers_definitions:
        if item["customer"] == customer_name:
            vmanage_host = item["vmanage_ip"]
            try:
                jump_host = item["jump_host"]
            except KeyError:
                print("No jumphost defined, connecting directly...")
    if not vmanage_host:
        # No such customer No vManage defined - existing program
        print(
            "No such Customer or vManage. Please specify valid customer name - see customers.json"
        )
        exit(1)
    print("Found vManage Host: ", vmanage_host)
    if jump_host:
        print(" Connecting via jumphost:", jump_host)
    # Add DeviceID field if not already inclided
    if (
        ("deviceId" in api_query)
        and ("*" not in fields_to_select)
        and ("deviceId" not in fields_to_select)
        and (not any("deviceId" in x["cond_field"] for x in query_condition))
    ):
        fields_to_select.insert(0, "deviceId")
    # NOTE(review): "<PASSWORD>" below looks like a redacted expression
    # (likely options.password / getpass.getpass) - restore before running
    if options.password:
        password = <PASSWORD>
    else:
        # Ask for password
        password = <PASSWORD>("Password: ")
    # Get customer report dir from CLI
    if options.report_dir:
        custom_report_dir = options.report_dir
    else:
        custom_report_dir = datetime.now().strftime('%Y-%m-%d')
    # jump host is defined for a customer, build ssh tunnel
    ssh_tunnel = ""
    if jump_host:
        try:
            ssh_tunnel = SSHTunnelForwarder(
                jump_host,
                ssh_username=options.user,
                ssh_password=password,
                remote_bind_address=(vmanage_host, 443),
            )
            ssh_tunnel.daemon_forward_servers = True
            ssh_tunnel.start()
        except Exception as e:
            print(str(e))
            print("Jump host is defined, but can't connect to it, exiting...")
            exit(1)
        vmanage_connect_port = ssh_tunnel.local_bind_port
        print(
            "SSH tunnel established:", jump_host,
            "Allocated local port:", ssh_tunnel.local_bind_port,
        )  # show assigned local port
        vmanage_host = "127.0.0.1"  # set vmanage host to local tunnel endpoint
        # ssh tunnel has been built
    else:
        vmanage_connect_port = 8443
    # Initialise vManage
    try:
        sdwan_controller = rest_api_lib(
            vmanage_host, vmanage_connect_port, options.user, password
        )
    # NOTE(review): bare except hides the real connection error - consider
    # narrowing and logging the exception
    except:
        print(Fore.RED + "Could not connect to vManage, exiting...")
        stop_ssh_tunnel(ssh_tunnel)
        exit(0)
    # Get vEdge device details
    response = json.loads(sdwan_controller.get_request("device"))
    response_data = response["data"]
    # Get vEdges device IDs to query
    device_list = get_vedges_details(customer_name, response_data, query_condition)
    print(Fore.GREEN + "Got", str(len(device_list)), "vEdge devices")
    print(Style.RESET_ALL)
    # Run the query
    dataframe_size = run_api_query_and_save_to_csv(
        customer_name, sdwan_controller, api_query, device_list, options.no_connect
    )
    if dataframe_size == 0:
        print(Fore.RED + "API query returned no data")
        stop_ssh_tunnel(ssh_tunnel)
        exit(0)
    # Received data, don't need ssh tunnel anymore, closing connection
    stop_ssh_tunnel(ssh_tunnel)
    # sorting by first column - fields_to_select[0] and then second fields_to_select[1]
    # TODO: implement sort by in SQL parser
    if fields_to_select[0] == "*":
        sort_by = ["deviceId"]
    else:
        sort_by = [fields_to_select[0],fields_to_select[1]]
    # Process CSV files and generate reports
    process_csv_files(
        False,
        "",
        fields_to_select,
        sort_by,
        query_condition,
        get_file_path(customer_name, "", api_query.split("?")[0], "raw_output") + ".csv",
        "",
        get_file_path(customer_name, custom_report_dir, api_query.split("?")[0], "report") + ".csv",
    )
    # print result CSV file to screen unless it's set to False is CLI arguments
    if options.screen_output:
        df = pd.read_csv(
            get_file_path(customer_name, custom_report_dir, api_query.split("?")[0], "report") + ".csv",
            index_col=0,
        )
        # Get number of rows and columns in Dataframe
        count_row = len(df)
        if count_row > SCREEN_ROW_COUNT:
            print(
                "Returned",
                count_row,
                "but printed only first",
                SCREEN_ROW_COUNT,
                ". Check CVS file for full output",
            )
        print("-" * 80)
        if count_row > 0:
            print(df.head(SCREEN_ROW_COUNT))
            print(Fore.GREEN + "Returned", count_row, "record(s)")
        else:
            print(Fore.RED + "Returned 0 record(s)")
        print(Style.RESET_ALL)
        print("-" * 80)
    if options.html_output:
        save_report_to_html(
            get_file_path(customer_name, custom_report_dir, api_query.split("?")[0], "report") + ".csv",
            get_file_path(customer_name, custom_report_dir, api_query.split("?")[0], "report") + ".html",
        )
# Script entry point guard - run only when executed directly, not on import
if __name__ == "__main__":
    main()
|
supro200/cisco-sdwan-query-tool | rest_api_lib.py | <reponame>supro200/cisco-sdwan-query-tool
import requests
import json
import sys
class rest_api_lib:
    """Minimal wrapper around the Cisco vManage REST API (login + GET/POST)."""

    def __init__(self, vmanage_ip, vmanage_port, username, password):
        """Store the connection details and log in immediately."""
        self.vmanage_ip = vmanage_ip
        self.vmanage_port = vmanage_port
        # per-host requests.Session objects, keyed by vManage IP
        self.session = {}
        self.login(username, password)

    def login(self, username, password):
        """Login to vmanage"""
        base_url_str = "https://" + str(self.vmanage_ip) + ":" + str(self.vmanage_port)
        login_action = '/j_security_check'
        # Format data for loginForm
        login_data = {'j_username': username, 'j_password': password}
        # Url for posting login data
        login_url = base_url_str + login_action
        sess = requests.session()
        # If the vmanage has a certificate signed by a trusted authority change verify to True
        login_response = sess.post(url=login_url, data=login_data, verify=False)
        # vManage answers the login form with an HTML page when credentials are rejected
        if b'<html>' in login_response.content:
            print("Login Failed")
            sys.exit(0)
        self.session[self.vmanage_ip] = sess

    def get_request(self, mount_point):
        """GET request; returns the raw response body as bytes."""
        url = "https://" + str(self.vmanage_ip) + ":" + str(self.vmanage_port) + "/dataservice/" + mount_point
        response = self.session[self.vmanage_ip].get(url, verify=False)
        data = response.content
        return data

    def post_request(self, mount_point, payload, headers=None):
        """POST request; serialises *payload* as JSON and returns the parsed reply."""
        # bug fix: the headers dict used to be a mutable default argument;
        # default to JSON content type lazily instead
        if headers is None:
            headers = {'Content-Type': 'application/json'}
        url = "https://" + str(self.vmanage_ip) + ":" + str(self.vmanage_port) + "/dataservice/" + mount_point
        payload = json.dumps(payload)
        # NOTE(review): debug print of the full payload - consider removing or logging
        print(payload)
        response = self.session[self.vmanage_ip].post(url=url, data=payload, headers=headers, verify=False)
        data = response.json()
        return data
|
Frikilinux/arigram | arigram/__init__.py | """
Terminal client for telegram
"""
__version__ = "0.1.1"
|
Frikilinux/arigram | arigram/colours.py | <reponame>Frikilinux/arigram
import curses
# Fallback colours used when "default" (-1) colours are unavailable
DEFAULT_FG = curses.COLOR_WHITE
DEFAULT_BG = curses.COLOR_BLACK
# Cache of allocated curses colour pairs, keyed by (fg, bg).
# (The previous seed value {(10, 10): 0} was dead code - it was immediately
# overwritten by the empty dict - and has been removed.)
COLOR_PAIRS = {}
# colors
black = curses.COLOR_BLACK
blue = curses.COLOR_BLUE
cyan = curses.COLOR_CYAN
green = curses.COLOR_GREEN
magenta = curses.COLOR_MAGENTA
red = curses.COLOR_RED
white = curses.COLOR_WHITE
yellow = curses.COLOR_YELLOW
default = -1
# modes (curses text attributes)
normal = curses.A_NORMAL
bold = curses.A_BOLD
blink = curses.A_BLINK
reverse = curses.A_REVERSE
underline = curses.A_UNDERLINE
invisible = curses.A_INVIS
dim = curses.A_DIM
def get_colour(fg: int, bg: int) -> int:
    """Returns the curses color pair for the given fg/bg combination."""
    key = (fg, bg)
    if key not in COLOR_PAIRS:
        # Allocate the next free pair number from the cache size.
        # NOTE(review): the first allocation uses pair number 0, which curses
        # reserves for the terminal default (init_pair(0, ...) raises) - the
        # fallback path below then also fails and pair 0 keeps default colours.
        # Confirm whether numbering should start at 1.
        size = len(COLOR_PAIRS)
        try:
            curses.init_pair(size, fg, bg)
        except curses.error:
            # If curses.use_default_colors() failed during the initialization
            # of curses, then using -1 as fg or bg will fail as well, which
            # we need to handle with fallback-defaults:
            if fg == -1:  # -1 is the "default" color
                fg = DEFAULT_FG
            if bg == -1:  # -1 is the "default" color
                bg = DEFAULT_BG
            try:
                curses.init_pair(size, fg, bg)
            except curses.error:
                # If this fails too, colors are probably not supported
                pass
        COLOR_PAIRS[key] = size
    return curses.color_pair(COLOR_PAIRS[key])
|
Frikilinux/arigram | arigram/utils.py | import base64
import curses
import functools
import hashlib
import logging
import mailcap
import math
import mimetypes
import os
import random
import shlex
import struct
import subprocess
import sys
import types
import unicodedata
from datetime import datetime
from functools import lru_cache
from logging.handlers import RotatingFileHandler
from subprocess import CompletedProcess
from types import TracebackType
from typing import Any, Callable, Dict, Optional, Tuple, Type
from pyperclip import copy as copy_clipboard
from arigram import config
# module-level logger
log = logging.getLogger(__name__)
# Multipliers for parse_size(): decimal (SI) byte units
units = {"B": 1, "KB": 10 ** 3, "MB": 10 ** 6, "GB": 10 ** 9, "TB": 10 ** 12}
class LogWriter:
    """File-like adapter that redirects writes to a logging callable.

    Used as ``sys.stderr = LogWriter(log.error)`` so stray stderr output ends
    up in the log file instead of corrupting the curses screen.
    """

    def __init__(self, level: Any) -> None:
        # ``level`` is a logging callable such as ``log.error``
        self.level = level

    def write(self, message: str) -> None:
        # skip the bare newlines that print() emits between messages
        if message != "\n":
            # bug fix: the old code called ``self.level.log(self.level, message)``,
            # which raises AttributeError for bound methods like ``log.error``
            # (and would pass a non-int level to Logger.log anyway); the stored
            # callable must simply be invoked with the message
            self.level(message)

    def flush(self) -> None:
        # required by the file-like API; nothing is buffered
        pass
def copy_func(f: Callable) -> Callable[[], Any]:
    """Return a shallow copy of function *f* (code, globals, defaults, closure).

    Based on https://stackoverflow.com/a/6528148/190597 (<NAME>)
    """
    clone = types.FunctionType(
        f.__code__,
        f.__globals__,  # type: ignore
        name=f.__name__,
        argdefs=f.__defaults__,  # type: ignore
        closure=f.__closure__,  # type: ignore
    )
    # copy wrapper metadata (__doc__, __dict__, ...) and keyword-only defaults
    clone = functools.update_wrapper(clone, f)
    clone.__kwdefaults__ = f.__kwdefaults__  # type: ignore
    return clone
def rename_function(new_name: str) -> Callable:
    """Decorator factory: rebind the wrapped callable's ``__name__`` to *new_name*."""

    def _apply(target: Callable) -> Callable:
        target.__name__ = new_name
        return target

    return _apply
def setup_log() -> None:
    """Configure rotating file logging and route stderr/warnings into it."""
    os.makedirs(config.LOG_PATH, exist_ok=True)
    handlers = []
    # one rotating file per level: everything -> all.log, errors only -> error.log
    for level, filename in (
        (config.LOG_LEVEL, "all.log"),
        (logging.ERROR, "error.log"),
    ):
        handler = RotatingFileHandler(
            os.path.join(config.LOG_PATH, filename),
            maxBytes=parse_size("32MB"),
            backupCount=1,
        )
        handler.setLevel(level)  # type: ignore
        handlers.append(handler)
    logging.basicConfig(
        format="%(levelname)s [%(asctime)s] %(filename)s:%(lineno)s - %(funcName)s | %(message)s",
        handlers=handlers,
    )
    logging.getLogger().setLevel(config.LOG_LEVEL)
    # anything written to stderr ends up in the error log
    sys.stderr = LogWriter(log.error)  # type: ignore
    logging.captureWarnings(True)
def get_mime(file_path: str) -> str:
    """Return a coarse media kind for *file_path*: "image", "video", "audio",
    "animation" (for GIFs), or "" when the type cannot be guessed."""
    guessed, _ = mimetypes.guess_type(file_path)
    if guessed is None:
        return ""
    # GIFs are treated as animations rather than still images
    return "animation" if guessed == "image/gif" else guessed.split("/")[0]
def get_mailcap() -> Dict:
    """Load mailcap entries, preferring the user-configured file over system defaults."""
    if not config.MAILCAP_FILE:
        return mailcap.getcaps()
    with open(config.MAILCAP_FILE) as f:
        return mailcap.readmailcapfile(f)  # type: ignore
def get_file_handler(file_path: str) -> str:
    """Return the shell command used to open *file_path*: the mailcap handler
    for its MIME type, or the configured default opener."""
    fallback = config.DEFAULT_OPEN.format(file_path=shlex.quote(file_path))
    mtype, _ = mimetypes.guess_type(file_path)
    if not mtype:
        return fallback
    caps = get_mailcap()
    handler, _view = mailcap.findmatch(caps, mtype, filename=file_path)
    return handler if handler else fallback
def parse_size(size: str) -> int:
    """Convert a human size string like "32MB" or "1.5KB" into a byte count."""
    # unit suffix is either two letters (KB/MB/GB/TB) or a single "B"
    suffix_len = 2 if size[-2].isalpha() else 1
    number, unit = size[:-suffix_len], size[-suffix_len:]
    return int(float(number) * units[unit])
def humanize_size(
    num: int,
    suffix: str = "B",
    suffixes: Tuple[str, ...] = (
        "",
        "K",
        "M",
        "G",
        "T",
        "P",
        "E",
        "Z",
    ),
) -> str:
    """Format a byte count using binary (1024-based) magnitude prefixes."""
    # order of magnitude in powers of 1024
    exponent = int(math.floor(math.log(num, 1024)))
    scaled = num / math.pow(1024, exponent)
    # beyond the listed prefixes fall back to "Yi"
    if exponent > 7:
        return "{:.1f}{}{}".format(scaled, "Yi", suffix)
    return "{:3.1f}{}{}".format(scaled, suffixes[exponent], suffix)
def humanize_duration(seconds: int) -> str:
    """Render a duration in seconds as M:SS, or H:MM:SS from one hour up."""
    as_time = datetime.utcfromtimestamp(seconds)
    # "%-" (no zero padding) is a glibc strftime extension
    pattern = "%-H:%M:%S" if seconds >= 3600 else "%-M:%S"
    return as_time.strftime(pattern)
def num(value: str, default: Optional[int] = None) -> Optional[int]:
    """Parse *value* as an int, returning *default* when it is not numeric."""
    try:
        parsed = int(value)
    except ValueError:
        return default
    return parsed
def is_yes(resp: str) -> bool:
    """Treat an empty reply or any variant of "y" as confirmation (yes-default prompt)."""
    return resp.strip().lower() == "y" if resp else True
def is_no(resp: str) -> bool:
    """Treat an empty reply or any variant of "n" as refusal (no-default prompt)."""
    return resp.strip().lower() == "n" if resp else True
def get_duration(file_path: str) -> int:
    """Probe a media file with ffprobe and return its duration in whole seconds (0 if unknown)."""
    cmd = f"ffprobe -v error -i '{file_path}' -show_format"
    output_lines = subprocess.check_output(shlex.split(cmd)).decode().splitlines()
    duration_line = next((ln for ln in output_lines if "duration" in ln), None)
    if duration_line is None:
        return 0
    _, duration = duration_line.split("=")
    log.info("duration: %s", duration)
    return int(float(duration))
def get_video_resolution(file_path: str) -> Tuple[int, int]:
    """Return (width, height) of a video file as reported by ffprobe."""
    cmd = f"ffprobe -v error -show_entries stream=width,height -of default=noprint_wrappers=1 '{file_path}'"
    probe_lines = subprocess.check_output(shlex.split(cmd)).decode().splitlines()
    # each line looks like "width=1920" / "height=1080"
    fields = dict(ln.split("=", 1) for ln in probe_lines)
    return int(str(fields.get("width"))), int(str(fields.get("height")))
def get_waveform(file_path: str) -> str:
    """Return a fake voice-message waveform as base64 (stub: 100 random bytes)."""
    del file_path  # unused until a real implementation exists
    samples = (random.randint(0, 255) for _ in range(100))
    return base64.b64encode(struct.pack("100B", *samples)).decode()
# Translation table that strips quote characters from values before they are
# quoted for the notification shell command
safe_map = str.maketrans({"'": "", "`": "", '"': ""})
def notify(
    msg: str,
    subtitle: str = "",
    title: str = "tg",
    function: Any = config.NOTIFY_FUNCTION,
) -> None:
    """Send a desktop notification via the configured callback (no-op when unset)."""
    if not function:
        return
    # strip quote characters, then shell-quote every field
    safe_subtitle = subtitle.translate(safe_map)
    safe_msg = msg.translate(safe_map)
    function(
        icon_path=shlex.quote(config.ICON_PATH),
        title=shlex.quote(title),
        subtitle=shlex.quote(safe_subtitle),
        msg=shlex.quote(safe_msg),
    )
def string_len_dwc(string: str) -> int:
    """Returns string len including count for double width characters"""
    total = 0
    for ch in string:
        # East-Asian wide (W) and fullwidth (F) characters occupy two cells
        total += 2 if unicodedata.east_asian_width(ch) in "WF" else 1
    return total
def truncate_to_len(string: str, width: int) -> str:
    """Cut *string* so its display width (wide chars count double) stays within *width*."""
    # per-character display widths, counting East-Asian wide/fullwidth as two cells
    widths = [2 if unicodedata.east_asian_width(ch) in "WF" else 1 for ch in string]
    if sum(widths) <= width:
        return string
    consumed = 0
    kept = []
    for ch, w in zip(string, widths):
        consumed += w
        kept.append(ch)
        if consumed >= width:
            break
    return "".join(kept)
def copy_to_clipboard(text: str) -> None:
    """Copy *text* to the system clipboard (thin wrapper over pyperclip)."""
    copy_clipboard(text)
class suspend:
    """Context manager that suspends the curses UI so external programs
    (editors, pagers, file openers) can use the terminal; restores the
    curses modes and refresh handlers on exit."""
    # FIXME: can't explicitly set type "View" due to circular import
    def __init__(self, view: Any) -> None:
        self.view = view

    def call(self, cmd: str) -> CompletedProcess:
        # run a shell command while the UI is suspended
        return subprocess.run(cmd, shell=True)

    def run_with_input(self, cmd: str, text: str) -> None:
        # feed *text* to the command's stdin
        proc = subprocess.run(
            cmd, universal_newlines=True, input=text, shell=True
        )
        if proc.returncode:
            input(f"Command <{cmd}> failed: press <enter> to continue")

    def open_file(self, file_path: str, cmd: Optional[str] = None) -> None:
        # open with an explicit "%s"-style command template, or fall back to
        # the mailcap/default handler
        if cmd:
            cmd = cmd % shlex.quote(file_path)
        else:
            cmd = get_file_handler(file_path)
        proc = self.call(cmd)
        if proc.returncode:
            input(f"Command <{cmd}> failed: press <enter> to continue")

    def __enter__(self) -> "suspend":
        # defer screen refreshes while suspended and disable resize handling
        for view in (self.view.chats, self.view.msgs, self.view.status):
            view._refresh = view.win.noutrefresh
        self.view.resize_handler = self.view.resize_stub
        # leave curses mode: re-enable echo/line-buffering and show the cursor
        curses.echo()
        curses.nocbreak()
        self.view.stdscr.keypad(False)
        curses.curs_set(1)
        curses.endwin()
        return self

    def __exit__(
        self,
        exc_type: Optional[Type[BaseException]],
        exc_val: Optional[BaseException],
        tb: Optional[TracebackType],
    ) -> None:
        del exc_type, exc_val, tb
        # restore immediate refreshes and the real resize handler
        for view in (self.view.chats, self.view.msgs, self.view.status):
            view._refresh = view.win.refresh
        self.view.resize_handler = self.view.resize
        # re-enter curses mode (mirror of __enter__ teardown)
        curses.noecho()
        curses.cbreak()
        self.view.stdscr.keypad(True)
        curses.curs_set(0)
        curses.doupdate()
curses.doupdate()
def set_shorter_esc_delay(delay: int = 25) -> None:
    """Shorten the curses ESC key delay, unless ESCDELAY is already set."""
    if "ESCDELAY" not in os.environ:
        os.environ["ESCDELAY"] = str(delay)
def pretty_ts(ts: int) -> str:
    """Render a unix timestamp as a human friendly "x ago" string."""
    delta = datetime.utcnow() - datetime.utcfromtimestamp(ts)
    days, secs = delta.days, delta.seconds
    if days < 0:
        # timestamp lies in the future
        return ""
    if days == 0:
        if secs < 10:
            return "just now"
        if secs < 60:
            return f"{secs} seconds ago"
        if secs < 120:
            return "a minute ago"
        if secs < 3600:
            return f"{int(secs / 60)} minutes ago"
        if secs < 7200:
            return "an hour ago"
        if secs < 86400:
            return f"{int(secs / 3600)} hours ago"
    if days == 1:
        return "yesterday"
    if days < 7:
        return f"{days} days ago"
    if days < 31:
        return f"{int(days / 7)} weeks ago"
    if days < 365:
        return f"{int(days / 30)} months ago"
    return f"{int(days / 365)} years ago"
@lru_cache(maxsize=256)
def get_colour_by_str(user: str) -> int:
    """Pick a stable per-user colour by hashing the user string."""
    digest = hashlib.sha1(user.encode()).hexdigest()
    palette = config.USERS_COLOURS
    return palette[int(digest, 16) % len(palette)]
def cleanup_cache() -> None:
    """Delete downloaded media older than KEEP_MEDIA days (no-op when unset)."""
    if not config.KEEP_MEDIA:
        return
    files_path = os.path.join(config.FILES_DIR, "files")
    # fire-and-forget: let find(1) remove stale files in the background
    subprocess.Popen(
        f"find {files_path} -type f -mtime +{config.KEEP_MEDIA} -delete",
        shell=True,
    )
|
Frikilinux/arigram | arigram/config.py | <filename>arigram/config.py
"""
Every parameter (except for CONFIG_FILE) can be
overwritten by external config file
"""
import mailcap
import os
import platform
import runpy
from typing import Any, Dict, List, Optional, Tuple
# Platform detection - several default commands differ between Linux and macOS
_os_name = platform.system()
_linux = "Linux"
# system-wide mailcap entries, used below to pick a default editor
_global_mailcap = mailcap.getcaps()
CONFIG_DIR: str = os.path.expanduser("~/.config/arigram/")
CONFIG_FILE: str = os.path.join(CONFIG_DIR, "config.py")
FILES_DIR: str = os.path.expanduser("~/.cache/arigram/")
DRAFTS_FILE: str = os.path.join(FILES_DIR, "drafts.json")
MAILCAP_FILE: Optional[str] = None
LOG_LEVEL: str = "INFO"
LOG_PATH: str = os.path.expanduser("~/.local/share/arigram/")
# Telegram application credentials
API_ID: str = "559815"
API_HASH: str = "fd121358f59d764c57c55871aa0807ca"
PHONE: Optional[str] = None
ENC_KEY: str = ""
TDLIB_PATH: Optional[str] = None
TDLIB_VERBOSITY: int = 0
MAX_DOWNLOAD_SIZE: str = "10MB"
NOTIFY_FUNCTION: Optional[Any] = None
VIEW_TEXT_CMD: str = "less"
# for more info see https://trac.ffmpeg.org/wiki/Capture/ALSA
VOICE_RECORD_CMD: str = (
    "ffmpeg -f alsa -i hw:0 -c:a libopus -b:a 32k {file_path}"
    if _os_name == _linux
    else "ffmpeg -f avfoundation -i ':0' -c:a libopus -b:a 32k {file_path}"
)
# Prefer a mailcap-configured markdown editor over $EDITOR when one exists
EDITOR: str = os.environ.get("EDITOR", "vim")
_, __MAILCAP_EDITOR = mailcap.findmatch(_global_mailcap, "text/markdown")
if __MAILCAP_EDITOR:
    EDITOR = str(__MAILCAP_EDITOR["view"]).split(" ", 1)[0]
LONG_MSG_CMD: str = f"{EDITOR} '{{file_path}}'"
DEFAULT_OPEN: str = (
    "xdg-open {file_path}" if _os_name == _linux else "open {file_path}"
)
CHAT_FLAGS: Dict[str, str] = {}
MSG_FLAGS: Dict[str, str] = {}
ICON_PATH: str = os.path.join(
    os.path.dirname(__file__), "resources", "arigram.png"
)
URL_VIEW: Optional[str] = None
USERS_COLOURS: Tuple[int, ...] = tuple(range(2, 16))
# days to keep downloaded media before cleanup (falsy disables cleanup)
KEEP_MEDIA: int = 7
FILE_PICKER_CMD: Optional[str] = None
DOWNLOAD_DIR: str = os.path.expanduser("~/Downloads/")
EXTRA_FILE_CHOOSER_PATHS: List[str] = ["..", "/", "~"]
CUSTOM_KEYBINDS: Dict[str, Dict[str, Any]] = {}
TRUNCATE_LIMIT: int = 15
# NOTE(review): "HEADEARS" is misspelled but is part of the public config API
EXTRA_TDLIB_HEADEARS: Dict[Any, Any] = {}
# Overlay user config: any UPPERCASE name defined in config.py replaces the
# default declared above
if os.path.isfile(CONFIG_FILE):
    config_params = runpy.run_path(CONFIG_FILE)  # type: ignore
    for param, value in config_params.items():
        if param.isupper():
            globals()[param] = value
else:
    os.makedirs(CONFIG_DIR, exist_ok=True)
# First run: interactively ask for the phone number and persist it
if not PHONE:
    print(
        "Enter your phone number in international format, including country code (example: +5037754762346)"
    )
    PHONE = input("(phone) ")
    if not PHONE.startswith("+"):
        PHONE = "+" + PHONE
    with open(CONFIG_FILE, "a") as f:
        f.write(f'\nPHONE = "{PHONE}"\n')
|
Frikilinux/arigram | arigram/exceptions.py | <reponame>Frikilinux/arigram
class KeyBoundError(Exception):
    """Exception to raise when a key is already bound"""
|
Frikilinux/arigram | setup.py | <reponame>Frikilinux/arigram
from setuptools import setup
import arigram
# Long description for PyPI comes straight from the README
with open("README.md", "r") as fh:
    readme = fh.read()
setup(
    long_description=readme,
    long_description_content_type="text/markdown",
    name="arigram",
    version=arigram.__version__,
    description="A fork of tg -- a hackable telegram TUI client",
    url="https://github.com/TruncatedDinosour/arigram",
    author="TruncatedDinosour",
    author_email="<EMAIL>",
    classifiers=[
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3.9",
    ],
    packages=["arigram"],
    entry_points={"console_scripts": ["arigram = arigram.__main__:main"]},
    python_requires=">=3.8",
    install_requires=[
        "python-telegram==0.15.0",
        "pyfzf>=0.2.2",
        "pyperclip>=1.8.2",
    ],
)
|
kompotkot/RevitApi-RoomMassing | RoomMassingScript.py | <filename>RoomMassingScript.py
#__doc__ = 'Print the full path to the central model (if model is workshared).'
# IMPORTS
import clr
import System
clr.AddReference('RevitAPI')
clr.AddReference('RevitAPIUI')
clr.AddReference("RevitServices")
clr.AddReference("RevitNodes")
from Autodesk.Revit.DB import *
from Autodesk.Revit.UI import *
from Autodesk.Revit.DB.Architecture import RoomFilter
from RevitServices.Persistence import DocumentManager
from System import Guid
# PREDEFINED VARIABLES
# PREDEFINED VARIABLES (__revit__ is injected by the pyRevit/RevitPythonShell host)
app = __revit__.Application
uidoc = __revit__.ActiveUIDocument
doc = __revit__.ActiveUIDocument.Document
selection = __revit__.ActiveUIDocument.Selection
# PARAMS
forFilterFlatType = 'ROM_Type of Layout' # Getting row from room properties for material name
forFilterLevel = "02" # For level filter
forFilterTower = "R1" # For tower filter
def materialname(materialname, docname=doc):
    """Look up a Material element by name in the given document; returns None when absent."""
    collector = FilteredElementCollector(docname)
    all_materials = collector.WherePasses(ElementClassFilter(Material)).ToElements()
    # return the first material whose name matches, if any
    for candidate in all_materials:
        if candidate.Name == materialname:
            return candidate
    return None
def materialcreator(newmaterialname, docname=doc):
    """Create a material with the given name unless one already exists in the document."""
    collector = FilteredElementCollector(docname)
    existing = collector.WherePasses(ElementClassFilter(Material)).ToElements()
    for material in existing:
        if material.Name == newmaterialname:
            # already present - nothing to create
            return
    Material.Create(docname, newmaterialname)
# MAIN SCRIPT: for every room matching the level/tower/layout filters, extrude
# its boundary into a new generic-model family, paint it with a per-layout
# material and place the family back into the model at the origin.
# Use a RoomFilter to find all room elements in the document
roomfilter = RoomFilter()
collectorRooms = FilteredElementCollector(doc) # Apply the filter to the elements in the active document
collectorRooms.WherePasses(roomfilter)
roomiditr = collectorRooms.GetElementIdIterator() # Get result as ElementId iterator
roomiditr.Reset()
opt = SpatialElementBoundaryOptions() # Boundary options for rooms
# Cycle through each existing room
while (roomiditr.MoveNext()):
    roomid = roomiditr.Current # Take roomid
    room = doc.GetElement(roomid) # Take room by roomid
    roomLevel = room.get_Parameter(BuiltInParameter.ROOM_LEVEL_ID).AsValueString()
    roomTower = room.LookupParameter('ROM_Number (Section)').AsString()
    roomFlatType = room.LookupParameter(forFilterFlatType).AsString() # Get name of room type for material name
    # process only rooms on the requested level/tower that have a layout type set
    if roomLevel == forFilterLevel and roomTower == forFilterTower and roomFlatType != None:
        # Get room height (mm display string -> feet float); default to 10 ft when unset
        if type(room.get_Parameter(BuiltInParameter.ROOM_HEIGHT).AsValueString()) == str:
            roomHeightMm = room.get_Parameter(BuiltInParameter.ROOM_HEIGHT).AsValueString()
            roomHeightFt = roomHeightMm.replace(' ','')
            roomHeightFt = roomHeightFt.replace(',','.')
            roomHeightFt = float(roomHeightFt) / 304.8
        else:
            roomHeightFt = int(10)
        # Load family temlate
        docFam = app.NewFamilyDocument("C:\\ProgramData\Autodesk\\RVT 2016\\Family Templates\\English\\Metric Generic Model.rft")
        docFam.SaveAs("D:\\Temp\\" + room.UniqueId.ToString() + ".rfa")
        # Start transaction for model
        m_Trans = Transaction(doc, 'Model transaction to create room boundaries')
        m_Trans.Start()
        # Start transaction for family
        m_TransFam = Transaction(docFam, 'Family transaction to create each Family')
        m_TransFam.Start()
        # Working with room contours
        rvBoundary = room.GetBoundarySegments(opt) # Get room boundary
        # Filter room if there are no possibility to generate from
        for rvLoop in rvBoundary: # For each loop in the room boundary
            crvarr = CurveArray() # There will be curve fro each room segments
            for rvPiece in rvLoop: # Retrieve each segment of the loop
                dsPiece = rvPiece.Curve # Transform to segments
                crvarr.Append(dsPiece) # Add segments to curve crvarr
            # Form generation in family
            ptOrigin = XYZ(0,0,0)
            ptNormal = XYZ(0,0,1)
            plane = app.Create.NewPlane(ptNormal, ptOrigin)
            sketchPlane = SketchPlane.Create(docFam, plane)
            # Convert the outline. No idea waht difference CurveArrArray of CurveArray
            curveArrArray = CurveArrArray();
            curveArrArray.Append(crvarr);
            # Generate from. Filter out if no success
            try:
                extrusion = docFam.FamilyCreate.NewExtrusion(True, curveArrArray, sketchPlane, roomHeightFt)
                print('Form generated')
            except:
                print('Form can\'t be generated')
                break
        # Create material in family
        matforflattype = "PNT-" + roomFlatType + "-FormAlgorithm"
        materialcreator(matforflattype,docFam)
        # Find the first geometry face of the given extrusion object
        # NOTE(review): if no loop produced an extrusion, "extrusion" below is
        # undefined/stale - confirm the break above cannot leave it unset
        geomElement = extrusion.get_Geometry(Options())
        geoObjectItor = geomElement.GetEnumerator()
        while (geoObjectItor.MoveNext()):
            solid = geoObjectItor.Current # Need to find a solid first
            for face in solid.Faces: # Take each surface and paint it
                docFam.Paint(extrusion.Id, face, materialname(matforflattype,docFam).Id)
        # Finish transaction for family
        m_TransFam.Commit()
        docFam.Save()
        # Finish transaction for model
        m_Trans.Commit()
        # Load family in model
        family = docFam.LoadFamily(doc)
        # ElementId iterator for Family
        collectorFamilySymbols = FilteredElementCollector(doc)
        collectorFamilySymbols.OfClass(FamilySymbol)
        famtypeitr = collectorFamilySymbols.GetElementIdIterator()
        famtypeitr.Reset()
        # Go through each existing family in model and compare
        familySymbol = None # Global var, write in from local in cycle
        """
        # Comments
        cl_sheets = FilteredElementCollector(doc)
        allsheets = cl_sheets.OfCategory(BuiltInCategory.OST_Sheets)
        allviews = allviews.UnionWith(allsheets).WhereElementIsNotElementType().ToElements()
        a = filter(lambda x: x.Name == family.Name.ToString(),famtypeitr) # No possibility to take Name from x, like FamilyName
        """
        #doc.GetElement(famtypeitr.Current).FamilyName
        while (famtypeitr.MoveNext()):
            famid = famtypeitr.Current # Get familyid
            fam = doc.GetElement(famid) # Get family by famid
            famname = fam.FamilyName
            if family.Name.ToString() == famname: # Compare name of created family and name of family in model
                #print(family.Name.ToString() + " eaqual " + famname)
                familySymbol = fam
                break
            else:
                #print(family.Name.ToString() + " NOT " + famname)
                continue
        m_Trans = Transaction(doc, 'Model transaction to place the Family into the model')
        m_Trans.Start()
        # Activate Symbol
        familySymbol.Activate()
        # Place family
        familyInstance = doc.Create.NewFamilyInstance(XYZ(0, 0, 0), familySymbol, Structure.StructuralType.NonStructural)
        m_Trans.Commit()
TaskDialog.Show("Massing Done.", "Did it.")
|
CodyJohnsonCHL/dfm_models | dfm_models/scripts/generate_BCs_gcm.py | #!/usr/bin/env python
# coding: utf-8
# version 0.1 2020/01/08 -- <NAME>
import argparse
from pathlib import Path
import cartopy.crs as ccrs
import cartopy.feature as cfeature
import cmocean.cm as cmo
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import xarray as xr
# functions #
def write_bc_for_pli(bc_fn, gcm, pli, quantity, method, depth_avg):
    """
    append or write 3D boundary conditions for quantities

    bc_fn = path to write or append boundary condition data
    gcm = general circulation model output which contains boundary points (xr.DataArray)
    pli = table of boundary support points (pd.DataFrame)
    quantity = variable to output to the BC files (salinity, water_temp, velocity or surf_el)
    method = interpolation method passed through to interpolate_to_pli_point
    depth_avg = flag to enable depth averaging
    """
    known_quantities = ("salinity", "water_temp", "velocity", "surf_el")
    with open(bc_fn, "a") as f:
        gcm_refd, ref_date = assign_seconds_from_refdate(gcm)
        for _, (x_pli, y_pli, pli_point_name) in pli.iterrows():
            # gcm longitudes run 0-360 while pli points use -180..180
            x_pli_east = x_pli + 360
            if quantity not in known_quantities:
                # unrecognised quantity: nothing to interpolate or write
                continue
            # velocity needs both horizontal components interpolated together
            if quantity == "velocity":
                variables = ["water_u", "water_v"]
            else:
                variables = quantity
            bc_data = interpolate_to_pli_point(
                gcm_refd, variables, x_pli_east, y_pli, pli_point_name, method
            )
            # check valid depth point
            if bc_data is None:
                continue
            # dispatch to the record writer that matches the quantity
            if quantity == "velocity":
                write_vector_t3d_record(f, bc_data, pli_point_name, quantity, ref_date)
            elif quantity == "surf_el":
                write_ts_record(f, bc_data, pli_point_name, quantity, ref_date)
            elif depth_avg:
                write_ts_record(f, bc_data, pli_point_name, quantity, ref_date)
            else:
                write_t3d_record(f, bc_data, pli_point_name, quantity, ref_date)
def write_ts_record(f, bc_data, pli_point_name, quantity, ref_date):
    """
    append or write time series boundary conditions for depth averaged quantities
    f = file descriptor for writing boundary condition output
    bc_data = interpolated data at the pli point (xr.DataArray)
    pli_point_name = name for entry
    quantity = variable to output to the BC files ("salinity", "water_temp" or "surf_el")
    ref_date = date used to calculate offset of the record in seconds
    """
    # map the GCM variable onto the D3D boundary quantity name and its units
    if quantity == "salinity":
        quantbnd = "salinitybnd"
        units = "ppt"
    elif quantity == "water_temp":
        quantbnd = "temperaturebnd"
        units = "°C"
    elif quantity == "surf_el":
        quantbnd = "waterlevelbnd"
        units = "m"
    else:
        print('quantity needs to be either "salinity", "water_temp" or "surf_el"\n')
        raise ValueError
    # write a record header
    # NOTE(review): surf_el records also use Function=t3d with a single
    # vertical position — confirm against the D3D waterlevelbnd format
    f.write("[forcing]\n")
    f.write(f"Name = {pli_point_name}\n")
    f.write("Function = t3d\n")
    f.write("Time-interpolation = linear\n")
    f.write("Vertical position type = single\n")
    f.write("Vertical interpolation = linear\n")
    f.write("Quantity = time\n")
    f.write(f"Unit = seconds since {ref_date}\n")
    f.write(f"Quantity = {quantbnd}\n")
    f.write(f"Unit = {units}\n")
    f.write("Vertical position = 1\n")
    if quantity == "surf_el":
        # water level has no depth axis: one value per time step
        # (iteritems() was removed in pandas 2.0 — use items())
        for td, value in bc_data.to_dataframe()[quantity].items():
            f.write(f"{td} {value:.05f}\n")
    else:
        # depth-average each time step of the (time, depth) record
        for td, values in bc_data.to_dataframe()[quantity].unstack(level=-1).iterrows():
            value = values.mean()
            # sanity-check the interpolated, depth-averaged value
            if value > 100.0:
                print(
                    f"Problem with {quantity} exceeding maximum allowed value: {value:.03f} {units}."
                )
            elif value < 0.0:
                print(
                    f"Problem with {quantity} becoming negative: {value:.03f} {units}."
                )
                print(f"Negative value for {quantity} has been set to 0.01 {units}.")
                value = 0.01
            f.write(f"{td} {value:.05f}\n")
    f.write("\n")
def write_vector_t3d_record(f, bc_data, pli_point_name, quantity, ref_date):
    """
    append or write a 3D *vector* boundary-condition record (both velocity
    components at every vertical position)
    f = file descriptor for writing boundary condition output
    bc_data = data with percent-from-bed coords (xr.Dataset with water_u/water_v)
    pli_point_name = name for entry
    quantity = variable to output to the BC files (must be "velocity")
    ref_date = date used to calculate offset of the record in seconds
    """
    if quantity == "velocity":
        vector = "uxuyadvectionvelocitybnd:ux,uy"
        quantbndx = "ux"
        quantbndy = "uy"
        x_comp = "water_u"
        y_comp = "water_v"
        units = "-"  # no units for velocity in example provided by Kees
    else:
        print('quantity should be "velocity"\n')
        raise ValueError
    # convert percent from bed into formated string
    pos_spec = [f"{perc:.02f}" for perc in bc_data.perc_from_bed.data]
    pos_spec_str = " ".join(pos_spec[::-1])  # reverse order for D3D
    # write a record
    f.write("[forcing]\n")
    f.write(f"Name = {pli_point_name}\n")
    f.write(f"Function = t3d\n")
    f.write(f"Time-interpolation = linear\n")
    f.write(f"Vertical position type = percentage from bed\n")
    f.write(f"Vertical position specification = {pos_spec_str}\n")
    f.write(f"Vertical interpolation = linear\n")
    f.write(f"Quantity = time\n")
    f.write(f"Unit = seconds since {ref_date}\n")
    f.write(f"Vector = {vector}\n")
    # loop over number of vertical positions: each position declares a
    # ux and a uy quantity so the data rows interleave both components
    for vert_pos in range(1, len(pos_spec) + 1):
        f.write(f"Quantity = {quantbndx}\n")
        f.write(f"Unit = {units}\n")
        f.write(f"Vertical position = {vert_pos}\n")
        f.write(f"Quantity = {quantbndy}\n")
        f.write(f"Unit = {units}\n")
        f.write(f"Vertical position = {vert_pos}\n")
    # write data after converting to dataframe and iterating over the rows
    # (unstack(level=0) leaves one row per time step, one column per depth)
    for td, values in (
        bc_data.to_dataframe()[[x_comp, y_comp]].unstack(level=0).iterrows()
    ):
        # get componets as array in order to format for d3d input
        x_comp_vals = values[x_comp].values[::-1]  # reverse order for D3D
        y_comp_vals = values[y_comp].values[::-1]  # reverse order for D3D
        # pair the components per depth: "ux uy ux uy ..." bottom-up
        values = [
            f"{x_comp_val:.03f} {y_comp_val:.03f}"
            for x_comp_val, y_comp_val in zip(x_comp_vals, y_comp_vals)
        ]
        values_str = " ".join(values)
        f.write(f"{td} {values_str}\n")
    f.write("\n")
def write_t3d_record(f, bc_data, pli_point_name, quantity, ref_date):
    """
    append or write 3D boundary conditions for quantities
    f = file descriptor for writing boundary condition output
    bc_data = data with percent-from-bed coords (xr.DataArray)
    pli_point_name = name for entry
    quantity = variable to output to the BC files ("salinity" or "water_temp")
    ref_date = date used to calculate offset of the record in seconds
    """
    # map the GCM variable onto the D3D boundary quantity name and its units
    if quantity == "salinity":
        quantbnd = "salinitybnd"
        units = "ppt"
    elif quantity == "water_temp":
        quantbnd = "temperaturebnd"
        units = "°C"
    else:
        print('quantity needs to be either "salinity" or "water_temp"\n')
        raise ValueError
    # convert percent from bed into formated string (reversed: D3D wants bottom-up)
    pos_spec = [f"{perc:.02f}" for perc in bc_data.perc_from_bed.data]
    pos_spec_str = " ".join(pos_spec[::-1])
    # write a record header
    f.write("[forcing]\n")
    f.write(f"Name = {pli_point_name}\n")
    f.write("Function = t3d\n")
    f.write("Time-interpolation = linear\n")
    f.write("Vertical position type = percentage from bed\n")
    f.write(f"Vertical position specification = {pos_spec_str}\n")
    f.write("Vertical interpolation = linear\n")
    f.write("Quantity = time\n")
    f.write(f"Unit = seconds since {ref_date}\n")
    # declare the quantity once per vertical position
    for vert_pos in range(1, len(pos_spec) + 1):
        f.write(f"Quantity = {quantbnd}\n")
        f.write(f"Unit = {units}\n")
        f.write(f"Vertical position = {vert_pos}\n")
    # write data after converting to dataframe and iterating over the rows
    # (one row per time step, one column per depth)
    for td, values in bc_data.to_dataframe()[quantity].unstack(level=-1).iterrows():
        # sanity-check the interpolated profile
        if values.max() > 100.0:
            print(
                f"problem with {quantity} exceeding maximum allowed value: {values.max():.03f} {units}"
            )
        elif values.min() < 0.0:
            # report the actual offending (minimum) value, not the maximum
            print(f"problem with {quantity} becoming negative: {values.min():.03f} {units}")
            print(f"Negative values for {quantity} has been set to 0.01 {units}.")
            # Series.where(..., inplace=True) is deprecated; rebind instead
            values = values.where(values > 0.01, 0.01)
        values = [f"{value:.05f}" for value in values]
        values_str = " ".join(values[::-1])  # reverse order for D3D
        f.write(f"{td} {values_str}\n")
    f.write("\n")
def assign_seconds_from_refdate(gcm):
    """Re-index *gcm* on seconds elapsed since its first timestamp.

    D3D interpolates boundary conditions on a "seconds since refdate" axis,
    so a ``seconds_from_ref`` coordinate is added and swapped in as the
    time dimension.

    Returns (re-indexed dataset, reference date string "%Y-%m-%d %H:%M:%S").
    """
    ref_dt = pd.to_datetime(gcm.time.data[0])
    ref_date_str = ref_dt.strftime("%Y-%m-%d %H:%M:%S")
    offsets = pd.to_datetime(gcm.time.data) - ref_dt
    seconds = offsets.days * 86400 + offsets.seconds
    refd = gcm.assign_coords(coords={"seconds_from_ref": ("time", seconds)})
    return refd.swap_dims({"time": "seconds_from_ref"}), ref_date_str
def interpolate_to_pli_point(
    gcm_refd, quantity, x_pli_east, y_pli, pli_point_name, method
):
    """Interpolate GCM fields horizontally onto one pli support point.

    gcm_refd = GCM output with a seconds_from_ref time coordinate
    quantity = variable name (or list of names, e.g. velocity components)
    x_pli_east = longitude of pli point in degrees east (GCM convention)
    y_pli = latitude of pli point
    pli_point_name = support point label (used only in warnings)
    method = horizontal interpolation method for xarray.interp

    Returns the interpolated data — with a ``perc_from_bed`` depth coordinate
    for 3D quantities — or None when no valid depths remain at the point.
    """
    point = gcm_refd[quantity].interp(lon=x_pli_east, lat=y_pli, method=method)
    # surface elevation has no depth axis; the horizontal interp is the result
    if quantity == "surf_el":
        return point
    # drop levels below the bed at the interpolated location
    profile = point.dropna(dim="depth").squeeze()
    try:
        bed_depth = profile.depth[-1]  # deepest remaining GCM level
    except IndexError:
        print(
            f"Depth invalid for {pli_point_name} at: {x_pli_east}, {y_pli}. Omitting point..."
        )
        return None
    # express each depth as a percentage above the bed (D3D bc convention)
    perc_from_bed = 100 * (-1 * profile.depth + bed_depth) / bed_depth
    return profile.assign_coords(
        coords={"perc_from_bed": ("depth", perc_from_bed)}
    )
### main loop ###
if __name__ == "__main__":
    ### arguments ###
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "nc",
        help="GCM NetCDF output containing boundary support points and duration of Delft3D simulation",
    )
    parser.add_argument(
        "quantity",
        help='GCM variable. Must be either "salinity", "water_temp", "velocity", or "surf_el"',
    )
    parser.add_argument(
        "--pli-list",
        nargs="*",
        type=str,
        help="list of boundary support point polyline filenames",
        required=True,
        dest="pli_list",
    )
    parser.add_argument(
        "--bc-filename",
        help="Optional filename for Delft3D boundary condition filename",
        type=str,
        dest="bc_filename",
    )
    parser.add_argument(
        "--depth-avg",
        help="flag to enable depth averaged output",
        default=False,
        action="store_true",
        dest="depth_avg",
    )
    parser.add_argument(
        "--interp-method",
        help="horizontal interpolation method passed to xarray.interp (e.g. linear, nearest)",
        default="linear",
        type=str,
        dest="method",
    )
    args = parser.parse_args()
    gcm = args.nc
    quantity = args.quantity
    pli_list = args.pli_list
    depth_avg = args.depth_avg
    method = args.method
    # validate arguments: all four supported quantities are acceptable
    valid_quantities = ("salinity", "water_temp", "velocity", "surf_el")
    if quantity not in valid_quantities:
        print(
            f"<quantity> was specified as {quantity}, but should be one of {valid_quantities}."
        )
        raise ValueError
    # open gcm NetCDF output as Xarray dataset
    try:
        gcm = xr.open_dataset(Path(gcm), drop_variables="tau")
    except FileNotFoundError as e:
        print("<GCM output> should be path to GCM NetCDF output")
        raise e
    # set default boundary condition filename depending on quantity
    bc_fn = args.bc_filename
    if bc_fn is None:
        default_names = {
            "salinity": "Salinity.bc",
            "water_temp": "Temperature.bc",
            "velocity": "Velocity.bc",
            "surf_el": "WaterLevel.bc",
        }
        bc_fn = Path(default_names[quantity])
    # pli files opened as Pandas DataFrames; records appended per file
    pli_points = []
    for pli_fn in pli_list:
        print(f"Reading in file: {pli_fn}")
        pli = pd.read_csv(
            pli_fn, sep=r"\s+", skiprows=2, header=None, names=["x", "y", "point_id"]
        )
        write_bc_for_pli(bc_fn, gcm, pli, quantity, method=method, depth_avg=depth_avg)
        # add points to list for visualization
        pli_points.append(pli)
    # concat pli points
    pli_points = pd.concat(pli_points)
    ### visualization ###
    # color map depending on quantity
    if quantity == "salinity":
        cmap = cmo.haline
    elif quantity == "water_temp":
        cmap = cmo.thermal
    else:
        cmap = "jet"
    # setup orthographic projection for geographic data
    fig, ax = plt.subplots(
        1, 1, subplot_kw={"projection": ccrs.Orthographic(-91, 29)}, figsize=(16, 9)
    )
    # plot initial quantity at surface (surf_el has no surface-field plot;
    # only the support points are drawn for it)
    if (quantity == "salinity") or (quantity == "water_temp"):
        gcm[quantity].isel(time=0, depth=0).plot(
            ax=ax, transform=ccrs.PlateCarree(), cmap=cmap
        )
    elif quantity == "velocity":
        tmp = gcm.isel(time=0, depth=0)
        tmp["magnitude"] = np.sqrt(tmp["water_u"] ** 2 + tmp["water_v"] ** 2)
        tmp["magnitude"].plot(
            ax=ax,
            transform=ccrs.PlateCarree(),
            cmap=cmap,
            cbar_kwargs={"label": "velocity magnitude [m/s]"},
        )
    # add coastline for reference
    ax.add_feature(cfeature.COASTLINE, edgecolor="0.3")
    # boundary condition support points
    pli_points.plot.scatter(
        "x", "y", marker="x", color="k", ax=ax, transform=ccrs.PlateCarree()
    )
    fig.savefig("point_output_locations.png", bbox_inches="tight")
|
CodyJohnsonCHL/dfm_models | dfm_models/_internal.py | import pathlib
from datetime import datetime as dt
def validate_COOPs_loaded(Obs):
    """Ensure *Obs* carries a non-empty ``COOPs`` station mapping.

    Raises AttributeError when the attribute is missing, TypeError when it
    is empty/falsy; returns None otherwise.
    """
    try:
        stations = Obs.COOPs
    except AttributeError:
        raise AttributeError("Load COOPs stations before downloading data.")
    if not stations:
        raise TypeError(
            "Load COOPs stations before downloading predicted water levels."
        )
def validate_harcon(container):
    """Ensure *container* carries a non-empty ``harcons`` mapping.

    Raises AttributeError when the attribute is missing, TypeError when it
    is empty/falsy; returns None otherwise.
    """
    try:
        harcons = container.harcons
    except AttributeError:
        raise AttributeError(
            f"{container} has no harcons. Check that harmonic analysis has been completed."
        )
    if not harcons:
        raise TypeError(
            "harcons must be a dictionary mapping station names to DataFrame of harmonic constiuents."
        )
def validate_project(project_dir):
    """Validate path to FM model project dir.

    Coerces *project_dir* to a pathlib.Path and verifies it is a directory.

    Returns the Path object; raises NotADirectoryError otherwise.
    """
    if not isinstance(project_dir, pathlib.Path):
        project_dir = pathlib.Path(project_dir)
    if not project_dir.is_dir():
        print(f"{project_dir} is not a valid directory.")
        print("Check path specification.")
        # the original used a bare ``raise`` with no active exception, which
        # surfaced as an unrelated RuntimeError; raise a meaningful error
        raise NotADirectoryError(f"{project_dir} is not a valid directory.")
    return project_dir
def validate_file(file_path):
    """Coerce *file_path* to a pathlib.Path and verify it is an existing file.

    Returns the Path object; raises TypeError when the path is not a file.
    """
    if not isinstance(file_path, pathlib.Path):
        file_path = pathlib.Path(file_path)
    if file_path.is_file():
        return file_path
    raise TypeError(
        f"{file_path} is not a valid file.Check path specification."
    )
def validate_datetime(datetime):
    """Raise TypeError unless *datetime* is a ``datetime.datetime`` instance."""
    if isinstance(datetime, dt):
        return
    raise TypeError(f"{datetime} should be of type datetime.")
def validate_variable(variables):
    """Raise TypeError unless every entry of *variables* is a known GCM variable."""
    valid_vars = ["salinity", "water_temp", "surf_el", "water_u", "water_v"]
    if set(valid_vars).issuperset(variables):
        return
    raise TypeError(f"{variables} should be a subset of {valid_vars}")
def validate_cmems_variable(variables):
    """Raise TypeError unless every entry of *variables* is a known CMEMS variable."""
    valid_vars = ["zos", "uo", "vo", "so", "thetao"]
    if set(valid_vars).issuperset(variables):
        return
    raise TypeError(f"{variables} should be a subset of {valid_vars}")
|
CodyJohnsonCHL/dfm_models | dfm_models/utils/io.py | """Code for io and network calls
<EMAIL>
"""
from pathlib import Path
from tempfile import NamedTemporaryFile as tmp
from urllib.error import HTTPError
from urllib.request import urlretrieve
import numpy as np
import pandas as pd
import xarray as xr
from pydap.cas.get_cookies import setup_session
from dfm_models._internal import (
validate_cmems_variable,
validate_datetime,
validate_variable,
)
def download_ncoda(lats, lons, t0, tf, variables, region=1, fn=None):
    """Subset HYCOM GLBy0.08 (expt 93.0) output using OpenDAP.

    :params:
        lats = [south, north] limits of bbox
        lons = [west, east] limits of bbox
        t0, tf = datetime objects bounding the request
        variables = list of variables in ["salinity", "water_temp", "surf_el", "water_u", "water_v"]
        region = unused for this endpoint (kept for signature compatibility)
        fn = optional path; the subset is also written to NetCDF when given
    :returns:
        Xarray Dataset of selected variables
    """
    validate_datetime(t0)
    validate_datetime(tf)
    _variables, surf_el = fix_surf_el(variables)
    # NOTE(review): the original referenced ``request`` before assigning it
    # (NameError) — build the coordinate-only query first so the bbox can be
    # translated into grid indices.
    request = "https://tds.hycom.org/thredds/dodsC/GLBy0.08/expt_93.0/ssh?lat,lon"
    coords = xr.open_dataset(request)
    lon_ll = lon2index(lons[0], coords, corr=False)  # lower left longtiude of bbox
    lon_ur = lon2index(lons[1], coords, corr=False)
    lat_ll = lat2index(lats[0], coords)
    lat_ur = lat2index(lats[1], coords)
    # the original left the index names uninterpolated (literal "lat_ll" in
    # the URL); interpolate them and end with a comma so projections can
    # be appended.
    # TODO(review): t0/tf are validated but not yet used to subset time —
    # confirm the intended time window against the dataset's time axis.
    request = (
        f"https://tds.hycom.org/thredds/dodsC/GLBy0.08/expt_93.0/ssh?"
        f"lat[{lat_ll}:1:{lat_ur}],lon[{lon_ll}:1:{lon_ur}],time[0:1:7562],"
    )
    request = request + "".join(
        [
            f"{variable}[0:1:0][0:1:39][{lat_ll}:1:{lat_ur}][{lon_ll}:1:{lon_ur}],"
            for variable in _variables
        ]
    )
    # append surf_el if present (no depth axis, so a 3-index projection)
    if surf_el is not None:
        request = (
            request + f"{surf_el}[0:1:0][{lat_ll}:1:{lat_ur}][{lon_ll}:1:{lon_ur}],"
        )
    request = request + "time[0:1:0]"
    ds = xr.open_dataset(request)
    if fn is not None:
        ds.to_netcdf(fn)
    return ds
def download_hycom(lats, lons, datetime, variables, region=1, fn=None):
    """Subset one HYCOM re-analysis snapshot using OpenDAP.

    :params:
        lats = [south, north] limits of bbox
        lons = [west, east] limits of bbox (negative lons are wrapped to
            degrees east by lon2index)
        datetime = "Y-M-D HH:mm" string (or anything pd.to_datetime accepts);
            selects the file hycom_glb_regp{region}_{YYYYMMDD}00_t0{HH}.nc
        variables = list of variables in ["salinity", "water_temp", "surf_el", "water_u", "water_v"]
        region = Hycom re-analysis region (default region=1)
        fn = optional path; the subset is also written to NetCDF when given
    :returns:
        Xarray Dataset of selected variables at a single time step
    """
    datetime = pd.to_datetime(datetime)
    validate_datetime(datetime)
    try:
        validate_variable(variables)
    except NameError:
        raise NameError("Input 'variable' needs to be specified")
    # surf_el has no depth axis, so it is split off and appended with a
    # 3-index projection instead of the 4-index one used for 3D variables
    _variables, surf_el = fix_surf_el(variables)
    ymd = datetime.strftime("%Y%m%d")
    hr = datetime.strftime("%H")
    # first request only the coordinate arrays so the bbox can be
    # translated into grid indices
    request = (
        f"https://www.ncei.noaa.gov/thredds-coastal/dodsC/hycom_region{region}"
        f"/{ymd}/hycom_glb_regp{region:02d}_{ymd}00_t0{hr}.nc?"
        f"depth[0:1:39],lat[0:1:875],lon[0:1:625]"
    )
    # query dataset to get coordinates and convert bbox to indicies for OpenDAP
    coords = xr.open_dataset(request)
    lon_ll = lon2index(lons[0], coords)  # lower left longtiude of bbox
    lon_ur = lon2index(lons[1], coords)
    lat_ll = lat2index(lats[0], coords)
    lat_ur = lat2index(lats[1], coords)
    # second request: the actual data subset over the bbox indices
    request = (
        f"https://www.ncei.noaa.gov/thredds-coastal/dodsC/hycom_region{region}/"
        f"{ymd}/hycom_glb_regp{region:02d}_{ymd}00_t0{hr}.nc?"
        f"depth[0:1:39],lat[{lat_ll}:1:{lat_ur}],lon[{lon_ll}:1:{lon_ur}],"
    )
    request = request + "".join(
        [
            f"{variable}[0:1:0][0:1:39][{lat_ll}:1:{lat_ur}][{lon_ll}:1:{lon_ur}],"
            for variable in _variables
        ]
    )
    # append surf_el if present
    if surf_el is not None:
        request = (
            request + f"{surf_el}[0:1:0][{lat_ll}:1:{lat_ur}][{lon_ll}:1:{lon_ur}],"
        )
    request = request + "time[0:1:0]"
    ds = xr.open_dataset(request)
    if fn is not None:
        ds.to_netcdf(fn)
    return ds
def download_ncom(lats, lons, datetime, variables, region="amseas", fn=None):
    """Subset one NCOM relocatable-model snapshot using OpenDAP.

    :params:
        lats = [south, north] limits of bbox
        lons = [west, east] limits of bbox
        datetime = "Y-M-D HH:mm" string (or anything pd.to_datetime accepts);
            selects the file ncom_relo_{region}_u_{YYYYMMDD}00_t0{HH}.nc
        variables = list of variables in ["salinity", "water_temp", "surf_el", "water_u", "water_v"]
        region = NCOM region name (default "amseas")
        fn = optional path; the subset is also written to NetCDF when given
    :returns:
        Xarray Dataset of selected variables at a single time step
    :raises:
        OSError when either OpenDAP URL cannot be opened
    """
    datetime = pd.to_datetime(datetime)
    validate_datetime(datetime)
    try:
        validate_variable(variables)
    except NameError:
        raise NameError("Input 'variable' needs to be specified")
    # surf_el has no depth axis, so it is split off and appended with a
    # 3-index projection instead of the 4-index one used for 3D variables
    _variables, surf_el = fix_surf_el(variables)
    ymd = datetime.strftime("%Y%m%d")
    hr = datetime.strftime("%H")
    # first request only the coordinate arrays so the bbox can be
    # translated into grid indices
    request = (
        f"https://www.ncei.noaa.gov/thredds-coastal/dodsC/{region}/{region}_20130405_to_current/{ymd}"
        f"/ncom_relo_{region}_u_{ymd}00_t0{hr}.nc?lon[0:1:1293],lat[0:1:813],depth[0:1:39]"
    )
    # query dataset to get coordinates and convert bbox to indicies for OpenDAP
    try:
        coords = xr.open_dataset(request)
    except OSError:
        raise OSError(f"URL not found: {request}")
    lon_ll = lon2index(lons[0], coords)  # lower left longtiude of bbox
    lon_ur = lon2index(lons[1], coords)
    lat_ll = lat2index(lats[0], coords)
    lat_ur = lat2index(lats[1], coords)
    # second request: the actual data subset over the bbox indices
    request = (
        f"https://www.ncei.noaa.gov/thredds-coastal/dodsC/{region}/{region}_20130405_to_current/{ymd}"
        f"/ncom_relo_{region}_u_{ymd}00_t0{hr}.nc?lon[{lon_ll}:1:{lon_ur}],lat[{lat_ll}:1:{lat_ur}],depth[0:1:39],"
    )
    request = request + "".join(
        [
            f"{variable}[0:1:0][0:1:39][{lat_ll}:1:{lat_ur}][{lon_ll}:1:{lon_ur}],"
            for variable in _variables
        ]
    )
    # append surf_el if present
    if surf_el is not None:
        request = (
            request + f"{surf_el}[0:1:0][{lat_ll}:1:{lat_ur}][{lon_ll}:1:{lon_ur}],"
        )
    request = request + "time[0:1:0]"
    try:
        ds = xr.open_dataset(request)
    except OSError:
        raise OSError(f"URL not found: {request}")
    if fn is not None:
        ds.to_netcdf(fn)
    return ds
def download_ocean_ts(lats, lons, t0, tf, variables, download):
    """Download a time series of ocean-model snapshots and concatenate them.

    Calls *download* (one of the single-snapshot functions in this module)
    every 3 hours between t0 and tf, caching each snapshot to a temporary
    directory, and concatenates the results along the time dimension.

    :params:
        lats = [south, north] limits of bbox
        lons = [west, east] limits of bbox
        t0, tf = start/end of the requested window ("Y-M-D HH:mm" strings
            or anything pd.to_datetime accepts)
        variables = variable list forwarded to *download*
        download = callable(lats, lons, datetime, variables, fn=...) returning
            an xarray Dataset (e.g. download_hycom, download_ncom)
    :returns:
        Xarray Dataset concatenated along "time"
    """
    # local import: the module only binds tempfile.NamedTemporaryFile;
    # the original used `random`/`string` without importing them (NameError)
    from tempfile import mkdtemp

    datasets = []
    tmpDir = Path(mkdtemp())  # mkdtemp both names and creates the directory
    t1 = pd.to_datetime(t0)
    t2 = pd.to_datetime(tf)
    date_range = pd.date_range(t1, t2, freq="3H")
    for datetime in date_range:
        fn = tmpDir / datetime.strftime("%Y%m%d.%H%M.nc")
        try:
            ds = download(lats, lons, datetime, variables, fn=fn)
            datasets.append(ds)
        except OSError:
            # a missing remote file should not abort the whole series
            print(f"Skipping timestep {datetime} because URL was not found.")
            continue
    print(f"Successfully downloaded data set to: {tmpDir}")
    return xr.concat(datasets, "time")
def download_COOPs(product, station_name, station_id, datum, begin_date, end_date):
    """Download one CO-OPs time-series product as a pandas Series.

    :params:
        product = CO-OPs product name (e.g. "predictions", "hourly_height")
        station_name = human-readable name (used in error messages only)
        station_id = CO-OPs numeric station id
        datum = vertical datum (e.g. "MSL", "NAVD")
        begin_date, end_date = date strings accepted by the CO-OPs API
    :returns:
        Series named ``water_level_<product>`` indexed by time (GMT)
    :raises:
        HTTPError when the station/product request is rejected
    """
    request = (
        f"https://tidesandcurrents.noaa.gov/api/datagetter?"
        f"begin_date={begin_date}"
        f"&end_date={end_date}"
        f"&station={station_id}"
        f"&product={product}"
        f"&datum={datum}"
        f"&units=metric"
        f"&time_zone=gmt"
        f"&application=ERDC"
        f"&format=csv"
        f"&interval=h"
    )
    fn = tmp()
    try:
        response, http = urlretrieve(request, fn.name)
    except HTTPError:
        print(f"{station_name} with {station_id} was not found in CO-OPs database.")
        print(f"Check url for errors: {request}")
        raise
    # read_csv(..., squeeze=True) was removed in pandas 2.0; squeeze the
    # single data column into a Series explicitly instead
    data = pd.read_csv(
        fn,
        index_col=[0],
        parse_dates=True,
        names=["time", "water_level"],
        header=0,
        usecols=[0, 1],
    ).squeeze("columns")
    data.name = f"water_level_{product}"
    return data
def download_nwis(
    station_name, station_id, begin_date, end_date, data_code=60, skiprows=28
):
    """download data from https://nwis.waterdata.usge and outputs as dataframe
    inputs:
        station_name = user specified name for site (error messages only)
        station_id = USGS site number code
        begin_date = first day in timeseries (YYYY-MM-dd)
        end_date = last day in timeseries (YYYY-MM-dd)
        data_code = USGS parameter code (default 60 = discharge)
        skiprows = number of header rows to skip (default=28)
    return = time series indexed by UTC datetime (pandas Series),
             or np.nan when the timestamp conversion fails
    """
    request = (
        f"https://nwis.waterdata.usgs.gov/usa/nwis/uv/"
        f"?cb_{data_code:05d}=on"
        f"&format=rdb&"
        f"site_no={station_id}"
        f"&period="
        f"&begin_date={begin_date}"
        f"&end_date={end_date}"
    )
    fn = tmp()
    try:
        response, http = urlretrieve(request, fn.name)
    except HTTPError:
        # original message wrongly referred to the CO-OPs database
        print(f"{station_name} with {station_id} was not found in NWIS database.")
        print(f"Check url for errors: {request}")
        raise
    # Pandas: read_csv(..., squeeze=True) was removed in pandas 2.0; squeeze
    # the single data column into a Series explicitly instead
    data = pd.read_csv(
        fn,
        sep="\s+",  # noqa: W605
        skiprows=skiprows,
        usecols=[2, 3, 5],
        parse_dates={"datetime_CST": [0, 1]},
        header=0,
        index_col=0,
        names=["date", "time", "data"],
        dtype={"data": float},
    ).squeeze("columns")
    try:
        # NWIS serves local (America/Chicago) timestamps; convert to naive UTC
        data.index = (
            data.index.tz_localize("America/Chicago", ambiguous=True)
            .tz_convert("UTC")
            .tz_localize(None)
        )
        data.index = data.index.rename("datetime_UTC")
    except AttributeError as e:  # noqa: F841
        print("Problem converting datetime to UTC. Check data")
        return np.nan
    return data
def download_cmems_ts(lats, lons, t0, tf, variables, fn=None):
    """Subset CMEMS output using OpenDAP

    :params:
        lats = [south, north] limits of bbox
        lons = [west, east] limits of bbox
        t0 = datetime for start of time series
        tf = datetime for end of time series
        variables = list of variables in ["zos", "uo", "vo", "so", "thetao"]
        fn = optional path; the subset is also written to NetCDF when given
    :returns:
        Xarray Dataset of selected variables
    """
    validate_datetime(t0)
    validate_datetime(tf)
    try:
        validate_cmems_variable(variables)
    except NameError:
        raise NameError("Input 'variable' needs to be specified")
    # zos (sea-surface height) has no depth axis, so it is split off and
    # appended with a 3-index projection instead of the 4-index one
    _variables, zos = fix_zos(variables)
    # first request only the coordinate arrays so the bbox and time window
    # can be translated into grid indices
    request = (
        "https://my.cmems-du.eu/thredds/dodsC/cmems_mod_glo_phy_my_0.083_P1D-m?"
        "longitude[0:1:4319],latitude[0:1:2040],depth[0:1:49],time[0:1:10012]"
    )
    # query dataset to get coordinates and convert bbox to indicies for OpenDAP
    coords = xr.open_dataset(request)
    lon_ll = cmemslon2index(lons[0], coords)  # lower left longtiude of bbox
    lon_ur = cmemslon2index(lons[1], coords)
    lat_ll = cmemslat2index(lats[0], coords)
    lat_ur = cmemslat2index(lats[1], coords)
    t0i = time2index(t0, coords)
    tfi = time2index(tf, coords)
    # second request: the actual data subset over the bbox and time indices
    request = (
        f"https://my.cmems-du.eu/thredds/dodsC/cmems_mod_glo_phy_my_0.083_P1D-m?"
        f"longitude[{lon_ll}:1:{lon_ur}],latitude[{lat_ll}:1:{lat_ur}],depth[0:1:49],time[{t0i}:1:{tfi}],"
    )
    request = request + "".join(
        [
            f"{variable}[{t0i}:1:{tfi}][0:1:49][{lat_ll}:1:{lat_ur}][{lon_ll}:1:{lon_ur}]"
            for variable in _variables
        ]
    )
    # append surf_el if present
    if zos is not None:
        request = (
            request + f"{zos}[{t0i}:1:{tfi}][{lat_ll}:1:{lat_ur}][{lon_ll}:1:{lon_ur}]"
        )
    ds = xr.open_dataset(request)
    if fn is not None:
        ds.to_netcdf(fn)
    return ds
def copernicusmarine_session(username, password):
    """Authenticate against the CMEMS CAS endpoint and return the session.

    username, password = Copernicus Marine credentials

    Returns the authenticated requests session (with the CASTGC cookie set)
    so callers can reuse it for data requests.
    """
    cas_url = "https://cmems-cas.cls.fr/cas/login"
    session = setup_session(cas_url, username, password)
    session.cookies.set("CASTGC", session.cookies.get_dict()["CASTGC"])
    # the original dropped the session on the floor, making the login useless
    return session
###########
# helpers #
###########
def fix_surf_el(variables):
    """Split "surf_el" out of *variables*.

    Returns (variables_without_surf_el, "surf_el") when present, otherwise
    (variables, None). The input list is never mutated.
    """
    if "surf_el" not in variables:
        return variables, None
    remaining = list(variables)
    remaining.remove("surf_el")
    return remaining, "surf_el"
def fix_zos(variables):
    """Split "zos" (sea-surface height) out of *variables*.

    Returns (variables_without_zos, "zos") when present, otherwise
    (variables, None). The input list is never mutated.
    """
    if "zos" not in variables:
        return variables, None
    remaining = list(variables)
    remaining.remove("zos")
    return remaining, "zos"
def time2index(t, coords):
    """Return the index of the coords time value nearest to *t* (for OpenDAP)."""
    validate_datetime(t)
    times = pd.to_datetime(coords.time.values)
    return np.argmin(np.abs(t - times))
def lon2index(lon, coords, corr=True):
    """Return the index of the coords longitude nearest to *lon* (for OpenDAP).

    When *corr* is True, negative longitudes are wrapped to degrees east.
    """
    if corr and lon < 0:
        lon += 360
    return np.argmin(np.abs(coords.lon.values - lon))
def lat2index(lat, coords):
    """Return the index of the coords latitude nearest to *lat* (for OpenDAP)."""
    return np.argmin(np.abs(coords.lat.values - lat))
def cmemslon2index(lon, coords):
    """Return the index of the CMEMS longitude grid point nearest to *lon*."""
    return np.argmin(np.abs(coords.longitude.values - lon))
def cmemslat2index(lat, coords):
    """Return the index of the CMEMS latitude grid point nearest to *lat*."""
    return np.argmin(np.abs(coords.latitude.values - lat))
|
CodyJohnsonCHL/dfm_models | dfm_models/utils/openDA.py | """Code for working with OpenDA
<EMAIL>
"""
from datetime import datetime as dt
def writeNoosTs(data, Location, x, y, Unit, fn):
    """write time series data in OpenDA ExchangeObject format (NOOS)

    data = pd.Series with a datetime index and the values to write
    Location = station/location name for the header
    x, y = position written as "(x,y)" with five decimals
    Unit = unit string for the header
    fn = output filename (overwritten)
    """
    now = dt.now().strftime("%Y-%m-%d %H:%M")
    with open(fn, "w") as f:
        f.write("#======================================================\n")
        f.write(f"# Generated on {now} \n")
        f.write("#======================================================\n")
        f.write(f"# Location : {Location}\n")
        f.write(f"# Position : ({x:.05f},{y:.05f})\n")
        f.write("# Source : observed\n")
        f.write(f"# Unit : {Unit}\n")
        f.write("# Analyse time : null\n")
        f.write("#======================================================\n")
        # Series.iteritems() was removed in pandas 2.0; items() is equivalent
        for time, value in data.items():
            strftime = time.strftime("%Y%m%d%H%M")
            f.write(f"{strftime}\t{value}\n")
def createNoosConfigFile(stdevs, stationNames, fn):
    """Write an OpenDA noosObserver XML config referencing the given series.

    stdevs = per-station observation standard deviations
    stationNames = NOOS filenames (paired one-to-one with stdevs)
    fn = output filename (overwritten)
    """
    with open(fn, "w") as f:
        f.write('<?xml version="1.0" encoding="UTF-8"?>\n')
        f.write(
            '<noosObserver xmlns="http://www.openda.org" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.openda.org http://schemas.openda.org/observation/noosObservations.xsd">\n'
        )
        f.write("\n")
        # one <timeSeries> entry per station, blank line between entries
        for sigma, station in zip(stdevs, stationNames):
            writeNoosObserver(f, sigma, station)
            f.write("\n")
        f.write("</noosObserver>")
def writeNoosObserver(f, stdev, noosFn):
    """Write a single <timeSeries> entry referencing *noosFn* to *f*."""
    entry = (
        f'\t<timeSeries status="use" standardDeviation="{stdev:.02f}">\n'
        f"\t\t{noosFn}\n"
        "\t</timeSeries>\n"
    )
    f.write(entry)
|
CodyJohnsonCHL/dfm_models | dfm_models/utils/observations.py | <reponame>CodyJohnsonCHL/dfm_models
"""Classes and functions for working with observational data.
<EMAIL>
"""
import json
from collections.abc import Iterable
from tempfile import NamedTemporaryFile as tmp
from urllib.error import HTTPError
from urllib.request import urlretrieve
import pandas as pd
from dfm_models._internal import validate_COOPs_loaded
from dfm_models.utils.io import download_COOPs
class Observations:
    """Container bundling CO-OPs stations and their downloaded series.

    Typical flow: load_COOPs_stations() to register/validate stations, then
    the gen_* methods to populate per-datum dictionaries of observed or
    predicted water levels, or harmonic constituents.
    """

    def __init__(self):
        """Create an empty container; call load_COOPs_stations() to populate."""

    def load_COOPs_stations(self, stations):
        """Validate and register CO-OPs stations (network call per station).

        stations = dict mapping station names to CO-OPs station ids;
        invalid stations are reported and skipped, valid ones are stored
        in self.COOPs as COOPs instances.
        """
        self.COOPs = {}
        # make sure station is iterable
        if not isinstance(stations, Iterable):
            raise TypeError(
                "Station list is not iterable. Check that argument contains a list of valid station names."
            )
        # make sure stations is a dict
        if not type(stations) is dict:
            raise TypeError(
                "Station should be dictionary mapping COOPs station names to station_ids."
            )
        for station_name, station_id in stations.items():
            # verify that COOPs station is valid by probing the metadata API
            request = f"https://api.tidesandcurrents.noaa.gov/mdapi/prod/webapi/stations/{station_id}.json"
            try:
                _, _ = urlretrieve(request)
                station = COOPs(station_name, station_id)
                self.COOPs[station_name] = station
            except HTTPError:
                print(
                    f"{station_name} with id: {station_id} is not a valid COOPs station. Check request:"
                )
                print(request)

    def gen_COOPS_observed_water_levels(self, begin_date, end_date, datum="MSL"):
        """Download observed water levels for every loaded station.

        datum = "MSL", "NAVD", or "LMSL" (local MSL: the MSL series with its
        mean removed); stations whose download fails are skipped.
        """
        validate_COOPs_loaded(self)
        # differentiate object based on datum
        if datum == "MSL":
            self.observed_water_levels_MSL = {}
            for station_name, station in self.COOPs.items():
                try:
                    station.download_observations(datum, begin_date, end_date)
                    self.observed_water_levels_MSL[
                        station_name
                    ] = station.water_level_observations_MSL
                except HTTPError:
                    pass
        elif datum == "NAVD":
            self.observed_water_levels_NAVD = {}
            for station_name, station in self.COOPs.items():
                try:
                    station.download_observations(datum, begin_date, end_date)
                    self.observed_water_levels_NAVD[
                        station_name
                    ] = station.water_level_observations_NAVD
                except HTTPError:
                    pass
        elif datum == "LMSL":
            self.observed_water_levels_LMSL = {}
            for station_name, station in self.COOPs.items():
                try:
                    station.download_observations("MSL", begin_date, end_date)
                    # local MSL: remove the record mean from the MSL series
                    self.observed_water_levels_LMSL[station_name] = (
                        station.water_level_observations_MSL
                        - station.water_level_observations_MSL.mean()
                    )
                except HTTPError:
                    pass

    def gen_COOPS_predicted_water_levels(self, begin_date, end_date, datum="MSL"):
        """Download predicted (tidal) water levels for every loaded station.

        datum = "MSL", "NAVD", or "LMSL"; stations whose download fails
        are skipped.
        """
        validate_COOPs_loaded(self)
        # differentiate object based on datum
        if datum == "MSL":
            self.predicted_water_levels_MSL = {}
            for station_name, station in self.COOPs.items():
                try:
                    station.download_prediction(datum, begin_date, end_date)
                    self.predicted_water_levels_MSL[
                        station_name
                    ] = station.water_level_prediction_MSL
                except HTTPError:
                    pass
        elif datum == "NAVD":
            # BUG FIX: the original initialized `predicted_water_level_NAVD`
            # (missing the "s") and then wrote to `predicted_water_levels_NAVD`,
            # raising AttributeError on the first station
            self.predicted_water_levels_NAVD = {}
            for station_name, station in self.COOPs.items():
                try:
                    station.download_prediction(datum, begin_date, end_date)
                    self.predicted_water_levels_NAVD[
                        station_name
                    ] = station.water_level_prediction_NAVD
                except HTTPError:
                    pass
        elif datum == "LMSL":
            self.predicted_water_levels_LMSL = {}
            for station_name, station in self.COOPs.items():
                try:
                    station.download_prediction("MSL", begin_date, end_date)
                    # local MSL: remove the record mean from the MSL series
                    self.predicted_water_levels_LMSL[station_name] = (
                        station.water_level_prediction_MSL
                        - station.water_level_prediction_MSL.mean()
                    )
                except HTTPError:
                    pass

    def gen_COOPs_harcons(self):
        """Download harmonic constituents for every loaded station into
        self.harcons (station name -> DataFrame)."""
        validate_COOPs_loaded(self)
        self.harcons = {}
        for station_name, station in self.COOPs.items():
            station.download_harcon()
            self.harcons[station_name] = station.harcon
class Station:
    """Base type for observation stations; concrete sources subclass this."""

    def __init__(self):
        """No shared state yet — subclasses define their own attributes."""
class COOPs(Station):
    """A single NOAA CO-OPs tide station.

    Construction immediately fetches station metadata (lat/lon) from the
    CO-OPs metadata API, so creating an instance performs a network call.
    """

    def __init__(self, station_name, station_id):
        """Store identifiers and fetch station metadata (network call).

        station_name = human-readable name (used in error messages)
        station_id = CO-OPs numeric station id
        """
        self.station_name = station_name
        self.station_id = station_id
        self.get_station_metadata()

    def get_station_metadata(self):
        """Fetch the station's metadata JSON and store self.lat / self.lon.

        Raises HTTPError when the station id is unknown to CO-OPs.
        """
        request = f"https://api.tidesandcurrents.noaa.gov/mdapi/prod/webapi/stations/{self.station_id}.json"
        fn = tmp()
        try:
            # urlretrieve writes to fn.name; the still-open tempfile handle
            # is then read back below (same underlying file on POSIX)
            txt, http = urlretrieve(request, fn.name)
        except HTTPError:
            print(
                f"Station metadata for {self.station_name}"
                f"with {self.station_id} was not found in CO-OPs database."
            )
            print(f"Check url for errors: {request}")
            raise
        metadata = json.load(fn)
        self.lat = metadata["stations"][0]["lat"]
        self.lon = metadata["stations"][0]["lng"]

    def download_prediction(self, datum, begin_date, end_date):
        """Download predicted water levels and store them on a
        datum-specific attribute (water_level_prediction_MSL / _NAVD)."""
        data = download_COOPs(
            "predictions",
            self.station_name,
            self.station_id,
            datum,
            begin_date,
            end_date,
        )
        # differentiate object based on datum
        if datum == "MSL":
            self.water_level_prediction_MSL = data
        elif datum == "NAVD":
            self.water_level_prediction_NAVD = data

    def download_observations(self, datum, begin_date, end_date):
        """Download observed hourly water levels and store them on a
        datum-specific attribute (water_level_observations_MSL / _NAVD)."""
        data = download_COOPs(
            "hourly_height",
            self.station_name,
            self.station_id,
            datum,
            begin_date,
            end_date,
        )
        # differentiate object based on datum
        if datum == "MSL":
            self.water_level_observations_MSL = data
        elif datum == "NAVD":
            self.water_level_observations_NAVD = data

    def download_harcon(self):
        """Download the station's harmonic constituents and store them as a
        DataFrame on self.harcon (see get_amp_phase).

        Raises HTTPError when the station has no harcon record.
        """
        request = (
            f"https://api.tidesandcurrents.noaa.gov/mdapi/prod/webapi/stations/"
            f"{self.station_id}/harcon.json?units=metric"
        )
        fn = tmp()
        try:
            txt, http = urlretrieve(request, fn.name)
        except HTTPError:
            print(
                f"Harmonic consts for {self.station_name} with {self.station_id} was not found in CO-OPs database."
            )
            print(f"Check url for errors: {request}")
            raise
        harcon = json.load(fn)
        self.harcon = get_amp_phase(harcon)
##########################
# functions #
##########################
# parse json of harcons
def get_amp_phase(harcon):
    """Parse a CO-OPs harmonic-constituent JSON payload into a DataFrame.

    :param harcon: decoded JSON dict with a "HarmonicConstituents" list of
        records carrying "name", "amplitude", "phase_GMT" and "speed"
    :returns: DataFrame indexed by constituent name with columns
        "amplitude", "phase" and "speed"
    """
    # comprehensions replace the old append loops (which also carried a
    # stray no-op ``pass`` statement)
    consts = harcon["HarmonicConstituents"]
    names = [c["name"] for c in consts]
    data = {
        "amplitude": [c["amplitude"] for c in consts],
        "phase": [c["phase_GMT"] for c in consts],
        "speed": [c["speed"] for c in consts],
    }
    return pd.DataFrame(index=names, data=data)
|
CodyJohnsonCHL/dfm_models | dfm_models/project/InitialConditions.py | <gh_stars>0
"""Object representing FM model
<EMAIL>
"""
import numpy as np
from scipy.interpolate import LinearNDInterpolator
def interp2mesh(da, mesh_data, mod180=False):
    """Interpolate a variable from an Xarray DataArray onto mesh nodes.

    :param da: DataArray with 1-D "lon"/"lat" coordinates and a 2-D
        (lat, lon) value grid
    :param mesh_data: Dataset with "NetNode_x"/"NetNode_y" node coordinates
    :param mod180: if True, wrap negative source longitudes into [0, 360)
    :returns: interpolated values at the mesh nodes (NaN outside the hull)
    """
    # Work on copies: ``.values`` exposes a view of the underlying storage,
    # so the in-place longitude wrapping below would otherwise silently
    # modify the caller's datasets.
    xv = mesh_data.NetNode_x.values.copy()
    xv[xv < 0] += 360  # mesh longitudes wrapped to [0, 360)
    yv = mesh_data.NetNode_y.values
    pv = np.column_stack((xv, yv))
    x = da.lon.values.copy()
    if mod180:
        x[x < 0] += 360
    y = da.lat.values
    X, Y = np.meshgrid(x, y)
    p = np.column_stack((X.ravel(), Y.ravel()))
    v = da.values.ravel()
    idx = ~np.isnan(v)  # LinearNDInterpolator cannot take NaN data points
    interpolator = LinearNDInterpolator(p[idx, :], v[idx])
    return interpolator(pv)
|
CodyJohnsonCHL/dfm_models | dfm_models/utils/calibration.py | """Classes and functions for calibration.
<EMAIL>
"""
import numpy as np
def write_correction_file(corr_fn, correction_factors):
    """Write astronomic correction factors to a Delft3D-FM forcing file.

    :param corr_fn: path of the file to (over)write
    :param correction_factors: mapping boundary point -> {component: factor}
    """
    with open(corr_fn, "w") as out:
        for boundary_point, factors in correction_factors.items():
            header = (
                "[forcing]\n"
                f"Name = {boundary_point}\n"
                "Function = astronomic-correction\n"
                "Quantity = astronomic component\n"
                "Unit = -\n"
                "Quantity = waterlevelbnd amplitude\n"
                "Quantity = waterlevelbnd phase\n"
                "Unit = deg\n"
            )
            out.write(header)
            for component, corr_fac in factors.items():
                out.write(f"{component:<8s}{corr_fac:>8.6f} 0.0\n")
            out.write("\n")
def compute_phase_offset(BC, const_errors, stations, thresh=0.01):
    """IDW-interpolated phase offsets for each boundary point of *BC*.

    Only constituents whose amplitude exceeds *thresh* are considered.
    :returns: dict point_id -> {component: phase offset}
    """
    phase_offsets = {}
    for bp in BC.boundary_points.itertuples():
        harcons = BC.harcons[bp.point_id]
        significant = harcons[harcons.amplitude > thresh]
        offsets = {}
        for component in significant.component:
            # phase offsets use a squared inverse-distance weight (p=2)
            offsets[component] = idw(bp, const_errors, stations, component, p=2)
        phase_offsets[bp.point_id] = offsets
    return phase_offsets
def compute_amp_correction_factor(BC, const_errors, stations, thresh=0.01):
    """IDW-based amplitude correction factors for the boundary points of *BC*.

    For each significant constituent the factor is A = a / (a - eps) where
    a is the boundary amplitude and eps the interpolated observation error.
    :returns: dict point_id -> {component: correction factor}
    """
    correction_factors = {}
    for bp in BC.boundary_points.itertuples():
        harcons = BC.harcons[bp.point_id]
        significant = harcons[harcons.amplitude > thresh]
        factors = {}
        for component in significant.component:
            eps = idw(bp, const_errors, stations, component)
            amp = significant[significant.component == component]["amplitude"].values[0]
            # calculate correction factor
            factors[component] = amp / (amp - eps)
        correction_factors[bp.point_id] = factors
    return correction_factors
def idw(boundary_point, const_errors, stations, component, p=1):
    """Inverse-distance-weighted error of *component* at a boundary point.

    :param boundary_point: NamedTuple with x/y (lon/lat) attributes
    :param const_errors: station name -> Series of per-constituent errors
    :param stations: station name -> COOPs object with lat/lon attributes
    :param component: harmonic constituent name
    :param p: exponent of the inverse-distance weight
    :raises KeyError: if *component* is missing from a station's error table
    """
    bp_ll = (boundary_point.y, boundary_point.x)
    errors = []
    weights = []
    for name, station in stations.items():
        # make sure that components match
        try:
            err = const_errors[name].loc[component]
        except KeyError:
            raise KeyError(
                f"Component: {component} was not found in error table for {name}"
            )
        errors.append(err)
        weights.append(weight_function(bp_ll, (station.lat, station.lon), p))
    errors = np.array(errors)
    weights = np.array(weights)
    return np.sum(weights * errors) / np.sum(weights)
def weight_function(bm_ll, sn_ll, p):
    """Inverse-distance weight between a boundary point and a station.

    :param bm_ll: (lat, lon) tuple of the boundary point, in degrees
    :param sn_ll: (lat, lon) tuple of the observation point, in degrees
    :param p: exponent in the weight function
    """
    dist = haversine_distance(bm_ll[0], bm_ll[1], sn_ll[0], sn_ll[1])
    return dist ** -p
def haversine_distance(lat1, lon1, lat2, lon2):
    """Great-circle distance in meters between two WGS84 points.

    Inputs are degrees longitude/latitude; scalars or numpy arrays.
    The previous version converted to radians with in-place operators
    (``lat1 *= ...``), which mutated ndarray arguments in the caller;
    the conversion is now side-effect free.
    """
    deg2rad = np.pi / 180
    phi1 = lat1 * deg2rad
    lam1 = lon1 * deg2rad
    phi2 = lat2 * deg2rad
    lam2 = lon2 * deg2rad
    R = 6371e3  # radius of earth in meters
    a = (
        np.sin((phi2 - phi1) / 2) ** 2
        + np.cos(phi1) * np.cos(phi2) * np.sin((lam2 - lam1) / 2) ** 2
    )
    c = 2 * np.arctan2(np.sqrt(a), np.sqrt(1 - a))
    return R * c
|
CodyJohnsonCHL/dfm_models | dfm_models/utils/analysis.py | <filename>dfm_models/utils/analysis.py
"""Analysis tools for comparing model results to observations
<EMAIL>
"""
import pandas as pd
import pytide
from numpy import abs, angle
from dfm_models._internal import validate_harcon
def compute_harcon_error(Observations, Results):
    """Amplitude and phase errors between CO-OPs and FM model constituents.

    :returns: (amplitude_error, phase_error) dicts keyed by station name;
        amplitude errors are sorted descending, phase errors are a Series
        named "phase" indexed by constituent
    """
    amplitude_error = {}
    phase_error = {}
    validate_harcon(Observations)
    validate_harcon(Results.his_output)
    common_stations = get_common_stations(
        Observations.harcons, Results.his_output.harcons
    )
    for station in common_stations:
        res_harcons = Results.his_output.harcons[station]
        obs_harcons = Observations.harcons[station]
        amp_diff = (res_harcons - obs_harcons)["amplitude"]
        amplitude_error[station] = amp_diff.dropna().sort_values(ascending=False)
        res_phase = res_harcons["phase"]
        obs_phase = obs_harcons["phase"]
        common_comps = get_common_components(res_phase, obs_phase)
        errs = [
            compute_phase_error(res_phase.loc[comp], obs_phase.loc[comp])
            for comp in common_comps
        ]
        series = pd.Series(errs, index=common_comps)
        series.name = "phase"
        phase_error[station] = series
    return amplitude_error, phase_error
def compute_phase_error(res, obs):
    """Return the phase difference ``res - obs`` wrapped into [0, 360).

    A single modulo replaces the old one-shot normalization, which could
    return values outside [0, 360) for phases outside (-360, 360); results
    for in-range inputs are unchanged.
    """
    return (res - obs) % 360
##########################
# tidal harmonic #
##########################
def harmonic_analysis(
    waterlevel,
    time,
    consts=[
        "K1",
        "O1",
        "P1",
        "M2",
        "Q1",
        "S2",
        "S1",
        "Mf",
        "N2",
        "K2",
        "J1",
        "Mm",
        "M4",
        "Sa",
        "Ssa",
    ],
):
    """Fit tidal harmonic constituents to a water-level series via pytide.

    :param waterlevel: pandas Series of water levels
    :param time: time vector matching *waterlevel*
    :param consts: constituent names to fit
    :returns: (complex constants w, (observed, predicted, time), consts)
    """
    wave_table = pytide.WaveTable(consts)
    observed = waterlevel.values
    f, vu = wave_table.compute_nodal_modulations(time)
    w = wave_table.harmonic_analysis(observed, f, vu)
    predicted = wave_table.tide_from_tide_series(time, w)
    return w, (observed, predicted, time), consts
def get_modulus_angle(w):
    """Split complex harmonic constants into (modulus, angle in degrees)."""
    return abs(w), angle(w, deg=True)
##########################
# getters #
##########################
def get_common_stations(obs_harcons, res_harcons):
    """Stations present in both observation and result harcon dicts.

    :returns: list of common station names, or None (after printing a
        message) when the two sets do not overlap
    """
    common = set(obs_harcons) & set(res_harcons)
    if not common:
        print("There are no common stations between observations and results.")
        return None
    return list(common)
def get_common_components(results, obs):
    """Constituents present in both result and observation series indexes."""
    # cast to sets so the intersection operator applies
    return list(set(results.index) & set(obs.index))
|
CodyJohnsonCHL/dfm_models | dfm_models/utils/metrics.py | <reponame>CodyJohnsonCHL/dfm_models
from numpy import abs, nanmax, nanmean, nanmin, nanstd, nanvar, sqrt
def rmse(model, ref):
    """Root mean square error; ideal value is zero.

    :param model: numerical solution of shape M by N
    :param ref: analytical solution or observations of shape M by N
    :returns: rmse
    """
    return sqrt(nanmean((model - ref) ** 2))
def nrmse(model, ref):
    """RMSE normalized by the data range; ideal value is zero.

    NOTE(review): the range is taken from *model*, not *ref* — confirm
    this is intended.
    """
    model_range = nanmax(model) - nanmin(model)
    return rmse(model, ref) / model_range
def r2(model, ref):
    """Coefficient of determination; ideal value is 1.

    :param model: numerical solution of shape M by N
    :param ref: analytical solution or observation of shape M by N
    :returns: r2
    """
    residual_mse = nanmean((model - ref) ** 2)
    return 1 - residual_mse / nanvar(ref)
def r(model, ref):
    """Pearson correlation coefficient.

    :param model: numerical solution of shape M by N
    :param ref: analytical solution or observation of shape M by N
    :returns: r
    """
    model_dev = model - nanmean(model)
    ref_dev = ref - nanmean(ref)
    cov = nanmean(model_dev * ref_dev)
    denom = sqrt(nanmean(model_dev ** 2) * nanmean(ref_dev ** 2))
    return cov / denom
def SI(model, ref):
    """Scatter index: std of the error over the mean absolute reference.

    :param model: numerical solution of shape M by N
    :param ref: analytical solution or observation of shape M by N
    :returns: SI
    """
    return nanstd(model - ref) / nanmean(abs(ref))
def bias(model, ref):
    """Mean error (model - ref).

    :param model: numerical solution of shape M by N
    :param ref: analytical solution of shape M by N
    :returns: bias
    """
    return nanmean(model - ref)
def nb(model, ref):
    """Bias normalized by the mean absolute reference.

    :param model: numerical solution of shape M by N
    :param ref: analytical solution or observation of shape M by N
    :returns: nb
    """
    return nanmean(model - ref) / nanmean(abs(ref))
|
CodyJohnsonCHL/dfm_models | dfm_models/project/FMmodel.py | """Object representing FM model
<EMAIL>
"""
import re
from collections.abc import Iterable
import numpy as np
import pandas as pd
import pytide
import xarray as xr
from dfm_models._internal import validate_file, validate_project
class Model:
    """Top-level object for organizing FM model input/output."""

    def __init__(self, name, project_dir, results=None, **kwargs):
        """Validate the project directory and load the MDU parameters.

        :param name: model name
        :param project_dir: path of the project directory
        :param results: optional Results instance
        :raises TypeError: if *results* is neither None nor a Results
        """
        self.name = name
        self.project_dir = validate_project(project_dir)
        self.mdu = find_MDU(project_dir)
        self.params = load_MDU(self.mdu)
        # accept None or a proper Results object, reject anything else
        if results is not None and not isinstance(results, Results):
            raise TypeError(
                "Results object of incorrect type. Initialize as FMmodel.Results class"
            )
        self.results = results
class Grid:
    """Base class for model grids (see ``UnstructGrid``)."""
    def __init__(self):
        """Placeholder constructor; subclasses set up their own state."""
class UnstructGrid(Grid):
    """Unstructured (UGRID) model grid backed by a netCDF file."""

    def __init__(self, fn):
        """Load the UGRID dataset from *fn*.

        :param fn: path of the *_net.nc file
        """
        self.fn = fn
        self.data = self.load_ugrid()

    def load_ugrid(self):
        """Open the grid netCDF file as an Xarray Dataset."""
        return xr.open_dataset(self.fn)
class Results:
    """Class to organize various results from an FM model."""
    def __init__(self, his_output):
        # hisOutput instance wrapping the *_his.nc history file
        self.his_output = his_output
class hisOutput:
    """Class representation of a history (*_his.nc) file.

    This is intended to package pre/post-processing code to work with
    history data. Add data processing methods here.
    """
    def __init__(self, his_fn):
        # validated path of the history netCDF file
        self.his_fn = validate_file(his_fn)
        # Xarray Dataset with station_name as a dimension (see load_his)
        self.data = load_his(self.his_fn)
    def water_level_comp_data(self, stations):
        """Collect water-level series and their demeaned values per station.

        Populates ``self.water_level`` and ``self.water_level_LMSL`` dicts
        keyed by station name (missing stations are reported and skipped).
        :param stations: iterable of station names to extract
        :raises TypeError: if *stations* is not iterable
        """
        water_level = {}
        water_level_LMSL = {}
        # make sure station is iterable
        if not isinstance(stations, Iterable):
            raise TypeError(
                "Station list is not iterable. Check that argument contains a list of valid station names."
            )
        # Check if any stations are in his_output
        if not any(item in stations for item in self.data.station_name):
            print(f"No stations in {stations} are in history output.")
            self.water_level = water_level
            self.water_level_LMSL = water_level_LMSL
        else:
            for station_name in stations:
                try:
                    data = (
                        self.data["waterlevel"]
                        .sel(station_name=station_name)
                        .to_dataframe()["waterlevel"]
                    )
                    # NOTE(review): no-op — rename() returns a copy that is
                    # discarded; confirm the intent.
                    data.rename()
                    water_level[station_name] = data
                    # demean to local mean sea level
                    LMSL = data.mean()
                    water_level_LMSL[station_name] = data - LMSL
                except KeyError:
                    print(
                        f"{station_name} from station list doesn't match with any history output station names."
                    )
            self.water_level = water_level
            self.water_level_LMSL = water_level_LMSL
    def harmonic_analysis(
        self,
        consts=[
            "K1",
            "O1",
            "Sa",
            "Ssa",
            "P1",
            "Q1",
            "M2",
            "OO1",
            "M11",
            "S2",
            "J1",
            "Rho1",
            "K2",
            "S1",
            "Mm",
            "MSf",
            "N2",
            "MS4",
        ],
    ):
        """Harmonic analysis of each station's water level via pytide.

        Populates ``self.harcons``: station name -> DataFrame with
        amplitude/phase rows indexed by upper-cased constituent name.
        :param consts: constituent names to fit; None selects pytide's
            full known set
        """
        harcons = {}
        for station_name in self.data["station_name"].values:
            data = self.data["waterlevel"].sel(station_name=station_name).values
            time = self.data.time.values
            # use full set of constants if consts isn't defined
            if consts is None:
                wt = pytide.WaveTable()
                consts = wt.known_constituents()
            else:
                wt = pytide.WaveTable(consts)
            f, vu = wt.compute_nodal_modulations(time)
            w = wt.harmonic_analysis(data, f, vu)
            amplitude = np.abs(w)
            phase = np.angle(w, deg=True)
            harcons[station_name] = pd.DataFrame(
                {"amplitude": amplitude, "phase": phase},
                index=[const.upper() for const in consts],
            )
        self.harcons = harcons
#################
# functions #
#################
def find_MDU(project_dir):
    """Locate the *.mdu file in *project_dir*.

    :param project_dir: pathlib.Path of the project directory
    :returns: Path of the first MDU file found
    :raises FileNotFoundError: if the directory contains no *.mdu file
        (the old ``list(...)[0]`` raised an IndexError before its
        "not found" message could ever run)
    """
    mdus = list(project_dir.glob("*.mdu"))
    if not mdus:
        raise FileNotFoundError(
            f"No MDU found in {project_dir}. Check MDU file existence."
        )
    return mdus[0]
def find_net(project_dir):
    """Locate the *_net.nc grid file in *project_dir*.

    :param project_dir: pathlib.Path of the project directory
    :returns: Path of the first net file found
    :raises FileNotFoundError: if the directory contains no *_net.nc file
        (the old ``list(...)[0]`` raised an IndexError before its
        "not found" message could ever run)
    """
    nets = list(project_dir.glob("*_net.nc"))
    if not nets:
        raise FileNotFoundError(
            f"No Net file found in {project_dir}. Check net file existence."
        )
    return nets[0]
def load_MDU(mdu):
    """Parse an MDU file into a {parameter: value} dict.

    Only lines of the form ``key = value # comment`` are captured; lines
    without a trailing comment do not match the pattern and are skipped.
    :param mdu: path of the MDU file
    """
    key_value = re.compile("(.*)=(.*)#.*")
    params = {}
    with open(mdu, "r") as f:
        for line in f:
            match = key_value.match(line)
            if match is not None:
                params[match.group(1).strip()] = match.group(2).strip()
    return params
def load_his(his_fn):
    """Load a *_his.nc history file as an Xarray Dataset.

    Station names are decoded from bytes to str and promoted to replace
    the "stations" dimension; the redundant "station_id" variable is
    dropped.
    """
    ds = xr.open_dataset(his_fn)
    ds["station_name"] = ds["station_name"].str.decode("utf-8")
    ds = ds.swap_dims({"stations": "station_name"})
    return ds.drop_vars(["station_id"])
|
CodyJohnsonCHL/dfm_models | dfm_models/project/BoundaryConditions.py | """Object representing FM boundary conditions
<EMAIL>
"""
import re
import pandas as pd
from dfm_models._internal import validate_file
class Boundary:
    """Base class for FM boundary conditions."""
    def __init__(self):
        """Placeholder constructor; subclasses set up their own state."""
class WaterLevelBC(Boundary):
    """Base class for water-level boundary conditions."""
    def __init__(self):
        """Placeholder constructor; subclasses set up their own state."""
class AstroWaterLevelBC(WaterLevelBC):
    """Astronomic water-level boundary defined by a *.pli and a *.bc file."""

    def __init__(self, pli_fn, bc_fn):
        """Parse boundary support points and their harmonic constituents.

        :param pli_fn: polyline file with the boundary support points
        :param bc_fn: boundary-condition file with astronomic components
        """
        self.pli_fn = validate_file(pli_fn)
        self.boundary_points = parse_pli(self.pli_fn)
        self.bc_fn = validate_file(bc_fn)
        self.get_harmonic_constituents()

    def get_harmonic_constituents(self):
        """Read the harmonic constituents for every boundary point into
        ``self.harcons`` (point_id -> DataFrame)."""
        self.harcons = {
            point_id: parse_bc(self.bc_fn, point_id)
            for point_id in self.boundary_points.point_id
        }
#################
# functions #
#################
def parse_pli(pli_fn):
    """Read boundary support points from a *.pli polyline file.

    :param pli_fn: path of the polyline file (two header lines, then
        whitespace-separated "x y point_id" rows)
    :returns: DataFrame with columns x, y, point_id
    """
    # raw string for the separator regex: the old "\s+" literal relied on
    # an invalid escape sequence (hence the previous noqa: W605)
    return pd.read_csv(
        pli_fn, skiprows=2, sep=r"\s+", names=["x", "y", "point_id"]
    )
def parse_bc(bc_fn, point_id, quantity="astronomic"):
    """Read the astronomic components of *point_id* from a *.bc file.

    :param bc_fn: boundary-condition file path
    :param point_id: boundary point name searched for (used as a regex)
    :param quantity: unused; kept for interface compatibility
    :returns: DataFrame with columns component, amplitude, phase
        (37 rows, the record's fixed table size)
    """
    # regex to find the record for point_id
    record = re.compile(point_id)
    # scan for the line index of the record header
    i = 0
    with open(bc_fn, mode="r") as f:
        for line in f:
            if record.search(line):
                break
            i += 1
    # each record has 8 header lines between its name and its data table
    skiprows = i + 8
    # raw string for the separator regex: the old "\s+" literal relied on
    # an invalid escape sequence (hence the previous noqa: W605)
    return pd.read_csv(
        bc_fn,
        sep=r"\s+",
        names=["component", "amplitude", "phase"],
        skiprows=skiprows,
        nrows=37,
    )
|
CodyJohnsonCHL/dfm_models | dfm_models/utils/visualization.py | <filename>dfm_models/utils/visualization.py
"""Visualization tools for comparing model results to observations
<EMAIL>
"""
import geoviews as gv
import geoviews.feature as gf
import geoviews.tile_sources as gts
import holoviews as hv
import matplotlib.pyplot as plt
import numpy as np
from bokeh.models import HoverTool
from geoviews import opts
def spatial_stat(stats, fn):
    """Interactive geographic map of per-station statistics.

    Builds a HoloMap (one layer per statistic column of *stats*) of points
    over coastline/ESRI imagery, saves it to *fn* and returns it.
    :param stats: DataFrame with lon/lat columns, station/number columns and
        the statistic columns named in ``clabels``
    :param fn: output target passed to ``gv.save``
    """
    # data
    lon = hv.Dimension("lon", label="Longitude [deg]")
    lat = hv.Dimension("lat", label="Latitude [deg]")
    hv_stats = hv.Table(stats, kdims=[lon, lat])
    # NOTE(review): assumes the statistic columns sit between the first two
    # and the last two columns of *stats* — confirm against the caller.
    cols = stats.columns[2:-2].values
    clabels = {
        "nrmse": "normalized RMSE [% range]",
        "nrmse_tide": "normalized RMSE [% tidal range]",
        "rmse_cm": "RMSE [cm]",
        "r2": "r-squared [.]",
    }
    # hover tool
    tooltips = [
        ("Station", "@station"),
        ("# obs.", "@number"),
        ("Normalized RMSE [% range]", "@nrmse"),
        ("Normalized RMSE [% tidal range]", "@nrmse_tide"),
        ("RMSE [cm]", "@rmse_cm"),
        ("r-squared", "@r2"),
    ]
    hover = HoverTool(tooltips=tooltips)
    # style
    psize = 10  # point marker size
    cst_lw = 1.25  # coastline line width
    # Holoviews options
    cOpts = opts.LineContours(line_width=cst_lw)
    overOpts = opts.Overlay(aspect=6.5 / 3, responsive=True)
    # generate holomap
    holomap = hv.HoloMap(kdims="Statistic")
    for col in cols:
        clabel = clabels[col]  # colorbar text label
        # options for points
        pOpts = opts.Points(
            size=psize,
            color=col,
            cmap="turbo",
            colorbar=True,
            clabel=clabel,
            tools=[hover],
            clim=(0, hv_stats[col].max()),
        )
        # put together
        overlay = (
            gf.coastline(scale="10m").opts(cOpts)
            * gv.Points(hv_stats).opts(pOpts)
            * gts.EsriImagery
        )
        # map
        holomap[col] = overlay.opts(overOpts)
    # save output
    gv.save(holomap, fn)
    return holomap
def one2one(obs, mod, quantity_str="water level [m, MSL]", lims=None, ax=None):
    """Scatter plot of modeled vs observed values with a 1:1 line.

    A band of +/- one standard deviation of *obs* (measured perpendicular
    to the 1:1 line) is drawn around it.

    :param obs: observed values
    :param mod: modeled values
    :param quantity_str: axis-label text for the plotted quantity
    :param lims: optional [min, max] axis limits; derived from data if None
    :param ax: optional existing axes to draw into
    :returns: (fig, ax) when a new figure is created, otherwise *ax*
    """
    # perpendicular offset of the band edges projected onto the axes
    std_shift = np.sin(np.pi / 4) * obs.std()
    if lims is None:
        lims = [np.min([obs.min(), mod.min()]), np.max([obs.max(), mod.max()])]

    created_fig = None
    if ax is None:
        created_fig, ax = plt.subplots(figsize=(5, 5))

    # single plotting path (this code was previously duplicated verbatim
    # in both the new-figure and existing-axes branches)
    ax.scatter(obs, mod, 15)
    ax.plot(lims, lims, color="k", linestyle="--")
    ax.plot([lims[0] + std_shift, lims[1]], [lims[0], lims[1] - std_shift],
            color="k", lw=0.75)
    ax.plot([lims[0], lims[1] - std_shift], [lims[0] + std_shift, lims[1]],
            color="k", lw=0.75)
    ax.fill_between(
        lims,
        [lims[0] - std_shift, lims[1] - std_shift],
        [lims[0] + std_shift, lims[1] + std_shift],
        alpha=0.2,
        color="gray",
    )
    ax.set_xlim(lims)
    ax.set_ylim(lims)
    ax.set_xlabel(f"observed {quantity_str}")
    ax.set_ylabel(f"modeled {quantity_str}")

    if created_fig is not None:
        return created_fig, ax
    return ax
def water_level_holomap(water_levels, label):
    """HoloMap of water-level curves keyed by station name.

    :param water_levels: dict station name -> water-level series
    :param label: group/label text applied to every curve
    """
    curves = {}
    for station, wl in water_levels.items():
        curves[station] = hv.Curve(wl, group=label, label=label)
    return hv.HoloMap(curves, kdims=hv.Dimension("station"))
def harcon_error_holomap(harcon_errors, label, drop_small=True):
    """HoloMap of harmonic-constituent error bars keyed by station.

    :param harcon_errors: dict station name -> error Series
    :param label: group/label text applied to every bar chart
    :param drop_small: if True, drop constituents with error below 0.005
    """
    bars = {}
    for station, error in harcon_errors.items():
        data = error[error >= 0.005] if drop_small else error
        bars[station] = hv.Bars(data, group=label, label=label)
    return hv.HoloMap(bars, kdims=hv.Dimension("station"))
|
IsSveshuD/lab_5 | Individual_1.py | <gh_stars>0
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#Дано предложение. В нем слова разделены одним пробелом (символ «-» в предложении отсутствует). Верно ли, что число слов в предложении больше трех?
import sys
if __name__ == '__main__':
    # read a sentence and report whether it has more than three words
    s = input("Введите предложение: ")
    words = s.split(' ')
    if len(words) > 3:
        # NOTE(review): the success message goes to stderr — presumably a
        # course requirement; confirm before changing.
        print(
            "Число слов в предложении больше трёх.",
            file = sys.stderr
        )
    else:
        print("Число слов в предложении не больше трёх.")
|
IsSveshuD/lab_5 | Individual_3.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#Дан текст вида «d1±d2±…±dn», где di – цифры (0–9). Вычислить записанную в тексте алгебраическую сумму.
# Prepend '+' so every number in the text carries an explicit sign, then
# sum the signed two-character chunks: "3-5+2" -> (+3) + (-5) + (+2).
s = '+' + input()
print(sum([int(s[i : i + 2]) for i in range(0, len(s), 2)]))
IsSveshuD/lab_5 | Individual_2.py | <gh_stars>0
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#Дано слово. Поменять местами его вторую и пятую буквы.
# Swap the 2nd and 5th letters (indices 1 and 4) of each entered word.
s = input("Введите слово: ")
r = s.split()
d = []
for i in r:
    # NOTE(review): raises IndexError for words shorter than 5 letters —
    # confirm whether such input should be handled.
    d.append(i[0] + i[4] + i[2] + i[3] + i[1] + i[5:])
print (d)
|
ChrisDryden/VerifHire | server.py | from web3 import Web3
from solc import compile_files
# web3.py instance
# web3.py instance — connects to a local Ethereum JSON-RPC node on :8545
# (e.g. ganache or geth in dev mode)
w3 = Web3(Web3.HTTPProvider("http://127.0.0.1:8545"))
# compile all contract files
contracts = compile_files(['user.sol', 'stringUtils.sol'])
# separate main file and link file
main_contract = contracts.pop("user.sol:userRecords")
library_link = contracts.pop("stringUtils.sol:StringUtils")
locuslab/sdp_mrf | sdp_mrf/solvers.py | import numpy as np
import time
from abc import ABC, abstractmethod
import sys, os
from . import _solvers
def get_string_from_vector(v):
    '''
    Convert a vector of decimal digits to its string form.
    :param v: a numpy array with digits in 0-9
    Returns: the string form of v
    '''
    for digit in v:
        assert digit >= 0 and digit < 10
    return "".join(str(digit) for digit in v)
def get_vector_from_string(s):
    '''
    Convert a digit string to its vector form.
    :param s: a string with digits in 0-9
    Returns: the numpy array form of s
    '''
    digits = []
    for ch in s:
        value = int(ch)
        assert value >= 0 and value < 10
        digits.append(value)
    return np.array(digits)
def get_f(A, h, s):
    """
    Compute f = \\sum_{ij}Aij\\delta(si, sj)/2 + \\sum_i\\sum_l h_il\\delta(si, l)
    (with the constant offsets used throughout this module).
    :param A: the coupling matrix, numpy array of dim (n, n)
    :param h: the unary biases, numpy array of dim (n, k)
    :param s: the argument at which f is to be computed, string/numpy array
    Returns: f value at s
    """
    n = A.shape[0]
    k = h.shape[1]
    if isinstance(s, str):
        s = np.array(list(s), dtype=int)
    # one-hot encoding of the configuration, shape (k, n)
    one_hot = np.zeros((k, n))
    one_hot[s, np.arange(n)] = 1
    pairwise = np.sum((one_hot.T @ one_hot) * A) - np.sum(A) / 2
    unary = 2 * np.sum((one_hot.T @ np.eye(k)) * h) - np.sum(h)
    return pairwise + unary
def rand_unit_vector(k, d):
    '''
    Samples k uniformly random unit vectors of dimension d.
    :param k: number of unit vectors to sample, int
    :param d: dimension of unit vectors, int
    Returns: numpy array of dim (k, d) with rows as unit vectors
    '''
    gauss = np.random.normal(0, 1, size=(k, d))
    norms = np.linalg.norm(gauss, axis=1, keepdims=True)
    return gauss / norms
def obtain_rounded_v(V, B):
    '''
    Obtain a rounded solution from SDP solution V and simplex B.
    :param V: SDP solution, numpy array of dim (n, d)
    :param B: simplex, numpy array of dim (k, d)
    Returns: the rounded configuration, numpy array of dim (n,)
    '''
    n, d = V.shape
    k = B.shape[0]
    r = rand_unit_vector(k, d)
    # assign each row of V to its best-aligned random direction
    nearest = np.argmax(V @ r.T, axis=1)
    one_hot = np.zeros((n, k))
    one_hot[np.arange(n), nearest] = 1
    # shape (num_classes): says that r_i maps to simplex vertex r_to_B[i]
    r_to_B = np.argmax(r @ B.T, axis=1)
    transform = np.zeros((k, k))
    transform[np.arange(k), r_to_B] = 1
    # relabel the assignment through the direction -> vertex map
    return np.argmax(one_hot @ transform, axis=1)
def ensure_C(x):
    '''Return *x* as a C-contiguous float32 numpy array.'''
    arr = np.ascontiguousarray(x, dtype=np.float32)
    return arr
def LSE(y):
    '''Numerically stable log-sum-exp of the values in y.'''
    shift = np.max(y)  # subtract the max to avoid overflow in exp
    return np.log(np.sum(np.exp(np.array(y) - shift))) + shift
# get regular or probabilistic simplex
def get_simplex(k, d, is_prob=False):
    '''Return a k-simplex embedded in R^d.

    If is_prob, the vertices are the first k standard basis vectors
    (probability simplex); otherwise they are recentered at the origin
    and rescaled to unit norm (regular simplex).
    '''
    B = np.zeros((k, d))
    B[np.arange(k), np.arange(k)] = 1
    if is_prob:
        return B
    centroid = np.sum(B, axis=0) / k
    scale = np.sqrt((k - 1) / k)
    return (B - centroid[np.newaxis, :]) / scale
class Solver(ABC):
    """
    Abstract base class for MRF solvers.

    Subclasses implement MAP inference and partition-function estimation.
    """
    @abstractmethod
    def solve_map(self):
        # find the most probable configuration of the MRF
        pass
    @abstractmethod
    def solve_partition_function(self):
        # estimate (the log of) the partition function
        pass
class M4Solver(Solver):
    """SDP solver using the M4 mixing method (C kernel in ``_solvers``)."""
    def __init__(self):
        '''
        Instantiates a M4 Solver.
        Example usage: solver = M4Solver()
                       solver.solve_map(A, h, k)
        '''
        pass
    def solve(self, A, h, V_init, max_iter, eps):
        '''
        Solve the SDP using M4.
        :param A: the coupling matrix, numpy array of dim (n,n)
        :param h: biases projected onto the simplex, numpy array of dim (n,d)
        :param V_init: initialization of unit vectors, dim (n, d)
        :param max_iter: max number of iterations to run M4, int
        :param eps: tolerance to stop M4, float
        Returns: (last-iterate change, solution matrix V)
        '''
        n = A.shape[0]
        d = V_init.shape[1]
        assert h.shape[1] == d
        # C kernel requires contiguous float32; V is updated in place
        A, h, V = map(ensure_C, [A, h, V_init])
        diff = _solvers.M4(A, h, V, eps, max_iter)
        return diff, V
    def solve_map(self, A, h, k, rounding_iters=500, max_iter=100, eps=0, returnVB=False, returnTime=False):
        '''
        Solve the MAP estimation problem via the SDP + randomized rounding.
        :param A: coupling matrix, numpy array of dim (n, n)
        :param h: unary biases, numpy array of dim (n, k)
        :param k: number of labels, int
        :param rounding_iters: number of rounding iterations, int
        :param max_iter: max M4 iterations, int
        :param eps: M4 stopping tolerance, float
        :param returnVB: also return the SDP solution (V, B); used for
            partition function estimation
        :param returnTime: also return per-rounding timestamps/values;
            used for timing expts
        Returns: (mode_x, mode_f) plus optional extras
        '''
        n = A.shape[0]
        k = h.shape[1]
        # embedding dimension for the low-rank SDP parameterization
        d = int(np.ceil(np.sqrt(2*(n+k*(k+1)/2)) + 1))
        V = rand_unit_vector(n, d)
        B = get_simplex(k, d)
        diff, V = self.solve(A, h @ B, V, max_iter, eps)
        mode_x, mode_f = None, -np.inf
        t_list = []
        f_list = []
        for _ in range(rounding_iters):
            x = obtain_rounded_v(V, B)
            f = get_f(A, h, x)
            t_list.append(time.time())
            f_list.append(f)
            if f > mode_f:
                mode_x = x
                mode_f = f
        if returnVB:
            return mode_x, mode_f, V, B
        if returnTime:
            return mode_x, mode_f, t_list, f_list
        return mode_x, mode_f
    def solve_partition_function(self, A, h, k, rounding_iters=500, max_iter=100, eps=0):
        '''
        Estimate the log partition function: enumerate distinct rounded
        configurations exactly and importance-sample the remaining mass
        from the uniform distribution.
        '''
        n = A.shape[0]
        _, _, V, B = self.solve_map(A, h, k, rounding_iters=rounding_iters, max_iter=max_iter,
                                    eps=eps, returnVB=True)
        s_list = {}
        f_list = []
        for _ in range(rounding_iters):
            x = obtain_rounded_v(V, B)
            f = get_f(A, h, x)
            s = get_string_from_vector(x)
            if s not in s_list:
                s_list[s] = 1
                f_list.append(f)
        # log of the uniform mass NOT covered by the enumerated configs
        rem = np.log(1-np.exp(np.log(len(f_list))-n*np.log(k)))
        y_list = []
        while True:
            if len(y_list) >= rounding_iters: break
            x = np.random.choice(k, n, replace=True)
            s = get_string_from_vector(x)
            if s in s_list: continue
            f = get_f(A, h, x)
            # proposal log-probability, restricted to non-enumerated configs
            log_q = -n * np.log(k) - rem
            f_minus_log_q = f - log_q
            y_list.append(f_minus_log_q)
        sm = LSE(y_list) - np.log(len(y_list))
        sm = LSE([sm]+f_list)
        return sm
class M4PlusSolver(Solver):
    """M4+ SDP solver: block-structured M4 variant on the probability
    simplex, mapped back to the regular simplex for rounding."""
    def __init__(self):
        '''
        Instantiates a M4+ Solver.
        Example usage: solver = M4PlusSolver()
                       solver.solve_map(A, h, k)
        '''
        pass
    def solve(self, A, h, Z_init, k, max_iter, eps):
        '''
        Solve the SDP using M4+.
        :param A: the coupling matrix, numpy array of dim (n,n)
        :param h: biases projected onto the simplex, numpy array of dim (n,d)
        :param Z_init: initialization, dim (n, d); d must be a multiple of k
        :param k: number of labels, int
        :param max_iter: max number of iterations, int
        :param eps: stopping tolerance, float
        Returns: (last-iterate change, solution matrix Z)
        '''
        n = A.shape[0]
        d = Z_init.shape[1]
        m = d // k
        assert A.shape[1] == n and h.shape[0] == n and h.shape[1] == d
        assert d % k == 0
        # interleave the k blocks so the C kernel sees them contiguously
        h, Z = [x.reshape((n, m, k)).transpose(0, 2, 1).reshape(n, d) for x in [h, Z_init]]
        A, h, Z = map(ensure_C, [A, h, Z])
        diff = _solvers.M4_plus(A, h, Z, k, eps, max_iter)
        # undo the interleaving before returning
        Z = Z.reshape((n, k, m)).transpose(0, 2, 1).reshape((n, d))
        return diff, Z
    def __mul_S(self, s, V):
        '''
        Efficiently multiply V with s using the (d // k)-block structure.
        '''
        d = V.shape[1]
        k = s.shape[0]
        assert d % k == 0
        return (V.reshape(-1, d // k, k) @ s).reshape(V.shape)
    def solve_map(self, A, h, k, rounding_iters=500, max_iter=100, eps=0, returnVB=False, returnTime=False):
        '''
        Solve the MAP estimation problem via M4+ and randomized rounding.
        :param rounding_iters: number of rounding iterations, int
        :param returnVB: also return the SDP solution (V, B); used for
            partition function estimation
        :param returnTime: also return per-rounding timestamps/values
        Returns: (mode_x, mode_f) plus optional extras
        '''
        n = len(A)
        k = h.shape[1]
        # embedding dimension rounded up to a multiple of k
        d = int(np.ceil(k * np.sqrt(2*n) + 1))
        while(d % k != 0):
            d += 1
        assert d >= k
        # C_hat maps probability-simplex inner products to regular-simplex ones
        C_hat = (k/(k-1))*np.eye(k) - (1/(k-1))*np.full((k, k), 1)
        U, Sigma, Ut = np.linalg.svd(C_hat)
        s = (np.diag(Sigma) ** 0.5) @ Ut
        Z = np.abs(rand_unit_vector(n, d))
        B = get_simplex(k, d, is_prob=True)
        diff, Z = self.solve(A, h @ B, Z, k, max_iter, eps)
        # map solution and simplex back to the regular-simplex geometry
        V, B = self.__mul_S(s.T, Z), self.__mul_S(s.T, B)
        mode_x, mode_f = None, -np.inf
        t_list = []
        f_list = []
        for _ in range(rounding_iters):
            x = obtain_rounded_v(V, B)
            f = get_f(A, h, x)
            t_list.append(time.time())
            f_list.append(f)
            if f > mode_f:
                mode_x = x
                mode_f = f
        if returnVB:
            return mode_x, mode_f, V, B
        if returnTime:
            return mode_x, mode_f, t_list, f_list
        return mode_x, mode_f
    def solve_partition_function(self, A, h, k, rounding_iters=500, max_iter=100, eps=0):
        '''
        Estimate the log partition function (same scheme as M4Solver):
        enumerate distinct rounded configurations exactly and
        importance-sample the remaining mass from the uniform distribution.
        '''
        n = A.shape[0]
        _, _, V, B = self.solve_map(A, h, k, rounding_iters=rounding_iters, max_iter=max_iter,
                                    eps=eps, returnVB=True)
        s_list = {}
        f_list = []
        for _ in range(rounding_iters):
            x = obtain_rounded_v(V, B)
            f = get_f(A, h, x)
            s = get_string_from_vector(x)
            if s not in s_list:
                s_list[s] = 1
                f_list.append(f)
        # log of the uniform mass NOT covered by the enumerated configs
        rem = np.log(1-np.exp(np.log(len(f_list))-n*np.log(k)))
        y_list = []
        while True:
            if len(y_list) >= rounding_iters: break
            x = np.random.choice(k, n, replace=True)
            s = get_string_from_vector(x)
            if s in s_list: continue
            f = get_f(A, h, x)
            # proposal log-probability, restricted to non-enumerated configs
            log_q = -n * np.log(k) - rem
            f_minus_log_q = f - log_q
            y_list.append(f_minus_log_q)
        sm = LSE(y_list) - np.log(len(y_list))
        sm = LSE([sm]+f_list)
        return sm
class AISSolver(Solver):
    def __init__(self):
        '''
        Instantiates an AIS (Annealed Importance Sampling) Solver.
        Example usage: solver = AISSolver()
                       solver.solve_map(A, h, k)
        '''
        pass
    # p(x) \propto \exp(\sum_{ij}Aij\delta(i, j)/2 + \sum_i\sum_k b_ik\delta(i, k))
    # x is a vector in [0, k-1]^{n}
    def __gibbs_sampling(self, A, h, x, temp, num_cycles=10):
        '''
        Run a Gibbs sampling chain on x (x is modified in place).
        :param A: the coupling matrix, numpy array of dim (n,n)
        :param h: biases, numpy array of dim (n,k)
        :param x: the seed for gibbs sampling, numpy array of dim (n,)
        :param temp: temperature of sampling
        :param num_cycles: number of full sweeps over the sites, int
        Returns: the sample after num_cycles cycles of gibbs sampling
        '''
        n = len(x)
        k = h.shape[1]
        for cycle in range(num_cycles):
            for i in range(n):
                # max conditional energy at site i (numerical stability)
                mx = -np.inf
                for j in range(k):
                    x[i] = j
                    f_j = get_f(A, h, x) / temp
                    mx = max(mx, f_j)
                # normalizing constant of the conditional at site i
                denominator = 0
                for j in range(k):
                    x[i] = j
                    denominator += np.exp(get_f(A, h, x) / temp - mx)
                # inverse-CDF sampling of the new label for site i
                sm_p = 0
                p = np.random.rand()
                for j in range(k):
                    x[i] = j
                    p_j = np.exp(get_f(A, h, x) / temp - mx) / denominator
                    sm_p += p_j
                    if p < sm_p:
                        break
        return x
def __log_f_t(self, x, t, inv_temps, A, h):
'''
Compute function value at t^th step of annealing in AIS
'''
n = len(x)
k = h.shape[1]
weight_on_uniform = (inv_temps[t] - 1) * n * np.log(k)
f = get_f(A, h, x)
weight_on_true = inv_temps[t] * (f)
return weight_on_uniform + weight_on_true
def solve_map(self, A, h, k, num_samples=500, T=100, num_cycles=10, returnTime=False):
'''
Solve the MAP estimation problem.
:param num_samples: number of annealed samples
:param T: number of temperatures used in annealing
:param num_cycles: number of cycles of gibbs sampling
:param returnTime: boolean value used for timing expts
'''
n = len(A)
inv_temps = np.linspace(0, 1, T)
mode_x, mode_f = None, -np.inf
t_list = []
f_list = []
for i in range(num_samples):
x = np.random.choice(k, size=n, replace=True)
for t in range(1, T):
x = self.__gibbs_sampling(A, h, x, 1 / inv_temps[t], num_cycles=num_cycles)
f = get_f(A, h, x)
t_list.append(time.time())
f_list.append(f)
if f > mode_f:
mode_x = x
mode_f = f
if returnTime:
return mode_x, mode_f, t_list, f_list
return mode_x, mode_f
def solve_partition_function(self, A, h, k, num_samples=500, T=100, num_cycles=10):
'''
Compute the partition function via AIS.
'''
n = len(A)
inv_temps = np.linspace(0, 1, T)
log_w_list = []
mx = -np.inf
for i in range(num_samples):
x = np.random.choice(k, size=n, replace=True)
w = 0
for t in range(1, T):
w = w + self.__log_f_t(x, t, inv_temps, A, h) - self.__log_f_t(x, t-1, inv_temps, A, h)
x = self.__gibbs_sampling(A, h, x, 1 / inv_temps[t], num_cycles=num_cycles)
log_w_list.append(w)
mx = max(mx, w)
log_w_list = [elem - mx for elem in log_w_list]
logZ = mx + np.log(np.sum(np.exp(log_w_list))) - np.log(num_samples)
return logZ
class ExactSolver(Solver):
    '''
    Brute-force solver: enumerates all k^n configurations, so it is only
    practical for very small models. Gives exact MAP and log-partition values.
    '''

    def __init__(self):
        '''
        Instantiates an Exact Solver.
        Example usage: solver = ExactSolver()
                       solver.solve_map(A, h, k)
        '''
        pass

    def __generate_strings(self, n, k):
        '''
        Enumerate the full support: every length-n string over the digits
        0..k-1, in lexicographic order.
        '''
        assert k >= 2 and k < 10
        if n == 1:
            return [str(digit) for digit in range(k)]
        suffixes = self.__generate_strings(n - 1, k)
        return [str(digit) + tail for digit in range(k) for tail in suffixes]

    def solve_map(self, A, h, k):
        '''
        Compute the MAP estimate exactly by exhaustive search.
        Returns: (mode_x, mode_f) — the argmax configuration as a vector,
        and its objective value.
        '''
        n = len(A)
        best_x, best_f = None, -np.inf
        for candidate in self.__generate_strings(n, k):
            score = get_f(A, h, candidate)
            if score > best_f:
                best_x, best_f = get_vector_from_string(candidate), score
        return best_x, best_f

    def solve_partition_function(self, A, h, k):
        '''
        Compute the exact log partition function: a numerically stable
        log-sum-exp over every configuration's score.
        '''
        n = len(A)
        scores = [get_f(A, h, s) for s in self.__generate_strings(n, k)]
        shift = max(scores)
        centered = [score - shift for score in scores]
        return np.log(np.sum(np.exp(centered))) + shift
# Registry mapping user-facing solver names to their Solver subclasses;
# consumed by find_solver to construct solvers by name.
solver_registry = {
    'M4': M4Solver,
    'M4+': M4PlusSolver,
    'AIS': AISSolver,
    'Exact': ExactSolver,
}
def find_solver(name):
    '''
    Map a solver name to a freshly constructed solver object.

    :param name: one of the keys of solver_registry ('M4', 'M4+', 'AIS', 'Exact')
    Returns: a new instance of the corresponding Solver subclass.
    Raises: KeyError if name is not a registered solver name.
    '''
    # Keep only the dict lookup inside the try: previously the constructor
    # call was inside too, so a KeyError raised by a solver's __init__ would
    # be misreported as an unknown-name error.
    try:
        solver_cls = solver_registry[name]
    except KeyError as e:
        raise KeyError('Must be one of '+', '.join(solver_registry.keys())+'.') from e
    return solver_cls()
|
locuslab/sdp_mrf | sdp_mrf/models.py | <reponame>locuslab/sdp_mrf
from .solvers import find_solver
class PottsModel(object):
    '''
    A k-class Potts model with coupling matrix A and unary biases h.
    '''

    def __init__(self, A=None, h=None, k=None):
        '''
        Instantiates a k-class Potts Model, with given coupling matrix A, biases h
        :param A: The coupling matrix. A symmetric numpy array of dimension (n, n)
        :param h: The unary biases. A numpy array of dimension (n, k)
        :param k: The number of classes in the Potts model
        Example usage on a 4-class Potts model on 10 variables:
            n, k = 10, 4
            A, h = np.random.rand(n, n), np.random.rand(n, k)
            A = (A + A.T) / 2
            p = PottsModel(A, h, k)
        '''
        self.set_model_parameters(A, h, k)

    def set_model_parameters(self, A, h, k):
        '''
        Setter for the model parameters.
        :param A: The coupling matrix. A symmetric numpy array of dimension (n, n)
        :param h: The unary biases. A numpy array of dimension (n, k)
        :param k: The number of classes in the Potts model
        '''
        self.A = A
        self.h = h
        self.k = k

    def solve_map(self, solver='M4', **kwargs):
        '''
        Solves the MAP estimation problem.
        :param solver: The solver to solve the MAP estimation problem.
            Can be one of ['M4', 'M4+', 'Exact', 'AIS'] or a solver
            instance. Default: 'M4'.
        :param kwargs: Can contain additional parameters for the solvers.
            e.g. number of iterations in M4/M4+, gibbs sampling parameters in AIS, etc.
        Returns: mode_x (MAP configuration), mode_f (f value at mode_x)
        '''
        if isinstance(solver, str):
            solver = find_solver(solver)
        return solver.solve_map(self.A, self.h, self.k, **kwargs)

    def solve_partition_function(self, solver='M4', **kwargs):
        '''
        Computes (an estimate of) the log partition function.

        (Docstring previously copy-pasted from solve_map; it wrongly
        described MAP estimation.)
        :param solver: The solver to compute the partition function.
            Can be one of ['M4', 'M4+', 'Exact', 'AIS'] or a solver
            instance. Default: 'M4'.
        :param kwargs: Can contain additional parameters for the solvers.
            e.g. number of iterations in M4/M4+, gibbs sampling parameters in AIS, etc.
        Returns: logZ, the (estimated) log partition function value
        '''
        if isinstance(solver, str):
            solver = find_solver(solver)
        return solver.solve_partition_function(self.A, self.h, self.k, **kwargs)

    def solve_probability(self, condition=None):
        '''
        Not implemented yet — placeholder for (conditional) probability
        queries on the model.
        '''
        pass
|
locuslab/sdp_mrf | setup.py | <gh_stars>1-10
import os
import sys

# Make the vendored pybind11 (third_party/pybind11) importable so the build
# works without pybind11 pre-installed; the path entry is removed again right
# after the import below.
DIR = os.path.abspath(os.path.dirname(__file__))
sys.path.append(os.path.join(DIR, "third_party", "pybind11"))
from glob import glob
from setuptools import setup
from pybind11.setup_helpers import Pybind11Extension, build_ext  # noqa: E402
# Drop the temporary path entry now that pybind11 has been imported.
del sys.path[-1]

pkg_name = 'sdp_mrf'
ext_name = '_solvers'
__version__ = "0.0.2"

# C/C++ extension module built from every src/*.c* source file.
ext_modules = [
    Pybind11Extension(pkg_name+'.'+ext_name,
        include_dirs = ['./src'],
        sources = sorted(glob('src/*.c*')),  # sorted for reproducible builds
        define_macros = [('EXTENSION_NAME', ext_name)],
        extra_compile_args = ['-O3', '-Wall', '-g'],
        ),
]

setup(
    name=pkg_name,
    version=__version__,
    install_requires=['numpy'],
    author="<NAME>",
    author_email="<EMAIL>",
    url="https://github.com/locuslab/sdp_mrf",
    description="SDP-based inference for Markov random field (MRF)",
    long_description="",
    ext_modules=ext_modules,
    extras_require={"test": "pytest"},
    cmdclass={"build_ext": build_ext},  # pybind11's build_ext picks C++ flags
    packages=[pkg_name],
    zip_safe=False,
    classifiers=[
        "License :: OSI Approved :: MIT License",
    ],
)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.