hexsha stringlengths 40 40 | size int64 7 1.04M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 4 247 | max_stars_repo_name stringlengths 4 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 368k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 247 | max_issues_repo_name stringlengths 4 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 247 | max_forks_repo_name stringlengths 4 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.04M | avg_line_length float64 1.77 618k | max_line_length int64 1 1.02M | alphanum_fraction float64 0 1 | original_content stringlengths 7 1.04M | filtered:remove_function_no_docstring int64 -102 942k | filtered:remove_class_no_docstring int64 -354 977k | filtered:remove_delete_markers int64 0 60.1k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
43ab02130e03e9dc87fd6fc4f94260796d9b565e | 585 | pyw | Python | Chapter20/callMouseTrack.pyw | PacktPublishing/Python-GUI-Programming-A-Complete-Reference-Guide | f5202087be74e14deca3f85a537cde3d83bd9d1c | [
"MIT"
] | 26 | 2019-06-18T06:41:12.000Z | 2022-01-09T18:12:00.000Z | Chapter20/callMouseTrack.pyw | PacktPublishing/Python-GUI-Programming-A-Complete-Reference-Guide | f5202087be74e14deca3f85a537cde3d83bd9d1c | [
"MIT"
] | null | null | null | Chapter20/callMouseTrack.pyw | PacktPublishing/Python-GUI-Programming-A-Complete-Reference-Guide | f5202087be74e14deca3f85a537cde3d83bd9d1c | [
"MIT"
] | 27 | 2019-06-19T19:24:01.000Z | 2021-09-13T03:29:06.000Z | import sys
from PyQt5.QtWidgets import QDialog, QApplication
from demoMousetrack import *
if __name__=="__main__":
app = QApplication(sys.argv)
w = MyForm()
w.show()
sys.exit(app.exec_())
| 24.375 | 49 | 0.565812 | import sys
from PyQt5.QtWidgets import QDialog, QApplication
from demoMousetrack import *
class MyForm(QDialog):
def __init__(self):
super().__init__()
self.ui = Ui_Dialog()
self.setMouseTracking(True)
self.ui.setupUi(self)
self.show()
def mouseMoveEvent(self, event):
x = event.x()
y = event.y()
text = "x: {0}, y: {1}".format(x, y)
self.ui.label.setText(text)
if __name__=="__main__":
app = QApplication(sys.argv)
w = MyForm()
w.show()
sys.exit(app.exec_())
| 288 | 1 | 87 |
eeb2e819675cf031ab141018d28b7dfb5cdc7186 | 284 | py | Python | 2.py | Polar1ty/euler_problems | bc1cd917d95d1b63b80a0b182dbd5e9f90a95d90 | [
"MIT"
] | 2 | 2020-06-09T10:35:12.000Z | 2020-06-09T11:32:16.000Z | 2.py | Polar1ty/euler_problems | bc1cd917d95d1b63b80a0b182dbd5e9f90a95d90 | [
"MIT"
] | null | null | null | 2.py | Polar1ty/euler_problems | bc1cd917d95d1b63b80a0b182dbd5e9f90a95d90 | [
"MIT"
] | null | null | null | fibo = [1, 2]
fibo_2 = []
i = 0
while True:
a = fibo[-2] + fibo[-1]
fibo.append(a)
i += 1
if a > 4000000:
del fibo[-1]
break
print(fibo)
for c in fibo:
print(c)
if c % 2 == 0:
fibo_2.append(c)
print(sum(fibo_2)) | 13.52381 | 28 | 0.447183 | fibo = [1, 2]
fibo_2 = []
i = 0
while True:
a = fibo[-2] + fibo[-1]
fibo.append(a)
i += 1
if a > 4000000:
del fibo[-1]
break
print(fibo)
for c in fibo:
print(c)
if c % 2 == 0:
fibo_2.append(c)
print(sum(fibo_2)) | 0 | 0 | 0 |
828b1aa2142a892b015173a02cca8deed0099c9a | 2,586 | py | Python | app/__init__.py | isabella232/rva-screening | 99f03a6863c41f159ca12a60c7e34652f4a2e3c3 | [
"BSD-3-Clause"
] | 12 | 2015-06-07T01:21:29.000Z | 2020-06-27T00:19:31.000Z | app/__init__.py | codeforamerica/rva-screening | 99f03a6863c41f159ca12a60c7e34652f4a2e3c3 | [
"BSD-3-Clause"
] | 197 | 2015-03-20T20:54:17.000Z | 2015-11-17T18:06:02.000Z | app/__init__.py | isabella232/rva-screening | 99f03a6863c41f159ca12a60c7e34652f4a2e3c3 | [
"BSD-3-Clause"
] | 11 | 2015-09-03T20:00:45.000Z | 2021-04-16T10:12:50.000Z | import sys
import logging
from flask import Flask, render_template
from flask.ext.babel import Babel
from flask.ext.bcrypt import Bcrypt
from flask.ext.login import LoginManager
from flask.ext.security import Security, SQLAlchemyUserDatastore
from flask.ext.sqlalchemy import SQLAlchemy
from flask_mail import Mail
from flask_s3 import FlaskS3
from config import ProdConfig
db = SQLAlchemy()
bcrypt = Bcrypt()
babel = Babel()
login_manager = LoginManager()
s3 = FlaskS3()
mail = Mail()
| 27.806452 | 69 | 0.708043 | import sys
import logging
from flask import Flask, render_template
from flask.ext.babel import Babel
from flask.ext.bcrypt import Bcrypt
from flask.ext.login import LoginManager
from flask.ext.security import Security, SQLAlchemyUserDatastore
from flask.ext.sqlalchemy import SQLAlchemy
from flask_mail import Mail
from flask_s3 import FlaskS3
from config import ProdConfig
def create_app(config=ProdConfig):
app = Flask(__name__, static_url_path='')
app.config.from_object(config)
app.debug = app.config['DEBUG']
register_blueprints(app)
register_extensions(app)
register_context_processors(app)
register_errorhandler(app)
@app.before_first_request
def before_first_request():
if app.debug and not app.testing:
app.logger.setLevel(logging.DEBUG)
elif app.testing:
app.logger.setLevel(logging.CRITICAL)
else:
stdout = logging.StreamHandler(sys.stdout)
stdout.setFormatter(logging.Formatter(
'%(asctime)s | %(name)s | %(levelname)s \
in %(module)s [%(pathname)s:%(lineno)d]: %(message)s'
))
app.logger.addHandler(stdout)
app.logger.setLevel(logging.DEBUG)
return app
def register_blueprints(app):
from app.views import screener
app.register_blueprint(screener)
def register_extensions(app):
db.init_app(app)
bcrypt.init_app(app)
babel.init_app(app)
login_manager.init_app(app)
login_manager.login_view = '/login'
s3.init_app(app)
mail.init_app(app)
from app.models import AppUser, Role
user_datastore = SQLAlchemyUserDatastore(db, AppUser, Role)
Security(app, user_datastore)
def register_context_processors(app):
from app.context_processors import (
inject_static_url,
inject_example_data,
inject_template_constants
)
app.context_processor(inject_static_url)
app.context_processor(inject_example_data)
app.context_processor(inject_template_constants)
def register_errorhandler(app):
def server_error(error):
app.logger.exception(error)
return render_template('500.html')
def page_not_found(error):
return render_template('404.html')
def permission_denied(error):
return render_template('403.html')
app.errorhandler(500)(server_error)
app.errorhandler(404)(page_not_found)
app.errorhandler(403)(permission_denied)
return None
db = SQLAlchemy()
bcrypt = Bcrypt()
babel = Babel()
login_manager = LoginManager()
s3 = FlaskS3()
mail = Mail()
| 1,978 | 0 | 115 |
4ff426f8f33d19e6007ab46e883b7e50c434cee3 | 574 | py | Python | herencia.py | codeneomatrix/curso-python | bfe7d8b1d05989b1d64e67b01ba30465df531224 | [
"MIT"
] | 5 | 2015-07-22T17:10:09.000Z | 2019-12-18T03:15:28.000Z | herencia.py | Jorgehadad/curso-python | bfe7d8b1d05989b1d64e67b01ba30465df531224 | [
"MIT"
] | null | null | null | herencia.py | Jorgehadad/curso-python | bfe7d8b1d05989b1d64e67b01ba30465df531224 | [
"MIT"
] | 3 | 2015-10-09T02:24:51.000Z | 2020-05-30T18:33:47.000Z |
Cocodrilo1 = Cocodrilo(1)
Cocodrilo1.desplazar() | 17.393939 | 43 | 0.724739 | class Terrestre:
def __init__(self, velocidadcaminar):
super(Acuatico, self).__init__()
self.velocidadcaminar = velocidadcaminar
def caminar(self):
print "estoy caminando"
def desplazar(self):
print "el animal anda"
class Acuatico(object):
def __init__(self, velocidaddenado):
super(Acuatico, self).__init__()
self.velocidaddenado = velocidaddenado
def desplazar(self):
print "el animal nada"
def nadar(self):
print "estoy nadando"
class Cocodrilo (Acuatico,Terrestre):
pass
Cocodrilo1 = Cocodrilo(1)
Cocodrilo1.desplazar() | 281 | 19 | 215 |
4cede8ed210c068375c400648372cbc0568941de | 4,017 | py | Python | graph/compute_strongly_connected_components.py | anhtumai/data-structure-and-algorithms-collection | 6eac2a8dfec9a89a9e9f800d2a23d37bf6d82d31 | [
"MIT"
] | 1 | 2021-12-14T14:51:31.000Z | 2021-12-14T14:51:31.000Z | graph/compute_strongly_connected_components.py | anhtumai/data-structure-and-algorithms-collection | 6eac2a8dfec9a89a9e9f800d2a23d37bf6d82d31 | [
"MIT"
] | null | null | null | graph/compute_strongly_connected_components.py | anhtumai/data-structure-and-algorithms-collection | 6eac2a8dfec9a89a9e9f800d2a23d37bf6d82d31 | [
"MIT"
] | 1 | 2021-12-14T14:50:46.000Z | 2021-12-14T14:50:46.000Z | """
Compute Strongly Connected Components
Given a graph, calculate number of strongest connected components
In a subgraph, if we can reach from every vertex to every other vertex,
then it is called SCC.
Example:
>> graph1 = {0: [1], 1: [2], 2: [0, 3], 3: [4], 4: [5, 7], 5: [6], 6: [4, 7]} >>> compute_sccs(graph1))
>> [[0, 2, 1], [3], [4, 6, 5], [7]]
Approach: Using Kosaraju Algorithm:
- Perform DFS traversal of a graph,
to get a stack representing the order of visited nodes while traversal.
(The starting node will be returned when we pop the stack for the first time)
- Perform DFS traversal of a reversed graph, where directions of all edges are reversed
- Collect strongly connected components while traversal
Tested with: https://www.hackerearth.com/practice/algorithms/graphs/strongly-connected-components/tutorial/
"""
from typing import Generator, Union
Node = Union[str, int]
Graph = dict[Node, list[Node]]
Stack = list[Node]
def genrate_all_nodes(graph: Graph) -> Generator[int, None, None]:
"""
Return a generator for all nodes in the graph
"""
mentioned_nodes = set()
for node in graph.keys():
if node not in mentioned_nodes:
yield node
mentioned_nodes.add(node)
for neighbour in graph[node]:
if neighbour not in mentioned_nodes:
yield neighbour
mentioned_nodes.add(neighbour)
def reverse(graph: Graph) -> Graph:
"""
Return a new graph with same vertices as input graph, but all edges are reversed
"""
reverse_graph = {}
for node in graph.keys():
for neighbour in graph[node]:
if neighbour not in reverse_graph:
reverse_graph[neighbour] = [node]
else:
reverse_graph[neighbour].append(node)
return reverse_graph
def get_dfs_stack(graph: Graph) -> Stack:
"""
Perform DFS traversal in an input graph
Return a stack, representing the order of the path
(the starting node will be returned when we pop the stack for the first time)
"""
explored = set()
stack = []
for node in genrate_all_nodes(graph):
if node not in explored:
dfs_util(node)
return stack
def get_strongly_connected_components(graph: Graph, stack: Stack) -> list[list[Node]]:
"""
Perfrom DFS traversal on a reversed graph to get
"""
explored = set()
sccs = []
while len(stack) > 0:
node = stack.pop()
if node not in explored:
sccs.append(dfs_util(node, []))
return sccs
def compute_sccs(graph: Graph) -> list[list[Node]]:
"""
Given a directed graph, return the list of strongly connected components
"""
reverse_graph = reverse(graph)
stack = get_dfs_stack(graph)
sccs = get_strongly_connected_components(reverse_graph, stack)
return sccs
if __name__ == "__main__":
graph1 = {0: [1], 1: [2], 2: [0, 3], 3: [4], 4: [5, 7], 5: [6], 6: [4, 7]}
print(compute_sccs(graph1)) # [[0, 2, 1], [3], [4, 6, 5], [7]]
graph2 = {
1: [10],
3: [6, 9],
7: [12],
9: [2, 14],
12: [9],
4: [11],
2: [5, 10],
5: [3],
15: [3, 8],
8: [11],
11: [5],
}
print(compute_sccs(graph2))
# [[15], [8], [4], [11], [7], [12], [3, 5, 2, 9], [14], [6], [1], [10]]
| 28.899281 | 191 | 0.577545 | """
Compute Strongly Connected Components
Given a graph, calculate number of strongest connected components
In a subgraph, if we can reach from every vertex to every other vertex,
then it is called SCC.
Example:
>> graph1 = {0: [1], 1: [2], 2: [0, 3], 3: [4], 4: [5, 7], 5: [6], 6: [4, 7]} >>> compute_sccs(graph1))
>> [[0, 2, 1], [3], [4, 6, 5], [7]]
Approach: Using Kosaraju Algorithm:
- Perform DFS traversal of a graph,
to get a stack representing the order of visited nodes while traversal.
(The starting node will be returned when we pop the stack for the first time)
- Perform DFS traversal of a reversed graph, where directions of all edges are reversed
- Collect strongly connected components while traversal
Tested with: https://www.hackerearth.com/practice/algorithms/graphs/strongly-connected-components/tutorial/
"""
from typing import Generator, Union
Node = Union[str, int]
Graph = dict[Node, list[Node]]
Stack = list[Node]
def genrate_all_nodes(graph: Graph) -> Generator[int, None, None]:
"""
Return a generator for all nodes in the graph
"""
mentioned_nodes = set()
for node in graph.keys():
if node not in mentioned_nodes:
yield node
mentioned_nodes.add(node)
for neighbour in graph[node]:
if neighbour not in mentioned_nodes:
yield neighbour
mentioned_nodes.add(neighbour)
def reverse(graph: Graph) -> Graph:
"""
Return a new graph with same vertices as input graph, but all edges are reversed
"""
reverse_graph = {}
for node in graph.keys():
for neighbour in graph[node]:
if neighbour not in reverse_graph:
reverse_graph[neighbour] = [node]
else:
reverse_graph[neighbour].append(node)
return reverse_graph
def get_dfs_stack(graph: Graph) -> Stack:
"""
Perform DFS traversal in an input graph
Return a stack, representing the order of the path
(the starting node will be returned when we pop the stack for the first time)
"""
explored = set()
stack = []
def dfs_util(node: Node) -> None:
explored.add(node)
if node in graph:
for neighbour in graph[node]:
if neighbour not in explored:
dfs_util(neighbour)
stack.append(node)
for node in genrate_all_nodes(graph):
if node not in explored:
dfs_util(node)
return stack
def get_strongly_connected_components(graph: Graph, stack: Stack) -> list[list[Node]]:
"""
Perfrom DFS traversal on a reversed graph to get
"""
explored = set()
sccs = []
def dfs_util(node: Node, scc: list[Node]) -> list[Node]:
explored.add(node)
scc.append(node)
if node in graph:
for neighbour in graph[node]:
if neighbour not in explored:
dfs_util(neighbour, scc)
return scc
while len(stack) > 0:
node = stack.pop()
if node not in explored:
sccs.append(dfs_util(node, []))
return sccs
def compute_sccs(graph: Graph) -> list[list[Node]]:
"""
Given a directed graph, return the list of strongly connected components
"""
reverse_graph = reverse(graph)
stack = get_dfs_stack(graph)
sccs = get_strongly_connected_components(reverse_graph, stack)
return sccs
if __name__ == "__main__":
graph1 = {0: [1], 1: [2], 2: [0, 3], 3: [4], 4: [5, 7], 5: [6], 6: [4, 7]}
print(compute_sccs(graph1)) # [[0, 2, 1], [3], [4, 6, 5], [7]]
graph2 = {
1: [10],
3: [6, 9],
7: [12],
9: [2, 14],
12: [9],
4: [11],
2: [5, 10],
5: [3],
15: [3, 8],
8: [11],
11: [5],
}
print(compute_sccs(graph2))
# [[15], [8], [4], [11], [7], [12], [3, 5, 2, 9], [14], [6], [1], [10]]
| 485 | 0 | 54 |
68bc992486a70731d5c46bea85008fbfe6b16f19 | 696 | py | Python | pyshop/utils.py | njarvis/pyshop | 6165a78be19d190b7a6a73efea27378997a22fd7 | [
"MIT"
] | 3 | 2018-02-26T04:56:30.000Z | 2018-03-14T12:50:42.000Z | pyshop/utils.py | njarvis/pyshop | 6165a78be19d190b7a6a73efea27378997a22fd7 | [
"MIT"
] | 1 | 2018-03-14T12:51:21.000Z | 2018-03-14T13:42:43.000Z | pyshop/utils.py | njarvis/pyshop | 6165a78be19d190b7a6a73efea27378997a22fd7 | [
"MIT"
] | null | null | null | def running_threads():
""" Currently running threads
:returns: list of running thread information
:rtype: list of str
"""
import threading
threads = []
main_thread = threading.current_thread()
for t in threading.enumerate():
if t is main_thread:
continue
threads.append('#{}: {}: {}'.format(len(threads) + 1, t.getName(), t))
return threads
def loggers():
""" Currently configured loggers
:returns: list of configured loggers
:rtype: list of Logger objects
"""
import logging
root = logging.root
existing = root.manager.loggerDict.keys()
return [logging.getLogger(name) for name in existing]
| 22.451613 | 78 | 0.635057 | def running_threads():
""" Currently running threads
:returns: list of running thread information
:rtype: list of str
"""
import threading
threads = []
main_thread = threading.current_thread()
for t in threading.enumerate():
if t is main_thread:
continue
threads.append('#{}: {}: {}'.format(len(threads) + 1, t.getName(), t))
return threads
def loggers():
""" Currently configured loggers
:returns: list of configured loggers
:rtype: list of Logger objects
"""
import logging
root = logging.root
existing = root.manager.loggerDict.keys()
return [logging.getLogger(name) for name in existing]
| 0 | 0 | 0 |
76391c85d55c465b478fd3f0a8a495ff7f31d914 | 91 | py | Python | rest_registration/__init__.py | pragex/django-rest-registration | 2750b3e6d33cde15ba46d5c5b4cb683973f7b914 | [
"MIT"
] | null | null | null | rest_registration/__init__.py | pragex/django-rest-registration | 2750b3e6d33cde15ba46d5c5b4cb683973f7b914 | [
"MIT"
] | null | null | null | rest_registration/__init__.py | pragex/django-rest-registration | 2750b3e6d33cde15ba46d5c5b4cb683973f7b914 | [
"MIT"
] | null | null | null | __version__ = "0.5.1"
default_app_config = 'rest_registration.apps.RestRegistrationConfig'
| 30.333333 | 68 | 0.824176 | __version__ = "0.5.1"
default_app_config = 'rest_registration.apps.RestRegistrationConfig'
| 0 | 0 | 0 |
e15495dabf3e05e8368f6c219125453b62b4aae8 | 1,567 | py | Python | examples/basic_gibbs_sampling_via_mlm.py | gdhy9064/bert4keras | 1fae3605dae94b60b225cc4863bff85b8fbb00ac | [
"Apache-2.0"
] | 4,478 | 2019-08-26T03:53:18.000Z | 2022-03-31T12:33:41.000Z | examples/basic_gibbs_sampling_via_mlm.py | gdhy9064/bert4keras | 1fae3605dae94b60b225cc4863bff85b8fbb00ac | [
"Apache-2.0"
] | 436 | 2019-09-04T07:33:03.000Z | 2022-03-30T00:13:27.000Z | examples/basic_gibbs_sampling_via_mlm.py | gdhy9064/bert4keras | 1fae3605dae94b60b225cc4863bff85b8fbb00ac | [
"Apache-2.0"
] | 923 | 2019-08-26T10:46:21.000Z | 2022-03-30T08:23:03.000Z | #! -*- coding: utf-8 -*-
# 测试代码可用性: 结合MLM的Gibbs采样
from tqdm import tqdm
import numpy as np
from bert4keras.models import build_transformer_model
from bert4keras.tokenizers import Tokenizer
from bert4keras.snippets import to_array
config_path = '/root/kg/bert/chinese_L-12_H-768_A-12/bert_config.json'
checkpoint_path = '/root/kg/bert/chinese_L-12_H-768_A-12/bert_model.ckpt'
dict_path = '/root/kg/bert/chinese_L-12_H-768_A-12/vocab.txt'
tokenizer = Tokenizer(dict_path, do_lower_case=True) # 建立分词器
model = build_transformer_model(
config_path=config_path, checkpoint_path=checkpoint_path, with_mlm=True
) # 建立模型,加载权重
sentences = []
init_sent = u'科学技术是第一生产力。' # 给定句子或者None
minlen, maxlen = 8, 32
steps = 10000
converged_steps = 1000
vocab_size = tokenizer._vocab_size
if init_sent is None:
length = np.random.randint(minlen, maxlen + 1)
tokens = ['[CLS]'] + ['[MASK]'] * length + ['[SEP]']
token_ids = tokenizer.tokens_to_ids(tokens)
segment_ids = [0] * len(token_ids)
else:
token_ids, segment_ids = tokenizer.encode(init_sent)
length = len(token_ids) - 2
for _ in tqdm(range(steps), desc='Sampling'):
# Gibbs采样流程:随机mask掉一个token,然后通过MLM模型重新采样这个token。
i = np.random.choice(length) + 1
token_ids[i] = tokenizer._token_mask_id
probas = model.predict(to_array([token_ids], [segment_ids]))[0, i]
token = np.random.choice(vocab_size, p=probas)
token_ids[i] = token
sentences.append(tokenizer.decode(token_ids))
print(u'部分随机采样结果:')
for _ in range(10):
print(np.random.choice(sentences[converged_steps:]))
| 33.340426 | 75 | 0.733886 | #! -*- coding: utf-8 -*-
# 测试代码可用性: 结合MLM的Gibbs采样
from tqdm import tqdm
import numpy as np
from bert4keras.models import build_transformer_model
from bert4keras.tokenizers import Tokenizer
from bert4keras.snippets import to_array
config_path = '/root/kg/bert/chinese_L-12_H-768_A-12/bert_config.json'
checkpoint_path = '/root/kg/bert/chinese_L-12_H-768_A-12/bert_model.ckpt'
dict_path = '/root/kg/bert/chinese_L-12_H-768_A-12/vocab.txt'
tokenizer = Tokenizer(dict_path, do_lower_case=True) # 建立分词器
model = build_transformer_model(
config_path=config_path, checkpoint_path=checkpoint_path, with_mlm=True
) # 建立模型,加载权重
sentences = []
init_sent = u'科学技术是第一生产力。' # 给定句子或者None
minlen, maxlen = 8, 32
steps = 10000
converged_steps = 1000
vocab_size = tokenizer._vocab_size
if init_sent is None:
length = np.random.randint(minlen, maxlen + 1)
tokens = ['[CLS]'] + ['[MASK]'] * length + ['[SEP]']
token_ids = tokenizer.tokens_to_ids(tokens)
segment_ids = [0] * len(token_ids)
else:
token_ids, segment_ids = tokenizer.encode(init_sent)
length = len(token_ids) - 2
for _ in tqdm(range(steps), desc='Sampling'):
# Gibbs采样流程:随机mask掉一个token,然后通过MLM模型重新采样这个token。
i = np.random.choice(length) + 1
token_ids[i] = tokenizer._token_mask_id
probas = model.predict(to_array([token_ids], [segment_ids]))[0, i]
token = np.random.choice(vocab_size, p=probas)
token_ids[i] = token
sentences.append(tokenizer.decode(token_ids))
print(u'部分随机采样结果:')
for _ in range(10):
print(np.random.choice(sentences[converged_steps:]))
| 0 | 0 | 0 |
05499e9c72f1e109cc912946bae295fc03f5e194 | 1,692 | py | Python | tensorflow_impl/rsrcs/aggregators/condense.py | sahareslami/Garfield | b620816fe88b2cb2cef4a889342e6b1ad32a0395 | [
"MIT"
] | 8 | 2021-02-03T15:13:44.000Z | 2022-02-24T18:47:13.000Z | tensorflow_impl/rsrcs/aggregators/condense.py | sahareslami/Garfield | b620816fe88b2cb2cef4a889342e6b1ad32a0395 | [
"MIT"
] | 2 | 2021-09-13T14:36:10.000Z | 2022-01-26T16:33:16.000Z | tensorflow_impl/rsrcs/aggregators/condense.py | sahareslami/Garfield | b620816fe88b2cb2cef4a889342e6b1ad32a0395 | [
"MIT"
] | 8 | 2020-12-12T12:53:53.000Z | 2021-11-30T05:58:08.000Z | # coding: utf-8
###
# @file condense.py
# @author Sébastien Rouault <sebastien.rouault@alumni.epfl.ch>
#
# @section LICENSE
#
# Copyright © 2020 École Polytechnique Fédérale de Lausanne (EPFL).
# All rights reserved.
#
# @section DESCRIPTION
#
# Condense parameter vector aggregation random function.
###
import tensorflow as tf
import tools
from . import _GAR, register
# ---------------------------------------------------------------------------- #
# Condense random function
class TFCondenseGAR(_GAR):
""" Full-TensorFlow condense random function class.
"""
# ---------------------------------------------------------------------------- #
# GAR registering
# Register aggregation rule
register("condense", TFCondenseGAR)
| 31.924528 | 147 | 0.61643 | # coding: utf-8
###
# @file condense.py
# @author Sébastien Rouault <sebastien.rouault@alumni.epfl.ch>
#
# @section LICENSE
#
# Copyright © 2020 École Polytechnique Fédérale de Lausanne (EPFL).
# All rights reserved.
#
# @section DESCRIPTION
#
# Condense parameter vector aggregation random function.
###
import tensorflow as tf
import tools
from . import _GAR, register
# ---------------------------------------------------------------------------- #
# Condense random function
class TFCondenseGAR(_GAR):
""" Full-TensorFlow condense random function class.
"""
def __init__(self, nbworkers, nbbyzwrks, args):
# Parse key:val arguments
ps = tools.parse_keyval([] if args is None else args, defaults={"ps": 0.9})["ps"]
if ps <= 0 or ps > 1:
raise tools.UserException("Invalid selection probability, got %s" % (ps,))
# Finalization
self._p = ps
self._f = nbbyzwrks
def aggregate(self, gradients):
# Assertion
assert len(gradients) >= 2 * self._f + 2, "Not enough gradients to aggregate, expected at least %d, got %d" % (2 * self._f + 2, len(gradients))
# Sample selection indications
c = tf.cast(tf.distributions.Bernoulli(probs=tf.ones_like(gradients[0]) * self._p).sample(), dtype=tf.float32)
# Compute median
g = tf.parallel_stack(gradients)
m = tf.transpose(tf.reduce_min(tf.nn.top_k(tf.transpose(g), (g.shape[0] + 1) // 2, sorted=False).values, axis=1))
# Add masked first gradient and return
return m * c + gradients[0] * (1 - c)
# ---------------------------------------------------------------------------- #
# GAR registering
# Register aggregation rule
register("condense", TFCondenseGAR)
| 895 | 0 | 50 |
c7d095a0943cdbd6682947597aefddda3b26859c | 5,221 | py | Python | bathy_datasets/storage.py | ausseabed/bathy-datasets | 4f1b0a61f19c1140f4ba842318908a9250508c45 | [
"Apache-2.0"
] | null | null | null | bathy_datasets/storage.py | ausseabed/bathy-datasets | 4f1b0a61f19c1140f4ba842318908a9250508c45 | [
"Apache-2.0"
] | null | null | null | bathy_datasets/storage.py | ausseabed/bathy-datasets | 4f1b0a61f19c1140f4ba842318908a9250508c45 | [
"Apache-2.0"
] | null | null | null | import numpy
import tiledb
def mbes_domain(tri=False):
"""Set array domain."""
index_filters = tiledb.FilterList([tiledb.ZstdFilter(level=16)])
xdim = tiledb.Dim(
"longitude",
domain=(None, None),
tile=1000,
dtype=numpy.float64,
filters=index_filters,
)
ydim = tiledb.Dim(
"latitude",
domain=(None, None),
tile=1000,
dtype=numpy.float64,
filters=index_filters,
)
if tri:
# define a third dimension, i.e. depth/z/elevation
zdim = tiledb.Dim(
"depth",
domain=(None, None),
tile=1000,
dtype=numpy.float64,
filters=index_filters,
)
domain = tiledb.Domain(xdim, ydim, zdim)
else:
domain = tiledb.Domain(xdim, ydim)
return domain
def mbes_attrs():
"""Create the mbes attributes"""
attrs = [
tiledb.Attr(
"depth", dtype=numpy.float32, filters=[tiledb.ZstdFilter(level=16)]
),
tiledb.Attr(
"timestamp", dtype="datetime64[ns]", filters=[tiledb.ZstdFilter(level=16)]
),
tiledb.Attr(
"across_track", dtype=numpy.float32, filters=[tiledb.ZstdFilter(level=16)]
),
tiledb.Attr(
"along_track", dtype=numpy.float32, filters=[tiledb.ZstdFilter(level=16)]
),
tiledb.Attr(
"travel_time", dtype=numpy.float32, filters=[tiledb.ZstdFilter(level=16)]
),
tiledb.Attr(
"beam_angle", dtype=numpy.float32, filters=[tiledb.ZstdFilter(level=16)]
),
tiledb.Attr(
"mean_cal_amplitude",
dtype=numpy.float32,
filters=[tiledb.ZstdFilter(level=16)],
),
tiledb.Attr(
"beam_angle_forward",
dtype=numpy.float32,
filters=[tiledb.ZstdFilter(level=16)],
),
tiledb.Attr(
"vertical_error", dtype=numpy.float32, filters=[tiledb.ZstdFilter(level=16)]
),
tiledb.Attr(
"horizontal_error",
dtype=numpy.float32,
filters=[tiledb.ZstdFilter(level=16)],
),
tiledb.Attr(
"sector_number",
dtype=numpy.uint8,
filters=[tiledb.RleFilter(), tiledb.ZstdFilter(level=16)],
),
tiledb.Attr(
"beam_flags",
dtype=numpy.uint8,
filters=[tiledb.RleFilter(), tiledb.ZstdFilter(level=16)],
),
tiledb.Attr(
"ping_flags",
dtype=numpy.uint8,
filters=[tiledb.RleFilter(), tiledb.ZstdFilter(level=16)],
),
tiledb.Attr(
"tide_corrector", dtype=numpy.float32, filters=[tiledb.ZstdFilter(level=16)]
),
tiledb.Attr(
"depth_corrector",
dtype=numpy.float32,
filters=[tiledb.ZstdFilter(level=16)],
),
tiledb.Attr(
"heading", dtype=numpy.float32, filters=[tiledb.ZstdFilter(level=16)]
),
tiledb.Attr(
"pitch", dtype=numpy.float32, filters=[tiledb.ZstdFilter(level=16)]
),
tiledb.Attr("roll", dtype=numpy.float32, filters=[tiledb.ZstdFilter(level=16)]),
tiledb.Attr(
"heave", dtype=numpy.float32, filters=[tiledb.ZstdFilter(level=16)]
),
tiledb.Attr(
"course", dtype=numpy.float32, filters=[tiledb.ZstdFilter(level=16)]
),
tiledb.Attr(
"speed", dtype=numpy.float32, filters=[tiledb.ZstdFilter(level=16)]
),
tiledb.Attr(
"height", dtype=numpy.float32, filters=[tiledb.ZstdFilter(level=16)]
),
tiledb.Attr(
"separation", dtype=numpy.float32, filters=[tiledb.ZstdFilter(level=16)]
),
tiledb.Attr(
"gps_tide_corrector",
dtype=numpy.float32,
filters=[tiledb.ZstdFilter(level=16)],
),
tiledb.Attr(
"centre_beam", dtype=numpy.uint8, filters=[tiledb.RleFilter(), tiledb.ZstdFilter(level=16)]
),
tiledb.Attr(
"beam_number", dtype=numpy.uint16, filters=[tiledb.ZstdFilter(level=16)]
),
tiledb.Attr(
"region_code", dtype=str, filters=[tiledb.ZstdFilter(level=16)]
),
]
return attrs
def mbes_schema():
"""Create the tiledb schema"""
domain = mbes_domain(False) # only 2 dims for the project
attributes = mbes_attrs()
schema = tiledb.ArraySchema(
domain=domain,
sparse=True,
attrs=attributes,
cell_order="hilbert",
tile_order="row-major",
capacity=1_000_000,
allows_duplicates=True,
)
return schema
def create_mbes_array(array_uri, ctx=None):
"""Create the TileDB array."""
schema = mbes_schema()
with tiledb.scope_ctx(ctx):
tiledb.Array.create(array_uri, schema)
def append_ping_dataframe(dataframe, array_uri, ctx=None):
"""Append the ping dataframe read from a GSF file."""
kwargs = {
"mode": "append",
"sparse": True,
"ctx": ctx,
}
tiledb.dataframe_.from_pandas(array_uri, dataframe, **kwargs)
| 29.331461 | 103 | 0.559854 | import numpy
import tiledb
def mbes_domain(tri=False):
"""Set array domain."""
index_filters = tiledb.FilterList([tiledb.ZstdFilter(level=16)])
xdim = tiledb.Dim(
"longitude",
domain=(None, None),
tile=1000,
dtype=numpy.float64,
filters=index_filters,
)
ydim = tiledb.Dim(
"latitude",
domain=(None, None),
tile=1000,
dtype=numpy.float64,
filters=index_filters,
)
if tri:
# define a third dimension, i.e. depth/z/elevation
zdim = tiledb.Dim(
"depth",
domain=(None, None),
tile=1000,
dtype=numpy.float64,
filters=index_filters,
)
domain = tiledb.Domain(xdim, ydim, zdim)
else:
domain = tiledb.Domain(xdim, ydim)
return domain
def mbes_attrs():
"""Create the mbes attributes"""
attrs = [
tiledb.Attr(
"depth", dtype=numpy.float32, filters=[tiledb.ZstdFilter(level=16)]
),
tiledb.Attr(
"timestamp", dtype="datetime64[ns]", filters=[tiledb.ZstdFilter(level=16)]
),
tiledb.Attr(
"across_track", dtype=numpy.float32, filters=[tiledb.ZstdFilter(level=16)]
),
tiledb.Attr(
"along_track", dtype=numpy.float32, filters=[tiledb.ZstdFilter(level=16)]
),
tiledb.Attr(
"travel_time", dtype=numpy.float32, filters=[tiledb.ZstdFilter(level=16)]
),
tiledb.Attr(
"beam_angle", dtype=numpy.float32, filters=[tiledb.ZstdFilter(level=16)]
),
tiledb.Attr(
"mean_cal_amplitude",
dtype=numpy.float32,
filters=[tiledb.ZstdFilter(level=16)],
),
tiledb.Attr(
"beam_angle_forward",
dtype=numpy.float32,
filters=[tiledb.ZstdFilter(level=16)],
),
tiledb.Attr(
"vertical_error", dtype=numpy.float32, filters=[tiledb.ZstdFilter(level=16)]
),
tiledb.Attr(
"horizontal_error",
dtype=numpy.float32,
filters=[tiledb.ZstdFilter(level=16)],
),
tiledb.Attr(
"sector_number",
dtype=numpy.uint8,
filters=[tiledb.RleFilter(), tiledb.ZstdFilter(level=16)],
),
tiledb.Attr(
"beam_flags",
dtype=numpy.uint8,
filters=[tiledb.RleFilter(), tiledb.ZstdFilter(level=16)],
),
tiledb.Attr(
"ping_flags",
dtype=numpy.uint8,
filters=[tiledb.RleFilter(), tiledb.ZstdFilter(level=16)],
),
tiledb.Attr(
"tide_corrector", dtype=numpy.float32, filters=[tiledb.ZstdFilter(level=16)]
),
tiledb.Attr(
"depth_corrector",
dtype=numpy.float32,
filters=[tiledb.ZstdFilter(level=16)],
),
tiledb.Attr(
"heading", dtype=numpy.float32, filters=[tiledb.ZstdFilter(level=16)]
),
tiledb.Attr(
"pitch", dtype=numpy.float32, filters=[tiledb.ZstdFilter(level=16)]
),
tiledb.Attr("roll", dtype=numpy.float32, filters=[tiledb.ZstdFilter(level=16)]),
tiledb.Attr(
"heave", dtype=numpy.float32, filters=[tiledb.ZstdFilter(level=16)]
),
tiledb.Attr(
"course", dtype=numpy.float32, filters=[tiledb.ZstdFilter(level=16)]
),
tiledb.Attr(
"speed", dtype=numpy.float32, filters=[tiledb.ZstdFilter(level=16)]
),
tiledb.Attr(
"height", dtype=numpy.float32, filters=[tiledb.ZstdFilter(level=16)]
),
tiledb.Attr(
"separation", dtype=numpy.float32, filters=[tiledb.ZstdFilter(level=16)]
),
tiledb.Attr(
"gps_tide_corrector",
dtype=numpy.float32,
filters=[tiledb.ZstdFilter(level=16)],
),
tiledb.Attr(
"centre_beam", dtype=numpy.uint8, filters=[tiledb.RleFilter(), tiledb.ZstdFilter(level=16)]
),
tiledb.Attr(
"beam_number", dtype=numpy.uint16, filters=[tiledb.ZstdFilter(level=16)]
),
tiledb.Attr(
"region_code", dtype=str, filters=[tiledb.ZstdFilter(level=16)]
),
]
return attrs
def mbes_schema():
"""Create the tiledb schema"""
domain = mbes_domain(False) # only 2 dims for the project
attributes = mbes_attrs()
schema = tiledb.ArraySchema(
domain=domain,
sparse=True,
attrs=attributes,
cell_order="hilbert",
tile_order="row-major",
capacity=1_000_000,
allows_duplicates=True,
)
return schema
def create_mbes_array(array_uri, ctx=None):
"""Create the TileDB array."""
schema = mbes_schema()
with tiledb.scope_ctx(ctx):
tiledb.Array.create(array_uri, schema)
def append_ping_dataframe(dataframe, array_uri, ctx=None):
"""Append the ping dataframe read from a GSF file."""
kwargs = {
"mode": "append",
"sparse": True,
"ctx": ctx,
}
tiledb.dataframe_.from_pandas(array_uri, dataframe, **kwargs)
| 0 | 0 | 0 |
a6e9ec3d46f7c0674cbb330e807de2fad88975a1 | 1,265 | py | Python | bot.py | Zinan100/ADV-GUARDIAN-GROOT | 714973b532d9e3abf120bee3027920a83f5cff62 | [
"MIT"
] | null | null | null | bot.py | Zinan100/ADV-GUARDIAN-GROOT | 714973b532d9e3abf120bee3027920a83f5cff62 | [
"MIT"
] | null | null | null | bot.py | Zinan100/ADV-GUARDIAN-GROOT | 714973b532d9e3abf120bee3027920a83f5cff62 | [
"MIT"
] | null | null | null | import os
import logging
import logging.config
from pyrogram import Client
from config import API_ID, API_HASH, BOT_TOKEN
logging.config.fileConfig('logging.conf')
logging.getLogger().setLevel(logging.INFO)
logging.getLogger("pyrogram").setLevel(logging.ERROR)
FORCE_SUB = os.environ.get("FORCE_SUB", None)
bot = TG()
bot.run()
| 24.326923 | 72 | 0.61502 | import os
import logging
import logging.config
from pyrogram import Client
from config import API_ID, API_HASH, BOT_TOKEN
logging.config.fileConfig('logging.conf')
logging.getLogger().setLevel(logging.INFO)
logging.getLogger("pyrogram").setLevel(logging.ERROR)
FORCE_SUB = os.environ.get("FORCE_SUB", None)
class TG(Client):
def __init__(self):
super().__init__(
"ADV GUARDIAN GROOT",
api_id=API_ID,
api_hash=API_HASH,
bot_token=BOT_TOKEN,
plugins=dict(root="ADV_GUARDIAN_GROOT")
)
async def start(self):
await super().start()
me = await self.get_me()
self.mention = me.mention
self.username = me.username
self.force_channel = FORCE_SUB
if FORCE_SUB:
try:
link = await self.export_chat_invite_link(FORCE_SUB)
self.invitelink = link
except Exception as e:
logging.warning(e)
logging.warning("Make Sure Bot admin in force sub channel")
self.force_channel = None
logging.info(f"{me.first_name} Started 🤩🤩")
async def stop(self, *args):
await super().stop()
logging.info("Bot Stopped")
bot = TG()
bot.run()
| 817 | -4 | 112 |
ecdbc7aa746d7c4a0ee4a1e72f254411b6ef19f2 | 777 | py | Python | setup.py | ousttrue/cywrap | 397ad9e65926c1c8fa4a0096b732cf1640250e4a | [
"MIT"
] | null | null | null | setup.py | ousttrue/cywrap | 397ad9e65926c1c8fa4a0096b732cf1640250e4a | [
"MIT"
] | null | null | null | setup.py | ousttrue/cywrap | 397ad9e65926c1c8fa4a0096b732cf1640250e4a | [
"MIT"
] | null | null | null | from os import path
import setuptools
import pathlib
HERE = pathlib.Path(__file__).parent
SRC_CLANG = HERE / 'src/clang'
SRC_CLANG_BASE_URL = 'https://raw.githubusercontent.com/llvm/llvm-project/llvmorg-13.0.0/clang/bindings/python/clang/'
if not SRC_CLANG.exists():
SRC_CLANG.mkdir(parents=True)
http_get(SRC_CLANG_BASE_URL, SRC_CLANG, '__init__.py')
http_get(SRC_CLANG_BASE_URL, SRC_CLANG, 'cindex.py')
http_get(SRC_CLANG_BASE_URL, SRC_CLANG, 'enumerations.py')
setuptools.setup()
| 35.318182 | 119 | 0.707851 | from os import path
import setuptools
import pathlib
HERE = pathlib.Path(__file__).parent
SRC_CLANG = HERE / 'src/clang'
SRC_CLANG_BASE_URL = 'https://raw.githubusercontent.com/llvm/llvm-project/llvmorg-13.0.0/clang/bindings/python/clang/'
if not SRC_CLANG.exists():
SRC_CLANG.mkdir(parents=True)
def http_get(url_base: str, dst_dir: pathlib.Path, name: str):
import urllib.request
req = urllib.request.Request(url_base + name)
with urllib.request.urlopen(req) as res:
(dst_dir / name).write_bytes(res.read())
http_get(SRC_CLANG_BASE_URL, SRC_CLANG, '__init__.py')
http_get(SRC_CLANG_BASE_URL, SRC_CLANG, 'cindex.py')
http_get(SRC_CLANG_BASE_URL, SRC_CLANG, 'enumerations.py')
setuptools.setup()
| 231 | 0 | 29 |
3140766f70f723e72b2c955c8c9a2d3d4b9959cb | 3,028 | py | Python | examples/Untitled-1.py | PDot5/CarND-Advanced-Lane-Lines | 60ef0a6105b0aa2a31ed59c96783dabdaa308286 | [
"MIT"
] | null | null | null | examples/Untitled-1.py | PDot5/CarND-Advanced-Lane-Lines | 60ef0a6105b0aa2a31ed59c96783dabdaa308286 | [
"MIT"
] | null | null | null | examples/Untitled-1.py | PDot5/CarND-Advanced-Lane-Lines | 60ef0a6105b0aa2a31ed59c96783dabdaa308286 | [
"MIT"
] | null | null | null | #%% Change working directory from the workspace root to the ipynb file location. Turn this addition off with the DataScience.changeDirOnImportExport setting
import os
try:
os.chdir(os.path.join(os.getcwd(), 'examples'))
print(os.getcwd())
except:
pass
#%% [markdown]
# ## Advanced Lane Finding Project
#
# The goals / steps of this project are the following:
#
# * Compute the camera calibration matrix and distortion coefficients given a set of chessboard images.
# * Apply a distortion correction to raw images.
# * Use color transforms, gradients, etc., to create a thresholded binary image.
# * Apply a perspective transform to rectify binary image ("birds-eye view").
# * Detect lane pixels and fit to find the lane boundary.
# * Determine the curvature of the lane and vehicle position with respect to center.
# * Warp the detected lane boundaries back onto the original image.
# * Output visual display of the lane boundaries and numerical estimation of lane curvature and vehicle position.
#
# ---
# ## First, I'll compute the camera calibration using chessboard images
#%%
import numpy as np
import cv2
import glob
import matplotlib.pyplot as plt
get_ipython().run_line_magic('matplotlib', 'qt')
# prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)
objp = np.zeros((6*9,3), np.float32)
objp[:,:2] = np.mgrid[0:9,0:6].T.reshape(-1,2)
# Arrays to store object points and image points from all the images.
objpoints = [] # 3d points in real world space
imgpoints = [] # 2d points in image plane.
# Make a list of calibration images
images = glob.glob('../camera_cal/calibration*.jpg')
undistorted = cal_undistort(img, objpoints, imgpoints)
print('ok')
print(objp.shape)
print(corners.shape)
img_size = (img.shape[1], img.shape[0])
print(img_size)
%matplotlib inline
plt.figure(figsize=(10.,8))
img = mpimg.imread("../camera_cal/calibration5.jpg")
# Undistort using mtx and dist
undist = cv2.undistort(img, mtx, dist, None, mtx)
plt.subplot(2,2,1)
plt.title('Original')
fig = plt.imshow(img)
plt.subplot(2,2,2)
plt.title('Undistorted')
fig = plt.imshow(undist)
#%% [markdown]
| 30.585859 | 156 | 0.688243 | #%% Change working directory from the workspace root to the ipynb file location. Turn this addition off with the DataScience.changeDirOnImportExport setting
import os
try:
os.chdir(os.path.join(os.getcwd(), 'examples'))
print(os.getcwd())
except:
pass
#%% [markdown]
# ## Advanced Lane Finding Project
#
# The goals / steps of this project are the following:
#
# * Compute the camera calibration matrix and distortion coefficients given a set of chessboard images.
# * Apply a distortion correction to raw images.
# * Use color transforms, gradients, etc., to create a thresholded binary image.
# * Apply a perspective transform to rectify binary image ("birds-eye view").
# * Detect lane pixels and fit to find the lane boundary.
# * Determine the curvature of the lane and vehicle position with respect to center.
# * Warp the detected lane boundaries back onto the original image.
# * Output visual display of the lane boundaries and numerical estimation of lane curvature and vehicle position.
#
# ---
# ## First, I'll compute the camera calibration using chessboard images
#%%
import numpy as np
import cv2
import glob
import matplotlib.pyplot as plt
get_ipython().run_line_magic('matplotlib', 'qt')
# prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)
objp = np.zeros((6*9,3), np.float32)
objp[:,:2] = np.mgrid[0:9,0:6].T.reshape(-1,2)
# Arrays to store object points and image points from all the images.
objpoints = [] # 3d points in real world space
imgpoints = [] # 2d points in image plane.
# Make a list of calibration images
images = glob.glob('../camera_cal/calibration*.jpg')
def cal_undistort(img, objpoints, imgpoints):
ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, img.shape[1:], None, None)
undist = cv2.undistort(img, mtx, dist, None, mtx)
# Step through the list and search for chessboard corners
for fname in images:
img = cv2.imread(fname)
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
# Find the chessboard corners
ret, corners = cv2.findChessboardCorners(gray, (9,6),None)
# If found, add object points, image points
if ret == True:
objpoints.append(objp)
imgpoints.append(corners)
# Draw and display the corners
img = cv2.drawChessboardCorners(img, (9,6), corners, ret)
cv2.imshow('img',img)
cv2.waitKey(10)
cv2.destroyAllWindows()
return undist
undistorted = cal_undistort(img, objpoints, imgpoints)
print('ok')
print(objp.shape)
print(corners.shape)
img_size = (img.shape[1], img.shape[0])
print(img_size)
%matplotlib inline
plt.figure(figsize=(10.,8))
img = mpimg.imread("../camera_cal/calibration5.jpg")
# Undistort using mtx and dist
undist = cv2.undistort(img, mtx, dist, None, mtx)
plt.subplot(2,2,1)
plt.title('Original')
fig = plt.imshow(img)
plt.subplot(2,2,2)
plt.title('Undistorted')
fig = plt.imshow(undist)
#%% [markdown]
| 878 | 0 | 23 |
6530caa9cfd841a0a636033c364af522e2ba4ee1 | 73 | py | Python | _teaching/2019Spring/decks/recurse.py | drewyoungren/drewyoungren.github.io | 38ed4e7e053f4a79320bdf5423f1d42eb0ce618b | [
"MIT"
] | null | null | null | _teaching/2019Spring/decks/recurse.py | drewyoungren/drewyoungren.github.io | 38ed4e7e053f4a79320bdf5423f1d42eb0ce618b | [
"MIT"
] | 2 | 2020-02-26T18:01:37.000Z | 2021-09-27T21:28:24.000Z | _teaching/2018Fall/decks/recurse.py | drewyoungren/drewyoungren.github.io | 38ed4e7e053f4a79320bdf5423f1d42eb0ce618b | [
"MIT"
] | 1 | 2018-04-17T03:25:55.000Z | 2018-04-17T03:25:55.000Z |
if __name__ == "__main__":
go_deep() | 14.6 | 26 | 0.657534 | def go_deep():
return go_deep()
if __name__ == "__main__":
go_deep() | 12 | 0 | 22 |
0a522774b992cfe8784e49f852022ccb56803f83 | 2,182 | py | Python | rkqc/tools/gui/core/BaseItemButtonClose.py | clairechingching/ScaffCC | 737ae90f85d9fe79819d66219747d27efa4fa5b9 | [
"BSD-2-Clause"
] | 158 | 2016-07-21T10:45:05.000Z | 2022-03-25T00:56:20.000Z | rkqc/tools/gui/core/BaseItemButtonClose.py | clairechingching/ScaffCC | 737ae90f85d9fe79819d66219747d27efa4fa5b9 | [
"BSD-2-Clause"
] | 35 | 2016-07-25T01:23:07.000Z | 2021-09-27T16:05:50.000Z | rkqc/tools/gui/core/BaseItemButtonClose.py | clairechingching/ScaffCC | 737ae90f85d9fe79819d66219747d27efa4fa5b9 | [
"BSD-2-Clause"
] | 62 | 2016-08-29T17:28:11.000Z | 2021-12-29T17:55:58.000Z | # RevKit: A Toolkit for Reversible Circuit Design (www.revkit.org)
# Copyright (C) 2009-2011 The RevKit Developers <revkit@informatik.uni-bremen.de>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from PyQt4 import QtCore, QtGui
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from BaseItemButton import *
| 40.407407 | 107 | 0.705775 | # RevKit: A Toolkit for Reversible Circuit Design (www.revkit.org)
# Copyright (C) 2009-2011 The RevKit Developers <revkit@informatik.uni-bremen.de>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from PyQt4 import QtCore, QtGui
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from BaseItemButton import *
class BaseItemButtonClose( BaseItemButton ):
def __init__( self, parent = None ):
BaseItemButton.__init__( self, parent )
self.setPos( 90, -30 )
self.setOpacity( 0 )
self.setToolTip( "Delete Item from Graph" )
# Appear/Delete Animation
self.parentAppearAnimation = QPropertyAnimation( parent, "scale" )
self.parentAppearAnimation.setDuration( 250 )
self.parentAppearAnimation.setStartValue( 0.1 )
self.parentAppearAnimation.setEndValue( 1.0 )
self.parentAppearAnimation.setEasingCurve( QEasingCurve.OutBack )
def backgroundColor( self ):
return QColor( "#aa0000" )
def paint( self, painter, option, widget = None ):
BaseItemButton.paint( self, painter, option, widget )
# Cross
painter.setPen( QPen( QColor( Qt.gray ), 2 ) )
painter.drawLine( 7, 7, 13, 13 )
painter.drawLine( 13, 7, 7, 13 )
def mousePressEvent( self, event ):
self.parentObject().scene().beforeDelete( self.parentObject() )
self.parentAppearAnimation.setDirection( QAbstractAnimation.Backward )
self.parentAppearAnimation.start()
self.connect( self.parentAppearAnimation, SIGNAL( 'finished()' ), self.parentObject().deleteLater )
| 1,127 | 23 | 130 |
4376e45a0796d26a44aadb03775e48744810c547 | 30,807 | py | Python | gnocchi/storage/__init__.py | openvdro/gnocchi | 8099dfc2a30ddf305d9f904de0106e4dd5e56147 | [
"Apache-2.0"
] | null | null | null | gnocchi/storage/__init__.py | openvdro/gnocchi | 8099dfc2a30ddf305d9f904de0106e4dd5e56147 | [
"Apache-2.0"
] | null | null | null | gnocchi/storage/__init__.py | openvdro/gnocchi | 8099dfc2a30ddf305d9f904de0106e4dd5e56147 | [
"Apache-2.0"
] | null | null | null | # -*- encoding: utf-8 -*-
#
# Copyright © 2016-2018 Red Hat, Inc.
# Copyright © 2014-2015 eNovance
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import itertools
import operator
import daiquiri
import numpy
from oslo_config import cfg
import six
from gnocchi import carbonara
from gnocchi import utils
OPTS = [
cfg.StrOpt('driver',
default='file',
help='Storage driver to use'),
]
LOG = daiquiri.getLogger(__name__)
ATTRGETTER_METHOD = operator.attrgetter("method")
ATTRGETTER_GRANULARITY = operator.attrgetter("granularity")
class MetricDoesNotExist(StorageError):
"""Error raised when this metric does not exist."""
class AggregationDoesNotExist(StorageError):
"""Error raised when the aggregation method doesn't exists for a metric."""
class MetricAlreadyExists(StorageError):
"""Error raised when this metric already exists."""
@utils.retry_on_exception_and_log("Unable to initialize storage driver")
def get_driver(conf):
"""Return the configured driver."""
return utils.get_driver_class('gnocchi.storage', conf.storage)(
conf.storage)
| 42.728155 | 90 | 0.577856 | # -*- encoding: utf-8 -*-
#
# Copyright © 2016-2018 Red Hat, Inc.
# Copyright © 2014-2015 eNovance
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import itertools
import operator
import daiquiri
import numpy
from oslo_config import cfg
import six
from gnocchi import carbonara
from gnocchi import utils
OPTS = [
cfg.StrOpt('driver',
default='file',
help='Storage driver to use'),
]
LOG = daiquiri.getLogger(__name__)
ATTRGETTER_METHOD = operator.attrgetter("method")
ATTRGETTER_GRANULARITY = operator.attrgetter("granularity")
class StorageError(Exception):
pass
class MetricDoesNotExist(StorageError):
"""Error raised when this metric does not exist."""
def __init__(self, metric):
self.metric = metric
super(MetricDoesNotExist, self).__init__(
"Metric %s does not exist" % metric)
def jsonify(self):
return {
"cause": "Metric does not exist",
"detail": {
"metric": self.metric,
},
}
class AggregationDoesNotExist(StorageError):
"""Error raised when the aggregation method doesn't exists for a metric."""
def __init__(self, metric, method, granularity):
self.metric = metric
self.method = method
self.granularity = granularity
super(AggregationDoesNotExist, self).__init__(
"Aggregation method '%s' at granularity '%s' "
"for metric %s does not exist" %
(method, utils.timespan_total_seconds(granularity), metric))
def jsonify(self):
return {
"cause": "Aggregation does not exist",
"detail": {
# FIXME(jd) Pecan does not use our JSON renderer for errors
# So we need to convert this
"granularity": utils.timespan_total_seconds(self.granularity),
"aggregation_method": self.method,
},
}
class MetricAlreadyExists(StorageError):
"""Error raised when this metric already exists."""
def __init__(self, metric):
self.metric = metric
super(MetricAlreadyExists, self).__init__(
"Metric %s already exists" % metric)
@utils.retry_on_exception_and_log("Unable to initialize storage driver")
def get_driver(conf):
"""Return the configured driver."""
return utils.get_driver_class('gnocchi.storage', conf.storage)(
conf.storage)
class Statistics(collections.defaultdict):
class StatisticsTimeContext(object):
def __init__(self, stats, name):
self.stats = stats
self.name = name + " time"
def __enter__(self):
self.sw = utils.StopWatch()
self.sw.start()
return self
def __exit__(self, type, value, traceback):
self.stats[self.name] += self.sw.elapsed()
def __init__(self):
super(Statistics, self).__init__(lambda: 0)
def time(self, name):
return self.StatisticsTimeContext(self, name)
class StorageDriver(object):
# NOTE(sileht): By default we use threads, but some driver can disable
# threads by setting this to utils.sequencial_map
MAP_METHOD = staticmethod(utils.parallel_map)
def __init__(self, conf):
self.statistics = Statistics()
@staticmethod
def upgrade():
pass
def _get_splits(self, metrics_aggregations_keys, version=3):
results = collections.defaultdict(
lambda: collections.defaultdict(list))
for metric, aggregation, split in self.MAP_METHOD(
lambda m, k, a, v: (m, a, self._get_splits_unbatched(m, k, a, v)), # noqa
((metric, key, aggregation, version)
for metric, aggregations_and_keys
in six.iteritems(metrics_aggregations_keys)
for aggregation, keys
in six.iteritems(aggregations_and_keys)
for key in keys)):
results[metric][aggregation].append(split)
return results
@staticmethod
def _get_splits_unbatched(metric, timestamp_key, aggregation, version=3):
raise NotImplementedError
@staticmethod
def _get_or_create_unaggregated_timeseries_unbatched(metric, version=3):
"""Get the unaggregated timeserie of metrics.
If the metrics does not exist, it is created.
:param metric: A metric.
:param version: The storage format version number.
"""
raise NotImplementedError
def _get_or_create_unaggregated_timeseries(self, metrics, version=3):
"""Get the unaggregated timeserie of metrics.
If the metrics does not exist, it is created.
:param metrics: A list of metrics.
:param version: The storage format version number.
"""
return dict(
six.moves.zip(
metrics,
self.MAP_METHOD(
utils.return_none_on_failure(
self._get_or_create_unaggregated_timeseries_unbatched),
((metric, version) for metric in metrics))))
@staticmethod
def _store_unaggregated_timeseries_unbatched(metric, data, version=3):
"""Store unaggregated timeseries.
:param metric: A metric.
:param data: The data to store.
:param version: Storage engine data format version
"""
raise NotImplementedError
def _store_unaggregated_timeseries(self, metrics_and_data, version=3):
"""Store unaggregated timeseries.
:param metrics_and_data: A list of (metric, serialized_data) tuples
:param version: Storage engine data format version
"""
self.MAP_METHOD(
utils.return_none_on_failure(
self._store_unaggregated_timeseries_unbatched),
((metric, data, version) for metric, data in metrics_and_data))
@staticmethod
def _store_metric_splits_unbatched(metric, key, aggregation, data, offset,
version=3):
"""Store a metric split.
:param metric: A metric.
:param key: The `carbonara.SplitKey`.
:param aggregation: The `carbonara.Aggregation`.
:param data: The actual data to write.
:param offset: The offset to write to.
:param version: Storage engine format version.
"""
raise NotImplementedError
def _store_metric_splits(self, metrics_keys_aggregations_data_offset,
version=3):
"""Store metric splits.
Store a bunch of splits for some metrics.
:param metrics_keys_aggregations_data_offset: A dict where keys are
`storage.Metric` and
values are a list of
(key, aggregation,
data, offset) tuples.
:param version: Storage engine format version.
"""
self.MAP_METHOD(
self._store_metric_splits_unbatched,
((metric, key, aggregation, data, offset, version)
for metric, keys_aggregations_data_offset
in six.iteritems(metrics_keys_aggregations_data_offset)
for key, aggregation, data, offset
in keys_aggregations_data_offset))
@staticmethod
def _list_split_keys_unbatched(self, metric, aggregations, version=3):
"""List split keys for a metric.
:param metric: The metric to look key for.
:param aggregations: List of Aggregations to look for.
:param version: Storage engine format version.
:return: A dict where keys are Aggregation objects and values are
a set of SplitKey objects.
"""
raise NotImplementedError
def _list_split_keys(self, metrics_and_aggregations, version=3):
"""List split keys for metrics.
:param metrics_and_aggregations: Dict of
{`storage.Metric`:
[`carbonara.Aggregation`]}
to look for.
:param version: Storage engine format version.
:return: A dict where keys are `storage.Metric` and values are dicts
where keys are `carbonara.Aggregation` objects and values are
a set of `carbonara.SplitKey` objects.
"""
metrics = list(metrics_and_aggregations.keys())
r = self.MAP_METHOD(
self._list_split_keys_unbatched,
((metric, metrics_and_aggregations[metric], version)
for metric in metrics))
return {
metric: results
for metric, results in six.moves.zip(metrics, r)
}
@staticmethod
def _version_check(name, v):
"""Validate object matches expected version.
Version should be last attribute and start with 'v'
"""
return name.split("_")[-1] == 'v%s' % v
def get_aggregated_measures(self, metrics_and_aggregations,
from_timestamp=None, to_timestamp=None):
"""Get aggregated measures from a metric.
:param metrics_and_aggregations: The metrics and aggregations to
retrieve in format
{metric: [aggregation, …]}.
:param from timestamp: The timestamp to get the measure from.
:param to timestamp: The timestamp to get the measure to.
"""
metrics_aggs_keys = self._list_split_keys(metrics_and_aggregations)
for metric, aggregations_keys in six.iteritems(metrics_aggs_keys):
for aggregation, keys in six.iteritems(aggregations_keys):
start = (
carbonara.SplitKey.from_timestamp_and_sampling(
from_timestamp, aggregation.granularity)
) if from_timestamp else None
stop = (
carbonara.SplitKey.from_timestamp_and_sampling(
to_timestamp, aggregation.granularity)
) if to_timestamp else None
# Replace keys with filtered version
metrics_aggs_keys[metric][aggregation] = [
key for key in sorted(keys)
if ((not start or key >= start)
and (not stop or key <= stop))
]
metrics_aggregations_splits = self._get_splits_and_unserialize(
metrics_aggs_keys)
results = collections.defaultdict(dict)
for metric, aggregations in six.iteritems(metrics_and_aggregations):
for aggregation in aggregations:
ts = carbonara.AggregatedTimeSerie.from_timeseries(
metrics_aggregations_splits[metric][aggregation],
aggregation)
# We need to truncate because:
# - If the driver is not in WRITE_FULL mode, then it might read
# too much data that will be deleted once the split is
# rewritten. Just truncate so we don't return it.
# - If the driver is in WRITE_FULL but the archive policy has
# been resized, we might still have too much points stored,
# which will be deleted at a later point when new points will
# be processed. Truncate to be sure we don't return them.
if aggregation.timespan is not None:
ts.truncate(aggregation.timespan)
results[metric][aggregation] = ts.fetch(
from_timestamp, to_timestamp)
return results
def get_measures(self, metric, aggregations,
from_timestamp=None, to_timestamp=None,
resample=None):
"""Get aggregated measures from a metric.
Deprecated. Use `get_aggregated_measures` instead.
:param metric: The metric measured.
:param aggregations: The aggregations to retrieve.
:param from timestamp: The timestamp to get the measure from.
:param to timestamp: The timestamp to get the measure to.
:param resample: The granularity to resample to.
"""
timeseries = self.get_aggregated_measures(
{metric: aggregations}, from_timestamp, to_timestamp)[metric]
if resample:
for agg, ts in six.iteritems(timeseries):
timeseries[agg] = ts.resample(resample)
return {
aggmethod: list(itertools.chain(
*[[(timestamp, timeseries[agg].aggregation.granularity, value)
for timestamp, value in timeseries[agg]]
for agg in sorted(aggs,
key=ATTRGETTER_GRANULARITY,
reverse=True)]))
for aggmethod, aggs in itertools.groupby(timeseries.keys(),
ATTRGETTER_METHOD)
}
def _get_splits_and_unserialize(self, metrics_aggregations_keys):
"""Get splits and unserialize them
:param metrics_aggregations_keys: A dict where keys are
`storage.Metric` and values are dict
of {Aggregation: [SplitKey]} to
retrieve.
:return: A dict where keys are `storage.Metric` and values are dict
{aggregation: [`carbonara.AggregatedTimeSerie`]}.
"""
raw_measures = self._get_splits(metrics_aggregations_keys)
results = collections.defaultdict(
lambda: collections.defaultdict(list))
for metric, aggregations_and_raws in six.iteritems(raw_measures):
for aggregation, raws in six.iteritems(aggregations_and_raws):
for key, raw in six.moves.zip(
metrics_aggregations_keys[metric][aggregation], raws):
try:
ts = carbonara.AggregatedTimeSerie.unserialize(
raw, key, aggregation)
except carbonara.InvalidData:
LOG.error("Data corruption detected for %s "
"aggregated `%s' timeserie, granularity "
"`%s' around time `%s', ignoring.",
metric.id, aggregation.method, key.sampling,
key)
ts = carbonara.AggregatedTimeSerie(aggregation)
results[metric][aggregation].append(ts)
return results
def _update_metric_splits(self, metrics_keys_aggregations_splits):
"""Store splits of `carbonara.`AggregatedTimeSerie` for a metric.
This reads the existing split and merge it with the new one give as
argument, then writing it to the storage.
:param metrics_keys_aggregations_splits: A dict where keys are
`storage.Metric` and values
are tuples of the form
({(key, aggregation): split},
oldest_mutable_timestamp)
"""
metrics_splits_to_store = {}
keys_to_get = collections.defaultdict(
lambda: collections.defaultdict(list))
splits_to_rewrite = collections.defaultdict(
lambda: collections.defaultdict(list))
for metric, (keys_and_aggregations_and_splits,
oldest_mutable_timestamp) in six.iteritems(
metrics_keys_aggregations_splits):
for (key, aggregation), split in six.iteritems(
keys_and_aggregations_and_splits):
# NOTE(jd) We write the full split only if the driver works
# that way (self.WRITE_FULL) or if the oldest_mutable_timestamp
# is out of range.
if self.WRITE_FULL or next(key) <= oldest_mutable_timestamp:
# Update the splits that were passed as argument with the
# data already stored in the case that we need to rewrite
# them fully. First, fetch all those existing splits.
keys_to_get[metric][aggregation].append(key)
splits_to_rewrite[metric][aggregation].append(split)
existing_data = self._get_splits_and_unserialize(keys_to_get)
for metric, (keys_and_aggregations_and_splits,
oldest_mutable_timestamp) in six.iteritems(
metrics_keys_aggregations_splits):
for aggregation, existing_list in six.iteritems(
existing_data[metric]):
for key, split, existing in six.moves.zip(
keys_to_get[metric][aggregation],
splits_to_rewrite[metric][aggregation],
existing_list):
existing.merge(split)
keys_and_aggregations_and_splits[
(key, split.aggregation)] = existing
keys_aggregations_data_offset = []
for (key, aggregation), split in six.iteritems(
keys_and_aggregations_and_splits):
# Do not store the split if it's empty.
if split:
offset, data = split.serialize(
key,
compressed=key in keys_to_get[metric][aggregation])
keys_aggregations_data_offset.append(
(key, split.aggregation, data, offset))
metrics_splits_to_store[metric] = keys_aggregations_data_offset
return self._store_metric_splits(metrics_splits_to_store)
def _compute_split_operations(self, metric, aggregations_and_timeseries,
previous_oldest_mutable_timestamp,
oldest_mutable_timestamp):
"""Compute changes to a metric and return operations to be done.
Based on an aggregations list and a grouped timeseries, this computes
what needs to be deleted and stored for a metric and returns it.
:param metric: The metric
:param aggregations_and_timeseries: A dictionary of timeseries of the
form {aggregation: timeseries}.
:param previous_oldest_mutable_timestamp: The previous oldest storable
timestamp from the previous
backwindow.
:param oldest_mutable_timestamp: The current oldest storable timestamp
from the current backwindow.
:return: A tuple (keys_to_delete, keys_to_store) where keys_to_delete
is a set of `carbonara.SplitKey` to delete and where
keys_to_store is a dictionary of the form {key: aggts}
where key is a `carbonara.SplitKey` and aggts a
`carbonara.AggregatedTimeSerie` to be serialized.
"""
# We only need to check for rewrite if driver is not in WRITE_FULL mode
# and if we already stored splits once
need_rewrite = (
not self.WRITE_FULL
and previous_oldest_mutable_timestamp is not None
)
aggregations_needing_list_of_keys = set()
for aggregation, ts in six.iteritems(aggregations_and_timeseries):
# Don't do anything if the timeseries is empty
if not ts:
continue
if aggregation.timespan:
oldest_point_to_keep = ts.truncate(aggregation.timespan)
else:
oldest_point_to_keep = None
if previous_oldest_mutable_timestamp and (aggregation.timespan or
need_rewrite):
previous_oldest_mutable_key = ts.get_split_key(
previous_oldest_mutable_timestamp)
oldest_mutable_key = ts.get_split_key(oldest_mutable_timestamp)
# only cleanup if there is a new object, as there must be a new
# object for an old object to be cleanup
if previous_oldest_mutable_key != oldest_mutable_key:
aggregations_needing_list_of_keys.add(aggregation)
all_existing_keys = self._list_split_keys(
{metric: aggregations_needing_list_of_keys})[metric]
# NOTE(jd) This dict uses (key, aggregation) tuples as keys because
# using just (key) would not carry the aggregation method and therefore
# would not be unique per aggregation!
keys_and_split_to_store = {}
deleted_keys = set()
for aggregation, ts in six.iteritems(aggregations_and_timeseries):
# Don't do anything if the timeseries is empty
if not ts:
continue
oldest_key_to_keep = ts.get_split_key(oldest_point_to_keep)
# If we listed the keys for the aggregation, that's because we need
# to check for cleanup and/or rewrite
if aggregation in all_existing_keys:
# FIXME(jd) This should be sorted by the driver and asserted it
# is in tests. It's likely backends already sort anyway.
existing_keys = sorted(all_existing_keys[aggregation])
# First, check for old splits to delete
if aggregation.timespan:
for key in list(existing_keys):
# NOTE(jd) Only delete if the key is strictly
# inferior the timestamp; we don't delete any
# timeserie split that contains our timestamp, so
# we prefer to keep a bit more than deleting too
# much
if key >= oldest_key_to_keep:
break
deleted_keys.add((key, aggregation))
existing_keys.remove(key)
# Rewrite all read-only splits just for fun (and
# compression). This only happens if
# `previous_oldest_mutable_timestamp' exists, which means
# we already wrote some splits at some point – so this is
# not the first time we treat this timeserie.
if need_rewrite:
for key in existing_keys:
if previous_oldest_mutable_key <= key:
if key >= oldest_mutable_key:
break
LOG.debug(
"Compressing previous split %s (%s) for "
"metric %s", key, aggregation.method,
metric)
# NOTE(jd) Rewrite it entirely for fun (and
# later for compression). For that, we just
# pass an empty split.
keys_and_split_to_store[
(key, aggregation)] = (
carbonara.AggregatedTimeSerie(
aggregation)
)
for key, split in ts.split():
if key >= oldest_key_to_keep:
LOG.debug(
"Storing split %s (%s) for metric %s",
key, aggregation.method, metric)
keys_and_split_to_store[(key, aggregation)] = split
return (deleted_keys, keys_and_split_to_store)
@staticmethod
def _delete_metric(metric):
raise NotImplementedError
@staticmethod
def _delete_metric_splits_unbatched(metric, keys, aggregation, version=3):
raise NotImplementedError
def _delete_metric_splits(self, metrics_keys_aggregations, version=3):
"""Delete splits of metrics.
:param metrics_keys_aggregations: A dict where keys are
`storage.Metric` and values are lists
of (key, aggregation) tuples.
"""
self.MAP_METHOD(
utils.return_none_on_failure(self._delete_metric_splits_unbatched),
((metric, key, aggregation)
for metric, keys_and_aggregations
in six.iteritems(metrics_keys_aggregations)
for key, aggregation in keys_and_aggregations))
def add_measures_to_metrics(self, metrics_and_measures):
"""Update a metric with a new measures, computing new aggregations.
:param metrics_and_measures: A dict there keys are `storage.Metric`
objects and values are timeseries array of
the new measures.
"""
with self.statistics.time("raw measures fetch"):
raw_measures = self._get_or_create_unaggregated_timeseries(
metrics_and_measures.keys())
self.statistics["raw measures fetch"] += len(metrics_and_measures)
self.statistics["processed measures"] += sum(
map(len, metrics_and_measures.values()))
new_boundts = []
splits_to_delete = {}
splits_to_update = {}
for metric, measures in six.iteritems(metrics_and_measures):
measures = numpy.sort(measures, order='timestamps')
agg_methods = list(metric.archive_policy.aggregation_methods)
block_size = metric.archive_policy.max_block_size
back_window = metric.archive_policy.back_window
# NOTE(sileht): We keep one more blocks to calculate rate of change
# correctly
if any(filter(lambda x: x.startswith("rate:"), agg_methods)):
back_window += 1
if raw_measures[metric] is None:
ts = None
else:
try:
ts = carbonara.BoundTimeSerie.unserialize(
raw_measures[metric], block_size, back_window)
except carbonara.InvalidData:
LOG.error("Data corruption detected for %s "
"unaggregated timeserie, creating a new one",
metric.id)
ts = None
if ts is None:
# This is the first time we treat measures for this
# metric, or data are corrupted, create a new one
ts = carbonara.BoundTimeSerie(block_size=block_size,
back_window=back_window)
current_first_block_timestamp = None
else:
current_first_block_timestamp = ts.first_block_timestamp()
# NOTE(jd) This is Python where you need such
# hack to pass a variable around a closure,
# sorry.
computed_points = {"number": 0}
def _map_compute_splits_operations(bound_timeserie):
# NOTE (gordc): bound_timeserie is entire set of
# unaggregated measures matching largest
# granularity. the following takes only the points
# affected by new measures for specific granularity
tstamp = max(bound_timeserie.first, measures['timestamps'][0])
new_first_block_timestamp = (
bound_timeserie.first_block_timestamp()
)
computed_points['number'] = len(bound_timeserie)
aggregations = metric.archive_policy.aggregations
grouped_timeseries = {
granularity: bound_timeserie.group_serie(
granularity,
carbonara.round_timestamp(tstamp, granularity))
for granularity, aggregations
# No need to sort the aggregation, they are already
in itertools.groupby(aggregations, ATTRGETTER_GRANULARITY)
}
aggregations_and_timeseries = {
aggregation:
carbonara.AggregatedTimeSerie.from_grouped_serie(
grouped_timeseries[aggregation.granularity],
aggregation)
for aggregation in aggregations
}
deleted_keys, keys_and_split_to_store = (
self._compute_split_operations(
metric, aggregations_and_timeseries,
current_first_block_timestamp,
new_first_block_timestamp)
)
return (new_first_block_timestamp,
deleted_keys,
keys_and_split_to_store)
with self.statistics.time("aggregated measures compute"):
(new_first_block_timestamp,
deleted_keys,
keys_and_splits_to_store) = ts.set_values(
measures,
before_truncate_callback=_map_compute_splits_operations,
)
splits_to_delete[metric] = deleted_keys
splits_to_update[metric] = (keys_and_splits_to_store,
new_first_block_timestamp)
new_boundts.append((metric, ts.serialize()))
with self.statistics.time("splits delete"):
self._delete_metric_splits(splits_to_delete)
self.statistics["splits delete"] += len(splits_to_delete)
with self.statistics.time("splits update"):
self._update_metric_splits(splits_to_update)
self.statistics["splits delete"] += len(splits_to_update)
with self.statistics.time("raw measures store"):
self._store_unaggregated_timeseries(new_boundts)
self.statistics["raw measures store"] += len(new_boundts)
| 4,227 | 24,738 | 204 |
3a36e8f725021dd81d87951dfae88b36531c391e | 14,620 | py | Python | src/raritan/rpc/luaservice/__init__.py | vhirtzel/apc_reboot | 06ab4ef72029b09339a04266970d1546004c9dbe | [
"MIT"
] | 1 | 2021-04-29T23:04:17.000Z | 2021-04-29T23:04:17.000Z | raritan/rpc/luaservice/__init__.py | vhirtzel/desktop_reboot | 553795c1727dedf4532dc97922201df3e9e2ad1e | [
"MIT"
] | null | null | null | raritan/rpc/luaservice/__init__.py | vhirtzel/desktop_reboot | 553795c1727dedf4532dc97922201df3e9e2ad1e | [
"MIT"
] | 2 | 2020-06-20T16:21:23.000Z | 2021-09-28T19:04:44.000Z | # SPDX-License-Identifier: BSD-3-Clause
#
# Copyright 2020 Raritan Inc. All rights reserved.
#
# This is an auto-generated file.
#
# Section generated by IdlC from "LuaService.idl"
#
import raritan.rpc
from raritan.rpc import Interface, Structure, ValueObject, Enumeration, typecheck, DecodeException
import raritan.rpc.luaservice
# structure
# structure
# structure
# interface
| 33.151927 | 192 | 0.618468 | # SPDX-License-Identifier: BSD-3-Clause
#
# Copyright 2020 Raritan Inc. All rights reserved.
#
# This is an auto-generated file.
#
# Section generated by IdlC from "LuaService.idl"
#
import raritan.rpc
from raritan.rpc import Interface, Structure, ValueObject, Enumeration, typecheck, DecodeException
import raritan.rpc.luaservice
# structure
class ScriptState(Structure):
idlType = "luaservice.ScriptState:1.0.0"
elements = ["execState", "exitType", "exitStatus"]
def __init__(self, execState, exitType, exitStatus):
typecheck.is_enum(execState, raritan.rpc.luaservice.ScriptState.ExecState, AssertionError)
typecheck.is_enum(exitType, raritan.rpc.luaservice.ScriptState.ExitType, AssertionError)
typecheck.is_int(exitStatus, AssertionError)
self.execState = execState
self.exitType = exitType
self.exitStatus = exitStatus
@classmethod
def decode(cls, json, agent):
obj = cls(
execState = raritan.rpc.luaservice.ScriptState.ExecState.decode(json['execState']),
exitType = raritan.rpc.luaservice.ScriptState.ExitType.decode(json['exitType']),
exitStatus = json['exitStatus'],
)
return obj
def encode(self):
json = {}
json['execState'] = raritan.rpc.luaservice.ScriptState.ExecState.encode(self.execState)
json['exitType'] = raritan.rpc.luaservice.ScriptState.ExitType.encode(self.exitType)
json['exitStatus'] = self.exitStatus
return json
# enumeration
class ExecState(Enumeration):
idlType = "luaservice.ScriptState.ExecState:1.0.0"
values = ["STAT_NEW", "STAT_RUNNING", "STAT_TERMINATED", "STAT_RESTARTING"]
ExecState.STAT_NEW = ExecState(0)
ExecState.STAT_RUNNING = ExecState(1)
ExecState.STAT_TERMINATED = ExecState(2)
ExecState.STAT_RESTARTING = ExecState(3)
# enumeration
class ExitType(Enumeration):
idlType = "luaservice.ScriptState.ExitType:1.0.0"
values = ["EXIT_CODE", "SIGNAL"]
ExitType.EXIT_CODE = ExitType(0)
ExitType.SIGNAL = ExitType(1)
# structure
class ScriptOptions(Structure):
idlType = "luaservice.ScriptOptions:2.0.0"
elements = ["defaultArgs", "autoStart", "autoRestart"]
def __init__(self, defaultArgs, autoStart, autoRestart):
typecheck.is_bool(autoStart, AssertionError)
typecheck.is_bool(autoRestart, AssertionError)
self.defaultArgs = defaultArgs
self.autoStart = autoStart
self.autoRestart = autoRestart
@classmethod
def decode(cls, json, agent):
obj = cls(
defaultArgs = dict([(
elem['key'],
elem['value'])
for elem in json['defaultArgs']]),
autoStart = json['autoStart'],
autoRestart = json['autoRestart'],
)
return obj
def encode(self):
json = {}
json['defaultArgs'] = [dict(
key = k,
value = v)
for k, v in self.defaultArgs.items()]
json['autoStart'] = self.autoStart
json['autoRestart'] = self.autoRestart
return json
# structure
class Environment(Structure):
idlType = "luaservice.Environment:2.0.0"
elements = ["maxScriptMemoryGrowth", "maxAmountOfScripts", "amountOfScripts", "maxScriptSize", "maxAllScriptSize", "allScriptSize", "outputBufferSize", "restartInterval", "autoStartDelay"]
def __init__(self, maxScriptMemoryGrowth, maxAmountOfScripts, amountOfScripts, maxScriptSize, maxAllScriptSize, allScriptSize, outputBufferSize, restartInterval, autoStartDelay):
typecheck.is_int(maxScriptMemoryGrowth, AssertionError)
typecheck.is_int(maxAmountOfScripts, AssertionError)
typecheck.is_int(amountOfScripts, AssertionError)
typecheck.is_int(maxScriptSize, AssertionError)
typecheck.is_int(maxAllScriptSize, AssertionError)
typecheck.is_int(allScriptSize, AssertionError)
typecheck.is_int(outputBufferSize, AssertionError)
typecheck.is_int(restartInterval, AssertionError)
typecheck.is_int(autoStartDelay, AssertionError)
self.maxScriptMemoryGrowth = maxScriptMemoryGrowth
self.maxAmountOfScripts = maxAmountOfScripts
self.amountOfScripts = amountOfScripts
self.maxScriptSize = maxScriptSize
self.maxAllScriptSize = maxAllScriptSize
self.allScriptSize = allScriptSize
self.outputBufferSize = outputBufferSize
self.restartInterval = restartInterval
self.autoStartDelay = autoStartDelay
@classmethod
def decode(cls, json, agent):
obj = cls(
maxScriptMemoryGrowth = json['maxScriptMemoryGrowth'],
maxAmountOfScripts = json['maxAmountOfScripts'],
amountOfScripts = json['amountOfScripts'],
maxScriptSize = json['maxScriptSize'],
maxAllScriptSize = json['maxAllScriptSize'],
allScriptSize = json['allScriptSize'],
outputBufferSize = json['outputBufferSize'],
restartInterval = json['restartInterval'],
autoStartDelay = json['autoStartDelay'],
)
return obj
def encode(self):
json = {}
json['maxScriptMemoryGrowth'] = self.maxScriptMemoryGrowth
json['maxAmountOfScripts'] = self.maxAmountOfScripts
json['amountOfScripts'] = self.amountOfScripts
json['maxScriptSize'] = self.maxScriptSize
json['maxAllScriptSize'] = self.maxAllScriptSize
json['allScriptSize'] = self.allScriptSize
json['outputBufferSize'] = self.outputBufferSize
json['restartInterval'] = self.restartInterval
json['autoStartDelay'] = self.autoStartDelay
return json
# interface
class Manager(Interface):
idlType = "luaservice.Manager:2.0.1"
NO_ERROR = 0
ERR_INVALID_NAME = 1
ERR_NO_SUCH_SCRIPT = 2
ERR_MAX_SCRIPT_NUMBERS_EXCEEDED = 3
ERR_MAX_SCRIPT_SIZE_EXCEEDED = 4
ERR_MAX_ALL_SCRIPT_SIZE_EXCEEDED = 5
ERR_NOT_TERMINATED = 6
ERR_NOT_RUNNING = 7
ERR_INVALID_ADDR = 8
ERR_TOO_MANY_ARGUMENTS = 10
ERR_ARGUMENT_NOT_VALID = 11
class _setScript(Interface.Method):
name = 'setScript'
@staticmethod
def encode(name, script, options):
typecheck.is_string(name, AssertionError)
typecheck.is_string(script, AssertionError)
typecheck.is_struct(options, raritan.rpc.luaservice.ScriptOptions, AssertionError)
args = {}
args['name'] = name
args['script'] = script
args['options'] = raritan.rpc.luaservice.ScriptOptions.encode(options)
return args
@staticmethod
def decode(rsp, agent):
_ret_ = rsp['_ret_']
typecheck.is_int(_ret_, DecodeException)
return _ret_
class _getScript(Interface.Method):
name = 'getScript'
@staticmethod
def encode(name):
typecheck.is_string(name, AssertionError)
args = {}
args['name'] = name
return args
@staticmethod
def decode(rsp, agent):
_ret_ = rsp['_ret_']
script = rsp['script']
typecheck.is_int(_ret_, DecodeException)
typecheck.is_string(script, DecodeException)
return (_ret_, script)
class _getScriptNames(Interface.Method):
name = 'getScriptNames'
@staticmethod
def encode():
args = {}
return args
@staticmethod
def decode(rsp, agent):
_ret_ = [x0 for x0 in rsp['_ret_']]
for x0 in _ret_:
typecheck.is_string(x0, DecodeException)
return _ret_
class _deleteScript(Interface.Method):
name = 'deleteScript'
@staticmethod
def encode(name):
typecheck.is_string(name, AssertionError)
args = {}
args['name'] = name
return args
@staticmethod
def decode(rsp, agent):
_ret_ = rsp['_ret_']
typecheck.is_int(_ret_, DecodeException)
return _ret_
class _setScriptOptions(Interface.Method):
name = 'setScriptOptions'
@staticmethod
def encode(name, options):
typecheck.is_string(name, AssertionError)
typecheck.is_struct(options, raritan.rpc.luaservice.ScriptOptions, AssertionError)
args = {}
args['name'] = name
args['options'] = raritan.rpc.luaservice.ScriptOptions.encode(options)
return args
@staticmethod
def decode(rsp, agent):
_ret_ = rsp['_ret_']
typecheck.is_int(_ret_, DecodeException)
return _ret_
class _getScriptOptions(Interface.Method):
name = 'getScriptOptions'
@staticmethod
def encode(name):
typecheck.is_string(name, AssertionError)
args = {}
args['name'] = name
return args
@staticmethod
def decode(rsp, agent):
_ret_ = rsp['_ret_']
options = raritan.rpc.luaservice.ScriptOptions.decode(rsp['options'], agent)
typecheck.is_int(_ret_, DecodeException)
typecheck.is_struct(options, raritan.rpc.luaservice.ScriptOptions, DecodeException)
return (_ret_, options)
class _getEnvironment(Interface.Method):
name = 'getEnvironment'
@staticmethod
def encode():
args = {}
return args
@staticmethod
def decode(rsp, agent):
_ret_ = raritan.rpc.luaservice.Environment.decode(rsp['_ret_'], agent)
typecheck.is_struct(_ret_, raritan.rpc.luaservice.Environment, DecodeException)
return _ret_
class _getScriptOutput(Interface.Method):
name = 'getScriptOutput'
@staticmethod
def encode(name, iAddr):
typecheck.is_string(name, AssertionError)
typecheck.is_long(iAddr, AssertionError)
args = {}
args['name'] = name
args['iAddr'] = iAddr
return args
@staticmethod
def decode(rsp, agent):
_ret_ = rsp['_ret_']
oAddr = int(rsp['oAddr'])
nAddr = int(rsp['nAddr'])
oString = rsp['oString']
more = rsp['more']
typecheck.is_int(_ret_, DecodeException)
typecheck.is_long(oAddr, DecodeException)
typecheck.is_long(nAddr, DecodeException)
typecheck.is_string(oString, DecodeException)
typecheck.is_bool(more, DecodeException)
return (_ret_, oAddr, nAddr, oString, more)
class _clearScriptOutput(Interface.Method):
name = 'clearScriptOutput'
@staticmethod
def encode(name):
typecheck.is_string(name, AssertionError)
args = {}
args['name'] = name
return args
@staticmethod
def decode(rsp, agent):
_ret_ = rsp['_ret_']
typecheck.is_int(_ret_, DecodeException)
return _ret_
class _startScript(Interface.Method):
name = 'startScript'
@staticmethod
def encode(name):
typecheck.is_string(name, AssertionError)
args = {}
args['name'] = name
return args
@staticmethod
def decode(rsp, agent):
_ret_ = rsp['_ret_']
typecheck.is_int(_ret_, DecodeException)
return _ret_
class _startScriptWithArgs(Interface.Method):
name = 'startScriptWithArgs'
@staticmethod
def encode(name, arguments):
typecheck.is_string(name, AssertionError)
args = {}
args['name'] = name
args['arguments'] = [dict(
key = k,
value = v)
for k, v in arguments.items()]
return args
@staticmethod
def decode(rsp, agent):
_ret_ = rsp['_ret_']
typecheck.is_int(_ret_, DecodeException)
return _ret_
class _terminateScript(Interface.Method):
name = 'terminateScript'
@staticmethod
def encode(name):
typecheck.is_string(name, AssertionError)
args = {}
args['name'] = name
return args
@staticmethod
def decode(rsp, agent):
_ret_ = rsp['_ret_']
typecheck.is_int(_ret_, DecodeException)
return _ret_
class _getScriptState(Interface.Method):
name = 'getScriptState'
@staticmethod
def encode(name):
typecheck.is_string(name, AssertionError)
args = {}
args['name'] = name
return args
@staticmethod
def decode(rsp, agent):
_ret_ = rsp['_ret_']
state = raritan.rpc.luaservice.ScriptState.decode(rsp['state'], agent)
typecheck.is_int(_ret_, DecodeException)
typecheck.is_struct(state, raritan.rpc.luaservice.ScriptState, DecodeException)
return (_ret_, state)
class _getScriptStates(Interface.Method):
name = 'getScriptStates'
@staticmethod
def encode():
args = {}
return args
@staticmethod
def decode(rsp, agent):
_ret_ = dict([(
elem['key'],
raritan.rpc.luaservice.ScriptState.decode(elem['value'], agent))
for elem in rsp['_ret_']])
return _ret_
def __init__(self, target, agent):
super(Manager, self).__init__(target, agent)
self.setScript = Manager._setScript(self)
self.getScript = Manager._getScript(self)
self.getScriptNames = Manager._getScriptNames(self)
self.deleteScript = Manager._deleteScript(self)
self.setScriptOptions = Manager._setScriptOptions(self)
self.getScriptOptions = Manager._getScriptOptions(self)
self.getEnvironment = Manager._getEnvironment(self)
self.getScriptOutput = Manager._getScriptOutput(self)
self.clearScriptOutput = Manager._clearScriptOutput(self)
self.startScript = Manager._startScript(self)
self.startScriptWithArgs = Manager._startScriptWithArgs(self)
self.terminateScript = Manager._terminateScript(self)
self.getScriptState = Manager._getScriptState(self)
self.getScriptStates = Manager._getScriptStates(self)
| 9,807 | 4,339 | 88 |
a0064ba2f338d8b4a0b9c83ee9fa930a6f519428 | 2,454 | py | Python | api/ProductApi.py | crippledfaith/shop | fb6a520170968e9f90d4d70c3f6a4e793b105e84 | [
"Apache-2.0"
] | null | null | null | api/ProductApi.py | crippledfaith/shop | fb6a520170968e9f90d4d70c3f6a4e793b105e84 | [
"Apache-2.0"
] | null | null | null | api/ProductApi.py | crippledfaith/shop | fb6a520170968e9f90d4d70c3f6a4e793b105e84 | [
"Apache-2.0"
] | null | null | null | from helper.CommonHelper import CommonHelper
from flask_restful import Resource, reqparse
from model.Product import Product
from service.ProductService import ProductService
get_parser = reqparse.RequestParser()
get_parser.add_argument('product_id', type=str, required=False)
get_parser.add_argument('category_id', type=str, required=False)
put_parser = reqparse.RequestParser()
put_parser.add_argument('name', type=str, required=True,
help="name is required")
put_parser.add_argument('unit', type=str, required=True,
help="level is required")
put_parser.add_argument('category_id', type=str,
required=True,
help="category_id is required")
put_parser.add_argument('mrp', type=int,
required=True,
help="mrp is required")
put_parser.add_argument('price', type=int,
required=True,
help="price is required")
put_parser.add_argument('tag', type=list,
required=True,
help="tag is required", location="json")
delete_parser = reqparse.RequestParser()
delete_parser.add_argument('_id', type=str, required=True)
| 37.181818 | 69 | 0.635289 | from helper.CommonHelper import CommonHelper
from flask_restful import Resource, reqparse
from model.Product import Product
from service.ProductService import ProductService
get_parser = reqparse.RequestParser()
get_parser.add_argument('product_id', type=str, required=False)
get_parser.add_argument('category_id', type=str, required=False)
put_parser = reqparse.RequestParser()
put_parser.add_argument('name', type=str, required=True,
help="name is required")
put_parser.add_argument('unit', type=str, required=True,
help="level is required")
put_parser.add_argument('category_id', type=str,
required=True,
help="category_id is required")
put_parser.add_argument('mrp', type=int,
required=True,
help="mrp is required")
put_parser.add_argument('price', type=int,
required=True,
help="price is required")
put_parser.add_argument('tag', type=list,
required=True,
help="tag is required", location="json")
delete_parser = reqparse.RequestParser()
delete_parser.add_argument('_id', type=str, required=True)
class ProductApi(Resource):
def __init__(self) -> None:
super().__init__()
self.service = ProductService()
def get(self):
args = get_parser.parse_args()
product_id = args['product_id']
category_id = args['category_id']
if not product_id and not category_id:
list_of_products = self.service.get_products(None)
elif not product_id and category_id:
list_of_products = self.service.get_products(category_id)
elif product_id and not category_id:
product = self.service.get_product(product_id)
if product is None:
return {}
return product.__dict__
return CommonHelper().objlist_to_dict(list_of_products)
def put(self):
args = put_parser.parse_args()
product = Product().from_dict(args)
if not self.service.add_product(product):
return "Invalid Data", 400
return product.__dict__
def delete(self):
args = delete_parser.parse_args()
product = Product().from_dict(args)
if self.service.delete_product(product):
return "Invalid Data", 400
return "Success", 200
| 1,071 | 6 | 131 |
aafe2626ec76f08a3528ff7391978ee39bf0ff40 | 1,769 | py | Python | module/initializer.py | chencsgit/luoxi_models | ea9e69dfb81b29f41ed92c75faacf81114c69a2f | [
"Apache-2.0"
] | 58 | 2022-03-28T06:16:51.000Z | 2022-03-31T07:36:35.000Z | module/initializer.py | chencsgit/luoxi_models | ea9e69dfb81b29f41ed92c75faacf81114c69a2f | [
"Apache-2.0"
] | null | null | null | module/initializer.py | chencsgit/luoxi_models | ea9e69dfb81b29f41ed92c75faacf81114c69a2f | [
"Apache-2.0"
] | 4 | 2022-03-28T06:23:25.000Z | 2022-03-30T13:45:07.000Z | # -*- coding: utf-8 -*-
# Copyright 2022 The Luoxi Team.
# All rights reserved.
# This source code is licensed under the Apache 2.0 license
# found in the LICENSE file in the root directory.
import torch
# trunk model init
# lite plugin model init
# naive plugin model init
if __name__ == '__main__':
# model.apply(weight_init_normal)
dimension = 10
plugin_layer = torch.nn.Linear(dimension, dimension // 2, True)
print("-" * 50)
print("original")
print("plugin_layer.weight", plugin_layer.weight)
print("plugin_layer.bias", plugin_layer.bias)
default_weight_init(plugin_layer.weight)
default_bias_init(plugin_layer.bias)
print("-" * 50)
print("trunk_init")
print("plugin_layer.weight", plugin_layer.weight)
print("plugin_layer.bias", plugin_layer.bias)
default_lite_plugin_init(plugin_layer)
print("-" * 50)
print("lite_plugin_init")
print("plugin_layer.weight", plugin_layer.weight)
print("plugin_layer.bias", plugin_layer.bias)
default_naive_plugin_init(plugin_layer)
print("-" * 50)
print("naive_plugin_init")
print("plugin_layer.weight", plugin_layer.weight)
print("plugin_layer.bias", plugin_layer.bias) | 34.019231 | 67 | 0.725269 | # -*- coding: utf-8 -*-
# Copyright 2022 The Luoxi Team.
# All rights reserved.
# This source code is licensed under the Apache 2.0 license
# found in the LICENSE file in the root directory.
import torch
# trunk model init
def default_weight_init(tensor):
# torch.nn.init.xavier_uniform(tensor)
torch.nn.init.xavier_uniform_(tensor)
def default_bias_init(tensor):
torch.nn.init.constant_(tensor, 0)
# lite plugin model init
def default_lite_plugin_init(layer):
# torch.nn.init.xavier_uniform(layer.weight, gain=0.001)
torch.nn.init.xavier_uniform_(layer.weight, gain=0.001)
# torch.nn.init.constant_(layer.weight, 0)
torch.nn.init.constant_(layer.bias, 0)
# naive plugin model init
def default_naive_plugin_init(layer):
torch.nn.init.constant_(layer.weight, 0)
torch.nn.init.constant_(layer.bias, 0)
if __name__ == '__main__':
# model.apply(weight_init_normal)
dimension = 10
plugin_layer = torch.nn.Linear(dimension, dimension // 2, True)
print("-" * 50)
print("original")
print("plugin_layer.weight", plugin_layer.weight)
print("plugin_layer.bias", plugin_layer.bias)
default_weight_init(plugin_layer.weight)
default_bias_init(plugin_layer.bias)
print("-" * 50)
print("trunk_init")
print("plugin_layer.weight", plugin_layer.weight)
print("plugin_layer.bias", plugin_layer.bias)
default_lite_plugin_init(plugin_layer)
print("-" * 50)
print("lite_plugin_init")
print("plugin_layer.weight", plugin_layer.weight)
print("plugin_layer.bias", plugin_layer.bias)
default_naive_plugin_init(plugin_layer)
print("-" * 50)
print("naive_plugin_init")
print("plugin_layer.weight", plugin_layer.weight)
print("plugin_layer.bias", plugin_layer.bias) | 474 | 0 | 89 |
198607942cbec91f90a5251a04748bfcd45cf56d | 224 | py | Python | overrides/__init__.py | brentyi/overrides | 59af886a60236a9a71b60c982bf41dfc6419231c | [
"Apache-2.0"
] | 197 | 2015-05-23T13:51:47.000Z | 2022-03-25T07:14:00.000Z | overrides/__init__.py | brentyi/overrides | 59af886a60236a9a71b60c982bf41dfc6419231c | [
"Apache-2.0"
] | 78 | 2015-05-25T20:00:22.000Z | 2022-03-21T21:50:24.000Z | overrides/__init__.py | brentyi/overrides | 59af886a60236a9a71b60c982bf41dfc6419231c | [
"Apache-2.0"
] | 33 | 2015-05-28T14:14:38.000Z | 2021-04-29T08:01:45.000Z | from overrides.enforce import EnforceOverrides
from overrides.final import final
from overrides.overrides import __VERSION__, overrides
__all__ = [
"__VERSION__",
"overrides",
"final",
"EnforceOverrides",
]
| 20.363636 | 54 | 0.745536 | from overrides.enforce import EnforceOverrides
from overrides.final import final
from overrides.overrides import __VERSION__, overrides
__all__ = [
"__VERSION__",
"overrides",
"final",
"EnforceOverrides",
]
| 0 | 0 | 0 |
db334c7cde67301d7c7a0e412bd0faec23bb3c1a | 5,367 | py | Python | source/vistas/core/graphics/raycaster.py | VISTAS-IVES/pyvistas | 2de1541c0fb40ccbac4014af758ff329ba0677b1 | [
"BSD-3-Clause"
] | 1 | 2017-08-26T20:18:38.000Z | 2017-08-26T20:18:38.000Z | source/vistas/core/graphics/raycaster.py | VISTAS-IVES/pyvistas | 2de1541c0fb40ccbac4014af758ff329ba0677b1 | [
"BSD-3-Clause"
] | 89 | 2017-06-10T21:03:16.000Z | 2022-03-11T23:19:56.000Z | source/vistas/core/graphics/raycaster.py | VISTAS-IVES/pyvistas | 2de1541c0fb40ccbac4014af758ff329ba0677b1 | [
"BSD-3-Clause"
] | 1 | 2019-03-05T21:44:29.000Z | 2019-03-05T21:44:29.000Z | from typing import Optional, List
import numpy
from pyrr import Matrix44, Vector3
from vistas.core.bounds import BoundingBox
from vistas.core.graphics.object import Object3D, Intersection
class Ray:
"""
Representation of a ray in 3D space. Rays emit from an origin along a direction. Implementation inspired by mrdoob -
https://github.com/mrdoob/three.js/blob/master/src/math/Ray.js
"""
def at(self, t):
""" Retrieve a point along the ray. """
return self.direction * t + self.origin
def intersect_triangles(self, a, b, c):
""" Determine face-level triangle intersections from this ray. """
e1 = b - a
e2 = c - a
direction = numpy.array(self.direction)
origin = numpy.array(self.origin)
eps = numpy.finfo(numpy.float32).eps
pvec = numpy.cross(direction, e2)
det = numpy.sum(e1 * pvec, axis=-1)
det_cond = (det >= eps) | (det <= -eps) # Get values outside of range -eps < det < eps
inv_det = 1 / det
tvec = origin - a
u = numpy.sum(tvec * pvec, axis=-1) * inv_det
u_cond = (u <= 1) & (u >= 0) # Get values if not (u < 0 or u > 1)
qvec = numpy.cross(tvec, e1)
v = numpy.sum(direction * qvec, axis=-1) * inv_det
v_cond = (v >= 0) & (u + v <= 1) # Get values if not (if v < 0 or u + v > 1)
# Filter down and determine intersections
result = numpy.sum(e2 * qvec, axis=-1) * inv_det
intersections = numpy.where(det_cond & u_cond & v_cond)
distances = result[intersections]
# Now we return their locations in terms of distance
return distances, intersections[0]
class Raycaster:
"""
A class for mouse picking in 3D space. Inspiration from ThreeJS' Raycaster implementation.
https://github.com/mrdoob/three.js/blob/master/src/core/Raycaster.js
"""
def set_from_camera(self, coords: tuple, camera):
""" Update the Raycaster's ray to extend from the given Camera. """
self.ray.origin = camera.get_position()
self.ray.direction = camera.unproject(coords)
self.ray.direction.normalize()
def intersect_object(self, coords, obj, camera) -> List[Intersection]:
""" Retrieve intersections, sorted in ascending distance, to a given Object3D. """
intersects = []
if issubclass(obj.__class__, Object3D):
camera.push_matrix()
self.set_from_camera(coords, camera)
camera.matrix *= Matrix44.from_translation(obj.position)
intersects = obj.raycast(self)
camera.pop_matrix()
if intersects:
intersects.sort(key=lambda i: i.distance)
return intersects
def intersect_objects(self, coords: tuple, camera) -> List[Intersection]:
""" Retrieve intersections to all Object3D objects in a given Camera's Scene. """
intersects = []
for obj in camera.scene.objects:
intersects += self.intersect_object(coords, obj, camera) or []
if intersects:
intersects.sort(key=lambda i: i.distance)
return intersects
| 36.263514 | 120 | 0.596609 | from typing import Optional, List
import numpy
from pyrr import Matrix44, Vector3
from vistas.core.bounds import BoundingBox
from vistas.core.graphics.object import Object3D, Intersection
class Ray:
"""
Representation of a ray in 3D space. Rays emit from an origin along a direction. Implementation inspired by mrdoob -
https://github.com/mrdoob/three.js/blob/master/src/math/Ray.js
"""
def __init__(self, origin: Optional[Vector3]=None, direction: Optional[Vector3]=None):
self.origin = origin if origin is not None else Vector3()
self.direction = direction if direction is not None else Vector3()
self.direction.normalize()
def at(self, t):
""" Retrieve a point along the ray. """
return self.direction * t + self.origin
def intersects_bbox(self, bbox: BoundingBox):
return self.intersect_bbox(bbox) is not None
def intersect_bbox(self, bbox: BoundingBox):
invdirx, invdiry, invdirz = 1 / self.direction # Any or all could evaluate to numpy.inf, handled below
if invdirx >= 0:
tmin = (bbox.min_x - self.origin.x) * invdirx
tmax = (bbox.max_x - self.origin.x) * invdirx
else:
tmin = (bbox.max_x - self.origin.x) * invdirx
tmax = (bbox.min_x - self.origin.x) * invdirx
if invdiry >= 0:
tymin = (bbox.min_y - self.origin.y) * invdiry
tymax = (bbox.max_y - self.origin.y) * invdiry
else:
tymin = (bbox.max_y - self.origin.y) * invdiry
tymax = (bbox.min_y - self.origin.y) * invdiry
if tmin > tymax or tymin > tmax:
return None
if tymin > tmin or tmin != tmin: # tmin != tmin returns false if t_min is numpy.inf
tmin = tymin
if tymax < tmax or tmax != tmax:
tmax = tymax
if invdirz >= 0:
tzmin = (bbox.min_z - self.origin.z) * invdirz
tzmax = (bbox.max_z - self.origin.z) * invdirz
else:
tzmin = (bbox.max_z - self.origin.z) * invdirz
tzmax = (bbox.min_z - self.origin.z) * invdirz
if tmin > tzmax or tzmin > tmax:
return None
if tzmin > tmin or tmin != tmin:
tmin = tzmin
if tzmax < tmax or tmax != tmax:
tmax = tzmax
# Return point closest to the ray on the positive side
if tmax < 0:
return None
return self.at(tmin if tmin >= 0 else tmax)
def intersect_triangles(self, a, b, c):
""" Determine face-level triangle intersections from this ray. """
e1 = b - a
e2 = c - a
direction = numpy.array(self.direction)
origin = numpy.array(self.origin)
eps = numpy.finfo(numpy.float32).eps
pvec = numpy.cross(direction, e2)
det = numpy.sum(e1 * pvec, axis=-1)
det_cond = (det >= eps) | (det <= -eps) # Get values outside of range -eps < det < eps
inv_det = 1 / det
tvec = origin - a
u = numpy.sum(tvec * pvec, axis=-1) * inv_det
u_cond = (u <= 1) & (u >= 0) # Get values if not (u < 0 or u > 1)
qvec = numpy.cross(tvec, e1)
v = numpy.sum(direction * qvec, axis=-1) * inv_det
v_cond = (v >= 0) & (u + v <= 1) # Get values if not (if v < 0 or u + v > 1)
# Filter down and determine intersections
result = numpy.sum(e2 * qvec, axis=-1) * inv_det
intersections = numpy.where(det_cond & u_cond & v_cond)
distances = result[intersections]
# Now we return their locations in terms of distance
return distances, intersections[0]
class Raycaster:
"""
A class for mouse picking in 3D space. Inspiration from ThreeJS' Raycaster implementation.
https://github.com/mrdoob/three.js/blob/master/src/core/Raycaster.js
"""
def __init__(self, origin=None, direction=None, near=None, far=None):
self.ray = Ray(origin, direction)
self.near = near if near else 0
self.far = far if far else numpy.inf
def set_from_camera(self, coords: tuple, camera):
""" Update the Raycaster's ray to extend from the given Camera. """
self.ray.origin = camera.get_position()
self.ray.direction = camera.unproject(coords)
self.ray.direction.normalize()
def intersect_object(self, coords, obj, camera) -> List[Intersection]:
""" Retrieve intersections, sorted in ascending distance, to a given Object3D. """
intersects = []
if issubclass(obj.__class__, Object3D):
camera.push_matrix()
self.set_from_camera(coords, camera)
camera.matrix *= Matrix44.from_translation(obj.position)
intersects = obj.raycast(self)
camera.pop_matrix()
if intersects:
intersects.sort(key=lambda i: i.distance)
return intersects
def intersect_objects(self, coords: tuple, camera) -> List[Intersection]:
""" Retrieve intersections to all Object3D objects in a given Camera's Scene. """
intersects = []
for obj in camera.scene.objects:
intersects += self.intersect_object(coords, obj, camera) or []
if intersects:
intersects.sort(key=lambda i: i.distance)
return intersects
| 2,068 | 0 | 108 |
76ec92898267fdaec7165caef741a88c4463ae8b | 15,567 | py | Python | synapse/tests/test_axon.py | kcreyts/synapse | fe740fd1e0febfa32f8d431b32ab48f8a0cf306e | [
"Apache-2.0"
] | 1 | 2021-02-15T22:07:05.000Z | 2021-02-15T22:07:05.000Z | synapse/tests/test_axon.py | kcreyts/synapse | fe740fd1e0febfa32f8d431b32ab48f8a0cf306e | [
"Apache-2.0"
] | null | null | null | synapse/tests/test_axon.py | kcreyts/synapse | fe740fd1e0febfa32f8d431b32ab48f8a0cf306e | [
"Apache-2.0"
] | null | null | null | import io
import asyncio
import hashlib
import logging
import unittest.mock as mock
import aiohttp.client_exceptions as a_exc
import synapse.exc as s_exc
import synapse.axon as s_axon
import synapse.common as s_common
import synapse.telepath as s_telepath
import synapse.lib.httpapi as s_httpapi
import synapse.lib.msgpack as s_msgpack
import synapse.tests.utils as s_t_utils
logger = logging.getLogger(__name__)
# This causes blocks which are not homogeneous when sliced in kibibyte lengths
bbuf = b'0123456' * 4793491
abuf = b'asdfasdf'
pbuf = b'pennywise'
rbuf = b'robert gray'
bbufhash = hashlib.sha256(bbuf).digest()
asdfhash = hashlib.sha256(abuf).digest()
emptyhash = hashlib.sha256(b'').digest()
pennhash = hashlib.sha256(pbuf).digest()
rgryhash = hashlib.sha256(rbuf).digest()
asdfretn = (8, asdfhash)
emptyretn = (0, emptyhash)
pennretn = (9, pennhash)
rgryretn = (11, rgryhash)
bbufretn = (len(bbuf), bbufhash)
| 37.692494 | 126 | 0.556498 | import io
import asyncio
import hashlib
import logging
import unittest.mock as mock
import aiohttp.client_exceptions as a_exc
import synapse.exc as s_exc
import synapse.axon as s_axon
import synapse.common as s_common
import synapse.telepath as s_telepath
import synapse.lib.httpapi as s_httpapi
import synapse.lib.msgpack as s_msgpack
import synapse.tests.utils as s_t_utils
logger = logging.getLogger(__name__)
# This causes blocks which are not homogeneous when sliced in kibibyte lengths
bbuf = b'0123456' * 4793491
abuf = b'asdfasdf'
pbuf = b'pennywise'
rbuf = b'robert gray'
bbufhash = hashlib.sha256(bbuf).digest()
asdfhash = hashlib.sha256(abuf).digest()
emptyhash = hashlib.sha256(b'').digest()
pennhash = hashlib.sha256(pbuf).digest()
rgryhash = hashlib.sha256(rbuf).digest()
asdfretn = (8, asdfhash)
emptyretn = (0, emptyhash)
pennretn = (9, pennhash)
rgryretn = (11, rgryhash)
bbufretn = (len(bbuf), bbufhash)
class AxonTest(s_t_utils.SynTest):
async def check_blob(self, axon, fhash):
chunks = []
async for chunk in axon.get(fhash):
chunks.append(chunk)
buf = b''.join(chunks)
ahash = hashlib.sha256(buf).digest()
self.eq(fhash, ahash)
async def runAxonTestBase(self, axon):
tick = s_common.now()
logger.info('asdfhash test')
self.false(await axon.has(asdfhash))
with self.raises(s_exc.NoSuchFile):
async for _ in axon.get(asdfhash):
pass
async with await axon.upload() as fd:
await fd.write(abuf)
self.eq(asdfretn, await fd.save())
# do it again to test the short circuit
async with await axon.upload() as fd:
await fd.write(abuf)
self.eq(asdfretn, await fd.save())
bytz = []
async for byts in axon.get(asdfhash):
bytz.append(byts)
self.eq(b'asdfasdf', b''.join(bytz))
self.true(await axon.has(asdfhash))
self.eq(8, await axon.size(asdfhash))
logger.info('bbufhash test')
self.false(await axon.has(bbufhash))
self.eq((bbufhash,), await axon.wants((bbufhash, asdfhash)))
async with await axon.upload() as fd:
await fd.write(bbuf)
self.eq(bbufretn, await fd.save())
self.true(await axon.has(asdfhash))
self.true(await axon.has(bbufhash))
await self.check_blob(axon, bbufhash)
self.eq((), await axon.wants((bbufhash, asdfhash)))
logger.info('put() / puts() tests')
# These don't add new data; but exercise apis to load data
retn = await axon.put(abuf)
self.eq(retn, asdfretn)
retn = await axon.puts([abuf, bbuf])
self.eq(retn, (asdfretn, bbufretn))
logger.info('History and metrics')
items = [x async for x in axon.hashes(0)]
self.eq(((0, (asdfhash, 8)), (1, (bbufhash, 33554437))), items)
items = [x[1] async for x in axon.history(tick)]
self.eq(((asdfhash, 8), (bbufhash, 33554437)), items)
items = [x[1] async for x in axon.history(0, tock=1)]
self.eq((), items)
info = await axon.metrics()
self.eq(33554445, info.get('size:bytes'))
self.eq(2, info.get('file:count'))
logger.info('Empty file test')
async with await axon.upload() as fd:
await fd.write(b'')
self.eq(emptyretn, await fd.save())
info = await axon.metrics()
self.eq(33554445, info.get('size:bytes'))
self.eq(3, info.get('file:count'))
bytz = []
async for byts in axon.get(emptyhash):
bytz.append(byts)
self.eq(b'', b''.join(bytz))
logger.info('Healthcheck test')
snfo = await axon.getHealthCheck()
self.eq(snfo.get('status'), 'nominal')
axfo = [comp for comp in snfo.get('components') if comp.get('name') == 'axon'][0]
self.eq(axfo.get('data'), await axon.metrics())
logger.info('Upload context reuse')
with mock.patch('synapse.axon.MAX_SPOOL_SIZE', s_axon.CHUNK_SIZE * 2):
very_bigbuf = (s_axon.MAX_SPOOL_SIZE + 2) * b'V'
vbighash = hashlib.sha256(very_bigbuf).digest()
vbigretn = (len(very_bigbuf), vbighash)
async with await axon.upload() as fd:
# We can reuse the FD _after_ we have called save() on it.
await fd.write(abuf)
retn = await fd.save()
self.eq(retn, asdfretn)
logger.info('Reuse after uploading an existing file')
# Now write a new file
await fd.write(pbuf)
retn = await fd.save()
self.eq(retn, pennretn)
await self.check_blob(axon, pennhash)
logger.info('Reuse test with large file causing a rollover')
for chunk in s_common.chunks(very_bigbuf, s_axon.CHUNK_SIZE):
await fd.write(chunk)
retn = await fd.save()
self.eq(retn, vbigretn)
await self.check_blob(axon, vbighash)
logger.info('Reuse test with small file post rollover')
await fd.write(rbuf)
retn = await fd.save()
self.eq(retn, rgryretn)
await self.check_blob(axon, rgryhash)
info = await axon.metrics()
self.eq(67108899, info.get('size:bytes'))
self.eq(6, info.get('file:count'))
byts = b''.join([s_msgpack.en('foo'), s_msgpack.en('bar'), s_msgpack.en('baz')])
size, sha256b = await axon.put(byts)
sha256 = s_common.ehex(sha256b)
self.eq(('foo', 'bar', 'baz'), [item async for item in axon.iterMpkFile(sha256)])
# When testing a local axon, we want to ensure that the FD was in fact fini'd
if isinstance(fd, s_axon.UpLoad):
self.true(fd.fd.closed)
async def test_axon_base(self):
async with self.getTestAxon() as axon:
self.isin('axon', axon.dmon.shared)
await self.runAxonTestBase(axon)
async def test_axon_proxy(self):
async with self.getTestAxon() as axon:
async with axon.getLocalProxy() as prox:
await self.runAxonTestBase(prox)
async def test_axon_http(self):
# HTTP handlers on a standalone Axon
async with self.getTestAxon() as axon:
await self.runAxonTestHttp(axon)
async def runAxonTestHttp(self, axon):
host, port = await axon.addHttpsPort(0, host='127.0.0.1')
newb = await axon.auth.addUser('newb')
await newb.setPasswd('secret')
url_ul = f'https://localhost:{port}/api/v1/axon/files/put'
url_hs = f'https://localhost:{port}/api/v1/axon/files/has/sha256'
url_dl = f'https://localhost:{port}/api/v1/axon/files/by/sha256'
asdfhash_h = s_common.ehex(asdfhash)
bbufhash_h = s_common.ehex(bbufhash)
emptyhash_h = s_common.ehex(emptyhash)
# Perms
async with self.getHttpSess(auth=('newb', 'secret'), port=port) as sess:
async with sess.get(f'{url_dl}/{asdfhash_h}') as resp:
self.eq(403, resp.status)
item = await resp.json()
self.eq('err', item.get('status'))
async with sess.get(f'{url_hs}/{asdfhash_h}') as resp:
self.eq(403, resp.status)
item = await resp.json()
self.eq('err', item.get('status'))
async with sess.post(url_ul, data=abuf) as resp:
self.eq(403, resp.status)
item = await resp.json()
self.eq('err', item.get('status'))
# Stream file
byts = io.BytesIO(bbuf)
with self.raises((a_exc.ServerDisconnectedError,
a_exc.ClientOSError)):
async with sess.post(url_ul, data=byts) as resp:
pass
await newb.addRule((True, ('axon', 'get')))
await newb.addRule((True, ('axon', 'has')))
await newb.addRule((True, ('axon', 'upload')))
# Basic
async with self.getHttpSess(auth=('newb', 'secret'), port=port) as sess:
async with sess.get(f'{url_dl}/foobar') as resp:
self.eq(404, resp.status)
async with sess.get(f'{url_dl}/{asdfhash_h}') as resp:
self.eq(404, resp.status)
item = await resp.json()
self.eq('err', item.get('status'))
async with sess.get(f'{url_hs}/{asdfhash_h}') as resp:
self.eq(200, resp.status)
item = await resp.json()
self.eq('ok', item.get('status'))
self.false(item.get('result'))
async with sess.post(url_ul, data=abuf) as resp:
self.eq(200, resp.status)
item = await resp.json()
self.eq('ok', item.get('status'))
result = item.get('result')
self.eq(set(result.keys()), {'size', 'md5', 'sha1', 'sha256', 'sha512'})
self.eq(result.get('size'), asdfretn[0])
self.eq(result.get('sha256'), asdfhash_h)
self.true(await axon.has(asdfhash))
async with sess.get(f'{url_hs}/{asdfhash_h}') as resp:
self.eq(200, resp.status)
item = await resp.json()
self.eq('ok', item.get('status'))
self.true(item.get('result'))
async with sess.put(url_ul, data=abuf) as resp:
self.eq(200, resp.status)
item = await resp.json()
self.eq('ok', item.get('status'))
result = item.get('result')
self.eq(result.get('size'), asdfretn[0])
self.eq(result.get('sha256'), asdfhash_h)
self.true(await axon.has(asdfhash))
async with sess.get(f'{url_dl}/{asdfhash_h}') as resp:
self.eq(200, resp.status)
self.eq(abuf, await resp.read())
# Streaming upload
byts = io.BytesIO(bbuf)
async with sess.post(url_ul, data=byts) as resp:
self.eq(200, resp.status)
item = await resp.json()
self.eq('ok', item.get('status'))
result = item.get('result')
self.eq(result.get('size'), bbufretn[0])
self.eq(result.get('sha256'), bbufhash_h)
self.true(await axon.has(bbufhash))
byts = io.BytesIO(bbuf)
async with sess.put(url_ul, data=byts) as resp:
self.eq(200, resp.status)
item = await resp.json()
self.eq('ok', item.get('status'))
result = item.get('result')
self.eq(result.get('size'), bbufretn[0])
self.eq(result.get('sha256'), bbufhash_h)
self.true(await axon.has(bbufhash))
byts = io.BytesIO(b'')
async with sess.post(url_ul, data=byts) as resp:
self.eq(200, resp.status)
item = await resp.json()
self.eq('ok', item.get('status'))
result = item.get('result')
self.eq(result.get('size'), emptyretn[0])
self.eq(result.get('sha256'), emptyhash_h)
self.true(await axon.has(emptyhash))
# Streaming download
async with sess.get(f'{url_dl}/{bbufhash_h}') as resp:
self.eq(200, resp.status)
byts = []
async for bytz in resp.content.iter_chunked(1024):
byts.append(bytz)
self.gt(len(byts), 1)
self.eq(bbuf, b''.join(byts))
async def test_axon_perms(self):
async with self.getTestAxon() as axon:
user = await axon.auth.addUser('user')
await user.setPasswd('test')
_, port = await axon.dmon.listen('tcp://127.0.0.1:0')
aurl = f'tcp://user:test@127.0.0.1:{port}/axon'
async with await s_telepath.openurl(aurl) as prox: # type: s_axon.AxonApi
# Ensure the user can't do things with bytes they don't have permissions too.
await self.agenraises(s_exc.AuthDeny, prox.get(asdfhash))
await self.asyncraises(s_exc.AuthDeny, prox.has(asdfhash))
await self.agenraises(s_exc.AuthDeny, prox.hashes(0))
await self.agenraises(s_exc.AuthDeny, prox.history(0))
await self.asyncraises(s_exc.AuthDeny, prox.wants((asdfhash,)))
await self.asyncraises(s_exc.AuthDeny, prox.put(abuf))
await self.asyncraises(s_exc.AuthDeny, prox.puts((abuf,)))
await self.asyncraises(s_exc.AuthDeny, prox.upload())
await self.asyncraises(s_exc.AuthDeny, prox.metrics())
# now add rules and run the test suite
await user.addRule((True, ('health',)))
await user.addRule((True, ('axon', 'get',)))
await user.addRule((True, ('axon', 'has',)))
await user.addRule((True, ('axon', 'upload',)))
await self.runAxonTestBase(prox)
async def test_axon_limits(self):
async with self.getTestAxon(conf={'max:count': 10}) as axon:
for i in range(10):
await axon.put(s_common.buid())
with self.raises(s_exc.HitLimit):
await axon.put(s_common.buid())
async with self.getTestAxon(conf={'max:bytes': 320}) as axon:
for i in range(10):
await axon.put(s_common.buid())
with self.raises(s_exc.HitLimit):
await axon.put(s_common.buid())
async def test_axon_wget(self):
async with self.getTestAxon() as axon:
visi = await axon.auth.addUser('visi')
await visi.setAdmin(True)
await visi.setPasswd('secret')
async with await axon.upload() as fd:
await fd.write(b'asdfasdf')
size, sha256 = await fd.save()
host, port = await axon.addHttpsPort(0, host='127.0.0.1')
sha2 = s_common.ehex(sha256)
async with axon.getLocalProxy() as proxy:
resp = await proxy.wget(f'https://visi:secret@127.0.0.1:{port}/api/v1/axon/files/by/sha256/{sha2}', ssl=False)
self.eq(True, resp['ok'])
self.eq(200, resp['code'])
self.eq(8, resp['size'])
self.eq('application/octet-stream', resp['headers']['Content-Type'])
resp = await proxy.wget(f'http://visi:secret@127.0.0.1:{port}/api/v1/axon/files/by/sha256/{sha2}')
self.false(resp['ok'])
async def timeout(self):
await asyncio.sleep(2)
with mock.patch.object(s_httpapi.ActiveV1, 'get', timeout):
resp = await proxy.wget(f'https://visi:secret@127.0.0.1:{port}/api/v1/active', timeout=1)
self.eq(False, resp['ok'])
self.eq('TimeoutError', resp['mesg'])
conf = {'http:proxy': 'socks5://user:pass@127.0.0.1:1'}
async with self.getTestAxon(conf=conf) as axon:
async with axon.getLocalProxy() as proxy:
resp = await proxy.wget('http://vertex.link')
self.ne(-1, resp['mesg'].find('Can not connect to proxy 127.0.0.1:1'))
| 14,358 | 13 | 266 |
d4e7b987c9b17b7b088e79bb764b80f5996f19d7 | 4,277 | py | Python | fewshot/utils/experiment_logger.py | ashok-arjun/few-shot-ssl-public | f7577d80b7491e0f27234a2e9c0113782365c2e1 | [
"MIT"
] | 497 | 2018-03-02T00:50:53.000Z | 2022-03-22T06:30:59.000Z | fewshot/utils/experiment_logger.py | eleniTriantafillou/few-shot-ssl-public | 3cf522031aa40b4ffb61e4693d0b48fdd5669276 | [
"MIT"
] | 20 | 2018-03-19T06:15:30.000Z | 2021-11-20T07:21:38.000Z | fewshot/utils/experiment_logger.py | eleniTriantafillou/few-shot-ssl-public | 3cf522031aa40b4ffb61e4693d0b48fdd5669276 | [
"MIT"
] | 108 | 2018-03-02T06:56:13.000Z | 2021-12-23T03:40:43.000Z | # Copyright (c) 2018 Mengye Ren, Eleni Triantafillou, Sachin Ravi, Jake Snell,
# Kevin Swersky, Joshua B. Tenenbaum, Hugo Larochelle, Richars S. Zemel.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# =============================================================================
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import datetime
import os
import sys
from fewshot.utils import logger
log = logger.get()
class ExperimentLogger():
"""Writes experimental logs to CSV file."""
def __init__(self, logs_folder):
"""Initialize files."""
self._write_to_csv = logs_folder is not None
if self._write_to_csv:
if not os.path.isdir(logs_folder):
os.makedirs(logs_folder)
catalog_file = os.path.join(logs_folder, "catalog")
with open(catalog_file, "w") as f:
f.write("filename,type,name\n")
with open(catalog_file, "a") as f:
f.write("{},plain,{}\n".format("cmd.txt", "Commands"))
with open(os.path.join(logs_folder, "cmd.txt"), "w") as f:
f.write(" ".join(sys.argv))
with open(catalog_file, "a") as f:
f.write("train_ce.csv,csv,Train Loss (Cross Entropy)\n")
f.write("train_acc.csv,csv,Train Accuracy\n")
f.write("valid_acc.csv,csv,Validation Accuracy\n")
f.write("learn_rate.csv,csv,Learning Rate\n")
self.train_file_name = os.path.join(logs_folder, "train_ce.csv")
if not os.path.exists(self.train_file_name):
with open(self.train_file_name, "w") as f:
f.write("step,time,ce\n")
self.trainval_file_name = os.path.join(logs_folder, "train_acc.csv")
if not os.path.exists(self.trainval_file_name):
with open(self.trainval_file_name, "w") as f:
f.write("step,time,acc\n")
self.val_file_name = os.path.join(logs_folder, "valid_acc.csv")
if not os.path.exists(self.val_file_name):
with open(self.val_file_name, "w") as f:
f.write("step,time,acc\n")
self.lr_file_name = os.path.join(logs_folder, "learn_rate.csv")
if not os.path.exists(self.lr_file_name):
with open(self.lr_file_name, "w") as f:
f.write("step,time,lr\n")
def log_train_ce(self, niter, ce):
"""Writes training CE."""
if self._write_to_csv:
with open(self.train_file_name, "a") as f:
f.write("{:d},{:s},{:e}\n".format(
niter + 1, datetime.datetime.now().isoformat(), ce))
def log_train_acc(self, niter, acc):
"""Writes training accuracy."""
if self._write_to_csv:
with open(self.trainval_file_name, "a") as f:
f.write("{:d},{:s},{:e}\n".format(
niter + 1, datetime.datetime.now().isoformat(), acc))
def log_valid_acc(self, niter, acc):
"""Writes validation accuracy."""
if self._write_to_csv:
with open(self.val_file_name, "a") as f:
f.write("{:d},{:s},{:e}\n".format(
niter + 1, datetime.datetime.now().isoformat(), acc))
def log_learn_rate(self, niter, lr):
"""Writes validation accuracy."""
if self._write_to_csv:
with open(self.lr_file_name, "a") as f:
f.write("{:d},{:s},{:e}\n".format(
niter + 1, datetime.datetime.now().isoformat(), lr))
| 39.238532 | 80 | 0.654898 | # Copyright (c) 2018 Mengye Ren, Eleni Triantafillou, Sachin Ravi, Jake Snell,
# Kevin Swersky, Joshua B. Tenenbaum, Hugo Larochelle, Richars S. Zemel.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# =============================================================================
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import datetime
import os
import sys
from fewshot.utils import logger
log = logger.get()
class ExperimentLogger():
"""Writes experimental logs to CSV file."""
def __init__(self, logs_folder):
"""Initialize files."""
self._write_to_csv = logs_folder is not None
if self._write_to_csv:
if not os.path.isdir(logs_folder):
os.makedirs(logs_folder)
catalog_file = os.path.join(logs_folder, "catalog")
with open(catalog_file, "w") as f:
f.write("filename,type,name\n")
with open(catalog_file, "a") as f:
f.write("{},plain,{}\n".format("cmd.txt", "Commands"))
with open(os.path.join(logs_folder, "cmd.txt"), "w") as f:
f.write(" ".join(sys.argv))
with open(catalog_file, "a") as f:
f.write("train_ce.csv,csv,Train Loss (Cross Entropy)\n")
f.write("train_acc.csv,csv,Train Accuracy\n")
f.write("valid_acc.csv,csv,Validation Accuracy\n")
f.write("learn_rate.csv,csv,Learning Rate\n")
self.train_file_name = os.path.join(logs_folder, "train_ce.csv")
if not os.path.exists(self.train_file_name):
with open(self.train_file_name, "w") as f:
f.write("step,time,ce\n")
self.trainval_file_name = os.path.join(logs_folder, "train_acc.csv")
if not os.path.exists(self.trainval_file_name):
with open(self.trainval_file_name, "w") as f:
f.write("step,time,acc\n")
self.val_file_name = os.path.join(logs_folder, "valid_acc.csv")
if not os.path.exists(self.val_file_name):
with open(self.val_file_name, "w") as f:
f.write("step,time,acc\n")
self.lr_file_name = os.path.join(logs_folder, "learn_rate.csv")
if not os.path.exists(self.lr_file_name):
with open(self.lr_file_name, "w") as f:
f.write("step,time,lr\n")
def log_train_ce(self, niter, ce):
"""Writes training CE."""
if self._write_to_csv:
with open(self.train_file_name, "a") as f:
f.write("{:d},{:s},{:e}\n".format(
niter + 1, datetime.datetime.now().isoformat(), ce))
def log_train_acc(self, niter, acc):
"""Writes training accuracy."""
if self._write_to_csv:
with open(self.trainval_file_name, "a") as f:
f.write("{:d},{:s},{:e}\n".format(
niter + 1, datetime.datetime.now().isoformat(), acc))
def log_valid_acc(self, niter, acc):
"""Writes validation accuracy."""
if self._write_to_csv:
with open(self.val_file_name, "a") as f:
f.write("{:d},{:s},{:e}\n".format(
niter + 1, datetime.datetime.now().isoformat(), acc))
def log_learn_rate(self, niter, lr):
"""Writes validation accuracy."""
if self._write_to_csv:
with open(self.lr_file_name, "a") as f:
f.write("{:d},{:s},{:e}\n".format(
niter + 1, datetime.datetime.now().isoformat(), lr))
| 0 | 0 | 0 |
49d05ef263d972799a4855eb4f0bcff36ad44991 | 10,606 | py | Python | cogs/config.py | Novak-maker/DeadBear | 7f69b232a6844e84489852b7e64e3714eb430196 | [
"MIT"
] | null | null | null | cogs/config.py | Novak-maker/DeadBear | 7f69b232a6844e84489852b7e64e3714eb430196 | [
"MIT"
] | null | null | null | cogs/config.py | Novak-maker/DeadBear | 7f69b232a6844e84489852b7e64e3714eb430196 | [
"MIT"
] | null | null | null | import math
import random
from typing import Union, Optional
import discord
from discord.ext import commands
from .utils import db
from .utils import checks
| 38.708029 | 80 | 0.598718 | import math
import random
from typing import Union, Optional
import discord
from discord.ext import commands
from .utils import db
from .utils import checks
class Config(commands.Cog):
def __init__(self, bot):
self.bot = bot
# Set an alias for the bot prefix
@commands.command(
name='PrefixAlias',
description="Sets an alias for the default command prefix.",
brief="Set command prefix alias.",
aliases=['prefixalias', 'prefix', 'pre'])
@commands.guild_only()
@commands.is_owner()
async def change_prefix(self, ctx, prefix):
await db.set_cfg(ctx.guild.id, 'bot_alias', prefix)
await ctx.channel.send(f"My command prefix is now \"{prefix}\".")
# Set perm roles for public commands
@commands.command(
name='PermissionRole',
description="Sets role that can use basic commands.",
brief="Set permrole.",
aliases=['permissionrole', 'permrole', 'pr'])
@commands.guild_only()
@commands.is_owner()
async def set_perms(self, ctx, role: discord.Role):
await db.set_cfg(ctx.guild.id, 'perm_role', role.id)
await ctx.channel.send(f"Added \"{role.name}\" to perm roles.")
# Set the channel for join messages
@commands.group(
name='GuildJoin',
description="Enables or disables the automatic join message in a "
"specified channel. Pass no channel to disable.",
brief="Turn join messages on or off.",
aliases=['guildjoin', 'gjoin', 'gj'],
invoke_without_command=True)
@commands.guild_only()
@commands.is_owner()
async def guild_join(self, ctx, channel: discord.TextChannel=None):
if channel:
await db.set_cfg(ctx.guild.id, 'join_channel', channel.id)
await ctx.channel.send(f"Greeting enabled for \"{channel.name}\".")
else:
await db.set_cfg(ctx.guild.id, 'join_channel', None)
await ctx.channel.send("Greeting disabled.")
# Set the join message
@guild_join.command(
name='Message',
description="Sets the automatic greeting message.",
brief="Modify join message.",
aliases=['message', 'msg'])
@commands.guild_only()
@commands.is_owner()
async def gjoin_message(self, ctx, *, message: str):
await db.set_cfg(ctx.guild.id, 'join_message', message)
await ctx.channel.send(f"The join message is now: \"{message}\"")
# Set the channel for leave messages
@commands.group(
name='GuildLeave',
description="Enables or disables the automatic leave message in a "
"specified channel. Pass no channel to disable.",
brief="Turn leave message on or off.",
aliases=['guildleave', 'gleave', 'gl'],
invoke_without_command=True)
@commands.guild_only()
@commands.is_owner()
async def guild_leave(self, ctx, channel: discord.TextChannel=None):
if channel:
await db.set_cfg(ctx.guild.id, 'leave_channel', channel.id)
await ctx.channel.send(f"Farewells enabled for \"{channel.name}\".")
else:
await db.set_cfg(ctx.guild.id, 'leave_channel', None)
await ctx.channel.send("Farewells disabled.")
# Set the leave message
@guild_leave.command(
name='Message',
description="Sets the automatic leave message.",
brief="Modify leave message.",
aliases=['message', 'msg'])
@commands.guild_only()
@commands.is_owner()
async def gleave_message(self, ctx, *, message: str):
await db.set_cfg(ctx.guild.id, 'leave_message', message)
await ctx.channel.send(f"The farewell message is now: \"{message}\"")
# Set the currency symbol
@commands.command(
name='CurrencySymbol',
description="Sets the server currency symbol.",
aliases=['currencysymbol', 'csymbol'])
@commands.guild_only()
@commands.is_owner()
async def set_currency(self, ctx, emoji: Union[discord.Emoji, str]):
if type(emoji) is str:
await db.set_cfg(ctx.guild.id, 'currency', emoji)
else:
await db.set_cfg(ctx.guild.id, 'currency', emoji.id)
await ctx.channel.send(f"The currency symbol is now: \"{emoji}\"")
# Toggle guild stat tracking
@commands.command(
name='Stats',
description="Toggles guild stats.",
aliases=['stats'])
@commands.guild_only()
@commands.is_owner()
async def stats(self, ctx):
stats = await db.get_cfg(ctx.guild.id, 'guild_stats')
if stats:
reply = "Guild stats have been disabled!"
await db.set_cfg(ctx.guild.id, 'guild_stats', None)
else:
reply = "Guild stats have been enabled!"
await db.set_cfg(ctx.guild.id, 'guild_stats', 'enabled')
await ctx.channel.send(reply)
# Manage starboard settings
@commands.group(
name='Starboard',
description="Sets the configuration for starred messages.",
brief="Modify starboard settings.",
aliases=['starboard', 'star'])
@commands.guild_only()
@commands.is_owner()
async def starboard(self, ctx, channel: discord.TextChannel=None):
starboard = await db.get_cfg(ctx.guild.id, 'star_channel')
if starboard is None:
await db.set_cfg(ctx.guild.id, 'star_channel', channel.id)
await ctx.channel.send(f"Set \"{channel.name}\" as the star board.")
else:
await db.set_cfg(ctx.guild.id, 'star_channel', None)
await ctx.channel.send(f"Starboard disabled.")
# Change starboard threshold
@starboard.command(
name='Threshold',
description="Sets the configuration for starred messages.",
brief="Modify starboard settings.",
aliases=['threshold', 't'])
@commands.guild_only()
@commands.is_owner()
async def star_threshold(self, ctx, threshold):
await db.set_cfg(ctx.guild.id, 'star_threshold', threshold)
await ctx.channel.send(f"Starboard threshold set to {threshold}")
# Event hook for reactions being added to messages
@commands.Cog.listener()
async def on_raw_reaction_add(self, payload):
if payload.user_id == self.bot.user.id:
return
elif payload.guild_id:
await self.star_check(payload, 'add')
# Event hook for reactions being removed from messages
@commands.Cog.listener()
async def on_raw_reaction_remove(self, payload):
if payload.user_id == self.bot.user.id:
return
elif payload.guild_id:
await self.star_check(payload, 'rem')
# Do stuff when a message is sent
@commands.Cog.listener()
async def on_message(self, message):
if not message.author.bot and message.guild:
dbcfg = await db.get_cfg(message.guild.id)
if dbcfg['guild_stats'] == 'enabled':
guildID = message.guild.id
member = message.author
profile = await db.get_member(guildID, member.id)
cashaward = random.randrange(
dbcfg['min_cash'],
dbcfg['max_cash'])
await db.add_currency(message.guild.id, member.id, cashaward)
curxp = profile['xp'] + 1
await db.set_member(guildID, member.id, 'xp', curxp)
nextlevel = profile['lvl'] + 1
levelup = math.floor(curxp / ((2 * nextlevel) ** 2))
if levelup == 1:
channel = message.channel
await channel.send(f"**{member.name}** has leveled up to "
f"**level {nextlevel}!**")
await db.set_member(guildID, member.id, 'lvl', nextlevel)
# Handler for guild reaction events
async def star_check(self, payload, event):
dbcfg = await db.get_cfg(payload.guild_id)
if not dbcfg['star_channel']:
return
guild = self.bot.get_guild(payload.guild_id)
channel = guild.get_channel(payload.channel_id)
if channel.is_nsfw() and not dbcfg['star_nsfw']:
return
message = await channel.fetch_message(payload.message_id)
if message.author.bot:
return
prevstar = await db.get_starred(message.id)
starchannel = guild.get_channel(dbcfg['star_channel'])
if not prevstar:
for react in message.reactions:
if react.count >= dbcfg['star_threshold']:
await self.star_add(message, starchannel)
break
else:
if len(message.reactions) < 2:
await self.star_remove(starchannel, prevstar)
else:
for react in message.reactions:
if react.count < dbcfg['star_threshold']:
await self.star_remove(starchannel, prevstar)
break
# Add star to starboard
async def star_add(self, message, starchannel):
star = discord.Embed(description=message.content,
color=0xf1c40f)
star.set_author(name=message.author.display_name,
icon_url=message.author.avatar_url)
if message.attachments:
images = []
files = []
filetypes = ('png', 'jpeg', 'jpg', 'gif', 'webp')
for attachment in message.attachments:
if attachment.url.lower().endswith(filetypes):
images.append(attachment)
else:
files.append(attachment)
for i, file in enumerate(files):
star.add_field(name=f"Attachment {i + 1}",
value=f"[{file.filename}]({file.url})",
inline=True)
star.set_thumbnail(url=files[0].url)
star.add_field(name="--",
value=f"[Jump to original...]({message.jump_url})",
inline=False)
star.set_footer(text="Originally sent")
star.timestamp = message.created_at
newstar = await starchannel.send(embed=star)
await db.add_starred(message.guild.id, message.id, newstar.id)
# Remove star from starboard
async def star_remove(self, starchannel, starred):
oldstar = await starchannel.fetch_message(starred['starred_id'])
await oldstar.delete()
await db.del_starred(starred['original_id'])
| 6,646 | 3,779 | 23 |
166aaf0992b3cc7a65eef361812939dfa9b26d27 | 836 | py | Python | tests/command_line/test_refine_error_model.py | dials-src/dials | 25055c1f6164dc33e672e7c5c6a9c5a35e870660 | [
"BSD-3-Clause"
] | null | null | null | tests/command_line/test_refine_error_model.py | dials-src/dials | 25055c1f6164dc33e672e7c5c6a9c5a35e870660 | [
"BSD-3-Clause"
] | null | null | null | tests/command_line/test_refine_error_model.py | dials-src/dials | 25055c1f6164dc33e672e7c5c6a9c5a35e870660 | [
"BSD-3-Clause"
] | null | null | null | from __future__ import annotations
import procrunner
| 30.962963 | 80 | 0.702153 | from __future__ import annotations
import procrunner
def test_standalone_error_model_refinement_on_scaled_data(dials_data, tmp_path):
location = dials_data("l_cysteine_4_sweeps_scaled", pathlib=True)
refls = location / "scaled_20_25.refl"
expts = location / "scaled_20_25.expt"
# Use a range of options, some default.
command = [
"dials.refine_error_model",
refls,
expts,
"json=error_model.json",
"html=error_model.html",
"intensity_choice=combine",
"combine.Imid=250",
"basic.minimisation=individual",
]
result = procrunner.run(command, working_directory=tmp_path)
assert not result.returncode and not result.stderr
assert tmp_path.joinpath("error_model.html").is_file()
assert tmp_path.joinpath("error_model.json").is_file()
| 758 | 0 | 23 |
a9c18dcb7aec4f486d25b068cc7562327a78bb53 | 2,399 | py | Python | map.py | PyMLGame/pymlmario | dbece821d93b0d90d03507223be9f25997fafa17 | [
"MIT"
] | null | null | null | map.py | PyMLGame/pymlmario | dbece821d93b0d90d03507223be9f25997fafa17 | [
"MIT"
] | null | null | null | map.py | PyMLGame/pymlmario | dbece821d93b0d90d03507223be9f25997fafa17 | [
"MIT"
] | null | null | null | # coding: utf-8
from PIL import Image
from pymlgame.surface import Surface
NONBLOCKING = 0
BLOCKING = 1
DESTROYABLE = 2
| 27.261364 | 71 | 0.551897 | # coding: utf-8
from PIL import Image
from pymlgame.surface import Surface
NONBLOCKING = 0
BLOCKING = 1
DESTROYABLE = 2
class Map:
def __init__(self, name):
self.width = 0
self.height = 0
self._pixmap, self._nakedmap, self._colmap = self.load(name)
def render_pixmap(self):
"""
Renders the current view of of the map.
:returns: Surface - The surface of the map
"""
s = Surface(self.width, self.height)
for y in range(self.height):
for x in range(self.width):
s.draw_dot((x, y), self._pixmap[x, y])
return s
def render_naked_map(self):
"""
Renders the naked map without destroyable objects
:returns: Surface - The surface of the naked map
"""
s = Surface(self.width, self.height)
for y in range(self.height):
for x in range(self.width):
s.draw_dot((x, y), self._nakedmap[x, y])
return s
def generate_collision_matrix(self):
"""
Generates the collision matrix.
:returns: list - 2-dimensional array of the collision map
"""
def translate(color):
if color == (0, 0, 0):
return BLOCKING
elif color == (127, 127, 127):
return DESTROYABLE
else:
return NONBLOCKING
colmat = []
for x in range(self.width):
row = []
for y in range(self.height):
row.append(translate(self._colmap[x, y]))
colmat.append(row)
return colmat
def load(self, name):
"""
Load map files.
:param name: Base filename of map
:type name: str
:returns: tuple - pixel map and collision map
"""
map_file = Image.open('maps/%s.png' % name)
naked_file = Image.open('maps/%s_naked.png' % name)
col_file = Image.open('maps/%s_collide.png' % name)
self.width = map_file.size[0]
self.height = map_file.size[1]
map_data = map_file.load()
naked_data = naked_file.load()
col_data = col_file.load()
if not (map_file.size == naked_file.size == col_file.size):
print('Error: Sizes of map, naked and collide map differ!')
return False
return map_data, naked_data, col_data
| 318 | 1,935 | 23 |
d72e48d0f6f6658d454c40d9f12d4884b3867612 | 1,304 | py | Python | day-30/check-if-a-string-is-a-valid-sequence-from-root-to-leaves-path-in-a-binary-tree.py | MateuszKudla/30-day-leet-coding-challange | fb6832eb83f8fe31fb1723875330b4ec145686c8 | [
"MIT"
] | null | null | null | day-30/check-if-a-string-is-a-valid-sequence-from-root-to-leaves-path-in-a-binary-tree.py | MateuszKudla/30-day-leet-coding-challange | fb6832eb83f8fe31fb1723875330b4ec145686c8 | [
"MIT"
] | null | null | null | day-30/check-if-a-string-is-a-valid-sequence-from-root-to-leaves-path-in-a-binary-tree.py | MateuszKudla/30-day-leet-coding-challange | fb6832eb83f8fe31fb1723875330b4ec145686c8 | [
"MIT"
] | null | null | null | from typing import List
if __name__ == "__main__":
solution = Solution()
root = buildTree(None, 0, [0,1,0,0,1,0,None,None,1,0,0])
result = solution.isValidSequence(root, [0,1,0,1])
print (result) | 28.977778 | 86 | 0.58589 | from typing import List
class TreeNode:
def __init__(self, val=0, left=None, right=None):
self.val = val
self.left = left
self.right = right
class Solution:
def isValidSequence(self, root: TreeNode, arr: List[int]) -> bool:
if root is None:
return len(arr) == 0
return self.isValid(root, arr, 0)
def isValid(self, root: TreeNode, arr: List[int], index: int) -> bool:
if root.val != arr[index]:
return False
if index == len(arr) - 1:
return root.left is None and root.right is None
return ((root.left is not None and self.isValid(root.left, arr, index + 1)) or
(root.right is not None and self.isValid(root.right, arr, index + 1)))
def buildTree(node: TreeNode, index: int, items: List[int]) -> TreeNode:
if len(items) > index:
if items[index] is not None:
node = TreeNode(items[index])
node.left = buildTree(None, 2*index + 1, items)
node.right = buildTree(None, 2*index + 2, items)
return node
return None
if __name__ == "__main__":
solution = Solution()
root = buildTree(None, 0, [0,1,0,0,1,0,None,None,1,0,0])
result = solution.isValidSequence(root, [0,1,0,1])
print (result) | 944 | -12 | 148 |
6c7623541cb960fe011ce94f95f176183f56c17e | 18,894 | py | Python | ArrayOfDotProducts/generatePlots.py | Sandia2014/intrepid | 9a310ddc033da1dda162a09bf80cca6c2dde2d6b | [
"MIT"
] | null | null | null | ArrayOfDotProducts/generatePlots.py | Sandia2014/intrepid | 9a310ddc033da1dda162a09bf80cca6c2dde2d6b | [
"MIT"
] | null | null | null | ArrayOfDotProducts/generatePlots.py | Sandia2014/intrepid | 9a310ddc033da1dda162a09bf80cca6c2dde2d6b | [
"MIT"
] | null | null | null | import math
import os
import sys
import numpy
import scipy
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib as mpl
import matplotlib.cm as cm
import csv
from mpl_toolkits.mplot3d import Axes3D
from numpy import log10
prefix = 'data/ArrayOfDotProducts_'
suffix = '_clearCache_shadowfax'
outputPrefix = 'figures/'
# read in all of the data.
# TODO: you'll need to disable everything that's not relevant here or it'll be angry about missing files
dotProductSize = numpy.loadtxt(open(prefix + 'dotProductSize' + suffix + '.csv','rb'),delimiter=',',skiprows=0)
memorySize = numpy.loadtxt(open(prefix + 'memorySize' + suffix + '.csv','rb'),delimiter=',',skiprows=0)
numberOfDotProducts = numpy.loadtxt(open(prefix + 'numberOfDotProducts' + suffix + '.csv','rb'),delimiter=',',skiprows=0)
serialTimes = numpy.loadtxt(open(prefix + 'serialTimes' + suffix + '.csv','rb'),delimiter=',',skiprows=0)
ompTimes = numpy.loadtxt(open(prefix + 'ompTimes' + suffix + '.csv','rb'),delimiter=',',skiprows=0)
cudaIndependentTimes = numpy.loadtxt(open(prefix + 'cudaIndependentTimes' + suffix + '.csv','rb'),delimiter=',',skiprows=0)
cudaReductionTimes = numpy.loadtxt(open(prefix + 'cudaReductionTimes' + suffix + '.csv','rb'),delimiter=',',skiprows=0)
cudaSwitchingTimes = numpy.loadtxt(open(prefix + 'cudaSwitchingTimes' + suffix + '.csv','rb'),delimiter=',',skiprows=0)
kokkosOmpTimes = numpy.loadtxt(open(prefix + 'kokkosOmpTimes' + suffix + '.csv','rb'),delimiter=',',skiprows=0)
kokkosCudaIndependentTimes = numpy.loadtxt(open(prefix + 'kokkosCudaIndependentTimes' + suffix + '.csv','rb'),delimiter=',',skiprows=0)
# set up a list of the times and names, for easy iteration later
# TODO: make this consistent with the files that you read in and/or care about
allTimes = []
allNames = []
# NOTE: if you are doing comparisons against serial time, it's assumed that the first entry in allTimes is serial
allTimes.append(serialTimes)
allNames.append('serial')
# NOTE: if you are doing comparisons against omp time, it's assumed that the second entry in allTimes is openmp. if you aren't doing those comparisons, you should go disable that portion of this script.
allTimes.append(ompTimes)
allNames.append('omp')
# NOTE: if you are doing comparisons against cuda time, it's assumed that the third entry in allTimes is cuda. if you aren't doing those comparisons, you should go disable that portion of this script.
allTimes.append(cudaIndependentTimes)
allNames.append('cudaIndependent')
# there are no assumptions about the rest of the ordering
allTimes.append(cudaReductionTimes)
allNames.append('cudaReduction')
allTimes.append(cudaSwitchingTimes)
allNames.append('cudaSwitching')
allTimes.append(kokkosOmpTimes)
allNames.append('kokkosOmp')
allTimes.append(kokkosCudaIndependentTimes)
allNames.append('kokkosCudaIndependent')
# these are toggles for whether to make image files and whether to make orbit files for making movies
makeImageFiles = True
#makeImageFiles = False
makeOrbitFilesForMovies = True
#makeOrbitFilesForMovies = False
numberOfOrbitFrames = 100
#markerPool = ['-', '--', ':']
markerPool = ['-', '--']
colors = cm.gist_ncar(numpy.linspace(1, 0, len(allTimes)))
markers = []
for i in range(len(allTimes)):
markers.append(markerPool[i % len(markerPool)])
fig3d = plt.figure(0)
fig2d = plt.figure(1, figsize=(14, 6))
ax2d = plt.subplot(111)
box2d = ax2d.get_position()
ax2d.set_position([box2d.x0, box2d.y0, box2d.width * 0.60, box2d.height])
bbox_to_anchor2d = (1.87, 0.5)
# make an image of just the number of dot products
# TODO: you might want to make an image of the number of cells, so you'd adjust this.
fig3d = plt.figure(0)
ax = fig3d.gca(projection='3d')
ax.view_init(elev=0, azim=-111)
surf = ax.plot_surface(log10(dotProductSize), log10(memorySize), log10(numberOfDotProducts), rstride=1, cstride=1, cmap=cm.coolwarm, linewidth=0.5, antialiased=False)
plt.xlabel('log10(dotProductSize)')
plt.ylabel('log10(memorySize)')
ax.set_zlabel('log10(numberOfDotProducts)')
plt.title('number of dot products')
if (makeImageFiles == True):
ax.view_init(elev=2, azim=-23)
filename = outputPrefix + 'NumberOfDotProducts' + suffix
plt.savefig(filename + '.pdf')
print 'saved file to %s' % filename
else:
plt.show()
# goal: make images showing just the raw times
# find the min and max values across all flavors so that the color scale is the same for each graph
maxValue = -10
minValue = 10
for timesIndex in numpy.arange(0, len(allTimes)):
maxValue = numpy.max([maxValue, numpy.max(log10(allTimes[timesIndex]))])
minValue = numpy.min([minValue, numpy.min(log10(allTimes[timesIndex]))])
# make the color scale
colorNormalizer = matplotlib.colors.Normalize(vmin=minValue, vmax=maxValue)
# for each time
for timesIndex in range(len(allTimes)):
# make a 3d plot
fig3d = plt.figure(0)
plt.clf()
times = allTimes[timesIndex]
name = allNames[timesIndex]
ax = fig3d.gca(projection='3d')
ax.view_init(elev=0, azim=-111)
surf = ax.plot_surface(log10(dotProductSize), log10(memorySize), log10(times), rstride=1, cstride=1, cmap=cm.coolwarm, linewidth=0.5, antialiased=False)
surf.set_norm(colorNormalizer)
plt.xlabel('log10(dotProductSize)')
plt.ylabel('log10(memorySize)')
ax.set_zlabel('log10(raw time) [seconds]')
ax.set_zlim([minValue, maxValue])
plt.title(name + ' raw time')
if (makeImageFiles == True):
ax.view_init(elev=2, azim=-23)
filename = outputPrefix + 'RawTimes_' + name + suffix
plt.savefig(filename + '.pdf')
print 'saved file to %s' % filename
else:
plt.show()
# make a 2D plot of all flavors, for the smallest and largest sizes of memory
fig2d = plt.figure(1)
for memorySizeIndex in [-1, 0]:
legendNames = []
plt.cla()
for timesIndex in range(len(allTimes)):
times = allTimes[timesIndex]
name = allNames[timesIndex]
plt.plot(dotProductSize[:, memorySizeIndex], times[:, memorySizeIndex], markers[timesIndex], color=colors[timesIndex], hold='on', linewidth=2)
legendNames.append(name)
plt.xscale('log')
plt.yscale('log')
plt.title('raw times for memory size %.2e' % memorySize[0, memorySizeIndex], fontsize=16)
plt.xlabel('dot product size', fontsize=16)
plt.ylabel('raw time [seconds]', fontsize=16)
plt.xlim([dotProductSize[0, 0], dotProductSize[-1, 0]])
ax2d.legend(legendNames, loc='center right', bbox_to_anchor=bbox_to_anchor2d)
if (makeImageFiles == True):
sizeDescription = 'largestSize' if (memorySizeIndex == -1) else 'smallestSize'
filename = outputPrefix + 'RawTimes_2d_' + sizeDescription + suffix
plt.savefig(filename + '.pdf')
print 'saved file to %s' % filename
else:
plt.show()
# now make plots that are normalized by memory size
maxValue = -10
minValue = 10
for timesIndex in numpy.arange(0, len(allTimes)):
maxValue = numpy.max([maxValue, numpy.max(log10(allTimes[timesIndex] / memorySize))])
minValue = numpy.min([minValue, numpy.min(log10(allTimes[timesIndex] / memorySize))])
colorNormalizer = matplotlib.colors.Normalize(vmin=minValue, vmax=maxValue)
for timesIndex in range(len(allTimes)):
fig3d = plt.figure(0)
plt.clf()
times = allTimes[timesIndex]
name = allNames[timesIndex]
ax = fig3d.gca(projection='3d')
ax.view_init(elev=0, azim=-111)
surf = ax.plot_surface(log10(dotProductSize), log10(memorySize), log10(times / memorySize), rstride=1, cstride=1, cmap=cm.coolwarm, linewidth=0.5, antialiased=False)
surf.set_norm(colorNormalizer)
plt.xlabel('log10(dotProductSize)')
plt.ylabel('log10(memorySize)')
ax.set_zlabel('log10(normalized time [seconds / memorySize])')
ax.set_zlim([minValue, maxValue])
plt.title(name + ' normalized time')
if (makeImageFiles == True):
ax.view_init(elev=2, azim=-23)
filename = outputPrefix + 'NormalizedTime_' + name + suffix
plt.savefig(filename + '.pdf')
print 'saved file to %s' % filename
# possibly make orbit plots for movies
if (makeOrbitFilesForMovies == True):
for frameIndex in range(numberOfOrbitFrames):
ax.view_init(elev=2, azim=360 * frameIndex / (numberOfOrbitFrames - 1))
filename = outputPrefix + 'orbitFrames/NormalizedTime_' + name + suffix + '_%02d.pdf' % frameIndex
plt.savefig(filename)
print 'saved file to %s' % filename
else:
plt.show()
# now make relative speedups over serial
maxSpeedup = -10
minSpeedup = 10
for timesIndex in numpy.arange(1, len(allTimes)):
maxSpeedup = numpy.max([maxSpeedup, numpy.max(log10(allTimes[0] / allTimes[timesIndex]))])
minSpeedup = numpy.min([minSpeedup, numpy.min(log10(allTimes[0] / allTimes[timesIndex]))])
colorNormalizer = matplotlib.colors.Normalize(vmin=minSpeedup, vmax=maxSpeedup)
# intentionally start at 1 so that i don't compare serial to serial
for timesIndex in numpy.arange(1, len(allTimes)):
fig3d = plt.figure(0)
plt.clf()
times = allTimes[timesIndex]
name = allNames[timesIndex]
ax = fig3d.gca(projection='3d')
ax.view_init(elev=0, azim=-111)
surf = ax.plot_surface(log10(dotProductSize), log10(memorySize), log10(allTimes[0] / times), rstride=1, cstride=1, cmap=cm.coolwarm, linewidth=0.5, antialiased=False)
surf.set_norm(colorNormalizer)
plt.xlabel('log10(dotProductSize)')
plt.ylabel('log10(memorySize)')
ax.set_zlabel('log10(speedup) [unitless]')
ax.set_zlim([minSpeedup, maxSpeedup])
plt.title(name + ' speedup over serial')
if (makeImageFiles == True):
ax.view_init(elev=2, azim=-23)
filename = outputPrefix + 'VersusSerial_' + name + suffix
plt.savefig(filename + '.pdf')
print 'saved file to %s' % filename
if (makeOrbitFilesForMovies == True and timesIndex > 0):
for frameIndex in range(numberOfOrbitFrames):
ax.view_init(elev=2, azim=360 * frameIndex / (numberOfOrbitFrames - 1))
filename = outputPrefix + 'orbitFrames/VersusSerial_' + name + suffix + '_%02d.pdf' % frameIndex
plt.savefig(filename)
print 'saved file to %s' % filename
else:
plt.show()
fig2d = plt.figure(1)
for memorySizeIndex in [-1, 0]:
legendNames = []
plt.cla()
for timesIndex in range(len(allTimes)):
times = allTimes[timesIndex]
name = allNames[timesIndex]
plt.plot(dotProductSize[:, memorySizeIndex], allTimes[0][:, memorySizeIndex] / times[:, memorySizeIndex], markers[timesIndex], color=colors[timesIndex], hold='on', linewidth=2)
legendNames.append(name)
plt.xscale('log')
plt.yscale('log')
plt.title('speedup over serial for memory size %.2e' % memorySize[0, memorySizeIndex], fontsize=16)
plt.xlabel('dot product size', fontsize=16)
plt.ylabel('speedup [unitless]', fontsize=16)
#plt.ylim([0, 6])
plt.xlim([dotProductSize[0, 0], dotProductSize[-1, 0]])
ax2d.legend(legendNames, loc='center right', bbox_to_anchor=bbox_to_anchor2d)
if (makeImageFiles == True):
sizeDescription = 'largestSize' if (memorySizeIndex == -1) else 'smallestSize'
filename = outputPrefix + 'VersusSerial_2d_' + sizeDescription + suffix
plt.savefig(filename + '.pdf')
print 'saved file to %s' % filename
else:
plt.show()
# now make relative speedup over openmp
# TODO: you might disable this part
maxSpeedup = -10
minSpeedup = 10
for timesIndex in numpy.arange(2, len(allTimes)):
maxSpeedup = numpy.max([maxSpeedup, numpy.max(log10(allTimes[1] / allTimes[timesIndex]))])
minSpeedup = numpy.min([minSpeedup, numpy.min(log10(allTimes[1] / allTimes[timesIndex]))])
colorNormalizer = matplotlib.colors.Normalize(vmin=minSpeedup, vmax=maxSpeedup)
# intentionally start at 2 so that i don't compare serial or omp to omp
for timesIndex in numpy.arange(2, len(allTimes)):
fig3d = plt.figure(0)
plt.clf()
times = allTimes[timesIndex]
name = allNames[timesIndex]
ax = fig3d.gca(projection='3d')
ax.view_init(elev=0, azim=-111)
surf = ax.plot_surface(log10(dotProductSize), log10(memorySize), log10(allTimes[1] / times), rstride=1, cstride=1, cmap=cm.coolwarm, linewidth=0.5, antialiased=False)
surf.set_norm(colorNormalizer)
plt.xlabel('log10(dotProductSize)')
plt.ylabel('log10(memorySize)')
ax.set_zlabel('log10(speedup) [unitless]')
ax.set_zlim([minSpeedup, maxSpeedup])
plt.title(name + ' speedup over omp')
if (makeImageFiles == True):
ax.view_init(elev=2, azim=-23)
filename = outputPrefix + 'VersusOmp_' + name + suffix
plt.savefig(filename + '.pdf')
print 'saved file to %s' % filename
if (makeOrbitFilesForMovies == True and timesIndex > 1):
for frameIndex in range(numberOfOrbitFrames):
ax.view_init(elev=2, azim=360 * frameIndex / (numberOfOrbitFrames - 1))
filename = outputPrefix + 'orbitFrames/VersusOmp_' + name + suffix + '_%02d.pdf' % frameIndex
plt.savefig(filename)
print 'saved file to %s' % filename
else:
plt.show()
fig2d = plt.figure(1)
for memorySizeIndex in [-1, 0]:
legendNames = []
plt.cla()
for timesIndex in range(len(allTimes)):
times = allTimes[timesIndex]
name = allNames[timesIndex]
plt.plot(dotProductSize[:, memorySizeIndex], allTimes[1][:, memorySizeIndex] / times[:, memorySizeIndex], markers[timesIndex], color=colors[timesIndex], hold='on', linewidth=2)
legendNames.append(name)
plt.xscale('log')
plt.yscale('log')
plt.title('speedup over openmp for memory size %.2e' % memorySize[0, memorySizeIndex], fontsize=16)
plt.xlabel('dot product size', fontsize=16)
plt.ylabel('speedup [unitless]', fontsize=16)
plt.xlim([dotProductSize[0, 0], dotProductSize[-1, 0]])
ax2d.legend(legendNames, loc='center right', bbox_to_anchor=bbox_to_anchor2d)
if (makeImageFiles == True):
sizeDescription = 'largestSize' if (memorySizeIndex == -1) else 'smallestSize'
filename = outputPrefix + 'VersusOmp_2d_' + sizeDescription + suffix
plt.savefig(filename + '.pdf')
print 'saved file to %s' % filename
else:
plt.show()
# relative speedup over cudaIndependent
# TODO: you might disable this part
maxSpeedup = -10
minSpeedup = 10
for timesIndex in numpy.arange(3, len(allTimes)):
maxSpeedup = numpy.max([maxSpeedup, numpy.max(log10(allTimes[2] / allTimes[timesIndex]))])
minSpeedup = numpy.min([minSpeedup, numpy.min(log10(allTimes[2] / allTimes[timesIndex]))])
colorNormalizer = matplotlib.colors.Normalize(vmin=minSpeedup, vmax=maxSpeedup)
# intentionally start at 3 so that i don't compare cuda or serial or omp to cuda
for timesIndex in numpy.arange(3, len(allTimes)):
fig3d = plt.figure(0)
plt.clf()
times = allTimes[timesIndex]
name = allNames[timesIndex]
ax = fig3d.gca(projection='3d')
ax.view_init(elev=0, azim=-111)
surf = ax.plot_surface(log10(dotProductSize), log10(memorySize), log10(allTimes[2] / times), rstride=1, cstride=1, cmap=cm.coolwarm, linewidth=0.5, antialiased=False)
surf.set_norm(colorNormalizer)
plt.xlabel('log10(dotProductSize)')
plt.ylabel('log10(memorySize)')
ax.set_zlabel('log10(speedup) [unitless]')
ax.set_zlim([minSpeedup, maxSpeedup])
plt.title(name + ' speedup over cudaIndependent')
if (makeImageFiles == True):
ax.view_init(elev=2, azim=-23)
filename = outputPrefix + 'VersusCudaIndependent_' + name + suffix
plt.savefig(filename + '.pdf')
print 'saved file to %s' % filename
if (makeOrbitFilesForMovies == True and timesIndex > 2):
for frameIndex in range(numberOfOrbitFrames):
ax.view_init(elev=2, azim=360 * frameIndex / (numberOfOrbitFrames - 1))
filename = outputPrefix + 'orbitFrames/VersusCudaIndependent_' + name + suffix + '_%02d.pdf' % frameIndex
plt.savefig(filename)
print 'saved file to %s' % filename
else:
plt.show()
fig2d = plt.figure(1)
for memorySizeIndex in [-1, 0]:
legendNames = []
plt.cla()
for timesIndex in range(len(allTimes)):
times = allTimes[timesIndex]
name = allNames[timesIndex]
plt.plot(dotProductSize[:, memorySizeIndex], allTimes[2][:, memorySizeIndex] / times[:, memorySizeIndex], markers[timesIndex], color=colors[timesIndex], hold='on', linewidth=2)
legendNames.append(name)
plt.xscale('log')
plt.yscale('log')
plt.title('speedup over cuda independent for memory size %.2e' % memorySize[0, memorySizeIndex], fontsize=16)
plt.xlabel('dot product size', fontsize=16)
plt.ylabel('speedup [unitless]', fontsize=16)
plt.xlim([dotProductSize[0, 0], dotProductSize[-1, 0]])
ax2d.legend(legendNames, loc='center right', bbox_to_anchor=bbox_to_anchor2d)
if (makeImageFiles == True):
sizeDescription = 'largestSize' if (memorySizeIndex == -1) else 'smallestSize'
filename = outputPrefix + 'VersusCudaIndependent_2d_' + sizeDescription + suffix
plt.savefig(filename + '.pdf')
print 'saved file to %s' % filename
else:
plt.show()
# these graphs are essentially duplicates of ones made already, but with a linear scale instead of logarithmic (by request of carter).
# these graphs just compare kokkos omp versus openmp and kokkos cuda versus cuda
# omp
fig3d = plt.figure(0)
plt.clf()
ax = fig3d.gca(projection='3d')
ax.view_init(elev=0, azim=-111)
surf = ax.plot_surface(log10(dotProductSize), log10(memorySize), (allTimes[1] / allTimes[5]), rstride=1, cstride=1, cmap=cm.coolwarm, linewidth=0.5, antialiased=False)
plt.xlabel('log10(dotProductSize)')
plt.ylabel('log10(memorySize)')
ax.set_zlabel('speedup [unitless]')
plt.title('kokkos omp speedup over omp')
if (makeImageFiles == True):
ax.view_init(elev=2, azim=-23)
filename = outputPrefix + 'VersusOmp_kokkosOmp_linear' + suffix
plt.savefig(filename + '.pdf')
print 'saved file to %s' % filename
if (makeOrbitFilesForMovies == True):
for frameIndex in range(numberOfOrbitFrames):
ax.view_init(elev=2, azim=360 * frameIndex / (numberOfOrbitFrames - 1))
filename = outputPrefix + 'orbitFrames/VersusOmp_kokkosOmp_linear' + suffix + '_%02d.pdf' % frameIndex
plt.savefig(filename)
print 'saved file to %s' % filename
else:
plt.show()
# cuda
fig3d = plt.figure(0)
plt.clf()
ax = fig3d.gca(projection='3d')
ax.view_init(elev=0, azim=-111)
surf = ax.plot_surface(log10(dotProductSize), log10(memorySize), (allTimes[2] / allTimes[6]), rstride=1, cstride=1, cmap=cm.coolwarm, linewidth=0.5, antialiased=False)
plt.xlabel('log10(dotProductSize)')
plt.ylabel('log10(memorySize)')
ax.set_zlabel('speedup [unitless]')
plt.title('kokkos cuda speedup over cuda')
if (makeImageFiles == True):
ax.view_init(elev=2, azim=-23)
filename = outputPrefix + 'VersusCudaIndependent_kokkosCudaIndependent_linear' + suffix
plt.savefig(filename + '.pdf')
print 'saved file to %s' % filename
if (makeOrbitFilesForMovies == True):
for frameIndex in range(numberOfOrbitFrames):
ax.view_init(elev=2, azim=360 * frameIndex / (numberOfOrbitFrames - 1))
filename = outputPrefix + 'orbitFrames/VersusCudaIndependent_kokkosCudaIndependent_linear' + suffix + '_%02d.pdf' % frameIndex
plt.savefig(filename)
print 'saved file to %s' % filename
else:
plt.show()
| 44.772512 | 203 | 0.721869 | import math
import os
import sys
import numpy
import scipy
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib as mpl
import matplotlib.cm as cm
import csv
from mpl_toolkits.mplot3d import Axes3D
from numpy import log10
prefix = 'data/ArrayOfDotProducts_'
suffix = '_clearCache_shadowfax'
outputPrefix = 'figures/'
# read in all of the data.
# TODO: you'll need to disable everything that's not relevant here or it'll be angry about missing files
dotProductSize = numpy.loadtxt(open(prefix + 'dotProductSize' + suffix + '.csv','rb'),delimiter=',',skiprows=0)
memorySize = numpy.loadtxt(open(prefix + 'memorySize' + suffix + '.csv','rb'),delimiter=',',skiprows=0)
numberOfDotProducts = numpy.loadtxt(open(prefix + 'numberOfDotProducts' + suffix + '.csv','rb'),delimiter=',',skiprows=0)
serialTimes = numpy.loadtxt(open(prefix + 'serialTimes' + suffix + '.csv','rb'),delimiter=',',skiprows=0)
ompTimes = numpy.loadtxt(open(prefix + 'ompTimes' + suffix + '.csv','rb'),delimiter=',',skiprows=0)
cudaIndependentTimes = numpy.loadtxt(open(prefix + 'cudaIndependentTimes' + suffix + '.csv','rb'),delimiter=',',skiprows=0)
cudaReductionTimes = numpy.loadtxt(open(prefix + 'cudaReductionTimes' + suffix + '.csv','rb'),delimiter=',',skiprows=0)
cudaSwitchingTimes = numpy.loadtxt(open(prefix + 'cudaSwitchingTimes' + suffix + '.csv','rb'),delimiter=',',skiprows=0)
kokkosOmpTimes = numpy.loadtxt(open(prefix + 'kokkosOmpTimes' + suffix + '.csv','rb'),delimiter=',',skiprows=0)
kokkosCudaIndependentTimes = numpy.loadtxt(open(prefix + 'kokkosCudaIndependentTimes' + suffix + '.csv','rb'),delimiter=',',skiprows=0)
# set up a list of the times and names, for easy iteration later
# TODO: make this consistent with the files that you read in and/or care about
allTimes = []
allNames = []
# NOTE: if you are doing comparisons against serial time, it's assumed that the first entry in allTimes is serial
allTimes.append(serialTimes)
allNames.append('serial')
# NOTE: if you are doing comparisons against omp time, it's assumed that the second entry in allTimes is openmp. if you aren't doing those comparisons, you should go disable that portion of this script.
allTimes.append(ompTimes)
allNames.append('omp')
# NOTE: if you are doing comparisons against cuda time, it's assumed that the third entry in allTimes is cuda. if you aren't doing those comparisons, you should go disable that portion of this script.
allTimes.append(cudaIndependentTimes)
allNames.append('cudaIndependent')
# there are no assumptions about the rest of the ordering
allTimes.append(cudaReductionTimes)
allNames.append('cudaReduction')
allTimes.append(cudaSwitchingTimes)
allNames.append('cudaSwitching')
allTimes.append(kokkosOmpTimes)
allNames.append('kokkosOmp')
allTimes.append(kokkosCudaIndependentTimes)
allNames.append('kokkosCudaIndependent')
# these are toggles for whether to make image files and whether to make orbit files for making movies
makeImageFiles = True
#makeImageFiles = False
makeOrbitFilesForMovies = True
#makeOrbitFilesForMovies = False
numberOfOrbitFrames = 100
#markerPool = ['-', '--', ':']
markerPool = ['-', '--']
colors = cm.gist_ncar(numpy.linspace(1, 0, len(allTimes)))
markers = []
for i in range(len(allTimes)):
markers.append(markerPool[i % len(markerPool)])
fig3d = plt.figure(0)
fig2d = plt.figure(1, figsize=(14, 6))
ax2d = plt.subplot(111)
box2d = ax2d.get_position()
ax2d.set_position([box2d.x0, box2d.y0, box2d.width * 0.60, box2d.height])
bbox_to_anchor2d = (1.87, 0.5)
# make an image of just the number of dot products
# TODO: you might want to make an image of the number of cells, so you'd adjust this.
fig3d = plt.figure(0)
ax = fig3d.gca(projection='3d')
ax.view_init(elev=0, azim=-111)
surf = ax.plot_surface(log10(dotProductSize), log10(memorySize), log10(numberOfDotProducts), rstride=1, cstride=1, cmap=cm.coolwarm, linewidth=0.5, antialiased=False)
plt.xlabel('log10(dotProductSize)')
plt.ylabel('log10(memorySize)')
ax.set_zlabel('log10(numberOfDotProducts)')
plt.title('number of dot products')
if (makeImageFiles == True):
ax.view_init(elev=2, azim=-23)
filename = outputPrefix + 'NumberOfDotProducts' + suffix
plt.savefig(filename + '.pdf')
print 'saved file to %s' % filename
else:
plt.show()
# goal: make images showing just the raw times
# find the min and max values across all flavors so that the color scale is the same for each graph
maxValue = -10
minValue = 10
for timesIndex in numpy.arange(0, len(allTimes)):
maxValue = numpy.max([maxValue, numpy.max(log10(allTimes[timesIndex]))])
minValue = numpy.min([minValue, numpy.min(log10(allTimes[timesIndex]))])
# make the color scale
colorNormalizer = matplotlib.colors.Normalize(vmin=minValue, vmax=maxValue)
# for each time
for timesIndex in range(len(allTimes)):
# make a 3d plot
fig3d = plt.figure(0)
plt.clf()
times = allTimes[timesIndex]
name = allNames[timesIndex]
ax = fig3d.gca(projection='3d')
ax.view_init(elev=0, azim=-111)
surf = ax.plot_surface(log10(dotProductSize), log10(memorySize), log10(times), rstride=1, cstride=1, cmap=cm.coolwarm, linewidth=0.5, antialiased=False)
surf.set_norm(colorNormalizer)
plt.xlabel('log10(dotProductSize)')
plt.ylabel('log10(memorySize)')
ax.set_zlabel('log10(raw time) [seconds]')
ax.set_zlim([minValue, maxValue])
plt.title(name + ' raw time')
if (makeImageFiles == True):
ax.view_init(elev=2, azim=-23)
filename = outputPrefix + 'RawTimes_' + name + suffix
plt.savefig(filename + '.pdf')
print 'saved file to %s' % filename
else:
plt.show()
# make a 2D plot of all flavors, for the smallest and largest sizes of memory
fig2d = plt.figure(1)
for memorySizeIndex in [-1, 0]:
legendNames = []
plt.cla()
for timesIndex in range(len(allTimes)):
times = allTimes[timesIndex]
name = allNames[timesIndex]
plt.plot(dotProductSize[:, memorySizeIndex], times[:, memorySizeIndex], markers[timesIndex], color=colors[timesIndex], hold='on', linewidth=2)
legendNames.append(name)
plt.xscale('log')
plt.yscale('log')
plt.title('raw times for memory size %.2e' % memorySize[0, memorySizeIndex], fontsize=16)
plt.xlabel('dot product size', fontsize=16)
plt.ylabel('raw time [seconds]', fontsize=16)
plt.xlim([dotProductSize[0, 0], dotProductSize[-1, 0]])
ax2d.legend(legendNames, loc='center right', bbox_to_anchor=bbox_to_anchor2d)
if (makeImageFiles == True):
sizeDescription = 'largestSize' if (memorySizeIndex == -1) else 'smallestSize'
filename = outputPrefix + 'RawTimes_2d_' + sizeDescription + suffix
plt.savefig(filename + '.pdf')
print 'saved file to %s' % filename
else:
plt.show()
# now make plots that are normalized by memory size
maxValue = -10
minValue = 10
for timesIndex in numpy.arange(0, len(allTimes)):
maxValue = numpy.max([maxValue, numpy.max(log10(allTimes[timesIndex] / memorySize))])
minValue = numpy.min([minValue, numpy.min(log10(allTimes[timesIndex] / memorySize))])
colorNormalizer = matplotlib.colors.Normalize(vmin=minValue, vmax=maxValue)
for timesIndex in range(len(allTimes)):
fig3d = plt.figure(0)
plt.clf()
times = allTimes[timesIndex]
name = allNames[timesIndex]
ax = fig3d.gca(projection='3d')
ax.view_init(elev=0, azim=-111)
surf = ax.plot_surface(log10(dotProductSize), log10(memorySize), log10(times / memorySize), rstride=1, cstride=1, cmap=cm.coolwarm, linewidth=0.5, antialiased=False)
surf.set_norm(colorNormalizer)
plt.xlabel('log10(dotProductSize)')
plt.ylabel('log10(memorySize)')
ax.set_zlabel('log10(normalized time [seconds / memorySize])')
ax.set_zlim([minValue, maxValue])
plt.title(name + ' normalized time')
if (makeImageFiles == True):
ax.view_init(elev=2, azim=-23)
filename = outputPrefix + 'NormalizedTime_' + name + suffix
plt.savefig(filename + '.pdf')
print 'saved file to %s' % filename
# possibly make orbit plots for movies
if (makeOrbitFilesForMovies == True):
for frameIndex in range(numberOfOrbitFrames):
ax.view_init(elev=2, azim=360 * frameIndex / (numberOfOrbitFrames - 1))
filename = outputPrefix + 'orbitFrames/NormalizedTime_' + name + suffix + '_%02d.pdf' % frameIndex
plt.savefig(filename)
print 'saved file to %s' % filename
else:
plt.show()
# now make relative speedups over serial
maxSpeedup = -10
minSpeedup = 10
for timesIndex in numpy.arange(1, len(allTimes)):
maxSpeedup = numpy.max([maxSpeedup, numpy.max(log10(allTimes[0] / allTimes[timesIndex]))])
minSpeedup = numpy.min([minSpeedup, numpy.min(log10(allTimes[0] / allTimes[timesIndex]))])
colorNormalizer = matplotlib.colors.Normalize(vmin=minSpeedup, vmax=maxSpeedup)
# intentionally start at 1 so that i don't compare serial to serial
for timesIndex in numpy.arange(1, len(allTimes)):
fig3d = plt.figure(0)
plt.clf()
times = allTimes[timesIndex]
name = allNames[timesIndex]
ax = fig3d.gca(projection='3d')
ax.view_init(elev=0, azim=-111)
surf = ax.plot_surface(log10(dotProductSize), log10(memorySize), log10(allTimes[0] / times), rstride=1, cstride=1, cmap=cm.coolwarm, linewidth=0.5, antialiased=False)
surf.set_norm(colorNormalizer)
plt.xlabel('log10(dotProductSize)')
plt.ylabel('log10(memorySize)')
ax.set_zlabel('log10(speedup) [unitless]')
ax.set_zlim([minSpeedup, maxSpeedup])
plt.title(name + ' speedup over serial')
if (makeImageFiles == True):
ax.view_init(elev=2, azim=-23)
filename = outputPrefix + 'VersusSerial_' + name + suffix
plt.savefig(filename + '.pdf')
print 'saved file to %s' % filename
if (makeOrbitFilesForMovies == True and timesIndex > 0):
for frameIndex in range(numberOfOrbitFrames):
ax.view_init(elev=2, azim=360 * frameIndex / (numberOfOrbitFrames - 1))
filename = outputPrefix + 'orbitFrames/VersusSerial_' + name + suffix + '_%02d.pdf' % frameIndex
plt.savefig(filename)
print 'saved file to %s' % filename
else:
plt.show()
fig2d = plt.figure(1)
for memorySizeIndex in [-1, 0]:
legendNames = []
plt.cla()
for timesIndex in range(len(allTimes)):
times = allTimes[timesIndex]
name = allNames[timesIndex]
plt.plot(dotProductSize[:, memorySizeIndex], allTimes[0][:, memorySizeIndex] / times[:, memorySizeIndex], markers[timesIndex], color=colors[timesIndex], hold='on', linewidth=2)
legendNames.append(name)
plt.xscale('log')
plt.yscale('log')
plt.title('speedup over serial for memory size %.2e' % memorySize[0, memorySizeIndex], fontsize=16)
plt.xlabel('dot product size', fontsize=16)
plt.ylabel('speedup [unitless]', fontsize=16)
#plt.ylim([0, 6])
plt.xlim([dotProductSize[0, 0], dotProductSize[-1, 0]])
ax2d.legend(legendNames, loc='center right', bbox_to_anchor=bbox_to_anchor2d)
if (makeImageFiles == True):
sizeDescription = 'largestSize' if (memorySizeIndex == -1) else 'smallestSize'
filename = outputPrefix + 'VersusSerial_2d_' + sizeDescription + suffix
plt.savefig(filename + '.pdf')
print 'saved file to %s' % filename
else:
plt.show()
# now make relative speedup over openmp
# TODO: you might disable this part
maxSpeedup = -10
minSpeedup = 10
for timesIndex in numpy.arange(2, len(allTimes)):
maxSpeedup = numpy.max([maxSpeedup, numpy.max(log10(allTimes[1] / allTimes[timesIndex]))])
minSpeedup = numpy.min([minSpeedup, numpy.min(log10(allTimes[1] / allTimes[timesIndex]))])
colorNormalizer = matplotlib.colors.Normalize(vmin=minSpeedup, vmax=maxSpeedup)
# intentionally start at 2 so that i don't compare serial or omp to omp
for timesIndex in numpy.arange(2, len(allTimes)):
fig3d = plt.figure(0)
plt.clf()
times = allTimes[timesIndex]
name = allNames[timesIndex]
ax = fig3d.gca(projection='3d')
ax.view_init(elev=0, azim=-111)
surf = ax.plot_surface(log10(dotProductSize), log10(memorySize), log10(allTimes[1] / times), rstride=1, cstride=1, cmap=cm.coolwarm, linewidth=0.5, antialiased=False)
surf.set_norm(colorNormalizer)
plt.xlabel('log10(dotProductSize)')
plt.ylabel('log10(memorySize)')
ax.set_zlabel('log10(speedup) [unitless]')
ax.set_zlim([minSpeedup, maxSpeedup])
plt.title(name + ' speedup over omp')
if (makeImageFiles == True):
ax.view_init(elev=2, azim=-23)
filename = outputPrefix + 'VersusOmp_' + name + suffix
plt.savefig(filename + '.pdf')
print 'saved file to %s' % filename
if (makeOrbitFilesForMovies == True and timesIndex > 1):
for frameIndex in range(numberOfOrbitFrames):
ax.view_init(elev=2, azim=360 * frameIndex / (numberOfOrbitFrames - 1))
filename = outputPrefix + 'orbitFrames/VersusOmp_' + name + suffix + '_%02d.pdf' % frameIndex
plt.savefig(filename)
print 'saved file to %s' % filename
else:
plt.show()
fig2d = plt.figure(1)
for memorySizeIndex in [-1, 0]:
legendNames = []
plt.cla()
for timesIndex in range(len(allTimes)):
times = allTimes[timesIndex]
name = allNames[timesIndex]
plt.plot(dotProductSize[:, memorySizeIndex], allTimes[1][:, memorySizeIndex] / times[:, memorySizeIndex], markers[timesIndex], color=colors[timesIndex], hold='on', linewidth=2)
legendNames.append(name)
plt.xscale('log')
plt.yscale('log')
plt.title('speedup over openmp for memory size %.2e' % memorySize[0, memorySizeIndex], fontsize=16)
plt.xlabel('dot product size', fontsize=16)
plt.ylabel('speedup [unitless]', fontsize=16)
plt.xlim([dotProductSize[0, 0], dotProductSize[-1, 0]])
ax2d.legend(legendNames, loc='center right', bbox_to_anchor=bbox_to_anchor2d)
if (makeImageFiles == True):
sizeDescription = 'largestSize' if (memorySizeIndex == -1) else 'smallestSize'
filename = outputPrefix + 'VersusOmp_2d_' + sizeDescription + suffix
plt.savefig(filename + '.pdf')
print 'saved file to %s' % filename
else:
plt.show()
# relative speedup over cudaIndependent
# TODO: you might disable this part
maxSpeedup = -10
minSpeedup = 10
for timesIndex in numpy.arange(3, len(allTimes)):
maxSpeedup = numpy.max([maxSpeedup, numpy.max(log10(allTimes[2] / allTimes[timesIndex]))])
minSpeedup = numpy.min([minSpeedup, numpy.min(log10(allTimes[2] / allTimes[timesIndex]))])
colorNormalizer = matplotlib.colors.Normalize(vmin=minSpeedup, vmax=maxSpeedup)
# intentionally start at 3 so that i don't compare cuda or serial or omp to cuda
for timesIndex in numpy.arange(3, len(allTimes)):
fig3d = plt.figure(0)
plt.clf()
times = allTimes[timesIndex]
name = allNames[timesIndex]
ax = fig3d.gca(projection='3d')
ax.view_init(elev=0, azim=-111)
surf = ax.plot_surface(log10(dotProductSize), log10(memorySize), log10(allTimes[2] / times), rstride=1, cstride=1, cmap=cm.coolwarm, linewidth=0.5, antialiased=False)
surf.set_norm(colorNormalizer)
plt.xlabel('log10(dotProductSize)')
plt.ylabel('log10(memorySize)')
ax.set_zlabel('log10(speedup) [unitless]')
ax.set_zlim([minSpeedup, maxSpeedup])
plt.title(name + ' speedup over cudaIndependent')
if (makeImageFiles == True):
ax.view_init(elev=2, azim=-23)
filename = outputPrefix + 'VersusCudaIndependent_' + name + suffix
plt.savefig(filename + '.pdf')
print 'saved file to %s' % filename
if (makeOrbitFilesForMovies == True and timesIndex > 2):
for frameIndex in range(numberOfOrbitFrames):
ax.view_init(elev=2, azim=360 * frameIndex / (numberOfOrbitFrames - 1))
filename = outputPrefix + 'orbitFrames/VersusCudaIndependent_' + name + suffix + '_%02d.pdf' % frameIndex
plt.savefig(filename)
print 'saved file to %s' % filename
else:
plt.show()
fig2d = plt.figure(1)
for memorySizeIndex in [-1, 0]:
legendNames = []
plt.cla()
for timesIndex in range(len(allTimes)):
times = allTimes[timesIndex]
name = allNames[timesIndex]
plt.plot(dotProductSize[:, memorySizeIndex], allTimes[2][:, memorySizeIndex] / times[:, memorySizeIndex], markers[timesIndex], color=colors[timesIndex], hold='on', linewidth=2)
legendNames.append(name)
plt.xscale('log')
plt.yscale('log')
plt.title('speedup over cuda independent for memory size %.2e' % memorySize[0, memorySizeIndex], fontsize=16)
plt.xlabel('dot product size', fontsize=16)
plt.ylabel('speedup [unitless]', fontsize=16)
plt.xlim([dotProductSize[0, 0], dotProductSize[-1, 0]])
ax2d.legend(legendNames, loc='center right', bbox_to_anchor=bbox_to_anchor2d)
if (makeImageFiles == True):
sizeDescription = 'largestSize' if (memorySizeIndex == -1) else 'smallestSize'
filename = outputPrefix + 'VersusCudaIndependent_2d_' + sizeDescription + suffix
plt.savefig(filename + '.pdf')
print 'saved file to %s' % filename
else:
plt.show()
# these graphs are essentially duplicates of ones made already, but with a linear scale instead of logarithmic (by request of carter).
# these graphs just compare kokkos omp versus openmp and kokkos cuda versus cuda
# omp
fig3d = plt.figure(0)
plt.clf()
ax = fig3d.gca(projection='3d')
ax.view_init(elev=0, azim=-111)
surf = ax.plot_surface(log10(dotProductSize), log10(memorySize), (allTimes[1] / allTimes[5]), rstride=1, cstride=1, cmap=cm.coolwarm, linewidth=0.5, antialiased=False)
plt.xlabel('log10(dotProductSize)')
plt.ylabel('log10(memorySize)')
ax.set_zlabel('speedup [unitless]')
plt.title('kokkos omp speedup over omp')
if (makeImageFiles == True):
ax.view_init(elev=2, azim=-23)
filename = outputPrefix + 'VersusOmp_kokkosOmp_linear' + suffix
plt.savefig(filename + '.pdf')
print 'saved file to %s' % filename
if (makeOrbitFilesForMovies == True):
for frameIndex in range(numberOfOrbitFrames):
ax.view_init(elev=2, azim=360 * frameIndex / (numberOfOrbitFrames - 1))
filename = outputPrefix + 'orbitFrames/VersusOmp_kokkosOmp_linear' + suffix + '_%02d.pdf' % frameIndex
plt.savefig(filename)
print 'saved file to %s' % filename
else:
plt.show()
# cuda
fig3d = plt.figure(0)
plt.clf()
ax = fig3d.gca(projection='3d')
ax.view_init(elev=0, azim=-111)
surf = ax.plot_surface(log10(dotProductSize), log10(memorySize), (allTimes[2] / allTimes[6]), rstride=1, cstride=1, cmap=cm.coolwarm, linewidth=0.5, antialiased=False)
plt.xlabel('log10(dotProductSize)')
plt.ylabel('log10(memorySize)')
ax.set_zlabel('speedup [unitless]')
plt.title('kokkos cuda speedup over cuda')
if (makeImageFiles == True):
ax.view_init(elev=2, azim=-23)
filename = outputPrefix + 'VersusCudaIndependent_kokkosCudaIndependent_linear' + suffix
plt.savefig(filename + '.pdf')
print 'saved file to %s' % filename
if (makeOrbitFilesForMovies == True):
for frameIndex in range(numberOfOrbitFrames):
ax.view_init(elev=2, azim=360 * frameIndex / (numberOfOrbitFrames - 1))
filename = outputPrefix + 'orbitFrames/VersusCudaIndependent_kokkosCudaIndependent_linear' + suffix + '_%02d.pdf' % frameIndex
plt.savefig(filename)
print 'saved file to %s' % filename
else:
plt.show()
| 0 | 0 | 0 |
35d65ee59e17fa97361dad2199329c8fa38477ca | 2,816 | py | Python | users/serializers.py | Mohamed-Kaizen/IWork-test | 1212a06907fed7a6fc216744768f74ad9642c420 | [
"MIT"
] | null | null | null | users/serializers.py | Mohamed-Kaizen/IWork-test | 1212a06907fed7a6fc216744768f74ad9642c420 | [
"MIT"
] | 4 | 2021-04-08T20:14:49.000Z | 2021-09-22T19:45:35.000Z | users/serializers.py | Mohamed-Kaizen/IWork-test | 1212a06907fed7a6fc216744768f74ad9642c420 | [
"MIT"
] | 1 | 2020-12-15T20:50:58.000Z | 2020-12-15T20:50:58.000Z | """Collection serializers."""
from typing import Any, Dict
from dj_rest_auth.registration.serializers import RegisterSerializer
from django.contrib.auth.password_validation import validate_password
from rest_framework import exceptions, serializers
from .models import CustomUser
from .validators import (
validate_confusables,
validate_confusables_email,
validate_reserved_name,
)
class UserDetailsSerializer(serializers.Serializer):
"""User detail serializer."""
email = serializers.EmailField(read_only=True)
username = serializers.CharField(read_only=True)
picture = serializers.ImageField(read_only=True)
is_active = serializers.BooleanField(read_only=True)
class JWTSerializer(serializers.Serializer):
"""JWT serializer."""
access_token = serializers.CharField(read_only=True)
refresh_token = serializers.CharField(read_only=True)
user = UserDetailsSerializer(read_only=True)
class CustomRegisterSerializer(RegisterSerializer):
"""Custom Register serializer."""
full_name = serializers.CharField(max_length=300)
def get_cleaned_data(self: "CustomRegisterSerializer") -> Dict[str, Any]:
"""Cleaning for input data."""
data_dict = super().get_cleaned_data()
data_dict["full_name"] = self.validated_data.get("full_name", "")
return data_dict
class UserSignUpSerializer(serializers.ModelSerializer):
"""User signup serializer."""
class Meta:
"""Meta data."""
model = CustomUser
fields = (
"username",
"password",
"email",
"full_name",
)
extra_kwargs = {
"password": {"write_only": True, "style": {"input_type": "password"}}
}
def validate_password(self: "UserSignUpSerializer", value: str) -> str:
"""Password validation."""
validate_password(value, self.instance)
return value
def create(self: "UserSignUpSerializer", validated_data: Dict) -> CustomUser:
"""Create method for UserSignUpSerializer."""
password = validated_data.pop("password")
username = validated_data.get("username")
email = validated_data.get("email")
local, domain = email.split("@")
validate_reserved_name(
value=username, exception_class=exceptions.ValidationError
)
validate_reserved_name(value=local, exception_class=exceptions.ValidationError)
validate_confusables(value=username, exception_class=exceptions.ValidationError)
validate_confusables_email(
local_part=local, domain=domain, exception_class=exceptions.ValidationError
)
user = CustomUser(**validated_data)
user.set_password(password)
user.save()
return user
| 27.607843 | 88 | 0.686435 | """Collection serializers."""
from typing import Any, Dict
from dj_rest_auth.registration.serializers import RegisterSerializer
from django.contrib.auth.password_validation import validate_password
from rest_framework import exceptions, serializers
from .models import CustomUser
from .validators import (
validate_confusables,
validate_confusables_email,
validate_reserved_name,
)
class UserDetailsSerializer(serializers.Serializer):
"""User detail serializer."""
email = serializers.EmailField(read_only=True)
username = serializers.CharField(read_only=True)
picture = serializers.ImageField(read_only=True)
is_active = serializers.BooleanField(read_only=True)
class JWTSerializer(serializers.Serializer):
"""JWT serializer."""
access_token = serializers.CharField(read_only=True)
refresh_token = serializers.CharField(read_only=True)
user = UserDetailsSerializer(read_only=True)
class CustomRegisterSerializer(RegisterSerializer):
"""Custom Register serializer."""
full_name = serializers.CharField(max_length=300)
def get_cleaned_data(self: "CustomRegisterSerializer") -> Dict[str, Any]:
"""Cleaning for input data."""
data_dict = super().get_cleaned_data()
data_dict["full_name"] = self.validated_data.get("full_name", "")
return data_dict
class UserSignUpSerializer(serializers.ModelSerializer):
"""User signup serializer."""
class Meta:
"""Meta data."""
model = CustomUser
fields = (
"username",
"password",
"email",
"full_name",
)
extra_kwargs = {
"password": {"write_only": True, "style": {"input_type": "password"}}
}
def validate_password(self: "UserSignUpSerializer", value: str) -> str:
"""Password validation."""
validate_password(value, self.instance)
return value
def create(self: "UserSignUpSerializer", validated_data: Dict) -> CustomUser:
"""Create method for UserSignUpSerializer."""
password = validated_data.pop("password")
username = validated_data.get("username")
email = validated_data.get("email")
local, domain = email.split("@")
validate_reserved_name(
value=username, exception_class=exceptions.ValidationError
)
validate_reserved_name(value=local, exception_class=exceptions.ValidationError)
validate_confusables(value=username, exception_class=exceptions.ValidationError)
validate_confusables_email(
local_part=local, domain=domain, exception_class=exceptions.ValidationError
)
user = CustomUser(**validated_data)
user.set_password(password)
user.save()
return user
| 0 | 0 | 0 |
6b2686cc02ae23c65ec39d013e75811b02f9c83f | 6,469 | py | Python | DatasetHandler/DatasetVizualizators.py | previtus/MGR-Project-Code | 1126215059eb3f731dcf78ec24d9a480e73abce6 | [
"MIT"
] | null | null | null | DatasetHandler/DatasetVizualizators.py | previtus/MGR-Project-Code | 1126215059eb3f731dcf78ec24d9a480e73abce6 | [
"MIT"
] | null | null | null | DatasetHandler/DatasetVizualizators.py | previtus/MGR-Project-Code | 1126215059eb3f731dcf78ec24d9a480e73abce6 | [
"MIT"
] | null | null | null | import numpy as np
from PIL import Image
import matplotlib, os
#print "importing visual module"
if not('DISPLAY' in os.environ):
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import matplotlib.backends.backend_pdf
import copy
# customization: http://matplotlib.org/users/customizing.html
def saveAllPlotsToPDF():
'''
# Save all created plots into a pdf file.
'''
pdf = matplotlib.backends.backend_pdf.PdfPages("output.pdf")
for i in plt.get_fignums():
fig = plt.figure(i)
pdf.savefig(fig)
pdf.close()
def xkcd():
'''special style'''
plt.xkcd()
def show():
'''show plots on screen'''
plt.show()
def GenerateAverageImagesFromDictionary(dict, save_to_dir=None, output_folder=None):
'''
Gets a dictionary of d[score_label_value] pointing to an array of images
:param dict:
:return: Up to 100 averaged images
'''
dict_of_images = {}
for i in range(0,len(dict)):
imlist = dict[i]
N = len(imlist)
if N > 0:
w, h = Image.open(imlist[0]).size
arr = np.zeros((h, w, 3), np.float)
for im in imlist:
imarr = np.array(Image.open(im), dtype=np.float)
arr = arr + imarr / N
arr = np.array(np.round(arr), dtype=np.uint8)
dict_of_images[i] = arr
if save_to_dir is not None:
out=Image.fromarray(arr,mode="RGB")
out.save(output_folder+str(i).zfill(3)+"_avgFrom_"+str(N)+".png")
#out.show()
return dict_of_images
def plotX_sortValues(dont_touch_this_x, title='', x_min=0.0, x_max=1.0, notReverse=False, custom_x_label = '# of images', custom_y_label = 'Score value'):
'''Visualization of dataset by the method of sorting array by value and plotting.'''
x = copy.copy(dont_touch_this_x)
if notReverse:
x.sort()
else:
x.sort(reverse=True)
plt.figure()
axes = plt.axes()
axes.set_xlabel(custom_x_label)
axes.set_ylabel(custom_y_label)
plt.plot(x, color='red')
axes.fill_between(range(len(x)), x, facecolor='orange', edgecolor='red', alpha=1)
zoomOut(axes, [0.0, len(x)-1], [x_min, x_max], factor=0.05)
axes.fill_between(x, 0)
axes.set_title(title)
def plotHistogram(x, title='', num_bins=100, x_min=0.0, x_max=1.0, custom_x_label = 'Score value', custom_y_label = 'Count of occurances'):
''' Plot histogram from the x data.'''
plt.figure()
axes = plt.axes()
hist, bins = np.histogram(x, bins=num_bins)
width = 0.7 * (bins[1] - bins[0])
center = (bins[:-1] + bins[1:]) / 2
plt.bar(center, hist, align='center', width=width, color='orange', edgecolor='red')
axes.xaxis.set_major_locator(ticker.MultipleLocator(np.abs(x_max-x_min)/10.0))
axes.xaxis.set_minor_locator(ticker.MultipleLocator(np.abs(x_max-x_min)/100.0))
# add a 'best fit' line
axes.set_xlabel(custom_x_label)
axes.set_ylabel(custom_y_label)
zoomOutY(axes, factor=0.05, only_up=True)
zoomOutX(axes, [x_min, x_max], factor=0.05)
# Tweak spacing to prevent clipping of ylabel
axes.set_title(title)
def plotWhisker(data, title='', y_min=0.0, y_max=1.0, legend_on=True, notch=True):
''' Plot box plot / whisker graph from data.'''
plt.figure(figsize=(5, 8))
axes = plt.axes()
axes.yaxis.set_major_locator(ticker.MultipleLocator(np.abs(y_max-y_min)/10.0))
axes.yaxis.set_minor_locator(ticker.MultipleLocator(np.abs(y_max-y_min)/100.0))
meanpointprops = dict(linewidth=1.0)
boxplot = plt.boxplot(data, notch=notch, showmeans=True, meanprops=meanpointprops)
plt.xticks([])
if (legend_on):
boxplot['medians'][0].set_label('median')
boxplot['means'][0].set_label('mean')
boxplot['fliers'][0].set_label('outlayers')
# boxplot['boxes'][0].set_label('boxes')
# boxplot['whiskers'][0].set_label('whiskers')
boxplot['caps'][0].set_label('caps')
axes.set_xlim([0.7, 1.7])
plt.legend(numpoints = 1)
zoomOutY(axes,factor=0.1)
axes.set_title(title)
def plotMultipleWhiskerPlots(datas, whiskers, labels):
# support of generating multiple box plots
'''
Example run:
means_men = (20, 35, 30, 35, 27)
std_men = (2, 3, 4, 1, 2)
means_women = (25, 32, 34, 20, 25)
std_women = (3, 5, 2, 3, 3)
datas = [means_men, means_women, means_men]
whiskers = [std_men, std_women, std_women]
labels = ['1', '2', '3']
plotMultipleWhiskerPlots(datas,whiskers,labels)
'''
fig, ax = plt.subplots()
index = np.arange(len(datas[0]))
bar_width = (1.0 / len(datas)) * 0.9
opacity = 0.6
error_config = {'ecolor': '0.3'}
colors = ['r', 'b', 'y']
for i in range(0,len(datas)):
rects = plt.bar(index + i*bar_width, datas[i], bar_width,
alpha=opacity,
color=colors[min(i,len(colors)-1)],
yerr=whiskers[i],
error_kw=error_config,
label=labels[i])
plt.xticks(index + bar_width / len(datas),np.arange(1,len(datas[0])+1))
plt.legend()
plt.tight_layout()
def subPlot2(fce1, fce2, param1=None, param2=None):
'''
Join two plots.
Example run:
def tmp_fce1(): ...
def tmp_fce2(): ...
subPlot2(tmp_fce1, tmp_fce2)
'''
plt.subplot(2, 1, 1)
fce1()
plt.subplot(2, 1, 2)
fce2()
plt.show()
def zoomOut(axes, xlim=None, ylim=None, factor=0.05):
'''
Set size to fit in limitations.
:param axes: handler to matlibplot
:param xlim: list of [from x, to x] values
:param ylim: list of [from y, to y] values
:param factor: zoom factor
:return:
'''
zoomOutX(axes, xlim, factor)
zoomOutY(axes, ylim, factor)
def zoomOutX(axes,xlim=None,factor=0.05):
''' handle the X axis'''
if xlim == None:
xlim = axes.get_xlim()
axes.set_xlim((xlim[0] + xlim[1]) / 2 + np.array((-0.5, 0.5)) * (xlim[1] - xlim[0]) * (1 + factor))
def zoomOutY(axes,ylim=None,factor=0.05, only_up = False):
''' handle the Y axis'''
if ylim == None:
ylim = axes.get_ylim()
bottom = -0.5
axes.set_ylim((ylim[0] + ylim[1]) / 2 + np.array((-0.5, 0.5)) * (ylim[1] - ylim[0]) * (1 + factor))
if only_up:
ylim = axes.get_ylim()
#print ylim
axes.set_ylim(0.0,ylim[1])
| 29.81106 | 154 | 0.607822 | import numpy as np
from PIL import Image
import matplotlib, os
#print "importing visual module"
if not('DISPLAY' in os.environ):
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import matplotlib.backends.backend_pdf
import copy
# customization: http://matplotlib.org/users/customizing.html
def saveAllPlotsToPDF():
'''
# Save all created plots into a pdf file.
'''
pdf = matplotlib.backends.backend_pdf.PdfPages("output.pdf")
for i in plt.get_fignums():
fig = plt.figure(i)
pdf.savefig(fig)
pdf.close()
def xkcd():
'''special style'''
plt.xkcd()
def show():
'''show plots on screen'''
plt.show()
def GenerateAverageImagesFromDictionary(dict, save_to_dir=None, output_folder=None):
'''
Gets a dictionary of d[score_label_value] pointing to an array of images
:param dict:
:return: Up to 100 averaged images
'''
dict_of_images = {}
for i in range(0,len(dict)):
imlist = dict[i]
N = len(imlist)
if N > 0:
w, h = Image.open(imlist[0]).size
arr = np.zeros((h, w, 3), np.float)
for im in imlist:
imarr = np.array(Image.open(im), dtype=np.float)
arr = arr + imarr / N
arr = np.array(np.round(arr), dtype=np.uint8)
dict_of_images[i] = arr
if save_to_dir is not None:
out=Image.fromarray(arr,mode="RGB")
out.save(output_folder+str(i).zfill(3)+"_avgFrom_"+str(N)+".png")
#out.show()
return dict_of_images
def plotX_sortValues(dont_touch_this_x, title='', x_min=0.0, x_max=1.0, notReverse=False, custom_x_label = '# of images', custom_y_label = 'Score value'):
'''Visualization of dataset by the method of sorting array by value and plotting.'''
x = copy.copy(dont_touch_this_x)
if notReverse:
x.sort()
else:
x.sort(reverse=True)
plt.figure()
axes = plt.axes()
axes.set_xlabel(custom_x_label)
axes.set_ylabel(custom_y_label)
plt.plot(x, color='red')
axes.fill_between(range(len(x)), x, facecolor='orange', edgecolor='red', alpha=1)
zoomOut(axes, [0.0, len(x)-1], [x_min, x_max], factor=0.05)
axes.fill_between(x, 0)
axes.set_title(title)
def plotHistogram(x, title='', num_bins=100, x_min=0.0, x_max=1.0, custom_x_label = 'Score value', custom_y_label = 'Count of occurances'):
''' Plot histogram from the x data.'''
plt.figure()
axes = plt.axes()
hist, bins = np.histogram(x, bins=num_bins)
width = 0.7 * (bins[1] - bins[0])
center = (bins[:-1] + bins[1:]) / 2
plt.bar(center, hist, align='center', width=width, color='orange', edgecolor='red')
axes.xaxis.set_major_locator(ticker.MultipleLocator(np.abs(x_max-x_min)/10.0))
axes.xaxis.set_minor_locator(ticker.MultipleLocator(np.abs(x_max-x_min)/100.0))
# add a 'best fit' line
axes.set_xlabel(custom_x_label)
axes.set_ylabel(custom_y_label)
zoomOutY(axes, factor=0.05, only_up=True)
zoomOutX(axes, [x_min, x_max], factor=0.05)
# Tweak spacing to prevent clipping of ylabel
axes.set_title(title)
def plotWhisker(data, title='', y_min=0.0, y_max=1.0, legend_on=True, notch=True):
''' Plot box plot / whisker graph from data.'''
plt.figure(figsize=(5, 8))
axes = plt.axes()
axes.yaxis.set_major_locator(ticker.MultipleLocator(np.abs(y_max-y_min)/10.0))
axes.yaxis.set_minor_locator(ticker.MultipleLocator(np.abs(y_max-y_min)/100.0))
meanpointprops = dict(linewidth=1.0)
boxplot = plt.boxplot(data, notch=notch, showmeans=True, meanprops=meanpointprops)
plt.xticks([])
if (legend_on):
boxplot['medians'][0].set_label('median')
boxplot['means'][0].set_label('mean')
boxplot['fliers'][0].set_label('outlayers')
# boxplot['boxes'][0].set_label('boxes')
# boxplot['whiskers'][0].set_label('whiskers')
boxplot['caps'][0].set_label('caps')
axes.set_xlim([0.7, 1.7])
plt.legend(numpoints = 1)
zoomOutY(axes,factor=0.1)
axes.set_title(title)
def plotMultipleWhiskerPlots(datas, whiskers, labels):
# support of generating multiple box plots
'''
Example run:
means_men = (20, 35, 30, 35, 27)
std_men = (2, 3, 4, 1, 2)
means_women = (25, 32, 34, 20, 25)
std_women = (3, 5, 2, 3, 3)
datas = [means_men, means_women, means_men]
whiskers = [std_men, std_women, std_women]
labels = ['1', '2', '3']
plotMultipleWhiskerPlots(datas,whiskers,labels)
'''
fig, ax = plt.subplots()
index = np.arange(len(datas[0]))
bar_width = (1.0 / len(datas)) * 0.9
opacity = 0.6
error_config = {'ecolor': '0.3'}
colors = ['r', 'b', 'y']
for i in range(0,len(datas)):
rects = plt.bar(index + i*bar_width, datas[i], bar_width,
alpha=opacity,
color=colors[min(i,len(colors)-1)],
yerr=whiskers[i],
error_kw=error_config,
label=labels[i])
plt.xticks(index + bar_width / len(datas),np.arange(1,len(datas[0])+1))
plt.legend()
plt.tight_layout()
def subPlot2(fce1, fce2, param1=None, param2=None):
'''
Join two plots.
Example run:
def tmp_fce1(): ...
def tmp_fce2(): ...
subPlot2(tmp_fce1, tmp_fce2)
'''
plt.subplot(2, 1, 1)
fce1()
plt.subplot(2, 1, 2)
fce2()
plt.show()
def zoomOut(axes, xlim=None, ylim=None, factor=0.05):
'''
Set size to fit in limitations.
:param axes: handler to matlibplot
:param xlim: list of [from x, to x] values
:param ylim: list of [from y, to y] values
:param factor: zoom factor
:return:
'''
zoomOutX(axes, xlim, factor)
zoomOutY(axes, ylim, factor)
def zoomOutX(axes,xlim=None,factor=0.05):
''' handle the X axis'''
if xlim == None:
xlim = axes.get_xlim()
axes.set_xlim((xlim[0] + xlim[1]) / 2 + np.array((-0.5, 0.5)) * (xlim[1] - xlim[0]) * (1 + factor))
def zoomOutY(axes,ylim=None,factor=0.05, only_up = False):
''' handle the Y axis'''
if ylim == None:
ylim = axes.get_ylim()
bottom = -0.5
axes.set_ylim((ylim[0] + ylim[1]) / 2 + np.array((-0.5, 0.5)) * (ylim[1] - ylim[0]) * (1 + factor))
if only_up:
ylim = axes.get_ylim()
#print ylim
axes.set_ylim(0.0,ylim[1])
| 0 | 0 | 0 |
c59d7cc897a4ffe054fd51e0e5bc8ff8e5a73b2a | 8,652 | py | Python | commands/storage/lmi/scripts/storage/cmd/mount.py | jsafrane/openlmi-scripts | 8b1d4e9ec9091a0017d328700df739dfc0bd8eb5 | [
"BSD-2-Clause-FreeBSD"
] | 5 | 2017-09-15T14:18:08.000Z | 2021-11-12T11:47:43.000Z | commands/storage/lmi/scripts/storage/cmd/mount.py | jsafrane/openlmi-scripts | 8b1d4e9ec9091a0017d328700df739dfc0bd8eb5 | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | commands/storage/lmi/scripts/storage/cmd/mount.py | jsafrane/openlmi-scripts | 8b1d4e9ec9091a0017d328700df739dfc0bd8eb5 | [
"BSD-2-Clause-FreeBSD"
] | 3 | 2016-02-27T01:48:44.000Z | 2020-07-27T13:58:22.000Z | # coding=utf-8
# Storage Management Providers
#
# Copyright (C) 2014 Red Hat, Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are
# those of the authors and should not be interpreted as representing official
# policies, either expressed or implied, of the FreeBSD Project.
#
# Authors: Jan Synacek <jsynacek@redhat.com>
# Jan Safranek <jsafrane@redhat.com>
#
"""
Mount management.
Usage:
%(cmd)s list [ --all ] [ <target> ... ]
%(cmd)s create <device> <mountpoint> [ (-t <fs_type>) (-o <options>) ]
%(cmd)s delete <target>
%(cmd)s show [ --all ] [ <target> ... ]
Commands:
list List mounted filesystems with a device attached to them.
<target> can be specified either as device names
or mountpoints.
create Mount a specified device on the path given by mountpoint.
Optionally, filesystem type, common options (filesystem
independent) and filesystem specific options can be provided. If no
filesystem type is specified, it is automatically detected.
Options can be provided as a comma-separated string of
'option_name:value' items. Possible option names are:
AllowExecution AllowMandatoryLock AllowSUID AllowUserMount
AllowWrite Auto Dump FileSystemCheckOrder InterpretDevices
Silent SynchronousDirectoryUpdates SynchronousIO
UpdateAccessTimes UpdateDirectoryAccessTimes UpdateFullAccessTimes
UpdateRelativeAccessTimes
Possible option values for all of the options except for
FileSystemCheckOrder are 't', 'true', 'f', 'false'. All of them are
case insensitive.
The FileSystemCheckOrder option's value is a number.
In case an option is not recognized as being one of the possible
options listed above, it's used as a filesystem dependent option.
Examples:
create /dev/vda1 /mnt -t ext4 -o 'AllowWrite:F,InterpretDevices:false'
create /dev/vda2 /mnt -o 'FileSystemCheckOrder:2'
create /dev/vda3 /mnt -o 'user_xattr,barrier=0'
create /dev/vda4 /mnt -o 'Dump:t, AllowMandatoryLock:t, acl'
delete Unmount a mounted filesystem. Can be specified either as a device
path or a mountpoint.
show Show detailed information about mounted filesystems with a device
attached to them. <target> can be specified either as device names
or mountpoints.
<spec>. Optionally, show all mounted filesystems.
"""
from lmi.shell.LMIUtil import lmi_isinstance
from lmi.scripts.common import command
from lmi.scripts.common import get_logger
from lmi.scripts.common.formatter import command as fcmd
from lmi.scripts.storage import show, fs, lvm, mount, raid, partition
from lmi.scripts.storage.common import (size2str, get_devices, get_children,
get_parents, str2device, str2size, str2vg)
from lmi.scripts.common.errors import LmiFailed
LOG = get_logger(__name__)
def get_mounts_for_targets(ns, targets):
"""
Return list of LMI_MountedFilesystem instances for given devices or
directories.
:type mntspec: List of strings or LMIInstance/CIM_StorageExtents.
:param mntspec: Mount specifications. If a string is provided as a mount
specification, it can be either device name or mount
directory.
"""
mounts = []
for target in targets:
try:
device = str2device(ns, target)
if device:
target = device.Name
except LmiFailed:
# we did not find CIM_StorageExtent for the device, it must be non
# device filesystem specification
pass
mnts = ns.LMI_MountedFileSystem.instances({'FileSystemSpec':target}) + \
ns.LMI_MountedFileSystem.instances({'MountPointPath':target})
mounts += mnts
return mounts
| 37.947368 | 85 | 0.646556 | # coding=utf-8
# Storage Management Providers
#
# Copyright (C) 2014 Red Hat, Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are
# those of the authors and should not be interpreted as representing official
# policies, either expressed or implied, of the FreeBSD Project.
#
# Authors: Jan Synacek <jsynacek@redhat.com>
# Jan Safranek <jsafrane@redhat.com>
#
"""
Mount management.
Usage:
%(cmd)s list [ --all ] [ <target> ... ]
%(cmd)s create <device> <mountpoint> [ (-t <fs_type>) (-o <options>) ]
%(cmd)s delete <target>
%(cmd)s show [ --all ] [ <target> ... ]
Commands:
list List mounted filesystems with a device attached to them.
<target> can be specified either as device names
or mountpoints.
create Mount a specified device on the path given by mountpoint.
Optionally, filesystem type, common options (filesystem
independent) and filesystem specific options can be provided. If no
filesystem type is specified, it is automatically detected.
Options can be provided as a comma-separated string of
'option_name:value' items. Possible option names are:
AllowExecution AllowMandatoryLock AllowSUID AllowUserMount
AllowWrite Auto Dump FileSystemCheckOrder InterpretDevices
Silent SynchronousDirectoryUpdates SynchronousIO
UpdateAccessTimes UpdateDirectoryAccessTimes UpdateFullAccessTimes
UpdateRelativeAccessTimes
Possible option values for all of the options except for
FileSystemCheckOrder are 't', 'true', 'f', 'false'. All of them are
case insensitive.
The FileSystemCheckOrder option's value is a number.
In case an option is not recognized as being one of the possible
options listed above, it's used as a filesystem dependent option.
Examples:
create /dev/vda1 /mnt -t ext4 -o 'AllowWrite:F,InterpretDevices:false'
create /dev/vda2 /mnt -o 'FileSystemCheckOrder:2'
create /dev/vda3 /mnt -o 'user_xattr,barrier=0'
create /dev/vda4 /mnt -o 'Dump:t, AllowMandatoryLock:t, acl'
delete Unmount a mounted filesystem. Can be specified either as a device
path or a mountpoint.
show Show detailed information about mounted filesystems with a device
attached to them. <target> can be specified either as device names
or mountpoints.
<spec>. Optionally, show all mounted filesystems.
"""
from lmi.shell.LMIUtil import lmi_isinstance
from lmi.scripts.common import command
from lmi.scripts.common import get_logger
from lmi.scripts.common.formatter import command as fcmd
from lmi.scripts.storage import show, fs, lvm, mount, raid, partition
from lmi.scripts.storage.common import (size2str, get_devices, get_children,
get_parents, str2device, str2size, str2vg)
from lmi.scripts.common.errors import LmiFailed
LOG = get_logger(__name__)
def get_mounts_for_targets(ns, targets):
"""
Return list of LMI_MountedFilesystem instances for given devices or
directories.
:type mntspec: List of strings or LMIInstance/CIM_StorageExtents.
:param mntspec: Mount specifications. If a string is provided as a mount
specification, it can be either device name or mount
directory.
"""
mounts = []
for target in targets:
try:
device = str2device(ns, target)
if device:
target = device.Name
except LmiFailed:
# we did not find CIM_StorageExtent for the device, it must be non
# device filesystem specification
pass
mnts = ns.LMI_MountedFileSystem.instances({'FileSystemSpec':target}) + \
ns.LMI_MountedFileSystem.instances({'MountPointPath':target})
mounts += mnts
return mounts
class MountList(command.LmiLister):
COLUMNS = ('FileSystemSpec', 'FileSystemType', 'MountPointPath', 'Options')
ARG_ARRAY_SUFFIX = 's'
def execute(self, ns, targets=None, _all=None):
"""
Implementation of 'mount list' command.
"""
if targets:
mounts = get_mounts_for_targets(ns, targets)
else:
mounts = mount.get_mounts(ns)
if _all is False:
transients = [mnt.Name for mnt in ns.LMI_TransientFileSystem.instances()]
for mnt in mounts:
# treat root specially (can be mounted twice - as a rootfs and with
# a device)
if mnt.FileSystemSpec == 'rootfs':
continue
if _all is False and mnt.MountPointPath != '/':
# do not list nodevice filesystems
name = 'PATH=' + mnt.MountPointPath
if name in transients:
continue
yield(mnt.FileSystemSpec,
mnt.FileSystemType,
mnt.MountPointPath,
mount.build_opts_str(mnt))
class MountShow(command.LmiLister):
COLUMNS = ('Name', 'Value')
ARG_ARRAY_SUFFIX = 's'
def execute(self, ns, _all=None, targets=None):
"""
Implementation of 'mount show' command.
"""
if targets:
mounts = get_mounts_for_targets(ns, targets)
else:
mounts = mount.get_mounts(ns)
if _all is False:
transients = [mnt.Name for mnt in ns.LMI_TransientFileSystem.instances()]
yield fcmd.NewTableCommand('Mounted filesystems')
for mnt in mounts:
# treat root specially (can be mounted twice - as a rootfs and with
# a device)
if mnt.FileSystemSpec == 'rootfs':
continue
if _all is False and mnt.MountPointPath != '/':
# do not list nodevice filesystems
name = 'PATH=' + mnt.MountPointPath
if name in transients:
continue
yield('Filesystem', '%s (%s)' % (mnt.FileSystemSpec, mnt.FileSystemType))
yield('Mountpoint', mnt.MountPointPath)
yield('Options', mount.build_opts_str(mnt))
yield ''
class MountCreate(command.LmiCheckResult):
EXPECT = None
def execute(self, ns, device, mountpoint, fs_type=None, options=None):
"""
Implementation of 'mount create' command.
"""
return mount.mount_create(ns, device, mountpoint, fs_type, options)
class MountDelete(command.LmiCheckResult):
EXPECT = None
def transform_options(self, options):
"""
There is only one <target> option, but docopt passes it as array
(because in other commands it is used with '...'). So let's
transform it to scalar.
"""
options['<target>'] = options.pop('<target>')[0]
def execute(self, ns, target):
"""
Implementation of 'mount delete' command.
"""
return mount.mount_delete(ns, target)
class Mount(command.LmiCommandMultiplexer):
OWN_USAGE = __doc__
COMMANDS = {
'list' : MountList,
'create' : MountCreate,
'delete' : MountDelete,
'show' : MountShow,
}
| 0 | 3,213 | 115 |
9e44f0d5b01695096f14089522499b402b180501 | 5,439 | py | Python | Converter.py | HorningT/GoogleColabPDF | 1e9b4eeca812094b9ae4205a4833c4b3e83f7ac4 | [
"MIT"
] | null | null | null | Converter.py | HorningT/GoogleColabPDF | 1e9b4eeca812094b9ae4205a4833c4b3e83f7ac4 | [
"MIT"
] | null | null | null | Converter.py | HorningT/GoogleColabPDF | 1e9b4eeca812094b9ae4205a4833c4b3e83f7ac4 | [
"MIT"
] | null | null | null | ################################################################################
# CODE TO CONVERT COLAB NOTEBOOK TO PDF #
#This code converts an .ipynb file to a Tex based PDF and saves it in the Colab#
#Notebook folder with the same filename.pdf #
################################################################################
# Function List
################################################################################
#Converter for .ipynb conversion to PDF. Input is a string of the file name
| 45.705882 | 160 | 0.590366 | ################################################################################
# CODE TO CONVERT COLAB NOTEBOOK TO PDF #
#This code converts an .ipynb file to a Tex based PDF and saves it in the Colab#
#Notebook folder with the same filename.pdf #
################################################################################
# Function List
################################################################################
#Converter for .ipynb conversion to PDF. Input is a string of the file name
def find(name, path):
import os
class color:
BLUE = '\033[94m';
GREEN = '\033[92m';
BOLD = '\033[1m';
UNDERLINE = '\033[4m';
END = '\033[0m';
FAIL = '\033[91m';
WARNING = '\033[93m';
for root, dirs, files in os.walk(path):
if name in files:
return os.path.join(root, name)
def PDFconvertGC(filename):
# Imports
from google.colab import drive; from datetime import datetime; import sys;
import os; import time; import subprocess
class color:
BLUE = '\033[94m';
GREEN = '\033[92m';
BOLD = '\033[1m';
UNDERLINE = '\033[4m';
END = '\033[0m';
FAIL = '\033[91m';
WARNING = '\033[93m';
# Mount Drive First to give time for mounting to process
drive.mount('/content/gdrive', force_remount = True)
# Install some dependences into the RUNTIME (is not local, needs to reinstall
# every Factory runtime)
t1 = time.time()
subprocess.call('sudo apt-get IPython >> outputsuppressed.txt', shell= True)
subprocess.call('sudo apt-get install jupyter >> outputsuppressed.txt', shell= True)
subprocess.call('sudo apt-get Latex >> outputsuppressed.txt', shell= True)
subprocess.call('sudo apt-get pandoc >> outputsuppressed.txt', shell= True)
subprocess.call('sudo apt-get install nbconvert >> outputsuppressed.txt', shell= True)
subprocess.call('apt-get update >> outputsuppressed.txt', shell= True)
subprocess.call('sudo apt-get install texlive-xetex texlive-fonts-recommended texlive-generic-recommended --fix-missing >> outputsuppressed.txt', shell= True)
subprocess.call('apt-get update >> outputsuppressed.txt', shell= True)
# Searches the Google drive directory for the filename and gives back it's
# location (This accounts for Wildcards and Spaces in the directory names).
# Uses jupyter and nbconvert to convert to a Tex file, then into a pdf
# Handle Common Errors
print('\nFinding file. This may take a minute or two depending on the size of your drive...')
subprocess.call("IFS=$'\n'", shell =True) #Sets the reader to only break at newlines instead of spaces, tabs,and newline
try:
loc = find(filename, '/content/gdrive')
if str(loc) == "None":
print(color.BOLD,color.FAIL, "\nCould not find file in your Drive!\n"
,color.END,color.WARNING
,"- Make sure you input the correct filename\n"
," - Make sure the file is saved in the google drive you mounted\n\n"
,color.END)
Er = str('Error: {}. {}, line: {}'.format(sys.exc_info()[0],sys.exc_info()[1]
,sys.exc_info()[2].tb_lineno))
f = open("ErrorLog.txt","a+"); f.write(Er); f.close()
sys.tracebacklimit=0
sys.exit("Please Try Again")
else:
print('File Found at: ',str(loc))
except Exception as exception:
print(color.BOLD,color.WARNING, "Exception Occured, Please Check Log",color.END)
Er = str('Error: {}. {}, line: {}'.format(sys.exc_info()[0],sys.exc_info()[1]
,sys.exc_info()[2].tb_lineno))
f = open("ErrorLog.txt","a+");f.write(Er);f.close()
sys.tracebacklimit=0
sys.exit("Please Try Again")
# Autosave file
subprocess.call('sleep 30s', shell = True)
# Convert the file
try:
CMD = 'sudo jupyter nbconvert --output-dir='+'./'+' --to pdf "'+str(loc)+'"'
#print(CMD)
#!jupyter nbconvert --output-dir='./content/' --to pdf {loc} --log-level ERROR
subprocess.call(CMD, shell = True)
except Exception as exception:
print(color.BOLD,color.WARNING, "Exception Occured, Please Check Log",color.END)
Er = str('Error: {}. {}, line: {}'.format(sys.exc_info()[0],sys.exc_info()[1]
,sys.exc_info()[2].tb_lineno))
f = open("ErrorLog.txt","a+");f.write(Er);f.close()
sys.tracebacklimit=0
sys.exit("Please Try Again")
# The PDF will be in the same folder as the original file
print(color.GREEN,"Conversion Complete!\nGreat Job and Have a Wonderful Day!"
,color.END,"\U0001F30C")
t4 = time.time();
m = str(int((t4-t1)/60));
s = str(int((t4-t1)% 60))
print("Total Time: ",m," m ",s," s")
def Watermark(filename):
from datetime import datetime; import random;
# To make Watermark distinctive
class color:
BLUE = '\033[94m';
GREEN = '\033[92m';
BOLD = '\033[1m';
UNDERLINE = '\033[4m';
END = '\033[0m';
FAIL = '\033[91m';
WARNING = '\033[93m';
# Make Watermark
now = datetime.now()
print(color.UNDERLINE,'Unique Watermark',color.END)
print(color.BOLD,color.BLUE, str(filename),'\U0001F512',color.END)
print(color.BOLD,color.BLUE,now.strftime("%d/%m/%Y %H:%M:%S")," "
,str(random.randrange(1000000, 9999999, 1)),color.END,'\n')
| 4,786 | 0 | 73 |
27ff340a0b5564291d3085867268d26c5d4044da | 908 | py | Python | test_selenium19/Application.py | NikolayKutsoloshchenko/Selenium-1- | a027bcfd7196dbd78426b24100feb7be13f315ee | [
"Apache-2.0"
] | null | null | null | test_selenium19/Application.py | NikolayKutsoloshchenko/Selenium-1- | a027bcfd7196dbd78426b24100feb7be13f315ee | [
"Apache-2.0"
] | null | null | null | test_selenium19/Application.py | NikolayKutsoloshchenko/Selenium-1- | a027bcfd7196dbd78426b24100feb7be13f315ee | [
"Apache-2.0"
] | null | null | null | from selenium import webdriver
from Cart import cart
from Main_page import main_page
from Duck_Page import duck_page
| 26.705882 | 50 | 0.631057 | from selenium import webdriver
from Cart import cart
from Main_page import main_page
from Duck_Page import duck_page
class application():
def __init__(self):
self.driver = webdriver.Chrome()
self.cart = cart(self.driver)
self.main_page = main_page(self.driver)
self.duck_page = duck_page(self.driver)
def quit(self):
self.driver.quit()
def add_3_ducks(self):
for i in range(3):
self.main_page.open().click_any_duck()
size = self.duck_page.choose_size
if size:
self.duck_page.choose(size)
self.duck_page.add_to_cart()
def delete_all_ducks(self):
self.cart.open()
number = self.cart.number_of_ducks
for n in range(number):
self.cart.delete_product()
@property
def items_in_bag(self):
return self.cart.open().number_of_ducks | 620 | 147 | 23 |
ecc10ee1793d4b788268a57798f381f55dc3e916 | 2,098 | py | Python | rotkehlchen/tests/fixtures/assets.py | coblee/rotki | d675f5c2d0df5176337b7b10038524ee74923482 | [
"BSD-3-Clause"
] | 1 | 2020-11-14T12:20:37.000Z | 2020-11-14T12:20:37.000Z | rotkehlchen/tests/fixtures/assets.py | coblee/rotki | d675f5c2d0df5176337b7b10038524ee74923482 | [
"BSD-3-Clause"
] | 3 | 2021-01-28T21:30:46.000Z | 2022-03-25T19:17:00.000Z | rotkehlchen/tests/fixtures/assets.py | coblee/rotki | d675f5c2d0df5176337b7b10038524ee74923482 | [
"BSD-3-Clause"
] | null | null | null | from unittest.mock import patch
import pytest
from rotkehlchen.assets.resolver import AssetResolver
from rotkehlchen.tests.utils.mock import MockResponse
@pytest.fixture()
@pytest.fixture()
@pytest.fixture()
def query_github_for_assets() -> bool:
"""If True, the default behavior of querying github for latest assets will occur"""
return False
@pytest.fixture()
def force_reinitialize_asset_resolver() -> bool:
"""If True, the asset resolver instance will be force to start frm scratch"""
return False
# We need auto-use here since the fixture needs to be included
# everywhere so as to not have Asset() calls use a Resolver not
# initialized from here which would take more time
@pytest.fixture(autouse=True)
def asset_resolver(
data_dir,
query_github_for_assets,
mock_asset_meta_github_response,
mock_asset_github_response,
force_reinitialize_asset_resolver,
):
"""Run the first initialization of the AssetResolver singleton
It's an autouse fixture so that it always gets initialized
"""
if force_reinitialize_asset_resolver:
AssetResolver._AssetResolver__instance = None
if query_github_for_assets:
AssetResolver(data_dir)
return
# else mock the github request to return version lower than anything possible
get_patch = patch('requests.get', side_effect=mock_get_request)
with get_patch:
AssetResolver(data_dir)
| 32.276923 | 123 | 0.730696 | from unittest.mock import patch
import pytest
from rotkehlchen.assets.resolver import AssetResolver
from rotkehlchen.tests.utils.mock import MockResponse
@pytest.fixture()
def mock_asset_meta_github_response() -> str:
return '{"md5": "", "version": 0}'
@pytest.fixture()
def mock_asset_github_response() -> str:
return '{}'
@pytest.fixture()
def query_github_for_assets() -> bool:
"""If True, the default behavior of querying github for latest assets will occur"""
return False
@pytest.fixture()
def force_reinitialize_asset_resolver() -> bool:
"""If True, the asset resolver instance will be force to start frm scratch"""
return False
# We need auto-use here since the fixture needs to be included
# everywhere so as to not have Asset() calls use a Resolver not
# initialized from here which would take more time
@pytest.fixture(autouse=True)
def asset_resolver(
data_dir,
query_github_for_assets,
mock_asset_meta_github_response,
mock_asset_github_response,
force_reinitialize_asset_resolver,
):
"""Run the first initialization of the AssetResolver singleton
It's an autouse fixture so that it always gets initialized
"""
if force_reinitialize_asset_resolver:
AssetResolver._AssetResolver__instance = None
if query_github_for_assets:
AssetResolver(data_dir)
return
# else mock the github request to return version lower than anything possible
def mock_get_request(url: str) -> MockResponse:
if url == 'https://raw.githubusercontent.com/rotki/rotki/develop/rotkehlchen/data/all_assets.meta': # noqa: E501
return MockResponse(200, mock_asset_meta_github_response)
elif url == 'https://raw.githubusercontent.com/rotki/rotki/develop/rotkehlchen/data/all_assets.json': # noqa: E501
return MockResponse(200, mock_asset_github_response)
raise AssertionError('This mock should receive no other urls')
get_patch = patch('requests.get', side_effect=mock_get_request)
with get_patch:
AssetResolver(data_dir)
| 577 | 0 | 70 |
378107a37b43996269a9fc7970cdbe772aa0c035 | 901 | py | Python | upk/apk.py | Cologler/upk-python | f20f4ff3167d7a5a089523154b0b8f47973ea311 | [
"MIT"
] | null | null | null | upk/apk.py | Cologler/upk-python | f20f4ff3167d7a5a089523154b0b8f47973ea311 | [
"MIT"
] | null | null | null | upk/apk.py | Cologler/upk-python | f20f4ff3167d7a5a089523154b0b8f47973ea311 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Copyright (c) 2020~2999 - Cologler <skyoflw@gmail.com>
# ----------
#
# ----------
from typing import TypedDict, Optional
from logging import Logger
import zipfile
import xml.etree.ElementTree as et
from .androidManifestDecompress import read
def read_package_info(path: str, logger: Logger) -> Optional[_PackageInfo]:
'read package info from *.apk file.'
with zipfile.ZipFile(path) as z:
with z.open('AndroidManifest.xml') as am:
try:
a = read(am)
except:
logger.warning(f'unable decode manifest, skiped.')
else:
xml = et.fromstring(a)
return dict(
package=xml.get('package'),
version=xml.get('versionName')
)
| 26.5 | 75 | 0.578246 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2020~2999 - Cologler <skyoflw@gmail.com>
# ----------
#
# ----------
from typing import TypedDict, Optional
from logging import Logger
import zipfile
import xml.etree.ElementTree as et
from .androidManifestDecompress import read
class _PackageInfo(TypedDict):
package: Optional[str]
version: Optional[str]
def read_package_info(path: str, logger: Logger) -> Optional[_PackageInfo]:
'read package info from *.apk file.'
with zipfile.ZipFile(path) as z:
with z.open('AndroidManifest.xml') as am:
try:
a = read(am)
except:
logger.warning(f'unable decode manifest, skiped.')
else:
xml = et.fromstring(a)
return dict(
package=xml.get('package'),
version=xml.get('versionName')
)
| 0 | 63 | 23 |
8826fdd2cefeb6e5e2c40207583a53c500c6da3a | 140 | py | Python | src/ape_plugins/__init__.py | benjyz/ape | b5f3ff28c97c463a764881032cb2cfcd21201d07 | [
"Apache-2.0"
] | null | null | null | src/ape_plugins/__init__.py | benjyz/ape | b5f3ff28c97c463a764881032cb2cfcd21201d07 | [
"Apache-2.0"
] | null | null | null | src/ape_plugins/__init__.py | benjyz/ape | b5f3ff28c97c463a764881032cb2cfcd21201d07 | [
"Apache-2.0"
] | null | null | null | from ape import plugins
from ape.api.config import ConfigDict
@plugins.register(plugins.Config)
| 17.5 | 37 | 0.792857 | from ape import plugins
from ape.api.config import ConfigDict
@plugins.register(plugins.Config)
def config_class():
return ConfigDict
| 20 | 0 | 22 |
974ce64da67572bc816864864c616954b8d904e7 | 825 | py | Python | publisher_subscriber/utils.py | taher-systango/DjangoUnboxed | 808ab771a44564458b897b6ec854c08f43cccf2a | [
"MIT"
] | 68 | 2018-05-04T13:00:59.000Z | 2022-03-25T09:28:28.000Z | publisher_subscriber/utils.py | taher-systango/DjangoUnboxed | 808ab771a44564458b897b6ec854c08f43cccf2a | [
"MIT"
] | 38 | 2020-01-06T07:39:20.000Z | 2022-01-07T07:49:38.000Z | publisher_subscriber/utils.py | aboudzein/Qzzz.me-API | b5ee8e63fb7cf58d26fb5b6e4c9f22c04e90df08 | [
"MIT"
] | 27 | 2018-10-17T17:35:42.000Z | 2022-03-25T09:28:33.000Z | import redis
# redis_instance = redis.StrictRedis(host='aerios-ec-dqu.3mvwix.0001.use1.cache.amazonaws.com', port=9000, db=0)
redis_instance = redis.StrictRedis()
pub_sub = redis_instance.pubsub()
# print(subscriber("otp_channel"))
# print(publisher("otp_channel", "Your otp is 12345"))
# print(listen_message())
# print(unsubsciber(channels)) | 20.121951 | 112 | 0.72 | import redis
# redis_instance = redis.StrictRedis(host='aerios-ec-dqu.3mvwix.0001.use1.cache.amazonaws.com', port=9000, db=0)
redis_instance = redis.StrictRedis()
pub_sub = redis_instance.pubsub()
def publisher(channel, message):
response = redis_instance.publish(channel, message)
return response
def subscriber(channel):
pub_sub.subscribe(channel)
response = pub_sub.get_message()
return response
def unsubsciber(channel):
response = pub_sub.unsubscribe(channel)
return response
def listen_message():
messages = []
for message in pub_sub.listen():
print(message)
messages.append(message)
return messages
# print(subscriber("otp_channel"))
# print(publisher("otp_channel", "Your otp is 12345"))
# print(listen_message())
# print(unsubsciber(channels)) | 382 | 0 | 92 |
0f845ff7713a901c36e313f0ec90833a966a20f2 | 137 | py | Python | dataplicity/_version.py | wildfoundry/dataplicity-agent | 896d6f7d160c987656a158f036024d2858981ccb | [
"BSD-3-Clause"
] | 170 | 2016-09-14T10:35:24.000Z | 2022-03-29T20:29:32.000Z | dataplicity/_version.py | wildfoundry/dataplicity-agent | 896d6f7d160c987656a158f036024d2858981ccb | [
"BSD-3-Clause"
] | 47 | 2016-09-12T16:18:41.000Z | 2022-03-29T02:43:12.000Z | dataplicity/_version.py | wildfoundry/dataplicity-agent | 896d6f7d160c987656a158f036024d2858981ccb | [
"BSD-3-Clause"
] | 38 | 2016-09-14T10:35:25.000Z | 2022-03-29T20:29:50.000Z | __version__ = "0.5.6"
if __name__ == "__main__":
# The build script uses this to extract the current version
print(__version__)
| 22.833333 | 63 | 0.70073 | __version__ = "0.5.6"
if __name__ == "__main__":
# The build script uses this to extract the current version
print(__version__)
| 0 | 0 | 0 |
7409482c38010fc893ea9c527bf413a0027578d0 | 2,005 | py | Python | tools/rgenetics/rgQQ_code.py | bopopescu/phyG | 023f505b705ab953f502cbc55e90612047867583 | [
"CC-BY-3.0"
] | 2 | 2016-02-23T00:09:14.000Z | 2019-02-11T07:48:44.000Z | tools/rgenetics/rgQQ_code.py | bopopescu/phyG | 023f505b705ab953f502cbc55e90612047867583 | [
"CC-BY-3.0"
] | null | null | null | tools/rgenetics/rgQQ_code.py | bopopescu/phyG | 023f505b705ab953f502cbc55e90612047867583 | [
"CC-BY-3.0"
] | 6 | 2015-05-27T13:09:50.000Z | 2019-02-11T07:48:46.000Z | # before running the qc, need to rename various output files
# <data format="html" name="html_file" />
# <data format="txt" name="log_file" parent="html_file" />
# <data format="tabular" name="marker_file" parent="html_file" />
# <data format="tabular" name="subject_file" parent="html_file" />
from galaxy import datatypes,model
import sys,string
| 39.313725 | 97 | 0.457357 | # before running the qc, need to rename various output files
# <data format="html" name="html_file" />
# <data format="txt" name="log_file" parent="html_file" />
# <data format="tabular" name="marker_file" parent="html_file" />
# <data format="tabular" name="subject_file" parent="html_file" />
from galaxy import datatypes,model
import sys,string
def get_columns( input ):
columns = []
elems = []
if input and input.metadata.columns:
ncols = input.metadata.columns
colnames = ['Col%d' % x for x in range(1,ncols+1)]
for i, line in enumerate( file ( input.file_name ) ):
valid = True
if line and not line.startswith( '#' ):
line = line.rstrip('\r\n')
elems = line.split( '\t' )
"""
Since this tool requires users to select only those columns
that contain numerical values, we'll restrict the column select
list appropriately.
"""
if len(elems) > 0:
for col in range(len(elems)): # zero offset
if i == 0: # header row
colnames[col] = elems[col]
else:
val = elems[col]
try:
val = float(val)
valid = True
except:
valid = False
if valid:
option = colnames[col]
columns.append((option,str(col),False))
if len(columns) > 0:
"""
We have our select list built, so we can break out of the outer most for loop
"""
break
if i == 30:
break # Hopefully we never get here...
else:
columns = [('?','?',False),]
return columns
| 1,609 | 0 | 23 |
3390f88e941ee7fcc9acb0f45d26d6fba8e2744d | 369 | py | Python | backend/sortResults.py | linorallo/Product_Finder | 49ee6796eb661e0fa00546047f085ab6fc012eaf | [
"MIT"
] | null | null | null | backend/sortResults.py | linorallo/Product_Finder | 49ee6796eb661e0fa00546047f085ab6fc012eaf | [
"MIT"
] | null | null | null | backend/sortResults.py | linorallo/Product_Finder | 49ee6796eb661e0fa00546047f085ab6fc012eaf | [
"MIT"
] | null | null | null | import operator
| 33.545455 | 63 | 0.726287 | import operator
def sortIncreasing(results):
print(operator.itemgetter(1))
results.sort(key = operator.itemgetter(1))
return results
def sortDecreasing(results):
results.sort(key = operator.itemgetter(1), reverse = True)
return results
def sortIncreasingDiscoount(results):
results.sort(key = operator.itemgetter(1))
return results | 285 | 0 | 68 |
50ac6c7feceda98367989afa0c3fa92c0012d97f | 7,823 | py | Python | python/seq2seq_all_inputs/model.py | prashant-jayan21/minecraft-dialogue-models | 9becd5cc53d60be4d93b0581aed3ad48db635f47 | [
"NCSA"
] | 1 | 2021-07-01T15:43:01.000Z | 2021-07-01T15:43:01.000Z | python/seq2seq_all_inputs/model.py | prashant-jayan21/minecraft-dialogue-models | 9becd5cc53d60be4d93b0581aed3ad48db635f47 | [
"NCSA"
] | 1 | 2019-11-12T21:15:04.000Z | 2019-11-12T23:19:55.000Z | python/seq2seq_all_inputs/model.py | prashant-jayan21/minecraft-dialogue-models | 9becd5cc53d60be4d93b0581aed3ad48db635f47 | [
"NCSA"
] | 6 | 2020-02-24T22:18:21.000Z | 2021-10-04T11:51:56.000Z | import torch, sys
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
sys.path.append('..')
from utils import *
from seq2seq_attn.model import EncoderRNN as PrevUtterancesEncoder
from seq2seq_world_state.model import NextActionsEncoder, BlockCountersEncoder, BlockRegionCountersEncoder
# NOTE: no need of init weights here as that is done within the sub-modules of this module
class UtterancesAndBlockCountersEncoder(nn.Module):
"""
Integrated model -- combines an encoder RNN for encoding previous utterances with a global block counters encoder
"""
class UtterancesAndBlockRegionCountersEncoder(nn.Module):
"""
Integrated model -- combines an encoder RNN for encoding previous utterances with a regional block counters encoder (which comes with an optional global block counters encoder as well)
"""
| 59.265152 | 321 | 0.785249 | import torch, sys
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
sys.path.append('..')
from utils import *
from seq2seq_attn.model import EncoderRNN as PrevUtterancesEncoder
from seq2seq_world_state.model import NextActionsEncoder, BlockCountersEncoder, BlockRegionCountersEncoder
class AllInputsEncoder(nn.Module):
# NOTE: no need of init weights here as that is done within the sub-modules of this module
def __init__(self, args, train_dl, vocab):
super(AllInputsEncoder, self).__init__()
self.world_state_encoder = WorldStateEncoderRNN(
block_input_size = train_dl.dataset.src_input_size, block_embedding_size = args.block_embedding_size,
block_embedding_layer_nonlinearity = args.block_embedding_layer_nonlinearity,
hidden_size = args.world_state_hidden_size,
num_hidden_layers = args.world_state_num_hidden_layers, dropout = args.dropout, rnn = args.rnn
)
self.prev_utterances_encoder = PrevUtterancesEncoderRNN(
vocabulary = vocab, hidden_size = args.hidden_size,
num_hidden_layers = args.num_encoder_hidden_layers,
dropout = args.dropout, linear_size = args.linear_size,
nonlinearity = args.nonlinearity, rnn = args.rnn,
bidirectional=args.bidirectional, unfreeze_embeddings=args.unfreeze_embeddings
)
self.rnn = args.rnn
# TODO: Generalize to avoid using magic numbers
num_directions = 2 if self.prev_utterances_encoder.bidirectional else 1
self.input_encoding_size = self.world_state_encoder.hidden_size * 2 + self.prev_utterances_encoder.hidden_size * num_directions
def forward(self, encoder_inputs):
world_state_hidden_final = self.world_state_encoder(encoder_inputs)
prev_utterances_outputs, prev_utterances_hidden_final = self.prev_utterances_encoder(encoder_inputs)
# concatenate both hidden states
if self.rnn == "gru":
input_encoding = torch.cat((world_state_hidden_final, prev_utterances_hidden_final), 2)
elif self.rnn == "lstm":
input_encoding = (
torch.cat((world_state_hidden_final[0], prev_utterances_hidden_final[0]), 2),
torch.cat((world_state_hidden_final[1], prev_utterances_hidden_final[1]), 2)
)
return prev_utterances_outputs, input_encoding
class UtterancesAndNextActionsEncoder(nn.Module):
def __init__(self, args, train_dl, encoder_vocab):
super(UtterancesAndNextActionsEncoder, self).__init__()
self.next_actions_encoder = NextActionsEncoder(
block_input_size=train_dl.dataset.src_input_size_next_actions, block_embedding_size=args.block_embedding_size, block_embedding_layer_nonlinearity=args.block_embedding_layer_nonlinearity,
dropout=args.dropout_nae if args.dropout_nae is not None else args.dropout, use_gold_actions=args.use_gold_actions, bypass_embed=args.bypass_block_embedding, pre_concat=args.pre_concat_block_reprs
)
self.prev_utterances_encoder = PrevUtterancesEncoder(
encoder_vocab, args.rnn_hidden_size, args.num_encoder_hidden_layers, dropout=args.dropout_rnn if args.dropout_rnn is not None else args.dropout, linear_size=args.encoder_linear_size, nonlinearity=args.encoder_nonlinearity, rnn=args.rnn, bidirectional=args.bidirectional, train_embeddings=args.train_embeddings
)
self.input_encoding_size = self.next_actions_encoder.input_encoding_size
def forward(self, encoder_inputs):
next_actions_encoding = self.next_actions_encoder(encoder_inputs).decoder_input_concat
rnn_hidden = self.prev_utterances_encoder(encoder_inputs).decoder_hidden
return EncoderContext(decoder_hidden=rnn_hidden, decoder_input_concat=next_actions_encoding)
class UtterancesAndBlockCountersEncoder(nn.Module):
"""
Integrated model -- combines an encoder RNN for encoding previous utterances with a global block counters encoder
"""
def __init__(self, args, train_dl, encoder_vocab):
super(UtterancesAndBlockCountersEncoder, self).__init__()
self.block_counters_encoder = BlockCountersEncoder(
input_size=6, output_embedding_size=args.counter_embedding_size, embedding_layer_nonlinearity=args.counter_embedding_layer_nonlinearity,
dropout=args.dropout_counter if args.dropout_counter is not None else args.dropout, use_separate_encoders=args.use_separate_counter_encoders, pre_concat=args.pre_concat_counter_reprs, bypass_embed=args.bypass_counter_embedding
)
self.prev_utterances_encoder = PrevUtterancesEncoder(
encoder_vocab, args.rnn_hidden_size, args.num_encoder_hidden_layers, dropout=args.dropout_rnn if args.dropout_rnn is not None else args.dropout, linear_size=args.encoder_linear_size, nonlinearity=args.encoder_nonlinearity, rnn=args.rnn, bidirectional=args.bidirectional, train_embeddings=args.train_embeddings
)
self.input_encoding_size = self.block_counters_encoder.input_encoding_size
def forward(self, encoder_inputs):
block_counters_encoding = self.block_counters_encoder(encoder_inputs).decoder_input_concat
rnn_hidden = self.prev_utterances_encoder(encoder_inputs).decoder_hidden
return EncoderContext(decoder_hidden=rnn_hidden, decoder_input_concat=block_counters_encoding, decoder_hidden_concat=block_counters_encoding)
def flatten_parameters(self):
self.prev_utterances_encoder.flatten_parameters()
class UtterancesAndBlockRegionCountersEncoder(nn.Module):
"""
Integrated model -- combines an encoder RNN for encoding previous utterances with a regional block counters encoder (which comes with an optional global block counters encoder as well)
"""
def __init__(self, args, train_dl, encoder_vocab):
super(UtterancesAndBlockRegionCountersEncoder, self).__init__()
input_size_per_region = 24 if args.use_existing_blocks_counter else 18
self.block_region_counters_encoder = BlockRegionCountersEncoder(
input_size=input_size_per_region*(33 if args.spatial_info_window_size > 1 else 27)+1, output_embedding_size=args.counter_embedding_size, embedding_layer_nonlinearity=args.counter_embedding_layer_nonlinearity,
dropout=args.dropout_counter if args.dropout_counter is not None else args.dropout, use_separate_encoders=args.use_separate_counter_encoders, pre_concat=args.pre_concat_counter_reprs, bypass_embed=args.bypass_counter_embedding,
use_global_counters=args.use_global_counters, use_separate_global_embedding=args.use_separate_global_embedding, global_counter_embedding_size=args.global_counter_embedding_size,
use_existing_blocks_counter=args.use_existing_blocks_counter
)
self.prev_utterances_encoder = PrevUtterancesEncoder(
encoder_vocab, args.rnn_hidden_size, args.num_encoder_hidden_layers, dropout=args.dropout_rnn if args.dropout_rnn is not None else args.dropout, linear_size=args.encoder_linear_size, nonlinearity=args.encoder_nonlinearity, rnn=args.rnn, bidirectional=args.bidirectional, train_embeddings=args.train_embeddings
)
self.input_encoding_size = self.block_region_counters_encoder.input_encoding_size
def forward(self, encoder_inputs):
block_counters_encoding = self.block_region_counters_encoder(encoder_inputs).decoder_input_concat
rnn_hidden = self.prev_utterances_encoder(encoder_inputs).decoder_hidden
return EncoderContext(decoder_hidden=rnn_hidden, decoder_input_concat=block_counters_encoding, decoder_hidden_concat=block_counters_encoding)
def flatten_parameters(self):
self.prev_utterances_encoder.flatten_parameters()
| 6,554 | 41 | 314 |
36b6f73164dd36acdc462c622e987758d276f3a5 | 83 | py | Python | Iniciante/1002/1002.py | RodrigoFernandoSilva/Python3-UriOnlineJudge | f9095c2d053b85056c1cad55bc743378b852a4d4 | [
"Apache-2.0"
] | null | null | null | Iniciante/1002/1002.py | RodrigoFernandoSilva/Python3-UriOnlineJudge | f9095c2d053b85056c1cad55bc743378b852a4d4 | [
"Apache-2.0"
] | null | null | null | Iniciante/1002/1002.py | RodrigoFernandoSilva/Python3-UriOnlineJudge | f9095c2d053b85056c1cad55bc743378b852a4d4 | [
"Apache-2.0"
] | null | null | null | PI = float(3.14159)
raio = float(input())
print("A=%0.4f" %(PI * (raio * raio)))
| 13.833333 | 38 | 0.554217 | PI = float(3.14159)
raio = float(input())
print("A=%0.4f" %(PI * (raio * raio)))
| 0 | 0 | 0 |
143a98feebc85c217a2d46c8d63cd705d3f9ca85 | 1,759 | py | Python | code/pylesson/challenge1/disney_checker.py | HeardLibrary/digital-scholarship | c2a791376ecea4efff4ff57c7a93b291b605d956 | [
"CC0-1.0"
] | 25 | 2018-09-27T03:46:38.000Z | 2022-03-13T00:08:22.000Z | code/pylesson/challenge1/disney_checker.py | HeardLibrary/digital-scholarship | c2a791376ecea4efff4ff57c7a93b291b605d956 | [
"CC0-1.0"
] | 22 | 2019-07-23T15:30:14.000Z | 2022-03-29T22:04:37.000Z | code/pylesson/challenge1/disney_checker.py | HeardLibrary/digital-scholarship | c2a791376ecea4efff4ff57c7a93b291b605d956 | [
"CC0-1.0"
] | 18 | 2019-01-28T16:40:28.000Z | 2022-01-13T01:59:00.000Z | #modules for GUI interface
import tkinter
from tkinter import *
from tkinter import ttk
# User interface setup
# this sets up the characteristics of the window
root = Tk()
root.title("Disney Checker")
mainframe = ttk.Frame(root, padding="3 3 12 12")
mainframe.grid(column=0, row=0, sticky=(N, W, E, S))
mainframe.columnconfigure(0, weight=1)
mainframe.rowconfigure(0, weight=1)
#set up array of labels, text entry boxes, and buttons
firstLabel = StringVar()
ttk.Label(mainframe, textvariable=firstLabel).grid(column=3, row=3, sticky=(W, E))
firstLabel.set('Character')
firstInputBox = ttk.Entry(mainframe, width = 60, textvariable = StringVar())
firstInputBox.grid(column=4, row=3, sticky=W)
firstInputBox.insert(END, 'type name here')
#set up action button
doSomethingButton = ttk.Button(mainframe, text = "Check character", width = 30, command = lambda: checkCharacterButtonClick() )
doSomethingButton.grid(column=4, row=15, sticky=W)
# ------------------------------------------------------------------------------------------
# Function definitions
if __name__=="__main__":
main() | 33.188679 | 127 | 0.664014 | #modules for GUI interface
import tkinter
from tkinter import *
from tkinter import ttk
# User interface setup
# this sets up the characteristics of the window
root = Tk()
root.title("Disney Checker")
mainframe = ttk.Frame(root, padding="3 3 12 12")
mainframe.grid(column=0, row=0, sticky=(N, W, E, S))
mainframe.columnconfigure(0, weight=1)
mainframe.rowconfigure(0, weight=1)
#set up array of labels, text entry boxes, and buttons
firstLabel = StringVar()
ttk.Label(mainframe, textvariable=firstLabel).grid(column=3, row=3, sticky=(W, E))
firstLabel.set('Character')
firstInputBox = ttk.Entry(mainframe, width = 60, textvariable = StringVar())
firstInputBox.grid(column=4, row=3, sticky=W)
firstInputBox.insert(END, 'type name here')
#set up action button
def checkCharacterButtonClick():
checkName()
doSomethingButton = ttk.Button(mainframe, text = "Check character", width = 30, command = lambda: checkCharacterButtonClick() )
doSomethingButton.grid(column=4, row=15, sticky=W)
# ------------------------------------------------------------------------------------------
# Function definitions
def checkName(): # This is the function that is invoked when the Do Something button is clicked
name = firstInputBox.get()
print(name)
if name == 'Mickey Mouse':
print('You are a Disney character')
print('You are a mouse')
elif name == 'Donald Duck':
print('You are a Disney character')
print('You are not a mouse')
elif name == 'Minnie Mouse':
print('You are a Disney character')
print('Your boyfriend is getting old')
else:
print('You are not a Disney character')
print("That's all folks!")
def main():
root.mainloop()
if __name__=="__main__":
main() | 593 | 0 | 68 |
d1dc24d065ab992955504784c618791fbfda2b40 | 3,945 | py | Python | docker/pythonpath_dev/superset_config.py | akashanita/superset-keycloak | 1d446ecf723d1cb0c43057d501cf1e7b83fa27ff | [
"Apache-2.0"
] | null | null | null | docker/pythonpath_dev/superset_config.py | akashanita/superset-keycloak | 1d446ecf723d1cb0c43057d501cf1e7b83fa27ff | [
"Apache-2.0"
] | null | null | null | docker/pythonpath_dev/superset_config.py | akashanita/superset-keycloak | 1d446ecf723d1cb0c43057d501cf1e7b83fa27ff | [
"Apache-2.0"
] | null | null | null | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# This file is included in the final Docker image and SHOULD be overridden when
# deploying the image to prod. Settings configured here are intended for use in local
# development environments. Also note that superset_config_docker.py is imported
# as a final step as a means to override "defaults" configured here
#
import logging
import os
from cachelib.file import FileSystemCache
logger = logging.getLogger()
def get_env_variable(var_name, default=None):
"""Get the environment variable or raise exception."""
try:
return os.environ[var_name]
except KeyError:
if default is not None:
return default
else:
error_msg = "The environment variable {} was missing, abort...".format(
var_name
)
raise EnvironmentError(error_msg)
DATABASE_DIALECT = get_env_variable("DATABASE_DIALECT")
DATABASE_USER = get_env_variable("DATABASE_USER")
DATABASE_PASSWORD = get_env_variable("DATABASE_PASSWORD")
DATABASE_HOST = get_env_variable("DATABASE_HOST")
DATABASE_PORT = get_env_variable("DATABASE_PORT")
DATABASE_DB = get_env_variable("DATABASE_DB")
# The SQLAlchemy connection string.
SQLALCHEMY_DATABASE_URI = "%s://%s:%s@%s:%s/%s" % (
DATABASE_DIALECT,
DATABASE_USER,
DATABASE_PASSWORD,
DATABASE_HOST,
DATABASE_PORT,
DATABASE_DB,
)
REDIS_HOST = get_env_variable("REDIS_HOST")
REDIS_PORT = get_env_variable("REDIS_PORT")
REDIS_CELERY_DB = get_env_variable("REDIS_CELERY_DB", 0)
REDIS_RESULTS_DB = get_env_variable("REDIS_CELERY_DB", 1)
RESULTS_BACKEND = FileSystemCache("/app/superset_home/sqllab")
CELERY_CONFIG = CeleryConfig
SQLLAB_CTAS_NO_LIMIT = True
#
# Optionally import superset_config_docker.py (which will have been included on
# the PYTHONPATH) in order to allow for local settings to be overridden
#
try:
import superset_config_docker
from superset_config_docker import * # noqa
logger.info(
f"Loaded your Docker configuration at " f"[{superset_config_docker.__file__}]"
)
except ImportError:
logger.info("Using default Docker config...")
# source: https://github.com/apache/incubator-superset/pull/1866#issuecomment-347310860
ADDITIONAL_MIDDLEWARE = [ReverseProxied, ]
| 33.432203 | 87 | 0.726996 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# This file is included in the final Docker image and SHOULD be overridden when
# deploying the image to prod. Settings configured here are intended for use in local
# development environments. Also note that superset_config_docker.py is imported
# as a final step as a means to override "defaults" configured here
#
import logging
import os
from cachelib.file import FileSystemCache
logger = logging.getLogger()
def get_env_variable(var_name, default=None):
"""Get the environment variable or raise exception."""
try:
return os.environ[var_name]
except KeyError:
if default is not None:
return default
else:
error_msg = "The environment variable {} was missing, abort...".format(
var_name
)
raise EnvironmentError(error_msg)
DATABASE_DIALECT = get_env_variable("DATABASE_DIALECT")
DATABASE_USER = get_env_variable("DATABASE_USER")
DATABASE_PASSWORD = get_env_variable("DATABASE_PASSWORD")
DATABASE_HOST = get_env_variable("DATABASE_HOST")
DATABASE_PORT = get_env_variable("DATABASE_PORT")
DATABASE_DB = get_env_variable("DATABASE_DB")
# The SQLAlchemy connection string.
SQLALCHEMY_DATABASE_URI = "%s://%s:%s@%s:%s/%s" % (
DATABASE_DIALECT,
DATABASE_USER,
DATABASE_PASSWORD,
DATABASE_HOST,
DATABASE_PORT,
DATABASE_DB,
)
REDIS_HOST = get_env_variable("REDIS_HOST")
REDIS_PORT = get_env_variable("REDIS_PORT")
REDIS_CELERY_DB = get_env_variable("REDIS_CELERY_DB", 0)
REDIS_RESULTS_DB = get_env_variable("REDIS_CELERY_DB", 1)
RESULTS_BACKEND = FileSystemCache("/app/superset_home/sqllab")
class CeleryConfig(object):
BROKER_URL = f"redis://{REDIS_HOST}:{REDIS_PORT}/{REDIS_CELERY_DB}"
CELERY_IMPORTS = ("superset.sql_lab",)
CELERY_RESULT_BACKEND = f"redis://{REDIS_HOST}:{REDIS_PORT}/{REDIS_RESULTS_DB}"
CELERY_ANNOTATIONS = {"tasks.add": {"rate_limit": "10/s"}}
CELERY_TASK_PROTOCOL = 1
CELERY_CONFIG = CeleryConfig
SQLLAB_CTAS_NO_LIMIT = True
#
# Optionally import superset_config_docker.py (which will have been included on
# the PYTHONPATH) in order to allow for local settings to be overridden
#
try:
import superset_config_docker
from superset_config_docker import * # noqa
logger.info(
f"Loaded your Docker configuration at " f"[{superset_config_docker.__file__}]"
)
except ImportError:
logger.info("Using default Docker config...")
# source: https://github.com/apache/incubator-superset/pull/1866#issuecomment-347310860
class ReverseProxied(object):
def __init__(self, app):
self.app = app
def __call__(self, environ, start_response):
script_name = environ.get('HTTP_X_SCRIPT_NAME', '')
if script_name:
environ['SCRIPT_NAME'] = script_name
path_info = environ['PATH_INFO']
if path_info.startswith(script_name):
environ['PATH_INFO'] = path_info[len(script_name):]
scheme = environ.get('HTTP_X_SCHEME', '')
if scheme:
environ['wsgi.url_scheme'] = scheme
return self.app(environ, start_response)
ADDITIONAL_MIDDLEWARE = [ReverseProxied, ]
| 512 | 305 | 99 |
c54610fc54cef73e3cf59b3da4a82764d06819d4 | 443 | py | Python | 2212.py | WaiNaat/BOJ-Python | 3365ef090c7dcf6e6a598fea0b25c416a5a3e01b | [
"MIT"
] | null | null | null | 2212.py | WaiNaat/BOJ-Python | 3365ef090c7dcf6e6a598fea0b25c416a5a3e01b | [
"MIT"
] | null | null | null | 2212.py | WaiNaat/BOJ-Python | 3365ef090c7dcf6e6a598fea0b25c416a5a3e01b | [
"MIT"
] | null | null | null | # input
n = int(input())
k = int(input())
sensor = map(int, input().split())
# process
'''
센서들의 좌표를 정렬
각 센서마다 본인 다음 센서와의 거리를 계산
거리들 정렬
가장 긴 거리 제거
>> 처음에는 하나였던 센서들이 두 개의 묶음으로 쪼개짐
>> 두 개의 집중국
다시 가장 긴 거리 제거
>> 두 묶음이었던 센서 중 하나가 쪼개지면서 총 세 개의 묶음이 됨
>> 세 개의 집중국
이런 식으로 k개의 집중국이 될 때까지 최대 거리를 제거
남은 거리들의 합 계산
'''
sensor = sorted(sensor)
dist = sorted([sensor[i + 1] - sensor[i] for i in range(n - 1)])
# output
print(sum(dist[: (n - 1) - (k - 1)])) | 20.136364 | 64 | 0.607223 | # input
n = int(input())
k = int(input())
sensor = map(int, input().split())
# process
'''
센서들의 좌표를 정렬
각 센서마다 본인 다음 센서와의 거리를 계산
거리들 정렬
가장 긴 거리 제거
>> 처음에는 하나였던 센서들이 두 개의 묶음으로 쪼개짐
>> 두 개의 집중국
다시 가장 긴 거리 제거
>> 두 묶음이었던 센서 중 하나가 쪼개지면서 총 세 개의 묶음이 됨
>> 세 개의 집중국
이런 식으로 k개의 집중국이 될 때까지 최대 거리를 제거
남은 거리들의 합 계산
'''
sensor = sorted(sensor)
dist = sorted([sensor[i + 1] - sensor[i] for i in range(n - 1)])
# output
print(sum(dist[: (n - 1) - (k - 1)])) | 0 | 0 | 0 |
f2282beaefe4bfc3184a6b7f4f573d5259dc25bd | 346 | py | Python | excercises/6-0002/whatever.py | obsessedyouth/simulacra | 530155664daf1aff06cb575c4c4073acbacdb32d | [
"MIT"
] | null | null | null | excercises/6-0002/whatever.py | obsessedyouth/simulacra | 530155664daf1aff06cb575c4c4073acbacdb32d | [
"MIT"
] | null | null | null | excercises/6-0002/whatever.py | obsessedyouth/simulacra | 530155664daf1aff06cb575c4c4073acbacdb32d | [
"MIT"
] | null | null | null | import random
print(experiment(1_000_000))
| 19.222222 | 48 | 0.473988 | import random
def experiment(trials):
single = 0
for n in range(trials):
x = random.choice((1, 2, 3, 4)) # P(1/4)
y = random.choice((1, 2, 3, 4)) # P(1/4)
# P(A + B) % 2 == 0
if (x + y) % 2 == 0:
single += 1
# P(1/4 + 1/4 == even)
return single/trials
print(experiment(1_000_000))
| 278 | 0 | 23 |
ec566b248c5bad33378d93b615f2cf14703052aa | 1,607 | py | Python | genomestats.py | rvenkatesh99/sequence_alignment | 107c262ef25ddbf025e054339bdd29efd728033a | [
"MIT"
] | null | null | null | genomestats.py | rvenkatesh99/sequence_alignment | 107c262ef25ddbf025e054339bdd29efd728033a | [
"MIT"
] | null | null | null | genomestats.py | rvenkatesh99/sequence_alignment | 107c262ef25ddbf025e054339bdd29efd728033a | [
"MIT"
] | null | null | null | import argparse
import sys
import seqlib
parser = argparse.ArgumentParser(
description='Calculate statistics for genome')
parser.add_argument('--fasta', required = True, type = str,
metavar = '<path>', help='path to a fasta file, may be compressed')
arg = parser.parse_args()
contig_size = []
nt_count = {}
for name, seq in seqlib.read_fasta(arg.fasta):
seq = seq.upper()
contig_size.append(len(seq))
for base in seq:
if base not in nt_count:
nt_count[base] = 1
elif base in nt_count:
nt_count[base] += 1
gc_count = nt_count['G'] + nt_count['C']
# Sort contigs longest to shortest
contig_size.sort(reverse = True)
num_contigs = len(contig_size)
shortest_contig = contig_size[-1]
longest_contig = contig_size[0]
total_size = 0
for i in contig_size:
total_size += i
avg_size = total_size/num_contigs
# Median Calculation
if num_contigs % 2 == 1:
median_contig = contig_size[int(num_contigs/2)]
elif num_contigs % 2 == 0:
med1 = contig_size[int(num_contigs/2)]
med2 = contig_size[int((num_contigs/2) +1)]
median_contig = (med1 + med2)/2
n50 = 0
val = 0
for size in contig_size:
val += size
if val > total_size/2:
n50 = size
break
gc_fraction = gc_count/total_size * 100
print(f'Total size: {total_size}')
print(f'Number of contigs: {num_contigs}')
print(f'Shortest contig: {shortest_contig}')
print(f'Longest contig: {longest_contig}')
print(f'Average contig size: {avg_size}')
print(f'Median contig size: {median_contig}')
print(f'N50: {n50}')
print(f'GC Fraction: {gc_fraction}%')
print(f'Letter Counts: {nt_count}')
| 25.109375 | 68 | 0.697573 | import argparse
import sys
import seqlib
parser = argparse.ArgumentParser(
description='Calculate statistics for genome')
parser.add_argument('--fasta', required = True, type = str,
metavar = '<path>', help='path to a fasta file, may be compressed')
arg = parser.parse_args()
contig_size = []
nt_count = {}
for name, seq in seqlib.read_fasta(arg.fasta):
seq = seq.upper()
contig_size.append(len(seq))
for base in seq:
if base not in nt_count:
nt_count[base] = 1
elif base in nt_count:
nt_count[base] += 1
gc_count = nt_count['G'] + nt_count['C']
# Sort contigs longest to shortest
contig_size.sort(reverse = True)
num_contigs = len(contig_size)
shortest_contig = contig_size[-1]
longest_contig = contig_size[0]
total_size = 0
for i in contig_size:
total_size += i
avg_size = total_size/num_contigs
# Median Calculation
if num_contigs % 2 == 1:
median_contig = contig_size[int(num_contigs/2)]
elif num_contigs % 2 == 0:
med1 = contig_size[int(num_contigs/2)]
med2 = contig_size[int((num_contigs/2) +1)]
median_contig = (med1 + med2)/2
n50 = 0
val = 0
for size in contig_size:
val += size
if val > total_size/2:
n50 = size
break
gc_fraction = gc_count/total_size * 100
print(f'Total size: {total_size}')
print(f'Number of contigs: {num_contigs}')
print(f'Shortest contig: {shortest_contig}')
print(f'Longest contig: {longest_contig}')
print(f'Average contig size: {avg_size}')
print(f'Median contig size: {median_contig}')
print(f'N50: {n50}')
print(f'GC Fraction: {gc_fraction}%')
print(f'Letter Counts: {nt_count}')
| 0 | 0 | 0 |
e7f9da9bcee463beee3749304734d143c54caa04 | 4,012 | py | Python | optht/optht.py | decarsg/optht | 5e2b287cc00b08d9364ffec8d0b9e0c482ec614a | [
"BSD-3-Clause"
] | 19 | 2018-03-19T19:12:07.000Z | 2021-12-03T15:11:58.000Z | optht/optht.py | Benli11/optht | 5e2b287cc00b08d9364ffec8d0b9e0c482ec614a | [
"BSD-3-Clause"
] | 7 | 2019-07-08T16:09:52.000Z | 2022-02-04T14:55:05.000Z | optht/optht.py | decarsg/optht | 5e2b287cc00b08d9364ffec8d0b9e0c482ec614a | [
"BSD-3-Clause"
] | 9 | 2018-06-05T06:19:37.000Z | 2021-07-22T20:10:13.000Z | """Optimal hard threshold for matrix denoising."""
import logging
import numpy as np
from scipy import integrate
# Create logger
log = logging.getLogger(__name__)
def optht(beta, sv, sigma=None):
"""Compute optimal hard threshold for singular values.
Off-the-shelf method for determining the optimal singular value truncation
(hard threshold) for matrix denoising.
The method gives the optimal location both in the case of the known or
unknown noise level.
Parameters
----------
beta : scalar or array_like
Scalar determining the aspect ratio of a matrix, i.e., ``beta = m/n``,
where ``m >= n``. Instead the input matrix can be provided and the
aspect ratio is determined automatically.
sv : array_like
The singular values for the given input matrix.
sigma : real, optional
Noise level if known.
Returns
-------
k : int
Optimal target rank.
Notes
-----
Code is adapted from Matan Gavish and David Donoho, see [1]_.
References
----------
.. [1] Gavish, Matan, and David L. Donoho.
"The optimal hard threshold for singular values is 4/sqrt(3)"
IEEE Transactions on Information Theory 60.8 (2014): 5040-5053.
http://arxiv.org/abs/1305.5870
"""
# Compute aspect ratio of the input matrix
if isinstance(beta, np.ndarray):
m = min(beta.shape)
n = max(beta.shape)
beta = m / n
# Check ``beta``
if beta < 0 or beta > 1:
raise ValueError('Parameter `beta` must be in (0,1].')
if sigma is None:
# Sigma is unknown
log.info('Sigma unknown.')
# Approximate ``w(beta)``
coef_approx = _optimal_SVHT_coef_sigma_unknown(beta)
log.info(f'Approximated `w(beta)` value: {coef_approx}')
# Compute the optimal ``w(beta)``
coef = (_optimal_SVHT_coef_sigma_known(beta)
/ np.sqrt(_median_marcenko_pastur(beta)))
# Compute cutoff
cutoff = coef * np.median(sv)
else:
# Sigma is known
log.info('Sigma known.')
# Compute optimal ``w(beta)``
coef = _optimal_SVHT_coef_sigma_known(beta)
# Compute cutoff
cutoff = coef * np.sqrt(len(sv)) * sigma
# Log cutoff and ``w(beta)``
log.info(f'`w(beta)` value: {coef}')
log.info(f'Cutoff value: {cutoff}')
# Compute and return rank
greater_than_cutoff = np.where(sv > cutoff)
if greater_than_cutoff[0].size > 0:
k = np.max(greater_than_cutoff) + 1
else:
k = 0
log.info(f'Target rank: {k}')
return k
def _optimal_SVHT_coef_sigma_known(beta):
"""Implement Equation (11)."""
return np.sqrt(2 * (beta + 1) + (8 * beta)
/ (beta + 1 + np.sqrt(beta**2 + 14 * beta + 1)))
def _optimal_SVHT_coef_sigma_unknown(beta):
"""Implement Equation (5)."""
return 0.56 * beta**3 - 0.95 * beta**2 + 1.82 * beta + 1.43
def _mar_pas(x, topSpec, botSpec, beta):
"""Implement Marcenko-Pastur distribution."""
if (topSpec - x) * (x - botSpec) > 0:
return np.sqrt((topSpec - x) *
(x - botSpec)) / (beta * x) / (2 * np.pi)
else:
return 0
def _median_marcenko_pastur(beta):
"""Compute median of Marcenko-Pastur distribution."""
botSpec = lobnd = (1 - np.sqrt(beta))**2
topSpec = hibnd = (1 + np.sqrt(beta))**2
change = 1
while change & ((hibnd - lobnd) > .001):
change = 0
x = np.linspace(lobnd, hibnd, 10)
y = np.zeros_like(x)
for i in range(len(x)):
yi, err = integrate.quad(
_mar_pas,
a=x[i],
b=topSpec,
args=(topSpec, botSpec, beta),
)
y[i] = 1.0 - yi
if np.any(y < 0.5):
lobnd = np.max(x[y < 0.5])
change = 1
if np.any(y > 0.5):
hibnd = np.min(x[y > 0.5])
change = 1
return (hibnd + lobnd) / 2.
| 28.863309 | 78 | 0.570289 | """Optimal hard threshold for matrix denoising."""
import logging
import numpy as np
from scipy import integrate
# Create logger
log = logging.getLogger(__name__)
def optht(beta, sv, sigma=None):
"""Compute optimal hard threshold for singular values.
Off-the-shelf method for determining the optimal singular value truncation
(hard threshold) for matrix denoising.
The method gives the optimal location both in the case of the known or
unknown noise level.
Parameters
----------
beta : scalar or array_like
Scalar determining the aspect ratio of a matrix, i.e., ``beta = m/n``,
where ``m >= n``. Instead the input matrix can be provided and the
aspect ratio is determined automatically.
sv : array_like
The singular values for the given input matrix.
sigma : real, optional
Noise level if known.
Returns
-------
k : int
Optimal target rank.
Notes
-----
Code is adapted from Matan Gavish and David Donoho, see [1]_.
References
----------
.. [1] Gavish, Matan, and David L. Donoho.
"The optimal hard threshold for singular values is 4/sqrt(3)"
IEEE Transactions on Information Theory 60.8 (2014): 5040-5053.
http://arxiv.org/abs/1305.5870
"""
# Compute aspect ratio of the input matrix
if isinstance(beta, np.ndarray):
m = min(beta.shape)
n = max(beta.shape)
beta = m / n
# Check ``beta``
if beta < 0 or beta > 1:
raise ValueError('Parameter `beta` must be in (0,1].')
if sigma is None:
# Sigma is unknown
log.info('Sigma unknown.')
# Approximate ``w(beta)``
coef_approx = _optimal_SVHT_coef_sigma_unknown(beta)
log.info(f'Approximated `w(beta)` value: {coef_approx}')
# Compute the optimal ``w(beta)``
coef = (_optimal_SVHT_coef_sigma_known(beta)
/ np.sqrt(_median_marcenko_pastur(beta)))
# Compute cutoff
cutoff = coef * np.median(sv)
else:
# Sigma is known
log.info('Sigma known.')
# Compute optimal ``w(beta)``
coef = _optimal_SVHT_coef_sigma_known(beta)
# Compute cutoff
cutoff = coef * np.sqrt(len(sv)) * sigma
# Log cutoff and ``w(beta)``
log.info(f'`w(beta)` value: {coef}')
log.info(f'Cutoff value: {cutoff}')
# Compute and return rank
greater_than_cutoff = np.where(sv > cutoff)
if greater_than_cutoff[0].size > 0:
k = np.max(greater_than_cutoff) + 1
else:
k = 0
log.info(f'Target rank: {k}')
return k
def _optimal_SVHT_coef_sigma_known(beta):
"""Implement Equation (11)."""
return np.sqrt(2 * (beta + 1) + (8 * beta)
/ (beta + 1 + np.sqrt(beta**2 + 14 * beta + 1)))
def _optimal_SVHT_coef_sigma_unknown(beta):
"""Implement Equation (5)."""
return 0.56 * beta**3 - 0.95 * beta**2 + 1.82 * beta + 1.43
def _mar_pas(x, topSpec, botSpec, beta):
"""Implement Marcenko-Pastur distribution."""
if (topSpec - x) * (x - botSpec) > 0:
return np.sqrt((topSpec - x) *
(x - botSpec)) / (beta * x) / (2 * np.pi)
else:
return 0
def _median_marcenko_pastur(beta):
"""Compute median of Marcenko-Pastur distribution."""
botSpec = lobnd = (1 - np.sqrt(beta))**2
topSpec = hibnd = (1 + np.sqrt(beta))**2
change = 1
while change & ((hibnd - lobnd) > .001):
change = 0
x = np.linspace(lobnd, hibnd, 10)
y = np.zeros_like(x)
for i in range(len(x)):
yi, err = integrate.quad(
_mar_pas,
a=x[i],
b=topSpec,
args=(topSpec, botSpec, beta),
)
y[i] = 1.0 - yi
if np.any(y < 0.5):
lobnd = np.max(x[y < 0.5])
change = 1
if np.any(y > 0.5):
hibnd = np.min(x[y > 0.5])
change = 1
return (hibnd + lobnd) / 2.
| 0 | 0 | 0 |
eae7563a2a9a73c8a12a397987adfafafa20b43e | 2,924 | py | Python | NoLossAsyncGenerator/NoLossAsyncGenerator.py | monk-after-90s/NoLossAsyncGenerator | b5d3872446a88b294d492510e0d3b5c3938dd2b7 | [
"MIT"
] | null | null | null | NoLossAsyncGenerator/NoLossAsyncGenerator.py | monk-after-90s/NoLossAsyncGenerator | b5d3872446a88b294d492510e0d3b5c3938dd2b7 | [
"MIT"
] | null | null | null | NoLossAsyncGenerator/NoLossAsyncGenerator.py | monk-after-90s/NoLossAsyncGenerator | b5d3872446a88b294d492510e0d3b5c3938dd2b7 | [
"MIT"
] | null | null | null | '''
Asynchronous generator without any data loss in case that handling one message costs too much time.
'''
import asyncio
from ensureTaskCanceled.ensureTaskCanceled import ensureTaskCanceled
# def NoLossAsyncGenerator(raw_async_iterater):
# async def no_data_loss_async_generator_wrapper(raw_async_iterater):
# q = asyncio.Queue()
#
# async def yield2q(raw_async_iterater, q: asyncio.Queue):
# async for msg in raw_async_iterater:
# q.put_nowait(msg)
#
# asyncio.create_task(yield2q(raw_async_iterater, q))
# while True:
# msg = await q.get()
# # generator.left = q.qsize()
# # generator.__dict__['left'] = q.qsize()
# yield msg
#
# generator = no_data_loss_async_generator_wrapper(raw_async_iterater)
# return generator
if __name__ == '__main__':
asyncio.run(test_no_data_loss_async_generator())
| 25.876106 | 99 | 0.575923 | '''
Asynchronous generator without any data loss in case that handling one message costs too much time.
'''
import asyncio
from ensureTaskCanceled.ensureTaskCanceled import ensureTaskCanceled
class NoLossAsyncGenerator:
def __init__(self, raw_async_iterater=None):
self.q = asyncio.Queue()
self.raw_async_iterater = raw_async_iterater
self._activate_task = asyncio.create_task(self._activate())
async def _activate(self):
if self.raw_async_iterater:
async for msg in self.raw_async_iterater:
self.q.put_nowait(msg)
def __aiter__(self):
return self
@property
def left(self):
return self.q.qsize()
async def __anext__(self):
try:
next_item = await self.q.get()
except:
pass
else:
self.q.task_done()
return next_item
async def wait_empty(self):
'''
等待下一次空数据的机会
:return:
'''
await self.q.join()
async def close(self):
await self.wait_empty()
await ensureTaskCanceled(self._activate_task)
# def NoLossAsyncGenerator(raw_async_iterater):
# async def no_data_loss_async_generator_wrapper(raw_async_iterater):
# q = asyncio.Queue()
#
# async def yield2q(raw_async_iterater, q: asyncio.Queue):
# async for msg in raw_async_iterater:
# q.put_nowait(msg)
#
# asyncio.create_task(yield2q(raw_async_iterater, q))
# while True:
# msg = await q.get()
# # generator.left = q.qsize()
# # generator.__dict__['left'] = q.qsize()
# yield msg
#
# generator = no_data_loss_async_generator_wrapper(raw_async_iterater)
# return generator
def no_data_loss_async_generator_decorator(async_generator_function):
async def g(*args, **kwargs):
async for msg in NoLossAsyncGenerator(async_generator_function(*args, **kwargs)):
yield msg
return g
if __name__ == '__main__':
async def test_no_data_loss_async_generator():
async def g():
n = 0
while True:
yield n
n += 1
await asyncio.sleep(1)
m = 0
g = NoLossAsyncGenerator(g())
async for n in g:
print(n)
print(f'left:{g.left}')
m += 1
if m <= 5:
await asyncio.sleep(2)
async def test_no_data_loss_async_generator_decorator():
@no_data_loss_async_generator_decorator
async def g():
n = 0
while True:
yield n
n += 1
await asyncio.sleep(1)
m = 0
async for n in g():
print(n)
m += 1
if m <= 5:
await asyncio.sleep(2)
asyncio.run(test_no_data_loss_async_generator())
| 1,589 | 326 | 100 |
6694f9353aaed1385c9fee1c9ada58e4686644ef | 375 | py | Python | products/urls.py | Valli-dev/Price-Comparison-Group2-BE | a331d3ae3e654a7b3b6fc8dc08551afe8c925a9c | [
"MIT"
] | 1 | 2021-07-24T14:43:02.000Z | 2021-07-24T14:43:02.000Z | products/urls.py | Valli-dev/Price-Comparison-Group2-BE | a331d3ae3e654a7b3b6fc8dc08551afe8c925a9c | [
"MIT"
] | null | null | null | products/urls.py | Valli-dev/Price-Comparison-Group2-BE | a331d3ae3e654a7b3b6fc8dc08551afe8c925a9c | [
"MIT"
] | 16 | 2021-07-15T06:16:25.000Z | 2021-08-20T06:06:02.000Z | from django.urls import path
from .import views
from blog.views import like_view
app_name='products'
urlpatterns = [
path('amazon', views.amazon, name='amazon'),
path('ebay', views.ebay, name='ebay'),
path('<int:id>/', views.product_detail, name="product_detail"),
path('product_comparison/<int:id>/',views.product_comparison,name="product_comparison")
] | 26.785714 | 91 | 0.714667 | from django.urls import path
from .import views
from blog.views import like_view
app_name='products'
urlpatterns = [
path('amazon', views.amazon, name='amazon'),
path('ebay', views.ebay, name='ebay'),
path('<int:id>/', views.product_detail, name="product_detail"),
path('product_comparison/<int:id>/',views.product_comparison,name="product_comparison")
] | 0 | 0 | 0 |
4ed2b22e8b87dc385754c0c7e2f40750677926bf | 22 | py | Python | rafa/__init__.py | mjirv/jules-demo | ee43fbcc762f7b8f6d17e0e4b6649a414bcf44ca | [
"Apache-2.0"
] | 1 | 2022-03-08T20:44:09.000Z | 2022-03-08T20:44:09.000Z | rafa/__init__.py | mjirv/jules-demo | ee43fbcc762f7b8f6d17e0e4b6649a414bcf44ca | [
"Apache-2.0"
] | 15 | 2022-02-17T02:48:39.000Z | 2022-03-12T03:00:04.000Z | rafa/__init__.py | mjirv/rafa | ee43fbcc762f7b8f6d17e0e4b6649a414bcf44ca | [
"Apache-2.0"
] | null | null | null | from .Rafa import Rafa | 22 | 22 | 0.818182 | from .Rafa import Rafa | 0 | 0 | 0 |
1da3beb5dfb84cdb492acb3a5482de7e6919a7ed | 5,658 | py | Python | jina/executors/encoders/frameworks.py | yuanl/jina | 989d0689353bbbcd2c7bf11928b652224c3d4bf7 | [
"Apache-2.0"
] | null | null | null | jina/executors/encoders/frameworks.py | yuanl/jina | 989d0689353bbbcd2c7bf11928b652224c3d4bf7 | [
"Apache-2.0"
] | null | null | null | jina/executors/encoders/frameworks.py | yuanl/jina | 989d0689353bbbcd2c7bf11928b652224c3d4bf7 | [
"Apache-2.0"
] | null | null | null | __copyright__ = "Copyright (c) 2020 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import os
from . import BaseEncoder
from ..devices import OnnxDevice, PaddleDevice, TorchDevice, TFDevice, MindsporeDevice
from ...excepts import ModelCheckpointNotExist
from ...helper import is_url, cached_property
# mixin classes go first, base classes are read from right to left.
class BaseOnnxEncoder(OnnxDevice, BaseEncoder):
"""
:class:`BasePaddleEncoder` is the base class for implementing Encoders with models from :mod:`onnxruntime` library.
:param output_feature: the name of the layer for feature extraction.
:param model_path: the path of the model in the format of `.onnx`. Check a list of available pretrained
models at https://github.com/onnx/models#image_classification and download the git LFS to your local path.
The ``model_path`` is the local path of the ``.onnx`` file, e.g. ``/tmp/onnx/mobilenetv2-1.0.onnx``.
"""
def post_init(self):
"""
Load the model from the `.onnx` file and add outputs for the selected layer, i.e. ``outputs_name``. The modified
models is saved at `tmp_model_path`.
"""
super().post_init()
model_name = self.raw_model_path.split('/')[-1] if self.raw_model_path else None
tmp_model_path = self.get_file_from_workspace(f'{model_name}.tmp') if model_name else None
raw_model_path = self.raw_model_path
if self.raw_model_path and is_url(self.raw_model_path):
import urllib.request
download_path, *_ = urllib.request.urlretrieve(self.raw_model_path)
raw_model_path = download_path
self.logger.info(f'download the model at {self.raw_model_path}')
if tmp_model_path and not os.path.exists(tmp_model_path) and self.outputs_name:
self._append_outputs(raw_model_path, self.outputs_name, tmp_model_path)
self.logger.info(f'save the model with outputs [{self.outputs_name}] at {tmp_model_path}')
if tmp_model_path and os.path.exists(tmp_model_path):
import onnxruntime
self.model = onnxruntime.InferenceSession(tmp_model_path, None)
self.inputs_name = self.model.get_inputs()[0].name
self._device = None
self.to_device(self.model)
else:
raise ModelCheckpointNotExist(f'model at {tmp_model_path} does not exist')
@staticmethod
class BaseTFEncoder(TFDevice, BaseEncoder):
""":class:`BasePaddleEncoder` is the base class for implementing Encoders with models from :mod:`tensorflow` library."""
pass
class BaseTorchEncoder(TorchDevice, BaseEncoder):
"""Base encoder class for :mod:`pytorch` library."""
pass
class BasePaddleEncoder(PaddleDevice, BaseEncoder):
""":class:`BasePaddleEncoder` is the base class for implementing Encoders with models from :mod:`paddlepaddle` library."""
pass
class BaseMindsporeEncoder(MindsporeDevice, BaseEncoder):
"""
:class:`BaseMindsporeEncoder` is the base class for implementing Encoders with models from `mindspore`.
To implement your own executor with the :mod:`mindspore` lilbrary,
.. highlight:: python
.. code-block:: python
import mindspore.nn as nn
class YourAwesomeModel(nn.Cell):
def __init__(self):
...
def construct(self, x):
...
class YourAwesomeEncoder(BaseMindsporeEncoder):
def encode(self, data, *args, **kwargs):
from mindspore import Tensor
return self.model(Tensor(data)).asnumpy()
def get_cell(self):
return YourAwesomeModel()
:param model_path: the path of the model's checkpoint.
:param args: additional arguments
:param kwargs: additional key value arguments
"""
def post_init(self):
"""
Load the model from the `.ckpt` checkpoint.
"""
super().post_init()
if self.model_path and os.path.exists(self.model_path):
self.to_device()
from mindspore.train.serialization import load_checkpoint, load_param_into_net
_param_dict = load_checkpoint(ckpt_file_name=self.model_path)
load_param_into_net(self.model, _param_dict)
else:
raise ModelCheckpointNotExist(f'model {self.model_path} does not exist')
@cached_property
def model(self):
"""
Get the Mindspore Neural Networks Cells.
:return: model property
"""
return self.get_cell()
def get_cell(self):
"""
Return Mindspore Neural Networks Cells.
Pre-defined building blocks or computing units to construct Neural Networks.
A ``Cell`` could be a single neural network cell, such as conv2d, relu, batch_norm, etc.
or a composition of cells to constructing a network.
"""
raise NotImplementedError
| 39.566434 | 126 | 0.668611 | __copyright__ = "Copyright (c) 2020 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import os
from . import BaseEncoder
from ..devices import OnnxDevice, PaddleDevice, TorchDevice, TFDevice, MindsporeDevice
from ...excepts import ModelCheckpointNotExist
from ...helper import is_url, cached_property
# mixin classes go first, base classes are read from right to left.
class BaseOnnxEncoder(OnnxDevice, BaseEncoder):
"""
:class:`BasePaddleEncoder` is the base class for implementing Encoders with models from :mod:`onnxruntime` library.
:param output_feature: the name of the layer for feature extraction.
:param model_path: the path of the model in the format of `.onnx`. Check a list of available pretrained
models at https://github.com/onnx/models#image_classification and download the git LFS to your local path.
The ``model_path`` is the local path of the ``.onnx`` file, e.g. ``/tmp/onnx/mobilenetv2-1.0.onnx``.
"""
def __init__(self, output_feature: str = None, model_path: str = None, *args, **kwargs):
super().__init__(*args, **kwargs)
self.outputs_name = output_feature
self.raw_model_path = model_path
def post_init(self):
"""
Load the model from the `.onnx` file and add outputs for the selected layer, i.e. ``outputs_name``. The modified
models is saved at `tmp_model_path`.
"""
super().post_init()
model_name = self.raw_model_path.split('/')[-1] if self.raw_model_path else None
tmp_model_path = self.get_file_from_workspace(f'{model_name}.tmp') if model_name else None
raw_model_path = self.raw_model_path
if self.raw_model_path and is_url(self.raw_model_path):
import urllib.request
download_path, *_ = urllib.request.urlretrieve(self.raw_model_path)
raw_model_path = download_path
self.logger.info(f'download the model at {self.raw_model_path}')
if tmp_model_path and not os.path.exists(tmp_model_path) and self.outputs_name:
self._append_outputs(raw_model_path, self.outputs_name, tmp_model_path)
self.logger.info(f'save the model with outputs [{self.outputs_name}] at {tmp_model_path}')
if tmp_model_path and os.path.exists(tmp_model_path):
import onnxruntime
self.model = onnxruntime.InferenceSession(tmp_model_path, None)
self.inputs_name = self.model.get_inputs()[0].name
self._device = None
self.to_device(self.model)
else:
raise ModelCheckpointNotExist(f'model at {tmp_model_path} does not exist')
@staticmethod
def _append_outputs(inputs, outputs_name_to_append, output_fn):
import onnx
model = onnx.load(inputs)
feature_map = onnx.helper.ValueInfoProto()
feature_map.name = outputs_name_to_append
model.graph.output.append(feature_map)
onnx.save(model, output_fn)
class BaseTFEncoder(TFDevice, BaseEncoder):
""":class:`BasePaddleEncoder` is the base class for implementing Encoders with models from :mod:`tensorflow` library."""
pass
class BaseTorchEncoder(TorchDevice, BaseEncoder):
"""Base encoder class for :mod:`pytorch` library."""
pass
class BasePaddleEncoder(PaddleDevice, BaseEncoder):
""":class:`BasePaddleEncoder` is the base class for implementing Encoders with models from :mod:`paddlepaddle` library."""
pass
class BaseMindsporeEncoder(MindsporeDevice, BaseEncoder):
"""
:class:`BaseMindsporeEncoder` is the base class for implementing Encoders with models from `mindspore`.
To implement your own executor with the :mod:`mindspore` lilbrary,
.. highlight:: python
.. code-block:: python
import mindspore.nn as nn
class YourAwesomeModel(nn.Cell):
def __init__(self):
...
def construct(self, x):
...
class YourAwesomeEncoder(BaseMindsporeEncoder):
def encode(self, data, *args, **kwargs):
from mindspore import Tensor
return self.model(Tensor(data)).asnumpy()
def get_cell(self):
return YourAwesomeModel()
:param model_path: the path of the model's checkpoint.
:param args: additional arguments
:param kwargs: additional key value arguments
"""
def __init__(self, model_path: str = None, *args, **kwargs):
super().__init__(*args, **kwargs)
self.model_path = model_path
def post_init(self):
"""
Load the model from the `.ckpt` checkpoint.
"""
super().post_init()
if self.model_path and os.path.exists(self.model_path):
self.to_device()
from mindspore.train.serialization import load_checkpoint, load_param_into_net
_param_dict = load_checkpoint(ckpt_file_name=self.model_path)
load_param_into_net(self.model, _param_dict)
else:
raise ModelCheckpointNotExist(f'model {self.model_path} does not exist')
@cached_property
def model(self):
"""
Get the Mindspore Neural Networks Cells.
:return: model property
"""
return self.get_cell()
def get_cell(self):
"""
Return Mindspore Neural Networks Cells.
Pre-defined building blocks or computing units to construct Neural Networks.
A ``Cell`` could be a single neural network cell, such as conv2d, relu, batch_norm, etc.
or a composition of cells to constructing a network.
"""
raise NotImplementedError
| 591 | 0 | 79 |
9f240a8711f808b0425640824b52b37351a57d27 | 477 | py | Python | bin/prettify_pkg.py | taette/Moa- | d7a30fe74f4cd183b4f13c9b7fd7c11cf4fb9e24 | [
"CC-BY-4.0"
] | null | null | null | bin/prettify_pkg.py | taette/Moa- | d7a30fe74f4cd183b4f13c9b7fd7c11cf4fb9e24 | [
"CC-BY-4.0"
] | null | null | null | bin/prettify_pkg.py | taette/Moa- | d7a30fe74f4cd183b4f13c9b7fd7c11cf4fb9e24 | [
"CC-BY-4.0"
] | null | null | null | #!/usr/bin/env python
import os
import json
import collections
pwd = os.path.dirname(os.path.abspath(__file__))
root = os.path.dirname(pwd)
try:
f = open(os.path.join(root, 'Rules.1blockpkg'))
obj = json.load(f, object_pairs_hook=collections.OrderedDict)
try:
json_file = open(os.path.join(root, 'Rules.1blockpkg.json'), 'w')
json.dump(obj, json_file, indent=4, separators=(',', ': '))
finally:
json_file.close()
finally:
f.close()
| 26.5 | 73 | 0.658281 | #!/usr/bin/env python
import os
import json
import collections
pwd = os.path.dirname(os.path.abspath(__file__))
root = os.path.dirname(pwd)
try:
f = open(os.path.join(root, 'Rules.1blockpkg'))
obj = json.load(f, object_pairs_hook=collections.OrderedDict)
try:
json_file = open(os.path.join(root, 'Rules.1blockpkg.json'), 'w')
json.dump(obj, json_file, indent=4, separators=(',', ': '))
finally:
json_file.close()
finally:
f.close()
| 0 | 0 | 0 |
0566e60581aad6944e3eaf4392d45b2caa45dfda | 2,425 | py | Python | sources/D2sec.py | CriimBow/VIA4CVE | 96253efb74f69608574a66f03e548d8049ff7bdf | [
"BSD-3-Clause"
] | 109 | 2016-11-25T23:27:49.000Z | 2022-03-19T19:22:13.000Z | sources/D2sec.py | CriimBow/VIA4CVE | 96253efb74f69608574a66f03e548d8049ff7bdf | [
"BSD-3-Clause"
] | 20 | 2016-12-21T15:00:35.000Z | 2020-07-07T23:12:21.000Z | sources/D2sec.py | CriimBow/VIA4CVE | 96253efb74f69608574a66f03e548d8049ff7bdf | [
"BSD-3-Clause"
] | 37 | 2016-11-29T10:16:41.000Z | 2021-12-16T07:27:11.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Source file for d2sec exploit information
#
# Software is free software released under the "Modified BSD license"
#
# Copyright (c) 2016 Pieter-Jan Moreels - pieterjan.moreels@gmail.com
# Sources
SOURCE_NAME = 'd2sec'
SOURCE_FILE = "https://www.d2sec.com/exploits/elliot.xml"
# Imports
import copy
from collections import defaultdict
from io import BytesIO
from xml.sax import make_parser
from xml.sax.handler import ContentHandler
from lib.Config import Configuration as conf
from lib.Source import Source
| 31.493506 | 72 | 0.644536 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Source file for d2sec exploit information
#
# Software is free software released under the "Modified BSD license"
#
# Copyright (c) 2016 Pieter-Jan Moreels - pieterjan.moreels@gmail.com
# Sources
SOURCE_NAME = 'd2sec'
SOURCE_FILE = "https://www.d2sec.com/exploits/elliot.xml"
# Imports
import copy
from collections import defaultdict
from io import BytesIO
from xml.sax import make_parser
from xml.sax.handler import ContentHandler
from lib.Config import Configuration as conf
from lib.Source import Source
class D2secHandler(ContentHandler):
def __init__(self):
self.exploits = []
self.d2sec = None
self.tag = None
def startElement(self, name, attrs):
self.tag = name
if name == 'exploit': self.d2sec={'refs':[]}
elif name == 'ref':
self.d2sec['refs'].append({'type': attrs.get('type').lower()})
def characters(self, ch):
if self.d2sec and self.tag:
if self.tag == 'ref': self.d2sec['refs'][-1]['key'] = ch
elif self.tag != "exploit": self.d2sec[self.tag] = ch
def endElement(self, name):
self.tag = None
if name == 'exploit' and self.d2sec:
self.exploits.append(self.d2sec)
self.saint = None
class D2sec(Source):
def __init__(self):
self.name = SOURCE_NAME
parser = make_parser()
handler = D2secHandler()
_file, r = conf.getFeedData(SOURCE_NAME, SOURCE_FILE)
parser.setContentHandler(handler)
parser.parse(BytesIO(_file))
self.cves = defaultdict(list)
self.exploits = defaultdict(dict)
for exploit in handler.exploits:
_exploit = copy.copy(exploit) # clean exploit to add to the list
_exploit.pop('refs')
for ref in exploit.get('refs', []):
if ref['type'] == 'cve': self.cves[ref['key']].append(_exploit)
else:
if ref['key'] not in self.exploits[ref['type']]:
self.exploits[ref['type']][ref['key']] = []
self.exploits[ref['type']][ref['key']].append(_exploit)
def updateRefs(self, cveID, cveData):
if not cveData.get(SOURCE_NAME): cveData[SOURCE_NAME] = []
for key in cveData.get('refmap', {}).keys():
for _id in cveData['refmap'][key]:
cveData[SOURCE_NAME].extend(self.exploits[key].get(_id, []))
if cveData[SOURCE_NAME] == []: cveData.pop(SOURCE_NAME)
def getSearchables(self):
return ['name']
| 1,603 | 13 | 219 |
c5578e027dd764a77aa3c053dbb8719776c4622a | 18,000 | py | Python | tests/parameters_t.py | boosterl/loris | d6e241a487990bf80fcb76e27413ba465aafe6e6 | [
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null | tests/parameters_t.py | boosterl/loris | d6e241a487990bf80fcb76e27413ba465aafe6e6 | [
"BSD-2-Clause",
"BSD-3-Clause"
] | 4 | 2021-06-08T23:38:37.000Z | 2022-03-12T00:49:54.000Z | tests/parameters_t.py | boosterl/loris | d6e241a487990bf80fcb76e27413ba465aafe6e6 | [
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null | # parameters_t.py
#-*- coding: utf-8 -*-
from __future__ import absolute_import
from decimal import Decimal
from hypothesis import given
from hypothesis.strategies import text
import pytest
from loris import img_info
from loris.loris_exception import RequestException, SyntaxException
from loris.parameters import (
FULL_MODE, PCT_MODE, PIXEL_MODE,
RegionParameter, RotationParameter, SizeParameter,
)
from tests import loris_t
def build_image_info(width=100, height=100):
"""Produces an ``ImageInfo`` object of the given dimensions."""
info = img_info.ImageInfo(None)
info.width = width
info.height = height
return info
| 37.422037 | 79 | 0.6445 | # parameters_t.py
#-*- coding: utf-8 -*-
from __future__ import absolute_import
from decimal import Decimal
from hypothesis import given
from hypothesis.strategies import text
import pytest
from loris import img_info
from loris.loris_exception import RequestException, SyntaxException
from loris.parameters import (
FULL_MODE, PCT_MODE, PIXEL_MODE,
RegionParameter, RotationParameter, SizeParameter,
)
from tests import loris_t
def build_image_info(width=100, height=100):
"""Produces an ``ImageInfo`` object of the given dimensions."""
info = img_info.ImageInfo(None)
info.width = width
info.height = height
return info
class _ParameterTest(loris_t.LorisTest):
def _get_info_long_y(self):
# jp2, y is long dimension
fp = self.test_jp2_color_fp
fmt = self.test_jp2_color_fmt
ident = self.test_jp2_color_id
uri = self.test_jp2_color_uri
ii = img_info.ImageInfo(self.app, uri, fp, fmt)
return ii
def _get_info_long_x(self):
# jpeg, x is long dimension
fp = self.test_jpeg_fp
fmt = self.test_jpeg_fmt
ident = self.test_jpeg_id
uri = self.test_jpeg_uri
ii = img_info.ImageInfo(self.app, uri, fp, fmt)
return ii
class TestRegionParameter(_ParameterTest):
def test_populate_slots_from_pct(self):
info = self._get_info_long_y()
rp = RegionParameter('pct:25,25,50,50', info)
self.assertEquals(rp.pixel_x, int(info.width*0.25))
self.assertEquals(rp.pixel_y, int(info.height*0.25))
self.assertEquals(rp.pixel_w, int(info.width*0.50))
self.assertEquals(rp.pixel_h, int(info.height*0.50))
self.assertEquals(rp.decimal_x, Decimal('0.25'))
self.assertEquals(rp.decimal_y, Decimal('0.25'))
self.assertEquals(rp.decimal_w, Decimal('0.50'))
self.assertEquals(rp.decimal_h, Decimal('0.50'))
def test_populate_slots_from_pixel(self):
info = self._get_info_long_x()
rp = RegionParameter('797,900,1594,1600', info)
self.assertEquals(rp.pixel_x, 797)
self.assertEquals(rp.pixel_y, 900)
self.assertEquals(rp.pixel_w, 1594)
self.assertEquals(rp.pixel_h, 1600)
self.assertEquals(rp.decimal_x, rp.pixel_x / Decimal(str(info.width)))
self.assertEquals(rp.decimal_y, rp.pixel_y / Decimal(str(info.height)))
self.assertEquals(rp.decimal_w, rp.pixel_w / Decimal(str(info.width)))
self.assertEquals(rp.decimal_h, rp.pixel_h / Decimal(str(info.height)))
def test_square_mode_long_y(self):
# 5906 x 7200
info = self._get_info_long_y()
rp = RegionParameter('square', info)
self.assertEquals(rp.pixel_x, 0)
self.assertEquals(rp.pixel_y, 647)
self.assertEquals(rp.pixel_w, 5906)
self.assertEquals(rp.pixel_h, 5906)
def test_square_mode_long_x(self):
# 3600 x 2987
info = self._get_info_long_x()
rp = RegionParameter('square', info)
self.assertEquals(rp.pixel_x, 306)
self.assertEquals(rp.pixel_y, 0)
self.assertEquals(rp.pixel_w, 2987)
self.assertEquals(rp.pixel_h, 2987)
def test_recognise_full_mode_if_correct_dimensions(self):
info = build_image_info(1500, 800)
rp = RegionParameter('0,0,1500,800', info)
self.assertEquals(rp.mode, FULL_MODE)
def test_anything_except_four_coordinates_is_error(self):
info = build_image_info()
with self.assertRaises(SyntaxException):
RegionParameter('pct:100', info)
def test_percentage_greater_than_100_is_error(self):
info = build_image_info()
with self.assertRaises(RequestException):
RegionParameter('pct:150,150,150,150', info)
def test_x_parameter_greater_than_width_is_error(self):
info = build_image_info(width=100)
with self.assertRaises(RequestException):
RegionParameter('200,0,100,100', info)
def test_y_parameter_greater_than_height_is_error(self):
info = build_image_info(height=100)
with self.assertRaises(RequestException):
RegionParameter('0,200,100,100', info)
def test_canonical_uri_value_oob_w_pixel(self):
info = self._get_info_long_x() # x is long dimension
x = 200
offset = 1
oob_w = info.width - x + offset
rp = RegionParameter('%d,13,%d,17' % (x,oob_w), info)
expected_canonical = '%d,13,%d,17' % (x, info.width - x)
# Note that the below will need to be changed if decimal precision is
# changed (currently 25 places)
self.assertEquals(rp.decimal_w, Decimal('0.9444444444444444444444444'))
self.assertEquals(rp.canonical_uri_value, expected_canonical)
def test_canonical_uri_value_oob_w_pct(self):
info = self._get_info_long_y() # y is long dimension
x = 20
w = 81
rp = RegionParameter('pct:%d,13,%d,27' % (x,w), info)
self.assertEquals(rp.decimal_w, Decimal('0.8'))
expected_canonical = '1181,936,4725,1944'
self.assertEquals(rp.canonical_uri_value, expected_canonical)
def test_canonical_uri_value_oob_y_pixel(self):
info = self._get_info_long_y() # y is long dimension
y = 300
offset = 1 # request would be this many pixels OOB
oob_h = info.height - y + offset
rp = RegionParameter('29,%d,31,%d' % (y,oob_h), info)
expected_canonical = '29,%d,31,%d' % (y, info.height - y)
self.assertEquals(rp.canonical_uri_value, expected_canonical)
def test_canonical_uri_value_oob_y_pct(self):
info = self._get_info_long_x() # x is long dimension
y = 28.3
h = 72.2
rp = RegionParameter('pct:13,%f,17,%f' % (y,h), info)
expected_canonical = '468,845,612,2142'
self.assertEquals(rp.canonical_uri_value, expected_canonical)
def test_syntax_exceptions(self):
info = self._get_info_long_y()
with self.assertRaises(SyntaxException):
RegionParameter('n:1,2,3,4', info)
with self.assertRaises(SyntaxException):
RegionParameter('1,2,3,q', info)
with self.assertRaises(SyntaxException):
RegionParameter('1,2,3', info)
with self.assertRaises(SyntaxException):
RegionParameter('something', info)
def test_request_exceptions(self):
info = self._get_info_long_y()
with self.assertRaises(RequestException):
RegionParameter('1,2,0,3', info)
with self.assertRaises(RequestException):
RegionParameter('1,2,3,0', info)
with self.assertRaises(RequestException):
RegionParameter('pct:100,2,3,0', info)
def test_str(self):
info = self._get_info_long_y()
rp1 = RegionParameter('full', info)
self.assertEquals(str(rp1), 'full')
rp2 = RegionParameter('125,15,120,140', info)
self.assertEquals(str(rp2), '125,15,120,140')
rp3 = RegionParameter('pct:41.6,7.5,40,70', info)
self.assertEquals(str(rp3), 'pct:41.6,7.5,40,70')
rp4 = RegionParameter('125,15,200,200', info)
self.assertEquals(str(rp4), '125,15,200,200')
rp5 = RegionParameter('pct:41.6,7.5,66.6,100', info)
self.assertEquals(str(rp5), 'pct:41.6,7.5,66.6,100')
class TestSizeParameter(_ParameterTest):
def test_exceptions(self):
info = self._get_info_long_y()
rp = RegionParameter('pct:25,25,75,75', info)
with self.assertRaises(SyntaxException):
SizeParameter('!25,', rp)
with self.assertRaises(SyntaxException):
SizeParameter('!25', rp)
with self.assertRaises(SyntaxException):
SizeParameter('25', rp)
def test_zero_or_negative_percentage_is_rejected(self):
info = build_image_info(100, 100)
rp = RegionParameter('full', info)
with self.assertRaises(RequestException):
SizeParameter('pct:0', rp)
def test_very_small_pixel_width_is_positive(self):
info = build_image_info(width=1, height=100)
rp = RegionParameter('full', info)
sp = SizeParameter(',50', rp)
self.assertEquals(sp.w, 1)
def test_very_small_pixel_height_is_positive(self):
info = build_image_info(width=100, height=1)
rp = RegionParameter('full', info)
sp = SizeParameter('50,', rp)
self.assertEquals(sp.h, 1)
def test_very_small_percentage_width_is_positive(self):
info = build_image_info(width=1, height=100)
rp = RegionParameter('full', info)
sp = SizeParameter('pct:50', rp)
self.assertEquals(sp.w, 1)
def test_decimal_percentage_is_allowed(self):
info = build_image_info(width=400, height=200)
rp = RegionParameter('full', info)
sp = SizeParameter('pct:6.25', rp)
self.assertEquals(sp.w, 25)
def test_negative_x_percentage_is_rejected(self):
info = build_image_info()
with self.assertRaises(RequestException):
rp = RegionParameter('pct:-5,100,100,100', info)
def test_negative_y_percentage_is_rejected(self):
info = build_image_info()
with self.assertRaises(RequestException):
rp = RegionParameter('pct:100,-5,100,100', info)
def test_str_representation(self):
info = build_image_info()
rp = RegionParameter('full', info)
for uri_value in [
'full',
'pct:50',
'50,',
',50',
'!50,50',
]:
sp = SizeParameter(uri_value, rp)
self.assertEquals(str(sp), uri_value)
def test_very_small_percentage_height_is_positive(self):
info = build_image_info(width=100, height=1)
rp = RegionParameter('full', info)
sp = SizeParameter('pct:50', rp)
self.assertEquals(sp.h, 1)
def test_populate_slots_from_full(self):
# full
info = self._get_info_long_y()
rp = RegionParameter('full', info)
sp = SizeParameter('full',rp)
self.assertEquals(sp.force_aspect, False)
self.assertEquals(sp.mode, FULL_MODE)
self.assertEquals(sp.canonical_uri_value, FULL_MODE)
rp = RegionParameter('256,256,256,256', info)
sp = SizeParameter('full',rp)
self.assertEquals(sp.force_aspect, False)
self.assertEquals(sp.mode, FULL_MODE)
self.assertEquals(sp.canonical_uri_value, FULL_MODE)
def test_populate_slots_from_pct(self):
# pct:n
info = self._get_info_long_y()
rp = RegionParameter('full', info)
sp = SizeParameter('pct:25',rp)
self.assertEquals(sp.force_aspect, False)
self.assertEquals(sp.mode, PCT_MODE)
self.assertEquals(sp.canonical_uri_value, '1476,')
rp = RegionParameter('256,256,256,256', info)
sp = SizeParameter('pct:25',rp)
self.assertEquals(sp.force_aspect, False)
self.assertEquals(sp.mode, PCT_MODE)
self.assertEquals(sp.canonical_uri_value, '64,')
rp = RegionParameter('pct:0,0,50,50', info)
sp = SizeParameter('pct:25',rp)
self.assertEquals(sp.force_aspect, False)
self.assertEquals(sp.mode, PCT_MODE)
self.assertEquals(sp.canonical_uri_value, '738,')
def test_populate_slots_from_w_only(self):
# w,
info = self._get_info_long_y()
rp = RegionParameter('full', info)
sp = SizeParameter('180,',rp)
self.assertEquals(sp.force_aspect, False)
self.assertEquals(sp.mode, PIXEL_MODE)
self.assertEquals(sp.canonical_uri_value, '180,')
rp = RegionParameter('200,300,500,600', info)
sp = SizeParameter('125,',rp)
self.assertEquals(sp.force_aspect, False)
self.assertEquals(sp.mode, PIXEL_MODE)
self.assertEquals(sp.canonical_uri_value, '125,')
self.assertEquals(type(sp.w), int)
self.assertEquals(sp.w, 125)
self.assertEquals(type(sp.h), int)
self.assertEquals(sp.h, 150)
def test_tiny_image(self):
info = self._get_info_long_x()
rp = RegionParameter('full', info)
sp = SizeParameter('1,', rp)
self.assertEquals(sp.force_aspect, False)
self.assertEquals(sp.mode, PIXEL_MODE)
self.assertEquals(sp.canonical_uri_value, '1,')
self.assertEquals(sp.w, 1)
self.assertEquals(sp.h, 1)
def test_populate_slots_from_h_only(self):
# ,h
info = self._get_info_long_y()
rp = RegionParameter('full', info)
sp = SizeParameter(',90',rp)
self.assertEquals(sp.force_aspect, False)
self.assertEquals(sp.mode, PIXEL_MODE)
self.assertEquals(sp.canonical_uri_value, '73,')
rp = RegionParameter('50,290,360,910', info)
sp = SizeParameter(',275',rp)
self.assertEquals(sp.force_aspect, False)
self.assertEquals(sp.mode, PIXEL_MODE)
self.assertEquals(sp.canonical_uri_value, '108,')
def test_populate_slots_from_wh(self):
# w,h
info = self._get_info_long_y()
rp = RegionParameter('full', info)
sp = SizeParameter('48,48',rp)
self.assertEquals(sp.force_aspect, True)
self.assertEquals(sp.mode, PIXEL_MODE)
self.assertEquals(sp.canonical_uri_value, '48,48')
rp = RegionParameter('15,16,23,42', info)
sp = SizeParameter('60,60',rp) # upsample!
self.assertEquals(sp.force_aspect, True)
self.assertEquals(sp.mode, PIXEL_MODE)
self.assertEquals(sp.canonical_uri_value, '60,60')
def test_populate_slots_from_bang_wh(self):
# !w,h
info = self._get_info_long_y()
rp = RegionParameter('full', info)
sp = SizeParameter('!120,140',rp)
self.assertEquals(sp.force_aspect, False)
self.assertEquals(sp.mode, PIXEL_MODE)
self.assertEquals(sp.canonical_uri_value, '114,')
rp = RegionParameter('0,0,125,160', info)
sp = SizeParameter('!120,140',rp,)
self.assertEquals(sp.force_aspect, False)
self.assertEquals(sp.mode, PIXEL_MODE)
self.assertEquals(sp.canonical_uri_value, '109,')
rp = RegionParameter('0,0,125,160', info)
sp = SizeParameter('!130,140',rp)
self.assertEquals(sp.force_aspect, False)
self.assertEquals(sp.mode, PIXEL_MODE)
self.assertEquals(sp.canonical_uri_value, '109,')
rp = RegionParameter('50,80,140,160', info)
sp = SizeParameter('!130,180',rp)
self.assertEquals(sp.force_aspect, False)
self.assertEquals(sp.mode, PIXEL_MODE)
self.assertEquals(sp.canonical_uri_value, '130,')
rp = RegionParameter('50,80,140,160', info)
sp = SizeParameter('!145,165',rp)
self.assertEquals(sp.force_aspect, False)
self.assertEquals(sp.mode, PIXEL_MODE)
self.assertEquals(sp.canonical_uri_value, '144,')
rp = RegionParameter('50,80,140,180', info)
sp = SizeParameter('!145,185',rp)
self.assertEquals(sp.force_aspect, False)
self.assertEquals(sp.mode, PIXEL_MODE)
self.assertEquals(sp.canonical_uri_value, '143,')
def test_zero_width_or_height_is_rejected(self):
info = build_image_info()
rp = RegionParameter('full', info)
with pytest.raises(RequestException):
SizeParameter('0,', rp)
with pytest.raises(RequestException):
SizeParameter(',0', rp)
with pytest.raises(RequestException):
SizeParameter('0,0', rp)
@given(text(alphabet='0123456789.,!'))
def test_parsing_parameter_either_passes_or_is_exception(self, uri_value):
info = build_image_info()
rp = RegionParameter('full', info)
try:
SizeParameter(uri_value, rp)
except (RequestException, SyntaxException):
pass
class TestRotationParameter(_ParameterTest):
def test_exceptions(self):
with self.assertRaises(SyntaxException):
rp = RotationParameter('a')
with self.assertRaises(SyntaxException):
rp = RotationParameter('361')
with self.assertRaises(SyntaxException):
rp = RotationParameter('-1')
with self.assertRaises(SyntaxException):
rp = RotationParameter('!-1')
with self.assertRaises(SyntaxException):
rp = RotationParameter('!361')
with self.assertRaises(SyntaxException):
rp = RotationParameter('-0.1')
with self.assertRaises(SyntaxException):
rp = RotationParameter('1.3.6')
with self.assertRaises(SyntaxException):
rp = RotationParameter('!2.7.13')
with self.assertRaises(SyntaxException):
rp = RotationParameter('.')
with self.assertRaises(SyntaxException):
rp = RotationParameter('.0.')
@given(text(alphabet='0123456789.!'))
def test_parsing_parameter_either_passes_or_is_syntaxexception(self, xs):
try:
RotationParameter(xs)
except SyntaxException:
pass
def test_uri_value(self):
rp = RotationParameter('0')
self.assertEquals(rp.rotation, '0')
rp = RotationParameter('46')
self.assertEquals(rp.rotation, '46')
rp = RotationParameter('180')
self.assertEquals(rp.rotation, '180')
def test_mirroring(self):
rp = RotationParameter('180')
self.assertFalse(rp.mirror)
rp = RotationParameter('!180')
self.assertTrue(rp.mirror)
def test_c14n(self):
rp = RotationParameter('42.10')
self.assertEquals(rp.canonical_uri_value, '42.1')
rp = RotationParameter('180.0')
self.assertEquals(rp.canonical_uri_value, '180')
rp = RotationParameter('!180.0')
self.assertEquals(rp.canonical_uri_value, '!180')
rp = RotationParameter('!180.10')
self.assertEquals(rp.canonical_uri_value, '!180.1')
| 15,953 | 813 | 576 |
935dd7948ece10a4eeaa8cadd2a7d13d41ab929c | 2,408 | py | Python | packages/api-server/api_server/routes/lifts.py | mayman99/rmf-web | 5670bd943567c6a866ec6345c972e6fb84d73476 | [
"Apache-2.0"
] | 23 | 2021-04-13T23:01:12.000Z | 2022-03-21T02:15:24.000Z | packages/api-server/api_server/routes/lifts.py | mayman99/rmf-web | 5670bd943567c6a866ec6345c972e6fb84d73476 | [
"Apache-2.0"
] | 326 | 2021-03-10T17:32:17.000Z | 2022-03-30T04:42:14.000Z | packages/api-server/api_server/routes/lifts.py | mayman99/rmf-web | 5670bd943567c6a866ec6345c972e6fb84d73476 | [
"Apache-2.0"
] | 13 | 2021-04-10T10:33:36.000Z | 2022-02-22T15:39:58.000Z | from typing import List, cast
from fastapi import Depends, HTTPException
from rx import operators as rxops
from api_server.dependencies import sio_user
from api_server.fast_io import FastIORouter, SubscriptionRequest
from api_server.gateway import rmf_gateway
from api_server.models import Lift, LiftHealth, LiftRequest, LiftState
from api_server.repositories import RmfRepository, rmf_repo_dep
from api_server.rmf_io import rmf_events
router = FastIORouter(tags=["Lifts"])
@router.get("", response_model=List[Lift])
@router.get("/{lift_name}/state", response_model=LiftState)
async def get_lift_state(
lift_name: str, rmf_repo: RmfRepository = Depends(rmf_repo_dep)
):
"""
Available in socket.io
"""
lift_state = await rmf_repo.get_lift_state(lift_name)
if lift_state is None:
raise HTTPException(status_code=404)
return lift_state
@router.sub("/{lift_name}/state", response_model=LiftState)
@router.get("/{lift_name}/health", response_model=LiftHealth)
async def get_lift_health(
lift_name: str, rmf_repo: RmfRepository = Depends(rmf_repo_dep)
):
"""
Available in socket.io
"""
lift_health = await rmf_repo.get_lift_health(lift_name)
if lift_health is None:
raise HTTPException(status_code=404)
return lift_health
@router.sub("/{lift_name}/health", response_model=LiftHealth)
@router.post("/{lift_name}/request")
| 30.871795 | 73 | 0.741279 | from typing import List, cast
from fastapi import Depends, HTTPException
from rx import operators as rxops
from api_server.dependencies import sio_user
from api_server.fast_io import FastIORouter, SubscriptionRequest
from api_server.gateway import rmf_gateway
from api_server.models import Lift, LiftHealth, LiftRequest, LiftState
from api_server.repositories import RmfRepository, rmf_repo_dep
from api_server.rmf_io import rmf_events
router = FastIORouter(tags=["Lifts"])
@router.get("", response_model=List[Lift])
async def get_lifts(rmf_repo: RmfRepository = Depends(rmf_repo_dep)):
return await rmf_repo.get_lifts()
@router.get("/{lift_name}/state", response_model=LiftState)
async def get_lift_state(
lift_name: str, rmf_repo: RmfRepository = Depends(rmf_repo_dep)
):
"""
Available in socket.io
"""
lift_state = await rmf_repo.get_lift_state(lift_name)
if lift_state is None:
raise HTTPException(status_code=404)
return lift_state
@router.sub("/{lift_name}/state", response_model=LiftState)
async def sub_lift_state(req: SubscriptionRequest, lift_name: str):
user = sio_user(req)
lift_state = await get_lift_state(lift_name, RmfRepository(user))
await req.sio.emit(req.room, lift_state, req.sid)
return rmf_events.lift_states.pipe(
rxops.filter(lambda x: cast(LiftState, x).lift_name == lift_name)
)
@router.get("/{lift_name}/health", response_model=LiftHealth)
async def get_lift_health(
lift_name: str, rmf_repo: RmfRepository = Depends(rmf_repo_dep)
):
"""
Available in socket.io
"""
lift_health = await rmf_repo.get_lift_health(lift_name)
if lift_health is None:
raise HTTPException(status_code=404)
return lift_health
@router.sub("/{lift_name}/health", response_model=LiftHealth)
async def sub_lift_health(req: SubscriptionRequest, lift_name: str):
user = sio_user(req)
health = await get_lift_health(lift_name, RmfRepository(user))
await req.sio.emit(req.room, health, req.sid)
return rmf_events.lift_health.pipe(
rxops.filter(lambda x: cast(LiftHealth, x).id_ == lift_name),
)
@router.post("/{lift_name}/request")
def _post_lift_request(
lift_name: str,
lift_request: LiftRequest,
):
rmf_gateway.request_lift(
lift_name,
lift_request.destination,
lift_request.request_type,
lift_request.door_mode,
)
| 918 | 0 | 88 |
134219f719a18cd1d68a4417c0352c735e650bcc | 3,348 | py | Python | ax/core/metric.py | xiecong/Ax | f6501807bbc6bb952d636391231ebeb10646769a | [
"MIT"
] | 1 | 2020-03-07T08:26:05.000Z | 2020-03-07T08:26:05.000Z | ax/core/metric.py | aerometu/Ax | 1dc24d52fcb21308f9559374409296260e1bfc79 | [
"MIT"
] | null | null | null | ax/core/metric.py | aerometu/Ax | 1dc24d52fcb21308f9559374409296260e1bfc79 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import TYPE_CHECKING, Any, Iterable, Optional
from ax.core.base import Base
from ax.core.data import Data
if TYPE_CHECKING: # pragma: no cover
# import as module to make sphinx-autodoc-typehints happy
from ax import core # noqa F401
class Metric(Base):
"""Base class for representing metrics.
Attributes:
lower_is_better: Flag for metrics which should be minimized.
"""
def __init__(self, name: str, lower_is_better: Optional[bool] = None) -> None:
"""Inits Metric.
Args:
name: Name of metric.
lower_is_better: Flag for metrics which should be minimized.
"""
self._name = name
self.lower_is_better = lower_is_better
@property
def name(self) -> str:
"""Get name of metric."""
return self._name
def fetch_trial_data(
self, trial: "core.base_trial.BaseTrial", **kwargs: Any
) -> Data:
"""Fetch data for one trial."""
raise NotImplementedError # pragma: no cover
def fetch_experiment_data(
self, experiment: "core.experiment.Experiment", **kwargs: Any
) -> Data:
"""Fetch this metric's data for an experiment.
Default behavior is to fetch data from all trials expecting data
and concatenate the results.
"""
return Data.from_multiple_data(
[
self.fetch_trial_data(trial, **kwargs)
if trial.status.expecting_data
else Data()
for trial in experiment.trials.values()
]
)
@classmethod
def fetch_trial_data_multi(
cls,
trial: "core.base_trial.BaseTrial",
metrics: Iterable["Metric"],
**kwargs: Any,
) -> Data:
"""Fetch multiple metrics data for one trial.
Default behavior calls `fetch_trial_data` for each metric.
Subclasses should override this to trial data computation for multiple metrics.
"""
return Data.from_multiple_data(
[metric.fetch_trial_data(trial, **kwargs) for metric in metrics]
)
@classmethod
def fetch_experiment_data_multi(
cls,
experiment: "core.experiment.Experiment",
metrics: Iterable["Metric"],
**kwargs: Any,
) -> Data:
"""Fetch multiple metrics data for an experiment.
Default behavior calls `fetch_trial_data_multi` for each trial.
Subclasses should override to batch data computation across trials + metrics.
"""
return Data.from_multiple_data(
[
cls.fetch_trial_data_multi(trial, metrics, **kwargs)
if trial.status.expecting_data
else Data()
for trial in experiment.trials.values()
]
)
def clone(self) -> "Metric":
"""Create a copy of this Metric."""
return Metric(name=self.name, lower_is_better=self.lower_is_better)
| 31 | 87 | 0.616786 | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import TYPE_CHECKING, Any, Iterable, Optional
from ax.core.base import Base
from ax.core.data import Data
if TYPE_CHECKING: # pragma: no cover
# import as module to make sphinx-autodoc-typehints happy
from ax import core # noqa F401
class Metric(Base):
"""Base class for representing metrics.
Attributes:
lower_is_better: Flag for metrics which should be minimized.
"""
def __init__(self, name: str, lower_is_better: Optional[bool] = None) -> None:
"""Inits Metric.
Args:
name: Name of metric.
lower_is_better: Flag for metrics which should be minimized.
"""
self._name = name
self.lower_is_better = lower_is_better
@property
def name(self) -> str:
"""Get name of metric."""
return self._name
def fetch_trial_data(
self, trial: "core.base_trial.BaseTrial", **kwargs: Any
) -> Data:
"""Fetch data for one trial."""
raise NotImplementedError # pragma: no cover
def fetch_experiment_data(
self, experiment: "core.experiment.Experiment", **kwargs: Any
) -> Data:
"""Fetch this metric's data for an experiment.
Default behavior is to fetch data from all trials expecting data
and concatenate the results.
"""
return Data.from_multiple_data(
[
self.fetch_trial_data(trial, **kwargs)
if trial.status.expecting_data
else Data()
for trial in experiment.trials.values()
]
)
@classmethod
def fetch_trial_data_multi(
cls,
trial: "core.base_trial.BaseTrial",
metrics: Iterable["Metric"],
**kwargs: Any,
) -> Data:
"""Fetch multiple metrics data for one trial.
Default behavior calls `fetch_trial_data` for each metric.
Subclasses should override this to trial data computation for multiple metrics.
"""
return Data.from_multiple_data(
[metric.fetch_trial_data(trial, **kwargs) for metric in metrics]
)
@classmethod
def fetch_experiment_data_multi(
cls,
experiment: "core.experiment.Experiment",
metrics: Iterable["Metric"],
**kwargs: Any,
) -> Data:
"""Fetch multiple metrics data for an experiment.
Default behavior calls `fetch_trial_data_multi` for each trial.
Subclasses should override to batch data computation across trials + metrics.
"""
return Data.from_multiple_data(
[
cls.fetch_trial_data_multi(trial, metrics, **kwargs)
if trial.status.expecting_data
else Data()
for trial in experiment.trials.values()
]
)
def clone(self) -> "Metric":
"""Create a copy of this Metric."""
return Metric(name=self.name, lower_is_better=self.lower_is_better)
def __repr__(self) -> str:
return "{class_name}('{metric_name}')".format(
class_name=self.__class__.__name__, metric_name=self.name
)
| 140 | 0 | 27 |
9f9e61519a9ea605ad4da969e28f7e4c1c33fc4b | 5,498 | py | Python | http_page_monitor/tests/test_page_watcher.py | obver-se/http_page_monitor | f6a91c21805f06d957dfa8d2dbba2646a7255517 | [
"MIT"
] | null | null | null | http_page_monitor/tests/test_page_watcher.py | obver-se/http_page_monitor | f6a91c21805f06d957dfa8d2dbba2646a7255517 | [
"MIT"
] | null | null | null | http_page_monitor/tests/test_page_watcher.py | obver-se/http_page_monitor | f6a91c21805f06d957dfa8d2dbba2646a7255517 | [
"MIT"
] | null | null | null | """ Tests the PageWatcher class to ensure that
requests are being made and compared properly """
import unittest
import time
from http_page_monitor.tests.logging_http_server\
import setup_logging_server
from .. import watchers
class TestPageWatcher(unittest.TestCase):
""" Tests the PageWatcher class """
@classmethod
@classmethod
def setUp(self):
""" Reset the log before each test """
self.server.reset_log()
def test_initial_request(self):
""" Make sure that the initial request is made when the watcher is started """
page_w = watchers.PageWatcher(self.server.generate_address('/'),
comparison_function=lambda a, b: None)
# Start and immediatly stop to trigger a single request
page_w.start()
page_w.stop()
# Allow the request to go through
time.sleep(1)
# Assert that there was an initial request
self.assertEqual(self.server.request_count, 1)
def test_single_request(self):
""" Test to ensure a second request is made after the initial request """
page_w = watchers.PageWatcher(self.server.generate_address('/'),
time_interval=0.5)
# Give the page watcher time to make a second request
page_w.start()
time.sleep(0.75)
page_w.stop()
# Assert that there was an initial request and a second request
self.assertEqual(self.server.request_count, 2)
def test_equal_pages_difference(self):
""" Test how the watcher responds to a page that doesn't
change with the default comparison function """
alerts = []
page_w = watchers.PageWatcher(self.server.generate_address('/'),
time_interval=0.5,
alert_function=dummy_alert_function)
# Give the page watcher time to make a second request
page_w.start()
time.sleep(0.75)
page_w.stop()
# Assert that there was an initial request and a second request
self.assertEqual(self.server.request_count, 2)
# Assert that an alert wasn't made
self.assertEqual(len(alerts), 0)
def test_page_difference(self):
""" Test how the watcher responds to a page
difference with the default comparison function """
alerts = []
page_w = watchers.PageWatcher(self.server.generate_address('/every2'),
time_interval=0.5,
alert_function=dummy_alert_function)
# Give the page watcher time to make a second request
page_w.start()
time.sleep(0.7)
page_w.stop()
# Assert that there was an initial request and a second request
self.assertEqual(self.server.request_count, 2)
# Assert that an alert was made
self.assertEqual(len(alerts), 1)
def test_custom_page_difference_function(self):
""" Test how the watcher responds to a page
difference with the default comparison function """
alerts = []
page_w = watchers.PageWatcher(self.server.generate_address('/every2'),
time_interval=0.5,
alert_function=dummy_alert_function,
comparison_function=custom_comparison_function)
# Give the page watcher time to make a second request
page_w.start()
time.sleep(0.7)
page_w.stop()
# Assert that there was an initial request and a second request
self.assertEqual(self.server.request_count, 2)
# Assert that an alert was made
self.assertEqual(len(alerts), 1)
# Make sure that the message was passed down
self.assertEqual(alerts[0][1], "Response 1, Response 2 there was a difference")
def test_custom_page_difference_function_no_difference(self):
""" Test how the watcher responds to a page
difference with the default comparison function """
alerts = []
page_w = watchers.PageWatcher(self.server.generate_address('/every2'),
time_interval=0.5,
alert_function=dummy_alert_function,
comparison_function=custom_comparison_function)
# Give the page watcher time to make a second request
page_w.start()
time.sleep(0.7)
page_w.stop()
# Assert that there was an initial request and a second request
self.assertEqual(self.server.request_count, 2)
# Assert that no alerts were made
self.assertEqual(len(alerts), 0)
| 36.171053 | 87 | 0.611131 | """ Tests the PageWatcher class to ensure that
requests are being made and compared properly """
import unittest
import time
from http_page_monitor.tests.logging_http_server\
import setup_logging_server
from .. import watchers
class TestPageWatcher(unittest.TestCase):
""" Tests the PageWatcher class """
@classmethod
def setUpClass(cls):
cls.server = setup_logging_server()
cls.server.handle_requests()
@classmethod
def tearDownClass(cls):
cls.server.shutdown()
def setUp(self):
""" Reset the log before each test """
self.server.reset_log()
def test_initial_request(self):
""" Make sure that the initial request is made when the watcher is started """
page_w = watchers.PageWatcher(self.server.generate_address('/'),
comparison_function=lambda a, b: None)
# Start and immediatly stop to trigger a single request
page_w.start()
page_w.stop()
# Allow the request to go through
time.sleep(1)
# Assert that there was an initial request
self.assertEqual(self.server.request_count, 1)
def test_single_request(self):
""" Test to ensure a second request is made after the initial request """
page_w = watchers.PageWatcher(self.server.generate_address('/'),
time_interval=0.5)
# Give the page watcher time to make a second request
page_w.start()
time.sleep(0.75)
page_w.stop()
# Assert that there was an initial request and a second request
self.assertEqual(self.server.request_count, 2)
def test_equal_pages_difference(self):
""" Test how the watcher responds to a page that doesn't
change with the default comparison function """
alerts = []
def dummy_alert_function(url, info):
alerts.append((url, info))
page_w = watchers.PageWatcher(self.server.generate_address('/'),
time_interval=0.5,
alert_function=dummy_alert_function)
# Give the page watcher time to make a second request
page_w.start()
time.sleep(0.75)
page_w.stop()
# Assert that there was an initial request and a second request
self.assertEqual(self.server.request_count, 2)
# Assert that an alert wasn't made
self.assertEqual(len(alerts), 0)
def test_page_difference(self):
""" Test how the watcher responds to a page
difference with the default comparison function """
alerts = []
def dummy_alert_function(url, info):
alerts.append((url, info))
page_w = watchers.PageWatcher(self.server.generate_address('/every2'),
time_interval=0.5,
alert_function=dummy_alert_function)
# Give the page watcher time to make a second request
page_w.start()
time.sleep(0.7)
page_w.stop()
# Assert that there was an initial request and a second request
self.assertEqual(self.server.request_count, 2)
# Assert that an alert was made
self.assertEqual(len(alerts), 1)
def test_custom_page_difference_function(self):
""" Test how the watcher responds to a page
difference with the default comparison function """
def custom_comparison_function(old, new):
return "%s, %s there was a difference" %\
(old.decode('UTF-8'), new.decode('UTF-8'))
alerts = []
def dummy_alert_function(url, info):
alerts.append((url, info))
page_w = watchers.PageWatcher(self.server.generate_address('/every2'),
time_interval=0.5,
alert_function=dummy_alert_function,
comparison_function=custom_comparison_function)
# Give the page watcher time to make a second request
page_w.start()
time.sleep(0.7)
page_w.stop()
# Assert that there was an initial request and a second request
self.assertEqual(self.server.request_count, 2)
# Assert that an alert was made
self.assertEqual(len(alerts), 1)
# Make sure that the message was passed down
self.assertEqual(alerts[0][1], "Response 1, Response 2 there was a difference")
def test_custom_page_difference_function_no_difference(self):
""" Test how the watcher responds to a page
difference with the default comparison function """
def custom_comparison_function(old, new):
return None
alerts = []
def dummy_alert_function(url, info):
alerts.append((url, info))
page_w = watchers.PageWatcher(self.server.generate_address('/every2'),
time_interval=0.5,
alert_function=dummy_alert_function,
comparison_function=custom_comparison_function)
# Give the page watcher time to make a second request
page_w.start()
time.sleep(0.7)
page_w.stop()
# Assert that there was an initial request and a second request
self.assertEqual(self.server.request_count, 2)
# Assert that no alerts were made
self.assertEqual(len(alerts), 0)
| 505 | 0 | 232 |
a07ac499c1e610cf7378565a845011d5aa52bace | 2,987 | py | Python | geth.py | blockchainhelppro/CelvinRost | aa2661747d06e4610928466521e4da1db77aeadc | [
"MIT"
] | 2 | 2018-08-15T21:27:59.000Z | 2018-08-21T17:56:12.000Z | geth.py | blockchainhelppro/CelvinRost | aa2661747d06e4610928466521e4da1db77aeadc | [
"MIT"
] | null | null | null | geth.py | blockchainhelppro/CelvinRost | aa2661747d06e4610928466521e4da1db77aeadc | [
"MIT"
] | 1 | 2018-07-27T07:19:49.000Z | 2018-07-27T07:19:49.000Z | import os
import sys
import datetime
from .filesystem import (
remove_dir_if_exists,
remove_file_if_exists,
normpath,
)
from .chains import (
get_base_blockchain_storage_dir,
)
@normpath
CHAINDATA_DIR = './chaindata'
@normpath
DAPP_DIR = './dapp'
@normpath
NODEKEY_FILENAME = 'nodekey'
@normpath
IPC_FILENAME = 'geth.ipc'
@normpath
@normpath
@normpath
@normpath
| 22.976923 | 78 | 0.642451 | import os
import sys
import datetime
from .filesystem import (
remove_dir_if_exists,
remove_file_if_exists,
normpath,
)
from .chains import (
get_base_blockchain_storage_dir,
)
@normpath
def get_data_dir(project_dir, chain_name):
base_blockchain_storage_dir = get_base_blockchain_storage_dir(project_dir)
return os.path.join(base_blockchain_storage_dir, chain_name)
CHAINDATA_DIR = './chaindata'
@normpath
def get_chaindata_dir(data_dir):
return os.path.join(data_dir, CHAINDATA_DIR)
DAPP_DIR = './dapp'
@normpath
def get_dapp_dir(data_dir):
return os.path.join(data_dir, DAPP_DIR)
NODEKEY_FILENAME = 'nodekey'
@normpath
def get_nodekey_path(data_dir):
return os.path.join(data_dir, NODEKEY_FILENAME)
IPC_FILENAME = 'geth.ipc'
@normpath
def get_geth_ipc_path(data_dir):
return os.path.join(data_dir, IPC_FILENAME)
@normpath
def get_geth_default_datadir_path(testnet=False):
if testnet:
testnet = "ropsten"
else:
testnet = ""
if sys.platform == 'darwin':
return os.path.expanduser(os.path.join(
"~",
"Library",
"Ethereum",
testnet,
))
elif sys.platform.startswith('linux'):
return os.path.expanduser(os.path.join(
"~",
".ethereum",
testnet,
))
elif sys.platform == 'win32':
return os.path.expanduser(os.path.join(
"~",
"AppData",
"Roaming",
"Ethereum",
))
else:
raise ValueError(
"Unsupported platform '{0}'. Only darwin/linux2/win32 are "
"supported.".format(sys.platform)
)
@normpath
def get_geth_default_ipc_path(testnet=False):
data_dir = get_geth_default_datadir_path(testnet=testnet)
if sys.platform == 'darwin' or sys.platform.startswith('linux'):
return os.path.join(data_dir, "geth.ipc")
elif sys.platform == 'win32':
return os.path.expanduser(os.path.join(
"~",
"AppData",
"Roaming",
"Ethereum",
))
else:
raise ValueError(
"Unsupported platform '{0}'. Only darwin/linux2/win32 are "
"supported.".format(sys.platform)
)
@normpath
def get_geth_logfile_path(project_dir, prefix, suffix):
logs_dir = os.path.join(project_dir, 'logs')
logfile_name = datetime.datetime.now().strftime(
'geth-%Y%m%d-%H%M%S-{prefix}-{suffix}.log'.format(
prefix=prefix, suffix=suffix,
),
)
return os.path.join(logs_dir, logfile_name)
def reset_chain(data_dir):
chaindata_dir = get_chaindata_dir(data_dir)
remove_dir_if_exists(chaindata_dir)
dapp_dir = get_dapp_dir(data_dir)
remove_dir_if_exists(dapp_dir)
nodekey_path = get_nodekey_path(data_dir)
remove_file_if_exists(nodekey_path)
geth_ipc_path = get_geth_ipc_path(data_dir)
remove_file_if_exists(geth_ipc_path)
| 2,384 | 0 | 199 |
cb5985a9c6c2cc841fc757d28ef97e2ad726951a | 65,641 | py | Python | MetaScreener/external_sw/mgltools/MGLToolsPckgs/ViewerFramework/VF.py | bio-hpc/metascreener | 6900497629f601c4b6c0c37da26de58ffa221988 | [
"Apache-2.0"
] | 8 | 2021-12-14T21:30:01.000Z | 2022-02-14T11:30:03.000Z | MetaScreener/external_sw/mgltools/MGLToolsPckgs/ViewerFramework/VF.py | bio-hpc/metascreener | 6900497629f601c4b6c0c37da26de58ffa221988 | [
"Apache-2.0"
] | null | null | null | MetaScreener/external_sw/mgltools/MGLToolsPckgs/ViewerFramework/VF.py | bio-hpc/metascreener | 6900497629f601c4b6c0c37da26de58ffa221988 | [
"Apache-2.0"
] | null | null | null | #############################################################################
#
# Author: Michel F. SANNER
#
# Copyright: M. Sanner TSRI 2000
#
# revision: Guillaume Vareille
#
#########################################################################
#
# $Header: /opt/cvs/python/packages/share1.5/ViewerFramework/VF.py,v 1.218 2013/10/03 22:31:33 annao Exp $
#
# $Id: VF.py,v 1.218 2013/10/03 22:31:33 annao Exp $
#
"""defines base classe ViewerFramework
The ViewerFramework class can be subclassed to create applications that
use a DejaVu Camera object to display 3D geometries. In the following
we'll call Viewer a class derived from ViewerFramework.
The design features of the viewer framework include:
- extensibility: new commands can be written by subclassing the VFCommand
base class.
- dynamically configurable: commands (or set of commands called modules)
can be loaded dynamically from libraries.
- Commands loaded into an application can create their own GUI elements
(menus, cascading menus, buttons, sliders, etc...). The viewer framework
provides support for the creation of such GUI elements.
- Commands can be invoked either through the GUI or throught the Python
Shell.
- Macros provide a lightweight mechanism to add simple commands. In fact
any Python function can be added as a Macro
- Support for logging of commands: this allows to record and play back a
session.
- documentation: the module and command documentation is provided in the
source code. This documentation can be extracted using existing tools and
made available in various formats including HTML and man pages. The document
ation is also accessible through the application's Help command which uses
Python's introspection capabilities to retrieve the documentation.
A ViewerFramework always has at least one menu bar called "menuRoot" and at
least one buttonbar called "Toolbar".
The geometried displayed in a Viewer can be stored in objects derived from the
base class GeometryContainer. This container holds a dictionnary of geometries
where the keys are the geometry's name and the values instances of DejaVu
Geometries.
Commands:
Commands for an application derived from ViewerFramework can be developped by
sub-classing the VFCommand base class (see VFCommand overview).The class
VFCommandGUI allows to define GUI to be associated with a command. Command can
be added dynamically to an application using the AddCommand command of the
ViewerFramework.
example:
# derive a new command
class ExitCommand(Command):
doit(self):
import sys
sys.exit()
# get a CommandGUI object
g = CommandGUI()
# add information to create a pull-down menu in a menu bar called
# 'MoVi' under a menu-button called 'File' with a menu Command called
# 'Exit'. We also specify that we want a separator to appear above
# this entry in the menu
g.addMenuCommand('MoVi', 'File', 'Exit', separatorAbove=1)
# add an instance of an ExitCommand with the alias 'myExit' to a
viewer
# v. This will automatically add the menu bar, the menu button
# (if necessary) the menu entry and bind the default callback function
v.addCommand( ExitCommand(), 'myExit', g )
The command is immediately operational and can be invoked through the
pull down menu OR using the Python shell: v.myExit()
CommandGUI objects allow to specify what type of GUI a command should have. It
is possible to create pull-down menu entries, buttons of different kinds etc..
Modules:
A bunch of related commands can be groupped into a module. A module is a
.py file that defines a number of commands and provides a functions called
initModule(viewer) used to register the module with an instance of a
viewer.
When a module is added to a viewer, the .py file is imported and the
initModule function is executed. Usually this functions instanciates a
number of command objects and their CommandGUI objects and adds them to
the viewer.
"""
import os, string, warnings
import traceback, sys, glob, time
class VFEvent:
"""Base class for ViewerFramework events.
"""
def __init__(self, arg=None, objects=[], *args, **kw):
""" """
self.arg = arg
self.objects = objects
self.args = args
self.kw = kw
if len(args):
self.args = args
if len(kw):
for k,v in kw.items():
setattr(self, k, v)
class LogEvent(VFEvent):
"""created each time a log string is written to the log"""
def __init__(self, logstr):
""" """
self.logstr = logstr
class GeomContainer:
"""Class to hold geometries to be shown in a viewer.
This class provides a dictionnary called geoms in which the name of a
DejaVu geometry is the key to access that particular geometry object
Geometries can be added using the addGeometry method.
"""
def __init__(self, viewer=None):
"""constructor of the geometry container"""
## Dictionary of geometries used to display atoms from that molecule
## using sticks, balls, CPK spheres etc ...
self.geoms = {}
self.VIEWER = viewer # DejaVu Viewer object
self.masterGeom = None
## Dictionary linking geom names to cmds which updates texture coords
## for the current set of coordinates
self.texCoordsLookup = {}
self.updateTexCoords = {}
def delete(self):
"""Function to remove self.geoms['master'] and
self.geoms['selectionSpheres'] from the viewer when deleted"""
# switch the object and descendant to protected=False
for c in self.geoms['master'].AllObjects():
c.protected = False
if self.VIEWER:
self.VIEWER.RemoveObject(self.geoms['master'])
#for item in self.geoms.values():
# item.delete()
# if item.children!=[]:
# self.VIEWER.RemoveObject(item)
def addGeom(self, geom, parent=None, redo=False):
"""
This method should be called to add a molecule-specific geometry.
geom -- DejaVu Geom instance
parent -- parent geometry, if not specified we use self.masterGeom
"""
if parent is None:
parent = self.masterGeom
# we need to make sure the geometry name is unique in self.geoms
# and in parent.children
nameUsed=False
geomName = geom.name
for object in parent.children:
if object.name==geomName:
nameUsed=True
break
if nameUsed or self.geoms.has_key(geomName):
newName = geomName+str(len(self.geoms))
geom.name = newName
warnings.warn("renaming geometry %s to %s"%(geomName, newName))#, stacklevel=14)
self.geoms[geomName]=geom
# add the geometry to the viewer. At this point the name should be
# unique in both the parent geoemtry and the geomContainer.geoms dict
if self.VIEWER:
self.VIEWER.AddObject( geom, parent=parent, redo=redo)
else:
parent.children.append(geom)
geom.parent = parent
geom.fullName = parent.fullName+'|'+geom.name
#from DejaVu.Labels import Labels
from DejaVu.Spheres import Spheres
## from ViewerFramework.gui import InputFormDescr
from mglutil.gui.InputForm.Tk.gui import InputFormDescr
from mglutil.util.callback import CallBackFunction
from mglutil.util.packageFilePath import findResourceFile, getResourceFolderWithVersion
try:
from ViewerFramework.VFGUI import ViewerFrameworkGUI
except:
pass
from ViewerFramework.VFCommand import Command,CommandGUI,InteractiveCmdCaller
# Import basic commands.
from ViewerFramework.basicCommand import loadCommandCommand, loadMacroCommand
from ViewerFramework.basicCommand import ShellCommand, ShellCommandGUI, ExitCommand
from ViewerFramework.basicCommand import loadModuleCommand
from ViewerFramework.basicCommand import BrowseCommandsCommand, RemoveCommand
from ViewerFramework.basicCommand import SaveSessionCommand, SaveSessionCommandGUI
from ViewerFramework.helpCommands import helpCommand
try:
from comm import Comm
except:
pass
from DejaVu import Viewer
from DejaVu.Camera import Camera
import types, Tkinter
import thread
import os, sys, traceback
import tkMessageBox
from mglutil.preferences import UserPreference
class ViewerFramework:
"""
Base class for applications providing a 3D geometry Viewer based on a
DejaVu Camera object along with support for adding GUI and commands
dynamically.
"""
def __init__(self, title='ViewerFrameWork', logMode='no',
libraries=[], gui=1, resourceFile = '_vfrc',
viewerClass=Viewer, master=None, guiVisible=1, withShell=1,
verbose=True, trapExceptions=True):
"""
Construct an instance of a ViewerFramework object with:
- an instance of a VFGUI that provides support for adding to the
GUI of the application
- a dictionnary of commands
- a list of commands that create geometry
- a list of objects to be displayed
- a dictionary of colorMaps
* logMode can be:
'no': for no loging of commands at all
'overwrite': the log files overwrite the one from the previous
session
'unique': the log file name include the date and time
* libraries is a list of names of Python package that provide a
cmdlib.py and modlib.py
- trapExceptions should be set to False when creating a ViewerFramework
for testing, such that exception are seen by the testing framework
"""
self.__frozen = False
self.hasGui = gui
self.embeded=False
self.cmdHistory = [] # history of command [(cmd, args, kw)]
global __debug__
self.withShell = withShell
self.trapExceptions = trapExceptions
#self.__debug__ = 0
# create a socket communication object
try:
self.socketComm = Comm()
self.webControl = Comm()
self.cmdQueue = None # queue of command comming from server
except:
self.socketComm = None
self.webControl = None
self.timeUsedForLastCmd = 0. # -1 when command fails
assert logMode in ['no', 'overwrite', 'unique']
self.resourceFile = resourceFile
self.commands = {} # dictionnary of command added to a Viewer
self.userpref = UserPreference()
#self.removableCommands = UserPreference(os.path.dirname(self.resourceFile), 'commands')
self.userpref.add('Sharp Color Boundaries for MSMS', 'sharp', ('sharp', 'blur'),
doc="""Specifies color boundaries for msms surface [sharp or blur]
(will not modify already displayed msms surfaces,
only new surfaces will be affected)""", category="DejaVu")
#Warning: changing the cursor tends to make the window flash.""")
# Interface to Visual Programming Environment, if available
self.visionAPI = None
if self.hasGui :
try:
# does this package exists?
from Vision.API import VisionInterface
# create empty object. Note that this will be filled with life
# when the visionCommand is executed
self.visionAPI = VisionInterface()
except:
pass
self.objects = [] # list of objects
self.colorMaps = {} # available colorMaps
self.colorMapCt = 0 # used to make sure names are unique
self.undoCmdStack = [] # list of strings used to undo
# lock needs to be acquired before object can be added
self.objectsLock = thread.allocate_lock()
# lock needs to be acquired before topcommands can be run
self.commandsLock = thread.allocate_lock()
# nexted commands counter
self.commandNestingLevel = 0
# place holder for a list of command that can be carried out each time
# an object is added to the application
# every entry is a tuple (function, args_tuple, kw_dict)
self.onAddObjectCmds = []
# list of commands that have an onRemoveMol
self.cmdsWithOnAddObj = []
# list of commands that have an onAddMol
self.cmdsWithOnRemoveObj = []
# dict cmd:[cm1, cmd2, ... cmdn]. When cmd runs the onCmdRun method
# of all cmds in the list will be called with the arguments passed
# to cmd
self.cmdsWithOnRun = {}
# list of commands that have an onExit
self.cmdsWithOnExit = []
self.firstPerspectiveSet = True
self.logMode = logMode
self.libraries = libraries + ['ViewerFramework']
self.topNegateCmds = [] # used in Command.doitWrapper() to accumulate negation commands
# for sub commands of a top command
# you cannot create a GUI and have it visible.
if not self.hasGui:
self.guiVisible=0
else:
self.guiVisible=guiVisible
self.master=master
if gui:
self.GUI = ViewerFrameworkGUI(self, title=title,
viewerClass=viewerClass,
root=master, withShell=withShell,
verbose=verbose)
self.GUI.VIEWER.suspendRedraw = True
self.viewSelectionIcon = 'cross' # or 'spheres' or 'labels'
self.userpref.add('Show Progress Bar', 'hide',
['show','hide'],
doc = """When set to 'show' the progress bar is displayed.
When set to 'hide', the progress bar widget is widthdrawn, but can be
redisplayed by choosing 'show' again.""", category='Viewer',
callbackFunc=[self.GUI.showHideProgressBar_CB],
)
if gui:
cbList = [self.GUI.logUserPref_cb,]
else:
cbList = []
#if gui:
# self.guiSupport = __import__( "DejaVu.ViewerFramework.gui", globals(),
# locals(), ['gui'])
if gui and self.guiVisible==0:
# if gui == 1 but self.guiVisible == 0: the gui is created but
# withdrawn immediatly
self.GUI.ROOT.withdraw()
if self.withShell:
# Uses the pyshell as the interpreter when the VFGUI is hidden.
self.GUI.pyshell.top.deiconify()
self.viewSelectionIcon = 'cross' # or 'spheres' or 'labels'
self.userpref.add( 'Transformation Logging', 'no',
validValues = ['no', 'continuous', 'final'],
callbackFunc = cbList,
doc="""Define when transformation get logged.\n'no' : never; 'continuous': after every transformation; 'final': when the Exit command is called""")
self.userpref.add( 'Visual Picking Feedback', 1,
[0, 1], category="DejaVu",
callbackFunc = [self.SetVisualPickingFeedBack,],
doc="""When set to 1 a sphere is drawn at picked vertex""")
self.userpref.add( 'Fill Selection Box', 1,
[0,1], category="DejaVu",
callbackFunc = [self.fillSelectionBoxPref_cb],
doc="""Set this option to 1 to have the program
draw a solid selection box after 'fillSelectionBoxDelay' miliseconds without a motion""")
self.userpref.add( 'Fill Selection Box Delay', 200, category="DejaVu",
validateFunc = self.fillDelayValidate,
callbackFunc = [self.fillSelectionBoxDelayPref_cb],
doc="""Delay in miliseconds after which the selection box turns solid if the 'fillSelectionBox' is set. Valide values are >0 and <10000""")
self.userpref.add( 'Warning Message Format', 'pop-up',
['pop-up', 'printed'],
callbackFunc = [self.setWarningMsgFormat],
category="Viewer",
doc="""Set format for warning messages. valid values are 'pop-up' and 'printed'""")
self._cwd = os.getcwd()
self.userpref.add( 'Startup Directory', self._cwd,
validateFunc = self.startupDirValidate,
callbackFunc = [self.startupDirPref_cb],
doc="""Startup Directory uses os.chdir to change the startup directory.
Startup Directory is set to current working directory by default.""")
rcFolder = getResourceFolderWithVersion()
self.rcFolder = rcFolder
self.userpref.add( 'Log Mode', 'no', ['no', 'overwrite', 'unique'],
callbackFunc = [self.setLogMode],
category="Viewer",
doc="""Set the log mode which can be one of the following:
no - do not log the commands.
overwrite - stores the log in mvAll.log.py.
unique - stores the log in mvAll_$time.log.py.
log.py files are stored in resource folder located under ~/.mgltools/$Version
""")
self.userpref.add( 'Command History Depth', 500,
validateFunc=self.commmandHistoryValidate,
#callbackFunc = []
doc="Set Command Hsistory Depth - number of commands kept in the command history list and displayed in the MESSAGE BOX")
if self.hasGui:
# add an interactive command caller
self.ICmdCaller = InteractiveCmdCaller( self )
# remove DejaVu's default picking behavior
vi = self.GUI.VIEWER
vi.RemovePickingCallback(vi.unsolicitedPick)
# overwrite the Camera's DoPick method to set the proper pickLevel
# based on the interactive command that will be called for the
# current modifier configuration
for c in vi.cameras:
c.DoPick = self.DoPick
self.addBasicCommands()
if self.hasGui:
from mglutil.util.recentFiles import RecentFiles
fileMenu = self.GUI.menuBars['menuRoot'].menubuttons['File'].menu
rcFile = rcFolder
if rcFile:
rcFile += os.sep + 'Pmv' + os.sep + "recent.pkl"
self.recentFiles = RecentFiles(
self, fileMenu, filePath=rcFile,
menuLabel='Recent Files', index=2)
self.logMode = 'no'
self.GUI.dockCamera()
self.logMode = logMode
# load out default interactive command which prints out object names
self.ICmdCaller.setCommands( self.printGeometryName )
self.ICmdCaller.go()
if gui:
self.userpref.add( 'Icon Size', 'medium',
['very small', 'small', 'medium', 'large',
'very large'],
callbackFunc = [self.SetIconSize,],
category="Viewer",
doc="""Sets the size of icons for the Toolbar.""")
self.userpref.add( 'Save Perspective on Exit', 'yes',
validValues = ['yes', 'no'],
doc="""Saves GUI perspective on Exit. The following features are saved:
GUI geometry, and whether camera is docked or not.
""")
self.GUI.VIEWER.suspendRedraw = False
self.GUI.VIEWER.currentCamera.height = 600
# dictionary of event:[functions]. functions will be called by
# self.dispatchEvent
self.eventListeners = {}
self.userpref.saveDefaults()
self.userpref.loadSettings()
if self.userpref.has_key('Save Perspective on Exit') and self.userpref['Save Perspective on Exit']['value'] == 'yes':
self.restorePerspective()
#self.GUI.VIEWER.ReallyRedraw()
def registerListener(self, event, function):
"""registers a function to be called for a given event.
event has to be a class subclassing VFEvent
"""
assert issubclass(event, VFEvent)
assert callable(function)
if not self.eventListeners.has_key(event):
self.eventListeners[event] = [function]
else:
if function in self.eventListeners[event]:
warnings.warn('function %s already registered for event %s'%(
function,event))
else:
self.eventListeners[event].append(function)
def dispatchEvent(self, event):
"""call all registered listeners for this event type
"""
assert isinstance(event, VFEvent)
if self.eventListeners.has_key(event.__class__):
if self.hasGui:
vi=self.GUI.VIEWER
autoRedraw = vi.autoRedraw
vi.stopAutoRedraw()
for func in self.eventListeners[event.__class__]:
func(event)
if autoRedraw:
vi.startAutoRedraw()
else:
for func in self.eventListeners[event.__class__]:
func(event)
def clients_cb(self, client, data):
"""get called every time a client sends a message"""
import sys
sys.stdout.write('%s sent %s\n'%(client,data) )
#exec(data)
def embedInto(self, hostApp,debug=0):
"""
function to define an hostapplication, take the string name of the application
"""
if self.hasGui:
raise RuntiomeError("VF with GUI cannot be embedded")
from ViewerFramework.hostApp import HostApp
self.hostApp = HostApp(self, hostApp, debug=debug)
self.embeded=True
def updateIMD(self):
"""get called every time the server we are connected to sends a
message
what about more than one molecule attached
currently under develppment
"""
from Pmv.moleculeViewer import EditAtomsEvent
#print "pause",self.imd.pause
if self.imd.mindy:
#print "ok update mindy"
self.imd.updateMindy()
if self.hasGui and self.imd.gui :
self.GUI.VIEWER.OneRedraw()
self.GUI.VIEWER.update()
self.GUI.ROOT.after(1, self.updateIMD)
else :
if not self.imd.pause:
self.imd.lock.acquire()
coord = self.imd.imd_coords[:]
self.imd.lock.release()
if coord != None:
#how many mol
if type(self.imd.mol) is list :
b=0
for i,m in enumerate(self.imd.mol) :
n1 = len(m.allAtoms.coords)
self.imd.mol.allAtoms.updateCoords(coord[b:n1], self.imd.slot[i])
b=n1
else :
self.imd.mol.allAtoms.updateCoords(coord, self.imd.slot)
import DejaVu
if DejaVu.enableVBO :
if type(self.imd.mol) is list :
b=0
for i,m in enumerate(self.imd.mol) :
N=len(m.geomContainer.geoms['cpk'].vertexSet.vertices.array)
m.geomContainer.geoms['cpk'].vertexSet.vertices.array[:]=coord[b:N]
b=N
else :
N=len(self.imd.mol.geomContainer.geoms['cpk'].vertexSet.vertices.array)
self.imd.mol.geomContainer.geoms['cpk'].vertexSet.vertices.array[:]=coord[:N]
#self.GUI.VIEWER.OneRedraw()
#self.GUI.VIEWER.update()
else :
from Pmv.moleculeViewer import EditAtomsEvent
if type(self.imd.mol) is list :
for i,m in enumerate(self.imd.mol) :
event = EditAtomsEvent('coords', m.allAtoms)
self.dispatchEvent(event)
else :
event = EditAtomsEvent('coords', self.imd.mol.allAtoms)
self.dispatchEvent(event)
#self.imd.mol.geomContainer.geoms['balls'].Set(vertices=coord)
#self.imd.mol.geomContainer.geoms['sticks'].Set(vertices=coord.tolist())
#self.imd.mol.geomContainer.geoms['lines'].Set(vertices=coord)
#self.imd.mol.geomContainer.geoms['bonds'].Set(vertices=coord)
#self.imd.mol.geomContainer.geoms['cpk'].Set(vertices=coord)
if self.handler.isinited :
self.handler.getForces(None)
self.handler.updateArrow()
#"""
if self.hasGui and self.imd.gui :
self.GUI.VIEWER.OneRedraw()
self.GUI.VIEWER.update()
self.GUI.ROOT.after(5, self.updateIMD)
#self.GUI.ROOT.after(10, self.updateIMD)
def server_cb(self, server, data):
"""get called every time the server we are connected to sends a
message"""
import sys
#sys.stderr.write('server %s sent> %s'%(server,data) )
self.cmdQueue.put( (server,data) )
#exec(data) # cannot exec because we are not in main thread
# and Tkitner is not thread safe
#self.GUI.VIEWER.Redraw()
def customize(self, file=None):
"""if a file is specified, this files gets sourced, else we look for
the file specified in self.resourceFile in the following directories:
1 - current directory
2 - user's home directory
3 - the package to which this instance belongs to
"""
#print 'ZZZZZZZZZZZZZZZZZZZZZZZZ'
#import traceback
#traceback.print_stack()
if file is not None:
if not os.path.exists(file):
return
self.source(file, globalNames=1, log=0)
return
resourceFileLocation = findResourceFile(self,
resourceFile=self.resourceFile)
if resourceFileLocation.has_key('currentdir') and \
not resourceFileLocation['currentdir'] is None:
path = resourceFileLocation['currentdir']
elif resourceFileLocation.has_key('home') and \
not resourceFileLocation['home'] is None:
path = resourceFileLocation['home']
elif resourceFileLocation.has_key('package') and \
not resourceFileLocation['package'] is None:
path = resourceFileLocation['package']
else:
return
self.source(path, globalNames=1, log=0)
path = os.path.split(path)[-1]
if os.path.exists(path):
self.source(path, globalNames=1, log=0)
return
def after(func, *args, **kw):
"""method to run a thread enabled command and wait for its completion.
relies on the command to release a lock called self.done
only works for commands, not for macros
"""
lock = thread.allocate_lock()
lock.acquire()
func.private_threadDone = lock
apply( func, args, kw )
func.waitForCompletion()
def getLog(self):
"""
generate log strings for all commands so far
"""
logs = []
i = 0
for cmd, args, kw in self.cmdHistory:
try:
log = cmd.logString( *args, **kw)+'\n'
except:
log = '#failed to create log for %d in self.cmdHistory: %s\n'%(
i, cmd.name)
logs.append(log)
i += 1
return logs
def addCmdToHistory(self, cmd, args, kw):
"""
append a command to the history of commands
"""
#print "ADDING Command to history", cmd.name
self.cmdHistory.append( (cmd, args, kw))
maxLen = self.userpref['Command History Depth']['value']
lenCmds = len(self.cmdHistory)
if maxLen>0 and lenCmds > maxLen:
#print "maxLen", maxLen, lenCmds
self.cmdHistory = self.cmdHistory[-maxLen:]
if self.hasGui:
nremoved = lenCmds-maxLen
# update text in the message box
message_box = self.GUI.MESSAGE_BOX
nlines = float(nremoved+1)
try:
message_box.tx.delete('1.0', str(nlines))
except:
pass
def log(self, cmdString=''):
"""append command to logfile
FIXME: this should also get whatever is typed in the PythonShell
"""
if self.logMode == 'no': return
if cmdString[-1]!='\n': cmdString = cmdString + '\n'
if hasattr(self, 'logAllFile'):
self.logAllFile.write( cmdString )
self.logAllFile.flush()
if self.socketComm is not None and len(self.socketComm.clients):
#is it really need?
cmdString=cmdString.replace("log=0","log=1")
self.socketComm.sendToClients(cmdString)
self.dispatchEvent( LogEvent( cmdString ) )
## if self.selectLog:
## self.logSelectFile.write( cmdString )
def tryto(self, command, *args, **kw ):
"""result <- tryto(command, *args, **kw )
if an exception is raised print traceback and continue
"""
self.commandNestingLevel = self.commandNestingLevel + 1
try:
if self.commandNestingLevel==1:
self.commandsLock.acquire()
if not self.trapExceptions:
# we are running tests and want exceptions not to be caught
result = command( *args, **kw )
else:
# exception should be caught and displayed
try:
result = command( *args, **kw )
except:
print 'ERROR *********************************************'
if self.guiVisible==1 and self.withShell:
self.GUI.pyshell.top.deiconify()
self.GUI.ROOT.config(cursor='')
self.GUI.VIEWER.master.config(cursor='')
self.GUI.MESSAGE_BOX.tx.component('text').config(cursor='xterm')
traceback.print_exc()
sys.last_type, sys.last_value, sys.last_traceback = sys.exc_info()
result = 'ERROR'
# sets cursors back to normal
finally:
if self.commandNestingLevel==1:
self.commandsLock.release()
self.commandNestingLevel = self.commandNestingLevel - 1
return result
def message(self, str, NL=1):
""" write into the message box """
if self.hasGui:
self.GUI.message(str,NL)
else:
print str
def unsolicitedPick(self, pick):
"""treat and unsollicited picking event"""
vi = self.GUI.VIEWER
if vi.isShift() or vi.isControl():
vi.unsolicitedPick(pick)
else:
#print picked geometry
for k in pick.hits.keys():
self.message(k)
def addBasicCommands(self):
"""Create a frame to hold menu and button bars"""
from ViewerFramework.dejaVuCommands import PrintGeometryName, \
SetCameraSizeCommand, SetCamSizeGUI
# Basic command that needs to be added manually.
self.addCommand( PrintGeometryName(), 'printGeometryName ', None )
g = CommandGUI()
g.addMenuCommand('menuRoot', 'File', 'Browse Commands',
separatorAbove=1, )
self.addCommand( BrowseCommandsCommand(), 'browseCommands', g)
self.addCommand( SetCameraSizeCommand(), 'setCameraSize',
SetCamSizeGUI)
from ViewerFramework.basicCommand import UndoCommand, \
ResetUndoCommand, NEWUndoCommand, RedoCommand
# g = CommandGUI()
# g.addMenuCommand('menuRoot', 'File', 'Remove Command')
# self.addCommand( RemoveCommand(), 'removeCommand', g)
from mglutil.util.packageFilePath import getResourceFolderWithVersion
self.vfResourcePath = getResourceFolderWithVersion()
if self.vfResourcePath is not None:
self.vfResourcePath += os.sep + "ViewerFramework"
if not os.path.isdir(self.vfResourcePath):
try:
os.mkdir(self.vfResourcePath)
except Exception, inst:
print inst
txt="Cannot create the Resource Folder %s" %self.vfResourcePath
self.vfResourcePath = None
g = CommandGUI()
g.addMenuCommand('menuRoot', 'Edit', 'Undo ', index=0)
g.addToolBar('Undo', icon1 = '_undo.gif', icon2 = 'undo.gif',
type = 'ToolBarButton',state = 'disabled',
balloonhelp = 'Undo', index = 1)
self.addCommand( NEWUndoCommand(), 'NEWundo', g)
g = CommandGUI()
g.addMenuCommand('menuRoot', 'Edit', 'Redo ', index=1)
g.addToolBar('Redo', icon1 = '_redo.gif', icon2 = 'redo.gif',
type = 'ToolBarButton',state = 'disabled',
balloonhelp = 'Redo', index = 2)
self.addCommand( RedoCommand(), 'redo', g)
# keep old undo command for now for backward compatibility
self.addCommand( UndoCommand(), 'undo', None )
self.addCommand( ResetUndoCommand(), 'resetUndo ', None)
g = CommandGUI()
#g.addMenuCommand('menuRoot', 'File', 'Load Command')
self.addCommand( loadCommandCommand(), 'loadCommand', g)
g = CommandGUI()
#g.addMenuCommand('menuRoot', 'File', 'Load Module')
self.addCommand( loadModuleCommand(), 'loadModule', g)
g = CommandGUI()
g.addMenuCommand('menuRoot', 'File', 'Load Macros', separatorBelow=1)
self.addCommand( loadMacroCommand(), 'loadMacro', g)
# Load Source command from customizationCommands module:
self.browseCommands('customizationCommands', commands=['source',],
package='ViewerFramework', topCommand=0)
# force the creation of the default buttonbar and PyShell checkbutton
# by viewing the Python Shell widget
if self.withShell:
self.addCommand( ShellCommand(), 'Shell', ShellCommandGUI )
# add the default 'Help' menubutton in the default menubar
if self.hasGui:
bar = self.GUI.menuBars['menuRoot']
help = self.GUI.addMenuButton( bar, 'Help', {}, {'side':'right'})
self.GUI.addMenuButton( bar, 'Grid3D', {}, {'side':'right'})
try:
import grid3DCommands
self.browseCommands("grid3DCommands", package="ViewerFramework", topCommand=0)
except Exception, inst:
print inst
print "Cannot import grid3DCommands. Disabling grid3DCommands..."
#self.GUI.ROOT.after(1500, self.removeCommand.loadCommands)
# load helpCommand and searchForCmd
self.browseCommands('helpCommands',
commands=['helpCommand','searchForCmd', 'citeThisScene',
'showCitation'],
package='ViewerFramework', topCommand = 0)
# load SetUserPreference and setOnAddObjectCmds Commands
self.browseCommands('customizationCommands',
commands=['setUserPreference',
'setOnAddObjectCommands'],
package='ViewerFramework', topCommand = 0)
# load ChangeVFGUIvisGUI and SetOnAddObjectCmds Command
self.browseCommands('customizeVFGUICommands',
package='ViewerFramework', topCommand = 0)
self.addCommand( SaveSessionCommand(), 'saveSession ', SaveSessionCommandGUI)
# Add the Exit command under File
g = CommandGUI()
g.addMenuCommand('menuRoot', 'File', 'Exit', separatorAbove=1)
self.addCommand( ExitCommand(), 'Exit', g )
# load object transformation, camera transformation,
# light transformation, Clipping Plane transformation,
# CenterGeom, centerScene commands
self.browseCommands("dejaVuCommands", commands=[
'transformObject', 'transformCamera', 'setObject',
'setCamera', 'setLight', 'setClip', 'addClipPlane',
'centerGeom', 'centerScene', 'centerSceneOnVertices',
'alignGeomsnogui','alignGeoms', 'toggleStereo',
'centerSceneOnPickedPixel'],
package='ViewerFramework', topCommand = 0)
def validInstance(self, classList, obj):
"""Checks whether an object is an instance of one the classes in the
list"""
ok = 0
for Klass in classList:
if isinstance(obj, Klass):
OK=1
break
return OK
def getOnAddObjectCmd(self):
"""
returns a copy of the list of commands currently executed when a new object
is added
"""
return self.onAddObjectCmds[:]
def addOnAddObjectCmd(self, cmd, args=[], kw={}):
"""
adds a command to the list of commands currently executed when a new object
is added
"""
assert callable(cmd)
assert type(args)==types.TupleType or type(args)==types.ListType
assert type(kw)==types.DictType
assert cmd.flag & Command.objArgOnly
kw['topCommand'] = 0
kw['setupNegate'] = 0
if type(args)==types.ListType:
args = tuple(args)
self.onAddObjectCmds.append( (cmd, args, kw) )
def removeOnAddObjectCmd(self, cmd):
"""
removes a command to the list of commands currently executed when a new object
is added
"""
for com in self.onAddObjectCmds:
if com[0]==cmd:
self.onAddObjectCmds.remove(com)
return com
print 'WARNING: command %s not found'%cmd.name
return None
def addObject(self, name, obj, geomContainer=None):
"""Add an object to a Viewer"""
#print 'acquiring addObject lock'
self.objectsLock.acquire()
self.objects.append(obj)
self.objectsLock.release()
#print 'releasing addObject lock'
## if geomContainer is None:
## obj.geomContainer = GeomContainer( self.GUI.VIEWER )
## else:
## obj.geomContainer = geomContainer
obj.geomContainer = geomContainer
# prepare progress bar
lenCommands = len(self.cmdsWithOnAddObj)
if self.hasGui:
self.GUI.configureProgressBar(init=1, mode='increment',
max=lenCommands,
progressformat='ratio',
labeltext='call initGeom methods')
#call initGeom method of all commands creating geometry
from time import time
#t0 = time()
for com in self.cmdsWithOnAddObj:
com.onAddObjectToViewer(obj)
#t1 = time()
#print 'INITI', com, t1-t0
#check for gui
if self.hasGui:
self.GUI.updateProgressBar()
# now set progress bar back to '%' format
if self.hasGui:
self.GUI.configureProgressBar(progressformat='percent')
# prepare progress bar
lenCommands = len(self.onAddObjectCmds)
#call functions that need to be called on object
#t0 = time()
for com in self.onAddObjectCmds:
com[2]['redraw']=0
com[2]['log']=0
#t1 = time()
#print 'INITI2', com, t1-t0
com[0]( *((obj,)+com[1]), **com[2] )
# note we have to re-configure the progress bar because doitWrapper
# will overwrite the mode to 'percent'
#check for gui
if self.hasGui:
self.GUI.configureProgressBar(init=1, mode='increment',
max=lenCommands,
progressformat='ratio',
labeltext='call geom functions')
self.GUI.updateProgressBar()
if self.hasGui:
# now set progress bar back to '%' format
self.GUI.configureProgressBar(progressformat='percent')
# create add object event
event = AddObjectEvent(objects=[obj])
self.dispatchEvent(event)
if self.hasGui:
self.centerScene(topCommand=0)
self.GUI.VIEWER.Redraw()
def removeObject(self, obj, undoable=False):
"""Remove an object from a Viewer"""
#1 Delete the obj from the list of objects.
del(self.objects[self.objects.index(obj)])
# call onRemoveMol method of all commands creating geometry
# To remove geometries created by these commands from the VIEWER
## MS chose to cerate undoableDelete__ variable in VF to let cmd's
## onRemoveObjectFromViewer method decide what to do when delete is
## undoable. Passign undoable into th method would require changing
## the signature in each implementation when onyl a hand full do
## something s[pecial when undoable is True
self.undoableDelete__ = undoable
for com in self.cmdsWithOnRemoveObj:
self.tryto( com.onRemoveObjectFromViewer, (obj) )
del self.undoableDelete__
# clean up the managedGeometries list
if obj.geomContainer:
for cmd in self.commands.values():
if len(cmd.managedGeometries)==0: continue
geomList = []
for g in cmd.managedGeometries:
if hasattr(g, 'mol') and g.mol==obj:
continue
geomList.append(g)
cmd.managedGeometries = geomList
# remove everything created in the geomContainer associated to the
# mol we want to destroy,
if obj.geomContainer:
obj.geomContainer.delete()
# create remove object event
event = DeleteObjectEvent(objects=[obj])
self.dispatchEvent(event)
def addCommandProxy(self, commandProxy):
"""To make startup time faster this function add GUI elements without
importing and loading the full dependiencies for a command
"""
if self.hasGui:
gui = commandProxy.gui
if gui is not None:
gui.register(self, commandProxy)
gui.registered = True
def addCommand(self, command, name, gui=None):
"""
Add a command to a viewer.
arguments:
command: Command instance
name: string
gui: optional CommandGUI object
objectType: optional type of object for which we need to add geoms
geomDescr: optional dictionary of 'name:objectType' items
name is used to create an alias for the command in the viewer
if a gui is specified, call gui.register to add the gui to the viewer
"""
#print "addCommand", name, command
assert isinstance(command, Command)
# happens because of dependencies
if name in self.commands.keys():
return self.commands[name]
error = self.tryto(command.checkDependencies, self)
if error=='ERROR':
print '\nWARNING: dependency check failed for command %s' % name
return
## def download_cb():
## import os
## os.system('netscape http://www.scripps.edu/pub/olson-web/people/scoon/login.html &')
## def Ok_cb(idf):
## idf.form.destroy()
## tb = traceback.extract_tb(sys.exc_traceback)
## from gui import InputFormDescr, CallBackFunction
## import Tkinter
## idf = InputFormDescr("Missing dependencies !")
## idf.append({'widgetType': Tkinter.Label,
## 'text':"%s can't be loaded, needs %s module"
## % (tb[1][-1][7:],command.__class__.__name__),
## 'gridcfg':{'columnspan':2}})
## idf.append({'widgetType':Tkinter.Button, 'text':'OK',
## 'command':CallBackFunction(Ok_cb, idf),
## 'gridcfg':{'sticky':Tkinter.W+Tkinter.E}})
## idf.append({'widgetType':Tkinter.Button, 'text':'Download',
## 'command':download_cb,
## 'gridcfg':{'row':-1, 'sticky':Tkinter.W+Tkinter.E,
## 'columnspan':5 }})
## form = self.getUserInput(idf, modal=0, blocking=0)
## self.warningMsg(title = "Missing dependencies !",
## message = "%s can't be loaded, needs %s module"
## % (tb[1][-1][7:],command.__class__.__name__))
## return
command.vf = self
name = string.strip(name)
name = string.replace(name, ' ', '_')
self.commands[name] = command
command.name=name
command.undoMenuString=name # string used to change menu entry for Undo
command.undoMenuStringPrefix='' # prefix used to change menu entry for Undo
setattr(self, name, command)
#exec ( 'self.%s = command' % name )
if self.hasGui:
if gui is not None:
assert isinstance(gui, CommandGUI)
gui.register(self, command)
gui.registered = True
#call the onAddCmdToViewer method of the new command
command.onAddCmdToViewer()
for c in self.commands.values():
c.onAddNewCmd(command)
#if hasattr(command, 'onAddObjectToViewer'):
# if callable(command.onAddObjectToViewer):
# self.cmdsWithOnAddObj.append(command)
# for o in self.objects:
# command.onAddObjectToViewer(o)
if hasattr(command, 'onRemoveObjectFromViewer'):
if callable(command.onRemoveObjectFromViewer):
self.cmdsWithOnRemoveObj.append(command)
if hasattr(command, 'onExitFromViewer'):
if callable(command.onExitFromViewer):
self.cmdsWithOnExit.append(command)
def updateGeomContainers(self, objectType, geomDescr):
"""To be called when a new command that requires geometry is add to
a viewer. This method loops over existing objects to create the
required geometry for already existing objects"""
for o in self.objects:
if not isinstance(object, objectType): continue
o.geomContainer.addGeom( geomDescr )
def askFileOpen(self, idir=None, ifile=None, types=None, title='Open',
relative=True, parent=None, multiple=False):
"""filename <- askFileOpen( idir, ifile, types, title)
if the viewer is run with a gui this function displays a file browser
else it askes for a file name
idir: optional inital directory
ifile: optional initial filename
types: list of tuples [('PDB files','*.pdb'),]
title: widget's title
relative: when set to True the file name is realtive to the directory
where the application has been started
multiple: allow selecting multiple files
returns: a filename ot None if the Cancel button
"""
if self.hasGui:
if parent:
file = self.GUI.askFileOpen(parent, idir=idir, ifile=ifile,
types=types, title=title,
multiple=multiple)
else:
file = self.GUI.askFileOpen(
self.GUI.ROOT, idir=idir, ifile=ifile,
types=types, title=title, multiple=multiple)
if file is () or file is None: # this is returned if one click on the file list and
# then clicks Cancel
return
else:
default = ''
if idir: default = idir
if ifile: default = os.path.join( default, ifile )
file = raw_input("file name [%s] :"%default)
if file=='':
if default != '' and os.path.exists(file):
file = default
if multiple is False:
fpath,fname = os.path.split(file)
if relative and file and os.path.abspath(os.path.curdir) == fpath:
file = os.path.join(
os.path.curdir,
file[len(os.path.abspath(os.path.curdir))+1:])
return file
else:
files = []
for f in file:
fpath,fname = os.path.split(f)
if relative and f and os.path.abspath(os.path.curdir) == fpath:
f = os.path.join(os.path.curdir,
f[len(os.path.abspath(os.path.curdir))+1:])
files.append(f)
return files
def setLogMode(self, name, oldval, newval):
"Sets the Lig Mode"
self.logMode = newval
# open log file for all commands
if self.logMode == 'unique':
import time
t = time.localtime(time.time())
fname1 = 'mvAll_%04d-%02d-%02d_%02d-%02d-%02d.log.py'%(t[0],t[1],t[2],t[3],t[4],t[5])
fname1 = os.path.join(self.rcFolder, fname1)
if self.hasGui:
self.GUI.ROOT.after_idle(self.clearOldLogs)
elif self.logMode == 'overwrite':
fname1 = os.path.join(self.rcFolder, 'mvAll.log.py')
if self.logMode != 'no':
flag = self.tryOpenFileInWrite(fname1)
while flag == 0:
idf = InputFormDescr(title = 'Directory not writable ...')
variable = Tkinter.StringVar()
idf.append({'name':'noLog','widgetType': Tkinter.Radiobutton,
'text':'noLog','variable':variable,
'value':'noLog','defaultValue':'noLog',
'gridcfg':{'sticky':Tkinter.W}})
idf.append({'name':'browse','widgetType': 'SaveButton',
'typeofwidget':Tkinter.Radiobutton,
'types':[ ('Python Files', '*.py')],
'title':'Choose a log File...',
'text':'browse',
'variable':variable,
'defaultValue':'noLog',
'value':'browse',
'gridcfg':{'sticky':Tkinter.W}})
self.GUI.ROOT.deiconify()
self.GUI.ROOT.update()
result = self.getUserInput(idf)
if result == {}:
self.GUI.ROOT.destroy()
return
elif result['noLog'] == 'noLog':
self.logMode = 'no'
flag = 1
elif result['noLog'] == 'browse' and result.has_key('browse'):
assert not result['browse'] in ['']
flag = self.tryOpenFileInWrite(result['browse'])
elif result['noLog'] == 'browse' and not result.has_key('browse'):
print "you didn't enter a proper file name try again"
flag = 0
def setWarningMsgFormat(self, name, oldval, newval):
""" newval can be either 'pop-up' or 'printed'"""
self.messageFormat = newval
def warningMsg(self, msg, title='WARNING: ', parent = None):
"""None <- warningMsg(msg)"""
if type(title) is not types.StringType:
title = 'WARNING: '
if self.hasGui and self.messageFormat=='pop-up':
tkMessageBox.showwarning(title, msg,parent = parent)
else:
sys.stdout.write(title+msg+'\n')
def askOkCancelMsg(self, msg):
"""None <- okCancelMsg(msg)"""
if self.hasGui:
return tkMessageBox.askyesno('expand selection', msg)
else:
val = raw_input('anser [0]/1: '+msg+'\n')
if val=='1': return 1
else: return 0
## FIXME .. do we need this ?
def errorMsg(self, msg, errtype=RuntimeError):
"""None <- errorMsg(errorType, msg)"""
if self.hasGui:
tkMessageBox.showerror(msg)
raise errtype(msg)
def getUserInput(self, formDescription, master=None, root=None,
modal=0, blocking=1,
defaultDirection = 'row', closeWithWindow = 1,
okCfg={'text':'OK'}, cancelCfg={'text':'Cancel'},
initFunc=None, scrolledFrame=0, width=None, height=None,
okcancel=1, onDestroy = None,
postCreationFunc=None, postUsingFormFunc=None):
"""val[] <- getUserInput(formDescription)
Returns a list of values obtained either from an InputForm or by
prompting the user for values
"""
## from gui import InputForm, InputFormDescr
from mglutil.gui.InputForm.Tk.gui import InputForm, InputFormDescr
assert isinstance(formDescription, InputFormDescr)
if self.hasGui:
if master==None:
master = self.GUI.ROOT
#root = self.GUI.getCmdsParamsMaster()
#if not postCreationFunc:
# postCreationFunc = self.GUI.getAfterCreatingFormFunc()
#if not postUsingFormFunc:
# postUsingFormFunc = self.GUI.getAfterUsingFormFunc()
form = InputForm(master, root, formDescription,
modal=modal, blocking=blocking,
defaultDirection=defaultDirection,
closeWithWindow=closeWithWindow,
okCfg=okCfg, cancelCfg=cancelCfg,
initFunc=initFunc, scrolledFrame=scrolledFrame,
width=width, height=height,
okcancel=okcancel, onDestroy=onDestroy)
if form.ownsRoot:
geom = form.root.geometry()
# make sure the upper left dorner is visible
w = string.split(geom, '+')
changepos = 0
if w[1][0]=='-':
posx = '+50'
changepos=1
else:
posx = '+'+w[1]
if w[2][0]=='-':
posy ='+50'
changepos=1
else:
posy = '+'+w[2]
if changepos:
form.root.geometry(posx+posy)
if postCreationFunc:
postCreationFunc(form.root)
if not (modal or blocking):
return form
else:
values = form.go()
if postUsingFormFunc:
postUsingFormFunc(form.root)
return values
else:
self.warningMsg("nogui InputForm not yet implemented")
def transformedCoordinatesWithInstances(self, hits):
""" hist is pick.hits = {geom: [(vertexInd, intance),...]}
This function will use the instance information to return a list of transformed
coordinates
"""
# FIXME this is in DejaVu.VIewer and should go away here
vt = []
for geom, values in hits.items():
coords = geom.vertexSet.vertices.array
for vert, instance in values:
M = geom.GetMatrix(geom.LastParentBeforeRoot(), instance[1:])
pt = coords[vert]
ptx = M[0][0]*pt[0]+M[0][1]*pt[1]+M[0][2]*pt[2]+M[0][3]
pty = M[1][0]*pt[0]+M[1][1]*pt[1]+M[1][2]*pt[2]+M[1][3]
ptz = M[2][0]*pt[0]+M[2][1]*pt[1]+M[2][2]*pt[2]+M[2][3]
vt.append( (ptx, pty, ptz) )
return vt
if __name__ == '__main__':
v = ViewerFramework()
import pdb
| 39.854888 | 174 | 0.566262 | #############################################################################
#
# Author: Michel F. SANNER
#
# Copyright: M. Sanner TSRI 2000
#
# revision: Guillaume Vareille
#
#########################################################################
#
# $Header: /opt/cvs/python/packages/share1.5/ViewerFramework/VF.py,v 1.218 2013/10/03 22:31:33 annao Exp $
#
# $Id: VF.py,v 1.218 2013/10/03 22:31:33 annao Exp $
#
"""defines base classe ViewerFramework
The ViewerFramework class can be subclassed to create applications that
use a DejaVu Camera object to display 3D geometries. In the following
we'll call Viewer a class derived from ViewerFramework.
The design features of the viewer framework include:
- extensibility: new commands can be written by subclassing the VFCommand
base class.
- dynamically configurable: commands (or set of commands called modules)
can be loaded dynamically from libraries.
- Commands loaded into an application can create their own GUI elements
(menus, cascading menus, buttons, sliders, etc...). The viewer framework
provides support for the creation of such GUI elements.
- Commands can be invoked either through the GUI or throught the Python
Shell.
- Macros provide a lightweight mechanism to add simple commands. In fact
any Python function can be added as a Macro
- Support for logging of commands: this allows to record and play back a
session.
- documentation: the module and command documentation is provided in the
source code. This documentation can be extracted using existing tools and
made available in various formats including HTML and man pages. The document
ation is also accessible through the application's Help command which uses
Python's introspection capabilities to retrieve the documentation.
A ViewerFramework always has at least one menu bar called "menuRoot" and at
least one buttonbar called "Toolbar".
The geometried displayed in a Viewer can be stored in objects derived from the
base class GeometryContainer. This container holds a dictionnary of geometries
where the keys are the geometry's name and the values instances of DejaVu
Geometries.
Commands:
Commands for an application derived from ViewerFramework can be developped by
sub-classing the VFCommand base class (see VFCommand overview).The class
VFCommandGUI allows to define GUI to be associated with a command. Command can
be added dynamically to an application using the AddCommand command of the
ViewerFramework.
example:
# derive a new command
class ExitCommand(Command):
doit(self):
import sys
sys.exit()
# get a CommandGUI object
g = CommandGUI()
# add information to create a pull-down menu in a menu bar called
# 'MoVi' under a menu-button called 'File' with a menu Command called
# 'Exit'. We also specify that we want a separator to appear above
# this entry in the menu
g.addMenuCommand('MoVi', 'File', 'Exit', separatorAbove=1)
# add an instance of an ExitCommand with the alias 'myExit' to a
viewer
# v. This will automatically add the menu bar, the menu button
# (if necessary) the menu entry and bind the default callback function
v.addCommand( ExitCommand(), 'myExit', g )
The command is immediately operational and can be invoked through the
pull down menu OR using the Python shell: v.myExit()
CommandGUI objects allow to specify what type of GUI a command should have. It
is possible to create pull-down menu entries, buttons of different kinds etc..
Modules:
A bunch of related commands can be groupped into a module. A module is a
.py file that defines a number of commands and provides a functions called
initModule(viewer) used to register the module with an instance of a
viewer.
When a module is added to a viewer, the .py file is imported and the
initModule function is executed. Usually this functions instanciates a
number of command objects and their CommandGUI objects and adds them to
the viewer.
"""
import os, string, warnings
import traceback, sys, glob, time
class VFEvent:
"""Base class for ViewerFramework events.
"""
def __init__(self, arg=None, objects=[], *args, **kw):
""" """
self.arg = arg
self.objects = objects
self.args = args
self.kw = kw
if len(args):
self.args = args
if len(kw):
for k,v in kw.items():
setattr(self, k, v)
class AddObjectEvent(VFEvent):
pass
class DeleteObjectEvent(VFEvent):
pass
class LogEvent(VFEvent):
"""created each time a log string is written to the log"""
def __init__(self, logstr):
""" """
self.logstr = logstr
class ModificationEvent:
def __init__(self, action, arg=None, objects=[]):
""" """
self.action = action
self.arg = arg
self.objects = objects
class GeomContainer:
"""Class to hold geometries to be shown in a viewer.
This class provides a dictionnary called geoms in which the name of a
DejaVu geometry is the key to access that particular geometry object
Geometries can be added using the addGeometry method.
"""
def __init__(self, viewer=None):
"""constructor of the geometry container"""
## Dictionary of geometries used to display atoms from that molecule
## using sticks, balls, CPK spheres etc ...
self.geoms = {}
self.VIEWER = viewer # DejaVu Viewer object
self.masterGeom = None
## Dictionary linking geom names to cmds which updates texture coords
## for the current set of coordinates
self.texCoordsLookup = {}
self.updateTexCoords = {}
def delete(self):
"""Function to remove self.geoms['master'] and
self.geoms['selectionSpheres'] from the viewer when deleted"""
# switch the object and descendant to protected=False
for c in self.geoms['master'].AllObjects():
c.protected = False
if self.VIEWER:
self.VIEWER.RemoveObject(self.geoms['master'])
#for item in self.geoms.values():
# item.delete()
# if item.children!=[]:
# self.VIEWER.RemoveObject(item)
def addGeom(self, geom, parent=None, redo=False):
"""
This method should be called to add a molecule-specific geometry.
geom -- DejaVu Geom instance
parent -- parent geometry, if not specified we use self.masterGeom
"""
if parent is None:
parent = self.masterGeom
# we need to make sure the geometry name is unique in self.geoms
# and in parent.children
nameUsed=False
geomName = geom.name
for object in parent.children:
if object.name==geomName:
nameUsed=True
break
if nameUsed or self.geoms.has_key(geomName):
newName = geomName+str(len(self.geoms))
geom.name = newName
warnings.warn("renaming geometry %s to %s"%(geomName, newName))#, stacklevel=14)
self.geoms[geomName]=geom
# add the geometry to the viewer. At this point the name should be
# unique in both the parent geoemtry and the geomContainer.geoms dict
if self.VIEWER:
self.VIEWER.AddObject( geom, parent=parent, redo=redo)
else:
parent.children.append(geom)
geom.parent = parent
geom.fullName = parent.fullName+'|'+geom.name
#from DejaVu.Labels import Labels
from DejaVu.Spheres import Spheres
## from ViewerFramework.gui import InputFormDescr
from mglutil.gui.InputForm.Tk.gui import InputFormDescr
from mglutil.util.callback import CallBackFunction
from mglutil.util.packageFilePath import findResourceFile, getResourceFolderWithVersion
try:
from ViewerFramework.VFGUI import ViewerFrameworkGUI
except:
pass
from ViewerFramework.VFCommand import Command,CommandGUI,InteractiveCmdCaller
# Import basic commands.
from ViewerFramework.basicCommand import loadCommandCommand, loadMacroCommand
from ViewerFramework.basicCommand import ShellCommand, ShellCommandGUI, ExitCommand
from ViewerFramework.basicCommand import loadModuleCommand
from ViewerFramework.basicCommand import BrowseCommandsCommand, RemoveCommand
from ViewerFramework.basicCommand import SaveSessionCommand, SaveSessionCommandGUI
from ViewerFramework.helpCommands import helpCommand
try:
from comm import Comm
except:
pass
from DejaVu import Viewer
from DejaVu.Camera import Camera
import types, Tkinter
import thread
import os, sys, traceback
import tkMessageBox
from mglutil.preferences import UserPreference
class ViewerFramework:
"""
Base class for applications providing a 3D geometry Viewer based on a
DejaVu Camera object along with support for adding GUI and commands
dynamically.
"""
def freeze(self):
self.__frozen = True
def unfreeze(self):
self.__frozen = False
def frozen(self):
return self.__frozen == True
def __init__(self, title='ViewerFrameWork', logMode='no',
libraries=[], gui=1, resourceFile = '_vfrc',
viewerClass=Viewer, master=None, guiVisible=1, withShell=1,
verbose=True, trapExceptions=True):
"""
Construct an instance of a ViewerFramework object with:
- an instance of a VFGUI that provides support for adding to the
GUI of the application
- a dictionnary of commands
- a list of commands that create geometry
- a list of objects to be displayed
- a dictionary of colorMaps
* logMode can be:
'no': for no loging of commands at all
'overwrite': the log files overwrite the one from the previous
session
'unique': the log file name include the date and time
* libraries is a list of names of Python package that provide a
cmdlib.py and modlib.py
- trapExceptions should be set to False when creating a ViewerFramework
for testing, such that exception are seen by the testing framework
"""
self.__frozen = False
self.hasGui = gui
self.embeded=False
self.cmdHistory = [] # history of command [(cmd, args, kw)]
global __debug__
self.withShell = withShell
self.trapExceptions = trapExceptions
#self.__debug__ = 0
# create a socket communication object
try:
self.socketComm = Comm()
self.webControl = Comm()
self.cmdQueue = None # queue of command comming from server
except:
self.socketComm = None
self.webControl = None
self.timeUsedForLastCmd = 0. # -1 when command fails
assert logMode in ['no', 'overwrite', 'unique']
self.resourceFile = resourceFile
self.commands = {} # dictionnary of command added to a Viewer
self.userpref = UserPreference()
#self.removableCommands = UserPreference(os.path.dirname(self.resourceFile), 'commands')
self.userpref.add('Sharp Color Boundaries for MSMS', 'sharp', ('sharp', 'blur'),
doc="""Specifies color boundaries for msms surface [sharp or blur]
(will not modify already displayed msms surfaces,
only new surfaces will be affected)""", category="DejaVu")
#Warning: changing the cursor tends to make the window flash.""")
# Interface to Visual Programming Environment, if available
self.visionAPI = None
if self.hasGui :
try:
# does this package exists?
from Vision.API import VisionInterface
# create empty object. Note that this will be filled with life
# when the visionCommand is executed
self.visionAPI = VisionInterface()
except:
pass
self.objects = [] # list of objects
self.colorMaps = {} # available colorMaps
self.colorMapCt = 0 # used to make sure names are unique
self.undoCmdStack = [] # list of strings used to undo
# lock needs to be acquired before object can be added
self.objectsLock = thread.allocate_lock()
# lock needs to be acquired before topcommands can be run
self.commandsLock = thread.allocate_lock()
# nexted commands counter
self.commandNestingLevel = 0
# place holder for a list of command that can be carried out each time
# an object is added to the application
# every entry is a tuple (function, args_tuple, kw_dict)
self.onAddObjectCmds = []
# list of commands that have an onRemoveMol
self.cmdsWithOnAddObj = []
# list of commands that have an onAddMol
self.cmdsWithOnRemoveObj = []
# dict cmd:[cm1, cmd2, ... cmdn]. When cmd runs the onCmdRun method
# of all cmds in the list will be called with the arguments passed
# to cmd
self.cmdsWithOnRun = {}
# list of commands that have an onExit
self.cmdsWithOnExit = []
self.firstPerspectiveSet = True
self.logMode = logMode
self.libraries = libraries + ['ViewerFramework']
self.topNegateCmds = [] # used in Command.doitWrapper() to accumulate negation commands
# for sub commands of a top command
# you cannot create a GUI and have it visible.
if not self.hasGui:
self.guiVisible=0
else:
self.guiVisible=guiVisible
self.master=master
if gui:
self.GUI = ViewerFrameworkGUI(self, title=title,
viewerClass=viewerClass,
root=master, withShell=withShell,
verbose=verbose)
self.GUI.VIEWER.suspendRedraw = True
self.viewSelectionIcon = 'cross' # or 'spheres' or 'labels'
self.userpref.add('Show Progress Bar', 'hide',
['show','hide'],
doc = """When set to 'show' the progress bar is displayed.
When set to 'hide', the progress bar widget is widthdrawn, but can be
redisplayed by choosing 'show' again.""", category='Viewer',
callbackFunc=[self.GUI.showHideProgressBar_CB],
)
if gui:
cbList = [self.GUI.logUserPref_cb,]
else:
cbList = []
#if gui:
# self.guiSupport = __import__( "DejaVu.ViewerFramework.gui", globals(),
# locals(), ['gui'])
if gui and self.guiVisible==0:
# if gui == 1 but self.guiVisible == 0: the gui is created but
# withdrawn immediatly
self.GUI.ROOT.withdraw()
if self.withShell:
# Uses the pyshell as the interpreter when the VFGUI is hidden.
self.GUI.pyshell.top.deiconify()
self.viewSelectionIcon = 'cross' # or 'spheres' or 'labels'
self.userpref.add( 'Transformation Logging', 'no',
validValues = ['no', 'continuous', 'final'],
callbackFunc = cbList,
doc="""Define when transformation get logged.\n'no' : never; 'continuous': after every transformation; 'final': when the Exit command is called""")
self.userpref.add( 'Visual Picking Feedback', 1,
[0, 1], category="DejaVu",
callbackFunc = [self.SetVisualPickingFeedBack,],
doc="""When set to 1 a sphere is drawn at picked vertex""")
self.userpref.add( 'Fill Selection Box', 1,
[0,1], category="DejaVu",
callbackFunc = [self.fillSelectionBoxPref_cb],
doc="""Set this option to 1 to have the program
draw a solid selection box after 'fillSelectionBoxDelay' miliseconds without a motion""")
self.userpref.add( 'Fill Selection Box Delay', 200, category="DejaVu",
validateFunc = self.fillDelayValidate,
callbackFunc = [self.fillSelectionBoxDelayPref_cb],
doc="""Delay in miliseconds after which the selection box turns solid if the 'fillSelectionBox' is set. Valide values are >0 and <10000""")
self.userpref.add( 'Warning Message Format', 'pop-up',
['pop-up', 'printed'],
callbackFunc = [self.setWarningMsgFormat],
category="Viewer",
doc="""Set format for warning messages. valid values are 'pop-up' and 'printed'""")
self._cwd = os.getcwd()
self.userpref.add( 'Startup Directory', self._cwd,
validateFunc = self.startupDirValidate,
callbackFunc = [self.startupDirPref_cb],
doc="""Startup Directory uses os.chdir to change the startup directory.
Startup Directory is set to current working directory by default.""")
rcFolder = getResourceFolderWithVersion()
self.rcFolder = rcFolder
self.userpref.add( 'Log Mode', 'no', ['no', 'overwrite', 'unique'],
callbackFunc = [self.setLogMode],
category="Viewer",
doc="""Set the log mode which can be one of the following:
no - do not log the commands.
overwrite - stores the log in mvAll.log.py.
unique - stores the log in mvAll_$time.log.py.
log.py files are stored in resource folder located under ~/.mgltools/$Version
""")
self.userpref.add( 'Command History Depth', 500,
validateFunc=self.commmandHistoryValidate,
#callbackFunc = []
doc="Set Command Hsistory Depth - number of commands kept in the command history list and displayed in the MESSAGE BOX")
if self.hasGui:
# add an interactive command caller
self.ICmdCaller = InteractiveCmdCaller( self )
# remove DejaVu's default picking behavior
vi = self.GUI.VIEWER
vi.RemovePickingCallback(vi.unsolicitedPick)
# overwrite the Camera's DoPick method to set the proper pickLevel
# based on the interactive command that will be called for the
# current modifier configuration
for c in vi.cameras:
c.DoPick = self.DoPick
self.addBasicCommands()
if self.hasGui:
from mglutil.util.recentFiles import RecentFiles
fileMenu = self.GUI.menuBars['menuRoot'].menubuttons['File'].menu
rcFile = rcFolder
if rcFile:
rcFile += os.sep + 'Pmv' + os.sep + "recent.pkl"
self.recentFiles = RecentFiles(
self, fileMenu, filePath=rcFile,
menuLabel='Recent Files', index=2)
self.logMode = 'no'
self.GUI.dockCamera()
self.logMode = logMode
# load out default interactive command which prints out object names
self.ICmdCaller.setCommands( self.printGeometryName )
self.ICmdCaller.go()
if gui:
self.userpref.add( 'Icon Size', 'medium',
['very small', 'small', 'medium', 'large',
'very large'],
callbackFunc = [self.SetIconSize,],
category="Viewer",
doc="""Sets the size of icons for the Toolbar.""")
self.userpref.add( 'Save Perspective on Exit', 'yes',
validValues = ['yes', 'no'],
doc="""Saves GUI perspective on Exit. The following features are saved:
GUI geometry, and whether camera is docked or not.
""")
self.GUI.VIEWER.suspendRedraw = False
self.GUI.VIEWER.currentCamera.height = 600
# dictionary of event:[functions]. functions will be called by
# self.dispatchEvent
self.eventListeners = {}
self.userpref.saveDefaults()
self.userpref.loadSettings()
if self.userpref.has_key('Save Perspective on Exit') and self.userpref['Save Perspective on Exit']['value'] == 'yes':
self.restorePerspective()
#self.GUI.VIEWER.ReallyRedraw()
def registerListener(self, event, function):
"""registers a function to be called for a given event.
event has to be a class subclassing VFEvent
"""
assert issubclass(event, VFEvent)
assert callable(function)
if not self.eventListeners.has_key(event):
self.eventListeners[event] = [function]
else:
if function in self.eventListeners[event]:
warnings.warn('function %s already registered for event %s'%(
function,event))
else:
self.eventListeners[event].append(function)
def dispatchEvent(self, event):
"""call all registered listeners for this event type
"""
assert isinstance(event, VFEvent)
if self.eventListeners.has_key(event.__class__):
if self.hasGui:
vi=self.GUI.VIEWER
autoRedraw = vi.autoRedraw
vi.stopAutoRedraw()
for func in self.eventListeners[event.__class__]:
func(event)
if autoRedraw:
vi.startAutoRedraw()
else:
for func in self.eventListeners[event.__class__]:
func(event)
def DoPick(self, x, y, x1=None, y1=None, type=None, event=None):
vi = self.GUI.VIEWER
def getType(vf, mod):
cmd = vf.ICmdCaller.commands.value[mod]
if cmd:
vf.ICmdCaller.currentModifier = mod
vf.ICmdCaller.getObjects = cmd.getObjects
return cmd.pickLevel
else: return None
if vi.isShift(): type = getType(self, 'Shift_L')
elif vi.isControl(): type = getType(self, 'Control_L')
elif vi.isAlt(): type = getType(self, 'Alt_L')
else: type = getType(self, None)
if type:
return Camera.DoPick(vi.currentCamera, x, y, x1, y1, type, event)
else:
from DejaVu.Camera import PickObject
return PickObject('pick', self.GUI.VIEWER.currentCamera)
def clients_cb(self, client, data):
"""get called every time a client sends a message"""
import sys
sys.stdout.write('%s sent %s\n'%(client,data) )
#exec(data)
def embedInto(self, hostApp,debug=0):
"""
function to define an hostapplication, take the string name of the application
"""
if self.hasGui:
raise RuntiomeError("VF with GUI cannot be embedded")
from ViewerFramework.hostApp import HostApp
self.hostApp = HostApp(self, hostApp, debug=debug)
self.embeded=True
def sendViewerState (self, event=None):
# get call every so often when this PMV is a server
state1 = self.GUI.VIEWER.getViewerStateDefinitionCode(
'self.GUI.VIEWER', withMode=False)
state2 = self.GUI.VIEWER.getObjectsStateDefinitionCode(
'self.GUI.VIEWER', withMode=False)
if self.socketComm is not None and len(self.socketComm.clients):
cmdString = """"""
for line in state1:
cmdString += line
for line in state2:
cmdString += line
self.socketComm.sendToClients(cmdString)
self.GUI.ROOT.after(500, self.sendViewerState)
def runServerCommands (self, event=None):
# get call every so often when this PMV is a client of a server
if not self.cmdQueue.empty():
cmd = self.cmdQueue.get(False) # do not block if queue empty
if cmd:
#if pmv is embedded without a gui in a third application
#have to parse the command and remove all cmd that imply GUI
if self.embedded :
#mesg=cmd[1]
mesg=self.hostApp.driver.parsePmvStates(cmd[1])
exec(mesg, {'self':self})
#print 'client executing', cmd
else :
exec(cmd[1])
if not self.embeded :
#if embeded the runServerCommand is handle by a thread define by the hosAppli class
self.GUI.ROOT.after(10, self.runServerCommands)
def updateIMD(self):
"""get called every time the server we are connected to sends a
message
what about more than one molecule attached
currently under develppment
"""
from Pmv.moleculeViewer import EditAtomsEvent
#print "pause",self.imd.pause
if self.imd.mindy:
#print "ok update mindy"
self.imd.updateMindy()
if self.hasGui and self.imd.gui :
self.GUI.VIEWER.OneRedraw()
self.GUI.VIEWER.update()
self.GUI.ROOT.after(1, self.updateIMD)
else :
if not self.imd.pause:
self.imd.lock.acquire()
coord = self.imd.imd_coords[:]
self.imd.lock.release()
if coord != None:
#how many mol
if type(self.imd.mol) is list :
b=0
for i,m in enumerate(self.imd.mol) :
n1 = len(m.allAtoms.coords)
self.imd.mol.allAtoms.updateCoords(coord[b:n1], self.imd.slot[i])
b=n1
else :
self.imd.mol.allAtoms.updateCoords(coord, self.imd.slot)
import DejaVu
if DejaVu.enableVBO :
if type(self.imd.mol) is list :
b=0
for i,m in enumerate(self.imd.mol) :
N=len(m.geomContainer.geoms['cpk'].vertexSet.vertices.array)
m.geomContainer.geoms['cpk'].vertexSet.vertices.array[:]=coord[b:N]
b=N
else :
N=len(self.imd.mol.geomContainer.geoms['cpk'].vertexSet.vertices.array)
self.imd.mol.geomContainer.geoms['cpk'].vertexSet.vertices.array[:]=coord[:N]
#self.GUI.VIEWER.OneRedraw()
#self.GUI.VIEWER.update()
else :
from Pmv.moleculeViewer import EditAtomsEvent
if type(self.imd.mol) is list :
for i,m in enumerate(self.imd.mol) :
event = EditAtomsEvent('coords', m.allAtoms)
self.dispatchEvent(event)
else :
event = EditAtomsEvent('coords', self.imd.mol.allAtoms)
self.dispatchEvent(event)
#self.imd.mol.geomContainer.geoms['balls'].Set(vertices=coord)
#self.imd.mol.geomContainer.geoms['sticks'].Set(vertices=coord.tolist())
#self.imd.mol.geomContainer.geoms['lines'].Set(vertices=coord)
#self.imd.mol.geomContainer.geoms['bonds'].Set(vertices=coord)
#self.imd.mol.geomContainer.geoms['cpk'].Set(vertices=coord)
if self.handler.isinited :
self.handler.getForces(None)
self.handler.updateArrow()
#"""
if self.hasGui and self.imd.gui :
self.GUI.VIEWER.OneRedraw()
self.GUI.VIEWER.update()
self.GUI.ROOT.after(5, self.updateIMD)
#self.GUI.ROOT.after(10, self.updateIMD)
def server_cb(self, server, data):
"""get called every time the server we are connected to sends a
message"""
import sys
#sys.stderr.write('server %s sent> %s'%(server,data) )
self.cmdQueue.put( (server,data) )
#exec(data) # cannot exec because we are not in main thread
# and Tkitner is not thread safe
#self.GUI.VIEWER.Redraw()
def drawSelectionRectangle(self, event):
c = self.GUI.VIEWER.currentCamera
c.drawSelectionRectangle(event)
def initSelectionRectangle(self, event):
c = self.GUI.VIEWER.currentCamera
c.initSelectionRectangle(event)
def endSelectionRectangle(self, event):
c = self.GUI.VIEWER.currentCamera
c.endSelectionRectangle(event)
def fillSelectionBoxPref_cb(self, name, old, new):
if self.hasGui:
for c in self.GUI.VIEWER.cameras:
c.fillSelectionBox = new
def fillDelayValidate(self, value):
return (value > 0 and value < 10000)
def commmandHistoryValidate(self, value):
try:
val = int(value)
if val >-1:
return 1
else:
return 0
except:
return 0
def fillSelectionBoxDelayPref_cb(self, name, old, new):
if self.hasGui:
for c in self.GUI.VIEWER.cameras:
c.fillDelay = new
def SetVisualPickingFeedBack(self, name, old, new):
if self.hasGui:
self.GUI.VIEWER.showPickedVertex = new
def SetIconSize(self, name, old, new):
if self.hasGui:
self.GUI.configureToolBar(iconsize=new)
def startupDirPref_cb(self, name, old, new):
if not os.path.isdir(new):
if not hasattr(self,'setUserPreference') and not hasattr(self.setUserPreference, 'form'): return
root = self.setUserPreference.form.root
from tkMessageBox import showerror
showerror("Invalid Startup Directory", "Directory %s "%new +
" does not exists. Please select a valid Directory", parent=root)
from tkFileDialog import askdirectory
dir = askdirectory(parent=root,
title='Please select startup directory')
if dir:
os.chdir(dir)
self.userpref.data["Startup Directory"]['value'] = dir
w=self.setUserPreference.form.descr.entryByName[name]['widget']
w.setentry(dir)
#this removes updateGUI so that wrong new is not shown
self.userpref.data["Startup Directory"]['callbackFunc'].pop(-1)
else:
self.userpref.set("Startup Directory", old)
else:
os.chdir(new)
def startupDirValidate(self, value):
return 1
def restorePerspective(self):
if not self.hasGui:
return
if self.resourceFile:
rcFile = os.path.join(os.path.split(self.resourceFile)[0], "perspective")
else:
rcFolder = getResourceFolderWithVersion()
rcFile = os.path.join(rcFolder, "ViewerFramework", "perspective")
if os.path.exists(rcFile):
try:
self.source(rcFile, globalNames=1, log=1)
return True
except Exception, inst:
print inst, rcFile
return
def tryOpenFileInWrite(self, filename):
try:
self.logAllFile = open( filename, 'w' )
from Support.version import __version__
from mglutil import __revision__
self.logAllFile.write("# Pmv version %s revision %s\n"%(__version__, __revision__))
return 1
except:
try:
from mglutil.util.packageFilePath import getResourceFolderWithVersion
rc = getResourceFolderWithVersion()
self.logAllFile = open(rc + os.sep + filename, 'w' )
except:
return 0
def customize(self, file=None):
"""if a file is specified, this files gets sourced, else we look for
the file specified in self.resourceFile in the following directories:
1 - current directory
2 - user's home directory
3 - the package to which this instance belongs to
"""
#print 'ZZZZZZZZZZZZZZZZZZZZZZZZ'
#import traceback
#traceback.print_stack()
if file is not None:
if not os.path.exists(file):
return
self.source(file, globalNames=1, log=0)
return
resourceFileLocation = findResourceFile(self,
resourceFile=self.resourceFile)
if resourceFileLocation.has_key('currentdir') and \
not resourceFileLocation['currentdir'] is None:
path = resourceFileLocation['currentdir']
elif resourceFileLocation.has_key('home') and \
not resourceFileLocation['home'] is None:
path = resourceFileLocation['home']
elif resourceFileLocation.has_key('package') and \
not resourceFileLocation['package'] is None:
path = resourceFileLocation['package']
else:
return
self.source(path, globalNames=1, log=0)
path = os.path.split(path)[-1]
if os.path.exists(path):
self.source(path, globalNames=1, log=0)
return
def after(func, *args, **kw):
"""method to run a thread enabled command and wait for its completion.
relies on the command to release a lock called self.done
only works for commands, not for macros
"""
lock = thread.allocate_lock()
lock.acquire()
func.private_threadDone = lock
apply( func, args, kw )
func.waitForCompletion()
def getLog(self):
"""
generate log strings for all commands so far
"""
logs = []
i = 0
for cmd, args, kw in self.cmdHistory:
try:
log = cmd.logString( *args, **kw)+'\n'
except:
log = '#failed to create log for %d in self.cmdHistory: %s\n'%(
i, cmd.name)
logs.append(log)
i += 1
return logs
def addCmdToHistory(self, cmd, args, kw):
"""
append a command to the history of commands
"""
#print "ADDING Command to history", cmd.name
self.cmdHistory.append( (cmd, args, kw))
maxLen = self.userpref['Command History Depth']['value']
lenCmds = len(self.cmdHistory)
if maxLen>0 and lenCmds > maxLen:
#print "maxLen", maxLen, lenCmds
self.cmdHistory = self.cmdHistory[-maxLen:]
if self.hasGui:
nremoved = lenCmds-maxLen
# update text in the message box
message_box = self.GUI.MESSAGE_BOX
nlines = float(nremoved+1)
try:
message_box.tx.delete('1.0', str(nlines))
except:
pass
def log(self, cmdString=''):
"""append command to logfile
FIXME: this should also get whatever is typed in the PythonShell
"""
if self.logMode == 'no': return
if cmdString[-1]!='\n': cmdString = cmdString + '\n'
if hasattr(self, 'logAllFile'):
self.logAllFile.write( cmdString )
self.logAllFile.flush()
if self.socketComm is not None and len(self.socketComm.clients):
#is it really need?
cmdString=cmdString.replace("log=0","log=1")
self.socketComm.sendToClients(cmdString)
self.dispatchEvent( LogEvent( cmdString ) )
## if self.selectLog:
## self.logSelectFile.write( cmdString )
def tryto(self, command, *args, **kw ):
"""result <- tryto(command, *args, **kw )
if an exception is raised print traceback and continue
"""
self.commandNestingLevel = self.commandNestingLevel + 1
try:
if self.commandNestingLevel==1:
self.commandsLock.acquire()
if not self.trapExceptions:
# we are running tests and want exceptions not to be caught
result = command( *args, **kw )
else:
# exception should be caught and displayed
try:
result = command( *args, **kw )
except:
print 'ERROR *********************************************'
if self.guiVisible==1 and self.withShell:
self.GUI.pyshell.top.deiconify()
self.GUI.ROOT.config(cursor='')
self.GUI.VIEWER.master.config(cursor='')
self.GUI.MESSAGE_BOX.tx.component('text').config(cursor='xterm')
traceback.print_exc()
sys.last_type, sys.last_value, sys.last_traceback = sys.exc_info()
result = 'ERROR'
# sets cursors back to normal
finally:
if self.commandNestingLevel==1:
self.commandsLock.release()
self.commandNestingLevel = self.commandNestingLevel - 1
return result
def message(self, str, NL=1):
""" write into the message box """
if self.hasGui:
self.GUI.message(str,NL)
else:
print str
def unsolicitedPick(self, pick):
"""treat and unsollicited picking event"""
vi = self.GUI.VIEWER
if vi.isShift() or vi.isControl():
vi.unsolicitedPick(pick)
else:
#print picked geometry
for k in pick.hits.keys():
self.message(k)
def addBasicCommands(self):
"""Create a frame to hold menu and button bars"""
from ViewerFramework.dejaVuCommands import PrintGeometryName, \
SetCameraSizeCommand, SetCamSizeGUI
# Basic command that needs to be added manually.
self.addCommand( PrintGeometryName(), 'printGeometryName ', None )
g = CommandGUI()
g.addMenuCommand('menuRoot', 'File', 'Browse Commands',
separatorAbove=1, )
self.addCommand( BrowseCommandsCommand(), 'browseCommands', g)
self.addCommand( SetCameraSizeCommand(), 'setCameraSize',
SetCamSizeGUI)
from ViewerFramework.basicCommand import UndoCommand, \
ResetUndoCommand, NEWUndoCommand, RedoCommand
# g = CommandGUI()
# g.addMenuCommand('menuRoot', 'File', 'Remove Command')
# self.addCommand( RemoveCommand(), 'removeCommand', g)
from mglutil.util.packageFilePath import getResourceFolderWithVersion
self.vfResourcePath = getResourceFolderWithVersion()
if self.vfResourcePath is not None:
self.vfResourcePath += os.sep + "ViewerFramework"
if not os.path.isdir(self.vfResourcePath):
try:
os.mkdir(self.vfResourcePath)
except Exception, inst:
print inst
txt="Cannot create the Resource Folder %s" %self.vfResourcePath
self.vfResourcePath = None
g = CommandGUI()
g.addMenuCommand('menuRoot', 'Edit', 'Undo ', index=0)
g.addToolBar('Undo', icon1 = '_undo.gif', icon2 = 'undo.gif',
type = 'ToolBarButton',state = 'disabled',
balloonhelp = 'Undo', index = 1)
self.addCommand( NEWUndoCommand(), 'NEWundo', g)
g = CommandGUI()
g.addMenuCommand('menuRoot', 'Edit', 'Redo ', index=1)
g.addToolBar('Redo', icon1 = '_redo.gif', icon2 = 'redo.gif',
type = 'ToolBarButton',state = 'disabled',
balloonhelp = 'Redo', index = 2)
self.addCommand( RedoCommand(), 'redo', g)
# keep old undo command for now for backward compatibility
self.addCommand( UndoCommand(), 'undo', None )
self.addCommand( ResetUndoCommand(), 'resetUndo ', None)
g = CommandGUI()
#g.addMenuCommand('menuRoot', 'File', 'Load Command')
self.addCommand( loadCommandCommand(), 'loadCommand', g)
g = CommandGUI()
#g.addMenuCommand('menuRoot', 'File', 'Load Module')
self.addCommand( loadModuleCommand(), 'loadModule', g)
g = CommandGUI()
g.addMenuCommand('menuRoot', 'File', 'Load Macros', separatorBelow=1)
self.addCommand( loadMacroCommand(), 'loadMacro', g)
# Load Source command from customizationCommands module:
self.browseCommands('customizationCommands', commands=['source',],
package='ViewerFramework', topCommand=0)
# force the creation of the default buttonbar and PyShell checkbutton
# by viewing the Python Shell widget
if self.withShell:
self.addCommand( ShellCommand(), 'Shell', ShellCommandGUI )
# add the default 'Help' menubutton in the default menubar
if self.hasGui:
bar = self.GUI.menuBars['menuRoot']
help = self.GUI.addMenuButton( bar, 'Help', {}, {'side':'right'})
self.GUI.addMenuButton( bar, 'Grid3D', {}, {'side':'right'})
try:
import grid3DCommands
self.browseCommands("grid3DCommands", package="ViewerFramework", topCommand=0)
except Exception, inst:
print inst
print "Cannot import grid3DCommands. Disabling grid3DCommands..."
#self.GUI.ROOT.after(1500, self.removeCommand.loadCommands)
# load helpCommand and searchForCmd
self.browseCommands('helpCommands',
commands=['helpCommand','searchForCmd', 'citeThisScene',
'showCitation'],
package='ViewerFramework', topCommand = 0)
# load SetUserPreference and setOnAddObjectCmds Commands
self.browseCommands('customizationCommands',
commands=['setUserPreference',
'setOnAddObjectCommands'],
package='ViewerFramework', topCommand = 0)
# load ChangeVFGUIvisGUI and SetOnAddObjectCmds Command
self.browseCommands('customizeVFGUICommands',
package='ViewerFramework', topCommand = 0)
self.addCommand( SaveSessionCommand(), 'saveSession ', SaveSessionCommandGUI)
# Add the Exit command under File
g = CommandGUI()
g.addMenuCommand('menuRoot', 'File', 'Exit', separatorAbove=1)
self.addCommand( ExitCommand(), 'Exit', g )
# load object transformation, camera transformation,
# light transformation, Clipping Plane transformation,
# CenterGeom, centerScene commands
self.browseCommands("dejaVuCommands", commands=[
'transformObject', 'transformCamera', 'setObject',
'setCamera', 'setLight', 'setClip', 'addClipPlane',
'centerGeom', 'centerScene', 'centerSceneOnVertices',
'alignGeomsnogui','alignGeoms', 'toggleStereo',
'centerSceneOnPickedPixel'],
package='ViewerFramework', topCommand = 0)
def validInstance(self, classList, obj):
"""Checks whether an object is an instance of one the classes in the
list"""
ok = 0
for Klass in classList:
if isinstance(obj, Klass):
OK=1
break
return OK
def getOnAddObjectCmd(self):
"""
returns a copy of the list of commands currently executed when a new object
is added
"""
return self.onAddObjectCmds[:]
def addOnAddObjectCmd(self, cmd, args=[], kw={}):
"""
adds a command to the list of commands currently executed when a new object
is added
"""
assert callable(cmd)
assert type(args)==types.TupleType or type(args)==types.ListType
assert type(kw)==types.DictType
assert cmd.flag & Command.objArgOnly
kw['topCommand'] = 0
kw['setupNegate'] = 0
if type(args)==types.ListType:
args = tuple(args)
self.onAddObjectCmds.append( (cmd, args, kw) )
def removeOnAddObjectCmd(self, cmd):
"""
removes a command to the list of commands currently executed when a new object
is added
"""
for com in self.onAddObjectCmds:
if com[0]==cmd:
self.onAddObjectCmds.remove(com)
return com
print 'WARNING: command %s not found'%cmd.name
return None
def addObject(self, name, obj, geomContainer=None):
"""Add an object to a Viewer"""
#print 'acquiring addObject lock'
self.objectsLock.acquire()
self.objects.append(obj)
self.objectsLock.release()
#print 'releasing addObject lock'
## if geomContainer is None:
## obj.geomContainer = GeomContainer( self.GUI.VIEWER )
## else:
## obj.geomContainer = geomContainer
obj.geomContainer = geomContainer
# prepare progress bar
lenCommands = len(self.cmdsWithOnAddObj)
if self.hasGui:
self.GUI.configureProgressBar(init=1, mode='increment',
max=lenCommands,
progressformat='ratio',
labeltext='call initGeom methods')
#call initGeom method of all commands creating geometry
from time import time
#t0 = time()
for com in self.cmdsWithOnAddObj:
com.onAddObjectToViewer(obj)
#t1 = time()
#print 'INITI', com, t1-t0
#check for gui
if self.hasGui:
self.GUI.updateProgressBar()
# now set progress bar back to '%' format
if self.hasGui:
self.GUI.configureProgressBar(progressformat='percent')
# prepare progress bar
lenCommands = len(self.onAddObjectCmds)
#call functions that need to be called on object
#t0 = time()
for com in self.onAddObjectCmds:
com[2]['redraw']=0
com[2]['log']=0
#t1 = time()
#print 'INITI2', com, t1-t0
com[0]( *((obj,)+com[1]), **com[2] )
# note we have to re-configure the progress bar because doitWrapper
# will overwrite the mode to 'percent'
#check for gui
if self.hasGui:
self.GUI.configureProgressBar(init=1, mode='increment',
max=lenCommands,
progressformat='ratio',
labeltext='call geom functions')
self.GUI.updateProgressBar()
if self.hasGui:
# now set progress bar back to '%' format
self.GUI.configureProgressBar(progressformat='percent')
# create add object event
event = AddObjectEvent(objects=[obj])
self.dispatchEvent(event)
if self.hasGui:
self.centerScene(topCommand=0)
self.GUI.VIEWER.Redraw()
def removeObject(self, obj, undoable=False):
"""Remove an object from a Viewer"""
#1 Delete the obj from the list of objects.
del(self.objects[self.objects.index(obj)])
# call onRemoveMol method of all commands creating geometry
# To remove geometries created by these commands from the VIEWER
## MS chose to cerate undoableDelete__ variable in VF to let cmd's
## onRemoveObjectFromViewer method decide what to do when delete is
## undoable. Passign undoable into th method would require changing
## the signature in each implementation when onyl a hand full do
## something s[pecial when undoable is True
self.undoableDelete__ = undoable
for com in self.cmdsWithOnRemoveObj:
self.tryto( com.onRemoveObjectFromViewer, (obj) )
del self.undoableDelete__
# clean up the managedGeometries list
if obj.geomContainer:
for cmd in self.commands.values():
if len(cmd.managedGeometries)==0: continue
geomList = []
for g in cmd.managedGeometries:
if hasattr(g, 'mol') and g.mol==obj:
continue
geomList.append(g)
cmd.managedGeometries = geomList
# remove everything created in the geomContainer associated to the
# mol we want to destroy,
if obj.geomContainer:
obj.geomContainer.delete()
# create remove object event
event = DeleteObjectEvent(objects=[obj])
self.dispatchEvent(event)
def addColorMap(self, colorMap):
from DejaVu.colorMap import ColorMap
assert isinstance(colorMap, ColorMap)
if self.colorMaps.has_key('colorMap.name'):
warnings.warn('invalid attemp to replace an existing colormap')
else:
self.colorMaps[colorMap.name] = colorMap
def addCommandProxy(self, commandProxy):
"""To make startup time faster this function add GUI elements without
importing and loading the full dependiencies for a command
"""
if self.hasGui:
gui = commandProxy.gui
if gui is not None:
gui.register(self, commandProxy)
gui.registered = True
def addCommand(self, command, name, gui=None):
"""
Add a command to a viewer.
arguments:
command: Command instance
name: string
gui: optional CommandGUI object
objectType: optional type of object for which we need to add geoms
geomDescr: optional dictionary of 'name:objectType' items
name is used to create an alias for the command in the viewer
if a gui is specified, call gui.register to add the gui to the viewer
"""
#print "addCommand", name, command
assert isinstance(command, Command)
# happens because of dependencies
if name in self.commands.keys():
return self.commands[name]
error = self.tryto(command.checkDependencies, self)
if error=='ERROR':
print '\nWARNING: dependency check failed for command %s' % name
return
## def download_cb():
## import os
## os.system('netscape http://www.scripps.edu/pub/olson-web/people/scoon/login.html &')
## def Ok_cb(idf):
## idf.form.destroy()
## tb = traceback.extract_tb(sys.exc_traceback)
## from gui import InputFormDescr, CallBackFunction
## import Tkinter
## idf = InputFormDescr("Missing dependencies !")
## idf.append({'widgetType': Tkinter.Label,
## 'text':"%s can't be loaded, needs %s module"
## % (tb[1][-1][7:],command.__class__.__name__),
## 'gridcfg':{'columnspan':2}})
## idf.append({'widgetType':Tkinter.Button, 'text':'OK',
## 'command':CallBackFunction(Ok_cb, idf),
## 'gridcfg':{'sticky':Tkinter.W+Tkinter.E}})
## idf.append({'widgetType':Tkinter.Button, 'text':'Download',
## 'command':download_cb,
## 'gridcfg':{'row':-1, 'sticky':Tkinter.W+Tkinter.E,
## 'columnspan':5 }})
## form = self.getUserInput(idf, modal=0, blocking=0)
## self.warningMsg(title = "Missing dependencies !",
## message = "%s can't be loaded, needs %s module"
## % (tb[1][-1][7:],command.__class__.__name__))
## return
command.vf = self
name = string.strip(name)
name = string.replace(name, ' ', '_')
self.commands[name] = command
command.name=name
command.undoMenuString=name # string used to change menu entry for Undo
command.undoMenuStringPrefix='' # prefix used to change menu entry for Undo
setattr(self, name, command)
#exec ( 'self.%s = command' % name )
if self.hasGui:
if gui is not None:
assert isinstance(gui, CommandGUI)
gui.register(self, command)
gui.registered = True
#call the onAddCmdToViewer method of the new command
command.onAddCmdToViewer()
for c in self.commands.values():
c.onAddNewCmd(command)
#if hasattr(command, 'onAddObjectToViewer'):
# if callable(command.onAddObjectToViewer):
# self.cmdsWithOnAddObj.append(command)
# for o in self.objects:
# command.onAddObjectToViewer(o)
if hasattr(command, 'onRemoveObjectFromViewer'):
if callable(command.onRemoveObjectFromViewer):
self.cmdsWithOnRemoveObj.append(command)
if hasattr(command, 'onExitFromViewer'):
if callable(command.onExitFromViewer):
self.cmdsWithOnExit.append(command)
def updateGeomContainers(self, objectType, geomDescr):
"""To be called when a new command that requires geometry is add to
a viewer. This method loops over existing objects to create the
required geometry for already existing objects"""
for o in self.objects:
if not isinstance(object, objectType): continue
o.geomContainer.addGeom( geomDescr )
def askFileOpen(self, idir=None, ifile=None, types=None, title='Open',
relative=True, parent=None, multiple=False):
"""filename <- askFileOpen( idir, ifile, types, title)
if the viewer is run with a gui this function displays a file browser
else it askes for a file name
idir: optional inital directory
ifile: optional initial filename
types: list of tuples [('PDB files','*.pdb'),]
title: widget's title
relative: when set to True the file name is realtive to the directory
where the application has been started
multiple: allow selecting multiple files
returns: a filename ot None if the Cancel button
"""
if self.hasGui:
if parent:
file = self.GUI.askFileOpen(parent, idir=idir, ifile=ifile,
types=types, title=title,
multiple=multiple)
else:
file = self.GUI.askFileOpen(
self.GUI.ROOT, idir=idir, ifile=ifile,
types=types, title=title, multiple=multiple)
if file is () or file is None: # this is returned if one click on the file list and
# then clicks Cancel
return
else:
default = ''
if idir: default = idir
if ifile: default = os.path.join( default, ifile )
file = raw_input("file name [%s] :"%default)
if file=='':
if default != '' and os.path.exists(file):
file = default
if multiple is False:
fpath,fname = os.path.split(file)
if relative and file and os.path.abspath(os.path.curdir) == fpath:
file = os.path.join(
os.path.curdir,
file[len(os.path.abspath(os.path.curdir))+1:])
return file
else:
files = []
for f in file:
fpath,fname = os.path.split(f)
if relative and f and os.path.abspath(os.path.curdir) == fpath:
f = os.path.join(os.path.curdir,
f[len(os.path.abspath(os.path.curdir))+1:])
files.append(f)
return files
def askFileSave(self, idir=None, ifile=None, types=None, title='Save',
relative=True, defaultextension=None):
if self.hasGui:
file = self.GUI.askFileSave(self.GUI.ROOT, idir=idir, ifile=ifile,
types=types, title=title,
defaultextension=defaultextension)
if file is () or file is None: # this is returned if one clcik on the file list and
# then clicks Cancel
return
else:
default = ''
if idir: default = idir
if ifile: default = os.path.join( default, ifile )
file = raw_input("file name [%s] :"%default)
if file=='':
if default != '' and os.path.exists(file):
file = default
fpath,fname = os.path.split(file)
if relative and file and os.path.abspath(os.path.curdir) == fpath:
file = os.path.join(os.path.curdir,
file[len(os.path.abspath(os.path.curdir))+1:])
return file
def setLogMode(self, name, oldval, newval):
"Sets the Lig Mode"
self.logMode = newval
# open log file for all commands
if self.logMode == 'unique':
import time
t = time.localtime(time.time())
fname1 = 'mvAll_%04d-%02d-%02d_%02d-%02d-%02d.log.py'%(t[0],t[1],t[2],t[3],t[4],t[5])
fname1 = os.path.join(self.rcFolder, fname1)
if self.hasGui:
self.GUI.ROOT.after_idle(self.clearOldLogs)
elif self.logMode == 'overwrite':
fname1 = os.path.join(self.rcFolder, 'mvAll.log.py')
if self.logMode != 'no':
flag = self.tryOpenFileInWrite(fname1)
while flag == 0:
idf = InputFormDescr(title = 'Directory not writable ...')
variable = Tkinter.StringVar()
idf.append({'name':'noLog','widgetType': Tkinter.Radiobutton,
'text':'noLog','variable':variable,
'value':'noLog','defaultValue':'noLog',
'gridcfg':{'sticky':Tkinter.W}})
idf.append({'name':'browse','widgetType': 'SaveButton',
'typeofwidget':Tkinter.Radiobutton,
'types':[ ('Python Files', '*.py')],
'title':'Choose a log File...',
'text':'browse',
'variable':variable,
'defaultValue':'noLog',
'value':'browse',
'gridcfg':{'sticky':Tkinter.W}})
self.GUI.ROOT.deiconify()
self.GUI.ROOT.update()
result = self.getUserInput(idf)
if result == {}:
self.GUI.ROOT.destroy()
return
elif result['noLog'] == 'noLog':
self.logMode = 'no'
flag = 1
elif result['noLog'] == 'browse' and result.has_key('browse'):
assert not result['browse'] in ['']
flag = self.tryOpenFileInWrite(result['browse'])
elif result['noLog'] == 'browse' and not result.has_key('browse'):
print "you didn't enter a proper file name try again"
flag = 0
def setWarningMsgFormat(self, name, oldval, newval):
""" newval can be either 'pop-up' or 'printed'"""
self.messageFormat = newval
def warningMsg(self, msg, title='WARNING: ', parent = None):
"""None <- warningMsg(msg)"""
if type(title) is not types.StringType:
title = 'WARNING: '
if self.hasGui and self.messageFormat=='pop-up':
tkMessageBox.showwarning(title, msg,parent = parent)
else:
sys.stdout.write(title+msg+'\n')
def askOkCancelMsg(self, msg):
"""None <- okCancelMsg(msg)"""
if self.hasGui:
return tkMessageBox.askyesno('expand selection', msg)
else:
val = raw_input('anser [0]/1: '+msg+'\n')
if val=='1': return 1
else: return 0
## FIXME .. do we need this ?
def errorMsg(self, msg, errtype=RuntimeError):
"""None <- errorMsg(errorType, msg)"""
if self.hasGui:
tkMessageBox.showerror(msg)
raise errtype(msg)
def getUserInput(self, formDescription, master=None, root=None,
modal=0, blocking=1,
defaultDirection = 'row', closeWithWindow = 1,
okCfg={'text':'OK'}, cancelCfg={'text':'Cancel'},
initFunc=None, scrolledFrame=0, width=None, height=None,
okcancel=1, onDestroy = None,
postCreationFunc=None, postUsingFormFunc=None):
"""val[] <- getUserInput(formDescription)
Returns a list of values obtained either from an InputForm or by
prompting the user for values
"""
## from gui import InputForm, InputFormDescr
from mglutil.gui.InputForm.Tk.gui import InputForm, InputFormDescr
assert isinstance(formDescription, InputFormDescr)
if self.hasGui:
if master==None:
master = self.GUI.ROOT
#root = self.GUI.getCmdsParamsMaster()
#if not postCreationFunc:
# postCreationFunc = self.GUI.getAfterCreatingFormFunc()
#if not postUsingFormFunc:
# postUsingFormFunc = self.GUI.getAfterUsingFormFunc()
form = InputForm(master, root, formDescription,
modal=modal, blocking=blocking,
defaultDirection=defaultDirection,
closeWithWindow=closeWithWindow,
okCfg=okCfg, cancelCfg=cancelCfg,
initFunc=initFunc, scrolledFrame=scrolledFrame,
width=width, height=height,
okcancel=okcancel, onDestroy=onDestroy)
if form.ownsRoot:
geom = form.root.geometry()
# make sure the upper left dorner is visible
w = string.split(geom, '+')
changepos = 0
if w[1][0]=='-':
posx = '+50'
changepos=1
else:
posx = '+'+w[1]
if w[2][0]=='-':
posy ='+50'
changepos=1
else:
posy = '+'+w[2]
if changepos:
form.root.geometry(posx+posy)
if postCreationFunc:
postCreationFunc(form.root)
if not (modal or blocking):
return form
else:
values = form.go()
if postUsingFormFunc:
postUsingFormFunc(form.root)
return values
else:
self.warningMsg("nogui InputForm not yet implemented")
def transformedCoordinatesWithInstances(self, hits):
""" hist is pick.hits = {geom: [(vertexInd, intance),...]}
This function will use the instance information to return a list of transformed
coordinates
"""
# FIXME this is in DejaVu.VIewer and should go away here
vt = []
for geom, values in hits.items():
coords = geom.vertexSet.vertices.array
for vert, instance in values:
M = geom.GetMatrix(geom.LastParentBeforeRoot(), instance[1:])
pt = coords[vert]
ptx = M[0][0]*pt[0]+M[0][1]*pt[1]+M[0][2]*pt[2]+M[0][3]
pty = M[1][0]*pt[0]+M[1][1]*pt[1]+M[1][2]*pt[2]+M[1][3]
ptz = M[2][0]*pt[0]+M[2][1]*pt[1]+M[2][2]*pt[2]+M[2][3]
vt.append( (ptx, pty, ptz) )
return vt
def clearOldLogs(self):
currentTime = time.time()
for file in glob.glob(os.path.join(self.rcFolder, "*.log.py")):
stats = os.stat(file)
if currentTime - stats[8] > 3000000: #~month
os.remove(file)
if __name__ == '__main__':
v = ViewerFramework()
import pdb
| 7,289 | 198 | 751 |
0b0ea4d141447e0f90defb3e572d86ff6e3fe1d9 | 1,033 | py | Python | 4_web-scraping/04_02/fx.py | scaotravis/Data-Ingestion | dec4fcc024680ff264ef215579f2e8c4dd898fce | [
"MIT"
] | 1 | 2021-02-26T13:12:22.000Z | 2021-02-26T13:12:22.000Z | 4_web-scraping/04_02/fx.py | scaotravis/Data-Ingestion | dec4fcc024680ff264ef215579f2e8c4dd898fce | [
"MIT"
] | null | null | null | 4_web-scraping/04_02/fx.py | scaotravis/Data-Ingestion | dec4fcc024680ff264ef215579f2e8c4dd898fce | [
"MIT"
] | null | null | null | """Parsing HTML with BeautifulSoup"""
from datetime import datetime
from bs4 import BeautifulSoup
def parse_html(html):
"""Parse FX html, return date and dict of {symbol -> rate}"""
soup = BeautifulSoup(html, 'html.parser')
# <h4>Date: <i class="date">2019-11-11</i></h4>
i = soup('i', {'class': 'date'})
if not i:
raise ValueError('cannot find date')
date = datetime.strptime(i[0].text, '%Y-%m-%d')
rates = {}
for tr in soup('tr'):
# <tr>
# <td><i class="fas fa-pound-sign" data-toggle="tooltip"
# title="GBP"></i></td>
# <td>0.83</td>
# </tr>
symbol_td, rate_td = tr('td')
symbol = symbol_td('i')[0]['title']
rate = float(rate_td.text)
rates[symbol] = rate
return date, rates
if __name__ == '__main__':
with open('fx.html') as fp:
html = fp.read()
date, rates = parse_html(html)
print(f'date: {date}')
for symbol, rate in rates.items():
print(f'USD/{symbol} = {rate:f}')
| 25.195122 | 65 | 0.554695 | """Parsing HTML with BeautifulSoup"""
from datetime import datetime
from bs4 import BeautifulSoup
def parse_html(html):
"""Parse FX html, return date and dict of {symbol -> rate}"""
soup = BeautifulSoup(html, 'html.parser')
# <h4>Date: <i class="date">2019-11-11</i></h4>
i = soup('i', {'class': 'date'})
if not i:
raise ValueError('cannot find date')
date = datetime.strptime(i[0].text, '%Y-%m-%d')
rates = {}
for tr in soup('tr'):
# <tr>
# <td><i class="fas fa-pound-sign" data-toggle="tooltip"
# title="GBP"></i></td>
# <td>0.83</td>
# </tr>
symbol_td, rate_td = tr('td')
symbol = symbol_td('i')[0]['title']
rate = float(rate_td.text)
rates[symbol] = rate
return date, rates
if __name__ == '__main__':
with open('fx.html') as fp:
html = fp.read()
date, rates = parse_html(html)
print(f'date: {date}')
for symbol, rate in rates.items():
print(f'USD/{symbol} = {rate:f}')
| 0 | 0 | 0 |
93eb493f45e15941ab5340ed2a3379cb5719924f | 706 | py | Python | 1-Python-Programming-Basics (Sep 2020)/Course-Exercises-and-Exams/04_For-Loop/00-Book-Exercise-5.1-08-Odd-Even-Sum.py | karolinanikolova/SoftUni-Software-Engineering | 7891924956598b11a1e30e2c220457c85c40f064 | [
"MIT"
] | null | null | null | 1-Python-Programming-Basics (Sep 2020)/Course-Exercises-and-Exams/04_For-Loop/00-Book-Exercise-5.1-08-Odd-Even-Sum.py | karolinanikolova/SoftUni-Software-Engineering | 7891924956598b11a1e30e2c220457c85c40f064 | [
"MIT"
] | null | null | null | 1-Python-Programming-Basics (Sep 2020)/Course-Exercises-and-Exams/04_For-Loop/00-Book-Exercise-5.1-08-Odd-Even-Sum.py | karolinanikolova/SoftUni-Software-Engineering | 7891924956598b11a1e30e2c220457c85c40f064 | [
"MIT"
] | null | null | null | # четна / нечетна сума
# Да се напише програма, която въвежда n цели числа и проверява дали сумата на числата на четни позиции е равна на сумата
# на числата на нечетни позиции. При равенство печата "Yes" + сумата, иначе печата "No" + разликата.
# Разликата се изчислява по абсолютна стойност. Форматът на изхода трябва да е като в примерите по-долу.
n = int(input())
sum_even = 0
sum_odd = 0
for i in range(1, n + 1):
current_num = int(input())
if i % 2 == 0:
sum_even = sum_even + current_num
else:
sum_odd = sum_odd + current_num
if sum_even == sum_odd:
print('Yes')
print(f'Sum = {sum_even}')
else:
print('No')
print(f'Diff = {abs(sum_even - sum_odd)}')
| 29.416667 | 121 | 0.668555 | # четна / нечетна сума
# Да се напише програма, която въвежда n цели числа и проверява дали сумата на числата на четни позиции е равна на сумата
# на числата на нечетни позиции. При равенство печата "Yes" + сумата, иначе печата "No" + разликата.
# Разликата се изчислява по абсолютна стойност. Форматът на изхода трябва да е като в примерите по-долу.
n = int(input())
sum_even = 0
sum_odd = 0
for i in range(1, n + 1):
current_num = int(input())
if i % 2 == 0:
sum_even = sum_even + current_num
else:
sum_odd = sum_odd + current_num
if sum_even == sum_odd:
print('Yes')
print(f'Sum = {sum_even}')
else:
print('No')
print(f'Diff = {abs(sum_even - sum_odd)}')
| 0 | 0 | 0 |
85613e1836186b23c7151266b7a97e5cd634efa3 | 29 | py | Python | terrascript/softlayer/d.py | hugovk/python-terrascript | 08fe185904a70246822f5cfbdc9e64e9769ec494 | [
"BSD-2-Clause"
] | 507 | 2017-07-26T02:58:38.000Z | 2022-01-21T12:35:13.000Z | terrascript/softlayer/d.py | hugovk/python-terrascript | 08fe185904a70246822f5cfbdc9e64e9769ec494 | [
"BSD-2-Clause"
] | 135 | 2017-07-20T12:01:59.000Z | 2021-10-04T22:25:40.000Z | terrascript/softlayer/d.py | hugovk/python-terrascript | 08fe185904a70246822f5cfbdc9e64e9769ec494 | [
"BSD-2-Clause"
] | 81 | 2018-02-20T17:55:28.000Z | 2022-01-31T07:08:40.000Z | # terrascript/softlayer/d.py
| 14.5 | 28 | 0.793103 | # terrascript/softlayer/d.py
| 0 | 0 | 0 |
2c69ec7773598c5f8a3a90135beee7b6b38da4ad | 745 | py | Python | setup.py | yonghoonlee/pyMaxPro-lite | 011a149e2424251645b31e7c290c7bb94646941b | [
"BSD-3-Clause"
] | 2 | 2021-11-17T14:57:24.000Z | 2021-11-17T14:57:27.000Z | setup.py | yonghoonlee/pyMaxPro_lite | 011a149e2424251645b31e7c290c7bb94646941b | [
"BSD-3-Clause"
] | null | null | null | setup.py | yonghoonlee/pyMaxPro_lite | 011a149e2424251645b31e7c290c7bb94646941b | [
"BSD-3-Clause"
] | null | null | null | import os
from setuptools import find_packages
from numpy.distutils.core import setup
base_dir = os.path.dirname(__file__)
src_dir = os.path.join(base_dir, "pymaxpro_lite")
about = {}
with open(os.path.join(src_dir, "__about__.py")) as f:
exec(f.read(), about)
pkgs = find_packages()
if __name__ == "__main__":
metadata = dict(
name = about["__title__"],
version = about["__version__"],
description = about["__description__"],
author = about["__author__"],
license = about["__license__"],
url = about["__uri__"],
packages = pkgs,
install_requires = ['numpy', 'scipy'],
python_requires = '>=3.6',
)
setup(**metadata)
| 26.607143 | 54 | 0.597315 | import os
from setuptools import find_packages
from numpy.distutils.core import setup
base_dir = os.path.dirname(__file__)
src_dir = os.path.join(base_dir, "pymaxpro_lite")
about = {}
with open(os.path.join(src_dir, "__about__.py")) as f:
exec(f.read(), about)
pkgs = find_packages()
if __name__ == "__main__":
metadata = dict(
name = about["__title__"],
version = about["__version__"],
description = about["__description__"],
author = about["__author__"],
license = about["__license__"],
url = about["__uri__"],
packages = pkgs,
install_requires = ['numpy', 'scipy'],
python_requires = '>=3.6',
)
setup(**metadata)
| 0 | 0 | 0 |
d0370a0ce2b2d3b1b3c3b027875135b72d85974e | 5,342 | py | Python | functionaltests/client/v1/smoke/test_orders.py | mail2nsrajesh/python-barbicanclient | 439ee25dc6c998e5571022ce0094a10c2611d717 | [
"Apache-2.0"
] | 35 | 2015-01-29T20:10:47.000Z | 2022-02-28T12:39:06.000Z | functionaltests/client/v1/smoke/test_orders.py | mail2nsrajesh/python-barbicanclient | 439ee25dc6c998e5571022ce0094a10c2611d717 | [
"Apache-2.0"
] | null | null | null | functionaltests/client/v1/smoke/test_orders.py | mail2nsrajesh/python-barbicanclient | 439ee25dc6c998e5571022ce0094a10c2611d717 | [
"Apache-2.0"
] | 19 | 2015-01-19T08:32:17.000Z | 2021-01-26T12:19:52.000Z | # Copyright (c) 2015 Rackspace, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from testtools import testcase
from functionaltests.client import base
from functionaltests.common import cleanup
from functionaltests import utils
order_create_key_data = {
"name": "barbican functional test secret name",
"algorithm": "aes",
"bit_length": 256,
"mode": "cbc",
"payload_content_type": "application/octet-stream",
}
# Any field with None will be created in the model with None as the value
# but will be omitted in the final request (via the requests package)
# to the server.
#
# Given that fact, order_create_nones_data is effectively an empty json request
# to the server.
order_create_nones_data = {
'type': None,
"meta": {
"name": None,
"algorithm": None,
"bit_length": None,
"mode": None,
"payload_content_type": None,
}
}
@utils.parameterized_test_case
| 32.573171 | 79 | 0.646574 | # Copyright (c) 2015 Rackspace, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from testtools import testcase
from functionaltests.client import base
from functionaltests.common import cleanup
from functionaltests import utils
order_create_key_data = {
"name": "barbican functional test secret name",
"algorithm": "aes",
"bit_length": 256,
"mode": "cbc",
"payload_content_type": "application/octet-stream",
}
# Any field with None will be created in the model with None as the value
# but will be omitted in the final request (via the requests package)
# to the server.
#
# Given that fact, order_create_nones_data is effectively an empty json request
# to the server.
order_create_nones_data = {
'type': None,
"meta": {
"name": None,
"algorithm": None,
"bit_length": None,
"mode": None,
"payload_content_type": None,
}
}
@utils.parameterized_test_case
class OrdersTestCase(base.TestCase):
def setUp(self):
super(OrdersTestCase, self).setUp()
self.cleanup = cleanup.CleanUp(self.barbicanclient)
def tearDown(self):
self.cleanup.delete_all_entities()
super(OrdersTestCase, self).tearDown()
@testcase.attr('positive')
def test_create_order_defaults(self):
"""Covers simple order creation."""
order = self.barbicanclient.orders.create_key(
**order_create_key_data)
order_ref = self.cleanup.add_entity(order)
self.assertIsNotNone(order_ref)
@testcase.attr('positive')
def test_get_order_defaults_metadata(self):
"""Covers order metadata.
Assumes that the order status will be active or pending.
"""
# first create an order
order = self.barbicanclient.orders.create_key(
**order_create_key_data)
order_ref = self.cleanup.add_entity(order)
# verify that the order was created successfully
self.assertIsNotNone(order_ref)
# given the order href, retrieve the order
order_resp = self.barbicanclient.orders.get(order_ref)
# verify that the get was successful
self.assertTrue(order_resp.status == "ACTIVE" or
order_resp.status == "PENDING")
# verify the metadata
self.assertEqual(order.name,
order_resp.name)
self.assertEqual(order.mode,
order_resp.mode)
self.assertEqual(order.algorithm,
order_resp.algorithm)
self.assertEqual(order.bit_length,
order_resp.bit_length)
self.assertEqual(order.payload_content_type,
order_resp.payload_content_type)
@testcase.attr('positive')
def test_get_order_defaults(self):
"""Covers getting an order.
Assumes that the order status will be active or pending.
"""
# create an order
order = self.barbicanclient.orders.create_key(
**order_create_key_data)
order_ref = self.cleanup.add_entity(order)
self.assertIsNotNone(order_ref)
# get the order
order_resp = self.barbicanclient.orders.get(order_ref)
# verify the order
self.assertIsNotNone(order_resp.order_ref)
self.assertEqual('key', order_resp._type)
self.assertTrue(order_resp.status == "ACTIVE" or
order_resp.status == "PENDING")
if order_resp.status == "ACTIVE":
self.assertIsNotNone(order_resp.secret_ref)
@testcase.attr('positive')
def test_delete_order_defaults(self):
"""Covers simple order deletion."""
# create an order
order = self.barbicanclient.orders.create_key(
**order_create_key_data)
order_ref = order.submit()
secret_ref = self.barbicanclient.orders.get(order_ref).secret_ref
# delete the order
delete_resp = self.barbicanclient.orders.delete(order_ref)
self.assertIsNone(delete_resp)
# delete the secret
self.barbicanclient.secrets.delete(secret_ref)
@testcase.attr('positive')
def test_get_orders_defaults(self):
"""Covers getting a list of orders."""
limit = 7
offset = 0
total = 10
# create the orders
for i in range(0, total + 1):
order = self.barbicanclient.orders.create_key(
**order_create_key_data)
order_ref = self.cleanup.add_entity(order)
self.assertIsNotNone(order_ref)
# get a list of orders
orders_list = self.barbicanclient.orders.list(limit=limit,
offset=offset)
# verify that the get for the list was successful
self.assertEqual(limit, len(orders_list))
| 187 | 3,697 | 22 |
451bcfc900601e7113ff912c1afc38f859714e33 | 9,872 | py | Python | src/anim_test.py | MaxThom/SpectrumLight | 34fcc670ccb35c956518453ceef94bd17611c771 | [
"MIT"
] | null | null | null | src/anim_test.py | MaxThom/SpectrumLight | 34fcc670ccb35c956518453ceef94bd17611c771 | [
"MIT"
] | null | null | null | src/anim_test.py | MaxThom/SpectrumLight | 34fcc670ccb35c956518453ceef94bd17611c771 | [
"MIT"
] | null | null | null | from rpi_ws281x import Color, PixelStrip, ws
import time
from threading import Thread
import random
from PIL import Image
import numpy as np
#import cv2
from skimage import io, transform
#from skimage.transform import resize, rescale
# LED strip configuration:
LED_COUNT = 2304 # Number of LED pixels.
LED_PIN = 18 # GPIO pin connected to the pixels (must support PWM!).
LED_FREQ_HZ = 800000 # LED signal frequency in hertz (usually 800khz)
LED_DMA = 10 # DMA channel to use for generating signal (try 10)
LED_BRIGHTNESS = 20 # Set to 0 for darkest and 255 for brightest
LED_INVERT = False # True to invert the signal (when using NPN transistor level shift)
LED_CHANNEL = 0
##LED_STRIP = ws.SK6812_STRIP_RGBW
#LED_STRIP = ws.SK6812W_STRIP
LED_STRIP = ws.WS2812_STRIP
#
##frame = utils.get_colorless_array_2d(self.width, self.height)
#for ix,iy,iz in np.ndindex(image_resized.shape):
# image_resized[ix,iy] = tuple(image_resized[ix,iy])
#print(image_resized)
#io.imshow(image_resized)
#img = Image.fromarray(image_resized, 'RGB')
#img.show()
#io.imsave(f"../anim_frames_processed/{image_name}", image_resized)
#dsize = (width, height)
#output = cv2.resize(src, dsize)
#cv2.imwrite('../anim_frames/processed.bmp',output)
#im = np.array(Image.open('../anim_frames/anim_test.bmp'))
##im = np.array(im.tolist())
#print(im)
#print(np.shape(im))
#print(im.dtype)
##new_im = im.view(dtype=np.dtype([('x', im.dtype), ('y', im.dtype)]))
##new_im = new_im.reshape(new_im.shape[:-1])
##print(new_im)
#x = np.empty((im.shape[0], im.shape[1]), dtype=tuple)
##x.fill(init_value)
#for ix,iy,iz in np.ndindex(im.shape):
# x[ix,iy] = tuple(im[ix,iy])
# print(tuple(im[ix,iy]))
#print(x)
##arr = misc.imread('../anim_frames/anim_test.bmp') # 640x480x3 array
##print(arr)
##printt(np.shape(arr))
def color_full(strip):
"""Wipe color across display a pixel at a time."""
color = [None] * 9
color[0] = Color(random.randint(0,255), random.randint(0,255), random.randint(0,255))
color[1] = Color(random.randint(0,255), random.randint(0,255), random.randint(0,255))
color[2] = Color(random.randint(0,255), random.randint(0,255), random.randint(0,255))
color[3] = Color(random.randint(0,255), random.randint(0,255), random.randint(0,255))
color[4] = Color(random.randint(0,255), random.randint(0,255), random.randint(0,255))
color[5] = Color(random.randint(0,255), random.randint(0,255), random.randint(0,255))
color[6] = Color(random.randint(0,255), random.randint(0,255), random.randint(0,255))
color[7] = Color(random.randint(0,255), random.randint(0,255), random.randint(0,255))
color[8] = Color(random.randint(0,255), random.randint(0,255), random.randint(0,255))
for i in range(0, strip.numPixels(), 256):
for j in range(i, i+256, 1):
strip.setPixelColor(j, color[i // 256])
strip.show()
def color_wipe(strip, color, wait_ms=50):
"""Wipe color across display a pixel at a time."""
for i in range(strip.numPixels()):
strip.setPixelColor(i, color)
start = time.time()
strip.show()
end = time.time()
print(f"{(end - start) * 1000} ms")
#time.sleep(wait_ms / 1000.0)
def color_wipe_infinite(strip, color, wait_ms=50):
"""Wipe color across display a pixel at a time."""
while True:
color_clear(strip)
for i in range(strip.numPixels()):
strip.setPixelColor(i, color)
#start = time.time()
strip.show()
#end = time.time()
#print(f"{(end - start) * 1000} ms")
#time.sleep(wait_ms / 1000.0)
init_animation() | 35.257143 | 116 | 0.597549 | from rpi_ws281x import Color, PixelStrip, ws
import time
from threading import Thread
import random
from PIL import Image
import numpy as np
#import cv2
from skimage import io, transform
#from skimage.transform import resize, rescale
# LED strip configuration:
LED_COUNT = 2304 # Number of LED pixels.
LED_PIN = 18 # GPIO pin connected to the pixels (must support PWM!).
LED_FREQ_HZ = 800000 # LED signal frequency in hertz (usually 800khz)
LED_DMA = 10 # DMA channel to use for generating signal (try 10)
LED_BRIGHTNESS = 20 # Set to 0 for darkest and 255 for brightest
LED_INVERT = False # True to invert the signal (when using NPN transistor level shift)
LED_CHANNEL = 0
##LED_STRIP = ws.SK6812_STRIP_RGBW
#LED_STRIP = ws.SK6812W_STRIP
LED_STRIP = ws.WS2812_STRIP
def init_animation():
print('> Starting LED animation...')
strip = PixelStrip(LED_COUNT, LED_PIN, LED_FREQ_HZ, LED_DMA, LED_INVERT, LED_BRIGHTNESS, LED_CHANNEL, LED_STRIP)
strip.begin()
load_gif()
#color_clear(strip)
#while True:
# color_wipe(strip, Color(255, 0, 0)) # Red wipe
# color_wipe(strip, Color(0, 255, 0)) # Gree wipe
# color_wipe(strip, Color(0, 0, 255)) # Blue wipe
#
# #color_wipe(strip, Color(0, 0, 0, 255)) # White wipe
def load_gif():
image_name = "banana.gif_fit_24_24"
frames = []
try:
with open(f'../anim_frames_processed/{image_name}', 'rb') as f:
while True:
frames.append(np.load(f, allow_pickle=True))
except OSError:
pass # end of sequence
print(frames)
def get_image_gif():
image_name = "banana.gif"
im = Image.open(f'../anim_frames/{image_name}')
print(im.n_frames)
im_array = np.asarray(im)
try:
print(im)
while 1:
im.seek(im.tell()+1)
image = im.convert('RGB')
image = image.resize((5, 5))
print(image)
im_array = np.asarray(image)
print(im_array)
except EOFError:
pass # end of sequence
def get_image_rgb_array():
print("loading")
image_name = "banana.gif"
#im = io.imread(f'../anim_frames/{image_name}')
im = Image.open(f'../anim_frames/{image_name}')
im_array = np.asarray(im)
print(im_array)
print(im)
print(np.shape(im))
print("loading done")
MAX_WIDTH = 24
MAX_HEIGHT = 48
original_width = int(im.size[1])
original_height = int(im.size[0])
if original_width > original_height:
width = int(MAX_WIDTH)
height = int(original_height / (original_width / MAX_WIDTH))
if height > MAX_HEIGHT:
width = int(width / (height / MAX_HEIGHT))
height = int(MAX_HEIGHT)
else:
height = int(MAX_HEIGHT)
width = int(original_width / (original_height / MAX_HEIGHT))
if width > MAX_WIDTH:
height = int(height / (width / MAX_WIDTH))
width = int(MAX_WIDTH)
print(width, height)
im1 = im.resize((width, height), Image.LANCZOS)
im1.show()
print(im1)
im1_array = np.asarray(im1)
print(im1_array)
#with open(f'../anim_frames_processed/{image_name}', 'wb') as f:
# np.save(f, im1_array)
#img_rescaled = transform.rescale(im, 0.16, anti_aliasing=False)
#print(img_rescaled.shape)
#print(img_rescaled)
#image_resized = transform.resize(im, (width, height), anti_aliasing=True)
#print(image_resized.shape)
#image_resized = 255 * image_resized
#image_resized = image_resized.astype(np.uint8)
#print(image_resized)
#
##frame = utils.get_colorless_array_2d(self.width, self.height)
#for ix,iy,iz in np.ndindex(image_resized.shape):
# image_resized[ix,iy] = tuple(image_resized[ix,iy])
#print(image_resized)
#io.imshow(image_resized)
#img = Image.fromarray(image_resized, 'RGB')
#img.show()
#io.imsave(f"../anim_frames_processed/{image_name}", image_resized)
#dsize = (width, height)
#output = cv2.resize(src, dsize)
#cv2.imwrite('../anim_frames/processed.bmp',output)
#im = np.array(Image.open('../anim_frames/anim_test.bmp'))
##im = np.array(im.tolist())
#print(im)
#print(np.shape(im))
#print(im.dtype)
##new_im = im.view(dtype=np.dtype([('x', im.dtype), ('y', im.dtype)]))
##new_im = new_im.reshape(new_im.shape[:-1])
##print(new_im)
#x = np.empty((im.shape[0], im.shape[1]), dtype=tuple)
##x.fill(init_value)
#for ix,iy,iz in np.ndindex(im.shape):
# x[ix,iy] = tuple(im[ix,iy])
# print(tuple(im[ix,iy]))
#print(x)
##arr = misc.imread('../anim_frames/anim_test.bmp') # 640x480x3 array
##print(arr)
##printt(np.shape(arr))
def color_full(strip):
"""Wipe color across display a pixel at a time."""
color = [None] * 9
color[0] = Color(random.randint(0,255), random.randint(0,255), random.randint(0,255))
color[1] = Color(random.randint(0,255), random.randint(0,255), random.randint(0,255))
color[2] = Color(random.randint(0,255), random.randint(0,255), random.randint(0,255))
color[3] = Color(random.randint(0,255), random.randint(0,255), random.randint(0,255))
color[4] = Color(random.randint(0,255), random.randint(0,255), random.randint(0,255))
color[5] = Color(random.randint(0,255), random.randint(0,255), random.randint(0,255))
color[6] = Color(random.randint(0,255), random.randint(0,255), random.randint(0,255))
color[7] = Color(random.randint(0,255), random.randint(0,255), random.randint(0,255))
color[8] = Color(random.randint(0,255), random.randint(0,255), random.randint(0,255))
for i in range(0, strip.numPixels(), 256):
for j in range(i, i+256, 1):
strip.setPixelColor(j, color[i // 256])
strip.show()
def color_wipe(strip, color, wait_ms=50):
"""Wipe color across display a pixel at a time."""
for i in range(strip.numPixels()):
strip.setPixelColor(i, color)
start = time.time()
strip.show()
end = time.time()
print(f"{(end - start) * 1000} ms")
#time.sleep(wait_ms / 1000.0)
def color_wipe_infinite(strip, color, wait_ms=50):
"""Wipe color across display a pixel at a time."""
while True:
color_clear(strip)
for i in range(strip.numPixels()):
strip.setPixelColor(i, color)
#start = time.time()
strip.show()
#end = time.time()
#print(f"{(end - start) * 1000} ms")
#time.sleep(wait_ms / 1000.0)
def color_wipe_ms_triple(strip1, strip2, strip3):
while True:
color_clear(strip1)
color_clear(strip2)
color_clear(strip3)
for i in range(0, strip1.numPixels(), 3):
strip1.setPixelColor(i, Color(255, 0, 0))
strip2.setPixelColor(i+1, Color(0, 255, 0))
strip3.setPixelColor(i+2, Color(0, 0, 255))
strip1.show()
strip2.show()
strip3.show()
#for i in range(1, strip1.numPixels(), 3):
# strip1.setPixelColor(i, Color(0, 255, 0))
# strip2.setPixelColor(i+1, Color(0, 0, 255))
# strip3.setPixelColor(i+2, Color(255, 0, 0))
# strip1.show()
# strip2.show()
# strip3.show()
#for i in range(2, strip1.numPixels(), 3):
# strip1.setPixelColor(i, Color(0, 0, 255))
# strip2.setPixelColor(i+1, Color(255, 0, 0))
# strip3.setPixelColor(i+2, Color(0, 255, 0))
# strip1.show()
# strip2.show()
# strip3.show()
def color_wipe_ms(strip1, strip2, strip3):
while True:
color_clear(strip1)
color_clear(strip2)
color_clear(strip3)
for i in range(strip1.numPixels()):
strip1.setPixelColor(i, Color(255, 0, 0))
strip2.setPixelColor(i, Color(0, 255, 0))
strip3.setPixelColor(i, Color(0, 0, 255))
start = time.time()
strip1.show()
print(f"1: {(time.time() - start) * 1000} ms")
strip2.show()
print(f"2: {(time.time() - start) * 1000} ms")
strip3.show()
print(f"3: {(time.time() - start) * 1000} ms")
def color_blink_ms(strip1, strip2, strip3):
while True:
color_clear(strip1)
color_clear(strip2)
color_clear(strip3)
for i in range(strip1.numPixels()):
strip1.setPixelColor(i, Color(255, 0, 0))
strip2.setPixelColor(i, Color(0, 255, 0))
strip3.setPixelColor(i, Color(0, 0, 255))
#start = time.time()
#strip1.show()
#print(f"1: {(time.time() - start) * 1000} ms")
#strip2.show()
#print(f"2: {(time.time() - start) * 1000} ms")
#strip3.show()
#print(f"3: {(time.time() - start) * 1000} ms")
th_strip1 = Thread(target=strip1.show()) #, args=(strip, Color(0, 255, 0))
th_strip2 = Thread(target=strip2.show()) #, args=(strip, Color(0, 255, 0))
th_strip3 = Thread(target=strip3.show()) #, args=(strip, Color(0, 255, 0))
th_strip1.start()
th_strip2.start()
th_strip3.start()
th_strip1.join()
th_strip2.join()
th_strip3.join()
time.sleep(100/1000)
def color_clear(strip):
for i in range(strip.numPixels()):
strip.setPixelColor(i, Color(0, 0, 0))
strip.show()
def luka_animation(strip):
strip.setPixelColor(0, Color(170, 0, 170, 0))
strip.setPixelColor(1, Color(170, 0, 170, 0))
strip.setPixelColor(2, Color(170, 0, 170, 0))
strip.setPixelColor(4, Color(0, 50, 75, 0))
strip.setPixelColor(5, Color(130, 25, 70, 0))
strip.setPixelColor(6, Color(10, 255, 70, 200))
strip.show()
init_animation() | 5,884 | 0 | 207 |
dbf32bdc2869b3fc95d7331db281c1acc339c283 | 332 | py | Python | ems_auth/tests/auth_objects.py | Atwinenickson/lendsuphumanresourcemanagement | b46df164d59a4e94300376d679e07bd9a60d6343 | [
"MIT",
"Unlicense"
] | 36 | 2019-11-26T11:46:32.000Z | 2022-02-17T13:18:18.000Z | ems_auth/tests/auth_objects.py | Atwinenickson/lendsuphumanresourcemanagement | b46df164d59a4e94300376d679e07bd9a60d6343 | [
"MIT",
"Unlicense"
] | 13 | 2020-02-14T09:30:16.000Z | 2022-03-12T00:58:09.000Z | ems_auth/tests/auth_objects.py | Atwinenickson/lendsuphumanresourcemanagement | b46df164d59a4e94300376d679e07bd9a60d6343 | [
"MIT",
"Unlicense"
] | 16 | 2019-06-14T12:11:29.000Z | 2022-02-14T15:16:07.000Z | from django.utils import timezone
from employees.models import Employee
| 20.75 | 42 | 0.662651 | from django.utils import timezone
from employees.models import Employee
def get_employee(currency):
employee = Employee.objects.create(
first_name="Test",
last_name="Employee",
start_date=timezone.now().today(),
dob=timezone.now().today(),
currency=currency
)
return employee
| 234 | 0 | 23 |
fe37078b1662582839cdd656d08771935b757fd4 | 2,256 | py | Python | components/channel.py | MizaGBF/MizaBOT | 416043f3eef4365611ae2516293f741e27862623 | [
"MIT"
] | 19 | 2019-05-05T17:43:26.000Z | 2022-03-25T12:25:22.000Z | components/channel.py | MizaGBF/MizaBOT | 416043f3eef4365611ae2516293f741e27862623 | [
"MIT"
] | 12 | 2019-08-10T04:13:18.000Z | 2021-12-13T13:34:11.000Z | components/channel.py | MizaGBF/MizaBOT | 416043f3eef4365611ae2516293f741e27862623 | [
"MIT"
] | 12 | 2019-08-09T10:38:46.000Z | 2022-01-20T21:20:44.000Z |
# ----------------------------------------------------------------------------------------------------------------
# Channel Component
# ----------------------------------------------------------------------------------------------------------------
# This component lets you register channels with a keyword to be later used by the send() function of the bot
# ----------------------------------------------------------------------------------------------------------------
class Channel():
"""set()
Register a channel with a name
Parameters
----------
name: Channel name
id_key: Channel name in config.json
"""
"""setID()
Register a channel with a name
Parameters
----------
name: Channel name
id: Channel id
"""
"""setMultiple()
Register multiple channels
Parameters
----------
channel_list: List of pair [name, id_key or id]
"""
"""get()
Get a registered channel
Returns
----------
discord.Channel: Discord Channel
"""
| 32.695652 | 116 | 0.448582 |
# ----------------------------------------------------------------------------------------------------------------
# Channel Component
# ----------------------------------------------------------------------------------------------------------------
# This component lets you register channels with a keyword to be later used by the send() function of the bot
# ----------------------------------------------------------------------------------------------------------------
class Channel():
def __init__(self, bot):
self.bot = bot
self.cache = {}
def init(self):
self.cache = {}
"""set()
Register a channel with a name
Parameters
----------
name: Channel name
id_key: Channel name in config.json
"""
def set(self, name, id_key : str): # "register" a channel to use with send()
try:
c = self.bot.get_channel(self.bot.data.config['ids'][id_key])
if c is not None: self.cache[name] = c
except:
self.bot.errn += 1
print("Invalid key: {}".format(id_key))
"""setID()
Register a channel with a name
Parameters
----------
name: Channel name
id: Channel id
"""
def setID(self, name, id : int): # same but using an id instead of an id defined in config.json
try:
c = self.bot.get_channel(id)
if c is not None: self.cache[name] = c
except:
self.bot.errn += 1
print("Invalid ID: {}".format(id))
"""setMultiple()
Register multiple channels
Parameters
----------
channel_list: List of pair [name, id_key or id]
"""
def setMultiple(self, channel_list: list): # the above, all in one, format is [[channel_name, channel_id], ...]
for c in channel_list:
if len(c) == 2 and isinstance(c[0], str):
if isinstance(c[1], str): self.set(c[0], c[1])
elif isinstance(c[1], int): self.setID(c[0], c[1])
"""get()
Get a registered channel
Returns
----------
discord.Channel: Discord Channel
"""
def get(self, name):
return self.cache.get(name, None) | 999 | 0 | 163 |
7805cd7d49cc4b86e9dff77d16c6b12bea8344ac | 1,410 | py | Python | flaskr/main.py | nEXO-collaboration/nexo-asset-tracker | 936b31221db5e756240eddcd713091b8c74c8860 | [
"BSD-3-Clause"
] | null | null | null | flaskr/main.py | nEXO-collaboration/nexo-asset-tracker | 936b31221db5e756240eddcd713091b8c74c8860 | [
"BSD-3-Clause"
] | null | null | null | flaskr/main.py | nEXO-collaboration/nexo-asset-tracker | 936b31221db5e756240eddcd713091b8c74c8860 | [
"BSD-3-Clause"
] | null | null | null | from flask import Blueprint, redirect, render_template, request, session, url_for
# from asset_tracker_restapi import asset_tracker_restapi
bp = Blueprint("main", __name__)
@bp.route("/", methods=['GET', 'POST'])
@bp.route("/action", methods=['GET', 'POST'])
@bp.route("/search", methods=['GET','POST'])
| 32.790698 | 125 | 0.644681 | from flask import Blueprint, redirect, render_template, request, session, url_for
# from asset_tracker_restapi import asset_tracker_restapi
bp = Blueprint("main", __name__)
@bp.route("/", methods=['GET', 'POST'])
def index():
jira_username = session.get("jira_username")
if jira_username is None:
return redirect(url_for("auth.login"))
else:
print("Username :", jira_username)
return render_template('main.html')
@bp.route("/action", methods=['GET', 'POST'])
def action():
if request.method == "POST":
if request.form['submit'] == 'Perform Action':
action_id = 0
asset_id = request.form["asset_id"]
asset_action = request.form["asset_action"]
if 'action_id' in request.form:
if request.form["action_id"] != '':
action_id = int(request.form["action_id"])
return redirect(url_for("asset.asset_action", asset_id=asset_id, asset_action=asset_action, action_id=action_id))
elif request.form['submit'] == 'Find Asset':
asset_id = request.form["asset_id"]
return redirect(url_for("asset.asset", asset_id=asset_id))
return render_template('main.html')
@bp.route("/search", methods=['GET','POST'])
def search():
# Probably should add in search functionality one of these days I suppose.
return render_template('main.html')
| 1,032 | 0 | 66 |
a8f188809433b18e800e2b7f543dac18c31aa965 | 1,545 | py | Python | tests/test_util.py | resendislab/corda | 15f4a8e1a046c6191f22e46099dad10aafb1fdce | [
"MIT"
] | 9 | 2017-08-21T09:44:19.000Z | 2021-09-22T12:18:06.000Z | tests/test_util.py | resendislab/corda | 15f4a8e1a046c6191f22e46099dad10aafb1fdce | [
"MIT"
] | 9 | 2017-08-23T15:50:39.000Z | 2021-08-10T17:10:51.000Z | tests/test_util.py | resendislab/corda | 15f4a8e1a046c6191f22e46099dad10aafb1fdce | [
"MIT"
] | 7 | 2017-09-12T12:50:10.000Z | 2021-02-22T18:42:15.000Z | # tests.py
#
# Copyright 2016 Christian Diener <mail[at]cdiener.com>
#
# MIT license. See LICENSE for more information.
import pytest
from corda import reaction_confidence, test_model
from cobra import Model, Reaction, Metabolite
from cobra.manipulation import convert_to_irreversible, revert_to_reversible
if __name__ == '__main__':
pytest.main()
| 28.090909 | 76 | 0.605178 | # tests.py
#
# Copyright 2016 Christian Diener <mail[at]cdiener.com>
#
# MIT license. See LICENSE for more information.
import pytest
from corda import reaction_confidence, test_model
from cobra import Model, Reaction, Metabolite
from cobra.manipulation import convert_to_irreversible, revert_to_reversible
class TestConf:
@pytest.mark.parametrize("case", [
("g1 and g2 or g3", 2), ("g1 and (g2 or g3)", -1),
("g1 or g2 or g4 or g5", 3), ("g3 and g6", 0), ("", 0)
])
def test_confidence(self, case):
vals = {"g1": -1, "g2": 1, "g3": 2, "g4": 3}
conf = reaction_confidence(case[0], vals)
assert conf == case[1]
@pytest.mark.parametrize("case", ["print()", "A + B", "A ^ B"])
def test_eval_safe(self, case):
with pytest.raises(TypeError):
reaction_confidence(case, {})
def test_none(self):
assert reaction_confidence(" ", {}) == 0
class TestMisc:
def test_remove_breaks(self):
model = Model("test model")
A = Metabolite("A")
r = Reaction("r")
r.add_metabolites({A: -1})
r.lower_bound = -1000
r.upper_bound = 1000
model.add_reaction(r)
convert_to_irreversible(model)
model.remove_reactions(["r"])
with pytest.raises(KeyError):
revert_to_reversible(model)
def test_cemet(self):
model = test_model()
assert len(model.reactions) == 60
assert len(model.metabolites) == 43
if __name__ == '__main__':
pytest.main()
| 777 | 308 | 100 |
865b1b0fc99aaa2ae00d600df8e4d6285b2fc4d7 | 172 | py | Python | dkjason/__init__.py | datakortet/dkjason | b4405ce3e710c4f7018e507f135a774d5c851888 | [
"MIT"
] | null | null | null | dkjason/__init__.py | datakortet/dkjason | b4405ce3e710c4f7018e507f135a774d5c851888 | [
"MIT"
] | 4 | 2020-03-28T22:38:04.000Z | 2021-11-15T10:49:33.000Z | dkjason/__init__.py | datakortet/dkjason | b4405ce3e710c4f7018e507f135a774d5c851888 | [
"MIT"
] | null | null | null | """
This module knows how to serialize general object, objects specialized
with a ``__json__()`` method, Django QuerySets, and ``ttcal`` objects.
"""
__version__ = '3.0.4'
| 28.666667 | 70 | 0.715116 | """
This module knows how to serialize general object, objects specialized
with a ``__json__()`` method, Django QuerySets, and ``ttcal`` objects.
"""
__version__ = '3.0.4'
| 0 | 0 | 0 |
76dac9c6aa60a48771a47013e74a942392f8e0e3 | 3,663 | py | Python | insights/parsers/sssd_conf.py | skateman/insights-core | e7cd3001ffc2558757b9e7759dbe27b8b29f4bac | [
"Apache-2.0"
] | 1 | 2021-11-08T16:25:01.000Z | 2021-11-08T16:25:01.000Z | insights/parsers/sssd_conf.py | ahitacat/insights-core | 0ba58dbe5edceef0bd4a74c1caf6b826381ccda5 | [
"Apache-2.0"
] | null | null | null | insights/parsers/sssd_conf.py | ahitacat/insights-core | 0ba58dbe5edceef0bd4a74c1caf6b826381ccda5 | [
"Apache-2.0"
] | null | null | null | """
SSSD_Config - file ``/etc/sssd/sssd.config``
============================================
"""
from insights.core import IniConfigFile
from insights.core.plugins import parser
from insights.specs import Specs
@parser(Specs.sssd_config)
class SSSD_Config(IniConfigFile):
"""
Parse the content of the ``/etc/sssd/sssd.config`` file.
The 'sssd' section must always exist. Within that, the 'domains'
parameter is usually defined to give a comma-separated list of the
domains that sssd is to manage.
The 'sssd' section will define one or more active domains, which are then
configured in the 'domain/{domain}' section of the configuration. These
domains are then available via the 'domains' method, and the configuration
of a domain can be fetched as a dictionary using the 'domain_config' method.
Sample configuration::
[sssd]
config_file_version = 2
# Number of times services should attempt to reconnect in the
# event of a crash or restart before they give up
reconnection_retries = 3
# If a back end is particularly slow you can raise this timeout here
sbus_timeout = 30
services = nss, pam
# SSSD will not start if you do not configure any domains.
# Add new domain configurations as [domain/<NAME>] sections, and
# then add the list of domains (in the order you want them to be
# queried) to the "domains" attribute below and uncomment it.
# domains = LOCAL,LDAP
domains = example.com
debug_level = 9
[nss]
# The following prevents SSSD from searching for the root user/group in
# all domains (you can add here a comma-separated list of system accounts that
# are always going to be /etc/passwd users, or that you want to filter out).
filter_groups = root
filter_users = root
reconnection_retries = 3
[pam]
reconnection_retries = 3
[domain/example.com]
id_provider = ldap
lookup_family_order = ipv4_only
ldap_uri = ldap://ldap.example.com/
ldap_search_base = dc=example,dc=com
enumerate = False
hbase_directory= /home
create_homedir = True
override_homedir = /home/%u
auth_provider = krb5
krb5_server = kerberos.example.com
krb5_realm = EXAMPLE.COM
Example:
>>> type(conf)
<class 'insights.parsers.sssd_conf.SSSD_Config'>
>>> conf.get('nss', 'filter_users')
'root'
>>> conf.getint('pam', 'reconnection_retries')
3
>>> conf.domains
['example.com']
>>> domain = conf.domain_config('example.com')
>>> 'ldap_uri' in domain
True
"""
@property
def domains(self):
"""
Returns the list of domains defined in the 'sssd' section. This is
used to refer to the domain-specific sections of the configuration.
"""
if self.has_option('sssd', 'domains'):
domains = self.get('sssd', 'domains')
if domains:
return domains.split(',')
# Return a blank list if no domains.
return []
def domain_config(self, domain):
"""
Return the configuration dictionary for a specific domain, given as
the raw name as listed in the 'domains' property of the sssd section.
This then looks for the equivalent 'domain/{domain}' section of the
config file.
"""
full_domain = 'domain/' + domain
if full_domain not in self:
return {}
return self.items(full_domain)
| 34.233645 | 86 | 0.620803 | """
SSSD_Config - file ``/etc/sssd/sssd.config``
============================================
"""
from insights.core import IniConfigFile
from insights.core.plugins import parser
from insights.specs import Specs
@parser(Specs.sssd_config)
class SSSD_Config(IniConfigFile):
"""
Parse the content of the ``/etc/sssd/sssd.config`` file.
The 'sssd' section must always exist. Within that, the 'domains'
parameter is usually defined to give a comma-separated list of the
domains that sssd is to manage.
The 'sssd' section will define one or more active domains, which are then
configured in the 'domain/{domain}' section of the configuration. These
domains are then available via the 'domains' method, and the configuration
of a domain can be fetched as a dictionary using the 'domain_config' method.
Sample configuration::
[sssd]
config_file_version = 2
# Number of times services should attempt to reconnect in the
# event of a crash or restart before they give up
reconnection_retries = 3
# If a back end is particularly slow you can raise this timeout here
sbus_timeout = 30
services = nss, pam
# SSSD will not start if you do not configure any domains.
# Add new domain configurations as [domain/<NAME>] sections, and
# then add the list of domains (in the order you want them to be
# queried) to the "domains" attribute below and uncomment it.
# domains = LOCAL,LDAP
domains = example.com
debug_level = 9
[nss]
# The following prevents SSSD from searching for the root user/group in
# all domains (you can add here a comma-separated list of system accounts that
# are always going to be /etc/passwd users, or that you want to filter out).
filter_groups = root
filter_users = root
reconnection_retries = 3
[pam]
reconnection_retries = 3
[domain/example.com]
id_provider = ldap
lookup_family_order = ipv4_only
ldap_uri = ldap://ldap.example.com/
ldap_search_base = dc=example,dc=com
enumerate = False
hbase_directory= /home
create_homedir = True
override_homedir = /home/%u
auth_provider = krb5
krb5_server = kerberos.example.com
krb5_realm = EXAMPLE.COM
Example:
>>> type(conf)
<class 'insights.parsers.sssd_conf.SSSD_Config'>
>>> conf.get('nss', 'filter_users')
'root'
>>> conf.getint('pam', 'reconnection_retries')
3
>>> conf.domains
['example.com']
>>> domain = conf.domain_config('example.com')
>>> 'ldap_uri' in domain
True
"""
@property
def domains(self):
"""
Returns the list of domains defined in the 'sssd' section. This is
used to refer to the domain-specific sections of the configuration.
"""
if self.has_option('sssd', 'domains'):
domains = self.get('sssd', 'domains')
if domains:
return domains.split(',')
# Return a blank list if no domains.
return []
def domain_config(self, domain):
"""
Return the configuration dictionary for a specific domain, given as
the raw name as listed in the 'domains' property of the sssd section.
This then looks for the equivalent 'domain/{domain}' section of the
config file.
"""
full_domain = 'domain/' + domain
if full_domain not in self:
return {}
return self.items(full_domain)
| 0 | 0 | 0 |
0b64e1c456ed5eb9b9d67e8cb18b41d35d79e683 | 1,283 | py | Python | check_main_line.py | y-ich/EOPGA_tools | 0eb6fc13d1236edcd04c4a32e18da8d18bf6f244 | [
"MIT"
] | null | null | null | check_main_line.py | y-ich/EOPGA_tools | 0eb6fc13d1236edcd04c4a32e18da8d18bf6f244 | [
"MIT"
] | null | null | null | check_main_line.py | y-ich/EOPGA_tools | 0eb6fc13d1236edcd04c4a32e18da8d18bf6f244 | [
"MIT"
] | null | null | null | """
check if main line of each SGF is legal.
"""
__author__ = "ICHIKAWA, Yuji <ichikawa.yuji@gmail.com>"
import sys
import os
from multiprocessing import Pool
import psutil
import sgf
from board import Board, move2ev
from utilities import file_pathes_under
def check_and_arg(e: any):
"""
is defined explicitly because Pool instance cannot treat lambda function lambda e: (check(e), e).
"""
return check(e), e
if __name__ == '__main__':
if len(sys.argv) != 2:
print("Usage: python {} <directory>".format(sys.argv[0]))
sys.exit(0)
with Pool(psutil.cpu_count(logical=False)) as pool:
for result, filename in pool.imap_unordered(check_and_arg, file_pathes_under(sys.argv[1], "sgf"), 10):
if not result:
print(filename)
| 27.891304 | 110 | 0.639127 | """
check if main line of each SGF is legal.
"""
__author__ = "ICHIKAWA, Yuji <ichikawa.yuji@gmail.com>"
import sys
import os
from multiprocessing import Pool
import psutil
import sgf
from board import Board, move2ev
from utilities import file_pathes_under
def check_game(game: sgf.GameTree):
board = Board()
for node in game.rest:
move = node.properties.get("B", node.properties.get("W", None))
if move is None:
continue
if board.play(move2ev(move[0])) != 0:
return False
return True
def check(sgf_path: str):
with open(sgf_path) as f:
collection = sgf.parse(f.read())
for game in collection:
if not check_game(game):
return False
return True
def check_and_arg(e: any):
"""
is defined explicitly because Pool instance cannot treat lambda function lambda e: (check(e), e).
"""
return check(e), e
if __name__ == '__main__':
if len(sys.argv) != 2:
print("Usage: python {} <directory>".format(sys.argv[0]))
sys.exit(0)
with Pool(psutil.cpu_count(logical=False)) as pool:
for result, filename in pool.imap_unordered(check_and_arg, file_pathes_under(sys.argv[1], "sgf"), 10):
if not result:
print(filename)
| 443 | 0 | 46 |
bc33b06378c23dbee4531b4dfaeb791d28741cc4 | 3,746 | py | Python | sudoku/variable.py | prowe12/game-solver | 0c197c077a82c79c97c9cf1ed5bcda0dc38eed61 | [
"CC0-1.0"
] | null | null | null | sudoku/variable.py | prowe12/game-solver | 0c197c077a82c79c97c9cf1ed5bcda0dc38eed61 | [
"CC0-1.0"
] | null | null | null | sudoku/variable.py | prowe12/game-solver | 0c197c077a82c79c97c9cf1ed5bcda0dc38eed61 | [
"CC0-1.0"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 10 12:20:03 2021
@author: prowe
By Penny Rowe
2021/03/10
AI with Prof. America Chambers, Spring 2021
Based on Variable.java
"""
class Variable:
"""
A variable in a Sudoku CSP with domain {1, 2, 3, ..., 9}
The value of the variable may be fixed by the original problem.
"""
def __init__(self, row, col, nside, val={1, 2, 3, 4, 5, 6, 7, 8, 9},
fix=False):
"""
Create a new variable with the domain specified.
domain may be:
- {1...9}
- a single value
- a collection
"""
self.row = row
self.col = col
self.fixed = fix
self.max_domain_val = nside
self.domain = val.copy()
def replace(self, value):
"""
Replace the domain of the variable with a value
@param val The value to be added
@throws IllegalStateException The domain is fixed
"""
if self.fixed:
raise ValueError('The domain is fixed; cannot replace value.')
self.domain = {value}
def add(self, value):
"""
Add a value to the domain of the variable
@param val The value to be added
@throws IllegalStateException The domain is fixed
"""
if self.fixed:
raise ValueError('The domain is fixed; cannot add value.')
self.domain.add(value)
def add_all(self, collection):
"""
Adds a collection of values to the domain of the variable
@param input A collection of integer values to be added
@throws IllegalStateException The domain is fixed
"""
if self.fixed:
raise ValueError('The domain is fixed; cannot add collection.')
self.domain.union(collection)
def remove(self, val):
"""
Removes a value from the domain
@param val The value to be removed
@throws IllegalStateException The domain is fixed
@returns: False
"""
if self.fixed:
raise ValueError('The domain is fixed; cannot remove value.')
self.domain.remove(val)
def clear(self):
"""
#
# Removes all values from the domain
#
# @throws IllegalStateException
# The domain is fixed
#
"""
if self.fixed:
raise ValueError('The domain is fixed; cannot clear values.')
self.domain = {}
def get_domain(self):
"""
Returns the domain of the variable
@return The domain of the variable
"""
return self.domain
def get_domain_size(self):
"""
Returns the size of the variable's domain
@return The size of the variable's domain
"""
return len(self.domain)
def get_only_value(self):
"""
Returns the only value in the variable's domain
@throws IllegalStateException The domain has more than 1 value or is empty
@return The only value in the variable's domain
"""
if self.get_domain_size() != 1:
raise ValueError('Domain of one expected, but was 0 or > 1')
return next(iter(self.domain))
#def isfixed(self):
# """
# Returns true if domain is fixed
# @return True if the domain is fixed and false otherwise
# """
# return self.fixed
def contains(self, value):
"""
Returns true if domain contains value
@param value The value to be checked
@return True if the domain contains the value, false otherwise
"""
return value in self.domain
| 27.144928 | 82 | 0.567005 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 10 12:20:03 2021
@author: prowe
By Penny Rowe
2021/03/10
AI with Prof. America Chambers, Spring 2021
Based on Variable.java
"""
class Variable:
"""
A variable in a Sudoku CSP with domain {1, 2, 3, ..., 9}
The value of the variable may be fixed by the original problem.
"""
def __init__(self, row, col, nside, val={1, 2, 3, 4, 5, 6, 7, 8, 9},
fix=False):
"""
Create a new variable with the domain specified.
domain may be:
- {1...9}
- a single value
- a collection
"""
self.row = row
self.col = col
self.fixed = fix
self.max_domain_val = nside
self.domain = val.copy()
def replace(self, value):
"""
Replace the domain of the variable with a value
@param val The value to be added
@throws IllegalStateException The domain is fixed
"""
if self.fixed:
raise ValueError('The domain is fixed; cannot replace value.')
self.domain = {value}
def add(self, value):
"""
Add a value to the domain of the variable
@param val The value to be added
@throws IllegalStateException The domain is fixed
"""
if self.fixed:
raise ValueError('The domain is fixed; cannot add value.')
self.domain.add(value)
def add_all(self, collection):
"""
Adds a collection of values to the domain of the variable
@param input A collection of integer values to be added
@throws IllegalStateException The domain is fixed
"""
if self.fixed:
raise ValueError('The domain is fixed; cannot add collection.')
self.domain.union(collection)
def remove(self, val):
"""
Removes a value from the domain
@param val The value to be removed
@throws IllegalStateException The domain is fixed
@returns: False
"""
if self.fixed:
raise ValueError('The domain is fixed; cannot remove value.')
self.domain.remove(val)
def clear(self):
"""
#
# Removes all values from the domain
#
# @throws IllegalStateException
# The domain is fixed
#
"""
if self.fixed:
raise ValueError('The domain is fixed; cannot clear values.')
self.domain = {}
def get_domain(self):
"""
Returns the domain of the variable
@return The domain of the variable
"""
return self.domain
def get_domain_size(self):
"""
Returns the size of the variable's domain
@return The size of the variable's domain
"""
return len(self.domain)
def get_only_value(self):
"""
Returns the only value in the variable's domain
@throws IllegalStateException The domain has more than 1 value or is empty
@return The only value in the variable's domain
"""
if self.get_domain_size() != 1:
raise ValueError('Domain of one expected, but was 0 or > 1')
return next(iter(self.domain))
#def isfixed(self):
# """
# Returns true if domain is fixed
# @return True if the domain is fixed and false otherwise
# """
# return self.fixed
def contains(self, value):
"""
Returns true if domain contains value
@param value The value to be checked
@return True if the domain contains the value, false otherwise
"""
return value in self.domain
| 0 | 0 | 0 |
0a1a359a4636f368d0f28057e4bf1af274c7fb79 | 3,332 | py | Python | influxdb_service_sdk/model/container/resource_requirements_pb2.py | easyopsapis/easyops-api-python | adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0 | [
"Apache-2.0"
] | 5 | 2019-07-31T04:11:05.000Z | 2021-01-07T03:23:20.000Z | influxdb_service_sdk/model/container/resource_requirements_pb2.py | easyopsapis/easyops-api-python | adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0 | [
"Apache-2.0"
] | null | null | null | influxdb_service_sdk/model/container/resource_requirements_pb2.py | easyopsapis/easyops-api-python | adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: resource_requirements.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from influxdb_service_sdk.model.container import resource_list_pb2 as influxdb__service__sdk_dot_model_dot_container_dot_resource__list__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='resource_requirements.proto',
package='container',
syntax='proto3',
serialized_options=_b('ZCgo.easyops.local/contracts/protorepo-models/easyops/model/container'),
serialized_pb=_b('\n\x1bresource_requirements.proto\x12\tcontainer\x1a\x38influxdb_service_sdk/model/container/resource_list.proto\"j\n\x14ResourceRequirements\x12\'\n\x06limits\x18\x01 \x01(\x0b\x32\x17.container.ResourceList\x12)\n\x08requests\x18\x02 \x01(\x0b\x32\x17.container.ResourceListBEZCgo.easyops.local/contracts/protorepo-models/easyops/model/containerb\x06proto3')
,
dependencies=[influxdb__service__sdk_dot_model_dot_container_dot_resource__list__pb2.DESCRIPTOR,])
_RESOURCEREQUIREMENTS = _descriptor.Descriptor(
name='ResourceRequirements',
full_name='container.ResourceRequirements',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='limits', full_name='container.ResourceRequirements.limits', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='requests', full_name='container.ResourceRequirements.requests', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=100,
serialized_end=206,
)
_RESOURCEREQUIREMENTS.fields_by_name['limits'].message_type = influxdb__service__sdk_dot_model_dot_container_dot_resource__list__pb2._RESOURCELIST
_RESOURCEREQUIREMENTS.fields_by_name['requests'].message_type = influxdb__service__sdk_dot_model_dot_container_dot_resource__list__pb2._RESOURCELIST
DESCRIPTOR.message_types_by_name['ResourceRequirements'] = _RESOURCEREQUIREMENTS
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
ResourceRequirements = _reflection.GeneratedProtocolMessageType('ResourceRequirements', (_message.Message,), {
'DESCRIPTOR' : _RESOURCEREQUIREMENTS,
'__module__' : 'resource_requirements_pb2'
# @@protoc_insertion_point(class_scope:container.ResourceRequirements)
})
_sym_db.RegisterMessage(ResourceRequirements)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| 40.144578 | 380 | 0.801921 | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: resource_requirements.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from influxdb_service_sdk.model.container import resource_list_pb2 as influxdb__service__sdk_dot_model_dot_container_dot_resource__list__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='resource_requirements.proto',
package='container',
syntax='proto3',
serialized_options=_b('ZCgo.easyops.local/contracts/protorepo-models/easyops/model/container'),
serialized_pb=_b('\n\x1bresource_requirements.proto\x12\tcontainer\x1a\x38influxdb_service_sdk/model/container/resource_list.proto\"j\n\x14ResourceRequirements\x12\'\n\x06limits\x18\x01 \x01(\x0b\x32\x17.container.ResourceList\x12)\n\x08requests\x18\x02 \x01(\x0b\x32\x17.container.ResourceListBEZCgo.easyops.local/contracts/protorepo-models/easyops/model/containerb\x06proto3')
,
dependencies=[influxdb__service__sdk_dot_model_dot_container_dot_resource__list__pb2.DESCRIPTOR,])
_RESOURCEREQUIREMENTS = _descriptor.Descriptor(
name='ResourceRequirements',
full_name='container.ResourceRequirements',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='limits', full_name='container.ResourceRequirements.limits', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='requests', full_name='container.ResourceRequirements.requests', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=100,
serialized_end=206,
)
_RESOURCEREQUIREMENTS.fields_by_name['limits'].message_type = influxdb__service__sdk_dot_model_dot_container_dot_resource__list__pb2._RESOURCELIST
_RESOURCEREQUIREMENTS.fields_by_name['requests'].message_type = influxdb__service__sdk_dot_model_dot_container_dot_resource__list__pb2._RESOURCELIST
DESCRIPTOR.message_types_by_name['ResourceRequirements'] = _RESOURCEREQUIREMENTS
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
ResourceRequirements = _reflection.GeneratedProtocolMessageType('ResourceRequirements', (_message.Message,), {
'DESCRIPTOR' : _RESOURCEREQUIREMENTS,
'__module__' : 'resource_requirements_pb2'
# @@protoc_insertion_point(class_scope:container.ResourceRequirements)
})
_sym_db.RegisterMessage(ResourceRequirements)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| 0 | 0 | 0 |
6f6b0bbb965c6b6f93d18d3f914bd8661e8f5777 | 1,219 | py | Python | weatherapp/migrations/views.py | mirza450/Django_weatherApp | d2a5b8241dbc8d95300bb98d1fa06fd7767dcf57 | [
"MIT"
] | 1 | 2021-03-10T11:57:50.000Z | 2021-03-10T11:57:50.000Z | weatherapp/migrations/views.py | mirza450/Django_weatherApp | d2a5b8241dbc8d95300bb98d1fa06fd7767dcf57 | [
"MIT"
] | null | null | null | weatherapp/migrations/views.py | mirza450/Django_weatherApp | d2a5b8241dbc8d95300bb98d1fa06fd7767dcf57 | [
"MIT"
] | null | null | null | from django.shortcuts import render
from . import models
import requests
# Create your views here. | 25.93617 | 125 | 0.636587 | from django.shortcuts import render
from . import models
import requests
# Create your views here.
def home(request):
url = 'http://api.openweathermap.org/data/2.5/weather?q={}&appid=847ad8d22725feb921178a969b049211'
if request.method=='POST':
city=request.POST['city']
r = requests.get(url.format(city)).json()
# print(r)
weather = {
'city': city,
'temperature': r["main"]["temp"],
'humidity': r["main"]["humidity"],
'description': r["weather"][0]["description"],
'icon': r["weather"][0]["icon"],
}
data= models.weather(city=city, desc=r["weather"][0]["description"], temp=r["main"]["temp"],humidity=r["main"]["humidity"])
data.save()
# print(weather)
context = {'location_weather' : weather}
return render(request, 'index.html',context)
else:
city = 'kolkata'
r = requests.get(url.format(city)).json()
# print(r)
weather = {
'city': city,
'temperature': r["main"]["temp"],
'humidity': r["main"]["humidity"],
'description': r["weather"][0]["description"],
'icon': r["weather"][0]["icon"],
}
# print(weather)
context = {'location_weather' : weather}
return render(request, 'index.html',context)
return render(request, 'index.html') | 1,097 | 0 | 23 |
5c6790cf97d941206ac4135494a6139465d27768 | 2,304 | py | Python | lpthw/showdad.py | jaredmanning/learning | 1d1767ea78a8f9f72275b18147d47bfc44a1696e | [
"MIT"
] | null | null | null | lpthw/showdad.py | jaredmanning/learning | 1d1767ea78a8f9f72275b18147d47bfc44a1696e | [
"MIT"
] | null | null | null | lpthw/showdad.py | jaredmanning/learning | 1d1767ea78a8f9f72275b18147d47bfc44a1696e | [
"MIT"
] | null | null | null | # When I made this, I was not comletely sure how to make functions, so when it
# doesn't understand the input, it just spits out a line saying it didn't
# understand then moves on.
author = 'Zed A. Shaw'
book = 'Learn Python the Hard Way by %s' % author
print """
Hi dad! This is a little program I wrote
to show you some of the cool things I've
learned today while hammering away at the
book %s\n.
""" % book
print "\nI'm just going to ask you a few simple questions."
print "When answering a 'yes' or 'no' question, please"
print "use 'y' or 'n' for your response."
ready = raw_input('Are you ready? ')
if ready == 'y':
print "\nGood! Let's get started.\n"
elif ready == 'n':
print "\nAh, I see. Well, we're moving on anyways!\n"
else:
print "\nI'm sorry, I didn't understand that... Moving on!\n"
name = raw_input('What is your name? ')
print "\nHello %s!\n" % name
day_going = raw_input("How is your day going? (say 1 for good, 2 for so-so, or 3 for bad) ")
strDayGoing = None
if day_going == '1':
print "\nI'm glad you're having a good day.\n"
strDayGoing = 'good'
elif day_going == '2':
print "\nAw, cheer up ol' chum!\n"
strDayGoing = 'so-so'
elif day_going == '3':
print "\nI'm sorry to hear that... Moving on!\n"
strDayGoing = 'bad'
else:
print "\nI'm sorry, I didn't understand that... Moving on!\n"
fave_color = raw_input("What's your favorite color? ")
if fave_color.lower() == 'blue':
print "\nI knew that already, dad. You're probably wearing",
print "blue shirt too, right?\n"
else:
print "\nThat... that can't be right! It's supposed to be blue!!\n"
lunch = raw_input('What did you have for lunch today? ')
print "\nMmmm! %s sounds delicious!\n" % lunch
print """
So let me get this straight...
Your name is %s.
You're having a %s day.
Your favorite color is %s.
And you had %s for lunch.
""" % (name, strDayGoing, fave_color, lunch)
correct = raw_input("Is all of that information correct? ")
if correct == 'y':
print "\nGreat! Thanks for checking out all the stuff I learned today!",
print "I'm now going to give you a brief tour of the script itself.\n"
elif correct == 'n':
print "\nWelp, can't be my fault! I wrote this script perfectly!!\n"
else:
print "\nSorry, I didn't get that. Oh well!\n"
| 30.72 | 92 | 0.66276 | # When I made this, I was not comletely sure how to make functions, so when it
# doesn't understand the input, it just spits out a line saying it didn't
# understand then moves on.
author = 'Zed A. Shaw'
book = 'Learn Python the Hard Way by %s' % author
print """
Hi dad! This is a little program I wrote
to show you some of the cool things I've
learned today while hammering away at the
book %s\n.
""" % book
print "\nI'm just going to ask you a few simple questions."
print "When answering a 'yes' or 'no' question, please"
print "use 'y' or 'n' for your response."
ready = raw_input('Are you ready? ')
if ready == 'y':
print "\nGood! Let's get started.\n"
elif ready == 'n':
print "\nAh, I see. Well, we're moving on anyways!\n"
else:
print "\nI'm sorry, I didn't understand that... Moving on!\n"
name = raw_input('What is your name? ')
print "\nHello %s!\n" % name
day_going = raw_input("How is your day going? (say 1 for good, 2 for so-so, or 3 for bad) ")
strDayGoing = None
if day_going == '1':
print "\nI'm glad you're having a good day.\n"
strDayGoing = 'good'
elif day_going == '2':
print "\nAw, cheer up ol' chum!\n"
strDayGoing = 'so-so'
elif day_going == '3':
print "\nI'm sorry to hear that... Moving on!\n"
strDayGoing = 'bad'
else:
print "\nI'm sorry, I didn't understand that... Moving on!\n"
fave_color = raw_input("What's your favorite color? ")
if fave_color.lower() == 'blue':
print "\nI knew that already, dad. You're probably wearing",
print "blue shirt too, right?\n"
else:
print "\nThat... that can't be right! It's supposed to be blue!!\n"
lunch = raw_input('What did you have for lunch today? ')
print "\nMmmm! %s sounds delicious!\n" % lunch
print """
So let me get this straight...
Your name is %s.
You're having a %s day.
Your favorite color is %s.
And you had %s for lunch.
""" % (name, strDayGoing, fave_color, lunch)
correct = raw_input("Is all of that information correct? ")
if correct == 'y':
print "\nGreat! Thanks for checking out all the stuff I learned today!",
print "I'm now going to give you a brief tour of the script itself.\n"
elif correct == 'n':
print "\nWelp, can't be my fault! I wrote this script perfectly!!\n"
else:
print "\nSorry, I didn't get that. Oh well!\n"
| 0 | 0 | 0 |
816c2aa87fcca01c52d68c4da80bd4cc0a0c0bad | 721 | py | Python | migrations/versions/b986a61de65c_change_some_model_user_and_pay_type_to_.py | borko81/parking_system_with_flask | 0ff10422cd1892bcb8c4c6958a159b08c1da919b | [
"MIT"
] | 1 | 2022-01-14T15:31:11.000Z | 2022-01-14T15:31:11.000Z | migrations/versions/b986a61de65c_change_some_model_user_and_pay_type_to_.py | borko81/parking_system_with_flask | 0ff10422cd1892bcb8c4c6958a159b08c1da919b | [
"MIT"
] | 5 | 2021-12-03T13:27:44.000Z | 2021-12-05T11:46:08.000Z | migrations/versions/b986a61de65c_change_some_model_user_and_pay_type_to_.py | borko81/parking_system_with_flask | 0ff10422cd1892bcb8c4c6958a159b08c1da919b | [
"MIT"
] | null | null | null | """Change some model user and pay_type to use helper function that strip unique field
Revision ID: b986a61de65c
Revises: 29128332c534
Create Date: 2021-12-08 19:10:55.573019
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'b986a61de65c'
down_revision = '29128332c534'
branch_labels = None
depends_on = None
| 24.862069 | 85 | 0.708738 | """Change some model user and pay_type to use helper function that strip unique field
Revision ID: b986a61de65c
Revises: 29128332c534
Create Date: 2021-12-08 19:10:55.573019
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'b986a61de65c'
down_revision = '29128332c534'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_unique_constraint(None, 'pay_type', ['name'])
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_constraint(None, 'pay_type', type_='unique')
# ### end Alembic commands ###
| 307 | 0 | 46 |
77787be849f6948db95f361275d321eea52e6b9b | 940 | py | Python | stnm/run.py | talhasch/stnm | 075dcf673cc7ac9c3c79687890d5f712bd44b632 | [
"MIT"
] | 1 | 2020-12-08T18:00:55.000Z | 2020-12-08T18:00:55.000Z | stnm/run.py | talhasch/stnm | 075dcf673cc7ac9c3c79687890d5f712bd44b632 | [
"MIT"
] | 4 | 2020-12-08T21:10:43.000Z | 2020-12-15T18:06:04.000Z | stnm/run.py | talhasch/stnm | 075dcf673cc7ac9c3c79687890d5f712bd44b632 | [
"MIT"
] | null | null | null | import argparse
import os
import sys
assert sys.version_info[0] == 3 and sys.version_info[1] >= 5, "Requires Python 3.5 or newer"
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "../")))
if sys.platform not in ["linux", "linux2", "darwin"]:
print("stnm supports only macos and linux")
sys.exit(1)
if __name__ == '__main__':
main()
| 22.380952 | 92 | 0.6 | import argparse
import os
import sys
assert sys.version_info[0] == 3 and sys.version_info[1] >= 5, "Requires Python 3.5 or newer"
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "../")))
if sys.platform not in ["linux", "linux2", "darwin"]:
print("stnm supports only macos and linux")
sys.exit(1)
def main():
parser = argparse.ArgumentParser(description="")
cmd_list = (
"install",
"status",
"start",
"stop",
"config",
"web",
"test"
)
parser.add_argument("cmd", choices=cmd_list, nargs="?", default="")
parser.add_argument("arg", nargs="?", default="")
args = parser.parse_args()
cmd = args.cmd
arg = args.arg
if cmd == "config" and arg == "":
parser.error("configuration parameter required. e.g node.miner=true")
from stnm import main
main(cmd, arg)
if __name__ == '__main__':
main()
| 544 | 0 | 23 |
0c467bcc6404925cedd4674f0752f38ff1a06b80 | 2,702 | py | Python | simple_graphql/django/fields/connection.py | JoaRiski/django-simple-graphql | f529b67e52c8e1d191d1c2880f8003e906c020fb | [
"MIT"
] | null | null | null | simple_graphql/django/fields/connection.py | JoaRiski/django-simple-graphql | f529b67e52c8e1d191d1c2880f8003e906c020fb | [
"MIT"
] | null | null | null | simple_graphql/django/fields/connection.py | JoaRiski/django-simple-graphql | f529b67e52c8e1d191d1c2880f8003e906c020fb | [
"MIT"
] | null | null | null | from functools import partial
from typing import Any, Dict, Optional, Type, Union
import graphene
from django.db.models import QuerySet
from graphene.types.mountedtype import MountedType
from graphene.types.unmountedtype import UnmountedType
from graphene_django.filter import DjangoFilterConnectionField
from simple_graphql.django.config import extract_extra_meta_config
from simple_graphql.django.fields.authorize import authorize_query
from simple_graphql.django.search import order_qs, search_qs
from simple_graphql.django.types import ModelInstance, ModelSchemaConfig
| 35.552632 | 88 | 0.699852 | from functools import partial
from typing import Any, Dict, Optional, Type, Union
import graphene
from django.db.models import QuerySet
from graphene.types.mountedtype import MountedType
from graphene.types.unmountedtype import UnmountedType
from graphene_django.filter import DjangoFilterConnectionField
from simple_graphql.django.config import extract_extra_meta_config
from simple_graphql.django.fields.authorize import authorize_query
from simple_graphql.django.search import order_qs, search_qs
from simple_graphql.django.types import ModelInstance, ModelSchemaConfig
class DjangoAutoConnectionField(DjangoFilterConnectionField):
ordering_options: Optional[graphene.Enum]
config: ModelSchemaConfig
def __init__(
self,
node_cls: Type[graphene.ObjectType],
**kwargs: Union[UnmountedType, MountedType],
):
extra_meta = getattr(node_cls, "ExtraMeta", None)
self.config = extract_extra_meta_config(extra_meta)
self.ordering_options = getattr(extra_meta, "ordering_options", None)
if self.ordering_options:
kwargs.setdefault("order_by", graphene.Argument(self.ordering_options))
if self.config.search_fields:
kwargs.setdefault("search_query", graphene.String())
# graphene-django is shadowing "order_by", so we're skipping it's super
# call by copying its initializaiton here
self._fields = None
self._provided_filterset_class = None
self._filterset_class = None
self._filtering_args = None
self._extra_filter_meta = None
self._base_args = None
super(DjangoFilterConnectionField, self).__init__(node_cls, **kwargs)
@classmethod
def resolve_queryset(
cls, connection, iterable, info, args: Dict[str, Any], *_args, **kwargs
) -> QuerySet[ModelInstance]:
config: ModelSchemaConfig = kwargs.pop("config")
ordering_options: Optional[graphene.Enum] = kwargs.pop("ordering_options", None)
authorize_query(config, info)
qs = super().resolve_queryset(
connection, iterable, info, args, *_args, **kwargs
)
if config.search_fields:
qs = search_qs(qs, config.search_fields, args.get("search_query", None))
if ordering_options:
ordering = args.get("order_by", None)
qs = order_qs(qs, ordering)
return qs
def get_queryset_resolver(self):
return partial(
self.resolve_queryset,
filterset_class=self.filterset_class,
filtering_args=self.filtering_args,
ordering_options=self.ordering_options,
config=self.config,
)
| 1,889 | 214 | 23 |
93a5a722569a29437ef186f706fd49ee0c7cda2c | 97,173 | py | Python | veritastool/fairness/fairness.py | mas-veritas2/veritastool | 37f36b620c3637e230efd8ed69cbb5e4ef87fe2b | [
"Apache-2.0"
] | 2 | 2022-01-12T07:12:50.000Z | 2022-03-08T10:57:10.000Z | veritastool/fairness/fairness.py | mas-veritas2/veritastool | 37f36b620c3637e230efd8ed69cbb5e4ef87fe2b | [
"Apache-2.0"
] | 18 | 2021-11-02T03:03:00.000Z | 2021-12-10T07:44:40.000Z | veritastool/fairness/fairness.py | mas-veritas2/veritastool | 37f36b620c3637e230efd8ed69cbb5e4ef87fe2b | [
"Apache-2.0"
] | null | null | null | import numpy as np
import pandas as pd
import datetime
import json
from ..util.utility import *
from ..metrics.fairness_metrics import FairnessMetrics
from ..metrics.performance_metrics import PerformanceMetrics
from ..metrics.newmetric import *
from ..metrics.tradeoff import TradeoffRate
import ipywidgets as widgets
import IPython
from ipywidgets import Layout, Button, Box, VBox, HBox, Text, GridBox
from IPython.display import display, clear_output, HTML
from IPython.core.display import HTML
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import sys
import warnings
from ..util.errors import *
from math import floor
import concurrent.futures
from tqdm.auto import tqdm
from pathlib import Path
import matplotlib.lines as mlines
class Fairness:
"""
Base Class with attributes used across all use cases within Machine Learning model fairness evaluation.
"""
def __init__(self, model_params):
"""
Parameters
------------------
model_params : list
It holds ModelContainer object(s).
Data holder that contains all the attributes of the model to be assessed. Compulsory input for initialization.
Instance Attributes
-------------------
fair_metric_obj : object, default=None
Stores the FairnessMetrics() object and contains the result of the computations.
perf_metric_obj : object, default=None
Stores the PerformanceMetrics() object and contains the result of the computations.
percent_distribution : dict, default=None
Stores the percentage breakdown of the classes in y_true.
calibration_score : float, default=None
The brier score loss computed for calibration. Computable if y_prob is given.
tradeoff_obj : object, default=None
Stores the TradeoffRate() object and contains the result of the computations.
correlation_output : dict, default=None
Pairwise correlation of most important features (top 20 feature + protected variables).
feature_mask : dict of list, default=None
Stores the mask array for every protected variable applied on the x_test dataset.
fair_conclusion : dict, default=None
Contains conclusion of how the primary fairness metric compares against the fairness threshold. The key will be the protected variable and the conclusion will be "fair" or "unfair".
e.g. {"gender": {'fairness_conclusion': "fair", "threshold": 0.01}, "race":{'fairness_conclusion': "unfair", "threshold": 0.01}}
evaluate_status : int, default=0
Tracks the status of the completion of the evaluate() method to be checked in compile(). Either 1 for complete or -1 for error if any exceptions were raised.
evaluate_status_cali: boolean, default=False
Tracks the status of the completion of the calibration curve step within evaluate() method to be checked in compile().
False = Skipped (if y_prob is not provided)
True = Complete
tradeoff_status : int, default=0
Tracks the status of the completion of the tradeoff() method to be checked in compile().
0 = Not started
1 = Complete
-1 = Skipped (if y_prob is not provided)
feature_imp_status : int, default=0
Tracks the status of the completion of the compute_feature_imp() method to be checked in compile().
0 = Not started
1 = Complete
-1 = Skipped (if model_object not provided, wrong train_op_name/predict_op_name, x_train or x_test error)
feature_imp_status_loo: boolean, default=False
Tracks the status of the completion of the leave-one-out analysis step within feature_importance() method to be checked in compile().
False = Skipped (if x_train or y_train or model object or fit/predict operator names are not provided)
True = Complete
feature_imp_status_corr: boolean, default=False
Tracks the status of the completion of the correlation matrix computation step within feature_importance() method to be checked in compile().
False = Skipped (if the correlation dataframe is not provided in ModelContainer)
True = Complete
feature_imp_values: dict of list, default = None
Contains the difference in metric values between the original and loco models for each protected variable.
{"gender":
{
"gender": (perf_delta, fair_delta, flip, suggestion),
"race": (perf_delta, fair_delta, flip, suggestion)
},
"race":
{
"gender": (perf_delta, fair_delta, flip, suggestion),
"race": (perf_delta, fair_delta, flip, suggestion)
}
}
flip = "fair to fair", "unfair to fair", "fair to unfair", "unfair to unfair"
sigma : float or int , default = 0
Standard deviation for Gaussian kernel for smoothing the contour lines of primary fairness metric.
When sigma <= 0, smoothing is turn off.
Suggested to try sigma = 3 or above if noisy contours are observed.
err : object
VeritasError object
"""
self.model_params = model_params
self.fair_metric_obj = None
self.perf_metric_obj = None
self.percent_distribution = None
self.calibration_score = None
self.calibration_curve_bin = None
self.tradeoff_obj = None
self.correlation_output = None
self.feature_mask = self._set_feature_mask()
self.fair_conclusion = None
self.evaluate_status = 0
self.tradeoff_status = 0
self.feature_imp_status = 0
self.feature_imp_values = None
self.feature_imp_status_corr = False
self.feature_imp_status_loo = False
self.sigma = None
self.err = VeritasError()
def evaluate(self, visualize=False, output=True, n_threads=1, seed=None):
"""
Computes the percentage count of subgroups, performance, and fairness metrics together with their confidence intervals, calibration score & fairness metric self.fair_conclusion for all protected variables.
If visualize = True, output will be overwritten to False (will not be shown) and run fairness_widget() from Fairness.
Parameters
----------
visualize : boolean, default=False
If visualize = True, output will be overwritten to False and run fairness_widget() from Fairness.
output : boolean, default=True
If output = True, _print_evaluate() from Fairness will run.
n_threads : int, default=1
Number of currently active threads of a job
seed : int, default=None
Used to initialize the random number generator.
Returns
----------
_fairness_widget() or _print_evaluate()
"""
#check if evaluate hasn't run, only run if haven't
if self.evaluate_status == 0:
#to show progress bar
eval_pbar = tqdm(total=100, desc='Evaluate performance', bar_format='{l_bar}{bar}')
eval_pbar.update(1)
#execute performance metrics from PerformanceMetrics class
self._compute_performance(n_threads=n_threads, seed = seed, eval_pbar=eval_pbar)
eval_pbar.set_description('Evaluate fairness')
#execute fairness metrics from FairnessMetrics class
self._compute_fairness(n_threads=n_threads, seed = seed, eval_pbar=eval_pbar)
#to determine fairness conclusion based on inputs
self._fairness_conclusion()
#set status to 1 after evaluate has run
self.evaluate_status = 1
eval_pbar.set_description('Evaluate')
eval_pbar.update(100 - eval_pbar.n)
eval_pbar.close()
print('', flush=True)
#to trigger widget
if visualize == True:
output = False
self._fairness_widget()
#to trigger evaluate printout
if output == True:
self._print_evaluate()
def _fair_conclude(self, protected_feature_name, **kwargs):
"""
Checks the fairness_conclusion for the selected protected feature with the primary fairness metric value against the fair_threshold
Parameters
----------
protected_feature_name : string
Name of a protected feature
Other Parameters
----------------
priv_m_v : float
Privileged metric value
Returns
----------
out : dict
Fairness threshold and conclusion for the chosen protected variable
"""
#for feature importance, when privileged metric values have been overwritten during leave-one-out analysis
if "priv_m_v" in kwargs:
priv_m_v = kwargs["priv_m_v"]
value = kwargs["value"]
#else run as per input values
else:
priv_m_v = self.fair_metric_obj.result.get(protected_feature_name).get("fair_metric_values").get(self.fair_metric_name)[1]
value = self.fair_metric_obj.result[protected_feature_name]["fair_metric_values"].get(self.fair_metric_name)[0]
#to handle different variations of threhold value provided e.g. float, decimals, integer
fair_threshold = self._compute_fairness_metric_threshold(priv_m_v)
out = {}
#append threshold value to result
out['threshold'] = fair_threshold
#if metric used is ratio based, means it will either be more than 1 or less than 1. So set n = 1 to see the difference.
if FairnessMetrics.map_fair_metric_to_group.get(self.fair_metric_name)[2] == 'ratio':
n = 1
#if metric used is pairty based, means it will either be more than 0 or less than 0 So set n = 0 to see the difference.
elif FairnessMetrics.map_fair_metric_to_group.get(self.fair_metric_name)[2] == 'parity':
n = 0
#find absolute difference of fair values calculated after metric has been applied
f_value = abs(value - n)
#determine whether input values are fair or unfair depending on metrics applied
if f_value <= fair_threshold:
out['fairness_conclusion'] = 'fair'
else:
out['fairness_conclusion'] = 'unfair'
return out
def _fairness_conclusion(self):
"""
Computes _fair_conclude() for all the protected features and returns results in a dictionary
Returns
----------
self.fair_conclusion : dict
fair_conclusion and threshold for every protected variable
"""
self.fair_conclusion = {}
#to append each fair conclusion for each protected variable into a single dictionary
for i in self.model_params[0].p_var:
self.fair_conclusion[i] = self._fair_conclude(i)
def _compute_fairness_metric_threshold(self, priv_m_v):
"""
Computes the fairness metric threshold based on the fair_threshold variable
Parameters
----------
priv_m_v : float
Privileged metric value
Returns
----------
fair_threshold : float
Fairness metric threshold
"""
#to handle different variations of threhold value provided e.g. float, decimals, integer
if self.fair_threshold > 1:
self.fair_threshold = floor(self.fair_threshold)
if FairnessMetrics.map_fair_metric_to_group.get(self.fair_metric_name)[2] == 'ratio':
fair_threshold = 1 - (self.fair_threshold / 100)
elif FairnessMetrics.map_fair_metric_to_group.get(self.fair_metric_name)[2] == 'parity':
fair_threshold = (1 - (self.fair_threshold / 100)) * priv_m_v
return fair_threshold
else:
return self.fair_threshold
def _compute_performance(self, n_threads, seed, eval_pbar):
"""
Computes the percentage count of subgroups, all the performance metrics together with their confidence intervals & the calibration curve data.
Parameters
-----------
n_threads : int
Number of currently active threads of a job
seed : int
Used to initialize the random number generator.
eval_pbar : tqdm object
Progress bar
Returns
----------
All calculations from every performance metric
"""
#to initialize PerformanceMetrics and exceute all the perf metrics at one go
self.perf_metric_obj = PerformanceMetrics(self)
self.perf_metric_obj.execute_all_perf(n_threads=n_threads, seed = seed, eval_pbar=eval_pbar)
#bring status bar to full after all perf metrics have been ran
eval_pbar.update(1)
#if calibration_curve function has been run, then set status to True
if self.perf_metric_obj.result["calibration_curve"] is None:
self.evaluate_status_cali = False
else:
self.evaluate_status_cali = True
#if perf_dynamic function has been run, then set status to True
if self.perf_metric_obj.result['perf_dynamic'] is None:
self.evaluate_status_perf_dynamics = False
else:
self.evaluate_status_perf_dynamics = True
def _compute_fairness(self, n_threads, seed, eval_pbar):
"""
Computes all the fairness metrics together with their confidence intervals & the self.fair_conclusion for every protected variable
Parameters
-----------
n_threads : int
Number of currently active threads of a job
seed : int
Used to initialize the random number generator.
eval_pbar : tqdm object
Progress bar
Returns
----------
All calculations from every fairness metric
"""
#to initialize FairnessMetrics and exceute all the fair metrics at one go
self.fair_metric_obj = FairnessMetrics(self)
self.fair_metric_obj.execute_all_fair(n_threads=n_threads, seed = seed, eval_pbar=eval_pbar)
#bring status bar to full after all fair metrics have been ran
eval_pbar.update(1)
for i in self.model_params[0].p_var:
for j in self._use_case_metrics['fair']:
#if user provides fair metric value input value for each protected variable
if self.fairness_metric_value_input is not None :
if i in self.fairness_metric_value_input.keys():
if j in self.fairness_metric_value_input[i].keys():
self.fair_metric_obj.result[i]["fair_metric_values"][j]= (self.fairness_metric_value_input[i][j], self.fair_metric_obj.result[i]["fair_metric_values"][j][1], self.fair_metric_obj.result[i]["fair_metric_values"][j][2] )
msg = "{} value for {} is overwritten by user input, CI and privileged metric value may be inconsistent."
msg = msg.format(FairnessMetrics.map_fair_metric_to_group[j][0], i)
warnings.warn(msg)
def compile(self, skip_tradeoff_flag=0, skip_feature_imp_flag=0, n_threads=1):
"""
Runs the evaluation function together with the trade-off and feature importance sections and saves all the results to a JSON file locally.
Parameters
-------------
skip_tradeoff_flag : int, default=0
Skip running tradeoff function if it is 1.
skip_feature_imp_flag : int, default=0
Skip running feature importance function if it is 1.
n_threads : int, default=1
Number of currently active threads of a job
Returns
----------
Prints messages for the status of evaluate and tradeoff and generates model artifact
"""
#check if evaluate hasn't run, only run if haven't
if self.evaluate_status == 0:
self.evaluate(visualize=False, output=False, n_threads=n_threads)
#printout
print('{:40s}{:<10}'.format('Running evaluate','done'))
print('{:5s}{:35s}{:<10}'.format('','performance measures','done'))
print('{:5s}{:35s}{:<10}'.format('','bias detection','done'))
if self.evaluate_status_cali:
print('{:5s}{:35s}{:<10}'.format('','probability calibration','done'))
else:
print('{:5s}{:35s}{:<10}'.format('','probability calibration','skipped'))
if self.evaluate_status_perf_dynamics:
print('{:5s}{:35s}{:<10}'.format('','performance dynamics','done'))
else:
print('{:5s}{:35s}{:<10}'.format('','performance dynamics','skipped'))
#check if user wants to skip tradeoff, if yes tradeoff will not run, print skipped
if self.tradeoff_status == -1:
print('{:40s}{:<10}'.format('Running tradeoff','skipped'))
#check if tradeoff hasn't run and user does not want to skip, only run if haven't
elif self.tradeoff_status == 0 and skip_tradeoff_flag==0:
try :
self.tradeoff(output=False, n_threads=n_threads)
#if user wants to skip tradeoff, print skipped
if self.tradeoff_status == -1 :
print('{:40s}{:<10}'.format('Running tradeoff','skipped'))
#set status to 1 after evaluate has run
elif self.tradeoff_status == 1 :
print('{:40s}{:<10}'.format('Running tradeoff','done'))
except :
print('{:40s}{:<10}'.format('Running tradeoff','skipped'))
#check if tradeoff hasn't run and user wants to skip, print skipped
elif self.tradeoff_status == 0 and skip_tradeoff_flag==1:
self.tradeoff_status = -1
print('{:40s}{:<10}'.format('Running tradeoff','skipped'))
else:
print('{:40s}{:<10}'.format('Running tradeoff','done'))
#check if user wants to skip feature_importance, if yes feature_importance will not run, print skipped
if self.feature_imp_status_corr:
print('{:40s}{:<10}'.format('Running feature importance','done'))
elif self.feature_imp_status == -1:
print('{:40s}{:<10}'.format('Running feature importance','skipped'))
#check if feature_importance hasn't run and user does not want to skip, only run if haven't
elif self.feature_imp_status == 0 and skip_feature_imp_flag ==0:
try :
self.feature_importance(output=False, n_threads=n_threads)
if self.feature_imp_status == 1:
print('{:40s}{:<10}'.format('Running feature importance','done'))
elif self.feature_imp_status_corr:
print('{:40s}{:<10}'.format('Running feature importance','done'))
else:
print('{:40s}{:<10}'.format('Running feature importance','skipped'))
except:
print('{:40s}{:<10}'.format('Running feature importance','skipped'))
#check if feature_importance hasn't run and user wants to skip, print skipped
elif self.feature_imp_status == 0 and skip_feature_imp_flag ==1:
self.feature_imp_status = -1
print('{:40s}{:<10}'.format('Running feature importance','skipped'))
else:
print('{:40s}{:<10}'.format('Running feature importance','done'))
#check if feature_importance_loo has ran, if not print skipped
if self.feature_imp_status_loo:
print('{:5s}{:35s}{:<10}'.format('','leave-one-out analysis','done'))
else:
print('{:5s}{:35s}{:<10}'.format('','leave-one-out analysis','skipped'))
#check if feature_importance_corr has ran, if not print skipped
if self.feature_imp_status_corr:
print('{:5s}{:35s}{:<10}'.format('','correlation analysis','done'))
else:
print('{:5s}{:35s}{:<10}'.format('','correlation analysis','skipped'))
#run function to generate json model artifact file after all API functions have ran
self._generate_model_artifact()
def tradeoff(self, output=True, n_threads=1, sigma = 0):
"""
Computes the trade-off between performance and fairness over a range of threshold values.
If output = True, run the _print_tradeoff() function.
Parameters
-----------
output : boolean, default=True
If output = True, run the _print_tradeoff() function.
n_threads : int, default=1
Number of currently active threads of a job
sigma : float or int , default = 0
Standard deviation for Gaussian kernel for smoothing the contour lines of primary fairness metric.
When sigma <= 0, smoothing is turn off.
Suggested to try sigma = 3 or above if noisy contours are observed.
"""
#if y_prob is None, skip tradeoff
if self.model_params[0].y_prob is None:
self.tradeoff_status = -1
print("Tradeoff has been skipped due to y_prob")
#if user wants to skip tradeoff, return None
if self.tradeoff_status == -1:
return
#check if tradeoff hasn't run, only run if haven't
elif self.tradeoff_status == 0:
self.sigma = sigma
n_threads = check_multiprocessing(n_threads)
#to show progress bar
tdff_pbar = tqdm(total=100, desc='Tradeoff', bar_format='{l_bar}{bar}')
tdff_pbar.update(5)
sys.stdout.flush()
#initialize tradeoff
self.tradeoff_obj = TradeoffRate(self)
tdff_pbar.update(10)
#run tradeoff
self.tradeoff_obj.compute_tradeoff(n_threads, tdff_pbar)
tdff_pbar.update(100 - tdff_pbar.n)
tdff_pbar.close()
print('', flush=True)
#if after running tradoeff, result is None, print skipped
if self.tradeoff_obj.result == {}:
print(self.tradeoff_obj.msg)
self.tradeoff_status = -1
else:
#set status to 1 after tradeoff has ran
self.tradeoff_status = 1
#if tradeoff has already ran once, just print result
if output and self.tradeoff_status == 1:
self._print_tradeoff()
def feature_importance(self, output=True, n_threads=1):
"""
Trains models using the leave-one-variable-out method for each protected variable and computes the performance and fairness metrics each time to assess the impact of those variables.
If output = True, run the _print_feature_importance() function.
Parameters
------------
output : boolean, default=True
Flag to print out the results of evaluation in the console. This flag will be False if visualize=True.
n_threads : int
Number of currently active threads of a job
Returns
------------
self.feature_imp_status_loo : boolean
Tracks the status of the completion of the leave-one-out analysis step within feature_importance() method to be checked in compile().
self.feature_imp_status : int
Tracks the status of the completion of the feature_importance() method to be checked in compile().
self._compute_correlation()
self._print_feature_importance()
"""
#if feature_imp_status_corr hasn't run
if self.feature_imp_status_corr == False:
self._compute_correlation()
#if user wants to skip feature_importance, return None
if self.feature_imp_status == -1:
self.feature_imp_values = None
return
#check if feature_importance hasn't run, only run if haven't
if self.feature_imp_status == 0:
for k in self.model_params:
x_train = k.x_train
y_train = k.y_train
model_object = k.model_object
x_test = k.x_test
train_op_name = k.train_op_name
predict_op_name = k.predict_op_name
# if model_object is not provided, skip feature_importance
if model_object is None:
self.feature_imp_status = -1
print("Feature importance has been skipped due to model_object")
return
else :
for var_name in [train_op_name, predict_op_name]:
#to check callable functions
try:
callable(getattr(model_object, var_name))
except:
self.feature_imp_status = -1
print("Feature importance has been skipped due to train_op_name/predict_op_name error")
return
#to show progress bar
fimp_pbar = tqdm(total=100, desc='Feature importance', bar_format='{l_bar}{bar}')
fimp_pbar.update(1)
self.feature_imp_values = {}
for h in self.model_params[0].p_var:
self.feature_imp_values[h] = {}
fimp_pbar.update(1)
#if evaluate_status = 0, run evaluate() first
if self.evaluate_status == 0:
self.evaluate(output=False)
#if user wants to skip feature_importance, return None
if self.feature_imp_status == -1:
self.feature_imp_values = None
return
fimp_pbar.update(1)
num_p_var = len(self.model_params[0].p_var)
n_threads = check_multiprocessing(n_threads)
max_workers = min(n_threads, num_p_var)
#if require to run with 1 thread, will skip deepcopy
worker_progress = 80/num_p_var
if max_workers >=1:
threads = []
with concurrent.futures.ThreadPoolExecutor(max_workers = max_workers) as executor:
fimp_pbar.update(5)
#iterate through protected variables to drop one by one as part of leave-one-out
for i in self.model_params[0].p_var:
if max_workers == 1:
use_case_object = self
else:
use_case_object = deepcopy(self)
threads.append(executor.submit(Fairness._feature_imp_loo, p_variable=i, use_case_object=use_case_object, fimp_pbar=fimp_pbar, worker_progress=worker_progress ))
for thread in threads:
fimp_pbar.update(round(8/num_p_var, 2))
if thread.result() is None:
self.feature_imp_status = -1
return
else:
for removed_pvar, values in thread.result().items():
for pvar, v in values.items():
self.feature_imp_values[pvar][removed_pvar] = v
#change flag after feature_importance has finished running
self.feature_imp_status_loo = True
self.feature_imp_status = 1
fimp_pbar.update(2)
fimp_pbar.update(100.0-fimp_pbar.n)
fimp_pbar.close()
print('', flush=True)
#if feature_importance has already ran once, just print result
if output == True:
self._print_feature_importance()
def _feature_imp_loo(p_variable, use_case_object, fimp_pbar, worker_progress):
"""
Maps each thread's work for feature_importance()
Parameters
------------
p_variable : str
Name of protected variable
use_case_object : object
Initialised use case object
fimp_pbar :
worker_progress :
Returns
------------
dictionary of loo_result of each p_var
"""
#get baseline values
baseline_perf_values = use_case_object.perf_metric_obj.result.get("perf_metric_values").get(use_case_object.perf_metric_name)[0]
baseline_fair_values = use_case_object.fair_metric_obj.result.get(p_variable).get("fair_metric_values").get(use_case_object.fair_metric_name)[0]
baseline_fairness_conclusion = use_case_object.fair_conclusion.get(p_variable).get("fairness_conclusion")
#toDel#baseline_values = [baseline_perf_values, baseline_fair_values, baseline_fairness_conclusion]
# empty y_pred_new list to be appended
y_pred_new = []
loo_result = {}
# loop through model_params
for k in range(len(use_case_object.model_params)):
## for uplift model type --> two model container --> need to train two models
## when model param len =2, then it is uplift model
p_var = use_case_object.model_params[k].p_var
x_train = use_case_object.model_params[k].x_train
y_train = use_case_object.model_params[k].y_train
model_object = use_case_object.model_params[k].model_object
x_test = use_case_object.model_params[k].x_test
y_pred = use_case_object.model_params[k].y_pred
y_prob = use_case_object.model_params[k].y_prob
pos_label = use_case_object.model_params[k].pos_label
neg_label = use_case_object.model_params[k].neg_label
train_op = getattr(model_object, use_case_object.model_params[k].train_op_name)
predict_op = getattr(model_object, use_case_object.model_params[k].predict_op_name)
#show progress bar
fimp_pbar.update(round(worker_progress*0.9/len(use_case_object.model_params), 2))
try:
#check if x_train is a dataframe
if isinstance(x_train, pd.DataFrame):
#drop protected variable and train model
pre_loo_model_obj = train_op(x_train.drop(columns=[p_variable]), y_train) # train_op_name is string, need to use getattr[] to get the attribute?
else :
pre_loo_model_obj = train_op(x_train, y_train, p_variable) # train_op to handle drop column i inside train_op
# Predict and compute performance Metrics (PerformanceMetrics.result.balanced_acc)
except:
#else print skipped and return None
print("LOO analysis is skipped for [", p_variable, "] due to x_train/y_train error")
use_case_object.feature_imp_status = -1
return None
try:
#check if x_test is a dataframe
if isinstance(x_test, pd.DataFrame):
#drop protected variable and predict
pre_y_pred_new = np.array(predict_op(x_test.drop(columns=[p_variable])))
else :
pre_y_pred_new = predict_op(x_train, y_train, p_variable) # train_op to handle drop column i inside train_op
except:
#else print skipped and return None
print("LOO analysis is skipped for [", p_variable, "] due to x_test/y_test error")
use_case_object.feature_imp_status = -1
return None
fimp_pbar.update(round(worker_progress*0.02, 2))
pre_y_pred_new = predict_op(x_test.drop(columns=[p_variable]))
#to ensure labels and datatype for predicted values are correct before running metrics
if len(pre_y_pred_new.shape) == 1 and pre_y_pred_new.dtype.kind in ['i','O','U']:
pre_y_pred_new, pos_label2 = check_label(pre_y_pred_new, pos_label, neg_label)
else:
pre_y_pred_new = pre_y_pred_new.astype(np.float64)
y_pred_new.append(pre_y_pred_new)
#run performance and fairness evaluation only for primary performance and fair metric
loo_perf_value = use_case_object.perf_metric_obj.translate_metric(use_case_object.perf_metric_name, y_pred_new=y_pred_new)
##to find deltas (removed - baseline) for primary perf metric
deltas_perf = loo_perf_value - baseline_perf_values #toDel#baseline_values[0]
# to iterate through each protected variable for each protected variable that is being dropped
for j in use_case_object.model_params[0].p_var:
fimp_pbar.update(round(worker_progress*0.08/len(p_var), 2))
use_case_object.fair_metric_obj.curr_p_var = j #will this work under multithreading? will not work, should changes to a copy
## get loo_perf_value,loo_fair_values
loo_fair_value, loo_priv_m_v = use_case_object.fair_metric_obj.translate_metric(use_case_object.fair_metric_name, y_pred_new=y_pred_new)[:2]
##to find deltas (removed - baseline) for each protected variable in iteration for primary fair metric
#toDel#deltas_fair = loo_fair_value - baseline_values[1]
baseline_fair_values_j = use_case_object.fair_metric_obj.result.get(j).get("fair_metric_values").get(use_case_object.fair_metric_name)[0]
baseline_fairness_conclusion_j = use_case_object.fair_conclusion.get(j).get("fairness_conclusion")
deltas_fair = loo_fair_value - baseline_fair_values_j
##fairness fair_conclusion
loo_fairness_conclusion = use_case_object._fair_conclude(j, priv_m_v=loo_priv_m_v, value=loo_fair_value)
#toDel#delta_conclusion = baseline_values[2] + " to " + loo_fairness_conclusion["fairness_conclusion"]
delta_conclusion = baseline_fairness_conclusion_j + " to " + loo_fairness_conclusion["fairness_conclusion"]
##suggestion
#if metric used is parity based, means it will either be more than 0 or less than 0. So set n = 0 to see the difference.
if FairnessMetrics.map_fair_metric_to_group.get(use_case_object.fair_metric_name)[2] == 'parity':
n = 0
#if metric used is ratio based, means it will either be more than 1 or less than 1. So set n = 1 to see the difference.
else:
n = 1
if abs(loo_fair_value - n) < abs(baseline_fair_values_j - n):
if PerformanceMetrics.map_perf_metric_to_group.get(use_case_object.perf_metric_name)[1] == "regression" :
if deltas_perf <= 0:
suggestion = 'exclude'
else:
suggestion = 'examine further'
else :
if deltas_perf >= 0:
suggestion = 'exclude'
else:
suggestion = 'examine further'
delta_conclusion += " (+)"
elif abs(loo_fair_value - n) > abs(baseline_fair_values_j - n):
if PerformanceMetrics.map_perf_metric_to_group.get(use_case_object.perf_metric_name)[1] == "regression" :
if deltas_perf >= 0:
suggestion = 'include'
else:
suggestion = 'examine further'
else:
if deltas_perf <= 0:
suggestion = 'include'
else:
suggestion = 'examine further'
delta_conclusion += " (-)"
else:
if PerformanceMetrics.map_perf_metric_to_group.get(use_case_object.perf_metric_name)[1] == "regression" :
if deltas_perf < 0:
suggestion = 'exclude'
elif deltas_perf > 0:
suggestion = 'include'
else:
suggestion = 'exclude'
else:
if deltas_perf > 0:
suggestion = 'exclude'
elif deltas_perf < 0:
suggestion = 'include'
else:
suggestion = 'exclude'
loo_result[j] = [deltas_perf, deltas_fair, delta_conclusion, suggestion]
return {p_variable: loo_result}
def _compute_correlation(self):
"""
Computes the top-20 correlation matrix inclusive of the protected variables
"""
try :
if isinstance(self.model_params[0].x_test, str):
self.feature_imp_status_corr = False
return
if isinstance(self.model_params[0].feature_imp, pd.DataFrame) and isinstance(self.model_params[0].x_test, pd.DataFrame):
#sort feature_imp dataframe by values (descending)
sorted_dataframe = self.model_params[0].feature_imp.sort_values(by=self.model_params[0].feature_imp.columns[1], ascending=False)
#extract n_features and pass into array
feature_cols = np.array(sorted_dataframe.iloc[:,0])
p_var_cols = np.array(self.model_params[0].p_var)
feature_cols = [col for col in feature_cols if col not in p_var_cols]
feature_cols = feature_cols[:20-len(p_var_cols)]
#feature_columns value from x_test
feature_columns = self.model_params[0].x_test[feature_cols]
#p_var_columns value from protected_features_cols
p_var_columns = self.model_params[0].x_test[p_var_cols]
#create final columns and apply corr()
df = pd.concat([feature_columns, p_var_columns], axis=1).corr()
self.correlation_output = {"feature_names":df.columns.values, "corr_values":df.values}
#return correlation_output as dataframe
self.feature_imp_status_corr = True
else:
#extract n_features and pass into array
feature_cols = np.array(self.model_params[0].x_test.columns[:20])
p_var_cols = np.array(self.model_params[0].p_var)
feature_cols = [col for col in feature_cols if col not in p_var_cols]
feature_cols = feature_cols[:20-len(p_var_cols)]
#feature_columns value from x_test
feature_columns = self.model_params[0].x_test[feature_cols]
#p_var_columns value from protected_features_cols
p_var_columns = self.model_params[0].x_test[p_var_cols]
#create final columns and apply corr()
df = pd.concat([feature_columns, p_var_columns], axis=1).corr()
self.correlation_output = {"feature_names":df.columns.values, "corr_values":df.values}
self.feature_imp_status_corr = True
except:
self.feature_imp_status_corr = False
def _print_evaluate(self):
"""
Formats the results of the evaluate() method before printing to console.
"""
if ("_rejection_inference_flag" in dir(self)):
if True in self._rejection_inference_flag.values():
print("Special Parameters")
print("Rejection Inference = True")
name = []
for i in self.model_params[0].p_grp.keys():
name += [i + " - " + str(self.model_params[0].p_grp.get(i)[0])]
str1 = ", ".join(
str(e) for e in list(set(filter(lambda a: a != self.model_params[0].p_grp.get(i)[0],
self.model_params[0].protected_features_cols[i]))))
name += [i + " - " + str1]
titles = ['Group', 'Base Rate', 'Number of Rejected Applicants']
a = []
for i in self.spl_params['base_default_rate'].keys():
a += self.spl_params['base_default_rate'].get(i)
b = []
for i in self.spl_params['num_applicants'].keys():
b += self.spl_params['num_applicants'].get(i)
data = [titles] + list(zip(name, a, b))
for i, d in enumerate(data):
line = '| '.join(str(x).ljust(16) for x in d)
print(line)
if i == 0:
print('-' * len(line))
print("\n")
elif hasattr(self, 'spl_params') and ('revenue' in self.spl_params or 'treatment_cost' in self.spl_params):
print("Special Parameters")
titles = ['Revenue', 'Treatment Cost']
a = [self.spl_params['revenue']]
b = [self.spl_params['treatment_cost']]
data = [titles] + list(zip(a, b))
for i, d in enumerate(data):
line = '| '.join(str(x).ljust(16) for x in d)
print(line)
if i == 0:
print('-' * len(line))
print("\n")
if PerformanceMetrics.map_perf_metric_to_group.get(self.perf_metric_name)[1] != "regression":
print("Class Distribution")
if self.model_params[0].model_type != "uplift":
print("{0:<45s}{1:>29.{decimal_pts}f}%".format("\t" + "pos_label",
self.perf_metric_obj.result.get("class_distribution").get("pos_label") * 100, decimal_pts=self.decimals))
print("{0:<45s}{1:>29.{decimal_pts}f}%".format("\t" + "neg_label",
self.perf_metric_obj.result.get("class_distribution").get("neg_label") * 100, decimal_pts=self.decimals))
else:
print("{0:<45s}{1:>29.{decimal_pts}f}%".format("\t" + "CN",
self.perf_metric_obj.result.get("class_distribution").get("CN") * 100, decimal_pts=self.decimals))
print("{0:<45s}{1:>29.{decimal_pts}f}%".format("\t" + "TN",
self.perf_metric_obj.result.get("class_distribution").get("TN") * 100, decimal_pts=self.decimals))
print("{0:<45s}{1:>29.{decimal_pts}f}%".format("\t" + "CR",
self.perf_metric_obj.result.get("class_distribution").get("CR") * 100, decimal_pts=self.decimals))
print("{0:<45s}{1:>29.{decimal_pts}f}%".format("\t" + "TR",
self.perf_metric_obj.result.get("class_distribution").get("TR") * 100, decimal_pts=self.decimals))
else:
pass
print("\n")
if self.model_params[0].sample_weight is not None:
print("Performance Metrics (Sample Weight = True)")
else:
print("Performance Metrics")
for k in self._use_case_metrics["perf"]:
print_metric_value(k, 0)
if self.perf_metric_obj.result.get("calibration_curve") is None:
pass
else:
print("\n")
print("Probability Calibration")
m = "\tBrier Loss Score"
v = "{:.{decimal_pts}f}".format(self.perf_metric_obj.result.get("calibration_curve").get("score"),
decimal_pts=self.decimals)
print("{0:<45s}{1:>30s}".format(m, v))
print("\n")
if self.fair_metric_input == 'auto':
print('Primary Fairness Metric Suggestion')
print('\t{}'.format(FairnessMetrics.map_fair_metric_to_group.get(self.fair_metric_name)[0]))
print('based on')
print('\tfair_priority = {}'.format(self.fair_priority))
print('\tfair_concern = {}'.format(self.fair_concern))
print('\tfair_impact = {}'.format(self.fair_impact))
print('\n')
for i, i_var in enumerate(self.model_params[0].p_var):
p_len = len(str(i + 1) + ": " + i_var)
print("-" * 35 + str(i + 1) + ": " + i_var.title() + "-" * int((45 - p_len)))
print("Value Distribution")
print("{:<45s}{:>29.{decimal_pts}f}%".format('\tPrivileged Group',
self.fair_metric_obj.result.get(i_var).get(
"feature_distribution").get("privileged_group") * 100,
decimal_pts=self.decimals))
print("{:<45s}{:>29.{decimal_pts}f}%".format('\tUnprivileged Group',
self.fair_metric_obj.result.get(i_var).get(
"feature_distribution").get("unprivileged_group") * 100,
decimal_pts=self.decimals))
print("\n")
if self.model_params[0].sample_weight is not None:
print("Fairness Metrics (Sample Weight = True)")
else:
print("Fairness Metrics")
for h in self._use_case_metrics["fair"]:
print_metric_value(h, 1)
print("\n")
print("Fairness Conclusion")
m = "\tOutcome ({})".format(FairnessMetrics.map_fair_metric_to_group.get(self.fair_metric_name)[0])
v = self.fair_conclusion.get(i_var).get("fairness_conclusion").title()
print("{0:<55s}{1:>20s}*".format(m, v))
m = "\tFairness Threshold"
if self.fair_threshold > 0 and self.fair_threshold < 1:
v = str(self.fair_threshold)
elif self.fair_threshold > 1 and self.fair_threshold < 100:
v = str(self.fair_threshold) + "%"
print("{0:<45s}{1:>30s}".format(m, v))
print("\n")
print('* The outcome is calculated based on your inputs and is provided for informational purposes only. Should you decide to act upon the information herein, you do so at your own risk and Veritas Toolkit will not be liable or responsible in any way. ')
sys.stdout.flush()
def _print_tradeoff(self):
"""
Formats the results of the tradeoff() method before printing to console.
"""
i = 1
p_var = self.model_params[0].p_var
for p_variable in p_var:
#title
title_str = " "+ str(i) + ". " + p_variable +" "
if len(title_str)%2 == 1:
title_str+=" "
line_str = int((72-len(title_str))/2) * "-"
print(line_str + title_str +line_str)
print("Performance versus Fairness Trade-Off")
#Single Threshold
print("\t Single Threshold")
print("\t\t{:35s}{:>20.{decimal_pts}f}".format("Privileged/Unprivileged Threshold",
self.tradeoff_obj.result[p_variable]["max_perf_single_th"][
0], decimal_pts=self.decimals))
print("\t\t{:35s}{:>20.{decimal_pts}f}".format(
str("Best " + self.tradeoff_obj.result[p_variable]["perf_metric_name"] + "*"),
self.tradeoff_obj.result[p_variable]["max_perf_single_th"][2], decimal_pts=self.decimals))
# Separated Thresholds
print("\t Separated Thresholds")
print("\t\t{:35s}{:>20.{decimal_pts}f}".format("Privileged Threshold",
self.tradeoff_obj.result[p_variable]["max_perf_point"][0],
decimal_pts=self.decimals))
print("\t\t{:35s}{:>20.{decimal_pts}f}".format("Unprivileged Threshold",
self.tradeoff_obj.result[p_variable]["max_perf_point"][1],
decimal_pts=self.decimals))
print("\t\t{:35s}{:>20.{decimal_pts}f}".format(
str("Best " + self.tradeoff_obj.result[p_variable]["perf_metric_name"] + "*"),
self.tradeoff_obj.result[p_variable]["max_perf_point"][2], decimal_pts=self.decimals))
# Separated Thresholds under Neutral Fairness (0.01)
print("\t Separated Thresholds under Neutral Fairness ({})".format(self.fair_neutral_tolerance))
print("\t\t{:35s}{:>20.{decimal_pts}f}".format("Privileged Threshold", self.tradeoff_obj.result[p_variable][
"max_perf_neutral_fair"][0], decimal_pts=self.decimals))
print("\t\t{:35s}{:>20.{decimal_pts}f}".format("Unprivileged Threshold",
self.tradeoff_obj.result[p_variable][
"max_perf_neutral_fair"][1], decimal_pts=self.decimals))
print("\t\t{:35s}{:>20.{decimal_pts}f}".format(
str("Best " + self.tradeoff_obj.result[p_variable]["perf_metric_name"] + "*"),
self.tradeoff_obj.result[p_variable]["max_perf_neutral_fair"][2], decimal_pts=self.decimals))
print("\t\t*estimated by approximation, subject to the resolution of mesh grid")
print("")
i+=1
sys.stdout.flush()
def _print_feature_importance(self):
"""
Formats the results of the feature_importance() method before printing to console.
"""
for i, i_var in enumerate(self.model_params[0].p_var):
print("\n")
p_len = len(str(i + 1) + ": Fairness on " + i_var)
print("-" * 50 + str(i + 1) + ": Fairness on " + i_var.title() + "-" * int((116 - 50 - p_len)))
print()
print("-" * 116)
print("|{:<30}|{:<20}|{:<20}|{:<20}|{:<20}|".format("Removed Protected Variable", self.perf_metric_name,
self.fair_metric_name, "Fairness Conclusion",
"Suggestion"))
print("-" * 116)
for j in self.model_params[0].p_var:
col1, col2, col3, col4 = self.feature_imp_values[i_var][j]
print("|{:<30}|{:<20.{decimal_pts}f}|{:<20.{decimal_pts}f}|{:<20}|{:<20}|".format(j, col1, col2, col3, (col4).title(), decimal_pts=self.decimals))
print("-" * 116)
print()
if self.feature_imp_status_corr == False:
print("Correlation matrix skippped")
else:
return self.correlation_output
sys.stdout.flush()
def _generate_model_artifact(self):
"""
Generates the JSON file to be saved locally at the end of compile()
"""
#aggregate the results into model artifact
print('{:40s}'.format('Generating model artifact'), end='')
artifact = {}
# Section 1 - fairness_init
#write results to fairness_init
fairness_init = {}
fairness_init["fair_metric_name_input"] = self.fair_metric_input
fairness_init["fair_metric_name"] = FairnessMetrics.map_fair_metric_to_group.get(self.fair_metric_name)[0]
fairness_init["perf_metric_name"] = PerformanceMetrics.map_perf_metric_to_group.get(self.perf_metric_name)[0]
fairness_init["protected_features"] = self.model_params[0].p_var
if FairnessMetrics.map_fair_metric_to_group.get(self.fair_metric_name)[1] != "regression":
fairness_init["fair_priority"] = self.fair_priority
fairness_init["fair_concern"] = self.fair_concern
fairness_init["fair_impact"] = self.fair_impact
if self.model_params[0].model_type == "uplift" or self.model_params[0].model_type == "credit":
fairness_init["special_params"] = self.spl_params #num_applicants and base_default_rate for creditscoring, treatment_cost, revenue and selection_threshold for customermarketing
fairness_init["fair_threshold_input"] = self.fair_threshold_input
fairness_init["fair_neutral_tolerance"] = self.fair_neutral_tolerance
model_type = self.model_params[0].model_type
#add fairness_init results to artifact
artifact["fairness_init"] = fairness_init
perf_result = deepcopy(self.perf_metric_obj.result)
perf_vals_wth_metric_names = {}
for key in self.perf_metric_obj.result["perf_metric_values"].keys():
if key in PerformanceMetrics.map_perf_metric_to_group.keys():
perf_vals_wth_metric_names[PerformanceMetrics.map_perf_metric_to_group.get(key)[0]] = \
self.perf_metric_obj.result["perf_metric_values"][key]
perf_result["perf_metric_values"] = perf_vals_wth_metric_names
artifact = {**artifact, **(perf_result)}
artifact["correlation_matrix"] = self.correlation_output
# above part will only be tested when Credit Scoring and Customer Marketing classes can be run
p_var = self.model_params[0].p_var
#write results to features_dict
features_dict = {}
for pvar in p_var:
dic_h = {}
dic_h["fair_threshold"] = self.fair_conclusion.get(pvar).get("threshold")
dic_h["privileged"] = self.model_params[0].p_grp[pvar]
dic_t = {}
dic_t["fairness_conclusion"] = self.fair_conclusion.get(pvar).get("fairness_conclusion")
dic_t["tradeoff"] = None
if self.tradeoff_status != -1:
dic_t["tradeoff"] = self.tradeoff_obj.result.get(pvar)
dic_t["feature_importance"] = None
if self.feature_imp_status != -1:
dic_t["feature_importance"] = self.feature_imp_values.get(pvar)
fair_vals_wth_metric_names = {}
for key in self.fair_metric_obj.result.get(pvar)['fair_metric_values'].keys():
if key in FairnessMetrics.map_fair_metric_to_group.keys():
fair_vals_wth_metric_names[FairnessMetrics.map_fair_metric_to_group.get(key)[0]] = \
self.fair_metric_obj.result.get(pvar)['fair_metric_values'][key]
fair_result = deepcopy(self.fair_metric_obj.result.get(pvar))
fair_result['fair_metric_values'] = fair_vals_wth_metric_names
for k, v in fair_result['fair_metric_values'].items():
fair_result['fair_metric_values'][k] = [v[0], v[2]]
features_dict[str(pvar)] = {**dic_h, **fair_result, **dic_t}
#add features_dict results to artifact
artifact["features"] = features_dict
print('done')
model_name = (self.model_params[0].model_name +"_").replace(" ","_")
filename = "model_artifact_" + model_name + datetime.datetime.today().strftime('%Y%m%d_%H%M') + ".json"
self.artifact = artifact
artifactJson = json.dumps(artifact, cls=NpEncoder)
jsonFile = open(filename, "w")
jsonFile.write(artifactJson)
jsonFile.close()
print("Saved model artifact to " + filename)
def _fairness_widget(self):
"""
Runs to pop up a widget to visualize the evaluation output
"""
try :
if get_ipython().__class__.__name__ == 'ZMQInteractiveShell':
display(HTML("""
<style>
.dropdown_clr {
background-color: #E2F0D9;
}
.fair_green{
width:auto;
background-color:#E2F0D9;
}
.perf_blue {
width:auto;
background-color:#DEEBF7;
}
</style>
"""))
result_fairness = self.fair_metric_obj.result
option_p_var = self.fair_metric_obj.p_var[0]
options = []
for i in self.fair_metric_obj.p_var[0]:
options += [i + " (privileged group = " + str(self.model_params[0].p_grp.get(i))+ ")"]
model_type = self.model_params[0].model_type.title()
if PerformanceMetrics.map_perf_metric_to_group.get(self.perf_metric_name)[1] != "regression":
model_concern = self.fair_concern.title()
model_priority = self.fair_priority.title()
model_impact = self.fair_impact.title()
else:
model_concern = "N/A"
model_priority = "N/A"
model_impact = "N/A"
model_name = self.model_params[0].model_name.title()
html_pink = '<div style="color:black; text-align:left; padding-left:5px; background-color:#FBE5D6; font-size:12px">{}</div>'
html_grey_true = '<div style="color:black; text-align:center; background-color:#AEAEB2; font-size:12px">{}</div>'
html_grey_false = '<div style="color:#8E8E93; text-align:center; background-color:#E5E5EA; font-size:12px">{}</div>'
html_yellow_left = '<div style="color:black; text-align:left; padding-left:5px; background-color:#FFF2CC; font-size:12px">{}</div>'
html_yellow_right = '<div style="color:black; text-align:right; padding-right:5px; background-color:#FFF2CC; font-size:12px">{}</div>'
html_model_type = widgets.HTML(value=html_yellow_left.format('Model Type: ' + model_type),
layout=Layout(display="flex", width='30%'))
html_model_name = widgets.HTML(value=html_yellow_right.format('Model Name: ' + model_name),
layout=Layout(display="flex", justify_content="flex-end", width='45%'))
dropdown_protected_feature = widgets.Dropdown(options=options, description=r'Protected Feature:',
layout=Layout(display="flex", justify_content="flex-start",
width='62.5%', padding='0px 0px 0px 5px'),
style=dict(description_width='initial'))
dropdown_protected_feature.add_class("dropdown_clr")
html_model_priority = widgets.HTML(value=html_pink.format("Priority: " + model_priority),
layout=Layout(display="flex", width='12.5%'))
html_model_impact = widgets.HTML(value=html_pink.format("Impact: " + model_impact),
layout=Layout(display="flex", width='12.5%'))
html_model_concern = widgets.HTML(value=html_pink.format('Concern: ' + model_concern),
layout=Layout(display="flex", width='12.5%'))
if (self.model_params[0].sample_weight is not None):
sw = html_grey_true
else:
sw = html_grey_false
if "_rejection_inference_flag" in dir(self):
if True in self._rejection_inference_flag.values():
ri = html_grey_true
else:
ri = html_grey_false
elif hasattr(self, 'spl_params') and model_type == "Uplift":
if None not in self.spl_params.values():
ri = html_grey_true
else:
ri = html_grey_false
else:
ri = html_grey_false
html_sample_weight = widgets.HTML(value=sw.format('Sample Weight'),
layout=Layout(display="flex", justify_content="center", width='12.5%'))
if model_type == "Credit":
html_rej_infer = widgets.HTML(value=ri.format('Rejection Inference'),
layout=Layout(display="flex", justify_content="center", width='12.5%'))
elif model_type == "Default" or PerformanceMetrics.map_perf_metric_to_group.get(self.perf_metric_name)[1] == "regression":
regression = '<div style="color:#E5E5EA; text-align:center; background-color:#E5E5EA; font-size:12px">{}</div>'
html_rej_infer = widgets.HTML(value=regression.format('N/A'),
layout=Layout(display="flex", justify_content="center", width='12.5%'))
elif PerformanceMetrics.map_perf_metric_to_group.get(self.perf_metric_name)[1] != "regression":
html_rej_infer = widgets.HTML(value=ri.format('Revenue & Cost'),
layout=Layout(display="flex", justify_content="center", width='12.5%'))
html_fair_italics = '<div style="color:black; text-align:left; padding-left:5px; font-style: italic;font-weight: bold;font-size:14px">{}</div>'
html_fair_bold = '<div style="color:black; text-align:center;font-weight: bold;font-size:20px">{}</div>'
html_fair_bold_red = '<div style="color:#C41E3A; text-align:center; font-weight:bold; font-size:20px">{}</div>'
html_fair_bold_green = '<div style="color:#228B22; text-align:center; font-weight:bold; font-size:20px">{}</div>'
html_fair_small = '<div style="color:black; text-align:left; padding-left:25px; font-size:12px">{}</div>'
html_fair_metric = '<div style="color:black; text-align:right; font-weight: bold;font-size:20px">{}</div>'
html_fair_ci = '<div style="color:black; text-align:left; padding-left:5px; font-size:15px">{}</div>'
chosen_p_v = option_p_var[0]
fair1 = widgets.HTML(value=html_fair_italics.format('Fairness'), layout=Layout(display="flex", margin='0'))
fair2_1 = widgets.HTML(value=html_fair_small.format('Metric'),
layout=Layout(display="flex", justify_content="flex-start", margin='0'))
fair2_2 = widgets.HTML(value=html_fair_small.format('Assessment'),
layout=Layout(display="flex", justify_content="flex-start", margin='0'))
fair3_1 = widgets.HTML(
value=html_fair_bold.format(FairnessMetrics.map_fair_metric_to_group.get(self.fair_metric_name)[0]),
layout=Layout(display="flex", justify_content="center", margin='0'))
if self.fair_conclusion.get(chosen_p_v).get("fairness_conclusion") == 'fair':
pattern = html_fair_bold_green
else:
pattern = html_fair_bold_red
fair3_2_v = pattern.format(self.fair_conclusion.get(chosen_p_v).get("fairness_conclusion").title())
fair3_2 = widgets.HTML(value=fair3_2_v,
layout=Layout(display="flex", justify_content="center", margin='0'))
fair4_1 = widgets.HTML(value=html_fair_small.format('Value'),
layout=Layout(display="flex", justify_content="flex-start", margin='0'))
fair4_2 = widgets.HTML(value=html_fair_small.format('Threshold'),
layout=Layout(display="flex", justify_content="flex-start", margin='0'))
v = html_fair_metric.format("{:.{decimal_pts}f}".format(self.fair_metric_obj.result.get(chosen_p_v).get('fair_metric_values').get(self.fair_metric_name)[0], decimal_pts=self.decimals))
fair5_1 = widgets.HTML(value=v,layout=Layout(display="flex", width='50%', justify_content="center", margin='0'))
c = html_fair_ci.format('\xB1 ' + "{:.{decimal_pts}f}".format(self.fair_metric_obj.result.get(chosen_p_v).get('fair_metric_values').get(self.fair_metric_name)[2], decimal_pts=self.decimals))
fair5_1_1 = widgets.HTML(value=c,layout=Layout(display="flex", width='50%', justify_content="center", margin='0'))
t = html_fair_bold.format("{:.{decimal_pts}f}".format(self.fair_conclusion.get(chosen_p_v).get("threshold"), decimal_pts=self.decimals))
fair5_2 = widgets.HTML(value=t,
layout=Layout(display="flex", justify_content="center", margin='0'))
fair5 = HBox([fair5_1, fair5_1_1], layout=Layout(display="flex", justify_content="center"))
box1f = VBox(children=[fair2_1, fair3_1, fair4_1, fair5], layout=Layout(width="66.666%"))
box2f = VBox(children=[fair2_2, fair3_2, fair4_2, fair5_2], layout=Layout(width="66.666%"))
box3f = HBox([box1f, box2f])
box4f = VBox([fair1, box3f], layout=Layout(width="66.666%", margin='5px 5px 5px 0px'))
box4f.add_class("fair_green")
html_perf_italics = '<div style="color:black; text-align:left; padding-left:5px; font-style: italic;font-weight: bold;font-size:14px">{}</div>'
html_perf_bold = '<div style="color:black; text-align:center; font-weight: bold;font-size:20px">{}</div>'
html_perf_small = '<div style="color:black; text-align:left; padding-left:25px; font-size:12px">{}</div>'
html_perf_metric = '<div style="color:black; text-align:right; font-weight: bold;font-size:20px">{}</div>'
html_perf_ci = '<div style="color:black; text-align:left; padding-left:5px;font-size:15px">{}</div>'
perf1 = widgets.HTML(value=html_perf_italics.format('Performance'),
layout=Layout(display="flex", width='33.3333%', margin='0'))
perf2_1 = widgets.HTML(value=html_perf_small.format('Assessment'),
layout=Layout(display="flex", justify_content="flex-start", margin='0'))
perf3_1 = widgets.HTML(
value=html_perf_bold.format(PerformanceMetrics.map_perf_metric_to_group.get(self.perf_metric_name)[0]),
layout=Layout(display="flex", justify_content="flex-start", margin='0'))
perf4_1 = widgets.HTML(value=html_perf_small.format('Value'),
layout=Layout(display="flex", justify_content="flex-start", margin='0'))
v = "{:.{decimal_pts}f}".format(self.perf_metric_obj.result.get('perf_metric_values').get(self.perf_metric_name)[0], decimal_pts=self.decimals)
perf5_1 = widgets.HTML(value=html_perf_metric.format(v),
layout=Layout(display="flex", justify_content="flex-start", width="50%", margin='0'))
c = "{:.{decimal_pts}f}".format(self.perf_metric_obj.result.get('perf_metric_values').get(self.perf_metric_name)[1], decimal_pts=self.decimals)
perf5_1_1 = widgets.HTML(value=html_perf_ci.format('\xB1 ' + c),
layout=Layout(display="flex", justify_content="flex-start", width="50%", margin='0'))
perf5 = HBox([perf5_1, perf5_1_1], layout=Layout(display="flex", justify_content="center"))
box1p = VBox(children=[perf2_1, perf3_1, perf4_1, perf5])
box2p = VBox([perf1, box1p], layout=Layout(width="33.333%", margin='5px 0px 5px 5px'))
box2p.add_class('perf_blue')
metric_box = HBox([box4f, box2p], layout=Layout(width="auto"))
PATH = Path(__file__).parent.parent.joinpath('resources', 'widget')
if model_type != 'Uplift' and PerformanceMetrics.map_perf_metric_to_group.get(self.perf_metric_name)[1] != "regression":
image1 = IPython.display.Image(filename=PATH/"perf_class_jpg.JPG", width=300, height=500)
A = widgets.Image(
value=image1.data,
format='jpg',
width=260
)
image2 = IPython.display.Image(filename=PATH/"fair_class_jpg.JPG", width=300, height=500)
B = widgets.Image(
value=image2.data,
format='jpg',
width=260
)
elif model_type == "Uplift":
image1 = IPython.display.Image(filename=PATH/"perf_uplift_jpg.JPG", width=300, height=500)
A = widgets.Image(
value=image1.data,
format='jpg',
width=260
)
image2 = IPython.display.Image(filename=PATH/"fair_uplift_jpg.JPG", width=300, height=500)
B = widgets.Image(
value=image2.data,
format='jpg',
width=260
)
else:
image1 = IPython.display.Image(filename=PATH/"perf_regression_jpg.JPG", width=300, height=500)
A = widgets.Image(
value=image1.data,
format='jpg',
width=260
)
image2 = IPython.display.Image(filename=PATH/"fair_regression_jpg.JPG", width=300, height=500)
B = widgets.Image(
value=image2.data,
format='jpg',
width=260
)
tab = widgets.Tab([A, B], layout={'width': '32%', 'margin': '15px', 'height': '350px'})
tab.set_title(0, 'Performance Metrics')
tab.set_title(1, 'Fairness Metrics')
plot_output = widgets.Output(layout=Layout(display='flex', align_items='stretch', width="66.6666%"))
filtering(option_p_var[0])
dropdown_protected_feature.observe(dropdown_event_handler, names='value')
item_layout = widgets.Layout(margin='0 0 0 0')
input_widgets1 = widgets.HBox([html_model_type, html_sample_weight, html_rej_infer, html_model_name],
layout=item_layout)
input_widgets2 = widgets.HBox([dropdown_protected_feature, html_model_priority, html_model_impact, html_model_concern],
layout=item_layout)
input_widgets = VBox([input_widgets1, input_widgets2])
top_display = widgets.VBox([input_widgets, metric_box])
plot_tab = widgets.HBox([plot_output, tab])
dashboard = widgets.VBox([top_display, plot_tab])
display(dashboard)
print("*The threshold and the values of ratio-based metrics are shifted down by 1.")
else:
print("The widget is only available on Jupyter notebook")
except:
pass
def _set_feature_mask(self):
"""
Sets the feature mask for each protected variable based on its privileged group
Returns
----------
feature_mask : dict of list
Stores the mask array for every protected variable applied on the x_test dataset.
"""
feature_mask = {}
for i in self.model_params[0].p_var:
privileged_grp = self.model_params[0].p_grp.get(i)
feature_mask[i] = self.model_params[0].protected_features_cols[i].isin(privileged_grp)
return feature_mask
def _get_e_lift(self):
"""
Helper function to get empirical lift
Returns
---------
None
"""
return None
def _get_confusion_matrix(self, curr_p_var = None, **kwargs):
"""
Compute confusion matrix
Parameters
-------------
curr_p_var : string, default=None
Current protected variable
Returns
-------
Confusion matrix metrics based on privileged and unprivileged groups
"""
if curr_p_var == None :
return [None] * 4
else :
return [None] * 8
def _base_input_check(self):
"""
Checks if there are conflicting input values
"""
try:
if FairnessMetrics.map_fair_metric_to_group.get(self.fair_metric_name)[2] == 'information':
if self.fair_threshold > 1:
self.err.push('conflict_error', var_name_a=str(self.fair_metric_name), some_string="conflict with fair_threshold", value="", function_name="_base_input_check")
self.err.pop()
except TypeError:
pass
def _model_type_input(self):
"""
Checks if model type input is valid
"""
for i in self.model_params :
#throw an error if model_type provided is not in _model_type_to_metric_lookup
if i.model_type not in self._model_type_to_metric_lookup.keys():
self.err.push('value_error', var_name="model_type", given=str(i.model_type),
expected=list(self._model_type_to_metric_lookup.keys()),
function_name="_model_type_input")
#print any exceptions occured
self.err.pop()
model_size = self._model_type_to_metric_lookup[self.model_params[0].model_type][2]
#check if model_size provided based in model_type provided is accepted as per _model_type_to_metric_lookup
if model_size > len(self.model_params):
self.err.push('length_error', var_name="model_type", given=str(len(self.model_params)),
expected=str(model_size),
function_name="_model_type_input")
#print any exceptions occured
self.err.pop()
#check if model_size is -1. If it is only take first set of model_params values
elif model_size == -1:
self.model_params = self.model_params[:1]
else:
self.model_params = self.model_params[:model_size]
#check if model_type of first model_container is uplift, the model_name of second model_container should be clone. Otherwise, throw an exception
if self.model_params[0].model_type == 'uplift':
if self.model_params[1].model_name != "clone" :
self.err.push('value_error', var_name="model_name", given=str(self.model_params[1].model_name),
expected="clone",
function_name="_model_type_input")
#print any exceptions occured
self.err.pop()
def _fairness_metric_value_input_check(self):
"""
Checks if fairness metric value input is valid
"""
if self.fairness_metric_value_input is not None:
for i in self.fairness_metric_value_input.keys() :
#if user provided keys are not in protected variables, ignore
if i not in self.model_params[0].p_var:
print("The fairness_metric_value_input is not provided properly, so it is ignored")
self.fairness_metric_value_input = None
break
for j in self.fairness_metric_value_input[i].keys():
#if user provided fair metrics are not in fair metrics in use case class, ignore
if j not in self._use_case_metrics['fair']:
print("The fairness_metric_value_input is not provided properly, so it is ignored")
self.fairness_metric_value_input = None
break
def check_fair_metric_name(self):
"""
Checks if primary fairness metric is valid
"""
try:
if FairnessMetrics.map_fair_metric_to_group[self.fair_metric_name][4] == False:
ratio_parity_metrics = []
for i,j in FairnessMetrics.map_fair_metric_to_group.items():
if j[1] == self._model_type_to_metric_lookup[self.model_params[0].model_type][0]:
if FairnessMetrics.map_fair_metric_to_group[i][4] == True:
ratio_parity_metrics.append(i)
self.err.push('value_error', var_name="fair_metric_name", given=self.fair_metric_name, expected=ratio_parity_metrics, function_name="check_fair_metric_name")
except:
pass
#print any exceptions occured
self.err.pop()
def check_perf_metric_name(self):
"""
Checks if primary performance metric is valid
"""
try:
if PerformanceMetrics.map_perf_metric_to_group[self.perf_metric_name][4] == False:
perf_list = []
for i,j in PerformanceMetrics.map_perf_metric_to_group.items():
if j[1] == self._model_type_to_metric_lookup[self.model_params[0].model_type][0]:
if PerformanceMetrics.map_perf_metric_to_group[i][4] == True:
perf_list.append(i)
self.err.push('value_error', var_name="perf_metric_name", given=self.perf_metric_name, expected=perf_list, function_name="check_perf_metric_name")
except:
pass
#print any exceptions occured
self.err.pop()
def _fairness_tree(self, is_pos_label_favourable = True):
"""
Sets the feature mask for each protected variable based on its privileged group
Parameters
-----------
is_pos_label_favourable: boolean, default=True
Whether the pos_label is the favourable label
Returns
----------
self.fair_metric_name : string
Fairness metric name
"""
err_ = []
if self.fair_concern not in ['eligible', 'inclusive', 'both']:
err_.append(['value_error', "fair_concern", str(self.fair_concern), str(['eligible', 'inclusive', 'both'])])
if self.fair_priority not in ['benefit', 'harm']:
err_.append(['value_error', "fair_priority", str(self.fair_priority),str(['benefit', 'harm'])])
if self.fair_impact not in ['significant', 'selective', 'normal']:
err_.append(['value_error', "fair_impact", str(self.fair_impact),str(['significant', 'selective', 'normal'])])
if err_ != []:
for i in range(len(err_)):
self.err.push(err_[i][0], var_name=err_[i][1], given=err_[i][2], expected=err_[i][3],
function_name="_fairness_tree")
self.err.pop()
if is_pos_label_favourable == True:
if self.fair_priority == "benefit":
if self.fair_impact == "normal" :
if self.fair_concern == 'inclusive' :
self.fair_metric_name = 'fpr_parity'
elif self.fair_concern == 'eligible':
self.fair_metric_name = 'equal_opportunity'
elif self.fair_concern == 'both':
self.fair_metric_name = 'equal_odds'
elif self.fair_impact =="significant" or self.fair_impact == "selective" :
if self.fair_concern == 'inclusive' :
self.fair_metric_name = 'fdr_parity'
elif self.fair_concern == 'eligible':
self.fair_metric_name = 'ppv_parity'
elif self.fair_concern == 'both':
self.err.push("conflict_error", var_name_a="fair_concern", some_string="not applicable", value="", function_name="_fairness_tree")
self.err.pop()
elif self.fair_priority == "harm" :
if self.fair_impact == "normal" :
if self.fair_concern == 'inclusive' :
self.fair_metric_name = 'fpr_parity'
elif self.fair_concern == 'eligible':
self.fair_metric_name = 'fnr_parity'
elif self.fair_concern == 'both':
self.fair_metric_name = 'equal_odds'
elif self.fair_impact =="significant" or self.fair_impact == "selective" :
if self.fair_concern == 'inclusive' :
self.fair_metric_name = 'fdr_parity'
elif self.fair_concern == 'eligible':
self.fair_metric_name = 'for_parity'
elif self.fair_concern == 'both':
self.fair_metric_name = 'calibration_by_group'
else:
if self.fair_priority == "benefit":
if self.fair_impact == "normal" :
if self.fair_concern == 'inclusive' :
self.fair_metric_name = 'fnr_parity'
elif self.fair_concern == 'eligible':
self.fair_metric_name = 'tnr_parity'
elif self.fair_concern == 'both':
self.fair_metric_name = 'neg_equal_odds'
elif self.fair_impact =="significant" or self.fair_impact == "selective" :
if self.fair_concern == 'inclusive' :
self.fair_metric_name = 'for_parity'
elif self.fair_concern == 'eligible':
self.fair_metric_name = 'npv_parity'
elif self.fair_concern == 'both':
self.err.push("conflict_error", var_name_a="fairness concern", some_string="not applicable", value="", function_name="_fairness_tree")
self.err.pop()
elif self.fair_priority == "harm" :
if self.fair_impact == "normal" :
if self.fair_concern == 'inclusive' :
self.fair_metric_name = 'fnr_parity'
elif self.fair_concern == 'eligible':
self.fair_metric_name = 'fpr_parity'
elif self.fair_concern == 'both':
self.fair_metric_name = 'equal_odds'
elif self.fair_impact =="significant" or self.fair_impact == "selective" :
if self.fair_concern == 'inclusive' :
self.fair_metric_name = 'for_parity'
elif self.fair_concern == 'eligible':
self.fair_metric_name = 'fdr_parity'
elif self.fair_concern == 'both':
self.fair_metric_name = 'calibration_by_group'
return self.fair_metric_name
def get_prob_calibration_results(self):
"""
Gets the probability calibration results
Returns
------------
a dictionary with below keys and values:
'prob_true': the ground truth values split into 10 bins from 0 to 1
'prob_pred': the mean predicted probability in each bin
'score': the brier loss score
"""
if self.evaluate_status_cali == True:
return self.perf_metric_obj.result.get("calibration_curve")
else:
return None
def get_perf_metrics_results(self):
"""
Gets the performance metrics results
Returns
------------
a dictionary with keys as the metric name and values as the metric value together with confidence interval
"""
if self.evaluate_status == 1:
return self.perf_metric_obj.result.get("perf_metric_values")
else:
return None
def get_fair_metrics_results(self):
"""
Gets the fair metrics results
Returns
------------
a dictionary with keys as the metric name and values as the metric value together with confidence interval
"""
if self.evaluate_status == 1:
result = {}
for p_var in self.fair_metric_obj.result.keys():
result[p_var] = self.fair_metric_obj.result[p_var]['fair_metric_values']
return result
else:
return None
def get_tradeoff_results(self):
"""
Gets the tradeoff results
Returns
------------
a dictionary with below keys and values:
protected variable name as key to split result values for each protected variable
'fair_metric_name': fairness metric name
'perf_metric_name': performance metric name
'fair': array of shape (n, n*) of fairness metric values
'perf': array of shape (n, n*) of performance metric values
'th_x': array of shape (n*, ) of thresholds on x axis
'th_y': array of shape (n*, ) of thresholds on y axis
'max_perf_point': maxiumn performance point on the grid
'max_perf_single_th': maxiumn performance point on the grid with single threshold
'max_perf_neutral_fair': maxiumn performance point on the grid with neutral fairness
*n is defined by tradeoff_threshold_bins in config
"""
if self.tradeoff_status == 1:
return self.tradeoff_obj.result
else:
return None
def get_loo_results(self):
"""
Gets the leave one out analysis results
Returns
------------
a dictionary with below keys and values:
protected variable name as key to split fairness result on each protected variable
protected variable name as key to denote the removed protected variable
array values denote the performance metric value, fariness metric value, fairness conclusion and suggestion
"""
if self.feature_imp_status_loo == True:
return self.feature_imp_values
else:
return None
def get_correlation_analysis_results(self):
"""
Gets the correlation analysis results
Returns
------------
a dictionary with below keys and values:
'feature_names': feature names for correlation analysis
'corr_values': correlation values according to feature names
"""
if self.feature_imp_status_corr == True:
return self.correlation_output
else:
return None
class NpEncoder(json.JSONEncoder):
"""
"""
def default(self, obj):
"""
Parameters
------------
obj : object
"""
if isinstance(obj, np.integer):
return int(obj)
if isinstance(obj, np.floating):
return float(obj)
if isinstance(obj, np.ndarray):
return obj.tolist()
return super(NpEncoder, self).default(obj)
| 52.075563 | 262 | 0.566659 | import numpy as np
import pandas as pd
import datetime
import json
from ..util.utility import *
from ..metrics.fairness_metrics import FairnessMetrics
from ..metrics.performance_metrics import PerformanceMetrics
from ..metrics.newmetric import *
from ..metrics.tradeoff import TradeoffRate
import ipywidgets as widgets
import IPython
from ipywidgets import Layout, Button, Box, VBox, HBox, Text, GridBox
from IPython.display import display, clear_output, HTML
from IPython.core.display import HTML
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import sys
import warnings
from ..util.errors import *
from math import floor
import concurrent.futures
from tqdm.auto import tqdm
from pathlib import Path
import matplotlib.lines as mlines
class Fairness:
"""
Base Class with attributes used across all use cases within Machine Learning model fairness evaluation.
"""
def __init__(self, model_params):
"""
Parameters
------------------
model_params : list
It holds ModelContainer object(s).
Data holder that contains all the attributes of the model to be assessed. Compulsory input for initialization.
Instance Attributes
-------------------
fair_metric_obj : object, default=None
Stores the FairnessMetrics() object and contains the result of the computations.
perf_metric_obj : object, default=None
Stores the PerformanceMetrics() object and contains the result of the computations.
percent_distribution : dict, default=None
Stores the percentage breakdown of the classes in y_true.
calibration_score : float, default=None
The brier score loss computed for calibration. Computable if y_prob is given.
tradeoff_obj : object, default=None
Stores the TradeoffRate() object and contains the result of the computations.
correlation_output : dict, default=None
Pairwise correlation of most important features (top 20 feature + protected variables).
feature_mask : dict of list, default=None
Stores the mask array for every protected variable applied on the x_test dataset.
fair_conclusion : dict, default=None
Contains conclusion of how the primary fairness metric compares against the fairness threshold. The key will be the protected variable and the conclusion will be "fair" or "unfair".
e.g. {"gender": {'fairness_conclusion': "fair", "threshold": 0.01}, "race":{'fairness_conclusion': "unfair", "threshold": 0.01}}
evaluate_status : int, default=0
Tracks the status of the completion of the evaluate() method to be checked in compile(). Either 1 for complete or -1 for error if any exceptions were raised.
evaluate_status_cali: boolean, default=False
Tracks the status of the completion of the calibration curve step within evaluate() method to be checked in compile().
False = Skipped (if y_prob is not provided)
True = Complete
tradeoff_status : int, default=0
Tracks the status of the completion of the tradeoff() method to be checked in compile().
0 = Not started
1 = Complete
-1 = Skipped (if y_prob is not provided)
feature_imp_status : int, default=0
Tracks the status of the completion of the compute_feature_imp() method to be checked in compile().
0 = Not started
1 = Complete
-1 = Skipped (if model_object not provided, wrong train_op_name/predict_op_name, x_train or x_test error)
feature_imp_status_loo: boolean, default=False
Tracks the status of the completion of the leave-one-out analysis step within feature_importance() method to be checked in compile().
False = Skipped (if x_train or y_train or model object or fit/predict operator names are not provided)
True = Complete
feature_imp_status_corr: boolean, default=False
Tracks the status of the completion of the correlation matrix computation step within feature_importance() method to be checked in compile().
False = Skipped (if the correlation dataframe is not provided in ModelContainer)
True = Complete
feature_imp_values: dict of list, default = None
Contains the difference in metric values between the original and loco models for each protected variable.
{"gender":
{
"gender": (perf_delta, fair_delta, flip, suggestion),
"race": (perf_delta, fair_delta, flip, suggestion)
},
"race":
{
"gender": (perf_delta, fair_delta, flip, suggestion),
"race": (perf_delta, fair_delta, flip, suggestion)
}
}
flip = "fair to fair", "unfair to fair", "fair to unfair", "unfair to unfair"
sigma : float or int , default = 0
Standard deviation for Gaussian kernel for smoothing the contour lines of primary fairness metric.
When sigma <= 0, smoothing is turn off.
Suggested to try sigma = 3 or above if noisy contours are observed.
err : object
VeritasError object
"""
self.model_params = model_params
self.fair_metric_obj = None
self.perf_metric_obj = None
self.percent_distribution = None
self.calibration_score = None
self.calibration_curve_bin = None
self.tradeoff_obj = None
self.correlation_output = None
self.feature_mask = self._set_feature_mask()
self.fair_conclusion = None
self.evaluate_status = 0
self.tradeoff_status = 0
self.feature_imp_status = 0
self.feature_imp_values = None
self.feature_imp_status_corr = False
self.feature_imp_status_loo = False
self.sigma = None
self.err = VeritasError()
def evaluate(self, visualize=False, output=True, n_threads=1, seed=None):
"""
Computes the percentage count of subgroups, performance, and fairness metrics together with their confidence intervals, calibration score & fairness metric self.fair_conclusion for all protected variables.
If visualize = True, output will be overwritten to False (will not be shown) and run fairness_widget() from Fairness.
Parameters
----------
visualize : boolean, default=False
If visualize = True, output will be overwritten to False and run fairness_widget() from Fairness.
output : boolean, default=True
If output = True, _print_evaluate() from Fairness will run.
n_threads : int, default=1
Number of currently active threads of a job
seed : int, default=None
Used to initialize the random number generator.
Returns
----------
_fairness_widget() or _print_evaluate()
"""
#check if evaluate hasn't run, only run if haven't
if self.evaluate_status == 0:
#to show progress bar
eval_pbar = tqdm(total=100, desc='Evaluate performance', bar_format='{l_bar}{bar}')
eval_pbar.update(1)
#execute performance metrics from PerformanceMetrics class
self._compute_performance(n_threads=n_threads, seed = seed, eval_pbar=eval_pbar)
eval_pbar.set_description('Evaluate fairness')
#execute fairness metrics from FairnessMetrics class
self._compute_fairness(n_threads=n_threads, seed = seed, eval_pbar=eval_pbar)
#to determine fairness conclusion based on inputs
self._fairness_conclusion()
#set status to 1 after evaluate has run
self.evaluate_status = 1
eval_pbar.set_description('Evaluate')
eval_pbar.update(100 - eval_pbar.n)
eval_pbar.close()
print('', flush=True)
#to trigger widget
if visualize == True:
output = False
self._fairness_widget()
#to trigger evaluate printout
if output == True:
self._print_evaluate()
def _fair_conclude(self, protected_feature_name, **kwargs):
"""
Checks the fairness_conclusion for the selected protected feature with the primary fairness metric value against the fair_threshold
Parameters
----------
protected_feature_name : string
Name of a protected feature
Other Parameters
----------------
priv_m_v : float
Privileged metric value
Returns
----------
out : dict
Fairness threshold and conclusion for the chosen protected variable
"""
#for feature importance, when privileged metric values have been overwritten during leave-one-out analysis
if "priv_m_v" in kwargs:
priv_m_v = kwargs["priv_m_v"]
value = kwargs["value"]
#else run as per input values
else:
priv_m_v = self.fair_metric_obj.result.get(protected_feature_name).get("fair_metric_values").get(self.fair_metric_name)[1]
value = self.fair_metric_obj.result[protected_feature_name]["fair_metric_values"].get(self.fair_metric_name)[0]
#to handle different variations of threhold value provided e.g. float, decimals, integer
fair_threshold = self._compute_fairness_metric_threshold(priv_m_v)
out = {}
#append threshold value to result
out['threshold'] = fair_threshold
#if metric used is ratio based, means it will either be more than 1 or less than 1. So set n = 1 to see the difference.
if FairnessMetrics.map_fair_metric_to_group.get(self.fair_metric_name)[2] == 'ratio':
n = 1
#if metric used is pairty based, means it will either be more than 0 or less than 0 So set n = 0 to see the difference.
elif FairnessMetrics.map_fair_metric_to_group.get(self.fair_metric_name)[2] == 'parity':
n = 0
#find absolute difference of fair values calculated after metric has been applied
f_value = abs(value - n)
#determine whether input values are fair or unfair depending on metrics applied
if f_value <= fair_threshold:
out['fairness_conclusion'] = 'fair'
else:
out['fairness_conclusion'] = 'unfair'
return out
def _fairness_conclusion(self):
"""
Computes _fair_conclude() for all the protected features and returns results in a dictionary
Returns
----------
self.fair_conclusion : dict
fair_conclusion and threshold for every protected variable
"""
self.fair_conclusion = {}
#to append each fair conclusion for each protected variable into a single dictionary
for i in self.model_params[0].p_var:
self.fair_conclusion[i] = self._fair_conclude(i)
def _compute_fairness_metric_threshold(self, priv_m_v):
"""
Computes the fairness metric threshold based on the fair_threshold variable
Parameters
----------
priv_m_v : float
Privileged metric value
Returns
----------
fair_threshold : float
Fairness metric threshold
"""
#to handle different variations of threhold value provided e.g. float, decimals, integer
if self.fair_threshold > 1:
self.fair_threshold = floor(self.fair_threshold)
if FairnessMetrics.map_fair_metric_to_group.get(self.fair_metric_name)[2] == 'ratio':
fair_threshold = 1 - (self.fair_threshold / 100)
elif FairnessMetrics.map_fair_metric_to_group.get(self.fair_metric_name)[2] == 'parity':
fair_threshold = (1 - (self.fair_threshold / 100)) * priv_m_v
return fair_threshold
else:
return self.fair_threshold
def _compute_performance(self, n_threads, seed, eval_pbar):
"""
Computes the percentage count of subgroups, all the performance metrics together with their confidence intervals & the calibration curve data.
Parameters
-----------
n_threads : int
Number of currently active threads of a job
seed : int
Used to initialize the random number generator.
eval_pbar : tqdm object
Progress bar
Returns
----------
All calculations from every performance metric
"""
#to initialize PerformanceMetrics and exceute all the perf metrics at one go
self.perf_metric_obj = PerformanceMetrics(self)
self.perf_metric_obj.execute_all_perf(n_threads=n_threads, seed = seed, eval_pbar=eval_pbar)
#bring status bar to full after all perf metrics have been ran
eval_pbar.update(1)
#if calibration_curve function has been run, then set status to True
if self.perf_metric_obj.result["calibration_curve"] is None:
self.evaluate_status_cali = False
else:
self.evaluate_status_cali = True
#if perf_dynamic function has been run, then set status to True
if self.perf_metric_obj.result['perf_dynamic'] is None:
self.evaluate_status_perf_dynamics = False
else:
self.evaluate_status_perf_dynamics = True
def _compute_fairness(self, n_threads, seed, eval_pbar):
"""
Computes all the fairness metrics together with their confidence intervals & the self.fair_conclusion for every protected variable
Parameters
-----------
n_threads : int
Number of currently active threads of a job
seed : int
Used to initialize the random number generator.
eval_pbar : tqdm object
Progress bar
Returns
----------
All calculations from every fairness metric
"""
#to initialize FairnessMetrics and exceute all the fair metrics at one go
self.fair_metric_obj = FairnessMetrics(self)
self.fair_metric_obj.execute_all_fair(n_threads=n_threads, seed = seed, eval_pbar=eval_pbar)
#bring status bar to full after all fair metrics have been ran
eval_pbar.update(1)
for i in self.model_params[0].p_var:
for j in self._use_case_metrics['fair']:
#if user provides fair metric value input value for each protected variable
if self.fairness_metric_value_input is not None :
if i in self.fairness_metric_value_input.keys():
if j in self.fairness_metric_value_input[i].keys():
self.fair_metric_obj.result[i]["fair_metric_values"][j]= (self.fairness_metric_value_input[i][j], self.fair_metric_obj.result[i]["fair_metric_values"][j][1], self.fair_metric_obj.result[i]["fair_metric_values"][j][2] )
msg = "{} value for {} is overwritten by user input, CI and privileged metric value may be inconsistent."
msg = msg.format(FairnessMetrics.map_fair_metric_to_group[j][0], i)
warnings.warn(msg)
def compile(self, skip_tradeoff_flag=0, skip_feature_imp_flag=0, n_threads=1):
"""
Runs the evaluation function together with the trade-off and feature importance sections and saves all the results to a JSON file locally.
Parameters
-------------
skip_tradeoff_flag : int, default=0
Skip running tradeoff function if it is 1.
skip_feature_imp_flag : int, default=0
Skip running feature importance function if it is 1.
n_threads : int, default=1
Number of currently active threads of a job
Returns
----------
Prints messages for the status of evaluate and tradeoff and generates model artifact
"""
#check if evaluate hasn't run, only run if haven't
if self.evaluate_status == 0:
self.evaluate(visualize=False, output=False, n_threads=n_threads)
#printout
print('{:40s}{:<10}'.format('Running evaluate','done'))
print('{:5s}{:35s}{:<10}'.format('','performance measures','done'))
print('{:5s}{:35s}{:<10}'.format('','bias detection','done'))
if self.evaluate_status_cali:
print('{:5s}{:35s}{:<10}'.format('','probability calibration','done'))
else:
print('{:5s}{:35s}{:<10}'.format('','probability calibration','skipped'))
if self.evaluate_status_perf_dynamics:
print('{:5s}{:35s}{:<10}'.format('','performance dynamics','done'))
else:
print('{:5s}{:35s}{:<10}'.format('','performance dynamics','skipped'))
#check if user wants to skip tradeoff, if yes tradeoff will not run, print skipped
if self.tradeoff_status == -1:
print('{:40s}{:<10}'.format('Running tradeoff','skipped'))
#check if tradeoff hasn't run and user does not want to skip, only run if haven't
elif self.tradeoff_status == 0 and skip_tradeoff_flag==0:
try :
self.tradeoff(output=False, n_threads=n_threads)
#if user wants to skip tradeoff, print skipped
if self.tradeoff_status == -1 :
print('{:40s}{:<10}'.format('Running tradeoff','skipped'))
#set status to 1 after evaluate has run
elif self.tradeoff_status == 1 :
print('{:40s}{:<10}'.format('Running tradeoff','done'))
except :
print('{:40s}{:<10}'.format('Running tradeoff','skipped'))
#check if tradeoff hasn't run and user wants to skip, print skipped
elif self.tradeoff_status == 0 and skip_tradeoff_flag==1:
self.tradeoff_status = -1
print('{:40s}{:<10}'.format('Running tradeoff','skipped'))
else:
print('{:40s}{:<10}'.format('Running tradeoff','done'))
#check if user wants to skip feature_importance, if yes feature_importance will not run, print skipped
if self.feature_imp_status_corr:
print('{:40s}{:<10}'.format('Running feature importance','done'))
elif self.feature_imp_status == -1:
print('{:40s}{:<10}'.format('Running feature importance','skipped'))
#check if feature_importance hasn't run and user does not want to skip, only run if haven't
elif self.feature_imp_status == 0 and skip_feature_imp_flag ==0:
try :
self.feature_importance(output=False, n_threads=n_threads)
if self.feature_imp_status == 1:
print('{:40s}{:<10}'.format('Running feature importance','done'))
elif self.feature_imp_status_corr:
print('{:40s}{:<10}'.format('Running feature importance','done'))
else:
print('{:40s}{:<10}'.format('Running feature importance','skipped'))
except:
print('{:40s}{:<10}'.format('Running feature importance','skipped'))
#check if feature_importance hasn't run and user wants to skip, print skipped
elif self.feature_imp_status == 0 and skip_feature_imp_flag ==1:
self.feature_imp_status = -1
print('{:40s}{:<10}'.format('Running feature importance','skipped'))
else:
print('{:40s}{:<10}'.format('Running feature importance','done'))
#check if feature_importance_loo has ran, if not print skipped
if self.feature_imp_status_loo:
print('{:5s}{:35s}{:<10}'.format('','leave-one-out analysis','done'))
else:
print('{:5s}{:35s}{:<10}'.format('','leave-one-out analysis','skipped'))
#check if feature_importance_corr has ran, if not print skipped
if self.feature_imp_status_corr:
print('{:5s}{:35s}{:<10}'.format('','correlation analysis','done'))
else:
print('{:5s}{:35s}{:<10}'.format('','correlation analysis','skipped'))
#run function to generate json model artifact file after all API functions have ran
self._generate_model_artifact()
def tradeoff(self, output=True, n_threads=1, sigma = 0):
"""
Computes the trade-off between performance and fairness over a range of threshold values.
If output = True, run the _print_tradeoff() function.
Parameters
-----------
output : boolean, default=True
If output = True, run the _print_tradeoff() function.
n_threads : int, default=1
Number of currently active threads of a job
sigma : float or int , default = 0
Standard deviation for Gaussian kernel for smoothing the contour lines of primary fairness metric.
When sigma <= 0, smoothing is turn off.
Suggested to try sigma = 3 or above if noisy contours are observed.
"""
#if y_prob is None, skip tradeoff
if self.model_params[0].y_prob is None:
self.tradeoff_status = -1
print("Tradeoff has been skipped due to y_prob")
#if user wants to skip tradeoff, return None
if self.tradeoff_status == -1:
return
#check if tradeoff hasn't run, only run if haven't
elif self.tradeoff_status == 0:
self.sigma = sigma
n_threads = check_multiprocessing(n_threads)
#to show progress bar
tdff_pbar = tqdm(total=100, desc='Tradeoff', bar_format='{l_bar}{bar}')
tdff_pbar.update(5)
sys.stdout.flush()
#initialize tradeoff
self.tradeoff_obj = TradeoffRate(self)
tdff_pbar.update(10)
#run tradeoff
self.tradeoff_obj.compute_tradeoff(n_threads, tdff_pbar)
tdff_pbar.update(100 - tdff_pbar.n)
tdff_pbar.close()
print('', flush=True)
#if after running tradoeff, result is None, print skipped
if self.tradeoff_obj.result == {}:
print(self.tradeoff_obj.msg)
self.tradeoff_status = -1
else:
#set status to 1 after tradeoff has ran
self.tradeoff_status = 1
#if tradeoff has already ran once, just print result
if output and self.tradeoff_status == 1:
self._print_tradeoff()
def feature_importance(self, output=True, n_threads=1):
"""
Trains models using the leave-one-variable-out method for each protected variable and computes the performance and fairness metrics each time to assess the impact of those variables.
If output = True, run the _print_feature_importance() function.
Parameters
------------
output : boolean, default=True
Flag to print out the results of evaluation in the console. This flag will be False if visualize=True.
n_threads : int
Number of currently active threads of a job
Returns
------------
self.feature_imp_status_loo : boolean
Tracks the status of the completion of the leave-one-out analysis step within feature_importance() method to be checked in compile().
self.feature_imp_status : int
Tracks the status of the completion of the feature_importance() method to be checked in compile().
self._compute_correlation()
self._print_feature_importance()
"""
#if feature_imp_status_corr hasn't run
if self.feature_imp_status_corr == False:
self._compute_correlation()
#if user wants to skip feature_importance, return None
if self.feature_imp_status == -1:
self.feature_imp_values = None
return
#check if feature_importance hasn't run, only run if haven't
if self.feature_imp_status == 0:
for k in self.model_params:
x_train = k.x_train
y_train = k.y_train
model_object = k.model_object
x_test = k.x_test
train_op_name = k.train_op_name
predict_op_name = k.predict_op_name
# if model_object is not provided, skip feature_importance
if model_object is None:
self.feature_imp_status = -1
print("Feature importance has been skipped due to model_object")
return
else :
for var_name in [train_op_name, predict_op_name]:
#to check callable functions
try:
callable(getattr(model_object, var_name))
except:
self.feature_imp_status = -1
print("Feature importance has been skipped due to train_op_name/predict_op_name error")
return
#to show progress bar
fimp_pbar = tqdm(total=100, desc='Feature importance', bar_format='{l_bar}{bar}')
fimp_pbar.update(1)
self.feature_imp_values = {}
for h in self.model_params[0].p_var:
self.feature_imp_values[h] = {}
fimp_pbar.update(1)
#if evaluate_status = 0, run evaluate() first
if self.evaluate_status == 0:
self.evaluate(output=False)
#if user wants to skip feature_importance, return None
if self.feature_imp_status == -1:
self.feature_imp_values = None
return
fimp_pbar.update(1)
num_p_var = len(self.model_params[0].p_var)
n_threads = check_multiprocessing(n_threads)
max_workers = min(n_threads, num_p_var)
#if require to run with 1 thread, will skip deepcopy
worker_progress = 80/num_p_var
if max_workers >=1:
threads = []
with concurrent.futures.ThreadPoolExecutor(max_workers = max_workers) as executor:
fimp_pbar.update(5)
#iterate through protected variables to drop one by one as part of leave-one-out
for i in self.model_params[0].p_var:
if max_workers == 1:
use_case_object = self
else:
use_case_object = deepcopy(self)
threads.append(executor.submit(Fairness._feature_imp_loo, p_variable=i, use_case_object=use_case_object, fimp_pbar=fimp_pbar, worker_progress=worker_progress ))
for thread in threads:
fimp_pbar.update(round(8/num_p_var, 2))
if thread.result() is None:
self.feature_imp_status = -1
return
else:
for removed_pvar, values in thread.result().items():
for pvar, v in values.items():
self.feature_imp_values[pvar][removed_pvar] = v
#change flag after feature_importance has finished running
self.feature_imp_status_loo = True
self.feature_imp_status = 1
fimp_pbar.update(2)
fimp_pbar.update(100.0-fimp_pbar.n)
fimp_pbar.close()
print('', flush=True)
#if feature_importance has already ran once, just print result
if output == True:
self._print_feature_importance()
def _feature_imp_loo(p_variable, use_case_object, fimp_pbar, worker_progress):
"""
Maps each thread's work for feature_importance()
Parameters
------------
p_variable : str
Name of protected variable
use_case_object : object
Initialised use case object
fimp_pbar :
worker_progress :
Returns
------------
dictionary of loo_result of each p_var
"""
#get baseline values
baseline_perf_values = use_case_object.perf_metric_obj.result.get("perf_metric_values").get(use_case_object.perf_metric_name)[0]
baseline_fair_values = use_case_object.fair_metric_obj.result.get(p_variable).get("fair_metric_values").get(use_case_object.fair_metric_name)[0]
baseline_fairness_conclusion = use_case_object.fair_conclusion.get(p_variable).get("fairness_conclusion")
#toDel#baseline_values = [baseline_perf_values, baseline_fair_values, baseline_fairness_conclusion]
# empty y_pred_new list to be appended
y_pred_new = []
loo_result = {}
# loop through model_params
for k in range(len(use_case_object.model_params)):
## for uplift model type --> two model container --> need to train two models
## when model param len =2, then it is uplift model
p_var = use_case_object.model_params[k].p_var
x_train = use_case_object.model_params[k].x_train
y_train = use_case_object.model_params[k].y_train
model_object = use_case_object.model_params[k].model_object
x_test = use_case_object.model_params[k].x_test
y_pred = use_case_object.model_params[k].y_pred
y_prob = use_case_object.model_params[k].y_prob
pos_label = use_case_object.model_params[k].pos_label
neg_label = use_case_object.model_params[k].neg_label
train_op = getattr(model_object, use_case_object.model_params[k].train_op_name)
predict_op = getattr(model_object, use_case_object.model_params[k].predict_op_name)
#show progress bar
fimp_pbar.update(round(worker_progress*0.9/len(use_case_object.model_params), 2))
try:
#check if x_train is a dataframe
if isinstance(x_train, pd.DataFrame):
#drop protected variable and train model
pre_loo_model_obj = train_op(x_train.drop(columns=[p_variable]), y_train) # train_op_name is string, need to use getattr[] to get the attribute?
else :
pre_loo_model_obj = train_op(x_train, y_train, p_variable) # train_op to handle drop column i inside train_op
# Predict and compute performance Metrics (PerformanceMetrics.result.balanced_acc)
except:
#else print skipped and return None
print("LOO analysis is skipped for [", p_variable, "] due to x_train/y_train error")
use_case_object.feature_imp_status = -1
return None
try:
#check if x_test is a dataframe
if isinstance(x_test, pd.DataFrame):
#drop protected variable and predict
pre_y_pred_new = np.array(predict_op(x_test.drop(columns=[p_variable])))
else :
pre_y_pred_new = predict_op(x_train, y_train, p_variable) # train_op to handle drop column i inside train_op
except:
#else print skipped and return None
print("LOO analysis is skipped for [", p_variable, "] due to x_test/y_test error")
use_case_object.feature_imp_status = -1
return None
fimp_pbar.update(round(worker_progress*0.02, 2))
pre_y_pred_new = predict_op(x_test.drop(columns=[p_variable]))
#to ensure labels and datatype for predicted values are correct before running metrics
if len(pre_y_pred_new.shape) == 1 and pre_y_pred_new.dtype.kind in ['i','O','U']:
pre_y_pred_new, pos_label2 = check_label(pre_y_pred_new, pos_label, neg_label)
else:
pre_y_pred_new = pre_y_pred_new.astype(np.float64)
y_pred_new.append(pre_y_pred_new)
#run performance and fairness evaluation only for primary performance and fair metric
loo_perf_value = use_case_object.perf_metric_obj.translate_metric(use_case_object.perf_metric_name, y_pred_new=y_pred_new)
##to find deltas (removed - baseline) for primary perf metric
deltas_perf = loo_perf_value - baseline_perf_values #toDel#baseline_values[0]
# to iterate through each protected variable for each protected variable that is being dropped
for j in use_case_object.model_params[0].p_var:
fimp_pbar.update(round(worker_progress*0.08/len(p_var), 2))
use_case_object.fair_metric_obj.curr_p_var = j #will this work under multithreading? will not work, should changes to a copy
## get loo_perf_value,loo_fair_values
loo_fair_value, loo_priv_m_v = use_case_object.fair_metric_obj.translate_metric(use_case_object.fair_metric_name, y_pred_new=y_pred_new)[:2]
##to find deltas (removed - baseline) for each protected variable in iteration for primary fair metric
#toDel#deltas_fair = loo_fair_value - baseline_values[1]
baseline_fair_values_j = use_case_object.fair_metric_obj.result.get(j).get("fair_metric_values").get(use_case_object.fair_metric_name)[0]
baseline_fairness_conclusion_j = use_case_object.fair_conclusion.get(j).get("fairness_conclusion")
deltas_fair = loo_fair_value - baseline_fair_values_j
##fairness fair_conclusion
loo_fairness_conclusion = use_case_object._fair_conclude(j, priv_m_v=loo_priv_m_v, value=loo_fair_value)
#toDel#delta_conclusion = baseline_values[2] + " to " + loo_fairness_conclusion["fairness_conclusion"]
delta_conclusion = baseline_fairness_conclusion_j + " to " + loo_fairness_conclusion["fairness_conclusion"]
##suggestion
#if metric used is parity based, means it will either be more than 0 or less than 0. So set n = 0 to see the difference.
if FairnessMetrics.map_fair_metric_to_group.get(use_case_object.fair_metric_name)[2] == 'parity':
n = 0
#if metric used is ratio based, means it will either be more than 1 or less than 1. So set n = 1 to see the difference.
else:
n = 1
if abs(loo_fair_value - n) < abs(baseline_fair_values_j - n):
if PerformanceMetrics.map_perf_metric_to_group.get(use_case_object.perf_metric_name)[1] == "regression" :
if deltas_perf <= 0:
suggestion = 'exclude'
else:
suggestion = 'examine further'
else :
if deltas_perf >= 0:
suggestion = 'exclude'
else:
suggestion = 'examine further'
delta_conclusion += " (+)"
elif abs(loo_fair_value - n) > abs(baseline_fair_values_j - n):
if PerformanceMetrics.map_perf_metric_to_group.get(use_case_object.perf_metric_name)[1] == "regression" :
if deltas_perf >= 0:
suggestion = 'include'
else:
suggestion = 'examine further'
else:
if deltas_perf <= 0:
suggestion = 'include'
else:
suggestion = 'examine further'
delta_conclusion += " (-)"
else:
if PerformanceMetrics.map_perf_metric_to_group.get(use_case_object.perf_metric_name)[1] == "regression" :
if deltas_perf < 0:
suggestion = 'exclude'
elif deltas_perf > 0:
suggestion = 'include'
else:
suggestion = 'exclude'
else:
if deltas_perf > 0:
suggestion = 'exclude'
elif deltas_perf < 0:
suggestion = 'include'
else:
suggestion = 'exclude'
loo_result[j] = [deltas_perf, deltas_fair, delta_conclusion, suggestion]
return {p_variable: loo_result}
def _compute_correlation(self):
"""
Computes the top-20 correlation matrix inclusive of the protected variables
"""
try :
if isinstance(self.model_params[0].x_test, str):
self.feature_imp_status_corr = False
return
if isinstance(self.model_params[0].feature_imp, pd.DataFrame) and isinstance(self.model_params[0].x_test, pd.DataFrame):
#sort feature_imp dataframe by values (descending)
sorted_dataframe = self.model_params[0].feature_imp.sort_values(by=self.model_params[0].feature_imp.columns[1], ascending=False)
#extract n_features and pass into array
feature_cols = np.array(sorted_dataframe.iloc[:,0])
p_var_cols = np.array(self.model_params[0].p_var)
feature_cols = [col for col in feature_cols if col not in p_var_cols]
feature_cols = feature_cols[:20-len(p_var_cols)]
#feature_columns value from x_test
feature_columns = self.model_params[0].x_test[feature_cols]
#p_var_columns value from protected_features_cols
p_var_columns = self.model_params[0].x_test[p_var_cols]
#create final columns and apply corr()
df = pd.concat([feature_columns, p_var_columns], axis=1).corr()
self.correlation_output = {"feature_names":df.columns.values, "corr_values":df.values}
#return correlation_output as dataframe
self.feature_imp_status_corr = True
else:
#extract n_features and pass into array
feature_cols = np.array(self.model_params[0].x_test.columns[:20])
p_var_cols = np.array(self.model_params[0].p_var)
feature_cols = [col for col in feature_cols if col not in p_var_cols]
feature_cols = feature_cols[:20-len(p_var_cols)]
#feature_columns value from x_test
feature_columns = self.model_params[0].x_test[feature_cols]
#p_var_columns value from protected_features_cols
p_var_columns = self.model_params[0].x_test[p_var_cols]
#create final columns and apply corr()
df = pd.concat([feature_columns, p_var_columns], axis=1).corr()
self.correlation_output = {"feature_names":df.columns.values, "corr_values":df.values}
self.feature_imp_status_corr = True
except:
self.feature_imp_status_corr = False
def _print_evaluate(self):
"""
Formats the results of the evaluate() method before printing to console.
"""
if ("_rejection_inference_flag" in dir(self)):
if True in self._rejection_inference_flag.values():
print("Special Parameters")
print("Rejection Inference = True")
name = []
for i in self.model_params[0].p_grp.keys():
name += [i + " - " + str(self.model_params[0].p_grp.get(i)[0])]
str1 = ", ".join(
str(e) for e in list(set(filter(lambda a: a != self.model_params[0].p_grp.get(i)[0],
self.model_params[0].protected_features_cols[i]))))
name += [i + " - " + str1]
titles = ['Group', 'Base Rate', 'Number of Rejected Applicants']
a = []
for i in self.spl_params['base_default_rate'].keys():
a += self.spl_params['base_default_rate'].get(i)
b = []
for i in self.spl_params['num_applicants'].keys():
b += self.spl_params['num_applicants'].get(i)
data = [titles] + list(zip(name, a, b))
for i, d in enumerate(data):
line = '| '.join(str(x).ljust(16) for x in d)
print(line)
if i == 0:
print('-' * len(line))
print("\n")
elif hasattr(self, 'spl_params') and ('revenue' in self.spl_params or 'treatment_cost' in self.spl_params):
print("Special Parameters")
titles = ['Revenue', 'Treatment Cost']
a = [self.spl_params['revenue']]
b = [self.spl_params['treatment_cost']]
data = [titles] + list(zip(a, b))
for i, d in enumerate(data):
line = '| '.join(str(x).ljust(16) for x in d)
print(line)
if i == 0:
print('-' * len(line))
print("\n")
if PerformanceMetrics.map_perf_metric_to_group.get(self.perf_metric_name)[1] != "regression":
print("Class Distribution")
if self.model_params[0].model_type != "uplift":
print("{0:<45s}{1:>29.{decimal_pts}f}%".format("\t" + "pos_label",
self.perf_metric_obj.result.get("class_distribution").get("pos_label") * 100, decimal_pts=self.decimals))
print("{0:<45s}{1:>29.{decimal_pts}f}%".format("\t" + "neg_label",
self.perf_metric_obj.result.get("class_distribution").get("neg_label") * 100, decimal_pts=self.decimals))
else:
print("{0:<45s}{1:>29.{decimal_pts}f}%".format("\t" + "CN",
self.perf_metric_obj.result.get("class_distribution").get("CN") * 100, decimal_pts=self.decimals))
print("{0:<45s}{1:>29.{decimal_pts}f}%".format("\t" + "TN",
self.perf_metric_obj.result.get("class_distribution").get("TN") * 100, decimal_pts=self.decimals))
print("{0:<45s}{1:>29.{decimal_pts}f}%".format("\t" + "CR",
self.perf_metric_obj.result.get("class_distribution").get("CR") * 100, decimal_pts=self.decimals))
print("{0:<45s}{1:>29.{decimal_pts}f}%".format("\t" + "TR",
self.perf_metric_obj.result.get("class_distribution").get("TR") * 100, decimal_pts=self.decimals))
else:
pass
print("\n")
if self.model_params[0].sample_weight is not None:
print("Performance Metrics (Sample Weight = True)")
else:
print("Performance Metrics")
def print_metric_value(metric, fair):
v2 = " +/- "
if fair == 0:
if any(map(lambda x: x is None, self.perf_metric_obj.result.get("perf_metric_values")[metric])):
self.perf_metric_obj.result.get("perf_metric_values")[metric] = tuple(
'NA' if x is None else x for x in self.perf_metric_obj.result.get("perf_metric_values")[metric])
m = "\t" + PerformanceMetrics.map_perf_metric_to_group.get(metric)[0]
if self.perf_metric_obj.result.get("perf_metric_values").get(metric)[0] == "NA":
v1 = "NA"
v3 = "NA"
else:
v1 = "{:>0.{decimal_pts}f}".format(self.perf_metric_obj.result.get("perf_metric_values").get(metric)[0], decimal_pts=self.decimals)
v3 = "{:>0.{decimal_pts}f}".format(self.perf_metric_obj.result.get("perf_metric_values").get(metric)[1], decimal_pts=self.decimals)
else:
if any(map(lambda x: x is None, self.fair_metric_obj.result.get(i_var).get("fair_metric_values")[metric])):
self.fair_metric_obj.result.get(i_var).get("fair_metric_values")[metric] = tuple('NA' if x is None else x for x in self.fair_metric_obj.result.get(i_var).get("fair_metric_values")[metric])
m = "\t" + FairnessMetrics.map_fair_metric_to_group.get(metric)[0]
if self.fair_metric_obj.result.get(i_var).get("fair_metric_values")[metric][0] == "NA":
v1 = "NA"
v3 = "NA"
else :
v1 = "{:>0.{decimal_pts}f}".format(self.fair_metric_obj.result.get(i_var).get("fair_metric_values")[metric][0], decimal_pts=self.decimals)
v3 = "{:>0.{decimal_pts}f}".format(self.fair_metric_obj.result.get(i_var).get("fair_metric_values")[metric][2], decimal_pts=self.decimals)
if (v1 == "NA") & (v3 == "NA"):
v = v1
else:
v = v1 + v2 + v3
if self.perf_metric_name == metric or self.fair_metric_name == metric:
print("\033[1m" + "{0:<45s}{1:>30s}".format(m, v) + "\033[0m")
else:
print("{0:<45s}{1:>30s}".format(m, v))
for k in self._use_case_metrics["perf"]:
print_metric_value(k, 0)
if self.perf_metric_obj.result.get("calibration_curve") is None:
pass
else:
print("\n")
print("Probability Calibration")
m = "\tBrier Loss Score"
v = "{:.{decimal_pts}f}".format(self.perf_metric_obj.result.get("calibration_curve").get("score"),
decimal_pts=self.decimals)
print("{0:<45s}{1:>30s}".format(m, v))
print("\n")
if self.fair_metric_input == 'auto':
print('Primary Fairness Metric Suggestion')
print('\t{}'.format(FairnessMetrics.map_fair_metric_to_group.get(self.fair_metric_name)[0]))
print('based on')
print('\tfair_priority = {}'.format(self.fair_priority))
print('\tfair_concern = {}'.format(self.fair_concern))
print('\tfair_impact = {}'.format(self.fair_impact))
print('\n')
for i, i_var in enumerate(self.model_params[0].p_var):
p_len = len(str(i + 1) + ": " + i_var)
print("-" * 35 + str(i + 1) + ": " + i_var.title() + "-" * int((45 - p_len)))
print("Value Distribution")
print("{:<45s}{:>29.{decimal_pts}f}%".format('\tPrivileged Group',
self.fair_metric_obj.result.get(i_var).get(
"feature_distribution").get("privileged_group") * 100,
decimal_pts=self.decimals))
print("{:<45s}{:>29.{decimal_pts}f}%".format('\tUnprivileged Group',
self.fair_metric_obj.result.get(i_var).get(
"feature_distribution").get("unprivileged_group") * 100,
decimal_pts=self.decimals))
print("\n")
if self.model_params[0].sample_weight is not None:
print("Fairness Metrics (Sample Weight = True)")
else:
print("Fairness Metrics")
for h in self._use_case_metrics["fair"]:
print_metric_value(h, 1)
print("\n")
print("Fairness Conclusion")
m = "\tOutcome ({})".format(FairnessMetrics.map_fair_metric_to_group.get(self.fair_metric_name)[0])
v = self.fair_conclusion.get(i_var).get("fairness_conclusion").title()
print("{0:<55s}{1:>20s}*".format(m, v))
m = "\tFairness Threshold"
if self.fair_threshold > 0 and self.fair_threshold < 1:
v = str(self.fair_threshold)
elif self.fair_threshold > 1 and self.fair_threshold < 100:
v = str(self.fair_threshold) + "%"
print("{0:<45s}{1:>30s}".format(m, v))
print("\n")
print('* The outcome is calculated based on your inputs and is provided for informational purposes only. Should you decide to act upon the information herein, you do so at your own risk and Veritas Toolkit will not be liable or responsible in any way. ')
sys.stdout.flush()
def _print_tradeoff(self):
"""
Formats the results of the tradeoff() method before printing to console.
"""
i = 1
p_var = self.model_params[0].p_var
for p_variable in p_var:
#title
title_str = " "+ str(i) + ". " + p_variable +" "
if len(title_str)%2 == 1:
title_str+=" "
line_str = int((72-len(title_str))/2) * "-"
print(line_str + title_str +line_str)
print("Performance versus Fairness Trade-Off")
#Single Threshold
print("\t Single Threshold")
print("\t\t{:35s}{:>20.{decimal_pts}f}".format("Privileged/Unprivileged Threshold",
self.tradeoff_obj.result[p_variable]["max_perf_single_th"][
0], decimal_pts=self.decimals))
print("\t\t{:35s}{:>20.{decimal_pts}f}".format(
str("Best " + self.tradeoff_obj.result[p_variable]["perf_metric_name"] + "*"),
self.tradeoff_obj.result[p_variable]["max_perf_single_th"][2], decimal_pts=self.decimals))
# Separated Thresholds
print("\t Separated Thresholds")
print("\t\t{:35s}{:>20.{decimal_pts}f}".format("Privileged Threshold",
self.tradeoff_obj.result[p_variable]["max_perf_point"][0],
decimal_pts=self.decimals))
print("\t\t{:35s}{:>20.{decimal_pts}f}".format("Unprivileged Threshold",
self.tradeoff_obj.result[p_variable]["max_perf_point"][1],
decimal_pts=self.decimals))
print("\t\t{:35s}{:>20.{decimal_pts}f}".format(
str("Best " + self.tradeoff_obj.result[p_variable]["perf_metric_name"] + "*"),
self.tradeoff_obj.result[p_variable]["max_perf_point"][2], decimal_pts=self.decimals))
# Separated Thresholds under Neutral Fairness (0.01)
print("\t Separated Thresholds under Neutral Fairness ({})".format(self.fair_neutral_tolerance))
print("\t\t{:35s}{:>20.{decimal_pts}f}".format("Privileged Threshold", self.tradeoff_obj.result[p_variable][
"max_perf_neutral_fair"][0], decimal_pts=self.decimals))
print("\t\t{:35s}{:>20.{decimal_pts}f}".format("Unprivileged Threshold",
self.tradeoff_obj.result[p_variable][
"max_perf_neutral_fair"][1], decimal_pts=self.decimals))
print("\t\t{:35s}{:>20.{decimal_pts}f}".format(
str("Best " + self.tradeoff_obj.result[p_variable]["perf_metric_name"] + "*"),
self.tradeoff_obj.result[p_variable]["max_perf_neutral_fair"][2], decimal_pts=self.decimals))
print("\t\t*estimated by approximation, subject to the resolution of mesh grid")
print("")
i+=1
sys.stdout.flush()
def _print_feature_importance(self):
"""
Formats the results of the feature_importance() method before printing to console.
"""
for i, i_var in enumerate(self.model_params[0].p_var):
print("\n")
p_len = len(str(i + 1) + ": Fairness on " + i_var)
print("-" * 50 + str(i + 1) + ": Fairness on " + i_var.title() + "-" * int((116 - 50 - p_len)))
print()
print("-" * 116)
print("|{:<30}|{:<20}|{:<20}|{:<20}|{:<20}|".format("Removed Protected Variable", self.perf_metric_name,
self.fair_metric_name, "Fairness Conclusion",
"Suggestion"))
print("-" * 116)
for j in self.model_params[0].p_var:
col1, col2, col3, col4 = self.feature_imp_values[i_var][j]
print("|{:<30}|{:<20.{decimal_pts}f}|{:<20.{decimal_pts}f}|{:<20}|{:<20}|".format(j, col1, col2, col3, (col4).title(), decimal_pts=self.decimals))
print("-" * 116)
print()
if self.feature_imp_status_corr == False:
print("Correlation matrix skippped")
else:
return self.correlation_output
sys.stdout.flush()
def _generate_model_artifact(self):
"""
Generates the JSON file to be saved locally at the end of compile()
"""
#aggregate the results into model artifact
print('{:40s}'.format('Generating model artifact'), end='')
artifact = {}
# Section 1 - fairness_init
#write results to fairness_init
fairness_init = {}
fairness_init["fair_metric_name_input"] = self.fair_metric_input
fairness_init["fair_metric_name"] = FairnessMetrics.map_fair_metric_to_group.get(self.fair_metric_name)[0]
fairness_init["perf_metric_name"] = PerformanceMetrics.map_perf_metric_to_group.get(self.perf_metric_name)[0]
fairness_init["protected_features"] = self.model_params[0].p_var
if FairnessMetrics.map_fair_metric_to_group.get(self.fair_metric_name)[1] != "regression":
fairness_init["fair_priority"] = self.fair_priority
fairness_init["fair_concern"] = self.fair_concern
fairness_init["fair_impact"] = self.fair_impact
if self.model_params[0].model_type == "uplift" or self.model_params[0].model_type == "credit":
fairness_init["special_params"] = self.spl_params #num_applicants and base_default_rate for creditscoring, treatment_cost, revenue and selection_threshold for customermarketing
fairness_init["fair_threshold_input"] = self.fair_threshold_input
fairness_init["fair_neutral_tolerance"] = self.fair_neutral_tolerance
model_type = self.model_params[0].model_type
#add fairness_init results to artifact
artifact["fairness_init"] = fairness_init
perf_result = deepcopy(self.perf_metric_obj.result)
perf_vals_wth_metric_names = {}
for key in self.perf_metric_obj.result["perf_metric_values"].keys():
if key in PerformanceMetrics.map_perf_metric_to_group.keys():
perf_vals_wth_metric_names[PerformanceMetrics.map_perf_metric_to_group.get(key)[0]] = \
self.perf_metric_obj.result["perf_metric_values"][key]
perf_result["perf_metric_values"] = perf_vals_wth_metric_names
artifact = {**artifact, **(perf_result)}
artifact["correlation_matrix"] = self.correlation_output
# above part will only be tested when Credit Scoring and Customer Marketing classes can be run
p_var = self.model_params[0].p_var
#write results to features_dict
features_dict = {}
for pvar in p_var:
dic_h = {}
dic_h["fair_threshold"] = self.fair_conclusion.get(pvar).get("threshold")
dic_h["privileged"] = self.model_params[0].p_grp[pvar]
dic_t = {}
dic_t["fairness_conclusion"] = self.fair_conclusion.get(pvar).get("fairness_conclusion")
dic_t["tradeoff"] = None
if self.tradeoff_status != -1:
dic_t["tradeoff"] = self.tradeoff_obj.result.get(pvar)
dic_t["feature_importance"] = None
if self.feature_imp_status != -1:
dic_t["feature_importance"] = self.feature_imp_values.get(pvar)
fair_vals_wth_metric_names = {}
for key in self.fair_metric_obj.result.get(pvar)['fair_metric_values'].keys():
if key in FairnessMetrics.map_fair_metric_to_group.keys():
fair_vals_wth_metric_names[FairnessMetrics.map_fair_metric_to_group.get(key)[0]] = \
self.fair_metric_obj.result.get(pvar)['fair_metric_values'][key]
fair_result = deepcopy(self.fair_metric_obj.result.get(pvar))
fair_result['fair_metric_values'] = fair_vals_wth_metric_names
for k, v in fair_result['fair_metric_values'].items():
fair_result['fair_metric_values'][k] = [v[0], v[2]]
features_dict[str(pvar)] = {**dic_h, **fair_result, **dic_t}
#add features_dict results to artifact
artifact["features"] = features_dict
print('done')
model_name = (self.model_params[0].model_name +"_").replace(" ","_")
filename = "model_artifact_" + model_name + datetime.datetime.today().strftime('%Y%m%d_%H%M') + ".json"
self.artifact = artifact
artifactJson = json.dumps(artifact, cls=NpEncoder)
jsonFile = open(filename, "w")
jsonFile.write(artifactJson)
jsonFile.close()
print("Saved model artifact to " + filename)
def _fairness_widget(self):
"""
Runs to pop up a widget to visualize the evaluation output
"""
try :
if get_ipython().__class__.__name__ == 'ZMQInteractiveShell':
display(HTML("""
<style>
.dropdown_clr {
background-color: #E2F0D9;
}
.fair_green{
width:auto;
background-color:#E2F0D9;
}
.perf_blue {
width:auto;
background-color:#DEEBF7;
}
</style>
"""))
result_fairness = self.fair_metric_obj.result
option_p_var = self.fair_metric_obj.p_var[0]
options = []
for i in self.fair_metric_obj.p_var[0]:
options += [i + " (privileged group = " + str(self.model_params[0].p_grp.get(i))+ ")"]
model_type = self.model_params[0].model_type.title()
if PerformanceMetrics.map_perf_metric_to_group.get(self.perf_metric_name)[1] != "regression":
model_concern = self.fair_concern.title()
model_priority = self.fair_priority.title()
model_impact = self.fair_impact.title()
else:
model_concern = "N/A"
model_priority = "N/A"
model_impact = "N/A"
model_name = self.model_params[0].model_name.title()
html_pink = '<div style="color:black; text-align:left; padding-left:5px; background-color:#FBE5D6; font-size:12px">{}</div>'
html_grey_true = '<div style="color:black; text-align:center; background-color:#AEAEB2; font-size:12px">{}</div>'
html_grey_false = '<div style="color:#8E8E93; text-align:center; background-color:#E5E5EA; font-size:12px">{}</div>'
html_yellow_left = '<div style="color:black; text-align:left; padding-left:5px; background-color:#FFF2CC; font-size:12px">{}</div>'
html_yellow_right = '<div style="color:black; text-align:right; padding-right:5px; background-color:#FFF2CC; font-size:12px">{}</div>'
html_model_type = widgets.HTML(value=html_yellow_left.format('Model Type: ' + model_type),
layout=Layout(display="flex", width='30%'))
html_model_name = widgets.HTML(value=html_yellow_right.format('Model Name: ' + model_name),
layout=Layout(display="flex", justify_content="flex-end", width='45%'))
dropdown_protected_feature = widgets.Dropdown(options=options, description=r'Protected Feature:',
layout=Layout(display="flex", justify_content="flex-start",
width='62.5%', padding='0px 0px 0px 5px'),
style=dict(description_width='initial'))
dropdown_protected_feature.add_class("dropdown_clr")
html_model_priority = widgets.HTML(value=html_pink.format("Priority: " + model_priority),
layout=Layout(display="flex", width='12.5%'))
html_model_impact = widgets.HTML(value=html_pink.format("Impact: " + model_impact),
layout=Layout(display="flex", width='12.5%'))
html_model_concern = widgets.HTML(value=html_pink.format('Concern: ' + model_concern),
layout=Layout(display="flex", width='12.5%'))
if (self.model_params[0].sample_weight is not None):
sw = html_grey_true
else:
sw = html_grey_false
if "_rejection_inference_flag" in dir(self):
if True in self._rejection_inference_flag.values():
ri = html_grey_true
else:
ri = html_grey_false
elif hasattr(self, 'spl_params') and model_type == "Uplift":
if None not in self.spl_params.values():
ri = html_grey_true
else:
ri = html_grey_false
else:
ri = html_grey_false
html_sample_weight = widgets.HTML(value=sw.format('Sample Weight'),
layout=Layout(display="flex", justify_content="center", width='12.5%'))
if model_type == "Credit":
html_rej_infer = widgets.HTML(value=ri.format('Rejection Inference'),
layout=Layout(display="flex", justify_content="center", width='12.5%'))
elif model_type == "Default" or PerformanceMetrics.map_perf_metric_to_group.get(self.perf_metric_name)[1] == "regression":
regression = '<div style="color:#E5E5EA; text-align:center; background-color:#E5E5EA; font-size:12px">{}</div>'
html_rej_infer = widgets.HTML(value=regression.format('N/A'),
layout=Layout(display="flex", justify_content="center", width='12.5%'))
elif PerformanceMetrics.map_perf_metric_to_group.get(self.perf_metric_name)[1] != "regression":
html_rej_infer = widgets.HTML(value=ri.format('Revenue & Cost'),
layout=Layout(display="flex", justify_content="center", width='12.5%'))
html_fair_italics = '<div style="color:black; text-align:left; padding-left:5px; font-style: italic;font-weight: bold;font-size:14px">{}</div>'
html_fair_bold = '<div style="color:black; text-align:center;font-weight: bold;font-size:20px">{}</div>'
html_fair_bold_red = '<div style="color:#C41E3A; text-align:center; font-weight:bold; font-size:20px">{}</div>'
html_fair_bold_green = '<div style="color:#228B22; text-align:center; font-weight:bold; font-size:20px">{}</div>'
html_fair_small = '<div style="color:black; text-align:left; padding-left:25px; font-size:12px">{}</div>'
html_fair_metric = '<div style="color:black; text-align:right; font-weight: bold;font-size:20px">{}</div>'
html_fair_ci = '<div style="color:black; text-align:left; padding-left:5px; font-size:15px">{}</div>'
chosen_p_v = option_p_var[0]
fair1 = widgets.HTML(value=html_fair_italics.format('Fairness'), layout=Layout(display="flex", margin='0'))
fair2_1 = widgets.HTML(value=html_fair_small.format('Metric'),
layout=Layout(display="flex", justify_content="flex-start", margin='0'))
fair2_2 = widgets.HTML(value=html_fair_small.format('Assessment'),
layout=Layout(display="flex", justify_content="flex-start", margin='0'))
fair3_1 = widgets.HTML(
value=html_fair_bold.format(FairnessMetrics.map_fair_metric_to_group.get(self.fair_metric_name)[0]),
layout=Layout(display="flex", justify_content="center", margin='0'))
if self.fair_conclusion.get(chosen_p_v).get("fairness_conclusion") == 'fair':
pattern = html_fair_bold_green
else:
pattern = html_fair_bold_red
fair3_2_v = pattern.format(self.fair_conclusion.get(chosen_p_v).get("fairness_conclusion").title())
fair3_2 = widgets.HTML(value=fair3_2_v,
layout=Layout(display="flex", justify_content="center", margin='0'))
fair4_1 = widgets.HTML(value=html_fair_small.format('Value'),
layout=Layout(display="flex", justify_content="flex-start", margin='0'))
fair4_2 = widgets.HTML(value=html_fair_small.format('Threshold'),
layout=Layout(display="flex", justify_content="flex-start", margin='0'))
v = html_fair_metric.format("{:.{decimal_pts}f}".format(self.fair_metric_obj.result.get(chosen_p_v).get('fair_metric_values').get(self.fair_metric_name)[0], decimal_pts=self.decimals))
fair5_1 = widgets.HTML(value=v,layout=Layout(display="flex", width='50%', justify_content="center", margin='0'))
c = html_fair_ci.format('\xB1 ' + "{:.{decimal_pts}f}".format(self.fair_metric_obj.result.get(chosen_p_v).get('fair_metric_values').get(self.fair_metric_name)[2], decimal_pts=self.decimals))
fair5_1_1 = widgets.HTML(value=c,layout=Layout(display="flex", width='50%', justify_content="center", margin='0'))
t = html_fair_bold.format("{:.{decimal_pts}f}".format(self.fair_conclusion.get(chosen_p_v).get("threshold"), decimal_pts=self.decimals))
fair5_2 = widgets.HTML(value=t,
layout=Layout(display="flex", justify_content="center", margin='0'))
fair5 = HBox([fair5_1, fair5_1_1], layout=Layout(display="flex", justify_content="center"))
box1f = VBox(children=[fair2_1, fair3_1, fair4_1, fair5], layout=Layout(width="66.666%"))
box2f = VBox(children=[fair2_2, fair3_2, fair4_2, fair5_2], layout=Layout(width="66.666%"))
box3f = HBox([box1f, box2f])
box4f = VBox([fair1, box3f], layout=Layout(width="66.666%", margin='5px 5px 5px 0px'))
box4f.add_class("fair_green")
html_perf_italics = '<div style="color:black; text-align:left; padding-left:5px; font-style: italic;font-weight: bold;font-size:14px">{}</div>'
html_perf_bold = '<div style="color:black; text-align:center; font-weight: bold;font-size:20px">{}</div>'
html_perf_small = '<div style="color:black; text-align:left; padding-left:25px; font-size:12px">{}</div>'
html_perf_metric = '<div style="color:black; text-align:right; font-weight: bold;font-size:20px">{}</div>'
html_perf_ci = '<div style="color:black; text-align:left; padding-left:5px;font-size:15px">{}</div>'
perf1 = widgets.HTML(value=html_perf_italics.format('Performance'),
layout=Layout(display="flex", width='33.3333%', margin='0'))
perf2_1 = widgets.HTML(value=html_perf_small.format('Assessment'),
layout=Layout(display="flex", justify_content="flex-start", margin='0'))
perf3_1 = widgets.HTML(
value=html_perf_bold.format(PerformanceMetrics.map_perf_metric_to_group.get(self.perf_metric_name)[0]),
layout=Layout(display="flex", justify_content="flex-start", margin='0'))
perf4_1 = widgets.HTML(value=html_perf_small.format('Value'),
layout=Layout(display="flex", justify_content="flex-start", margin='0'))
v = "{:.{decimal_pts}f}".format(self.perf_metric_obj.result.get('perf_metric_values').get(self.perf_metric_name)[0], decimal_pts=self.decimals)
perf5_1 = widgets.HTML(value=html_perf_metric.format(v),
layout=Layout(display="flex", justify_content="flex-start", width="50%", margin='0'))
c = "{:.{decimal_pts}f}".format(self.perf_metric_obj.result.get('perf_metric_values').get(self.perf_metric_name)[1], decimal_pts=self.decimals)
perf5_1_1 = widgets.HTML(value=html_perf_ci.format('\xB1 ' + c),
layout=Layout(display="flex", justify_content="flex-start", width="50%", margin='0'))
perf5 = HBox([perf5_1, perf5_1_1], layout=Layout(display="flex", justify_content="center"))
box1p = VBox(children=[perf2_1, perf3_1, perf4_1, perf5])
box2p = VBox([perf1, box1p], layout=Layout(width="33.333%", margin='5px 0px 5px 5px'))
box2p.add_class('perf_blue')
metric_box = HBox([box4f, box2p], layout=Layout(width="auto"))
PATH = Path(__file__).parent.parent.joinpath('resources', 'widget')
if model_type != 'Uplift' and PerformanceMetrics.map_perf_metric_to_group.get(self.perf_metric_name)[1] != "regression":
image1 = IPython.display.Image(filename=PATH/"perf_class_jpg.JPG", width=300, height=500)
A = widgets.Image(
value=image1.data,
format='jpg',
width=260
)
image2 = IPython.display.Image(filename=PATH/"fair_class_jpg.JPG", width=300, height=500)
B = widgets.Image(
value=image2.data,
format='jpg',
width=260
)
elif model_type == "Uplift":
image1 = IPython.display.Image(filename=PATH/"perf_uplift_jpg.JPG", width=300, height=500)
A = widgets.Image(
value=image1.data,
format='jpg',
width=260
)
image2 = IPython.display.Image(filename=PATH/"fair_uplift_jpg.JPG", width=300, height=500)
B = widgets.Image(
value=image2.data,
format='jpg',
width=260
)
else:
image1 = IPython.display.Image(filename=PATH/"perf_regression_jpg.JPG", width=300, height=500)
A = widgets.Image(
value=image1.data,
format='jpg',
width=260
)
image2 = IPython.display.Image(filename=PATH/"fair_regression_jpg.JPG", width=300, height=500)
B = widgets.Image(
value=image2.data,
format='jpg',
width=260
)
tab = widgets.Tab([A, B], layout={'width': '32%', 'margin': '15px', 'height': '350px'})
tab.set_title(0, 'Performance Metrics')
tab.set_title(1, 'Fairness Metrics')
plot_output = widgets.Output(layout=Layout(display='flex', align_items='stretch', width="66.6666%"))
def filtering(protected_feature):
global chosen_p_v
chosen_p_v = protected_feature
if self.fair_conclusion.get(chosen_p_v).get("fairness_conclusion") == 'fair':
fair3_2.value = html_fair_bold_green.format(self.fair_conclusion.get(chosen_p_v).get("fairness_conclusion").title())
else:
fair3_2.value = html_fair_bold_red.format(self.fair_conclusion.get(chosen_p_v).get("fairness_conclusion").title())
fair5_1.value = html_fair_metric.format("{:.{decimal_pts}f}".format(
self.fair_metric_obj.result.get(chosen_p_v).get('fair_metric_values').get(self.fair_metric_name)[0],
decimal_pts=self.decimals))
fair5_1_1.value = html_fair_ci.format('\xB1 ' + "{:.{decimal_pts}f}".format(
self.fair_metric_obj.result.get(chosen_p_v).get('fair_metric_values').get(self.fair_metric_name)[2],
decimal_pts=self.decimals))
fair5_2.value = html_fair_bold.format("{:.{decimal_pts}f}".format(self.fair_conclusion.get(chosen_p_v).get("threshold"), decimal_pts=self.decimals))
plot_output.clear_output()
for metric in NewMetric.__subclasses__():
if metric.metric_name in result_fairness[protected_feature]['fair_metric_values'].keys():
del result_fairness[protected_feature]['fair_metric_values'][metric.metric_name]
filtered_data = pd.DataFrame(result_fairness[protected_feature]['fair_metric_values'])
if model_type != 'Uplift' and PerformanceMetrics.map_perf_metric_to_group.get(self.perf_metric_name)[1] != "regression":
filtered_data.loc[0,'disparate_impact'] = filtered_data['disparate_impact'][0] - 1
metrics = list(filtered_data.columns)
values = filtered_data.loc[0].values
th_min = -1*self.fair_conclusion.get(chosen_p_v).get("threshold")
th_max = self.fair_conclusion.get(chosen_p_v).get("threshold")
with plot_output:
fig = plt.figure(figsize=(20, 13), dpi=300)
clrs = ['#C41E3A' if (x == self.fair_metric_name) else '#12239E' for x in metrics]
ax = fig.gca()
idx = [i for i in range(len(values)) if values[i] == None]
metrics = [metrics[i] for i in range(len(metrics)) if i not in idx]
values = [values[i] for i in range(len(values)) if i not in idx]
plt.bar(metrics, values, color=clrs, align='center', width=0.5)
plt.yticks(fontsize=25)
label = []
for i in range(len(metrics)):
if metrics[i] == 'fpr_parity':
label += ["FPR Parity"]
elif metrics[i] == 'tnr_parity':
label += ["TNR Parity"]
elif metrics[i] == 'fnr_parity':
label += ["FNR Parity"]
elif metrics[i] == 'ppv_parity':
label += ["PPV Parity"]
elif metrics[i] == 'npv_parity':
label += ["NPV Parity"]
elif metrics[i] == 'fdr_parity':
label += ["FDR Parity"]
elif metrics[i] == 'for_parity':
label += ["FOR Parity"]
elif metrics[i] == 'mi_independence':
label += ["MI Independence"]
elif metrics[i] == 'mi_sufficiency':
label += ["MI Sufficiency"]
elif metrics[i] == 'mi_separation':
label += ["MI Separation"]
elif metrics[i] == 'disparate_impact':
label += ["*Disparate Impact"]
else:
label += [FairnessMetrics.map_fair_metric_to_group.get(metrics[i])[0]]
wrap_label = []
for l in label:
l_ = l.split(" ")
l_.insert(1, "\n")
wrap_label += [" ".join(l_)]
if model_type == 'Uplift' or PerformanceMetrics.map_perf_metric_to_group.get(self.perf_metric_name)[1] == "regression":
plt.xticks(fontsize=23, ticks=np.arange(len(label)), labels=wrap_label, rotation=0)
else:
plt.xticks(fontsize=23, ticks=np.arange(len(label)), labels=wrap_label, rotation=90)
ax.tick_params(axis="x", direction="in", length=16, width=2)
plt.ylabel("Values", fontsize=25)
plt.title('Fairness Metric Assessment', fontsize=35, y=1.01)
plt.grid(color='black', axis='y', linewidth=0.5)
plt.axhspan(th_min, th_max, color='#228B22', alpha=0.2, lw=0)
if max(values) > th_max:
ymax = max(values)*1.5
else:
ymax = th_max*1.5
if min(values) < th_min:
ymin = min(values)*1.5
else:
ymin = th_min*1.5
plt.ylim([ymin, ymax])
th = mpatches.Patch(color='#228B22', alpha=0.2,label='Threshold Range')
pm = mpatches.Patch(color='#C41E3A', label='Primary Metric')
plt.legend(handles=[pm, th],loc='upper center', bbox_to_anchor=(0.5, -0.2),prop={"size": 25}, ncol=2, borderaxespad = 3)
plt.box(False)
plt.tight_layout()
plt.show()
def dropdown_event_handler(change):
new = change.new.split(" (")[0]
filtering(new)
filtering(option_p_var[0])
dropdown_protected_feature.observe(dropdown_event_handler, names='value')
item_layout = widgets.Layout(margin='0 0 0 0')
input_widgets1 = widgets.HBox([html_model_type, html_sample_weight, html_rej_infer, html_model_name],
layout=item_layout)
input_widgets2 = widgets.HBox([dropdown_protected_feature, html_model_priority, html_model_impact, html_model_concern],
layout=item_layout)
input_widgets = VBox([input_widgets1, input_widgets2])
top_display = widgets.VBox([input_widgets, metric_box])
plot_tab = widgets.HBox([plot_output, tab])
dashboard = widgets.VBox([top_display, plot_tab])
display(dashboard)
print("*The threshold and the values of ratio-based metrics are shifted down by 1.")
else:
print("The widget is only available on Jupyter notebook")
except:
pass
def _set_feature_mask(self):
"""
Sets the feature mask for each protected variable based on its privileged group
Returns
----------
feature_mask : dict of list
Stores the mask array for every protected variable applied on the x_test dataset.
"""
feature_mask = {}
for i in self.model_params[0].p_var:
privileged_grp = self.model_params[0].p_grp.get(i)
feature_mask[i] = self.model_params[0].protected_features_cols[i].isin(privileged_grp)
return feature_mask
def _get_e_lift(self):
"""
Helper function to get empirical lift
Returns
---------
None
"""
return None
def _get_confusion_matrix(self, curr_p_var = None, **kwargs):
"""
Compute confusion matrix
Parameters
-------------
curr_p_var : string, default=None
Current protected variable
Returns
-------
Confusion matrix metrics based on privileged and unprivileged groups
"""
if curr_p_var == None :
return [None] * 4
else :
return [None] * 8
def _base_input_check(self):
"""
Checks if there are conflicting input values
"""
try:
if FairnessMetrics.map_fair_metric_to_group.get(self.fair_metric_name)[2] == 'information':
if self.fair_threshold > 1:
self.err.push('conflict_error', var_name_a=str(self.fair_metric_name), some_string="conflict with fair_threshold", value="", function_name="_base_input_check")
self.err.pop()
except TypeError:
pass
def _model_type_input(self):
"""
Checks if model type input is valid
"""
for i in self.model_params :
#throw an error if model_type provided is not in _model_type_to_metric_lookup
if i.model_type not in self._model_type_to_metric_lookup.keys():
self.err.push('value_error', var_name="model_type", given=str(i.model_type),
expected=list(self._model_type_to_metric_lookup.keys()),
function_name="_model_type_input")
#print any exceptions occured
self.err.pop()
model_size = self._model_type_to_metric_lookup[self.model_params[0].model_type][2]
#check if model_size provided based in model_type provided is accepted as per _model_type_to_metric_lookup
if model_size > len(self.model_params):
self.err.push('length_error', var_name="model_type", given=str(len(self.model_params)),
expected=str(model_size),
function_name="_model_type_input")
#print any exceptions occured
self.err.pop()
#check if model_size is -1. If it is only take first set of model_params values
elif model_size == -1:
self.model_params = self.model_params[:1]
else:
self.model_params = self.model_params[:model_size]
#check if model_type of first model_container is uplift, the model_name of second model_container should be clone. Otherwise, throw an exception
if self.model_params[0].model_type == 'uplift':
if self.model_params[1].model_name != "clone" :
self.err.push('value_error', var_name="model_name", given=str(self.model_params[1].model_name),
expected="clone",
function_name="_model_type_input")
#print any exceptions occured
self.err.pop()
def _fairness_metric_value_input_check(self):
"""
Checks if fairness metric value input is valid
"""
if self.fairness_metric_value_input is not None:
for i in self.fairness_metric_value_input.keys() :
#if user provided keys are not in protected variables, ignore
if i not in self.model_params[0].p_var:
print("The fairness_metric_value_input is not provided properly, so it is ignored")
self.fairness_metric_value_input = None
break
for j in self.fairness_metric_value_input[i].keys():
#if user provided fair metrics are not in fair metrics in use case class, ignore
if j not in self._use_case_metrics['fair']:
print("The fairness_metric_value_input is not provided properly, so it is ignored")
self.fairness_metric_value_input = None
break
def check_fair_metric_name(self):
"""
Checks if primary fairness metric is valid
"""
try:
if FairnessMetrics.map_fair_metric_to_group[self.fair_metric_name][4] == False:
ratio_parity_metrics = []
for i,j in FairnessMetrics.map_fair_metric_to_group.items():
if j[1] == self._model_type_to_metric_lookup[self.model_params[0].model_type][0]:
if FairnessMetrics.map_fair_metric_to_group[i][4] == True:
ratio_parity_metrics.append(i)
self.err.push('value_error', var_name="fair_metric_name", given=self.fair_metric_name, expected=ratio_parity_metrics, function_name="check_fair_metric_name")
except:
pass
#print any exceptions occured
self.err.pop()
def check_perf_metric_name(self):
"""
Checks if primary performance metric is valid
"""
try:
if PerformanceMetrics.map_perf_metric_to_group[self.perf_metric_name][4] == False:
perf_list = []
for i,j in PerformanceMetrics.map_perf_metric_to_group.items():
if j[1] == self._model_type_to_metric_lookup[self.model_params[0].model_type][0]:
if PerformanceMetrics.map_perf_metric_to_group[i][4] == True:
perf_list.append(i)
self.err.push('value_error', var_name="perf_metric_name", given=self.perf_metric_name, expected=perf_list, function_name="check_perf_metric_name")
except:
pass
#print any exceptions occured
self.err.pop()
def _fairness_tree(self, is_pos_label_favourable = True):
"""
Sets the feature mask for each protected variable based on its privileged group
Parameters
-----------
is_pos_label_favourable: boolean, default=True
Whether the pos_label is the favourable label
Returns
----------
self.fair_metric_name : string
Fairness metric name
"""
err_ = []
if self.fair_concern not in ['eligible', 'inclusive', 'both']:
err_.append(['value_error', "fair_concern", str(self.fair_concern), str(['eligible', 'inclusive', 'both'])])
if self.fair_priority not in ['benefit', 'harm']:
err_.append(['value_error', "fair_priority", str(self.fair_priority),str(['benefit', 'harm'])])
if self.fair_impact not in ['significant', 'selective', 'normal']:
err_.append(['value_error', "fair_impact", str(self.fair_impact),str(['significant', 'selective', 'normal'])])
if err_ != []:
for i in range(len(err_)):
self.err.push(err_[i][0], var_name=err_[i][1], given=err_[i][2], expected=err_[i][3],
function_name="_fairness_tree")
self.err.pop()
if is_pos_label_favourable == True:
if self.fair_priority == "benefit":
if self.fair_impact == "normal" :
if self.fair_concern == 'inclusive' :
self.fair_metric_name = 'fpr_parity'
elif self.fair_concern == 'eligible':
self.fair_metric_name = 'equal_opportunity'
elif self.fair_concern == 'both':
self.fair_metric_name = 'equal_odds'
elif self.fair_impact =="significant" or self.fair_impact == "selective" :
if self.fair_concern == 'inclusive' :
self.fair_metric_name = 'fdr_parity'
elif self.fair_concern == 'eligible':
self.fair_metric_name = 'ppv_parity'
elif self.fair_concern == 'both':
self.err.push("conflict_error", var_name_a="fair_concern", some_string="not applicable", value="", function_name="_fairness_tree")
self.err.pop()
elif self.fair_priority == "harm" :
if self.fair_impact == "normal" :
if self.fair_concern == 'inclusive' :
self.fair_metric_name = 'fpr_parity'
elif self.fair_concern == 'eligible':
self.fair_metric_name = 'fnr_parity'
elif self.fair_concern == 'both':
self.fair_metric_name = 'equal_odds'
elif self.fair_impact =="significant" or self.fair_impact == "selective" :
if self.fair_concern == 'inclusive' :
self.fair_metric_name = 'fdr_parity'
elif self.fair_concern == 'eligible':
self.fair_metric_name = 'for_parity'
elif self.fair_concern == 'both':
self.fair_metric_name = 'calibration_by_group'
else:
if self.fair_priority == "benefit":
if self.fair_impact == "normal" :
if self.fair_concern == 'inclusive' :
self.fair_metric_name = 'fnr_parity'
elif self.fair_concern == 'eligible':
self.fair_metric_name = 'tnr_parity'
elif self.fair_concern == 'both':
self.fair_metric_name = 'neg_equal_odds'
elif self.fair_impact =="significant" or self.fair_impact == "selective" :
if self.fair_concern == 'inclusive' :
self.fair_metric_name = 'for_parity'
elif self.fair_concern == 'eligible':
self.fair_metric_name = 'npv_parity'
elif self.fair_concern == 'both':
self.err.push("conflict_error", var_name_a="fairness concern", some_string="not applicable", value="", function_name="_fairness_tree")
self.err.pop()
elif self.fair_priority == "harm" :
if self.fair_impact == "normal" :
if self.fair_concern == 'inclusive' :
self.fair_metric_name = 'fnr_parity'
elif self.fair_concern == 'eligible':
self.fair_metric_name = 'fpr_parity'
elif self.fair_concern == 'both':
self.fair_metric_name = 'equal_odds'
elif self.fair_impact =="significant" or self.fair_impact == "selective" :
if self.fair_concern == 'inclusive' :
self.fair_metric_name = 'for_parity'
elif self.fair_concern == 'eligible':
self.fair_metric_name = 'fdr_parity'
elif self.fair_concern == 'both':
self.fair_metric_name = 'calibration_by_group'
return self.fair_metric_name
def get_prob_calibration_results(self):
"""
Gets the probability calibration results
Returns
------------
a dictionary with below keys and values:
'prob_true': the ground truth values split into 10 bins from 0 to 1
'prob_pred': the mean predicted probability in each bin
'score': the brier loss score
"""
if self.evaluate_status_cali == True:
return self.perf_metric_obj.result.get("calibration_curve")
else:
return None
def get_perf_metrics_results(self):
"""
Gets the performance metrics results
Returns
------------
a dictionary with keys as the metric name and values as the metric value together with confidence interval
"""
if self.evaluate_status == 1:
return self.perf_metric_obj.result.get("perf_metric_values")
else:
return None
def get_fair_metrics_results(self):
"""
Gets the fair metrics results
Returns
------------
a dictionary with keys as the metric name and values as the metric value together with confidence interval
"""
if self.evaluate_status == 1:
result = {}
for p_var in self.fair_metric_obj.result.keys():
result[p_var] = self.fair_metric_obj.result[p_var]['fair_metric_values']
return result
else:
return None
def get_tradeoff_results(self):
"""
Gets the tradeoff results
Returns
------------
a dictionary with below keys and values:
protected variable name as key to split result values for each protected variable
'fair_metric_name': fairness metric name
'perf_metric_name': performance metric name
'fair': array of shape (n, n*) of fairness metric values
'perf': array of shape (n, n*) of performance metric values
'th_x': array of shape (n*, ) of thresholds on x axis
'th_y': array of shape (n*, ) of thresholds on y axis
'max_perf_point': maxiumn performance point on the grid
'max_perf_single_th': maxiumn performance point on the grid with single threshold
'max_perf_neutral_fair': maxiumn performance point on the grid with neutral fairness
*n is defined by tradeoff_threshold_bins in config
"""
if self.tradeoff_status == 1:
return self.tradeoff_obj.result
else:
return None
def get_loo_results(self):
"""
Gets the leave one out analysis results
Returns
------------
a dictionary with below keys and values:
protected variable name as key to split fairness result on each protected variable
protected variable name as key to denote the removed protected variable
array values denote the performance metric value, fariness metric value, fairness conclusion and suggestion
"""
if self.feature_imp_status_loo == True:
return self.feature_imp_values
else:
return None
def get_correlation_analysis_results(self):
"""
Gets the correlation analysis results
Returns
------------
a dictionary with below keys and values:
'feature_names': feature names for correlation analysis
'corr_values': correlation values according to feature names
"""
if self.feature_imp_status_corr == True:
return self.correlation_output
else:
return None
class NpEncoder(json.JSONEncoder):
"""
"""
def default(self, obj):
"""
Parameters
------------
obj : object
"""
if isinstance(obj, np.integer):
return int(obj)
if isinstance(obj, np.floating):
return float(obj)
if isinstance(obj, np.ndarray):
return obj.tolist()
return super(NpEncoder, self).default(obj)
| 8,699 | 0 | 117 |
92354fff9f91deee1a9965a8b9781f13da819870 | 623 | py | Python | apps/coordinator/forms.py | rohitmaurya-png/EventManager | b01d45f0ee1856c2a088a3f61a973c453b741fd6 | [
"MIT"
] | 1 | 2022-03-27T01:02:29.000Z | 2022-03-27T01:02:29.000Z | apps/coordinator/forms.py | rohitmaurya-png/EventManager | b01d45f0ee1856c2a088a3f61a973c453b741fd6 | [
"MIT"
] | null | null | null | apps/coordinator/forms.py | rohitmaurya-png/EventManager | b01d45f0ee1856c2a088a3f61a973c453b741fd6 | [
"MIT"
] | 3 | 2021-04-20T08:27:31.000Z | 2022-02-18T09:30:48.000Z | from django.forms import ModelForm
from apps.event.models import Event | 38.9375 | 71 | 0.629213 | from django.forms import ModelForm
from apps.event.models import Event
class EventForm(ModelForm):
def __init__(self, *args, **kwargs):
super(EventForm, self).__init__(*args, **kwargs)
self.fields['category'].widget.attrs['class'] = 'input'
self.fields['image'].widget.attrs['class'] = 'input'
self.fields['title'].widget.attrs['class'] = 'input'
self.fields['description'].widget.attrs['class'] = 'input'
self.fields['price'].widget.attrs['class'] = 'input'
class Meta:
model = Event
fields = ['category', 'image', 'title', 'description', 'price'] | 387 | 142 | 23 |
7fd59db42373c4390d9d508b351343d7742bc795 | 15,845 | py | Python | sdk/python/pulumi_keycloak/openid/client_service_account_role.py | davide-talesco/pulumi-keycloak | 08d66be6f2bf578d4292e29eb6181794375bc4e5 | [
"ECL-2.0",
"Apache-2.0"
] | 13 | 2020-04-28T15:20:56.000Z | 2022-03-24T18:00:17.000Z | sdk/python/pulumi_keycloak/openid/client_service_account_role.py | davide-talesco/pulumi-keycloak | 08d66be6f2bf578d4292e29eb6181794375bc4e5 | [
"ECL-2.0",
"Apache-2.0"
] | 49 | 2020-02-06T17:53:35.000Z | 2022-03-25T19:36:08.000Z | sdk/python/pulumi_keycloak/openid/client_service_account_role.py | davide-talesco/pulumi-keycloak | 08d66be6f2bf578d4292e29eb6181794375bc4e5 | [
"ECL-2.0",
"Apache-2.0"
] | 2 | 2020-06-09T01:08:56.000Z | 2021-12-07T15:30:37.000Z | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['ClientServiceAccountRoleArgs', 'ClientServiceAccountRole']
@pulumi.input_type
@pulumi.input_type
| 42.940379 | 239 | 0.651499 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['ClientServiceAccountRoleArgs', 'ClientServiceAccountRole']
@pulumi.input_type
class ClientServiceAccountRoleArgs:
def __init__(__self__, *,
client_id: pulumi.Input[str],
realm_id: pulumi.Input[str],
role: pulumi.Input[str],
service_account_user_id: pulumi.Input[str]):
"""
The set of arguments for constructing a ClientServiceAccountRole resource.
:param pulumi.Input[str] client_id: The id of the client that provides the role.
:param pulumi.Input[str] realm_id: The realm the clients and roles belong to.
:param pulumi.Input[str] role: The name of the role that is assigned.
:param pulumi.Input[str] service_account_user_id: The id of the service account that is assigned the role (the service account of the client that "consumes" the role).
"""
pulumi.set(__self__, "client_id", client_id)
pulumi.set(__self__, "realm_id", realm_id)
pulumi.set(__self__, "role", role)
pulumi.set(__self__, "service_account_user_id", service_account_user_id)
@property
@pulumi.getter(name="clientId")
def client_id(self) -> pulumi.Input[str]:
"""
The id of the client that provides the role.
"""
return pulumi.get(self, "client_id")
@client_id.setter
def client_id(self, value: pulumi.Input[str]):
pulumi.set(self, "client_id", value)
@property
@pulumi.getter(name="realmId")
def realm_id(self) -> pulumi.Input[str]:
"""
The realm the clients and roles belong to.
"""
return pulumi.get(self, "realm_id")
@realm_id.setter
def realm_id(self, value: pulumi.Input[str]):
pulumi.set(self, "realm_id", value)
@property
@pulumi.getter
def role(self) -> pulumi.Input[str]:
"""
The name of the role that is assigned.
"""
return pulumi.get(self, "role")
@role.setter
def role(self, value: pulumi.Input[str]):
pulumi.set(self, "role", value)
@property
@pulumi.getter(name="serviceAccountUserId")
def service_account_user_id(self) -> pulumi.Input[str]:
"""
The id of the service account that is assigned the role (the service account of the client that "consumes" the role).
"""
return pulumi.get(self, "service_account_user_id")
@service_account_user_id.setter
def service_account_user_id(self, value: pulumi.Input[str]):
pulumi.set(self, "service_account_user_id", value)
@pulumi.input_type
class _ClientServiceAccountRoleState:
def __init__(__self__, *,
client_id: Optional[pulumi.Input[str]] = None,
realm_id: Optional[pulumi.Input[str]] = None,
role: Optional[pulumi.Input[str]] = None,
service_account_user_id: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering ClientServiceAccountRole resources.
:param pulumi.Input[str] client_id: The id of the client that provides the role.
:param pulumi.Input[str] realm_id: The realm the clients and roles belong to.
:param pulumi.Input[str] role: The name of the role that is assigned.
:param pulumi.Input[str] service_account_user_id: The id of the service account that is assigned the role (the service account of the client that "consumes" the role).
"""
if client_id is not None:
pulumi.set(__self__, "client_id", client_id)
if realm_id is not None:
pulumi.set(__self__, "realm_id", realm_id)
if role is not None:
pulumi.set(__self__, "role", role)
if service_account_user_id is not None:
pulumi.set(__self__, "service_account_user_id", service_account_user_id)
@property
@pulumi.getter(name="clientId")
def client_id(self) -> Optional[pulumi.Input[str]]:
"""
The id of the client that provides the role.
"""
return pulumi.get(self, "client_id")
@client_id.setter
def client_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "client_id", value)
@property
@pulumi.getter(name="realmId")
def realm_id(self) -> Optional[pulumi.Input[str]]:
"""
The realm the clients and roles belong to.
"""
return pulumi.get(self, "realm_id")
@realm_id.setter
def realm_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "realm_id", value)
@property
@pulumi.getter
def role(self) -> Optional[pulumi.Input[str]]:
"""
The name of the role that is assigned.
"""
return pulumi.get(self, "role")
@role.setter
def role(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "role", value)
@property
@pulumi.getter(name="serviceAccountUserId")
def service_account_user_id(self) -> Optional[pulumi.Input[str]]:
"""
The id of the service account that is assigned the role (the service account of the client that "consumes" the role).
"""
return pulumi.get(self, "service_account_user_id")
@service_account_user_id.setter
def service_account_user_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "service_account_user_id", value)
class ClientServiceAccountRole(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
client_id: Optional[pulumi.Input[str]] = None,
realm_id: Optional[pulumi.Input[str]] = None,
role: Optional[pulumi.Input[str]] = None,
service_account_user_id: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Allows for assigning client roles to the service account of an openid client.
You need to set `service_accounts_enabled` to `true` for the openid client that should be assigned the role.
If you'd like to attach realm roles to a service account, please use the `openid.ClientServiceAccountRealmRole`
resource.
## Example Usage
```python
import pulumi
import pulumi_keycloak as keycloak
realm = keycloak.Realm("realm",
realm="my-realm",
enabled=True)
# client1 provides a role to other clients
client1 = keycloak.openid.Client("client1", realm_id=realm.id)
client1_role = keycloak.Role("client1Role",
realm_id=realm.id,
client_id=client1.id,
description="A role that client1 provides")
# client2 is assigned the role of client1
client2 = keycloak.openid.Client("client2",
realm_id=realm.id,
service_accounts_enabled=True)
client2_service_account_role = keycloak.openid.ClientServiceAccountRole("client2ServiceAccountRole",
realm_id=realm.id,
service_account_user_id=client2.service_account_user_id,
client_id=client1.id,
role=client1_role.name)
```
## Import
This resource can be imported using the format `{{realmId}}/{{serviceAccountUserId}}/{{clientId}}/{{roleId}}`. Examplebash
```sh
$ pulumi import keycloak:openid/clientServiceAccountRole:ClientServiceAccountRole client2_service_account_role my-realm/489ba513-1ceb-49ba-ae0b-1ab1f5099ebf/baf01820-0f8b-4494-9be2-fb3bc8a397a4/c7230ab7-8e4e-4135-995d-e81b50696ad8
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] client_id: The id of the client that provides the role.
:param pulumi.Input[str] realm_id: The realm the clients and roles belong to.
:param pulumi.Input[str] role: The name of the role that is assigned.
:param pulumi.Input[str] service_account_user_id: The id of the service account that is assigned the role (the service account of the client that "consumes" the role).
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: ClientServiceAccountRoleArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Allows for assigning client roles to the service account of an openid client.
You need to set `service_accounts_enabled` to `true` for the openid client that should be assigned the role.
If you'd like to attach realm roles to a service account, please use the `openid.ClientServiceAccountRealmRole`
resource.
## Example Usage
```python
import pulumi
import pulumi_keycloak as keycloak
realm = keycloak.Realm("realm",
realm="my-realm",
enabled=True)
# client1 provides a role to other clients
client1 = keycloak.openid.Client("client1", realm_id=realm.id)
client1_role = keycloak.Role("client1Role",
realm_id=realm.id,
client_id=client1.id,
description="A role that client1 provides")
# client2 is assigned the role of client1
client2 = keycloak.openid.Client("client2",
realm_id=realm.id,
service_accounts_enabled=True)
client2_service_account_role = keycloak.openid.ClientServiceAccountRole("client2ServiceAccountRole",
realm_id=realm.id,
service_account_user_id=client2.service_account_user_id,
client_id=client1.id,
role=client1_role.name)
```
## Import
This resource can be imported using the format `{{realmId}}/{{serviceAccountUserId}}/{{clientId}}/{{roleId}}`. Examplebash
```sh
$ pulumi import keycloak:openid/clientServiceAccountRole:ClientServiceAccountRole client2_service_account_role my-realm/489ba513-1ceb-49ba-ae0b-1ab1f5099ebf/baf01820-0f8b-4494-9be2-fb3bc8a397a4/c7230ab7-8e4e-4135-995d-e81b50696ad8
```
:param str resource_name: The name of the resource.
:param ClientServiceAccountRoleArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(ClientServiceAccountRoleArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
client_id: Optional[pulumi.Input[str]] = None,
realm_id: Optional[pulumi.Input[str]] = None,
role: Optional[pulumi.Input[str]] = None,
service_account_user_id: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = ClientServiceAccountRoleArgs.__new__(ClientServiceAccountRoleArgs)
if client_id is None and not opts.urn:
raise TypeError("Missing required property 'client_id'")
__props__.__dict__["client_id"] = client_id
if realm_id is None and not opts.urn:
raise TypeError("Missing required property 'realm_id'")
__props__.__dict__["realm_id"] = realm_id
if role is None and not opts.urn:
raise TypeError("Missing required property 'role'")
__props__.__dict__["role"] = role
if service_account_user_id is None and not opts.urn:
raise TypeError("Missing required property 'service_account_user_id'")
__props__.__dict__["service_account_user_id"] = service_account_user_id
super(ClientServiceAccountRole, __self__).__init__(
'keycloak:openid/clientServiceAccountRole:ClientServiceAccountRole',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
client_id: Optional[pulumi.Input[str]] = None,
realm_id: Optional[pulumi.Input[str]] = None,
role: Optional[pulumi.Input[str]] = None,
service_account_user_id: Optional[pulumi.Input[str]] = None) -> 'ClientServiceAccountRole':
"""
Get an existing ClientServiceAccountRole resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] client_id: The id of the client that provides the role.
:param pulumi.Input[str] realm_id: The realm the clients and roles belong to.
:param pulumi.Input[str] role: The name of the role that is assigned.
:param pulumi.Input[str] service_account_user_id: The id of the service account that is assigned the role (the service account of the client that "consumes" the role).
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _ClientServiceAccountRoleState.__new__(_ClientServiceAccountRoleState)
__props__.__dict__["client_id"] = client_id
__props__.__dict__["realm_id"] = realm_id
__props__.__dict__["role"] = role
__props__.__dict__["service_account_user_id"] = service_account_user_id
return ClientServiceAccountRole(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="clientId")
def client_id(self) -> pulumi.Output[str]:
"""
The id of the client that provides the role.
"""
return pulumi.get(self, "client_id")
@property
@pulumi.getter(name="realmId")
def realm_id(self) -> pulumi.Output[str]:
"""
The realm the clients and roles belong to.
"""
return pulumi.get(self, "realm_id")
@property
@pulumi.getter
def role(self) -> pulumi.Output[str]:
"""
The name of the role that is assigned.
"""
return pulumi.get(self, "role")
@property
@pulumi.getter(name="serviceAccountUserId")
def service_account_user_id(self) -> pulumi.Output[str]:
"""
The id of the service account that is assigned the role (the service account of the client that "consumes" the role).
"""
return pulumi.get(self, "service_account_user_id")
| 2,970 | 12,361 | 67 |
d41c83717df708e26bc5a9fb92a8162fc920852a | 6,479 | py | Python | python/cendalytics/wikipedia/ingest/dmo/dbpedia_taxonomy_extractor.py | jiportilla/ontology | 8a66bb7f76f805c64fc76cfc40ab7dfbc1146f40 | [
"MIT"
] | null | null | null | python/cendalytics/wikipedia/ingest/dmo/dbpedia_taxonomy_extractor.py | jiportilla/ontology | 8a66bb7f76f805c64fc76cfc40ab7dfbc1146f40 | [
"MIT"
] | null | null | null | python/cendalytics/wikipedia/ingest/dmo/dbpedia_taxonomy_extractor.py | jiportilla/ontology | 8a66bb7f76f805c64fc76cfc40ab7dfbc1146f40 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
import re
from typing import Optional
from base import BaseObject
from base import FileIO
class DBpediaTaxonomyExtractor(BaseObject):
""" Extract latent 'is-a' hierarchy from unstructured text """
__isa_patterns = None
__clause_patterns = None
def __init__(self,
input_text: str,
is_debug: bool = False):
"""
Created:
7-Jan-2020
craig.trim@ibm.com
* https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/1706
Updated:
7-Feb-2020
craig.trim@ibm.com
* moved dictionaries to CSV resources
https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/1837
"""
BaseObject.__init__(self, __name__)
if self.__isa_patterns is None:
self.__isa_patterns = FileIO.file_to_lines_by_relative_path(
"resources/config/dbpedia/patterns_isa.csv")
self.__isa_patterns = [x.lower().strip() for x in self.__isa_patterns]
if self.__clause_patterns is None:
self.__clause_patterns = FileIO.file_to_lines_by_relative_path(
"resources/config/dbpedia/patterns_clause.csv")
self.__clause_patterns = [x.lower().strip() for x in self.__clause_patterns]
self._input_text = input_text
self._is_debug = is_debug
@staticmethod
def _remove_parens(input_text: str) -> str:
"""
Purpose:
Remove parens
Sample Input:
A drug (/drɑːɡ/) is any substance
Sample Output:
A drug is any substance
:return:
text without parens
"""
if '(' not in input_text and ')' not in input_text:
return input_text
x = input_text.index('(')
y = input_text.index(')') + 2
return f"{input_text[0:x]}{input_text[y:]}"
@staticmethod
def _remove_akas(input_text: str) -> str:
"""
Purpose:
Remove AKA sections
Sample Input:
Lung cancer, also known as lung carcinoma, is a malignant lung tumor
Sample Output:
Lung cancer is a malignant lung tumor
:return:
text without AKA
"""
patterns = [', also known as ',
', or ',
', formerly known as']
for pattern in patterns:
if pattern in input_text:
x = input_text.index(pattern)
y = input_text[:(x + len(pattern))].index(',') + x + len(pattern) + 4
input_text = f"{input_text[:x]}{input_text[y:]}"
return input_text
| 32.722222 | 88 | 0.563976 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
import re
from typing import Optional
from base import BaseObject
from base import FileIO
class DBpediaTaxonomyExtractor(BaseObject):
""" Extract latent 'is-a' hierarchy from unstructured text """
__isa_patterns = None
__clause_patterns = None
def __init__(self,
input_text: str,
is_debug: bool = False):
"""
Created:
7-Jan-2020
craig.trim@ibm.com
* https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/1706
Updated:
7-Feb-2020
craig.trim@ibm.com
* moved dictionaries to CSV resources
https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/1837
"""
BaseObject.__init__(self, __name__)
if self.__isa_patterns is None:
self.__isa_patterns = FileIO.file_to_lines_by_relative_path(
"resources/config/dbpedia/patterns_isa.csv")
self.__isa_patterns = [x.lower().strip() for x in self.__isa_patterns]
if self.__clause_patterns is None:
self.__clause_patterns = FileIO.file_to_lines_by_relative_path(
"resources/config/dbpedia/patterns_clause.csv")
self.__clause_patterns = [x.lower().strip() for x in self.__clause_patterns]
self._input_text = input_text
self._is_debug = is_debug
@staticmethod
def _remove_parens(input_text: str) -> str:
"""
Purpose:
Remove parens
Sample Input:
A drug (/drɑːɡ/) is any substance
Sample Output:
A drug is any substance
:return:
text without parens
"""
if '(' not in input_text and ')' not in input_text:
return input_text
x = input_text.index('(')
y = input_text.index(')') + 2
return f"{input_text[0:x]}{input_text[y:]}"
@staticmethod
def _remove_akas(input_text: str) -> str:
"""
Purpose:
Remove AKA sections
Sample Input:
Lung cancer, also known as lung carcinoma, is a malignant lung tumor
Sample Output:
Lung cancer is a malignant lung tumor
:return:
text without AKA
"""
patterns = [', also known as ',
', or ',
', formerly known as']
for pattern in patterns:
if pattern in input_text:
x = input_text.index(pattern)
y = input_text[:(x + len(pattern))].index(',') + x + len(pattern) + 4
input_text = f"{input_text[:x]}{input_text[y:]}"
return input_text
def _cleanse_text(self,
input_text: str) -> str:
original_input_text = input_text
input_text = self._remove_parens(input_text)
input_text = self._remove_akas(input_text)
if self._is_debug and original_input_text != input_text:
self.logger.debug('\n'.join([
"Text Cleansing Completed",
f"\tOriginal: {original_input_text}",
f"\tNormalized: {input_text}"]))
return input_text
def _segmenter(self,
input_text: str) -> list:
from nlutext.core.svc import PerformSentenceSegmentation
segmenter = PerformSentenceSegmentation(is_debug=self._is_debug)
return segmenter.process(some_input_text=input_text,
remove_wiki_references=True)
def _isa_normalizer(self,
input_text: str) -> str:
input_text = input_text.lower().strip()
for pattern in self.__isa_patterns:
if pattern in input_text:
input_text = input_text.replace(pattern, 'is_a')
return input_text
def _clause_inducer(self,
input_text: str) -> str:
regex = re.compile(r"[A-Za-z]+\s+(in|of)\s+", re.IGNORECASE)
target = ', '
input_text = input_text.lower().strip()
for candidate in self.__clause_patterns:
k_mid = f" {candidate} "
k_start = f"{candidate} "
k_end = f" {candidate}"
if input_text.startswith(k_start):
input_text = input_text.replace(k_start, target)
elif k_mid in input_text:
input_text = input_text.replace(k_mid, target)
elif input_text.endswith(k_end):
input_text = input_text.replace(k_end, target)
while True:
search_result = regex.search(input_text)
if not search_result:
break
input_text = input_text.replace(search_result.group(), target)
input_text = input_text.strip().replace(f' {target}', target).replace(' ', ' ')
if input_text.startswith(', '):
input_text = input_text[2:].strip()
return input_text
def process(self) -> Optional[str]:
if not self._input_text:
self.logger.warning("SubClass Extraction Failed: No Input")
return None
normalized = self._isa_normalizer(self._input_text)
if 'is_a' not in normalized:
self.logger.warning('\n'.join([
"SubClass Extraction Failed: No IS-A",
f"\tOriginal Text: {self._input_text}",
f"\tNormalized: {normalized}"]))
return None
x = normalized.index('is_a') + len('is_a')
normalized = normalized[x:].strip()
normalized = self._clause_inducer(normalized)
normalized = normalized.replace(',', '.')
normalized = normalized.replace(';', '.')
sentences = self._segmenter(normalized)
subclass = sentences[0].replace('.', '').strip()
if not subclass:
self.logger.warning('\n'.join([
"SubClass Extraction Failed: No SubClass",
f"\tOriginal Text: {self._input_text}",
f"\tNormalized: {normalized}",
f"\tSentences: {sentences}"]))
return None
if self._is_debug:
self.logger.debug('\n'.join([
"SubClass Extraction Completed",
f"\tResult: {subclass}",
f"\tOriginal Text: {self._input_text}",
f"\tNormalized: {normalized}",
f"\tSentences: {sentences}"]))
return subclass
| 3,612 | 0 | 135 |
42a0ec5a0a58798bb8e98eb38c0494bb6f52629f | 715 | py | Python | traveller_utils/ct/trade_classifications.py | egor045/traveller_utils | eba30e98cb3c666ce070b5da97391df6ebbc7b8a | [
"MIT"
] | null | null | null | traveller_utils/ct/trade_classifications.py | egor045/traveller_utils | eba30e98cb3c666ce070b5da97391df6ebbc7b8a | [
"MIT"
] | null | null | null | traveller_utils/ct/trade_classifications.py | egor045/traveller_utils | eba30e98cb3c666ce070b5da97391df6ebbc7b8a | [
"MIT"
] | null | null | null | ''' Trade classifications'''
VALID_TRADE_CLASSIFICATIONS = [
"Ag",
"Na",
"In",
"Ni",
"Ri",
"Po",
"Wa",
"De",
"As",
"Ic"
]
class TradeClassification():
''' Planetary trade classification'''
@property
def trade_classification(self):
''' Return own value'''
return self.__trade_classification
| 19.861111 | 51 | 0.532867 | ''' Trade classifications'''
VALID_TRADE_CLASSIFICATIONS = [
"Ag",
"Na",
"In",
"Ni",
"Ri",
"Po",
"Wa",
"De",
"As",
"Ic"
]
class TradeClassification():
''' Planetary trade classification'''
def __init__(self, tc: str):
if tc in VALID_TRADE_CLASSIFICATIONS:
self.__trade_classification = str(tc)
else:
raise ValueError(
"Invalid classification {}".format(
str(tc)
)
)
@property
def trade_classification(self):
''' Return own value'''
return self.__trade_classification
def __str__(self):
return self.__trade_classification
| 299 | 0 | 54 |
4c2383af468f1f45430c4b8613997b9607971ce4 | 6,772 | py | Python | huacai_server/common/libs/cosine_neo.py | slinger0225/huacai | 87e8de05981b1361f0396db28678ce07de459dc5 | [
"Apache-2.0"
] | null | null | null | huacai_server/common/libs/cosine_neo.py | slinger0225/huacai | 87e8de05981b1361f0396db28678ce07de459dc5 | [
"Apache-2.0"
] | null | null | null | huacai_server/common/libs/cosine_neo.py | slinger0225/huacai | 87e8de05981b1361f0396db28678ce07de459dc5 | [
"Apache-2.0"
] | null | null | null | import pandas as pd
import numpy as np
from common.libs.neo2cos import find_songs
INT_BITS = 32
MAX_INT = (1 << (INT_BITS - 1)) - 1 # Maximum Integer for INT_BITS
def main(mode, input_cus=''):
# df1 = pd.read_excel(Song_addr);
"""读取歌曲库"""
# df2 = pd.read_excel(Cus_addr);
"""读取用户库"""
# print (indi_list)
# data = pd.DataFrame(df1) # 将所有歌曲信息放进一个dataframe中(几百首歌可以用,多了要想别的)
# cus = pd.DataFrame(df2) # 同上
song, song_sec, song_rev = find_songs(input_cus)
# init_data(Song_addr,Cus_addr)
if mode == 1:
rec_list = []
cursor = 1
while (cursor):
try:
cus_temp = cus.loc[cursor - 1].values
# print(cus_temp)
except:
cursor = 0
else:
cursor += 1
rec_list.append(recommend_one(data, cus_temp))
# print (rec_list)"""
elif mode == 2:
rec_list = recommend_one(song, song_sec, song_rev, input_cus)
print(rec_list)
else:
return 0
return rec_list # 返回cus_id
# print(main(2,[10,1,1,0,0,0,0,0]))
| 31.207373 | 120 | 0.554194 | import pandas as pd
import numpy as np
from common.libs.neo2cos import find_songs
INT_BITS = 32
MAX_INT = (1 << (INT_BITS - 1)) - 1 # Maximum Integer for INT_BITS
def indi_count(df1, aaa): # 计算曲库中伪hash值的计数
cursor = 0
dataa = df1.loc[:, ['Indi']].values
aaa = [0] * 256
for cursor in range(0, 256):
aaa[cursor] = np.sum(dataa == cursor)
# print(aaa)
return aaa
def binary_count(num): # 一个二进制数有几个1
count = 0
bit = 1
while num and bit <= MAX_INT + 1:
if bit & num:
count += 1
num -= bit
bit = bit << 1
return count
def cosine(music_list, dist, cus_data, music_data):
music_list.append(music_data[0]) # 记录选进去的music_id
dist.append(
np.dot(cus_data[1:8], music_data[1:8]) / (np.linalg.norm(cus_data[1:8]) * np.linalg.norm(music_data[1:8])))
def init_data(Song_addr, Cus_addr):
df1 = pd.read_excel(Song_addr);
"""读取歌曲库"""
df2 = pd.read_excel(Cus_addr);
"""读取用户库"""
indi_list = []
indi_list = indi_count(indi_list) # 统计特征值的多少
# print (indi_list)
data = pd.DataFrame(df1) # 将所有歌曲信息放进一个dataframe中(几百首歌可以用,多了要想别的)
cus = pd.DataFrame(df2) # 同上
def recommend_one(data, data_sec, data_rev, cus_temp):
rec_list = [] # 返回值容器
dist_top = []
dist_sec = []
dist_rev = []
music_list_top = []
music_list_sec = []
music_list_rev = []
cus_rev = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
for i in range(0, 3):
cus_rev[i + 1] = cus_temp[4 + i]
cus_rev[4 + i] = cus_temp[i + 1]
cus_rev[0] = cus_temp[0]
cus_rev[7] = cus_temp[7]
# cusarr = np.array(cus_temp[1:8])
# cusarr_rev = np.array(cus_rev[1:8])
rec_list.append(int(cus_temp[0]))
# print (cus_rev)
# print (cusarr)
# cus_result = np.argsort(-cusarr)
# cus_result_rev = np.argsort(-cusarr_rev) #获得用户的正向情感和反相情感
"""
indi = 0
indi_rev = 0
for k in range(0, 2): # 取用户两个最高的特征值计算
indi += 2 ** (6 - cus_result[k]);
indi_rev += 2 ** (6 - cus_result_rev[k])
# print(indi)
"""
for line in data:
cosine(music_list_top, dist_top, cus_temp, line)
for line in data_sec:
cosine(music_list_sec, dist_sec, cus_temp, line)
for line in data_rev:
cosine(music_list_rev, dist_rev, cus_rev, line)
"""
cursor = 1
while (cursor): # 所有歌全部读取 读到没有其他歌
try:
#data_temp = data.loc[cursor - 1].values
data_temp = data[cursor-1]
except:
# print('no more data')
cursor = 0
else:
cursor += 1
if indi_list[indi] >= 3:
if binary_count(indi & int(data_temp[9])) == 2:
cosine(music_list_top, dist_top, cus_temp, data_temp)
elif binary_count(indi & int(data_temp[9])) == 1:
cosine(music_list_sec, dist_sec, cus_temp, data_temp)
elif binary_count(indi_rev & int(data_temp[9])) == 2:
cosine(music_list_rev, dist_rev, cus_rev, data_temp)
else:
if binary_count(indi & int(data_temp[9])) >= 1:
cosine(music_list_sec, dist_sec, cus_temp, data_temp)
"""
arr_top = np.array(dist_top)
arr_sec = np.array(dist_sec)
arr_rev = np.array(dist_rev)
result_top = np.argsort(-arr_top)
result_sec = np.argsort(-arr_sec)
result_rev = np.argsort(-arr_rev); # 返回从小到大的索引值
"""
print('Totaly', (music_list_top.__len__() + music_list_sec.__len__() + music_list_rev.__len__()), 'music compared,',
music_list_top.__len__(), 'perfectly matched, ', music_list_sec.__len__(), 'partially matched and',
music_list_rev.__len__(), 'on contrast')
"""
for item in music_list_top[:3]:
rec_list.append(item)
for item in music_list_sec[:3]:
rec_list.append(item)
for item in music_list_rev[:3]:
rec_list.append(item)
rec_list = music_list_top[:3] + music_list_sec[:3] + music_list_rev[:3]
if len(rec_list) < 9:
rec_list += music_list_top[3:(12 - len(rec_list))]
if len(rec_list) < 9:
rec_list += music_list_sec[3:(12 - len(rec_list))]
if len(rec_list) < 9:
rec_list += music_list_rev[3:(12 - len(rec_list))]
rec_list[1:] = rec_list
rec_list[0] = cus_temp[0]
return rec_list
"""
if music_list_top.__len__() >= 3:
for i in range(0, 3):
try:
data=data[music_list_top[result_top[i]]]
#data_temp = data.loc[music_list_top[result_top[i]] - 1].values # 部分计算版
#rec_list.append(data_temp[0])
rec_list.append(music_list_top[result_top[i]])
except:
print('No more matched results')
break
for i in range(0, 3):
try:
data_temp = data.loc[music_list_sec[result_sec[i]] - 1].values # 有hash
# data = df1.loc[result_sec[i]].values # 无hash
rec_list.append(data_temp[0])
# print(cus)
except:
print('No more matched results')
break
for i in range(0, 3):
try:
data_temp = data.loc[music_list_rev[result_rev[i]] - 1].values # 随机版
rec_list.append(data_temp[0])
# print(cus)
except:
print('No more matched results')
break
else:
for i in range(0, 6):
try:
data_temp = data.loc[music_list_sec[result_sec[i]] - 1].values # 随机版
rec_list.append(data_temp[0])
# print(cus)
except:
print('No more matched results')
break
return rec_list
"""
def main(mode, input_cus=''):
# df1 = pd.read_excel(Song_addr);
"""读取歌曲库"""
# df2 = pd.read_excel(Cus_addr);
"""读取用户库"""
# print (indi_list)
# data = pd.DataFrame(df1) # 将所有歌曲信息放进一个dataframe中(几百首歌可以用,多了要想别的)
# cus = pd.DataFrame(df2) # 同上
song, song_sec, song_rev = find_songs(input_cus)
# init_data(Song_addr,Cus_addr)
if mode == 1:
rec_list = []
cursor = 1
while (cursor):
try:
cus_temp = cus.loc[cursor - 1].values
# print(cus_temp)
except:
cursor = 0
else:
cursor += 1
rec_list.append(recommend_one(data, cus_temp))
# print (rec_list)"""
elif mode == 2:
rec_list = recommend_one(song, song_sec, song_rev, input_cus)
print(rec_list)
else:
return 0
return rec_list # 返回cus_id
# print(main(2,[10,1,1,0,0,0,0,0]))
| 5,823 | 0 | 115 |
ad8b82837baad1cbd5a7c0296bdadf2cbdadf30b | 1,569 | py | Python | password_generator.py | chronologie7/python-secure-password-generator | 5b0a82ad367bb480aac19df8448b86b169c55d58 | [
"MIT"
] | null | null | null | password_generator.py | chronologie7/python-secure-password-generator | 5b0a82ad367bb480aac19df8448b86b169c55d58 | [
"MIT"
] | null | null | null | password_generator.py | chronologie7/python-secure-password-generator | 5b0a82ad367bb480aac19df8448b86b169c55d58 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import secrets
import re
def len_pass():
"""
function generate randomly the length of password,
from 10 to 16 caracters
"""
while True:
len_pass = secrets.randbelow(17)
if len_pass >= 10:
break
return len_pass
def get_password(len_password):
"""
function generates the password with the length
gived like parameter.
"""
password = ""
while len(password) <= len_password:
# p1 and p2, positions generate randomly
p1 = secrets.randbelow(4)
p2 = secrets.randbelow(len(all_strings[p1]))
if all_strings[p1][p2] not in password:
password += all_strings[p1][p2]
return password
lower_strings = "abcdefghijklmnopqrstuvwxyz"
upper_strings = "ABCDEFGHIJKLMNOPQRSTU"
number_strings = "0123456789"
symbol_strings = "!@#$%^*()[]{}?"
regex = r"[a-z]{2,}[A-Z]{2,}[0-9]{2,}[!@#\$%\^\*\(\)\[\]\{\}\?]{2,}"
all_strings = []
all_strings.append(lower_strings)
all_strings.append(upper_strings)
all_strings.append(number_strings)
all_strings.append(symbol_strings)
len_password = len_pass()
print("Generating password...")
while True:
password = get_password(len_password)
# checking if password matches with password requirements
if re.search(regex, password) != None:
break
print(f"your password is: {password}")
# saving the password in a file.
with open("your_pass.txt","w", encoding="utf-8") as file:
file.write(password)
print("Your password saved in \"your_pass.txt\"")
print("done!")
| 27.526316 | 68 | 0.655194 | #!/usr/bin/env python3
import secrets
import re
def len_pass():
"""
function generate randomly the length of password,
from 10 to 16 caracters
"""
while True:
len_pass = secrets.randbelow(17)
if len_pass >= 10:
break
return len_pass
def get_password(len_password):
"""
function generates the password with the length
gived like parameter.
"""
password = ""
while len(password) <= len_password:
# p1 and p2, positions generate randomly
p1 = secrets.randbelow(4)
p2 = secrets.randbelow(len(all_strings[p1]))
if all_strings[p1][p2] not in password:
password += all_strings[p1][p2]
return password
lower_strings = "abcdefghijklmnopqrstuvwxyz"
upper_strings = "ABCDEFGHIJKLMNOPQRSTU"
number_strings = "0123456789"
symbol_strings = "!@#$%^*()[]{}?"
regex = r"[a-z]{2,}[A-Z]{2,}[0-9]{2,}[!@#\$%\^\*\(\)\[\]\{\}\?]{2,}"
all_strings = []
all_strings.append(lower_strings)
all_strings.append(upper_strings)
all_strings.append(number_strings)
all_strings.append(symbol_strings)
len_password = len_pass()
print("Generating password...")
while True:
password = get_password(len_password)
# checking if password matches with password requirements
if re.search(regex, password) != None:
break
print(f"your password is: {password}")
# saving the password in a file.
with open("your_pass.txt","w", encoding="utf-8") as file:
file.write(password)
print("Your password saved in \"your_pass.txt\"")
print("done!")
| 0 | 0 | 0 |
8a02149a966c1b433133e53deb39d9b47cb2ad21 | 5,332 | py | Python | patrole_tempest_plugin/tests/api/network/test_floating_ips_rbac.py | Vegasq/patrole | 787fbd72542c233a66309c1700fad9645d01a394 | [
"Apache-2.0"
] | null | null | null | patrole_tempest_plugin/tests/api/network/test_floating_ips_rbac.py | Vegasq/patrole | 787fbd72542c233a66309c1700fad9645d01a394 | [
"Apache-2.0"
] | null | null | null | patrole_tempest_plugin/tests/api/network/test_floating_ips_rbac.py | Vegasq/patrole | 787fbd72542c233a66309c1700fad9645d01a394 | [
"Apache-2.0"
] | null | null | null | # Copyright 2017 AT&T Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import netaddr
from tempest import config
from tempest.lib.common.utils import test_utils
from tempest.lib import decorators
from patrole_tempest_plugin import rbac_rule_validation
from patrole_tempest_plugin.tests.api.network import rbac_base as base
CONF = config.CONF
| 40.090226 | 79 | 0.661853 | # Copyright 2017 AT&T Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import netaddr
from tempest import config
from tempest.lib.common.utils import test_utils
from tempest.lib import decorators
from patrole_tempest_plugin import rbac_rule_validation
from patrole_tempest_plugin.tests.api.network import rbac_base as base
CONF = config.CONF
class FloatingIpsRbacTest(base.BaseNetworkRbacTest):
@classmethod
def resource_setup(cls):
super(FloatingIpsRbacTest, cls).resource_setup()
# Create an external network for floating ip creation.
cls.fip_extnet = cls.create_network(**{'router:external': True})
# Update router:external attribute to False for proper subnet resource
# cleanup by base class.
cls.fip_extnet_id = cls.fip_extnet['id']
cls.addClassResourceCleanup(
test_utils.call_and_ignore_notfound_exc,
cls.networks_client.update_network, cls.fip_extnet_id,
**{'router:external': False})
# Create a subnet for the external network
cls.cidr = netaddr.IPNetwork(CONF.network.project_network_cidr)
cls.create_subnet(cls.fip_extnet,
cidr=cls.cidr,
mask_bits=24)
def _create_floatingip(self, floating_ip_address=None):
if floating_ip_address is not None:
body = self.floating_ips_client.create_floatingip(
floating_network_id=self.fip_extnet_id,
floating_ip_address=floating_ip_address)
else:
body = self.floating_ips_client.create_floatingip(
floating_network_id=self.fip_extnet_id)
floating_ip = body['floatingip']
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
self.floating_ips_client.delete_floatingip,
floating_ip['id'])
return floating_ip
@rbac_rule_validation.action(service="neutron",
rule="create_floatingip")
@decorators.idempotent_id('f8f7474c-b8a5-4174-af84-73097d6ced38')
def test_create_floating_ip(self):
"""Create floating IP.
RBAC test for the neutron create_floatingip policy
"""
with self.rbac_utils.override_role(self):
self._create_floatingip()
@rbac_rule_validation.action(
service="neutron",
rules=["create_floatingip",
"create_floatingip:floating_ip_address"])
@decorators.idempotent_id('a8bb826a-403d-4130-a55d-120a0a660806')
def test_create_floating_ip_floatingip_address(self):
"""Create floating IP with address.
RBAC test for the neutron create_floatingip:floating_ip_address policy
"""
fip = str(netaddr.IPAddress(self.cidr) + 10)
with self.rbac_utils.override_role(self):
self._create_floatingip(floating_ip_address=fip)
@rbac_rule_validation.action(service="neutron",
rules=["get_floatingip", "update_floatingip"],
expected_error_codes=[404, 403])
@decorators.idempotent_id('2ab1b060-19f8-4ef6-a838-e2ab7b377c63')
def test_update_floating_ip(self):
"""Update floating IP.
RBAC test for the neutron update_floatingip policy
"""
floating_ip = self._create_floatingip()
with self.rbac_utils.override_role(self):
# Associate floating IP to the other port
self.floating_ips_client.update_floatingip(
floating_ip['id'], port_id=None)
@rbac_rule_validation.action(service="neutron",
rules=["get_floatingip"],
expected_error_codes=[404])
@decorators.idempotent_id('f8846fd0-c976-48fe-a148-105303931b32')
def test_show_floating_ip(self):
"""Show floating IP.
RBAC test for the neutron get_floatingip policy
"""
floating_ip = self._create_floatingip()
with self.rbac_utils.override_role(self):
# Show floating IP
self.floating_ips_client.show_floatingip(floating_ip['id'])
@rbac_rule_validation.action(service="neutron",
rules=["get_floatingip", "delete_floatingip"],
expected_error_codes=[404, 403])
@decorators.idempotent_id('2611b068-30d4-4241-a78f-1b801a14db7e')
def test_delete_floating_ip(self):
"""Delete floating IP.
RBAC test for the neutron delete_floatingip policy
"""
floating_ip = self._create_floatingip()
with self.rbac_utils.override_role(self):
# Delete the floating IP
self.floating_ips_client.delete_floatingip(floating_ip['id'])
| 1,436 | 2,965 | 23 |
a3c9748574f3f9edc2726fd8b9de9ba7018042fb | 668 | py | Python | boundaries/ocd-division/country:ca/csd:3518039/definition.py | imhangoo/represent-canada-data | 0d9cc818b343079f81a00c15438d79c079a10c9b | [
"OML"
] | null | null | null | boundaries/ocd-division/country:ca/csd:3518039/definition.py | imhangoo/represent-canada-data | 0d9cc818b343079f81a00c15438d79c079a10c9b | [
"OML"
] | null | null | null | boundaries/ocd-division/country:ca/csd:3518039/definition.py | imhangoo/represent-canada-data | 0d9cc818b343079f81a00c15438d79c079a10c9b | [
"OML"
] | null | null | null | from datetime import date
import boundaries
boundaries.register('Brock wards',
domain='Brock, ON',
last_updated=date(2018, 11, 2),
name_func=lambda f: 'Ward %s' % f.get('WARD'),
id_func=boundaries.attr('WARD'),
authority='Township of Brock',
source_url='https://city-oshawa.opendata.arcgis.com/datasets/DurhamRegion::brock-ward-boundaries',
licence_url='https://www.durham.ca/en/regional-government/resources/Documents/OpenDataLicenceAgreement.pdf',
data_url='https://opendata.arcgis.com/datasets/f48be88029db4e959269cf1d0773998a_30.zip',
encoding='iso-8859-1',
extra={'division_id': 'ocd-division/country:ca/csd:3518039'},
)
| 39.294118 | 112 | 0.73503 | from datetime import date
import boundaries
boundaries.register('Brock wards',
domain='Brock, ON',
last_updated=date(2018, 11, 2),
name_func=lambda f: 'Ward %s' % f.get('WARD'),
id_func=boundaries.attr('WARD'),
authority='Township of Brock',
source_url='https://city-oshawa.opendata.arcgis.com/datasets/DurhamRegion::brock-ward-boundaries',
licence_url='https://www.durham.ca/en/regional-government/resources/Documents/OpenDataLicenceAgreement.pdf',
data_url='https://opendata.arcgis.com/datasets/f48be88029db4e959269cf1d0773998a_30.zip',
encoding='iso-8859-1',
extra={'division_id': 'ocd-division/country:ca/csd:3518039'},
)
| 0 | 0 | 0 |
31edf40f3241aa1631047e3c8f26d1226efc1bbc | 5,363 | py | Python | myapp.py | ekungurov/devops-school-website-v2 | de56af4e4be67c11a8cee822060c4ecf3d751887 | [
"MIT"
] | null | null | null | myapp.py | ekungurov/devops-school-website-v2 | de56af4e4be67c11a8cee822060c4ecf3d751887 | [
"MIT"
] | null | null | null | myapp.py | ekungurov/devops-school-website-v2 | de56af4e4be67c11a8cee822060c4ecf3d751887 | [
"MIT"
] | null | null | null | from flask import Flask, render_template
from flask_sqlalchemy import SQLAlchemy
from time import sleep
from cpu_load_generator import load_single_core
import requests
import urllib3
import json
import logging
import config
import re
import os
REQUESTS_LIMIT = 20
DELAY_IN_SECONDS = 0.1
app = Flask(__name__)
user = os.getenv('DB_LOGIN', default = config.DB_LOGIN)
password = os.getenv('DB_PASSWORD', default = config.DB_PASSWORD)
host = os.getenv('DB_HOST', default = config.DB_HOST)
dbname = os.getenv('DB_NAME', default = config.DB_NAME)
app.config['SQLALCHEMY_DATABASE_URI'] = \
f'mysql+pymysql://{user}:{password}@{host}/{dbname}'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db = SQLAlchemy(app)
@app.route('/')
@app.route('/health')
@app.route('/cpu_load/<int:seconds>')
@app.route('/cpu_load/')
@app.route('/planet/<id>')
@app.route('/clear_data')
@app.route('/fill_data')
if __name__ == '__main__':
app.run(host='0.0.0.0', port=5000)
| 30.645714 | 104 | 0.694015 | from flask import Flask, render_template
from flask_sqlalchemy import SQLAlchemy
from time import sleep
from cpu_load_generator import load_single_core
import requests
import urllib3
import json
import logging
import config
import re
import os
REQUESTS_LIMIT = 20
DELAY_IN_SECONDS = 0.1
app = Flask(__name__)
user = os.getenv('DB_LOGIN', default = config.DB_LOGIN)
password = os.getenv('DB_PASSWORD', default = config.DB_PASSWORD)
host = os.getenv('DB_HOST', default = config.DB_HOST)
dbname = os.getenv('DB_NAME', default = config.DB_NAME)
app.config['SQLALCHEMY_DATABASE_URI'] = \
f'mysql+pymysql://{user}:{password}@{host}/{dbname}'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db = SQLAlchemy(app)
class Planet(db.Model):
__tablename__ = 'planet'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.Text)
rotation_period = db.Column(db.Integer)
orbital_period = db.Column(db.Integer)
diameter = db.Column(db.Integer)
climate = db.Column(db.Text)
gravity = db.Column(db.Text)
terrain = db.Column(db.Text)
surface_water = db.Column(db.Text)
population = db.Column(db.BigInteger)
created_date = db.Column(db.DateTime)
updated_date = db.Column(db.DateTime)
url = db.Column(db.Text)
people = db.relationship('Person', backref='planet', lazy='dynamic')
def __repr__(self):
return '<Planet %r>' % self.name
class Person(db.Model):
__tablename__ = 'people'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.Text)
height = db.Column(db.Integer)
mass = db.Column(db.Float)
hair_color = db.Column(db.Text)
skin_color = db.Column(db.Text)
eye_color = db.Column(db.Text)
birth_year = db.Column(db.Text)
gender = db.Column(db.Text)
planet_id = db.Column(db.Integer, db.ForeignKey('planet.id'))
created_date = db.Column(db.DateTime)
updated_date = db.Column(db.DateTime)
url = db.Column(db.Text)
def __repr__(self):
return '<Person %r>' % self.name
@app.route('/')
def index():
planet_list = Planet.query.all()
return render_template('planet-list.html', planetList = planet_list)
@app.route('/health')
def health():
return json.dumps({'healthy':True}), 200, {'ContentType':'application/json'}
@app.route('/cpu_load/<int:seconds>')
@app.route('/cpu_load/')
def cpu_load(seconds = 60):
load_single_core(core_num=0, duration_s=seconds, target_load=1.0)
return render_template('cpu-load.html')
@app.route('/planet/<id>')
def planet(id):
current_planet = Planet.query.filter_by(id=id).first()
character_list = current_planet.people.all()
return render_template('character-list.html', planet = current_planet, characterList = character_list)
@app.route('/clear_data')
def clear():
recreate_tables()
return render_template('cleared.html')
@app.route('/fill_data')
def fill():
fill_tables()
return render_template('filled.html')
def get_json(url):
logging.warning(f"Fetching {url}")
sleep(DELAY_IN_SECONDS)
return requests.get(url, verify=False).json()
def disable_ssl_warnings():
urllib3.disable_warnings(requests.packages.urllib3.exceptions.InsecureRequestWarning)
def insert_planet(planet):
planet_obj = Planet(
id = re.search(r'/planets/(\d+)/', planet['url']).group(1),
name = planet['name'],
rotation_period = planet['rotation_period'],
orbital_period = planet['orbital_period'],
diameter = planet['diameter'],
climate = planet['climate'],
gravity = planet['gravity'],
terrain = planet['terrain'],
surface_water = planet['surface_water'],
population = planet['population'],
created_date = planet['created'][:-1],
updated_date = planet['edited'][:-1],
url = planet['url'],
)
logging.debug(planet_obj.created_date)
db.session.add(planet_obj)
def insert_person(person):
person_obj = Person(
id = re.search(r'/people/(\d+)/', person['url']).group(1),
name = person['name'],
height = person['height'],
mass = person['mass'].replace(',', ''),
hair_color = person['hair_color'],
skin_color = person['skin_color'],
eye_color = person['eye_color'],
birth_year = person['birth_year'],
gender = person['gender'],
planet_id = re.search(r'/planets/(\d+)/', person['homeworld']).group(1),
created_date = person['created'][:-1],
updated_date = person['edited'][:-1],
url = person['url']
)
logging.debug(person_obj.created_date)
db.session.add(person_obj)
def parse_planets():
requests_counter = 0
next_url = "https://swapi.dev/api/planets/"
while next_url and requests_counter < REQUESTS_LIMIT:
planets_json = get_json(next_url)
requests_counter += 1
for planet in planets_json['results']:
insert_planet(planet)
next_url = planets_json['next']
db.session.commit()
def parse_people():
requests_counter = 0
next_url = "https://swapi.dev/api/people/"
while next_url and requests_counter < REQUESTS_LIMIT:
people_json = get_json(next_url)
requests_counter += 1
for person in people_json['results']:
insert_person(person)
next_url = people_json['next']
db.session.commit()
def recreate_tables():
db.drop_all()
db.create_all()
def fill_tables():
disable_ssl_warnings()
parse_planets()
parse_people()
if __name__ == '__main__':
app.run(host='0.0.0.0', port=5000)
| 2,833 | 1,199 | 363 |
bf2386eda10dbd4e36fec08c259437d5597d434b | 1,615 | py | Python | snakemake/scripts/qc/iz_stats_from_shiver.py | BDI-pathogens/ShiverCovid | a032a7a786288b28994eae51215e7851f7571018 | [
"MIT"
] | null | null | null | snakemake/scripts/qc/iz_stats_from_shiver.py | BDI-pathogens/ShiverCovid | a032a7a786288b28994eae51215e7851f7571018 | [
"MIT"
] | null | null | null | snakemake/scripts/qc/iz_stats_from_shiver.py | BDI-pathogens/ShiverCovid | a032a7a786288b28994eae51215e7851f7571018 | [
"MIT"
] | null | null | null | """
Basic script to take in insert size csv from shiver, where each row
is 3 comma-separated values (insert size, number of that size, fraction).
Returns the insert size at 0.05, 0.5, 0.95 percentiles, as well as
the number of inserts >350 and the fraction of inserts that are >350bp.
tanya.golubchik@bdi.ox.ac.uk
October 2017
"""
from __future__ import print_function
import sys
from os import path
def get_insert_size_stats(instrm, thresh=350):
"""
Calculate insert size stats - values at .05/.5/.95 pc and number of inserts over a threshold size.
"""
cumsum = 0.
v05, v50, v95 = '', '', ''
n_thresh = 0
f_thresh = 0
for line in instrm:
try:
iz, n, frac = line.split(',')
iz = int(iz)
frac = float(frac)
except ValueError:
continue
if iz > thresh:
n_thresh += int(n)
f_thresh += frac
cumsum += frac
if not v05 and (cumsum >= 0.05):
v05 = iz
if not v50 and (cumsum >= 0.5):
v50 = iz
if not v95 and (cumsum >= 0.95):
v95 = iz
return v05, v50, v95, n_thresh, f_thresh
if __name__ == '__main__':
if len(sys.argv) != 2 or not path.isfile(sys.argv[-1]):
sys.stdout.write(',,,,\n')
sys.stderr.write('Usage: {0} MyInsertSizeStats.csv\n'.format(sys.argv[0]))
sys.exit(1)
with open(sys.argv[1]) as instrm:
v05, v50, v95, n_thresh, f_thresh = get_insert_size_stats(instrm, thresh=350)
sys.stdout.write('{0},{1},{2},{3},{4}\n'.format(v05, v50, v95, n_thresh, f_thresh))
| 28.333333 | 102 | 0.589474 | """
Basic script to take in insert size csv from shiver, where each row
is 3 comma-separated values (insert size, number of that size, fraction).
Returns the insert size at 0.05, 0.5, 0.95 percentiles, as well as
the number of inserts >350 and the fraction of inserts that are >350bp.
tanya.golubchik@bdi.ox.ac.uk
October 2017
"""
from __future__ import print_function
import sys
from os import path
def get_insert_size_stats(instrm, thresh=350):
"""
Calculate insert size stats - values at .05/.5/.95 pc and number of inserts over a threshold size.
"""
cumsum = 0.
v05, v50, v95 = '', '', ''
n_thresh = 0
f_thresh = 0
for line in instrm:
try:
iz, n, frac = line.split(',')
iz = int(iz)
frac = float(frac)
except ValueError:
continue
if iz > thresh:
n_thresh += int(n)
f_thresh += frac
cumsum += frac
if not v05 and (cumsum >= 0.05):
v05 = iz
if not v50 and (cumsum >= 0.5):
v50 = iz
if not v95 and (cumsum >= 0.95):
v95 = iz
return v05, v50, v95, n_thresh, f_thresh
if __name__ == '__main__':
if len(sys.argv) != 2 or not path.isfile(sys.argv[-1]):
sys.stdout.write(',,,,\n')
sys.stderr.write('Usage: {0} MyInsertSizeStats.csv\n'.format(sys.argv[0]))
sys.exit(1)
with open(sys.argv[1]) as instrm:
v05, v50, v95, n_thresh, f_thresh = get_insert_size_stats(instrm, thresh=350)
sys.stdout.write('{0},{1},{2},{3},{4}\n'.format(v05, v50, v95, n_thresh, f_thresh))
| 0 | 0 | 0 |
29d970c65d90594779709e89f4b856d781be71e6 | 2,196 | py | Python | revitsnoop.py | daren-thomas/rps-sample-scripts | 584354c8d7ff96348a051e94100a1512c7612027 | [
"MIT"
] | 27 | 2015-04-08T14:09:48.000Z | 2022-03-12T18:32:04.000Z | revitsnoop.py | daren-thomas/rps-sample-scripts | 584354c8d7ff96348a051e94100a1512c7612027 | [
"MIT"
] | null | null | null | revitsnoop.py | daren-thomas/rps-sample-scripts | 584354c8d7ff96348a051e94100a1512c7612027 | [
"MIT"
] | 12 | 2015-09-21T23:12:08.000Z | 2020-09-29T14:25:55.000Z | '''
Launch a RevitLookup "Snoop Objects" dialog for elements from the RPS shell
= Introduction =
You _do_ have `RevitLookup` installed, don't you? This is _the_ tool for
introspecting model elements. You can find it in the Revit SDK folder, along
with the source code. The plugin does many things, among which I most often use
the "Snoop Current Selection..." feature. This pops up a nice dialog that lets
you snoop around in the selected elements properties. See here for more:
https://github.com/jeremytammik/RevitLookup
I find that RevitLookup and RevitPythonShell complement each other rather well.
Except, while inside the shell, you can't start any other plugins, so you
can't access the snoop functionality. Unless...
= Details =
The module `revitsnoop` provides a mechanism to hook into the RevitLookup
plugin and start it with an object of your choice.
Example:
{{{
>>>import revitsnoop
>>>snooper = revitsnoop.RevitSnoop(__revit__)
>>>snooper.snoop(doc.ProjectInformation)
}}}
This will pop up a dialog for snooping the documents project information. You
can of course snoop any `Element` object.
'''
import clr
from Autodesk.Revit.DB import ElementSet
| 36 | 80 | 0.70173 | '''
Launch a RevitLookup "Snoop Objects" dialog for elements from the RPS shell
= Introduction =
You _do_ have `RevitLookup` installed, don't you? This is _the_ tool for
introspecting model elements. You can find it in the Revit SDK folder, along
with the source code. The plugin does many things, among which I most often use
the "Snoop Current Selection..." feature. This pops up a nice dialog that lets
you snoop around in the selected elements properties. See here for more:
https://github.com/jeremytammik/RevitLookup
I find that RevitLookup and RevitPythonShell complement each other rather well.
Except, while inside the shell, you can't start any other plugins, so you
can't access the snoop functionality. Unless...
= Details =
The module `revitsnoop` provides a mechanism to hook into the RevitLookup
plugin and start it with an object of your choice.
Example:
{{{
>>>import revitsnoop
>>>snooper = revitsnoop.RevitSnoop(__revit__)
>>>snooper.snoop(doc.ProjectInformation)
}}}
This will pop up a dialog for snooping the documents project information. You
can of course snoop any `Element` object.
'''
import clr
from Autodesk.Revit.DB import ElementSet
class RevitSnoop(object):
def __init__(self, uiApplication):
'''
for RevitSnoop to function properly, it needs to be instantiated
with a reverence to the Revit Application object.
'''
# find the RevitLookup plugin
rlapp = [app for app in uiApplication.LoadedApplications
if app.GetType().Namespace == 'RevitLookup'
and app.GetType().Name == 'App'][0]
# tell IronPython about the assembly of the RevitLookup plugin
clr.AddReference(rlapp.GetType().Assembly)
import RevitLookup
self.RevitLookup = RevitLookup
# See note in CelloctorExt.cs in the RevitLookup source:
self.RevitLookup.Snoop.CollectorExts.CollectorExt.m_app = uiApplication
def snoop(self, element):
elementSet = ElementSet()
elementSet.Insert(element)
form = self.RevitLookup.Snoop.Forms.Objects(elementSet)
form.ShowDialog()
| 167 | 793 | 24 |
22c2c4c83c573c3bdc32d4aa64064ca2f1dd1a3f | 1,304 | py | Python | unit_tests/test_manila_plugin_handlers.py | openstack-charmers/charm-template-manila-plugin | 651ab72a9ff7697b36a7475eaf430033e11d6686 | [
"Apache-2.0"
] | null | null | null | unit_tests/test_manila_plugin_handlers.py | openstack-charmers/charm-template-manila-plugin | 651ab72a9ff7697b36a7475eaf430033e11d6686 | [
"Apache-2.0"
] | null | null | null | unit_tests/test_manila_plugin_handlers.py | openstack-charmers/charm-template-manila-plugin | 651ab72a9ff7697b36a7475eaf430033e11d6686 | [
"Apache-2.0"
] | 1 | 2019-09-24T05:58:01.000Z | 2019-09-24T05:58:01.000Z | # Copyright 2016 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import print_function
import mock
import reactive.sdn_charm_handlers as handlers
import charms_openstack.test_utils as test_utils
| 28.977778 | 74 | 0.703988 | # Copyright 2016 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import print_function
import mock
import reactive.sdn_charm_handlers as handlers
import charms_openstack.test_utils as test_utils
class TestRegisteredHooks(test_utils.TestRegisteredHooks):
def test_hooks(self):
defaults = [
'charm.installed',
'config.changed',
'update-status']
hook_set = {
'when': {
},
'when_not': {
}
}
# test that the hooks were registered via the
# reactive.barbican_handlers
self.registered_hooks_test_helper(handlers, hook_set, defaults)
class TestManilaPluginCharmHandles(test_utils.PatchHelper):
pass
| 382 | 84 | 73 |
2f314c67a4bf8d93040aaf05bf091ec2549ad0f1 | 4,864 | py | Python | Python35/Lib/site-packages/meds/kernel.py | kvonch/my_projects | 149e46e387f3f66bf08b223b4d7e9be6ba3fd207 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | Python35/Lib/site-packages/meds/kernel.py | kvonch/my_projects | 149e46e387f3f66bf08b223b4d7e9be6ba3fd207 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | Python35/Lib/site-packages/meds/kernel.py | kvonch/my_projects | 149e46e387f3f66bf08b223b4d7e9be6ba3fd207 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # meds/kernel.py
#
#
""" central piece of code that loads the plugins and starts services. """
from meds.utils.cli import hello, set_completer, enable_history, termsetup
from meds.utils.misc import include, locked
from meds.utils.trace import get_exception
from meds.utils.name import name, sname, mname
from meds.utils.join import j
from meds.scheduler import Scheduler
from meds.object import Object, OOL
from meds.engine import Engine
from meds.event import Event
from meds import __version__
import meds.core
import importlib
import logging
import pkgutil
import termios
import types
import time
import tty
import sys
| 32 | 80 | 0.540913 | # meds/kernel.py
#
#
""" central piece of code that loads the plugins and starts services. """
from meds.utils.cli import hello, set_completer, enable_history, termsetup
from meds.utils.misc import include, locked
from meds.utils.trace import get_exception
from meds.utils.name import name, sname, mname
from meds.utils.join import j
from meds.scheduler import Scheduler
from meds.object import Object, OOL
from meds.engine import Engine
from meds.event import Event
from meds import __version__
import meds.core
import importlib
import logging
import pkgutil
import termios
import types
import time
import tty
import sys
class Kernel(Engine):
def announce(self, txt):
from meds.core import fleet
for bot in fleet:
bot.announce(txt)
def boot(self):
""" start boot proces of the kernel. use provided config. """
from meds.core import cfg
if cfg.shell: hello(cfg.name, version=__version__, **cfg)
self._cfg = cfg
self._resume.fd = sys.stdin.fileno()
self._resume.old = termsetup(sys.stdin.fileno())
for name in cfg.packages: self.walk(name)
self.initialize("meds.run")
set_completer(sorted(set([cmnd for cmnd in self.list()])))
enable_history()
meds.core.cfg.update(cfg)
self.ready()
self.start()
def dispatch(self, event):
event.dispatch()
def find(self, cmnd):
""" find command. """
for name in meds.core.names.get(cmnd, []):
if name not in self._table: continue
mod = self._table[name]
obj = getattr(mod, cmnd, None)
if obj and type(obj) == types.FunctionType:
if "event" in obj.__code__.co_varnames:
yield obj
def initialize(self, name):
""" initialze package. """
event = Event()
thrs = []
for modname in self.walk(name):
n = modname.split(".")[-1]
mod = self._table.get(modname, None)
if not mod:
mod = self.load(modname)
if n in self._cfg.default:
mod.init(event)
if self._cfg.all or n in self._cfg.init.split(","):
if n not in self._cfg.exclude.split(','):
logging.info("! init %s" % n)
thr = self.launch(mod.init, event, name="%s.initialize" % n)
thrs.append(thr)
event.ready()
return thrs
def loading(self, modname, force=False):
""" load a module. """
from meds.core import cmnds, names
if force: mod = self.load(modname)
else: mod = self.direct(modname)
for key in dir(mod):
obj = getattr(mod, key, None)
if obj and type(obj) == types.FunctionType:
if "event" in obj.__code__.co_varnames:
if key.startswith("cb_"):
k = key.split("cb_")[-1]
self._cbs.register(key, obj)
else:
names.register(key, modname)
if key not in ["init", "shutdown"]:
cmnds.register(key, obj)
def list(self, want=""):
""" list all functions found in a module. """
for modname in self.modules():
mod = self.direct(modname)
for key in dir(mod):
if key in ["init", "shutdown"]: continue
if want and key != want: continue
obj = getattr(mod, key, None)
if obj and type(obj) == types.FunctionType:
if "event" in obj.__code__.co_varnames:
yield key
def resume(self):
""" resume the kernel. """
pass
@locked
def shutdown(self, close=True):
""" stop bots, services and plugins. """
logging.debug("shutdown")
event = Event()
thrs = []
for key, mod in self._table.items():
if "meds.run" not in key:
continue
if "shutdown" in dir(mod):
thr = self.launch(mod.shutdown, event)
thrs.append(thr)
for bot in meds.core.fleet:
if "stop" in dir(bot):
thr = self.launch(bot.stop)
thrs.append(thr)
if "exit" in dir(bot):
thr = self.launch(bot.exit)
thrs.append(thr)
bot.ready()
meds.core.launcher.waiter(thrs)
self.killall()
self.ready()
def walk(self, name, force=False):
""" return all modules in a package. """
logging.info("! walk %s" % name)
mods = []
for modname in sorted(list(self.modules(name))):
self.loading(modname, force)
mods.append(modname)
return mods
| 125 | 4,087 | 23 |
1ebbfd39f32b3e2957b6fb464b5ac1172da3264b | 4,287 | py | Python | metaspace/browser/sm/browser/utils.py | METASPACE2020/METASPACE | e1acd9a409f84a78eed7ca9713258c09b0e137ca | [
"Apache-2.0"
] | 32 | 2018-08-13T15:49:42.000Z | 2022-01-17T18:32:19.000Z | metaspace/browser/sm/browser/utils.py | METASPACE2020/METASPACE | e1acd9a409f84a78eed7ca9713258c09b0e137ca | [
"Apache-2.0"
] | 624 | 2018-07-02T15:18:22.000Z | 2022-03-30T08:10:35.000Z | metaspace/browser/sm/browser/utils.py | METASPACE2020/METASPACE | e1acd9a409f84a78eed7ca9713258c09b0e137ca | [
"Apache-2.0"
] | 6 | 2021-01-10T22:24:30.000Z | 2022-03-16T19:14:37.000Z | import re
import shutil
from pathlib import Path
from typing import Optional
from functools import wraps
from time import time
import boto3
from botocore.client import Config
from sm.browser.mz_search import S3File
| 35.139344 | 98 | 0.602753 | import re
import shutil
from pathlib import Path
from typing import Optional
from functools import wraps
from time import time
import boto3
from botocore.client import Config
from sm.browser.mz_search import S3File
def list_file_sizes(bucket, max_size_mb=5120):
obj_sizes = {}
i = 0
for obj in bucket.objects.all():
obj_sizes[obj.key] = obj.size
i += 1
if i % 1000:
print(f"{i} objects")
obj_sizes = sorted(obj_sizes.items(), key=lambda kv: kv[1], reverse=True)
obj_sizes_limited = [(key, size) for key, size in obj_sizes if size < max_size_mb * 1024 ** 2]
for i in range(10):
key, size = obj_sizes_limited[i]
print(size / 1024 ** 2, key)
def get_file_by_ext(path: Path, ext: str) -> Optional[Path]:
for f_path in path.iterdir():
if f_path.suffix.lower() == ext:
return f_path
def clean_dir(path):
shutil.rmtree(path, ignore_errors=True)
path.mkdir(parents=True, exist_ok=True)
def mz_ppm_bin(mz, ppm):
return mz - mz * ppm * 1e-6, mz + mz * ppm * 1e-6
def timing(f):
@wraps(f)
def wrap(*args, **kw):
start = time()
result = f(*args, **kw)
elapsed = time() - start
mins, secs = divmod(elapsed, 60)
print(f"func:{f.__name__} args:[{args}, {kw}] took:{mins:.0f} min {secs:.3f} sec")
return result
return wrap
class DatasetFiles:
def __init__(self, full_ds_s3_path: str, local_dir: Path = "/tmp/dataset-browser"):
self.full_ds_s3_path = full_ds_s3_path.rstrip("/")
bucket_name, self.ds_s3_path = re.sub(r"s3?://", "", self.full_ds_s3_path).split("/", 1)
s3 = boto3.Session().resource("s3",
endpoint_url='http://storage:9000',
aws_access_key_id='minioadmin',
aws_secret_access_key='minioadmin',
config=Config(signature_version='s3v4'))
self._bucket = s3.Bucket(bucket_name)
self.ds_name = self.ds_s3_path.split("/")[-1]
self.ds_path = local_dir / self.ds_name
self.ds_path.mkdir(exist_ok=True)
self.segments_path = self.ds_path / "segments"
self.sorted_peaks_path = self.ds_path / "peaks_sorted_by_mz.bin"
self.mz_index_path = self.ds_path / "mz_index.bin"
self.ds_coordinates_path = self.ds_path / "coordinates.bin"
self.imzml_path: Path = None
self.ibd_path: Path = None
self._find_imzml_ibd_name()
def _find_imzml_ibd_name(self):
for obj in self._bucket.objects.filter(Prefix=self.ds_s3_path):
fn = obj.key.split('/')[-1]
ext = Path(fn).suffix.lower()
if ext == ".imzml":
self.imzml_path = self.ds_path / fn
elif ext == ".ibd":
self.ibd_path = self.ds_path / fn
def download_imzml(self):
for obj in self._bucket.objects.filter(Prefix=self.ds_s3_path):
fn = obj.key.split('/')[-1]
if Path(fn).suffix.lower() in [".imzml", ".ibd"]:
f_path = self.ds_path / fn
if not f_path.exists():
self._bucket.download_file(obj.key, str(f_path))
def upload_sorted_mz(self):
self._bucket.upload_file(
Filename=str(self.ds_coordinates_path),
Key=f"{self.ds_s3_path}/{self.ds_coordinates_path.name}",
)
self._bucket.upload_file(
Filename=str(self.sorted_peaks_path),
Key=f"{self.ds_s3_path}/{self.sorted_peaks_path.name}",
)
self._bucket.upload_file(
Filename=str(self.mz_index_path), Key=f"{self.ds_s3_path}/{self.mz_index_path.name}",
)
def read_coordinates(self) -> bytes:
s3_object = self._bucket.Object(key=f"{self.ds_s3_path}/{self.ds_coordinates_path.name}")
return s3_object.get()["Body"].read()
def read_mz_index(self) -> bytes:
s3_object = self._bucket.Object(key=f"{self.ds_s3_path}/{self.mz_index_path.name}")
return s3_object.get()["Body"].read()
def make_sorted_peaks_s3_file(self) -> S3File:
return S3File(self._bucket.Object(key=f"{self.ds_s3_path}/{self.sorted_peaks_path.name}"))
| 3,740 | -2 | 326 |
13c0e6f9186a50e99281790670d725a2e39d73b6 | 1,976 | py | Python | app/tasks/create_pull_request_card.py | palazzem/gello | 19fe9e4aa8de485dd829a87047ec64f89b5fa7ee | [
"Apache-2.0"
] | 44 | 2018-03-28T14:22:23.000Z | 2022-03-15T07:25:06.000Z | app/tasks/create_pull_request_card.py | palazzem/gello | 19fe9e4aa8de485dd829a87047ec64f89b5fa7ee | [
"Apache-2.0"
] | 44 | 2018-03-28T14:19:03.000Z | 2022-02-16T10:24:57.000Z | app/tasks/create_pull_request_card.py | palazzem/gello | 19fe9e4aa8de485dd829a87047ec64f89b5fa7ee | [
"Apache-2.0"
] | 12 | 2018-03-28T14:15:43.000Z | 2021-07-19T17:33:20.000Z | # -*- coding: utf-8 -*-
#
# Unless explicitly stated otherwise all files in this repository are licensed
# under the Apache 2 License.
#
# This product includes software developed at Datadog
# (https://www.datadoghq.com/).
#
# Copyright 2018 Datadog, Inc.
#
"""create_pull_request_card.py
Creates a trello card based on GitHub pull request data.
"""
import textwrap
from . import CreateTrelloCard
from ..services import PullRequestService
class CreatePullRequestCard(CreateTrelloCard):
"""A class that creates a trello card on a board."""
def __init__(self):
"""Initializes a task to create a pull request trello card."""
super().__init__()
self._pull_request_service = PullRequestService()
def _card_body(self):
"""Concrete helper method.
Internal helper to format the trello card body, based on the data
passed in.
Returns:
str: the markdown template for the Trello card created.
"""
return textwrap.dedent(
f"""
# GitHub Pull Request Opened By Community Member
___
- Pull Request link: [{self._title}]({self._url})
- Opened by: [{self._user}]({self._user_url})
___
### Pull Request Body
___
"""
) + self._body
def _persist_card_to_database(self, card):
"""Concrete helper method.
Internal helper to save the record created to the database.
Args:
card (trello.Card): An object representing the trello card created.
Returns:
None
"""
self._pull_request_service.create(
name=self._title,
url=self._url,
github_pull_request_id=self._id,
repo_id=self._repo_id,
trello_board_id=card.board_id,
trello_card_id=card.id,
trello_card_url=card.url,
trello_list_id=card.list_id
)
| 27.068493 | 79 | 0.611336 | # -*- coding: utf-8 -*-
#
# Unless explicitly stated otherwise all files in this repository are licensed
# under the Apache 2 License.
#
# This product includes software developed at Datadog
# (https://www.datadoghq.com/).
#
# Copyright 2018 Datadog, Inc.
#
"""create_pull_request_card.py
Creates a trello card based on GitHub pull request data.
"""
import textwrap
from . import CreateTrelloCard
from ..services import PullRequestService
class CreatePullRequestCard(CreateTrelloCard):
"""A class that creates a trello card on a board."""
def __init__(self):
"""Initializes a task to create a pull request trello card."""
super().__init__()
self._pull_request_service = PullRequestService()
def _card_body(self):
"""Concrete helper method.
Internal helper to format the trello card body, based on the data
passed in.
Returns:
str: the markdown template for the Trello card created.
"""
return textwrap.dedent(
f"""
# GitHub Pull Request Opened By Community Member
___
- Pull Request link: [{self._title}]({self._url})
- Opened by: [{self._user}]({self._user_url})
___
### Pull Request Body
___
"""
) + self._body
def _persist_card_to_database(self, card):
"""Concrete helper method.
Internal helper to save the record created to the database.
Args:
card (trello.Card): An object representing the trello card created.
Returns:
None
"""
self._pull_request_service.create(
name=self._title,
url=self._url,
github_pull_request_id=self._id,
repo_id=self._repo_id,
trello_board_id=card.board_id,
trello_card_id=card.id,
trello_card_url=card.url,
trello_list_id=card.list_id
)
| 0 | 0 | 0 |
12953800b5b6ce796066391662bc4943c6f2fee9 | 618 | py | Python | setup.py | Habidatum/slisonner | 488be30a199a5d29271e24377c37a7ad83d52e3e | [
"MIT"
] | 2 | 2017-02-06T17:15:11.000Z | 2017-04-17T13:18:18.000Z | setup.py | Habidatum/slisonner | 488be30a199a5d29271e24377c37a7ad83d52e3e | [
"MIT"
] | null | null | null | setup.py | Habidatum/slisonner | 488be30a199a5d29271e24377c37a7ad83d52e3e | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
from setuptools import setup, find_packages
requirements = ['lz4tools==1.3.1.2',
'numpy',
'py==1.4.31',
'pytest==3.0.3']
setup(name='slisonner',
version='0.7.9',
description='Habidatum Chronotope Slison encode/decode utility',
long_description='',
author='Nikita Pestrov',
author_email='nikita.pestrov@habidatum.com',
maintainer='Nikita Pestrov',
maintainer_email='nikita.pestrov@habidatum.com',
packages=find_packages(),
install_requires=requirements,
platforms='any',
classifiers=['Programming Language :: Python :: 3.4'])
| 26.869565 | 70 | 0.673139 | #!/usr/bin/env python3
from setuptools import setup, find_packages
requirements = ['lz4tools==1.3.1.2',
'numpy',
'py==1.4.31',
'pytest==3.0.3']
setup(name='slisonner',
version='0.7.9',
description='Habidatum Chronotope Slison encode/decode utility',
long_description='',
author='Nikita Pestrov',
author_email='nikita.pestrov@habidatum.com',
maintainer='Nikita Pestrov',
maintainer_email='nikita.pestrov@habidatum.com',
packages=find_packages(),
install_requires=requirements,
platforms='any',
classifiers=['Programming Language :: Python :: 3.4'])
| 0 | 0 | 0 |
dae537a9e6566c98d886e38c1cba095f8bb8a63d | 13,450 | py | Python | scripts/convergence.py | Miedema/MCNetwork | daab1fe5880c47695c6e21124f99aa6b2589aba1 | [
"Apache-2.0"
] | null | null | null | scripts/convergence.py | Miedema/MCNetwork | daab1fe5880c47695c6e21124f99aa6b2589aba1 | [
"Apache-2.0"
] | null | null | null | scripts/convergence.py | Miedema/MCNetwork | daab1fe5880c47695c6e21124f99aa6b2589aba1 | [
"Apache-2.0"
] | 1 | 2020-10-14T09:45:06.000Z | 2020-10-14T09:45:06.000Z | #!/usr/bin/python3
from tools import *
from sys import argv
from os.path import join
import h5py
import matplotlib.pylab as plt
import numpy as np
from time import sleep
if len(argv) > 1:
pathToSimFolder = argv[1]
else:
pathToSimFolder = "../data/"
parameters, electrodes = readParameters(pathToSimFolder)
fileOpenTries = 0
while fileOpenTries < 50:
fileOpenTries += 1
try:
with h5py.File(join(pathToSimFolder, "data.hdf5"), "r") as dataFile:
voltages = np.array(dataFile["/voltages"][:])
optEnergy = np.array(dataFile["/optEnergy"][:])
while True:
try:
generations = np.array(dataFile["/generation"][:])
mode = "genetic"
break
except KeyError:
pass
try:
basinAccepted = np.array(dataFile["/basinAccepted"][:], dtype=int)
accepted = basinAccepted.astype(bool)
notAccepted = np.invert(accepted)
mode = "basinHop"
break
except KeyError:
pass
mode = "MC"
try:
accepted = np.array(dataFile["/accepted"][:], dtype=bool)
notAccepted = np.invert(accepted)
except KeyError:
accepted = np.ones(
optEnergy.shape, dtype=bool
) # support for deprecated version
notAccepted = np.invert(accepted)
break
break
except OSError as e:
if "No such file" in repr(e):
raise e
else:
print(f"could not open file. try number {fileOpenTries}")
sleep(1)
cotrolElectrodeIndices = list(range(0, len(electrodes)))
cotrolElectrodeIndices.remove(parameters["outputElectrode"])
cotrolElectrodeIndices.remove(parameters["inputElectrode1"])
cotrolElectrodeIndices.remove(parameters["inputElectrode2"])
controlVoltages = voltages[:, cotrolElectrodeIndices]
if mode == "MC":
distance = 0
meanRange = 1000
displace = []
for i in range(int(distance + meanRange / 2), controlVoltages.shape[0]):
mean = np.mean(
controlVoltages[
int(i - distance - meanRange / 2) : int(i - distance + meanRange / 2), :
],
axis=0,
)
# displace.append(np.sqrt(np.sum((controlVoltages[i])**2)))
displace.append(np.sqrt(np.sum((mean - controlVoltages[i]) ** 2)))
MSD = np.sum((controlVoltages[0] - controlVoltages[:]) ** 2, axis=1)
fig, ax = plt.subplots(1, 1, figsize=(4.980614173228346, 3.2))
ax.plot(range(len(MSD)), MSD, "r-", label="MSD")
ax2 = ax.twinx()
ax2.plot(
range(int(distance + meanRange / 2), controlVoltages.shape[0]),
displace,
"k-",
label="displacement",
)
ax.legend()
ax2.legend()
ax.set_xlabel("step")
ax.set_ylabel("displacement")
plt.savefig(join(pathToSimFolder, "displacement.png"), bbox_inches="tight", dpi=300)
# plt.show()
plt.close(fig)
fig, ax = plt.subplots(1, 1, figsize=(4.980614173228346, 3.2))
ax.plot(np.maximum.accumulate(optEnergy), color="darkorange", label="best")
ax.plot(
np.arange(optEnergy.shape[0])[notAccepted[:, 0]],
optEnergy[notAccepted],
".",
ms=1,
color="darkred",
label="not accepted",
zorder=10,
)
ax.plot(
np.arange(optEnergy.shape[0])[accepted[:, 0]],
optEnergy[accepted],
".",
ms=1,
color="darkgreen",
label="accepted",
zorder=10,
)
# ax.set_xlim(-0.15,0.65)
ax.set_ylim(0.15, 1.05)
ax.set_xlabel("iteration")
ax.set_ylabel(r"$\mathcal{F}$")
ax.legend()
plt.savefig(join(pathToSimFolder, "convergence.png"), bbox_inches="tight", dpi=300)
# plt.show()
plt.close(fig)
fig, ax = plt.subplots(1, 1, figsize=(4.980614173228346, 3.2))
ax.plot(np.maximum.accumulate(optEnergy), color="darkorange", label="best")
ax.plot(
np.arange(optEnergy.shape[0])[notAccepted[:, 0]],
optEnergy[notAccepted],
".",
ms=1,
color="darkred",
label="not accepted",
zorder=10,
)
ax.plot(
np.arange(optEnergy.shape[0])[accepted[:, 0]],
optEnergy[accepted],
".",
ms=1,
color="darkgreen",
label="accepted",
zorder=10,
)
ax2 = ax.twinx()
ax.set_zorder(ax2.get_zorder() + 1)
ax.patch.set_visible(False)
ax2.plot(
range(int(distance + meanRange / 2), controlVoltages.shape[0]),
displace,
"k-",
label="displacement",
)
ax.set_ylim(0.15, 1.05)
ax.set_xlabel("iteration")
ax.set_ylabel(r"$\mathcal{F}$")
ax2.set_ylabel("displacement")
# ax.legend([line],[line.get_label()])
# ax2.legend()
plt.savefig(
join(pathToSimFolder, "convergence_displacement.png"),
bbox_inches="tight",
dpi=300,
)
# plt.show()
plt.close(fig)
###############################
if mode == "genetic":
distance = 0
meanRange = 1000
displace = []
for i in range(int(distance + meanRange / 2), controlVoltages.shape[0]):
mean = np.mean(
controlVoltages[
int(i - distance - meanRange / 2) : int(i - distance + meanRange / 2), :
],
axis=0,
)
# displace.append(np.sqrt(np.sum((controlVoltages[i])**2)))
displace.append(np.sqrt(np.sum((mean - controlVoltages[i]) ** 2)))
MSD = np.sum((controlVoltages[0] - controlVoltages[:]) ** 2, axis=1)
fig, ax = plt.subplots(1, 1, figsize=(4.980614173228346, 3.2))
ax.plot(range(len(MSD)), MSD, "r-", label="MSD")
ax2 = ax.twinx()
ax2.plot(
range(int(distance + meanRange / 2), controlVoltages.shape[0]),
displace,
"k-",
label="displacement",
)
ax.legend()
ax2.legend()
ax.set_xlabel("step")
ax.set_ylabel("displacement")
plt.savefig(join(pathToSimFolder, "displacement.png"), bbox_inches="tight", dpi=300)
# plt.show()
plt.close(fig)
genBest = np.empty(optEnergy.shape)
for i in range(int(optEnergy.shape[0] / 25)):
genBest[i * 25 : (i + 1) * 25] = max(optEnergy[i * 25 : (i + 1) * 25])
fig, ax = plt.subplots(1, 1, figsize=(4.980614173228346, 3.2))
ax.plot(np.maximum.accumulate(optEnergy), color="darkorange", label="best")
ax.plot(optEnergy, ".", ms=1, color="darkgreen", label="all")
ax.plot(genBest, color="darkblue", label="gen best")
# ax.set_xlim(-0.15,0.65)
ax.set_ylim(0.15, 1.05)
ax.set_xlabel("iteration")
ax.set_ylabel(r"$\mathcal{F}$")
ax.legend()
plt.savefig(join(pathToSimFolder, "convergence.png"), bbox_inches="tight", dpi=300)
# plt.show()
plt.close(fig)
fig, ax = plt.subplots(1, 1, figsize=(4.980614173228346, 3.2))
ax.plot(np.maximum.accumulate(optEnergy), color="darkorange", label="best")
ax.plot(optEnergy, ".", ms=1, color="darkgreen", label="all")
ax.plot(genBest, color="darkblue", label="gen best")
ax2 = ax.twinx()
ax.set_zorder(ax2.get_zorder() + 1)
ax.patch.set_visible(False)
ax2.plot(
range(int(distance + meanRange / 2), controlVoltages.shape[0]),
displace,
"k-",
label="displacement",
)
ax.set_ylim(0.15, 1.05)
ax.set_xlabel("iteration")
ax.set_ylabel(r"$\mathcal{F}$")
ax2.set_ylabel("displacement")
# ax.legend([line],[line.get_label()])
# ax2.legend()
plt.savefig(
join(pathToSimFolder, "convergence_displacement.png"),
bbox_inches="tight",
dpi=300,
)
# plt.show()
plt.close(fig)
###############################
if mode == "basinHop":
fig, ax = plt.subplots(1, 1, figsize=(4.980614173228346, 3.2))
ax.plot(np.maximum.accumulate(optEnergy), color="darkorange", label="best")
ax.plot(
np.arange(optEnergy.shape[0])[notAccepted[:, 0]],
optEnergy[notAccepted],
".",
ms=1,
color="darkred",
label="not accepted",
zorder=10,
)
ax.plot(
np.arange(optEnergy.shape[0])[accepted[:, 0]],
optEnergy[accepted],
".",
ms=1,
color="darkgreen",
label="accepted",
zorder=10,
)
buff = np.where(basinAccepted[:, 0] == 2)[0]
basinChanges = np.array([buff, np.zeros(buff.shape)], dtype=int)
buff = np.where(basinAccepted[:, 0] == 3)[0]
basinChanges = np.append(
basinChanges, np.array([buff, np.ones(buff.shape)], dtype=int), axis=1
)
basinChanges = basinChanges[:, np.argsort(basinChanges[0])]
if basinChanges.shape[1] > 0:
for i in range(basinChanges.shape[1]):
if basinChanges[1, i]:
ax.axvline(basinChanges[0, i], color="darkgreen", zorder=-1)
else:
ax.axvline(basinChanges[0, i], color="darkred", zorder=-1)
ax.plot(
np.arange(0, basinChanges[0, 0]),
np.maximum.accumulate(optEnergy[: basinChanges[0, 0]]),
color="darkblue",
label="basin best",
)
for i in range(1, basinChanges.shape[1]):
ax.plot(
np.arange(basinChanges[0, i - 1], basinChanges[0, i]),
np.maximum.accumulate(
optEnergy[basinChanges[0, i - 1] : basinChanges[0, i]]
),
color="darkblue",
)
ax.plot(
np.arange(basinChanges[0, -1], len(optEnergy)),
np.maximum.accumulate(optEnergy[basinChanges[0, -1] :]),
color="darkblue",
)
# ax.set_xlim(-0.15,0.65)
ax.set_ylim(0.15, 1.05)
ax.set_xlabel("iteration")
ax.set_ylabel(r"$\mathcal{F}$")
ax.legend()
plt.savefig(join(pathToSimFolder, "convergence.png"), bbox_inches="tight", dpi=300)
# plt.show()
plt.close(fig)
fig, ax = plt.subplots(1, 1, figsize=(4.980614173228346, 3.2))
ax.plot(np.maximum.accumulate(optEnergy), color="darkorange", label="best")
ax.plot(
np.arange(optEnergy.shape[0])[notAccepted[:, 0]],
optEnergy[notAccepted],
".",
ms=1,
color="darkred",
label="not accepted",
zorder=10,
)
ax.plot(
np.arange(optEnergy.shape[0])[accepted[:, 0]],
optEnergy[accepted],
".",
ms=1,
color="darkgreen",
label="accepted",
zorder=10,
)
if basinChanges.shape[1] > 0:
for i in range(basinChanges.shape[1]):
if basinChanges[1, i]:
ax.axvline(basinChanges[0, i], color="darkgreen", zorder=-1)
else:
ax.axvline(basinChanges[0, i], color="darkred", zorder=-1)
ax.plot(
np.arange(0, basinChanges[0, 0]),
np.maximum.accumulate(optEnergy[: basinChanges[0, 0]]),
color="darkblue",
label="basin best",
)
for i in range(1, basinChanges.shape[1]):
ax.plot(
np.arange(basinChanges[0, i - 1], basinChanges[0, i]),
np.maximum.accumulate(
optEnergy[basinChanges[0, i - 1] : basinChanges[0, i]]
),
color="darkblue",
)
ax.plot(
np.arange(basinChanges[0, -1], len(optEnergy)),
np.maximum.accumulate(optEnergy[basinChanges[0, -1] :]),
color="darkblue",
)
ax2 = ax.twinx()
ax.set_zorder(ax2.get_zorder() + 1)
ax.patch.set_visible(False)
# calc last basin best
basinBestIdx = np.argmax(optEnergy[0 : basinChanges[0, 0]])
basinBestVoltages = controlVoltages[basinBestIdx]
# ax2.plot(np.arange(0,basinChanges[0,0]), np.sqrt(np.sum((controlVoltages[0:basinChanges[0,0]] - basinBestVoltages)**2, axis = 1 )) ,color="darkblue")
for i in range(1, basinChanges.shape[1]):
ax2.plot(
np.arange(basinChanges[0, i - 1], basinChanges[0, i]),
np.sqrt(
np.sum(
(
controlVoltages[basinChanges[0, i - 1] : basinChanges[0, i]]
- basinBestVoltages
)
** 2,
axis=1,
)
),
color="k",
)
# calc last basin best
if basinChanges[1, i]:
basinBestIdx = (
np.argmax(optEnergy[basinChanges[0, i - 1] : basinChanges[0, i]])
+ basinChanges[0, i - 1]
)
basinBestVoltages = controlVoltages[basinBestIdx]
ax2.plot(
np.arange(basinChanges[0, -1], len(optEnergy)),
np.sqrt(
np.sum(
(controlVoltages[basinChanges[0, -1] :] - basinBestVoltages) ** 2,
axis=1,
)
),
color="k",
)
ax.set_ylim(0.15, 1.05)
ax.set_xlabel("iteration")
ax.set_ylabel(r"$\mathcal{F}$")
ax2.set_ylabel("dist")
# ax.legend([line],[line.get_label()])
# ax2.legend()
plt.savefig(
join(pathToSimFolder, "convergence_dist.png"), bbox_inches="tight", dpi=300
)
# plt.show()
plt.close(fig)
| 28.375527 | 156 | 0.542602 | #!/usr/bin/python3
from tools import *
from sys import argv
from os.path import join
import h5py
import matplotlib.pylab as plt
import numpy as np
from time import sleep
if len(argv) > 1:
pathToSimFolder = argv[1]
else:
pathToSimFolder = "../data/"
parameters, electrodes = readParameters(pathToSimFolder)
fileOpenTries = 0
while fileOpenTries < 50:
fileOpenTries += 1
try:
with h5py.File(join(pathToSimFolder, "data.hdf5"), "r") as dataFile:
voltages = np.array(dataFile["/voltages"][:])
optEnergy = np.array(dataFile["/optEnergy"][:])
while True:
try:
generations = np.array(dataFile["/generation"][:])
mode = "genetic"
break
except KeyError:
pass
try:
basinAccepted = np.array(dataFile["/basinAccepted"][:], dtype=int)
accepted = basinAccepted.astype(bool)
notAccepted = np.invert(accepted)
mode = "basinHop"
break
except KeyError:
pass
mode = "MC"
try:
accepted = np.array(dataFile["/accepted"][:], dtype=bool)
notAccepted = np.invert(accepted)
except KeyError:
accepted = np.ones(
optEnergy.shape, dtype=bool
) # support for deprecated version
notAccepted = np.invert(accepted)
break
break
except OSError as e:
if "No such file" in repr(e):
raise e
else:
print(f"could not open file. try number {fileOpenTries}")
sleep(1)
cotrolElectrodeIndices = list(range(0, len(electrodes)))
cotrolElectrodeIndices.remove(parameters["outputElectrode"])
cotrolElectrodeIndices.remove(parameters["inputElectrode1"])
cotrolElectrodeIndices.remove(parameters["inputElectrode2"])
controlVoltages = voltages[:, cotrolElectrodeIndices]
if mode == "MC":
distance = 0
meanRange = 1000
displace = []
for i in range(int(distance + meanRange / 2), controlVoltages.shape[0]):
mean = np.mean(
controlVoltages[
int(i - distance - meanRange / 2) : int(i - distance + meanRange / 2), :
],
axis=0,
)
# displace.append(np.sqrt(np.sum((controlVoltages[i])**2)))
displace.append(np.sqrt(np.sum((mean - controlVoltages[i]) ** 2)))
MSD = np.sum((controlVoltages[0] - controlVoltages[:]) ** 2, axis=1)
fig, ax = plt.subplots(1, 1, figsize=(4.980614173228346, 3.2))
ax.plot(range(len(MSD)), MSD, "r-", label="MSD")
ax2 = ax.twinx()
ax2.plot(
range(int(distance + meanRange / 2), controlVoltages.shape[0]),
displace,
"k-",
label="displacement",
)
ax.legend()
ax2.legend()
ax.set_xlabel("step")
ax.set_ylabel("displacement")
plt.savefig(join(pathToSimFolder, "displacement.png"), bbox_inches="tight", dpi=300)
# plt.show()
plt.close(fig)
fig, ax = plt.subplots(1, 1, figsize=(4.980614173228346, 3.2))
ax.plot(np.maximum.accumulate(optEnergy), color="darkorange", label="best")
ax.plot(
np.arange(optEnergy.shape[0])[notAccepted[:, 0]],
optEnergy[notAccepted],
".",
ms=1,
color="darkred",
label="not accepted",
zorder=10,
)
ax.plot(
np.arange(optEnergy.shape[0])[accepted[:, 0]],
optEnergy[accepted],
".",
ms=1,
color="darkgreen",
label="accepted",
zorder=10,
)
# ax.set_xlim(-0.15,0.65)
ax.set_ylim(0.15, 1.05)
ax.set_xlabel("iteration")
ax.set_ylabel(r"$\mathcal{F}$")
ax.legend()
plt.savefig(join(pathToSimFolder, "convergence.png"), bbox_inches="tight", dpi=300)
# plt.show()
plt.close(fig)
fig, ax = plt.subplots(1, 1, figsize=(4.980614173228346, 3.2))
ax.plot(np.maximum.accumulate(optEnergy), color="darkorange", label="best")
ax.plot(
np.arange(optEnergy.shape[0])[notAccepted[:, 0]],
optEnergy[notAccepted],
".",
ms=1,
color="darkred",
label="not accepted",
zorder=10,
)
ax.plot(
np.arange(optEnergy.shape[0])[accepted[:, 0]],
optEnergy[accepted],
".",
ms=1,
color="darkgreen",
label="accepted",
zorder=10,
)
ax2 = ax.twinx()
ax.set_zorder(ax2.get_zorder() + 1)
ax.patch.set_visible(False)
ax2.plot(
range(int(distance + meanRange / 2), controlVoltages.shape[0]),
displace,
"k-",
label="displacement",
)
ax.set_ylim(0.15, 1.05)
ax.set_xlabel("iteration")
ax.set_ylabel(r"$\mathcal{F}$")
ax2.set_ylabel("displacement")
# ax.legend([line],[line.get_label()])
# ax2.legend()
plt.savefig(
join(pathToSimFolder, "convergence_displacement.png"),
bbox_inches="tight",
dpi=300,
)
# plt.show()
plt.close(fig)
###############################
if mode == "genetic":
distance = 0
meanRange = 1000
displace = []
for i in range(int(distance + meanRange / 2), controlVoltages.shape[0]):
mean = np.mean(
controlVoltages[
int(i - distance - meanRange / 2) : int(i - distance + meanRange / 2), :
],
axis=0,
)
# displace.append(np.sqrt(np.sum((controlVoltages[i])**2)))
displace.append(np.sqrt(np.sum((mean - controlVoltages[i]) ** 2)))
MSD = np.sum((controlVoltages[0] - controlVoltages[:]) ** 2, axis=1)
fig, ax = plt.subplots(1, 1, figsize=(4.980614173228346, 3.2))
ax.plot(range(len(MSD)), MSD, "r-", label="MSD")
ax2 = ax.twinx()
ax2.plot(
range(int(distance + meanRange / 2), controlVoltages.shape[0]),
displace,
"k-",
label="displacement",
)
ax.legend()
ax2.legend()
ax.set_xlabel("step")
ax.set_ylabel("displacement")
plt.savefig(join(pathToSimFolder, "displacement.png"), bbox_inches="tight", dpi=300)
# plt.show()
plt.close(fig)
genBest = np.empty(optEnergy.shape)
for i in range(int(optEnergy.shape[0] / 25)):
genBest[i * 25 : (i + 1) * 25] = max(optEnergy[i * 25 : (i + 1) * 25])
fig, ax = plt.subplots(1, 1, figsize=(4.980614173228346, 3.2))
ax.plot(np.maximum.accumulate(optEnergy), color="darkorange", label="best")
ax.plot(optEnergy, ".", ms=1, color="darkgreen", label="all")
ax.plot(genBest, color="darkblue", label="gen best")
# ax.set_xlim(-0.15,0.65)
ax.set_ylim(0.15, 1.05)
ax.set_xlabel("iteration")
ax.set_ylabel(r"$\mathcal{F}$")
ax.legend()
plt.savefig(join(pathToSimFolder, "convergence.png"), bbox_inches="tight", dpi=300)
# plt.show()
plt.close(fig)
fig, ax = plt.subplots(1, 1, figsize=(4.980614173228346, 3.2))
ax.plot(np.maximum.accumulate(optEnergy), color="darkorange", label="best")
ax.plot(optEnergy, ".", ms=1, color="darkgreen", label="all")
ax.plot(genBest, color="darkblue", label="gen best")
ax2 = ax.twinx()
ax.set_zorder(ax2.get_zorder() + 1)
ax.patch.set_visible(False)
ax2.plot(
range(int(distance + meanRange / 2), controlVoltages.shape[0]),
displace,
"k-",
label="displacement",
)
ax.set_ylim(0.15, 1.05)
ax.set_xlabel("iteration")
ax.set_ylabel(r"$\mathcal{F}$")
ax2.set_ylabel("displacement")
# ax.legend([line],[line.get_label()])
# ax2.legend()
plt.savefig(
join(pathToSimFolder, "convergence_displacement.png"),
bbox_inches="tight",
dpi=300,
)
# plt.show()
plt.close(fig)
###############################
if mode == "basinHop":
fig, ax = plt.subplots(1, 1, figsize=(4.980614173228346, 3.2))
ax.plot(np.maximum.accumulate(optEnergy), color="darkorange", label="best")
ax.plot(
np.arange(optEnergy.shape[0])[notAccepted[:, 0]],
optEnergy[notAccepted],
".",
ms=1,
color="darkred",
label="not accepted",
zorder=10,
)
ax.plot(
np.arange(optEnergy.shape[0])[accepted[:, 0]],
optEnergy[accepted],
".",
ms=1,
color="darkgreen",
label="accepted",
zorder=10,
)
buff = np.where(basinAccepted[:, 0] == 2)[0]
basinChanges = np.array([buff, np.zeros(buff.shape)], dtype=int)
buff = np.where(basinAccepted[:, 0] == 3)[0]
basinChanges = np.append(
basinChanges, np.array([buff, np.ones(buff.shape)], dtype=int), axis=1
)
basinChanges = basinChanges[:, np.argsort(basinChanges[0])]
if basinChanges.shape[1] > 0:
for i in range(basinChanges.shape[1]):
if basinChanges[1, i]:
ax.axvline(basinChanges[0, i], color="darkgreen", zorder=-1)
else:
ax.axvline(basinChanges[0, i], color="darkred", zorder=-1)
ax.plot(
np.arange(0, basinChanges[0, 0]),
np.maximum.accumulate(optEnergy[: basinChanges[0, 0]]),
color="darkblue",
label="basin best",
)
for i in range(1, basinChanges.shape[1]):
ax.plot(
np.arange(basinChanges[0, i - 1], basinChanges[0, i]),
np.maximum.accumulate(
optEnergy[basinChanges[0, i - 1] : basinChanges[0, i]]
),
color="darkblue",
)
ax.plot(
np.arange(basinChanges[0, -1], len(optEnergy)),
np.maximum.accumulate(optEnergy[basinChanges[0, -1] :]),
color="darkblue",
)
# ax.set_xlim(-0.15,0.65)
ax.set_ylim(0.15, 1.05)
ax.set_xlabel("iteration")
ax.set_ylabel(r"$\mathcal{F}$")
ax.legend()
plt.savefig(join(pathToSimFolder, "convergence.png"), bbox_inches="tight", dpi=300)
# plt.show()
plt.close(fig)
fig, ax = plt.subplots(1, 1, figsize=(4.980614173228346, 3.2))
ax.plot(np.maximum.accumulate(optEnergy), color="darkorange", label="best")
ax.plot(
np.arange(optEnergy.shape[0])[notAccepted[:, 0]],
optEnergy[notAccepted],
".",
ms=1,
color="darkred",
label="not accepted",
zorder=10,
)
ax.plot(
np.arange(optEnergy.shape[0])[accepted[:, 0]],
optEnergy[accepted],
".",
ms=1,
color="darkgreen",
label="accepted",
zorder=10,
)
if basinChanges.shape[1] > 0:
for i in range(basinChanges.shape[1]):
if basinChanges[1, i]:
ax.axvline(basinChanges[0, i], color="darkgreen", zorder=-1)
else:
ax.axvline(basinChanges[0, i], color="darkred", zorder=-1)
ax.plot(
np.arange(0, basinChanges[0, 0]),
np.maximum.accumulate(optEnergy[: basinChanges[0, 0]]),
color="darkblue",
label="basin best",
)
for i in range(1, basinChanges.shape[1]):
ax.plot(
np.arange(basinChanges[0, i - 1], basinChanges[0, i]),
np.maximum.accumulate(
optEnergy[basinChanges[0, i - 1] : basinChanges[0, i]]
),
color="darkblue",
)
ax.plot(
np.arange(basinChanges[0, -1], len(optEnergy)),
np.maximum.accumulate(optEnergy[basinChanges[0, -1] :]),
color="darkblue",
)
ax2 = ax.twinx()
ax.set_zorder(ax2.get_zorder() + 1)
ax.patch.set_visible(False)
# calc last basin best
basinBestIdx = np.argmax(optEnergy[0 : basinChanges[0, 0]])
basinBestVoltages = controlVoltages[basinBestIdx]
# ax2.plot(np.arange(0,basinChanges[0,0]), np.sqrt(np.sum((controlVoltages[0:basinChanges[0,0]] - basinBestVoltages)**2, axis = 1 )) ,color="darkblue")
for i in range(1, basinChanges.shape[1]):
ax2.plot(
np.arange(basinChanges[0, i - 1], basinChanges[0, i]),
np.sqrt(
np.sum(
(
controlVoltages[basinChanges[0, i - 1] : basinChanges[0, i]]
- basinBestVoltages
)
** 2,
axis=1,
)
),
color="k",
)
# calc last basin best
if basinChanges[1, i]:
basinBestIdx = (
np.argmax(optEnergy[basinChanges[0, i - 1] : basinChanges[0, i]])
+ basinChanges[0, i - 1]
)
basinBestVoltages = controlVoltages[basinBestIdx]
ax2.plot(
np.arange(basinChanges[0, -1], len(optEnergy)),
np.sqrt(
np.sum(
(controlVoltages[basinChanges[0, -1] :] - basinBestVoltages) ** 2,
axis=1,
)
),
color="k",
)
ax.set_ylim(0.15, 1.05)
ax.set_xlabel("iteration")
ax.set_ylabel(r"$\mathcal{F}$")
ax2.set_ylabel("dist")
# ax.legend([line],[line.get_label()])
# ax2.legend()
plt.savefig(
join(pathToSimFolder, "convergence_dist.png"), bbox_inches="tight", dpi=300
)
# plt.show()
plt.close(fig)
| 0 | 0 | 0 |
17daa0f82b74591eac225b897ad2349f5612d7e1 | 1,813 | py | Python | src/planteye_vision/data_chunks/data_chunk.py | p2o-lab/planteye | cb834d8215111e3f2e122f11439426679c1248bd | [
"MIT"
] | 1 | 2022-01-25T08:03:27.000Z | 2022-01-25T08:03:27.000Z | src/planteye_vision/data_chunks/data_chunk.py | p2o-lab/planteye-vision | cb834d8215111e3f2e122f11439426679c1248bd | [
"MIT"
] | null | null | null | src/planteye_vision/data_chunks/data_chunk.py | p2o-lab/planteye-vision | cb834d8215111e3f2e122f11439426679c1248bd | [
"MIT"
] | 1 | 2021-12-03T09:17:35.000Z | 2021-12-03T09:17:35.000Z | from abc import ABC, abstractmethod
from planteye_vision.data_chunks.data_chunk_data import DataChunkData
from planteye_vision.data_chunks.metadata_chunk import MetadataChunk, MetadataChunkData
from planteye_vision.data_chunks.data_chunk_status import DataChunkStatus
| 30.728814 | 109 | 0.679537 | from abc import ABC, abstractmethod
from planteye_vision.data_chunks.data_chunk_data import DataChunkData
from planteye_vision.data_chunks.metadata_chunk import MetadataChunk, MetadataChunkData
from planteye_vision.data_chunks.data_chunk_status import DataChunkStatus
class DataChunk(ABC):
@abstractmethod
def add_data(self, data: DataChunkData):
pass
@abstractmethod
def add_metadata(self, metadata: MetadataChunk):
pass
@abstractmethod
def add_status(self, status: DataChunkStatus):
pass
@abstractmethod
def as_dict(self):
pass
class GeneralDataChunk(DataChunk):
def __init__(self, name: str, chunk_type: str, parameters: dict, hidden: bool = False):
self.name = name
self.chunk_type = chunk_type
self.hidden = hidden
self.parameters = parameters
self.data = []
self.metadata = []
self.status = []
def add_data(self, data_chunk: DataChunkData):
self.data.append(data_chunk)
def add_metadata(self, metadata_chunk: MetadataChunkData):
self.metadata.append(metadata_chunk)
def add_status(self, status_chunk: DataChunkStatus):
self.status.append(status_chunk)
def as_dict(self):
data_dict = {}
for data_chunk in self.data:
data_dict.update({data_chunk.name: data_chunk.as_dict()})
metadata_dict = {}
for metadata_chunk in self.metadata:
metadata_dict.update(metadata_chunk.as_dict())
status_dict = {}
for status_chunk in self.status:
status_dict.update(status_chunk.as_dict())
return {'type': self.chunk_type, 'name': self.name, 'parameters': self.parameters, 'data': data_dict,
'metadata': metadata_dict, 'status': status_dict}
| 1,163 | 200 | 180 |
84263ca7d53234e16d5fb6d9a14003d623ee7686 | 4,101 | py | Python | authserver/app.py | bendalby82/tokenrotatorproxy | 05b0f19b53eb1b39b30cb440d36bfbc778b5ad60 | [
"MIT"
] | 1 | 2017-01-10T19:01:10.000Z | 2017-01-10T19:01:10.000Z | authserver/app.py | bendalby82/tokenrotatorproxy | 05b0f19b53eb1b39b30cb440d36bfbc778b5ad60 | [
"MIT"
] | null | null | null | authserver/app.py | bendalby82/tokenrotatorproxy | 05b0f19b53eb1b39b30cb440d36bfbc778b5ad60 | [
"MIT"
] | null | null | null | # Name: authserver.py
#
# Description: Validates a one-time-passocde that is included in the header.
# Cases:
# [1] Valid OTP - belongs to a known application, not used
# [2] Valid OTP - belongs to a known application, but already used
# [3] Invalid OTP - not recgnised
# [4] No OTP
#
#
#!flask/bin/python
from flask import Flask, jsonify, request
from flask_api import status
import requests
OTPCodeTable=dict()
#Structure to hold an OTP code for an authorized application
app = Flask(__name__)
#Placeholder - return:
# True if this token should be expired
# False if this token can continue to be used
#Simpistic function to get an unused code from the pool
@app.route('/', methods=['GET'])
@app.route('/auth', methods=['GET'])
@app.after_request
OTPCodeTable=initializeTestCodes()
if __name__ == '__main__':
requests.packages.urllib3.disable_warnings()
app.run(host='0.0.0.0',port=80) | 32.808 | 122 | 0.655938 | # Name: authserver.py
#
# Description: Validates a one-time-passocde that is included in the header.
# Cases:
# [1] Valid OTP - belongs to a known application, not used
# [2] Valid OTP - belongs to a known application, but already used
# [3] Invalid OTP - not recgnised
# [4] No OTP
#
#
#!flask/bin/python
from flask import Flask, jsonify, request
from flask_api import status
import requests
OTPCodeTable=dict()
#Structure to hold an OTP code for an authorized application
class AuthorizedApplicationCode:
def __init__(self, otpcode="", authappurl="", codeused=False):
self.otpcode=otpcode
self.authappurl=authappurl
self.codeused=codeused
app = Flask(__name__)
def initializeTestCodes():
myotpcodetable=dict()
validused=AuthorizedApplicationCode("abc123","http://exampleclient/tokencatcher",True)
validfresh=AuthorizedApplicationCode("abc456","http://exampleclient/tokencatcher",False)
myotpcodetable[validused.otpcode]=validused
myotpcodetable[validfresh.otpcode]=validfresh
return myotpcodetable
def getotpcode(headerdictionary):
optcode = ""
for k, v in headerdictionary.items():
if k == "Otpcode":
optcode = v
break
if optcode == "":
print "Optcode header empty or missing"
return optcode
def getAuthorizedApplicationCode(otpcode):
global OTPCodeTable
myAAC = dict()
if otpcode in OTPCodeTable:
myAAC = OTPCodeTable[otpcode]
return myAAC
#Placeholder - return:
# True if this token should be expired
# False if this token can continue to be used
def checkTokenExpiry(authappcode):
return False
#Simpistic function to get an unused code from the pool
def getNewAuthorizedApplicationCode():
global OTPCodeTable
newcode=AuthorizedApplicationCode("","",True)
for k, v in OTPCodeTable.items():
if v.codeused==False:
newcode=v
return newcode
def serializeheaders(mydic):
headerstring = ""
for k, v in mydic.items():
if headerstring == "":
headerstring = "%s:%s" % (k,v)
else:
headerstring = "%s,%s:%s" % (headerstring,k,v)
return headerstring
@app.route('/', methods=['GET'])
def get_base():
otpcode = getotpcode(request.headers)
#print serializeheaders(request.headers)
return 'AUTH SERVICE UP AND RUNNING: Your otpcode was: %s' % otpcode
def postNewToken():
newcode = getNewAuthorizedApplicationCode()
postpayload = {'Otpcode': newcode.otpcode}
posturl = newcode.authappurl
resp = requests.post(posturl, data=postpayload)
return resp.status_code
@app.route('/auth', methods=['GET'])
def get_auth():
otpcode = getotpcode(request.headers)
returnMessage="Missing one time passcode"
#Default is to return 401
returnCode=status.HTTP_401_UNAUTHORIZED
if otpcode != "":
myAAC = getAuthorizedApplicationCode(otpcode)
if myAAC:
if myAAC.codeused == False:
if checkTokenExpiry(myAAC):
#TODO: Actually expire the token
postresult = postNewToken()
returnMessage = "OK. Valid token received and expired, new token posted with response %d" % postresult
else:
returnMessage = "OK: Valid token received"
returnCode=status.HTTP_200_OK
else:
postresult = postNewToken()
returnMessage="Expired token. New token posted with response %d" % postresult
else:
returnMessage="Unrecognised token"
print "authserver: %s" % returnMessage
return returnMessage, returnCode
@app.after_request
def after_request(response):
#Allows us to call this from other domains.
response.headers.add('Access-Control-Allow-Origin', '*')
return response
OTPCodeTable=initializeTestCodes()
if __name__ == '__main__':
requests.packages.urllib3.disable_warnings()
app.run(host='0.0.0.0',port=80) | 2,799 | 11 | 273 |