Dataset schema (one row per Python source file; column: type, observed range):
- hexsha: string, length 40
- size: int64, 5 to 2.06M
- ext: string, 10 classes
- lang: string, 1 class
- max_stars_repo_path / max_issues_repo_path / max_forks_repo_path: string, length 3 to 248
- max_stars_repo_name / max_issues_repo_name / max_forks_repo_name: string, length 5 to 125
- max_stars_repo_head_hexsha / max_issues_repo_head_hexsha / max_forks_repo_head_hexsha: string, length 40 to 78
- max_stars_repo_licenses / max_issues_repo_licenses / max_forks_repo_licenses: list, length 1 to 10
- max_stars_count: int64, 1 to 191k, nullable; max_issues_count: int64, 1 to 67k, nullable; max_forks_count: int64, 1 to 105k, nullable
- max_stars_repo_stars_event_min/max_datetime, max_issues_repo_issues_event_min/max_datetime, max_forks_repo_forks_event_min/max_datetime: string, length 24, nullable
- content: string, length 5 to 2.06M
- avg_line_length: float64, 1 to 1.02M; max_line_length: int64, 3 to 1.03M; alphanum_fraction: float64, 0 to 1
- count_classes: int64, 0 to 1.6M; score_classes: float64, 0 to 1
- count_generators: int64, 0 to 651k; score_generators: float64, 0 to 1
- count_decorators: int64, 0 to 990k; score_decorators: float64, 0 to 1
- count_async_functions: int64, 0 to 235k; score_async_functions: float64, 0 to 1
- count_documentation: int64, 0 to 1.04M; score_documentation: float64, 0 to 1
hexsha: f775c9f689bfc087dae8bdb25d1cd48072e78520 | size: 1,138 | ext: py | lang: Python
max_stars: root/settings/__init__.py | daniel-waruo/e-commerse-api | 6b080039398fb4099a34335317d649dd67783f63 | ["Apache-2.0"] | count: 6 | 2019-11-21T10:09:49.000Z to 2021-06-19T09:52:59.000Z
max_issues: root/settings/__init__.py | daniel-waruo/e-commerse-api | 6b080039398fb4099a34335317d649dd67783f63 | ["Apache-2.0"] | count: null | dates: null
max_forks: root/settings/__init__.py | daniel-waruo/e-commerse-api | 6b080039398fb4099a34335317d649dd67783f63 | ["Apache-2.0"] | count: null | dates: null
content:
"""
This is a django-split-settings main file.
For more information read this:
https://github.com/sobolevn/django-split-settings
Default environment is `development`.
To change settings file:
`DJANGO_ENV=production python manage.py runserver`
"""
import django_heroku
from split_settings.tools import include
base_settings = [
'components/middleware.py', # middleware configuration
'components/apps.py', # installed applications
'components/database.py', # database settings
'components/pyuploadcare.py', # pyuploadcare settings
'components/rest_framework.py', # rest framework settings
'components/allauth.py', # allauth rest_auth settings
'components/currency.py', # currency settings
'components/email.py', # email settings
'components/common.py', # standard django settings
'components/cors_configuration.py',
# configuration for Access Control Allow Origin
'components/graphene.py',
# sendy config
'components/sendy.py'
]
# Include settings:
include(*base_settings)
django_heroku.settings(locals())
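The docstring says the environment is selected with `DJANGO_ENV` (e.g. `DJANGO_ENV=production python manage.py runserver`), but this file never reads that variable. A minimal sketch of how django-split-settings is commonly wired to honour it; the `environments/*.py` file names are hypothetical and only illustrate the pattern:
import os
from split_settings.tools import include, optional

ENV = os.environ.get('DJANGO_ENV', 'development')
include(
    *base_settings,
    # optional() keeps startup working even if the per-environment file is absent
    optional('environments/{0}.py'.format(ENV)),
)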
avg_line_length: 30.756757 | max_line_length: 62 | alphanum_fraction: 0.745167
count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 886 | score_documentation: 0.778559

hexsha: f77623537e19394a7aca74fbc25bffc73cb1952f | size: 584 | ext: py | lang: Python
max_stars: Dataset/Leetcode/train/125/245.py | kkcookies99/UAST | fff81885aa07901786141a71e5600a08d7cb4868 | ["MIT"] | count: null | dates: null
max_issues: Dataset/Leetcode/train/125/245.py | kkcookies99/UAST | fff81885aa07901786141a71e5600a08d7cb4868 | ["MIT"] | count: null | dates: null
max_forks: Dataset/Leetcode/train/125/245.py | kkcookies99/UAST | fff81885aa07901786141a71e5600a08d7cb4868 | ["MIT"] | count: null | dates: null
content:
class Solution:
    def XXX(self, s: str) -> bool:
        i = 0
        j = len(s) - 1
        # lower() converts every uppercase letter to lowercase and leaves other characters unchanged
        s = s.lower()
        while i < j:
            while not (97 <= ord(s[i]) <= 122 or 48 <= ord(s[i]) <= 57):
                if i == j:
                    return True
                i += 1
            while not (97 <= ord(s[j]) <= 122 or 48 <= ord(s[j]) <= 57):
                if i == j:
                    return True
                j -= 1
            if s[i] != s[j]:
                return False
            i += 1
            j -= 1
        return True
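A quick sanity check of the two-pointer routine above (XXX is the scaffold's placeholder for the LeetCode 125 "Valid Palindrome" entry point):
if __name__ == "__main__":
    sol = Solution()
    print(sol.XXX("A man, a plan, a canal: Panama"))  # True: non-alphanumerics are skipped, case ignored
    print(sol.XXX("race a car"))                      # False: "raceacar" is not a palindrome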
avg_line_length: 26.545455 | max_line_length: 71 | alphanum_fraction: 0.332192
count_classes: 611 | score_classes: 0.995114 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 54 | score_documentation: 0.087948

hexsha: f776be33a00e9a7a7de35e919a22d945de72c2c3 | size: 688 | ext: py | lang: Python
max_stars: testapp/urls.py | danigosa/django-simple-seo | 17610e50148c6672cb34e96654df1d3515b0444f | ["BSD-3-Clause"] | count: 11 | 2015-01-02T15:44:31.000Z to 2021-07-27T06:54:35.000Z
max_issues: testapp/urls.py | danigosa/django-simple-seo | 17610e50148c6672cb34e96654df1d3515b0444f | ["BSD-3-Clause"] | count: 8 | 2016-02-03T07:07:04.000Z to 2022-01-13T00:42:32.000Z
max_forks: testapp/urls.py | danigosa/django-simple-seo | 17610e50148c6672cb34e96654df1d3515b0444f | ["BSD-3-Clause"] | count: 8 | 2015-02-20T13:51:51.000Z to 2021-06-24T19:11:30.000Z
content:
from django.conf.urls import patterns, url, include
from django.contrib import admin
from django.conf import settings
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from .views import template_test
urlpatterns = patterns(
'',
url(r'^test/', template_test, name='template_test'),
url(r'^test2/', include('testapp.another_urls', namespace='foo', app_name='faa'))
)
admin.autodiscover()
urlpatterns += patterns(
'',
url(r'^admin/', include(admin.site.urls)),
)
if settings.DEBUG:
urlpatterns += staticfiles_urlpatterns()
import debug_toolbar
    urlpatterns += patterns(
        '',
        url(r'^__debug__/', include(debug_toolbar.urls)),
    )
avg_line_length: 23.724138 | max_line_length: 85 | alphanum_fraction: 0.713663
count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 94 | score_documentation: 0.136628

hexsha: f7796a989bdd258bec7902dd5290b418ab45666b | size: 1,645 | ext: py | lang: Python
max_stars: continual_learning/scenarios/utils.py | jaryP/ContinualAI | 7d9b7614066d219ebd72049692da23ad6ec132b0 | ["MIT"] | count: null | dates: null
max_issues: continual_learning/scenarios/utils.py | jaryP/ContinualAI | 7d9b7614066d219ebd72049692da23ad6ec132b0 | ["MIT"] | count: null | dates: null
max_forks: continual_learning/scenarios/utils.py | jaryP/ContinualAI | 7d9b7614066d219ebd72049692da23ad6ec132b0 | ["MIT"] | count: null | dates: null
content:
from typing import Sequence, Union
import numpy as np
from scipy.ndimage.interpolation import rotate as np_rotate
from PIL.Image import Image, fromarray
from torch import Tensor, tensor
from torchvision.transforms.functional import rotate
class ImageRotation(object):
def __init__(self, degree):
self.degree = degree
def __call__(self, img: Union[Image, Tensor, np.ndarray]):
if isinstance(img, np.ndarray):
img = np_rotate(img, angle=self.degree, reshape=False)
elif isinstance(img, Image):
img = img.rotate(self.degree)
elif isinstance(img, Tensor):
img = rotate(img, angle=self.degree)
else:
raise ValueError(f'Accepted types are: '
f'[ndarray, PIL Image, Tensor] {type(img)}')
return img
class PixelsPermutation(object):
def __init__(self, index_permutation: Sequence[int]):
self.permutation = index_permutation
def __call__(self, img: Union[Image, Tensor, np.ndarray]):
if isinstance(img, np.ndarray):
img = img.reshape(-1)[self.permutation].reshape(*img.shape)
        elif isinstance(img, Image):
            # PIL's getdata() has no reshape(); go through a numpy array instead.
            arr = np.asarray(img)
            arr = arr.reshape(-1)[self.permutation].reshape(*arr.shape)
            img = fromarray(arr)
elif isinstance(img, Tensor):
img = img.numpy()
img = img.reshape(-1)[self.permutation].reshape(*img.shape)
img = tensor(img)
else:
raise ValueError(f'Accepted types are: '
f'[ndarray, PIL Image, Tensor] {type(img)}')
return img
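A small, self-contained demonstration of the two transforms on a random array (the shape and seed are arbitrary, chosen only for illustration):
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    x = rng.random((8, 8))
    rotated = ImageRotation(degree=90)(x)                                       # scipy path, reshape=False keeps (8, 8)
    permuted = PixelsPermutation(index_permutation=rng.permutation(x.size))(x)  # flatten, permute, reshape back
    print(rotated.shape, permuted.shape)                                        # (8, 8) (8, 8)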
avg_line_length: 33.571429 | max_line_length: 73 | alphanum_fraction: 0.612766
count_classes: 1,408 | score_classes: 0.855927 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 132 | score_documentation: 0.080243

hexsha: f77bd48d7ad8370a1142d05db86188aea9cfe2af | size: 14,355 | ext: py | lang: Python
max_stars: d_graph.py | MohamedAl-Hussein/pyGraphs | 43346b1f25332dd7ab80cdd9656b3ed7af21d4d2 | ["MIT"] | count: null | dates: null
max_issues: d_graph.py | MohamedAl-Hussein/pyGraphs | 43346b1f25332dd7ab80cdd9656b3ed7af21d4d2 | ["MIT"] | count: null | dates: null
max_forks: d_graph.py | MohamedAl-Hussein/pyGraphs | 43346b1f25332dd7ab80cdd9656b3ed7af21d4d2 | ["MIT"] | count: null | dates: null
content:
# Course: CS261 - Data Structures
# Author: Mohamed Al-Hussein
# Assignment: 06
# Description: Directed graph implementation.
from collections import deque
import heapq
class DirectedGraph:
"""
Class to implement directed weighted graph
- duplicate edges not allowed
- loops not allowed
- only positive edge weights
- vertex names are integers
"""
def __init__(self, start_edges=None):
"""
Store graph info as adjacency matrix
DO NOT CHANGE THIS METHOD IN ANY WAY
"""
self.v_count = 0
self.adj_matrix = []
# populate graph with initial vertices and edges (if provided)
# before using, implement add_vertex() and add_edge() methods
if start_edges is not None:
v_count = 0
for u, v, _ in start_edges:
v_count = max(v_count, u, v)
for _ in range(v_count + 1):
self.add_vertex()
for u, v, weight in start_edges:
self.add_edge(u, v, weight)
def __str__(self):
"""
Return content of the graph in human-readable form
DO NOT CHANGE THIS METHOD IN ANY WAY
"""
if self.v_count == 0:
return 'EMPTY GRAPH\n'
out = ' |'
out += ' '.join(['{:2}'.format(i) for i in range(self.v_count)]) + '\n'
out += '-' * (self.v_count * 3 + 3) + '\n'
for i in range(self.v_count):
row = self.adj_matrix[i]
out += '{:2} |'.format(i)
out += ' '.join(['{:2}'.format(w) for w in row]) + '\n'
out = f"GRAPH ({self.v_count} vertices):\n{out}"
return out
def add_vertex(self) -> int:
"""
Adds new vertex to the graph.
Returns new number of vertices in graph.
"""
# Extend matrix by one column.
for row in self.adj_matrix:
row.append(0)
# Extend matrix by one row.
self.adj_matrix.append([0] * (self.v_count + 1))
# Update vertex count.
self.v_count += 1
return self.v_count
def add_edge(self, src: int, dst: int, weight=1) -> None:
"""
Adds a new edge to the graph, connecting src vertex to dst vertex.
If src and dst point to the same vertex, or if weight is not positive, does nothing and returns.
If edge already exists, updates weight of edge.
"""
# Only update weight if src and dst exist and don't point to same vertex and weight is positive.
if self._is_valid_edge(src, dst) and weight >= 1:
self.adj_matrix[src][dst] = weight
def remove_edge(self, src: int, dst: int) -> None:
"""
Removes an edge between src and dst vertices.
If either vertex does not exist in the graph, or if there is no edge between them, does nothing and returns.
"""
# Only remove edge if vertices exist.
if self._is_valid_edge(src, dst):
self.adj_matrix[src][dst] = 0
def get_vertices(self) -> []:
"""Returns a list of vertices of the graph."""
return [_ for _ in range(self.v_count)]
def get_edges(self) -> []:
"""
Returns a list of 3-tuples containing the source vertex, destination vertex, and edge weight for all edges
in graph.
"""
edges: list = list()
for i in range(self.v_count):
for j in range(self.v_count):
# Edge exists between vertex i and j.
if self.adj_matrix[i][j] > 0:
edges.append((i, j, self.adj_matrix[i][j]))
return edges
def is_valid_path(self, path: []) -> bool:
"""
Return True if the provided path is valid.
An empty path or a path with a single vertex is considered valid.
"""
# Check if path is empty or contains only a single vertex.
if len(path) == 0:
return True
elif len(path) == 1:
if 0 <= path[0] < self.v_count:
return True
else:
return False
# Iterate through vertices in path, checking if they are adjacent to each other so that they form a path.
step: int = 0
while step < len(path) - 1:
src, dst = path[step], path[step + 1]
if not self.are_adjacent(src, dst):
return False
step += 1
return True
def dfs(self, v_start: int, v_end: int = None) -> []:
"""
Return list of vertices visited during DFS search from v_start vertex up to optional v_end vertex.
If v_start is not in the graph, returns empty list.
If v_end is not in the graph, will treat it as having no v_end parameter.
Vertices are picked in ascending order.
"""
visited: list = list()
# Check if v_start is in graph.
if not 0 <= v_start < self.v_count:
return visited
# Check if v_end is in graph.
if isinstance(v_end, int) and not 0 <= v_end < self.v_count:
v_end = None
# Traverse graph until we either reach v_end or traverse every vertex.
return self._dfs(v_start, v_end)
def bfs(self, v_start: int, v_end: int = None) -> []:
"""
Return list of vertices visited during BFS search from v_start vertex up to optional v_end vertex.
If v_start is not in the graph, returns empty list.
If v_end is not in the graph, will treat it as having no v_end parameter.
Vertices are picked in ascending order.
"""
visited: list = list()
# Check if v_start is in graph.
if not 0 <= v_start < self.v_count:
return visited
# Check if v_end is in graph.
if isinstance(v_end, int) and not 0 <= v_end < self.v_count:
v_end = None
# Traverse graph until we either reach v_end or traverse every vertex.
vertices: deque = deque()
vertices.appendleft(v_start)
while len(vertices) > 0:
v: int = vertices.pop()
if v not in visited:
# Add vertex to visited vertices.
visited.append(v)
# Stop if vertex is equal to v_end.
if v == v_end:
break
# Add all neighbors of vertex in descending order so that they are popped in ascending order.
for neighbor in self.neighbors(v):
vertices.appendleft(neighbor)
return visited
def has_cycle(self):
"""Return True if graph contains a cycle."""
# If any of the strongly connected components (SCC) of the graph contain more than one vertex, then that SCC
# contains a cycle and so does the graph.
for component in self.connected_components():
if len(component) > 1:
return True
return False
def dijkstra(self, src: int) -> []:
"""
Returns a list of distances of src vertex to every other vertex.
If a vertex is not reachable, then its distance is infinity.
"""
distances: list = list()
if self.is_empty() or not 0 <= src < self.v_count:
return distances
# Create priority queue containing first vertex with distance 0.
vertices: list = list()
heapq.heappush(vertices, (0, src))
visited: dict = dict()
# Iterate through priority queue, updating min distance for each vertex.
while vertices:
dist_v, v = heapq.heappop(vertices)
if v not in visited:
visited[v] = dist_v
for neighbor in self.neighbors(v):
d_neighbor: int = self.adj_matrix[v][neighbor]
heapq.heappush(vertices, (dist_v + d_neighbor, neighbor))
# Update distances with min distance for each vertex, or inf if they are not reachable.
for v in self.get_vertices():
dist: int = visited.get(v, float("inf"))
distances.append(dist)
return distances
def are_adjacent(self, src: int, dst: int) -> bool:
"""Returns True if src vertex has an outgoing edge that connects to dst vertex."""
# Check if vertices are valid.
if not self._is_valid_edge(src, dst):
return False
return self.adj_matrix[src][dst] > 0
def connected_components(self) -> []:
"""
Return a list of lists containing all strongly connected components (SCC) of the graph.
Uses Kosaraju's algorithm to detect all SCCs.
"""
components: list = list()
if self.is_empty():
return components
# Iterate through all vertices via DFS.
# The top_stack maintains a topological sorting of all visited vertices.
top_stack: deque = deque()
vertices: deque = deque()
for v in self.get_vertices():
vertices.appendleft(v)
_: list = self._dfs_complete(vertices, top_stack=top_stack)
# Reverse graph to perform second round of DFS.
d_reverse: DirectedGraph = self.reversed()
self.adj_matrix, d_reverse.adj_matrix = d_reverse.adj_matrix, self.adj_matrix
# Iterate through all vertices in reverse order via DFS.
components = self._dfs_complete(top_stack)
# Reverse graph again to return to original form.
self.adj_matrix = d_reverse.adj_matrix
return components
def reversed(self) -> "DirectedGraph":
"""Returns a new DirectedGraph with outgoing edges swapped with incoming and vice versa."""
# Initialize new empty digraph with similar number of vertices.
d_graph: DirectedGraph = DirectedGraph()
for _ in range(self.v_count):
d_graph.add_vertex()
# Reflect edges over matrix diagonal to reverse their orientation then add them to new digraph.
for i in range(self.v_count):
for j in range(self.v_count):
d_graph.adj_matrix[i][j] = self.adj_matrix[j][i]
return d_graph
def neighbors(self, v: int) -> []:
"""Return all vertices that vertex v has an outgoing edge to."""
neighbors: list = list()
for i in range(self.v_count):
if self.adj_matrix[v][i] > 0:
neighbors.append(i)
return neighbors
def is_empty(self) -> bool:
"""Return True if the graph contains no vertices."""
return self.v_count == 0
def _is_valid_edge(self, src: int, dst: int) -> bool:
"""
Returns True if an edge between a src and dst vertex is valid.
An edge is invalid if the src and dst point to the same vertex, or if either vertex is not on the graph.
"""
return src != dst and 0 <= src < self.v_count and 0 <= dst < self.v_count
def _dfs_complete(self, vertices: deque, top_stack: deque = None) -> []:
"""
Returns a list of weakly connected components using DFS traversal.
An optional top_stack parameter tracks the topological sorting of the graph and in turn ensures that
the returned components are strongly connected.
"""
components: list = list()
unvisited: list = [True] * self.v_count
while vertices:
v: int = vertices.popleft()
# Grab the next vertex that hasn't been visited.
while not unvisited[v] and vertices:
v = vertices.popleft()
            # Every vertex left in the queue has already been visited, so we can stop.
            if not unvisited[v]:
                break
component: list = self._dfs(v_start=v, unvisited=unvisited, top_stack=top_stack)
if len(component) > 0:
components.append(component)
return components
def _dfs(self, v_start: int, v_end: int = None, unvisited: list = None, top_stack: deque = None) -> []:
"""
Returns a list containing all vertices visited starting from the vertex v_start up to the optional vertex
v_end via DFS.
An optional list of unvisited vertices ensures vertices are visited exactly once during multiple calls to
this method.
An optional top_stack parameter maintains a topological sorting of all visited vertices.
"""
# The backstack holds any visited vertices in the order that they were visited.
backstack: deque = deque()
vertices: deque = deque()
visited: list = list()
if unvisited is None:
unvisited = [True] * self.v_count
vertices.appendleft(v_start)
while vertices:
v: int = vertices.popleft()
if unvisited[v]:
unvisited[v] = False
visited.append(v)
backstack.appendleft(v)
# Unroll backstack so that its top points to a vertex with at least one unvisited neighbor. Update
# top_stack in the process.
if top_stack is not None:
self._backtrack(unvisited, backstack, top_stack)
if v == v_end:
break
# Neighbors are pushed in descending order so that they are visited in ascending order.
for neighbor in reversed(self.neighbors(v)):
if unvisited[neighbor]:
vertices.appendleft(neighbor)
return visited
def _backtrack(self, unvisited: list, backstack: deque, top_stack: deque) -> None:
"""
While the vertex at the top of the backstack has no unvisited neighbors, pops the vertex and pushes it to the
top_stack.
This effectively rolls back the backstack so that either the stack is emptied or the top points to a vertex
that has unvisited neighbors.
The top_stack will contain the topological sorting of the graph in return.
"""
while backstack:
v: int = backstack[0]
v_unvisited: list = list()
for neighbor in self.neighbors(v):
if unvisited[neighbor]:
v_unvisited.append(neighbor)
if not v_unvisited:
top_stack.appendleft(backstack.popleft())
else:
break
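A short usage sketch of the class above; the edge list is made up, and the expected outputs follow from the adjacency matrix it builds:
if __name__ == "__main__":
    g = DirectedGraph(start_edges=[(0, 1, 10), (1, 2, 5), (2, 0, 3), (2, 3, 7)])
    print(g.get_edges())    # [(0, 1, 10), (1, 2, 5), (2, 0, 3), (2, 3, 7)]
    print(g.dfs(0))         # [0, 1, 2, 3]
    print(g.dijkstra(0))    # [0, 10, 15, 22]
    print(g.has_cycle())    # True, because 0 -> 1 -> 2 -> 0 is a cycle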
avg_line_length: 33.618267 | max_line_length: 117 | alphanum_fraction: 0.585023
count_classes: 14,182 | score_classes: 0.987948 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 6,213 | score_documentation: 0.432811

hexsha: f77befa83cf2914313d51ff9e7931425c66499dd | size: 6,847 | ext: py | lang: Python
max_stars: src/code_submission/2_pasanju/preprocessing/prepredict.py | NehzUx/AutoGraph-KDDCup2020 | d2fc228f4ccc5785db3129cca0445a80b6fef11d | ["MIT"] | count: 1 | 2021-12-06T14:59:55.000Z to 2021-12-06T14:59:55.000Z
max_issues: src/code_submission/2_pasanju/preprocessing/prepredict.py | NehzUx/AutoGraph-Benchmark | d2fc228f4ccc5785db3129cca0445a80b6fef11d | ["MIT"] | count: null | dates: null
max_forks: src/code_submission/2_pasanju/preprocessing/prepredict.py | NehzUx/AutoGraph-Benchmark | d2fc228f4ccc5785db3129cca0445a80b6fef11d | ["MIT"] | count: null | dates: null
content:
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time: 2020/5/14 20:41
# @Author: Mecthew
import time
import numpy as np
import pandas as pd
import scipy
from sklearn.svm import LinearSVC
from sklearn.linear_model import logistic
from sklearn.calibration import CalibratedClassifierCV
from sklearn.metrics import accuracy_score
from sklearn.preprocessing import OneHotEncoder
import scipy.sparse as sp
from utils.logger import get_logger
logger = get_logger("INFO")
class SVM:
def __init__(self, **kwargs):
self.name = "SVM"
self._model = CalibratedClassifierCV(LinearSVC(C=1.0, max_iter=500, class_weight=None, random_state=666))
def fit(self, x_train, y_train):
self._model.fit(x_train, y_train)
def predict(self, x_test):
return self._model.predict_proba(x_test)
class LR:
def __init__(self, **kwargs):
self.name = "LR"
self._model = logistic.LogisticRegression(C=1.0, solver="liblinear", multi_class="auto",
class_weight=None, max_iter=100, random_state=666)
def fit(self, x_train, y_train):
self._model.fit(x_train, y_train)
def predict(self, x_test):
return self._model.predict_proba(x_test)
def prepredict(graph_df, train_indices, use_valid, use_ohe=False):
t1 = time.time()
fea_table = graph_df['fea_table'].set_index(keys="node_index")
train_indices = train_indices
if use_valid:
valid_indices = list(set(graph_df['train_indices']) - set(train_indices))
test_indices = graph_df['test_indices'] + valid_indices
else:
test_indices = graph_df['test_indices']
train_label = graph_df['train_label'].set_index('node_index').loc[train_indices][['label']]
x_train, y_train = fea_table.loc[train_indices].to_numpy(), train_label.to_numpy()
x_test = fea_table.loc[test_indices].to_numpy()
lr = LR()
lr.fit(x_train, y_train)
if use_ohe:
ohe = OneHotEncoder(handle_unknown="ignore").fit(y_train.reshape(-1, 1))
x_train_feat, x_test_feat = ohe.transform(np.argmax(lr.predict(x_train), axis=1).reshape(-1, 1)).toarray(), \
ohe.transform(np.argmax(lr.predict(x_test), axis=1).reshape(-1, 1)).toarray()
else:
x_train_feat, x_test_feat = lr.predict(x_train), \
lr.predict(x_test)
pre_feat = np.concatenate([x_train_feat, x_test_feat], axis=0)
total_indices = np.concatenate([train_indices, test_indices], axis=0)
train_predict = np.argmax(x_train_feat, axis=1)
train_acc = accuracy_score(y_true=y_train, y_pred=train_predict)
t2 = time.time()
logger.info("Time cost for training {}: {}s, train acc {}".format(lr.name, t2-t1, train_acc))
return pd.DataFrame(data=pre_feat, index=total_indices)
def lpa_predict(graph_df, n_class, train_indices, use_valid, max_iter=100, tol=1e-3, use_ohe=False):
t1 = time.time()
train_indices = train_indices
if use_valid:
valid_indices = list(set(graph_df['train_indices']) - set(train_indices))
test_indices = graph_df['test_indices'] + valid_indices
else:
test_indices = graph_df['test_indices']
train_label = graph_df['train_label'].set_index('node_index').loc[train_indices][['label']].to_numpy()
print("Train label shape {}".format(train_label.shape))
train_label = train_label.reshape(-1)
edges = graph_df['edge_file'][['src_idx', 'dst_idx', 'edge_weight']].to_numpy()
edge_index = edges[:, :2].astype(np.int).transpose() # transpose to (2, num_edges)
edge_weight = edges[:, 2].astype(np.float)
num_nodes = len(train_indices) + len(test_indices)
t2 = time.time()
total_indices = np.concatenate([train_indices, test_indices], axis=0)
adj = sp.coo_matrix((edge_weight, edge_index), shape=(num_nodes, num_nodes)).tocsr()
adj = adj[total_indices] # reorder
adj = adj[:, total_indices]
t3 = time.time()
logger.debug("Time cost for transform adj {}s".format(t3 - t2))
row_sum = np.array(adj.sum(axis=1), dtype=np.float)
d_inv = np.power(row_sum, -1).flatten()
d_inv[np.isinf(d_inv)] = 0.
normal_adj = sp.diags(d_inv).dot(adj).tocsr().transpose()
Pll = normal_adj[:len(train_indices), :len(train_indices)].copy()
Plu = normal_adj[:len(train_indices), len(train_indices):].copy()
Pul = normal_adj[len(train_indices):, :len(train_indices)].copy()
Puu = normal_adj[len(train_indices):, len(train_indices):].copy()
label_mat = np.eye(n_class)[train_label]
label_mat_prob = label_mat.copy()
print("Pul shape {}, label_mat shape {}".format(Pul.shape, label_mat_prob.shape))
Pul_dot_lable_mat = Pul.dot(label_mat)
unlabel_mat = np.zeros(shape=(len(test_indices), n_class))
iter, changed = 0, np.inf
t4 = time.time()
logger.debug("Time cost for prepare matrix {}s".format(t4-t3))
while iter < max_iter and changed > tol:
if iter % 10 == 0:
logger.debug("---> Iteration %d/%d, changed: %f" % (iter, max_iter, changed))
iter += 1
pre_unlabel_mat = unlabel_mat
unlabel_mat = Puu.dot(unlabel_mat) + Pul_dot_lable_mat
label_mat_prob = Pll.dot(label_mat_prob) + Plu.dot(pre_unlabel_mat)
changed = np.abs(pre_unlabel_mat - unlabel_mat).sum()
logger.debug("Time cost for training lpa {}".format(time.time() - t4))
# preds = np.argmax(np.array(unlabel_mat), axis=1)
# unlabel_mat = np.eye(n_class)[preds]
train_acc = accuracy_score(y_true=train_label, y_pred=np.argmax(label_mat_prob, axis=1))
logger.info("LPA training acc {}".format(train_acc))
logger.info("Time cost for LPA {}s".format(time.time() - t1))
total_indices = np.concatenate([train_indices, test_indices], axis=0)
if use_ohe:
ohe = OneHotEncoder(handle_unknown="ignore").fit(train_label.reshape(-1, 1))
label_mat_ohe = ohe.transform(np.argmax(label_mat_prob, axis=1).reshape(-1, 1)).toarray()
unlabel_mat_ohe = ohe.transform(np.argmax(unlabel_mat, axis=1).reshape(-1, 1)).toarray()
lu_mat_ohe = np.concatenate([label_mat_ohe, unlabel_mat_ohe], axis=0)
return pd.DataFrame(data=lu_mat_ohe, index=total_indices), train_acc
else:
unlabel_mat_prob = unlabel_mat
lu_mat_prob = np.concatenate([label_mat_prob, unlabel_mat_prob], axis=0)
return pd.DataFrame(data=lu_mat_prob, index=total_indices), train_acc
def is_nonnegative_integer(x_feats):
is_nonnegative = (x_feats >= 0).all()
is_integer = True
for feat in x_feats:
feat_int_sum = np.array(feat, dtype=np.int).sum()
feat_sum = np.array(feat, dtype=np.float).sum()
is_integer = (feat_int_sum == feat_sum)
if is_integer is False:
break
return is_nonnegative and is_integer
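The core of lpa_predict is the partitioned label-propagation update: with the normalized adjacency split into Pll, Plu, Pul and Puu, the unlabelled scores are iterated as U <- Puu*U + Pul*Y_L while the labelled scores are smoothed as L <- Pll*L + Plu*U. A toy, self-contained illustration of that loop (the 3-node matrix and labels are invented purely to show the shapes and the update rule):
import numpy as np

P = np.array([[0.0, 0.5, 0.5],
              [0.5, 0.0, 0.5],
              [0.5, 0.5, 0.0]])      # row-normalized adjacency: node 0 labelled, nodes 1-2 unlabelled
Pll, Plu = P[:1, :1], P[:1, 1:]
Pul, Puu = P[1:, :1], P[1:, 1:]
Y_l = np.array([[1.0, 0.0]])         # one-hot label of the labelled node (2 classes)
Pul_Yl = Pul @ Y_l                   # fixed term, analogous to Pul_dot_lable_mat above
L_prob, U = Y_l.copy(), np.zeros((2, 2))
for _ in range(100):
    U_prev = U
    U = Puu @ U + Pul_Yl             # unlabelled-score update
    L_prob = Pll @ L_prob + Plu @ U_prev
print(U)                             # per-class scores for the two unlabelled nodes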
avg_line_length: 43.062893 | max_line_length: 117 | alphanum_fraction: 0.676355
count_classes: 772 | score_classes: 0.11275 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 758 | score_documentation: 0.110705

hexsha: f77cba016c9db38a8357e9b79839c267bbbde362 | size: 4,754 | ext: py | lang: Python
max_stars: disaster_data/sources/noaa_coast/spider.py | cognition-gis/cognition-disaster-data | 5441bd282d36b2d998d1d366d714d38fc5b92c8f | ["Apache-2.0"] | count: null | dates: null
max_issues: disaster_data/sources/noaa_coast/spider.py | cognition-gis/cognition-disaster-data | 5441bd282d36b2d998d1d366d714d38fc5b92c8f | ["Apache-2.0"] | count: 1 | 2022-03-02T14:58:21.000Z to 2022-03-02T14:58:21.000Z
max_forks: disaster_data/sources/noaa_coast/spider.py | cognition-gis/cognition-disaster-data | 5441bd282d36b2d998d1d366d714d38fc5b92c8f | ["Apache-2.0"] | count: null | dates: null
content:
import os
import scrapy
from scrapy.crawler import CrawlerProcess
import requests
from disaster_data.sources.noaa_coast.utils import get_geoinfo, get_fgdcinfo
class NoaaImageryCollections(scrapy.Spider):
name = 'noaa-coast-imagery-collections'
start_urls = [
'https://coast.noaa.gov/htdata/raster2/index.html#imagery',
]
@classmethod
def crawl(cls, outfile='output.json', ids=None, items=False):
cls.ids = ids
cls.items = items
process = CrawlerProcess({
'USER_AGENT': 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1)',
'FEED_FORMAT': 'json',
'FEED_URI': outfile
})
process.crawl(cls)
# Blocked while crawling
process.start()
def parse(self, response):
"""
Generate a STAC Collection for each NOAA imagery project, optionally filtering by ID.
"""
dem_table, imagery_table = response.xpath('//*[@class="sortable"]')
imagery_head = imagery_table.xpath('.//thead//tr/th//text()').getall()
collections = []
collection_items = []
ret = {}
for row in imagery_table.xpath('.//tbody//tr'):
values = row.xpath('.//td')
id = values[-1].xpath('.//text()').get()
if self.ids:
if id not in self.ids:
continue
feature = {
"stac_version": "0.7.0",
"properties": {},
"assets": {},
"extent": {}
}
# Converting HTML table into STAC Item
for head, value in zip(imagery_head, values):
links = value.xpath('.//a/@href').getall()
data = value.xpath('.//text()').getall()
if head == 'Dataset Name':
feature['assets'].update({
"metadata_xml": {
"href": links[0],
"type": "xml"
},
"metadata_html": {
"href": links[1],
"type": "html"
}
})
elif head == 'https':
feature['assets'].update({
"assets_http": {
"href": links[0],
"type": "html"
}
})
elif head == 'ftp':
feature['assets'].update({
"assets_ftp": {
"href": links[0],
"type": "ftp"
}
})
elif head == 'DAV':
feature['assets'].update({
"asset_viewer": {
"href": links[0],
"type": "html"
}
})
elif head == 'Tile Index':
feature['assets'].update({
"tile_index": {
"href": links[0],
"type": "shp"
}
})
elif head == 'ID #':
feature.update({'id': int(data[0])})
# Geometry handling
geoinfo = get_geoinfo('/vsizip//vsicurl/{}/0tileindex.shp'.format(feature['assets']['tile_index']['href']))
feature.update(geoinfo['geometry'])
feature['extent'].update({'spatial': geoinfo['bbox']})
# FGDC metadata
fgdcinfo = get_fgdcinfo(feature['assets']['metadata_xml']['href'])
feature['extent'].update({'temporal': [
fgdcinfo['start_date'],
fgdcinfo['end_date'],
]})
feature.update({
'title': fgdcinfo['title'],
'description': fgdcinfo['description'],
'processing': fgdcinfo['processing'],
})
collections.append(feature)
# Scrape items
if self.items:
items_url = os.path.join(feature['assets']['assets_http']['href'], 'urllist{}.txt'.format(feature['id']))
collection_items.append(self.parse_collection_items(items_url))
ret.update({'collections': collections})
if self.items:
ret.update({'items': collection_items})
return ret
def parse_collection_items(self, file_list_url):
r = requests.get(file_list_url)
collection_items = r.content.decode('utf-8').splitlines()
return ['/vsicurl/'+x for x in collection_items if x.endswith('.tif')]
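A minimal invocation sketch for the spider above; the collection ID is hypothetical, and crawl() blocks until the Scrapy process finishes:
if __name__ == "__main__":
    NoaaImageryCollections.crawl(outfile="noaa_collections.json", ids=["1234"], items=False)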
avg_line_length: 34.955882 | max_line_length: 121 | alphanum_fraction: 0.446782
count_classes: 4,590 | score_classes: 0.965503 | count_generators: 0 | score_generators: 0 | count_decorators: 404 | score_decorators: 0.084981 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 1,163 | score_documentation: 0.244636

hexsha: f77cc067eb5667c5dadfdaf7622c60b024ae8bc5 | size: 2,004 | ext: py | lang: Python
max_stars: rlo/test/rlo/test_factory.py | tomjaguarpaw/knossos-ksc | 8fa75e67c0db8f632b135379740051cd10ff31f2 | ["MIT"] | count: 31 | 2021-09-09T16:09:55.000Z to 2022-02-20T02:15:19.000Z
max_issues: rlo/test/rlo/test_factory.py | tomjaguarpaw/knossos-ksc | 8fa75e67c0db8f632b135379740051cd10ff31f2 | ["MIT"] | count: 40 | 2021-08-06T14:30:08.000Z to 2022-01-19T08:49:52.000Z
max_forks: rlo/test/rlo/test_factory.py | tomjaguarpaw/knossos-ksc | 8fa75e67c0db8f632b135379740051cd10ff31f2 | ["MIT"] | count: 5 | 2021-08-06T11:20:31.000Z to 2022-01-07T19:39:40.000Z
content:
import pytest
from rlo import factory
@pytest.mark.parametrize("use_subtree_match_edges", [True, False])
@pytest.mark.parametrize("loss", ["pinball=0.6", "huber"])
def test_torch_model_from_config(use_subtree_match_edges, loss):
# Check we can construct a Model
config = {
"num_embeddings": 3,
"hidden_dim": 2,
"num_gnn_blocks": 5,
"output_hidden_dim": 2,
"simulation_depth_train": 10,
"lr": 0.01,
"loss": loss,
"repetition": 1,
"decoder_readout": "sum",
"graph_state_keep_prob": 0.9,
"output_keep_prob": 0.2,
"aggregation_over_edge_types": "sum",
"use_subtree_match_edges": use_subtree_match_edges,
}
factory.torch_model_from_config(config)
@pytest.mark.parametrize("use_subtree_match_edges", [True, False])
def test_torch_data_converter_from_config(use_subtree_match_edges):
# Check we can construct a DataConverter
config = {
"simulation_depth_train": 11,
"use_subtree_match_edges": use_subtree_match_edges,
"cost_normalization": "none",
}
factory.data_converter_from_config(config)
@pytest.mark.parametrize("use_subtree_match_edges", [True, False])
@pytest.mark.parametrize("loss", ["pinball=0.3", "huber"])
def test_torch_regressor_from_config(use_subtree_match_edges, loss):
# Check we can construct a TorchModelWrapper
config = {
"num_embeddings": 3,
"hidden_dim": 2,
"num_gnn_blocks": 5,
"output_hidden_dim": 2,
"lr": 0.01,
"loss": loss,
"repetition": 1,
"use_subtree_match_edges": use_subtree_match_edges,
"cost_normalization": "none",
"tensorflow": False,
"simulation_depth_eval": 10,
"decoder_readout": "sum",
"graph_state_keep_prob": 0.99,
"output_keep_prob": 0.2,
"aggregation_over_edge_types": "sum",
"simulation_depth_train": 10,
}
factory.single_regressor_from_config(config)
avg_line_length: 31.809524 | max_line_length: 68 | alphanum_fraction: 0.657685
count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 1,956 | score_decorators: 0.976048 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 841 | score_documentation: 0.419661

hexsha: f77db444ca4d359ed2a89460019181e2cac7a2bd | size: 1,285 | ext: py | lang: Python
max_stars: src/setup_mac.py | dittert/pyprobe | 1b0d0e403645ed204332c70c8a89e094f860023a | ["Apache-2.0"] | count: null | dates: null
max_issues: src/setup_mac.py | dittert/pyprobe | 1b0d0e403645ed204332c70c8a89e094f860023a | ["Apache-2.0"] | count: null | dates: null
max_forks: src/setup_mac.py | dittert/pyprobe | 1b0d0e403645ed204332c70c8a89e094f860023a | ["Apache-2.0"] | count: null | dates: null
content:
# coding=utf-8
# Copyright 2014 Dirk Dittert
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from cx_Freeze import setup, Executable
# Dependencies are automatically detected, but it might need
# fine tuning.
base = 'Console'
executables = [
Executable('probe.py', copyDependentFiles=True)
]
includefiles = []
packages = ['pyprobe', 'psutil']
includes = []
setup(name='pyprobe',
version='1.0',
description='x',
options={
'build_exe': {
'include_files': includefiles,
'packages': packages,
'excludes': [],
'includes': ['requests']
},
'bdist_mac': {
'bundle_name': 'pyprobe'
}
},
executables=executables, requires=['requests', 'psutil'])
avg_line_length: 27.340426 | max_line_length: 74 | alphanum_fraction: 0.649805
count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 821 | score_documentation: 0.638911

hexsha: f77db8b6066d045b501403f79c57e6ba8e7db030 | size: 6,432 | ext: py | lang: Python
max_stars: src/gui/view_menu/layer_list.py | jeremiahws/DLAE | 5005d1c275279cc283c59f226732f073cf340a52 | ["Apache-2.0"] | count: 2 | 2021-05-25T12:23:23.000Z to 2021-06-20T11:40:40.000Z
max_issues: src/gui/view_menu/layer_list.py | jeremiahws/DLAE | 5005d1c275279cc283c59f226732f073cf340a52 | ["Apache-2.0"] | count: null | dates: null
max_forks: src/gui/view_menu/layer_list.py | jeremiahws/DLAE | 5005d1c275279cc283c59f226732f073cf340a52 | ["Apache-2.0"] | count: 4 | 2019-10-16T07:52:41.000Z to 2021-11-20T17:28:25.000Z
content:
# Copyright 2019 Jeremiah Sanders.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""dlae/gui/view_menu/layer_list.py"""
import tkinter as tk
class LayerList:
def __init__(self):
self.button_heights = 1
self.button_widths = 15
self.label_heights = 1
self.label_widths = 15
self.entry_widths = 15
self.tl_layer_list = tk.Toplevel()
self.tl_layer_list.title('List of layers')
self.tl_layer_list.wm_protocol('WM_DELETE_WINDOW', self.tl_layer_list.withdraw)
self.tl_layer_list.resizable(width=False, height=False)
self.b_serial_layers = tk.Button(self.tl_layer_list, text='View serial layers', command=self.view_serial_layers, height=self.button_heights).grid(row=0, column=0, columnspan=3, sticky=tk.E+tk.W)
self.b_gen_layers = tk.Button(self.tl_layer_list, text='View generator layers', command=self.view_gen_layers, height=self.button_heights).grid(row=0, column=3, columnspan=3, sticky=tk.E+tk.W)
self.b_discrim_layers = tk.Button(self.tl_layer_list, text='View discriminator layers', command=self.view_discrim_layers, height=self.button_heights).grid(row=0, column=6, columnspan=3, sticky=tk.E+tk.W)
self.b_serial_layers = tk.Button(self.tl_layer_list, text='Rebuild model', command=self.rebuild_serial_layers, height=self.button_heights).grid(row=2, column=0, columnspan=3, sticky=tk.E+tk.W)
self.b_gen_layers = tk.Button(self.tl_layer_list, text='Rebuild generator', command=self.rebuild_gen_layers, height=self.button_heights).grid(row=2, column=3, columnspan=3, sticky=tk.E+tk.W)
self.b_discrim_layers = tk.Button(self.tl_layer_list, text='Rebuild discriminator', command=self.rebuild_discrim_layers, height=self.button_heights).grid(row=2, column=6, columnspan=3, sticky=tk.E+tk.W)
self.lb_layers_list = tk.Listbox(self.tl_layer_list)
self.lb_layers_list.bind('<<ListboxSelect>>', self.cursor_select)
self.lb_layers_list.config(width=85, height=25)
self.lb_layers_list.grid(row=1, column=0, columnspan=9, sticky=tk.N+tk.S+tk.E+tk.W)
self.sb_layers_list = tk.Scrollbar(self.tl_layer_list, orient="vertical")
self.sb_layers_list.config(command=self.lb_layers_list.yview)
self.sb_layers_list.grid(row=1, column=9, sticky=tk.N+tk.S)
self.lb_layers_list.config(yscrollcommand=self.sb_layers_list.set)
self.lb_layers_list_serial = tk.Listbox(self.tl_layer_list)
self.lb_layers_list_gen = tk.Listbox(self.tl_layer_list)
self.lb_layers_list_discrim = tk.Listbox(self.tl_layer_list)
self.s_layer_to_modify = tk.StringVar(value="No layer selected")
self.i_index = tk.IntVar()
self.b_layer_to_modify = tk.Button(self.tl_layer_list, text='Update layer', command=self.change_layer, height=self.button_heights).grid(row=3, column=0, columnspan=3, sticky=tk.E+tk.W)
self.b_inject_layer = tk.Button(self.tl_layer_list, text='Inject layer', command=self.inject_layer, height=self.button_heights).grid(row=3, column=3, columnspan=3, sticky=tk.E+tk.W)
self.b_delete_layer = tk.Button(self.tl_layer_list, text='Delete layer', command=self.delete_layer, height=self.button_heights).grid(row=3, column=6, columnspan=3, sticky=tk.E+tk.W)
self.e_layer_to_modify = tk.Entry(self.tl_layer_list, textvariable=self.s_layer_to_modify, width=self.entry_widths).grid(row=4, column=0, columnspan=9, sticky=tk.E+tk.W)
self.tl_layer_list.withdraw()
def cursor_select(self, event):
try:
index = self.lb_layers_list.curselection()[0]
selection = self.lb_layers_list.get(index)
self.i_index.set(index)
self.s_layer_to_modify.set(selection)
except:
pass
def change_layer(self):
self.lb_layers_list.delete(self.i_index.get())
self.lb_layers_list.insert(self.i_index.get(), self.s_layer_to_modify.get())
def inject_layer(self):
self.lb_layers_list.insert(self.i_index.get() + 1, self.s_layer_to_modify.get())
def delete_layer(self):
self.lb_layers_list.delete(self.i_index.get())
def show(self):
self.tl_layer_list.deiconify()
def view_serial_layers(self):
layers = self.lb_layers_list_serial.get(0, tk.END)
if any(layers):
self.lb_layers_list.delete(0, tk.END)
[self.lb_layers_list.insert(tk.END, layer) for layer in layers]
else:
self.lb_layers_list.delete(0, tk.END)
def view_gen_layers(self):
layers = self.lb_layers_list_gen.get(0, tk.END)
if any(layers):
self.lb_layers_list.delete(0, tk.END)
[self.lb_layers_list.insert(tk.END, layer) for layer in layers]
else:
self.lb_layers_list.delete(0, tk.END)
def view_discrim_layers(self):
layers = self.lb_layers_list_discrim.get(0, tk.END)
if any(layers):
self.lb_layers_list.delete(0, tk.END)
[self.lb_layers_list.insert(tk.END, layer) for layer in layers]
else:
self.lb_layers_list.delete(0, tk.END)
def rebuild_serial_layers(self):
layers = self.lb_layers_list.get(0, tk.END)
self.lb_layers_list_serial.delete(0, tk.END)
[self.lb_layers_list_serial.insert(tk.END, layer) for layer in layers]
self.lb_layers_list.delete(0, tk.END)
def rebuild_gen_layers(self):
layers = self.lb_layers_list.get(0, tk.END)
self.lb_layers_list_gen.delete(0, tk.END)
[self.lb_layers_list_gen.insert(tk.END, layer) for layer in layers]
self.lb_layers_list.delete(0, tk.END)
def rebuild_discrim_layers(self):
layers = self.lb_layers_list.get(0, tk.END)
self.lb_layers_list_discrim.delete(0, tk.END)
[self.lb_layers_list_discrim.insert(tk.END, layer) for layer in layers]
self.lb_layers_list.delete(0, tk.END)
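A minimal sketch of driving the widget above outside DLAE; the layer strings are hypothetical placeholders, and a hidden Tk root is created so the Toplevel has a parent:
if __name__ == '__main__':
    root = tk.Tk()
    root.withdraw()                                                    # LayerList manages its own Toplevel
    layer_list = LayerList()
    layer_list.lb_layers_list.insert(tk.END, 'Convolution2D:32:3:3')   # hypothetical layer entries
    layer_list.lb_layers_list.insert(tk.END, 'Dense:10:softmax')
    layer_list.rebuild_serial_layers()                                 # stash them as the serial-model list
    layer_list.view_serial_layers()                                    # reload them into the visible listbox
    layer_list.show()
    root.mainloop()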
avg_line_length: 50.25 | max_line_length: 211 | alphanum_fraction: 0.704291
count_classes: 5,786 | score_classes: 0.899565 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 857 | score_documentation: 0.13324

hexsha: f77e315e6c8b0a904e3ca8fb92860fcdc824f09d | size: 977 | ext: py | lang: Python
max_stars: preprocessing.py | Prakhar-Bhartiya/SentimentAnalysis | 8fa2664a57b01e7303ef26d1226a81c0e25be4b7 | ["MIT"] | count: null | dates: null
max_issues: preprocessing.py | Prakhar-Bhartiya/SentimentAnalysis | 8fa2664a57b01e7303ef26d1226a81c0e25be4b7 | ["MIT"] | count: null | dates: null
max_forks: preprocessing.py | Prakhar-Bhartiya/SentimentAnalysis | 8fa2664a57b01e7303ef26d1226a81c0e25be4b7 | ["MIT"] | count: null | dates: null
content:
"""
DATA DESCRIPTION
sentiment140 dataset. It contains 1,600,000 tweets extracted using the twitter api . The tweets have been annotated (0 = negative, 4 = positive) and they can be used to detect sentiment .
It contains the following 6 fields:
target: the polarity of the tweet (0 = negative, 2 = neutral, 4 = positive)
ids: The id of the tweet ( 2087)
date: the date of the tweet (Sat May 16 23:58:44 UTC 2009)
flag: The query (lyx). If there is no query, then this value is NO_QUERY.
user: the user that tweeted (robotickilldozr)
text: the text of the tweet (Lyx is cool)
"""
#import libraries
import pandas as pd
data = pd.read_csv('training.1600000.processed.noemoticon.csv',encoding = 'latin', header=None, nrows=25)
#Adding header to data
data = data.rename(columns={0: 'target', 1: 'id', 2: 'TimeStamp', 3: 'query', 4: 'username', 5: 'content'})
#Dropping unncessary columns
data.drop(['id','TimeStamp','query'], axis=1, inplace=True)
print(data.to_string())
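A possible next step, not in the original script: map the numeric polarity described in the docstring (0 = negative, 2 = neutral, 4 = positive) onto readable labels.
data['sentiment'] = data['target'].map({0: 'negative', 2: 'neutral', 4: 'positive'})
print(data[['sentiment', 'content']].head())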
avg_line_length: 32.566667 | max_line_length: 187 | alphanum_fraction: 0.721597
count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 770 | score_documentation: 0.788127

hexsha: f77f2fcfbb893554c9dac95eda0dc9991fd25b40 | size: 1,803 | ext: py | lang: Python
max_stars: indico/modules/events/static/controllers.py | tobiashuste/indico | c1e6ec0c8c84745988e38c9b1768142a6feb9e0e | ["MIT"] | count: null | dates: null
max_issues: indico/modules/events/static/controllers.py | tobiashuste/indico | c1e6ec0c8c84745988e38c9b1768142a6feb9e0e | ["MIT"] | count: null | dates: null
max_forks: indico/modules/events/static/controllers.py | tobiashuste/indico | c1e6ec0c8c84745988e38c9b1768142a6feb9e0e | ["MIT"] | count: null | dates: null
content:
# This file is part of Indico.
# Copyright (C) 2002 - 2020 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from __future__ import unicode_literals
from flask import redirect, request, session
from werkzeug.exceptions import NotFound
from indico.core.db import db
from indico.modules.events.management.controllers import RHManageEventBase
from indico.modules.events.static.models.static import StaticSite, StaticSiteState
from indico.modules.events.static.tasks import build_static_site
from indico.modules.events.static.views import WPStaticSites
from indico.web.flask.util import url_for
class RHStaticSiteBase(RHManageEventBase):
pass
class RHStaticSiteList(RHStaticSiteBase):
def _process(self):
static_sites = self.event.static_sites.order_by(StaticSite.requested_dt.desc()).all()
return WPStaticSites.render_template('static_sites.html', self.event, static_sites=static_sites)
class RHStaticSiteBuild(RHStaticSiteBase):
ALLOW_LOCKED = True
def _process(self):
static_site = StaticSite(creator=session.user, event=self.event)
db.session.add(static_site)
db.session.commit()
build_static_site.delay(static_site)
return redirect(url_for('.list', self.event))
class RHStaticSiteDownload(RHStaticSiteBase):
normalize_url_spec = {
'locators': {
lambda self: self.static_site
}
}
def _process_args(self):
RHStaticSiteBase._process_args(self)
self.static_site = StaticSite.get_one(request.view_args['id'])
def _process(self):
if self.static_site.state != StaticSiteState.success:
raise NotFound
return self.static_site.send()
avg_line_length: 31.631579 | max_line_length: 104 | alphanum_fraction: 0.742651
count_classes: 1,093 | score_classes: 0.606212 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 247 | score_documentation: 0.136994

hexsha: f77f91aa533c688d45149adae8643805965bb2c7 | size: 622 | ext: py | lang: Python
max_stars: kruptos/csapp/api.py | ashwani762/Kruptos | 9cd04ee6147c2dc14764e45c3481690ae399e664 | ["Apache-2.0"] | count: null | dates: null
max_issues: kruptos/csapp/api.py | ashwani762/Kruptos | 9cd04ee6147c2dc14764e45c3481690ae399e664 | ["Apache-2.0"] | count: null | dates: null
max_forks: kruptos/csapp/api.py | ashwani762/Kruptos | 9cd04ee6147c2dc14764e45c3481690ae399e664 | ["Apache-2.0"] | count: null | dates: null
content:
from csapp.models import Kruptos
from rest_framework import viewsets, permissions
from rest_framework.response import Response
from rest_framework import status
from .serializers import KruptosSerializer
class KruptosViewSet(viewsets.ModelViewSet):
    permission_classes = [
        permissions.AllowAny
    ]
    serializer_class = KruptosSerializer
    def get_queryset(self):
        return Kruptos.objects.all()
    def perform_create(self, serializer):
        serializer.save()
    def destroy(self, request, *args, **kwargs):
        return Response(status=status.HTTP_400_BAD_REQUEST)
avg_line_length: 29.619048 | max_line_length: 59 | alphanum_fraction: 0.726688
count_classes: 411 | score_classes: 0.660772 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 0 | score_documentation: 0

hexsha: f7809285eb96f9645d677756834951e018513264 | size: 7,612 | ext: py | lang: Python
max_stars: test_autolens/simulators/imaging/instrument_util.py | agarwalutkarsh554/PyAutoLens | 72d2f5c39834446e72879fd119b591e52b36cac4 | ["MIT"] | count: null | dates: null
max_issues: test_autolens/simulators/imaging/instrument_util.py | agarwalutkarsh554/PyAutoLens | 72d2f5c39834446e72879fd119b591e52b36cac4 | ["MIT"] | count: null | dates: null
max_forks: test_autolens/simulators/imaging/instrument_util.py | agarwalutkarsh554/PyAutoLens | 72d2f5c39834446e72879fd119b591e52b36cac4 | ["MIT"] | count: null | dates: null
content:
from os import path
import autolens as al
import autolens.plot as aplt
from test_autogalaxy.simulators.imaging import instrument_util
test_path = path.join("{}".format(path.dirname(path.realpath(__file__))), "..", "..")
def pixel_scale_from_instrument(instrument):
"""
Returns the pixel scale from an instrument type based on real observations.
These options are representative of VRO, Euclid, HST, over-sampled HST and Adaptive Optics image.
Parameters
----------
instrument : str
A string giving the resolution of the desired instrument (VRO | Euclid | HST | HST_Up | AO).
"""
if instrument in "vro":
return (0.2, 0.2)
elif instrument in "euclid":
return (0.1, 0.1)
elif instrument in "hst":
return (0.05, 0.05)
elif instrument in "hst_up":
return (0.03, 0.03)
elif instrument in "ao":
return (0.01, 0.01)
else:
raise ValueError("An invalid instrument was entered - ", instrument)
def grid_from_instrument(instrument):
"""
Returns the `Grid` from an instrument type based on real observations.
These options are representative of VRO, Euclid, HST, over-sampled HST and Adaptive Optics image.
Parameters
----------
instrument : str
A string giving the resolution of the desired instrument (VRO | Euclid | HST | HST_Up | AO).
"""
if instrument in "vro":
return al.GridIterate.uniform(shape_2d=(80, 80), pixel_scales=0.2)
elif instrument in "euclid":
return al.GridIterate.uniform(shape_2d=(120, 120), pixel_scales=0.1)
elif instrument in "hst":
return al.GridIterate.uniform(shape_2d=(200, 200), pixel_scales=0.05)
elif instrument in "hst_up":
return al.GridIterate.uniform(shape_2d=(300, 300), pixel_scales=0.03)
elif instrument in "ao":
return al.GridIterate.uniform(shape_2d=(800, 800), pixel_scales=0.01)
else:
raise ValueError("An invalid instrument was entered - ", instrument)
def psf_from_instrument(instrument):
"""
Returns the *PSF* from an instrument type based on real observations.
These options are representative of VRO, Euclid, HST, over-sampled HST and Adaptive Optics image.
Parameters
----------
instrument : str
A string giving the resolution of the desired instrument (VRO | Euclid | HST | HST_Up | AO).
"""
if instrument in "vro":
return al.Kernel.from_gaussian(
shape_2d=(31, 31), sigma=0.5, pixel_scales=0.2, renormalize=True
)
elif instrument in "euclid":
return al.Kernel.from_gaussian(
shape_2d=(31, 31), sigma=0.1, pixel_scales=0.1, renormalize=True
)
elif instrument in "hst":
return al.Kernel.from_gaussian(
shape_2d=(31, 31), sigma=0.05, pixel_scales=0.05, renormalize=True
)
elif instrument in "hst_up":
return al.Kernel.from_gaussian(
shape_2d=(31, 31), sigma=0.05, pixel_scales=0.03, renormalize=True
)
elif instrument in "ao":
return al.Kernel.from_gaussian(
shape_2d=(31, 31), sigma=0.025, pixel_scales=0.01, renormalize=True
)
else:
raise ValueError("An invalid instrument was entered - ", instrument)
def simulator_from_instrument(instrument):
"""
Returns the *Simulator* from an instrument type based on real observations.
These options are representative of VRO, Euclid, HST, over-sampled HST and Adaptive Optics image.
Parameters
----------
instrument : str
A string giving the resolution of the desired instrument (VRO | Euclid | HST | HST_Up | AO).
"""
grid = grid_from_instrument(instrument=instrument)
psf = psf_from_instrument(instrument=instrument)
if instrument in "vro":
return al.SimulatorImaging(
exposure_time=100.0,
psf=psf,
background_sky_level=1.0,
add_poisson_noise=True,
)
elif instrument in "euclid":
return al.SimulatorImaging(
exposure_time=2260.0,
psf=psf,
background_sky_level=1.0,
add_poisson_noise=True,
)
elif instrument in "hst":
return al.SimulatorImaging(
exposure_time=2000.0,
psf=psf,
background_sky_level=1.0,
add_poisson_noise=True,
)
elif instrument in "hst_up":
return al.SimulatorImaging(
exposure_time=2000.0,
psf=psf,
background_sky_level=1.0,
add_poisson_noise=True,
)
elif instrument in "ao":
return al.SimulatorImaging(
exposure_time=1000.0,
psf=psf,
background_sky_level=1.0,
add_poisson_noise=True,
)
else:
raise ValueError("An invalid instrument was entered - ", instrument)
def simulate_imaging_from_instrument(instrument, dataset_name, galaxies):
# Simulate the imaging data, remembering that we use a special image which ensures edge-effects don't
    # degrade our modeling of the telescope optics (e.g. the PSF convolution).
grid = instrument_util.grid_from_instrument(instrument=instrument)
simulator = simulator_from_instrument(instrument=instrument)
# Use the input galaxies to setup a tracer, which will generate the image for the simulated imaging data.
tracer = al.Tracer.from_galaxies(galaxies=galaxies)
imaging = simulator.from_tracer_and_grid(tracer=tracer, grid=grid)
# Now, lets output this simulated imaging-data to the test_autoarray/simulator folder.
test_path = path.join(
"{}".format(path.dirname(path.realpath(__file__))), "..", ".."
)
dataset_path = path.join(test_path, "dataset", "imaging", dataset_name, instrument)
imaging.output_to_fits(
image_path=path.join(dataset_path, "image.fits"),
psf_path=path.join(dataset_path, "psf.fits"),
noise_map_path=path.join(dataset_path, "noise_map.fits"),
overwrite=True,
)
    plotter = aplt.MatPlot2D(output=aplt.Output(path=dataset_path, format="png"))
aplt.Imaging.subplot_imaging(imaging=imaging, plotter=plotter)
aplt.imaging.individual(
imaging=imaging,
image=True,
noise_map=True,
psf=True,
signal_to_noise_map=True,
plotter=plotter,
)
aplt.Tracer.subplot_tracer(tracer=tracer, grid=grid, plotter=plotter)
aplt.Tracer.figures(
tracer=tracer,
grid=grid,
image=True,
source_plane=True,
convergence=True,
potential=True,
deflections=True,
plotter=plotter,
)
def load_test_imaging(dataset_name, instrument, name=None):
pixel_scales = instrument_util.pixel_scale_from_instrument(instrument=instrument)
test_path = path.join(
"{}".format(path.dirname(path.realpath(__file__))), "..", ".."
)
dataset_path = path.join(test_path, "dataset", "imaging", dataset_name, instrument)
return al.Imaging.from_fits(
image_path=path.join(dataset_path, "image.fits"),
psf_path=path.join(dataset_path, "psf.fits"),
noise_map_path=path.join(dataset_path, "noise_map.fits"),
pixel_scales=pixel_scales,
name=name,
)
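A quick illustration of the instrument lookups defined above; the expected values are read straight from the functions themselves (PyAutoLens must be importable, as in the module's own imports):
if __name__ == "__main__":
    print(pixel_scale_from_instrument(instrument="hst"))   # (0.05, 0.05)
    grid = grid_from_instrument(instrument="euclid")       # 120 x 120 iterate grid at 0.1" per pixel
    psf = psf_from_instrument(instrument="vro")            # 31 x 31 Gaussian kernel with sigma 0.5"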
avg_line_length: 33.982143 | max_line_length: 110 | alphanum_fraction: 0.633605
count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 2,214 | score_documentation: 0.290857

hexsha: f78287f3fba8bcf7a557dbad608ff12faa053899 | size: 687 | ext: py | lang: Python
max_stars: tests/test_gpcontrolset.py | waider/gopro-py-api | b18b5458f5bbe689f468842d6888104317786de8 | ["MIT"] | count: 1 | 2019-05-06T21:48:54.000Z to 2019-05-06T21:48:54.000Z
max_issues: tests/test_gpcontrolset.py | waider/gopro-py-api | b18b5458f5bbe689f468842d6888104317786de8 | ["MIT"] | count: null | dates: null
max_forks: tests/test_gpcontrolset.py | waider/gopro-py-api | b18b5458f5bbe689f468842d6888104317786de8 | ["MIT"] | count: null | dates: null
content:
from .conftest import GoProCameraTest
from socket import timeout
from urllib import error
class GpControlSetTest(GoProCameraTest):
    def test_gp_control_set(self):
        # on success, this is an empty json blob
        self.responses['/gp/gpControl/setting/foo/bar'] = '{}'
        assert '{}' == self.goprocam.gpControlSet('foo', 'bar')
    def test_gp_control_set_error(self):
        assert isinstance(self.goprocam.gpControlSet('foo', 'bar'),
                          error.HTTPError)
    def test_gp_control_set_timeout(self):
        self.responses['/gp/gpControl/setting/foo/bar'] = timeout()
        assert isinstance(self.goprocam.gpControlSet('foo', 'bar'), timeout)
avg_line_length: 34.35 | max_line_length: 76 | alphanum_fraction: 0.676856
count_classes: 593 | score_classes: 0.863173 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 140 | score_documentation: 0.203785

hexsha: f782e665db7375deff9d1e85d757a68033315dd2 | size: 1,727 | ext: py | lang: Python
max_stars: ca_bc_abbotsford/people.py | djac/scrapers-ca | 2e16a85ff8a05ea49031a11ede66fa452631f8da | ["MIT"] | count: null | dates: null
max_issues: ca_bc_abbotsford/people.py | djac/scrapers-ca | 2e16a85ff8a05ea49031a11ede66fa452631f8da | ["MIT"] | count: null | dates: null
max_forks: ca_bc_abbotsford/people.py | djac/scrapers-ca | 2e16a85ff8a05ea49031a11ede66fa452631f8da | ["MIT"] | count: null | dates: null
content:
from utils import CanadianScraper, CanadianPerson as Person
COUNCIL_PAGE = 'http://www.abbotsford.ca/city_hall/mayor_and_council/city_council.htm'
CONTACT_PAGE = 'http://www.abbotsford.ca/contact_us.htm'
class AbbotsfordPersonScraper(CanadianScraper):
def scrape(self):
councillor_seat_number = 1
coun_page = self.lxmlize(COUNCIL_PAGE)
contact_page = self.lxmlize(CONTACT_PAGE)
councillors = coun_page.xpath('//div[@id="main-content"]//h3')
contact_data = contact_page.xpath('//p[contains(./strong/text(), "Mayor & Council")]/following-sibling::table[1]//tr')[2:]
assert len(councillors), 'No councillors found'
assert len(councillors) == len(contact_data), 'Expected {}, got {}'.format(len(councillors), len(contact_data))
for councillor, contact in zip(councillors, contact_data):
text = councillor.text_content()
if text.startswith('Councill'):
role = 'Councillor'
district = 'Abbotsford (seat {})'.format(councillor_seat_number)
councillor_seat_number += 1
else:
role = 'Mayor'
district = 'Abbotsford'
name = text.split(' ', 1)[1]
image = councillor.xpath('./img/@src')[0]
phone = contact.xpath('./td[2]/text()')[0]
fax = contact.xpath('./td[3]/text()')[0]
p = Person(primary_org='legislature', name=name, district=district, role=role)
p.add_source(COUNCIL_PAGE)
p.add_source(CONTACT_PAGE)
p.image = image
p.add_contact('voice', phone, 'legislature')
p.add_contact('fax', fax, 'legislature')
yield p
avg_line_length: 43.175 | max_line_length: 130 | alphanum_fraction: 0.609728
count_classes: 1,519 | score_classes: 0.87956 | count_generators: 1,467 | score_generators: 0.84945 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 430 | score_documentation: 0.248987

hexsha: f783069506127a9b55df9ae0fb7a072477dcbc3b | size: 32 | ext: py | lang: Python
max_stars: tests/unit/cli/test_repo.py | tehlingchu/anchore-cli | b0df36337f443749991a49263227c1d40989debb | ["Apache-2.0"] | count: 110 | 2017-09-14T02:15:15.000Z to 2022-03-30T20:14:21.000Z
max_issues: tests/unit/cli/test_repo.py | tehlingchu/anchore-cli | b0df36337f443749991a49263227c1d40989debb | ["Apache-2.0"] | count: 115 | 2017-09-22T12:15:30.000Z to 2022-01-17T12:31:21.000Z
max_forks: tests/unit/cli/test_repo.py | tehlingchu/anchore-cli | b0df36337f443749991a49263227c1d40989debb | ["Apache-2.0"] | count: 56 | 2017-09-22T11:26:25.000Z to 2022-03-03T14:14:58.000Z
content:
from anchorecli.cli import repo
avg_line_length: 16 | max_line_length: 31 | alphanum_fraction: 0.84375
count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 0 | score_documentation: 0

hexsha: f7836ff545709d136c298d62a1c6e262234ad38c | size: 2,692 | ext: py | lang: Python
max_stars: Python/Activies/Classroom10-1.py | FranciscoMends/Python_Codes | fd0b33443d67b56b092beeea0e778285be6a42a9 | ["MIT"] | count: null | dates: null
max_issues: Python/Activies/Classroom10-1.py | FranciscoMends/Python_Codes | fd0b33443d67b56b092beeea0e778285be6a42a9 | ["MIT"] | count: null | dates: null
max_forks: Python/Activies/Classroom10-1.py | FranciscoMends/Python_Codes | fd0b33443d67b56b092beeea0e778285be6a42a9 | ["MIT"] | count: null | dates: null
content:
'''
nome = input('Insira seu nome: ')
if nome == 'Mendes':
print('Que nome lindo você tem!')
else:
print('Seu nome é tão normal!')
print('Bom dia {}!'.format(nome))
'''
#DESAFIO_28
'''
from random import randint
from time import sleep
x = randint(0,5)
y = int(input('Digite um número de 0 à 5: '))
print('Loading...')
sleep(2)
if x == y:
print('Parabéns, você venceu!')
else:
print('Tente novamente, você perdeu!')
print(x)
'''
#DESAFIO_29
'''
velocity = int(input('Qual a velocidade atual do seu carro em Km/h? '))
if velocity > 80:
print('Você foi multado por excesso de velocidade!')
print('Velocidade permitia: 80km/h')
print('Velocidade ultrapassada: {}km/h'.format(velocity))
infraction = (velocity - 80) * 7
print('Valor da multa: R${},00'.format(infraction))
'''
#DESAFIO_30
'''
number = int(input('Insira um número inteiro: '))
if number % 2 == 0:
print('Seu número é PAR!')
else:
print('Seu número é ÍMPAR!')
'''
#DESAFIO_31
'''
distance = int(input('Qual a distância em Km que deseja viajar? '))
if distance <= 200:
final_value = distance * 0.50
else:
final_value = distance * 0.45
print('Valor da passagem: R${:.2f}'.format(final_value))
'''
#DESAFIO_32
'''
from datetime import date
year = int(input('Insira um ano (Coloque "0" caso queira analisar a data atual): '))
if year == 0:
year = date.today().year
if year % 4 == 0 and year % 100 != 0 or year % 400 == 0:
print(year, 'é um ano BISSEXTO!')
else:
print(year, 'não é um ano BISSEXTO!')
'''
#DESAFIO_33
'''
x = int(input('Digite o primeiro número: '))
y = int(input('Digite o segundo número: '))
z = int(input('Digite o terceiro número: '))
number_max = max(x,y,z)
number_min = min(x,y,z)
print('Maior número:',number_max)
print('Menor número:',number_min)
'''
#DESAFIO_34
'''
wage = float(input('Insira seu salário: R$'))
if wage > 1250:
salary_increase = ((10/100) * wage) + wage
percent = 10
else:
salary_increase = ((15/100) * wage) + wage
percent = 15
print()
print('Salário atual: R${:.2f}'.format(wage))
print('Aumento de {}%'.format(percent))
print('Salário final: R${:.2f}'.format(salary_increase))
'''
#DESAFIO_35
'''
line1 = float(input('Insira o comprimento da primeira reta: '))
line2 = float(input('Insira o comprimento da segunda reta: '))
line3 = float(input('Insira o comprimento da terceira reta: '))
if line1 < line2 + line3 and line2 < line1 + line3 and line3 < line1 + line2:
print('Podem formar um triângulo!')
else:
print('Não podem formar um triângulo!')
'''
#PROVA
'''
s = 'prova de python'
x = len(s)
print(x)
x = 'curso de python no cursoemvideo'
y = x[:5]
print(y)
'''
x = 3 * 5 + 4 ** 2
print(x)
| 24.697248
| 84
| 0.643759
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,676
| 0.983101
|
f78374a6b9c098ca930042e6331630796196647c
| 4,902
|
py
|
Python
|
temp logger complete.py
|
nevillethenev/Beer
|
a8fae43e7b2f846e208daad4a9b025703f0acb2a
|
[
"Unlicense"
] | null | null | null |
temp logger complete.py
|
nevillethenev/Beer
|
a8fae43e7b2f846e208daad4a9b025703f0acb2a
|
[
"Unlicense"
] | null | null | null |
temp logger complete.py
|
nevillethenev/Beer
|
a8fae43e7b2f846e208daad4a9b025703f0acb2a
|
[
"Unlicense"
] | null | null | null |
#!/usr/bin/python
import serial
import time
import matplotlib.pyplot as plt
import numpy as np
import os
"""""""""""""""""""""""""""""""""""
"""""""NEVS BEER SCRIPT""""""""""""
"""""""""""""""""""""""""""""""""""
###need to add exception handler for serial disconnection
## SETUP SERIAL PORT
try:
ser = serial.Serial('COM3',9600) # open serial port
print('Serial connection established!')
except:
print('ERR: Unable to connect to arduino...retrying')
time.sleep(3)
try:
ser = serial.Serial('COM3',9600)
except:
raw_input('ERR: Unable to connect to arduino....check connections and press Enter to continue')
try:
ser = serial.Serial('COM3',9600)
except:
raw_input('ERR: Unable to connect to arduino...Press Enter to exit..')
## STRIKE WATER CALCULATOR
##strike water calculator
##volume of water is heated inside an insulated mash tun
##grain is added to mash tun
## Tw = (Tm*((Sw*mw)+(Sg*mg))-(Sg*mg*Tg))/(Sw*mw)
## Tw = strike water temp.
## Tm = mash temp.
Sw = 1; ##Specific heat water
Sg = 0.4; ##Specific heat grain
beername = raw_input("Please enter the name of the beer:")
Tm = input("Mash Temp.(\xb0C)")
Vw = input("Water Volume(L)")
mw = Vw; ##mass water(kg) = volume water(L)
mg = input("Grain mass(kg)")
Tg = input("Grain temp.(\xb0C)")
print("Calculating...")
time.sleep(1)
Tw = (Tm*((Sw*mw)+(Sg*mg))-(Sg*mg*Tg))/(Sw*mw)
Tw = round(Tw,1)
##print "Strike temp.(\xb0C) = "+str(Tw)
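## Worked example of the strike-water formula above (illustrative values only, not part
## of the original script): mashing mg = 5 kg of grain at Tg = 20 degC into mw = 20 L of
## water for a Tm = 66 degC mash target gives
##   Tw = (66*((1*20)+(0.4*5)) - (0.4*5*20)) / (1*20)
##      = (1452 - 40) / 20 = 70.6 degC strike temperature.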
## MASH INSTRUCTIONS
print 'Set strike temperature to ' + str(Tw) + '\xb0C'
raw_input('Press Enter to continue...')
temperaturefloat = 0
##measure temperature
while True:
try:
temperaturefloat = round(float((ser.read(7))),1) #read
except: ##handle all serial read errors
try:
ser = serial.Serial('COM3',9600) # open serial port
except:
ser.close()
ser = serial.Serial('COM3',9600) # open serial port
temperaturefloat = 0
time.sleep(0.1)
print str(temperaturefloat) + '\xb0C'
time.sleep(0.1)
## if temperaturefloat > Tm: #### check temperature 5 times
## dragon = np.ones(5)
## for i in range(0,4):
## try:
## temperaturefloat = round(float(ser.read(7)),1)
## except: ##handle all serial read errors
## temperaturefloat = 0
##
## if temperaturefloat < 0:
## temperaturefloat = 0
##
## print str(temperaturefloat) + '\xb0C'
## dragon[i] = temperaturefloat
## print str(dragon)
## time.sleep(0.1)
## if sum(dragon)/5 > Tm:
## print 'SUCCESS'
## break
if temperaturefloat > Tm:
        print 'Strike temperature reached! Please stir the water and prepare grain for submersion...'
        mashtime1 = 60*input('Enter total mash time (min):')
        raw_input('Submerge grain and press enter to continue...')
print 'Mash in progress, please wait ' + str(mashtime1/60) + ' minutes...'
break
## TEMPERATURE LOGGING
ser.close() ## restart Com port
ser = serial.Serial('COM3',9600)
print 'Temp(\xb0C)\tTime(s)'
nowtimefloat = 0
temperaturefloat = 0
#read from serial and exit when user wants
while nowtimefloat < mashtime1:
try:
temperaturefloat = round(float((ser.read(7))),1) #read
except: ##handle all serial read errors
try:
ser = serial.Serial('COM3',9600) # open serial port
except:
ser.close()
ser = serial.Serial('COM3',9600) # open serial port
temperaturefloat = 0
time.sleep(0.1)
nowtimefloat = round(time.clock(),1)
nowtimestring = str(nowtimefloat)
temperaturesting = str(temperaturefloat)
goblin = open('templog.txt','a') #open txt file
datastring = temperaturesting + '\t' + nowtimestring + '\n'
print(datastring) #print temp to console
goblin.write(datastring)
## goblin.flush()
## ser.close() # close port
else:
print "Mash complete!"
raw_input('Press Enter to save the data..')
goblin.close()
os.rename('templog.txt',beername + 'templog.txt')
print 'Data saved!'
raw_input('Press Enter to exit...')
## DATA ANALYSIS
##plt.axis([0,3600,55,75])
###temperature lines
##plt.hlines(70,0,3600,colors='r')
##plt.hlines(60,0,3600,colors='r')
##
##dragon = np.loadtxt('templog.txt', delimiter="\t")
##x = dragon[:,1]
##y = dragon[:,0]
##
##plt.scatter(x,y)
####plt.draw()
##plt.show()
##plt.waitforbuttonpress()
####plt.pause(0.1)
##
##raw_input('Press Enter to exit...')
##
| 29.178571
| 104
| 0.563035
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,821
| 0.575479
|
f78376ac5696d0e27ff83ec0f818efffebaf1f64
| 874
|
py
|
Python
|
src/domain/usecases/get_all_glaucomatous_images_paths.py
|
OzielFilho/ProjetoFinalPdi
|
c9e6fe415f1a985d6eeac204580d3ab623026665
|
[
"MIT"
] | null | null | null |
src/domain/usecases/get_all_glaucomatous_images_paths.py
|
OzielFilho/ProjetoFinalPdi
|
c9e6fe415f1a985d6eeac204580d3ab623026665
|
[
"MIT"
] | null | null | null |
src/domain/usecases/get_all_glaucomatous_images_paths.py
|
OzielFilho/ProjetoFinalPdi
|
c9e6fe415f1a985d6eeac204580d3ab623026665
|
[
"MIT"
] | null | null | null |
from abc import ABC, abstractmethod
from domain.errors.failure import Failure
from domain.errors.image_failure import ImageFailure
from domain.repositories.image_repository_abstraction import ImageRepositoryAbstraction
class GetAllGlaucomatousImagesPathsAbstraction(ABC):
@abstractmethod
def __init__(self, repository: ImageRepositoryAbstraction) -> None:
pass
@abstractmethod
def __call__(self) -> Failure | list[str]:
pass
class GetAllGlaucomatousImagesPaths(GetAllGlaucomatousImagesPathsAbstraction):
def __init__(self, repository: ImageRepositoryAbstraction) -> None:
self.repository = repository
def __call__(self) -> Failure | list[str]:
try:
return self.repository.get_all_glaucomatous_images_paths()
except BaseException as exception:
return ImageFailure(str(exception))
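# Minimal wiring sketch (illustrative only; FileSystemImageRepository is a hypothetical
# concrete ImageRepositoryAbstraction, not part of this module):
#
#   repository = FileSystemImageRepository("datasets/glaucomatous")
#   get_paths = GetAllGlaucomatousImagesPaths(repository)
#   result = get_paths()  # the usecase instance is callable
#   if isinstance(result, Failure):
#       print(result)     # an ImageFailure wrapping the original exception message
#   else:
#       print(f"{len(result)} glaucomatous image paths found")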
| 32.37037
| 87
| 0.756293
| 648
| 0.741419
| 0
| 0
| 175
| 0.200229
| 0
| 0
| 0
| 0
|
f783bb5a51fe8b1b4c8ff1d1556d8997b3dd57bd
| 944
|
py
|
Python
|
numsgraph.py
|
FNut/PyDev
|
f591aa6ace1b9032e4a9159c03478571c75a38b1
|
[
"MIT"
] | 2
|
2021-01-21T12:54:50.000Z
|
2021-12-26T13:45:19.000Z
|
numsgraph.py
|
FNut/PyDev
|
f591aa6ace1b9032e4a9159c03478571c75a38b1
|
[
"MIT"
] | 12
|
2021-01-21T14:12:02.000Z
|
2021-02-07T06:12:44.000Z
|
numsgraph.py
|
FNut/PyDev
|
f591aa6ace1b9032e4a9159c03478571c75a38b1
|
[
"MIT"
] | null | null | null |
import pygame
import math
pygame.init()
pi = ('Pi = ' + str(math.pi))
e = ('E = ' + str(math.e))
f = ('F = 0,1,1,2,3,5,8,13...')
p = ('P = 1,2,5,12,29...')
l = ('L = 2,1,3,4,7,11,18,29...')
pl = ('P-L = 2,6,14,34,82...')
display = pygame.display.set_mode((800,600))
pygame.display.set_caption('Nums')
font = pygame.font.SysFont('None', 72)
pitxt = font.render(pi, 0, (0,255,0))
etxt = font.render(e, 0, (0,255,0))
ftxt = font.render(f, 0, (0,255,0))
ptxt = font.render(p, 0, (0,255,0))
ltxt = font.render(l, 0, (0,255,0))
pltxt = font.render(pl, 0, (0,255,0))
run = True
while run:
for event in pygame.event.get():
if event.type == pygame.QUIT:
run = False
pygame.display.update()
display.blit(pitxt, (0,0))
display.blit(etxt, (0,40))
display.blit(ftxt, (0,80))
display.blit(ptxt, (0,120))
display.blit(ltxt, (0,160))
display.blit(pltxt, (0,200))
pygame.quit()
| 29.5
| 45
| 0.559322
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 120
| 0.127119
|
f783f229f95c2d9280dddc95def69a100dcd1954
| 5,980
|
py
|
Python
|
scoap3/modules/tools/tasks.py
|
Lilykos/scoap3-next
|
b779b8c32504e09b2c5330aa6a18e1e1c9affd33
|
[
"CC-BY-4.0"
] | 1
|
2021-08-04T09:48:38.000Z
|
2021-08-04T09:48:38.000Z
|
scoap3/modules/tools/tasks.py
|
Lilykos/scoap3-next
|
b779b8c32504e09b2c5330aa6a18e1e1c9affd33
|
[
"CC-BY-4.0"
] | 158
|
2018-09-10T07:31:14.000Z
|
2022-03-30T07:18:51.000Z
|
scoap3/modules/tools/tasks.py
|
Lilykos/scoap3-next
|
b779b8c32504e09b2c5330aa6a18e1e1c9affd33
|
[
"CC-BY-4.0"
] | 9
|
2015-04-28T11:55:04.000Z
|
2021-09-28T12:14:53.000Z
|
import io
import csv
import logging
from StringIO import StringIO
from datetime import datetime
from gzip import GzipFile
import boto3
from celery import shared_task
from flask import current_app
from flask_mail import Attachment
from invenio_mail.api import TemplatedMessage
logger = logging.getLogger(__name__)
def encode_element(element):
"""
Converts element to utf-8 string.
None value will be converted to an empty string.
"""
if element is None:
return ""
if isinstance(element, basestring):
return element.encode('utf-8')
return element
def to_csv(data):
"""
Serialize generated tool data to CSV.
:param data: dictionary representing the data to be serialized.
'header' key has to contain a list of string, 'data' key has to contain a list of list of string.
:return: (content_type, data) 2-tuple: corresponding MIME type as string and the serialized value as string.
"""
if not data or 'header' not in data or 'data' not in data:
raise ValueError('Invalid parameter to be serialized.')
result = StringIO()
cw = csv.writer(result, delimiter=";", quoting=csv.QUOTE_ALL)
cw.writerow(data['header'])
for row in data['data']:
cw.writerow([encode_element(element) for element in row])
return 'text/csv', result.getvalue()
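# Example of the input shape expected by to_csv (illustrative values only):
#
#   content_type, payload = to_csv({
#       'header': ['doi', 'journal'],
#       'data': [['10.1000/xyz123', u'Journal A'], ['10.1000/xyz456', None]],
#   })
#   # content_type == 'text/csv'; None cells are serialized as empty strings.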
def send_result(result_data, content_type, recipients, tool_name):
"""
Sends the result via email to the user who requested it.
:param result_data: generated data in a serialized form.
:param content_type: MIME type of the attachment.
:param recipients: recipients who will receive the email.
:param tool_name: name of the tool, which will be used in the subject of the email.
"""
timestamp = datetime.now().strftime('%Y-%m-%dT%H:%M:%S')
filename = 'scoap3_export_%s_%s.csv' % (tool_name, timestamp)
# compress data if needed
# try:
# compress = current_app.config.get('TOOL_COMPRESS_ATTACHMENT', False)
# if compress:
# compressed_buffer = StringIO()
# gzip_file = GzipFile(fileobj=compressed_buffer, mode="wt")
# gzip_file.write(result_data)
# gzip_file.close()
#
# result_data = compressed_buffer.getvalue()
# content_type = 'application/gzip'
# filename += '.gz'
# except Exception as e:
# logger.error('Error in csv compression: {}'.format(e.message))
#
# attachment = Attachment(filename=filename, content_type=content_type, data=result_data)
host = current_app.config.get('S3_HOSTNAME')
bucket = current_app.config.get('S3_BUCKET')
s3 = boto3.resource('s3', endpoint_url='http://s3.cern.ch/')
s3.meta.client.upload_fileobj(
io.BytesIO(result_data), bucket, filename,
ExtraArgs={'ACL': 'public-read'}
)
file_url = "{}/{}/{}".format(host, bucket, filename)
msg = TemplatedMessage(
template_html='scoap3_tools/email/result.html',
ctx={'attachment_url': file_url},
subject='SCOAP3 - Export %s result' % tool_name,
sender=current_app.config.get('MAIL_DEFAULT_SENDER'),
recipients=recipients,
# attachments=[attachment],
)
current_app.extensions['mail'].send(msg)
def send_failed_email(recipients, tool_name, task_id=None):
"""
Notifies the user about a failed generation.
:param recipients: recipients who will receive the email.
:param tool_name: name of the tool, which will be used in the subject of the email.
:param task_id: celery task id, if available.
"""
msg = TemplatedMessage(
template_html='scoap3_tools/email/failed.html',
subject='SCOAP3 - Export %s result error' % tool_name,
sender=current_app.config.get('MAIL_DEFAULT_SENDER'),
recipients=recipients,
ctx={'task_id': task_id}
)
current_app.extensions['mail'].send(msg)
@shared_task(bind=True)
def run_tool(self, result_email, tool_name, serializer_function=to_csv, **kwargs):
"""
Wrapper for generating result for a tool.
It generates the result using the tool_function parameter's return value, then sends it via email.
:param self: bound task type instance.
:param result_email: email address to send the results to.
:param tool_name: name of the tool, which is used to determine the generator function.
:param serializer_function: serializer function
:param kwargs: additional kwargs passed to the tool_function
"""
try:
logger.info('Running tool. tool_name=%s result_email=%s' % (tool_name, result_email))
tool_function = current_app.config.get('TOOL_FUNCTIONS', {}).get(tool_name)
if tool_function is None:
logger.warn('Invalid tool name: %s' % tool_name)
send_failed_email([result_email], tool_name)
return
result = tool_function(**kwargs)
logger.info('Result calculated. result_data_count=%d tool_name=%s result_email=%s' % (
len(result['data']), tool_name, result_email))
content_type, serialized_result = serializer_function(result)
logger.info('Result serialized, sending email... result_data_count=%d tool_name=%s result_email=%s' % (
len(result['data']), tool_name, result_email))
send_result(serialized_result, content_type, [result_email], tool_name)
logger.info('Result successfully sent. tool_name=%s result_email=%s' % (tool_name, result_email))
except Exception as e:
# in case an unexpected error occurs, log the details
        logger.error('Unexpected error occurred while running an export tool. '
                     'tool_name=%s result_email=%s exception=%s' % (tool_name, result_email, e.message))
# and notify the user
recipients = current_app.config.get('OPERATIONS_EMAILS', []) + [result_email]
send_failed_email(recipients, tool_name, self.request.id)
raise
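# Usage sketch (illustrative; 'article_impact' stands for any key registered in the
# TOOL_FUNCTIONS config mapping and is not necessarily a real tool name):
#
#   run_tool.delay(result_email='user@example.com', tool_name='article_impact')
#
# The task looks up the generator in TOOL_FUNCTIONS, serializes its result with to_csv,
# uploads the file to S3 and emails the download link to result_email.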
| 37.610063
| 112
| 0.678595
| 0
| 0
| 0
| 0
| 2,041
| 0.341304
| 0
| 0
| 3,051
| 0.510201
|
f78676da21ba7106ed5e99f74d32df70174e47d8
| 756
|
py
|
Python
|
telegram_bot/handlers/commands/detailed_mode.py
|
ProgrammingLanguageLeader/MathematicianBot
|
a4627962a6c8bfac76013d80780997ab4b0f7952
|
[
"MIT"
] | null | null | null |
telegram_bot/handlers/commands/detailed_mode.py
|
ProgrammingLanguageLeader/MathematicianBot
|
a4627962a6c8bfac76013d80780997ab4b0f7952
|
[
"MIT"
] | 16
|
2018-03-05T14:25:16.000Z
|
2022-03-11T23:46:56.000Z
|
telegram_bot/handlers/commands/detailed_mode.py
|
ProgrammingLanguageLeader/MathematicianBot
|
a4627962a6c8bfac76013d80780997ab4b0f7952
|
[
"MIT"
] | null | null | null |
from system.db import db
from telegram_bot.handlers.utils.decorators import remember_new_user, \
send_typing, write_logs
from telegram_bot.handlers.utils.menu_entries import MenuEntry
from telegram_bot.handlers.utils.reply_markup import create_main_reply_markup
from telegram_bot.models import User
@write_logs
@send_typing
@remember_new_user
def handle_detailed_mode_cmd(bot, update) -> int:
db.session.query(User).filter_by(
telegram_id=update.message.from_user.id
).update({
'simple_mode': False
})
db.session.commit()
bot.send_message(
chat_id=update.message.chat_id,
text='Switched to detailed mode',
reply_markup=create_main_reply_markup()
)
return MenuEntry.START_MENU.value
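# One plausible way to register this handler (an assumption, not shown in this module:
# a python-telegram-bot dispatcher using (bot, update) callbacks, with '/detailed_mode'
# as a hypothetical command name):
#
#   from telegram.ext import CommandHandler
#   dispatcher.add_handler(CommandHandler('detailed_mode', handle_detailed_mode_cmd))
#
# The returned MenuEntry.START_MENU.value is presumably consumed as a conversation state
# when the handler is used inside a ConversationHandler.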
| 30.24
| 77
| 0.756614
| 0
| 0
| 0
| 0
| 450
| 0.595238
| 0
| 0
| 40
| 0.05291
|
f788b1d1658062d96ad83c42b9cd26071a4b8418
| 374
|
py
|
Python
|
my_spotless_app/migrations/0002_alter_service_picture_url.py
|
AntociM/Spotless
|
8cd2d7f76eccee046d42f7a836cf91af04527186
|
[
"ADSL"
] | null | null | null |
my_spotless_app/migrations/0002_alter_service_picture_url.py
|
AntociM/Spotless
|
8cd2d7f76eccee046d42f7a836cf91af04527186
|
[
"ADSL"
] | 29
|
2022-01-22T19:05:56.000Z
|
2022-03-01T08:57:14.000Z
|
my_spotless_app/migrations/0002_alter_service_picture_url.py
|
AntociM/Project-4
|
8cd2d7f76eccee046d42f7a836cf91af04527186
|
[
"ADSL"
] | 1
|
2022-03-02T11:00:59.000Z
|
2022-03-02T11:00:59.000Z
|
# Generated by Django 3.2 on 2022-02-27 11:38
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('my_spotless_app', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='service',
name='picture_url',
field=models.TextField(),
),
]
| 19.684211
| 45
| 0.590909
| 283
| 0.756684
| 0
| 0
| 0
| 0
| 0
| 0
| 98
| 0.262032
|
f788b46ad9abee669c31dc3a176027a5ef06cdbd
| 8,983
|
py
|
Python
|
robot/TTS.py
|
mluyuchen/wukong-robot
|
67f5cdb06db9e5e256017925a5efe6721cb2bd1d
|
[
"MIT"
] | 8
|
2021-02-01T06:33:49.000Z
|
2022-02-02T11:06:58.000Z
|
robot/TTS.py
|
mluyuchen/wukong-robot
|
67f5cdb06db9e5e256017925a5efe6721cb2bd1d
|
[
"MIT"
] | 1
|
2020-06-10T10:59:02.000Z
|
2020-06-10T10:59:02.000Z
|
robot/TTS.py
|
mluyuchen/wukong-robot
|
67f5cdb06db9e5e256017925a5efe6721cb2bd1d
|
[
"MIT"
] | 6
|
2021-01-20T03:22:19.000Z
|
2022-03-21T14:19:32.000Z
|
# -*- coding: utf-8-*-
import os
import base64
import tempfile
import pypinyin
from aip import AipSpeech
from . import utils, config, constants
from robot import logging
from pathlib import Path
from pypinyin import lazy_pinyin
from pydub import AudioSegment
from abc import ABCMeta, abstractmethod
from .sdk import TencentSpeech, AliSpeech, XunfeiSpeech, atc
logger = logging.getLogger(__name__)
class AbstractTTS(object):
"""
Generic parent class for all TTS engines
"""
__metaclass__ = ABCMeta
@classmethod
def get_config(cls):
return {}
@classmethod
def get_instance(cls):
profile = cls.get_config()
instance = cls(**profile)
return instance
@abstractmethod
def get_speech(self, phrase):
pass
class HanTTS(AbstractTTS):
"""
HanTTS:https://github.com/junzew/HanTTS
    To use this module, first download the voice bank syllables.zip from SourceForge:
    https://sourceforge.net/projects/hantts/files/?source=navbar
    and extract it into the ~/.wukong directory.
"""
SLUG = "han-tts"
CHUNK = 1024
punctuation = [',', '。','?','!','“','”',';',':','(',")",":",";",",",".","?","!","\"","\'","(",")"]
def __init__(self, voice='syllables', **args):
super(self.__class__, self).__init__()
self.voice = voice
@classmethod
def get_config(cls):
# Try to get han-tts config from config
return config.get('han-tts', {})
def get_speech(self, phrase):
"""
Synthesize .wav from text
"""
src = os.path.join(constants.CONFIG_PATH, self.voice)
text = phrase
def preprocess(syllables):
temp = []
for syllable in syllables:
for p in self.punctuation:
syllable = syllable.replace(p, "")
if syllable.isdigit():
syllable = atc.num2chinese(syllable)
new_sounds = lazy_pinyin(syllable, style=pypinyin.TONE3)
for e in new_sounds:
temp.append(e)
else:
temp.append(syllable)
return temp
if not os.path.exists(src):
logger.error('{} 合成失败: 请先下载 syllables.zip (https://sourceforge.net/projects/hantts/files/?source=navbar) 并解压到 ~/.wukong 目录下'.format(self.SLUG))
return None
logger.debug("{} 合成中...".format(self.SLUG))
delay = 0
increment = 355 # milliseconds
pause = 500 # pause for punctuation
syllables = lazy_pinyin(text, style=pypinyin.TONE3)
syllables = preprocess(syllables)
# initialize to be complete silence, each character takes up ~500ms
result = AudioSegment.silent(duration=500*len(text))
for syllable in syllables:
path = os.path.join(src, syllable+".wav")
sound_file = Path(path)
# insert 500 ms silence for punctuation marks
if syllable in self.punctuation:
short_silence = AudioSegment.silent(duration=pause)
result = result.overlay(short_silence, position=delay)
delay += increment
continue
# skip sound file that doesn't exist
if not sound_file.is_file():
continue
segment = AudioSegment.from_wav(path)
result = result.overlay(segment, position=delay)
delay += increment
tmpfile = ''
with tempfile.NamedTemporaryFile() as f:
tmpfile = f.name
result.export(tmpfile, format="wav")
logger.info('{} 语音合成成功,合成路径:{}'.format(self.SLUG, tmpfile))
return tmpfile
class BaiduTTS(AbstractTTS):
"""
    Baidu speech synthesis.
    To use this module, first register a developer account at yuyin.baidu.com,
    then create a new application and get the API Key and Secret Key from
    "View key" in the application management page, and fill them into config.yml:
...
baidu_yuyin:
appid: '9670645'
api_key: 'qg4haN8b2bGvFtCbBGqhrmZy'
secret_key: '585d4eccb50d306c401d7df138bb02e7'
dev_pid: 1936
per: 1
lan: 'zh'
...
"""
SLUG = "baidu-tts"
def __init__(self, appid, api_key, secret_key, per=1, lan='zh', **args):
super(self.__class__, self).__init__()
self.client = AipSpeech(appid, api_key, secret_key)
self.per, self.lan = str(per), lan
@classmethod
def get_config(cls):
# Try to get baidu_yuyin config from config
return config.get('baidu_yuyin', {})
def get_speech(self, phrase):
result = self.client.synthesis(phrase, self.lan, 1, {'per': self.per});
        # the synthesized speech is returned as binary on success; on error a dict is returned (refer to the error codes below)
if not isinstance(result, dict):
tmpfile = utils.write_temp_file(result, '.mp3')
logger.info('{} 语音合成成功,合成路径:{}'.format(self.SLUG, tmpfile))
return tmpfile
else:
logger.critical('{} 合成失败!'.format(self.SLUG), exc_info=True)
class TencentTTS(AbstractTTS):
"""
    Tencent speech synthesis.
    region: service region; picking the region closest to you helps improve speed.
        Valid values: https://cloud.tencent.com/document/api/441/17365#.E5.9C.B0.E5.9F.9F.E5.88.97.E8.A1.A8
    voiceType:
        - 0: female voice 1, warm style (default)
        - 1: male voice 1, mature style
        - 2: male voice 2, mature style
    language:
        - 1: Chinese, at most 100 Chinese characters (a punctuation mark counts as one character)
        - 2: English, at most 400 letters (a punctuation mark counts as one letter)
"""
SLUG = "tencent-tts"
def __init__(self, appid, secretid, secret_key, region='ap-guangzhou', voiceType=0, language=1, **args):
super(self.__class__, self).__init__()
self.engine = TencentSpeech.tencentSpeech(secret_key, secretid)
self.region, self.voiceType, self.language = region, voiceType, language
@classmethod
def get_config(cls):
# Try to get tencent_yuyin config from config
return config.get('tencent_yuyin', {})
def get_speech(self, phrase):
result = self.engine.TTS(phrase, self.voiceType, self.language, self.region)
if 'Response' in result and 'Audio' in result['Response']:
audio = result['Response']['Audio']
data = base64.b64decode(audio)
tmpfile = utils.write_temp_file(data, '.wav')
logger.info('{} 语音合成成功,合成路径:{}'.format(self.SLUG, tmpfile))
return tmpfile
else:
logger.critical('{} 合成失败!'.format(self.SLUG), exc_info=True)
class XunfeiTTS(AbstractTTS):
"""
    iFLYTEK (Xunfei) text-to-speech API.
"""
SLUG = "xunfei-tts"
def __init__(self, appid, api_key, api_secret, voice='xiaoyan'):
super(self.__class__, self).__init__()
self.appid, self.api_key, self.api_secret, self.voice_name = appid, api_key, api_secret, voice
@classmethod
def get_config(cls):
# Try to get xunfei_yuyin config from config
return config.get('xunfei_yuyin', {})
def get_speech(self, phrase):
return XunfeiSpeech.synthesize(phrase, self.appid, self.api_key, self.api_secret, self.voice_name)
class AliTTS(AbstractTTS):
"""
    Alibaba (Ali) TTS.
    voice: the speaker voice to use, defaults to xiaoyun.
        Full list of available voices: https://help.aliyun.com/document_detail/84435.html?spm=a2c4g.11186623.2.24.67ce5275q2RGsT
"""
SLUG = "ali-tts"
def __init__(self, appKey, token, voice='xiaoyun', **args):
super(self.__class__, self).__init__()
self.appKey, self.token, self.voice = appKey, token, voice
@classmethod
def get_config(cls):
# Try to get ali_yuyin config from config
return config.get('ali_yuyin', {})
def get_speech(self, phrase):
tmpfile = AliSpeech.tts(self.appKey, self.token, self.voice, phrase)
if tmpfile is not None:
logger.info('{} 语音合成成功,合成路径:{}'.format(self.SLUG, tmpfile))
return tmpfile
else:
logger.critical('{} 合成失败!'.format(self.SLUG), exc_info=True)
def get_engine_by_slug(slug=None):
"""
Returns:
A TTS Engine implementation available on the current platform
Raises:
ValueError if no speaker implementation is supported on this platform
"""
if not slug or type(slug) is not str:
raise TypeError("无效的 TTS slug '%s'", slug)
selected_engines = list(filter(lambda engine: hasattr(engine, "SLUG") and
engine.SLUG == slug, get_engines()))
if len(selected_engines) == 0:
raise ValueError("错误:找不到名为 {} 的 TTS 引擎".format(slug))
else:
if len(selected_engines) > 1:
            logger.warning("注意: 有多个 TTS 名称与指定的引擎名 {} 匹配".format(slug))
engine = selected_engines[0]
logger.info("使用 {} TTS 引擎".format(engine.SLUG))
return engine.get_instance()
def get_engines():
def get_subclasses(cls):
subclasses = set()
for subclass in cls.__subclasses__():
subclasses.add(subclass)
subclasses.update(get_subclasses(subclass))
return subclasses
return [engine for engine in
list(get_subclasses(AbstractTTS))
if hasattr(engine, 'SLUG') and engine.SLUG]
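# Minimal usage sketch (an assumption, not part of the original module: a matching
# section such as `baidu_yuyin` must already be present in the wukong config, and the
# phrase below is just an example string):
#
#   from robot import TTS
#   engine = TTS.get_engine_by_slug('baidu-tts')   # returns a BaiduTTS instance
#   audio_path = engine.get_speech('hello')        # path to the synthesized audio file, or None on failure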
| 32.665455
| 155
| 0.601247
| 8,008
| 0.826504
| 0
| 0
| 913
| 0.094231
| 0
| 0
| 3,166
| 0.326762
|
f7897d0bfd7b98594f64cf998c02d21b938fb01d
| 392
|
py
|
Python
|
app/utils.py
|
Chimmahh/StarJumper
|
6003ede1de61a17f1f8302faacf5f76033f8045d
|
[
"MIT"
] | null | null | null |
app/utils.py
|
Chimmahh/StarJumper
|
6003ede1de61a17f1f8302faacf5f76033f8045d
|
[
"MIT"
] | 3
|
2020-06-05T18:39:20.000Z
|
2022-02-11T03:40:48.000Z
|
app/utils.py
|
Chimmahh/StarJumper
|
6003ede1de61a17f1f8302faacf5f76033f8045d
|
[
"MIT"
] | 1
|
2018-07-26T16:44:04.000Z
|
2018-07-26T16:44:04.000Z
|
from channels.db import database_sync_to_async
from .exceptions import ClientError
from .models import Game
@database_sync_to_async
def get_game_or_error(game_id, user):
if not user.is_authenticated:
raise ClientError("USER_HAS_TO_LOGIN")
try:
game = Game.objects.get(pk=game_id)
except Game.DoesNotExist:
raise ClientError("GAME_INVALID")
return game
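# Usage sketch (an assumption for illustration: called from an async Channels consumer,
# e.g. inside a hypothetical GameConsumer.receive_json method):
#
#   game = await get_game_or_error(game_id, self.scope["user"])
#
# database_sync_to_async wraps the ORM lookup so it can be awaited without blocking the
# event loop; ClientError is raised for anonymous users or unknown game ids.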
| 30.153846
| 46
| 0.75
| 0
| 0
| 0
| 0
| 283
| 0.721939
| 0
| 0
| 33
| 0.084184
|
f78ade6802218bb90c0b57cf1feec7d8f2242c2e
| 2,328
|
py
|
Python
|
tests/utils/test_file.py
|
gfi-centre-ouest/docker-devbox-ddb
|
1597d85ef6e9e8322cce195a454de54186ce9ec7
|
[
"MIT"
] | 4
|
2020-06-11T20:54:47.000Z
|
2020-09-22T13:07:17.000Z
|
tests/utils/test_file.py
|
gfi-centre-ouest/docker-devbox-ddb
|
1597d85ef6e9e8322cce195a454de54186ce9ec7
|
[
"MIT"
] | 113
|
2019-11-07T00:40:36.000Z
|
2021-01-18T12:50:16.000Z
|
tests/utils/test_file.py
|
inetum-orleans/docker-devbox-ddb
|
20c713cf7bfcaf289226a17a9648c17d16003b4d
|
[
"MIT"
] | null | null | null |
import os
import pytest
from ddb.__main__ import load_registered_features
from ddb.config import config
from ddb.feature import features
from ddb.feature.core import CoreFeature
from ddb.utils import file
from ddb.utils.file import FileWalker, FileUtils
class TestHasSameContent:
def test_should_return_true_if_same_content(self, data_dir: str):
        assert file.has_same_content(os.path.join(data_dir, "512bytes.bin"), os.path.join(data_dir, "512bytes.copy.bin"))
def test_should_return_false_if_different_content(self, data_dir: str):
        assert not file.has_same_content(os.path.join(data_dir, "512bytes.bin"), os.path.join(data_dir, "1KB.bin"))
def test_should_raise_file_not_found_error_if_file_doesnt_exists(self, data_dir: str):
with pytest.raises(FileNotFoundError):
file.has_same_content(os.path.join(data_dir, "512bytes.bin"), os.path.join(data_dir, "another.bin"))
class TestFileWalker:
def test_should_exclude_files_in_excluded_directory(self):
fw = FileWalker([], ["**/node_modules"], [], ".")
assert fw.is_source_filtered("blabla/node_modules") is True
assert fw.is_source_filtered("blabla/node_modules/file") is True
assert fw.is_source_filtered("blabla/node_modules/subdirectory/file") is True
assert fw.is_source_filtered("blabla/another/subdirectory/file") is False
class TestFileUtils:
def test_get_file_content(self, data_dir: str, project_loader):
project_loader()
features.register(CoreFeature())
load_registered_features()
url = 'https://raw.githubusercontent.com/inetum-orleans/docker-devbox-ddb/b4f11276a37a4e4b1142f6b54b3d0763ccf5639e/ddb/__init__.py'
path = 'test_file_content.txt'
expected_file_content = '\n'.join([
'# -*- coding: utf-8 -*-',
'',
'from .__version__ import __version__',
''
])
url_content = FileUtils.get_file_content(url)
assert expected_file_content == url_content
url_content = FileUtils.get_file_content('file://' + os.path.join(config.path.project_home, path))
assert url_content == 'this is a file for test_file_content'
url_content = FileUtils.get_file_content('file://' + path)
assert url_content == 'this is a file for test_file_content'
| 40.137931
| 139
| 0.714777
| 2,063
| 0.886168
| 0
| 0
| 0
| 0
| 0
| 0
| 536
| 0.230241
|
f78b0bc589ac5d9426f05edb7fe27d25d4add06c
| 9,666
|
py
|
Python
|
test/test_datasets.py
|
pyronear/pyro-dataset
|
b6445f6051058f20f2fc821040ec3705dc60464c
|
[
"Apache-2.0"
] | null | null | null |
test/test_datasets.py
|
pyronear/pyro-dataset
|
b6445f6051058f20f2fc821040ec3705dc60464c
|
[
"Apache-2.0"
] | null | null | null |
test/test_datasets.py
|
pyronear/pyro-dataset
|
b6445f6051058f20f2fc821040ec3705dc60464c
|
[
"Apache-2.0"
] | 1
|
2022-02-14T12:37:24.000Z
|
2022-02-14T12:37:24.000Z
|
# Copyright (C) 2021, Pyronear contributors.
# This program is licensed under the GNU Affero General Public License version 3.
# See LICENSE or go to <https://www.gnu.org/licenses/agpl-3.0.txt> for full license details.
import unittest
import tempfile
from pathlib import Path
import json
from PIL.Image import Image
import pandas as pd
import random
import requests
import torch
from torch.utils.data import DataLoader
from torchvision.transforms import transforms
from torchvision.datasets import VisionDataset
from pyrodataset.wildfire import WildFireDataset, WildFireSplitter, computeSubSet
def generate_wildfire_dataset_fixture():
random.seed(42)
df = pd.DataFrame(columns=['imgFile', 'fire_id', 'fire'])
for i in range(974):
df = df.append({'imgFile': str(i).zfill(4) + '.jpg', 'fire_id': float(random.randint(1, 100)),
'fire': float(random.randint(0, 1))}, ignore_index=True)
return df
def generate_wildfire_subsampler_dataset_fixture():
df = pd.DataFrame(columns=['exploitable', 'fire', 'sequence', 'clf_confidence',
'loc_confidence', 'x', 'y', 't', 'stateStart',
'stateEnd', 'imgFile', 'fire_id', 'fBase'])
for b in range(10):
x = random.uniform(200, 500)
y = random.uniform(200, 500)
t = random.uniform(0, 100)
start = random.randint(0, 200)
end = random.randint(start + 11, 400)
base = str(b) + '.mp4'
imgsNb = random.sample(range(start, end), 10)
imgsNb.sort()
imgs = [str(b) + '_frame' + str(i) + '.png' for i in imgsNb]
fire_id = float(random.randint(1, 100))
fire = float(random.randint(0, 1))
for i in range(10):
df = df.append({'exploitable': True, 'fire': fire, 'sequence': 0,
'clf_confidence': 0, 'loc_confidence': 0, 'x': x, 'y': y, 't': t, 'stateStart': start,
'stateEnd': end, 'imgFile': imgs[i], 'fire_id': fire_id,
'fBase': base}, ignore_index=True)
return df
def get_wildfire_image():
#download image
url = 'https://media.springernature.com/w580h326/nature-cms/uploads/collections/' \
'Wildfire-and-ecosystems-Hero-d62e7fbbf36ce6915d4e3efef069ee0e.jpg'
response = requests.get(url)
# save image
file = open("test//0003.jpg", "wb")
file.write(response.content)
file.close()
class WildFireDatasetTester(unittest.TestCase):
def setUp(self):
self.path_to_frames = Path(__file__).parent
self.path_to_frames_str = str(self.path_to_frames)
self.wildfire_path = Path(__file__).parent / 'wildfire_dataset.csv'
self.wildfire_df = generate_wildfire_dataset_fixture()
self.wildfire_df.to_csv(self.wildfire_path)
get_wildfire_image()
def test_wildfire_correctly_init_from_path(self):
for path_to_frames in [self.path_to_frames, self.path_to_frames_str]:
wildfire = WildFireDataset(
metadata=self.wildfire_path,
path_to_frames=path_to_frames
)
self.assertEqual(len(wildfire), 974)
self.assertEqual(len(wildfire[3]), 2)
def test_wildfire_correctly_init_from_dataframe(self):
for path_to_frames in [self.path_to_frames, self.path_to_frames_str]:
wildfire = WildFireDataset(
metadata=self.wildfire_df,
path_to_frames=path_to_frames
)
self.assertEqual(len(wildfire), 974)
self.assertEqual(len(wildfire[3]), 2)
# try to get one image of wildfire (item 3 is authorized image fixture)
observation_3, metadata_3 = wildfire[3]
self.assertIsInstance(observation_3, Image) # image correctly loaded ?
self.assertEqual(observation_3.size, (580, 326))
# metadata correctly loaded ?
self.assertTrue(torch.equal(metadata_3, torch.tensor([self.wildfire_df.loc[3]['fire']])))
def test_wildfire_correctly_init_with_multiple_targets(self):
wildfire = WildFireDataset(
metadata=self.wildfire_df,
path_to_frames=self.path_to_frames,
transform=transforms.ToTensor(),
target_names=['fire', 'fire_id']
)
self.assertEqual(len(wildfire), 974)
# try to get one image of wildfire (item 3 is authorized image fixture)
observation_3, metadata_3 = wildfire[3]
self.assertIsInstance(observation_3, torch.Tensor) # image correctly loaded ?
self.assertEqual(observation_3.size(), torch.Size([3, 326, 580]))
self.assertTrue(torch.equal(metadata_3, torch.tensor([self.wildfire_df.loc[3]['fire'],
self.wildfire_df.loc[3]['fire_id']]))) # metadata correctly loaded ?
def test_invalid_csv_path_raises_exception(self):
with self.assertRaises(ValueError):
WildFireDataset(
metadata='bad_path.csv',
path_to_frames=self.path_to_frames
)
def test_wildfire_correctly_init_with_transform(self):
wildfire = WildFireDataset(
metadata=self.wildfire_path,
path_to_frames=self.path_to_frames,
transform=transforms.Compose([transforms.Resize((100, 66)), transforms.ToTensor()])
)
observation_3, _ = wildfire[3]
self.assertEqual(observation_3.size(), torch.Size((3, 100, 66)))
def test_dataloader_can_be_init_with_wildfire(self):
wildfire = WildFireDataset(metadata=self.wildfire_path, path_to_frames=self.path_to_frames)
DataLoader(wildfire, batch_size=64)
class WildFireSubSamplerTester(unittest.TestCase):
def setUp(self):
self.path_to_frames = Path(__file__).parent
self.wildfire_path = Path(__file__).parent / 'wildfire_dataset.csv'
self.wildfire_df = generate_wildfire_subsampler_dataset_fixture()
self.wildfire_df.to_csv(self.wildfire_path)
def test_good_size_after_subsamping(self):
self.assertEqual(len(self.wildfire_df), 100)
metadataSS = computeSubSet(self.wildfire_df, 2)
self.assertEqual(len(metadataSS), 20)
def test_metadata_changes_each_time(self):
metadataSS_1 = computeSubSet(self.wildfire_df, 2, seed=1)
metadataSS_2 = computeSubSet(self.wildfire_df, 2, seed=2)
self.assertEqual(len(metadataSS_1), 20)
self.assertEqual(len(metadataSS_2), 20)
self.assertFalse(metadataSS_1['imgFile'].values.tolist() == metadataSS_2['imgFile'].values.tolist())
def test_metadata_does_not_changes_with_same_seed(self):
metadataSS_1 = computeSubSet(self.wildfire_df, 2, seed=1)
metadataSS_2 = computeSubSet(self.wildfire_df, 2, seed=1)
self.assertEqual(len(metadataSS_1), 20)
self.assertEqual(len(metadataSS_2), 20)
self.assertTrue(metadataSS_1['imgFile'].values.tolist() == metadataSS_2['imgFile'].values.tolist())
    def test_increase_not_fire_samples(self):
metadataSS = computeSubSet(self.wildfire_path, 2, 1)
self.assertGreater(len(metadataSS), 20)
def test_invalid_csv_path_raises_exception(self):
with self.assertRaises(ValueError):
computeSubSet(
metadata='bad_path.csv',
frame_per_seq=2
)
class WildFireDatasetSplitter(unittest.TestCase):
def setUp(self):
self.path_to_frames = Path(__file__).parent
self.wildfire_df = generate_wildfire_dataset_fixture()
self.wildfire = WildFireDataset(metadata=self.wildfire_df, path_to_frames=self.path_to_frames)
def test_consistent_ratios_good_init(self):
ratios = {'train': 0.7, 'val': 0.15, 'test': 0.15}
splitter = WildFireSplitter(ratios)
self.assertEqual(ratios, splitter.ratios)
def test_inconsistent_ratios_raise_exception(self):
ratios = {'train': 0.9, 'val': 0.2, 'test': 0.1} # sum > 1
with self.assertRaises(ValueError):
WildFireSplitter(ratios)
def test_splitting_with_test_to_zero(self):
ratios = {'train': 0.8, 'val': 0.2, 'test': 0}
splitter = WildFireSplitter(ratios, seed=42)
splitter.fit(self.wildfire)
for (set_, ratio_) in splitter.ratios_.items():
self.assertAlmostEqual(ratio_, ratios[set_], places=1)
def test_splitting_gives_good_splits_size(self):
n_samples_expected = {'train': 688, 'val': 147, 'test': 139}
ratios = {'train': 0.7, 'val': 0.15, 'test': 0.15}
splitter = WildFireSplitter(ratios, seed=42)
splitter.fit(self.wildfire)
self.assertEqual(splitter.n_samples_, n_samples_expected)
for (set_, ratio_) in splitter.ratios_.items():
self.assertAlmostEqual(ratio_, ratios[set_], places=1)
def test_splitting_working_with_transforms(self):
ratios = {'train': 0.7, 'val': 0.15, 'test': 0.15}
transforms_expected = {'train': transforms.RandomCrop(10), 'val': None, 'test': None}
splitter = WildFireSplitter(ratios, transforms=transforms_expected)
splitter.fit(self.wildfire)
for (set_, transform_expected) in transforms_expected.items():
self.assertIs(getattr(splitter, set_).transform, transform_expected)
def test_splitting_with_unavailable_algorithm_raise_exception(self):
ratios = {'train': 0.7, 'val': 0.15, 'test': 0.15}
splitter = WildFireSplitter(ratios, algorithm='wtf')
with self.assertRaises(ValueError):
splitter.fit(self.wildfire)
if __name__ == '__main__':
unittest.main()
| 38.975806
| 114
| 0.658183
| 7,146
| 0.739292
| 0
| 0
| 0
| 0
| 0
| 0
| 1,278
| 0.132216
|
f78b62473ace335a7a8a2b3f902ea2441941d851
| 26,116
|
py
|
Python
|
python/dgllife/model/pretrain/__init__.py
|
VIGNESHinZONE/dgl-lifesci
|
9a892fd0935a7d8ab125530f54ce1e2a38b2377a
|
[
"Apache-2.0"
] | null | null | null |
python/dgllife/model/pretrain/__init__.py
|
VIGNESHinZONE/dgl-lifesci
|
9a892fd0935a7d8ab125530f54ce1e2a38b2377a
|
[
"Apache-2.0"
] | null | null | null |
python/dgllife/model/pretrain/__init__.py
|
VIGNESHinZONE/dgl-lifesci
|
9a892fd0935a7d8ab125530f54ce1e2a38b2377a
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
#
# pylint: disable= no-member, arguments-differ, invalid-name
#
# Utilities for using pre-trained models.
import torch
from dgl.data.utils import _get_dgl_url, download
from .moleculenet import *
from .generative_models import *
from .property_prediction import *
from .reaction import *
__all__ = ['load_pretrained']
url = {**moleculenet_url, **generative_url, **property_url, **reaction_url}
def download_and_load_checkpoint(model_name, model, model_postfix,
local_pretrained_path='pre_trained.pth', log=True):
"""Download pretrained model checkpoint
The model will be loaded to CPU.
Parameters
----------
model_name : str
Name of the model
model : nn.Module
Instantiated model instance
model_postfix : str
Postfix for pretrained model checkpoint
local_pretrained_path : str
Local name for the downloaded model checkpoint
log : bool
Whether to print progress for model loading
Returns
-------
model : nn.Module
Pretrained model
"""
url_to_pretrained = _get_dgl_url(model_postfix)
local_pretrained_path = '_'.join([model_name, local_pretrained_path])
download(url_to_pretrained, path=local_pretrained_path, log=log)
checkpoint = torch.load(local_pretrained_path, map_location='cpu')
model.load_state_dict(checkpoint['model_state_dict'])
if log:
print('Pretrained model loaded')
return model
# pylint: disable=I1101
def load_pretrained(model_name, log=True):
"""Load a pretrained model
Parameters
----------
model_name : str
Currently supported options include
* ``'GCN_Tox21'``: A GCN-based model for molecular property prediction on Tox21
* ``'GAT_Tox21'``: A GAT-based model for molecular property prediction on Tox21
* ``'Weave_Tox21'``: A Weave model for molecular property prediction on Tox21
* ``'AttentiveFP_Aromaticity'``: An AttentiveFP model for predicting number of
aromatic atoms on a subset of Pubmed
* ``'DGMG_ChEMBL_canonical'``: A DGMG model trained on ChEMBL with a canonical
atom order
* ``'DGMG_ChEMBL_random'``: A DGMG model trained on ChEMBL for molecule generation
with a random atom order
* ``'DGMG_ZINC_canonical'``: A DGMG model trained on ZINC for molecule generation
with a canonical atom order
* ``'DGMG_ZINC_random'``: A DGMG model pre-trained on ZINC for molecule generation
with a random atom order
* ``'JTNN_ZINC'``: A JTNN model pre-trained on ZINC for molecule generation
* ``'wln_center_uspto'``: A WLN model pre-trained on USPTO for reaction prediction
* ``'wln_rank_uspto'``: A WLN model pre-trained on USPTO for candidate product ranking
* ``'gin_supervised_contextpred'``: A GIN model pre-trained with supervised learning
and context prediction
* ``'gin_supervised_infomax'``: A GIN model pre-trained with supervised learning
and deep graph infomax
* ``'gin_supervised_edgepred'``: A GIN model pre-trained with supervised learning
and edge prediction
* ``'gin_supervised_masking'``: A GIN model pre-trained with supervised learning
and attribute masking
* ``'GCN_canonical_BACE'``: A GCN model trained on BACE with canonical
featurization for atoms
* ``'GCN_attentivefp_BACE'``: A GCN model trained on BACE with attentivefp
featurization for atoms
* ``'GAT_canonical_BACE'``: A GAT model trained on BACE with canonical
featurization for atoms
* ``'GAT_attentivefp_BACE'``: A GAT model trained on BACE with attentivefp
featurization for atoms
* ``'Weave_canonical_BACE'``: A Weave model trained on BACE with canonical
featurization for atoms and bonds
* ``'Weave_attentivefp_BACE'``: A Weave model trained on BACE with attentivefp
featurization for atoms and bonds
* ``'MPNN_canonical_BACE'``: An MPNN model trained on BACE with canonical
featurization for atoms and bonds
* ``'MPNN_attentivefp_BACE'``: An MPNN model trained on BACE with attentivefp
featurization for atoms and bonds
* ``'AttentiveFP_canonical_BACE'``: An AttentiveFP model trained on BACE with
canonical featurization for atoms and bonds
* ``'AttentiveFP_attentivefp_BACE'``: An AttentiveFP model trained on BACE with
attentivefp featurization for atoms and bonds
* ``'gin_supervised_contextpred_BACE'``: A GIN model pre-trained with supervised
learning and context prediction, and fine-tuned on BACE
* ``'gin_supervised_infomax_BACE'``: A GIN model pre-trained with supervised learning
and infomax, and fine-tuned on BACE
* ``'gin_supervised_edgepred_BACE'``: A GIN model pre-trained with supervised learning
and edge prediction, and fine-tuned on BACE
* ``'gin_supervised_masking_BACE'``: A GIN model pre-trained with supervised learning
and masking, and fine-tuned on BACE
* ``'NF_canonical_BACE'``: An NF model trained on BACE with canonical
featurization for atoms
* ``'GCN_canonical_BBBP'``: A GCN model trained on BBBP with canonical
featurization for atoms
* ``'GCN_attentivefp_BBBP'``: A GCN model trained on BBBP with attentivefp
featurization for atoms
* ``'GAT_canonical_BBBP'``: A GAT model trained on BBBP with canonical
featurization for atoms
* ``'GAT_attentivefp_BBBP'``: A GAT model trained on BBBP with attentivefp
featurization for atoms
* ``'Weave_canonical_BBBP'``: A Weave model trained on BBBP with canonical
featurization for atoms and bonds
* ``'Weave_attentivefp_BBBP'``: A Weave model trained on BBBP with attentivefp
featurization for atoms and bonds
* ``'MPNN_canonical_BBBP'``: An MPNN model trained on BBBP with canonical
featurization for atoms and bonds
* ``'MPNN_attentivefp_BBBP'``: An MPNN model trained on BBBP with attentivefp
featurization for atoms and bonds
* ``'AttentiveFP_canonical_BBBP'``: An AttentiveFP model trained on BBBP with
canonical featurization for atoms and bonds
* ``'AttentiveFP_attentivefp_BBBP'``: An AttentiveFP model trained on BBBP with
attentivefp featurization for atoms and bonds
* ``'gin_supervised_contextpred_BBBP'``: A GIN model pre-trained with supervised
learning and context prediction, and fine-tuned on BBBP
* ``'gin_supervised_infomax_BBBP'``: A GIN model pre-trained with supervised learning
and infomax, and fine-tuned on BBBP
* ``'gin_supervised_edgepred_BBBP'``: A GIN model pre-trained with supervised learning
and edge prediction, and fine-tuned on BBBP
* ``'gin_supervised_masking_BBBP'``: A GIN model pre-trained with supervised learning
and masking, and fine-tuned on BBBP
* ``'NF_canonical_BBBP'``: An NF model pre-trained on BBBP with canonical
featurization for atoms
* ``'GCN_canonical_ClinTox'``: A GCN model trained on ClinTox with canonical
featurization for atoms
* ``'GCN_attentivefp_ClinTox'``: A GCN model trained on ClinTox with attentivefp
featurization for atoms
* ``'GAT_canonical_ClinTox'``: A GAT model trained on ClinTox with canonical
featurization for atoms
* ``'GAT_attentivefp_ClinTox'``: A GAT model trained on ClinTox with attentivefp
featurization for atoms
* ``'Weave_canonical_ClinTox'``: A Weave model trained on ClinTox with canonical
featurization for atoms and bonds
* ``'Weave_attentivefp_ClinTox'``: A Weave model trained on ClinTox with attentivefp
featurization for atoms and bonds
* ``'MPNN_canonical_ClinTox'``: An MPNN model trained on ClinTox with canonical
featurization for atoms and bonds
* ``'MPNN_attentivefp_ClinTox'``: An MPNN model trained on ClinTox with attentivefp
featurization for atoms and bonds
* ``'AttentiveFP_canonical_ClinTox'``: An AttentiveFP model trained on ClinTox with
canonical featurization for atoms and bonds
        * ``'AttentiveFP_attentivefp_ClinTox'``: An AttentiveFP model trained on ClinTox with
attentivefp featurization for atoms and bonds
* ``'GCN_canonical_ESOL'``: A GCN model trained on ESOL with canonical
featurization for atoms
* ``'GCN_attentivefp_ESOL'``: A GCN model trained on ESOL with attentivefp
featurization for atoms
* ``'GAT_canonical_ESOL'``: A GAT model trained on ESOL with canonical
featurization for atoms
* ``'GAT_attentivefp_ESOL'``: A GAT model trained on ESOL with attentivefp
featurization for atoms
* ``'Weave_canonical_ESOL'``: A Weave model trained on ESOL with canonical
featurization for atoms and bonds
* ``'Weave_attentivefp_ESOL'``: A Weave model trained on ESOL with attentivefp
featurization for atoms and bonds
* ``'MPNN_canonical_ESOL'``: An MPNN model trained on ESOL with canonical
featurization for atoms and bonds
* ``'MPNN_attentivefp_ESOL'``: An MPNN model trained on ESOL with attentivefp
featurization for atoms and bonds
* ``'AttentiveFP_canonical_ESOL'``: An AttentiveFP model trained on ESOL with
canonical featurization for atoms and bonds
* ``'AttentiveFP_attentivefp_ESOL'``: An AttentiveFP model trained on ESOL with
attentivefp featurization for atoms and bonds
* ``'gin_supervised_contextpred_ESOL'``: A GIN model pre-trained with supervised
learning and context prediction, and fine-tuned on ESOL
* ``'gin_supervised_infomax_ESOL'``: A GIN model pre-trained with supervised learning
and infomax, and fine-tuned on ESOL
* ``'gin_supervised_edgepred_ESOL'``: A GIN model pre-trained with supervised learning
and edge prediction, and fine-tuned on ESOL
* ``'gin_supervised_masking_ESOL'``: A GIN model pre-trained with supervised learning
and masking, and fine-tuned on ESOL
* ``'GCN_canonical_FreeSolv'``: A GCN model trained on FreeSolv with canonical
featurization for atoms
* ``'GCN_attentivefp_FreeSolv'``: A GCN model trained on FreeSolv with attentivefp
featurization for atoms
* ``'GAT_canonical_FreeSolv'``: A GAT model trained on FreeSolv with canonical
featurization for atoms
* ``'GAT_attentivefp_FreeSolv'``: A GAT model trained on FreeSolv with attentivefp
featurization for atoms
* ``'Weave_canonical_FreeSolv'``: A Weave model trained on FreeSolv with canonical
featurization for atoms and bonds
* ``'Weave_attentivefp_FreeSolv'``: A Weave model trained on FreeSolv with attentivefp
featurization for atoms and bonds
* ``'MPNN_canonical_FreeSolv'``: An MPNN model trained on FreeSolv with canonical
featurization for atoms and bonds
* ``'MPNN_attentivefp_FreeSolv'``: An MPNN model trained on FreeSolv with attentivefp
featurization for atoms and bonds
* ``'AttentiveFP_canonical_FreeSolv'``: An AttentiveFP model trained on FreeSolv with
canonical featurization for atoms and bonds
* ``'AttentiveFP_attentivefp_FreeSolv'``: An AttentiveFP model trained on FreeSolv with
attentivefp featurization for atoms and bonds
* ``'gin_supervised_contextpred_FreeSolv'``: A GIN model pre-trained with supervised
learning and context prediction, and fine-tuned on FreeSolv
* ``'gin_supervised_infomax_FreeSolv'``: A GIN model pre-trained with supervised learning
and infomax, and fine-tuned on FreeSolv
* ``'gin_supervised_edgepred_FreeSolv'``: A GIN model pre-trained with supervised learning
and edge prediction, and fine-tuned on FreeSolv
* ``'gin_supervised_masking_FreeSolv'``: A GIN model pre-trained with supervised learning
and masking, and fine-tuned on FreeSolv
* ``'GCN_canonical_HIV'``: A GCN model trained on HIV with canonical
featurization for atoms
* ``'GCN_attentivefp_HIV'``: A GCN model trained on HIV with attentivefp
featurization for atoms
        * ``'GAT_canonical_HIV'``: A GAT model trained on HIV with canonical
          featurization for atoms
        * ``'GAT_attentivefp_HIV'``: A GAT model trained on HIV with attentivefp
          featurization for atoms
* ``'Weave_canonical_HIV'``: A Weave model trained on HIV with canonical
featurization for atoms and bonds
* ``'Weave_attentivefp_HIV'``: A Weave model trained on HIV with attentivefp
featurization for atoms and bonds
* ``'MPNN_canonical_HIV'``: An MPNN model trained on HIV with canonical
featurization for atoms and bonds
* ``'MPNN_attentivefp_HIV'``: An MPNN model trained on HIV with attentivefp
featurization for atoms and bonds
* ``'AttentiveFP_canonical_HIV'``: An AttentiveFP model trained on HIV with canonical
featurization for atoms and bonds
* ``'AttentiveFP_attentivefp_HIV'``: An AttentiveFP model trained on HIV with attentivefp
featurization for atoms and bonds
* ``'gin_supervised_contextpred_HIV'``: A GIN model pre-trained with supervised learning
and context prediction, and fine-tuned on HIV
* ``'gin_supervised_infomax_HIV'``: A GIN model pre-trained with supervised learning
and infomax, and fine-tuned on HIV
* ``'gin_supervised_edgepred_HIV'``: A GIN model pre-trained with supervised learning
and edge prediction, and fine-tuned on HIV
* ``'gin_supervised_masking_HIV'``: A GIN model pre-trained with supervised learning
and masking, and fine-tuned on HIV
* ``'NF_canonical_HIV'``: An NF model trained on HIV with canonical
featurization for atoms
* ``'GCN_canonical_Lipophilicity'``: A GCN model trained on Lipophilicity with canonical
featurization for atoms
* ``'GCN_attentivefp_Lipophilicity'``: A GCN model trained on Lipophilicity with
attentivefp featurization for atoms
* ``'GAT_canonical_Lipophilicity'``: A GAT model trained on Lipophilicity with canonical
featurization for atoms
* ``'GAT_attentivefp_Lipophilicity'``: A GAT model trained on Lipophilicity with
attentivefp featurization for atoms
* ``'Weave_canonical_Lipophilicity'``: A Weave model trained on Lipophilicity with
canonical featurization for atoms and bonds
* ``'Weave_attentivefp_Lipophilicity'``: A Weave model trained on Lipophilicity with
attentivefp featurization for atoms and bonds
* ``'MPNN_canonical_Lipophilicity'``: An MPNN model trained on Lipophilicity with
canonical featurization for atoms and bonds
* ``'MPNN_attentivefp_Lipophilicity'``: An MPNN model trained on Lipophilicity with
attentivefp featurization for atoms and bonds
* ``'AttentiveFP_canonical_Lipophilicity'``: An AttentiveFP model trained on
Lipophilicity with canonical featurization for atoms and bonds
* ``'AttentiveFP_attentivefp_Lipophilicity'``: An AttentiveFP model trained on
Lipophilicity with attentivefp featurization for atoms and bonds
* ``'gin_supervised_contextpred_Lipophilicity'``: A GIN model pre-trained with supervised
learning and context prediction, and fine-tuned on Lipophilicity
* ``'gin_supervised_infomax_Lipophilicity'``: A GIN model pre-trained with supervised
learning and infomax, and fine-tuned on Lipophilicity
* ``'gin_supervised_edgepred_Lipophilicity'``: A GIN model pre-trained with supervised
learning and edge prediction, and fine-tuned on Lipophilicity
* ``'gin_supervised_masking_Lipophilicity'``: A GIN model pre-trained with supervised
learning and masking, and fine-tuned on Lipophilicity
* ``'GCN_canonical_MUV'``: A GCN model trained on MUV with canonical
featurization for atoms
* ``'GCN_attentivefp_MUV'``: A GCN model trained on MUV with attentivefp
featurization for atoms
* ``'GAT_canonical_MUV'``: A GAT model trained on MUV with canonical
featurization for atoms
* ``'GAT_attentivefp_MUV'``: A GAT model trained on MUV with attentivefp
featurization for atoms
* ``'Weave_canonical_MUV'``: A Weave model trained on MUV with canonical
featurization for atoms and bonds
* ``'Weave_attentivefp_MUV'``: A Weave model trained on MUV with attentivefp
featurization for atoms and bonds
* ``'MPNN_canonical_MUV'``: An MPNN model trained on MUV with canonical
featurization for atoms and bonds
* ``'MPNN_attentivefp_MUV'``: An MPNN model trained on MUV with attentivefp
featurization for atoms and bonds
* ``'AttentiveFP_canonical_MUV'``: An AttentiveFP model trained on MUV with canonical
featurization for atoms and bonds
* ``'AttentiveFP_attentivefp_MUV'``: An AttentiveFP model trained on MUV with attentivefp
featurization for atoms and bonds
* ``'gin_supervised_contextpred_MUV'``: A GIN model pre-trained with supervised learning
and context prediction, and fine-tuned on MUV
* ``'gin_supervised_infomax_MUV'``: A GIN model pre-trained with supervised learning
and infomax, and fine-tuned on MUV
* ``'gin_supervised_edgepred_MUV'``: A GIN model pre-trained with supervised learning
and edge prediction, and fine-tuned on MUV
* ``'gin_supervised_masking_MUV'``: A GIN model pre-trained with supervised learning
and masking, and fine-tuned on MUV
* ``'GCN_canonical_PCBA'``: A GCN model trained on PCBA with canonical
featurization for atoms
* ``'GCN_attentivefp_PCBA'``: A GCN model trained on PCBA with attentivefp
featurization for atoms
* ``'GAT_canonical_PCBA'``: A GAT model trained on PCBA with canonical
featurization for atoms
* ``'GAT_attentivefp_PCBA'``: A GAT model trained on PCBA with attentivefp
featurization for atoms
* ``'Weave_canonical_PCBA'``: A Weave model trained on PCBA with canonical
featurization for atoms and bonds
* ``'Weave_attentivefp_PCBA'``: A Weave model trained on PCBA with attentivefp
featurization for atoms and bonds
* ``'MPNN_canonical_PCBA'``: An MPNN model trained on PCBA with canonical
featurization for atoms and bonds
* ``'MPNN_attentivefp_PCBA'``: An MPNN model trained on PCBA with attentivefp
featurization for atoms and bonds
* ``'AttentiveFP_canonical_PCBA'``: An AttentiveFP model trained on PCBA with
canonical featurization for atoms and bonds
* ``'AttentiveFP_attentivefp_PCBA'``: An AttentiveFP model trained on PCBA with
attentivefp featurization for atoms and bonds
* ``'GCN_canonical_SIDER'``: A GCN model trained on SIDER with canonical
featurization for atoms
* ``'GCN_attentivefp_SIDER'``: A GCN model trained on SIDER with attentivefp
featurization for atoms
* ``'GAT_canonical_SIDER'``: A GAT model trained on SIDER with canonical
featurization for atoms
* ``'GAT_attentivefp_SIDER'``: A GAT model trained on SIDER with attentivefp
featurization for atoms
* ``'Weave_canonical_SIDER'``: A Weave model trained on SIDER with canonical
featurization for atoms and bonds
* ``'Weave_attentivefp_SIDER'``: A Weave model trained on SIDER with attentivefp
featurization for atoms and bonds
* ``'MPNN_canonical_SIDER'``: An MPNN model trained on SIDER with canonical
featurization for atoms and bonds
* ``'MPNN_attentivefp_SIDER'``: An MPNN model trained on SIDER with attentivefp
featurization for atoms and bonds
* ``'AttentiveFP_canonical_SIDER'``: An AttentiveFP model trained on SIDER with
canonical featurization for atoms and bonds
* ``'AttentiveFP_attentivefp_SIDER'``: An AttentiveFP model trained on SIDER with
attentivefp featurization for atoms and bonds
* ``'gin_supervised_contextpred_SIDER'``: A GIN model pre-trained with supervised learning
and context prediction, and fine-tuned on SIDER
* ``'gin_supervised_infomax_SIDER'``: A GIN model pre-trained with supervised learning
and infomax, and fine-tuned on SIDER
* ``'gin_supervised_edgepred_SIDER'``: A GIN model pre-trained with supervised learning
and edge prediction, and fine-tuned on SIDER
* ``'gin_supervised_masking_SIDER'``: A GIN model pre-trained with supervised learning
and masking, and fine-tuned on SIDER
* ``'NF_canonical_SIDER'``: An NF model trained on SIDER with canonical
featurization for atoms
* ``'GCN_canonical_Tox21'``: A GCN model trained on Tox21 with canonical
featurization for atoms
* ``'GCN_attentivefp_Tox21'``: A GCN model trained on Tox21 with attentivefp
featurization for atoms
* ``'GAT_canonical_Tox21'``: A GAT model trained on Tox21 with canonical
featurization for atoms
* ``'GAT_attentivefp_Tox21'``: A GAT model trained on Tox21 with attentivefp
featurization for atoms
* ``'Weave_canonical_Tox21'``: A Weave model trained on Tox21 with canonical
featurization for atoms and bonds
* ``'Weave_attentivefp_Tox21'``: A Weave model trained on Tox21 with attentivefp
featurization for atoms and bonds
* ``'MPNN_canonical_Tox21'``: An MPNN model trained on Tox21 with canonical
featurization for atoms and bonds
* ``'MPNN_attentivefp_Tox21'``: An MPNN model trained on Tox21 with attentivefp
featurization for atoms and bonds
* ``'AttentiveFP_canonical_Tox21'``: An AttentiveFP model trained on Tox21 with
canonical featurization for atoms and bonds
* ``'AttentiveFP_attentivefp_Tox21'``: An AttentiveFP model trained on Tox21 with
attentivefp featurization for atoms and bonds
* ``'gin_supervised_contextpred_Tox21'``: A GIN model pre-trained with supervised
learning and context prediction, and fine-tuned on Tox21
* ``'gin_supervised_infomax_Tox21'``: A GIN model pre-trained with supervised learning
and infomax, and fine-tuned on Tox21
* ``'gin_supervised_edgepred_Tox21'``: A GIN model pre-trained with supervised learning
and edge prediction, and fine-tuned on Tox21
* ``'gin_supervised_masking_Tox21'``: A GIN model pre-trained with supervised learning
and masking, and fine-tuned on Tox21
* ``'NF_canonical_Tox21'``: An NF model trained on Tox21 with canonical
featurization for atoms
* ``'GCN_canonical_ToxCast'``: A GCN model trained on ToxCast with canonical
featurization for atoms
* ``'GCN_attentivefp_ToxCast'``: A GCN model trained on ToxCast with attentivefp
featurization for atoms
* ``'GAT_canonical_ToxCast'``: A GAT model trained on ToxCast with canonical
featurization for atoms
* ``'GAT_attentivefp_ToxCast'``: A GAT model trained on ToxCast with attentivefp
featurization for atoms
* ``'Weave_canonical_ToxCast'``: A Weave model trained on ToxCast with canonical
featurization for atoms and bonds
* ``'Weave_attentivefp_ToxCast'``: A Weave model trained on ToxCast with attentivefp
featurization for atoms and bonds
* ``'MPNN_canonical_ToxCast'``: An MPNN model trained on ToxCast with canonical
featurization for atoms and bonds
* ``'MPNN_attentivefp_ToxCast'``: An MPNN model trained on ToxCast with attentivefp
featurization for atoms and bonds
* ``'AttentiveFP_canonical_ToxCast'``: An AttentiveFP model trained on ToxCast with
canonical featurization for atoms and bonds
* ``'AttentiveFP_attentivefp_ToxCast'``: An AttentiveFP model trained on ToxCast with
attentivefp featurization for atoms and bonds
* ``'gin_supervised_contextpred_ToxCast'``: A GIN model pre-trained with supervised
learning and context prediction, and fine-tuned on ToxCast
* ``'gin_supervised_infomax_ToxCast'``: A GIN model pre-trained with supervised learning
and infomax, and fine-tuned on ToxCast
* ``'gin_supervised_edgepred_ToxCast'``: A GIN model pre-trained with supervised learning
and edge prediction, and fine-tuned on ToxCast
* ``'gin_supervised_masking_ToxCast'``: A GIN model pre-trained with supervised learning
and masking, and fine-tuned on ToxCast
* ``'NF_canonical_ToxCast'``: An NF model trained on ToxCast with canonical
          featurization for atoms
log : bool
Whether to print progress for model loading
Returns
-------
model
"""
if model_name not in url:
raise RuntimeError("Cannot find a pretrained model with name {}".format(model_name))
for func in [create_moleculenet_model, create_generative_model,
create_property_model, create_reaction_model]:
model = func(model_name)
if model is not None:
break
return download_and_load_checkpoint(model_name, model, url[model_name], log=log)
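# Minimal usage sketch, assuming this loader is exposed publicly as
# ``dgllife.model.load_pretrained`` (the enclosing function's name is not shown
# in this excerpt) and that the checkpoint for the requested name can be
# downloaded; the model name is one of the options documented above.
if __name__ == '__main__':
    from dgllife.model import load_pretrained
    gcn_tox21 = load_pretrained('GCN_canonical_Tox21')  # fetches and caches the checkpoint
    gcn_tox21.eval()  # the returned object behaves as a torch.nn.Module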
| 59.219955
| 98
| 0.689501
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 24,918
| 0.954128
|
f78b86d747c672620428e6bd3b8435d4dd3f3512
| 5,608
|
py
|
Python
|
src/pyrobot/vrep_locobot/camera.py
|
gujralsanyam22/pyrobot
|
a0448714857b684d8b280f710e9304988524d2e0
|
[
"MIT"
] | 2,150
|
2019-06-12T20:55:41.000Z
|
2022-03-21T07:14:51.000Z
|
src/pyrobot/vrep_locobot/camera.py
|
gujralsanyam22/pyrobot
|
a0448714857b684d8b280f710e9304988524d2e0
|
[
"MIT"
] | 124
|
2019-06-22T17:12:27.000Z
|
2022-02-26T11:43:13.000Z
|
src/pyrobot/vrep_locobot/camera.py
|
gujralsanyam22/pyrobot
|
a0448714857b684d8b280f710e9304988524d2e0
|
[
"MIT"
] | 329
|
2019-06-13T03:03:54.000Z
|
2022-03-30T07:04:55.000Z
|
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import pyrobot.utils.util as prutil
from pyrobot.core import Camera
from pyrobot.utils.util import try_cv2_import
cv2 = try_cv2_import()
from cv_bridge import CvBridge, CvBridgeError
from pyrep.objects.vision_sensor import VisionSensor
from pyrep.const import ObjectType, PerspectiveMode, RenderMode
from pyrep.objects.joint import Joint
class LoCoBotCamera(Camera):
"""docstring for SimpleCamera"""
def __init__(self, configs, simulator):
self.sim = simulator.sim
self.rgb_cam = VisionSensor("kinect_rgb")
self.depth_cam = VisionSensor("kinect_depth")
self.rgb_cam.set_render_mode(RenderMode.OPENGL3)
self.depth_cam.set_render_mode(RenderMode.OPENGL3)
# Pan and tilt related variables.
self.pan_joint = Joint("LoCoBot_head_pan_joint")
self.tilt_joint = Joint("LoCoBot_head_tilt_joint")
def get_rgb(self):
return self.rgb_cam.capture_rgb()
def get_depth(self):
return self.depth_cam.capture_depth()
def get_rgb_depth(self):
return self.get_rgb(), self.get_depth()
def get_intrinsics(self):
# Todo: Remove this after we fix intrinsics
raise NotImplementedError
"""
Returns the instrinsic matrix of the camera
:return: the intrinsic matrix (shape: :math:`[3, 3]`)
:rtype: np.ndarray
"""
# fx = self.configs['Camera.fx']
# fy = self.configs['Camera.fy']
# cx = self.configs['Camera.cx']
# cy = self.configs['Camera.cy']
        # Unreachable while the NotImplementedError above is in place; fx, fy,
        # cx and cy are commented out, so the construction below is kept only
        # for reference.
        # Itc = np.array([[fx, 0, cx], [0, fy, cy], [0, 0, 1]])
        # return Itc
def pix_to_3dpt(self, rs, cs, in_cam=False):
"""
Get the 3D points of the pixels in RGB images.
:param rs: rows of interest in the RGB image.
It can be a list or 1D numpy array
which contains the row indices.
The default value is None,
which means all rows.
:param cs: columns of interest in the RGB image.
It can be a list or 1D numpy array
which contains the column indices.
The default value is None,
which means all columns.
:param in_cam: return points in camera frame,
otherwise, return points in base frame
:type rs: list or np.ndarray
:type cs: list or np.ndarray
:type in_cam: bool
:returns: tuple (pts, colors)
pts: point coordinates in world frame
(shape: :math:`[N, 3]`)
colors: rgb values for pts_in_cam
(shape: :math:`[N, 3]`)
:rtype: tuple(np.ndarray, np.ndarray)
"""
raise NotImplementedError
def get_current_pcd(self, in_cam=True):
"""
Return the point cloud at current time step (one frame only)
:param in_cam: return points in camera frame,
otherwise, return points in base frame
:type in_cam: bool
:returns: tuple (pts, colors)
pts: point coordinates in world frame (shape: :math:`[N, 3]`)
colors: rgb values for pts_in_cam (shape: :math:`[N, 3]`)
:rtype: tuple(np.ndarray, np.ndarray)
"""
raise NotImplementedError
@property
def state(self):
"""
Return the current pan and tilt joint angles of the robot camera.
:return:
pan_tilt: A list the form [pan angle, tilt angle]
:rtype: list
"""
return self.get_state()
def get_state(self):
"""
Return the current pan and tilt joint angles of the robot camera.
:return:
pan_tilt: A list the form [pan angle, tilt angle]
:rtype: list
"""
return [self.get_pan(), self.get_tilt()]
def get_pan(self):
"""
Return the current pan joint angle of the robot camera.
:return:
pan: Pan joint angle
:rtype: float
"""
return self.pan_joint.get_joint_position()
def get_tilt(self):
"""
Return the current tilt joint angle of the robot camera.
:return:
tilt: Tilt joint angle
:rtype: float
"""
return self.tilt_joint.get_joint_position()
def set_pan(self, pan, wait=True):
"""
Sets the pan joint angle to the specified value.
:param pan: value to be set for pan joint
:param wait: wait until the pan angle is set to
the target angle.
:type pan: float
:type wait: bool
"""
self.pan_joint.set_joint_position(pan)
# [self.sim.step() for _ in range(50)]
def set_tilt(self, tilt, wait=True):
"""
Sets the tilt joint angle to the specified value.
:param tilt: value to be set for the tilt joint
:param wait: wait until the tilt angle is set to
the target angle.
:type tilt: float
:type wait: bool
"""
self.tilt_joint.set_joint_position(tilt)
def set_pan_tilt(self, pan, tilt, wait=True):
"""
Sets both the pan and tilt joint angles to the specified values.
:param pan: value to be set for pan joint
:param tilt: value to be set for the tilt joint
:param wait: wait until the pan and tilt angles are set to
the target angles.
:type pan: float
:type tilt: float
:type wait: bool
"""
self.set_pan(pan)
self.set_tilt(tilt)
def reset(self):
"""
This function resets the pan and tilt joints by actuating
them to their home configuration.
"""
self.set_pan_tilt(self.configs.CAMERA.RESET_PAN, self.configs.CAMERA.RESET_TILT)
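# Minimal usage sketch, assuming the usual PyRobot entry point where
# ``Robot('vrep_locobot')`` wires this class up as ``bot.camera`` (that wiring
# happens outside this file):
#
#     from pyrobot import Robot
#     bot = Robot('vrep_locobot')
#     rgb, depth = bot.camera.get_rgb_depth()
#     bot.camera.set_pan_tilt(0.2, 0.5)   # radians
#     print(bot.camera.get_state())       # [pan, tilt]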
| 26.578199
| 88
| 0.63766
| 5,066
| 0.903352
| 0
| 0
| 235
| 0.041904
| 0
| 0
| 3,479
| 0.620364
|
f78c09a2c8173da1710410d85fa882bb6b674769
| 105
|
py
|
Python
|
main.py
|
lmkhkm/SerialMonitor
|
05b00e7a05e2a71ddfc9b0e30e42a83b073f88e1
|
[
"MIT"
] | null | null | null |
main.py
|
lmkhkm/SerialMonitor
|
05b00e7a05e2a71ddfc9b0e30e42a83b073f88e1
|
[
"MIT"
] | 1
|
2021-05-08T20:52:27.000Z
|
2021-05-08T20:52:27.000Z
|
main.py
|
lmkhkm/SerialMonitor
|
05b00e7a05e2a71ddfc9b0e30e42a83b073f88e1
|
[
"MIT"
] | null | null | null |
import serial
ser = serial.Serial('COM7',115200, timeout=1)
while True:
print("R: ", ser.readline())
| 21
| 45
| 0.67619
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 11
| 0.104762
|
f78d0e1c968d2c094e91caabfbf0e1efad8f0eb3
| 1,009
|
py
|
Python
|
examples/avro/py/generate_avro_users.py
|
kikkomep/pydoop
|
4f855ef775b925b8c9f2adf1c0ef13337323ee24
|
[
"Apache-2.0"
] | null | null | null |
examples/avro/py/generate_avro_users.py
|
kikkomep/pydoop
|
4f855ef775b925b8c9f2adf1c0ef13337323ee24
|
[
"Apache-2.0"
] | null | null | null |
examples/avro/py/generate_avro_users.py
|
kikkomep/pydoop
|
4f855ef775b925b8c9f2adf1c0ef13337323ee24
|
[
"Apache-2.0"
] | null | null | null |
import sys
import random
import avro.schema
from avro.datafile import DataFileWriter
from avro.io import DatumWriter
NAME_POOL = ['george', 'john', 'paul', 'ringo']
OFFICE_POOL = ['office-%d' % _ for _ in xrange(4)]
COLOR_POOL = ['black', 'cyan', 'magenta', 'yellow']
def main(argv):
try:
schema_fn = argv[1]
n_users = int(argv[2])
avro_fn = argv[3]
except IndexError:
sys.exit('Usage: %s SCHEMA_FILE N_USERS AVRO_FILE' % argv[0])
with open(schema_fn) as f_in:
schema = avro.schema.parse(f_in.read())
with open(avro_fn, 'wb') as f_out:
writer = DataFileWriter(f_out, DatumWriter(), schema)
for i in xrange(n_users):
writer.append({
'name': random.choice(NAME_POOL),
'office': random.choice(OFFICE_POOL),
'favorite_color': random.choice(COLOR_POOL),
'favorite_number': i,
})
writer.close()
if __name__ == '__main__':
main(sys.argv)
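# Read-back sketch for the generated file, assuming the same ``avro`` package
# and the Python 2 era API used above:
#     from avro.datafile import DataFileReader
#     from avro.io import DatumReader
#     with open(avro_fn, 'rb') as f_in:
#         reader = DataFileReader(f_in, DatumReader())
#         for user in reader:
#             print user['name'], user['favorite_color']
#         reader.close()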
| 28.027778
| 69
| 0.597621
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 170
| 0.168484
|
f78d23bb7041a7dd86f556d3f4cd134329c150dd
| 2,604
|
py
|
Python
|
tests/utils_tests/testing_tests/assertions_tests/test_assert_is_bbox_dataset.py
|
souravsingh/chainercv
|
8f76510472bc95018c183e72f37bc6c34a89969c
|
[
"MIT"
] | 1
|
2018-12-27T03:47:45.000Z
|
2018-12-27T03:47:45.000Z
|
tests/utils_tests/testing_tests/assertions_tests/test_assert_is_bbox_dataset.py
|
souravsingh/chainercv
|
8f76510472bc95018c183e72f37bc6c34a89969c
|
[
"MIT"
] | null | null | null |
tests/utils_tests/testing_tests/assertions_tests/test_assert_is_bbox_dataset.py
|
souravsingh/chainercv
|
8f76510472bc95018c183e72f37bc6c34a89969c
|
[
"MIT"
] | 2
|
2019-12-16T02:20:26.000Z
|
2022-01-17T02:00:49.000Z
|
import numpy as np
import unittest
from chainer.dataset import DatasetMixin
from chainer import testing
from chainercv.utils import assert_is_bbox_dataset
from chainercv.utils import generate_random_bbox
class BboxDataset(DatasetMixin):
def __init__(self, options=(), empty_bbox=False):
self.options = options
self.empty_bbox = empty_bbox
def __len__(self):
return 10
def get_example(self, i):
img = np.random.randint(0, 256, size=(3, 48, 64))
if self.empty_bbox:
n_bbox = 0
else:
n_bbox = np.random.randint(10, 20)
bbox = generate_random_bbox(n_bbox, (48, 64), 5, 20)
label = np.random.randint(0, 20, size=n_bbox).astype(np.int32)
return (img, bbox, label) + self.options
class InvalidSampleSizeDataset(BboxDataset):
def get_example(self, i):
img, bbox, label = super(
InvalidSampleSizeDataset, self).get_example(i)[:3]
return img, bbox
class InvalidImageDataset(BboxDataset):
def get_example(self, i):
img, bbox, label = super(InvalidImageDataset, self).get_example(i)[:3]
return img[0], bbox, label
class InvalidBboxDataset(BboxDataset):
def get_example(self, i):
img, bbox, label = super(InvalidBboxDataset, self).get_example(i)[:3]
bbox += 1000
return img, bbox, label
class InvalidLabelDataset(BboxDataset):
def get_example(self, i):
img, bbox, label = super(InvalidLabelDataset, self).get_example(i)[:3]
label += 1000
return img, bbox, label
class MismatchLengthDataset(BboxDataset):
def get_example(self, i):
img, bbox, label = super(
MismatchLengthDataset, self).get_example(i)[:3]
return img, bbox, label[1:]
@testing.parameterize(
{'dataset': BboxDataset(), 'valid': True},
{'dataset': BboxDataset(empty_bbox=True), 'valid': True},
{'dataset': BboxDataset(('option',)), 'valid': True},
{'dataset': InvalidSampleSizeDataset(), 'valid': False},
{'dataset': InvalidImageDataset(), 'valid': False},
{'dataset': InvalidBboxDataset(), 'valid': False},
{'dataset': InvalidLabelDataset(), 'valid': False},
{'dataset': MismatchLengthDataset(), 'valid': False},
)
class TestAssertIsBboxDataset(unittest.TestCase):
def test_assert_is_bbox_dataset(self):
if self.valid:
assert_is_bbox_dataset(self.dataset, 20)
else:
with self.assertRaises(AssertionError):
assert_is_bbox_dataset(self.dataset, 20)
testing.run_module(__name__, __file__)
| 28.304348
| 78
| 0.656298
| 1,858
| 0.713518
| 0
| 0
| 770
| 0.295699
| 0
| 0
| 136
| 0.052227
|
f78da1263e700a0f21ebec44c019c94ee9c11482
| 3,002
|
py
|
Python
|
seahub/utils/http.py
|
Xandersoft/seahub
|
f75f238b3e0a907e8a8003f419e367fa36e992e7
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
seahub/utils/http.py
|
Xandersoft/seahub
|
f75f238b3e0a907e8a8003f419e367fa36e992e7
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
seahub/utils/http.py
|
Xandersoft/seahub
|
f75f238b3e0a907e8a8003f419e367fa36e992e7
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
# Copyright (c) 2012-2016 Seafile Ltd.
from __future__ import unicode_literals
import unicodedata
import urlparse
import json
from functools import wraps
from django.http import HttpResponse, HttpResponseBadRequest, HttpResponseForbidden
class _HTTPException(Exception):
def __init__(self, message=''):
self.message = message
def __str__(self):
return '%s: %s' % (self.__class__.__name__, self.message)
class BadRequestException(_HTTPException):
pass
class RequestForbbiddenException(_HTTPException):
pass
JSON_CONTENT_TYPE = 'application/json; charset=utf-8'
def json_response(func):
@wraps(func)
def wrapped(*a, **kw):
try:
result = func(*a, **kw)
except BadRequestException, e:
return HttpResponseBadRequest(e.message)
except RequestForbbiddenException, e:
            return HttpResponseForbidden(e.message)
if isinstance(result, HttpResponse):
return result
else:
return HttpResponse(json.dumps(result), status=200,
content_type=JSON_CONTENT_TYPE)
return wrapped
def int_param(request, key):
v = request.GET.get(key, None)
if not v:
raise BadRequestException()
try:
return int(v)
except ValueError:
raise BadRequestException()
def is_safe_url(url, host=None):
"""
https://github.com/django/django/blob/fc6d147a63f89795dbcdecb0559256470fff4380/django/utils/http.py
Return ``True`` if the url is a safe redirection (i.e. it doesn't point to
a different host and uses a safe scheme).
Always returns ``False`` on an empty url.
"""
if url is not None:
url = url.strip()
if not url:
return False
# Chrome treats \ completely as / in paths but it could be part of some
# basic auth credentials so we need to check both URLs.
return _is_safe_url(url, host) and _is_safe_url(url.replace('\\', '/'), host)
def _is_safe_url(url, host):
# Chrome considers any URL with more than two slashes to be absolute, but
# urlparse is not so flexible. Treat any url with three slashes as unsafe.
if url.startswith('///'):
return False
url_info = urlparse.urlparse(url)
# Forbid URLs like http:///example.com - with a scheme, but without a hostname.
# In that URL, example.com is not the hostname but, a path component. However,
# Chrome will still consider example.com to be the hostname, so we must not
# allow this syntax.
if not url_info.netloc and url_info.scheme:
return False
# Forbid URLs that start with control characters. Some browsers (like
# Chrome) ignore quite a few control characters at the start of a
# URL and might consider the URL as scheme relative.
if unicodedata.category(url[0])[0] == 'C':
return False
return ((not url_info.netloc or url_info.netloc == host) and
(not url_info.scheme or url_info.scheme in ['http', 'https']))
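# Minimal usage sketch for the decorator above, assuming a Django view module
# that imports it (the view name and query parameter are illustrative only):
#     @json_response
#     def repo_info(request):
#         repo_id = int_param(request, 'repo_id')
#         return {'repo_id': repo_id}
# A missing or non-integer ``repo_id`` raises BadRequestException, which the
# decorator turns into an HTTP 400; returning a dict yields a JSON body with
# Content-Type ``application/json; charset=utf-8``.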
| 35.317647
| 103
| 0.676549
| 298
| 0.099267
| 0
| 0
| 492
| 0.163891
| 0
| 0
| 1,107
| 0.368754
|
f78df0be4fb31a9f76737c561c3fe76708dbdcd5
| 187
|
py
|
Python
|
biopipen/core/defaults.py
|
pwwang/bioprocs
|
4055a62eed8ef4fba0a5f1be430af72a9e22143d
|
[
"MIT"
] | 4
|
2018-01-16T08:25:53.000Z
|
2019-01-03T23:41:31.000Z
|
biopipen/core/defaults.py
|
pwwang/bioprocs
|
4055a62eed8ef4fba0a5f1be430af72a9e22143d
|
[
"MIT"
] | 3
|
2018-05-22T20:11:46.000Z
|
2019-08-19T17:37:04.000Z
|
biopipen/core/defaults.py
|
pwwang/bioprocs
|
4055a62eed8ef4fba0a5f1be430af72a9e22143d
|
[
"MIT"
] | 1
|
2019-01-14T23:14:24.000Z
|
2019-01-14T23:14:24.000Z
|
"""Provide default settgins"""
from pathlib import Path
BIOPIPEN_DIR = Path(__file__).parent.parent.resolve()
REPORT_DIR = BIOPIPEN_DIR / "reports"
SCRIPT_DIR = BIOPIPEN_DIR / "scripts"
| 26.714286
| 53
| 0.770053
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 48
| 0.256684
|
f7904ac31330990ac63a4b3068ea84654cf9b168
| 6,172
|
py
|
Python
|
pextant/sextant.py
|
norheim/pextant
|
f4235719279c0e6f178ae1e0f8b1ea3346533915
|
[
"MIT"
] | null | null | null |
pextant/sextant.py
|
norheim/pextant
|
f4235719279c0e6f178ae1e0f8b1ea3346533915
|
[
"MIT"
] | 1
|
2019-12-03T03:52:41.000Z
|
2019-12-04T14:50:36.000Z
|
pextant/sextant.py
|
norheim/pextant
|
f4235719279c0e6f178ae1e0f8b1ea3346533915
|
[
"MIT"
] | 1
|
2019-12-03T02:37:57.000Z
|
2019-12-03T02:37:57.000Z
|
from flask_settings import GEOTIFF_FULL_PATH
import sys
import traceback
sys.path.append('../')
import numpy as np
import json
from datetime import timedelta
from functools import update_wrapper
from pextant.EnvironmentalModel import GDALMesh
from pextant.explorers import Astronaut
from pextant.analysis.loadWaypoints import JSONloader
from pextant.lib.geoshapely import GeoPolygon, LAT_LONG
from pextant.solvers.astarMesh import astarSolver
from flask import Flask
from flask import make_response, request, current_app
app = Flask(__name__)
def crossdomain(origin=None, methods=None, headers=None,
max_age=21600, attach_to_all=True,
automatic_options=True):
if methods is not None:
methods = ', '.join(sorted(x.upper() for x in methods))
if headers is not None and not isinstance(headers, basestring):
headers = ', '.join(x.upper() for x in headers)
if not isinstance(origin, basestring):
origin = ', '.join(origin)
if isinstance(max_age, timedelta):
max_age = max_age.total_seconds()
def get_methods():
if methods is not None:
return methods
options_resp = current_app.make_default_options_response()
return options_resp.headers['allow']
def decorator(f):
def wrapped_function(*args, **kwargs):
if automatic_options and request.method == 'OPTIONS':
resp = current_app.make_default_options_response()
else:
resp = make_response(f(*args, **kwargs))
if not attach_to_all and request.method != 'OPTIONS':
return resp
h = resp.headers
h['Access-Control-Allow-Origin'] = origin
h['Access-Control-Allow-Methods'] = get_methods()
h['Access-Control-Max-Age'] = str(max_age)
if headers is not None:
h['Access-Control-Allow-Headers'] = headers
return resp
f.provide_automatic_options = False
return update_wrapper(wrapped_function, f)
return decorator
def main(argv):
print 'STARTING SEXTANT'
geotiff_full_path = ""
try:
geotiff_full_path = argv[0]
except IndexError:
# print 'Syntax is "sextant <inputfile>"'
pass
if not geotiff_full_path or geotiff_full_path == 'sextant:app':
geotiff_full_path = GEOTIFF_FULL_PATH
print geotiff_full_path
gdal_mesh = GDALMesh(geotiff_full_path)
explorer = Astronaut(80)
solver, waypoints, environmental_model = None, None, None
@app.route('/test', methods=['GET', 'POST'])
@crossdomain(origin='*')
def test():
print str(request)
return json.dumps({'test':'test'})
@app.route('/setwaypoints', methods=['GET', 'POST'])
@crossdomain(origin='*')
def set_waypoints():
try:
global solver, waypoints, environmental_model
print('in set waypoints')
request_data = request.get_json(force=True)
xp_json = request_data['xp_json']
json_loader = JSONloader(xp_json['sequence'])
print 'loaded xp json'
waypoints = json_loader.get_waypoints()
print 'gdal mesh is built from %s' % str(geotiff_full_path)
environmental_model = gdal_mesh.loadSubSection(waypoints.geoEnvelope(), cached=True)
solver = astarSolver(environmental_model, explorer, optimize_on='Energy')
print('loaded fine')
return json.dumps({'loaded': True})
except Exception, e:
traceback.print_exc()
response = {'error': str(e),
'status_code': 400}
return response
@app.route('/solve', methods=['GET', 'POST'])
@crossdomain(origin='*')
def solve():
global solver, waypoints, environmental_model
print 'in solve'
request_data = request.get_json(force=True)
return_type = request_data['return']
if 'xp_json' in request_data:
xp_json = request_data['xp_json']
json_loader = JSONloader(xp_json['sequence'])
waypoints = json_loader.get_waypoints()
print(waypoints.to(LAT_LONG))
environmental_model = gdal_mesh.loadSubSection(waypoints.geoEnvelope(), cached=True)
solver = astarSolver(environmental_model, explorer, optimize_on='Energy')
search_results, rawpoints, _ = solver.solvemultipoint(waypoints)
return_json = {
'latlong':[]
}
if return_type == 'segmented':
for search_result in search_results.list:
lat, lon = GeoPolygon(environmental_model.ROW_COL, *np.array(search_result.raw).transpose()).to(LAT_LONG)
return_json['latlong'].append({'latitudes': list(lat), 'longitudes': list(lon)})
else:
lat, lon = GeoPolygon(environmental_model.ROW_COL, *np.array(rawpoints).transpose()).to(LAT_LONG)
return_json['latlong'].append({'latitudes': list(lat), 'longitudes': list(lon)})
return json.dumps(return_json)
# OLD Stuff: delete
@app.route('/', methods=['GET', 'POST'])
@crossdomain(origin='*')
def get_waypoints():
print('got request')
data = request.get_json(force=True)
data_np = np.array(data['waypoints']).transpose()
#json_waypoints = JSONloader(xpjson)
waypoints = GeoPolygon(LAT_LONG, *data_np)
print waypoints.to(LAT_LONG)
environmental_model = gdal_mesh.loadSubSection(waypoints.geoEnvelope(), cached=True)
explorer = Astronaut(80)
solver = astarSolver(environmental_model, explorer, optimize_on='Energy', cached=True)
_, rawpoints, _ = solver.solvemultipoint(waypoints)
lat, lon = GeoPolygon(environmental_model.ROW_COL, *np.array(rawpoints).transpose()).to(LAT_LONG)
print((lat, lon))
return json.dumps({'latitudes': list(lat), 'longitudes': list(lon)})
if argv[0] != 'sextant:app':
app.run(host='localhost', port=5000)
# if __name__ == "__main__":
main(sys.argv[1:])
#main(['../data/maps/dem/HI_air_imagery.tif'])
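# Client-side sketch, assuming the server is running locally on port 5000 and
# that ``xp_json`` holds a previously loaded XPJSON plan (both assumptions, not
# shown in this file):
#     import requests
#     resp = requests.post('http://localhost:5000/solve',
#                          json={'return': 'segmented', 'xp_json': xp_json})
#     print(resp.json()['latlong'])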
| 38.098765
| 121
| 0.638043
| 0
| 0
| 0
| 0
| 3,371
| 0.546176
| 0
| 0
| 807
| 0.130752
|
f7905a650574afa3ef5e426f0e640ab9b3607fe3
| 10,085
|
py
|
Python
|
packages/gtmapi/service.py
|
gigabackup/gigantum-client
|
70fe6b39b87b1c56351f2b4c551b6f1693813e4f
|
[
"MIT"
] | 60
|
2018-09-26T15:46:00.000Z
|
2021-10-10T02:37:14.000Z
|
packages/gtmapi/service.py
|
gigabackup/gigantum-client
|
70fe6b39b87b1c56351f2b4c551b6f1693813e4f
|
[
"MIT"
] | 1,706
|
2018-09-26T16:11:22.000Z
|
2021-08-20T13:37:59.000Z
|
packages/gtmapi/service.py
|
griffinmilsap/gigantum-client
|
70fe6b39b87b1c56351f2b4c551b6f1693813e4f
|
[
"MIT"
] | 11
|
2019-03-14T13:23:51.000Z
|
2022-01-25T01:29:16.000Z
|
#!/usr/bin/python3
import shutil
import os
import base64
from time import sleep
import flask
import requests.exceptions
import blueprint
from flask_cors import CORS
from confhttpproxy import ProxyRouter, ProxyRouterException
from flask import Flask, jsonify
import rest_routes
from lmsrvcore.utilities.migrate import migrate_work_dir_structure_v2
from gtmcore.dispatcher import Dispatcher
from gtmcore.dispatcher.jobs import update_environment_repositories
from gtmcore.configuration import Configuration
from gtmcore.logging import LMLogger
from gtmcore.auth.identity import AuthenticationError, get_identity_manager_class
from gtmcore.labbook.lock import reset_all_locks
logger = LMLogger.get_logger()
def configure_chp(proxy_dict: dict, is_hub_client: bool) -> str:
"""Set up the configurable HTTP proxy (CHP)
Args:
proxy_dict: obtained from the config dict inside the config instance
is_hub_client: are we running on the hub? (also obtained from config instance)
Returns:
the final api_prefix used by the router
We define this as a function mostly so we can optionally wrap it in a try block below
"""
# /api by default
api_prefix = proxy_dict["labmanager_api_prefix"]
proxy_router = ProxyRouter.get_proxy(proxy_dict)
# Wait up to 10 seconds for the CHP to be available
for _ in range(20):
try:
# This property raises an exception if the underlying request doesn't yield a status code of 200
proxy_router.routes # noqa
except (requests.exceptions.ConnectionError, ProxyRouterException):
sleep(0.5)
continue
# If there was no exception, the CHP is up and responding
break
else:
# We exhausted our for-loop
logger.error("Could not reach router after 20 tries (10 seconds), proxy_router.add() will likely fail")
if is_hub_client:
# Use full route prefix, including run/<client_id> if running in the Hub
api_target = f"run/{os.environ['GIGANTUM_CLIENT_ID']}{api_prefix}"
api_prefix = f"/{api_target}"
# explicit routes for UI with full route prefix
proxy_router.add("http://localhost:10002", f"run/{os.environ['GIGANTUM_CLIENT_ID']}")
else:
api_target = "api"
proxy_router.add("http://localhost:10001", api_target)
logger.info(f"Proxy routes ({type(proxy_router)}): {proxy_router.routes}")
return api_prefix
def configure_default_server(config_instance: Configuration) -> None:
"""Function to check if a server has been configured, and if not, configure and select the default server"""
try:
# Load the server configuration. If you get a FileNotFoundError there is no configured server
config_instance.get_server_configuration()
except FileNotFoundError:
default_server = config_instance.config['core']['default_server']
logger.info(f"Configuring Client with default server via auto-discovery: {default_server}")
try:
server_id = config_instance.add_server(default_server)
config_instance.set_current_server(server_id)
# Migrate any user dirs if needed. Here we assume all projects belong to the default server, since
# at the time it was the only available server.
migrate_work_dir_structure_v2(server_id)
except Exception as err:
logger.exception(f"Failed to configure default server! Restart Client to try again: {err}")
# Re-raise the exception so the API doesn't come up
raise
# Start Flask Server Initialization and app configuration
app = Flask("lmsrvlabbook")
random_bytes = os.urandom(32)
app.config["SECRET_KEY"] = base64.b64encode(random_bytes).decode('utf-8')
app.config["LABMGR_CONFIG"] = config = Configuration(wait_for_cache=10)
configure_default_server(config)
app.config["ID_MGR_CLS"] = get_identity_manager_class(config)
# Set Debug mode
app.config['DEBUG'] = config.config["flask"]["DEBUG"]
app.register_blueprint(blueprint.complete_labbook_service)
# Set starting flags
# If flask is run in debug mode the service will restart when code is changed, and some tasks
# we only want to happen once (ON_FIRST_START)
# The WERKZEUG_RUN_MAIN environmental variable is set only when running under debugging mode
ON_FIRST_START = app.config['DEBUG'] is False or os.environ.get('WERKZEUG_RUN_MAIN') != 'true'
ON_RESTART = os.environ.get('WERKZEUG_RUN_MAIN') == 'true'
if os.environ.get('CIRCLECI') == 'true':
try:
url_prefix = configure_chp(config.config['proxy'], config.is_hub_client)
except requests.exceptions.ConnectionError:
url_prefix = config.config['proxy']["labmanager_api_prefix"]
else:
url_prefix = configure_chp(config.config['proxy'], config.is_hub_client)
# Add rest routes
app.register_blueprint(rest_routes.rest_routes, url_prefix=url_prefix)
if config.config["flask"]["allow_cors"]:
# Allow CORS
CORS(app, max_age=7200)
if ON_FIRST_START:
# Empty container-container share dir as it is ephemeral
share_dir = os.path.join(os.path.sep, 'mnt', 'share')
logger.info("Emptying container-container share folder: {}.".format(share_dir))
try:
for item in os.listdir(share_dir):
item_path = os.path.join(share_dir, item)
if os.path.isfile(item_path):
os.unlink(item_path)
else:
shutil.rmtree(item_path)
except Exception as e:
logger.error(f"Failed to empty share folder: {e}.")
raise
post_save_hook_code = """
import subprocess, os
def post_save_hook(os_path, model, contents_manager, **kwargs):
try:
client_ip = os.environ.get('GIGANTUM_CLIENT_IP')
if os.environ.get('HUB_CLIENT_ID'):
# Running in the Hub
service_route = "run/{}/api/savehook".format(os.environ.get('HUB_CLIENT_ID'))
else:
# Running locally
service_route = "api/savehook"
tokens = open('/home/giguser/jupyter_token').read().strip()
username, owner, lbname, jupyter_token = tokens.split(',')
url_args = "file={}&jupyter_token={}&email={}".format(os.path.basename(os_path), jupyter_token, os.environ['GIGANTUM_EMAIL'])
url = "http://{}:10001/{}/{}/{}/{}?{}".format(client_ip,service_route,username,owner,lbname,url_args)
subprocess.run(['wget', '--spider', url], cwd='/tmp')
except Exception as e:
print(e)
"""
os.makedirs(os.path.join(share_dir, 'jupyterhooks'))
with open(os.path.join(share_dir, 'jupyterhooks', '__init__.py'), 'w') as initpy:
initpy.write(post_save_hook_code)
# Reset distributed lock, if desired
if config.config["lock"]["reset_on_start"]:
logger.info("Resetting ALL distributed locks")
reset_all_locks(config.config['lock'])
# Create local data (for local dataset types) dir if it doesn't exist
local_data_dir = os.path.join(config.config['git']['working_directory'], 'local_data')
if os.path.isdir(local_data_dir) is False:
os.makedirs(local_data_dir, exist_ok=True)
logger.info(f'Created `local_data` dir for Local Filesystem Dataset Type: {local_data_dir}')
# Create certificates file directory for custom CA certificate support.
certificate_dir = os.path.join(config.config['git']['working_directory'], 'certificates', 'ssl')
if os.path.isdir(certificate_dir) is False:
os.makedirs(certificate_dir, exist_ok=True)
logger.info(f'Created `certificates` dir for SSL and custom CA certificates: {certificate_dir}')
# make sure temporary upload directory exists and is empty
tempdir = config.upload_dir
if os.path.exists(tempdir):
shutil.rmtree(tempdir)
logger.info(f'Cleared upload temp dir: {tempdir}')
os.makedirs(tempdir)
# Start background startup tasks
d = Dispatcher()
# Make sure the queue is up before we start using RQ
for _ in range(20):
if d.ready_for_job(update_environment_repositories):
break
sleep(0.5)
else:
# We exhausted our for-loop
err_message = "Worker queue not ready after 20 tries (10 seconds) - fatal error"
logger.error(err_message)
raise RuntimeError(err_message)
# Run job to update Base images in the background
d.dispatch_task(update_environment_repositories, persist=True)
# Set auth error handler
@app.errorhandler(AuthenticationError)
def handle_auth_error(ex):
response = jsonify(ex.error)
response.status_code = ex.status_code
return response
# TEMPORARY KLUDGE
# Due to GitPython implementation, resources leak. This block deletes all GitPython instances at the end of the request
# Future work will remove GitPython, at which point this block should be removed.
@app.after_request
def cleanup_git(response):
loader = getattr(flask.request, 'labbook_loader', None)
if loader:
for key in loader.__dict__["_promise_cache"]:
try:
lb = loader.__dict__["_promise_cache"][key].value
lb.git.repo.__del__()
except AttributeError:
continue
return response
# TEMPORARY KLUDGE
def main(debug=False) -> None:
try:
# Run app on 0.0.0.0, assuming not an issue since it should be in a container
# Please note: Debug mode must explicitly be set to False when running integration
# tests, due to properties of Flask werkzeug dynamic package reloading.
if debug:
# This is to support integration tests, which will call main
# with debug=False in order to avoid runtime reloading of Python code
# which causes the interpreter to crash.
app.run(host="0.0.0.0", port=10001, debug=debug)
else:
# If debug arg is not explicitly given then it is loaded from config
app.run(host="0.0.0.0", port=10001)
except Exception as err:
logger.exception(err)
raise
if __name__ == '__main__':
main()
| 38.938224
| 133
| 0.69529
| 0
| 0
| 0
| 0
| 535
| 0.053049
| 0
| 0
| 4,947
| 0.49053
|
f79637ff2082c4edbb504887dfd73b4aed28edc7
| 37,112
|
py
|
Python
|
bitten/model.py
|
dokipen/bitten
|
d4d2829c63eec84bcfab05ec7035a23e85d90c00
|
[
"BSD-3-Clause"
] | 1
|
2016-08-28T03:13:03.000Z
|
2016-08-28T03:13:03.000Z
|
bitten/model.py
|
dokipen/bitten
|
d4d2829c63eec84bcfab05ec7035a23e85d90c00
|
[
"BSD-3-Clause"
] | null | null | null |
bitten/model.py
|
dokipen/bitten
|
d4d2829c63eec84bcfab05ec7035a23e85d90c00
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2005-2007 Christopher Lenz <cmlenz@gmx.de>
# Copyright (C) 2007 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://bitten.edgewall.org/wiki/License.
"""Model classes for objects persisted in the database."""
from trac.attachment import Attachment
from trac.db import Table, Column, Index
from trac.resource import Resource
from trac.util.text import to_unicode
import codecs
import os
__docformat__ = 'restructuredtext en'
class BuildConfig(object):
"""Representation of a build configuration."""
_schema = [
Table('bitten_config', key='name')[
Column('name'), Column('path'), Column('active', type='int'),
Column('recipe'), Column('min_rev'), Column('max_rev'),
Column('label'), Column('description')
]
]
def __init__(self, env, name=None, path=None, active=False, recipe=None,
min_rev=None, max_rev=None, label=None, description=None):
"""Initialize a new build configuration with the specified attributes.
To actually create this configuration in the database, the `insert`
method needs to be called.
"""
self.env = env
self._old_name = None
self.name = name
self.path = path or ''
self.active = bool(active)
self.recipe = recipe or ''
self.min_rev = min_rev or None
self.max_rev = max_rev or None
self.label = label or ''
self.description = description or ''
def __repr__(self):
return '<%s %r>' % (type(self).__name__, self.name)
exists = property(fget=lambda self: self._old_name is not None,
doc='Whether this configuration exists in the database')
resource = property(fget=lambda self: Resource('build', '%s' % self.name),
doc='Build Config resource identification')
def delete(self, db=None):
"""Remove a build configuration and all dependent objects from the
database."""
assert self.exists, 'Cannot delete non-existing configuration'
if not db:
db = self.env.get_db_cnx()
handle_ta = True
else:
handle_ta = False
for platform in list(TargetPlatform.select(self.env, self.name, db=db)):
platform.delete(db=db)
for build in list(Build.select(self.env, config=self.name, db=db)):
build.delete(db=db)
# Delete attachments
Attachment.delete_all(self.env, 'build', self.resource.id, db)
cursor = db.cursor()
cursor.execute("DELETE FROM bitten_config WHERE name=%s", (self.name,))
if handle_ta:
db.commit()
self._old_name = None
def insert(self, db=None):
"""Insert a new configuration into the database."""
assert not self.exists, 'Cannot insert existing configuration'
assert self.name, 'Configuration requires a name'
if not db:
db = self.env.get_db_cnx()
handle_ta = True
else:
handle_ta = False
cursor = db.cursor()
cursor.execute("INSERT INTO bitten_config (name,path,active,"
"recipe,min_rev,max_rev,label,description) "
"VALUES (%s,%s,%s,%s,%s,%s,%s,%s)",
(self.name, self.path, int(self.active or 0),
self.recipe or '', self.min_rev, self.max_rev,
self.label or '', self.description or ''))
if handle_ta:
db.commit()
self._old_name = self.name
def update(self, db=None):
"""Save changes to an existing build configuration."""
assert self.exists, 'Cannot update a non-existing configuration'
assert self.name, 'Configuration requires a name'
if not db:
db = self.env.get_db_cnx()
handle_ta = True
else:
handle_ta = False
cursor = db.cursor()
cursor.execute("UPDATE bitten_config SET name=%s,path=%s,active=%s,"
"recipe=%s,min_rev=%s,max_rev=%s,label=%s,"
"description=%s WHERE name=%s",
(self.name, self.path, int(self.active or 0),
self.recipe, self.min_rev, self.max_rev,
self.label, self.description, self._old_name))
if self.name != self._old_name:
cursor.execute("UPDATE bitten_platform SET config=%s "
"WHERE config=%s", (self.name, self._old_name))
cursor.execute("UPDATE bitten_build SET config=%s "
"WHERE config=%s", (self.name, self._old_name))
if handle_ta:
db.commit()
self._old_name = self.name
def fetch(cls, env, name, db=None):
"""Retrieve an existing build configuration from the database by
name.
"""
if not db:
db = env.get_db_cnx()
cursor = db.cursor()
cursor.execute("SELECT path,active,recipe,min_rev,max_rev,label,"
"description FROM bitten_config WHERE name=%s", (name,))
row = cursor.fetchone()
if not row:
return None
config = BuildConfig(env)
config.name = config._old_name = name
config.path = row[0] or ''
config.active = bool(row[1])
config.recipe = row[2] or ''
config.min_rev = row[3] or None
config.max_rev = row[4] or None
config.label = row[5] or ''
config.description = row[6] or ''
return config
fetch = classmethod(fetch)
def select(cls, env, include_inactive=False, db=None):
"""Retrieve existing build configurations from the database that match
the specified criteria.
"""
if not db:
db = env.get_db_cnx()
cursor = db.cursor()
if include_inactive:
cursor.execute("SELECT name,path,active,recipe,min_rev,max_rev,"
"label,description FROM bitten_config ORDER BY name")
else:
cursor.execute("SELECT name,path,active,recipe,min_rev,max_rev,"
"label,description FROM bitten_config "
"WHERE active=1 ORDER BY name")
for name, path, active, recipe, min_rev, max_rev, label, description \
in cursor:
config = BuildConfig(env, name=name, path=path or '',
active=bool(active), recipe=recipe or '',
min_rev=min_rev or None,
max_rev=max_rev or None, label=label or '',
description=description or '')
config._old_name = name
yield config
select = classmethod(select)
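# Minimal usage sketch for BuildConfig, assuming a Trac ``env`` object is in
# scope (e.g. inside a Trac component); names and values are illustrative only:
#     config = BuildConfig(env, name='trunk', path='trunk', active=True,
#                          label='Trunk builds')
#     config.insert()
#     for cfg in BuildConfig.select(env, include_inactive=True):
#         print cfg.name, cfg.active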
class TargetPlatform(object):
"""Target platform for a build configuration."""
_schema = [
Table('bitten_platform', key='id')[
Column('id', auto_increment=True), Column('config'), Column('name')
],
Table('bitten_rule', key=('id', 'propname'))[
Column('id', type='int'), Column('propname'), Column('pattern'),
Column('orderno', type='int')
]
]
def __init__(self, env, config=None, name=None):
"""Initialize a new target platform with the specified attributes.
To actually create this platform in the database, the `insert` method
needs to be called.
"""
self.env = env
self.id = None
self.config = config
self.name = name
self.rules = []
def __repr__(self):
return '<%s %r>' % (type(self).__name__, self.id)
exists = property(fget=lambda self: self.id is not None,
doc='Whether this target platform exists in the database')
def delete(self, db=None):
"""Remove the target platform from the database."""
if not db:
db = self.env.get_db_cnx()
handle_ta = True
else:
handle_ta = False
for build in Build.select(self.env, platform=self.id, status=Build.PENDING, db=db):
build.delete()
cursor = db.cursor()
cursor.execute("DELETE FROM bitten_rule WHERE id=%s", (self.id,))
cursor.execute("DELETE FROM bitten_platform WHERE id=%s", (self.id,))
if handle_ta:
db.commit()
def insert(self, db=None):
"""Insert a new target platform into the database."""
if not db:
db = self.env.get_db_cnx()
handle_ta = True
else:
handle_ta = False
assert not self.exists, 'Cannot insert existing target platform'
assert self.config, 'Target platform needs to be associated with a ' \
'configuration'
assert self.name, 'Target platform requires a name'
cursor = db.cursor()
cursor.execute("INSERT INTO bitten_platform (config,name) "
"VALUES (%s,%s)", (self.config, self.name))
self.id = db.get_last_id(cursor, 'bitten_platform')
if self.rules:
cursor.executemany("INSERT INTO bitten_rule VALUES (%s,%s,%s,%s)",
[(self.id, propname, pattern, idx)
for idx, (propname, pattern)
in enumerate(self.rules)])
if handle_ta:
db.commit()
def update(self, db=None):
"""Save changes to an existing target platform."""
assert self.exists, 'Cannot update a non-existing platform'
assert self.config, 'Target platform needs to be associated with a ' \
'configuration'
assert self.name, 'Target platform requires a name'
if not db:
db = self.env.get_db_cnx()
handle_ta = True
else:
handle_ta = False
cursor = db.cursor()
cursor.execute("UPDATE bitten_platform SET name=%s WHERE id=%s",
(self.name, self.id))
cursor.execute("DELETE FROM bitten_rule WHERE id=%s", (self.id,))
if self.rules:
cursor.executemany("INSERT INTO bitten_rule VALUES (%s,%s,%s,%s)",
[(self.id, propname, pattern, idx)
for idx, (propname, pattern)
in enumerate(self.rules)])
if handle_ta:
db.commit()
def fetch(cls, env, id, db=None):
"""Retrieve an existing target platform from the database by ID."""
if not db:
db = env.get_db_cnx()
cursor = db.cursor()
cursor.execute("SELECT config,name FROM bitten_platform "
"WHERE id=%s", (id,))
row = cursor.fetchone()
if not row:
return None
platform = TargetPlatform(env, config=row[0], name=row[1])
platform.id = id
cursor.execute("SELECT propname,pattern FROM bitten_rule "
"WHERE id=%s ORDER BY orderno", (id,))
for propname, pattern in cursor:
platform.rules.append((propname, pattern))
return platform
fetch = classmethod(fetch)
def select(cls, env, config=None, db=None):
"""Retrieve existing target platforms from the database that match the
specified criteria.
"""
if not db:
db = env.get_db_cnx()
where_clauses = []
if config is not None:
where_clauses.append(("config=%s", config))
if where_clauses:
where = "WHERE " + " AND ".join([wc[0] for wc in where_clauses])
else:
where = ""
cursor = db.cursor()
cursor.execute("SELECT id FROM bitten_platform %s ORDER BY name"
% where, [wc[1] for wc in where_clauses])
for (id,) in cursor:
yield TargetPlatform.fetch(env, id)
select = classmethod(select)
class Build(object):
"""Representation of a build."""
_schema = [
Table('bitten_build', key='id')[
Column('id', auto_increment=True), Column('config'), Column('rev'),
Column('rev_time', type='int'), Column('platform', type='int'),
Column('slave'), Column('started', type='int'),
Column('stopped', type='int'), Column('status', size=1),
Index(['config', 'rev', 'platform'], unique=True)
],
Table('bitten_slave', key=('build', 'propname'))[
Column('build', type='int'), Column('propname'), Column('propvalue')
]
]
# Build status codes
PENDING = 'P'
IN_PROGRESS = 'I'
SUCCESS = 'S'
FAILURE = 'F'
# Standard slave properties
IP_ADDRESS = 'ipnr'
MAINTAINER = 'owner'
OS_NAME = 'os'
OS_FAMILY = 'family'
OS_VERSION = 'version'
MACHINE = 'machine'
PROCESSOR = 'processor'
TOKEN = 'token'
def __init__(self, env, config=None, rev=None, platform=None, slave=None,
started=0, stopped=0, rev_time=0, status=PENDING):
"""Initialize a new build with the specified attributes.
To actually create this build in the database, the `insert` method needs
to be called.
"""
self.env = env
self.id = None
self.config = config
self.rev = rev and str(rev) or None
self.platform = platform
self.slave = slave
self.started = started or 0
self.stopped = stopped or 0
self.rev_time = rev_time
self.status = status
self.slave_info = {}
def __repr__(self):
return '<%s %r>' % (type(self).__name__, self.id)
exists = property(fget=lambda self: self.id is not None,
doc='Whether this build exists in the database')
completed = property(fget=lambda self: self.status != Build.IN_PROGRESS,
doc='Whether the build has been completed')
successful = property(fget=lambda self: self.status == Build.SUCCESS,
doc='Whether the build was successful')
resource = property(fget=lambda self: Resource('build', '%s/%s' % (self.config, self.id)),
doc='Build resource identification')
def delete(self, db=None):
"""Remove the build from the database."""
assert self.exists, 'Cannot delete a non-existing build'
if not db:
db = self.env.get_db_cnx()
handle_ta = True
else:
handle_ta = False
for step in list(BuildStep.select(self.env, build=self.id)):
step.delete(db=db)
# Delete attachments
Attachment.delete_all(self.env, 'build', self.resource.id, db)
cursor = db.cursor()
cursor.execute("DELETE FROM bitten_slave WHERE build=%s", (self.id,))
cursor.execute("DELETE FROM bitten_build WHERE id=%s", (self.id,))
if handle_ta:
db.commit()
def insert(self, db=None):
"""Insert a new build into the database."""
assert not self.exists, 'Cannot insert an existing build'
if not db:
db = self.env.get_db_cnx()
handle_ta = True
else:
handle_ta = False
assert self.config and self.rev and self.rev_time and self.platform
assert self.status in (self.PENDING, self.IN_PROGRESS, self.SUCCESS,
self.FAILURE)
if not self.slave:
assert self.status == self.PENDING
cursor = db.cursor()
cursor.execute("INSERT INTO bitten_build (config,rev,rev_time,platform,"
"slave,started,stopped,status) "
"VALUES (%s,%s,%s,%s,%s,%s,%s,%s)",
(self.config, self.rev, int(self.rev_time),
self.platform, self.slave or '', self.started or 0,
self.stopped or 0, self.status))
self.id = db.get_last_id(cursor, 'bitten_build')
if self.slave_info:
cursor.executemany("INSERT INTO bitten_slave VALUES (%s,%s,%s)",
[(self.id, name, value) for name, value
in self.slave_info.items()])
if handle_ta:
db.commit()
def update(self, db=None):
"""Save changes to an existing build."""
assert self.exists, 'Cannot update a non-existing build'
if not db:
db = self.env.get_db_cnx()
handle_ta = True
else:
handle_ta = False
assert self.config and self.rev
assert self.status in (self.PENDING, self.IN_PROGRESS, self.SUCCESS,
self.FAILURE)
if not self.slave:
assert self.status == self.PENDING
cursor = db.cursor()
cursor.execute("UPDATE bitten_build SET slave=%s,started=%s,"
"stopped=%s,status=%s WHERE id=%s",
(self.slave or '', self.started or 0,
self.stopped or 0, self.status, self.id))
cursor.execute("DELETE FROM bitten_slave WHERE build=%s", (self.id,))
if self.slave_info:
cursor.executemany("INSERT INTO bitten_slave VALUES (%s,%s,%s)",
[(self.id, name, value) for name, value
in self.slave_info.items()])
if handle_ta:
db.commit()
def fetch(cls, env, id, db=None):
"""Retrieve an existing build from the database by ID."""
if not db:
db = env.get_db_cnx()
cursor = db.cursor()
cursor.execute("SELECT config,rev,rev_time,platform,slave,started,"
"stopped,status FROM bitten_build WHERE id=%s", (id,))
row = cursor.fetchone()
if not row:
return None
build = Build(env, config=row[0], rev=row[1], rev_time=int(row[2]),
platform=int(row[3]), slave=row[4],
started=row[5] and int(row[5]) or 0,
stopped=row[6] and int(row[6]) or 0, status=row[7])
build.id = int(id)
cursor.execute("SELECT propname,propvalue FROM bitten_slave "
"WHERE build=%s", (id,))
for propname, propvalue in cursor:
build.slave_info[propname] = propvalue
return build
fetch = classmethod(fetch)
def select(cls, env, config=None, rev=None, platform=None, slave=None,
status=None, db=None, min_rev_time=None, max_rev_time=None):
"""Retrieve existing builds from the database that match the specified
criteria.
"""
if not db:
db = env.get_db_cnx()
where_clauses = []
if config is not None:
where_clauses.append(("config=%s", config))
if rev is not None:
where_clauses.append(("rev=%s", str(rev)))
if platform is not None:
where_clauses.append(("platform=%s", platform))
if slave is not None:
where_clauses.append(("slave=%s", slave))
if status is not None:
where_clauses.append(("status=%s", status))
if min_rev_time is not None:
where_clauses.append(("rev_time>=%s", min_rev_time))
if max_rev_time is not None:
where_clauses.append(("rev_time<=%s", max_rev_time))
if where_clauses:
where = "WHERE " + " AND ".join([wc[0] for wc in where_clauses])
else:
where = ""
cursor = db.cursor()
cursor.execute("SELECT id FROM bitten_build %s "
"ORDER BY rev_time DESC,config,slave"
% where, [wc[1] for wc in where_clauses])
for (id,) in cursor:
yield Build.fetch(env, id)
select = classmethod(select)
class BuildStep(object):
"""Represents an individual step of an executed build."""
_schema = [
Table('bitten_step', key=('build', 'name'))[
Column('build', type='int'), Column('name'), Column('description'),
Column('status', size=1), Column('started', type='int'),
Column('stopped', type='int')
],
Table('bitten_error', key=('build', 'step', 'orderno'))[
Column('build', type='int'), Column('step'), Column('message'),
Column('orderno', type='int')
]
]
# Step status codes
SUCCESS = 'S'
FAILURE = 'F'
def __init__(self, env, build=None, name=None, description=None,
status=None, started=None, stopped=None):
"""Initialize a new build step with the specified attributes.
To actually create this build step in the database, the `insert` method
needs to be called.
"""
self.env = env
self.build = build
self.name = name
self.description = description
self.status = status
self.started = started
self.stopped = stopped
self.errors = []
self._exists = False
exists = property(fget=lambda self: self._exists,
doc='Whether this build step exists in the database')
successful = property(fget=lambda self: self.status == BuildStep.SUCCESS,
doc='Whether the build step was successful')
def delete(self, db=None):
"""Remove the build step from the database."""
if not db:
db = self.env.get_db_cnx()
handle_ta = True
else:
handle_ta = False
for log in list(BuildLog.select(self.env, build=self.build,
step=self.name, db=db)):
log.delete(db=db)
for report in list(Report.select(self.env, build=self.build,
step=self.name, db=db)):
report.delete(db=db)
cursor = db.cursor()
cursor.execute("DELETE FROM bitten_step WHERE build=%s AND name=%s",
(self.build, self.name))
cursor.execute("DELETE FROM bitten_error WHERE build=%s AND step=%s",
(self.build, self.name))
if handle_ta:
db.commit()
self._exists = False
def insert(self, db=None):
"""Insert a new build step into the database."""
if not db:
db = self.env.get_db_cnx()
handle_ta = True
else:
handle_ta = False
assert self.build and self.name
assert self.status in (self.SUCCESS, self.FAILURE)
cursor = db.cursor()
cursor.execute("INSERT INTO bitten_step (build,name,description,status,"
"started,stopped) VALUES (%s,%s,%s,%s,%s,%s)",
(self.build, self.name, self.description or '',
self.status, self.started or 0, self.stopped or 0))
if self.errors:
cursor.executemany("INSERT INTO bitten_error (build,step,message,"
"orderno) VALUES (%s,%s,%s,%s)",
[(self.build, self.name, message, idx)
for idx, message in enumerate(self.errors)])
if handle_ta:
db.commit()
self._exists = True
def fetch(cls, env, build, name, db=None):
"""Retrieve an existing build from the database by build ID and step
name."""
if not db:
db = env.get_db_cnx()
cursor = db.cursor()
cursor.execute("SELECT description,status,started,stopped "
"FROM bitten_step WHERE build=%s AND name=%s",
(build, name))
row = cursor.fetchone()
if not row:
return None
step = BuildStep(env, build, name, row[0] or '', row[1],
row[2] and int(row[2]), row[3] and int(row[3]))
step._exists = True
cursor.execute("SELECT message FROM bitten_error WHERE build=%s "
"AND step=%s ORDER BY orderno", (build, name))
for row in cursor:
step.errors.append(row[0] or '')
return step
fetch = classmethod(fetch)
def select(cls, env, build=None, name=None, status=None, db=None):
"""Retrieve existing build steps from the database that match the
specified criteria.
"""
if not db:
db = env.get_db_cnx()
assert status in (None, BuildStep.SUCCESS, BuildStep.FAILURE)
where_clauses = []
if build is not None:
where_clauses.append(("build=%s", build))
if name is not None:
where_clauses.append(("name=%s", name))
if status is not None:
where_clauses.append(("status=%s", status))
if where_clauses:
where = "WHERE " + " AND ".join([wc[0] for wc in where_clauses])
else:
where = ""
cursor = db.cursor()
cursor.execute("SELECT build,name FROM bitten_step %s ORDER BY stopped"
% where, [wc[1] for wc in where_clauses])
for build, name in cursor:
yield BuildStep.fetch(env, build, name, db=db)
select = classmethod(select)
class BuildLog(object):
"""Represents a build log."""
_schema = [
Table('bitten_log', key='id')[
Column('id', auto_increment=True), Column('build', type='int'),
Column('step'), Column('generator'), Column('orderno', type='int'),
Column('filename'),
Index(['build', 'step'])
],
]
# Message levels
DEBUG = 'D'
INFO = 'I'
WARNING = 'W'
ERROR = 'E'
UNKNOWN = ''
LEVELS_SUFFIX = '.levels'
def __init__(self, env, build=None, step=None, generator=None,
orderno=None, filename=None):
"""Initialize a new build log with the specified attributes.
To actually create this build log in the database, the `insert` method
needs to be called.
"""
self.env = env
self.id = None
self.build = build
self.step = step
self.generator = generator or ''
self.orderno = orderno and int(orderno) or 0
self.filename = filename or None
self.messages = []
self.logs_dir = env.config.get('bitten', 'logs_dir', 'log/bitten')
if not os.path.isabs(self.logs_dir):
self.logs_dir = os.path.join(env.path, self.logs_dir)
if not os.path.exists(self.logs_dir):
os.makedirs(self.logs_dir)
exists = property(fget=lambda self: self.id is not None,
doc='Whether this build log exists in the database')
def get_log_file(self, filename):
"""Returns the full path to the log file"""
if filename != os.path.basename(filename):
raise ValueError("Filename may not contain path: %s" % (filename,))
return os.path.join(self.logs_dir, filename)
def delete(self, db=None):
"""Remove the build log from the database."""
assert self.exists, 'Cannot delete a non-existing build log'
if not db:
db = self.env.get_db_cnx()
handle_ta = True
else:
handle_ta = False
if self.filename:
log_file = self.get_log_file(self.filename)
if os.path.exists(log_file):
try:
self.env.log.debug("Deleting log file: %s" % log_file)
os.remove(log_file)
except Exception, e:
self.env.log.warning("Error removing log file %s: %s" % (log_file, e))
level_file = log_file + self.LEVELS_SUFFIX
if os.path.exists(level_file):
try:
self.env.log.debug("Deleting level file: %s" % level_file)
os.remove(level_file)
except Exception, e:
self.env.log.warning("Error removing level file %s: %s" \
% (level_file, e))
cursor = db.cursor()
cursor.execute("DELETE FROM bitten_log WHERE id=%s", (self.id,))
if handle_ta:
db.commit()
self.id = None
def insert(self, db=None):
"""Insert a new build log into the database."""
if not db:
db = self.env.get_db_cnx()
handle_ta = True
else:
handle_ta = False
assert self.build and self.step
cursor = db.cursor()
cursor.execute("INSERT INTO bitten_log (build,step,generator,orderno) "
"VALUES (%s,%s,%s,%s)", (self.build, self.step,
self.generator, self.orderno))
id = db.get_last_id(cursor, 'bitten_log')
log_file = "%s.log" % (id,)
cursor.execute("UPDATE bitten_log SET filename=%s WHERE id=%s", (log_file, id))
if self.messages:
log_file_name = self.get_log_file(log_file)
level_file_name = log_file_name + self.LEVELS_SUFFIX
codecs.open(log_file_name, "wb", "UTF-8").writelines([to_unicode(msg[1]+"\n") for msg in self.messages])
codecs.open(level_file_name, "wb", "UTF-8").writelines([to_unicode(msg[0]+"\n") for msg in self.messages])
if handle_ta:
db.commit()
self.id = id
def fetch(cls, env, id, db=None):
"""Retrieve an existing build from the database by ID."""
if not db:
db = env.get_db_cnx()
cursor = db.cursor()
cursor.execute("SELECT build,step,generator,orderno,filename FROM bitten_log "
"WHERE id=%s", (id,))
row = cursor.fetchone()
if not row:
return None
log = BuildLog(env, int(row[0]), row[1], row[2], row[3], row[4])
log.id = id
if log.filename:
log_filename = log.get_log_file(log.filename)
if os.path.exists(log_filename):
log_lines = codecs.open(log_filename, "rb", "UTF-8").readlines()
else:
log_lines = []
level_filename = log.get_log_file(log.filename + cls.LEVELS_SUFFIX)
if os.path.exists(level_filename):
log_levels = dict(enumerate(codecs.open(level_filename, "rb", "UTF-8").readlines()))
else:
log_levels = {}
log.messages = [(log_levels.get(line_num, BuildLog.UNKNOWN).rstrip("\n"), line.rstrip("\n")) for line_num, line in enumerate(log_lines)]
else:
log.messages = []
return log
fetch = classmethod(fetch)
def select(cls, env, build=None, step=None, generator=None, db=None):
"""Retrieve existing build logs from the database that match the
specified criteria.
"""
if not db:
db = env.get_db_cnx()
where_clauses = []
if build is not None:
where_clauses.append(("build=%s", build))
if step is not None:
where_clauses.append(("step=%s", step))
if generator is not None:
where_clauses.append(("generator=%s", generator))
if where_clauses:
where = "WHERE " + " AND ".join([wc[0] for wc in where_clauses])
else:
where = ""
cursor = db.cursor()
cursor.execute("SELECT id FROM bitten_log %s ORDER BY orderno"
% where, [wc[1] for wc in where_clauses])
for (id, ) in cursor:
yield BuildLog.fetch(env, id, db=db)
select = classmethod(select)
class Report(object):
"""Represents a generated report."""
_schema = [
Table('bitten_report', key='id')[
Column('id', auto_increment=True), Column('build', type='int'),
Column('step'), Column('category'), Column('generator'),
Index(['build', 'step', 'category'])
],
Table('bitten_report_item', key=('report', 'item', 'name'))[
Column('report', type='int'), Column('item', type='int'),
Column('name'), Column('value')
]
]
def __init__(self, env, build=None, step=None, category=None,
generator=None):
"""Initialize a new report with the specified attributes.
        To actually create this report in the database, the `insert` method
needs to be called.
"""
self.env = env
self.id = None
self.build = build
self.step = step
self.category = category
self.generator = generator or ''
self.items = []
exists = property(fget=lambda self: self.id is not None,
doc='Whether this report exists in the database')
def delete(self, db=None):
"""Remove the report from the database."""
assert self.exists, 'Cannot delete a non-existing report'
if not db:
db = self.env.get_db_cnx()
handle_ta = True
else:
handle_ta = False
cursor = db.cursor()
cursor.execute("DELETE FROM bitten_report_item WHERE report=%s",
(self.id,))
cursor.execute("DELETE FROM bitten_report WHERE id=%s", (self.id,))
if handle_ta:
db.commit()
self.id = None
def insert(self, db=None):
"""Insert a new build log into the database."""
if not db:
db = self.env.get_db_cnx()
handle_ta = True
else:
handle_ta = False
assert self.build and self.step and self.category
# Enforce uniqueness of build-step-category.
# This should be done by the database, but the DB schema helpers in Trac
# currently don't support UNIQUE() constraints
assert not list(Report.select(self.env, build=self.build,
step=self.step, category=self.category,
db=db)), 'Report already exists'
cursor = db.cursor()
cursor.execute("INSERT INTO bitten_report "
"(build,step,category,generator) VALUES (%s,%s,%s,%s)",
(self.build, self.step, self.category, self.generator))
id = db.get_last_id(cursor, 'bitten_report')
for idx, item in enumerate([item for item in self.items if item]):
cursor.executemany("INSERT INTO bitten_report_item "
"(report,item,name,value) VALUES (%s,%s,%s,%s)",
[(id, idx, key, value) for key, value
in item.items()])
if handle_ta:
db.commit()
self.id = id
def fetch(cls, env, id, db=None):
"""Retrieve an existing build from the database by ID."""
if not db:
db = env.get_db_cnx()
cursor = db.cursor()
cursor.execute("SELECT build,step,category,generator "
"FROM bitten_report WHERE id=%s", (id,))
row = cursor.fetchone()
if not row:
return None
report = Report(env, int(row[0]), row[1], row[2] or '', row[3] or '')
report.id = id
cursor.execute("SELECT item,name,value FROM bitten_report_item "
"WHERE report=%s ORDER BY item", (id,))
items = {}
for item, name, value in cursor:
items.setdefault(item, {})[name] = value
report.items = items.values()
return report
fetch = classmethod(fetch)
def select(cls, env, config=None, build=None, step=None, category=None,
db=None):
"""Retrieve existing reports from the database that match the specified
criteria.
"""
where_clauses = []
joins = []
if config is not None:
where_clauses.append(("config=%s", config))
joins.append("INNER JOIN bitten_build ON (bitten_build.id=build)")
if build is not None:
where_clauses.append(("build=%s", build))
if step is not None:
where_clauses.append(("step=%s", step))
if category is not None:
where_clauses.append(("category=%s", category))
if where_clauses:
where = "WHERE " + " AND ".join([wc[0] for wc in where_clauses])
else:
where = ""
if not db:
db = env.get_db_cnx()
cursor = db.cursor()
cursor.execute("SELECT bitten_report.id FROM bitten_report %s %s "
"ORDER BY category" % (' '.join(joins), where),
[wc[1] for wc in where_clauses])
for (id, ) in cursor:
yield Report.fetch(env, id, db=db)
select = classmethod(select)
schema = BuildConfig._schema + TargetPlatform._schema + Build._schema + \
BuildStep._schema + BuildLog._schema + Report._schema
schema_version = 10
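# --- Illustrative usage sketch (not part of the original bitten module) ---
# Assuming a Trac `env` for a project with the bitten schema installed, a log
# and a report for one build step could be recorded as below; the argument
# values are placeholders.
def _example_record_step_results(env, build_id, step_name):
    """Hedged sketch showing how BuildLog and Report defined above are used."""
    # A build log holds (level, message) tuples and is written to logs_dir on insert
    log = BuildLog(env, build=build_id, step=step_name, generator='example')
    log.messages = [(BuildLog.INFO, u'step started'),
                    (BuildLog.ERROR, u'something went wrong')]
    log.insert()
    # A report holds a list of item dictionaries for one build/step/category
    report = Report(env, build=build_id, step=step_name, category='test',
                    generator='example')
    report.items = [{'name': 'test_example', 'status': 'failure'}]
    report.insert()
    return log.id, report.id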
| 37.000997
| 148
| 0.551304
| 36,309
| 0.978363
| 6,362
| 0.171427
| 0
| 0
| 0
| 0
| 9,788
| 0.263742
|
f797289b8fbe1305efddd975d80c58646d9ec219
| 2,555
|
py
|
Python
|
ml_model.py
|
CristopherNim/student_performance
|
f1ec90329e91c44a8155d83c0ac1569eb038954e
|
[
"MIT"
] | null | null | null |
ml_model.py
|
CristopherNim/student_performance
|
f1ec90329e91c44a8155d83c0ac1569eb038954e
|
[
"MIT"
] | null | null | null |
ml_model.py
|
CristopherNim/student_performance
|
f1ec90329e91c44a8155d83c0ac1569eb038954e
|
[
"MIT"
] | null | null | null |
import numpy as np
import pandas as pd
from sklearn.linear_model import Ridge
from sklearn.model_selection import cross_val_score, train_test_split
from sklearn.model_selection import RepeatedKFold
from sklearn.preprocessing import OneHotEncoder
import pickle
from flask import Flask, request
np.random.seed(42)
df = pd.read_csv('StudentsPerformance.csv')
df.rename(columns={'race/ethnicity': 'race', 'parental level of education': 'parent_level_of_education',
'test preparation course': 'test_prep_course', 'math score': 'math_score',
'reading score': 'reading_score', 'writing score': 'writing_score'}, inplace=True)
# creating a categorical boolean mask
categorical_feature_mask = df.dtypes == object
# filtering out the categorical columns
categorical_cols = df.columns[categorical_feature_mask].tolist()
# instantiate the OneHotEncoder Object
one_hot = OneHotEncoder(handle_unknown='ignore', sparse=False)
# applying data
one_hot.fit(df[categorical_cols])
cat_one_hot = one_hot.transform(df[categorical_cols])
# creating Dataframe of the hot encoded columns
hot_df = pd.DataFrame(cat_one_hot, columns=one_hot.get_feature_names(input_features=categorical_cols))
df_OneHotEncoder = pd.concat([df, hot_df], axis=1).drop(columns=categorical_cols, axis=1)
X = df_OneHotEncoder.drop('math_score', axis=1)
y = df_OneHotEncoder['math_score']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.2)
model = Ridge(alpha=.99).fit(X_train, y_train)
model_scores = cross_val_score(estimator=model, X=X_test, y=y_test, cv=5)
print('accuracy for ridge model: %.1f' % (model_scores.mean() * 100))
def row_pred(row):
row = np.column_stack(row)
cols = ['gender', 'race', 'parent_level_of_education', 'lunch', 'test_prep_course', 'reading_score',
'writing_score']
newdf = pd.DataFrame(row, columns=cols)
cat_ohe_new = one_hot.transform(newdf[categorical_cols])
ohe_new_df = pd.DataFrame(cat_ohe_new, columns=one_hot.get_feature_names(input_features=categorical_cols))
df_ohe_new = pd.concat([newdf, ohe_new_df], axis=1).drop(columns=categorical_cols, axis=1)
pred_score = model.predict(df_ohe_new)
a = pred_score.tolist()
print(f'predicted math score: {a[0]:.0f}')
# print(f'{a[0]:.0f}')
return f'{a[0]:.1f}'
pickle.dump(model, open('model.pkl', 'wb'))
row = ['male', 'group_a', 'some high school', 'standard', 'none', 80, 80]
result = row_pred(row)
print(result)
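# --- Hedged serving sketch (not part of the original script) ---
# Flask and `request` are imported above but never used; a minimal endpoint
# wrapping row_pred might look like this. The route name and JSON field names
# are assumptions (the fields mirror the columns used inside row_pred), and
# app.run() is deliberately omitted here.
app = Flask(__name__)

@app.route('/predict', methods=['POST'])
def predict():
    payload = request.get_json(force=True)
    row = [payload[c] for c in ['gender', 'race', 'parent_level_of_education',
                                'lunch', 'test_prep_course', 'reading_score',
                                'writing_score']]
    # row_pred returns the predicted math score as a formatted string
    return row_pred(row)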
| 35.486111
| 111
| 0.720157
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 706
| 0.276321
|
f7977957e2a1122df1f177f30c24042002dc1e88
| 1,088
|
py
|
Python
|
src/ralph/models/edx/enrollment/fields/contexts.py
|
p-bizouard/ralph
|
50a37f6b070dcb4109dcc49d8d885949a0099643
|
[
"MIT"
] | 5
|
2020-06-26T10:44:23.000Z
|
2022-01-26T11:41:03.000Z
|
src/ralph/models/edx/enrollment/fields/contexts.py
|
p-bizouard/ralph
|
50a37f6b070dcb4109dcc49d8d885949a0099643
|
[
"MIT"
] | 73
|
2020-02-18T15:09:25.000Z
|
2022-03-14T13:32:20.000Z
|
src/ralph/models/edx/enrollment/fields/contexts.py
|
p-bizouard/ralph
|
50a37f6b070dcb4109dcc49d8d885949a0099643
|
[
"MIT"
] | 4
|
2020-02-27T12:52:10.000Z
|
2021-11-23T19:45:07.000Z
|
"""Enrollment event models context fields definitions"""
from typing import Literal, Union
from ...base import BaseContextField
class EdxCourseEnrollmentUpgradeClickedContextField(BaseContextField):
"""Represents the `context` field of the `edx.course.enrollment.upgrade_clicked`
server statement.
In addition to the common context member fields, this statement also comprises the
`mode` context member field.
Attributes:
mode (str): Consists of either the `audit` or `honor` value. It identifies the
enrollment mode when the user clicked <kbd>Challenge Yourself</kbd>.
"""
mode: Union[Literal["audit"], Literal["honor"]]
class EdxCourseEnrollmentUpgradeSucceededContextField(BaseContextField):
"""Represents the `context` field of the `edx.course.enrollment.upgrade.succeeded`
server statement.
In addition to the common context member fields, this statement also comprises the
`mode` context member field.
Attributes:
mode (str): Consists of the `verified` value.
"""
mode: Literal["verified"]
| 31.085714
| 86
| 0.731618
| 952
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 796
| 0.731618
|
f7979a1edf5e664d9fd5011a9f7390b351722d3b
| 834
|
py
|
Python
|
tests/profiles/fontval_test.py
|
kennethormandy/fontbakery
|
ec569215cd7919e125089bd6f65346afa9e75546
|
[
"Apache-2.0"
] | null | null | null |
tests/profiles/fontval_test.py
|
kennethormandy/fontbakery
|
ec569215cd7919e125089bd6f65346afa9e75546
|
[
"Apache-2.0"
] | null | null | null |
tests/profiles/fontval_test.py
|
kennethormandy/fontbakery
|
ec569215cd7919e125089bd6f65346afa9e75546
|
[
"Apache-2.0"
] | 1
|
2020-06-14T17:13:59.000Z
|
2020-06-14T17:13:59.000Z
|
import os
import pytest
from fontbakery.utils import TEST_FILE
from fontbakery.checkrunner import ERROR
def test_check_fontvalidator():
""" MS Font Validator checks """
from fontbakery.profiles.fontval import com_google_fonts_check_fontvalidator as check
font = TEST_FILE("mada/Mada-Regular.ttf")
# we want to run all FValidator checks only once,
# so here we cache all results:
fval_results = list(check(font))
# Then we make sure that there wasn't an ERROR
# which would mean FontValidator is not properly installed:
for status, message in fval_results:
assert status != ERROR
# Simulate FontVal missing.
old_path = os.environ["PATH"]
os.environ["PATH"] = ""
with pytest.raises(OSError) as _:
status, message = list(check(font))[-1]
assert status == ERROR
os.environ["PATH"] = old_path
| 28.758621
| 87
| 0.732614
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 287
| 0.344125
|
f797a2004904bea8641ef96760d4f8b68d968963
| 3,662
|
py
|
Python
|
app/views.py
|
sinantan/TechRSS
|
f07d21b5553534ef6ecb6da6dc89524a8bbdb505
|
[
"MIT"
] | 3
|
2019-10-26T13:31:21.000Z
|
2020-02-26T20:46:35.000Z
|
app/views.py
|
sinantan/TechRSS
|
f07d21b5553534ef6ecb6da6dc89524a8bbdb505
|
[
"MIT"
] | null | null | null |
app/views.py
|
sinantan/TechRSS
|
f07d21b5553534ef6ecb6da6dc89524a8bbdb505
|
[
"MIT"
] | null | null | null |
from run import app
from functools import wraps
from flask import render_template,flash,redirect,logging,session,url_for,request
from .models.database import user_register, user_login, get_feed, get_user_info, update_feed, change_password
# User-login decorator; this structure is the same for all decorators.
def login_required(f): # usable on every page that requires the user to be logged in
@wraps(f)
def decorated_function(*args, **kwargs):
if "logged_in" in session:
return f(*args, **kwargs)
else:
flash("Lütfen giriş yap.","warning")
return redirect(url_for("auth"))
return decorated_function
def is_logged(f): # if the user is already logged in, they should not reach login and register
@wraps(f)
def decorated_function(*args, **kwargs):
if "logged_in" in session:
return redirect(url_for("index"))
else:
return f(*args, **kwargs)
return decorated_function
@app.route("/")
@login_required
def index():
topics = get_feed()
return render_template("index.html",topics = topics)
@app.route("/auth",methods=["GET","POST"])
@is_logged
def auth():
if request.method=="POST":
if request.form.get("login")=="Giriş Yap":
email = request.form["user_email"]
password = request.form["user_password"]
status = user_login(email,password)
if status==True:
return redirect(url_for("auth"))
elif status=="Yanlış şifre":
flash("Hatalı şifre girdiniz.","danger")
return redirect(url_for("auth"))
elif status=="Hesap yok":
flash("Böyle bir hesap yok.","warning")
return redirect(url_for("auth"))
elif request.form.get("register")=="Kayıt Ol":
username = request.form["user_name"]
email = request.form["user_email"]
password = request.form["user_password"]
if user_register(username,email,password):
flash("Başarıyla kayıt olundu! Giriş yapabilirsin.","success")
return redirect(url_for("auth"))
else:
flash("Bir hata meydana geldi.","warning")
return redirect(url_for("auth"))
else:
return render_template("auth.html")
@app.route("/settings",methods=["POST","GET"])
@login_required
def settings():
if request.method=="POST":
if request.form.get("save_feed_settings") != None:
webtekno_status = True if request.form.get("webtekno") != None else False #unchecked = None , checked = on
technopat_status = True if request.form.get("technopat") != None else False
shiftdelete_status = True if request.form.get("shiftdelete") != None else False
donanimhaber_status = True if request.form.get("donanimhaber") != None else False
chiptr_status = True if request.form.get("chiptr") != None else False
query_status= update_feed(webtekno_status,technopat_status,shiftdelete_status,donanimhaber_status,chiptr_status)
if query_status:
flash("Ayarlarınız kaydedildi.", "success")
else:
flash("Bir hata meydana geldi.","danger")
return redirect(url_for("settings"))
elif request.form.get("save_password_settings") != None:
old_password = request.form["oldpassword"]
new_password = request.form["newpassword"]
if change_password(old_password,new_password):
flash("Parola başarıyla değiştirildi.","success")
else:
flash("Parolaları kontrol ediniz.","warning")
return redirect(url_for("settings"))
else:
selected_websites,user_email = get_user_info()
return render_template("settings.html",selected_websites=selected_websites,user_email=user_email,all_websites=["webtekno","shiftdelete","chiptr","donanimhaber","technopat"])
@app.route("/logout")
def logout():
session.clear()
return redirect(url_for("auth"))
| 34.87619
| 176
| 0.706445
| 0
| 0
| 0
| 0
| 3,118
| 0.843158
| 0
| 0
| 1,080
| 0.29205
|
f797b24b3f7362f5aa140f0cf6036ab769afd566
| 4,671
|
py
|
Python
|
kubails/commands/service.py
|
DevinSit/kubails
|
b3b2f9487d815868f0fbe9fae649789a40b50ad8
|
[
"MIT"
] | 2
|
2019-05-28T00:26:52.000Z
|
2019-08-02T23:02:19.000Z
|
kubails/commands/service.py
|
DevinSit/kubails
|
b3b2f9487d815868f0fbe9fae649789a40b50ad8
|
[
"MIT"
] | 51
|
2019-12-23T04:34:40.000Z
|
2022-02-12T02:28:44.000Z
|
kubails/commands/service.py
|
DevinSit/kubails
|
b3b2f9487d815868f0fbe9fae649789a40b50ad8
|
[
"MIT"
] | 1
|
2019-09-11T20:12:18.000Z
|
2019-09-11T20:12:18.000Z
|
import click
import logging
import sys
from typing import Tuple
from kubails.commands import helpers
from kubails.services.config_store import ConfigStore
from kubails.services.service import Service
from kubails.resources.templates import SERVICE_TEMPLATES
from kubails.utils.command_helpers import log_command_args_factory
logger = logging.getLogger(__name__)
log_command_args = log_command_args_factory(logger, "Service '{}' args")
config_store = None
service_service = None
@click.group()
def service():
"""Manage the services for your project."""
global config_store
global service_service
config_store = ConfigStore()
service_service = Service()
@service.command()
@click.argument("service", nargs=-1)
@log_command_args
def start(service: Tuple[str]) -> None:
"""
Start up SERVICE locally.
If SERVICE is not specified, start all services.
"""
service_service.start(list(service))
@service.command()
@log_command_args
def destroy() -> None:
"""Teardown your local services."""
service_service.destroy()
@service.command()
@click.argument("service", nargs=-1)
@click.option("--tag")
@log_command_args
def lint(service: Tuple[str], tag: str) -> None:
"""
Lint SERVICE.
If SERVICE is not specified, lint all services.
"""
if not service_service.lint(list(service), tag):
sys.exit(1)
@service.command()
@click.argument("service", nargs=-1)
@click.option("--tag")
@log_command_args
def test(service: Tuple[str], tag: str) -> None:
"""
Test SERVICE.
If SERVICE is not specified, test all services.
"""
if not service_service.test(list(service), tag):
sys.exit(1)
@service.command()
@click.argument("service", nargs=-1)
@click.option("--tag")
@log_command_args
def ci(service: Tuple[str], tag: str) -> None:
"""
Run CI on SERVICE.
If SERVICE is not specified, run CI on all services.
"""
if not service_service.ci(list(service), tag):
sys.exit(1)
@service.command()
@click.argument("command", required=True)
@log_command_args
def make(command: str) -> None:
"""Execute a Make COMMAND on all your services."""
if not service_service.make(command):
sys.exit(1)
@service.command()
@click.option(
"--type", "service_type",
prompt=helpers.SERVICE_GENERATION_PROMPTS["without_index"]["service_type"],
type=click.Choice(SERVICE_TEMPLATES),
default=SERVICE_TEMPLATES[0],
help="The template to base the service off of."
)
@click.option(
"--subdomain",
prompt=helpers.SERVICE_GENERATION_PROMPTS["without_index"]["subdomain"],
default="",
help="The subdomain the service will have when deployed."
)
@click.option(
"--title",
prompt=helpers.SERVICE_GENERATION_PROMPTS["without_index"]["title"],
help="The title of the service."
)
@log_command_args
def generate(service_type: str, subdomain: str, title: str) -> None:
"""Generate a new service."""
helpers.generate_service(
service_service,
service_type=service_type,
subdomain=subdomain,
title=title,
)
@service.command()
@click.argument("service")
@click.option("--current-branch", default="master")
@log_command_args
def has_changed(current_branch: str, service: str) -> None:
"""Returns whether or not the given service has changed since the last build."""
if not config_store.is_changed_service(service, current_branch):
sys.exit(1)
############################################################
# Images sub-group
############################################################
@service.group()
def images():
"""Build and push Docker images for your services."""
pass
@images.command()
@click.argument("service", nargs=-1)
@click.option("--branch", help="The branch to tag the image with.")
@click.option("--commit", help="The commit to tag the image with.")
@log_command_args
def build(service: Tuple[str], branch: str, commit: str) -> None:
"""
Build the Docker image for SERVICE.
If SERVICE is not specified, build all services' Docker images.
"""
if not service_service.build(list(service), branch, commit):
sys.exit(1)
@images.command()
@click.argument("service", nargs=-1)
@click.option("--branch", help="The branch the image was tagged with.")
@click.option("--commit", help="The commit the image was tagged with.")
@log_command_args
def push(service: Tuple[str], branch: str, commit: str) -> None:
"""
Push the Docker image for SERVICE.
If SERVICE is not specified, push all services' Docker images.
"""
if not service_service.push(list(service), branch, commit):
sys.exit(1)
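# --- Hedged usage notes (not part of the kubails source) ---
# Assuming this group is wired into the `kubails` entry point, the commands
# defined above would be invoked roughly like (service names are placeholders):
#
#   kubails service start backend frontend
#   kubails service lint backend --tag latest
#   kubails service images build backend --branch master --commit abc1234
#
# Programmatically, click's test runner can drive the same group, e.g.
#   from click.testing import CliRunner
#   CliRunner().invoke(service, ["lint", "backend", "--tag", "latest"])
# though this still requires a kubails project config, since the group callback
# instantiates ConfigStore and Service.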
| 26.241573
| 84
| 0.676301
| 0
| 0
| 0
| 0
| 4,011
| 0.858703
| 0
| 0
| 1,587
| 0.339756
|
f797e5f31f0f4940006d8b4a1e545eb141db847d
| 10,703
|
py
|
Python
|
tests/sentry/integrations/cloudflare/test_webhook.py
|
jianyuan/sentry
|
ceb8389c54d29f80b27703bb76c3880d923a3a5a
|
[
"BSD-3-Clause"
] | 1
|
2017-10-18T19:40:14.000Z
|
2017-10-18T19:40:14.000Z
|
tests/sentry/integrations/cloudflare/test_webhook.py
|
Munyola/sentry
|
ab8923b2801d7d72d6903e0d9180584817bb1b9a
|
[
"BSD-3-Clause"
] | 1
|
2021-02-24T04:32:19.000Z
|
2021-02-24T04:32:19.000Z
|
tests/sentry/integrations/cloudflare/test_webhook.py
|
Munyola/sentry
|
ab8923b2801d7d72d6903e0d9180584817bb1b9a
|
[
"BSD-3-Clause"
] | 2
|
2021-01-26T09:53:39.000Z
|
2022-03-22T09:01:47.000Z
|
from __future__ import absolute_import
from hashlib import sha256
import hmac
import json
import six
from sentry import options
from sentry.models import ApiToken, ProjectKey
from sentry.testutils import TestCase
UNSET = object()
class BaseWebhookTest(TestCase):
def setUp(self):
super(BaseWebhookTest, self).setUp()
self.user = self.create_user(is_superuser=False)
self.org = self.create_organization(owner=None)
self.team = self.create_team(organization=self.org)
self.create_member(organization=self.org, user=self.user, role='owner', teams=[self.team])
self.project = self.create_project(name='a', team=self.team)
self.token = ApiToken.objects.create(
user=self.user,
token='55838c83b3ec4e3ebc24c10c7bd071ffb1dc91161d3d49aeaedd9bd35d84bbe2',
)
self.key = ProjectKey.objects.get_or_create(project=self.project)[0]
def post_webhook(self, data, signature=UNSET, variant=UNSET, key=None):
if key is None:
key = options.get('cloudflare.secret-key')
if not isinstance(data, six.string_types):
body = json.dumps(data)
else:
body = data
if signature is UNSET:
signature = hmac.new(
key=key.encode('utf-8'),
msg=body.encode('utf-8'),
digestmod=sha256,
).hexdigest()
if variant is UNSET:
variant = '1'
headers = {
'HTTP_X_SIGNATURE_HMAC_SHA256_HEX': signature,
'HTTP_X_SIGNATURE_KEY_VARIANT': variant,
}
return self.client.post(
'/extensions/cloudflare/webhook/',
body,
content_type='application/json',
**headers
)
class CloudflareWebhookTest(BaseWebhookTest):
def test_missing_signature(self):
resp = self.post_webhook(
{'event': 'test'},
signature=None,
)
assert resp.status_code == 400
def test_invalid_signature(self):
resp = self.post_webhook(
{'event': 'test'},
signature='a' * 40,
)
assert resp.status_code == 400
def test_invalid_json(self):
resp = self.post_webhook('a')
assert resp.status_code == 400
def test_missing_variant(self):
resp = self.post_webhook(
{'event': 'test'},
variant=None,
)
assert resp.status_code == 400
def test_invalid_variant(self):
resp = self.post_webhook(
{'event': 'test'},
variant='fizzbuz',
)
assert resp.status_code == 400
def test_invalid_signature_with_test_variant(self):
resp = self.post_webhook(
{'event': 'test'},
variant='test',
)
assert resp.status_code == 400
def test_invalid_app_id_test_variant(self):
resp = self.post_webhook(
{'event': 'test', 'app': {'id': 'buzz'}},
variant='test',
key='test-key',
)
assert resp.status_code == 400
def test_valid_test_variant(self):
resp = self.post_webhook(
{'event': 'test', 'app': {'id': 'local'}, 'install': {}},
variant='test',
key='test-key',
)
assert resp.status_code == 200
class PreviewWebhookTest(BaseWebhookTest):
def test_empty(self):
webhook_data = json.loads(self.load_fixture('cloudflare/preview-webhook.json'))
resp = self.post_webhook(webhook_data)
assert resp.status_code == 200, resp.content
assert resp.data == {
'install': webhook_data['install'],
'proceed': True,
}
def test_prefills_data(self):
webhook_data = json.loads(self.load_fixture(
'cloudflare/preview-webhook-authenticated.json'))
webhook_data['install']['options']['organization'] = six.text_type(self.org.id)
resp = self.post_webhook(data=webhook_data)
assert resp.status_code == 200, resp.content
assert resp.data['proceed']
assert resp.data['install']['schema']['properties']['organization']['enum'] == [
six.text_type(self.org.id)]
assert resp.data['install']['schema']['properties']['organization']['enumNames'] == {
six.text_type(self.org.id): self.org.slug,
}
assert resp.data['install']['options']['organization'] == six.text_type(self.org.id)
assert resp.data['install']['schema']['properties']['project']['enum'] == [
six.text_type(self.project.id)]
assert resp.data['install']['schema']['properties']['project']['enumNames'] == {
six.text_type(self.project.id): self.project.slug,
}
assert resp.data['install']['options']['project'] == six.text_type(self.project.id)
assert resp.data['install']['schema']['properties']['dsn']['enum'] == [
self.key.get_dsn(public=True)]
assert resp.data['install']['options']['dsn'] == six.text_type(
self.key.get_dsn(public=True))
def test_multiple_projects(self):
project2 = self.create_project(name='b', team=self.team)
webhook_data = json.loads(self.load_fixture(
'cloudflare/preview-webhook-authenticated.json'))
webhook_data['install']['options']['organization'] = six.text_type(self.org.id)
resp = self.post_webhook(webhook_data)
assert resp.status_code == 200, resp.content
assert resp.data['proceed']
assert resp.data['install']['schema']['properties']['organization']['enum'] == [
six.text_type(self.org.id)]
assert resp.data['install']['options']['organization'] == six.text_type(self.org.id)
assert resp.data['install']['schema']['properties']['project']['enum'] == [
six.text_type(self.project.id), six.text_type(project2.id)]
assert resp.data['install']['options']['project'] == six.text_type(self.project.id)
assert resp.data['install']['schema']['properties']['dsn']['enum'] == [
self.key.get_dsn(public=True)]
assert resp.data['install']['options']['dsn'] == six.text_type(
self.key.get_dsn(public=True))
def test_no_projects(self):
self.project.delete()
webhook_data = json.loads(self.load_fixture(
'cloudflare/preview-webhook-authenticated.json'))
webhook_data['install']['options']['organization'] = six.text_type(self.org.id)
resp = self.post_webhook(webhook_data)
assert resp.status_code == 200, resp.content
assert resp.data['proceed']
assert resp.data['install']['schema']['properties']['organization']['enum'] == [
six.text_type(self.org.id)]
assert resp.data['install']['options']['organization'] == six.text_type(self.org.id)
assert resp.data['install']['schema']['properties']['project']['enum'] == []
assert 'dsn' not in resp.data['install']['schema']['properties']
class OptionChangeAccountWebhookTest(BaseWebhookTest):
def test_without_authentication(self):
webhook_data = json.loads(self.load_fixture(
'cloudflare/option-change-account-webhook.json'))
del webhook_data['authentications']
resp = self.post_webhook(webhook_data)
assert resp.status_code == 401, resp.content
def test_prefills_data(self):
webhook_data = json.loads(self.load_fixture(
'cloudflare/option-change-account-webhook.json'))
resp = self.post_webhook(webhook_data)
assert resp.status_code == 200, resp.content
assert resp.data['proceed']
assert resp.data['install']['schema']['properties']['organization']['enum'] == [
six.text_type(self.org.id)]
assert resp.data['install']['options']['organization'] == six.text_type(self.org.id)
assert resp.data['install']['schema']['properties']['project']['enum'] == [
six.text_type(self.project.id)]
assert resp.data['install']['options']['project'] == six.text_type(self.project.id)
assert resp.data['install']['schema']['properties']['dsn']['enum'] == [
self.key.get_dsn(public=True)]
assert resp.data['install']['options']['dsn'] == six.text_type(
self.key.get_dsn(public=True))
def test_with_invalid_organization_selected(self):
webhook_data = json.loads(self.load_fixture(
'cloudflare/option-change-account-webhook.json'))
webhook_data['install']['options']['organization'] = -1
resp = self.post_webhook(webhook_data)
assert resp.status_code == 200, resp.content
assert resp.data['proceed']
assert resp.data['install']['schema']['properties']['organization']['enum'] == [
six.text_type(self.org.id)]
assert resp.data['install']['options']['organization'] == six.text_type(self.org.id)
assert resp.data['install']['schema']['properties']['project']['enum'] == [
six.text_type(self.project.id)]
assert resp.data['install']['options']['project'] == six.text_type(self.project.id)
assert resp.data['install']['schema']['properties']['dsn']['enum'] == [
self.key.get_dsn(public=True)]
assert resp.data['install']['options']['dsn'] == six.text_type(
self.key.get_dsn(public=True))
def test_with_existing_project_selected_and_no_keys(self):
project2 = self.create_project(name='b', team=self.team)
# kill the automatically generated keys
ProjectKey.objects.filter(project=project2).delete()
webhook_data = json.loads(self.load_fixture(
'cloudflare/option-change-account-webhook.json'))
webhook_data['install']['options']['organization'] = six.text_type(self.org.id)
webhook_data['install']['options']['project'] = six.text_type(project2.id)
resp = self.post_webhook(webhook_data)
assert resp.status_code == 200, resp.content
assert resp.data['proceed']
assert resp.data['install']['schema']['properties']['organization']['enum'] == [
six.text_type(self.org.id)]
assert resp.data['install']['options']['organization'] == six.text_type(self.org.id)
assert resp.data['install']['schema']['properties']['project']['enum'] == [
six.text_type(self.project.id), six.text_type(project2.id)]
assert resp.data['install']['options']['project'] == six.text_type(project2.id)
assert resp.data['install']['schema']['properties']['dsn']['enum'] == []
assert 'dsn' not in resp.data['install']['options']
| 41.484496
| 98
| 0.613566
| 10,458
| 0.977109
| 0
| 0
| 0
| 0
| 0
| 0
| 2,444
| 0.228347
|
f79944d2cfc0c1247874648a3a289225bce5b0b8
| 669
|
py
|
Python
|
teacher_files/ia_fopera/version sockets (unix only) d'H. Roussille/neurones.py
|
zomboyd/epi-ml
|
383c28f27e4fdef715e94d1a0e0cd24afe368f86
|
[
"MIT"
] | null | null | null |
teacher_files/ia_fopera/version sockets (unix only) d'H. Roussille/neurones.py
|
zomboyd/epi-ml
|
383c28f27e4fdef715e94d1a0e0cd24afe368f86
|
[
"MIT"
] | null | null | null |
teacher_files/ia_fopera/version sockets (unix only) d'H. Roussille/neurones.py
|
zomboyd/epi-ml
|
383c28f27e4fdef715e94d1a0e0cd24afe368f86
|
[
"MIT"
] | null | null | null |
from math import exp,sqrt
from random import randrange
class neurone:
def __init__(self,a,b):
self.a=a
self.b=b
def proceed(self,z):
t = z[0]*self.a + z[1]*self.b
return 1/(1+exp(-t))
n = 100
X_app = [(randrange(-500,501)/1000,randrange(-500,501)/1000) for i in range(n)]
Y_app = [1 if ((x[0]-0.3)+(x[1]-0.3))<0.2 else 0 for x in X_app]
a=1
Y_pred,Y_score = [None for i in range(1001)], [None for i in range(1001)]
for i in range(1001):
b=i/1000*4-1
ne = neurone(a,b)
Y_pred[i] = [ne.proceed(z) for z in X_app]
Y_score[i] = sum([abs(Y_pred[i][j]-Y_app[j]) for j in range(n)])
opt = min(Y_score)
print(Y_score)
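# --- Hedged addition (not in the original script) ---
# The sweep above records the total absolute error for each candidate b but
# only prints the raw scores; the b that minimises the error can be recovered
# from the index of `opt`, using the same mapping as in the loop (b = i/1000*4-1).
i_opt = Y_score.index(opt)
b_opt = i_opt / 1000 * 4 - 1
print('best b:', b_opt, 'score:', opt)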
| 27.875
| 79
| 0.600897
| 168
| 0.251121
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
f799652a7ef1e45432c147799e5cb274b7f73f41
| 29,113
|
py
|
Python
|
CarParkArcGisApi/CarParkArcGisApi/env/Lib/site-packages/arcgis/apps/storymap/storymap.py
|
moazzamwaheed2017/carparkapi
|
e52ae1b2aed47321ce9d22ba6cd0b85fa60a417a
|
[
"MIT"
] | null | null | null |
CarParkArcGisApi/CarParkArcGisApi/env/Lib/site-packages/arcgis/apps/storymap/storymap.py
|
moazzamwaheed2017/carparkapi
|
e52ae1b2aed47321ce9d22ba6cd0b85fa60a417a
|
[
"MIT"
] | 9
|
2020-02-03T15:50:10.000Z
|
2022-03-02T07:11:34.000Z
|
CarParkArcGisApi/CarParkArcGisApi/env/Lib/site-packages/arcgis/apps/storymap/storymap.py
|
moazzamwaheed2017/carparkapi
|
e52ae1b2aed47321ce9d22ba6cd0b85fa60a417a
|
[
"MIT"
] | null | null | null |
import json
import datetime
import mimetypes
from urllib.parse import urlparse
from arcgis import env
from arcgis.gis import GIS
from arcgis.gis import Item
from ._ref import reference
class JournalStoryMap(object):
"""
Represents a Journal Story Map
=============== ====================================================================
**Argument** **Description**
--------------- --------------------------------------------------------------------
item Optional Item. The storymap item.
--------------- --------------------------------------------------------------------
gis Optional GIS. The connection to the Enterprise.
=============== ====================================================================
"""
_properties = None
_gis = None
_itemid = None
_item = None
def __init__(self, item=None, gis=None):
"""initializer"""
if gis is None:
self._gis = env.active_gis
else:
self._gis = gis
if item and isinstance(item, str):
self._item = gis.content.get(item)
self._itemid = self._item.itemid
self._properties = self._item.get_data()
elif item and isinstance(item, Item) and \
'MapJournal' in item.typeKeywords:
self._item = item
self._itemid = self._item.itemid
self._properties = self._item.get_data()
elif item and isinstance(item, Item) and \
'MapJournal' not in item.typeKeywords:
raise ValueError("Item is not a Journal Story Map")
else:
self._properties = reference['journal']
#----------------------------------------------------------------------
def __str__(self):
return json.dumps(self._properties)
#----------------------------------------------------------------------
def __repr__(self):
return self.__str__()
#----------------------------------------------------------------------
def _refresh(self):
if self._item:
self._properties = json.loads(self._item.get_data())
#----------------------------------------------------------------------
@property
def properties(self):
"""returns the storymap's JSON"""
return self._properties
#----------------------------------------------------------------------
def add(self, title,
url_or_item, content=None,
actions=None, visible=True,
alt_text="", display='stretch',
**kwargs):
"""
Adds a new section to the StoryMap
=============== ====================================================================
**Argument** **Description**
--------------- --------------------------------------------------------------------
title Required string. The title of the section.
--------------- --------------------------------------------------------------------
url_or_item Required string/Item. The web address to the resource or a Web Map
item.
--------------- --------------------------------------------------------------------
content Optional string. The content of the section.
--------------- --------------------------------------------------------------------
actions Optional list. A collection of actions performed on the section
--------------- --------------------------------------------------------------------
visible Optional boolean. If True, the section is visible on publish. If
False, the section is not displayed.
--------------- --------------------------------------------------------------------
alt_text Optional string. Specifies an alternate text for an image.
--------------- --------------------------------------------------------------------
display Optional string. The image display properties.
=============== ====================================================================
**WebMap Options**
==================== ====================================================================
**Argument** **Description**
-------------------- --------------------------------------------------------------------
show_legend Optional boolean. If True, the legend will be visible.
-------------------- --------------------------------------------------------------------
show_default_legend Optional boolean. Shows the legend on default.
-------------------- --------------------------------------------------------------------
extent Optional dict/Envelope. The extent of the webmap.
-------------------- --------------------------------------------------------------------
layer_visibility Optional list. The visibility of the layers in a webmap. This is a
list of dictionaries where the syntax is as follows:
Syntax:
[
{
"id" : "<id>",
"visibility" : "<true/false>"
}
]
Example:
[
{
"id" : "csv_6005_0",
"visibility" : False,
},
{
"id" : "csv_6006_0",
"visibility" : True,
}
]
-------------------- --------------------------------------------------------------------
popup Optional dict. The popup definition for the webmap.
==================== ====================================================================
:return: Boolean
"""
if isinstance(url_or_item, Item):
show_legend = kwargs.pop("show_legend", False)
show_default_legend = kwargs.pop("show_default_legend", False)
extent = kwargs.pop("extent", None)
layer_visibility = kwargs.pop("layer_visibility", None)
popup = kwargs.pop("popup", None)
if layer_visibility:
layer_visibility = json.dumps(layer_visibility)
return self._add_webmap(item=url_or_item, title=title, content=content,
actions=actions, visible=visible, alt_text=alt_text,
display=display,
show_legend=show_legend,
show_default_legend=show_default_legend,
extent=extent,
layer_visibility=layer_visibility,
popup=popup)
elif isinstance(url_or_item, str):
mt = mimetypes.guess_type(url=url_or_item)
if mt[0].lower().find('video') > -1:
return self._add_video(url=url_or_item,
title=title,
content=content,
actions=actions,
visible=visible,
alt_text=alt_text,
display=display)
elif mt[0].lower().find('image') > -1:
return self._add_image(title=title, image=url_or_item,
content=content, actions=actions, visible=visible,
alt_text=alt_text, display=display)
else:
return self._add_webpage(title=title, url=url_or_item,
content=content, actions=actions, visible=visible,
alt_text=alt_text, display=display)
return False
#----------------------------------------------------------------------
def _add_webpage(self,
title,
url,
content=None,
actions=None,
visible=True,
alt_text="",
display='stretch'):
"""
Adds a webpage to the storymap
=============== ====================================================================
**Argument** **Description**
--------------- --------------------------------------------------------------------
title Required string. The title of the section.
--------------- --------------------------------------------------------------------
url Required string. The web address of the webpage
--------------- --------------------------------------------------------------------
content Optional string. The content of the section.
--------------- --------------------------------------------------------------------
actions Optional list. A collection of actions performed on the section
--------------- --------------------------------------------------------------------
visible Optional boolean. If True, the section is visible on publish. If
False, the section is not displayed.
--------------- --------------------------------------------------------------------
alt_text Optional string. Specifies an alternate text for an image.
--------------- --------------------------------------------------------------------
display Optional string. The image display properties.
=============== ====================================================================
:return: Boolean
"""
if actions is None:
actions = []
if visible:
visible = "PUBLISHED"
else:
visible = "HIDDEN"
self._properties['values']['story']['sections'].append(
{
"title": title,
"content": content,
"contentActions": actions,
"creaDate": int(datetime.datetime.now().timestamp() * 1000),
"pubDate": int(datetime.datetime.now().timestamp() * 1000),
"status": visible,
"media": {
"type": "webpage",
"webpage": {
"url": url,
"type": "webpage",
"altText": alt_text,
"display": display,
"unload": True,
"hash": "5"
}
}
}
)
return True
#----------------------------------------------------------------------
def _add_video(self,
url,
title,
content,
actions=None,
visible=True,
alt_text="",
display='stretch'
):
"""
Adds a video section to the StoryMap.
=============== ====================================================================
**Argument** **Description**
--------------- --------------------------------------------------------------------
title Required string. The title of the section.
--------------- --------------------------------------------------------------------
        url                Required string. The web address of the video
--------------- --------------------------------------------------------------------
content Optional string. The content of the section.
--------------- --------------------------------------------------------------------
actions Optional list. A collection of actions performed on the section
--------------- --------------------------------------------------------------------
visible Optional boolean. If True, the section is visible on publish. If
False, the section is not displayed.
--------------- --------------------------------------------------------------------
alt_text Optional string. Specifies an alternate text for an image.
--------------- --------------------------------------------------------------------
display Optional string. The image display properties.
=============== ====================================================================
:return: Boolean
"""
if actions is None:
actions = []
if visible:
visible = "PUBLISHED"
else:
visible = "HIDDEN"
video = {
"title": title,
"content": content,
"contentActions": actions,
"creaDate": 1523450612336,
"pubDate": 1523450580000,
"status": visible,
"media": {
"type": "video",
"video": {
"url": url,
"type": "video",
"altText": alt_text,
"display": display
}
}
}
self._properties['values']['story']['sections'].append(video)
return True
#----------------------------------------------------------------------
def _add_webmap(self,
item,
title,
content,
actions=None,
visible=True,
alt_text="",
display='stretch',
show_legend=False,
show_default_legend=False,
extent=None,
layer_visibility=None,
popup=None
):
"""
Adds a WebMap to the Section.
=============== ====================================================================
**Argument** **Description**
--------------- --------------------------------------------------------------------
item Required string/Item. The webmap Item Id or Item of a webmap.
--------------- --------------------------------------------------------------------
title Required string. The title of the section.
--------------- --------------------------------------------------------------------
content Optional string. The content of the section.
--------------- --------------------------------------------------------------------
actions Optional list. A collection of actions performed on the section
--------------- --------------------------------------------------------------------
visible Optional boolean. If True, the section is visible on publish. If
False, the section is not displayed.
--------------- --------------------------------------------------------------------
alt_text Optional string. Specifies an alternate text for an image.
--------------- --------------------------------------------------------------------
display Optional string. The image display properties.
=============== ====================================================================
:return: Boolean
"""
if isinstance(item, Item):
item = item.itemid
if actions is None:
actions = []
if visible:
visible = "PUBLISHED"
else:
visible = "HIDDEN"
wm = {
"title": title,
"content": content,
"contentActions": actions,
"creaDate": int(datetime.datetime.now().timestamp() * 1000),
"pubDate": int(datetime.datetime.now().timestamp() * 1000),
"status": visible,
"media": {
"type": "webmap",
"webmap": {
"id": item,
"extent": extent,
"layers": layer_visibility,
"popup": popup,
"overview": {
"enable": False,
"openByDefault": True
},
"legend": {
"enable": show_legend,
"openByDefault": show_default_legend
},
"geocoder": {
"enable": False
},
"altText": alt_text
}
}
}
self._properties['values']['story']['sections'].append(wm)
return True
#----------------------------------------------------------------------
def _add_image(self,
title,
image,
content=None,
actions=None,
visible=True,
alt_text=None,
display='fill'):
"""
Adds a new image section to the storymap
=============== ====================================================================
**Argument** **Description**
--------------- --------------------------------------------------------------------
title Required string. The title of the section.
--------------- --------------------------------------------------------------------
        image              Required string. The web address of the image
--------------- --------------------------------------------------------------------
content Optional string. The content of the section.
--------------- --------------------------------------------------------------------
actions Optional list. A collection of actions performed on the section
--------------- --------------------------------------------------------------------
visible Optional boolean. If True, the section is visible on publish. If
False, the section is not displayed.
--------------- --------------------------------------------------------------------
alt_text Optional string. Specifies an alternate text for an image.
--------------- --------------------------------------------------------------------
display Optional string. The image display properties.
=============== ====================================================================
:return: Boolean
"""
if actions is None:
actions = []
if visible:
visible = "PUBLISHED"
else:
visible = "HIDDEN"
self._properties['values']['story']['sections'].append(
{
"title": title,
"content": content,
"contentActions": actions,
"creaDate": int(datetime.datetime.now().timestamp() * 1000),
"pubDate": int(datetime.datetime.now().timestamp() * 1000),
"status": visible,
"media": {
"type": "image",
"image": {
"url": image,
"type": "image",
"altText": alt_text,
"display": display
}
}
}
)
return True
#----------------------------------------------------------------------
def remove(self, index):
"""
Removes a section by index.
=============== ====================================================================
**Argument** **Description**
--------------- --------------------------------------------------------------------
index Required integer. The position of the section to remove.
=============== ====================================================================
:return: Boolean
"""
try:
item = self._properties['values']['story']['sections'][index]
self._properties['values']['story']['sections'].remove(item)
return True
except:
return False
#----------------------------------------------------------------------
def save(self, title=None, tags=None, description=None):
"""
Saves an Journal StoryMap to the GIS
=============== ====================================================================
**Argument** **Description**
--------------- --------------------------------------------------------------------
title Optional string. The title of the StoryMap.
--------------- --------------------------------------------------------------------
tags Optional string. The tags of the StoryMap.
--------------- --------------------------------------------------------------------
description Optional string. The description of the StoryMap
=============== ====================================================================
:return: Boolean
"""
import uuid
if self._item:
p = {
'text' : json.dumps(self._properties)
}
if title:
p['title'] = title
if tags:
p['tags'] = tags
return self._item.update(item_properties=p)
else:
if title is None:
title = "Map Journal, %s" % uuid.uuid4().hex[:10]
if tags is None:
tags = "Story Map,Map Journal"
typeKeywords = ",".join(['JavaScript', 'layout-side', 'Map', 'MapJournal',
'Mapping Site', 'Online Map', 'Ready To Use',
'selfConfigured', 'Story Map', 'Story Maps',
'Web Map'])
item = self._gis.content.add(item_properties={
'title' : title,
'tags' : tags,
'text' : json.dumps(self._properties),
'typeKeywords' : typeKeywords,
'itemType' : 'text',
'type' : "Web Mapping Application",
})
parse = urlparse(self._gis._con.baseurl)
isinstance(self._gis, GIS)
if self._gis._portal.is_arcgisonline:
url = "%s://%s/apps/MapJournal/index.html?appid=%s" % (parse.scheme, parse.netloc, item.itemid)
else:
import os
wa = os.path.dirname(parse.path[1:])
url = "%s://%s/%s/sharing/rest/apps/MapJournal/index.html?appid=%s" % (parse.scheme, parse.netloc, wa, item.itemid)
return item.update(item_properties={
'url' : url
})
return False
#----------------------------------------------------------------------
def delete(self):
"""Deletes the saved item on ArcGIS Online/Portal"""
if self._item:
return self._item.delete()
return False
#----------------------------------------------------------------------
@property
def panel(self):
"""
Gets/Sets the panel state for the Journal Story Map
"""
return self._properties["values"]["settings"]["layout"]["id"]
#----------------------------------------------------------------------
@panel.setter
def panel(self, value):
"""
Gets/Sets the panel state for the Journal Story Map
"""
if value.lower() == "float":
self._properties["values"]["settings"]["layout"]["id"] = "float"
else:
self._properties["values"]["settings"]["layout"]["id"] = "side"
#----------------------------------------------------------------------
@property
def header(self):
"""gets/sets the headers for the Journal StoryMap"""
default = {
"social": {
"bitly": True,
"twitter": True,
"facebook": True
},
"logoURL": None,
"linkURL": "https://storymaps.arcgis.com",
"logoTarget": "",
"linkText": "A Story Map"
}
if 'header' in self._properties['values']['settings']:
return self._properties['values']['settings']['header']
else:
self._properties['values']['settings']['header'] = default
return default
#----------------------------------------------------------------------
@header.setter
def header(self, value):
""""""
if value is None:
default = {
"social": {
"bitly": True,
"twitter": True,
"facebook": True
},
"logoURL": None,
"linkURL": "https://storymaps.arcgis.com",
"logoTarget": "",
"linkText": "A Story Map"
}
self._properties['values']['settings']['header'] = default
else:
self._properties['values']['settings']['header'] = value
#----------------------------------------------------------------------
@property
def theme(self):
""""""
default = {
"colors": {
"text": "#FFFFFF",
"name": "float-default-1",
"softText": "#FFF",
"media": "#a0a0a0",
"themeMajor": "black",
"panel": "#000000",
"textLink": "#DDD",
"esriLogo": "white",
"dotNav": "#000000",
"softBtn": "#AAA"
},
"fonts": {
"sectionTitle": {
"value": "font-family:\'open_sansregular\', sans-serif;",
"id": "default"
},
"sectionContent": {
"value": "font-family:\'open_sansregular\', sans-serif;",
"id": "default"
}
}
}
if 'theme' in self._properties['values']['settings']:
return self._properties['values']['settings']['theme']
else:
self._properties['values']['settings']['theme'] = default
return self._properties['values']['settings']['theme']
#----------------------------------------------------------------------
@theme.setter
def theme(self, value):
""""""
default = {
"colors": {
"text": "#FFFFFF",
"name": "float-default-1",
"softText": "#FFF",
"media": "#a0a0a0",
"themeMajor": "black",
"panel": "#000000",
"textLink": "#DDD",
"esriLogo": "white",
"dotNav": "#000000",
"softBtn": "#AAA"
},
"fonts": {
"sectionTitle": {
"value": "font-family:\'open_sansregular\', sans-serif;",
"id": "default"
},
"sectionContent": {
"value": "font-family:\'open_sansregular\', sans-serif;",
"id": "default"
}
}
}
        if value is None:
            self._properties['values']['settings']['theme'] = default
        else:
            self._properties['values']['settings']['theme'] = value
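# --- Illustrative sketch (not part of the arcgis source) ---
# Assuming an authenticated `gis` connection, a Journal Story Map could be
# assembled with the API above roughly like this; the item id, URL and texts
# are placeholders.
def _example_build_journal(gis):
    journal = JournalStoryMap(gis=gis)
    journal.panel = 'float'
    # a URL with an image mime type is added as an image section
    journal.add('Overview', 'https://example.com/cover.png',
                content='<p>Introductory text.</p>')
    # a webmap Item is added as a webmap section; show_legend is an optional kwarg
    journal.add('Study area', gis.content.get('<webmap item id>'),
                content='<p>Interactive map.</p>', show_legend=True)
    return journal.save(title='Example Map Journal', tags='Story Map')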
| 44.177542
| 131
| 0.334352
| 28,921
| 0.993405
| 0
| 0
| 4,287
| 0.147254
| 0
| 0
| 16,861
| 0.579157
|
f799698de0ff8776338f8a1ec460edf6e103c58f
| 703
|
py
|
Python
|
tests/test_core.py
|
emauton/aoc2015
|
f321571b623a0e7acaa173be57506e64bd32765f
|
[
"MIT"
] | null | null | null |
tests/test_core.py
|
emauton/aoc2015
|
f321571b623a0e7acaa173be57506e64bd32765f
|
[
"MIT"
] | null | null | null |
tests/test_core.py
|
emauton/aoc2015
|
f321571b623a0e7acaa173be57506e64bd32765f
|
[
"MIT"
] | null | null | null |
from aoc2015.core import dispatch
def test_dispatch_fail(capsys):
'''Dispatch fails properly when passed a bad day'''
# capsys is a pytest fixture that allows asserts agains stdout/stderr
# https://docs.pytest.org/en/stable/capture.html
dispatch(['204'])
captured = capsys.readouterr()
assert 'No module named aoc2015.day204' in captured.out
def test_dispatch_day0(capsys):
'''Dispatch to "template" day0 module works'''
# capsys is a pytest fixture that allows asserts agains stdout/stderr
# https://docs.pytest.org/en/stable/capture.html
dispatch(['0', 'arg1', 'arg2'])
captured = capsys.readouterr()
assert "day0: ['arg1', 'arg2']" in captured.out
| 35.15
| 73
| 0.702703
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 407
| 0.578947
|
f79a3d652453d780701f42332b40981d8f1da3a1
| 2,749
|
py
|
Python
|
bin/pannzer/operators/output_DE.py
|
nestorzaburannyi/annotate
|
e175226504efef811d4ac3914f2ab342968edf98
|
[
"MIT"
] | 1
|
2021-11-26T17:29:56.000Z
|
2021-11-26T17:29:56.000Z
|
bin/pannzer/operators/output_DE.py
|
nestorzaburannyi/annotate
|
e175226504efef811d4ac3914f2ab342968edf98
|
[
"MIT"
] | 1
|
2020-03-19T21:12:23.000Z
|
2020-03-19T21:12:23.000Z
|
bin/pannzer/operators/output_DE.py
|
nestorzaburannyi/annotate
|
e175226504efef811d4ac3914f2ab342968edf98
|
[
"MIT"
] | null | null | null |
from myoperator import BlockOperator
import re
class output_DE(BlockOperator):
"""
Select one line per DE-cluster with the best quality description.
Creates cluster_data column 'desc','genename'
Inputs: data columns 'clusid','desc','FF','status'
cluster_data column 'clusid','genename'
"""
def __init__(self, glob):
[self.data,self.cluster_data]=glob.use_sheets(["data","cluster"])
[self.clusid_col1,self.desc_col1,self.qual_col,self.status_col,self.genename_col1]=self.data.use_columns(['clusid','desc','FF','DE_status',"genename"])
[self.desc_col2,self.genename_col2,self.clusid_col2]=self.cluster_data.use_columns(["desc","genename","clusid"])
self.MAXHITS=glob.param['PANZ_MAXHITS']
def process(self,block):
# remember RM2, best FF description per cluster
desc={}
bestqual={}
for row in block:
clusid=row[self.clusid_col1]
if not clusid in desc:
bestqual[clusid]=0.0
desc[clusid]=''
if row[self.status_col]=="False": continue
qual=float(row[self.qual_col])
if qual > bestqual[clusid]:
bestqual[clusid]=qual
desc[clusid]=row[self.desc_col1]
# gene names by majority vote
gncnt={}
totcnt=0.0
maxcnt=0
maxgn=""
for row in block:
if row[self.status_col]=="False": continue
gn=row[self.genename_col1]
if gn=="": continue
# exclude gene symbols with underscore
if re.search(r'\w+_\w+',gn): continue
if not gn in gncnt: gncnt[gn]=0
gncnt[gn]+=1
totcnt+=1.0
if totcnt>=self.MAXHITS: break
for gn in gncnt.keys():
if gncnt[gn]>maxcnt:
maxcnt=gncnt[gn]
maxgn=gn
if maxcnt/(1.0+totcnt) <= 0.5: maxgn="" # require majority
# save in cluster_data
for row in self.cluster_data.block:
clusid=row[self.clusid_col2]
if not clusid in desc: desc[clusid]=""
row[self.desc_col2]=desc[clusid]
row[self.genename_col2]=maxgn # copy winner to every cluster
| 45.816667
| 167
| 0.476901
| 2,699
| 0.981812
| 0
| 0
| 0
| 0
| 0
| 0
| 569
| 0.206984
|
f79b68b39e1d3fc6804f9e60df51a84aec79e5e5
| 6,016
|
py
|
Python
|
Utility.py
|
psarkozy/HWTester
|
2553398f4ac8645a897b4f41bd36a21d54d2b177
|
[
"MIT"
] | null | null | null |
Utility.py
|
psarkozy/HWTester
|
2553398f4ac8645a897b4f41bd36a21d54d2b177
|
[
"MIT"
] | null | null | null |
Utility.py
|
psarkozy/HWTester
|
2553398f4ac8645a897b4f41bd36a21d54d2b177
|
[
"MIT"
] | 2
|
2019-11-11T12:44:17.000Z
|
2020-11-20T11:08:53.000Z
|
import os
from StringIO import StringIO
from zipfile import ZipFile
import subprocess
import shutil
import fcntl
import time
import signal
import imp
import sys,traceback
def dir_clean_error(function,path,excinfo):
print 'WARNING: Ran into issues trying to remove directory:',path,str(function),str(excinfo)
def clean_dir(target_dir):
if os.path.exists(target_dir):
shutil.rmtree(target_dir, ignore_errors=True,onerror= dir_clean_error)
def unzip(data, target_dir, filename=None):
submission_zipfile = ZipFile(StringIO(data))
if not os.path.exists(target_dir):
os.makedirs(target_dir)
if filename:
submission_zipfile.extract(filename,target_dir)
else:
submission_zipfile.extractall(target_dir)
def magic_quote_splitter(params):
# hftester -param "hehehe" -v "path to file here":/usr/src/myapp -
out = []
inquotes = False
for i,param in enumerate(params.split()):
if inquotes:
out[-1]+=' ' +str(param)
else:
out+=[str(param)]
for c in param:
if c == '"' or c =='\'':
inquotes = not inquotes
print 'magic_quote_splitter: ',out
return out
def run(command_with_arguments, input, timeout = 5.0, dockercleanup = False):
timeout = 60
if dockercleanup:
cleanup_cmd = "docker rm -f hftester"
print "Running docker cleanup:",cleanup_cmd
os.system(cleanup_cmd)
pipe_buffer_size = 4096
if len(input) > pipe_buffer_size:
stdin_buffer_file = open('stdin_buffer_file.tmp','w')
stdin_buffer_file.write(input)
stdin_buffer_file.close()
stdin_buffer_file = open('stdin_buffer_file.tmp')
sp = subprocess.Popen(magic_quote_splitter(command_with_arguments), stdin=stdin_buffer_file, stdout=subprocess.PIPE, stderr=subprocess.PIPE, bufsize=pipe_buffer_size, preexec_fn=os.setsid, universal_newlines = True)
else:
sp = subprocess.Popen(magic_quote_splitter(command_with_arguments), stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, bufsize=pipe_buffer_size, preexec_fn=os.setsid)
starttime = time.clock()
file_flags = fcntl.fcntl(sp.stdout.fileno(), fcntl.F_GETFL)
fcntl.fcntl(sp.stdout.fileno(), fcntl.F_SETFL, file_flags | os.O_NDELAY)
file_flags = fcntl.fcntl(sp.stderr.fileno(), fcntl.F_GETFL)
fcntl.fcntl(sp.stderr.fileno(), fcntl.F_SETFL, file_flags | os.O_NDELAY)
extraerrList = []
stdoutList = []
stderrList = []
linecount = 0
try:
#for line in input.split('\n'):
# print linecount,line
if (len(input) <= pipe_buffer_size):
sp.stdin.write(input)
sp.stdin.close()
#time.sleep(1)
#sp.stdin.flush()
totalOutput = 0
while totalOutput < 4096 * 1024 and sp.poll() is None and time.clock() - starttime < timeout:
try:
r = sp.stdout.read()
totalOutput = totalOutput + len(r)
stdoutList.append(r)
except IOError:
pass
except Exception, e:
print 'stdout:',sys.exc_info()
pass
try:
r = sp.stderr.read()
totalOutput = totalOutput + len(r)
stderrList.append(r)
except IOError:
pass
except Exception, e:
print 'stderr:',sys.exc_info()
pass
if sp.poll() is None:
if totalOutput >= 4096 * 1024:
extraerrList.append("Too much output data received, killing process!\n")
if time.clock() - starttime >= timeout - 0.5 :
print "Process killed because of timeout"
extraerrList.append("Maximum allowed time exceeded, killing process! First 10000 chars of input was: [%s]\n"%(input[0:min(10000,len(input))]))
os.killpg(os.getpgid(sp.pid), signal.SIGTERM)
os.system("sudo docker stop hftester")
#sp.kill()
#except ValueError:
#pass
except Exception, e:
print sys.exc_info()
extraerrList.append("Error:"+str(e))
joined_extraerrors = '\n'.join(extraerrList)
print 'extraerrList:',joined_extraerrors[0:min(200,len(joined_extraerrors))]
#raise e
joined_extraerrors = '\n'.join(extraerrList)
if len(stderrList)>0 or len(extraerrList)>0:
stderrList = list(filter(lambda x: "read unix /var/run/docker.sock: connection reset by peer" not in x, stderrList))
        #for line in stderrList:
        print "Finished running command stderr :", "".join(stderrList)," extraerr:", joined_extraerrors[0:min(200,len(joined_extraerrors))]
#sp.communicate(input=input)
return ("".join(stdoutList), "".join(stderrList), "".join(extraerrList))
def run_firejail(command_with_arguments, input, firejail_profile_file=None, timeout = 5.0):
params = ["firejail", "--quiet"]
if firejail_profile_file:
params.append("--profile=%s" % firejail_profile_file)
params.extend(command_with_arguments.split())
return run(" ".join(params), input=input, timeout=timeout)
def run_python_docker(python_file_path, input, firejail_profile_file=None, timeout = 5.0):
pydir, sep, pyfilename = python_file_path.rpartition(os.sep)
cmd = 'docker run -i --rm -m 400M --memory-swap -1 --ulimit cpu=%d --name hftester -v %s:/usr/src/myapp -w /usr/src/myapp python:3-alpine python %s'%(timeout, pydir, pyfilename)
print 'Running python docker command',cmd
#return None
return run(cmd, input=input,timeout = timeout,dockercleanup = True)
def get_class(classpath):
if not classpath.endswith(".py"):
classpath = classpath + ".py"
modname = os.path.basename(classpath).replace(".py", "")
mod = evaluatormod = imp.load_source(modname, classpath)
clazz = getattr(mod, modname)
return clazz
| 39.064935
| 223
| 0.636137
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,038
| 0.17254
|
f79bf4e8cdd9d2e6fe7f0243351b84e61c125647
| 1,432
|
py
|
Python
|
wagtailsharing/tests/test_urls.py
|
mikiec84/wagtail-sharing
|
e3c338dae3327d955f058b5eb2f311d4dc0cbbf7
|
[
"CC0-1.0"
] | 1
|
2019-02-25T21:56:56.000Z
|
2019-02-25T21:56:56.000Z
|
wagtailsharing/tests/test_urls.py
|
mikiec84/wagtail-sharing
|
e3c338dae3327d955f058b5eb2f311d4dc0cbbf7
|
[
"CC0-1.0"
] | null | null | null |
wagtailsharing/tests/test_urls.py
|
mikiec84/wagtail-sharing
|
e3c338dae3327d955f058b5eb2f311d4dc0cbbf7
|
[
"CC0-1.0"
] | null | null | null |
from __future__ import absolute_import, unicode_literals
try:
from importlib import reload
except ImportError:
pass
from django.conf.urls import url
from django.test import TestCase
from mock import patch
try:
import wagtail.core.urls as wagtail_core_urls
except ImportError: # pragma: no cover; fallback for Wagtail <2.0
import wagtail.wagtailcore.urls as wagtail_core_urls
import wagtailsharing.urls
class TestUrlPatterns(TestCase):
def setUp(self):
def test_view():
pass # pragma: no cover
root_patterns = [
            url(r'^foo/$', test_view, name='foo'),
            url(r'^((?:[\w\-]+/)*)$', test_view, name='wagtail_serve'),
            url(r'^bar/$', test_view, name='bar'),
]
self.patcher = patch.object(
wagtail_core_urls,
'urlpatterns',
root_patterns
)
self.patcher.start()
self.addCleanup(self.patcher.stop)
reload(wagtailsharing.urls)
self.urlpatterns = wagtailsharing.urls.urlpatterns
def test_leaves_previous_urls_alone(self):
self.assertEqual(self.urlpatterns[0].name, 'foo')
def test_replaces_wagtail_serve(self):
self.assertEqual(self.urlpatterns[1].name, 'wagtail_serve')
self.assertEqual(self.urlpatterns[1].callback.__name__, 'ServeView')
def test_leaves_later_urls_alone(self):
self.assertEqual(self.urlpatterns[2].name, 'bar')
| 28.078431
| 76
| 0.657821
| 1,006
| 0.702514
| 0
| 0
| 0
| 0
| 0
| 0
| 175
| 0.122207
|
f79fdffacf758f6c9d435f6cbf7feae5c9594ded
| 1,295
|
py
|
Python
|
py2neo/timing.py
|
VitalyRomanov/py2neo
|
2d0683cf2ab8b77b0c5bbba4eade0003c68d5905
|
[
"Apache-2.0"
] | null | null | null |
py2neo/timing.py
|
VitalyRomanov/py2neo
|
2d0683cf2ab8b77b0c5bbba4eade0003c68d5905
|
[
"Apache-2.0"
] | null | null | null |
py2neo/timing.py
|
VitalyRomanov/py2neo
|
2d0683cf2ab8b77b0c5bbba4eade0003c68d5905
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Copyright 2011-2021, Nigel Small
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from monotonic import monotonic
class Timer(object):
def __init__(self, seconds):
self.__t0 = t0 = monotonic()
self.__t1 = t0 + (seconds or 0)
def __bool__(self):
return self.remaining() > 0
__nonzero__ = __bool__
def remaining(self):
diff = self.__t1 - monotonic()
return diff if diff > 0 else 0.0
def repeater(at_least, timeout):
""" Yield an incrementing number at least `at_least` times,
thereafter continuing until the timeout has been reached.
"""
timer = Timer(timeout)
repeat = 0
while repeat < at_least or timer.remaining():
yield repeat
repeat += 1
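
# --- Usage sketch (illustrative, not part of the module): repeater() yields at
# least `at_least` iterations and then keeps yielding until the timeout expires,
# which suits "retry a few times, then keep retrying while time remains".
if __name__ == "__main__":
    attempts = 0
    for n in repeater(at_least=3, timeout=0.01):
        attempts = n + 1
        # a real caller would attempt some operation here and break on success
    print("made %d attempts" % attempts)  # at least 3, more if time was left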
| 27.553191
| 74
| 0.68417
| 326
| 0.251737
| 299
| 0.230888
| 0
| 0
| 0
| 0
| 743
| 0.573745
|
f7a103da5061022bae213f777c49b0abb01710f8
| 656
|
py
|
Python
|
LeapYearFinderClass.py
|
MichaelWiciak/LeapYearFinderClass
|
1bc1326f60115bddc1639ff50256888448dd9645
|
[
"MIT"
] | null | null | null |
LeapYearFinderClass.py
|
MichaelWiciak/LeapYearFinderClass
|
1bc1326f60115bddc1639ff50256888448dd9645
|
[
"MIT"
] | null | null | null |
LeapYearFinderClass.py
|
MichaelWiciak/LeapYearFinderClass
|
1bc1326f60115bddc1639ff50256888448dd9645
|
[
"MIT"
] | null | null | null |
class LeapYearFinder(object):
def __init__(self):
pass
def findLeapYear(self, startYear, endYear):
leapYearRecord = []
for i in range(int(startYear),int(endYear)):
year = i
print(year,end = "\t")
            #If year is divisible by 4 and not 100, unless it is also divisible by 400
if year % 4 == 0 and (year % 100 != 0 or year % 400 == 0):
print(year, 'is a leap year.')
leapYearRecord.append(str(year))
else:
print(year, 'is not leap year.')
return leapYearRecord
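
# --- Usage sketch (illustrative, not part of the original class). Note that
# range(startYear, endYear) excludes endYear itself.
if __name__ == "__main__":
    finder = LeapYearFinder()
    leaps = finder.findLeapYear(2000, 2010)
    print(leaps)  # ['2000', '2004', '2008'] - 2000 qualifies because 400 divides it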
| 31.238095
| 88
| 0.492378
| 663
| 0.985141
| 0
| 0
| 0
| 0
| 0
| 0
| 133
| 0.197623
|
f7a18de75e2c5d8f0a8fefb62bdf2a19e6aa2d6f
| 1,662
|
py
|
Python
|
versions/versions.py
|
juanfec/juan_rueda_test
|
1e5d6bccadc24569db26b5a8b58486c8295cef12
|
[
"MIT"
] | null | null | null |
versions/versions.py
|
juanfec/juan_rueda_test
|
1e5d6bccadc24569db26b5a8b58486c8295cef12
|
[
"MIT"
] | null | null | null |
versions/versions.py
|
juanfec/juan_rueda_test
|
1e5d6bccadc24569db26b5a8b58486c8295cef12
|
[
"MIT"
] | null | null | null |
# Checks two strings that represent version numbers and finds the greatest,
# returns 'Equals' if they are the same version or 'Invalid Format' on bad input.
# Example: "1.2" is greater than "1.1".
# For reusability this function just returns the version number or the word 'Equals';
# if a more elaborate answer is needed, an interface would be useful.
def versions(version1, version2):
try:
#first we get a list of the integers that make up the version
list1 = list(map(int,version1.rsplit('.')))
list2 = list(map(int,version2.rsplit('.')))
        #then we look for differences in three cases:
        # when they are the same size and when one version
        # has more digits than the other one.
        # The only way two versions can be equal is when they have the
        # same amount of digits; this can change based on
        # business logic
if len(list1) < len(list2):
for index , i in enumerate(list1):
if(i>list2[index]):
return version1
elif(i<list2[index]):
return version2
return version2
elif len(list1) > len(list2):
for index , i in enumerate(list2):
if(i>list1[index]):
return version2
elif(i<list1[index]):
return version1
return version1
else:
for index , i in enumerate(list1):
if(i>list2[index]):
return version1
elif(i<list2[index]):
return version2
return 'Equals'
except ValueError:
return 'Invalid Format'
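
# --- Usage sketch (illustrative, not part of the original function).
if __name__ == "__main__":
    print(versions("1.2", "1.1"))    # 1.2
    print(versions("1.2", "1.2.3"))  # 1.2.3  (the longer version wins when the common prefix matches)
    print(versions("2.0", "2.0"))    # Equals
    print(versions("2.a", "2.0"))    # Invalid Format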
| 42.615385
| 82
| 0.570397
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 680
| 0.407186
|
e38a4870c5f8e88d0175c33a20658010577ed3a2
| 824
|
py
|
Python
|
packages/pyright-internal/src/tests/samples/match7.py
|
Strum355/pyright
|
01f15ce31f8f6cd9a054e21fc48cb762923ae25d
|
[
"MIT"
] | null | null | null |
packages/pyright-internal/src/tests/samples/match7.py
|
Strum355/pyright
|
01f15ce31f8f6cd9a054e21fc48cb762923ae25d
|
[
"MIT"
] | null | null | null |
packages/pyright-internal/src/tests/samples/match7.py
|
Strum355/pyright
|
01f15ce31f8f6cd9a054e21fc48cb762923ae25d
|
[
"MIT"
] | null | null | null |
# This sample tests type narrowing of subject expressions for
# match statements.
from typing import Literal
def func1(subj: int | dict[str, str] | tuple[int] | str, cond: bool):
match subj:
case (3 | "hi"):
t_v1: Literal["Literal[3, 'hi']"] = reveal_type(subj)
return
case int(y) if cond:
t_v2: Literal["int"] = reveal_type(subj)
return
case int(y):
t_v3: Literal["int"] = reveal_type(subj)
return
case int():
t_v4: Literal["Never"] = reveal_type(subj)
return
case str(z):
t_v5: Literal["str"] = reveal_type(subj)
return
case z:
t_v6: Literal["dict[str, str] | tuple[int]"] = reveal_type(subj)
return
return subj
| 23.542857
| 76
| 0.527913
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 153
| 0.18568
|
e38a5b70b844182225b5e64d4a3c3af08686053b
| 1,869
|
py
|
Python
|
examples/finance/stocks_baselines.py
|
TianhaoFu/MultiBench
|
b174a3187124d6f92be1ff3b487eef292f7883bb
|
[
"MIT"
] | null | null | null |
examples/finance/stocks_baselines.py
|
TianhaoFu/MultiBench
|
b174a3187124d6f92be1ff3b487eef292f7883bb
|
[
"MIT"
] | null | null | null |
examples/finance/stocks_baselines.py
|
TianhaoFu/MultiBench
|
b174a3187124d6f92be1ff3b487eef292f7883bb
|
[
"MIT"
] | null | null | null |
import sys
import os
sys.path.append(os.getcwd())
import argparse
import numpy as np
import pmdarima
import torch
import torch.nn.functional as F
from torch import nn
from fusions.common_fusions import Stack
from unimodals.common_models import LSTMWithLinear
from datasets.stocks.get_data import get_dataloader
parser = argparse.ArgumentParser()
parser.add_argument('--input-stocks', metavar='input', help='input stocks')
parser.add_argument('--target-stock', metavar='target', help='target stock')
args = parser.parse_args()
print('Input: ' + args.input_stocks)
print('Target: ' + args.target_stock)
stocks = sorted(args.input_stocks.split(' '))
train_loader, val_loader, test_loader = get_dataloader(stocks, stocks, [args.target_stock], modality_first=True)
def baselines():
def best_constant(y_prev, y):
return float(nn.MSELoss()(torch.ones_like(y) * torch.mean(y), y))
def copy_last(y_prev, y):
return nn.MSELoss()(torch.cat([y_prev[-1:], y[:-1]]), y).item()
def arima(y_prev, y):
arr = y_prev.cpu()
arima = pmdarima.arima.auto_arima(arr)
pred = arima.predict(len(y))
return nn.MSELoss()(torch.tensor(pred, device='cuda').reshape(y.shape), y)
print('Best constant val MSE loss: ' + str(best_constant(train_loader.dataset.Y, val_loader.dataset.Y)))
print('Best constant test MSE loss: ' + str(best_constant(val_loader.dataset.Y, test_loader.dataset.Y)))
print('Copy-last val MSE loss: ' + str(copy_last(train_loader.dataset.Y, val_loader.dataset.Y)))
print('Copy-last test MSE loss: ' + str(copy_last(val_loader.dataset.Y, test_loader.dataset.Y)))
print('ARIMA val MSE loss: ' + str(arima(train_loader.dataset.Y, val_loader.dataset.Y)))
print('ARIMA test MSE loss: ' + str(arima(torch.cat([train_loader.dataset.Y, val_loader.dataset.Y]), test_loader.dataset.Y)))
baselines()
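
# --- Illustrative sketch (not part of the original script): the copy-last
# baseline above predicts each value with the previous observation. On a toy
# tensor, with torch and nn already imported above:
#   y_prev, y = torch.tensor([1., 2.]), torch.tensor([3., 4.])
#   prediction = torch.cat([y_prev[-1:], y[:-1]])   # tensor([2., 3.])
#   nn.MSELoss()(prediction, y)                     # ((2-3)**2 + (3-4)**2) / 2 = 1.0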
| 41.533333
| 129
| 0.721776
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 262
| 0.140182
|
e38a5e9b805eaee8a5ed1bb8c56f9e375e34bdfa
| 3,329
|
py
|
Python
|
textmate/bundles/LaTeX.tmbundle/Support/lib/Python/tmprefs.py
|
leo-brewin/hybrid-latex
|
2debaf3f97eb551928d08dc4baded7ef7a4ab29a
|
[
"MIT"
] | 16
|
2018-10-12T06:31:49.000Z
|
2022-03-31T23:16:08.000Z
|
textmate/bundles/LaTeX.tmbundle/Support/lib/Python/tmprefs.py
|
leo-brewin/hybrid-latex
|
2debaf3f97eb551928d08dc4baded7ef7a4ab29a
|
[
"MIT"
] | null | null | null |
textmate/bundles/LaTeX.tmbundle/Support/lib/Python/tmprefs.py
|
leo-brewin/hybrid-latex
|
2debaf3f97eb551928d08dc4baded7ef7a4ab29a
|
[
"MIT"
] | 2
|
2021-06-27T03:29:40.000Z
|
2022-03-30T17:17:18.000Z
|
# -- Imports ------------------------------------------------------------------
from __future__ import print_function
from __future__ import unicode_literals
from Foundation import CFPreferencesAppSynchronize, CFPreferencesCopyAppValue
from os import getenv
# -- Class --------------------------------------------------------------------
class Preferences(object):
"""Process the current preferences of the LaTeX bundle.
This class reads the LaTeX preferences and provides a dictionary-like
interface to process them.
"""
def __init__(self):
"""Create a new Preferences object from the current settings.
Examples:
>>> preferences = Preferences()
>>> keys = ['latexViewer', 'latexEngine', 'latexUselatexmk',
... 'latexVerbose', 'latexDebug', 'latexAutoView',
... 'latexKeepLogWin', 'latexEngineOptions']
>>> all([key in preferences.prefs for key in keys])
True
"""
tm_identifier = getenv('TM_APP_IDENTIFIER', 'com.macromates.textmate')
CFPreferencesAppSynchronize(tm_identifier)
self.default_values = {
'latexAutoView': True,
'latexEngine': "pdflatex",
'latexEngineOptions': "",
'latexVerbose': False,
'latexUselatexmk': True,
'latexViewer': "TextMate",
'latexKeepLogWin': True,
'latexDebug': False,
}
self.prefs = self.default_values.copy()
for key in self.prefs:
preference_value = CFPreferencesCopyAppValue(key, tm_identifier)
if preference_value is not None:
self.prefs[key] = preference_value
def __getitem__(self, key):
"""Return a value stored inside Preferences.
        If the value is not defined then ``None`` will be returned.
Arguments:
key
The key of the value that should be returned
Examples:
>>> preferences = Preferences()
>>> preferences['latexEngine'].find('tex') >= 0
True
>>> isinstance(preferences['latexUselatexmk'], bool)
True
"""
return self.prefs.get(key, None)
def defaults(self):
"""Return a string containing the default preference values.
Returns: ``str``
Examples:
>>> preferences = Preferences()
>>> print(preferences.defaults()) # doctest:+NORMALIZE_WHITESPACE
{ latexAutoView = 1;
latexDebug = 0;
latexEngine = pdflatex;
latexEngineOptions = "";
latexKeepLogWin = 1;
latexUselatexmk = 1;
latexVerbose = 0;
latexViewer = TextMate; }
"""
plist = {preference: int(value) if isinstance(value, bool) else value
for preference, value in self.default_values.items()}
preference_items = [
'{} = {};'.format(preference,
plist[preference] if
str(plist[preference]) else '""')
for preference in sorted(plist)]
return '{{ {} }}'.format(' '.join(preference_items))
if __name__ == '__main__':
from doctest import testmod
testmod()
| 31.11215
| 79
| 0.545209
| 2,910
| 0.874136
| 0
| 0
| 0
| 0
| 0
| 0
| 1,950
| 0.585761
|
e38ad8911f43a8dc1cf2caa5fecf9c3fdcb3062c
| 1,916
|
py
|
Python
|
parsetab.py
|
UVG-Teams/analizador-lexico-sintactico
|
71ac98e11fc63c6fcba36e94d9d40f0e59b6d55f
|
[
"MIT"
] | null | null | null |
parsetab.py
|
UVG-Teams/analizador-lexico-sintactico
|
71ac98e11fc63c6fcba36e94d9d40f0e59b6d55f
|
[
"MIT"
] | null | null | null |
parsetab.py
|
UVG-Teams/analizador-lexico-sintactico
|
71ac98e11fc63c6fcba36e94d9d40f0e59b6d55f
|
[
"MIT"
] | null | null | null |
# parsetab.py
# This file is automatically generated. Do not edit.
# pylint: disable=W,C,R
_tabversion = '3.10'
_lr_method = 'LALR'
_lr_signature = 'leftIMPLIESSIMPLIESleftANDORleftRPARENLPARENrightNEGATIONALPHABET AND IMPLIES LPAREN NEGATION OR PREDICATE RPAREN SIMPLIESexpr : expr AND exprexpr : ALPHABETexpr : expr OR exprexpr : NEGATION exprexpr : expr IMPLIES exprexpr : expr SIMPLIES exprexpr : LPAREN expr RPAREN'
_lr_action_items = {'ALPHABET':([0,3,4,5,6,7,8,],[2,2,2,2,2,2,2,]),'NEGATION':([0,3,4,5,6,7,8,],[3,3,3,3,3,3,3,]),'LPAREN':([0,3,4,5,6,7,8,],[4,4,4,4,4,4,4,]),'$end':([1,2,9,11,12,13,14,15,],[0,-2,-4,-1,-3,-5,-6,-7,]),'AND':([1,2,9,10,11,12,13,14,15,],[5,-2,-4,5,-1,-3,5,5,-7,]),'OR':([1,2,9,10,11,12,13,14,15,],[6,-2,-4,6,-1,-3,6,6,-7,]),'IMPLIES':([1,2,9,10,11,12,13,14,15,],[7,-2,-4,7,-1,-3,-5,-6,-7,]),'SIMPLIES':([1,2,9,10,11,12,13,14,15,],[8,-2,-4,8,-1,-3,-5,-6,-7,]),'RPAREN':([2,9,10,11,12,13,14,15,],[-2,-4,15,-1,-3,-5,-6,-7,]),}
_lr_action = {}
for _k, _v in _lr_action_items.items():
for _x,_y in zip(_v[0],_v[1]):
if not _x in _lr_action: _lr_action[_x] = {}
_lr_action[_x][_k] = _y
del _lr_action_items
_lr_goto_items = {'expr':([0,3,4,5,6,7,8,],[1,9,10,11,12,13,14,]),}
_lr_goto = {}
for _k, _v in _lr_goto_items.items():
for _x, _y in zip(_v[0], _v[1]):
if not _x in _lr_goto: _lr_goto[_x] = {}
_lr_goto[_x][_k] = _y
del _lr_goto_items
_lr_productions = [
("S' -> expr","S'",1,None,None,None),
('expr -> expr AND expr','expr',3,'p_and','main.py',48),
('expr -> ALPHABET','expr',1,'p_expr','main.py',52),
('expr -> expr OR expr','expr',3,'p_or','main.py',56),
('expr -> NEGATION expr','expr',2,'p_negation','main.py',60),
('expr -> expr IMPLIES expr','expr',3,'p_implies','main.py',64),
('expr -> expr SIMPLIES expr','expr',3,'p_simplies','main.py',69),
('expr -> LPAREN expr RPAREN','expr',3,'p_parens','main.py',73),
]
| 50.421053
| 538
| 0.601775
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 804
| 0.419624
|
e38c0cf17108b05c381e4d71a5abca8083aae594
| 6,936
|
py
|
Python
|
Q1_ab.py
|
dkilike/Image-Segmentation
|
6a3ab7f96d105475051502aba8013c242eba5fcb
|
[
"ADSL"
] | 2
|
2019-05-05T01:52:23.000Z
|
2019-05-06T22:56:54.000Z
|
Q1_ab.py
|
dkilike/Image-Segmentation
|
6a3ab7f96d105475051502aba8013c242eba5fcb
|
[
"ADSL"
] | null | null | null |
Q1_ab.py
|
dkilike/Image-Segmentation
|
6a3ab7f96d105475051502aba8013c242eba5fcb
|
[
"ADSL"
] | null | null | null |
'''Please write a program to read the scan and print out
The maximum voxel intensity
The mean voxel intensity
The coordinates of the centre of the image volume, in the scanner coordinate system.
'''
import pydicom
import numpy as np
import matplotlib.pyplot as plt
import cv2
import glob
import os
import image_slicer
'''form a 3D array by stacking all CT scan slices'''
# load the DICOM files
src_path = r'C:\Users\GGPC\SegmentationTest\Image-Segmentation'
DICOM_dir_path = src_path + '\DICOM data'
# snapshot dicom file
files = []
for fname in glob.glob(DICOM_dir_path+'\*', recursive=False):
print("loading: {}".format(fname))
files.append(pydicom.read_file(fname))
print("file count: {}".format(len(files)))
# skip files with no SliceLocation
slices = []
skipcount = 0
for f in files:
if hasattr(f, 'SliceLocation'):
slices.append(f)
else:
skipcount = skipcount + 1
print("skipped, no SliceLocation: {}".format(skipcount))
# ensure they are in the correct order
slices = sorted(slices, key=lambda s: s.SliceLocation)
# create 3D array (assuming that each slice has the same pixel size)
img_shape = list(slices[0].pixel_array.shape)
img_shape.append(len(slices))
img3d = np.zeros(img_shape)
# fill 3D array with the images from the files
for i, s in enumerate(slices):
img3d[:, :, i] = s.pixel_array
input("Press Enter to continue showing Question 1 (a) results...")
'''start solving Q1_a read and print'''
# first two questions are straight-forward
print()
print('Question 1 (a)')
print('i. The maximum voxel intensity is {}'.format(img3d.max()))
print('ii. The mean voxel intensity is {}'.format(img3d.mean()))
# centre of the image volume is at (256.5,256.5) pixel position between the 100th and 101st slices
ImagePlanePosition_of_100th_slice = np.array(slices[99].ImagePositionPatient)
RowChangeInX_of_100th_slice = np.array(slices[99].ImageOrientationPatient[0:3]) * slices[99].PixelSpacing[0] * 256.5
ColumnChangeInY_of_100th_slice = np.array(slices[99].ImageOrientationPatient[3:6]) * slices[99].PixelSpacing[1] * 256.5
coordinate_of_100th_slice = ImagePlanePosition_of_100th_slice + RowChangeInX_of_100th_slice + ColumnChangeInY_of_100th_slice
ImagePlanePosition_of_101th_slice = np.array(slices[100].ImagePositionPatient)
RowChangeInX_of_101th_slice = np.array(slices[100].ImageOrientationPatient[0:3]) * slices[100].PixelSpacing[0] * 256.5
ColumnChangeInY_of_101th_slice = np.array(slices[100].ImageOrientationPatient[3:6]) * slices[100].PixelSpacing[1] * 256.5
coordinate_of_101th_slice = ImagePlanePosition_of_101th_slice + RowChangeInX_of_101th_slice + ColumnChangeInY_of_101th_slice
coordinate_of_ImageVolumeCentre = (coordinate_of_100th_slice+coordinate_of_101th_slice)/2
print('iii. coordinates of the centre of the image volume is {} mm'.format(list(coordinate_of_ImageVolumeCentre)))
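# --- Illustrative sketch (not part of the original script): the computation above,
# factored into a hypothetical helper. A point (i, j) on a slice maps to scanner
# coordinates via ImagePositionPatient plus the two ImageOrientationPatient
# direction vectors scaled by PixelSpacing and the pixel offsets.
#   def centre_of_slice(slice_ds, i=256.5, j=256.5):
#       origin = np.array(slice_ds.ImagePositionPatient)
#       step_a = np.array(slice_ds.ImageOrientationPatient[0:3]) * slice_ds.PixelSpacing[0] * i
#       step_b = np.array(slice_ds.ImageOrientationPatient[3:6]) * slice_ds.PixelSpacing[1] * j
#       return origin + step_a + step_b
#   coordinate_of_ImageVolumeCentre == (centre_of_slice(slices[99]) + centre_of_slice(slices[100])) / 2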
input("Press Enter to continue showing Question 1 (b) results...")
'''start solving Q1_b'''
# plot the maximum voxel intensity of each slice
MaxVoxelList=[]
MeanVoxelList=[]
for s in slices:
MaxVoxelList.append(s.pixel_array.max())
MeanVoxelList.append(s.pixel_array.mean())
print('Close plot to continue')
plt.scatter(range(0,len(MaxVoxelList)), MaxVoxelList)
plt.xlabel('slice index (1-200)')
plt.ylabel('maximum voxel intensity')
plt.title('Scatter of Max Voxel over Slice Index')
plt.show()
# selection voxel intensity threshold of 3000
Threshold = 3000
print('Close plot of a mask detection example to continue')
a1 = plt.subplot(2, 2, 1)
plt.imshow(img3d[:, :, 30])
a1 = plt.subplot(2, 2, 2)
plt.imshow(img3d[:, :, 30]>Threshold)
a1 = plt.subplot(2, 2, 3)
plt.imshow(img3d[:, :, 176])
a1 = plt.subplot(2, 2, 4)
plt.imshow(img3d[:, :, 176]>Threshold)
plt.show()
input("Press Enter to continue generating images and masks to Folders: SegmentationMask(metal mask) and Images(ct scan slices)...")
# generate images and masks
NameCount = 300
for s in slices:
ImageName = '\SegmentationMask\IM00' + str(NameCount) + '.png'
img = s.pixel_array>Threshold
img = img.astype('uint8')*255
cv2.imwrite(src_path + ImageName, img)
print(ImageName + ' has been saved')
NameCount+=1
NameCount = 300
for s in slices:
ImageName = '\Images\IM00' + str(NameCount) + '.png'
img = (s.pixel_array - img3d.min())/(img3d.max()-img3d.min())*255
cv2.imwrite(src_path + ImageName, img)
print(ImageName + ' has been saved')
NameCount+=1
# NameCount = 300
# for s in slices:
# ImageName = '\SegmentationBoneMask\IM00' + str(NameCount) + '.png'
# img = s.pixel_array>0
# img = img.astype('uint8')*255
# cv2.imwrite(src_path + ImageName, img)
# print(ImageName + ' has been saved')
# NameCount+=1
# NameCount = 300
# for s in slices:
# ImageName = '\Dataset_Slicer\masks\IM00' + str(NameCount) + '.png'
# img = s.pixel_array>Threshold
# img = img.astype('uint8')*255
# cv2.imwrite(src_path + ImageName, img)
# image_slicer.slice(src_path + ImageName,14)
# os.remove(src_path + ImageName)
# print(ImageName + ' has been saved')
# NameCount+=1
#
# NameCount = 300
# for s in slices:
# ImageName = '\Dataset_Slicer\images\IM00' + str(NameCount) + '.png'
# img = (s.pixel_array - img3d.min())/(img3d.max()-img3d.min())*255
# cv2.imwrite(src_path + ImageName, img)
# image_slicer.slice(src_path + ImageName, 14)
# os.remove(src_path + ImageName)
# print(ImageName + ' has been saved')
# NameCount+=1
#
# NameCount = 300
# for s in slices:
# ImageName = '\Dataset_Slicer\masks\IM00' + str(NameCount) + '.png'
# img = s.pixel_array>0
# img = img.astype('uint8')*255
# cv2.imwrite(src_path + ImageName, img)
# print(ImageName + ' has been saved')
# NameCount+=1
# os.mkdir(src_path + '\\Dataset')
# for fname in glob.glob(DICOM_dir_path + '\*', recursive=False):
# os.mkdir(src_path + '\\Dataset' + fname[-8:])
# os.mkdir(src_path + '\\Dataset' + fname[-8:] + '\\images')
# os.mkdir(src_path + '\\Dataset' + fname[-8:] + '\\masks')
#
# NameCount = 300
# for s in slices:
# ImageName = '\Dataset\IM00' + str(NameCount) + '\masks\MetalMask.png'
# img = s.pixel_array>Threshold
# img = img.astype('uint8')*255
# cv2.imwrite(src_path + ImageName, img)
# print(ImageName + ' has been saved')
# NameCount+=1
#
# NameCount = 300
# for s in slices:
# ImageName = '\Dataset\IM00' + str(NameCount) + '\images' + '\IM00' + str(NameCount) + '.png'
# img = (s.pixel_array - img3d.min())/(img3d.max()-img3d.min())*255
# cv2.imwrite(src_path + ImageName, img)
# print(ImageName + ' has been saved')
# NameCount+=1
#
# NameCount = 300
# for s in slices:
# ImageName = '\Dataset\IM00' + str(NameCount) + '\masks\PositiveVoxelMask.png'
# img = s.pixel_array>0
# img = img.astype('uint8')*255
# cv2.imwrite(src_path + ImageName, img)
# print(ImageName + ' has been saved')
# NameCount+=1
| 36.698413
| 131
| 0.697953
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 4,034
| 0.581603
|
e38c2e039b77985412d12e168040f3115a42b4f3
| 1,759
|
py
|
Python
|
12/day_twelve.py
|
tmay-sarsaparilla/advent-of-code-2021
|
3cd827df57d315dd96627544b9f5c31b7db1aa11
|
[
"MIT"
] | null | null | null |
12/day_twelve.py
|
tmay-sarsaparilla/advent-of-code-2021
|
3cd827df57d315dd96627544b9f5c31b7db1aa11
|
[
"MIT"
] | null | null | null |
12/day_twelve.py
|
tmay-sarsaparilla/advent-of-code-2021
|
3cd827df57d315dd96627544b9f5c31b7db1aa11
|
[
"MIT"
] | null | null | null |
def find_paths(start, connections, visited=None, small_cave_visited_twice=False):
if visited is None:
visited = ["start"]
possible_connections = [e for s, e in connections if s == start] + [s for s, e in connections if e == start]
paths = []
if not possible_connections:
raise ValueError(f"No possible connections from cave {start}.")
for e in possible_connections:
path = []
if start == "start":
path.append(start)
visited_on_path = [] + visited
small_cave_visited_twice_on_path = small_cave_visited_twice
if e == "start":
continue # don't return to the start cave
if e == e.lower() and e in visited_on_path and small_cave_visited_twice_on_path:
            continue  # the single allowed small-cave revisit has already been used
if e == e.lower() and e in visited_on_path:
small_cave_visited_twice_on_path = True
path.append(e)
if e == "end":
paths.append(path)
continue # path is complete
visited_on_path.append(e)
try:
resulting_paths = find_paths(
start=e,
connections=connections,
visited=visited_on_path,
small_cave_visited_twice=small_cave_visited_twice_on_path
)
resulting_paths = [path + p for p in resulting_paths]
except ValueError:
continue # skip connections which lead to a dead end
paths.extend(resulting_paths)
return paths
def main():
with open("data.txt") as f:
connections = [c.replace("\n", "").split("-") for c in f.readlines()]
print(len(find_paths("start", connections)))
if __name__ == "__main__":
main()
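
# --- Usage sketch (illustrative, not part of the original solution): the small
# example cave system from the puzzle statement, fed straight to find_paths.
#   connections = [["start", "A"], ["start", "b"], ["A", "c"],
#                  ["A", "b"], ["b", "d"], ["A", "end"], ["b", "end"]]
#   len(find_paths("start", connections))  # 36 paths when one small cave may be visited twice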
| 34.490196
| 112
| 0.604321
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 240
| 0.136441
|
e38df443614ff2ea0fe09c328aa447f49ee9a8df
| 146
|
py
|
Python
|
pythonic_binance/__init__.py
|
hANSIc99/pythonic-binance
|
df25353cc20f545224d97a34544844029dabe4ce
|
[
"MIT"
] | 1
|
2020-12-23T03:20:01.000Z
|
2020-12-23T03:20:01.000Z
|
pythonic_binance/__init__.py
|
hANSIc99/pythonic-binance
|
df25353cc20f545224d97a34544844029dabe4ce
|
[
"MIT"
] | null | null | null |
pythonic_binance/__init__.py
|
hANSIc99/pythonic-binance
|
df25353cc20f545224d97a34544844029dabe4ce
|
[
"MIT"
] | null | null | null |
"""An unofficial Python wrapper for the Binance exchange API v3
.. moduleauthor:: Sam McHardy
.. modified by Stephan Avenwedde for Pythonic
"""
| 20.857143
| 63
| 0.753425
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 145
| 0.993151
|
e38e7b771230b46e0d59d9e9f903a942ff021817
| 1,643
|
py
|
Python
|
tests/unit/test_question_answer.py
|
Lunga001/pmg-cms-2
|
10cea3979711716817b0ba2a41987df73f2c7642
|
[
"Apache-2.0"
] | 2
|
2019-06-11T20:46:43.000Z
|
2020-08-27T22:50:32.000Z
|
tests/unit/test_question_answer.py
|
Lunga001/pmg-cms-2
|
10cea3979711716817b0ba2a41987df73f2c7642
|
[
"Apache-2.0"
] | 70
|
2017-05-26T14:04:06.000Z
|
2021-06-30T10:21:58.000Z
|
tests/unit/test_question_answer.py
|
OpenUpSA/pmg-cms-2
|
ec5f259dae81674ac7a8cdb80f124a8b0f167780
|
[
"Apache-2.0"
] | 4
|
2017-08-29T10:09:30.000Z
|
2021-05-25T11:29:03.000Z
|
import os
from tests import PMGTestCase
from tests.fixtures import dbfixture, CommitteeQuestionData
class TestQuestionAnswer(PMGTestCase):
def setUp(self):
super(TestQuestionAnswer, self).setUp()
self.fx = dbfixture.data(CommitteeQuestionData,)
self.fx.setup()
def tearDown(self):
self.fx.teardown()
super(TestQuestionAnswer, self).tearDown()
def test_get_minister_questions_combined(self):
response = self.client.get(
"minister-questions-combined/", base_url="http://api.pmg.test:5000/"
)
results = response.json["results"]
self.assertEqual(2, len(results))
questions = [result["question"] for result in results]
self.assertIn(
self.fx.CommitteeQuestionData.arts_committee_question_one.question,
questions,
)
self.assertIn(
self.fx.CommitteeQuestionData.arts_committee_question_two.question,
questions,
)
def test_get_minister_questions_combined_filter_by_year(self):
response = self.client.get(
"minister-questions-combined/?filter[year]=2018",
base_url="http://api.pmg.test:5000",
)
results = response.json["results"]
self.assertEqual(1, len(results))
questions = [result["question"] for result in results]
self.assertNotIn(
self.fx.CommitteeQuestionData.arts_committee_question_one.question,
questions,
)
self.assertIn(
self.fx.CommitteeQuestionData.arts_committee_question_two.question,
questions,
)
| 33.530612
| 80
| 0.645161
| 1,540
| 0.93731
| 0
| 0
| 0
| 0
| 0
| 0
| 169
| 0.102861
|
e38eb4838b0bca24f076f914d11b9ed6e01734df
| 1,470
|
py
|
Python
|
Job-Interviews/Python/BinaryTrees/Problem2.py
|
JuanPabloMontoya271/ITC
|
f5899e7a8fed4e9f90e97bb3af635a276d9cf13a
|
[
"MIT"
] | 1
|
2020-11-02T15:18:16.000Z
|
2020-11-02T15:18:16.000Z
|
Job-Interviews/Python/BinaryTrees/Problem2.py
|
JuanPabloMontoya271/ITC
|
f5899e7a8fed4e9f90e97bb3af635a276d9cf13a
|
[
"MIT"
] | null | null | null |
Job-Interviews/Python/BinaryTrees/Problem2.py
|
JuanPabloMontoya271/ITC
|
f5899e7a8fed4e9f90e97bb3af635a276d9cf13a
|
[
"MIT"
] | 1
|
2021-10-30T14:18:29.000Z
|
2021-10-30T14:18:29.000Z
|
class Tree:
def __init__(self, val,left = None, right = None):
self.val = val
self.left = left
self.right = right
root = Tree(4, left = Tree(3), right=Tree(5, left= Tree(4)))
# InOrderTraversal: left subtree, then root, then right subtree
def InOrderTraversal(root, res=None):
    if res is None:  # avoid a shared mutable default argument
        res = []
    if root is None:
        return res
    InOrderTraversal(root.left, res)
    res.append(root.val)
    InOrderTraversal(root.right, res)
    return res
print("In order:", InOrderTraversal(root))
# PreOrderTraversal: root, then left subtree, then right subtree
def PreOrderTraversal(root, res=None):
    if res is None:
        res = []
    if root is None:
        return res
    res.append(root.val)
    PreOrderTraversal(root.left, res)
    PreOrderTraversal(root.right, res)
    return res
print("Pre order:", PreOrderTraversal(root))
# PostOrderTraversal: left subtree, then right subtree, then root
def PostOrderTraversal(root, res=None):
    if res is None:
        res = []
    if root is None:
        return res
    PostOrderTraversal(root.left, res)
    PostOrderTraversal(root.right, res)
    res.append(root.val)
    return res
print("Post order:", PostOrderTraversal(root))
def LevelOrderTraversal(root, res=None):
    if res is None:  # avoid a shared mutable default argument
        res = []
    if root is None:
        return res
    queue = []
queue.append(root)
while len(queue)>0:
res.append(queue[0].val)
node = queue.pop(0)
if node.left is not None:
queue.append(node.left)
if node.right is not None:
queue.append(node.right)
return res
print("Level order Traversal: ", LevelOrderTraversal(root))
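
# --- Expected output for the sample tree above (a sketch, assuming the corrected
# traversal orders defined earlier in this file):
#   In order:  [3, 4, 4, 5]
#   Pre order:  [4, 3, 5, 4]
#   Post order: [3, 4, 5, 4]
#   Level order Traversal:  [4, 3, 5, 4]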
| 28.269231
| 61
| 0.614286
| 145
| 0.098639
| 0
| 0
| 0
| 0
| 0
| 0
| 118
| 0.080272
|
e38ef669498a375d8712d764abb891d4af6ac615
| 4,429
|
py
|
Python
|
csv_analyzer/apps/dataset/api/dataset.py
|
saduqz/csv-analyzer-test
|
732d4902aeba9278e7547ed5a83e4a482790076c
|
[
"MIT"
] | null | null | null |
csv_analyzer/apps/dataset/api/dataset.py
|
saduqz/csv-analyzer-test
|
732d4902aeba9278e7547ed5a83e4a482790076c
|
[
"MIT"
] | null | null | null |
csv_analyzer/apps/dataset/api/dataset.py
|
saduqz/csv-analyzer-test
|
732d4902aeba9278e7547ed5a83e4a482790076c
|
[
"MIT"
] | null | null | null |
from datetime import datetime
# Rest framework
from rest_framework import status
from rest_framework.decorators import action
from rest_framework.mixins import RetrieveModelMixin, ListModelMixin, UpdateModelMixin, CreateModelMixin
from rest_framework.response import Response
from rest_framework.viewsets import GenericViewSet
from rest_framework.permissions import IsAuthenticated
# Serializers
from csv_analyzer.apps.dataset.serializers import (
DataSetModelSerializer,
CreateDataSetModelSerializer,
FileDataSetModelSerializer,
)
# Models
from csv_analyzer.apps.dataset.models import DataSet
# Permissions
from csv_analyzer.apps.dataset.permissions.dataset import IsDataSetOwner
# Celery
from csv_analyzer.apps.dataset.tasks import populate_dataset_file
# MongoDB utils
from csv_analyzer.apps.mongodb.utils import MongoDBConnection
class DataSetViewSet(CreateModelMixin, ListModelMixin, RetrieveModelMixin, UpdateModelMixin, GenericViewSet):
permission_classes = (IsAuthenticated, IsDataSetOwner)
def get_queryset(self, *args, **kwargs):
# Using prefetch related to improve query performance.
return DataSet.objects.filter(owner=self.request.user).prefetch_related('files')
def retrieve(self, request, *args, **kwargs):
instance = self.get_object()
serializer = self.get_serializer(instance)
data = serializer.data
data['weather_date'] = self._get_data_set_weather_data(
from_date=request.GET.get('from_date'),
to_date=request.GET.get('to_date'),
data_set_id=str(instance.id)
)
return Response(data)
@staticmethod
def _get_data_set_weather_data(from_date, to_date, data_set_id):
"""
Get a data set's weather data.
:param from_date: String or None. Data Set from date filter. e.g. 2011-09-01
:param to_date: String or None. Data Set to date filter. e.g. 2011-09-21
:param data_set_id: String, Data Set Id.
:return: Dict with count of results and the data.
"""
mongo_client = MongoDBConnection()
mongo_query = {
'data_set_id': data_set_id,
}
if from_date or to_date:
mongo_query['date'] = {}
if from_date:
from_date = datetime.strptime(from_date, '%Y-%m-%d')
from_date = datetime.combine(from_date.date(), datetime.min.time())
mongo_query['date']['$gte'] = from_date
if to_date:
to_date = datetime.strptime(to_date, '%Y-%m-%d')
to_date = datetime.combine(to_date.date(), datetime.max.time())
mongo_query['date']['$lt'] = to_date
files_data = mongo_client.get_list(query=mongo_query)
return {
'count': len(files_data),
'data': files_data,
}
def get_serializer_class(self):
"""Return serializer based on action."""
if self.action == 'create':
return CreateDataSetModelSerializer
elif self.action == 'add_file':
return FileDataSetModelSerializer
return DataSetModelSerializer
def create(self, request, *args, **kwargs):
data = request.data.copy()
data.update({
'owner': request.user.id,
'is_analyzed': False,
})
serializer = self.get_serializer(data=data)
serializer.is_valid(raise_exception=True)
self.perform_create(serializer)
headers = self.get_success_headers(serializer.data)
return Response(serializer.data, status=status.HTTP_201_CREATED, headers=headers)
@action(detail=True, methods=["POST"], url_path='add-file')
def add_file(self, request, *args, **kwargs):
dataset = self.get_object()
serializer_class = self.get_serializer_class()
try:
data = request.data.copy()
except Exception:
data = request.data
data.update({
'data_set': dataset.id,
'is_analyzed': False,
})
serializer = serializer_class(data=data)
serializer.is_valid(raise_exception=True)
serializer.save()
populate_dataset_file.delay(dataset_file_id=serializer.instance.id)
dataset = self.get_object()
data = DataSetModelSerializer(dataset).data
return Response(data=data, status=status.HTTP_201_CREATED)
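
# --- Illustrative sketch (not part of the viewset): the MongoDB filter built by
# _get_data_set_weather_data for a request such as
# GET /datasets/<id>/?from_date=2011-09-01&to_date=2011-09-21 would look like:
#   {
#       'data_set_id': '<data_set_id>',
#       'date': {
#           '$gte': datetime(2011, 9, 1, 0, 0, 0),             # start of from_date
#           '$lt': datetime(2011, 9, 21, 23, 59, 59, 999999),  # end of to_date
#       },
#   }
# Both bounds are optional; each is expanded to the start or end of its day.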
| 33.300752
| 109
| 0.664484
| 3,574
| 0.806954
| 0
| 0
| 2,002
| 0.452021
| 0
| 0
| 687
| 0.155114
|
e38f3cccadcd0f9b76ee123f571fa527044e60b4
| 11,984
|
py
|
Python
|
tfx_bsl/tfxio/tensor_to_arrow_test.py
|
brills/tfx-bsl
|
089d6673a8d3cccef84ff3d6583808544d2da038
|
[
"Apache-2.0"
] | null | null | null |
tfx_bsl/tfxio/tensor_to_arrow_test.py
|
brills/tfx-bsl
|
089d6673a8d3cccef84ff3d6583808544d2da038
|
[
"Apache-2.0"
] | null | null | null |
tfx_bsl/tfxio/tensor_to_arrow_test.py
|
brills/tfx-bsl
|
089d6673a8d3cccef84ff3d6583808544d2da038
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tfx_bsl.tfxio.tensor_to_arrow."""
import numpy as np
import pyarrow as pa
import tensorflow as tf
from tfx_bsl.tfxio import tensor_adapter
from tfx_bsl.tfxio import tensor_to_arrow
from google.protobuf import text_format
from absl.testing import absltest
from absl.testing import parameterized
from tensorflow_metadata.proto.v0 import schema_pb2
_TF_TYPE_TO_ARROW_TYPE = {
tf.int8: pa.int8(),
tf.int16: pa.int16(),
tf.int32: pa.int32(),
tf.int64: pa.int64(),
tf.uint8: pa.uint8(),
tf.uint16: pa.uint16(),
tf.uint32: pa.uint32(),
tf.uint64: pa.uint64(),
tf.float32: pa.float32(),
tf.float64: pa.float64(),
tf.string: pa.binary(),
}
_ROW_PARTITION_DTYPES = {
"INT64": np.int64,
"INT32": np.int32
}
def _make_2d_varlen_sparse_tensor_test_cases():
result = []
for tf_type, arrow_type in _TF_TYPE_TO_ARROW_TYPE.items():
if tf_type == tf.string:
values = tf.constant([b"1", b"2", b"3"], dtype=tf.string)
expected_array = pa.array([[b"1"], [], [b"2", b"3"], []],
type=pa.list_(arrow_type))
else:
values = tf.constant([1, 2, 3], dtype=tf_type)
expected_array = pa.array([[1], [], [2, 3], []],
type=pa.list_(arrow_type))
result.append(
dict(
testcase_name="2d_varlen_sparse_tensor_%s" % tf_type.name,
type_specs={"sp": tf.SparseTensorSpec([None, None], tf_type)},
expected_schema={"sp": pa.list_(arrow_type)},
expected_tensor_representations={
"sp": """varlen_sparse_tensor { column_name: "sp" }""",
},
tensor_input={
"sp":
tf.SparseTensor(
values=values,
indices=[[0, 0], [2, 0], [2, 1]],
dense_shape=[4, 2]),
},
expected_record_batch={"sp": expected_array}))
return result
def _make_3d_ragged_tensor_test_cases():
result = []
for row_partition_dtype in _ROW_PARTITION_DTYPES:
row_partition_numpy_type = _ROW_PARTITION_DTYPES[row_partition_dtype]
for tf_type, arrow_type in _TF_TYPE_TO_ARROW_TYPE.items():
if tf_type == tf.string:
values = tf.RaggedTensor.from_row_splits(
values=tf.constant([b"1", b"2", b"3"], dtype=tf_type),
row_splits=np.asarray([0, 1, 1, 3, 3],
dtype=row_partition_numpy_type))
expected_array = pa.array([[[b"1"], [], [b"2", b"3"]], [[]]],
type=pa.list_(pa.list_(arrow_type)))
else:
values = tf.RaggedTensor.from_row_splits(
values=tf.constant([1, 2, 3], dtype=tf_type),
row_splits=np.asarray([0, 1, 1, 3, 3],
dtype=row_partition_numpy_type))
expected_array = pa.array([[[1], [], [2, 3]], [[]]],
type=pa.list_(pa.list_(arrow_type)))
result.append(
dict(
testcase_name="3d_ragged_tensor_%s_row_partition_dtype_%s" %
(tf_type.name, row_partition_dtype),
type_specs={
"sp":
tf.RaggedTensorSpec(
tf.TensorShape([2, None, None]),
tf_type,
ragged_rank=2,
row_splits_dtype=tf.dtypes.as_dtype(
row_partition_numpy_type))
},
expected_schema={"sp": pa.list_(pa.list_(arrow_type))},
expected_tensor_representations={
"sp":
"""ragged_tensor {
feature_path {
step: "sp"
}
row_partition_dtype: %s
}""" % row_partition_dtype,
},
tensor_input={
"sp":
tf.RaggedTensor.from_row_splits(
values=values,
row_splits=np.asarray([0, 3, 4],
dtype=row_partition_numpy_type))
},
expected_record_batch={"sp": expected_array}))
return result
_CONVERT_TEST_CASES = [
dict(
testcase_name="multiple_tensors",
type_specs={
"sp1": tf.SparseTensorSpec([None, None], tf.int32),
"sp2": tf.SparseTensorSpec([None, None], tf.string),
},
expected_schema={
"sp1": pa.list_(pa.int32()),
"sp2": pa.list_(pa.binary()),
},
expected_tensor_representations={
"sp1": """varlen_sparse_tensor { column_name: "sp1" }""",
"sp2": """varlen_sparse_tensor { column_name: "sp2" }""",
},
tensor_input={
"sp1":
tf.SparseTensor(
values=tf.constant([1, 2], dtype=tf.int32),
indices=[[0, 0], [2, 0]],
dense_shape=[4, 1]),
"sp2":
tf.SparseTensor(
values=[b"aa", b"bb"],
indices=[[2, 0], [2, 1]],
dense_shape=[4, 2])
},
expected_record_batch={
"sp1":
pa.array([[1], [], [2], []], type=pa.list_(pa.int32())),
"sp2":
pa.array([[], [], [b"aa", b"bb"], []],
type=pa.list_(pa.binary()))
}),
dict(
testcase_name="ragged_tensors",
type_specs={
"sp1":
tf.RaggedTensorSpec(
tf.TensorShape([2, None]),
tf.int64,
ragged_rank=1,
row_splits_dtype=tf.int64),
"sp2":
tf.RaggedTensorSpec(
tf.TensorShape([2, None]),
tf.string,
ragged_rank=1,
row_splits_dtype=tf.int64),
},
expected_schema={
"sp1": pa.list_(pa.int64()),
"sp2": pa.list_(pa.binary()),
},
expected_tensor_representations={
"sp1":
"""ragged_tensor {
feature_path {
step: "sp1"
}
row_partition_dtype: INT64
}""",
"sp2":
"""ragged_tensor {
feature_path {
step: "sp2"
}
row_partition_dtype: INT64
}""",
},
tensor_input={
"sp1":
tf.RaggedTensor.from_row_splits(
values=np.asarray([1, 5, 9], dtype=np.int64),
row_splits=np.asarray([0, 2, 3], dtype=np.int64)),
"sp2":
tf.RaggedTensor.from_row_splits(
values=np.asarray([b"x", b"y", b"z"], dtype=np.str),
row_splits=np.asarray([0, 2, 3], dtype=np.int64))
},
expected_record_batch={
"sp1": pa.array([[1, 5], [9]], type=pa.list_(pa.int32())),
"sp2": pa.array([[b"x", b"y"], [b"z"]], type=pa.list_(pa.binary())),
})
] + _make_2d_varlen_sparse_tensor_test_cases(
) + _make_3d_ragged_tensor_test_cases()
class TensorToArrowTest(tf.test.TestCase, parameterized.TestCase):
def _assert_tensor_alike_equal(self, left, right):
self.assertIsInstance(left, type(right))
if isinstance(left, tf.SparseTensor):
self.assertAllEqual(left.values, right.values)
self.assertAllEqual(left.indices, right.indices)
self.assertAllEqual(left.dense_shape, right.dense_shape)
else:
self.assertAllEqual(left, right)
@parameterized.named_parameters(*_CONVERT_TEST_CASES)
def test_convert(self, type_specs, expected_schema,
expected_tensor_representations, tensor_input,
expected_record_batch):
converter = tensor_to_arrow.TensorsToRecordBatchConverter(type_specs)
expected_schema = pa.schema(
[pa.field(n, t) for n, t in sorted(expected_schema.items())])
self.assertTrue(converter.arrow_schema().equals(expected_schema),
"actual: {}".format(converter.arrow_schema()))
canonical_expected_tensor_representations = {}
for n, r in expected_tensor_representations.items():
if not isinstance(r, schema_pb2.TensorRepresentation):
r = text_format.Parse(r, schema_pb2.TensorRepresentation())
canonical_expected_tensor_representations[n] = r
self.assertEqual(canonical_expected_tensor_representations,
converter.tensor_representations())
rb = converter.convert(tensor_input)
self.assertTrue(
rb.equals(
pa.record_batch(
[arr for _, arr in sorted(expected_record_batch.items())],
schema=expected_schema)))
# Test that TensorAdapter(TensorsToRecordBatchConverter()) is identity.
adapter = tensor_adapter.TensorAdapter(
tensor_adapter.TensorAdapterConfig(
arrow_schema=converter.arrow_schema(),
tensor_representations=converter.tensor_representations()))
adapter_output = adapter.ToBatchTensors(rb, produce_eager_tensors=True)
self.assertEqual(adapter_output.keys(), tensor_input.keys())
for k in adapter_output.keys():
self._assert_tensor_alike_equal(adapter_output[k], tensor_input[k])
def test_unable_to_handle(self):
with self.assertRaisesRegex(ValueError, "No handler found"):
tensor_to_arrow.TensorsToRecordBatchConverter(
{"sp": tf.SparseTensorSpec([None, None, None], tf.int32)})
with self.assertRaisesRegex(ValueError, "No handler found"):
tensor_to_arrow.TensorsToRecordBatchConverter(
{"sp": tf.SparseTensorSpec([None, None], tf.bool)})
def test_incompatible_type_spec(self):
converter = tensor_to_arrow.TensorsToRecordBatchConverter(
{"sp": tf.SparseTensorSpec([None, None], tf.int32)})
with self.assertRaisesRegex(TypeError, "Expected SparseTensorSpec"):
converter.convert({
"sp":
tf.SparseTensor(
indices=[[0, 1]],
values=tf.constant([0], dtype=tf.int64),
dense_shape=[4, 1])
})
def test_unable_to_handle_ragged(self):
# This case is for a value tensor of bool type
with self.assertRaisesRegex(ValueError, "No handler found"):
tensor_to_arrow.TensorsToRecordBatchConverter({
"sp":
tf.RaggedTensorSpec(
shape=[2, None, None],
dtype=tf.bool,
ragged_rank=2,
row_splits_dtype=tf.int64)
})
# This case is for a 2D leaf values tensor.
with self.assertRaisesRegex(ValueError, "No handler found"):
tensor_to_arrow.TensorsToRecordBatchConverter({
"sp":
tf.RaggedTensorSpec(
shape=[2, None, None],
dtype=tf.int32,
ragged_rank=1,
row_splits_dtype=tf.int64)
})
if __name__ == "__main__":
# Do not run these tests under TF1.x -- TensorToArrow does not support TF 1.x.
if tf.__version__ >= "2":
absltest.main()
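
# --- Usage sketch (illustrative, not part of the test): the converter API the
# tests above exercise, shown for a single 2-D varlen sparse tensor.
#   converter = tensor_to_arrow.TensorsToRecordBatchConverter(
#       {"sp": tf.SparseTensorSpec([None, None], tf.int64)})
#   converter.arrow_schema()   # sp: list<int64>
#   rb = converter.convert({"sp": tf.SparseTensor(
#       values=tf.constant([1, 2, 3], dtype=tf.int64),
#       indices=[[0, 0], [2, 0], [2, 1]],
#       dense_shape=[4, 2])})
#   rb.column(0)               # -> [[1], [], [2, 3], []]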
| 38.044444
| 80
| 0.550484
| 3,809
| 0.31784
| 0
| 0
| 1,714
| 0.143024
| 0
| 0
| 2,095
| 0.174816
|
e38f485bd754322f09d50cbe4ef3dae03d97f83a
| 417
|
py
|
Python
|
th_watchdog/failure_email.py
|
hwjeremy/th-watchdog
|
c32682f838fffa3396cabc3d83eeb4960c765fc9
|
[
"MIT"
] | null | null | null |
th_watchdog/failure_email.py
|
hwjeremy/th-watchdog
|
c32682f838fffa3396cabc3d83eeb4960c765fc9
|
[
"MIT"
] | null | null | null |
th_watchdog/failure_email.py
|
hwjeremy/th-watchdog
|
c32682f838fffa3396cabc3d83eeb4960c765fc9
|
[
"MIT"
] | null | null | null |
"""
Thornleigh Farm - VPN Watchdog
Failure Email Module
author: hugh@blinkybeach.com
"""
from th_watchdog.email import Email
class FailureEmail(Email):
"""
An email notifying the administrator of a failed state
"""
SUBJECT = 'Starport VPN connection lost'
BODY = 'Starport has lost connection to the VPN'
def __init__(self):
super().__init__(self.SUBJECT, self.BODY)
return
| 21.947368
| 58
| 0.690647
| 289
| 0.693046
| 0
| 0
| 0
| 0
| 0
| 0
| 229
| 0.549161
|
e390652d724f8e334ded89e33631ccb73990db8b
| 2,749
|
py
|
Python
|
nemo/nemo/backends/pytorch/common/data.py
|
petermartigny/NeMo
|
b20821e637314940e36b63d32c601c43d1b74051
|
[
"Apache-2.0"
] | 1
|
2020-03-22T11:23:11.000Z
|
2020-03-22T11:23:11.000Z
|
nemo/nemo/backends/pytorch/common/data.py
|
petermartigny/NeMo
|
b20821e637314940e36b63d32c601c43d1b74051
|
[
"Apache-2.0"
] | null | null | null |
nemo/nemo/backends/pytorch/common/data.py
|
petermartigny/NeMo
|
b20821e637314940e36b63d32c601c43d1b74051
|
[
"Apache-2.0"
] | 1
|
2019-10-23T01:19:19.000Z
|
2019-10-23T01:19:19.000Z
|
__all__ = ['TextDataLayer']
from functools import partial
import torch
from torch.utils.data import DataLoader, DistributedSampler
from nemo.backends.pytorch.common.parts import TextDataset
from nemo.backends.pytorch.nm import DataLayerNM
from nemo.core import DeviceType
from nemo.core.neural_types import *
from nemo.utils.misc import pad_to
class TextDataLayer(DataLayerNM):
"""A simple Neural Module for loading textual data
Args:
path: (str) Path to file with newline separate strings of text
labels (list): List of string labels to use when to str2int translation
eos_id (int): Label position of end of string symbol
pad_id (int): Label position of padding symbol
batch_size (int): Size of batches to generate in data loader
drop_last (bool): Whether we drop last (possibly) incomplete batch.
Defaults to False.
num_workers (int): Number of processes to work on data loading (0 for
just main process).
Defaults to 0.
"""
@staticmethod
def create_ports():
input_ports = {}
output_ports = {
'texts': NeuralType({
0: AxisType(BatchTag),
1: AxisType(TimeTag)
})
}
return input_ports, output_ports
def __init__(self, path, labels, eos_id, pad_id,
batch_size, drop_last=False, num_workers=0,
**kwargs):
super().__init__(**kwargs)
self._dataset = TextDataset(path, labels, eos_id)
if self._placement == DeviceType.AllGpu:
sampler = DistributedSampler(self._dataset)
else:
sampler = None
# noinspection PyTypeChecker
self._dataloader = DataLoader(
dataset=self._dataset,
batch_size=batch_size,
collate_fn=partial(self._collate_fn, pad_id=pad_id, pad8=True),
drop_last=drop_last,
shuffle=sampler is None,
sampler=sampler,
num_workers=num_workers
)
def __len__(self):
return len(self._dataset)
@property
def dataset(self):
# return self._dataset
return None
@property
def data_iterator(self):
return self._dataloader
@staticmethod
def _collate_fn(batch_list, pad_id, pad8=False):
max_len = max(len(s) for s in batch_list)
if pad8:
max_len = pad_to(max_len, 8)
texts = torch.empty(len(batch_list), max_len,
dtype=torch.long)
texts.fill_(pad_id)
for i, s in enumerate(batch_list):
texts[i].narrow(0, 0, s.size(0)).copy_(s)
assert len(texts.shape) == 2
return texts
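
# --- Illustrative sketch (not part of the module): what _collate_fn does to a
# batch of variable-length token-id tensors with pad_id=0 and pad8=True
# (assuming pad_to rounds the max length up to the next multiple of 8):
#   batch = [torch.tensor([5, 6, 7]), torch.tensor([5])]
#   TextDataLayer._collate_fn(batch, pad_id=0, pad8=True)
#   # -> a 2 x 8 LongTensor:
#   #    [[5, 6, 7, 0, 0, 0, 0, 0],
#   #     [5, 0, 0, 0, 0, 0, 0, 0]]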
| 29.244681
| 79
| 0.616588
| 2,399
| 0.872681
| 0
| 0
| 877
| 0.319025
| 0
| 0
| 722
| 0.262641
|
e39338fd7b675e7103c88c702302ca7865a71de5
| 21,161
|
py
|
Python
|
bot.py
|
marsDurden/UnipdBot
|
402b74f6bd876265b952f052e2c132f6aa3c050d
|
[
"Unlicense"
] | 4
|
2018-04-12T03:39:36.000Z
|
2019-11-26T07:52:30.000Z
|
bot.py
|
marsDurden/UnipdBot
|
402b74f6bd876265b952f052e2c132f6aa3c050d
|
[
"Unlicense"
] | null | null | null |
bot.py
|
marsDurden/UnipdBot
|
402b74f6bd876265b952f052e2c132f6aa3c050d
|
[
"Unlicense"
] | 1
|
2019-10-07T16:50:48.000Z
|
2019-10-07T16:50:48.000Z
|
import logging
from datetime import time, timedelta
from functools import wraps
# Telegram bot libraries
from telegram.ext import Updater, CommandHandler, MessageHandler, Filters, CallbackQueryHandler
from telegram import KeyboardButton, ReplyKeyboardMarkup, ParseMode, Bot, InlineKeyboardButton, InlineKeyboardMarkup
from telegram.error import TelegramError, Unauthorized, BadRequest, TimedOut, ChatMigrated, NetworkError
# Local files
from utils import *
from orario import *
import config
from captions import Captions
# Enable logging
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
level=logging.WARNING)
logger = logging.getLogger(__name__)
# Captions class handles languages
languages = Captions(config.supported_languages, config.captions_path)
# Decorators
def admin(func):
@wraps(func)
def wrapped(update, context, *args, **kwargs):
# Set administrators of bot
admins = [config.botAdminID]
# Get user ID of message
user_id = update.message.chat.id
if user_id in admins:
return func(update, context, *args, **kwargs)
return
return wrapped
def autoban(func):
@wraps(func)
def wrapped(update, context, *args, **kwargs):
# Save user
new_user(update)
# Autoban system TODO
bans = []
user_id = update.message.chat.id
if user_id not in bans:
return func(update, context, *args, **kwargs)
else:
return
return wrapped
# Update Handlers
@autoban
def start(update, context):
# TODO User privacy disclaimer
new_user(update)
home(update, context)
@autoban
def home(update, context):
chat_id = get_chat_id(update)
reply = languages.get_reply('home', lang=get_lang(update))
if chat_id > 0:
markup = languages.get_keyboard('home', lang=get_lang(update))
# Add location button to keyboard
markup[3][0] = KeyboardButton(text=str(markup[3][0]), request_location=True)
context.bot.sendMessage(chat_id=chat_id,
text=reply,
reply_markup=ReplyKeyboardMarkup(markup, resize_keyboard=True))
else:
markup = languages.get_keyboard('home', lang=get_lang(update), isGroup=True)
context.bot.sendMessage(chat_id=chat_id,
text=reply,
reply_markup=ReplyKeyboardMarkup(markup, resize_keyboard=True))
@autoban
def mensa(update, context):
reply = languages.get_reply('mensa', lang=get_lang(update))
markup = languages.get_keyboard('mensa', lang=get_lang(update))
context.bot.sendMessage(chat_id=get_chat_id(update),
text=reply,
parse_mode=ParseMode.MARKDOWN,
reply_markup=ReplyKeyboardMarkup(markup, resize_keyboard=True))
@autoban
def aulastudio(update, context):
reply = languages.get_reply('aulastudio', lang=get_lang(update))
markup = languages.get_keyboard('aulastudio', lang=get_lang(update))
context.bot.sendMessage(chat_id=get_chat_id(update),
text=reply,
reply_markup=ReplyKeyboardMarkup(markup, resize_keyboard=True))
@autoban
def biblioteca(update, context):
reply = languages.get_reply('biblioteca', lang=get_lang(update))
markup = languages.get_keyboard('biblioteca', lang=get_lang(update))
context.bot.sendMessage(chat_id=get_chat_id(update),
text=reply,
parse_mode=ParseMode.MARKDOWN,
reply_markup=ReplyKeyboardMarkup(markup, resize_keyboard=True))
@autoban
def dirittostudio(update, context):
reply = languages.get_reply('diritto_studio', lang=get_lang(update))
markup = languages.get_keyboard('diritto_studio', lang=get_lang(update))
context.bot.sendMessage(chat_id=get_chat_id(update),
text=reply,
reply_markup=ReplyKeyboardMarkup(markup, resize_keyboard=True))
@autoban
def udupadova(update, context):
reply = languages.get_reply('udupadova', lang=get_lang(update))
markup = languages.get_keyboard('udupadova', lang=get_lang(update))
context.bot.sendMessage(chat_id=get_chat_id(update),
text=reply,
reply_markup=ReplyKeyboardMarkup(markup, resize_keyboard=True))
@autoban
def botinfo(update, context):
reply = languages.get_reply('botinfo', lang=get_lang(update))
markup = [[InlineKeyboardButton('Source code on Github', url='https://github.com/marsDurden/UnipdBot')]]
markup = InlineKeyboardMarkup(markup)
context.bot.sendMessage(chat_id=get_chat_id(update),
text=reply,
parse_mode=ParseMode.MARKDOWN,
reply_markup=markup)
@autoban
def sub_command(update, context):
# Save message
save_msg(update)
chat_id = get_chat_id(update)
command = str(update.message.text).replace('/', '').lower()
try:
command = languages.inverse_command_map(command, lang=get_lang(update))
except KeyError:
pass
reply = languages.get_reply(command, lang=get_lang(update))
if reply == '': reply = 'Testo da inserire'
inline, markup, lat, lon = languages.get_keyboard(command, lang=get_lang(update))
if inline:
markup = [[InlineKeyboardButton(text=line['text'], url=line['url'])] for line in markup.values()]
markup = InlineKeyboardMarkup(markup)
context.bot.sendMessage(chat_id=chat_id,
text=reply,
parse_mode=ParseMode.MARKDOWN,
reply_markup=markup)
else:
context.bot.sendMessage(chat_id=chat_id,
text=reply,
parse_mode=ParseMode.MARKDOWN)
if lat is not None and lon is not None:
context.bot.sendLocation(chat_id=chat_id,
latitude=lat,
longitude=lon)
@autoban
def cerca(update, context):
# Save message
save_msg(update)
args = context.args
chat_id = get_chat_id(update)
context.bot.sendChatAction(chat_id=chat_id,
action="typing")
reply, markup = cerca_facile('%20'.join(args), languages.get_reply('cerca', lang=get_lang(update)))
if markup is not None:
markup = [[InlineKeyboardButton(markup['text'], url=markup['url'])]]
markup = InlineKeyboardMarkup(markup)
context.bot.sendMessage(chat_id=chat_id,
text=reply,
parse_mode=ParseMode.MARKDOWN,
reply_markup=markup)
else:
context.bot.sendMessage(chat_id=chat_id,
text=reply,
parse_mode=ParseMode.MARKDOWN)
@autoban
def settings(update, context):
reply = languages.get_reply('settings', lang=get_lang(update))
reply, keyboard = get_user_settings(update, reply)
reply_markup = InlineKeyboardMarkup(keyboard)
context.bot.sendMessage(chat_id=get_chat_id(update),
text=reply,
parse_mode=ParseMode.MARKDOWN,
reply_markup=reply_markup)
@autoban
def orario(update, context):
u_id = str(update.message.from_user.id)
chat_id = update.message.chat_id
lang_str = languages.get_reply('orario', lang=get_lang('', u_id=u_id))
reply, keyboard = orarioSetup(chat_id, lang_str, resetDate=True)
reply_markup = InlineKeyboardMarkup(keyboard)
context.bot.sendMessage(chat_id=chat_id,
text=reply,
parse_mode=ParseMode.MARKDOWN,
reply_markup=reply_markup)
def callback_orario(update, context):
data = update.callback_query.data[2:]
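    # The first line of the timetable message ends with the last displayed date, presumably formatted "(dd/mm/yyyy)", so the [-11:-1] slice keeps the 10-character date.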
ultima_data = update.callback_query.message.text.splitlines()[0][-11:-1]
u_id = str(update.callback_query.from_user.id)
chat_id = update.callback_query.message.chat_id
lang_str = languages.get_reply('orario', lang=get_lang('', u_id=u_id))
reply, keyboard = orarioSaveSetting(chat_id, data, lang_str, last_date=ultima_data)
reply_markup = InlineKeyboardMarkup(keyboard)
context.bot.editMessageText(text=reply,
chat_id=chat_id,
message_id=update.callback_query.message.message_id,
parse_mode=ParseMode.MARKDOWN,
reply_markup=reply_markup)
def callback_settings(update, context):
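    # Callback data looks like "2-<section>-<action>[-<value>]", e.g. "2-alarm-set-7:30" or "2-mensa-set-<name>"; the leading "2-" routing prefix is stripped before splitting.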
data = update.callback_query.data[2:].split('-')
u_id = str(update.callback_query.from_user.id)
chat_id = update.callback_query.message.chat_id
if data[0] == 'alarm':
if data[1] == 'on':
            # Close the scheduled alarm job
unset_job_orario(str(chat_id), context.job_queue)
set_alarm_value(u_id, None)
elif data[1] == 'off':
            # Let the user pick a time for the daily timetable alarm
lang_list = languages.get_reply('settings', lang=get_lang('', u_id=u_id))
markup = []
for hour in [5, 7, 9, 12, 18, 21]:
markup.append([InlineKeyboardButton(str(hour)+':00', callback_data='2-alarm-set-'+str(hour)+':00'), InlineKeyboardButton(str(hour)+':30', callback_data='2-alarm-set-'+str(hour)+':30'),
InlineKeyboardButton(str(hour+1)+':00', callback_data='2-alarm-set-'+str(hour+1)+':00'), InlineKeyboardButton(str(hour+1)+':30', callback_data='2-alarm-set-'+str(hour+1)+':30')])
markup = InlineKeyboardMarkup(markup)
context.bot.editMessageText(text=lang_list[5],
chat_id=chat_id,
message_id=update.callback_query.message.message_id,
parse_mode=ParseMode.MARKDOWN,
reply_markup=markup)
return
elif data[1] == 'set':
set_job_orario(str(chat_id), u_id, context.job_queue, orario=data[2])
set_alarm_value(u_id, data[2])
elif data[0] == 'mensa':
if data[1] == 'enable':
            # Let the user pick a favourite canteen
mense_list = languages.get_keyboard('mensa')
lang_list = languages.get_reply('settings', lang=get_lang('', u_id=u_id))
markup = []
for row in mense_list:
for mensa in row:
if mensa != '/home':
markup.append([InlineKeyboardButton(mensa.replace('/',''), callback_data='2-mensa-set-'+mensa.replace('/',''))])
markup = InlineKeyboardMarkup(markup)
context.bot.editMessageText(text=lang_list[9],
chat_id=chat_id,
message_id=update.callback_query.message.message_id,
parse_mode=ParseMode.MARKDOWN,
reply_markup=markup)
return
elif data[1] == 'set':
set_fav_mensa(u_id, data[2])
elif data[1] == 'disable':
set_fav_mensa(u_id, None)
elif data[0] == 'lang':
changed = set_lang(u_id, data[1])
if not changed: return
reply, keyboard = get_user_settings(update, languages.get_reply('settings', lang=get_lang('', u_id=u_id)), u_id=u_id)
reply_markup = InlineKeyboardMarkup(keyboard)
context.bot.editMessageText(text=reply,
chat_id=chat_id,
message_id=update.callback_query.message.message_id,
parse_mode=ParseMode.MARKDOWN,
reply_markup=reply_markup)
def job_orario(context):
chat_id = context.job.context[0]
u_id = context.job.context[0]
lang_str = languages.get_reply('orario', lang=get_lang('', u_id=u_id))
reply, keyboard = orarioSetup(chat_id, lang_str, resetDate=True)
# Check if orario is empty
if lang_str['text'][9] in reply: return
reply_markup = InlineKeyboardMarkup(keyboard)
context.bot.sendMessage(chat_id=chat_id,
text=reply,
parse_mode=ParseMode.MARKDOWN,
disable_notification=True,
reply_markup=reply_markup)
def set_job_orario(chat_id, u_id, job_queue, orario):
try:
        # 0: Mon, 1: Tue, 2: Wed, 3: Thu, 4: Fri
orario = orario.split(':')
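        # e.g. "7:30" -> ["7", "30"]: the daily timetable message is scheduled at 07:30 on weekdays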
job_queue.run_daily(job_orario, time=time(int(orario[0]), int(orario[1]), 0), days=(0, 1, 2, 3, 4), context=[chat_id, u_id])
#job_queue.run_repeating(job_orario, timedelta(seconds=10), context=[chat_id, u_id]) # For testing
except (IndexError, ValueError):
pass
def unset_job_orario(chat_id, job_queue):
for job in job_queue.jobs():
try:
if job.context[0] == chat_id:
job.schedule_removal()
except:
pass
def job_mensa(context):
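    # Drain the queue of canteens whose menu was just updated and notify every user who saved that canteen as a favourite.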
while languages.daily_mensa['new']:
mensa = languages.daily_mensa['new'].pop()
print('Aggiornamento mensa', mensa)
for user_id in get_fav_mensa_users(mensa):
reply = languages.get_reply('mensa', lang=get_lang('', u_id=user_id))
markup = languages.get_keyboard('mensa', lang=get_lang('', u_id=user_id))
context.bot.sendMessage(chat_id=user_id, text=reply,
parse_mode=ParseMode.MARKDOWN, disable_notification=True,
reply_markup=ReplyKeyboardMarkup(markup, resize_keyboard=True))
languages.daily_mensa['completed'].append(mensa)
def set_job_mensa(context):
# Run job for 4 hours (= 14400 seconds) so 9:00 ~ 13:00
context.job_queue.run_repeating(job_mensa, interval=14400)
@autoban
def position(update, context):
# Save message
save_loc(update)
usrCoord = update.message.location
reply, markup = languages.reply_position(usrCoord, lang=get_lang(update))
context.bot.sendMessage(chat_id=get_chat_id(update),
text=reply,
parse_mode=ParseMode.MARKDOWN,
reply_markup=ReplyKeyboardMarkup(markup, resize_keyboard=True))
@autoban
def simpleText(update, context):
cmd = update.message.text.lower().strip().replace("\\","").replace("/","").replace("%","")
inoltra = False
if cmd in ['mensa', 'menu', 'menù']:
mensa(update, context)
elif cmd in ['help','info', 'aiuto']:
botinfo(update, context)
elif cmd in ['orari', 'orario']:
orario(update, context)
elif cmd in ['biblioteca', 'biblio']:
biblioteca(update, context)
elif cmd in ['home','start']:
home(update, context)
elif cmd in ['aulastudio', 'aula studio', 'aule studio']:
aulastudio(update, context)
elif cmd in ['impostazioni']:
settings(update, context)
elif cmd in config.sub_commands:
sub_command(update, context)
elif cmd == 'pio x':
update.message.text = 'acli'
sub_command(update, context)
elif cmd.find("sds") >= 0 or cmd.find("sindacato degli studenti") >= 0:
context.bot.sendMessage(chat_id=update.message.chat_id,
text="Sindacato degli Studenti?\nNe ho sentito parlare, non ho ancora avuto il piacere di conoscere il loro BOT")
inoltra = True
elif cmd in ['votare', 'votazioni', 'seggi', 'seggio', 'elezioni']:
seggi(update, context)
else:
inoltra = True
if inoltra:
# Save message
save_msg(update)
# Forward message to admin
context.bot.forwardMessage(chat_id=config.botAdminID,
from_chat_id=update.message.chat_id,
disable_notification=True,
message_id=update.message.message_id)
text = '<code>/reply ' + str(update.message.chat.id) + '</code>'
context.bot.sendMessage(chat_id=config.botAdminID,
parse_mode=ParseMode.HTML,
disable_notification=True,
text=text)
def admin_forward(update, context):
context.bot.forwardMessage(chat_id=config.botAdminID,
from_chat_id=get_chat_id(update),
message_id=update.message.message_id)
text = '<code>/reply ' + str(update.message.chat.id) + '</code>'
context.bot.sendMessage(chat_id=config.botAdminID,
parse_mode=ParseMode.HTML,
disable_notification=True,
text=text)
@admin
def admin_reply(update, context):
args = context.args
msg = update.message.to_dict()
servicer = Bot(token=config.bot_token)
try:
tmp = "/reply " + args[0] + " "
sent = context.bot.sendMessage(chat_id=args[0],
text=(update.message.text).replace(tmp, ""))
servicer.sendMessage(chat_id=config.botAdminID, text='Messaggio inviato a '+str(sent['chat']['first_name']))
except:
servicer.sendMessage(chat_id=config.botAdminID, parse_mode=ParseMode.MARKDOWN, text="*ERRORE*\nMessaggio non inviato")
@admin
def admin_update(update, context):
#languages.update_mense()
languages.update_json()
context.bot.sendMessage(chat_id=config.botAdminID, text='Mense: updated\nJson reloaded')
def error(update, context):
try:
# Normal message
context.bot.sendMessage(str(config.botAdminID),parse_mode=ParseMode.MARKDOWN, text=('*ERROR*\nID: `%s`\ntext: %s\ncaused error: _%s_' % (update.message.chat_id, update.message.text, context.error)))
logger.warn('Update "%s" caused error "%s"' % (update.message.text, context.error))
except:
# Callback message
context.bot.sendMessage(str(config.botAdminID),parse_mode=ParseMode.MARKDOWN, text=('*ERROR*\nID: `%s`\ntext: %s\ncaused error: _%s_' % (update.callback_query.message.chat_id, update.callback_query.data, context.error)))
logger.warn('Update "%s" caused error "%s"' % (update.callback_query.data, context.error))
finally:
with open('error.log', 'a') as f:
f.write(str(update))
f.write('\n')
f.write(str(context.error))
f.write('\n\n\n')
f.close()
def load_jobs(jq):
# Daily orario
for item in get_enabled_alarm_users():
set_job_orario(item[0], item[0], jq, item[1])
# Daily mensa
jq.run_daily(set_job_mensa, time=time(9, 0, 0), days=(0, 1, 2, 3, 4))
def main():
# Run bot
# Create the Updater and pass it your bot's token.
updater = Updater(config.bot_token, use_context=True)
job_queue = updater.job_queue
# Get the dispatcher to register handlers
dp = updater.dispatcher
# on different commands - answer in Telegram
dp.add_handler(CommandHandler("start", start))
dp.add_handler(CommandHandler("home", home))
dp.add_handler(CommandHandler(languages.get_command_handlers('help'), botinfo))
dp.add_handler(CommandHandler(languages.get_command_handlers('botinfo'), botinfo))
dp.add_handler(CommandHandler(languages.get_command_handlers('mensa'), mensa))
dp.add_handler(CommandHandler(languages.get_command_handlers('aulastudio'), aulastudio))
dp.add_handler(CommandHandler(languages.get_command_handlers('biblioteca'), biblioteca))
dp.add_handler(CommandHandler(languages.get_command_handlers('udupadova'), udupadova))
dp.add_handler(CommandHandler(languages.get_command_handlers('diritto_studio'), dirittostudio))
dp.add_handler(CommandHandler(languages.get_command_handlers('cerca'), cerca, pass_args=True))
dp.add_handler(CommandHandler(languages.get_command_handlers('impostazioni'), settings))
# Subcommands
dp.add_handler(CommandHandler(languages.get_command_handlers('sub_commands'), sub_command))
dp.add_handler(CommandHandler(config.sub_commands, sub_command))
# Orario
dp.add_handler(CommandHandler(languages.get_command_handlers('orario'), orario))
# Inline callbacks
#
# pattern | class
# 0- | do nothing
# 1- | orario
# 2- | settings
# 3- | beta-testing
dp.add_handler(CallbackQueryHandler(callback_orario, pattern='^1-', pass_job_queue=True))
dp.add_handler(CallbackQueryHandler(callback_settings, pattern='^2-', pass_job_queue=True))
    # Near me (location messages)
dp.add_handler(MessageHandler(Filters.location, position))
# Admin
dp.add_handler(CommandHandler("reply", admin_reply, pass_args=True))
dp.add_handler(CommandHandler("update", admin_update))
dp.add_handler(MessageHandler(Filters.text | Filters.command, simpleText))
dp.add_handler(MessageHandler(Filters.contact | Filters.voice | Filters.video |
Filters.sticker | Filters.document | Filters.photo |
Filters.audio | Filters.invoice, admin_forward))
# log all errors
dp.add_error_handler(error)
# Load user daily_orario jobs
load_jobs(job_queue)
# Start the Bot
updater.start_polling()
# Block until you press Ctrl-C or the process receives SIGINT, SIGTERM or
# SIGABRT. This should be used most of the time, since start_polling() is
# non-blocking and will stop the bot gracefully.
updater.idle()
# Stop update process in background
languages.stop()
if __name__ == '__main__':
main()
| 40.460803
| 228
| 0.641038
| 0
| 0
| 0
| 0
| 9,826
| 0.464323
| 0
| 0
| 2,826
| 0.133541
|
e39343d1ccb1c9771b5f47a5eb48d8ff84409b31
| 2,591
|
py
|
Python
|
server/twitter.py
|
abijith-kp/Emolytics
|
00e94798ab20621b51f6ce2a058e0dd8dec1cdba
|
[
"BSD-3-Clause"
] | null | null | null |
server/twitter.py
|
abijith-kp/Emolytics
|
00e94798ab20621b51f6ce2a058e0dd8dec1cdba
|
[
"BSD-3-Clause"
] | null | null | null |
server/twitter.py
|
abijith-kp/Emolytics
|
00e94798ab20621b51f6ce2a058e0dd8dec1cdba
|
[
"BSD-3-Clause"
] | null | null | null |
from server import db, auth, emolytics
from server.models import Tweet
from classifier import create_classifier
from tweepy import Stream
from tweepy.streaming import StreamListener
from flask.ext.rq import job
import json
import random
from multiprocessing import Process
from sqlalchemy.exc import IntegrityError
def get_document(status):
status = json.loads(status)
lat = 0.0
lon = 0.0
try:
lon, lat = status["place"]["bounding_box"]["coordinates"][0][0]
except:
pass
return {"tweet": status["text"], "pos": [lat, lon]}
class StdOutListener(StreamListener):
def on_data(self, status):
with emolytics.app_context():
try:
doc = get_document(status)
loc = doc["pos"]
if loc != [0, 0]:
t = Tweet(doc['tweet'], loc[0], loc[1])
db.session.add(t)
db.session.commit()
except IntegrityError, ie:
pass
except Exception, e:
pass
return True
def on_error(self, error_code):
pass
@job('emolytics')
def start_streaming(track=[""], locations=[-180,-90,180,90], languages=["en"]):
print "Starting streaming"
l = StdOutListener()
stream = Stream(auth, l)
while True:
try:
stream.disconnect()
stream.filter(track=track, locations=locations, languages=languages)
except Exception, e:
pass
@job('emolytics')
def classify():
print "Starting classification"
with emolytics.app_context():
CLF = create_classifier()
c = {0: "green", 1: "red"}
while True:
result = Tweet.query.filter((Tweet.flag == False)).all()
try:
for t in result:
r = CLF.predict(t.tweet.encode('utf-8'))
t.color = c[int(r)]
db.session.commit()
except IntegrityError, ie:
                db.session.rollback()
except Exception, e:
pass
'''
def start_thread(track):
global process
if process != None and process.is_alive():
process.terminate()
process = Process(target=start_streaming, kwargs={"track": track})
process.start()
print "Started the thread"
def start_classification():
global clf_process
if clf_process != None and clf_process.is_alive():
clf_process.terminate()
clf_process = Process(target=classify)
clf_process.start()
print "Started classification"
'''
| 27.56383
| 80
| 0.580857
| 555
| 0.214203
| 0
| 0
| 971
| 0.374759
| 0
| 0
| 646
| 0.249325
|
e3946d8baaf74d520467a9069ae0fdd15f75585b
| 1,340
|
py
|
Python
|
misc_code/extractGridFeatures.py
|
Lab-Work/gpsresilience
|
7c5183092013d44ce6d295469880502407c0e4ac
|
[
"NCSA",
"Unlicense"
] | 21
|
2015-03-10T19:13:38.000Z
|
2021-06-27T06:01:00.000Z
|
misc_code/extractGridFeatures.py
|
Lab-Work/gpsresilience
|
7c5183092013d44ce6d295469880502407c0e4ac
|
[
"NCSA",
"Unlicense"
] | null | null | null |
misc_code/extractGridFeatures.py
|
Lab-Work/gpsresilience
|
7c5183092013d44ce6d295469880502407c0e4ac
|
[
"NCSA",
"Unlicense"
] | 17
|
2015-03-10T19:13:39.000Z
|
2020-11-14T08:30:40.000Z
|
import csv
import os
import shutil
from datetime import datetime
from grid import *
#from cluster import *
from regions import *
start_time = datetime.now()
print("Allocating...")
#grid2
#gridSystem = GridSystem(-74.04, -73.775, 5, 40.63, 40.835, 5)
#gridname = "grid2"
#grid3
#gridSystem = GridSystem(-74.02, -73.938, 4, 40.7, 40.815, 6)
#gridname = "grid3"
#cluster1
#gridSystem = ClusterSystem("cluster1/clusters.csv")
#gridname = "cluster1"
gridSystem = RegionSystem("4year_features")
gridname = "region1"
invalids = 0
for y in ["FOIL2010", "FOIL2011", "FOIL2012", "FOIL2013"]:
for n in range(1,13):
filename = "../../new_chron/" + y + "/trip_data_" + str(n) + ".csv"
print("Reading file " + filename)
r = csv.reader(open(filename, "r"))
i = 0
header = True
for line in r:
if(header):
Trip.initHeader(line)
header = False
else:
trip = None
try:
trip = Trip(line)
except ValueError:
invalids += 1
                if trip != None and (y != "FOIL" + str(trip.date.year) or n != trip.date.month):
trip.has_other_error = True
gridSystem.record(trip)
i += 1
if(i%1000000==0):
print("Read " + str(i) + " rows")
gridSystem.close()
end_time = datetime.now()
program_duration = end_time - start_time
print("Processing took " + str(program_duration))
| 20
| 81
| 0.636567
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 451
| 0.336567
|
e394ba0d9db0eefab6e3defb03dc2daf2b6faade
| 6,472
|
py
|
Python
|
haiku/_src/stateful_test.py
|
madisonmay/dm-haiku
|
de95f6f83561edeb582d46b2e3bf135051792b91
|
[
"Apache-2.0"
] | null | null | null |
haiku/_src/stateful_test.py
|
madisonmay/dm-haiku
|
de95f6f83561edeb582d46b2e3bf135051792b91
|
[
"Apache-2.0"
] | null | null | null |
haiku/_src/stateful_test.py
|
madisonmay/dm-haiku
|
de95f6f83561edeb582d46b2e3bf135051792b91
|
[
"Apache-2.0"
] | null | null | null |
# Lint as: python3
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for haiku._src.stateful."""
from absl.testing import absltest
from haiku._src import base
from haiku._src import module
from haiku._src import stateful
from haiku._src import test_utils
from haiku._src import transform
import jax
import jax.numpy as jnp
import numpy as np
class StatefulTest(absltest.TestCase):
@test_utils.transform_and_run
def test_grad(self):
x = jnp.array(3.)
g = stateful.grad(SquareModule())(x)
np.testing.assert_allclose(g, 2 * x, rtol=1e-4)
def test_grad_no_transform(self):
x = jnp.array(3.)
with self.assertRaises(ValueError, msg="Use jax.grad() instead"):
stateful.grad(lambda x: x**2)(x)
@test_utils.transform_and_run
def test_value_and_grad(self):
x = jnp.array(2.)
y, g = stateful.value_and_grad(SquareModule())(x)
self.assertEqual(y, x ** 2)
np.testing.assert_allclose(g, 2 * x, rtol=1e-4)
def test_value_and_grad_no_transform(self):
x = jnp.array(3.)
with self.assertRaises(ValueError, msg="Use jax.grad() instead"):
stateful.value_and_grad(lambda x: x**2)(x)
@test_utils.transform_and_run
def test_grad_aux(self):
o = object()
def f(x):
m = SquareModule()
return m(x), o
x = jnp.array(3.)
g, aux = stateful.grad(f, has_aux=True)(x)
np.testing.assert_allclose(g, 2 * x, rtol=1e-4)
self.assertIs(aux, o)
@test_utils.transform_and_run
def test_value_and_grad_aux(self):
o = object()
def f(x):
m = SquareModule()
return m(x), o
x = jnp.array(3.)
(y, aux), g = stateful.value_and_grad(f, has_aux=True)(x)
self.assertEqual(y, x ** 2)
np.testing.assert_allclose(g, 2 * x, rtol=1e-4)
self.assertIs(aux, o)
def test_grad_and_jit(self):
def f(x):
g = stateful.grad(SquareModule())(x)
return g
x = jnp.array(3.)
f = transform.transform_with_state(f)
params, state = jax.jit(f.init)(None, x)
g, state = jax.jit(f.apply)(params, state, None, x)
np.testing.assert_allclose(g, 2 * x, rtol=1e-3)
def test_value_and_grad_and_jit(self):
def f(x):
y, g = stateful.value_and_grad(SquareModule())(x)
return y, g
x = jnp.array(3.)
f = transform.transform_with_state(f)
params, state = jax.jit(f.init)(None, x)
(y, g), state = jax.jit(f.apply)(params, state, None, x)
np.testing.assert_allclose(y, x ** 2, rtol=1e-3)
np.testing.assert_allclose(g, 2 * x, rtol=1e-3)
@test_utils.transform_and_run
def test_jit(self):
mod = SquareModule()
x = jnp.array(2)
y = stateful.jit(mod)(x)
self.assertEqual(y, x ** 2)
def test_jit_no_transform(self):
x = jnp.array(2)
with self.assertRaises(ValueError, msg="Use jax.jit() instead"):
stateful.jit(lambda x: x**2)(x)
@test_utils.transform_and_run
def test_remat(self):
forward, backward = [], []
callback = _callback_prim(lambda: forward.append(None),
lambda: backward.append(None))
def test(remat):
x = jnp.array(3.)
mod = CountingModule()
self.assertEqual(mod.count, 0)
f = lambda x: callback(mod(x))
if remat:
f = stateful.remat(f)
y, g = stateful.value_and_grad(f)(x)
np.testing.assert_allclose(y, x ** 2, rtol=1e-3)
np.testing.assert_allclose(g, 2 * x, rtol=1e-3)
self.assertEqual(mod.count, 1)
num_forward = len(forward)
num_backward = len(backward)
del forward[:], backward[:]
return num_forward, num_backward
# Sanity check.
self.assertEqual(test(remat=True), test(remat=True))
self.assertEqual(test(remat=False), test(remat=False))
# NOTE: JAX does not guarantee to execute primitives once and only once for
# a given function (we observe f=2,b=1 without remat and f=5,b=1 with
# remat), but we do expect that JAX will execute our primitive forward at
# least one more time with remat than without it.
num_forward_remat, num_backward_remat = test(remat=True)
num_forward_no_remat, num_backward_no_remat = test(remat=False)
self.assertGreater(num_forward_remat, num_forward_no_remat)
self.assertEqual(num_backward_remat, num_backward_no_remat)
def test_remat_no_transform(self):
x = jnp.array(3.)
with self.assertRaises(ValueError, msg="Use jax.remat() instead"):
stateful.remat(lambda x: x**2)(x)
def test_cond(self):
def f(x):
mod = SquareModule()
return stateful.cond(x == 2, x, mod, x, lambda x: mod(x + 1))
f = transform.transform_with_state(f)
for x, y in ((1, 4), (2, 4), (3, 16)):
x, y = map(jnp.array, (x, y))
params, state = f.init(None, x)
out, state = f.apply(params, state, None, x)
self.assertEqual(state, {"square_module": {"y": y}})
self.assertEqual(out, y)
def test_cond_no_transform(self):
x = jnp.array(3.)
with self.assertRaises(ValueError, msg="Use jax.cond() instead"):
stateful.cond(x == 2, x, lambda x: x**2, x, lambda x: (x + 1)**2)
def _callback_prim(forward, backward):
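  # Identity-like primitive: evaluating it calls forward() and its linear transpose (run during the backward pass) calls backward(), letting tests count forward/backward executions.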
def f_impl(x):
forward()
return x
def b_impl(x):
backward()
return (x,)
prim = jax.core.Primitive("hk_callback")
prim.def_impl(f_impl)
prim.def_abstract_eval(f_impl)
jax.ad.deflinear(prim, b_impl)
return prim.bind
class CountingModule(module.Module):
@property
def count(self):
return base.get_state("count", [], init=jnp.zeros)
def __call__(self, x):
y = x ** 2
base.set_state("count", self.count + 1)
return y
class SquareModule(module.Module):
def __call__(self, x):
assert x.ndim == 0
p = base.get_parameter("p", [], jnp.int32, init=lambda *_: jnp.array(2))
y = x ** p
base.set_state("y", y)
return y
if __name__ == "__main__":
absltest.main()
| 30.102326
| 80
| 0.65204
| 5,130
| 0.792645
| 0
| 0
| 2,662
| 0.41131
| 0
| 0
| 1,198
| 0.185105
|
e395c0ffd85f1bb1c10ed76e50b3673348515641
| 1,205
|
py
|
Python
|
pocketbook/commands/rename.py
|
ejfitzgerald/tools-pocketbook
|
c36254f3e39e875cacf50a5e90b9723e19c1ed9b
|
[
"Apache-2.0"
] | 1
|
2021-09-08T05:30:11.000Z
|
2021-09-08T05:30:11.000Z
|
pocketbook/commands/rename.py
|
ejfitzgerald/tools-pocketbook
|
c36254f3e39e875cacf50a5e90b9723e19c1ed9b
|
[
"Apache-2.0"
] | 14
|
2019-12-30T17:43:22.000Z
|
2021-11-18T10:45:14.000Z
|
pocketbook/commands/rename.py
|
ejfitzgerald/tools-pocketbook
|
c36254f3e39e875cacf50a5e90b9723e19c1ed9b
|
[
"Apache-2.0"
] | 3
|
2019-12-24T10:50:44.000Z
|
2021-11-20T21:24:32.000Z
|
def run_rename(args):
from pocketbook.address_book import AddressBook
from pocketbook.key_store import KeyStore
address_book = AddressBook()
key_store = KeyStore()
# make sure that the new name is not present either as a key, or as an address
new_present = args.new in address_book.keys() or args.new in key_store.list_keys()
if new_present:
print('{} is already present, please choose a different destination name'.format(args.new))
return 1
# check the old address or key name
old_is_address = args.old in address_book.keys()
old_is_key = args.old in key_store.list_keys()
success = False
if old_is_address and old_is_key:
        raise RuntimeError('Data store corrupted, key looks like an address + key')
elif old_is_address:
success = address_book.rename(args.old, args.new)
elif old_is_key:
success = key_store.rename_key(args.old, args.new)
else:
print('{} doesn\'t appear to be a valid key or address name, please check and try again'.format(args.old))
return 1
if not success:
print('Failed to rename {} to {}'.format(args.old, args.new))
return 1
return 0
| 35.441176
| 114
| 0.682988
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 345
| 0.286307
|
e396807417ead50e7f98ae6a7599b85f383deff5
| 395
|
py
|
Python
|
src/orion/algo/robo/__init__.py
|
lebrice/orion.algo.robo
|
f7e14e305619344ed9afd303fecbfcabda6ae7ce
|
[
"BSD-3-Clause"
] | null | null | null |
src/orion/algo/robo/__init__.py
|
lebrice/orion.algo.robo
|
f7e14e305619344ed9afd303fecbfcabda6ae7ce
|
[
"BSD-3-Clause"
] | null | null | null |
src/orion/algo/robo/__init__.py
|
lebrice/orion.algo.robo
|
f7e14e305619344ed9afd303fecbfcabda6ae7ce
|
[
"BSD-3-Clause"
] | 2
|
2020-09-28T15:18:19.000Z
|
2021-06-29T20:27:18.000Z
|
# -*- coding: utf-8 -*-
"""
Wrapper for RoBO
"""
__descr__ = "TODO"
__license__ = "BSD 3-Clause"
__author__ = u"Epistímio"
__author_short__ = u"Epistímio"
__author_email__ = "xavier.bouthillier@umontreal.ca"
__copyright__ = u"2021, Epistímio"
__url__ = "https://github.com/Epistimio/orion.algo.robo"
from ._version import get_versions
__version__ = get_versions()["version"]
del get_versions
| 21.944444
| 56
| 0.741772
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 200
| 0.502513
|
e399392a521bfe1faf93a652bafa6c185ca4d8e0
| 2,343
|
py
|
Python
|
minimum/minimum-function.py
|
gunater/Numerical-methods
|
4cf676b7d3996b7e70c6f4b50b15acc330a0d763
|
[
"MIT"
] | null | null | null |
minimum/minimum-function.py
|
gunater/Numerical-methods
|
4cf676b7d3996b7e70c6f4b50b15acc330a0d763
|
[
"MIT"
] | null | null | null |
minimum/minimum-function.py
|
gunater/Numerical-methods
|
4cf676b7d3996b7e70c6f4b50b15acc330a0d763
|
[
"MIT"
] | null | null | null |
"""
Znaleść kąt á, przy którym zasięg skoku z wahadła będzie maksymalny. Należy
posłużyć się metodą złotego podziału.
Mateusz Ostrowski
Index:216708
"""
import matplotlib.pyplot as plt
import numpy as np
class Zloty_podzial:
def __init__(self, h, line, a0):
tau = (np.sqrt(5) - 1) / 2
a = 0.2
b = 0.7
dok = 1e-9
self.xw = self.st_rad(np.linspace(1, int(a0)-1, int(a0)-1))
self.line = float(line)
self.h = float(h)
self.a0 = float(self.st_rad(int(a0)))
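        # Number of golden-section steps needed to shrink the bracket [a, b] below the
        # tolerance dok: each step scales the bracket width by tau, so
        # n >= log(2*dok / (b - a)) / log(tau).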
n = int(np.ceil((np.log(2 * dok) - np.log(b - a)) / np.log(tau)))
wynik, blad = self.steps(n, tau, a, b)
print("dokladnosc: {} krokow: {}\nZasięg skoku będzie maksymalny dla kąta: {} +/- {} stopni.".format(dok, n, '{0:.9f}'.format(self.rad_st(wynik)), "{0:.9f}".format(self.rad_st(blad))))
def steps(self, n, tau, a, b):
for i in range(n):
t1 = a + (1 - tau) * (b - a)
t2 = b - (1 - tau) * (b - a)
f1 = self.f(t1, self.a0, self.h, self.line)
f2 = self.f(t2, self.a0, self.h, self.line)
if f1 > f2:
b = t2
plt.text(b, self.f(b, self.a0, self.h, self.line), i, color="blue", fontsize=10)
ax.plot(b, self.f(b, self.a0, self.h, self.line), 'ob', markersize=2)
else:
a = t1
plt.text(a, self.f(a, self.a0, self.h, self.line), i, color="red", fontsize=10)
ax.plot(a, self.f(a, self.a0, self.h, self.line), 'or', markersize=2)
return (a+b)/2, (b-a)/2
def st_rad(self, a):
return a * (np.pi / 180)
def rad_st(self, a):
return a * (180 / np.pi)
def f(self, a, a0, h, line):
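        # Horizontal range of the jump released at angle a: the swing's horizontal offset
        # (line*sin(a)) plus the projectile flight, with the release speed taken from energy
        # conservation between the amplitude a0 and the release angle a.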
return line * np.sin(a) + 2 * line * (np.cos(a) - np.cos(a0)) * np.cos(a) * (np.sin(a) + np.sqrt(np.sin(a) ** 2 + ((h / line - np.cos(a)) / (np.cos(a) - np.cos(a0)))))
while True:
h0 = input("podaj wysokosc:")
line0 = input("podaj długość liny:")
    a00 = input("podaj amplitude wahań w stopniach:")
if int(line0) > int(h0):
print("Error: wysokość mniejsza od długości liny!!!")
else:
break
fig = plt.figure()
ax = fig.add_subplot(111)
a = Zloty_podzial(h0, line0, a00)
ax.plot(a.xw, a.f(a.xw, a.a0, a.h, a.line), "-b")
plt.show()
| 31.662162
| 193
| 0.521554
| 1,709
| 0.721097
| 0
| 0
| 0
| 0
| 0
| 0
| 429
| 0.181013
|
e399bf6533e93672377322199e348e30448cbeff
| 3,967
|
py
|
Python
|
app/products/product_info.py
|
Group-16-COSC-310/grocery-chat-bot
|
a9759dd0a6d5b91733267ec4ed156f85f45c05ff
|
[
"MIT"
] | null | null | null |
app/products/product_info.py
|
Group-16-COSC-310/grocery-chat-bot
|
a9759dd0a6d5b91733267ec4ed156f85f45c05ff
|
[
"MIT"
] | 7
|
2022-03-10T00:24:51.000Z
|
2022-03-19T01:37:18.000Z
|
app/products/product_info.py
|
Group-16-COSC-310/grocery-chat-bot
|
a9759dd0a6d5b91733267ec4ed156f85f45c05ff
|
[
"MIT"
] | 1
|
2022-03-31T03:28:27.000Z
|
2022-03-31T03:28:27.000Z
|
from app.database import MOCK_PRODUCT_DATA
import re
from app.products.base_handler import BaseHandler
class ProductInfoHandler(BaseHandler):
"""
A class used to represent a mini-bot to handle product queries.
"""
def __init__(self) -> None:
super().__init__()
def create_match_paterns(self):
# Product-related patterns
self.price_pattern = re.compile(
r"(price|cost|how much|money)", re.IGNORECASE)
self.stock_pattern = re.compile(r"(stock|how many|amount)", re.IGNORECASE)
self.nutrition_pattern = re.compile(
r"(calories|protein|carbs|carbohydrates|sugar|fat|nutrition|nutritional|weight|health|healthy)", re.IGNORECASE)
def dispose(self):
super().dispose()
def handle_prod_intent(self, product: str, intent: str) -> str:
intent = intent.split("-")[1] # hardcoded to filter intent: product-<intent> Ex. product-price -> intent = price
request = None
cursor = self.db.execute_query(
"SELECT product.id FROM product WHERE product.name = ? OR product.names = ?",
params=tuple([product, product]))
data = cursor.fetchone()
if (not data):
return None
request = {"request": intent, "id": data[0]}
return self.handle_product_info(None, **request)
def handle(self, message: str, intent=None) -> str: # if 2 args => message = product_name
if intent is not None:
return self.handle_prod_intent(message, intent)
# Call parser
kwargs = self.parse(message=message)
# If there is a topic detected, we find the response
# By calling the handler with the message (for convenience) and its necessary arguments
response = None
if kwargs:
response = self.handle_product_info(message, **kwargs)
return response
def parse(self, message: str) -> dict:
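        # Map the free-text message to a request type ("nutrition", "price" or "stock") and, if a known product is mentioned, to that product's id.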
request = None
# Check for keywords for prices
if self.nutrition_pattern.search(message):
request = "nutrition"
elif self.price_pattern.search(message):
request = "price"
elif self.stock_pattern.search(message):
request = "stock"
# If the request is truly about product
if request:
id = None
for prod in MOCK_PRODUCT_DATA:
prod_name = prod["name"]
prod_id = prod["id"]
prod_names = prod["names"]
if prod_name in message or prod_id in message or prod_names in message:
id = prod["id"]
return {"request": request, "id": id} if request else None
def handle_product_info(self, message=None, **kwargs) -> str:
# kwargs are arguments such as product_name, price, operators (<. >)
# This really depends on how you define your parser
prod_id = kwargs["id"]
# Get the product information
products = self.db.get_product("id", prod_id)
# Since id is unique, we can assume there is only one product
product = products[0]
reply = None
prod_msg_type = kwargs.get("request")
if prod_msg_type == "price":
reply = "%s cost $%s %s." % (
product['names'].capitalize(), product['price'], product['price_scale'])
elif prod_msg_type == "stock":
if product['in_stock']:
reply = "%s are in stock." % (product['names'].capitalize())
else:
reply = "%s are out of stock." % (
product['names'].capitalize())
elif prod_msg_type == "nutrition":
reply = "%s Nutrition Facts: Calories = %s, Protein = %s, Carbs = %s, Sugar = %s, Fat = %s." % (
product['name'].capitalize(), product['calories'], product['protein'], product['carbs'], product['sugar'], product['fat'])
return reply
| 36.731481
| 138
| 0.590623
| 3,862
| 0.973532
| 0
| 0
| 0
| 0
| 0
| 0
| 1,235
| 0.311318
|
e39a4f94218457c4ddd8055721b30ec15463d320
| 5,940
|
py
|
Python
|
qcdb/iface_psi4/runner.py
|
vivacebelles/qcdb
|
5bbdcb5c833277647a36bb0a5982abb56bf29b20
|
[
"BSD-3-Clause"
] | 1
|
2019-02-20T20:18:02.000Z
|
2019-02-20T20:18:02.000Z
|
qcdb/iface_psi4/runner.py
|
vivacebelles/qcdb
|
5bbdcb5c833277647a36bb0a5982abb56bf29b20
|
[
"BSD-3-Clause"
] | null | null | null |
qcdb/iface_psi4/runner.py
|
vivacebelles/qcdb
|
5bbdcb5c833277647a36bb0a5982abb56bf29b20
|
[
"BSD-3-Clause"
] | null | null | null |
import sys
import copy
import pprint
pp = pprint.PrettyPrinter(width=120)
import inspect
import numpy as np
from .. import __version__
from .. import qcvars
from ..driver.driver_helpers import print_variables
from ..exceptions import *
from ..molecule import Molecule
from ..pdict import PreservingDict
from .worker import psi4_subprocess
from .botanist import muster_inherited_options
def run_psi4(name, molecule, options, **kwargs):
#print('run_psi4 options <<<\n', options.print_changed(), '\n>>>')
#calledby = inspect.stack()
#print('CALLEDBY')
#for cur in calledby:
# print('CUR', cur[3])
if kwargs['ptype'] not in ['energy', 'properties', 'gradient', 'hessian']:
        raise ValidationError("""run_psi4: ptype not recognized: {}""".format(kwargs['ptype']))
jobrec = {}
jobrec['error'] = ''
jobrec['success'] = None
jobrec['return_output'] = True
prov = {}
prov['creator'] = 'QCDB'
prov['version'] = __version__
prov['routine'] = sys._getframe().f_code.co_name
jobrec['provenance'] = [prov]
jobrec['molecule'] = molecule.to_dict(np_out=False)
jobrec['method'] = name
jobrec['driver'] = kwargs['ptype']
jobrec['kwargs'] = kwargs
jobrec['options'] = copy.deepcopy(options)
jobrec['hooks'] = kwargs.get('hooks', {})
jobrec = psi4_driver(jobrec)
return jobrec
def psi4_driver(jobrec):
import json
try:
jobrec['molecule']
jobrec['method']
except KeyError as err:
#raise KeyError(
# 'Required fields missing from ({})'.format(jobrec.keys())) from err
jobrec['error'] += repr(err) + 'Required fields missing from ({})'.format(jobrec.keys())
return jobrec
#print('[1] PSI4 JOBREC PRE-PLANT (j@i) <<<')
#pp.pprint(jobrec)
#print('>>>')
psi4rec = psi4_plant(jobrec)
# test json roundtrip
jpsi4rec = json.dumps(psi4rec)
psi4rec = json.loads(jpsi4rec)
#print('[2] PSI4REC PRE-SUBPROCESS (x@i) <<<')
#pp.pprint(psi4rec)
#print('>>>\n')
psi4_subprocess(psi4rec) # updates psi4rec
#print('[3] PSI4REC POST-SUBPROCESS (x@io) <<<')
#pp.pprint(psi4rec)
#print('>>>\n')
psi4_harvest(jobrec, psi4rec) # updates jobrec
#print('[4] PSI4 JOBREC POST-HARVEST (j@io) <<<')
#pp.pprint(jobrec)
#print('>>>')
return jobrec
def psi4_plant(jobrec): # jobrec@i -> psi4@i
psi4rec = {}
psi4rec['json'] = {}
opts = jobrec['options']
# NOTE TODO very limited OPTIONS HANDSHAKE
muster_inherited_options(opts)
omem = opts.scroll['QCDB'].pop('MEMORY')
psi4rec['json']['memory'] = omem.value
psi4rec['json']['molecule'] = {'qm': jobrec['molecule']}
psi4rec['json']['driver'] = jobrec['driver']
mtd = jobrec['method']
psi4rec['json']['method'] = mtd[3:] if mtd.startswith('p4-') else mtd
#psi4rec['json']['args'] =
psi4rec['json']['kwargs'] = jobrec['kwargs']
#psi4rec['json']['scratch_location'] =
psi4rec['json']['return_output'] = True
#for hookkey, hookfunc in jobrec['hooks']['pre'].items():
# psi4rec['json']['in_' + hookkey] = hookfunc()
if opts.scroll['PSI4']['GRIDDAT'].value != '':
psi4rec['json']['infile_' + 'grid.dat'] = opts.scroll['PSI4']['GRIDDAT'].value
popts = {}
for k, v in opts.scroll['QCDB'].items():
if v.disputed():
popts[k] = v.value
for k, v in opts.scroll['PSI4'].items():
if v.disputed():
popts[k] = v.value
psi4rec['json']['options'] = popts
# Handle qcdb keywords implying cfour keyword values
# if core.get_option('CFOUR', 'TRANSLATE_PSI4'):
# harvester.muster_inherited_options(jobrec['options'])
# Handle conversion of qcdb keyword structure into psi4 format
# * psi wants python anyways, so no action needed
#psi4rec['command'] = ['psi4', '--json']
psi4rec['command'] = ['psi4', '--json', '--nthread', '6'] # TODO
return psi4rec
def psi4_harvest(jobrec, psi4rec): # jobrec@i, psi4rec@io -> jobrec@io
"""Processes raw results from read-only `psi4rec` into QCAspect fields in returned `jobrec`."""
psi4rec = psi4rec['json'] # TODO NOT how this should be done figure out 1-tier/2-tier
try:
pass
#jobrec['molecule']['real']
#jobrec['do_gradient']
except KeyError as err:
raise KeyError(
'Required fields missing from ({})'.format(jobrec.keys())) from err
try:
psi4rec['raw_output']
#if jobrec['do_gradient'] is True:
# dftd3rec['dftd3_gradient']
except KeyError as err:
raise KeyError('Required fields missing from ({})'.format(
psi4rec.keys())) from err
if psi4rec['error']:
raise RuntimeError(psi4rec['error'])
#c4files = {}
for fl in psi4rec.keys():
if fl.startswith('outfile_'):
jobrec[fl] = psi4rec[fl]
#for fl in ['GRD', 'FCMFINAL', 'DIPOL']:
# field = 'output_' + fl.lower()
# if field in cfourrec:
# text += ' Cfour scratch file {} has been read\n'.format(fl)
# text += cfourrec[field]
# c4files[fl] = cfourrec[field]
# Absorb results into qcdb data structures
progvars = PreservingDict(psi4rec['psivars'])
import psi4
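    # Rebuild numpy arrays from the Psi4 matrices that were serialized into the JSON results.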
progarrs = {k: np.array(psi4.core.Matrix.from_serial(v)) for k, v in psi4rec['psiarrays'].items()}
progvars.update(progarrs)
qcvars.build_out(progvars)
calcinfo = qcvars.certify(progvars)
jobrec['raw_output'] = psi4rec['raw_output']
jobrec['qcvars'] = calcinfo
#prov = {}
#prov['creator'] = 'Psi4'
#prov['routine'] = sys._getframe().f_code.co_name
#prov['version'] = version
jobrec['provenance'].append(psi4rec['provenance'])
return jobrec
"""
Required Input Fields
---------------------
Optional Input Fields
---------------------
Output Fields
-------------
"""
| 28.834951
| 102
| 0.605724
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,793
| 0.470202
|
e39aa6632ad08319c28262297573d3c36b620844
| 670
|
py
|
Python
|
users/models.py
|
tanmayag8958/upes-fipi-jigyasa
|
e05e41e7624175ae64216a54cc546bbb74b2df61
|
[
"MIT"
] | 8
|
2019-03-08T10:28:38.000Z
|
2019-10-17T00:04:44.000Z
|
users/models.py
|
tanmayag8958/upes-fipi-jigyasa
|
e05e41e7624175ae64216a54cc546bbb74b2df61
|
[
"MIT"
] | 124
|
2020-02-11T23:51:09.000Z
|
2022-01-13T01:06:09.000Z
|
users/models.py
|
tanmayag8958/upes-fipi-jigyasa
|
e05e41e7624175ae64216a54cc546bbb74b2df61
|
[
"MIT"
] | 3
|
2019-03-07T18:44:55.000Z
|
2019-03-08T10:36:50.000Z
|
from django.db import models
from django.utils import timezone
from django.contrib.auth.models import User
from django.db.models.signals import post_save
from django.dispatch import receiver
class User_details(models.Model):
user = models.OneToOneField(User, on_delete=models.CASCADE)
team_count = models.IntegerField(default=1)
date_registered = models.DateTimeField(default=timezone.now)
contact_no = models.BigIntegerField(default=91)
status = models.CharField('STATUS', max_length=12, default='Not Paid')
referral = models.CharField('REFERRAL', max_length=30, null=True, blank=True)
def __str__(self):
return self.user.username
| 39.411765
| 81
| 0.770149
| 477
| 0.71194
| 0
| 0
| 0
| 0
| 0
| 0
| 28
| 0.041791
|
e39b51664f30f25e2e70980af59a04bbf06d0208
| 10,685
|
py
|
Python
|
script/study/sedov/sedov_main_function.py
|
will-iam/Variant
|
5b6732134fd51cf6c2b90b51b7976be0693ba28d
|
[
"MIT"
] | 8
|
2017-05-04T07:50:02.000Z
|
2019-05-17T02:27:20.000Z
|
script/study/sedov/sedov_main_function.py
|
will-iam/Variant
|
5b6732134fd51cf6c2b90b51b7976be0693ba28d
|
[
"MIT"
] | null | null | null |
script/study/sedov/sedov_main_function.py
|
will-iam/Variant
|
5b6732134fd51cf6c2b90b51b7976be0693ba28d
|
[
"MIT"
] | null | null | null |
#Main Sedov Code Module
#Ported to python from fortran code written by James R Kamm and F X Timmes
#Original Paper and code found at http://cococubed.asu.edu/papers/la-ur-07-2849.pdf
import numpy as np
from globalvars import comvars as gv
from sedov_1d import sed_1d
from sedov_1d_time import sed_1d_time
from matplotlib import pyplot as plt
import pickle
gv.its = 20
# define sedov_main as a function
def sedov_main(geom_in, omega_in, time_in, blast_energy, gamma_in, outfile):
##Explicitly set variables
##Standard Cases
##Spherical constant density should reach r=1 at t=1
nstep = 12000
eblast = blast_energy
gv.xgeom = geom_in
gv.omega = omega_in
#outputfile = ??????
##input parameters
time = time_in
rho0 = 1.225E0
vel0 = 0.0E0
ener0 = 0.0E0
pres0 = 0.0E0
cs0 = 342.3E0
gv.gamma = gamma_in
##number of grid points, spatial domain, spatial stepsize.
    ##to match hydrocode output, use the mid-cell points.
#zpos = array of spatial points
zlo = 0.0E0
zhi = 1.2E3
zstep = (zhi - zlo)/float(nstep)
zpos = np.arange(zlo + zstep, zhi + zstep, zstep)
den, vel, pres, enertot, enertherm, enerkin, mach, zpos = sed_1d(time, nstep, zpos, eblast, rho0, vel0, ener0, pres0, cs0, gv)
#create final dictionary to pickle
###dictionary is a flexible array
single_time_output = {'density': den, 'velocity': vel, 'pressure': pres,
'total_energy': enertot, 'thermal_energy': enertherm,
'kinetic_energy': enerkin, 'mach': mach, 'position': zpos}
#open file, pickle and dump data, close file
output = open(outfile, 'wb')
pickle.dump(single_time_output, output)
output.close()
#plot outputs vss position
#zmax controls the maximum of the x-axis on the graphs.
zmax = 1.5 * gv.r2
plt.plot(zpos, den)
plt.axis([0, zmax, 0, max(den)])
plt.title('Density vs. Position')
plt.ylabel('Density (kg/m^3)')
plt.xlabel('Position (m)')
plt.show()
plt.plot(zpos, vel)
plt.axis([0, zmax, 0, max(vel)])
plt.title('Velocity vs. Position')
plt.ylabel('Velocity (m/s)')
plt.xlabel('Position (m)')
plt.show()
plt.plot(zpos, pres)
plt.axis([0, zmax, 0, max(pres)])
plt.title('Pressure vs. Position')
plt.ylabel('Pressure (Pa)')
plt.xlabel('Position (m)')
plt.show()
plt.plot(zpos, enertot)
plt.axis([0, zmax, 0, max(enertot)])
plt.title('Total Energy vs. Position')
plt.ylabel('Energy (J)')
plt.xlabel('Position (m)')
plt.show()
plt.plot(zpos, enertherm)
plt.axis([0, zmax, 0, max(enertherm)])
plt.title('Thermal Energy vs. Position')
plt.ylabel('Energy (J)')
plt.xlabel('Position (m)')
plt.show()
plt.plot(zpos, enerkin)
plt.axis([0, zmax, 0, max(enerkin)])
plt.title('Kinetic Energy vs. Position')
plt.ylabel('Energy (J)')
plt.xlabel('Position (m)')
plt.show()
plt.plot(zpos, mach)
plt.axis([0, zmax, 0, max(mach)])
plt.title('Mach Number vs. Position')
plt.ylabel('Mach Number')
plt.xlabel('Position (m)')
plt.show()
    #final graph plots scaled density, pressure and velocity on one plot.
plt.plot(zpos, den/max(den), 'b', label = 'Density')
plt.plot(zpos, pres/max(pres), 'g', label = 'Pressure')
plt.plot(zpos, vel/max(vel), 'r', label = 'Velocity')
plt.axis([0, zmax, 0, 1])
plt.legend(loc = 'upper left')
plt.title('Scaled Density, Pressure, and Velocity')
plt.ylabel('Scaled Value (x/max(x))')
plt.xlabel('Position (m)')
plt.show()
#define function to produce results at different points in time instead of sedov_1d
def sedov_main_time(geom_in, omega_in, time_initial, time_final, time_steps, blast_energy, gamma_in, outfile):
##Explicitly set variables
##Standard Cases
##Spherical constant density should reach r=1 at t=1
nstep = 12000
eblast = blast_energy
gv.xgeom = geom_in
gv.omega = omega_in
#outputfile = ??????
##input parameters
rho0 = 1.225E0
vel0 = 0.0E0
ener0 = 0.0E0
pres0 = 0.0E0
cs0 = 342.3E0
gv.gamma = gamma_in
##number of grid points, spatial domain, spatial stepsize.
##to match hydrocode output, use the mid-sell points.
#zpos = array of spatial points
zlo = 0.0E0
zhi = 3.0E2
zstep = (zhi - zlo)/float(nstep)
zposition = np.arange(zlo + zstep, zhi + zstep, zstep)
den_time, vel_time, pres_time, enertot_time, enertherm_time, enerkin_time, mach_time, zpos_time, time = sed_1d_time(time_initial, time_final, time_steps, nstep, zposition, eblast, rho0, vel0, ener0, pres0, cs0, gv)
#create final dictionary to pickle
###dictionary is flexible array
time_step_output = {'density': den_time, 'velocity': vel_time, 'pressure': pres_time,
'total_energy': enertot_time, 'thermal_energy': enertherm_time,
'kinetic_energy': enerkin_time, 'mach': mach_time,
'position': zpos_time, 'time': time}
#open file, pickle and dump data, close file
output = open(outfile, 'wb')
pickle.dump(time_step_output, output)
output.close()
#zmax controls the maximum of the x-axis on the graphs.
zmax = 1.5 * gv.r2
    # the following loops draw one plot per time step of the final solution
for i in range(0, time_steps):
plt.plot(zpos_time[i], den_time[i], label = 't=' + str(time[i]))
plt.xlim([0, zmax])
plt.title('Density vs. Position')
plt.ylabel('Density (kg/m^3)')
plt.xlabel('Position (m)')
plt.legend(loc = 'upper right', fontsize = 10)
plt.show()
for i in range(0, time_steps):
plt.plot(zpos_time[i], vel_time[i], label = 't=' + str(time[i]))
plt.xlim([0, zmax])
plt.title('Velocity vs. Position')
plt.ylabel('Velocity (m/s)')
plt.xlabel('Position (m)')
plt.legend(loc = 'upper right', fontsize = 10)
plt.show()
for i in range(0, time_steps):
plt.plot(zpos_time[i], pres_time[i], label = 't=' + str(time[i]))
plt.xlim([0, zmax])
plt.title('Pressure vs. Position')
plt.ylabel('Pressure (Pa)')
plt.xlabel('Position (m)')
plt.legend(loc = 'upper right', fontsize = 10)
plt.show()
for i in range(0, time_steps):
plt.plot(zpos_time[i], enertot_time[i], label = 't=' + str(time[i]))
plt.xlim([0, zmax])
plt.title('Total Energy vs. Position')
plt.ylabel('Energy (J)')
plt.xlabel('Position (m)')
plt.legend(loc = 'upper right', fontsize = 10)
plt.show()
for i in range(0, time_steps):
plt.plot(zpos_time[i], enertherm_time[i], label = 't=' + str(time[i]))
plt.xlim([0, zmax])
plt.title('Thermal Energy vs. Position')
plt.ylabel('Energy (J)')
plt.xlabel('Position (m)')
plt.legend(loc = 'upper right', fontsize = 10)
plt.show()
for i in range(0, time_steps):
plt.plot(zpos_time[i], enerkin_time[i], label = 't=' + str(time[i]))
plt.xlim([0, zmax])
plt.title('Kinetic Energy vs. Position')
plt.ylabel('Energy (J)')
plt.xlabel('Position (m)')
plt.legend(loc = 'upper right', fontsize = 10)
plt.show()
for i in range(0, time_steps):
plt.plot(zpos_time[i], mach_time[i], label = 't=' + str(time[i]))
plt.xlim([0, zmax])
plt.title('Mach Number vs. Position')
plt.ylabel('Mach Number')
plt.xlabel('Position (m)')
plt.legend(loc = 'upper right', fontsize = 10)
plt.show()
    #final graph plots scaled density, pressure and velocity on one plot.
# plt.plot(zpos, den/max(den), 'b', label = 'Density')
# plt.plot(zpos, pres/max(pres), 'g', label = 'Pressure')
# plt.plot(zpos, vel/max(vel), 'r', label = 'Velocity')
# plt.axis([0, zmax, 0, 1])
# plt.legend(loc = 'upper left')
# plt.title('Scaled Density, Pressure, and Velocity')
# plt.ylabel('Scaled Value (x/max(x))')
# plt.xlabel('Position (m)')
# plt.show()
| 44.3361
| 232
| 0.467759
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 3,254
| 0.304539
|
e39bbfc2ad38c9bcb6507c0a41b528d361682009
| 224
|
py
|
Python
|
packs/hue/actions/rgb.py
|
jonico/st2contrib
|
149c9c553f24360d91a14fef7ea6146707de75fd
|
[
"Apache-2.0"
] | 164
|
2015-01-17T16:08:33.000Z
|
2021-08-03T02:34:07.000Z
|
packs/hue/actions/rgb.py
|
jonico/st2contrib
|
149c9c553f24360d91a14fef7ea6146707de75fd
|
[
"Apache-2.0"
] | 442
|
2015-01-01T11:19:01.000Z
|
2017-09-06T23:26:17.000Z
|
packs/hue/actions/rgb.py
|
jonico/st2contrib
|
149c9c553f24360d91a14fef7ea6146707de75fd
|
[
"Apache-2.0"
] | 202
|
2015-01-13T00:37:40.000Z
|
2020-11-07T11:30:10.000Z
|
from lib import action
class RGBAction(action.BaseAction):
def run(self, light_id, red, green, blue, transition_time):
light = self.hue.lights.get(light_id)
light.rgb(red, green, blue, transition_time)
| 28
| 63
| 0.705357
| 198
| 0.883929
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
e39de89d4940f42750569f7100f7133d1493a932
| 1,755
|
py
|
Python
|
test/test_math_challenge.py
|
nikett/math_challenge_eval
|
bafe9f6d30fc5ffd97492ce5e42716f839c29c4f
|
[
"Apache-2.0"
] | null | null | null |
test/test_math_challenge.py
|
nikett/math_challenge_eval
|
bafe9f6d30fc5ffd97492ce5e42716f839c29c4f
|
[
"Apache-2.0"
] | null | null | null |
test/test_math_challenge.py
|
nikett/math_challenge_eval
|
bafe9f6d30fc5ffd97492ce5e42716f839c29c4f
|
[
"Apache-2.0"
] | null | null | null |
import unittest
from typing import Dict, List
from src.math_challenge import Challenge, DEFAULT_EMPTY_ANS
from src.student_info import StudentInfo
class TestChallenge(unittest.TestCase):
def test_preprocess(self):
self.assertEqual(Challenge.preprocess_ans("a. 37th floor, b. 42nd floor, c. 39th floor, d. 40th floor"), 37423940)
self.assertEqual(Challenge.preprocess_ans("answer is 25 ducklings"), 25)
self.assertEqual(Challenge.preprocess_ans("4 c.1"), 41)
self.assertEqual(Challenge.preprocess_ans(a="abcd efgh. ij"), DEFAULT_EMPTY_ANS)
self.assertEqual(Challenge.preprocess_ans(a="5 blue. ", text_retain_dict={"blue":1}), 51)
self.assertEqual(Challenge.preprocess_ans(a="5 {blue}. __OR__ 6 {brown}", text_retain_dict={"blue":0, "brown":1}), 5061)
self.assertEqual(Challenge.preprocess_ans(a="5 blue. ", text_retain_dict={"blue":1}), 51)
def test_gold_loading(self):
g, challenge_wise_retaining = Challenge.load_gold_answers(fp="test_data/test_correct_answers.csv")
assert len(g) == 2
assert g["MC2"].challenge_name == "MC2"
assert not g["MC2"].student # Gold has no student name.
# We did not pass any retaining text (i.e., usually all text
# except numbers is removed, except the special retaining strings)
assert challenge_wise_retaining["MC2"][8] == {"blue":0, "red":1}
def test_student_ans_loading(self):
s: Dict[StudentInfo, List["Challenge"]] = Challenge.load_student_answers(fp="test_data/test_student_answers.csv", challenge_wise_retaining={})
assert len(s) == 5, f"There should be 5 student entries, assuming no repeated entries in the test file."
if __name__ == '__main__':
unittest.main()
| 51.617647
| 150
| 0.707123
| 1,555
| 0.88604
| 0
| 0
| 0
| 0
| 0
| 0
| 540
| 0.307692
|
e39ec861b279f059e70d4bea0eec9d21d1b19ced
| 6,784
|
py
|
Python
|
MyBot.py
|
joebieb/halite
|
da389aa8e9f97a0ac6a417ca53023609376c0dc6
|
[
"MIT"
] | null | null | null |
MyBot.py
|
joebieb/halite
|
da389aa8e9f97a0ac6a417ca53023609376c0dc6
|
[
"MIT"
] | null | null | null |
MyBot.py
|
joebieb/halite
|
da389aa8e9f97a0ac6a417ca53023609376c0dc6
|
[
"MIT"
] | null | null | null |
import hlt
import logging
from collections import OrderedDict
# GAME START
game = hlt.Game("Spoof_v7")
logging.info('Starting my %s bot!', game._name)
TURN = 0
def navigate(ship, entity, multiplier = 1):
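    # Steer toward the closest point of the target entity at a fraction of max speed; ships are ignored during pathing to keep the search cheap.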
navigate_command = ship.navigate(
ship.closest_point_to(entity),
game_map,
speed=int(hlt.constants.MAX_SPEED * multiplier),
ignore_ships=True)
if navigate_command:
command_queue.append(navigate_command)
def kamikazee(ship, entity):
navigate_command = ship.navigate(
entity,
game_map,
speed=int(hlt.constants.MAX_SPEED),
ignore_ships=False)
if navigate_command:
command_queue.append(navigate_command)
while True:
# TURN START
TURN += 1
group_attack_limit = 3
attack_ship_modifier = .4
game_map = game.update_map()
command_queue = []
me = game_map.get_me()
enemies = [enemy for enemy in game_map.all_players() if enemy.id != me.id]
my_ships = me.all_ships()
my_docked_ships = [ship for ship in my_ships if ship.docking_status != ship.DockingStatus.UNDOCKED]
#planet_docking_status = []
enemy_ships = [ship for ship in game_map._all_ships() if ship not in my_ships]
docked_enemy_ships = [ship for ship in enemy_ships if ship.docking_status != ship.DockingStatus.UNDOCKED]
unowned_planets = [planet for planet in game_map.all_planets() if not planet.is_owned()]
my_planets = [planet for planet in game_map.all_planets() if planet.is_owned() and planet.owner.id == me.id]
enemy_planets = [planet for planet in game_map.all_planets() if planet.is_owned() and planet.owner.id != me.id]
targeted_planets = []
targeted_ships = []
# find center of enemy mass
planet_x = [planet.x for planet in enemy_planets]
ship_x = [ship.x for ship in enemy_ships]
planet_y = [planet.y for planet in enemy_planets]
ship_y = [ship.y for ship in enemy_ships]
x = planet_x + ship_x
y = planet_y + ship_y
enemy_centroid = hlt.entity.Position(0,0)
if len(x):
enemy_centroid = hlt.entity.Position(sum(x) / len(x), sum(y) / len(y))
entities_by_distance_to_enemy_centroid = OrderedDict(sorted(game_map.nearby_entities_by_distance(enemy_centroid).items(), key=lambda t: t[0]))
my_ships_by_distance_to_enemy_centroid = [entities_by_distance_to_enemy_centroid[distance][0]
for distance in entities_by_distance_to_enemy_centroid
if entities_by_distance_to_enemy_centroid[distance][0] in my_ships
and entities_by_distance_to_enemy_centroid[distance][0] not in my_docked_ships]
# adjust limits based on ship counts
my_ship_count = len(my_ships)
enemy_ship_count = len(enemy_ships)
if my_ship_count > 0 and enemy_ship_count > 0:
ratio = (my_ship_count / enemy_ship_count)
if ratio > 1:
group_attack_limit *= ratio
# logging.info('group attack limit: %s', group_attack_limit)
#logging.info(enemy_centroid)
    # find the undocked ships closest to the action and make them fighters; the rest act as miners
attack_ships = my_ships_by_distance_to_enemy_centroid[0 : int(len(my_ships_by_distance_to_enemy_centroid) * attack_ship_modifier)]
# logging.info('Number of attack ships: %s', len(attack_ships))
# For every ship that I control
for ship in my_ships:
# If the ship is docked
if ship.docking_status != ship.DockingStatus.UNDOCKED:
# Skip this ship
continue
entities_by_distance = OrderedDict(sorted(game_map.nearby_entities_by_distance(ship).items(), key=lambda t: t[0]))
target_planets = [entities_by_distance[distance][0] for distance in entities_by_distance if entities_by_distance[distance][0] in game_map.all_planets() and entities_by_distance[distance][0] not in targeted_planets]
target_unowned_planets = [entities_by_distance[distance][0] for distance in entities_by_distance if entities_by_distance[distance][0] in unowned_planets and entities_by_distance[distance][0] not in targeted_planets]
target_enemy_planets = [entities_by_distance[distance][0] for distance in entities_by_distance if entities_by_distance[distance][0] in enemy_planets]
target_ships = [entities_by_distance[distance][0] for distance in entities_by_distance if entities_by_distance[distance][0] in enemy_ships]
target_docked_ships = [entities_by_distance[distance][0] for distance in entities_by_distance if entities_by_distance[distance][0] in docked_enemy_ships]
        # if this ship is one of the attack ships, go after enemy ships
if ship in attack_ships:
for enemy_ship in target_ships:
# if unowned planet is closer, then dock, otherwise attack
# if target_unowned_planets[0]:
# if ship.calculate_distance_between(target_unowned_planets[0]) < ship.calculate_distance_between(enemy_ship):
# if ship.can_dock(target_unowned_planets[0]):
# command_queue.append(ship.dock(target_unowned_planets[0]))
# else:
# navigate(ship, enemy_ship, 1)
# else:
# if enemy is targeted by n ships then get next closest ship
if enemy_ship in targeted_ships:
if targeted_ships.count(enemy_ship) >= group_attack_limit:
# logging.info('group attack limit met, trying next ship')
continue
targeted_ships.append(enemy_ship)
navigate(ship, enemy_ship, 1)
break
else:
for planet in target_planets:
# If we can dock, let's (try to) dock. If two ships try to dock at once, neither will be able to.
if ship.can_dock(planet) and planet in unowned_planets:
command_queue.append(ship.dock(planet))
elif ship.can_dock(planet) and planet in my_planets and not planet.is_full():
command_queue.append(ship.dock(planet))
# if planet is owned then attack
elif planet.is_owned() and planet in enemy_planets:
for enemy_ship in planet.all_docked_ships():
if enemy_ship:
navigate(ship, enemy_ship)
break
else:
targeted_planets.append(planet)
navigate(ship, planet)
break
# Send our set of commands to the Halite engine for this turn
game.send_command_queue(command_queue)
# TURN END
# GAME END
| 48.457143
| 223
| 0.658461
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,213
| 0.178803
|
e39f7df5983d058b6f0e12ddeb89dfebc298ff47
| 2,875
|
py
|
Python
|
test/test_horizons.py
|
bluePhlavio/eph
|
9ad8d753ba70d7ed147a591c4181edd56b9533cc
|
[
"MIT"
] | 1
|
2021-05-07T23:35:32.000Z
|
2021-05-07T23:35:32.000Z
|
test/test_horizons.py
|
bluePhlavio/eph
|
9ad8d753ba70d7ed147a591c4181edd56b9533cc
|
[
"MIT"
] | 8
|
2019-11-02T01:04:26.000Z
|
2021-06-02T00:01:37.000Z
|
test/test_horizons.py
|
bluePhlavio/eph
|
9ad8d753ba70d7ed147a591c4181edd56b9533cc
|
[
"MIT"
] | 1
|
2019-03-20T13:55:52.000Z
|
2019-03-20T13:55:52.000Z
|
import pytest
from eph.horizons import *
@pytest.fixture(params=[
('earth', '399'),
('\'earth\'', '399'),
('Earth', '399'),
('399', '399'),
('\'399\'', '399'),
('pluto', 'pluto'),
])
def codify_obj_data(request):
return request.param
def test_codify_obj(codify_obj_data):
data, result = codify_obj_data
assert codify_obj(data) == result
@pytest.fixture(params=[
('earth', '@399'),
('\'earth\'', '@399'),
('\'@earth\'', '@earth'),
('399', '@399'),
('\'399\'', '@399'),
('\'@399\'', '@399'),
])
def codify_site_data(request):
return request.param
def test_codify_site(codify_site_data):
data, result = codify_site_data
assert codify_site(data) == result
@pytest.fixture(params=[
('399', 'earth'),
('299', 'venus'),
('@499', 'mars'),
('1@399', '1@399'),
('@earth', '@earth'),
])
def humanify_data(request):
return request.param
def test_humanify(humanify_data):
data, result = humanify_data
assert humanify(data) == result
@pytest.fixture(params=[
'2017-04-22 00:00',
Time('2017-4-22'),
])
def format_time_data(request):
return request.param
def test_format_time(format_time_data):
assert str(format_time(format_time_data)) == '2017-04-22 00:00'
@pytest.fixture(params=[
('COMMAND', 'COMMAND'),
('Command', 'COMMAND'),
('target', 'COMMAND'),
('OBJECT', 'COMMAND'),
('table-type', 'TABLE_TYPE'),
('key', None),
])
def transformkey_data(request):
return request.param
def test_transformkey(transformkey_data):
key, jplparam = transformkey_data
try:
assert transform_key(key) == jplparam
except Exception as e:
assert e.__class__ == JplBadParamError
@pytest.fixture(params=[
('COMMAND', 'earth', '399'),
('CENTER', '@399', '@399'),
('CENTER', '399', '@399'),
])
def transformvalue_data(request):
return request.param
def test_transformvalue(transformvalue_data):
key, value, result = transformvalue_data
assert transform_value(key, value) == result
@pytest.fixture(params=[
(('target', 'earth'), ('COMMAND', '399')),
(('Command', 'Earth'), ('COMMAND', '399')),
(('OBJECT', '399'), ('COMMAND', '399')),
(('Origin', 'earth'), ('CENTER', '@399')),
(('key', 'value'), (None, None)),
])
def transform_data(request):
return request.param
def test_transform(transform_data):
data, result = transform_data
key, value = data
try:
assert transform(key, value) == result
except Exception as e:
assert e.__class__ == JplBadParamError
@pytest.fixture(params=[
('START_TIME', True),
('object', True),
('key', False),
])
def is_jpl_param_data(request):
return request.param
def test_is_jpl_param(is_jpl_param_data):
key, result = is_jpl_param_data
assert is_jpl_param(key) == result
| 21.455224
| 67
| 0.614261
| 0
| 0
| 0
| 0
| 1,668
| 0.580174
| 0
| 0
| 594
| 0.206609
|
e3a03a276ee7eba66fe85aa5ecec8c492d7bc5fa
| 950
|
py
|
Python
|
demo.py
|
ademilly/sqs-service
|
cd6cb1e7ca904472376eafb8682621675c310f2e
|
[
"MIT"
] | null | null | null |
demo.py
|
ademilly/sqs-service
|
cd6cb1e7ca904472376eafb8682621675c310f2e
|
[
"MIT"
] | null | null | null |
demo.py
|
ademilly/sqs-service
|
cd6cb1e7ca904472376eafb8682621675c310f2e
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import time
import sqs_service
"""Usage:
python demo.py
Expected set environment variables:
- AWS_ACCESS_KEY_ID
- AWS_SECRET_ACCESS_KEY
- AWS_DEFAULT_REGION
- AWS_SESSION_TOKEN for IAM roles
- AWS_SECURITY_TOKEN for IAM roles
Send 'Hello World' to queue 'TEST', listen to the queue and
print first message received
"""
def run():
sqs_svc = sqs_service.SQSService(queue_name='TEST')
sqs_svc.send(body='Hello World', attributes={
'MessageType': 'Greeting'
})
t_end = time.time() + 30
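    # poll the queue for up to 30 seconds, printing and deleting any message received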
while time.time() < t_end:
sqs_svc.listen(for_how_many=1, with_attributes=['MessageType'])
if sqs_svc.has_got_messages():
first_message = sqs_svc.get_first_message()
            print('Message received:', first_message.body())
            print('Message is a', first_message.get_attribute('MessageType'))
first_message.delete()
if __name__ == '__main__':
run()
| 22.093023
| 76
| 0.678947
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 423
| 0.445263
|
e3a0f222ef753f195c708bec4e9a91e2e4562806
| 34,106
|
py
|
Python
|
web_console_v2/api/testing/workflow_template/psi_join_tree_model_no_label.py
|
chen1i/fedlearner
|
981514dadbd0aa49ae87d185dd247d310e35605c
|
[
"Apache-2.0"
] | null | null | null |
web_console_v2/api/testing/workflow_template/psi_join_tree_model_no_label.py
|
chen1i/fedlearner
|
981514dadbd0aa49ae87d185dd247d310e35605c
|
[
"Apache-2.0"
] | null | null | null |
web_console_v2/api/testing/workflow_template/psi_join_tree_model_no_label.py
|
chen1i/fedlearner
|
981514dadbd0aa49ae87d185dd247d310e35605c
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020 The FedLearner Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
import json
from google.protobuf.json_format import MessageToDict
from fedlearner_webconsole.proto.workflow_definition_pb2 import (
WorkflowDefinition, JobDefinition, JobDependency
)
from fedlearner_webconsole.proto.common_pb2 import (
Variable
)
def make_workflow_template():
workflow = WorkflowDefinition(
group_alias='psi_join_tree_model',
is_left=True,
variables=[
Variable(
name='image_version',
value='v1.5-rc3',
access_mode=Variable.PEER_READABLE),
Variable(
name='num_partitions',
value='2',
access_mode=Variable.PEER_WRITABLE),
],
job_definitions=[
JobDefinition(
name='raw-data-job',
job_type=JobDefinition.RAW_DATA,
is_federated=False,
is_manual=False,
variables=[
Variable(
name='input_dir',
value='/app/deploy/integrated_test/tfrecord_raw_data',
access_mode=Variable.PRIVATE),
Variable(
name='file_wildcard',
value='*.rd',
access_mode=Variable.PRIVATE),
Variable(
name='batch_size',
value='1024',
access_mode=Variable.PEER_WRITABLE),
Variable(
name='input_format',
value='TF_RECORD',
access_mode=Variable.PRIVATE),
Variable(
name='worker_cpu',
value='2000m',
access_mode=Variable.PEER_WRITABLE),
Variable(
name='worker_mem',
value='4Gi',
access_mode=Variable.PEER_WRITABLE),
],
yaml_template='''{
"apiVersion": "fedlearner.k8s.io/v1alpha1",
"kind": "FLApp",
"metadata": {
"name": "${workflow.jobs.raw-data-job.name}",
"namespace": "${project.variables.namespace}"
},
"spec": {
"cleanPodPolicy": "All",
"flReplicaSpecs": {
"Master": {
"template": {
"spec": {
"containers": [
{
"resources": {
"limits": {
"cpu": "1000m",
"memory": "2Gi"
},
"requests": {
"cpu": "1000m",
"memory": "2Gi"
}
},
"image": "artifact.bytedance.com/fedlearner/fedlearner:${workflow.variables.image_version}",
"ports": [
{
"containerPort": 50051,
"name": "flapp-port"
}
],
"command": [
"/app/deploy/scripts/data_portal/run_data_portal_master.sh"
],
"args": [],
"env": [
${system.basic_envs},
{
"name": "EGRESS_URL",
"value": "fedlearner-stack-ingress-nginx-controller.default.svc.cluster.local:80"
},
{
"name": "EGRESS_HOST",
"value": "${project.participants[0].egress_host}"
},
{
"name": "EGRESS_DOMAIN",
"value": "${project.participants[0].egress_domain}"
},
{
"name": "STORAGE_ROOT_PATH",
"value": "${project.variables.storage_root_dir}"
},
{
"name": "APPLICATION_ID",
"value": "${workflow.jobs.raw-data-job.name}"
},
{
"name": "DATA_PORTAL_NAME",
"value": "${workflow.jobs.raw-data-job.name}"
},
{
"name": "OUTPUT_PARTITION_NUM",
"value": "${workflow.variables.num_partitions}"
},
{
"name": "INPUT_BASE_DIR",
"value": "${workflow.jobs.raw-data-job.variables.input_dir}"
},
{
"name": "OUTPUT_BASE_DIR",
"value": "${project.variables.storage_root_dir}/raw_data/${workflow.jobs.raw-data-job.name}"
},
{
"name": "RAW_DATA_PUBLISH_DIR",
"value": "portal_publish_dir/${workflow.jobs.raw-data-job.name}"
},
{
"name": "DATA_PORTAL_TYPE",
"value": "PSI"
},
{
"name": "FILE_WILDCARD",
"value": "${workflow.jobs.raw-data-job.variables.file_wildcard}"
}
],
"volumeMounts": [
{
"mountPath": "/data",
"name": "data"
}
],
"imagePullPolicy": "IfNotPresent",
"name": "tensorflow"
}
],
"imagePullSecrets": [
{
"name": "regcred"
}
],
"volumes": [
{
"persistentVolumeClaim": {
"claimName": "pvc-fedlearner-default"
},
"name": "data"
}
],
"restartPolicy": "Never"
}
},
"pair": false,
"replicas": 1
},
"Worker": {
"replicas": ${workflow.variables.num_partitions},
"template": {
"spec": {
"containers": [
{
"resources": {
"limits": {
"cpu": "${workflow.jobs.raw-data-job.variables.worker_cpu}",
"memory": "${workflow.jobs.raw-data-job.variables.worker_mem}"
},
"requests": {
"cpu": "${workflow.jobs.raw-data-job.variables.worker_cpu}",
"memory": "${workflow.jobs.raw-data-job.variables.worker_mem}"
}
},
"image": "artifact.bytedance.com/fedlearner/fedlearner:${workflow.variables.image_version}",
"command": [
"/app/deploy/scripts/data_portal/run_data_portal_worker.sh"
],
"args": [],
"env": [
${system.basic_envs},
{
"name": "EGRESS_URL",
"value": "fedlearner-stack-ingress-nginx-controller.default.svc.cluster.local:80"
},
{
"name": "EGRESS_HOST",
"value": "${project.participants[0].egress_host}"
},
{
"name": "EGRESS_DOMAIN",
"value": "${project.participants[0].egress_domain}"
},
{
"name": "STORAGE_ROOT_PATH",
"value": "${project.variables.storage_root_dir}"
},
{
"name": "APPLICATION_ID",
"value": "${workflow.jobs.raw-data-job.name}"
},
{
"name": "BATCH_SIZE",
"value": "${workflow.jobs.raw-data-job.variables.batch_size}"
},
{
"name": "INPUT_DATA_FORMAT",
"value": "${workflow.jobs.raw-data-job.variables.input_format}"
},
{
"name": "COMPRESSED_TYPE",
"value": ""
},
{
"name": "OUTPUT_DATA_FORMAT",
"value": "TF_RECORD"
}
],
"volumeMounts": [
{
"mountPath": "/data",
"name": "data"
}
],
"imagePullPolicy": "IfNotPresent",
"name": "tensorflow"
}
],
"imagePullSecrets": [
{
"name": "regcred"
}
],
"volumes": [
{
"persistentVolumeClaim": {
"claimName": "pvc-fedlearner-default"
},
"name": "data"
}
],
"restartPolicy": "Never"
}
},
"pair": false
}
}
}
}
'''
),
JobDefinition(
name='data-join-job',
job_type=JobDefinition.PSI_DATA_JOIN,
is_federated=True,
is_manual=False,
variables=[
Variable(
name='worker_cpu',
value='4000m',
access_mode=Variable.PEER_WRITABLE),
Variable(
name='worker_mem',
value='4Gi',
access_mode=Variable.PEER_WRITABLE),
Variable(
name='rsa_public_key_path',
value='',
access_mode=Variable.PRIVATE),
],
dependencies=[
JobDependency(source='raw-data-job')
],
yaml_template='''
{
"apiVersion": "fedlearner.k8s.io/v1alpha1",
"kind": "FLApp",
"metadata": {
"name": "${workflow.jobs.data-join-job.name}",
"namespace": "${project.variables.namespace}"
},
"spec": {
"role": "Follower",
"cleanPodPolicy": "All",
"peerSpecs": {
"Follower": {
"peerURL": "fedlearner-stack-ingress-nginx-controller.default.svc.cluster.local:80",
"authority": "${project.participants[0].egress_domain}",
"extraHeaders": {
"x-host": "default.fedlearner.operator"
}
}
},
"flReplicaSpecs": {
"Master": {
"template": {
"spec": {
"restartPolicy": "Never",
"containers": [
{
"env": [
${system.basic_envs},
{
"name": "EGRESS_URL",
"value": "fedlearner-stack-ingress-nginx-controller.default.svc.cluster.local:80"
},
{
"name": "EGRESS_HOST",
"value": "${project.participants[0].egress_host}"
},
{
"name": "EGRESS_DOMAIN",
"value": "${project.participants[0].egress_domain}"
},
{
"name": "APPLICATION_ID",
"value": "${workflow.jobs.data-join-job.name}"
},
{
"name": "STORAGE_ROOT_PATH",
"value": "${project.variables.storage_root_dir}"
},
{
"name": "ROLE",
"value": "follower"
},
{
"name": "RAW_DATA_SUB_DIR",
"value": "portal_publish_dir/${workflow.jobs.raw-data-job.name}"
},
{
"name": "OUTPUT_BASE_DIR",
"value": "${project.variables.storage_root_dir}/data_source/${workflow.jobs.data-join-job.name}"
},
{
"name": "PARTITION_NUM",
"value": "${workflow.variables.num_partitions}"
},
{
"name": "START_TIME",
"value": "0"
},
{
"name": "END_TIME",
"value": "999999999999"
},
{
"name": "NEGATIVE_SAMPLING_RATE",
"value": "1.0"
}
],
"imagePullPolicy": "IfNotPresent",
"name": "tensorflow",
"volumeMounts": [
{
"mountPath": "/data",
"name": "data"
}
],
"image": "artifact.bytedance.com/fedlearner/fedlearner:${workflow.variables.image_version}",
"ports": [
{
"containerPort": 50051,
"name": "flapp-port"
}
],
"command": [
"/app/deploy/scripts/wait4pair_wrapper.sh"
],
"args": [
"/app/deploy/scripts/rsa_psi/run_psi_data_join_master.sh"
],
"resources": {
"limits": {
"cpu": "2000m",
"memory": "3Gi"
},
"requests": {
"cpu": "2000m",
"memory": "3Gi"
}
                      }
}
],
"imagePullSecrets": [
{
"name": "regcred"
}
],
"volumes": [
{
"persistentVolumeClaim": {
"claimName": "pvc-fedlearner-default"
},
"name": "data"
}
]
}
},
"pair": true,
"replicas": 1
},
"Worker": {
"template": {
"spec": {
"restartPolicy": "Never",
"containers": [
{
"env": [
${system.basic_envs},
{
"name": "EGRESS_URL",
"value": "fedlearner-stack-ingress-nginx-controller.default.svc.cluster.local:80"
},
{
"name": "EGRESS_HOST",
"value": "${project.participants[0].egress_host}"
},
{
"name": "EGRESS_DOMAIN",
"value": "${project.participants[0].egress_domain}"
},
{
"name": "STORAGE_ROOT_PATH",
"value": "${project.variables.storage_root_dir}"
},
{
"name": "APPLICATION_ID",
"value": "${workflow.jobs.data-join-job.name}"
},
{
"name": "ROLE",
"value": "follower"
},
{
"name": "OUTPUT_BASE_DIR",
"value": "${project.variables.storage_root_dir}/data_source/${workflow.jobs.data-join-job.name}"
},
{
"name": "RSA_KEY_PATH",
"value": "${workflow.jobs.data-join-job.rsa_public_key_path}"
},
{
"name": "PSI_RAW_DATA_ITER",
"value": "TF_RECORD"
},
{
"name": "PSI_OUTPUT_BUILDER",
"value": "TF_RECORD"
},
{
"name": "DATA_BLOCK_BUILDER",
"value": "TF_RECORD"
},
{
"name": "DATA_BLOCK_DUMP_INTERVAL",
"value": "600"
},
{
"name": "DATA_BLOCK_DUMP_THRESHOLD",
"value": "524288"
},
{
"name": "EXAMPLE_ID_DUMP_INTERVAL",
"value": "600"
},
{
"name": "EXAMPLE_ID_DUMP_THRESHOLD",
"value": "524288"
},
{
"name": "EXAMPLE_JOINER",
"value": "SORT_RUN_JOINER"
},
{
"name": "SIGN_RPC_TIMEOUT_MS",
"value": "128000"
},
{
"name": "PARTITION_NUM",
"value": "${workflow.variables.num_partitions}"
},
{
"name": "RAW_DATA_SUB_DIR",
"value": "portal_publish_dir/${workflow.jobs.raw-data-job.name}"
}
],
"imagePullPolicy": "IfNotPresent",
"name": "tensorflow",
"volumeMounts": [
{
"mountPath": "/data",
"name": "data"
}
],
"image": "artifact.bytedance.com/fedlearner/fedlearner:${workflow.variables.image_version}",
"ports": [
{
"containerPort": 50051,
"name": "flapp-port"
}
],
"command": [
"/app/deploy/scripts/wait4pair_wrapper.sh"
],
"args": [
"/app/deploy/scripts/rsa_psi/run_psi_data_join_worker.sh"
],
"resources": {
"limits": {
"cpu": "${workflow.jobs.data-join-job.variables.worker_cpu}",
"memory": "${workflow.jobs.data-join-job.variables.worker_mem}"
},
"requests": {
"cpu": "${workflow.jobs.data-join-job.variables.worker_cpu}",
"memory": "${workflow.jobs.data-join-job.variables.worker_mem}"
}
}
}
],
"imagePullSecrets": [
{
"name": "regcred"
}
],
"volumes": [
{
"persistentVolumeClaim": {
"claimName": "pvc-fedlearner-default"
},
"name": "data"
}
]
}
},
"pair": true,
"replicas": ${workflow.variables.num_partitions}
}
}
}
}
'''
),
JobDefinition(
name='train-job',
job_type=JobDefinition.TREE_MODEL_TRAINING,
is_federated=True,
is_manual=False,
variables=[
Variable(
name='worker_cpu',
value='4000m',
access_mode=Variable.PEER_WRITABLE),
Variable(
name='worker_mem',
value='8Gi',
access_mode=Variable.PEER_WRITABLE),
Variable(
name='num_parallel',
value='4',
access_mode=Variable.PEER_WRITABLE),
],
dependencies=[
JobDependency(source='data-join-job')
],
yaml_template='''
{
"apiVersion": "fedlearner.k8s.io/v1alpha1",
"kind": "FLApp",
"metadata": {
"name": "${workflow.jobs.train-job.name}",
"namespace": "${project.variables.namespace}"
},
"spec": {
"role": "Follower",
"cleanPodPolicy": "All",
"peerSpecs": {
"Leader": {
"peerURL": "fedlearner-stack-ingress-nginx-controller.default.svc.cluster.local:80",
"authority": "${project.participants[0].egress_domain}",
"extraHeaders": {
"x-host": "default.fedlearner.operator"
}
}
},
"flReplicaSpecs": {
"Worker": {
"template": {
"spec": {
"restartPolicy": "Never",
"containers": [
{
"env": [
${system.basic_envs},
{
"name": "EGRESS_URL",
"value": "fedlearner-stack-ingress-nginx-controller.default.svc.cluster.local:80"
},
{
"name": "EGRESS_HOST",
"value": "${project.participants[0].egress_host}"
},
{
"name": "EGRESS_DOMAIN",
"value": "${project.participants[0].egress_domain}"
},
{
"name": "APPLICATION_ID",
"value": "${workflow.jobs.train-job.name}"
},
{
"name": "STORAGE_ROOT_PATH",
"value": "${project.variables.storage_root_dir}"
},
{
"name": "ROLE",
"value": "follower"
},
{
"name": "OUTPUT_BASE_DIR",
"value": "${project.variables.storage_root_dir}/job_output/${workflow.jobs.train-job.name}"
},
{
"name": "MODE",
"value": "train"
},
{
"name": "NUM_PARALLEL",
"value": "${workflow.jobs.train-job.variables.num_parallel}"
},
{
"name": "DATA_SOURCE",
"value": "${workflow.jobs.data-join-job.name}"
}
],
"imagePullPolicy": "IfNotPresent",
"name": "tensorflow",
"volumeMounts": [
{
"mountPath": "/data",
"name": "data"
}
],
"image": "artifact.bytedance.com/fedlearner/fedlearner:${workflow.variables.image_version}",
"ports": [
{
"containerPort": 50051,
"name": "flapp-port"
}
],
"command": [
"/app/deploy/scripts/wait4pair_wrapper.sh"
],
"args": [
"/app/deploy/scripts/trainer/run_tree_worker.sh"
],
"resources": {
"limits": {
"cpu": "${workflow.jobs.train-job.variables.worker_cpu}",
"memory": "${workflow.jobs.train-job.variables.worker_mem}"
},
"requests": {
"cpu": "${workflow.jobs.train-job.variables.worker_cpu}",
"memory": "${workflow.jobs.train-job.variables.worker_mem}"
}
}
}
],
"imagePullSecrets": [
{
"name": "regcred"
}
],
"volumes": [
{
"persistentVolumeClaim": {
"claimName": "pvc-fedlearner-default"
},
"name": "data"
}
]
}
},
"pair": true,
"replicas": 1
}
}
}
}
'''
)
])
return workflow
if __name__ == '__main__':
print(json.dumps(MessageToDict(
make_workflow_template(),
preserving_proto_field_name=True,
including_default_value_fields=True)))
| 46.592896
| 136
| 0.27306
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 30,407
| 0.891544
|
e3a19136ac88239183f6ccc7f508189df0b1db51
| 675
|
py
|
Python
|
utils/sys_utils.py
|
machine2learn/galaina
|
47ea16dd99687b38307674dd16ab7b7e99453910
|
[
"BSD-3-Clause"
] | 3
|
2019-05-04T16:46:27.000Z
|
2021-03-05T14:37:05.000Z
|
utils/sys_utils.py
|
machine2learn/galaina
|
47ea16dd99687b38307674dd16ab7b7e99453910
|
[
"BSD-3-Clause"
] | 2
|
2019-08-08T13:01:32.000Z
|
2019-08-19T13:32:22.000Z
|
utils/sys_utils.py
|
machine2learn/galaina
|
47ea16dd99687b38307674dd16ab7b7e99453910
|
[
"BSD-3-Clause"
] | null | null | null |
import os
import shutil
def delete_configs(config, dataset, username):
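    # Remove a single named config directory, or every config directory except 'input' and 'factor' when config == 'all'.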
if config != 'all':
paths = [os.path.join('user_data', username, dataset, config)]
else:
paths = [os.path.join('user_data', username, dataset, d) for d in
os.listdir(os.path.join('user_data', username, dataset)) if
os.path.isdir(os.path.join('user_data', username, dataset, d)) and d != 'input' and d != 'factor']
for path in paths:
shutil.rmtree(path)
def delete_dataset(APP_ROOT, username, dataset):
path = os.path.join(APP_ROOT, 'user_data', username, dataset)
print('removing ...' + str(path))
shutil.rmtree(path)
| 33.75
| 115
| 0.631111
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 89
| 0.131852
|
e3a225a979c9b3bf4e5e8bd9eaea65a272da293b
| 14,693
|
py
|
Python
|
RasaMakeupRobot/script/dst/trade/utils.py
|
xiaobuguilaile/rasa-conversational-robot
|
05f03c8e928a7c6bef284cdc18a89ef423977974
|
[
"Apache-2.0"
] | 2
|
2021-01-05T08:56:54.000Z
|
2021-01-22T06:05:55.000Z
|
script/dst/trade/utils.py
|
qq751220449/xbot
|
557793302badfce1c0befa81de8948da99c0baae
|
[
"MIT"
] | null | null | null |
script/dst/trade/utils.py
|
qq751220449/xbot
|
557793302badfce1c0befa81de8948da99c0baae
|
[
"MIT"
] | null | null | null |
import os
import bz2
import json
import random
import pickle
from collections import defaultdict, Counter
from tqdm import tqdm
import torch
from data.crosswoz.data_process.dst.trade_preprocess import (
EXPERIMENT_DOMAINS,
Lang,
get_seq,
get_slot_information,
)
class CNEmbedding:
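    # Wraps pretrained Chinese word vectors loaded from a bz2-compressed text file (one word and its vector per line).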
def __init__(self, vector_path):
self.word2vec = {}
with bz2.open(vector_path, "rt", encoding="utf8") as fin:
lines = fin.readlines()
            # the first line is metadata
for line in tqdm(lines[1:], desc="Generating pretrained embedding"):
line = line.strip()
tokens = line.split()
word = tokens[0]
vec = tokens[1:]
vec = [float(item) for item in vec]
self.word2vec[word] = vec
self.embed_size = 300
def emb(self, token, default="zero"):
get_default = {
"none": lambda: None,
"zero": lambda: 0.0,
"random": lambda: random.uniform(-0.1, 0.1),
}[default]
vec = self.word2vec.get(token, None)
if vec is None:
vec = [get_default()] * self.embed_size
return vec
def dump_pretrained_emb(orig_embedding_path, index2word, dump_path):
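    # Build an embedding matrix aligned with index2word and dump it to JSON; words missing from the vectors get zero embeddings.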
print("Dumping pretrained embeddings...")
embeddings = [CNEmbedding(orig_embedding_path)]
embedding = []
count = [0.0, 0.0]
for i in tqdm(range(len(index2word))):
w = index2word[i]
e = []
for emb in embeddings:
e += emb.emb(w, default="zero")
# stat embed existence
        count[1] += 1.0  # total number of words
if w in embeddings[0].word2vec:
            count[0] += 1.0  # number of words present in the embedding
# e += [0.] * 300
embedding.append(e)
with open(dump_path, "w") as f:
json.dump(embedding, f)
print(f"Word exists in embedding mat: {count[0] / count[1] * 100}")
def fix_general_label_error(belief_state):
"""
:param belief_state:
"belief_state": [
{
"slots": [
[
"餐馆-推荐菜",
"驴 杂汤"
]
]
},
{
"slots": [
[
"餐馆-人均消费",
"100 - 150 元"
]
]
}
]
:return:
"""
belief_state_dict = {
slot_value["slots"][0][0]: slot_value["slots"][0][1]
for slot_value in belief_state
}
return belief_state_dict
def read_langs(
file_name, gating_dict, slots, dataset, lang, mem_lang, load_lang, config
):
print(("Reading from {}".format(file_name)))
data = []
max_resp_len, max_value_len = 0, 0
    domain_counter = defaultdict(int)  # how many dialogues per domain
gate_counter = []
with open(file_name, "r", encoding="utf8") as f:
dials = json.load(f)
if config["debug"]:
dials = dials[:10]
# create vocab first
    for dial_dict in dials:  # each dial_dict is one dialogue, containing multiple turns
if not load_lang and (config["all_vocab"] or dataset == "train"):
for ti, turn in enumerate(dial_dict["dialogue"]):
                # build the utterance vocabulary
lang.index_words(turn["system_transcript"], "utter")
lang.index_words(turn["transcript"], "utter")
for dial_dict in dials:
dialog_history = ""
# Filtering and counting domains
for domain in dial_dict["domains"]:
if domain not in EXPERIMENT_DOMAINS:
continue
domain_counter[domain] += 1
# Reading data
for ti, turn in enumerate(dial_dict["dialogue"]):
turn_domain = turn["domain"]
            turn_id = turn["turn_idx"]  # always 0 in this dataset, which looks suspicious
turn_uttr = turn["system_transcript"] + " ; " + turn["transcript"]
turn_uttr_strip = turn_uttr.strip()
dialog_history += (
turn["system_transcript"] + " ; " + turn["transcript"] + " ; "
)
source_text = dialog_history.strip()
            # originally for the English dataset; e.g. {"餐馆-推荐菜": "驴 杂汤"}, the first entry of dev_dials.json
turn_belief_dict = fix_general_label_error(turn["belief_state"])
# List['domain-slot-value']
turn_belief_list = [
str(k) + "-" + str(v) for k, v in turn_belief_dict.items()
]
if not load_lang and (config["all_vocab"] or dataset == "train"):
                # build the slot-value vocabulary
mem_lang.index_words(turn_belief_dict, "belief")
class_label, generate_y, slot_mask, gating_label = [], [], [], []
            # a turn gets one value for every slot in the ontology
for slot in slots: # ontology
                # only keep the slots needed in this turn
if slot in turn_belief_dict.keys(): # dialogue
generate_y.append(turn_belief_dict[slot])
                    # the ontology can contain 'none' values
                    if turn_belief_dict[slot] == "none":  # even if 'none' appears, it can only be the case below
gating_label.append(gating_dict["none"])
else:
gating_label.append(gating_dict["ptr"])
if max_value_len < len(turn_belief_dict[slot]):
max_value_len = len(turn_belief_dict[slot])
else:
generate_y.append("none")
gating_label.append(gating_dict["none"])
gate_counter.extend(gating_label)
            # the dialogue content can be recovered from ID and turn_idx
data_detail = {
"ID": dial_dict["dialogue_idx"],
"domains": dial_dict["domains"],
"turn_domain": turn_domain,
"turn_id": turn_id, # 好像都是 0
"dialog_history": source_text,
"turn_belief": turn_belief_list,
"gating_label": gating_label,
"turn_uttr": turn_uttr_strip, # 每一轮的系统和人的话语
"generate_y": generate_y,
}
data.append(data_detail)
if max_resp_len < len(source_text.split()):
                max_resp_len = len(source_text.split())  # dialogue history length; system and user each counted once
    # add t{} to the lang file (purpose unclear)
if "t{}".format(max_value_len - 1) not in mem_lang.word2index.keys():
for time_i in range(max_value_len):
mem_lang.index_words("t{}".format(time_i), "utter")
print("domain_counter", domain_counter)
print("gate counter", Counter(gate_counter))
return data, max_resp_len
def prepare_data_seq(config):
eval_batch = (
config["eval_batch_size"] if config["eval_batch_size"] else config["batch_size"]
)
train_file_path = config["train_dials"]
dev_file_path = config["dev_dials"]
test_file_path = config["test_dials"]
ontology_file_path = config["ontology"]
# load domain-slot pairs from ontology
ontology = json.load(open(ontology_file_path, "r", encoding="utf8"))
slots = get_slot_information(ontology)
gating_dict = {"ptr": 0, "none": 1}
# Vocabulary
lang_name = "lang-all.pkl" if config["all_vocab"] else "lang-train.pkl"
mem_lang_name = "mem-lang-all.pkl" if config["all_vocab"] else "mem-lang-train.pkl"
if config["debug"]:
lang_name = "debug-" + lang_name
mem_lang_name = "debug-" + mem_lang_name
lang_file_path = os.path.join(config["data_path"], lang_name)
mem_lang_file_path = os.path.join(config["data_path"], mem_lang_name)
load_lang = False
if (
os.path.exists(lang_file_path) and os.path.exists(mem_lang_file_path)
) and not config["clean_cache"]:
print("Loading saved lang files...")
load_lang = True
with open(lang_file_path, "rb") as f:
lang = pickle.load(f)
with open(mem_lang_file_path, "rb") as f:
mem_lang = pickle.load(f)
else:
lang, mem_lang = Lang(config), Lang(config)
        # both start with the domains and slots from the ontology; later one holds utterances, the other domain-slot-values
lang.index_words(slots, "slot")
mem_lang.index_words(slots, "slot")
    # build the dataloaders
pair_train, train_max_len = read_langs(
train_file_path, gating_dict, slots, "train", lang, mem_lang, load_lang, config
)
train_loader = get_seq(
pair_train,
lang,
mem_lang,
config["batch_size"],
config["n_gpus"],
shuffle=True,
config=config,
)
train_vocab_size = lang.n_words
pair_dev, dev_max_len = read_langs(
dev_file_path, gating_dict, slots, "dev", lang, mem_lang, load_lang, config
)
dev_loader = get_seq(
pair_dev,
lang,
mem_lang,
eval_batch,
config["n_gpus"],
shuffle=False,
config=config,
)
pair_test, test_max_len = read_langs(
test_file_path, gating_dict, slots, "tests", lang, mem_lang, load_lang, config
)
test_loader = get_seq(
pair_test,
lang,
mem_lang,
eval_batch,
config["n_gpus"],
shuffle=False,
config=config,
)
    # cache the intermediate data
if (
not (os.path.exists(lang_file_path) and os.path.exists(mem_lang_file_path))
or config["clean_cache"]
):
print("Dumping lang files...")
with open(lang_file_path, "wb") as f:
pickle.dump(lang, f)
with open(mem_lang_file_path, "wb") as f:
pickle.dump(mem_lang, f)
emb_dump_path = os.path.join(config["data_path"], f"emb{len(lang.index2word)}")
if (not os.path.exists(emb_dump_path) or config["clean_cache"]) and config[
"load_embedding"
]:
dump_pretrained_emb(
config["orig_pretrained_embedding"], lang.index2word, emb_dump_path
)
max_dialogue_history_length = max(train_max_len, dev_max_len, test_max_len) + 1
print("Read %s pairs train" % len(pair_train))
print("Read %s pairs dev" % len(pair_dev))
print("Read %s pairs tests" % len(pair_test))
print("Vocab_size: %s " % lang.n_words)
print("Vocab_size Training %s" % train_vocab_size)
print("Vocab_size Belief %s" % mem_lang.n_words)
print("Max. length of dialog words for RNN: %s " % max_dialogue_history_length)
langs = [lang, mem_lang]
# dataloader, dataloader, dataloader, dataloader, List[Lang], List[Dict[str, str]], Dict[str, int], int
return train_loader, dev_loader, test_loader, langs, slots, gating_dict
def masked_cross_entropy_for_value(logits, target, mask):
# logits: b * |s| * m * |v|
# target: b * |s| * m
# mask: b * |s|
logits_flat = logits.view(-1, logits.size(-1))
# print(logits_flat.size())
log_probs_flat = torch.log(logits_flat)
# print("log_probs_flat", log_probs_flat)
target_flat = target.view(-1, 1)
# print("target_flat", target_flat)
losses_flat = -torch.gather(log_probs_flat, dim=1, index=target_flat)
losses = losses_flat.view(*target.size()) # b * |s| * m
loss = masking(losses, mask)
return loss
def masking(losses, mask):
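    # mask holds the target value length for each (batch, slot) pair; build a boolean mask over the value
    # dimension and average the loss over the valid positions only.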
mask_ = []
batch_size = mask.size(0)
max_len = losses.size(2)
for si in range(mask.size(1)):
seq_range = torch.arange(0, max_len).long()
seq_range_expand = seq_range.unsqueeze(0).expand(batch_size, max_len)
if mask[:, si].is_cuda:
seq_range_expand = seq_range_expand.cuda()
seq_length_expand = (
mask[:, si].unsqueeze(1).expand_as(seq_range_expand)
) # (bs, max_len)
mask_.append((seq_range_expand < seq_length_expand))
mask_ = torch.stack(mask_)
mask_ = mask_.transpose(0, 1) # (bs, num_slots, max_len)
if losses.is_cuda:
mask_ = mask_.cuda()
losses = losses * mask_.float()
loss = losses.sum() / (mask_.sum().float())
return loss
def reformat_belief_state(raw_state):
belief_state = []
for item in raw_state:
dsv_triple = item.split("-", 2)
domain = dsv_triple[0].strip()
slot = dsv_triple[1].strip()
value = dsv_triple[2].strip()
belief_state.append({"slots": [[domain + "-" + slot, value]]})
return belief_state
def compute_acc(gold, pred, slot_temp):
    # TODO: why not compute this directly with a set intersection
miss_gold = 0
miss_slot = []
for g in gold:
if g not in pred:
miss_gold += 1
miss_slot.append(g.rsplit("-", 1)[0]) # g=domain-slot-value
wrong_pred = 0
for p in pred:
if p not in gold and p.rsplit("-", 1)[0] not in miss_slot:
wrong_pred += 1
acc_total = len(slot_temp)
    # slot_temp contains all 80 domain-slots while a turn usually has only a few, so this metric seems questionable
acc = len(slot_temp) - miss_gold - wrong_pred
acc = acc / float(acc_total)
return acc
def compute_prf(gold, pred):
tp, fp, fn = 0, 0, 0
if len(gold) != 0:
count = 1
for g in gold:
if g in pred:
tp += 1
else:
fn += 1
for p in pred:
if p not in gold:
fp += 1
precision = tp / float(tp + fp) if (tp + fp) != 0 else 0
recall = tp / float(tp + fn) if (tp + fn) != 0 else 0
f1 = (
2 * precision * recall / float(precision + recall)
if (precision + recall) != 0
else 0
)
else:
if not pred:
precision, recall, f1, count = 1, 1, 1, 1
else:
precision, recall, f1, count = 0, 0, 0, 1
return f1, recall, precision, count
def evaluate_metrics(all_prediction, from_which, slot_temp):
total, turn_acc, joint_acc, f1_pred, f1_count = 0, 0, 0, 0, 0
for d, v in all_prediction.items():
for t in range(len(v)):
cv = v[t]
if set(cv["turn_belief"]) == set(cv[from_which]):
joint_acc += 1
total += 1
# Compute prediction slot accuracy
temp_acc = compute_acc(
set(cv["turn_belief"]), set(cv[from_which]), slot_temp
)
turn_acc += temp_acc
# Compute prediction joint F1 score
temp_f1, temp_r, temp_p, count = compute_prf(
set(cv["turn_belief"]), set(cv[from_which])
)
f1_pred += temp_f1
f1_count += count
joint_acc_score = joint_acc / float(total) if total != 0 else 0
turn_acc_score = turn_acc / float(total) if total != 0 else 0
f1_score = f1_pred / float(f1_count) if f1_count != 0 else 0
return joint_acc_score, f1_score, turn_acc_score
| 33.699541
| 107
| 0.562445
| 916
| 0.060418
| 0
| 0
| 0
| 0
| 0
| 0
| 3,372
| 0.222413
|
e3a3386ce9240964c1a178b8bb4fca5a690e725d
| 789
|
py
|
Python
|
pharmrep/reports/urls.py
|
boyombo/pharmrep
|
2293ceb235dec949c58fa40d1ee43fce172e0ceb
|
[
"MIT"
] | null | null | null |
pharmrep/reports/urls.py
|
boyombo/pharmrep
|
2293ceb235dec949c58fa40d1ee43fce172e0ceb
|
[
"MIT"
] | null | null | null |
pharmrep/reports/urls.py
|
boyombo/pharmrep
|
2293ceb235dec949c58fa40d1ee43fce172e0ceb
|
[
"MIT"
] | null | null | null |
from django.conf.urls import url
from django.views.generic import TemplateView
from reports import views
urlpatterns = [
url(r'balance/$', views.balance, name='report_balance'),
url(r'performance/$', views.performance, name='report_performance'),
url(r'last_activity/$', views.last_activity, name='last_activity'),
url(r'collection/$', views.CollectionListView.as_view(),
name='report_collection'),
url(r'saleschart/$', TemplateView.as_view(
template_name='reports/sales_chart.html'), name='chart_sales'),
url(r'paymentchart/$', TemplateView.as_view(
template_name='reports/payment_chart.html'), name='chart_payment'),
url(r'callchart/$', TemplateView.as_view(
template_name='reports/calls_chart.html'), name='chart_call'),
]
| 39.45
| 75
| 0.714829
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 297
| 0.376426
|
e3a354a397453e432d52c1cd363a4b2592457f4b
| 1,150
|
py
|
Python
|
pycam/pycam/Utils/progress.py
|
pschou/py-sdf
|
0a269ed155d026e29429d76666fb63c95d2b4b2c
|
[
"MIT"
] | null | null | null |
pycam/pycam/Utils/progress.py
|
pschou/py-sdf
|
0a269ed155d026e29429d76666fb63c95d2b4b2c
|
[
"MIT"
] | null | null | null |
pycam/pycam/Utils/progress.py
|
pschou/py-sdf
|
0a269ed155d026e29429d76666fb63c95d2b4b2c
|
[
"MIT"
] | null | null | null |
from pycam.Utils.events import get_event_handler, get_mainloop
class ProgressContext:
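    # Context manager around the registered "progress" handler (if any): shows the title on enter and finishes the bar on exit.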
def __init__(self, title):
self._title = title
self._progress = get_event_handler().get("progress")
def __enter__(self):
if self._progress:
self._progress.update(text=self._title, percent=0)
# start an indefinite pulse (until we receive more details)
self._progress.update()
else:
self._progress = None
return self
def __exit__(self, exc_type, exc_value, traceback):
if self._progress:
self._progress.finish()
def update(self, *args, **kwargs):
mainloop = get_mainloop()
if mainloop is None:
return False
mainloop.update()
if self._progress:
return self._progress.update(*args, **kwargs)
else:
return False
def set_multiple(self, count, base_text=None):
if self._progress:
self._progress.set_multiple(count, base_text=base_text)
def update_multiple(self):
if self._progress:
self._progress.update_multiple()
| 28.75
| 71
| 0.616522
| 1,084
| 0.942609
| 0
| 0
| 0
| 0
| 0
| 0
| 69
| 0.06
|
e3a40b62615cdac16bdbd6f21218bc07a791e56b
| 4,030
|
py
|
Python
|
validate_submission.py
|
ChunghyunPark/semantic-kitti-api
|
8863f21cb05fd99667b4a1bb755cc432c430c9fe
|
[
"MIT"
] | 1
|
2019-10-18T15:12:24.000Z
|
2019-10-18T15:12:24.000Z
|
validate_submission.py
|
ZiyiLiubird/semantic-kitti-api
|
9a6366264b1fd95d7a84e05bd41659524fd9fd32
|
[
"MIT"
] | null | null | null |
validate_submission.py
|
ZiyiLiubird/semantic-kitti-api
|
9a6366264b1fd95d7a84e05bd41659524fd9fd32
|
[
"MIT"
] | 1
|
2019-10-11T22:30:53.000Z
|
2019-10-11T22:30:53.000Z
|
#!/usr/bin/env python3
# This file is covered by the LICENSE file in the root of this project.
import zipfile
import argparse
import os
class ValidationException(Exception):
pass
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Validate a submission zip file needed to evaluate on CodaLab competitions.\n\nThe verification tool checks:\n 1. correct folder structure,\n 2. existence of label files for each scan,\n 3. count of labels for each scan.\nInvalid labels are ignored by the evaluation script, therefore we don't check\nfor invalid labels.", formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument(
"zipfile",
type=str,
help='zip file that should be validated.',
)
parser.add_argument(
'dataset',
type=str,
help='directory containing the folder "sequences" containing folders "11", ..., "21" with the "velodyne" files.'
)
parser.add_argument(
"--task",
type=str,
choices=["segmentation"],
default="segmentation",
help='task for which the zip file should be validated.'
)
FLAGS, _ = parser.parse_known_args()
checkmark = "\u2713"
try:
print('Validating zip archive "{}".\n'.format(FLAGS.zipfile))
print(" 1. Checking filename.............. ", end="", flush=True)
if not FLAGS.zipfile.endswith('.zip'):
raise ValidationException('Competition bundle must end with ".zip"')
print(checkmark)
with zipfile.ZipFile(FLAGS.zipfile) as zipfile:
if FLAGS.task == "segmentation":
print(" 2. Checking directory structure... ", end="", flush=True)
directories = [folder.filename for folder in zipfile.infolist() if folder.filename.endswith("/")]
if "sequences/" not in directories:
raise ValidationException('Directory "sequences" missing inside zip file.')
for sequence in range(11, 22):
sequence_directory = "sequences/{}/".format(sequence)
if sequence_directory not in directories:
raise ValidationException('Directory "{}" missing inside zip file.'.format(sequence_directory))
predictions_directory = sequence_directory + "predictions/"
if predictions_directory not in directories:
raise ValidationException('Directory "{}" missing inside zip file.'.format(predictions_directory))
print(checkmark)
print(' 3. Checking file sizes............ ', end='', flush=True)
prediction_files = {info.filename: info for info in zipfile.infolist() if not info.filename.endswith("/")}
for sequence in range(11, 22):
sequence_directory = 'sequences/{}'.format(sequence)
velodyne_directory = os.path.join(FLAGS.dataset, 'sequences/{}/velodyne/'.format(sequence))
velodyne_files = sorted([os.path.join(velodyne_directory, file) for file in os.listdir(velodyne_directory)])
label_files = sorted([os.path.join(sequence_directory, "predictions", os.path.splitext(filename)[0] + ".label")
for filename in os.listdir(velodyne_directory)])
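                    # each velodyne point is four 4-byte floats and each label a 4-byte uint32, so expected counts follow from the file sizes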
for velodyne_file, label_file in zip(velodyne_files, label_files):
num_points = os.path.getsize(velodyne_file) / (4 * 4)
if label_file not in prediction_files:
raise ValidationException('"' + label_file + '" is missing inside zip.')
num_labels = prediction_files[label_file].file_size / 4
if num_labels != num_points:
raise ValidationException('label file "' + label_file +
"' should have {} labels, but found {} labels!".format(int(num_points), int(num_labels)))
print(checkmark)
else:
# TODO scene completion.
raise NotImplementedError("Unknown task.")
except ValidationException as ex:
print("\n\n " + "\u001b[1;31m>>> Error: " + str(ex) + "\u001b[0m")
exit(1)
print("\n\u001b[1;32mEverything ready for submission!\u001b[0m \U0001F389")
| 39.90099
| 390
| 0.657072
| 44
| 0.010918
| 0
| 0
| 0
| 0
| 0
| 0
| 1,376
| 0.341439
|
e3a5131242de5ad9d1dfff7bb93f08796a6b50ce
| 5,231
|
py
|
Python
|
sheldon_behaviors/ship_behavior/scripts/behavior_service.py
|
shinselrobots/sheldon
|
911148cd82d28e37aebc5e083fbf830d1c9768ab
|
[
"Apache-2.0"
] | 1
|
2021-01-02T18:17:52.000Z
|
2021-01-02T18:17:52.000Z
|
sheldon_behaviors/ship_behavior/scripts/behavior_service.py
|
shinselrobots/sheldon
|
911148cd82d28e37aebc5e083fbf830d1c9768ab
|
[
"Apache-2.0"
] | null | null | null |
sheldon_behaviors/ship_behavior/scripts/behavior_service.py
|
shinselrobots/sheldon
|
911148cd82d28e37aebc5e083fbf830d1c9768ab
|
[
"Apache-2.0"
] | 4
|
2017-09-16T03:56:01.000Z
|
2018-09-19T02:15:57.000Z
|
#! /usr/bin/env python
# License: Apache 2.0. See LICENSE file in root directory.
#
# For simple behaviors that can run synchronously, Python provides
# a simple way to implement this. Add the work of your behavior
# in the execute_cb callback
#
import rospy
import actionlib
import behavior_common.msg
import time
import random
from std_msgs.msg import Float64
from std_msgs.msg import UInt16
from std_msgs.msg import UInt32
from std_msgs.msg import Bool
from std_msgs.msg import Empty
# for talking
import actionlib
import actionlib.action_client
import audio_and_speech_common.msg
# for servos
#from sheldon_servos.head_servo_publishers import *
#from sheldon_servos.right_arm_servo_publishers import *
#from sheldon_servos.left_arm_servo_publishers import *
from sheldon_servos.standard_servo_positions import *
from sheldon_servos.set_servo_speed import *
from sheldon_servos.set_servo_torque import *
class BehaviorAction(object):
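    # Synchronous behavior action server: announces the move, folds the robot into its shipping pose, then idles until preempted.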
_feedback = behavior_common.msg.behaviorFeedback()
_result = behavior_common.msg.behaviorResult()
def __init__(self, name):
self._action_name = name
self._as = actionlib.SimpleActionServer(self._action_name, behavior_common.msg.behaviorAction, execute_cb=self.execute_cb, auto_start = False)
self._as.start()
        rospy.loginfo('%s: Initializing Ship behavior service' % (self._action_name))
# enable/disable microphone when robot is moving servos.
# (Note system_enable vs. speech_enable vs. user_enable)
self.mic_system_enable_pub = rospy.Publisher('/microphone/system_enable', Bool, queue_size=1)
def execute_cb(self, goal):
rospy.loginfo('%s: Executing behavior' % (self._action_name))
rospy.loginfo( "Param1: '%s'", goal.param1)
rospy.loginfo( "Param2: '%s'", goal.param2)
# =========== Behavior Implementation ==============
success = True
r = rospy.Rate(1.0)
pub_eye_cmd = rospy.Publisher('/head/eye_cmd', UInt16, queue_size=10)
pub_light_mode = rospy.Publisher('/arm_led_mode', UInt16, queue_size=10)
pub_ear_cmd = rospy.Publisher('/head/ear_cmd', UInt16, queue_size=10)
rospy.loginfo("Waiting for speech server (press ctrl-c to cancel at anytime)")
client = actionlib.SimpleActionClient("/speech_service", audio_and_speech_common.msg.speechAction)
client.wait_for_server()
goal = audio_and_speech_common.msg.speechGoal(text_to_speak="moving into shipping position")
client.send_goal(goal)
result = client.wait_for_result() # wait for speech to complete
rospy.loginfo("Speech goal returned result: %d", result)
# mute the microphone
self.mic_system_enable_pub.publish(False)
# Move head and arms to sleep position
SetServoTorque(0.8, all_servo_joints)
SetServoSpeed(0.5, head_joints)
SetServoSpeed(1.0, right_arm_joints)
SetServoSpeed(1.0, left_arm_joints)
# Move elbows at fast speed to lock
SetSingleServoSpeed(2.0, "right_arm_elbow_bend_joint")
SetSingleServoSpeed(2.0, "left_arm_elbow_bend_joint")
time.sleep(0.5)
all_sleep() # Move all servos to sleep position 1
time.sleep(2)
# lock arms
pub_right_arm_elbow_bend.publish(3.00)
pub_left_arm_elbow_bend.publish(3.13)
time.sleep(1)
# Move arms forward, so they point down after waist moves
#pub_right_arm_shoulder_rotate.publish(0.78)
#pub_left_arm_shoulder_rotate.publish(0.78)
# Turn off servo torque
rospy.loginfo("Turning off servo torque and eyes")
SetServoTorque(0.0, all_servo_joints)
pub_eye_cmd.publish(0) # 0 = Turn eyes off
pub_ear_cmd.publish(0) # 0 = Turn ear lights off
pub_light_mode.publish(0) # 0 = Turn lights off
# Move Waist into position
time.sleep(3)
waist_full_down()
time.sleep(5.0) # seconds
# Turn off servo torque
#SetServoTorque(0.0, all_servo_joints)
#time.sleep(5.0) # seconds
rospy.loginfo(' Ship Complete. Running until some other behavior preempts, to suppress Idle behavior...')
#rospy.loginfo('%s: Running behavior' % (self._action_name))
self._feedback.running = True
self._as.publish_feedback(self._feedback)
# Run forever to keep Idle behavior from running.
        # may be preempted by any other behavior (such as wake)
while True:
# check that preempt has not been requested by the client
if self._as.is_preempt_requested():
rospy.loginfo('%s: Behavior preempted' % self._action_name)
self._as.set_preempted()
success = True
break
r.sleep()
if success:
rospy.loginfo('%s: Behavior complete' % self._action_name)
self._as.set_succeeded(self._result)
# un-mute the microphone
self.mic_system_enable_pub.publish(True)
if __name__ == '__main__':
rospy.init_node('ship_behavior')
server = BehaviorAction(rospy.get_name())
rospy.spin()
| 36.326389
| 150
| 0.676161
| 4,180
| 0.799082
| 0
| 0
| 0
| 0
| 0
| 0
| 1,926
| 0.36819
|
e3a55d32d7c5a654a176400a4c92296634a021f4
| 7,831
|
py
|
Python
|
src/core/utils/bert_utils.py
|
joe3d1998/GraphFlow
|
8a751e4fc69a1e0c06ded23b7d1096f3161931a1
|
[
"Apache-2.0"
] | 30
|
2019-08-18T21:56:20.000Z
|
2022-03-18T10:04:02.000Z
|
src/core/utils/bert_utils.py
|
joe3d1998/GraphFlow
|
8a751e4fc69a1e0c06ded23b7d1096f3161931a1
|
[
"Apache-2.0"
] | 2
|
2019-11-12T02:28:36.000Z
|
2022-03-20T05:27:05.000Z
|
src/core/utils/bert_utils.py
|
joe3d1998/GraphFlow
|
8a751e4fc69a1e0c06ded23b7d1096f3161931a1
|
[
"Apache-2.0"
] | 11
|
2020-02-17T02:47:26.000Z
|
2021-09-05T05:37:29.000Z
|
from collections import defaultdict, namedtuple
import torch
# When using the sliding window trick for long sequences,
# we take the representation of each token with maximal context.
# Take average of the BERT embeddings of these BPE sub-tokens
# as the embedding for the word.
# Take *weighted* average of the word embeddings through all layers.
def extract_bert_ques_hidden_states(all_encoder_layers, max_doc_len, features, weighted_avg=False):
num_layers, batch_size, turn_size, num_chunk, max_token_len, bert_dim = all_encoder_layers.shape
out_features = torch.Tensor(num_layers, batch_size, turn_size, max_doc_len, bert_dim).fill_(0)
device = all_encoder_layers.get_device() if all_encoder_layers.is_cuda else None
if device is not None:
out_features = out_features.to(device)
token_count = []
# Map BERT tokens to doc words
for i, ex_feature in enumerate(features): # Example
ex_token_count = []
for t, para_feature in enumerate(ex_feature): # Turn
para_token_count = defaultdict(int)
for j, chunk_feature in enumerate(para_feature): # Chunk
for k in chunk_feature.token_is_max_context: # Token
if chunk_feature.token_is_max_context[k]:
doc_word_idx = chunk_feature.token_to_orig_map[k]
out_features[:, i, t, doc_word_idx] += all_encoder_layers[:, i, t, j, k]
para_token_count[doc_word_idx] += 1
ex_token_count.append(para_token_count)
token_count.append(ex_token_count)
for i, ex_token_count in enumerate(token_count):
for t, para_token_count in enumerate(ex_token_count):
for doc_word_idx, count in para_token_count.items():
out_features[:, i, t, doc_word_idx] /= count
# Average through all layers
if not weighted_avg:
out_features = torch.mean(out_features, 0)
return out_features
def extract_bert_ctx_hidden_states(all_encoder_layers, max_doc_len, features, weighted_avg=False):
num_layers, batch_size, num_chunk, max_token_len, bert_dim = all_encoder_layers.shape
out_features = torch.Tensor(num_layers, batch_size, max_doc_len, bert_dim).fill_(0)
device = all_encoder_layers.get_device() if all_encoder_layers.is_cuda else None
if device is not None:
out_features = out_features.to(device)
token_count = []
# Map BERT tokens to doc words
for i, ex_feature in enumerate(features): # Example
ex_token_count = defaultdict(int)
for j, chunk_feature in enumerate(ex_feature): # Chunk
for k in chunk_feature.token_is_max_context: # Token
if chunk_feature.token_is_max_context[k]:
doc_word_idx = chunk_feature.token_to_orig_map[k]
out_features[:, i, doc_word_idx] += all_encoder_layers[:, i, j, k]
ex_token_count[doc_word_idx] += 1
token_count.append(ex_token_count)
for i, ex_token_count in enumerate(token_count):
for doc_word_idx, count in ex_token_count.items():
out_features[:, i, doc_word_idx] /= count
# Average through all layers
if not weighted_avg:
out_features = torch.mean(out_features, 0)
return out_features
def convert_text_to_bert_features(text, bert_tokenizer, max_seq_length, doc_stride):
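    # Tokenize each word into WordPiece sub-tokens, split long sequences into overlapping chunks,
    # and record which original word each sub-token maps back to.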
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
tok_to_orig_index = []
all_doc_tokens = []
for (i, token) in enumerate(text):
sub_tokens = bert_tokenizer.wordpiece_tokenizer.tokenize(token.lower())
for sub_ in sub_tokens:
tok_to_orig_index.append(i)
all_doc_tokens.append(sub_)
# The -2 accounts for [CLS] and [SEP]
max_tokens_for_doc = max_seq_length - 2
# We can have documents that are longer than the maximum sequence length.
# To deal with this we do a sliding window approach, where we take chunks
# of the up to our max length with a stride of `doc_stride`.
_DocSpan = namedtuple( # pylint: disable=invalid-name
"DocSpan", ["start", "length"])
doc_spans = []
start_offset = 0
while start_offset < len(all_doc_tokens):
length = len(all_doc_tokens) - start_offset
if length > max_tokens_for_doc:
length = max_tokens_for_doc
doc_spans.append(_DocSpan(start=start_offset, length=length))
if start_offset + length == len(all_doc_tokens):
break
start_offset += min(length, doc_stride)
out_features = []
for (doc_span_index, doc_span) in enumerate(doc_spans):
tokens = []
token_to_orig_map = {}
token_is_max_context = {}
segment_ids = []
tokens.append("[CLS]")
segment_ids.append(0)
for i in range(doc_span.length):
split_token_index = doc_span.start + i
token_to_orig_map[len(tokens)] = tok_to_orig_index[split_token_index]
is_max_context = _check_is_max_context(doc_spans, doc_span_index,
split_token_index)
token_is_max_context[len(tokens)] = is_max_context
tokens.append(all_doc_tokens[split_token_index])
segment_ids.append(0)
tokens.append("[SEP]")
segment_ids.append(0)
input_ids = bert_tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1] * len(input_ids)
feature = BertInputFeatures(
doc_span_index=doc_span_index,
tokens=tokens,
token_to_orig_map=token_to_orig_map,
token_is_max_context=token_is_max_context,
input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids)
out_features.append(feature)
return out_features
def _check_is_max_context(doc_spans, cur_span_index, position):
"""Check if this is the 'max context' doc span for the token."""
# Because of the sliding window approach taken to scoring documents, a single
# token can appear in multiple documents. E.g.
# Doc: the man went to the store and bought a gallon of milk
# Span A: the man went to the
# Span B: to the store and bought
# Span C: and bought a gallon of
# ...
#
# Now the word 'bought' will have two scores from spans B and C. We only
# want to consider the score with "maximum context", which we define as
# the *minimum* of its left and right context (the *sum* of left and
# right context will always be the same, of course).
#
# In the example the maximum context for 'bought' would be span C since
# it has 1 left context and 3 right context, while span B has 4 left context
# and 0 right context.
best_score = None
best_span_index = None
for (span_index, doc_span) in enumerate(doc_spans):
end = doc_span.start + doc_span.length - 1
if position < doc_span.start:
continue
if position > end:
continue
num_left_context = position - doc_span.start
num_right_context = end - position
score = min(num_left_context, num_right_context) + 0.01 * doc_span.length
if best_score is None or score > best_score:
best_score = score
best_span_index = span_index
return cur_span_index == best_span_index
class BertInputFeatures(object):
"""A single set of BERT features of data."""
def __init__(self,
doc_span_index,
tokens,
token_to_orig_map,
token_is_max_context,
input_ids,
input_mask,
segment_ids):
self.doc_span_index = doc_span_index
self.tokens = tokens
self.token_to_orig_map = token_to_orig_map
self.token_is_max_context = token_is_max_context
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
| 38.960199
| 99
| 0.70106
| 619
| 0.079045
| 0
| 0
| 0
| 0
| 0
| 0
| 1,984
| 0.253352
|
e3a5f11059953bb156d9e0590e2727a61cd805cc
| 282
|
py
|
Python
|
apps/lectures/serializers.py
|
csilouanos/student-management-system
|
91800a1d95234918ab7e9ce5a2a017eb93e81431
|
[
"MIT"
] | null | null | null |
apps/lectures/serializers.py
|
csilouanos/student-management-system
|
91800a1d95234918ab7e9ce5a2a017eb93e81431
|
[
"MIT"
] | null | null | null |
apps/lectures/serializers.py
|
csilouanos/student-management-system
|
91800a1d95234918ab7e9ce5a2a017eb93e81431
|
[
"MIT"
] | null | null | null |
from rest_framework import serializers
from .models import Lecture
class LectureSerializer(serializers.ModelSerializer):
class Meta:
model = Lecture
fields = ('id', 'title', 'lecturer_name', 'date', 'duration',
'slides_url', 'is_required')
| 31.333333
| 70
| 0.659574
| 213
| 0.755319
| 0
| 0
| 0
| 0
| 0
| 0
| 67
| 0.237589
|
e3a71e4692cca720e8c9165426c410f3c3fef261
| 396
|
py
|
Python
|
OpenDataCatalog/suggestions/urls.py
|
timwis/Open-Data-Catalog
|
0ccdc71f28773508c337875fd32478dd4324a50c
|
[
"MIT"
] | 3
|
2016-08-07T17:25:56.000Z
|
2019-11-12T00:51:14.000Z
|
suggestions/urls.py
|
opensandiego/Open-Data-Catalog
|
06f93bab36d22431ff86a87faea4e388d0491846
|
[
"MIT"
] | 1
|
2021-04-17T10:52:53.000Z
|
2021-04-17T10:52:53.000Z
|
suggestions/urls.py
|
opensandiego/Open-Data-Catalog
|
06f93bab36d22431ff86a87faea4e388d0491846
|
[
"MIT"
] | 2
|
2016-10-28T14:20:27.000Z
|
2021-04-17T10:52:28.000Z
|
from django.conf.urls.defaults import patterns, include, url
urlpatterns = patterns('',
(r'^$', 'suggestions.views.list_all'),
(r'^post/$', 'suggestions.views.add_suggestion'),
(r'^vote/(?P<suggestion_id>.*)/$', 'suggestions.views.add_vote'),
(r'^unvote/(?P<suggestion_id>.*)/$', 'suggestions.views.remove_vote'),
(r'^close/(?P<suggestion_id>.*)/$', 'suggestions.views.close'),
)
| 39.6
| 73
| 0.659091
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 262
| 0.661616
|
e3a9a5af8690681698031e1a127157f06eef690e
| 3,850
|
py
|
Python
|
fronteira_eficiente2.py
|
samuelbarrosm/Python-for-finances-
|
e1fd118b05f6efa2c4c72e88c5b2bf028d120c45
|
[
"MIT"
] | null | null | null |
fronteira_eficiente2.py
|
samuelbarrosm/Python-for-finances-
|
e1fd118b05f6efa2c4c72e88c5b2bf028d120c45
|
[
"MIT"
] | null | null | null |
fronteira_eficiente2.py
|
samuelbarrosm/Python-for-finances-
|
e1fd118b05f6efa2c4c72e88c5b2bf028d120c45
|
[
"MIT"
] | null | null | null |
# This script computes the efficient frontier of a portfolio
# Its goal is to evaluate the efficiency of the portfolio's allocations
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from pandas_datareader import data as wb
assets = ['PG', '^GSPC']
pf_data = pd.DataFrame()
for a in assets:
pf_data[a] = wb.DataReader(a, data_source='yahoo', start='2010-1-1')['Adj Close']
pf_data.tail()
(pf_data / pf_data.iloc[0] * 100).plot(figsize=(10, 5))
log_returns = np.log(pf_data / pf_data.shift(1))
log_returns.mean() * 250
log_returns['PG'].cov(log_returns['^GSPC']) * 250
# A correlation above 30% indicates the assets are highly correlated, which is good
log_returns['PG'].corr(log_returns['^GSPC'])
# Now let's move on to portfolio optimization from a more technical perspective
# Let's create a variable that holds the number of assets in our portfolio
num_assets = len(assets)
# Now we generate two random weights for these assets
# np.random.random can generate two random numbers between 0 and 1
arr = np.random.random(2)
# Let's compute the sum of the two randomly drawn weights
arr[0] + arr[1]
# The sum of these random weights will not always equal 1
# To make the sum equal 1, we do the following
weights = np.random.random(num_assets)
weights /= np.sum(weights)
print(weights)
# The /= operator divides each weight by the sum of the weights
# Remember that with numpy these values become elements of an array
# which is why, after this assignment, the weights sum to 1
# To write the expected return of a portfolio:
# Return = sum of the annualized mean log returns multiplied by their respective weights
# numpy's .sum adds objects across more than one dimension, which is why it differs from Python's built-in sum
np.sum(weights * log_returns.mean()) * 250
# As seen before, this expression gives the portfolio variance
np.dot(weights.T, np.dot(log_returns['PG'].cov(log_returns['^GSPC']) * 250, weights))
# As seen before, this expression gives the portfolio volatility
np.sqrt(np.dot(weights.T, np.dot(log_returns['PG'].cov(log_returns['^GSPC']) * 250, weights)))
# We will use these three expressions to compute return and volatility in the minimum-variance portfolio simulation
# Now we will create a chart where one thousand simulated portfolios are plotted
# We are not making 1000 different investments
# we are making 1000 different weight combinations of the same assets
# This loop generates 1000 possible weightings for the assets
pfolio_returns = []
pfolio_volatilities = []
for x in range(1000):
weights = np.random.random(num_assets)
weights /= np.sum(weights)
pfolio_returns.append(np.sum(weights * log_returns.mean()) * 250)
pfolio_volatilities.append(np.sqrt(np.dot(weights.T, np.dot(log_returns['PG'].cov(log_returns['^GSPC']) * 250, weights))))
# Convert the plain lists of numbers into numpy arrays, which are more convenient to work with
pfolio_returns = np.array(pfolio_returns)
pfolio_volatilities = np.array(pfolio_volatilities)
pfolio_volatilities,pfolio_returns
# Now create a DataFrame with two columns, one for the returns and one for the corresponding volatilities
portfolios = pd.DataFrame({'Return': pfolio_returns, 'Volatility': pfolio_volatilities})
portfolios.head()
portfolios.tail()
# Now plot the DataFrame values on a chart
# The chart type is a scatter plot
portfolios.plot(x='Volatility', y='Return', kind='scatter', figsize=(10, 6))
plt.xlabel('Expected Volatility')
plt.ylabel('Expected Return')
plt.show()
| 36.666667
| 127
| 0.744416
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,263
| 0.587182
|
e3aa556fa11d4a3e0d7e99f07a6cd0ab4a4331f6
| 7,607
|
py
|
Python
|
test/integration/test_build.py
|
DahlitzFlorian/wily
|
069c26bff9741b49420e3cfd7b0954ac9b88cc3f
|
[
"Apache-2.0"
] | null | null | null |
test/integration/test_build.py
|
DahlitzFlorian/wily
|
069c26bff9741b49420e3cfd7b0954ac9b88cc3f
|
[
"Apache-2.0"
] | null | null | null |
test/integration/test_build.py
|
DahlitzFlorian/wily
|
069c26bff9741b49420e3cfd7b0954ac9b88cc3f
|
[
"Apache-2.0"
] | null | null | null |
"""
Tests for the wily build command.
All of the following tests will use a click CLI runner to fully simulate the CLI.
Many of the tests will depend on a "builddir" fixture which is a compiled wily cache.
TODO : Test build + build with extra operator
"""
import pathlib
import pytest
from click.testing import CliRunner
from git import Repo, Actor
from mock import patch
import wily.__main__ as main
from wily.archivers import ALL_ARCHIVERS
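# The module docstring above mentions a "builddir" fixture (a compiled wily cache) provided by the
# suite's conftest.py, which is not part of this file. The fixture below is only a hypothetical
# sketch of what such a cache-building fixture could look like; the tests in this module keep
# using the real "builddir" fixture from conftest.py.
@pytest.fixture
def example_builddir(tmpdir):
    """Hypothetical sketch: a git repo with one commit and a wily cache built against it."""
    repo = Repo.init(path=tmpdir)
    tmppath = pathlib.Path(tmpdir)
    with open(tmppath / "test.py", "w") as test_txt:
        test_txt.write("import abc")
    with open(tmppath / ".gitignore", "w") as test_txt:
        test_txt.write(".wily/")
    repo.index.add(["test.py", ".gitignore"])
    author = Actor("An author", "author@example.com")
    repo.index.commit("initial commit", author=author, committer=author)
    runner = CliRunner()
    result = runner.invoke(main.cli, ["--path", tmpdir, "build", "test.py"])
    assert result.exit_code == 0, result.stdout
    return tmpdir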
def test_build_not_git_repo(tmpdir):
"""
Test that build defaults to filesystem in a non-git directory
"""
with patch("wily.logger") as logger:
runner = CliRunner()
result = runner.invoke(main.cli, ["--path", tmpdir, "build", "test.py"])
assert result.exit_code == 0, result.stdout
cache_path = tmpdir / ".wily"
assert cache_path.exists()
index_path = tmpdir / ".wily" / "filesystem" / "index.json"
assert index_path.exists()
def test_build_invalid_path(tmpdir):
"""
Test that build fails with a garbage path
"""
with patch("wily.logger") as logger:
runner = CliRunner()
result = runner.invoke(main.cli, ["--path", "/fo/v/a", "build", "test.py"])
assert result.exit_code == 1, result.stdout
def test_build_no_target(tmpdir):
"""
Test that build fails with no target
"""
with patch("wily.logger") as logger:
runner = CliRunner()
result = runner.invoke(main.cli, ["--path", tmpdir, "build"])
assert result.exit_code == 2, result.stdout
def test_build_crash(tmpdir):
"""
Test that build works in a basic repository.
"""
repo = Repo.init(path=tmpdir)
tmppath = pathlib.Path(tmpdir)
# Write a test file to the repo
with open(tmppath / "test.py", "w") as test_txt:
test_txt.write("import abc")
with open(tmppath / ".gitignore", "w") as test_txt:
test_txt.write(".wily/")
index = repo.index
index.add(["test.py", ".gitignore"])
author = Actor("An author", "author@example.com")
committer = Actor("A committer", "committer@example.com")
index.commit("basic test", author=author, committer=committer)
import wily.commands.build
with patch.object(
wily.commands.build.Bar, "finish", side_effect=RuntimeError("arggh")
) as bar_finish:
runner = CliRunner()
result = runner.invoke(main.cli, ["--path", tmpdir, "build", "test.py"])
assert bar_finish.called_once
assert result.exit_code == 1, result.stdout
with patch("wily.commands.build.logger") as logger:
logger.level = "DEBUG"
with patch.object(
wily.commands.build.Bar, "finish", side_effect=RuntimeError("arggh")
) as bar_finish:
runner = CliRunner()
result = runner.invoke(
main.cli, ["--debug", "--path", tmpdir, "build", "test.py"]
)
assert bar_finish.called_once
assert result.exit_code == 1, result.stdout
def test_build(tmpdir):
"""
Test that build works in a basic repository.
"""
repo = Repo.init(path=tmpdir)
tmppath = pathlib.Path(tmpdir)
# Write a test file to the repo
with open(tmppath / "test.py", "w") as test_txt:
test_txt.write("import abc")
with open(tmppath / ".gitignore", "w") as test_txt:
test_txt.write(".wily/")
index = repo.index
index.add(["test.py", ".gitignore"])
author = Actor("An author", "author@example.com")
committer = Actor("A committer", "committer@example.com")
commit = index.commit("basic test", author=author, committer=committer)
with patch("wily.logger") as logger:
runner = CliRunner()
result = runner.invoke(
main.cli, ["--debug", "--path", tmpdir, "build", "test.py"]
)
assert result.exit_code == 0, result.stdout
cache_path = tmpdir / ".wily"
assert cache_path.exists()
index_path = tmpdir / ".wily" / "git" / "index.json"
assert index_path.exists()
rev_path = tmpdir / ".wily" / "git" / commit.name_rev.split(" ")[0] + ".json"
assert rev_path.exists()
def test_build_twice(tmpdir):
"""
Test that build works when run twice.
"""
repo = Repo.init(path=tmpdir)
tmppath = pathlib.Path(tmpdir)
# Write a test file to the repo
with open(tmppath / "test.py", "w") as test_txt:
test_txt.write("import abc")
with open(tmppath / ".gitignore", "w") as test_txt:
test_txt.write(".wily/")
index = repo.index
index.add(["test.py", ".gitignore"])
author = Actor("An author", "author@example.com")
committer = Actor("A committer", "committer@example.com")
commit = index.commit("basic test", author=author, committer=committer)
runner = CliRunner()
result = runner.invoke(main.cli, ["--debug", "--path", tmpdir, "build", "test.py"])
assert result.exit_code == 0, result.stdout
cache_path = tmpdir / ".wily"
assert cache_path.exists()
index_path = tmpdir / ".wily" / "git" / "index.json"
assert index_path.exists()
rev_path = tmpdir / ".wily" / "git" / commit.name_rev.split(" ")[0] + ".json"
assert rev_path.exists()
# Write a test file to the repo
with open(tmppath / "test.py", "w") as test_txt:
test_txt.write("import abc\nfoo = 1")
index.add(["test.py"])
commit2 = index.commit("basic test", author=author, committer=committer)
result = runner.invoke(main.cli, ["--debug", "--path", tmpdir, "build", "test.py"])
assert result.exit_code == 0, result.stdout
cache_path = tmpdir / ".wily"
assert cache_path.exists()
index_path = tmpdir / ".wily" / "git" / "index.json"
assert index_path.exists()
rev_path = tmpdir / ".wily" / "git" / commit.name_rev.split(" ")[0] + ".json"
assert rev_path.exists()
rev_path2 = tmpdir / ".wily" / "git" / commit2.name_rev.split(" ")[0] + ".json"
assert rev_path2.exists()
def test_build_no_commits(tmpdir):
"""
Test that build fails cleanly with no commits
"""
repo = Repo.init(path=tmpdir)
runner = CliRunner()
result = runner.invoke(
main.cli, ["--debug", "--path", tmpdir, "build", tmpdir, "--skip-ignore-check"]
)
assert result.exit_code == 1, result.stdout
def test_build_dirty_repo(builddir):
"""
Test that build fails cleanly with a dirty repo
"""
tmppath = pathlib.Path(builddir)
with open(tmppath / "test.py", "w") as test_txt:
test_txt.write("import abc\nfoo = 1")
runner = CliRunner()
result = runner.invoke(main.cli, ["--debug", "--path", builddir, "build", builddir])
assert result.exit_code == 1, result.stdout
def test_build_no_git_history(tmpdir):
repo = Repo.init(path=tmpdir)
with patch("wily.logger") as logger:
runner = CliRunner()
result = runner.invoke(main.cli, ["--path", tmpdir, "build", "src/test.py"])
assert result.exit_code == 1, result.stdout
archivers = {name for name in ALL_ARCHIVERS.keys()}
@pytest.mark.parametrize("archiver", archivers)
def test_build_archiver(gitdir, archiver):
"""
Test the build against each type of archiver
"""
with patch("wily.logger") as logger:
runner = CliRunner()
result = runner.invoke(
main.cli, ["--path", gitdir, "build", "src/test.py", "-a", archiver]
)
assert result.exit_code == 0, result.stdout
cache_path = gitdir / ".wily"
assert cache_path.exists()
index_path = gitdir / ".wily" / archiver / "index.json"
assert index_path.exists()
| 31.962185
| 88
| 0.625739
| 0
| 0
| 0
| 0
| 572
| 0.075194
| 0
| 0
| 2,207
| 0.290128
|
e3aa733a9aa92608aebcdce4ac3a723c8a9e99a6
| 356
|
py
|
Python
|
authz/test/test_obp_helper.py
|
shivdeep-singh/conversational-ai-chatbot
|
b67802a96b3fe3d64457931a8cbf8bf03442fd0d
|
[
"BSD-3-Clause"
] | 11
|
2021-09-09T16:16:48.000Z
|
2022-03-31T21:25:46.000Z
|
authz/test/test_obp_helper.py
|
shivdeep-singh/conversational-ai-chatbot
|
b67802a96b3fe3d64457931a8cbf8bf03442fd0d
|
[
"BSD-3-Clause"
] | 1
|
2022-02-10T06:08:11.000Z
|
2022-02-10T06:08:11.000Z
|
authz/test/test_obp_helper.py
|
shivdeep-singh/conversational-ai-chatbot
|
b67802a96b3fe3d64457931a8cbf8bf03442fd0d
|
[
"BSD-3-Clause"
] | 12
|
2021-09-19T10:39:27.000Z
|
2022-03-09T05:17:05.000Z
|
import unittest
from zmq_integration_lib import RPCClient, RPCServer
import unittest.mock as mock
class TestOBPHelper(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def logout(self, mock_zmq):
self.assertTrue(True)
def get_login_token(self, mock_zmq):
self.assertTrue(True)
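# The two helpers above accept a "mock_zmq" argument but are not wired to @mock.patch in this
# file. The class below is a hypothetical sketch of how the imported mock machinery could be
# used to patch the RPC client inside a test; it does not reflect the original test intent.
class TestOBPHelperPatchSketch(unittest.TestCase):
    @mock.patch("zmq_integration_lib.RPCClient")
    def test_rpc_client_is_patched(self, mock_rpc_client):
        # within this test, RPCClient has been replaced by a MagicMock
        self.assertIsInstance(mock_rpc_client, mock.MagicMock)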
| 18.736842
| 52
| 0.676966
| 250
| 0.702247
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
e3aa8e55cd8c639086caeeedaf341d4860bf12f5
| 1,657
|
py
|
Python
|
cvtData.py
|
leduchust/ST-GCN_HAR
|
778e7931b24eaa7d78b5a61216bb9a7a2ad6ab9e
|
[
"MIT"
] | null | null | null |
cvtData.py
|
leduchust/ST-GCN_HAR
|
778e7931b24eaa7d78b5a61216bb9a7a2ad6ab9e
|
[
"MIT"
] | null | null | null |
cvtData.py
|
leduchust/ST-GCN_HAR
|
778e7931b24eaa7d78b5a61216bb9a7a2ad6ab9e
|
[
"MIT"
] | null | null | null |
import os
import numpy as np
import tqdm as tqdm
def cvt_Data():
if not os.path.exists('./fphab_data/newData'):
os.mkdir('./fphab_data/newData')
#for i in range(1,7):
# os.mkdir('./fphab_data/newData/Subject_'+str(i))
subject_list=os.listdir('./fphab_data/data')
for s in subject_list:
action_list=os.listdir('./fphab_data/data/'+s)
index=0
for a in action_list:
index+=1
number=os.listdir('./fphab_data/data/'+s+'/'+a)
datas=[]
for i in number:
lines= open('./fphab_data/data/'+s+'/'+a+'/'+i+'/skeleton.txt','rt').read().strip().split('\n')
f=open('./fphab_data/newData/'+str(index)+'_'+s+'_'+i+'.txt','w')
for l in lines:
frame_data = l.strip().split(' ')
frame_data = frame_data[1:]
count=1
for fr in frame_data:
fr=str(fr)
#f.write(fr)
f.write(fr+' ')
if count%3==0:
f.write('\n')
count+=1
#frame_data.reshape(21,3)
#listToString = (' '.join(str(elm) for elm in frame_data))
#f.write('\n')
#f.write(listToString)
cvt_Data()
print('done')
| 36.021739
| 116
| 0.388654
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 421
| 0.254074
|
e3aac0da41e8ebd49fe3952b0e96fb25ef7523c4
| 2,412
|
py
|
Python
|
src/zope/app/applicationcontrol/browser/runtimeinfo.py
|
zopefoundation/zope.app.applicationcontrol
|
de7b160dde9ce01f65af5412a984065c5a1a9284
|
[
"ZPL-2.1"
] | null | null | null |
src/zope/app/applicationcontrol/browser/runtimeinfo.py
|
zopefoundation/zope.app.applicationcontrol
|
de7b160dde9ce01f65af5412a984065c5a1a9284
|
[
"ZPL-2.1"
] | 4
|
2017-05-02T18:43:09.000Z
|
2021-09-20T06:29:14.000Z
|
src/zope/app/applicationcontrol/browser/runtimeinfo.py
|
zopefoundation/zope.app.applicationcontrol
|
de7b160dde9ce01f65af5412a984065c5a1a9284
|
[
"ZPL-2.1"
] | 1
|
2015-04-03T07:25:44.000Z
|
2015-04-03T07:25:44.000Z
|
##############################################################################
#
# Copyright (c) 2001, 2002 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Define runtime information view component for Application Control
"""
__docformat__ = 'restructuredtext'
from zope.app.applicationcontrol.interfaces import IRuntimeInfo
from zope.app.applicationcontrol.i18n import ZopeMessageFactory as _
class RuntimeInfoView(object):
_fields = (
"ZopeVersion",
"PythonVersion",
"PythonPath",
"SystemPlatform",
"PreferredEncoding",
"FileSystemEncoding",
"CommandLine",
"ProcessId",
"DeveloperMode",
)
_unavailable = _("Unavailable")
def runtimeInfo(self):
try:
ri = IRuntimeInfo(self.context)
except TypeError:
formatted = dict.fromkeys(self._fields, self._unavailable)
formatted["Uptime"] = self._unavailable
else:
formatted = self._getInfo(ri)
return formatted
def _getInfo(self, ri):
formatted = {}
for name in self._fields:
value = self._unavailable
try:
value = getattr(ri, "get" + name)()
except ValueError: # pragma: no cover
pass
formatted[name] = value
formatted["Uptime"] = self._getUptime(ri)
return formatted
def _getUptime(self, ri):
        # format the uptime like the unix "uptime" command
uptime = int(ri.getUptime())
minutes, seconds = divmod(uptime, 60)
hours, minutes = divmod(minutes, 60)
days, hours = divmod(hours, 24)
return _('${days} day(s) ${hours}:${minutes}:${seconds}',
mapping={'days': '%d' % days,
'hours': '%02d' % hours,
'minutes': '%02d' % minutes,
'seconds': '%02d' % seconds})
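# Worked example of the divmod chain in _getUptime (illustrative number only, not real uptime data):
# 93784 seconds -> divmod(93784, 60) = (1563, 4), divmod(1563, 60) = (26, 3),
# divmod(26, 24) = (1, 2), i.e. "1 day(s) 02:03:04".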
| 33.5
| 78
| 0.563018
| 1,524
| 0.631841
| 0
| 0
| 0
| 0
| 0
| 0
| 1,041
| 0.431592
|
e3aad4147f45eb6d3a2f6a2928f807f8445336c7
| 1,171
|
py
|
Python
|
helper/storageHelper.py
|
LHGames-2018/DCI5espaces
|
8f71ca3b6cf2bae78822d8a4a8546b5482eaa627
|
[
"MIT"
] | null | null | null |
helper/storageHelper.py
|
LHGames-2018/DCI5espaces
|
8f71ca3b6cf2bae78822d8a4a8546b5482eaa627
|
[
"MIT"
] | null | null | null |
helper/storageHelper.py
|
LHGames-2018/DCI5espaces
|
8f71ca3b6cf2bae78822d8a4a8546b5482eaa627
|
[
"MIT"
] | 5
|
2017-10-07T14:54:28.000Z
|
2018-09-27T20:16:59.000Z
|
import json
import os.path
class StorageHelper:
__document = None
__path = None
@staticmethod
def write(key, data):
StorageHelper.__init()
StorageHelper.__document[key] = json.dumps(data)
StorageHelper.__store()
@staticmethod
def read(key):
StorageHelper.__init()
data = StorageHelper.__document[key]
if data is None:
return None
return json.loads(data)
@staticmethod
def __init():
if StorageHelper.__path is None:
if 'LOCAL_STORAGE' in os.environ:
StorageHelper.__path = os.environ['LOCAL_STORAGE'] + '/document.json'
else:
StorageHelper.__path = '/data/document.json'
if StorageHelper.__document is None:
if os.path.isfile(StorageHelper.__path) is False:
StorageHelper.__document = dict()
else:
file = open(StorageHelper.__path)
StorageHelper.__document = json.load(file)
@staticmethod
def __store():
with open(StorageHelper.__path, 'w+') as file:
json.dump(StorageHelper.__document, file)
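# Hypothetical usage sketch (not part of the original module): round-trip a small dict through the
# helper. The storage path falls back to /data/document.json unless LOCAL_STORAGE is set.
if __name__ == '__main__':
    os.environ.setdefault('LOCAL_STORAGE', '.')  # keep the example writable in the current directory
    StorageHelper.write('last_position', {'x': 3, 'y': 7})
    print(StorageHelper.read('last_position'))   # -> {'x': 3, 'y': 7}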
| 27.880952
| 85
| 0.599488
| 1,141
| 0.974381
| 0
| 0
| 1,057
| 0.902647
| 0
| 0
| 71
| 0.060632
|
e3ab35bc88f90fb1279165d05f8411f9b2a64d26
| 12,383
|
py
|
Python
|
ddot/cx_services_old-8-31-17/align.py
|
pupster90/ddot2
|
1952bff30383b35dff72b332592e1471201d40f3
|
[
"MIT"
] | 1
|
2018-11-08T14:41:43.000Z
|
2018-11-08T14:41:43.000Z
|
ddot/cx_services_old-8-31-17/align.py
|
pupster90/ddot2
|
1952bff30383b35dff72b332592e1471201d40f3
|
[
"MIT"
] | null | null | null |
ddot/cx_services_old-8-31-17/align.py
|
pupster90/ddot2
|
1952bff30383b35dff72b332592e1471201d40f3
|
[
"MIT"
] | null | null | null |
import ndex.client as nc
from ndex.networkn import NdexGraph
import io
import json
from IPython.display import HTML
from time import sleep
import os, time, tempfile
import sys
import time
import logging
import grpc
import networkx as nx
import cx_pb2
import cx_pb2_grpc
import numpy as np
import inspect
from concurrent import futures
from itertools import combinations
from subprocess import Popen, PIPE, STDOUT
import pandas as pd
from ddot import Ontology, align_hierarchies
from ddot.utils import update_nx_with_alignment
from ddot.cx_services.cx_utils import yield_ndex, required_params, cast_params
from ddot.config import default_params
_ONE_DAY_IN_SECONDS = 60 * 60 * 24
verbose = True
class CyServiceServicer(cx_pb2_grpc.CyServiceServicer):
def format_params(self, params):
required = [
'ndex_user',
'ndex_pass',
'ndex_server',
'ont1_ndex_uuid',
'ont2_ndex_uuid',
]
required_params(params, required)
cast = [
('iterations', int),
('threads', int),
('ont1_ndex_uuid', str),
('ont2_ndex_uuid', str)
]
cast_params(params, cast)
def StreamElements(self, element_iterator, context):
try:
params = {
'name' : 'Data-Driven Ontology',
'ont1_ndex_uuid' : None,
'ont2_ndex_uuid' : None,
'iterations' : 3,
'threads' : 4
}
params.update(default_params)
G, params, errors = self.read_element_stream(element_iterator, params)
self.format_params(params)
if verbose:
print('Parameters:', params)
## Read graphs using NDEx client
hier1, hier2 = self.read_hierarchies(params)
if True:
hier1_ont = Ontology.from_NdexGraph(hier1)
hier2_ont = Ontology.from_NdexGraph(hier2)
print('Summary of hier1_ont:', hier1_ont.summary())
print('Summary of hier2_ont:', hier2_ont.summary())
hier1_collapsed, hier2_collapsed = Ontology.mutual_collapse(hier1_ont, hier2_ont, verbose=True)
assert len(hier1_collapsed.terms) < 3000, len(hier1_collapsed.terms)
assert len(hier2_collapsed.terms) < 3000, len(hier2_collapsed.terms)
if verbose:
print 'Aligning hierarchies'
alignment = align_hierarchies(
hier1_collapsed,
hier2_collapsed,
params['iterations'],
params['threads'])
if verbose:
print('One-to-one term alignments:', alignment.shape[0])
print(alignment.iloc[:30,:])
update_nx_with_alignment(hier1, alignment)
ont_url = hier1.upload_to(params['ndex_server'],
params['ndex_user'],
params['ndex_pass'])
if verbose:
print 'ontology_url:', ont_url
for elt in yield_ndex(ont_url):
yield elt
else:
for caught_error in errors:
error = self.create_internal_crash_error(caught_error.message, 500)
log_error(error)
yield error
except Exception as e:
message = "Unexpected error: " + str(e)
error = self.create_internal_crash_error(message, 500)
log_error(error)
import traceback
print traceback.print_exc()
yield error
def read_hierarchies(self, params):
# Read hierarchy 1
hier1 = NdexGraph(server=params['ndex_server'],
username=params['ndex_user'],
password=params['ndex_pass'],
uuid=params['ont1_ndex_uuid'])
# Read hierarchy 2
hier2 = NdexGraph(server=params['ndex_server'],
username=params['ndex_user'],
password=params['ndex_pass'],
uuid=params['ont2_ndex_uuid'])
return hier1, hier2
def stream_ontology(self, ontology, term_sizes, term_2_uuid):
node_2_id = {}
node_id = 0
for node_name in ontology.genes:
yield self.create_node(node_id, node_name)
yield self.create_nodeAttribute(node_id, 'Gene_or_Term', 'Gene')
yield self.create_nodeAttribute(node_id, 'Size', '1')
node_2_id[node_name] = node_id
node_id += 1
for node_name in ontology.terms:
yield self.create_node(node_id, node_name)
yield self.create_nodeAttribute(node_id, 'Gene_or_Term', 'Term')
yield self.create_nodeAttribute(node_id, 'ndex:internalLink', term_2_uuid[node_name])
yield self.create_nodeAttribute(node_id, 'Size', str(term_sizes[node_name]))
node_2_id[node_name] = node_id
node_id += 1
edge_id = 0
for g in ontology.genes:
for t_i in ontology.gene_2_terms[g]:
t = ontology.terms[t_i]
yield self.create_edge(edge_id, node_2_id[g], node_2_id[t])
yield self.create_edgeAttribute(edge_id, 'Relation', 'Gene-Term Annotation')
edge_id += 1
for p, c_list in ontology.term_2_terms.iteritems():
for c in c_list:
yield self.create_edge(edge_id, node_2_id[c], node_2_id[p])
yield self.create_edgeAttribute(edge_id, 'Relation', 'Child-Parent Hierarchical Relation')
edge_id += 1
def upload_subnetworks_2_ndex(self, ontology, arr, arr_genes_index,
ndex_server, ndex_user, ndex_pass, name):
"""Push subnetworks"""
#print ontology.get_term_2_genes()
term_2_url = {}
for t in ontology.terms:
#print 't:', t
genes = np.array([ontology.genes[g] for g in ontology.get_term_2_genes()[t]])
#print 'genes:', genes
idx = np.array([arr_genes_index[g] for g in genes])
#print 'idx:', idx
subarr = arr[idx,:][:,idx]
# Set nan to 0
subarr[np.isnan(subarr)] = 0
row, col = subarr.nonzero()
row, col = row[row < col], col[row < col]
G = NdexGraph()
G.create_from_edge_list(zip(genes[row], genes[col]))
for i in np.arange(row.size):
G.set_edge_attribute(i+1, "similarity", str(subarr[row[i], col[i]]))
G.set_name('%s supporting network for CLIXO:%s' % (name, t))
G.set_network_attribute('Description', '%s supporting network for CLIXO:%s' % (name, t))
ndex_url = G.upload_to(ndex_server, ndex_user, ndex_pass)
term_2_url[t] = ndex_url
return term_2_url
def create_node(self, node_id, node_name):
element = cx_pb2.Element()
node = element.node
node.id = node_id
node.name = node_name
return element
def create_nodeAttribute(self, node_id, key, val):
element = cx_pb2.Element()
attr = element.nodeAttribute
attr.nodeId = node_id
attr.name = key
attr.value = val
return element
def create_edge(self, edge_id, node1, node2):
element = cx_pb2.Element()
edge = element.edge
edge.id = edge_id
edge.sourceId = node1
edge.targetId = node2
return element
def create_edgeAttribute(self, edge_id, key, val):
element = cx_pb2.Element()
attr = element.edgeAttribute
attr.edgeId = edge_id
attr.name = key
attr.value = val
return element
# def create_output_attribute(self, node_id, value, attribute_name, suffix):
# element = cx_pb2.Element()
# attr = element.nodeAttribute
# attr.nodeId = node_id
# attr.name = attribute_name + suffix
# attr.value = value
# return element
def create_internal_crash_error(self, message, status):
element = cx_pb2.Element()
error = element.error
error.status = status
error.code = 'cy://align-hierarchies/' + str(status)
error.message = message
error.link = 'http://align-hierarchies'
return element
def read_element_stream(self, element_iter, parameters):
errors = []
edgesAttr_dict = {}
nodesAttr_dict = {}
edges_dict = {}
nodes_dict = {}
for element in element_iter:
ele_type = element.WhichOneof('value')
if ele_type == 'error':
errors.append(element.error)
elif ele_type == 'parameter':
param = element.parameter
parameters[param.name] = param.value
elif ele_type == 'node':
node = element.node
nodes_dict[node.id] = node.name
elif ele_type == 'edge':
edge = element.edge
edges_dict[edge.id] = (edge.sourceId, edge.targetId)
elif ele_type == 'nodeAttribute':
pass
elif ele_type == 'edgeAttribute':
edgeAttr = element.edgeAttribute
if edgesAttr_dict.has_key(edgeAttr.name):
edgesAttr_dict[edgeAttr.name][edgeAttr.edgeId] = edgeAttr.value
else:
edgesAttr_dict[edgeAttr.name] = {edgeAttr.edgeId : edgeAttr.value}
G = nx.Graph()
for n_id, u in nodes_dict.iteritems():
G.add_node(u, node_id=n_id)
edge_attributes_list = edgesAttr_dict.keys()
for e_id, (u, v) in edges_dict.iteritems():
G.add_edge(nodes_dict[u], nodes_dict[v],
attr_dict={k : edgesAttr_dict[k][e_id] for k in edge_attributes_list if edgesAttr_dict[k].has_key(e_id)},
edge_id=e_id)
return G, parameters, errors
# def read_element_stream(self, element_iter, parameters):
# errors = []
# edges_dict = {}
# nodes_dict = {}
# for element in element_iter:
# ele_type = element.WhichOneof('value')
# if ele_type == 'error':
# errors.append(element.error)
# elif ele_type == 'parameter':
# param = element.parameter
# parameters[param.name] = param.value
# elif ele_type == 'node':
# node = element.node
# nodes_dict[node.id] = node.name
# elif ele_type == 'edge':
# edge = element.edge
# if edges_dict.has_key(edge.id):
# edges_dict[edge.id][:2] = [edge.sourceId, edge.targetId]
# else:
# edges_dict[edge.id] = [edge.sourceId, edge.targetId, None]
# elif ele_type == 'nodeAttribute':
# pass
# elif ele_type == 'edgeAttribute':
# edgeAttr = element.edgeAttribute
# if edgeAttr.name == 'similarity':
# if edges_dict.has_key(edgeAttr.edgeId):
# edges_dict[edgeAttr.edgeId][2] = float(edgeAttr.value)
# else:
# edges_dict[edgeAttr.edgeId] = [None, None, float(edgeAttr.value)]
# return (nodes_dict, edges_dict), parameters, errors
def log_info(message):
logging.info(message)
def log_error(message):
logging.error(message)
def serve():
server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
cx_pb2_grpc.add_CyServiceServicer_to_server(
CyServiceServicer(), server)
server.add_insecure_port('0.0.0.0:8081')
server.start()
try:
while True:
time.sleep(_ONE_DAY_IN_SECONDS)
except KeyboardInterrupt:
server.stop(0)
if __name__ == '__main__':
logging.basicConfig(stream=sys.stdout, level=logging.INFO, format='%(asctime)s %(message)s')
log_info("Listening for requests on '0.0.0.0:8081'")
serve()
| 36.313783
| 128
| 0.562868
| 11,022
| 0.890091
| 4,062
| 0.32803
| 0
| 0
| 0
| 0
| 2,700
| 0.218041
|
e3abcdda55715b1f38a3a0acd05e2b4c08e37048
| 3,509
|
py
|
Python
|
biosignalsnotebooks/build/lib/biosignalsnotebooks/__init__.py
|
csavur/biosignalsnotebooks
|
c99596741a854c58bdefb429906023ac48ddc3b7
|
[
"MIT"
] | 1
|
2020-06-26T05:05:11.000Z
|
2020-06-26T05:05:11.000Z
|
biosignalsnotebooks/build/lib/biosignalsnotebooks/__init__.py
|
csavur/biosignalsnotebooks
|
c99596741a854c58bdefb429906023ac48ddc3b7
|
[
"MIT"
] | null | null | null |
biosignalsnotebooks/build/lib/biosignalsnotebooks/__init__.py
|
csavur/biosignalsnotebooks
|
c99596741a854c58bdefb429906023ac48ddc3b7
|
[
"MIT"
] | null | null | null |
"""
OPENSIGNALSFACTORY PACKAGE INITIALISATION FILE (WITH IMPORT STATEMENTS)
The main purpose of the biosignalsnotebooks package is to support users of PLUX acquisition
devices, such as biosignalsplux or bitalino, in processing tasks that can be applied to the
acquired electrophysiological signals, namely ECG, EMG...
This package has been developed as part of the "OpenSignals Tools" project, which offers a set of
Jupyter Notebooks (tutorials) explaining step by step how the user can execute the previously
mentioned processing tasks (such as detecting muscular activation from an EMG signal, determining
the duration of each cardiac cycle from an ECG acquisition, or monitoring fatigue by generating an
EMG median power frequency time series).
At the end of each Notebook, the corresponding biosignalsnotebooks function, which condenses the
processing functionality presented step by step, is referenced.
Despite being 'part' of an integrated solution for OpenSignals users, this package can be used
independently.
Package Documentation
---------------------
The docstring presented at the start of each module function will help the user to use all
biosignalsnotebooks functions correctly and effectively.
A full guide that collects all the function docstrings is available for download at:
...
OpenSignals Tools Project Repository
------------------------------------
More information about the project and the respective files is available at:
https://github.com/biosignalsplux/biosignalsnotebooks
Available Modules
-----------------
aux_functions
    Includes a set of auxiliary functions that are invoked by other biosignalsnotebooks modules.
    This module has a 'private' classification, i.e., it was not specifically designed for users.
__notebook_support__
Set of functions invoked in OpenSignals Tools Notebooks to present some graphical results.
    These functions are designed for a single purpose, but they are made available in case the user
    wants to explore graphical functionality in an example format.
conversion
    Module responsible for defining functions that convert Raw units (available in the
    acquisition files returned by OpenSignals) and sample units to physical units like mV, A, ºC,
    s, ..., according to the sensor under analysis.
detect
Contains functions intended to detect events on electrophysiological signals.
extract
    Allows the user to extract multiple parameters from a specific electrophysiological
    signal at once.
open
Module dedicated to read/load data from .txt and .h5 files generated by OpenSignals.
With the available functions the user can easily access data inside files (signal samples)
together with the metadata in the file header.
process
Processing capabilities that are more general than the categories of the remaining modules.
signal_samples
    A module that gives easy access to the biosignalsnotebooks dataset/library of signals (used in
    OpenSignals Tools Notebooks).
visualise
Graphical data representation functions based on the application of Bokeh main functionalities.
/\
"""
from .conversion import *
from .detect import *
from .extract import *
from .load import *
from .process import *
from .visualise import *
from .signal_samples import *
from .factory import *
from .synchronisation import *
from .train_and_classify import *
from .__notebook_support__ import *
# 11/10/2018 16h45m :)
| 43.320988
| 103
| 0.785694
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 3,182
| 0.906553
|