Dataset schema (one record per source file; ⌀ marks columns that contain nulls):

| column | dtype | range |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 5 – 2.06M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 3 – 248 |
| max_stars_repo_name | string | length 5 – 125 |
| max_stars_repo_head_hexsha | string | length 40 – 78 |
| max_stars_repo_licenses | list | length 1 – 10 |
| max_stars_count | int64 | 1 – 191k ⌀ |
| max_stars_repo_stars_event_min_datetime | string | length 24 ⌀ |
| max_stars_repo_stars_event_max_datetime | string | length 24 ⌀ |
| max_issues_repo_path | string | length 3 – 248 |
| max_issues_repo_name | string | length 5 – 125 |
| max_issues_repo_head_hexsha | string | length 40 – 78 |
| max_issues_repo_licenses | list | length 1 – 10 |
| max_issues_count | int64 | 1 – 67k ⌀ |
| max_issues_repo_issues_event_min_datetime | string | length 24 ⌀ |
| max_issues_repo_issues_event_max_datetime | string | length 24 ⌀ |
| max_forks_repo_path | string | length 3 – 248 |
| max_forks_repo_name | string | length 5 – 125 |
| max_forks_repo_head_hexsha | string | length 40 – 78 |
| max_forks_repo_licenses | list | length 1 – 10 |
| max_forks_count | int64 | 1 – 105k ⌀ |
| max_forks_repo_forks_event_min_datetime | string | length 24 ⌀ |
| max_forks_repo_forks_event_max_datetime | string | length 24 ⌀ |
| content | string | length 5 – 2.06M |
| avg_line_length | float64 | 1 – 1.02M |
| max_line_length | int64 | 3 – 1.03M |
| alphanum_fraction | float64 | 0 – 1 |
| count_classes | int64 | 0 – 1.6M |
| score_classes | float64 | 0 – 1 |
| count_generators | int64 | 0 – 651k |
| score_generators | float64 | 0 – 1 |
| count_decorators | int64 | 0 – 990k |
| score_decorators | float64 | 0 – 1 |
| count_async_functions | int64 | 0 – 235k |
| score_async_functions | float64 | 0 – 1 |
| count_documentation | int64 | 0 – 1.04M |
| score_documentation | float64 | 0 – 1 |

Each record below is listed as a metadata header (hexsha/size/ext/lang, then the max_stars, max_issues and max_forks fields), followed by the file content and a stats line with the remaining columns.
hexsha 73eee2fb344cce481c9e4bf622cf22c5054e99f7 | size 3,833 | ext py | lang Python
max_stars:  tests/template_tests/filter_tests/test_unordered_list.py | DasAllFolks/django | head 9f427617e4559012e1c2fd8fce46cbe225d8515d | licenses ["BSD-3-Clause"] | stars 1 | 2015-01-09T08:45:54.000Z to 2015-01-09T08:45:54.000Z
max_issues: tests/template_tests/filter_tests/test_unordered_list.py | DasAllFolks/django | head 9f427617e4559012e1c2fd8fce46cbe225d8515d | licenses ["BSD-3-Clause"] | issues null | null to null
max_forks:  tests/template_tests/filter_tests/test_unordered_list.py | DasAllFolks/django | head 9f427617e4559012e1c2fd8fce46cbe225d8515d | licenses ["BSD-3-Clause"] | forks null | null to null
content:
import warnings
from django.test import SimpleTestCase
from django.utils.deprecation import RemovedInDjango20Warning
from django.utils.safestring import mark_safe
from ..utils import render, setup
class UnorderedListTests(SimpleTestCase):
@setup({'unordered_list01': '{{ a|unordered_list }}'})
def test_unordered_list01(self):
output = render('unordered_list01', {'a': ['x>', ['<y']]})
        self.assertEqual(output, '\t<li>x&gt;\n\t<ul>\n\t\t<li>&lt;y</li>\n\t</ul>\n\t</li>')
@setup({'unordered_list02': '{% autoescape off %}{{ a|unordered_list }}{% endautoescape %}'})
def test_unordered_list02(self):
with warnings.catch_warnings():
warnings.simplefilter('ignore', RemovedInDjango20Warning)
output = render('unordered_list02', {'a': ['x>', ['<y']]})
self.assertEqual(output, '\t<li>x>\n\t<ul>\n\t\t<li><y</li>\n\t</ul>\n\t</li>')
@setup({'unordered_list03': '{{ a|unordered_list }}'})
def test_unordered_list03(self):
output = render('unordered_list03', {'a': ['x>', [mark_safe('<y')]]})
        self.assertEqual(output, '\t<li>x&gt;\n\t<ul>\n\t\t<li><y</li>\n\t</ul>\n\t</li>')
@setup({'unordered_list04': '{% autoescape off %}{{ a|unordered_list }}{% endautoescape %}'})
def test_unordered_list04(self):
output = render('unordered_list04', {'a': ['x>', [mark_safe('<y')]]})
self.assertEqual(output, '\t<li>x>\n\t<ul>\n\t\t<li><y</li>\n\t</ul>\n\t</li>')
@setup({'unordered_list05': '{% autoescape off %}{{ a|unordered_list }}{% endautoescape %}'})
def test_unordered_list05(self):
output = render('unordered_list05', {'a': ['x>', ['<y']]})
self.assertEqual(output, '\t<li>x>\n\t<ul>\n\t\t<li><y</li>\n\t</ul>\n\t</li>')
class DeprecatedUnorderedListSyntaxTests(SimpleTestCase):
@setup({'unordered_list01': '{{ a|unordered_list }}'})
def test_unordered_list01(self):
with warnings.catch_warnings():
warnings.simplefilter('ignore', RemovedInDjango20Warning)
output = render('unordered_list01', {'a': ['x>', [['<y', []]]]})
        self.assertEqual(output, '\t<li>x&gt;\n\t<ul>\n\t\t<li>&lt;y</li>\n\t</ul>\n\t</li>')
@setup({'unordered_list02': '{% autoescape off %}{{ a|unordered_list }}{% endautoescape %}'})
def test_unordered_list02(self):
with warnings.catch_warnings():
warnings.simplefilter('ignore', RemovedInDjango20Warning)
output = render('unordered_list02', {'a': ['x>', [['<y', []]]]})
self.assertEqual(output, '\t<li>x>\n\t<ul>\n\t\t<li><y</li>\n\t</ul>\n\t</li>')
@setup({'unordered_list03': '{{ a|unordered_list }}'})
def test_unordered_list03(self):
with warnings.catch_warnings():
warnings.simplefilter('ignore', RemovedInDjango20Warning)
output = render('unordered_list03', {'a': ['x>', [[mark_safe('<y'), []]]]})
        self.assertEqual(output, '\t<li>x&gt;\n\t<ul>\n\t\t<li><y</li>\n\t</ul>\n\t</li>')
@setup({'unordered_list04': '{% autoescape off %}{{ a|unordered_list }}{% endautoescape %}'})
def test_unordered_list04(self):
with warnings.catch_warnings():
warnings.simplefilter('ignore', RemovedInDjango20Warning)
output = render('unordered_list04', {'a': ['x>', [[mark_safe('<y'), []]]]})
self.assertEqual(output, '\t<li>x>\n\t<ul>\n\t\t<li><y</li>\n\t</ul>\n\t</li>')
@setup({'unordered_list05': '{% autoescape off %}{{ a|unordered_list }}{% endautoescape %}'})
def test_unordered_list05(self):
with warnings.catch_warnings():
warnings.simplefilter('ignore', RemovedInDjango20Warning)
output = render('unordered_list05', {'a': ['x>', [['<y', []]]]})
self.assertEqual(output, '\t<li>x>\n\t<ul>\n\t\t<li><y</li>\n\t</ul>\n\t</li>')
stats: avg_line_length 50.434211 | max_line_length 97 | alphanum_fraction 0.60527 | count_classes 3,628 | score_classes 0.946517 | count_generators 0 | score_generators 0 | count_decorators 3,470 | score_decorators 0.905296 | count_async_functions 0 | score_async_functions 0 | count_documentation 1,540 | score_documentation 0.401774
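For orientation, a minimal sketch of what the assertions above exercise: Django's `unordered_list` template filter turns a nested Python list into nested `<li>`/`<ul>` markup. The sketch calls the filter directly rather than through the `setup`/`render` template-test helpers used in the file; the `settings.configure()` call and the list items are only there so it runs standalone.

```python
# Illustrative sketch only: call the unordered_list filter directly.
import django
from django.conf import settings

if not settings.configured:
    settings.configure()   # minimal standalone configuration (assumption)
    django.setup()

from django.template.defaultfilters import unordered_list

# A two-level list renders as an <li> that wraps a nested <ul>.
print(unordered_list(['Africa', ['Egypt', 'Kenya']]))
# Roughly: \t<li>Africa\n\t<ul>\n\t\t<li>Egypt</li>\n\t\t<li>Kenya</li>\n\t</ul>\n\t</li>
```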
hexsha 73efefef974776a64a4da11b84a452736ff6369e | size 5,218 | ext py | lang Python
max_stars:  models/train_classifier.py | jcardenas14/Disaster-Response | head 303cbbc9098e3e1d163e8a6a7bc4bcdc8f134395 | licenses ["MIT"] | stars null | null to null
max_issues: models/train_classifier.py | jcardenas14/Disaster-Response | head 303cbbc9098e3e1d163e8a6a7bc4bcdc8f134395 | licenses ["MIT"] | issues null | null to null
max_forks:  models/train_classifier.py | jcardenas14/Disaster-Response | head 303cbbc9098e3e1d163e8a6a7bc4bcdc8f134395 | licenses ["MIT"] | forks null | null to null
content:
import numpy as np
import nltk
import re
import pandas as pd
import sys
import pickle
from sklearn.pipeline import Pipeline
from sklearn.metrics import classification_report
from sklearn.model_selection import train_test_split
from sklearn.multioutput import MultiOutputClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import f1_score, precision_score, recall_score
from nltk.tokenize import word_tokenize
from nltk.stem import WordNetLemmatizer
from nltk.corpus import stopwords
from sqlalchemy import create_engine
# download nltk libraries and stopwords
nltk.download(['punkt', 'wordnet','stopwords','averaged_perceptron_tagger'])
stop_words = stopwords.words('english')
# function to load data
def load_data(database_filepath):
'''
load data from sql database given the database file path.
Returns:
    X (ndarray): array of messages, one element per row of the table
    Y (ndarray): binary category indicators, one column per category
    categories (Index): category (column) names
'''
engine = create_engine('sqlite:///'+database_filepath)
df = pd.read_sql_table('disaster_cleaned', con=engine)
X = df['message'].values
Y = df.drop(columns = ['id', 'message', 'original', 'genre']).values
categories = df.drop(columns = ['id', 'message', 'original', 'genre']).columns
return X, Y, categories
def tokenize(text):
"""Returns list of processed and tokenized text given input text."""
# tokenize text and convert to lower case
tokens = [tok.lower() for tok in word_tokenize(text)]
# remove stop words and non alpha-numeric characters
tokens = [tok for tok in tokens if tok not in stop_words and tok.isalnum()]
# initialize WordNetLemmatizer object
lemmatizer = WordNetLemmatizer()
# create list of lemmatized tokens
clean_tokens = []
for tok in tokens:
clean_tok = lemmatizer.lemmatize(tok).strip()
clean_tokens.append(clean_tok)
return clean_tokens
def build_model():
'''
Returns multi-output random forest classifier pipeline.
Construct pipeline for count vectorization of input text, TF-IDF
transformation, and initialization of multi-output
random forest classifier. Initialize hyperparameter tuning
using GridSearchCV.
'''
pipeline = Pipeline([
('vect', CountVectorizer(tokenizer=tokenize)),
('tfidf', TfidfTransformer()),
('clf', MultiOutputClassifier(RandomForestClassifier()))
])
parameters = {
'clf__estimator__n_estimators': [50, 100, 200],
'clf__estimator__min_samples_split': [2, 3, 4]
}
cv = GridSearchCV(pipeline, param_grid=parameters)
return cv
def evaluate_model(model, X_test, Y_test, category_names):
'''
Returns f1 score, precision, and recall for each category.
Parameters:
model: trained model object
X_test: DataFrame of test messages
Y_test: DataFrame of test classified categories
category_names: List of category names
Returns:
eval_df: DataFrame of f1 score, precision, and recall per category.
'''
# predict on test data
y_pred = model.predict(X_test)
# calculate f1 score, precision, and recall
f1 = []
precision = []
recall = []
for i in range(y_pred.shape[1]):
f1.append(f1_score(Y_test[:,i], y_pred[:,i], average='macro', zero_division=0))
precision.append(precision_score(Y_test[:,i], y_pred[:,i], average='macro', zero_division=0))
        recall.append(recall_score(Y_test[:,i], y_pred[:,i], average='macro', zero_division=0))
eval_df = pd.DataFrame({"f1":f1, "precision":precision, "recall":recall}, index=category_names)
return eval_df
def save_model(model, model_filepath):
"""Save trained model as pickle file to given path."""
with open(model_filepath, 'wb') as file:
pickle.dump(model, file)
def main():
if len(sys.argv) == 3:
database_filepath, model_filepath = sys.argv[1:]
print('Loading data...\n DATABASE: {}'.format(database_filepath))
X, Y, category_names = load_data(database_filepath)
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2, random_state=21)
print('Building model...')
model = build_model()
print('Training model...')
model.fit(X_train, Y_train)
print('Evaluating model...')
        print(evaluate_model(model, X_test, Y_test, category_names))
print('Saving model...\n MODEL: {}'.format(model_filepath))
save_model(model, model_filepath)
print('Trained model saved!')
else:
print('Please provide the filepath of the disaster messages database '\
'as the first argument and the filepath of the pickle file to '\
'save the model to as the second argument. \n\nExample: python '\
'train_classifier.py ../data/DisasterResponse.db classifier.pkl')
if __name__ == '__main__':
main()
stats: avg_line_length 33.025316 | max_line_length 101 | alphanum_fraction 0.675929 | count_classes 0 | score_classes 0 | count_generators 0 | score_generators 0 | count_decorators 0 | score_decorators 0 | count_async_functions 0 | score_async_functions 0 | count_documentation 2,052 | score_documentation 0.393254
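The script above wires a CountVectorizer, a TfidfTransformer and a MultiOutputClassifier around a RandomForest, then tunes it with GridSearchCV. Below is a rough, self-contained sketch of that pipeline shape fit on toy data; the messages and the two category columns are invented for illustration, and the grid search is omitted.

```python
import numpy as np
from sklearn.pipeline import Pipeline
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from sklearn.multioutput import MultiOutputClassifier
from sklearn.ensemble import RandomForestClassifier

X = ["water needed in the north", "medical help after the storm",
     "roads blocked by flooding", "food and clean water required"]
# Two hypothetical binary categories per message (say, water and medical).
Y = np.array([[1, 0], [0, 1], [0, 0], [1, 0]])

pipeline = Pipeline([
    ("vect", CountVectorizer()),                       # bag-of-words counts
    ("tfidf", TfidfTransformer()),                     # reweight counts by TF-IDF
    ("clf", MultiOutputClassifier(RandomForestClassifier(n_estimators=10))),
])
pipeline.fit(X, Y)
print(pipeline.predict(["need clean water"]))          # shape (1, 2) array of 0/1 labels
```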
hexsha 73f111cc65a7da55125e7eb4f996288413f32c34 | size 3,850 | ext py | lang Python
max_stars:  getauditrecords.py | muzznak/pyviyatools | head 58a99656e0a773370c050de191999fbc98ac5f03 | licenses ["Apache-2.0"] | stars 25 | 2019-04-09T19:52:54.000Z to 2022-03-07T02:11:58.000Z
max_issues: getauditrecords.py | muzznak/pyviyatools | head 58a99656e0a773370c050de191999fbc98ac5f03 | licenses ["Apache-2.0"] | issues 49 | 2018-12-13T15:53:16.000Z to 2022-03-09T15:31:13.000Z
max_forks:  getauditrecords.py | muzznak/pyviyatools | head 58a99656e0a773370c050de191999fbc98ac5f03 | licenses ["Apache-2.0"] | forks 25 | 2019-08-23T19:58:29.000Z to 2022-02-24T16:14:03.000Z
content:
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# getauditrecords.py January 2020
#
# Extract list of audit records from SAS Infrastructure Data Server using REST API.
#
# Examples:
#
# 1. Return list of audit events from all users and applications
# ./getauditrecords.py
#
# Change History
#
# 10JAN2020 Comments added
#
# Copyright © 2018, SAS Institute Inc., Cary, NC, USA. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the License); you may not use this file except in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing permissions and limitations under the License.
#
# Import Python modules
import json
import socket
import argparse, sys
from sharedfunctions import callrestapi,getinputjson,simpleresults,getbaseurl,printresult
# Sample reqval="/audit/entries?filter=and(eq(application,'reports'),eq(state,'success'),ge(timeStamp,'2018-11-20'),le(timeStamp,'2020-11-20T23:59:59.999Z'))&sortBy=timeStamp&limit=1000"
# Parse arguments based on parameters that are passed in on the command line
parser = argparse.ArgumentParser()
parser.add_argument("-a","--application", help="Filter by Application or Service name",default=None)
parser.add_argument("-l","--limit", help="Maximum number of records to display",default='1000')
parser.add_argument("-t","--type", help="Filter by entry Type",default=None)
parser.add_argument("-c","--action", help="Filter by entry Action",default=None)
parser.add_argument("-s","--state", help="Filter by entry State",default=None)
parser.add_argument("-u","--user", help="Filter by Username",default=None)
parser.add_argument("-A","--after", help="Filter entries that are created after the specified timestamp. For example: 2020-01-03 or 2020-01-03T18:15Z",default=None)
parser.add_argument("-B","--before", help="Filter entries that are created before the specified timestamp. For example: 2020-01-03 or 2020-01-03T18:15Z",default=None)
parser.add_argument("-S","--sortby", help="Sort the output ascending by this field",default='timeStamp')
parser.add_argument("-o","--output", help="Output Style", choices=['csv','json','simple','simplejson'],default='csv')
args = parser.parse_args()
appname=args.application
output_style=args.output
sort_order=args.sortby
output_limit=args.limit
username=args.user
entry_type=args.type
entry_action=args.action
entry_state=args.state
ts_after=args.after
ts_before=args.before
# Create list for filter conditions
filtercond=[]
if appname!=None: filtercond.append("eq(application,'"+appname+"')")
if username!=None: filtercond.append("eq(user,'"+username+"')")
if entry_type!=None: filtercond.append("eq(type,'"+entry_type+"')")
if entry_action!=None: filtercond.append("eq(action,'"+entry_action+"')")
if entry_state!=None: filtercond.append("eq(state,'"+entry_state+"')")
if ts_after!=None: filtercond.append("ge(timeStamp,'"+ts_after+"')")
if ts_before!=None: filtercond.append("le(timeStamp,'"+ts_before+"')")
# Construct filter
delimiter = ','
completefilter = 'and('+delimiter.join(filtercond)+')'
# Set request
reqtype = 'get'
reqval = "/audit/entries?filter="+completefilter+"&limit="+output_limit+"&sortBy="+sort_order
# Construct & print endpoint URL
baseurl=getbaseurl()
endpoint=baseurl+reqval
# print("REST endpoint: " +endpoint)
# Make REST API call, and process & print results
files_result_json=callrestapi(reqval,reqtype)
cols=['id','timeStamp','type','action','state','user','remoteAddress','application','description','uri']
printresult(files_result_json,output_style,cols)
stats: avg_line_length 43.258427 | max_line_length 189 | alphanum_fraction 0.751169 | count_classes 0 | score_classes 0 | count_generators 0 | score_generators 0 | count_decorators 0 | score_decorators 0 | count_async_functions 0 | score_async_functions 0 | count_documentation 2,293 | score_documentation 0.59543
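The heart of this tool is the filter expression it assembles for the /audit/entries endpoint. Here is a standalone sketch of just that string construction, with no SAS Viya connection required; the `build_audit_filter` helper name and the example values are illustrative, not part of pyviyatools.

```python
# Same eq()/ge()/le() clauses as above, joined inside and(...).
def build_audit_filter(application=None, user=None, after=None, before=None):
    cond = []
    if application is not None:
        cond.append("eq(application,'{}')".format(application))
    if user is not None:
        cond.append("eq(user,'{}')".format(user))
    if after is not None:
        cond.append("ge(timeStamp,'{}')".format(after))
    if before is not None:
        cond.append("le(timeStamp,'{}')".format(before))
    return "and(" + ",".join(cond) + ")"

f = build_audit_filter(application="reports", after="2020-01-01")
print("/audit/entries?filter=" + f + "&limit=1000&sortBy=timeStamp")
# /audit/entries?filter=and(eq(application,'reports'),ge(timeStamp,'2020-01-01'))&limit=1000&sortBy=timeStamp
```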
hexsha 73f1a91dc045f413a69942d834270e344133624f | size 6,345 | ext py | lang Python
max_stars:  async_blp/handlers.py | rockscie/async_blp | head acb8777ccf2499681bde87d76ca780b61219699c | licenses ["MIT"] | stars 12 | 2019-08-05T16:56:54.000Z to 2021-02-02T11:09:37.000Z
max_issues: async_blp/handlers.py | lightning-like/async_blp | head acb8777ccf2499681bde87d76ca780b61219699c | licenses ["MIT"] | issues null | null to null
max_forks:  async_blp/handlers.py | lightning-like/async_blp | head acb8777ccf2499681bde87d76ca780b61219699c | licenses ["MIT"] | forks 5 | 2019-12-08T15:43:13.000Z to 2021-11-14T08:38:07.000Z
content:
"""
File contains handler for ReferenceDataRequest
"""
import asyncio
import uuid
from typing import Dict
from typing import List
from .base_handler import HandlerBase
from .base_request import RequestBase
from .requests import Subscription
from .utils.blp_name import RESPONSE_ERROR
from .utils.log import get_logger
# pylint: disable=ungrouped-imports
try:
import blpapi
except ImportError:
from async_blp.utils import env_test as blpapi
LOGGER = get_logger()
class RequestHandler(HandlerBase):
"""
    Handler that receives response events from Bloomberg on a separate thread
    and puts them on the corresponding request queues. Each handler opens its
    own session, sends the requests and processes the incoming responses.
"""
def __init__(self,
session_options: blpapi.SessionOptions,
loop: asyncio.AbstractEventLoop = None):
super().__init__(session_options, loop)
local_methods = {
blpapi.Event.RESPONSE: self._response_handler,
blpapi.Event.PARTIAL_RESPONSE: self._partial_response_handler,
# according to BLPAPI-Core-Developer-Guide section 10.1,
            # REQUEST_STATUS event is sent only with RequestFailure messages
blpapi.Event.REQUEST_STATUS: self._raise_exception,
}
self._method_map.update(local_methods)
async def send_requests(self, requests: List[RequestBase]):
"""
Send requests to Bloomberg
Wait until session is started and required service is opened,
then send requests
"""
await self.session_started.wait()
for request in requests:
corr_id = blpapi.CorrelationId(uuid.uuid4())
self._current_requests[corr_id] = request
# wait until the necessary service is opened
service = await self._get_service(request.service_name)
blp_request = request.create(service)
self._session.sendRequest(blp_request, correlationId=corr_id)
LOGGER.debug('%s: request send:\n%s',
self.__class__.__name__,
blp_request)
@classmethod
def _is_error_msg(cls, msg: blpapi.Message) -> bool:
"""
Return True if msg contains responseError element. It indicates errors
such as lost connection, request limit reached etc.
"""
if msg.hasElement(RESPONSE_ERROR):
LOGGER.debug('%s: error message received:\n%s',
cls.__name__,
msg)
return True
return False
def _partial_response_handler(self, event_: blpapi.Event):
"""
Process blpapi.Event.PARTIAL_RESPONSE events. Send all valid messages
from the given event to the requests with the corresponding
correlation id
"""
for msg in event_:
if self._is_error_msg(msg):
self._close_requests(msg.correlationIds())
continue
for cor_id in msg.correlationIds():
request = self._current_requests[cor_id]
request.send_queue_message(msg)
def _response_handler(self, event_: blpapi.Event):
"""
Process blpapi.Event.RESPONSE events. This is the last event for the
corresponding requests, therefore after processing all messages
        from the event, None will be sent to the corresponding requests.
"""
self._partial_response_handler(event_)
for msg in event_:
self._close_requests(msg.correlationIds())
class SubscriptionHandler(HandlerBase):
"""
    Handler that receives subscription events from Bloomberg on a separate
    thread and puts them on the corresponding request queues. Each handler
    opens its own session. Used for handling subscription requests and responses.
"""
def __init__(self,
session_options: blpapi.SessionOptions,
loop: asyncio.AbstractEventLoop = None):
super().__init__(session_options, loop)
# only for typing
self._current_requests: Dict[blpapi.CorrelationId, Subscription] = {}
local_methods = {
blpapi.Event.SUBSCRIPTION_STATUS: self._subscriber_status_handler,
blpapi.Event.SUBSCRIPTION_DATA: self._subscriber_data_handler,
}
self._method_map.update(local_methods)
def _subscriber_data_handler(self, event_: blpapi.Event):
"""
Redirect data to the request queue.
"""
for msg in event_:
for cor_id in msg.correlationIds():
self._current_requests[cor_id].send_queue_message(msg)
def _subscriber_status_handler(self, event_: blpapi.Event):
"""
Raise exception if something goes wrong
"""
for msg in event_:
if msg.asElement().name() not in ("SubscriptionStarted",
"SubscriptionStreamsActivated",
):
self._raise_exception(msg)
async def subscribe(self, subscriptions: List[Subscription]):
"""
Send subscriptions to Bloomberg
Wait until session is started, then send subscription
"""
await self.session_started.wait()
for subscription in subscriptions:
corr_id = blpapi.CorrelationId(str(uuid.uuid4()))
self._current_requests[corr_id] = subscription
blp_subscription = subscription.create_subscription(corr_id)
self._session.subscribe(blp_subscription)
LOGGER.debug('%s: subscription send:\n%s',
self.__class__.__name__,
blp_subscription)
async def read_subscribers(self, security_id: str = None):
"""
        Return data that has already arrived from Bloomberg, optionally
        restricted to subscriptions containing the given security_id.
"""
if security_id is None:
tasks = [asyncio.create_task(request.process())
for request in self._current_requests.values()]
else:
tasks = [asyncio.create_task(request.process())
for request in self._current_requests.values() if
security_id in request.securities]
requests_result = await asyncio.gather(*tasks)
return requests_result
stats: avg_line_length 33.571429 | max_line_length 78 | alphanum_fraction 0.628684 | count_classes 5,865 | score_classes 0.92435 | count_generators 0 | score_generators 0 | count_decorators 450 | score_decorators 0.070922 | count_async_functions 2,054 | score_async_functions 0.323719 | count_documentation 1,851 | score_documentation 0.291726
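Both handlers above route incoming Bloomberg events through a `_method_map` keyed by event type, which each subclass extends in `__init__`. A simplified, dependency-free analogue of that dispatch pattern is sketched below; the class and event names are stand-ins, not blpapi or async_blp API.

```python
class TinyHandlerBase:
    def __init__(self):
        self._method_map = {}

    def handle(self, event_type, payload):
        # Look up the handler registered for this event type and call it.
        handler = self._method_map.get(event_type)
        if handler is not None:
            handler(payload)


class TinyRequestHandler(TinyHandlerBase):
    def __init__(self):
        super().__init__()
        self._method_map.update({
            "RESPONSE": self._response,
            "PARTIAL_RESPONSE": self._partial_response,
        })

    def _partial_response(self, payload):
        print("partial:", payload)

    def _response(self, payload):
        # A final RESPONSE is processed like a partial one, then closes the request.
        self._partial_response(payload)
        print("request complete")


TinyRequestHandler().handle("RESPONSE", {"field": 42})
```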
hexsha 73f2bc3599ec98d3aba14c518c543be223219c33 | size 4,759 | ext py | lang Python
max_stars:  cytochrome-b6f-nn-np-model-kinetics.py | vstadnyt/cytochrome | head 546aa450fa6dc2758b079aba258e3572dd24d60c | licenses ["MIT"] | stars null | null to null
max_issues: cytochrome-b6f-nn-np-model-kinetics.py | vstadnyt/cytochrome | head 546aa450fa6dc2758b079aba258e3572dd24d60c | licenses ["MIT"] | issues null | null to null
max_forks:  cytochrome-b6f-nn-np-model-kinetics.py | vstadnyt/cytochrome | head 546aa450fa6dc2758b079aba258e3572dd24d60c | licenses ["MIT"] | forks 1 | 2021-09-28T17:17:48.000Z to 2021-09-28T17:17:48.000Z
content:
import cytochrome_lib #This is a cytochrome library
import matplotlib.pyplot as plt
import numpy as np
version = "Last update: Aug 8, 2017"
description = "This code calculates the population distribution in the cytochrome b6f protein and plots kinetic profiles for two different models: \n'nn' and 'np' models \n The outputs are: \n Figure 1: \n Figure 2: The population distributions for the different oxidation states of the cytochrome proteins. \n Figure 3: the resulting absorbance and circular dichroism kinetics for the two models"
print(description)
print(version)
#the exclusions_lst is a list of hemes that are taken into account during calculations (1 - include; 0 - exclude);
#There are 8 values for 4 hemes and 2 dipoles per heme: [Qx_p1, Qy_p1, Qx_n1, Qy_n1, Qx_p2, Qy_p2, Qx_n2, Qy_n2]
## Main part of the code
#This part creates two lists of cyt class instances (see cytochrome library) with different exclusion settings
exclusions_lst = []
exclusions_lst.append([0,0,0,0,0,0,0,0])
exclusions_lst.append([0,0,1,1,0,0,0,0])
exclusions_lst.append([1,1,1,1,0,0,0,0])
exclusions_lst.append([1,1,1,1,0,0,1,1])
exclusions_lst.append([1,1,1,1,1,1,1,1])
cyt_b6f_np = []
for excl in exclusions_lst:
cyt_b6f_np.append(cytochrome_lib.cyt('cytochrome_b6f.txt',excl))
for i in range(len(exclusions_lst)):
cyt_b6f_np[i].read_structure_file()
cyt_b6f_np[i].Hamiltonian()
cyt_b6f_np[i].D_and_R_strength()
cyt_b6f_np[i].spectra_plot()
exclusions_lst = []
exclusions_lst.append([0,0,0,0,0,0,0,0])
exclusions_lst.append([0,0,1,1,0,0,0,0])
exclusions_lst.append([0,0,1,1,0,0,1,1])
exclusions_lst.append([1,1,1,1,0,0,1,1])
exclusions_lst.append([1,1,1,1,1,1,1,1])
cyt_b6f_nn = []
for excl in exclusions_lst:
cyt_b6f_nn.append(cytochrome_lib.cyt('cytochrome_b6f.txt',excl))
for i in range(len(exclusions_lst)):
cyt_b6f_nn[i].read_structure_file()
cyt_b6f_nn[i].Hamiltonian()
cyt_b6f_nn[i].D_and_R_strength()
cyt_b6f_nn[i].spectra_plot()
x_range_nm = cyt_b6f_nn[0].x_range_nm
plt.figure(1)
plt.ion()
plt.subplot(2,2,1)
for i in range(len(exclusions_lst)):
plt.plot(x_range_nm,np.sum(cyt_b6f_nn[i].specR,axis = 0),linewidth=2)
#plt.plot(x_range_nm,np.sum(specR_full,axis = 0),linewidth=5)
#plt.legend(['n1p1','n1n2','n1p2','p1n2','p1p2','n2p2']);
plt.title('cytochrome b6f np model')
plt.subplot(2,2,2)
for i in range(len(exclusions_lst)):
plt.plot(x_range_nm,np.sum(cyt_b6f_np[i].specR,axis = 0),linewidth=2)
#plt.plot(x_range_nm,np.sum(specR_full,axis = 0),linewidth=5)
plt.title('cytochrome b6f nn model')
plt.subplot(2,2,3)
for i in range(len(exclusions_lst)):
plt.plot(x_range_nm,np.sum(cyt_b6f_nn[i].specD,axis = 0),linewidth=2)
#plt.plot(x_range_nm,np.sum(specR_full,axis = 0),linewidth=5)
plt.subplot(2,2,4)
for i in range(len(exclusions_lst)):
plt.plot(x_range_nm,np.sum(cyt_b6f_np[i].specD,axis = 0),linewidth=2)
plt.show()
length = 10000
population = cytochrome_lib.kinetics_solve(np.array([1,1,1,1,0,0,0]),length)
plt.figure(2)
plt.ion()
for i in range(5):
plt.plot(range(length),population[i,:])
plt.title("Population distribution of proteins in different oxydation states")
plt.legend(['0e- state (fully oxydized)','1e- state','2e- state','3e- state','4e- state(fully reduced)'])
plt.show()
Absorbance_lst_b6f_nn = []
Circular_Dichroism_lst_b6f_nn = []
for i in range(5):
Absorbance_lst_b6f_nn.append(population[i,:]*np.sum(np.sum(cyt_b6f_nn[i].specD,axis = 0)))
Circular_Dichroism_lst_b6f_nn.append(population[i,:]*np.sum(np.abs(np.sum(cyt_b6f_nn[i].specR,axis = 0))))
Absorbance_b6f_nn = np.asarray(Absorbance_lst_b6f_nn)
Circular_Dichroism_b6f_nn = np.asarray(Circular_Dichroism_lst_b6f_nn)
Absorbance_lst_b6f_np = []
Circular_Dichroism_lst_b6f_np = []
for i in range(5):
Absorbance_lst_b6f_np.append(population[i,:]*np.sum(np.sum(cyt_b6f_np[i].specD,axis = 0)))
Circular_Dichroism_lst_b6f_np.append(population[i,:]*np.sum(np.abs(np.sum(cyt_b6f_np[i].specR,axis = 0))))
Absorbance_b6f_np = np.asarray(Absorbance_lst_b6f_np)
Circular_Dichroism_b6f_np = np.asarray(Circular_Dichroism_lst_b6f_np)
plt.figure(3)
plt.ion()
plt.title('cytochrome b6f nn and np models')
plt.plot(range(length),np.sum(Absorbance_b6f_nn, axis = 0)/np.max(np.sum(Absorbance_b6f_nn, axis = 0)))
plt.plot(range(length),np.sum(Absorbance_b6f_np, axis = 0)/np.max(np.sum(Absorbance_b6f_np, axis = 0)))
plt.plot(range(length),np.sum(Circular_Dichroism_b6f_nn, axis = 0)/np.max(np.sum(Circular_Dichroism_b6f_nn, axis = 0)))
plt.plot(range(length),np.sum(Circular_Dichroism_b6f_np, axis = 0)/np.max(np.sum(Circular_Dichroism_b6f_np, axis = 0)))
plt.legend(['OD_nn','OD_np','CD_nn','CD_np'])
plt.show()
print "\nCalculations are finished. Please, see figures 1-3"
stats: avg_line_length 36.328244 | max_line_length 394 | alphanum_fraction 0.741963 | count_classes 0 | score_classes 0 | count_generators 0 | score_generators 0 | count_decorators 0 | score_decorators 0 | count_async_functions 0 | score_async_functions 0 | count_documentation 1,408 | score_documentation 0.29586
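The kinetics plots at the end weight each oxidation state's summed spectrum by its population over time, add the states together, and normalise by the maximum. A toy numpy illustration of that arithmetic, using synthetic arrays in place of the ones `cytochrome_lib` produces:

```python
import numpy as np

n_states, n_times = 5, 100
rng = np.random.default_rng(0)
population = rng.random((n_states, n_times))   # population[i, t] of each oxidation state
state_strength = rng.random(n_states)          # e.g. the summed specD of each state

# Scale each state by its population, sum over states, normalise to the maximum.
kinetics = population * state_strength[:, None]
signal = kinetics.sum(axis=0)
print((signal / signal.max()).shape)           # (100,)
```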
hexsha 73f3505bc64c937e900a105ef529d5195af953f8 | size 10,062 | ext py | lang Python
max_stars:  moderation/models.py | raja-creoit/django-moderation | head 627afeeeb272d8d7e8f4893e8418d8942ccb80ba | licenses ["BSD-3-Clause"] | stars null | null to null
max_issues: moderation/models.py | raja-creoit/django-moderation | head 627afeeeb272d8d7e8f4893e8418d8942ccb80ba | licenses ["BSD-3-Clause"] | issues 1 | 2020-01-31T20:37:53.000Z to 2020-01-31T20:37:53.000Z
max_forks:  moderation/models.py | raja-creoit/django-moderation | head 627afeeeb272d8d7e8f4893e8418d8942ccb80ba | licenses ["BSD-3-Clause"] | forks null | null to null
content:
from __future__ import unicode_literals
from django.conf import settings
try:
from django.contrib.contenttypes.fields import GenericForeignKey
except ImportError:
from django.contrib.contenttypes.generic import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from django.db import models, transaction
from django.utils.translation import ugettext_lazy as _
from model_utils import Choices
from . import moderation
from .constants import (MODERATION_READY_STATE,
MODERATION_DRAFT_STATE,
MODERATION_STATUS_REJECTED,
MODERATION_STATUS_APPROVED,
MODERATION_STATUS_PENDING)
from .diff import get_changes_between_models
from .fields import SerializedObjectField
from .managers import ModeratedObjectManager
from .signals import post_moderation, pre_moderation
from .utils import django_19
import datetime
MODERATION_STATES = Choices(
(MODERATION_READY_STATE, 'ready', _('Ready for moderation')),
(MODERATION_DRAFT_STATE, 'draft', _('Draft')),
)
STATUS_CHOICES = Choices(
(MODERATION_STATUS_REJECTED, 'rejected', _("Rejected")),
(MODERATION_STATUS_APPROVED, 'approved', _("Approved")),
(MODERATION_STATUS_PENDING, 'pending', _("Pending")),
)
class ModeratedObject(models.Model):
content_type = models.ForeignKey(ContentType, null=True, blank=True,
on_delete=models.SET_NULL,
editable=False)
object_pk = models.PositiveIntegerField(null=True, blank=True,
editable=False, db_index=True)
content_object = GenericForeignKey(ct_field="content_type",
fk_field="object_pk")
created = models.DateTimeField(auto_now_add=True, editable=False)
updated = models.DateTimeField(auto_now=True)
state = models.SmallIntegerField(choices=MODERATION_STATES,
default=MODERATION_DRAFT_STATE,
editable=False)
status = models.SmallIntegerField(
choices=STATUS_CHOICES,
default=MODERATION_STATUS_PENDING,
editable=False)
by = models.ForeignKey(
getattr(settings, 'AUTH_USER_MODEL', 'auth.User'),
blank=True, null=True, editable=False, on_delete=models.SET_NULL,
related_name='moderated_objects')
on = models.DateTimeField(editable=False, blank=True, null=True)
reason = models.TextField(blank=True, null=True)
changed_object = SerializedObjectField(serialize_format='json',
editable=False)
changed_by = models.ForeignKey(
getattr(settings, 'AUTH_USER_MODEL', 'auth.User'),
blank=True, null=True, editable=True, on_delete=models.SET_NULL,
related_name='changed_by_set')
objects = ModeratedObjectManager()
content_type.content_type_filter = True
def __init__(self, *args, **kwargs):
self.instance = kwargs.get('content_object')
super(ModeratedObject, self).__init__(*args, **kwargs)
def __unicode__(self):
return "%s" % self.changed_object
def __str__(self):
return "%s" % self.changed_object
def save(self, *args, **kwargs):
if self.instance:
self.changed_object = self.instance
super(ModeratedObject, self).save(*args, **kwargs)
class Meta:
verbose_name = _('Moderated Object')
verbose_name_plural = _('Moderated Objects')
ordering = ['status', 'created']
def automoderate(self, user=None):
'''Auto moderate object for given user.
Returns status of moderation.
'''
if user is None:
user = self.changed_by
else:
self.changed_by = user
# No need to save here, both reject() and approve() will save us.
# Just save below if the moderation result is PENDING.
if self.moderator.visible_until_rejected:
changed_object = self.get_object_for_this_type()
else:
changed_object = self.changed_object
status, reason = self._get_moderation_status_and_reason(
changed_object,
user)
if status == MODERATION_STATUS_REJECTED:
self.reject(by=self.by, reason=reason)
elif status == MODERATION_STATUS_APPROVED:
self.approve(by=self.by, reason=reason)
else: # MODERATION_STATUS_PENDING
self.save()
return status
def _get_moderation_status_and_reason(self, obj, user):
'''
Returns tuple of moderation status and reason for auto moderation
'''
reason = self.moderator.is_auto_reject(obj, user)
if reason:
return MODERATION_STATUS_REJECTED, reason
else:
reason = self.moderator.is_auto_approve(obj, user)
if reason:
return MODERATION_STATUS_APPROVED, reason
return MODERATION_STATUS_PENDING, None
def get_object_for_this_type(self):
pk = self.object_pk
obj = self.content_type.model_class()._default_unmoderated_manager.get(pk=pk)
return obj
def get_absolute_url(self):
if hasattr(self.changed_object, 'get_absolute_url'):
return self.changed_object.get_absolute_url()
return None
def get_admin_moderate_url(self):
if django_19():
return "/admin/moderation/moderatedobject/%s/change/" % self.pk
else:
return "/admin/moderation/moderatedobject/%s/" % self.pk
@property
def moderator(self):
model_class = self.content_type.model_class()
return moderation.get_moderator(model_class)
def _send_signals_and_moderate(self, new_status, by, reason):
pre_moderation.send(sender=self.changed_object.__class__,
instance=self.changed_object,
status=new_status)
self._moderate(new_status, by, reason)
post_moderation.send(sender=self.content_type.model_class(),
instance=self.content_object,
status=new_status)
def _moderate(self, new_status, by, reason):
# See register.py pre_save_handler() for the case where the model is
# reset to its old values, and the new values are stored in the
# ModeratedObject. In such cases, on approval, we should restore the
# changes to the base object by saving the one attached to the
# ModeratedObject.
if (self.status == MODERATION_STATUS_PENDING and
new_status == MODERATION_STATUS_APPROVED and
not self.moderator.visible_until_rejected):
base_object = self.changed_object
base_object_force_save = True
else:
# The model in the database contains the most recent data already,
# or we're not ready to approve the changes stored in
# ModeratedObject.
obj_class = self.changed_object.__class__
pk = self.changed_object.pk
base_object = obj_class._default_unmoderated_manager.get(pk=pk)
base_object_force_save = False
if new_status == MODERATION_STATUS_APPROVED:
# This version is now approved, and will be reverted to if
# future changes are rejected by a moderator.
self.state = MODERATION_READY_STATE
self.status = new_status
self.on = datetime.datetime.now()
self.by = by
self.reason = reason
self.save()
if self.moderator.visibility_column:
old_visible = getattr(base_object,
self.moderator.visibility_column)
if new_status == MODERATION_STATUS_APPROVED:
new_visible = True
elif new_status == MODERATION_STATUS_REJECTED:
new_visible = False
else: # MODERATION_STATUS_PENDING
new_visible = self.moderator.visible_until_rejected
if new_visible != old_visible:
setattr(base_object, self.moderator.visibility_column,
new_visible)
base_object_force_save = True
if base_object_force_save:
# avoid triggering pre/post_save_handler
with transaction.atomic(using=None, savepoint=False):
base_object.save_base(raw=True)
# The _save_parents call is required for models with an
# inherited visibility_column.
base_object._save_parents(base_object.__class__, None, None)
if self.changed_by:
self.moderator.inform_user(self.content_object, self.changed_by)
def has_object_been_changed(self, original_obj, only_excluded=False, update_fields=None):
excludes = includes = None
if only_excluded:
if update_fields:
includes = list(update_fields & set(self.moderator.fields_exclude))
else:
includes = self.moderator.fields_exclude
else:
if update_fields:
includes = list(update_fields - set(self.moderator.fields_exclude))
else:
excludes = self.moderator.fields_exclude
changes = get_changes_between_models(original_obj,
self.changed_object,
excludes,
includes)
for change in changes:
left_change, right_change = changes[change].change
if left_change != right_change:
return True
return False
def approve(self, by=None, reason=None):
self._send_signals_and_moderate(MODERATION_STATUS_APPROVED, by, reason)
def reject(self, by=None, reason=None):
self._send_signals_and_moderate(MODERATION_STATUS_REJECTED, by, reason)
stats: avg_line_length 38.7 | max_line_length 93 | alphanum_fraction 0.633174 | count_classes 8,764 | score_classes 0.871 | count_generators 0 | score_generators 0 | count_decorators 142 | score_decorators 0.014113 | count_async_functions 0 | score_async_functions 0 | count_documentation 1,401 | score_documentation 0.139237
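The decision order inside `automoderate()` and `_get_moderation_status_and_reason()` is: an auto-reject reason wins, otherwise an auto-approve reason, otherwise the object stays pending. A standalone sketch of that ordering with a stub moderator; the constant values and the stub rules are arbitrary stand-ins, not django-moderation's own.

```python
REJECTED, APPROVED, PENDING = 0, 1, 2   # stand-in status values

def decide(moderator, obj, user):
    # Auto-reject has priority, then auto-approve, else stay pending.
    reason = moderator.is_auto_reject(obj, user)
    if reason:
        return REJECTED, reason
    reason = moderator.is_auto_approve(obj, user)
    if reason:
        return APPROVED, reason
    return PENDING, None

class StubModerator:
    def is_auto_reject(self, obj, user):
        return "banned user" if user == "spammer" else None
    def is_auto_approve(self, obj, user):
        return "trusted user" if user == "staff" else None

print(decide(StubModerator(), object(), "spammer"))   # (0, 'banned user')
print(decide(StubModerator(), object(), "staff"))     # (1, 'trusted user')
print(decide(StubModerator(), object(), "anyone"))    # (2, None)
```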
hexsha 73f3c138d83e22bb6c02d12e03c089fb61651fa0 | size 3,684 | ext py | lang Python
max_stars:  hygnd/munge.py | thodson-usgs/hygnd | head 04d3596f79350ba19e08851e494c8feb7d68c0e0 | licenses ["MIT"] | stars 2 | 2018-07-27T22:29:27.000Z to 2020-03-04T18:01:47.000Z
max_issues: hygnd/munge.py | thodson-usgs/hygnd | head 04d3596f79350ba19e08851e494c8feb7d68c0e0 | licenses ["MIT"] | issues null | null to null
max_forks:  hygnd/munge.py | thodson-usgs/hygnd | head 04d3596f79350ba19e08851e494c8feb7d68c0e0 | licenses ["MIT"] | forks null | null to null
content:
from math import floor
import pandas as pd
def filter_param_cd(df, code):
"""Return df filtered by approved data
"""
approved_df = df.copy()
params = [param.strip('_cd') for param in df.columns if param.endswith('_cd')]
for param in params:
        #mask records where the param's _cd column doesn't contain the given code (e.g. 'A' for approved).
approved_df[param].where(approved_df[param + '_cd'].str.contains(code), inplace=True)
# drop any rows where all params are nan and return
#return approved_df.dropna(axis=0, how='all', subset=params)
return approved_df
def interp_to_freq(df, freq=15, interp_limit=120, fields=None):
"""
    WARNING: for now this only works on one site at a time;
    this function needs further review.
Args:
df (DataFrame): a dataframe with a datetime index
freq (int): frequency in minutes
interp_limit (int): max time to interpolate over
Returns:
DataFrame
"""
#XXX assumes no? multiindex
df = df.copy()
if type(df) == pd.core.series.Series:
df = df.to_frame()
#df.reset_index(level=0, inplace=True)
limit = floor(interp_limit/freq)
freq_str = '{}min'.format(freq)
start = df.index[0]
end = df.index[-1]
new_index = pd.date_range(start=start, end=end, periods=None, freq=freq_str)
#new_index = new_index.union(df.index)
new_df = pd.DataFrame(index=new_index)
new_df = new_df.merge(df, how='outer', left_index=True, right_index=True)
#new_df = pd.merge(df, new_df, how='outer', left_index=True, right_index=True)
    #this resampling would be more efficient
out_df = new_df.interpolate(method='time',limit=limit, limit_direction='both').asfreq(freq_str)
out_df = out_df.resample('{}T'.format(freq)).asfreq()
out_df.index.name = 'datetime'
return out_df
#out_df.set_index('site_no', append=True, inplace=True)
#return out_df.reorder_levels(['site_no','datetime'])
def fill_iv_w_dv(iv_df, dv_df, freq='15min', col='00060'):
"""Fill gaps in an instantaneous discharge record with daily average estimates
Args:
iv_df (DataFrame): instantaneous discharge record
dv_df (DataFrame): Average daily discharge record.
        freq (str): frequency of the iv record, e.g. '15min'
Returns:
DataFrame: filled-in discharge record
"""
#double brackets makes this a dataframe
dv_df.rename(axis='columns',
mapper={'00060_Mean':'00060'},
inplace=True)
#limit ffill to one day or 96 samples at 15min intervals
updating_field = dv_df[[col]].asfreq(freq).ffill(limit=96)
iv_df.update(updating_field, overwrite=False)
#return update_merge(iv_df, updating_field, na_only=True)
return iv_df
#This function may be deprecated once pandas.update support joins besides left.
def update_merge(left, right, na_only=False, on=None):
"""Performs a combination
Args:
left (DataFrame): original data
right (DataFrame): updated data
na_only (bool): if True, only update na values
TODO: na_only
"""
df = left.merge(right, how='outer',
left_index=True, right_index=True)
# check for column overlap and resolve update
for column in df.columns:
#if duplicated column, use the value from right
if column[-2:] == '_x':
name = column[:-2] # find column name
if na_only:
df[name] = df[name+'_x'].fillna(df[name+'_y'])
else:
df[name+'_x'].update(df[name+'_y'])
df[name] = df[name+'_x']
df.drop([name + '_x', name + '_y'], axis=1, inplace=True)
return df
stats: avg_line_length 32.034783 | max_line_length 99 | alphanum_fraction 0.646851 | count_classes 0 | score_classes 0 | count_generators 0 | score_generators 0 | count_decorators 0 | score_decorators 0 | count_async_functions 0 | score_async_functions 0 | count_documentation 1,849 | score_documentation 0.5019
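As a quick usage sketch of `interp_to_freq()` above (assuming the module's functions are importable in the current namespace; the gauge values are invented): an irregular series is merged onto a regular 15-minute index and gaps are filled by time interpolation up to roughly `interp_limit` minutes.

```python
# Toy example: three irregular observations interpolated onto a 15-minute grid.
import pandas as pd

idx = pd.to_datetime(["2020-01-01 00:00", "2020-01-01 00:40", "2020-01-01 03:00"])
ts = pd.Series([1.0, 3.0, 10.0], index=idx, name="00060")

out = interp_to_freq(ts, freq=15, interp_limit=120)
print(out.head())
# 00:00 keeps 1.0; 00:15 and 00:30 are time-interpolated towards 3.0;
# long gaps (relative to interp_limit) are only partially filled.
```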
hexsha 73f42c1536b7cbae9884bce03cfe3067637e0ad1 | size 3,681 | ext py | lang Python
max_stars:  get_stock_data.py | jeremychonggg/Alpaca-Trading-Bot | head 82df00e327e2e55f5a0cdf85cd950c49c59bf669 | licenses ["MIT"] | stars null | null to null
max_issues: get_stock_data.py | jeremychonggg/Alpaca-Trading-Bot | head 82df00e327e2e55f5a0cdf85cd950c49c59bf669 | licenses ["MIT"] | issues null | null to null
max_forks:  get_stock_data.py | jeremychonggg/Alpaca-Trading-Bot | head 82df00e327e2e55f5a0cdf85cd950c49c59bf669 | licenses ["MIT"] | forks null | null to null
content:
import json
import requests
import pandas as pd
import websocket
# Get Alpaca API Credential
endpoint = "https://data.alpaca.markets/v2"
headers = json.loads(open("key.txt", 'r').read())
def hist_data(symbols, start="2021-01-01", timeframe="1Hour", limit=50, end=""):
"""
    returns historical bar data for an iterable of ticker symbols
    symbols should be a list of tickers, e.g. symbols = ["MSFT", "AMZN", "GOOG"]
"""
df_data_tickers = {}
for symbol in symbols:
bar_url = endpoint + "/stocks/{}/bars".format(symbol)
params = {"start":start, "limit" :limit, "timeframe":timeframe}
data = {"bars": [], "next_page_token":'', "symbol":symbol}
while True:
r = requests.get(bar_url, headers = headers, params = params)
r = r.json()
if r["next_page_token"] == None:
data["bars"]+=r["bars"]
break
else:
params["page_token"] = r["next_page_token"]
data["bars"]+=r["bars"]
data["next_page_token"] = r["next_page_token"]
df_data = pd.DataFrame(data["bars"])
df_data.rename({"t":"time","o":"open","h":"high","l":"low","c":"close","v":"volume"},axis=1, inplace=True)
df_data["time"] = pd.to_datetime(df_data["time"])
df_data.set_index("time",inplace=True)
df_data.index = df_data.index.tz_convert("America/Indiana/Petersburg")
df_data_tickers[symbol] = df_data
return df_data_tickers
def get_historical_data(ticker_list, start_date, end_date=None, limit=10000, timeframe="1Day"):
"""
    returns historical bar data for a list of ticker symbols
    ticker_list should be a list of tickers, e.g. ticker_list = ["MSFT", "AMZN", "GOOG"]
* timeframe - Timeframe for the aggregation. Available values are: `1Min`, `1Hour`, `1Day`
https://alpaca.markets/docs/api-documentation/api-v2/market-data/alpaca-data-api-v2/historical/#bars
"""
df_data_tickers = {}
for symbol in ticker_list:
bar_url = endpoint + "/stocks/{}/bars".format(symbol)
params = {"start":start_date, "end": end_date, "limit": limit, "timeframe":timeframe}
data = {"bars": [], "next_page_token": '', "symbol": symbol}
# r = requests.get(bar_url, headers=headers, params=params)
# r = r.json()
# data["bars"] += r["bars"]
while True:
r = requests.get(bar_url, headers=headers, params=params)
r = r.json()
try:
if r["next_page_token"] == None:
data["bars"] += r["bars"]
break
else:
params["page_token"] = r["next_page_token"]
data["bars"] += r["bars"]
data["next_page_token"] = r["next_page_token"]
except:
break
# Create a DataFrame for the data["bars"] of each stock
df_data = pd.DataFrame(data["bars"])
df_data.rename({"t":"time","o":"open","h":"high","l":"low","c":"close","v":"volume"},axis=1, inplace=True)
try:
df_data["time"] = pd.to_datetime(df_data["time"])
df_data.set_index("time",inplace=True)
df_data.index = df_data.index.tz_convert("America/New_York")
df_data_tickers[symbol] = df_data
except:
pass
print("---- Created for [{}]".format(symbol))
return df_data_tickers
stats: avg_line_length 39.159574 | max_line_length 115 | alphanum_fraction 0.551481 | count_classes 0 | score_classes 0 | count_generators 0 | score_generators 0 | count_decorators 0 | score_decorators 0 | count_async_functions 0 | score_async_functions 0 | count_documentation 1,421 | score_documentation 0.386036
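Both functions above share one core idea: keep requesting pages from the bars endpoint until `next_page_token` comes back as None. The sketch below exercises just that pagination loop against a stubbed `fetch()` instead of the live Alpaca API; the stub pages are invented.

```python
# Two fake pages standing in for responses from /stocks/{symbol}/bars.
PAGES = [{"bars": [1, 2], "next_page_token": "t1"},
         {"bars": [3], "next_page_token": None}]

def fetch(params):
    # Stand-in for requests.get(bar_url, headers=headers, params=params).json()
    return PAGES[1] if "page_token" in params else PAGES[0]

bars, params = [], {"start": "2021-01-01", "timeframe": "1Day"}
while True:
    page = fetch(params)
    bars += page["bars"]
    if page["next_page_token"] is None:
        break
    params["page_token"] = page["next_page_token"]

print(bars)   # [1, 2, 3]
```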
hexsha 73f83bfdf1bdf3cc0ae0369940411280ceef339a | size 4,420 | ext py | lang Python
max_stars:  yaps/server/subscription.py | victorhook/vqtt | head f79f9826ce91bf77a75047c22d7a729d539f83f9 | licenses ["MIT"] | stars null | null to null
max_issues: yaps/server/subscription.py | victorhook/vqtt | head f79f9826ce91bf77a75047c22d7a729d539f83f9 | licenses ["MIT"] | issues null | null to null
max_forks:  yaps/server/subscription.py | victorhook/vqtt | head f79f9826ce91bf77a75047c22d7a729d539f83f9 | licenses ["MIT"] | forks 1 | 2021-03-02T19:18:30.000Z to 2021-03-02T19:18:30.000Z
content:
import asyncio
from yaps.api import protocol
from yaps.utils.log import Log
SLEEP_SLOT_TIME = 1 # In seconds.
class State:
PING_PONG = 1
PING_PONG_1_MISS = 2
PING_PONG_2_MISS = 3
class Subscription:
"""
Abstraction for handling a subscription.
    This class has utilities that let it increment a counter and indicate
    whether it has timed out.
It can also send new data to the subscriber.
"""
def __init__(self,
topic: str,
reader: asyncio.StreamReader,
writer: asyncio.StreamWriter):
self._time = 0
self._state = State.PING_PONG
self._reader = reader
self._writer = writer
self._alive = True
self._set_identifier(topic)
async def start_idle(self) -> None:
""" Sets the task into idle sleep, count up a timer.
When the timer reaches timeout, timed_out() will return True.
"""
while self._alive:
# Go idle so other tasks can run.
await asyncio.sleep(SLEEP_SLOT_TIME)
# Update timer.
self._time += SLEEP_SLOT_TIME
self.die()
def _next_state(self) -> bool:
""" Advances to the next state. Returns true if the subscription
should be kept alive, and false if it should die.
"""
alive = True
if self._state == State.PING_PONG:
self._state = State.PING_PONG_1_MISS
elif self._state == State.PING_PONG_1_MISS:
self._state = State.PING_PONG_2_MISS
elif self._state == State.PING_PONG_2_MISS:
alive = False
return alive
async def ping(self) -> None:
""" Pings the subscriber and waits for a PONG back.
If the subscriber doesn't pong back, the subscription is closed.
"""
await protocol.send_packet(self._writer, protocol.Commands.PING)
Log.debug(f'Ping {self}')
pong = await protocol.read_packet(self._reader)
if await protocol.async_cmd_ok(pong, protocol.Commands.PONG):
# If PONG, reset timer.
self._time = 0
else:
Log.err(f'Bad ping! {self._alive} -> {self._state}')
# If no PONG, advance to next state, and potentially close.
alive = self._next_state()
if not alive:
self.die()
async def new_data(self, message: str) -> bool:
""" Sends the new data to the subscriber.
        Returns true if successful, false if not.
"""
send_ok = True
try:
# Send new data to subscriber
await protocol.send_packet(self._writer,
protocol.Commands.NEW_DATA,
data=message.encode('utf-8'))
# Wait for SUBSCRIBE_ACK
response = await protocol.read_packet(self._reader)
except (BrokenPipeError, ConnectionResetError):
send_ok = False
if send_ok:
            # If no ACK is received, close the connection.
if not await protocol.async_cmd_ok(response,
protocol.Commands.NEW_DATA_ACK,
self._writer):
send_ok = False
if not send_ok:
self.die()
# Reset timer.
self._time = 0
return send_ok
def timed_out(self):
return self._time > protocol.PING_PONG_TIMEOUT
def is_dead(self) -> bool:
return not self._alive
def die(self) -> None:
if not self._alive:
return
self._alive = False
Log.debug(f'Subscription died {self}')
def _set_identifier(self, topic: str) -> None:
""" Sets the identification of the subscription.
This consists of:
1. Topic
            2. File descriptor number from reader/writer stream.
"""
self.topic = topic
try:
self.fd = self._writer.get_extra_info('socket').fileno()
except AttributeError:
# Streams are incorrect
Log.err(f'Incorrect streams to subscription to {self.topic}')
self.fd = None
def __repr__(self):
return f'| ID:{self.fd} Topic: {self.topic} |'
def __lt__(self, other):
        return self._time < other._time
stats: avg_line_length 31.126761 | max_line_length 78 | alphanum_fraction 0.563801 | count_classes 4,293 | score_classes 0.971267 | count_generators 0 | score_generators 0 | count_decorators 0 | score_decorators 0 | count_async_functions 2,156 | score_async_functions 0.487783 | count_documentation 1,394 | score_documentation 0.315385
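The keep-alive logic above is a small three-state machine: a missed PONG advances one state, a third miss kills the subscription, and any successful PONG resets the timer. A standalone sketch of just that progression; the state values are arbitrary stand-ins.

```python
PING_PONG, MISS_1, MISS_2 = 1, 2, 3

def next_state(state):
    """Return (new_state, still_alive) after a missed PONG."""
    if state == PING_PONG:
        return MISS_1, True
    if state == MISS_1:
        return MISS_2, True
    return state, False          # third consecutive miss: subscription dies

state = PING_PONG
for miss in range(3):
    state, alive = next_state(state)
    print(miss + 1, state, alive)
# 1 2 True / 2 3 True / 3 3 False
```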
hexsha 73fb16f86099e7cc34882ec8e6eb8ce6cb617a74 | size 487 | ext py | lang Python
max_stars:  processing/1_comset.py | acleclair/ICPC2020_GNN | head a8b03de597e8f25c17503c3834c7956ecc8f2247 | licenses ["MIT"] | stars 58 | 2020-04-09T20:29:34.000Z to 2022-03-28T11:38:40.000Z
max_issues: processing/1_comset.py | acleclair/ICPC2020_GNN | head a8b03de597e8f25c17503c3834c7956ecc8f2247 | licenses ["MIT"] | issues 11 | 2020-04-11T14:19:01.000Z to 2021-11-27T07:38:41.000Z
max_forks:  processing/1_comset.py | acleclair/ICPC2020_GNN | head a8b03de597e8f25c17503c3834c7956ecc8f2247 | licenses ["MIT"] | forks 14 | 2020-06-15T14:32:03.000Z to 2022-01-23T10:33:15.000Z
content:
import pickle
bad_fid = pickle.load(open('autogenfid.pkl', 'rb'))
comdata = 'com_pp.txt'
good_fid = []
outfile = './output/dataset.coms'
fo = open(outfile, 'w')
for line in open(comdata):
tmp = line.split(',')
fid = int(tmp[0].strip())
if bad_fid[fid]:
continue
com = tmp[1].strip()
com = com.split()
if len(com) > 13 or len(com) < 3:
continue
com = ' '.join(com)
fo.write('{}, <s> {} </s>\n'.format(fid, com))
fo.close()
stats: avg_line_length 20.291667 | max_line_length 51 | alphanum_fraction 0.546201 | count_classes 0 | score_classes 0 | count_generators 0 | score_generators 0 | count_decorators 0 | score_decorators 0 | count_async_functions 0 | score_async_functions 0 | count_documentation 83 | score_documentation 0.170431
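In short, the script keeps a comment only if its function id is not flagged as auto-generated and the comment is 3 to 13 tokens long, then wraps it in `<s> ... </s>` markers. A one-record illustration of that transformation; the fid and text are made up.

```python
line = "42, returns the sum of two numbers"
tmp = line.split(',')
fid = int(tmp[0].strip())
com = tmp[1].strip().split()
if 3 <= len(com) <= 13:
    print('{}, <s> {} </s>'.format(fid, ' '.join(com)))
# 42, <s> returns the sum of two numbers </s>
```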
hexsha 73fb988d86cc41ea1f693ac556af57905fca2bc3 | size 22,588 | ext py | lang Python
max_stars:  shibuya/cubesym.py | Parcly-Taxel/Shibuya | head aa79b47d2a5fc859acb9645ebd635578fe2f145b | licenses ["MIT"] | stars null | null to null
max_issues: shibuya/cubesym.py | Parcly-Taxel/Shibuya | head aa79b47d2a5fc859acb9645ebd635578fe2f145b | licenses ["MIT"] | issues null | null to null
max_forks:  shibuya/cubesym.py | Parcly-Taxel/Shibuya | head aa79b47d2a5fc859acb9645ebd635578fe2f145b | licenses ["MIT"] | forks null | null to null
content:
"""
Cubic symmetric graphs. Most of the embeddings realised here were taken from MathWorld.
"""
from mpmath import *
from functools import reduce
from shibuya.generators import cu, star_radius, ring_edges, lcf_edges
from shibuya.generators import all_unit_distances, circumcentre
from shibuya.generators import fixparams_unitdist, symmetrise
# F4A = tetrahedron() or complete(4) (not unit-distance)
# F6A = circulant(6, (1, 3)) or mobiusladder(3) (not unit-distance)
# F8A = genpetersen("cube")
# F10A = genpetersen("petersen")
def heawood():
"""Return the symmetric unit-distance embedding of the Heawood graph (F14A)
tucked away in Mathematica's GraphData."""
P = [10485760, 78643200, 263192576, 543686656, 812777472, 942080000, 843317248, 552468480, 208879616, -31170560, -99213312, -76779520, -32795648, 7878144, 17269760, 16256512, 11392032, 4836080, 3014064, 361320, 69498, -165789]
v0 = findroot(lambda v: polyval(P, v), 0.275)
p0 = mpc(0.5, v0)
p1 = mpc(sqrt(1-(v0+0.5)**2)-0.5, -0.5)
p2 = cu(p0, -p0)
p3 = cu(p1, -p1)
p4 = cu(p2, p3)
vertices = [mpc(s*re(v), im(v)) for s in (1, -1) for v in (p0, -p0, p1, -p1, p2, p3, p4)]
return all_unit_distances(vertices)
# F16A = genpetersen("mobiuskantor")
def pappus():
"""Return a unit-distance embedding of the Pappus graph (F18A)."""
u6 = unitroots(6)
r0 = [u*0.5j for u in u6]
z1 = cu(r0[2], r0[0])
r1 = [z1*u for u in u6]
z2 = cu(0, z1)
r2 = [z2*u for u in u6]
vertices = r0 + r1 + r2
edges = ring_edges(6, ((0, 0, 3), (0, 1, 0), (0, 1, -2), (2, 2, 1), (1, 2, 0)))
return (vertices, edges)
# F20A = genpetersen("dodecahedron")
# F20B = genpetersen("desargues")
# F24A = genpetersen("nauru")
def f26a_vertices(t):
A, B, C = unitroots(6)[4:1:-1]
p2 = mpc(t, sqrt(1-t**2)) / 2
p1 = p2 * root(1, 6, 1)
p3 = p2 * root(1, 6, 5)
p4 = cu(p1, B)
p5 = cu(p2, C)
p6 = cu(p3, -A)
p7 = cu(p1, -p6)
p8 = cu(p4, p2)
p9 = cu(p5, p3)
p10 = cu(-p8, p7)
V = (A, B, C, p1, p2, p3, p4, p5, p6, p7, p8, p9, p10)
return ([v*s for v in V for s in (1, -1)], abs(p9 - p10) - 1)
def f26a():
"""Return a unit-distance embedding of the F26A graph."""
t0 = findroot(lambda t: f26a_vertices(t)[1], 0.2)
return all_unit_distances(f26a_vertices(t0)[0])
def coxeter():
"""Return a unit-distance embedding of the Coxeter graph (F28A)."""
u7 = unitroots(7)
s1 = star_radius(7)
s2 = star_radius(7, 2)
s3 = star_radius(7, 3)
r0 = [-s2*u for u in u7]
r1 = [s3*u for u in u7]
z2 = cu(r0[0], r1[3])
r2 = [z2*u for u in u7]
z3 = cu(0, z2, s1, 1)
r3 = [z3*u for u in u7]
vertices = r0 + r1 + r2 + r3
edges = ring_edges(7, ((0, 0, 2), (1, 1, 3), (3, 3, 1), (0, 2, 0), (1, 2, -3), (2, 3, 0)))
return (vertices, edges)
@fixparams_unitdist(-1.76, -0.76)
def tutte8(a, b):
z1 = 1.69j
z2 = z1 + expj(a)
z3 = z2 + sign(-z2)
z4 = cu(z1*root(1,5,2), z1*root(1,5,-2))
d1 = abs(z3-z4) - 1
z5 = z2 + expj(b)
z6 = cu(z5*root(1,5,2), z5)
d2 = abs(z6-z3*root(1,5,1)) - 1
vertices = symmetrise((z1, z2, z3, z4, z5, z6), "C5")
return (vertices, (d1, d2))
def dyck():
"""Return a unit-distance embedding of the Dyck graph (F32A)."""
r0 = unitroots(8)
r1 = [sqrt(2)*u for u in r0]
z2 = cu(r0[1], 0, 1, star_radius(8))
r2 = [z2*u for u in r0]
z3 = cu(0, r1[0], star_radius(8, 3), 1)
r3 = [z3*u for u in r0]
vertices = r0 + r1 + r2 + r3
edges = ring_edges(8, ((0, 1, 1), (0, 1, -1), (0, 2, -1), (2, 2, 1), (1, 3, 0), (3, 3, 3)))
return (vertices, edges)
def dyck2_vertices(a):
p1 = mpc(a, 0.5)
p2 = mpc(3*a, 0.5)
p3 = cu(p1, -1j*p1)
p4 = cu(p2, -1j*p2)
vertices = [u*p for u in (1, 1j, -1, -1j) for p in (p1, p2, p3, p4)]
vertices.extend([conj(p) for p in vertices])
return (vertices, abs(p3 - conj(p4)) - 1)
def dyck2():
"""Return a unit-distance embedding of the Dyck graph with D4 symmetry."""
t0 = findroot(lambda t: dyck2_vertices(t)[1], 0.1)
return all_unit_distances(dyck2_vertices(t0)[0])
def f38a_vertices(t):
u6 = unitroots(6)
vertices = []
p0 = 1
p1 = 2
p2 = rect(0.5, t)
p3 = cu(p2, p1)
p4 = cu(p3, 2*u6[1])
for (i, u) in enumerate(u6):
p5 = cu(u6[4]*p2, p4) if i in (2, 5) else cu(p4, u6[4]*p2)
vertices.extend(p*u for p in (p0, p1, p2, p3, p4, p5))
p6 = circumcentre(vertices[5], vertices[17], vertices[29])
vertices.extend((p6, -p6))
return (vertices, abs(p6 - vertices[5]) - 1)
def f38a():
"""Return a unit-distance embedding of the F38A graph."""
t0 = findroot(lambda t: f38a_vertices(t)[1], 0.29)
return all_unit_distances(f38a_vertices(t0)[0])
def f40a(x=0.75):
"""Return a unit-distance embedding of F40A (bipartite double cover of F20A).
x can be anything between (sqrt(5)-1)/2 and 1."""
u10 = unitroots(10)
z0 = star_radius(10)
r0 = [z0*u for u in u10]
z1 = cu(r0[1], 0, 1, x)
r1 = [z1*u for u in u10]
z2 = cu(r1[2], r1[-2])
r2 = [z2*u for u in u10]
z3 = cu(0, z2, z0, 1)
r3 = [z3*u for u in u10]
vertices = r0 + r1 + r2 + r3
return all_unit_distances(vertices)
def f42a_vertices(a, b, c):
u7 = unitroots(7)
pa = mpc(a, 0.5)
pb = mpc(b, 0.5)
pc = mpc(c, 0.5)
pac, pbc, pcc = (conj(p) for p in (pa, pb, pc))
d1 = abs(pa - u7[1]*pbc)**2 - 1
d2 = abs(pb - u7[2]*pcc)**2 - 1
d3 = abs(pc - u7[4]*pac)**2 - 1
vertices = [u*p for u in u7 for p in (pa, pb, pc, pac, pbc, pcc)]
return (vertices, (d1, d2, d3))
def f42a(mode=0):
"""Return a unit-distance embedding of the F42A graph.
mode (0 or 1) selects between two algebraically related forms."""
x0 = (0.27, 1.36, 0.52) if mode == 0 else (1.24, 0.18, -0.53)
t0 = findroot(lambda *t: f42a_vertices(*t)[1], x0)
return all_unit_distances(f42a_vertices(*t0)[0])
# F48A = genpetersen("f48a") but the resulting embedding is vertex-edge-degenerate, so...
def f48a():
"""Return a non-degenerate unit-distance embedding of the F48A graph."""
R = (2 + 3*sqrt(2) + sqrt(12*sqrt(6)-26)) / 4
r = (2 + 3*sqrt(2) - sqrt(12*sqrt(6)-26)) / 4
L = R-1
l = r-1
u24 = unitroots(24)
ring_R = [u*R for u in u24[::2]]
ring_r = [u*r for u in u24[1::2]]
ring_L = [u*L for u in u24[::2]]
ring_l = [u*l for u in u24[1::2]]
vertices = ring_R + ring_r + ring_L + ring_l
edges = ring_edges(12, ((0, 1, 0), (0, 1, -1), (0, 2, 0), (1, 3, 0), (2, 3, 2), (2, 3, -3)))
return (vertices, edges)
def f50a_vertices(t):
u = root(1, 5, 1)
table = {(): (0, 3, 4, 5, 6),
(1,): (2, 35, 36, 37, 38),
(1, 1, 1, 1): (48, 21, 22, 23, 24),
(2,): (1, 28, 27, 26, 25),
(1, 1): (34, 17, 18, 39, 40),
(1, 2): (49, 46, 45, 44, 43),
(1, 1, 1): (16, 19, 20, 41, 42),
(2, 1): (33, 30, 29, 8, 7),
(1, 1, 2): (47, 14, 13, 12, 11),
(2, 1, 1): (15, 32, 31, 10, 9)}
p0 = rect(star_radius(10), 0.9*pi)
p3 = rect(star_radius(10, 3), -0.7*pi)
p4 = cu(p3, -conj(p3))
p5 = p4 + expj(t)
p6 = cu(p5, u*p5)
seeds = [p0, p3, p4, p5, p6]
vertices = [None] * 50
ops = {1: lambda z: u*z, 2: conj}
for (aut, coset) in table.items():
for (ring, i) in enumerate(coset):
vertices[i] = -1j * reduce(lambda z, k: ops[k](z), aut, seeds[ring])
return (vertices, re(vertices[40]) + 0.5)
def f50a():
"""Return a unit-distance embedding of the F50A graph, an embedding
found by the computer (specifically the embedding_run() function in embeddingsearch)."""
t0 = findroot(lambda t: f50a_vertices(t)[1], 2)
return (f50a_vertices(t0)[0], lcf_edges(50, [21, -21, -19, 19, -19, 19, -19, 19, 21, -21]))
def f54a_vertices(t):
u18 = unitroots(18)
r0 = [u/2 for u in u18]
z1 = cu(r0[1], r0[-1])
r1 = [z1*u for u in u18]
z2a = r1[0] + expj(t)
z2b = circumcentre(z2a, u18[2]*z2a, r1[1])
r2 = [u*z for u in unitroots(9) for z in (z2a, z2b)]
vertices = r0 + r1 + r2
return (vertices, abs(z2b - r1[1]) - 1)
def f54a(i=2):
"""Return one of three (depending on i in {0, 1, 2}) algebraically related
unit-distance embeddings of the F54A graph."""
px = [[3], [-10, -12], [13, 6, 34], [-17, -5, -14]] # x = a(1-c)
py = [[3], [2, -2, -10], [1, -6, 9], [-19, 41, -10]] # y = c(1-a)
pz = [[3], [5, -8, 2], [11, -14, -13], [-19, 41, -10]] # z = b(1-b)
x = polyroots([polyval(l, 2*cos(pi/9)) for l in px])[i]
y = polyroots([polyval(l, 2*cos(pi/9)) for l in py])[i]
sxy = sqrt((1+x-y)**2 - 4*x)
a = (1+x-y+sxy) / 2
c = (1-x+y+sxy) / 2
z = polyroots([polyval(l, 2*cos(pi/9)) for l in pz])[(1-i)%3]
b = (1 + (-1 if i else 1)*sqrt(1-4*z)) / 2
triple = [a, b, c]
line = [p-d for p in triple for d in (0, 1)]
return all_unit_distances(symmetrise(line, "C9"))
def f56a():
"""Return a unit-distance embedding of the F56A graph.
Note that MathWorld's LCF notation for this is incorrect;
it should be [11, 13, -13, -11]^14."""
t = tan(pi/14)
u = sqrt(polyval([-21, 98, 71], t*t))
z1 = 2*sqrt(14*polyval([31*u, -20, -154*u, 104, 87*u, -68], t))
z2 = 7*t*(t*t-3)**2 - 4*u
a = (z1 + z2) / 64
b = (z1 - z2) / 64
u14 = unitroots(14)
pa = mpc(a, 0.5)
pb = mpc(b, 0.5)
pac, pbc = conj(pa), conj(pb)
d1 = abs(pa - u14[-1]*pb)**2 - 1
d2 = abs(pb - u14[-2]*pa)**2 - 1
vertices = [u*p for u in u14 for p in (pa, pb, pac, pbc)]
return all_unit_distances(vertices)
def klein(a1=4.47, a2=2.42, a3=0.7, s1=1, s2=-1):
"""Return a unit-distance embedding of the cubic Klein graph (F56B)."""
u7 = unitroots(7)
z0 = star_radius(7)
r0 = [z0*u for u in u7]
z1 = z0 + expj(a1)
z2 = z1 + expj(a2)
z3 = z1 + expj(a3)
r1 = [z1*u for u in u7]
r2 = [z2*u for u in u7]
r3 = [z3*u for u in u7]
z4 = cu(*(r2[2], r3[0])[::s1])
z5 = cu(*(r2[0], r3[1])[::s2])
r4 = [z4*u for u in u7]
r5 = [z5*u for u in u7]
z6 = cu(0, r4[0], star_radius(7, 2), 1)
z7 = cu(0, r5[0], star_radius(7, 3), 1)
r6 = [z6*u for u in u7]
r7 = [z7*u for u in u7]
vertices = r0 + r1 + r2 + r3 + r4 + r5 + r6 + r7
edges = ring_edges(7, ((0, 0, 1), (0, 1, 0), (1, 2, 0), (1, 3, 0),
(2, 4, -2), (3, 4, 0), (2, 5, 0), (3, 5, -1),
(4, 6, 0), (5, 7, 0), (6, 6, 2), (7, 7, 3)))
return (vertices, edges)
def f56c():
"""Return a unit-distance embedding of the F56C graph,
the bipartite double cover of the Coxeter graph."""
u14 = unitroots(14)
z0 = star_radius(14, 5)
r0 = [z0*u for u in u14]
z1 = star_radius(14, 3)
r1 = [z1*u for u in u14]
z2 = cu(r1[4], r0[0])
r2 = [z2*u for u in u14]
z3 = cu(0, z2, star_radius(14), 1)
r3 = [z3*u for u in u14]
vertices = r0 + r1 + r2 + r3
edges = ring_edges(14, ((0, 0, 5), (1, 1, 3), (2, 1, 4), (2, 0, 0), (2, 3, 0), (3, 3, 1)))
return (vertices, edges)
def f60a(t=-0.35):
"""Return a unit-distance embedding of the F60A graph."""
u15 = unitroots(15)
z0 = star_radius(15, 7)
r0 = [z0*u for u in u15]
z1 = z0 + expj(t)
r1 = [z1*u for u in u15]
z2 = cu(r1[3], r1[0])
r2 = [z2*u for u in u15]
z3 = cu(0, z2, star_radius(15, 2), 1)
r3 = [z3*u for u in u15]
vertices = r0 + r1 + r2 + r3
edges = ring_edges(15, ((0, 0, 7), (0, 1, 0), (2, 1, 0), (2, 1, 3), (2, 3, 0), (3, 3, 2)))
return (vertices, edges)
def f62a_vertices(*params):
u6 = unitroots(6)
tree = [1j, 2j]
tree.append(tree[-1] + expj(2.939))
tree.append(tree[-1] + expj(-1.025))
for (i, v) in enumerate((3, 1, 5, 6, 6)):
tree.append(tree[v] + expj(params[i]))
star = mpc(params[-2], params[-1])
cc1 = circumcentre(tree[8], star, u6[4]*tree[7])
cc2 = circumcentre(u6[2]*tree[8], star, tree[7])
cc3 = circumcentre(u6[4]*tree[8], star, u6[2]*tree[7])
cons = (abs(tree[2] - u6[1]*tree[5])**2 - 1,
abs(tree[4] - u6[1]*tree[7])**2 - 1,
abs(tree[3] - tree[8])**2 - 1,
4*abs(tree[4])**2 - 1,
abs(star - cc1)**2 - 1,
abs(star - cc2)**2 - 1,
abs(star - cc3)**2 - 1)
vertices = [u*t for u in u6 for t in tree]
vertices.extend(s*v for s in (1, -1) for v in (cc1, cc2, cc3, star))
return (vertices, cons)
def f62a():
"""Return a unit-distance embedding of the F62A graph."""
t0 = [-1.017, -0.819, 2.96, -0.282, -1.091, -0.624, 0.354]
t0 = findroot(lambda *t: f62a_vertices(*t)[1], t0)
return all_unit_distances(f62a_vertices(*t0)[0])
def f64a_vertices(a, b):
u8 = unitroots(8)
p1 = mpc(a, 0.5)
p2 = mpc(b, 0.5)
p3 = cu(u8[3]*p1, conj(p2), 2, 1)
p4 = (u8[3]*p1 + p3) / 2
d1 = abs(u8[1]*p3 - p4)**2 - 1
d2 = abs(p1 - u8[1]*conj(p2))**2 - 1
vertices = [u*p for u in u8 for p in (p1, p2, p3, p4)]
vertices += [conj(p) for p in vertices]
return vertices, (d1, d2)
def f64a():
"""Return a unit-distance embedding of the F64A graph."""
t0 = findroot(lambda *t: f64a_vertices(*t)[1], (0.53, 1.6))
return all_unit_distances(f64a_vertices(*t0)[0])
def f72a_vertices(t):
u24 = unitroots(24)
u12 = unitroots(12)
z0 = star_radius(24, 11)
r0 = [u*z0 for u in u24]
z1 = star_radius(24, 7) * expj(t)
r1 = [u*z1 for u in u24]
z2 = cu(r0[0], r1[9])
r2 = [u*z2 for u in u12]
z3 = cu(r0[15], r1[6])
r3 = [u*z3 for u in u12]
vertices = r0 + r1 + r2 + r3
return (vertices, abs(z2 - z3) - 1)
def f72a():
"""Return a unit-distance embedding of the F72A graph."""
t0 = findroot(lambda t: f72a_vertices(t)[1], 2.2)
return all_unit_distances(f72a_vertices(t0)[0])
def f74a_vertices(*params):
u6 = unitroots(6)
tree = [1j, 2j, 2j-expj(-pi/6), 2j+expj(pi/6)]
params = [-1.04, 3.92] + list(params)
for (i, v) in enumerate((2, 3, 4, 5, 5, 6, 7)):
tree.append(tree[v] + expj(params[i]))
star = mpc(params[-2], params[-1])
cc1 = circumcentre(tree[8], star, -tree[9])
cc2 = circumcentre(u6[2]*tree[8], star, -u6[2]*tree[9])
cc3 = circumcentre(u6[4]*tree[8], star, -u6[4]*tree[9])
cons = (abs(tree[6] - u6[1]*tree[8])**2 - 1,
abs(tree[4] - tree[7])**2 - 1,
abs(tree[9] + tree[10])**2 - 1,
4*abs(tree[10])**2 - 1,
abs(star - cc1)**2 - 1,
abs(star - cc2)**2 - 1,
abs(star - cc3)**2 - 1)
vertices = [u*t for u in u6 for t in tree]
vertices.extend(s*v for s in (1, -1) for v in (star, cc1, cc2, cc3))
return (vertices, cons)
def f74a():
"""Return a unit-distance embedding of the F74A graph."""
t0 = [2.91, 4.74, 5.5, 4.88, 5, -0.05, 0.07]
t0 = findroot(lambda *t: f74a_vertices(*t)[1], t0)
return all_unit_distances(f74a_vertices(*t0)[0])
@fixparams_unitdist(-1.1, 1.6, 2.2)
def f78a(a, b, c):
u = unitroots(13)
pa = mpc(a, 0.5)
pb = mpc(b, 0.5)
pc = mpc(c, 0.5)
d1 = abs(pa - u[5]*conj(pb))**2 - 1
d2 = abs(pb - u[2]*conj(pc))**2 - 1
d3 = abs(pc - u[6]*conj(pa))**2 - 1
return (symmetrise((pa, pb, pc), "D13"), (d1, d2, d3))
def f80a(t=1.39):
"""Return a unit-distance embedding of the F80A graph."""
u20 = unitroots(20)
z0 = star_radius(20, 7)
r0 = [z0*u for u in u20]
z1 = z0 + expj(t)
r1 = [z1*u for u in u20]
z2 = cu(r1[2], r1[0])
r2 = [z2*u for u in u20]
z3 = cu(0, z2, star_radius(20, 3), 1)
r3 = [z3*u for u in u20]
vertices = r0 + r1 + r2 + r3
edges = ring_edges(20, ((0, 0, 7), (0, 1, 0), (2, 1, 0), (2, 1, 2), (2, 3, 0), (3, 3, 3)))
return (vertices, edges)
def f84a_vertices(p2, a, b, c):
u7 = unitroots(7)
p0 = star_radius(7)
p1 = p0 + 1
p3 = p2 + 1 # has a sign variation
p4 = cu(p2, u7[3]*p1)
p5 = mpc(a, 0.5)
p6 = mpc(b, 0.5)
p7 = mpc(c, 0.5)
d1 = abs(p3 - u7[4]*p5)**2 - 1
d2 = abs(p4 - u7[2]*p7)**2 - 1
d3 = abs(p5 - u7[4]*conj(p6))**2 - 1
d4 = abs(p6 - u7[-1]*p7)**2 - 1
vertices = [u*p for u in u7 for p in (p0, p1, p2, p3, p4, p5, p6, p7)]
vertices.extend([u*conj(p) for u in u7 for p in (p4, p5, p6, p7)])
vertices = list(map(lambda z: z*1j, vertices))
return (vertices, (d1, d2, d3, d4))
def f84a():
"""Return a unit-distance embedding of the F84A graph - not degenerate
despite its looks. The graph is notable in having the simple PSL(2,8)
as its automorphism group."""
t0 = findroot(lambda *t: f84a_vertices(*t)[1], (-0.46, -1.44, 0.25, 0.75))
return all_unit_distances(f84a_vertices(*t0)[0])
def f86a_vertices(*params):
u6 = unitroots(6)
tree = [1j, 2j, 2j-expj(-pi/6), 2j+expj(pi/6)]
params = [5.24451, 5.34434, 5.00597] + list(params)
for (i, v) in enumerate((2, 3, 4, 5, 5, 6, 7, 8, 9)):
tree.append(tree[v] + expj(params[i]))
star = mpc(params[-2], params[-1])
cc1 = circumcentre(tree[10], star, tree[11])
cc2 = circumcentre(u6[2]*tree[10], star, u6[2]*tree[11])
cc3 = circumcentre(u6[4]*tree[10], star, u6[4]*tree[11])
cons = (abs(tree[6] - u6[1]*tree[8])**2 - 1,
abs(tree[4] - tree[7])**2 - 1,
abs(tree[12] - tree[10])**2 - 1,
4*abs(tree[9])**2 - 1,
abs(tree[12] - u6[4]*tree[11])**2 - 1,
abs(star - cc1)**2 - 1,
abs(star - cc2)**2 - 1,
abs(star - cc3)**2 - 1)
vertices = [u*t for u in u6 for t in tree]
vertices.extend(s*v for s in (1, -1) for v in (star, cc1, cc2, cc3))
return (vertices, cons)
def f86a():
"""Return a unit-distance embedding of the F86A graph."""
t0 = [3.60383, 3.44007, 4.34048, 5.63174, 3.26345, 0.488743, 0.113378, 0.236693]
t0 = findroot(lambda *t: f86a_vertices(*t)[1], t0)
return all_unit_distances(f86a_vertices(*t0)[0])
def foster_vertices(n, t):
s2, s3 = (n&2)-1, ((n&1)<<1)-1
c = star_radius(10)
cp = c*root(1, 30, 1)
a = rect(sec(pi/10), t)
ap = rect(tan(pi/10), t+2*pi/5)
b = cu(*(a, c)[::s2])
bp = cu(*(ap, cp)[::s3])
arc = (cp, bp, ap, a, b, c)
vertices = [u*p for u in unitroots(15) for p in arc]
return (vertices, abs(vertices[1] - vertices[82]) - 1)
def foster(i=5):
"""Return any one of six (depending on 0 <= i <= 5) unit-distance
embeddings of the Foster graph (F90A)."""
n, t0 = [(0, 0.38), (1, 1.35), (2, 0.15), (2, 1.18), (2, 4.68), (3, [1.5, 1.6])][i]
tstar = findroot(lambda t: foster_vertices(n, t)[1], t0)
return (foster_vertices(n, tstar)[0], lcf_edges(90, (17, -9, 37, -37, 9, -17)))
def foster_old_vertices(r):
v3a = 0.265
v3 = v3a * root(1, 5, 2)
v2 = cu(v3, v3a)
v5r = root(1, 20, 7) * r
v5r2 = -v5r.conjugate()
v5 = v5r * root(1, 15, 14)
v0 = cu(v5r, v5r2)
v1 = cu(v2, v0)
v4 = cu(v3, v5)
vgens = (v0, v1, v2, v3, v4, v5)
vertices = [v*u for v in vgens for u in unitroots(15)]
return (vertices, abs(v1 - v4*root(1, 15, 2)) - 1)
def foster_old():
"""Return the unit-distance embedding of the Foster graph (F90A)
originally in Dounreay."""
r0 = findroot(lambda r: foster_old_vertices(r)[1], 0.35)
vertices = foster_old_vertices(r0)[0]
edges = ring_edges(15, ((0, 1, 0), (1, 2, 0), (2, 3, 0), (3, 4, 0), (4, 5, 0), (5, 0, -1),
(0, 5, -2), (2, 3, -6), (4, 1, -2)))
return (vertices, edges)
def f96a_vertices(a):
u12 = unitroots(12)
p1 = a+0.5j
p2 = 2.3+0.5j
p3 = cu(u12[-1]*p1, p2) # ring 3
p4 = cu(p2, u12[1]*p1) # ring 2
vertices = [u*p for u in u12 for p in (p1, p2, p3, p4)]
vertices.extend([conj(p) for p in vertices])
return (vertices, abs(p3 - u12[3]*conj(p4)) - 1)
def f96a():
"""Return a unit-distance embedding of the F96A graph."""
a0 = findroot(lambda a: f96a_vertices(a)[1], 0.7)
return all_unit_distances(f96a_vertices(a0)[0])
def f96b(a=2.32, b=1.92, c=-0.26, s1=-1, s2=1, s3=1, s4=-1):
"""Return a unit-distance embedding of the F96B graph."""
u12 = unitroots(12)
z2 = star_radius(12, 5)
r2 = [u*z2 for u in u12]
z3 = z2 + expj(a)
r3 = [u*z3 for u in u12]
z1 = z3 + expj(b)
r1 = [u*z1 for u in u12]
z4 = cu(*(u12[3]*z1, z3)[::s1])
r4 = [u*z4 for u in u12]
z5 = z1 + expj(c)
r5 = [u*z5 for u in u12]
z6 = cu(*(u12[-5]*z5, z4)[::s2])
r6 = [u*z6 for u in u12]
z8 = cu(*(u12[-4]*z6, z5)[::s3])
r8 = [u*z8 for u in u12]
z7 = cu(z8, 0, 1, star_radius(12)) if s4 == 1 else cu(0, z8, star_radius(12), 1)
r7 = [u*z7 for u in u12]
vertices = r1 + r2 + r3 + r4 + r5 + r6 + r7 + r8
edges = ring_edges(12, ((1, 1, 5), (1, 2, 0), (2, 0, 0), (3, 0, 3),
(3, 2, 0), (4, 0, 0), (5, 4, -5), (5, 3, 0),
(7, 5, -4), (7, 4, 0), (6, 7, 0), (6, 6, 1)))
return (vertices, edges)
@fixparams_unitdist(1.43, 2.28)
def f98a(a, b):
    r = root(1, 7, 1)
p1 = 1.2+0.5j
p7 = 2.3+0.5j
p3 = mpc(a, -0.5)*sqrt(r)
p6 = cu(conj(p7)*r, p1)
p2 = (p1+expj(b))/r
p4 = conj(cu(p2, p3/r))
p5 = conj(cu(p2, p7))*r
cons = (abs(p3 - conj(p6)) - 1, abs(p4 - conj(p5)) - 1)
return (symmetrise((p1, p2, p3, p4, p5, p6, p7), "D7"), cons)
@fixparams_unitdist(3.2, -2.5, 2.7, -2.7, -3.2)
def f98b(a, b, c, d, e):
u = root(1, 7, 1)
z1 = 3.175+0.5j
z2 = z1+expj(a)
z3 = z1+expj(b)
z4 = z3+expj(c)
z5 = z3+expj(d)
z6 = z4+expj(e)
z7 = -0.0375+0.5j
cons = (abs(z2 - conj(z2)*u) - 1,
abs(z2 - z5*u) - 1,
abs(z4 - z7/u**2) - 1,
abs(z5 - z6/u) - 1,
abs(z6 - conj(z7)/u) - 1)
vertices = symmetrise((z1, z2, z3, z4, z5, z6, z7), "D7")
return (vertices, cons)
def biggssmith():
"""Return a unit-distance embedding of the Biggs–Smith graph (F102A)."""
s1 = star_radius(17)
s2 = star_radius(17, 2)
s4 = star_radius(17, 4)
s8 = star_radius(17, 8)
u17 = unitroots(17)
r1 = [s1*u*1j for u in u17]
r4 = [s4*u*1j for u in u17]
r8 = [s8*u*-1j for u in u17]
sh1 = cu(r1[0], r4[0])
rh1 = [sh1*u for u in u17]
sh2 = cu(sh1, r8[7])
rh2 = [sh2*u for u in u17]
s2 = cu(sh2, 0, 1, s2)
r2 = [s2*u for u in u17]
vertices = r1 + r4 + rh1 + r8 + rh2 + r2
edges = ring_edges(17, ((0, 0, 1), (1, 1, 4), (3, 3, 8), (5, 5, 2),
(0, 2, 0), (1, 2, 0), (2, 4, 0), (4, 5, 0), (4, 3, 7)))
return (vertices, edges)
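# A minimal plotting sketch for the embeddings above, assuming matplotlib is
# available (it is not imported by this module): each function returns a
# (vertices, edges) pair, where vertices are complex numbers and edges are
# pairs of vertex indices.
#
#     import matplotlib.pyplot as plt
#     vertices, edges = biggssmith()
#     for (i, j) in edges:
#         plt.plot([float(vertices[i].real), float(vertices[j].real)],
#                  [float(vertices[i].imag), float(vertices[j].imag)], 'k-', lw=0.5)
#     plt.gca().set_aspect('equal')
#     plt.show()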
| 35.91097
| 230
| 0.527625
| 0
| 0
| 0
| 0
| 1,547
| 0.068482
| 0
| 0
| 3,025
| 0.133909
|
73fbbcb7d6e336ad39011f035279ed591c9a4ab4
| 937
|
py
|
Python
|
src/create_scatterplot.py
|
djparente/coevol-utils
|
966a1f16872d72886b92cc3fa51f803412acc481
|
[
"BSD-3-Clause"
] | 1
|
2016-03-13T05:26:40.000Z
|
2016-03-13T05:26:40.000Z
|
src/create_scatterplot.py
|
djparente/coevol-utils
|
966a1f16872d72886b92cc3fa51f803412acc481
|
[
"BSD-3-Clause"
] | null | null | null |
src/create_scatterplot.py
|
djparente/coevol-utils
|
966a1f16872d72886b92cc3fa51f803412acc481
|
[
"BSD-3-Clause"
] | null | null | null |
#!/cygdrive/c/Python27/python.exe
# Daniel J. Parente, Ph.D.
# Swint-Kruse Laboratory
# Physician Scientist Training Program
# University of Kansas Medical Center
# This code is adapted from the example available at
# http://pandasplotting.blogspot.com/2012/04/added-kde-to-scatter-matrix-diagonals.html
# Creates a scatterplot matrix (off-diagonals) with a kernel density estimate (KDE)
# of the distribution of (univariate) data on the diagonal
import numpy as np
import matplotlib.pyplot as plt
import pandas
import sys
infile=sys.argv[1]
outfile=sys.argv[2]
maindata = pandas.read_csv(infile, sep="\t")
plt.rcParams['patch.facecolor'] = 'k' # Make the markers black
# Plot
ax = pandas.tools.plotting.scatter_matrix(maindata, alpha=0.1, marker='.', figsize=(8,8), diagonal='kde', range_padding=0.1)  # '.' is a valid marker spec; 'k.' is a plot-format string and is rejected here
# Give a small inter-plot spacing
plt.subplots_adjust(wspace=.05, hspace=.05)
#Save the figure
plt.savefig(outfile, dpi=600)
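# A minimal usage sketch, assuming a tab-separated input file whose columns are
# the numeric variables to cross-plot (file names below are placeholders):
#     python create_scatterplot.py data.tsv scatter.png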
| 28.393939
| 125
| 0.760939
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 550
| 0.58698
|
73fdabf0cf89f2998b7ab3d1732e81dfc49cf70e
| 4,143
|
py
|
Python
|
core/perspective_projection.py
|
sam-lb/python-grapher
|
657c423fa6e1f2260988749807db9a5beaf1fef2
|
[
"MIT"
] | 2
|
2019-08-21T15:02:51.000Z
|
2019-09-03T00:26:48.000Z
|
core/perspective_projection.py
|
sam-lb/mathgraph3d
|
657c423fa6e1f2260988749807db9a5beaf1fef2
|
[
"MIT"
] | 6
|
2019-07-28T21:28:11.000Z
|
2019-11-05T12:08:23.000Z
|
core/perspective_projection.py
|
sam-lb/mathgraph3d
|
657c423fa6e1f2260988749807db9a5beaf1fef2
|
[
"MIT"
] | null | null | null |
import pygame;
import numpy as np;
from math import sin, cos;
pygame.init();
width, height, depth = 640, 480, 800;
camera = [width // 2, height // 2, depth];
units_x, units_y, units_z = 8, 8, 8;
scale_x, scale_y, scale_z = width / units_x, height / units_y, depth / units_z;
screen = pygame.display.set_mode((width, height));
pygame.display.set_caption("3D perspective projection test");
pygame.key.set_repeat(100, 50);
def scale(p):
""" scale a point by the number of pixels per unit in each direction """
return p[0] * scale_x, p[1] * scale_y, p[2] * scale_z;
def translate_to_screen(p):
""" convert from projected cartesian coordinates to canvas coordinates """
return p[0] + width // 2, height // 2 - p[1];
def project(p):
""" project a point onto the 2D plane """
proj_x = (camera[2] * (p[0] - camera[0])) / (camera[2] + p[2]) + camera[0];
proj_y = (camera[2] * (p[1] - camera[1])) / (camera[2] + p[2]) + camera[1];
return proj_x, proj_y;
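# Quick sanity check of the projection above, assuming the default
# camera = [320, 240, 800]: for the scaled origin p = (0, 0, 0),
#     proj_x = 800*(0 - 320)/(800 + 0) + 320 = 0
#     proj_y = 800*(0 - 240)/(800 + 0) + 240 = 0
# and translate_to_screen((0, 0)) = (320, 240), i.e. the centre of the canvas.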
def rproj(a, tx, ty, tz):
    """ rotate the point about the camera by the angles tx, ty, tz, then perspective-project onto the 2D canvas """
    rotation = rot_mat_x(tx).dot(rot_mat_y(ty)).dot(rot_mat_z(tz));
sub = np.array([a]) - np.array([camera]);
d = list(sub.dot(rotation)[0]);
e = width, height, depth;
return e[2] / d[2] * d[0] + e[0], e[2] / d[2] * d[1] + e[1];
def screen_point(p):
""" convert a point in 3D cartesian space to a point in 2D canvas space """
return translate_to_screen(project(scale(p)));
def project_triangle(tri):
""" return the screen coordinates of a triangle """
angs = (tx, ty, tz);
return rproj(tri[0], *angs), rproj(tri[1], *angs), rproj(tri[2], *angs);
## return screen_point(tri[0]), screen_point(tri[1]), screen_point(tri[2]);
def project_line(line):
""" return the screen coordinates of a line """
return screen_point(line[0]), screen_point(line[1]);
def rot_mat_x(theta):
return np.array([
[1, 0, 0],
[0, cos(theta), -sin(theta)],
[0, sin(theta), cos(theta)],
]);
def rot_mat_y(theta):
return np.array([
[cos(theta), 0, sin(theta)],
[0, 1, 0],
[-sin(theta), 0, cos(theta)],
]);
def rot_mat_z(theta):
return np.array([
[cos(theta), -sin(theta), 0],
[sin(theta), cos(theta), 0],
[0, 0, 1],
]);
triangle = ((1, 1, 1), (2, 2, 2), (1, 2, 1));
x_axis = ((-2, 0, 0), (2, 0, 0));
y_axis = ((0, -2, 0), (0, 2, 0));
z_axis = ((0, 0, -2), (0, 0, 2));
tx, ty, tz = 0, 0, 0;
clock = pygame.time.Clock();
running = True;
while running:
screen.fill((255, 255, 200));
proj_triangle = project_triangle(triangle);
pygame.draw.polygon(screen, (255, 0, 200), proj_triangle);
pygame.draw.polygon(screen, (0, 0, 0), proj_triangle, 1);
pygame.draw.rect(screen, (255, 0, 0), (*proj_triangle[0], 10, 10));
pygame.draw.rect(screen, (0, 255, 0), (*proj_triangle[1], 10, 10));
pygame.draw.rect(screen, (0, 0, 255), (*proj_triangle[2], 10, 10));
## proj_ax, proj_ay, proj_az = project_line(x_axis), project_line(y_axis), project_line(z_axis);
## pygame.draw.line(screen, (255, 0, 0), proj_ax[0], proj_ax[1], 1);
## pygame.draw.line(screen, (0, 255, 0), proj_ay[0], proj_ay[1], 1);
## pygame.draw.line(screen, (0, 0, 255), proj_az[0], proj_az[1], 1);
pygame.display.flip();
for event in pygame.event.get():
if event.type == pygame.QUIT:
running = False;
break;
elif event.type == pygame.KEYDOWN:
if event.key == pygame.K_LEFT:
#camera[0] -= 25;
## camera = list(np.array([camera]).dot(rot_mat_y(0.2).dot(rot_mat_z(0.1)))[0]);
tx += 0.1;
elif event.key == pygame.K_RIGHT:
#camera[0] += 25;
## camera = list(np.array([camera]).dot(rot_mat_z(-0.1))[0]);
tx -= 0.1;
elif event.key == pygame.K_UP:
ty += 0.1;
elif event.key == pygame.K_DOWN:
ty -= 0.1;
elif event.key == pygame.K_SPACE:
print(camera);
elif event.key == pygame.K_ESCAPE:
running = False;
break;
clock.tick(30);
pygame.quit();
| 34.525
| 99
| 0.565291
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 987
| 0.238233
|
73fea2fbc1c54c3ba581a8b82427643b53be014d
| 1,444
|
py
|
Python
|
manu_sawyer/src/tensorflow_model_is_gripping/grasp_example.py
|
robertocalandra/the-feeling-of-success
|
7bb895897e369ae9f5fcaeed61d401e019a9cdf1
|
[
"MIT"
] | 10
|
2018-05-31T04:57:25.000Z
|
2021-05-28T11:22:29.000Z
|
manu_sawyer/src/tensorflow_model_is_gripping/grasp_example.py
|
robertocalandra/the-feeling-of-success
|
7bb895897e369ae9f5fcaeed61d401e019a9cdf1
|
[
"MIT"
] | null | null | null |
manu_sawyer/src/tensorflow_model_is_gripping/grasp_example.py
|
robertocalandra/the-feeling-of-success
|
7bb895897e369ae9f5fcaeed61d401e019a9cdf1
|
[
"MIT"
] | 3
|
2018-05-31T05:00:08.000Z
|
2019-02-25T06:32:45.000Z
|
import grasp_net, grasp_params, h5py, aolib.img as ig, os, numpy as np, aolib.util as ut
net_pr = grasp_params.im_fulldata_v5()
net_pr = grasp_params.gel_im_fulldata_v5()
checkpoint_file = '/home/manu/ros_ws/src/manu_research/manu_sawyer/src/tensorflow_model_is_gripping/training/net.tf-6499'
gpu = '/gpu:0'
db_file = '/media/backup_disk/dataset_manu/ver2/2017-06-22/2017-06-22_212702.hdf5'
with h5py.File(db_file, 'r') as db:
pre, mid, _ = grasp_net.milestone_frames(db)
# sc = lambda x : ig.scale(x, (224, 224))
def sc(x):
""" do a center crop (helps with gelsight) """
x = ig.scale(x, (256, 256))
return ut.crop_center(x, 224)
u = ig.uncompress
crop = grasp_net.crop_kinect
inputs = dict(
gel0_pre=sc(u(db['GelSightA_image'].value[pre])),
gel1_pre=sc(u(db['GelSightB_image'].value[pre])),
gel0_post=sc(u(db['GelSightA_image'].value[mid])),
gel1_post=sc(u(db['GelSightB_image'].value[mid])),
im0_pre=sc(crop(u(db['color_image_KinectA'].value[pre]))),
im0_post=sc(crop(u(db['color_image_KinectA'].value[mid]))),
# these are probably unnecessary
depth0_pre=sc(crop(db['depth_image_KinectA'].value[pre].astype('float32'))),
depth0_post=sc(crop(db['depth_image_KinectA'].value[mid].astype('float32'))))
net = grasp_net.NetClf(net_pr, checkpoint_file, gpu)
prob = net.predict(**inputs)
print('prob = ', prob)
| 40.111111
| 121
| 0.668975
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 484
| 0.33518
|
73febbee1eb35f8161409705d1117c1808557690
| 3,683
|
py
|
Python
|
API/models/models.py
|
Rkanehisa/Stone_Projeto
|
b022cc7031ba2c3b29181df2720197ca9edc1ab3
|
[
"MIT"
] | null | null | null |
API/models/models.py
|
Rkanehisa/Stone_Projeto
|
b022cc7031ba2c3b29181df2720197ca9edc1ab3
|
[
"MIT"
] | null | null | null |
API/models/models.py
|
Rkanehisa/Stone_Projeto
|
b022cc7031ba2c3b29181df2720197ca9edc1ab3
|
[
"MIT"
] | null | null | null |
from API.db import db
from datetime import datetime
from passlib.apps import custom_app_context as pwd_context
class User(db.Model):
    __tablename__ = "users"
    # Columns and relationship inferred from usage elsewhere in this module
    # (Card.user_id references users.id; get_cards()/get_spent() expect a
    # dynamic "cards" relationship); the string lengths are assumptions.
    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    username = db.Column(db.String(255), unique=True, nullable=False)
    password = db.Column(db.String(255), nullable=False)
    limit = db.Column(db.Float, nullable=False, default=0)
    user_limit = db.Column(db.Float, nullable=False, default=0)
    cards = db.relationship("Card", back_populates="user", lazy="dynamic")
def __init__(self, username, password):
self.username = username
self.password = pwd_context.hash(password)
self.limit = 0
self.user_limit = 0
def verify_password(self, password):
return pwd_context.verify(password, self.password)
def json(self):
return {"id": self.id, "username": self.username, "password": str(self.password), "limit": self.limit,
"user limit": self.user_limit, "spent_limit": self.get_spent()}
def save_in_db(self):
db.session.add(self)
db.session.commit()
@classmethod
def get_by_id(cls, user_id):
return cls.query.filter_by(id=user_id).first()
@classmethod
def get_by_username(cls, username):
return cls.query.filter_by(username=username).first()
def get_limit(self):
return self.limit
def get_user_limit(self):
return self.user_limit
def set_limit(self, limit):
self.limit = limit
def set_user_limit(self, limit):
self.user_limit = limit
def delete(self):
db.session.delete(self)
db.session.commit()
def get_cards(self):
return self.cards.order_by(Card.spent_limit).all()
def get_spent(self):
return sum(x.spent_limit for x in self.cards.all())
class Card(db.Model):
__tablename__ = "card"
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
name = db.Column(db.String(255), nullable=False)
number = db.Column(db.String(16), nullable=False)
ccv = db.Column(db.String(3), nullable=False)
due_date = db.Column(db.DateTime, nullable=False, default=datetime.utcnow)
expiration_date = db.Column(db.DateTime, nullable=False, default=datetime.utcnow)
limit = db.Column(db.Float, nullable=False)
spent_limit = db.Column(db.Float, nullable=False)
user_id = db.Column(db.Integer, db.ForeignKey("users.id"), nullable=False)
user = db.relationship("User", back_populates="cards")
def __init__(self, username, name, number, ccv, due_date, expiration_date, limit):
# Parse strings to convert to to datetime
datetime_due_date = datetime.strptime(due_date, "%Y/%m/%d")
datetime_expiration_date = datetime.strptime(expiration_date, "%Y/%m/%d")
self.user_id = User.query.filter_by(username=username).first().id
self.name = name
        self.number = number
self.ccv = ccv
self.due_date = datetime_due_date
self.expiration_date = datetime_expiration_date
self.limit = limit
self.spent_limit = 0
def json(self):
return {"id": self.id, "name": self.name, "number": self.number, "ccv": self.ccv,
"due_date": str(self.due_date), "expiration_date": str(self.expiration_date), "limit": self.limit,
"spent limit": self.spent_limit, "user_id": self.user_id}
def save_in_db(self):
db.session.add(self)
db.session.commit()
@classmethod
def get_by_number(cls, number):
return cls.query.filter_by(number=number).first()
def get_limit(self):
return self.limit
def get_spent_limit(self):
return self.spent_limit
def set_spent_limit(self, spent_limit):
self.spent_limit = spent_limit
def delete(self):
user = User.query.filter_by(id=self.user_id).first()
user.set_limit(user.limit-self.limit)
db.session.delete(self)
db.session.commit()
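# A minimal usage sketch, assuming an application context with the Flask-SQLAlchemy
# "db" from API.db already configured (the values below are placeholders):
#
#     user = User("alice", "s3cret")
#     user.save_in_db()
#     card = Card("alice", "Visa", "4111111111111111", "123",
#                 "2024/01/15", "2027/01/01", limit=1000)
#     card.save_in_db()
#     print(user.json(), card.json())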
| 31.211864
| 114
| 0.65653
| 3,564
| 0.967689
| 0
| 0
| 320
| 0.086886
| 0
| 0
| 232
| 0.062992
|
73fecce467712a52b8aaf68f72a88091d6f9da83
| 418
|
py
|
Python
|
src/TestDice.py
|
Yamanama/CodeMonkeyApplication
|
4dc24016b96dbed5b8e833d5248dd76d1f3dfc08
|
[
"MIT"
] | null | null | null |
src/TestDice.py
|
Yamanama/CodeMonkeyApplication
|
4dc24016b96dbed5b8e833d5248dd76d1f3dfc08
|
[
"MIT"
] | null | null | null |
src/TestDice.py
|
Yamanama/CodeMonkeyApplication
|
4dc24016b96dbed5b8e833d5248dd76d1f3dfc08
|
[
"MIT"
] | null | null | null |
import unittest
from Dice import Dice
class TestDice(unittest.TestCase):
def setUp(self):
self.sides = 8
self.dice = Dice(self.sides)
def test_roll(self):
for i in range(1000):
self.assertLessEqual(self.dice.roll(), self.sides)
def test_error(self):
self.assertRaises(ValueError, Dice, 0)
if __name__ == '__main__':  # pragma: no cover
unittest.main()
| 19.904762
| 62
| 0.636364
| 309
| 0.739234
| 0
| 0
| 0
| 0
| 0
| 0
| 27
| 0.064593
|
73fef45c289e3867a6d35ff55ed2c6e15b25c65c
| 16,694
|
py
|
Python
|
accProcess.py
|
CASSON-LAB/BiobankActivityCSF
|
d3e6d7283aed72fa329da1e045fa49cc7e6b2e9c
|
[
"BSD-2-Clause"
] | 3
|
2020-08-03T12:08:34.000Z
|
2021-03-16T11:31:01.000Z
|
accProcess.py
|
CASSON-LAB/BiobankActivityCSF
|
d3e6d7283aed72fa329da1e045fa49cc7e6b2e9c
|
[
"BSD-2-Clause"
] | null | null | null |
accProcess.py
|
CASSON-LAB/BiobankActivityCSF
|
d3e6d7283aed72fa329da1e045fa49cc7e6b2e9c
|
[
"BSD-2-Clause"
] | 1
|
2020-08-05T16:13:02.000Z
|
2020-08-05T16:13:02.000Z
|
"""Command line tool to extract meaningful health info from accelerometer data."""
import accelerometer.accUtils
import argparse
import collections
import datetime
import accelerometer.device
import json
import os
import accelerometer.summariseEpoch
import sys
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from filter_data import data_filter
from import_npy import import_npy
def main():
"""
Application entry point responsible for parsing command line requests
"""
parser = argparse.ArgumentParser(
description="""A tool to extract physical activity information from
raw accelerometer files.""", add_help=True
)
# required
parser.add_argument('rawFile', metavar='input file', type=str,
help="""the <.cwa/.cwa.gz> file to process
(e.g. sample.cwa.gz). If the file path contains
spaces,it must be enclosed in quote marks
(e.g. \"../My Documents/sample.cwa\")
""")
    # optional inputs
parser.add_argument('--startTime',
metavar='e.g. 1991-01-01T23:59', default=None,
type=str2date, help="""removes data before this
time in the final analysis
(default : %(default)s)""")
parser.add_argument('--endTime',
metavar='e.g 1991-01-01T23:59', default=None,
type=str2date, help="""removes data after this
time in the final analysis
(default : %(default)s)""")
parser.add_argument('--timeSeriesDateColumn',
metavar='True/False', default=False, type=str2bool,
help="""adds a date/time column to the timeSeries
file, so acceleration and imputation values can be
compared easily. This increases output filesize
(default : %(default)s)""")
parser.add_argument('--processRawFile',
metavar='True/False', default=True, type=str2bool,
help="""False will skip processing of the .cwa file
(the epoch.csv file must already exist for this to
work) (default : %(default)s)""")
parser.add_argument('--epochPeriod',
metavar='length', default=30, type=int,
help="""length in seconds of a single epoch (default
: %(default)ss, must be an integer)""")
parser.add_argument('--sampleRate',
metavar='Hz, or samples/second', default=100,
type=int, help="""resample data to n Hz (default
                            : %(default)s Hz, must be an integer)""")
parser.add_argument('--useAbs',
metavar='useAbs', default=False, type=str2bool,
help="""use abs(VM) instead of trunc(VM)
(default : %(default)s)""")
parser.add_argument('--skipFiltering',
metavar='True/False', default=False, type=str2bool,
help="""Skip filtering stage
(default : %(default)s)""")
# calibration parameters
parser.add_argument('--skipCalibration',
metavar='True/False', default=False, type=str2bool,
help="""skip calibration? (default : %(default)s)""")
parser.add_argument('--calOffset',
metavar=('x', 'y', 'z'),default=[0.0, 0.0, 0.0],
type=float, nargs=3,
help="""accelerometer calibration offset (default :
%(default)s)""")
parser.add_argument('--calSlope',
metavar=('x', 'y', 'z'), default=[1.0, 1.0, 1.0],
type=float, nargs=3,
help="""accelerometer calibration slope linking
offset to temperature (default : %(default)s)""")
parser.add_argument('--calTemp',
metavar=('x', 'y', 'z'), default=[0.0, 0.0, 0.0],
type=float, nargs=3,
help="""mean temperature in degrees Celsius of
stationary data for calibration
(default : %(default)s)""")
parser.add_argument('--meanTemp',
metavar="temp", default=20.0, type=float,
help="""mean calibration temperature in degrees
Celsius (default : %(default)s)""")
parser.add_argument('--stationaryStd',
metavar='mg', default=13, type=int,
help="""stationary mg threshold (default
: %(default)s mg))""")
parser.add_argument('--calibrationSphereCriteria',
metavar='mg', default=0.3, type=float,
help="""calibration sphere threshold (default
: %(default)s mg))""")
# activity parameters
parser.add_argument('--mgMVPA',
metavar="mg", default=100, type=int,
help="""MVPA threshold (default : %(default)s)""")
parser.add_argument('--mgVPA',
metavar="mg", default=425, type=int,
help="""VPA threshold (default : %(default)s)""")
    # options for calling helper processes and multi-threading
parser.add_argument('--rawDataParser',
metavar="rawDataParser", default="AxivityAx3Epochs",
type=str,
help="""file containing a java program to process
raw .cwa binary file, must end with .class (omitted)
(default : %(default)s)""")
parser.add_argument('--javaHeapSpace',
metavar="amount in MB", default="", type=str,
help="""amount of heap space allocated to the java
subprocesses,useful for limiting RAM usage (default
: unlimited)""")
# activity classification arguments
parser.add_argument('--activityClassification',
metavar='True/False', default=True, type=str2bool,
help="""Use pre-trained random forest to predict
activity type
(default : %(default)s)""")
parser.add_argument('--activityModel', type=str,
default="activityModels/doherty2018.tar",
help="""trained activity model .tar file""")
parser.add_argument('--rawOutput',
metavar='True/False', default=False, type=str2bool,
help="""output calibrated and resampled raw data to
a .csv.gz file? NOTE: requires ~50MB per day.
(default : %(default)s)""")
parser.add_argument('--npyOutput',
metavar='True/False', default=True, type=str2bool,
help="""output calibrated and resampled raw data to
.npy file? NOTE: requires ~60MB per day.
(default : %(default)s)""")
parser.add_argument('--fftOutput',
metavar='True/False', default=False, type=str2bool,
help="""output FFT epochs to a .csv file? NOTE:
requires ~0.1GB per day. (default : %(default)s)""")
# optional outputs
parser.add_argument('--outputFolder', metavar='filename',default="",
help="""folder for all of the output files, \
unless specified using other options""")
parser.add_argument('--summaryFolder', metavar='filename',default="",
help="folder for -summary.json summary stats")
parser.add_argument('--epochFolder', metavar='filename', default="",
help="""folder -epoch.csv.gz - must be an existing
file if "-processRawFile" is set to False""")
parser.add_argument('--timeSeriesFolder', metavar='filename', default="",
help="folder for -timeSeries.csv.gz file")
parser.add_argument('--nonWearFolder', metavar='filename',default="",
help="folder for -nonWearBouts.csv.gz file")
parser.add_argument('--stationaryFolder', metavar='filename', default="",
help="folder -stationaryPoints.csv.gz file")
parser.add_argument('--rawFolder', metavar='filename', default="",
help="folder for raw .csv.gz file")
parser.add_argument('--verbose',
metavar='True/False', default=False, type=str2bool,
help="""enable verbose logging? (default :
%(default)s)""")
parser.add_argument('--deleteIntermediateFiles',
metavar='True/False', default=True, type=str2bool,
help="""True will remove extra "helper" files created
by the program (default : %(default)s)""")
parser.add_argument('--intensityDistribution',
metavar='True/False', default=False, type=str2bool,
help="""Save intensity distribution
(default : %(default)s)""")
#
# check that enough command line arguments are entered
#
if len(sys.argv) < 2:
msg = "\nInvalid input, please enter at least 1 parameter, e.g."
msg += "\npython accProcess.py data/sample.cwa.gz \n"
accelerometer.accUtils.toScreen(msg)
parser.print_help()
sys.exit(-1)
processingStartTime = datetime.datetime.now()
args = parser.parse_args()
##########################
# check input/output files/dirs exist and validate input args
##########################
if args.processRawFile is False:
#! TODO: this breaks for .cwa.gz files
if len(args.rawFile.split('.')) < 2:
args.rawFile += ".cwa" # TODO edge case since we still need a name?
elif not os.path.isfile(args.rawFile):
if args.rawFile:
print("error: specified file " + args.rawFile + " does not exist. Exiting..")
else:
print("error: no file specified. Exiting..")
sys.exit(-2)
# get file extension
rawFilePath, rawFileName = os.path.split(args.rawFile)
rawFileName = rawFileName.split('.')[0] # remove any extension
# check target output folders exist
for path in [args.summaryFolder, args.nonWearFolder, args.epochFolder,
args.stationaryFolder, args.timeSeriesFolder, args.outputFolder]:
if len(path) > 0 and not os.access(path, os.F_OK):
print("error: " + path + " is not a valid directory")
sys.exit(-3)
# assign output file names
if args.outputFolder == "" and rawFilePath != "":
args.outputFolder = rawFilePath + '/'
if args.summaryFolder == "":
args.summaryFolder = args.outputFolder
if args.nonWearFolder == "":
args.nonWearFolder = args.outputFolder
if args.epochFolder == "":
args.epochFolder = args.outputFolder
if args.stationaryFolder == "":
args.stationaryFolder = args.outputFolder
if args.timeSeriesFolder == "":
args.timeSeriesFolder = args.outputFolder
if args.rawFolder == "":
args.rawFolder = args.outputFolder
args.summaryFile = args.summaryFolder + rawFileName + "-summary.json"
args.nonWearFile = args.nonWearFolder + rawFileName + "-nonWearBouts.csv.gz"
args.epochFile = args.epochFolder + rawFileName + "-epoch.csv.gz"
args.stationaryFile = args.stationaryFolder + rawFileName + "-stationaryPoints.csv"
args.tsFile = args.timeSeriesFolder + rawFileName + "-timeSeries.csv.gz"
args.rawOutputFile = args.rawFolder + rawFileName + ".csv.gz"
args.npyOutputFile = args.rawFolder + rawFileName + ".npy"
# check user specified end time is not before start time
if args.startTime and args.endTime:
if args.startTime >= args.endTime:
print("start and end time arguments are invalid!")
print("startTime:", args.startTime.strftime("%Y-%m-%dT%H:%M"))
print("endTime:", args.endTime.strftime("%Y-%m-%dT%H:%M"))
sys.exit(-4)
# print processing options to screen
    print("processing file '" + args.rawFile + "' with these arguments:\n")
for key, value in sorted(vars(args).items()):
if not (isinstance(value, str) and len(value)==0):
print(key.ljust(15), ':', value)
print("\n")
##########################
# start processing file
##########################
summary = {}
# now process the .CWA file
if args.processRawFile:
summary['file-name'] = args.rawFile
accelerometer.device.processRawFileToEpoch(args.rawFile, args.epochFile,
args.stationaryFile, summary, skipCalibration=args.skipCalibration,
stationaryStd=args.stationaryStd, xIntercept=args.calOffset[0],
yIntercept=args.calOffset[1], zIntercept=args.calOffset[2],
xSlope=args.calSlope[0], ySlope=args.calSlope[1],
zSlope=args.calSlope[2], xTemp=args.calTemp[0],
yTemp=args.calTemp[1], zTemp=args.calTemp[2],
meanTemp=args.meanTemp, rawDataParser=args.rawDataParser,
javaHeapSpace=args.javaHeapSpace, skipFiltering=args.skipFiltering,
sampleRate=args.sampleRate, epochPeriod=args.epochPeriod,
useAbs=args.useAbs, activityClassification=args.activityClassification,
rawOutput=args.rawOutput, rawOutputFile=args.rawOutputFile,
npyOutput=args.npyOutput, npyOutputFile=args.npyOutputFile,
fftOutput=args.fftOutput, startTime=args.startTime,
endTime=args.endTime, verbose=args.verbose)
print(args.rawFile)
else:
summary['file-name'] = args.epochFile
data, time = import_npy(args.rawFile)
# Place your code here
##########################
# remove helper files and close program
##########################
if args.deleteIntermediateFiles:
try:
os.remove(args.stationaryFile)
os.remove(args.epochFile)
os.remove(args.rawFile[:-4] + '.npy')
        except OSError:
accelerometer.accUtils.toScreen('could not delete helper file')
# finally, print out processing summary message
processingEndTime = datetime.datetime.now()
processingTime = (processingEndTime - processingStartTime).total_seconds()
accelerometer.accUtils.toScreen("in total, processing took " + \
str(processingTime) + " seconds")
def str2bool(v):
"""
Used to parse true/false values from the command line. E.g. "True" -> True
"""
return v.lower() in ("yes", "true", "t", "1")
def str2date(v):
"""
    Used to parse date values from the command line. E.g. "1994-11-30T12:00" -> datetime.datetime
"""
eg = "1994-11-30T12:00" # example date
if v.count("-")!=eg.count("-"):
print("ERROR: not enough dashes in date")
elif v.count("T")!=eg.count("T"):
        print("ERROR: no T separator in date")
    elif v.count(":")!=eg.count(":"):
        print("ERROR: no ':' separator in date")
elif len(v.split("-")[0])!=4:
print("ERROR: year in date must be 4 numbers")
elif len(v.split("-")[1])!=2 and len(v.split("-")[1])!=1:
print("ERROR: month in date must be 1-2 numbers")
elif len(v.split("-")[2].split("T")[0])!=2 and len(v.split("-")[2].split("T")[0])!=1:
print("ERROR: day in date must be 1-2 numbers")
else:
        return datetime.datetime.strptime(v, "%Y-%m-%dT%H:%M")  # pd.datetime is removed in newer pandas; use the stdlib datetime imported above
print("please change your input date:")
print('"'+v+'"')
print("to match the example date format:")
print('"'+eg+'"')
raise ValueError("date in incorrect format")
if __name__ == '__main__':
main() # Standard boilerplate to call the main() function to begin the program.
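# A minimal invocation sketch, using only flags defined in the parser above
# (the input path is a placeholder):
#     python accProcess.py data/sample.cwa.gz --epochPeriod 30 --sampleRate 100 --npyOutput True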
| 49.684524
| 93
| 0.547622
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 7,337
| 0.439499
|
fb3f354629384e19edefd222de21d0f75d624bfc
| 382
|
py
|
Python
|
VetsApp/views.py
|
Sabrinax3/Pet-Clinic-1
|
776955d118a46c8d4eaa74de22ea0280b82debc9
|
[
"MIT"
] | 2
|
2020-04-13T14:26:54.000Z
|
2022-01-19T01:30:25.000Z
|
VetsApp/views.py
|
Sabrinax3/Pet-Clinic-1
|
776955d118a46c8d4eaa74de22ea0280b82debc9
|
[
"MIT"
] | 2
|
2020-05-29T18:52:55.000Z
|
2020-05-30T02:06:28.000Z
|
VetsApp/views.py
|
Sabrinax3/Pet-Clinic-1
|
776955d118a46c8d4eaa74de22ea0280b82debc9
|
[
"MIT"
] | 8
|
2020-04-11T08:30:44.000Z
|
2020-05-30T03:26:13.000Z
|
from django.shortcuts import render
from .models import VetsInfoTable
# Create your views here.
def home(request):
context = {
"name": "Home"
}
return render(request, 'index.html', context)
def view_vets(request):
obj = VetsInfoTable.objects.all()
context = {
"vets_data": obj
}
return render(request, 'vets/vets.html', context)
| 15.916667
| 53
| 0.641361
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 76
| 0.198953
|
fb40883451f136e23adf11a2d1d1d175606ca586
| 542
|
py
|
Python
|
generate.py
|
IsaacPeters/Closest-Pair-of-Points
|
3c71efcbeae12b0b187117a671b782e392ea71b2
|
[
"MIT"
] | 1
|
2021-07-18T03:59:55.000Z
|
2021-07-18T03:59:55.000Z
|
generate.py
|
IsaacPeters/Closest-Pair-of-Points
|
3c71efcbeae12b0b187117a671b782e392ea71b2
|
[
"MIT"
] | null | null | null |
generate.py
|
IsaacPeters/Closest-Pair-of-Points
|
3c71efcbeae12b0b187117a671b782e392ea71b2
|
[
"MIT"
] | null | null | null |
import sys
import math
import random
# Figure out what we should name our output file, and how big it should be
if len(sys.argv) != 3: # Make sure we get a file argument, and only that
    print("Incorrect number of arguments found, should be \"generate <file> 10^<x>\"")
    sys.exit(1)  # without both arguments, the loop below would fail on sys.argv[1]/[2]
for i in range(10):
with open("./gen/%s%d" % (sys.argv[1], i), "w") as file:
for x in range(pow(10, int(sys.argv[2]))):
xNum = random.randint(1, 10000)
yNum = random.randint(1, 10000)
file.write("%d %d\n" % (xNum, yNum))
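# A minimal usage sketch: "python generate.py points 4" writes ./gen/points0
# through ./gen/points9 (the ./gen directory must already exist), each holding
# 10^4 lines of the form "<x> <y>" with coordinates drawn from 1..10000.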
| 38.714286
| 86
| 0.612546
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 222
| 0.409594
|
fb40d608ac0b102b80003c2f549912877d9e3d53
| 963
|
py
|
Python
|
wagtail/snippets/urls.py
|
brownaa/wagtail
|
c97bc56c6822eb1b6589d5c33e07f71acfc48845
|
[
"BSD-3-Clause"
] | 2
|
2021-03-18T21:41:05.000Z
|
2021-03-18T21:41:08.000Z
|
wagtail/snippets/urls.py
|
brownaa/wagtail
|
c97bc56c6822eb1b6589d5c33e07f71acfc48845
|
[
"BSD-3-Clause"
] | 13
|
2015-05-08T12:27:10.000Z
|
2020-01-23T14:45:57.000Z
|
wagtail/snippets/urls.py
|
brownaa/wagtail
|
c97bc56c6822eb1b6589d5c33e07f71acfc48845
|
[
"BSD-3-Clause"
] | 2
|
2020-09-03T20:12:32.000Z
|
2021-03-29T08:29:23.000Z
|
from django.urls import path
from wagtail.snippets.views import chooser, snippets
app_name = 'wagtailsnippets'
urlpatterns = [
path('', snippets.index, name='index'),
path('choose/', chooser.choose, name='choose_generic'),
path('choose/<slug:app_label>/<slug:model_name>/', chooser.choose, name='choose'),
path('choose/<slug:app_label>/<slug:model_name>/<str:pk>/', chooser.chosen, name='chosen'),
path('<slug:app_label>/<slug:model_name>/', snippets.list, name='list'),
path('<slug:app_label>/<slug:model_name>/add/', snippets.create, name='add'),
path('<slug:app_label>/<slug:model_name>/<str:pk>/', snippets.edit, name='edit'),
path('<slug:app_label>/<slug:model_name>/multiple/delete/', snippets.delete, name='delete-multiple'),
path('<slug:app_label>/<slug:model_name>/<str:pk>/delete/', snippets.delete, name='delete'),
path('<slug:app_label>/<slug:model_name>/<str:pk>/usage/', snippets.usage, name='usage'),
]
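# A usage sketch (assuming this URLconf is included under its "wagtailsnippets"
# app namespace): the named routes above can be reversed in the usual Django way,
# e.g. reverse('wagtailsnippets:edit', args=('blog', 'author', '1')) in Python or
# {% url 'wagtailsnippets:list' app_label model_name %} in a template.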
| 45.857143
| 105
| 0.688474
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 495
| 0.514019
|
fb413304ce562fca6f9892396c8901821a208e1e
| 1,974
|
py
|
Python
|
ding/envs/env/tests/test_env_implementation_check.py
|
jayyoung0802/DI-engine
|
efbb35ddaf184d1009291e6842fbbae09f193492
|
[
"Apache-2.0"
] | 1
|
2022-03-21T16:15:39.000Z
|
2022-03-21T16:15:39.000Z
|
ding/envs/env/tests/test_env_implementation_check.py
|
jayyoung0802/DI-engine
|
efbb35ddaf184d1009291e6842fbbae09f193492
|
[
"Apache-2.0"
] | null | null | null |
ding/envs/env/tests/test_env_implementation_check.py
|
jayyoung0802/DI-engine
|
efbb35ddaf184d1009291e6842fbbae09f193492
|
[
"Apache-2.0"
] | null | null | null |
import pytest
from easydict import EasyDict
import numpy as np
import gym
from copy import deepcopy
from ding.envs.env import check_array_space, check_different_memory, check_all, demonstrate_correct_procedure
from ding.envs.env.tests import DemoEnv
@pytest.mark.unittest
def test_an_implemented_env():
demo_env = DemoEnv({})
check_all(demo_env)
demonstrate_correct_procedure(DemoEnv)
@pytest.mark.unittest
def test_check_array_space():
seq_array = (np.array([1, 2, 3], dtype=np.int64), np.array([4., 5., 6.], dtype=np.float32))
seq_space = [gym.spaces.Box(low=0, high=10, shape=(3, ), dtype=np.int64) for _ in range(2)]
with pytest.raises(AssertionError):
check_array_space(seq_array, seq_space, 'test_sequence')
dict_array = {'a': np.array([1, 2, 3], dtype=np.int64), 'b': np.array([4., 5., 6.], dtype=np.float32)}
int_box = gym.spaces.Box(low=0, high=10, shape=(3, ), dtype=np.int64)
dict_space = {'a': deepcopy(int_box), 'b': deepcopy(int_box)}
with pytest.raises(AssertionError):
check_array_space(dict_array, dict_space, 'test_dict')
with pytest.raises(TypeError):
check_array_space(1, dict_space, 'test_type_error')
@pytest.mark.unittest
def test_check_different_memory():
int_seq = np.array([1, 2, 3], dtype=np.int64)
seq_array1 = (int_seq, np.array([4., 5., 6.], dtype=np.float32))
seq_array2 = (int_seq, np.array([4., 5., 6.], dtype=np.float32))
with pytest.raises(AssertionError):
check_different_memory(seq_array1, seq_array2, -1)
dict_array1 = {'a': np.array([4., 5., 6.], dtype=np.float32), 'b': int_seq}
dict_array2 = {'a': np.array([4., 5., 6.], dtype=np.float32), 'b': int_seq}
with pytest.raises(AssertionError):
check_different_memory(dict_array1, dict_array2, -1)
with pytest.raises(AssertionError):
check_different_memory(1, dict_array1, -1)
with pytest.raises(TypeError):
check_different_memory(1, 2, -1)
| 37.961538
| 109
| 0.691489
| 0
| 0
| 0
| 0
| 1,714
| 0.868288
| 0
| 0
| 67
| 0.033941
|
fb421f779844bb484b1c9c0a35a1b99901994f6f
| 18,474
|
py
|
Python
|
tests/evergreen/metrics/test_buildmetrics.py
|
jamesbroadhead/evergreen.py
|
08418bf53bb7cd8de8a68aa6ae0847f28e9e0a30
|
[
"Apache-2.0"
] | null | null | null |
tests/evergreen/metrics/test_buildmetrics.py
|
jamesbroadhead/evergreen.py
|
08418bf53bb7cd8de8a68aa6ae0847f28e9e0a30
|
[
"Apache-2.0"
] | null | null | null |
tests/evergreen/metrics/test_buildmetrics.py
|
jamesbroadhead/evergreen.py
|
08418bf53bb7cd8de8a68aa6ae0847f28e9e0a30
|
[
"Apache-2.0"
] | null | null | null |
# -*- encoding: utf-8 -*-
"""Unit tests for src/evergreen/metrics/buildmetrics.py."""
from __future__ import absolute_import
from unittest.mock import MagicMock
import pytest
import evergreen.metrics.buildmetrics as under_test
from evergreen.errors.exceptions import ActiveTaskMetricsException
from evergreen.task import Task
def create_mock_build(task_list=None):
mock_build = MagicMock(id="build_id")
mock_build.get_tasks.return_value = task_list if task_list else []
return mock_build
class TestBuildMetrics(object):
def test_build_metrics_empty_for_no_builds(self):
mock_build = create_mock_build()
build_metrics = under_test.BuildMetrics(mock_build).calculate()
assert build_metrics.total_tasks == 0
assert build_metrics.total_processing_time == 0
assert build_metrics.estimated_build_costs == 0
assert build_metrics.pct_tasks_undispatched == 0
assert build_metrics.pct_tasks_failed == 0
assert build_metrics.pct_tasks_timed_out == 0
assert build_metrics.pct_tasks_success == 0
assert build_metrics.total_display_tasks == 0
assert build_metrics.pct_display_tasks_success == 0
assert build_metrics.pct_display_tasks_failed == 0
assert build_metrics.pct_display_tasks_timed_out == 0
assert build_metrics.pct_display_tasks_success == 0
assert not build_metrics.create_time
assert not build_metrics.start_time
assert not build_metrics.end_time
assert not build_metrics.makespan
assert not build_metrics.wait_time
def test_various_tasks(self, sample_task):
n_tasks = 5
task_list = [Task(sample_task, None) for _ in range(n_tasks)]
mock_build = create_mock_build(task_list)
build_metrics = under_test.BuildMetrics(mock_build).calculate()
assert build_metrics.total_tasks == n_tasks
assert build_metrics.pct_tasks_success == 1
assert len(build_metrics._create_times) == n_tasks
assert len(build_metrics._start_times) == n_tasks
assert len(build_metrics._finish_times) == n_tasks
def test_adding_successful_task(self, sample_task):
sample_task["status"] = "success"
task = Task(sample_task, None)
mock_build = create_mock_build()
build_metrics = under_test.BuildMetrics(mock_build)
build_metrics._count_task(task)
build_metrics._count_display_tasks()
assert build_metrics.undispatched_count == 0
assert build_metrics.total_tasks == 1
assert len(build_metrics._start_times) == 1
assert build_metrics.failure_count == 0
assert build_metrics.success_count == 1
assert build_metrics.pct_tasks_success == 1
assert build_metrics.system_failure_count == 0
assert build_metrics.timed_out_count == 0
assert build_metrics.display_undispatched_count == 0
assert build_metrics.total_display_tasks == 1
assert build_metrics.display_failure_count == 0
assert build_metrics.display_success_count == 1
assert build_metrics.pct_display_tasks_success == 1
assert build_metrics.display_system_failure_count == 0
assert build_metrics.display_timed_out_count == 0
def test_adding_successful_generated_task(self, sample_task):
n_tasks = 2
sample_task["status"] = "success"
sample_task["generated_by"] = "foobar"
task_list = [Task(sample_task, None) for _ in range(n_tasks)]
mock_build = create_mock_build(task_list)
build_metrics = under_test.BuildMetrics(mock_build).calculate()
assert build_metrics.display_success_count == 1
assert build_metrics.total_display_tasks == 1
assert build_metrics.pct_display_tasks_success == 1
def test_adding_undispatched_task(self, sample_task):
sample_task["status"] = "undispatched"
task = Task(sample_task, None)
mock_build = create_mock_build()
build_metrics = under_test.BuildMetrics(mock_build)
build_metrics._count_task(task)
build_metrics._count_display_tasks()
assert build_metrics.undispatched_count == 1
assert build_metrics.pct_tasks_undispatched == 1
assert build_metrics.total_tasks == 1
assert len(build_metrics._start_times) == 0
assert build_metrics.display_undispatched_count == 1
assert build_metrics.pct_display_tasks_undispatched == 1
assert build_metrics.total_display_tasks == 1
def test_adding_undispatched_generated_task(self, sample_task):
n_tasks = 2
sample_task["status"] = "undispatched"
sample_task["generated_by"] = "foobar"
task_list = [Task(sample_task, None) for _ in range(n_tasks)]
mock_build = create_mock_build(task_list)
build_metrics = under_test.BuildMetrics(mock_build).calculate()
assert build_metrics.display_undispatched_count == 1
assert build_metrics.total_display_tasks == 1
assert build_metrics.pct_display_tasks_undispatched == 1
def test_adding_failed_task(self, sample_task):
sample_task["status"] = "failed"
task = Task(sample_task, None)
mock_build = create_mock_build()
build_metrics = under_test.BuildMetrics(mock_build)
build_metrics._count_task(task)
build_metrics._count_display_tasks()
assert build_metrics.undispatched_count == 0
assert build_metrics.total_tasks == 1
assert len(build_metrics._start_times) == 1
assert build_metrics.failure_count == 1
assert build_metrics.pct_tasks_failed == 1
assert build_metrics.success_count == 0
assert build_metrics.system_failure_count == 0
assert build_metrics.timed_out_count == 0
assert build_metrics.display_undispatched_count == 0
assert build_metrics.total_display_tasks == 1
assert build_metrics.display_failure_count == 1
assert build_metrics.pct_display_tasks_failed == 1
assert build_metrics.display_success_count == 0
assert build_metrics.display_system_failure_count == 0
assert build_metrics.display_timed_out_count == 0
def test_adding_failed_generated_task(self, sample_task):
n_tasks = 2
sample_task["status"] = "failed"
sample_task["generated_by"] = "foobar"
task_list = [Task(sample_task, None) for _ in range(n_tasks)]
mock_build = create_mock_build(task_list)
build_metrics = under_test.BuildMetrics(mock_build).calculate()
assert build_metrics.display_failure_count == 1
assert build_metrics.total_display_tasks == 1
assert build_metrics.pct_display_tasks_failed == 1
assert build_metrics.pct_display_tasks_system_failure == 0
assert build_metrics.pct_display_tasks_timed_out == 0
def test_adding_system_failed_task(self, sample_task):
sample_task["status"] = "failed"
sample_task["status_details"]["type"] = "system"
sample_task["status_details"]["timed_out"] = True
task = Task(sample_task, None)
mock_build = create_mock_build()
build_metrics = under_test.BuildMetrics(mock_build)
build_metrics._count_task(task)
build_metrics._count_display_tasks()
assert build_metrics.undispatched_count == 0
assert build_metrics.total_tasks == 1
assert len(build_metrics._start_times) == 1
assert build_metrics.failure_count == 1
assert build_metrics.system_failure_count == 1
assert build_metrics.pct_tasks_system_failure == 1
assert build_metrics.timed_out_count == 1
assert build_metrics.success_count == 0
assert build_metrics.display_undispatched_count == 0
assert build_metrics.total_display_tasks == 1
assert build_metrics.display_failure_count == 1
assert build_metrics.display_system_failure_count == 1
assert build_metrics.pct_display_tasks_system_failure == 1
assert build_metrics.display_timed_out_count == 1
assert build_metrics.display_success_count == 0
def test_adding_system_failed_display_task(self, sample_task):
n_tasks = 2
sample_task["status"] = "failed"
sample_task["status_details"]["type"] = "system"
sample_task["status_details"]["timed_out"] = False
sample_task["generated_by"] = "foobar"
task_list = [Task(sample_task, None) for _ in range(n_tasks)]
mock_build = create_mock_build(task_list)
build_metrics = under_test.BuildMetrics(mock_build).calculate()
assert build_metrics.display_failure_count == 1
assert build_metrics.total_display_tasks == 1
assert build_metrics.pct_display_tasks_failed == 1
assert build_metrics.pct_display_tasks_system_failure == 1
assert build_metrics.pct_display_tasks_timed_out == 0
def test_adding_timed_out_display_task(self, sample_task):
n_tasks = 2
sample_task["status"] = "failed"
sample_task["status_details"]["type"] = "system"
sample_task["status_details"]["timed_out"] = True
sample_task["generated_by"] = "foobar"
task_list = [Task(sample_task, None) for _ in range(n_tasks)]
mock_build = create_mock_build(task_list)
build_metrics = under_test.BuildMetrics(mock_build).calculate()
assert build_metrics.display_timed_out_count == 1
assert build_metrics.total_display_tasks == 1
assert build_metrics.pct_display_tasks_failed == 1
assert build_metrics.pct_display_tasks_system_failure == 0
assert build_metrics.pct_display_tasks_timed_out == 1
def test_generate_by_failure_priority(self, sample_task_list):
sample_task_list[0]["status"] = "failure"
sample_task_list[1]["status"] = "success"
sample_task_list[2]["status"] = "success"
sample_task_list[0]["generated_by"] = "foo"
sample_task_list[1]["generated_by"] = "foo"
sample_task_list[2]["generated_by"] = "foo"
mock_build = create_mock_build(
[
Task(sample_task_list[0], None),
Task(sample_task_list[1], None),
Task(sample_task_list[2], None),
]
)
build_metrics = under_test.BuildMetrics(mock_build).calculate()
assert build_metrics.success_count == 2
assert build_metrics.failure_count == 1
assert build_metrics.display_success_count == 0
assert build_metrics.display_failure_count == 1
assert build_metrics.total_display_tasks == 1
def test_generate_by_system_failure_priority(self, sample_task_list):
sample_task_list[0]["status"] = "failure"
sample_task_list[0]["status_details"]["type"] = "system"
sample_task_list[0]["status_details"]["timed_out"] = False
sample_task_list[1]["status"] = "failure"
sample_task_list[2]["status"] = "success"
sample_task_list[0]["generated_by"] = "foo"
sample_task_list[1]["generated_by"] = "foo"
sample_task_list[2]["generated_by"] = "foo"
mock_build = create_mock_build(
[
Task(sample_task_list[0], None),
Task(sample_task_list[1], None),
Task(sample_task_list[2], None),
]
)
build_metrics = under_test.BuildMetrics(mock_build).calculate()
assert build_metrics.success_count == 1
assert build_metrics.failure_count == 2
assert build_metrics.system_failure_count == 1
assert build_metrics.display_success_count == 0
assert build_metrics.display_failure_count == 1
assert build_metrics.display_system_failure_count == 1
assert build_metrics.total_display_tasks == 1
def test_generate_by_system_timeout_priority(self, sample_task_list):
sample_task_list[0]["status"] = "success"
sample_task_list[1]["status"] = "failure"
sample_task_list[1]["status_details"]["type"] = "system"
sample_task_list[1]["status_details"]["timed_out"] = True
sample_task_list[2]["status"] = "failure"
sample_task_list[0]["generated_by"] = "foo"
sample_task_list[1]["generated_by"] = "foo"
sample_task_list[2]["generated_by"] = "foo"
mock_build = create_mock_build(
[
Task(sample_task_list[0], None),
Task(sample_task_list[1], None),
Task(sample_task_list[2], None),
]
)
build_metrics = under_test.BuildMetrics(mock_build).calculate()
assert build_metrics.success_count == 1
assert build_metrics.failure_count == 2
assert build_metrics.system_failure_count == 1
assert build_metrics.timed_out_count == 1
assert build_metrics.display_failure_count == 1
assert build_metrics.display_timed_out_count == 1
assert build_metrics.display_success_count == 0
assert build_metrics.display_failure_count == 1
assert build_metrics.display_system_failure_count == 0
assert build_metrics.total_display_tasks == 1
def test_generate_by_system_undispatched_priority(self, sample_task_list):
sample_task_list[0]["status"] = "undispatched"
sample_task_list[1]["status"] = "failure"
sample_task_list[1]["status_details"]["type"] = "system"
sample_task_list[1]["status_details"]["timed_out"] = True
sample_task_list[2]["status"] = "failure"
sample_task_list[0]["generated_by"] = "foo"
sample_task_list[1]["generated_by"] = "foo"
sample_task_list[2]["generated_by"] = "foo"
mock_build = create_mock_build(
[
Task(sample_task_list[0], None),
Task(sample_task_list[1], None),
Task(sample_task_list[2], None),
]
)
build_metrics = under_test.BuildMetrics(mock_build).calculate()
assert build_metrics.undispatched_count == 1
assert build_metrics.failure_count == 2
assert build_metrics.system_failure_count == 1
assert build_metrics.timed_out_count == 1
assert build_metrics.display_success_count == 0
assert build_metrics.display_failure_count == 0
assert build_metrics.display_system_failure_count == 0
assert build_metrics.display_timed_out_count == 0
assert build_metrics.display_undispatched_count == 1
def test_adding_task_without_ingest_time(self, sample_task):
del sample_task["ingest_time"]
task = Task(sample_task, None)
mock_build = create_mock_build()
build_metrics = under_test.BuildMetrics(mock_build)
build_metrics._count_task(task)
build_metrics._count_display_tasks()
assert build_metrics.undispatched_count == 0
assert build_metrics.total_tasks == 1
assert len(build_metrics._start_times) == 1
assert build_metrics.failure_count == 0
assert build_metrics.success_count == 1
assert build_metrics.system_failure_count == 0
assert build_metrics.timed_out_count == 0
assert build_metrics.display_undispatched_count == 0
assert build_metrics.total_display_tasks == 1
assert build_metrics.display_failure_count == 0
assert build_metrics.display_success_count == 1
assert build_metrics.display_system_failure_count == 0
assert build_metrics.display_timed_out_count == 0
def test_dict_format(self, sample_task):
task = Task(sample_task, None)
mock_build = create_mock_build()
build_metrics = under_test.BuildMetrics(mock_build)
build_metrics._count_task(task)
build_metrics._count_display_tasks()
bm_dict = build_metrics.as_dict()
assert bm_dict["build"] == mock_build.id
assert "tasks" not in bm_dict
def test_dict_format_with_children(self, sample_task):
task = Task(sample_task, None)
mock_build = create_mock_build([task])
build_metrics = under_test.BuildMetrics(mock_build)
build_metrics.calculate()
bm_dict = build_metrics.as_dict(include_children=True)
assert bm_dict["build"] == mock_build.id
assert len(bm_dict["tasks"]) == 1
assert bm_dict["tasks"][0]["task_id"] == task.task_id
def test_string_format(self, sample_task):
task = Task(sample_task, None)
mock_build = create_mock_build([task])
build_metrics = under_test.BuildMetrics(mock_build)
build_metrics._count_task(task)
build_metrics._count_display_tasks()
assert mock_build.id in str(build_metrics)
def test_display_tasks_are_filtered(self, sample_task):
sample_task["display_only"] = True
task = Task(sample_task, None)
mock_build = create_mock_build([task])
build_metrics = under_test.BuildMetrics(mock_build)
build_metrics.calculate()
assert len(build_metrics.task_list) == 1
assert build_metrics.total_tasks == 0
def test_task_filter(self, sample_task):
n_tasks = 5
task_list = [Task(sample_task, None) for _ in range(n_tasks)]
sample_task_2 = sample_task.copy()
filter_task_name = "filter me"
sample_task_2["display_name"] = filter_task_name
task_list_2 = [Task(sample_task_2, None) for _ in range(n_tasks)]
mock_build = create_mock_build(task_list + task_list_2)
build_metrics = under_test.BuildMetrics(mock_build)
build_metrics.calculate(lambda t: filter_task_name not in t.display_name)
assert build_metrics.total_tasks == n_tasks
def test_in_progress_task(self, sample_task):
sample_task["finish_time"] = None
task = Task(sample_task, None)
mock_build = create_mock_build()
build_metrics = under_test.BuildMetrics(mock_build)
with pytest.raises(ActiveTaskMetricsException):
build_metrics._count_task(task)
class TestPercentTasks(object):
def test_percent_of_zero_tasks_is_zero(self):
build_metrics = under_test.BuildMetrics("build")
assert build_metrics._percent_tasks(5) == 0
def test_percent_of_non_zero_works(self):
build_metrics = under_test.BuildMetrics("build")
build_metrics.success_count = 10
assert build_metrics._percent_tasks(5) == 0.5
| 41.053333
| 81
| 0.691783
| 17,963
| 0.97234
| 0
| 0
| 0
| 0
| 0
| 0
| 1,264
| 0.06842
|
fb427d81000b8506419aa7780e97ffc579670c50
| 813
|
py
|
Python
|
tools_d2/convert-pretrain-model-to-d2.py
|
nguyentritai2906/panoptic-deeplab
|
6bbe17801488a417ed9586acab285ee6a05d68cb
|
[
"Apache-2.0"
] | 506
|
2020-06-12T01:07:56.000Z
|
2022-03-26T00:56:52.000Z
|
tools_d2/convert-pretrain-model-to-d2.py
|
MrMa-T/panoptic-deeplab
|
cf8e20bbbf1cf540c7593434b965a93c4a889890
|
[
"Apache-2.0"
] | 85
|
2020-06-12T04:51:31.000Z
|
2022-03-23T16:19:44.000Z
|
tools_d2/convert-pretrain-model-to-d2.py
|
MrMa-T/panoptic-deeplab
|
cf8e20bbbf1cf540c7593434b965a93c4a889890
|
[
"Apache-2.0"
] | 102
|
2020-06-12T06:45:44.000Z
|
2022-03-22T14:03:04.000Z
|
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import pickle as pkl
import sys
import torch
"""
Usage:
# download your pretrained model:
wget https://github.com/LikeLy-Journey/SegmenTron/releases/download/v0.1.0/tf-xception65-270e81cf.pth -O x65.pth
# run the conversion
./convert-pretrain-model-to-d2.py x65.pth x65.pkl
# Then, use x65.pkl with the following changes in config:
MODEL:
WEIGHTS: "/path/to/x65.pkl"
PIXEL_MEAN: [128, 128, 128]
PIXEL_STD: [128, 128, 128]
INPUT:
FORMAT: "RGB"
"""
if __name__ == "__main__":
input = sys.argv[1]
obj = torch.load(input, map_location="cpu")
res = {"model": obj, "__author__": "third_party", "matching_heuristics": True}
with open(sys.argv[2], "wb") as f:
pkl.dump(res, f)
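# --- Added example (hedged): not part of the original script ---
# A minimal sanity-check sketch, assuming the converted checkpoint is a flat
# state dict: reload the pickle written above and report how many tensors it
# holds. "path" stands for whatever was passed as sys.argv[2] above.
def _check_converted_pickle(path):
    with open(path, "rb") as f:
        roundtrip = pkl.load(f)
    print("{} holds {} tensors".format(path, len(roundtrip["model"])))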
| 24.636364
| 114
| 0.681427
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 585
| 0.719557
|
fb4386dbb22354c808375368f8d1474f3605a181
| 2,953
|
py
|
Python
|
test_geo.py
|
OrrEos/IA-Flood-Warning-Project
|
af485560050c6e387aabf0bd7500b13de62f810f
|
[
"MIT"
] | null | null | null |
test_geo.py
|
OrrEos/IA-Flood-Warning-Project
|
af485560050c6e387aabf0bd7500b13de62f810f
|
[
"MIT"
] | null | null | null |
test_geo.py
|
OrrEos/IA-Flood-Warning-Project
|
af485560050c6e387aabf0bd7500b13de62f810f
|
[
"MIT"
] | 1
|
2022-01-24T09:57:24.000Z
|
2022-01-24T09:57:24.000Z
|
import random
from floodsystem.utils import sorted_by_key # noqa
from floodsystem.geo import stations_by_distance, stations_within_radius, rivers_with_station, stations_by_river, rivers_by_station_number
from floodsystem.stationdata import build_station_list
'''def test_geo():
#Task 1A
#does the function give an output & if it's a list:
out = build_station_list()
assert type(out) == list
#checking that list is a reasonable length
assert len(out) >1700
assert len(out) <2500'''
#Task 1B
def test_stations_by_distance():
stations = build_station_list()
p = (52.2053, 0.1218)#putting in Cambridge value from task
out = stations_by_distance(stations, p)
#check that list is returned
assert type(out) == list
#check that items are tuples
assert type(out[0]) == tuple
#check that first station is Jesus Lock
assert out[0] == ('Cambridge Jesus Lock', 'Cambridge', 0.840237595667494)
#check that furthest station is Penberth
assert out[-1] == ('Penberth', 'Penberth', 467.53431870130544)
#Task 1C
def test_stations_within_radius():
stations = build_station_list()
out = stations_within_radius(stations, (52.2053, 0.1218), 10)
#check that list is returned
assert type(out) == list
#checking first value, which is checking the sorting, too
assert out[0] == 'Bin Brook'
#checking length of list
assert len(out) == 11
#Task 1D
def test_rivers_with_station():
stations = build_station_list()
out = rivers_with_station(stations)
#check that out is a set
assert type(out) == set
#check that each item in list is a string
out = list(out)
assert type(out[0]) == str
#check that out is of a reasonable length - no. of stations might change in the future?
assert len(out) > 900
assert len(out) < 1000
#checking for duplicates
#if set(out) is shorter than list (out), then there are duplicates
assert len(out) == len(set(out))
def test_stations_by_rivers():
stations = build_station_list()
out = stations_by_river(stations)
#check that output is a dictionary
assert type(out) == dict
#check number of stations listed for Aire:
aire = out['River Aire']
assert len(aire) ==24
#check that it's a list
assert type(out['River Thames']) == list
#Task1E
def test_rivers_by_station_number():
stations = build_station_list()
N = random.randint(0,9)
out = rivers_by_station_number(stations, N)
#check that output is a list
assert type(out)==list
#check that items are tuples
#assert(type(out[0])) == tuple
#check that list is of length N
assert len(out) == N
#checking that list is sorted by number of stations
ret = sorted_by_key(out, 1, reverse=True)  # sorting the list by decreasing number of stations (index 1 of each tuple)
assert ret == out
| 29.828283
| 137
| 0.679986
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,256
| 0.42533
|
fb43e8774473bcb7c7cfe41180999e085bda6d33
| 724
|
py
|
Python
|
app.py
|
aws-samples/aws-cdk-service-catalog-pipeline
|
e6e3eab0dec3fc41e7621971453131fd0d5b6e32
|
[
"MIT-0"
] | null | null | null |
app.py
|
aws-samples/aws-cdk-service-catalog-pipeline
|
e6e3eab0dec3fc41e7621971453131fd0d5b6e32
|
[
"MIT-0"
] | null | null | null |
app.py
|
aws-samples/aws-cdk-service-catalog-pipeline
|
e6e3eab0dec3fc41e7621971453131fd0d5b6e32
|
[
"MIT-0"
] | null | null | null |
#!/usr/bin/env python3
import os
import aws_cdk as cdk
# For consistency with TypeScript code, `cdk` is the preferred import name for
# the CDK's core module. The following line also imports it as `core` for use
# with examples from the CDK Developer's Guide, which are in the process of
# being updated to use `cdk`. You may delete this import if you don't need it.
from cdk_pipelines.cdk_pipelines import CdkPipelineStack
app = cdk.App()
CdkPipelineStack(app, "AWSomeServiceCatalogPipeline",
description="CI/CD CDK Pipelines for Service Catalog Example",
env={
'region': app.node.try_get_context("region"),
'account': app.node.try_get_context("pipeline_account")
}
)
app.synth()
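# --- Added example (hedged): not part of the original app ---
# The try_get_context() calls above expect "region" and "pipeline_account" to be
# supplied via cdk.json or the -c/--context flag; the values below are placeholders.
#
#   cdk synth -c region=us-east-1 -c pipeline_account=111111111111
#
# or, equivalently, in cdk.json:
#
#   {"context": {"region": "us-east-1", "pipeline_account": "111111111111"}}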
| 31.478261
| 79
| 0.727901
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 454
| 0.627072
|
fb4452119142d00f8ea5508e610548a9fa55bde5
| 1,565
|
py
|
Python
|
Generator/Sheet3/PDF.py
|
trngb/watools
|
57b9074d59d856886675aa26014bfd6673d5da76
|
[
"Apache-2.0"
] | 11
|
2018-09-25T08:58:26.000Z
|
2021-02-13T18:58:05.000Z
|
Generator/Sheet3/PDF.py
|
trngbich/watools
|
57b9074d59d856886675aa26014bfd6673d5da76
|
[
"Apache-2.0"
] | 1
|
2020-07-03T02:36:41.000Z
|
2021-03-21T22:20:47.000Z
|
Generator/Sheet3/PDF.py
|
trngbich/watools
|
57b9074d59d856886675aa26014bfd6673d5da76
|
[
"Apache-2.0"
] | 16
|
2018-09-28T22:55:11.000Z
|
2021-02-22T13:03:56.000Z
|
# -*- coding: utf-8 -*-
"""
Authors: Tim Hessels
UNESCO-IHE 2017
Contact: t.hessels@unesco-ihe.org
Repository: https://github.com/wateraccounting/wa
Module: Generator/Sheet3
"""
import os
def Create(Dir_Basin, Basin, Simulation, Dir_Basin_CSV_a, Dir_Basin_CSV_b):
"""
This function creates the monthly and yearly sheet 3 in PDF format, based on the CSV files.
Parameters
----------
Dir_Basin : str
Path to all the output data of the Basin
Basin : str
Name of the basin
Simulation : int
Defines the simulation
Dir_Basin_CSV_a : str
Data path pointing to the CSV output files for sheet a
Dir_Basin_CSV_b : str
Data path pointing to the CSV output files for sheet b
"""
# import wa module
from watools.Sheets import create_sheet3
# Create output folder for PDF files
Dir_Basin_PDF = os.path.join(Dir_Basin, "Simulations", "Simulation_%d" %Simulation, "PDF")
if not os.path.exists(Dir_Basin_PDF):
os.mkdir(Dir_Basin_PDF)
# Create output filename for PDFs
FileName_Splitted = Dir_Basin_CSV_a.split('_')
Year = str(FileName_Splitted[-1].split('.')[0])
outFile_a = os.path.join(Dir_Basin_PDF,'Sheet3a_Sim%s_%s_%s.pdf' %(Simulation, Basin, Year))
outFile_b = os.path.join(Dir_Basin_PDF,'Sheet3b_Sim%s_%s_%s.pdf' %(Simulation, Basin, Year))
# Create PDFs
sheet3a_fh, sheet3b_fh = create_sheet3(Basin, str(Year), ['km3/year', 'kg/ha/year', 'kg/m3'], [Dir_Basin_CSV_a, Dir_Basin_CSV_b], [outFile_a, outFile_b])
return()
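# --- Added example (hedged): not part of the original module ---
# Illustrative call only: the paths and basin name are placeholders, and the CSV
# names are assumed to end in "_<year>.csv" since Create() parses the year from
# the filename above.
def _example_create_sheet3_pdfs():
    Create(Dir_Basin="/data/ExampleBasin", Basin="ExampleBasin", Simulation=1,
           Dir_Basin_CSV_a="/data/ExampleBasin/Sheet3a_ExampleBasin_2010.csv",
           Dir_Basin_CSV_b="/data/ExampleBasin/Sheet3b_ExampleBasin_2010.csv")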
| 33.297872
| 157
| 0.681789
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 882
| 0.563578
|
fb446c8a40864dcf38289e8b379abbb25374263e
| 2,481
|
py
|
Python
|
bonita/commands/user.py
|
dantebarba/bonita-cli
|
f750a6a1ff802e5197644b2363aea406bf29b6bf
|
[
"WTFPL"
] | 2
|
2017-09-02T08:05:03.000Z
|
2018-09-17T13:48:03.000Z
|
bonita/commands/user.py
|
dantebarba/bonita-cli
|
f750a6a1ff802e5197644b2363aea406bf29b6bf
|
[
"WTFPL"
] | null | null | null |
bonita/commands/user.py
|
dantebarba/bonita-cli
|
f750a6a1ff802e5197644b2363aea406bf29b6bf
|
[
"WTFPL"
] | null | null | null |
"""The user command."""
from json import dumps
from .base import Base
from bonita.api.bonita_client import BonitaClient
class User(Base):
"""Manage user"""
def run(self):
# bonita process [deploy <filename_on_server>|get <process_id>|enable <process_id>|disable <process_id>]
#print('You supplied the following options:', dumps(self.options, indent=2, sort_keys=True))
self.bonita_client = BonitaClient(self.loadConfiguration())
if self.hasOption('add'):
self.add()
# if self.hasOption('update'):
# self.add()
elif self.hasOption('get'):
self.get()
elif self.hasOption('remove'):
self.remove()
elif self.hasOption('enable'):
self.enable()
elif self.hasOption('disable'):
self.disable()
else:
print('Nothing to do.')
def add(self):
payload = {
'userName': self.getOption("<login>"),
'password': self.getOption("<password>"),
'password_confirm': self.getOption("<password>"),
'icon': self.getOption("<icon>", ""),
'firstName': self.getOption("<firstName>", ""),
'lastName': self.getOption("<lastName>", ""),
'title': self.getOption("<title>", ""),
'job_title': self.getOption("<job_title>", ""),
'manager_id': self.getOption("<manager_id>", 0)
}
rc, datas = self.bonita_client.addUser(payload)
self.processResults(rc, datas)
def get(self):
if self.hasOption('<user_id>'):
user_id = self.options['<user_id>']
rc, datas = self.bonita_client.getUser(user_id)
self.processResults(rc, datas)
else:
rc, datas = self.bonita_client.searchUsers({
'p': 0,
'c': 20
})
self.processResults(rc, datas)
def remove(self):
if self.hasOption('<user_id>'):
user_id = self.options['<user_id>']
rc, datas = self.bonita_client.deleteUser(user_id)
self.processResults(rc, datas)
def enable(self):
user_id = self.options['<user_id>']
rc, datas = self.bonita_client.enableUser(user_id)
self.processResults(rc, datas)
def disable(self):
user_id = self.options['<user_id>']
rc, datas = self.bonita_client.disableUser(user_id)
self.processResults(rc, datas)
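# --- Added example (hedged): not part of the original module ---
# A minimal sketch of the docopt-style options the "add" branch reads; the keys
# are inferred from add() above, the values are placeholders, and passing the
# dict straight to User() assumes the Base command stores it as self.options.
def _example_add_user():
    options = {
        'add': True,
        '<login>': 'example.user',
        '<password>': 'changeme',
        '<firstName>': 'Example',
        '<lastName>': 'User',
    }
    User(options).run()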
| 33.986301
| 112
| 0.563886
| 2,357
| 0.95002
| 0
| 0
| 0
| 0
| 0
| 0
| 611
| 0.246272
|
fb4520cbe2999728bc66894639cfa8a36d53fb16
| 1,809
|
py
|
Python
|
pyActionRec/action_flow.py
|
Xiatian-Zhu/anet2016_cuhk
|
a0df08cbbe65013e9a259d5412c33a99c2c84127
|
[
"BSD-2-Clause"
] | 253
|
2016-07-01T22:57:55.000Z
|
2022-03-01T10:59:31.000Z
|
pyActionRec/action_flow.py
|
Xiatian-Zhu/anet2016_cuhk
|
a0df08cbbe65013e9a259d5412c33a99c2c84127
|
[
"BSD-2-Clause"
] | 39
|
2016-08-31T08:42:24.000Z
|
2021-12-11T06:56:47.000Z
|
pyActionRec/action_flow.py
|
Xiatian-Zhu/anet2016_cuhk
|
a0df08cbbe65013e9a259d5412c33a99c2c84127
|
[
"BSD-2-Clause"
] | 101
|
2016-07-01T22:57:57.000Z
|
2022-03-08T07:26:53.000Z
|
from config import ANET_CFG
import sys
sys.path.append(ANET_CFG.DENSE_FLOW_ROOT+'/build')
from libpydenseflow import TVL1FlowExtractor
import action_caffe
import numpy as np
class FlowExtractor(object):
def __init__(self, dev_id, bound=20):
TVL1FlowExtractor.set_device(dev_id)
self._et = TVL1FlowExtractor(bound)
def extract_flow(self, frame_list, new_size=None):
"""
This function extracts the optical flow and interleaves the x and y channels
:param frame_list:
:return:
"""
frame_size = frame_list[0].shape[:2]
rst = self._et.extract_flow([x.tostring() for x in frame_list], frame_size[1], frame_size[0])
n_out = len(rst)
if new_size is None:
ret = np.zeros((n_out*2, frame_size[0], frame_size[1]))
for i in xrange(n_out):
ret[2*i, :] = np.fromstring(rst[i][0], dtype='uint8').reshape(frame_size)
ret[2*i+1, :] = np.fromstring(rst[i][1], dtype='uint8').reshape(frame_size)
else:
import cv2
ret = np.zeros((n_out*2, new_size[1], new_size[0]))
for i in xrange(n_out):
ret[2*i, :] = cv2.resize(np.fromstring(rst[i][0], dtype='uint8').reshape(frame_size), new_size)
ret[2*i+1, :] = cv2.resize(np.fromstring(rst[i][1], dtype='uint8').reshape(frame_size), new_size)
return ret
if __name__ == "__main__":
import cv2
im1 = cv2.imread('../data/img_1.jpg')
im2 = cv2.imread('../data/img_2.jpg')
f = FlowExtractor(0)
flow_frames = f.extract_flow([im1, im2])
from pylab import *
plt.figure()
plt.imshow(flow_frames[0])
plt.figure()
plt.imshow(flow_frames[1])
plt.figure()
plt.imshow(im1)
plt.show()
print flow_frames
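# --- Added note (hedged): not part of the original module ---
# extract_flow() interleaves the x/y flow channels: assuming the extractor
# returns one flow pair per consecutive frame pair, the result is stacked as an
# array of shape (2*n_pairs, H, W), or (2*n_pairs, new_h, new_w) when new_size
# is given, with x flow at even indices and y flow at odd indices.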
| 30.15
| 113
| 0.611388
| 1,234
| 0.682145
| 0
| 0
| 0
| 0
| 0
| 0
| 223
| 0.123273
|
fb4743a6ee0568d1f98e6dec89a2138670b26a6f
| 9,921
|
py
|
Python
|
aqme/qdesc.py
|
patonlab/aqme
|
080d8e85ee905718ddf78f7fdee2ee308a293ad1
|
[
"MIT"
] | null | null | null |
aqme/qdesc.py
|
patonlab/aqme
|
080d8e85ee905718ddf78f7fdee2ee308a293ad1
|
[
"MIT"
] | null | null | null |
aqme/qdesc.py
|
patonlab/aqme
|
080d8e85ee905718ddf78f7fdee2ee308a293ad1
|
[
"MIT"
] | null | null | null |
#####################################################.
# This file stores all the functions #
# used for generating all parameters #
#####################################################.
from rdkit.Chem import AllChem as Chem
from rdkit.Chem import rdMolTransforms
import os
import pandas as pd
from aqme.csearch import getDihedralMatches
def get_data(rdkit_mols,min_mols,dft_mols,lot,bs,name_mol,args,type_csearch,type_min,w_dir_initial):
geom_data = pd.DataFrame()
for j, mol_j in enumerate(rdkit_mols):
name = mol_j.GetProp('_Name')
name_dft= '_'.join(mol_j.GetProp('_Name').split())+'_'+type_min
geom_data.at[j,'Name'] = name
if len(args.dihedral) != 0:
for d,dh in enumerate(args.dihedral):
dihedral_rdkit = rdMolTransforms.GetDihedralDeg(mol_j.GetConformer(),dh[0],dh[1],dh[2],dh[3])
geom_data.at[j,args.geom_par_name+'-Dihedral-'+type_csearch+'-'+str(dh[0])+'-'+str(dh[1])+'-'+str(dh[2])+'-'+str(dh[3])] = dihedral_rdkit
if len(args.angle) != 0:
for a,an in enumerate(args.angle):
angle_rdkit = rdMolTransforms.GetAngleDeg(mol_j.GetConformer(),an[0],an[1],an[2])
geom_data.at[j,args.geom_par_name+'-Angle-'+type_csearch+'-'+str(an[0])+'-'+str(an[1])+'-'+str(an[2])] = angle_rdkit
if len(args.bond) != 0:
for b,bd in enumerate(args.bond):
bond_rdkit = rdMolTransforms.GetBondLength(mol_j.GetConformer(),bd[0],bd[1])
geom_data.at[j,args.geom_par_name+'-Bond-'+type_csearch+'-'+str(bd[0])+'-'+str(bd[1])] = bond_rdkit
if min_mols is not None:
if type_min =='ani' or type_min=='xtb':
for i, mol_i in enumerate(min_mols):
if mol_i.GetProp('_Name') == name+' '+type_min:
if len(args.dihedral) != 0:
for d,dh in enumerate(args.dihedral):
dihedral_min = rdMolTransforms.GetDihedralDeg(mol_i.GetConformer(),dh[0],dh[1],dh[2],dh[3])
geom_data.at[j,args.geom_par_name+'-Dihedral-'+type_min+'-'+str(dh[0])+'-'+str(dh[1])+'-'+str(dh[2])+'-'+str(dh[3])] = dihedral_min
if len(args.angle) != 0:
for a,an in enumerate(args.angle):
angle_min = rdMolTransforms.GetAngleDeg(mol_i.GetConformer(),an[0],an[1],an[2])
geom_data.at[j,args.geom_par_name+'-Angle-'+type_min+'-'+str(an[0])+'-'+str(an[1])+'-'+str(an[2])] = angle_min
if len(args.bond) != 0:
for b,bd in enumerate(args.bond):
bond_min = rdMolTransforms.GetBondLength(mol_i.GetConformer(),bd[0],bd[1])
geom_data.at[j,args.geom_par_name+'-Bond-'+type_min+'-'+str(bd[0])+'-'+str(bd[1])] = bond_min
if dft_mols is not None:
if type_min =='ani' or type_min=='xtb':
for i, mol_i in enumerate(dft_mols):
if mol_i.GetProp('_Name').split('/')[-1].split('.log')[0] == name_dft:
if len(args.dihedral) != 0:
for d,dh in enumerate(args.dihedral):
dihedral_min = rdMolTransforms.GetDihedralDeg(mol_i.GetConformer(),dh[0],dh[1],dh[2],dh[3])
geom_data.at[j,args.geom_par_name+'-Dihedral-'+lot+'-'+bs+'-'+str(dh[0])+'-'+str(dh[1])+'-'+str(dh[2])+'-'+str(dh[3])] = dihedral_min
if len(args.angle) != 0:
for a,an in enumerate(args.angle):
angle_min = rdMolTransforms.GetAngleDeg(mol_i.GetConformer(),an[0],an[1],an[2])
geom_data.at[j,args.geom_par_name+'-Angle-'+lot+'-'+bs+'-'+str(an[0])+'-'+str(an[1])+'-'+str(an[2])] = angle_min
if len(args.bond) != 0:
for b,bd in enumerate(args.bond):
bond_min = rdMolTransforms.GetBondLength(mol_i.GetConformer(),bd[0],bd[1])
geom_data.at[j,args.geom_par_name+'-Bond-'+lot+'-'+bs+'-'+str(bd[0])+'-'+str(bd[1])] = bond_min
return geom_data
def calculate_parameters(sdf_rdkit,sdf_ani,sdf_xtb,qm_files,args,log,w_dir_initial,name_mol,lot,bs):
#creating folder for all molecules to write geom parameter
folder = w_dir_initial + '/QSTAT/geom_parameters'
try:
os.makedirs(folder)
os.chdir(folder)
except OSError:
if os.path.isdir(folder):
os.chdir(folder)
else:
raise
#get mol objects
dft_mols= []
rdkit_mols = Chem.SDMolSupplier(sdf_rdkit, removeHs=False)
if args.rot_dihedral:
args.dihedral = getDihedralMatches(rdkit_mols[0], args.heavyonly,log)
if sdf_ani is not None:
ani_mols = Chem.SDMolSupplier(sdf_ani, removeHs=False)
if sdf_xtb is not None:
xtb_mols = Chem.SDMolSupplier(sdf_xtb, removeHs=False)
ob_compat = True
try:
import openbabel as ob
except (ModuleNotFoundError,AttributeError):
ob_compat = False
log.write('\nx Open Babel is not installed correctly, it is not possible to get molecular descriptors')
if ob_compat:
obConversion = ob.OBConversion()
obConversion.SetInAndOutFormats("log", "mol")
ob_mol = ob.OBMol()
for file in qm_files:
if str(bs).find('/') > -1:
obConversion.ReadFile(ob_mol, args.path + str(lot) + '-' + str(bs).split('/')[0] +'/success/output_files/'+file)
obConversion.WriteFile(ob_mol, args.path + str(lot) + '-' + str(bs).split('/')[0] +'/success/output_files/'+file.split('.')[0]+'.mol')
obConversion.CloseOutFile()
dft_mols.append(Chem.MolFromMolFile(args.path + str(lot) + '-' + str(bs).split('/')[0] +'/success/output_files/'+file.split('.')[0]+'.mol', removeHs=False))
else:
obConversion.ReadFile(ob_mol, args.path + str(lot) + '-' + str(bs) +'/success/output_files/'+file)
obConversion.WriteFile(ob_mol, args.path + str(lot) + '-' + str(bs) +'/success/output_files/'+file.split('.')[0]+'.mol')
obConversion.CloseOutFile()
dft_mols.append(Chem.MolFromMolFile(args.path + str(lot) + '-' + str(bs) +'/success/output_files/'+file.split('.')[0]+'.mol', removeHs=False))
if os.path.exists(w_dir_initial+'/CSEARCH/xtb/'+name_mol+'_xtb.sdf') and os.path.exists(w_dir_initial+'/CSEARCH/rdkit/'+name_mol+'_rdkit.sdf'):
geom_data = get_data(rdkit_mols,xtb_mols,dft_mols,lot,bs,name_mol,args,'rdkit','xtb',w_dir_initial)
geom_data.to_csv(name_mol+'-all-geom-data-with-rdkit-xtb.csv',index=False)
if os.path.exists(w_dir_initial+'/CSEARCH/ani/'+name_mol+'_ani.sdf') and os.path.exists(w_dir_initial+'/CSEARCH/rdkit/'+name_mol+'_rdkit.sdf'):
geom_data = get_data(rdkit_mols,ani_mols,dft_mols,lot,bs,name_mol,args,'rdkit','ani',w_dir_initial)
geom_data.to_csv(name_mol+'-all-geom-data-with-rdkit-ani.csv',index=False)
##########
if os.path.exists(w_dir_initial+'/CSEARCH/xtb/'+name_mol+'_xtb.sdf') and os.path.exists(w_dir_initial+'/CSEARCH/summ/'+name_mol+'_summ.sdf'):
geom_data = get_data(rdkit_mols,xtb_mols,dft_mols,lot,bs,name_mol,args,'summ','xtb',w_dir_initial)
geom_data.to_csv(name_mol+'-all-geom-data-with-summ-xtb.csv',index=False)
if os.path.exists(w_dir_initial+'/CSEARCH/ani/'+name_mol+'_ani.sdf') and os.path.exists(w_dir_initial+'/CSEARCH/summ/'+name_mol+'_summ.sdf'):
geom_data = get_data(rdkit_mols,ani_mols,dft_mols,lot,bs,name_mol,args,'summ','ani',w_dir_initial)
geom_data.to_csv(name_mol+'-all-geom-data-with-summ-ani.csv',index=False)
#############
if os.path.exists(w_dir_initial+'/CSEARCH/xtb/'+name_mol+'_xtb.sdf') and os.path.exists(w_dir_initial+'/CSEARCH/fullmonte/'+name_mol+'_fullmonte.sdf'):
geom_data = get_data(rdkit_mols,xtb_mols,dft_mols,lot,bs,name_mol,args,'fullmonte','xtb',w_dir_initial)
geom_data.to_csv(name_mol+'-all-geom-data-with-fullmonte-xtb.csv',index=False)
if os.path.exists(w_dir_initial+'/CSEARCH/ani/'+name_mol+'_ani.sdf') and os.path.exists(w_dir_initial+'/CSEARCH/fullmonte/'+name_mol+'_fullmonte.sdf'):
geom_data = get_data(rdkit_mols,ani_mols,dft_mols,lot,bs,name_mol,args,'fullmonte','ani',w_dir_initial)
geom_data.to_csv(name_mol+'-all-geom-data-with-fullmonte-ani.csv',index=False)
############
if os.path.exists(w_dir_initial+'/CSEARCH/summ/'+name_mol+'_summ.sdf') and not os.path.exists(w_dir_initial+'/CSEARCH/xtb/'+name_mol+'_xtb.sdf') and not os.path.exists(w_dir_initial+'/CSEARCH/ani/'+name_mol+'_ani.sdf'):
geom_data = get_data(rdkit_mols,None,dft_mols,lot,bs,name_mol,args,'summ',None,w_dir_initial)
geom_data.to_csv(name_mol+'-all-geom-data-with-summ.csv',index=False)
if os.path.exists(w_dir_initial+'/CSEARCH/rdkit/'+name_mol+'_rdkit.sdf') and not os.path.exists(w_dir_initial+'/CSEARCH/xtb/'+name_mol+'_xtb.sdf') and not os.path.exists(w_dir_initial+'/CSEARCH/ani/'+name_mol+'_ani.sdf') :
geom_data = get_data(rdkit_mols,None,dft_mols,lot,bs,name_mol,args,'rdkit',None,w_dir_initial)
geom_data.to_csv(name_mol+'-all-geom-data-with-rdkit.csv',index=False)
if os.path.exists(w_dir_initial+'/CSEARCH/fullmonte/'+name_mol+'_fullmonte.sdf') and not os.path.exists(w_dir_initial+'/CSEARCH/xtb/'+name_mol+'_xtb.sdf') and not os.path.exists(w_dir_initial+'/CSEARCH/ani/'+name_mol+'_ani.sdf') :
geom_data = get_data(rdkit_mols,None,dft_mols,lot,bs,name_mol,args,'rdkit',None,w_dir_initial)
geom_data.to_csv(name_mol+'-all-geom-data-with-fullmonte.csv',index=False)
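# --- Added example (hedged): not part of the original module ---
# get_data()/calculate_parameters() read several attributes from "args"; the
# fields below are inferred from the code above and every value is a placeholder.
def _example_geometry_args():
    class _Args(object):
        dihedral = [[0, 1, 2, 3]]   # atom-index quadruplets
        angle = [[0, 1, 2]]         # atom-index triplets
        bond = [[0, 1]]             # atom-index pairs
        geom_par_name = 'geom'
        rot_dihedral = False
        heavyonly = True
    return _Args()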
| 65.269737
| 238
| 0.603467
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,884
| 0.1899
|
fb49836ffdfff81dfa3d877748d3c2b47f98f7fb
| 3,446
|
py
|
Python
|
googlecode-issues-exporter/generate_user_map.py
|
ballschin52/support-tools
|
85be996e89d292c7f20031dde88198acc63d5e6c
|
[
"Apache-2.0"
] | 41
|
2016-05-03T02:27:07.000Z
|
2021-10-14T13:54:16.000Z
|
googlecode-issues-exporter/generate_user_map.py
|
ballschin52/support-tools
|
85be996e89d292c7f20031dde88198acc63d5e6c
|
[
"Apache-2.0"
] | 7
|
2016-05-05T13:53:37.000Z
|
2021-06-27T20:25:13.000Z
|
googlecode-issues-exporter/generate_user_map.py
|
ballschin52/support-tools
|
85be996e89d292c7f20031dde88198acc63d5e6c
|
[
"Apache-2.0"
] | 30
|
2016-05-05T13:26:21.000Z
|
2021-10-13T09:39:21.000Z
|
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tool for generating a user mapping from Google Code user to BitBucket user.
"""
import argparse
import json
import sys
import issues
class OptionalMap(dict):
"""Dictionary that returns the key for missing items. """
def __missing__(self, key):
"""Implements the dict interface. """
return key
def addIfNotPresent(users, user):
"""Adds a user if it is not already set."""
if user not in users:
users[user] = user
def _CreateUsersDict(issue_data, project_name):
"""Extract users from list of issues into a dict.
Args:
issue_data: Issue data
project_name: The name of the project being exported.
Returns:
Dict of users associated with a list of issues
"""
users = {}
for issue in issue_data:
googlecode_issue = issues.GoogleCodeIssue(
issue, project_name, OptionalMap())
reporting_user = googlecode_issue.GetAuthor()
addIfNotPresent(users, reporting_user)
assignee_user = googlecode_issue.GetOwner()
addIfNotPresent(users, assignee_user)
googlecode_comments = googlecode_issue.GetComments()
for comment in googlecode_comments:
googlecode_comment = issues.GoogleCodeComment(googlecode_issue, comment)
commenting_user = googlecode_comment.GetAuthor()
addIfNotPresent(users, commenting_user)
return {
"users": users
}
def Generate(issue_file_path, project_name):
"""Generates a user map for the specified issues. """
issue_data = None
user_file = open(issue_file_path)
user_data = json.load(user_file)
user_projects = user_data["projects"]
for project in user_projects:
if project_name in project["name"]:
issue_data = project["issues"]["items"]
break
if issue_data is None:
raise issues.ProjectNotFoundError(
"Project %s not found" % project_name)
users = _CreateUsersDict(issue_data, project_name)
with open("users.json", "w") as users_file:
user_json = json.dumps(users, sort_keys=True, indent=4,
separators=(",", ": "), ensure_ascii=False)
users_file.write(unicode(user_json))
print "\nCreated file users.json.\n"
def main(args):
"""The main function.
Args:
args: The command line arguments.
Raises:
issues.ProjectNotFoundError: The user passed in an invalid project name.
"""
parser = argparse.ArgumentParser()
parser.add_argument("--issue_file_path", required=True,
help="The path to the file containing the issues from"
"Google Code.")
parser.add_argument("--project_name", required=True,
help="The name of the Google Code project you wish to"
"export")
parsed_args, _ = parser.parse_known_args(args)
Generate(parsed_args.issue_file_path, parsed_args.project_name)
if __name__ == "__main__":
main(sys.argv)
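# --- Added note (hedged): not part of the original tool ---
# _CreateUsersDict() keys every reporter/owner/commenter by their Google Code
# name, so the generated users.json starts out as an identity map that can be
# edited by hand afterwards; the names below are placeholders.
#
#   {
#       "users": {
#           "alice@example.com": "alice@example.com",
#           "bob@example.com": "bob@example.com"
#       }
#   }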
| 28.716667
| 78
| 0.702844
| 172
| 0.049913
| 0
| 0
| 0
| 0
| 0
| 0
| 1,506
| 0.437028
|
fb4998788840ae0b088496c0b1aec6536f521b03
| 952
|
py
|
Python
|
apps/auth/views/wxlogin.py
|
rainydaygit/testtcloudserver
|
8037603efe4502726a4d794fb1fc0a3f3cc80137
|
[
"MIT"
] | 349
|
2020-08-04T10:21:01.000Z
|
2022-03-23T08:31:29.000Z
|
apps/auth/views/wxlogin.py
|
rainydaygit/testtcloudserver
|
8037603efe4502726a4d794fb1fc0a3f3cc80137
|
[
"MIT"
] | 2
|
2021-01-07T06:17:05.000Z
|
2021-04-01T06:01:30.000Z
|
apps/auth/views/wxlogin.py
|
rainydaygit/testtcloudserver
|
8037603efe4502726a4d794fb1fc0a3f3cc80137
|
[
"MIT"
] | 70
|
2020-08-24T06:46:14.000Z
|
2022-03-25T13:23:27.000Z
|
from flask import Blueprint
from apps.auth.business.wxlogin import WxLoginBusiness
from apps.auth.extentions import validation, parse_json_form
from library.api.render import json_detail_render
wxlogin = Blueprint("wxlogin", __name__)
@wxlogin.route('/', methods=['POST'])
@validation('POST:wx_user_code')
def wxuser_index_handler():
"""
@api {post} /v1/wxlogin/ WeChat login
@apiName WxLogin
@apiGroup User
@apiDescription Log in via WeChat
@apiParam {string} user_code user code
@apiParamExample {json} Request-Example:
{
"user_code":"j2qL3QjNXXwa_4A0WJFDNJyPEx88HTHytARgRbr176g"
}
@apiSuccessExample {json} Success-Response:
HTTP/1.1 200 OK
{
"code": 0,
"data": {
"token": "asdasdasd"
},
"message": ""
}
"""
user_code = parse_json_form('wx_user_code')
ret, data, msg = WxLoginBusiness.get_user(user_code[0])
return json_detail_render(ret, data, msg)
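# --- Added example (hedged): not part of the original blueprint ---
# Illustrative client call only: the host/port are placeholders, the user_code
# value is copied from the apidoc example above, and the "requests" package is
# assumed to be available.
def _example_wx_login(base_url='http://localhost:5000'):
    import requests
    resp = requests.post(base_url + '/v1/wxlogin/',
                         json={'user_code': 'j2qL3QjNXXwa_4A0WJFDNJyPEx88HTHytARgRbr176g'})
    # Expected shape: {"code": 0, "data": {"token": ...}, "message": ""}
    return resp.json()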
| 25.052632
| 65
| 0.668067
| 0
| 0
| 0
| 0
| 740
| 0.755102
| 0
| 0
| 532
| 0.542857
|
fb4ad13207c5ca10ca59d1294d3d67f91a07e8bb
| 4,374
|
py
|
Python
|
servidor/jornada_teorica.py
|
angeloide78/wShifts
|
d88a3284c8a3829a7fbda127eb23c4d5392033f3
|
[
"ECL-2.0",
"Apache-2.0"
] | 3
|
2019-12-21T22:07:11.000Z
|
2021-09-24T15:08:45.000Z
|
servidor/jornada_teorica.py
|
angeloide78/wShifts
|
d88a3284c8a3829a7fbda127eb23c4d5392033f3
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2018-03-24T23:10:40.000Z
|
2018-03-24T23:10:40.000Z
|
servidor/jornada_teorica.py
|
angeloide78/wShifts
|
d88a3284c8a3829a7fbda127eb23c4d5392033f3
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2022-01-26T21:47:10.000Z
|
2022-01-26T21:47:10.000Z
|
# -*- coding: utf-8 -*
# ALGG 03-01-2017 Creation of the jornada_teorica module.
class JornadaTeorica(object):
def __init__(self, conn):
'''Constructor'''
# Connection.
self.__conn = conn
def get_jt(self, centro_fisico_id = None, anno = None):
'''Returns: (id, cf_id, cf_cod, cf_desc, anno,
total_horas_anual, observaciones)
Table: jornada_teorica
Filtering options: physical centre id <centro_fisico_id>,
working-calendar year <anno>
'''
# Retrieve all theoretical working schedules (jornadas teóricas).
ret = self.__conn.get_jt(centro_fisico_id, anno)
# Main dictionary.
data = {}
# Value of the data dictionary, which will be a list of dictionaries.
lista = []
for i in range(len(ret)):
# Create a dictionary.
d = {}
# Synthetic ID.
ID = str(ret[i].anno) + str(ret[i].cf_id)
# Build the dictionary.
d.setdefault('id', ID)
d.setdefault('cf_id', ret[i].cf_id)
d.setdefault('cf_cod', ret[i].cf_cod)
d.setdefault('cf_desc', ret[i].cf_desc)
d.setdefault('anno', ret[i].anno)
d.setdefault('total_horas_anual', ret[i].total_horas_anual)
d.setdefault('observaciones', ret[i].observaciones)
# Append the dictionary to the list.
lista.append(d)
# Add the key/value pair "data": list of dictionaries
data.setdefault('data', lista)
print(data)
return data
def set_jt(self, datos):
ret = True
msj = ''
# List of cells to modify. Each element is a dictionary that
# contains the data needed to modify the field.
celdas = datos['celdas_a_modificar']
# List of rows to insert.
filas_a_insertar = datos['filas_a_insertar']
# List of rows to delete.
filas_a_eliminar = datos['filas_a_eliminar']
# ###############################
# HANDLING OF ROWS TO DELETE
# ###############################
# Remove identifiers 0, which belong to new rows that
# were deleted before being saved to the DB.
try:
while True: filas_a_eliminar.remove(0)
except: pass
# #################################
# HANDLING OF CELLS TO MODIFY
# #################################
# Discard cells with identifier 0, since they belong to rows that are new
# and will be inserted as new elements. Also discard cells that belong to
# rows that have already been deleted, since a cell may be modified and
# its row deleted afterwards.
aux = []
for i in celdas:
if i is None: continue
if i['id'] == 0: continue
if i['id'] in filas_a_eliminar: continue
try:
if i['field'] == 'total_horas_anual':
int(i['valor_nuevo'])
except:
ret = False
msj = u'Solo se permiten valores numéricos'
break
aux.append(i)
celdas = aux
# ###############################
# HANDLING OF ROWS TO INSERT
# ###############################
# Check that the data is numeric.
for i in filas_a_insertar:
try: int(i['total_horas_anual'])
except:
ret = False
msj = u'Solo se permiten valores numéricos'
break
# ##############
# DATA SUBMISSION
# ##############
if ret:
ret, msj = self.__conn.actualizar_jt(celdas, \
filas_a_insertar, \
filas_a_eliminar)
ret = {'data' : [{'estado' : ret}, {'mensaje' : msj}]}
# Return the status.
return ret
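# --- Added example (hedged): not part of the original module ---
# Illustrative only: "conn" stands for whatever connection object exposes
# get_jt()/actualizar_jt(), and the filter values are placeholders.
def _example_print_jt(conn):
    jt = JornadaTeorica(conn)
    data = jt.get_jt(centro_fisico_id=1, anno=2017)
    for row in data['data']:
        print(row['cf_cod'], row['anno'], row['total_horas_anual'])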
| 32.887218
| 79
| 0.486968
| 4,294
| 0.978355
| 0
| 0
| 0
| 0
| 0
| 0
| 2,034
| 0.463431
|
fb4e27ed9f0165f7474ca1a89bce202114f9a019
| 3,645
|
py
|
Python
|
app.py
|
jasoncordis/spotify-flask
|
1e7f2955ab9d825a5f32f494b2966f18c460f311
|
[
"Apache-2.0"
] | 58
|
2017-04-20T17:25:25.000Z
|
2021-02-05T21:41:25.000Z
|
app.py
|
jasoncordis/spotify-flask
|
1e7f2955ab9d825a5f32f494b2966f18c460f311
|
[
"Apache-2.0"
] | 3
|
2018-04-08T22:03:25.000Z
|
2020-05-07T06:03:21.000Z
|
app.py
|
jasoncordis/spotify-flask
|
1e7f2955ab9d825a5f32f494b2966f18c460f311
|
[
"Apache-2.0"
] | 17
|
2017-12-03T04:26:48.000Z
|
2021-01-26T21:18:27.000Z
|
'''
This code was based on these repositories,
so special thanks to:
https://github.com/datademofun/spotify-flask
https://github.com/drshrey/spotify-flask-auth-example
'''
from flask import Flask, request, redirect, g, render_template, session
from spotify_requests import spotify
app = Flask(__name__)
app.secret_key = 'some key for session'
# ----------------------- AUTH API PROCEDURE -------------------------
@app.route("/auth")
def auth():
return redirect(spotify.AUTH_URL)
@app.route("/callback/")
def callback():
auth_token = request.args['code']
auth_header = spotify.authorize(auth_token)
session['auth_header'] = auth_header
return profile()
def valid_token(resp):
return resp is not None and not 'error' in resp
# -------------------------- API REQUESTS ----------------------------
@app.route("/")
def index():
return render_template('index.html')
@app.route('/search/')
def search():
try:
search_type = request.args['search_type']
name = request.args['name']
return make_search(search_type, name)
except:
return render_template('search.html')
@app.route('/search/<search_type>/<name>')
def search_item(search_type, name):
return make_search(search_type, name)
def make_search(search_type, name):
if search_type not in ['artist', 'album', 'playlist', 'track']:
return render_template('index.html')
data = spotify.search(search_type, name)
api_url = data[search_type + 's']['href']
items = data[search_type + 's']['items']
return render_template('search.html',
name=name,
results=items,
api_url=api_url,
search_type=search_type)
@app.route('/artist/<id>')
def artist(id):
artist = spotify.get_artist(id)
if artist['images']:
image_url = artist['images'][0]['url']
else:
image_url = 'http://bit.ly/2nXRRfX'
tracksdata = spotify.get_artist_top_tracks(id)
tracks = tracksdata['tracks']
related = spotify.get_related_artists(id)
related = related['artists']
return render_template('artist.html',
artist=artist,
related_artists=related,
image_url=image_url,
tracks=tracks)
@app.route('/profile')
def profile():
if 'auth_header' in session:
auth_header = session['auth_header']
# get profile data
profile_data = spotify.get_users_profile(auth_header)
# get user playlist data
playlist_data = spotify.get_users_playlists(auth_header)
# get user recently played tracks
recently_played = spotify.get_users_recently_played(auth_header)
if valid_token(recently_played):
return render_template("profile.html",
user=profile_data,
playlists=playlist_data["items"],
recently_played=recently_played["items"])
return render_template('profile.html')
@app.route('/contact')
def contact():
return render_template('contact.html')
@app.route('/featured_playlists')
def featured_playlists():
if 'auth_header' in session:
auth_header = session['auth_header']
hot = spotify.get_featured_playlists(auth_header)
if valid_token(hot):
return render_template('featured_playlists.html', hot=hot)
return render_template('profile.html')
if __name__ == "__main__":
app.run(debug=True, port=spotify.PORT)
| 27.613636
| 72
| 0.609877
| 0
| 0
| 0
| 0
| 2,452
| 0.672702
| 0
| 0
| 927
| 0.254321
|
fb4e569bf8fd09a1c7d6371a76f1b851a6a2772b
| 7,275
|
py
|
Python
|
ckanext-datagathering/ckanext/datagathering/commands/migrate.py
|
smallmedia/iod-ckan
|
dfd85b41286fe86924ec16b0a88efc7292848ceb
|
[
"Apache-2.0"
] | 4
|
2017-06-12T15:18:30.000Z
|
2019-10-11T15:12:43.000Z
|
ckanext-datagathering/ckanext/datagathering/commands/migrate.py
|
smallmedia/iod-ckan
|
dfd85b41286fe86924ec16b0a88efc7292848ceb
|
[
"Apache-2.0"
] | 64
|
2017-05-14T22:15:53.000Z
|
2020-03-08T15:26:49.000Z
|
ckanext-datagathering/ckanext/datagathering/commands/migrate.py
|
smallmedia/iod-ckan
|
dfd85b41286fe86924ec16b0a88efc7292848ceb
|
[
"Apache-2.0"
] | 2
|
2018-09-08T08:02:25.000Z
|
2020-04-24T13:02:06.000Z
|
from ckan import model
from ckan.lib.cli import CkanCommand
from ckan.lib.munge import munge_title_to_name, substitute_ascii_equivalents
from ckan.logic import get_action
from ckan.lib.helpers import render_markdown
from ckan.plugins import toolkit
import logging
log = logging.getLogger(__name__)
class MigrationCommand(CkanCommand):
'''
CKAN 'Related Items' to 'Datagathering' migration command.
Usage::
paster datagathering migrate -c <path to config file>
- Migrate Related Items to Datagatherings
paster datagathering migrate -c <path to config file> [--allow-duplicates]
- Migrate Related Items to Datagatherings and allow duplicates
paster datagathering markdown-to-html -c <path to config file>
- Migrate the notes of all datagatherings from markdown to html.
Must be run from the ckanext-datagathering directory.
'''
summary = __doc__.split('\n')[0]
usage = __doc__
def __init__(self,name):
super(CkanCommand, self).__init__(name)
self.parser.add_option('--allow-duplicates', dest='allow_duplicates',
default=False, help='''Use this option to allow
related items with duplicate titles to be migrated.
Duplicate datagatherings will be created as
'duplicate_<related-name>_<related-id>'.''', action='store_true')
def command(self):
'''
Parse command line arguments and call appropriate method.
'''
if not self.args or self.args[0] in ['--help', '-h', 'help']:
print(self.__doc__)
return
cmd = self.args[0]
self._load_config()
if cmd == 'migrate':
self.migrate()
elif cmd == 'markdown-to-html':
self.markdown_to_html()
else:
print('Command "{0}" not recognized'.format(cmd))
def migrate(self):
'''
'''
# determine whether migration should allow duplicates
allow_duplicates = self.options.allow_duplicates
related_items = get_action('related_list')(data_dict={})
# preflight:
# related items must have unique titles before migration
related_titles = [i['title'] for i in related_items]
# make a list of duplicate titles
duplicate_titles = self._find_duplicates(related_titles)
if duplicate_titles and allow_duplicates == False:
print(
"""All Related Items must have unique titles before migration. The following
Related Item titles are used more than once and need to be corrected before
migration can continue. Please correct and try again:"""
)
for i in duplicate_titles:
print(i)
return
for related in related_items:
existing_datagathering = get_action('package_search')(
data_dict={'fq': '+dataset_type:datagathering original_related_item_id:{0}'.format(related['id'])})
normalized_title = substitute_ascii_equivalents(related['title'])
if existing_datagathering['count'] > 0:
print('Datagathering for Related Item "{0}" already exists.'.format(
normalized_title))
else:
datagathering_title = self._gen_new_title(related.get('title'), related['id'])
data_dict = {
'original_related_item_id': related.get('id'),
'title': datagathering_title,
'name': munge_title_to_name(datagathering_title),
'notes': related.get('description'),
'image_url': related.get('image_url'),
'url': related.get('url'),
'tags': [{"name": related.get('type').lower()}]
}
# make the datagathering
try:
new_datagathering = get_action('ckanext_datagathering_create')(
data_dict=data_dict)
except Exception as e:
print('There was a problem migrating "{0}": {1}'.format(
normalized_title, e))
else:
print('Created Datagathering from the Related Item "{0}"'.format(normalized_title))
# make the datagathering_package_association, if needed
try:
related_pkg_id = self._get_related_dataset(
related['id'])
if related_pkg_id:
get_action('ckanext_datagathering_package_association_create')(
data_dict={'datagathering_id': new_datagathering['id'],
'package_id': related_pkg_id})
except Exception as e:
print('There was a problem creating the datagathering_package_association for "{0}": {1}'.format(
normalized_title, e))
def _get_related_dataset(self, related_id):
'''Get the id of a package from related_dataset, if one exists.'''
related_dataset = model.Session.query(model.RelatedDataset).filter_by(
related_id=related_id).first()
if related_dataset:
return related_dataset.dataset_id
def _find_duplicates(self, lst):
'''From a list, return a set of duplicates.
>>> MigrationCommand('cmd')._find_duplicates([1, 2, 3, 4, 5])
[]
>>> MigrationCommand('cmd')._find_duplicates([1, 2, 3, 4, 3, 1, 1])
[1, 3]
>>> MigrationCommand('cmd')._find_duplicates(['one', 'two', 'three', 'four', 'two', 'three'])
['two', 'three']
'''
return list(set(x for x in lst if lst.count(x) >= 2))
def _gen_new_title(self, title, related_id):
name = munge_title_to_name(title)
pkg_obj = model.Session.query(model.Package).filter_by(name=name).first()
if pkg_obj:
title = title.replace('duplicate_', '')
return 'duplicate_' + title + '_' + related_id
else:
return title
def markdown_to_html(self):
''' Migrates the notes of all datagatherings from markdown to html.
When using CKEditor, notes on datagatherings are stored in html instead of
markdown, this command will migrate all nothes using CKAN's
render_markdown core helper.
'''
datagatherings = toolkit.get_action('ckanext_datagathering_list')(data_dict={})
site_user = toolkit.get_action('get_site_user')({
'model': model,
'ignore_auth': True},
{}
)
context = {
'model': model,
'session': model.Session,
'ignore_auth': True,
'user': site_user['name'],
}
for datagathering in datagatherings:
toolkit.get_action('package_patch')(
context,
{
'id': datagathering['id'],
'notes': render_markdown(datagathering['notes'])
}
)
print('All notes were migrated successfully.')
| 39.538043
| 121
| 0.576082
| 6,972
| 0.958351
| 0
| 0
| 0
| 0
| 0
| 0
| 2,979
| 0.409485
|
fb4fa127bfbce18cd4fdeeaf2d1ebf19b58badc3
| 13,826
|
py
|
Python
|
run.py
|
openmg/mg-phm
|
e3bb05d6352f90ee40fdc4415ad8e1ed5857196f
|
[
"Apache-2.0"
] | null | null | null |
run.py
|
openmg/mg-phm
|
e3bb05d6352f90ee40fdc4415ad8e1ed5857196f
|
[
"Apache-2.0"
] | null | null | null |
run.py
|
openmg/mg-phm
|
e3bb05d6352f90ee40fdc4415ad8e1ed5857196f
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
import numpy as np
import cv2
from scipy.misc import imsave
import matplotlib.pyplot as plt
import analysis
import imageprocess
import datainterface
import imagemosaicking
town_names = ['昂素镇', '敖勒召其镇', '布拉格苏木', '城川镇', '二道川乡', \
'毛盖图苏木', '三段地镇', '上海庙镇', '珠和苏木']
for index in range(len(town_names)):
town_names[index] = unicode(town_names[index], 'utf8')
geojson_path = 'testdata/'
# December
winter12933 = 'LC81290332016343LGN00_MTL'
winter12833 = 'LC81280332016336LGN00_MTL'
winter12834 = 'LC81280342016336LGN00_MTL'
winter12 = (winter12933, winter12833, winter12834)
# January
winter12933 = 'LC81290332017025LGN00_MTL'
winter12833 = 'LC81280332017002LGN00_MTL'
winter12834 = 'LC81280342017002LGN00_MTL'
winter01 = (winter12933, winter12833, winter12834)
# February
winter12933 = 'LC81290332017089LGN00_MTL'  # March scene; February data unavailable
winter12833 = 'LC81280332017034LGN00_MTL'
winter12834 = 'LC81280342017034LGN00_MTL'
winter02 = (winter12933, winter12833, winter12834)
#06
summer12933 = 'LC81290332016151LGN00_MTL'
summer12833 = 'LC81280332016176LGN00_MTL'
summer12834 = 'LC81280342016176LGN00_MTL'
summer06 = (summer12933, summer12833, summer12834)
#07
summer12933 = 'LC81290332016183LGN00_MTL'
summer12833 = 'LC81280332016208LGN00_MTL'
summer12834 = 'LC81280342016208LGN00_MTL'
summer07 = (summer12933, summer12833, summer12834)
#08
summer12933 = 'LC81290332016247LGN00_MTL'
summer12833 = 'LC81280332016240LGN00_MTL'
summer12834 = 'LC81280342016240LGN00_MTL'
summer08 = (summer12933, summer12833, summer12834)
cases = (summer08,)
case_name = ('Aug',)
#cases = (winter12, winter01, winter02, summer06, summer07, summer08)
#case_name = ('Nov','Jan','Feb','Jun','Jul','Aug',)
#cases = (wintercode,)
#case_name = ('winter')
for ii in range(len(cases)):
case = cases[ii]
# image load
imgcode1 = case[0]
imgcode2 = case[1]
imgcode3 = case[2]
path1 = 'testdata/1-12933/'
path2 = 'testdata/2-12833/'
path3 = 'testdata/3-12834/'
corner1 = datainterface.get_corner(imgcode1, path1)
corner2 = datainterface.get_corner(imgcode2, path2)
corner3 = datainterface.get_corner(imgcode3, path3)
img1 = datainterface.get_band(imgcode1, 4, path1)
img2 = datainterface.get_band(imgcode2, 4, path2)
img3 = datainterface.get_band(imgcode3, 4, path3)
bqa1 = datainterface.get_bqa(imgcode1, path1)
bqa2 = datainterface.get_bqa(imgcode2, path2)
bqa3 = datainterface.get_bqa(imgcode3, path3)
file_date1 = datainterface.get_date(imgcode1, path1)
file_date2 = datainterface.get_date(imgcode2, path2)
file_date3 = datainterface.get_date(imgcode3, path3)
# image analysis
ndvi1, vfc1 = analysis.get_plant(imgcode1, path1)
ndvi2, vfc2 = analysis.get_plant(imgcode2, path2)
ndvi3, vfc3 = analysis.get_plant(imgcode3, path3)
print 'complete ndvi calculation...'
Ts1 = analysis.get_temperature(imgcode1, path1)
Ts2 = analysis.get_temperature(imgcode2, path2)
Ts3 = analysis.get_temperature(imgcode3, path3)
print 'complete Ts calculation...'
tvdi1, cover1 = analysis.get_drought(ndvi1, Ts1, bqa1)
tvdi2, cover2 = analysis.get_drought(ndvi2, Ts2, bqa2)
tvdi3, cover3 = analysis.get_drought(ndvi3, Ts3, bqa3)
print 'complete tvdi calculation...'
ndvi1_d = cv2.resize(ndvi1,None,fx=0.1,fy=0.1)
ndvi2_d = cv2.resize(ndvi2,None,fx=0.1,fy=0.1)
ndvi3_d = cv2.resize(ndvi3,None,fx=0.1,fy=0.1)
vfc1_d = cv2.resize(vfc1,None,fx=0.1,fy=0.1)
vfc2_d = cv2.resize(vfc2,None,fx=0.1,fy=0.1)
vfc3_d = cv2.resize(vfc3,None,fx=0.1,fy=0.1)
Ts1_d = cv2.resize(Ts1,None,fx=0.1,fy=0.1)
Ts2_d = cv2.resize(Ts2,None,fx=0.1,fy=0.1)
Ts3_d = cv2.resize(Ts3,None,fx=0.1,fy=0.1)
tvdi1_d = cv2.resize(tvdi1,None,fx=0.1,fy=0.1)
tvdi2_d = cv2.resize(tvdi2,None,fx=0.1,fy=0.1)
tvdi3_d = cv2.resize(tvdi3,None,fx=0.1,fy=0.1)
print 'complete image analyzing...'
save_filename = 'output/' + case_name[ii] + '_' + 'ndvi1' + '.png'
imsave(save_filename, ndvi1)
save_filename = 'output/' + case_name[ii] + '_' + 'vfc1' + '.png'
imsave(save_filename, vfc1)
save_filename = 'output/' + case_name[ii] + '_' + 'ndvi2' + '.png'
imsave(save_filename, ndvi2)
save_filename = 'output/' + case_name[ii] + '_' + 'vfc2' + '.png'
imsave(save_filename, vfc2)
save_filename = 'output/' + case_name[ii] + '_' + 'ndvi3' + '.png'
imsave(save_filename, ndvi3)
save_filename = 'output/' + case_name[ii] + '_' + 'vfc3' + '.png'
imsave(save_filename, vfc3)
save_filename = 'output/' + case_name[ii] + '_' + 'Ts1' + '.png'
imsave(save_filename, Ts1)
save_filename = 'output/' + case_name[ii] + '_' + 'Ts2' + '.png'
imsave(save_filename, Ts2)
save_filename = 'output/' + case_name[ii] + '_' + 'Ts3' + '.png'
imsave(save_filename, Ts3)
save_filename = 'output/' + case_name[ii] + '_' + 'tvdi1' + '.png'
imsave(save_filename, tvdi1)
save_filename = 'output/' + case_name[ii] + '_' + 'tvdi2' + '.png'
imsave(save_filename, tvdi2)
save_filename = 'output/' + case_name[ii] + '_' + 'tvdi3' + '.png'
imsave(save_filename, tvdi3)
save_filename = 'output/d' + case_name[ii] + '_' + 'ndvi1_d' + '.png'
imsave(save_filename, ndvi1_d)
save_filename = 'output/d' + case_name[ii] + '_' + 'vfc1_d' + '.png'
imsave(save_filename, vfc1_d)
save_filename = 'output/d' + case_name[ii] + '_' + 'ndvi2_d' + '.png'
imsave(save_filename, ndvi2_d)
save_filename = 'output/d' + case_name[ii] + '_' + 'vfc2_d' + '.png'
imsave(save_filename, vfc2_d)
save_filename = 'output/d' + case_name[ii] + '_' + 'ndvi3_d' + '.png'
imsave(save_filename, ndvi3_d)
save_filename = 'output/d' + case_name[ii] + '_' + 'vfc3_d' + '.png'
imsave(save_filename, vfc3_d)
save_filename = 'output/d' + case_name[ii] + '_' + 'Ts1_d' + '.png'
imsave(save_filename, Ts1_d)
save_filename = 'output/d' + case_name[ii] + '_' + 'Ts2_d' + '.png'
imsave(save_filename, Ts2_d)
save_filename = 'output/d' + case_name[ii] + '_' + 'Ts3_d' + '.png'
imsave(save_filename, Ts3_d)
save_filename = 'output/d' + case_name[ii] + '_' + 'tvdi1_d' + '.png'
imsave(save_filename, tvdi1_d)
save_filename = 'output/d' + case_name[ii] + '_' + 'tvdi2_d' + '.png'
imsave(save_filename, tvdi2_d)
save_filename = 'output/d' + case_name[ii] + '_' + 'tvdi3_d' + '.png'
imsave(save_filename, tvdi3_d)
# image mosaicking
imgall_origin, corner_origin = imagemosaicking.cut_img_easy(img1, img2, img3, corner1, corner2, corner3)
imgall_ndvi, corner_ndvi = imagemosaicking.cut_img_easy(ndvi1, ndvi2, ndvi3, corner1, corner2, corner3)
imgall_vfc, corner_vfc = imagemosaicking.cut_img_easy(vfc1, vfc2, vfc3, corner1, corner2, corner3)
imgall_Ts, corner_Ts = imagemosaicking.cut_img_easy(Ts1, Ts2, Ts3, corner1, corner2, corner3)
imgall_tvdi, corner_tvdi = imagemosaicking.cut_img_easy(tvdi1, tvdi2, tvdi3, corner1, corner2, corner3)
imgall_tvdi_cover, corner_cover = imagemosaicking.cut_img_easy(cover1, cover2, cover3, corner1, corner2, corner3)
save_filename = 'output/' + case_name[ii] + '_' + 'imgall_origin' + '.png'
imsave(save_filename, imgall_origin)
save_filename = 'output/' + case_name[ii] + '_' + 'imgall_ndvi' + '.png'
imsave(save_filename, imgall_ndvi)
save_filename = 'output/' + case_name[ii] + '_' + 'imgall_vfc' + '.png'
imsave(save_filename, imgall_vfc)
save_filename = 'output/' + case_name[ii] + '_' + 'imgall_Ts' + '.png'
imsave(save_filename, imgall_Ts)
save_filename = 'output/' + case_name[ii] + '_' + 'imgall_tvdi' + '.png'
imsave(save_filename, imgall_tvdi)
imgall_origin_d = cv2.resize(imgall_origin, None, fx=0.2, fy=0.2)
imgall_ndvi_d = cv2.resize(imgall_ndvi, None, fx=0.2, fy=0.2)
imgall_vfc_d = cv2.resize(imgall_vfc, None, fx=0.2, fy=0.2)
imgall_Ts_d = cv2.resize(imgall_Ts, None, fx=0.2, fy=0.2)
imgall_tvdi_d = cv2.resize(imgall_tvdi, None, fx=0.2, fy=0.2)
imgall_tvdi_cover_d = cv2.resize(imgall_tvdi_cover, None, fx=0.2, fy=0.2)
save_filename = 'output/d' + case_name[ii] + '_' + 'imgall_origin_d' + '.png'
imsave(save_filename, imgall_origin_d)
save_filename = 'output/d' + case_name[ii] + '_' + 'imgall_ndvi_d' + '.png'
imsave(save_filename, imgall_ndvi_d)
save_filename = 'output/d' + case_name[ii] + '_' + 'imgall_vfc_d' + '.png'
imsave(save_filename, imgall_vfc_d)
save_filename = 'output/d' + case_name[ii] + '_' + 'imgall_Ts_d' + '.png'
imsave(save_filename, imgall_Ts_d)
save_filename = 'output/d' + case_name[ii] + '_' + 'imgall_tvdi_d' + '.png'
imsave(save_filename, imgall_tvdi_d)
print 'complete image mosaicking...'
# image filtering
filter_box = 20
imgall_origin_filtered = imageprocess.mean_filter(imgall_origin_d, filter_box)
imgall_ndvi_filtered = imageprocess.mean_filter(imgall_ndvi_d, filter_box)
imgall_vfc_filtered = imageprocess.mean_filter(imgall_vfc_d, filter_box)
imgall_Ts_filtered = imageprocess.mean_filter(imgall_Ts_d, filter_box)
imgall_tvdi_filtered = imageprocess.mean_filter(imgall_tvdi_d, filter_box)
print 'complete image filtering...'
"""
save_filename = 'output/' + case_name[ii] + '_' + 'imgall_origin_filtered' + '.png'
imsave(save_filename, imgall_origin_filtered)
save_filename = 'output/' + case_name[ii] + '_' + 'imgall_ndvi_filtered' + '.png'
imsave(save_filename, imgall_ndvi_filtered)
save_filename = 'output/' + case_name[ii] + '_' + 'imgall_vfc_filtered' + '.png'
imsave(save_filename, imgall_vfc_filtered)
save_filename = 'output/' + case_name[ii] + '_' + 'imgall_Ts_filtered' + '.png'
imsave(save_filename, imgall_Ts_filtered)
save_filename = 'output/' + case_name[ii] + '_' + 'imgall_tvdi_filtered' + '.png'
imsave(save_filename, imgall_tvdi_filtered)
"""
filter_box = 5
imgall_origin = imageprocess.mean_filter(imgall_origin_d, filter_box)
imgall_ndvi = imageprocess.mean_filter(imgall_ndvi_d, filter_box)
imgall_vfc = imageprocess.mean_filter(imgall_vfc_d, filter_box)
imgall_Ts = imageprocess.mean_filter(imgall_Ts_d, filter_box)
imgall_tvdi = imageprocess.mean_filter(imgall_tvdi_d, filter_box)
print 'complete image filtering...'
# density divide
vfc_3d = analysis.vfc_divide(imgall_vfc, imgall_ndvi)
tvdi_3d = analysis.tvdi_divide(imgall_tvdi, imgall_ndvi, imgall_tvdi_cover_d)
print 'complete density divide...'
save_filename = 'output/' + case_name[ii] + '_' + 'vfc_3d' + '.png'
imsave(save_filename, vfc_3d)
save_filename = 'output/' + case_name[ii] + '_' + 'tvdi_3d' + '.png'
imsave(save_filename, tvdi_3d)
"""
#pn_poly
county_cover = np.zeros_like(imgall_origin)
for town_num in range(len(town_names)):
print town_num + 1
geo_filename = geojson_path + town_names[town_num] + '.geojson'
geodata = datainterface.geojson_read(geo_filename)
town_cover = imageprocess.pn_poly(imgall_origin, corner_origin, geodata)
county_cover += town_cover
town_origin = town_cover * imgall_origin
town_vfc = town_cover * imgall_vfc
town_Ts = town_cover * imgall_Ts
town_tvdi = town_cover * imgall_tvdi
town_vfc_4d = np.zeros((vfc_3d.shape[0], vfc_3d.shape[1], 4))
town_tvdi_4d = np.zeros((tvdi_3d.shape[0], tvdi_3d.shape[1], 4))
for i in range(3):
town_vfc_4d[:, :, i] = vfc_3d[:, :, i] / 255.0
town_tvdi_4d[:, :, i] = tvdi_3d[:, :, i] / 255.0
town_vfc_4d[:,:,3] = town_cover
town_tvdi_4d[:,:,3] = town_cover
var_names = ('town_origin', 'town_vfc', 'town_Ts', 'town_tvdi',\
'town_vfc_4d', 'town_tvdi_4d')
for var_name in var_names:
save_filename = 'output/' + case_name[ii] + town_names[town_num] + var_name + '_' + '.png'
print 'saving images of '+ town_names[town_num] + var_name + '...'
if (var_name != 'town_vfc_4d') and (var_name != 'town_tvdi_4d'):
imsave(save_filename, eval(var_name) * town_cover)
else:
# img_temp = np.zeros((town_cover.shape[0], town_cover.shape[1],4))
# img_temp[:,:,0:3] = eval(var_name)
# img_temp[:,:,3] = town_cover
# imsave(save_filename, img_temp)
imsave(save_filename, eval(var_name))
print 'saving images of county...'
county_origin = county_cover * imgall_origin
county_vfc = county_cover * imgall_vfc
county_Ts = county_cover * imgall_Ts
county_tvdi = county_cover * imgall_tvdi
county_vfc_4d = np.zeros((vfc_3d.shape[0], vfc_3d.shape[1], 4))
county_tvdi_4d = np.zeros((tvdi_3d.shape[0], tvdi_3d.shape[1], 4))
for i in range(3):
county_vfc_4d[:, :, i] = vfc_3d[:, :, i] / 255.0
county_tvdi_4d[:, :, i] = tvdi_3d[:, :, i] / 255.0
county_vfc_4d[:,:,3] = county_cover
county_tvdi_4d[:,:,3] = county_cover
# save county
var_names = ('county_origin', 'county_vfc', 'county_Ts', 'county_tvdi',\
'county_vfc_4d', 'county_tvdi_4d')
for var_name in var_names:
print var_name
save_filename = 'output/' + case_name[ii] + var_name + '_' + '.png'
print 'saving images of ' + var_name +'...'
if (var_name != 'county_vfc_4d') and (var_name != 'county_tvdi_4d'):
imsave(save_filename, eval(var_name) * county_cover)
else:
imsave(save_filename, eval(var_name))
# img_temp = np.zeros((county_cover.shape[0], county_cover.shape[1],4))
# img_temp[:,:,0:3] = eval(var_name)
# img_temp[:,:,3] = county_cover
# imsave(save_filename, img_temp)
"""
| 41.39521
| 117
| 0.667872
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 6,001
| 0.431044
|
fb4fc030e59a7a7273510289cf7ba58993b6464b
| 2,593
|
py
|
Python
|
tests/tests_lambda.py
|
schwin007/Lambda-Metric-Shipper
|
8659794cfbf54fe74eaa8bb3f956555d101af604
|
[
"Apache-2.0"
] | null | null | null |
tests/tests_lambda.py
|
schwin007/Lambda-Metric-Shipper
|
8659794cfbf54fe74eaa8bb3f956555d101af604
|
[
"Apache-2.0"
] | 2
|
2019-04-05T21:38:16.000Z
|
2019-12-25T07:15:37.000Z
|
tests/tests_lambda.py
|
schwin007/Lambda-Metric-Shipper
|
8659794cfbf54fe74eaa8bb3f956555d101af604
|
[
"Apache-2.0"
] | 6
|
2018-07-29T11:41:35.000Z
|
2020-12-02T12:22:52.000Z
|
import logging
import os
import unittest
from logging.config import fileConfig
from src.lambda_function import validate_configurations as validate
# create logger assuming running from ./run script
fileConfig('tests/logging_config.ini')
logger = logging.getLogger(__name__)
class TestLambdaFunction(unittest.TestCase):
""" Unit testing logzio lambda function """
def setUp(self):
# Set os.environ for tests
os.environ['FILEPATH'] = "tests/configurations/valid_configure.json"
os.environ['URL'] = "url"
os.environ['TOKEN'] = "1234567890"
def test_good_config_file(self):
logger.info("TEST: test_good_config_file")
try:
validate()
except (ValueError, KeyError, RuntimeError, EnvironmentError):
assert True, "Failed to validate a good configuration file"
def test_wrong_variable(self):
logger.info("TEST: test_wrong_variable")
os.environ['FILEPATH'] = "wrong"
with self.assertRaises(EnvironmentError):
validate()
logger.info("Catched the correct exception, wrong no such file at FILEPATH")
os.environ['FILEPATH'] = "tests/configurations/missing_variable.json"
with self.assertRaises(KeyError):
validate()
logger.info("Catched the correct exception, missing TimeInterval")
del os.environ['FILEPATH']
with self.assertRaises(RuntimeError):
validate()
logger.info("Catched the correct exception, no 'FILEPATH'")
def test_wrong_variable_format(self):
logger.info("TEST: test_wrong_variable_format")
os.environ['FILEPATH'] = "tests/configurations/wrong_variable_format.json"
with self.assertRaises(RuntimeError):
validate()
logger.info("Catched the correct exception, wrong format for period")
def test_wrong_time_ranges(self):
logger.info("TEST: test_wrong_time_ranges")
os.environ['FILEPATH'] = "tests/configurations/wrong_time_ranges.json"
with self.assertRaises(RuntimeError):
validate()
logger.info("Catched the correct exception, period can't be bigger than timeInterval")
def test_duplicate_statistics(self):
logger.info("TEST: test_duplicate_statistics")
os.environ['FILEPATH'] = "tests/configurations/duplicate_statistics.json"
with self.assertRaises(RuntimeError):
validate()
logger.info("Catched the correct exception, can't have both Statistics and ExtendedStatistics")
if __name__ == '__main__':
unittest.main()
| 35.520548
| 103
| 0.687235
| 2,266
| 0.873891
| 0
| 0
| 0
| 0
| 0
| 0
| 1,062
| 0.409564
|
fb4fd1b54e5406173715c8f8b6132187b8fbeda2
| 1,954
|
py
|
Python
|
script/QA_LSTM.py
|
xjtushilei/Answer_Selection
|
4a827f64e5361eab951713c2350632c5278404dd
|
[
"MIT"
] | 4
|
2017-06-19T01:15:55.000Z
|
2020-02-29T03:45:26.000Z
|
script/QA_LSTM_v2.py
|
xjtushilei/Answer_Selection
|
4a827f64e5361eab951713c2350632c5278404dd
|
[
"MIT"
] | null | null | null |
script/QA_LSTM_v2.py
|
xjtushilei/Answer_Selection
|
4a827f64e5361eab951713c2350632c5278404dd
|
[
"MIT"
] | null | null | null |
import numpy as np
import tensorflow
from keras import Input, optimizers
from keras import backend as K
from keras.engine import Model
from keras import layers
from keras.layers import Bidirectional, LSTM, merge, Reshape, Lambda, Dense, BatchNormalization
K.clear_session()
print("设置显卡信息...")
# 设置tendorflow对显存使用按需增长
config = tensorflow.ConfigProto()
config.gpu_options.allow_growth = True
session = tensorflow.Session(config=config)
question_max_len = 40
answer_max_len = 40
embedding_dim = 300
input_question = Input(shape=(question_max_len, embedding_dim))
input_answer = Input(shape=(answer_max_len, embedding_dim))
# Bidirectional LSTM encoders for the question and the answer
question_lstm = Bidirectional(LSTM(64))
answer_lstm = Bidirectional(LSTM(64))
encoded_question = question_lstm(input_question)
encoded_answer = answer_lstm(input_answer)
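# Combine the two encodings with the legacy Keras 1.x merge API in 'cos' mode, then map the result to a similarity score via 1 - x.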
cos_distance = merge([encoded_question, encoded_answer], mode='cos', dot_axes=1)
cos_distance = Reshape((1,))(cos_distance)
cos_similarity = Lambda(lambda x: 1 - x)(cos_distance)
predictions = Dense(1, activation='sigmoid')(cos_similarity)
model = Model([input_question, input_answer], [predictions])
sgd = optimizers.SGD(lr=0.1, clipvalue=0.5)
model.compile(optimizer=sgd,
loss='binary_crossentropy',
metrics=['binary_accuracy'])
model.summary()
# Load the training data
questions = np.load('train' + '_' + 'questions' + '.npy')
answers = np.load('train' + '_' + 'answers' + '.npy')
labels = np.load('train' + '_' + 'labels' + '.npy')
# Load the dev (validation) data
dev_questions = np.load('dev' + '_' + 'questions' + '.npy')
dev_answers = np.load('dev' + '_' + 'answers' + '.npy')
dev_labels = np.load('dev' + '_' + 'labels' + '.npy')
# Start training
model.fit([questions, answers], [labels],
epochs=2,
batch_size=256,
validation_data=([dev_questions, dev_answers], [dev_labels]))
# Prediction
print('Starting prediction!')
predict = model.predict([dev_questions, dev_answers], verbose=1, batch_size=256)
print(predict)
np.save('predict.npy', predict)
| 31.516129
| 95
| 0.720061
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 369
| 0.181416
|
fb515bfaa92002625ae59283942eea3a360391f0
| 467
|
py
|
Python
|
lib/mysocket.py
|
vanphuong12a2/pposter
|
fac6e289985909de059150ca860677dba9ade6c9
|
[
"MIT"
] | null | null | null |
lib/mysocket.py
|
vanphuong12a2/pposter
|
fac6e289985909de059150ca860677dba9ade6c9
|
[
"MIT"
] | null | null | null |
lib/mysocket.py
|
vanphuong12a2/pposter
|
fac6e289985909de059150ca860677dba9ade6c9
|
[
"MIT"
] | null | null | null |
from flask_socketio import SocketIO
NOTI = 'notification'
class MySocket():
def __init__(self, app, async_mode):
self.socketio = SocketIO(app, async_mode=async_mode)
def get_socketio(self):
return self.socketio
def noti_emit(self, msg, room=None):
if room:
self.socketio.emit(NOTI, {'data': msg}, namespace='/noti', room=room)
else:
self.socketio.emit(NOTI, {'data': msg}, namespace='/noti')
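# Minimal usage sketch (the Flask app and async mode below are illustrative, not part of this module):
#   from flask import Flask
#   app = Flask(__name__)
#   sock = MySocket(app, async_mode='eventlet')
#   sock.noti_emit('new post available', room='user_42')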
| 23.35
| 81
| 0.62955
| 404
| 0.865096
| 0
| 0
| 0
| 0
| 0
| 0
| 40
| 0.085653
|
fb51e6590cca3f878c6c2b90af8869c140eb763b
| 2,128
|
py
|
Python
|
server/tests/steps/sql_translator/test_filter.py
|
davinov/weaverbird
|
3f907f080729ba70be8872d6c5ed0fdcec9b8a9a
|
[
"BSD-3-Clause"
] | 54
|
2019-11-20T15:07:39.000Z
|
2022-03-24T22:13:51.000Z
|
server/tests/steps/sql_translator/test_filter.py
|
ToucanToco/weaverbird
|
7cbd3cc612437a876470cc872efba69526694d62
|
[
"BSD-3-Clause"
] | 786
|
2019-10-20T11:48:37.000Z
|
2022-03-23T08:58:18.000Z
|
server/tests/steps/sql_translator/test_filter.py
|
davinov/weaverbird
|
3f907f080729ba70be8872d6c5ed0fdcec9b8a9a
|
[
"BSD-3-Clause"
] | 10
|
2019-11-21T10:16:16.000Z
|
2022-03-21T10:34:06.000Z
|
import pytest
from weaverbird.backends.sql_translator.metadata import SqlQueryMetadataManager
from weaverbird.backends.sql_translator.steps import translate_filter
from weaverbird.backends.sql_translator.types import SQLQuery
from weaverbird.pipeline.conditions import ComparisonCondition
from weaverbird.pipeline.steps import FilterStep
def test_translate_filter(mocker):
step = FilterStep(
name='filter', condition=ComparisonCondition(column='amount', operator='eq', value=10)
)
query = SQLQuery(
query_name='SELECT_STEP_0',
transformed_query='WITH SELECT_STEP_0 AS (SELECT TOTO, TATA FROM products)',
selection_query='SELECT TOTO, TATA FROM SELECT_STEP_0',
metadata_manager=SqlQueryMetadataManager(
tables_metadata={'table1': {'toto': 'text', 'tata': 'int'}},
),
)
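# Stub apply_condition so the test controls the filtered SELECT that gets wrapped into the FILTER_STEP_1 CTE.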
mocker.patch(
'weaverbird.backends.sql_translator.steps.utils.query_transformation.apply_condition',
return_value='SELECT TOTO, TATA FROM SELECT_STEP_0 WHERE amount = 10',
)
res = translate_filter(step, query, index=1)
assert (
res.transformed_query
== 'WITH SELECT_STEP_0 AS (SELECT TOTO, TATA FROM products), FILTER_STEP_1 AS (SELECT TOTO, TATA FROM '
'SELECT_STEP_0 WHERE amount = 10)'
)
assert res.selection_query == 'SELECT TOTO, TATA FROM FILTER_STEP_1'
def test_translate_filter_error(mocker):
step = FilterStep(
name='filter', condition=ComparisonCondition(column='amount', operator='eq', value=10)
)
query = SQLQuery(
query_name='SELECT_STEP_0',
transformed_query='WITH SELECT_STEP_0 AS (SELECT * FROM products), SELECT * FROM SELECT_STEP_0',
selection_query='SELECT * FROM SELECT_STEP_0',
metadata_manager=SqlQueryMetadataManager(
tables_metadata={'table1': {'toto': 'text', 'tata': 'int'}},
),
)
mocker.patch(
'weaverbird.backends.sql_translator.steps.filter.apply_condition',
side_effect=NotImplementedError,
)
with pytest.raises(NotImplementedError):
translate_filter(step, query, index=1)
| 40.150943
| 111
| 0.706767
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 711
| 0.334117
|
fb5248790acad20b7f6c753089b4b879cf218187
| 4,322
|
py
|
Python
|
nose2/plugins/loader/testcases.py
|
leth/nose2
|
a8fb776a0533264e0b123fc01237b9d2a039e9d0
|
[
"BSD-2-Clause"
] | null | null | null |
nose2/plugins/loader/testcases.py
|
leth/nose2
|
a8fb776a0533264e0b123fc01237b9d2a039e9d0
|
[
"BSD-2-Clause"
] | null | null | null |
nose2/plugins/loader/testcases.py
|
leth/nose2
|
a8fb776a0533264e0b123fc01237b9d2a039e9d0
|
[
"BSD-2-Clause"
] | null | null | null |
"""
Load tests from :class:`unittest.TestCase` subclasses.
This plugin implements :func:`loadTestsFromName` and
:func:`loadTestsFromModule` to load tests from
:class:`unittest.TestCase` subclasses found in modules or named on the
command line.
"""
# Adapted from unittest2/loader.py from the unittest2 plugins branch.
# This module contains some code copied from unittest2/loader.py and other
# code developed in reference to that module and others within unittest2.
# unittest2 is Copyright (c) 2001-2010 Python Software Foundation; All
# Rights Reserved. See: http://docs.python.org/license.html
import logging
import unittest
from nose2 import events, util
__unittest = True
log = logging.getLogger(__name__)
class TestCaseLoader(events.Plugin):
"""Loader plugin that loads from test cases"""
alwaysOn = True
configSection = 'testcases'
def registerInSubprocess(self, event):
event.pluginClasses.append(self.__class__)
def loadTestsFromModule(self, event):
"""Load tests in :class:`unittest.TestCase` subclasses"""
seen = set()
module = event.module
for name in dir(module):
obj = getattr(module, name)
if id(obj) in seen:
continue
seen.add(id(obj))
if isinstance(obj, type) and issubclass(obj, unittest.TestCase):
event.extraTests.append(
self._loadTestsFromTestCase(event, obj))
def loadTestsFromName(self, event):
"""Load tests from event.name if it names a test case/method"""
name = event.name
module = event.module
log.debug("load %s from %s", name, module)
try:
result = util.test_from_name(name, module)
except (AttributeError, ImportError) as e:
event.handled = True
return event.loader.failedLoadTests(name, e)
if result is None:
return
parent, obj, name, index = result
if isinstance(obj, type) and issubclass(obj, unittest.TestCase):
# name is a test case class
event.extraTests.append(self._loadTestsFromTestCase(event, obj))
elif (isinstance(parent, type) and
issubclass(parent, unittest.TestCase) and not
util.isgenerator(obj) and not
hasattr(obj, 'paramList')):
# name is a single test method
event.extraTests.append(parent(obj.__name__))
def _loadTestsFromTestCase(self, event, testCaseClass):
evt = events.LoadFromTestCaseEvent(event.loader, testCaseClass)
result = self.session.hooks.loadTestsFromTestCase(evt)
if evt.handled:
loaded_suite = result or event.loader.suiteClass()
else:
names = self._getTestCaseNames(event, testCaseClass)
if not names and hasattr(testCaseClass, 'runTest'):
names = ['runTest']
# FIXME return failure test case if name not in testcase class
loaded_suite = event.loader.suiteClass(map(testCaseClass, names))
if evt.extraTests:
loaded_suite.addTests(evt.extraTests)
return loaded_suite
def _getTestCaseNames(self, event, testCaseClass):
excluded = set()
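# Plugins may add names to this set via the GetTestCaseNamesEvent below; isTestMethod filters them out.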
def isTestMethod(attrname, testCaseClass=testCaseClass,
excluded=excluded):
prefix = evt.testMethodPrefix or self.session.testMethodPrefix
return (
attrname.startswith(prefix) and
hasattr(getattr(testCaseClass, attrname), '__call__') and
attrname not in excluded
)
evt = events.GetTestCaseNamesEvent(
event.loader, testCaseClass, isTestMethod)
result = self.session.hooks.getTestCaseNames(evt)
if evt.handled:
test_names = result or []
else:
excluded.update(evt.excludedNames)
test_names = [entry for entry in dir(testCaseClass)
if isTestMethod(entry)]
if evt.extraNames:
test_names.extend(evt.extraNames)
sortkey = getattr(
testCaseClass, 'sortTestMethodsUsing', event.loader.sortTestMethodsUsing)
if sortkey:
test_names.sort(
key=sortkey)
return test_names
| 37.258621
| 85
| 0.635817
| 3,599
| 0.832716
| 0
| 0
| 0
| 0
| 0
| 0
| 969
| 0.224202
|
fb52ea45a86609e7040cf2f5adb9df43b0bf1496
| 265
|
py
|
Python
|
todo/main.py
|
shuayb/simple-todo
|
7a6c840d38ada098b5cc3458d652c7db02ffd791
|
[
"MIT"
] | null | null | null |
todo/main.py
|
shuayb/simple-todo
|
7a6c840d38ada098b5cc3458d652c7db02ffd791
|
[
"MIT"
] | null | null | null |
todo/main.py
|
shuayb/simple-todo
|
7a6c840d38ada098b5cc3458d652c7db02ffd791
|
[
"MIT"
] | null | null | null |
from app import app, db
import models
import views
if __name__ == '__main__':
app.run()
# No need to pass debug=True here; debug is already set to true in config.py.
# app.run(debug=True)
# app.run(debug=True, use_debugger=False, use_reloader=False)
| 26.5
| 79
| 0.683019
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 167
| 0.630189
|
fb53c7de261609a0deb36f13cdae3c4c1cc92433
| 789
|
py
|
Python
|
Libraries/DUTs/Community/di_vsphere/pysphere/revertToNamedSnapshot.py
|
nneul/iTest-assets
|
478659d176891e45d81f7fdb27440a86a21965bb
|
[
"MIT"
] | 10
|
2017-12-28T10:15:56.000Z
|
2020-10-19T18:13:58.000Z
|
Libraries/DUTs/Community/di_vsphere/pysphere/revertToNamedSnapshot.py
|
nneul/iTest-assets
|
478659d176891e45d81f7fdb27440a86a21965bb
|
[
"MIT"
] | 37
|
2018-03-07T00:48:37.000Z
|
2021-03-22T20:03:48.000Z
|
Libraries/DUTs/Community/di_vsphere/pysphere/revertToNamedSnapshot.py
|
nneul/iTest-assets
|
478659d176891e45d81f7fdb27440a86a21965bb
|
[
"MIT"
] | 27
|
2018-03-06T19:56:01.000Z
|
2022-03-23T04:18:23.000Z
|
import sys
sys.path.append("./pysphere")
from pysphere import VIServer
from pysphere.resources.vi_exception import VIException, VIApiException, \
FaultTypes
if len(sys.argv) != 6:
sys.exit("error = please check arguments")
serverName = sys.argv[1]
login = sys.argv[2]
passwd = sys.argv[3]
vm_name = sys.argv[4]
snap_name = sys.argv[5]
server = VIServer()
server.connect(serverName, login, passwd)
myVm = server.get_vm_by_name(vm_name)
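# revert_to_named_snapshot is expected to return None on success; the check below reports the result accordingly.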
try:
revertTask = myVm.revert_to_named_snapshot(snap_name)
server.disconnect()
except (VIException), err:
print "RevertResult = " + err.message
sys.exit(1)
if revertTask is None:
print "RevertResult = success"
else:
print "RevertResult = failure"
| 29.222222
| 75
| 0.673004
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 110
| 0.139417
|
fb56942879beca982f2985123f64367d7b06b779
| 1,431
|
py
|
Python
|
easy_rec/python/utils/fg_util.py
|
xia-huang-411303/EasyRec
|
7b2050dddc0bfec9e551e2199a36414a3ee82588
|
[
"Apache-2.0"
] | 61
|
2021-08-19T06:10:03.000Z
|
2021-10-09T06:44:54.000Z
|
easy_rec/python/utils/fg_util.py
|
xia-huang-411303/EasyRec
|
7b2050dddc0bfec9e551e2199a36414a3ee82588
|
[
"Apache-2.0"
] | 41
|
2021-09-08T03:02:42.000Z
|
2021-09-29T09:00:57.000Z
|
easy_rec/python/utils/fg_util.py
|
xia-huang-411303/EasyRec
|
7b2050dddc0bfec9e551e2199a36414a3ee82588
|
[
"Apache-2.0"
] | 11
|
2021-08-20T06:19:08.000Z
|
2021-10-02T14:55:39.000Z
|
import json
import logging
import tensorflow as tf
from easy_rec.python.protos.dataset_pb2 import DatasetConfig
from easy_rec.python.protos.feature_config_pb2 import FeatureConfig
from easy_rec.python.utils.config_util import get_compatible_feature_configs
from easy_rec.python.utils.convert_rtp_fg import load_input_field_and_feature_config # NOQA
if tf.__version__ >= '2.0':
tf = tf.compat.v1
def load_fg_json_to_config(pipeline_config):
fg_json_path = pipeline_config.fg_json_path
if not fg_json_path:
return
label_fields = pipeline_config.data_config.label_fields
with tf.gfile.GFile(fg_json_path, 'r') as fin:
rtp_fg = json.load(fin)
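# Convert the RTP fg JSON into EasyRec input field and feature configurations.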
fg_config = load_input_field_and_feature_config(
rtp_fg, label_fields=label_fields)
pipeline_config.data_config.ClearField('input_fields')
pipeline_config.ClearField('feature_configs')
pipeline_config.feature_config.ClearField('features')
for input_config in fg_config.data_config.input_fields:
in_config = DatasetConfig.Field()
in_config.CopyFrom(input_config)
pipeline_config.data_config.input_fields.append(in_config)
fg_fea_config = get_compatible_feature_configs(fg_config)
for fc in fg_fea_config:
fea_config = FeatureConfig()
fea_config.CopyFrom(fc)
pipeline_config.feature_config.features.append(fea_config)
logging.info('data_config and feature_config has been replaced by fg_json.')
return pipeline_config
| 34.071429
| 92
| 0.811321
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 117
| 0.081761
|
fb56b6a8fcadb6c716511c7be794553961db8e2e
| 529
|
py
|
Python
|
modules/api/functional_test/live_tests/conftest.py
|
exoego/vinyldns
|
aac4c2afe4c599ac8c96ad3a826f3a6dff887104
|
[
"Apache-2.0"
] | null | null | null |
modules/api/functional_test/live_tests/conftest.py
|
exoego/vinyldns
|
aac4c2afe4c599ac8c96ad3a826f3a6dff887104
|
[
"Apache-2.0"
] | 1
|
2019-02-06T21:38:12.000Z
|
2019-02-06T21:38:12.000Z
|
modules/api/functional_test/live_tests/conftest.py
|
exoego/vinyldns
|
aac4c2afe4c599ac8c96ad3a826f3a6dff887104
|
[
"Apache-2.0"
] | null | null | null |
import pytest
@pytest.fixture(scope="session")
def shared_zone_test_context(request):
from shared_zone_test_context import SharedZoneTestContext
ctx = SharedZoneTestContext()
def fin():
ctx.tear_down()
request.addfinalizer(fin)
return ctx
@pytest.fixture(scope="session")
def zone_history_context(request):
from zone_history_context import ZoneHistoryContext
context = ZoneHistoryContext()
def fin():
context.tear_down()
request.addfinalizer(fin)
return context
| 18.241379
| 62
| 0.725898
| 0
| 0
| 0
| 0
| 510
| 0.964083
| 0
| 0
| 18
| 0.034026
|
fb57d98140afeca2dc5e728adfc2de4c920c0f82
| 15,642
|
py
|
Python
|
calculate_best_ball_scores.py
|
arnmishra/sleeper-best-ball
|
926d673eebe3a0f114a60f4749dcc451db792b4d
|
[
"MIT"
] | null | null | null |
calculate_best_ball_scores.py
|
arnmishra/sleeper-best-ball
|
926d673eebe3a0f114a60f4749dcc451db792b4d
|
[
"MIT"
] | null | null | null |
calculate_best_ball_scores.py
|
arnmishra/sleeper-best-ball
|
926d673eebe3a0f114a60f4749dcc451db792b4d
|
[
"MIT"
] | null | null | null |
import requests
import argparse
import nflgame
def get_user_id_to_team_name(league_id):
"""
Gets a map of fantasy player user id to their team name
"""
user_id_to_team_name = {}
r = requests.get("https://api.sleeper.app/v1/league/%s/users" % league_id)
user_data = r.json()
for user in user_data:
user_id_to_team_name[user['user_id']] = user['display_name']
return user_id_to_team_name
def get_roster_id_to_owner(user_id_to_team_name, league_id):
"""
Gets a map of the roster id to the fantasy owner team name
"""
roster_id_to_owner = {}
r = requests.get('https://api.sleeper.app/v1/league/%s/rosters' % league_id)
roster_info = r.json()
for roster in roster_info:
name = user_id_to_team_name[roster['owner_id']]
roster_id_to_owner[roster['roster_id']] = name
return roster_id_to_owner
def get_owner_to_roster(player_id_to_custom_id, roster_id_to_owner, league_id, week):
"""
Gets a map of the owner team name to the roster players
Also determines which two teams are in each matchup by building a map of
matchup id to the two owners playing each game
"""
owner_to_roster = {}
matchup_id_to_owners = {}
r = requests.get('https://api.sleeper.app/v1/league/%s/matchups/%s' %
(league_id, week))
rosters = r.json()
for roster in rosters:
owner = roster_id_to_owner[roster['roster_id']]
player_ids = roster['players']
custom_ids = [player_id_to_custom_id[player_id] for player_id in player_ids]
owner_to_roster[owner] = custom_ids
matchup_id = roster['matchup_id']
if matchup_id in matchup_id_to_owners:
matchup_id_to_owners[matchup_id].append(owner)
else:
matchup_id_to_owners[matchup_id] = [owner]
return owner_to_roster, matchup_id_to_owners
def get_player_id(first_name, last_name, team):
"""
Returns a custom player ID of first initial + last name + team
i.e. for Tom Brady in New England that is T.Brady-NE
"""
if team is None:
team = 'None'
return first_name[0] + "." + last_name + "-" + team
def get_custom_id_to_info():
"""
Gets a map of player name/team to position
"""
custom_id_to_info = {}
player_id_to_custom_id = {}
r = requests.get('https://api.sleeper.app/v1/players/nfl')
players = r.json()
for player_id in players:
player = players[player_id]
if player['fantasy_positions']:
position = player['fantasy_positions'][0]
if position in ('RB', 'WR', 'QB', 'TE'):
custom_id = get_player_id(player['first_name'], player['last_name'], player['team'])
if not custom_id:
continue
player_id_to_custom_id[player_id] = custom_id
custom_id_to_info[custom_id] = position
return custom_id_to_info, player_id_to_custom_id
def calculate_player_points(player):
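# Half-PPR scoring: 0.1 per rushing/receiving yard, 0.04 per passing yard, 6 per rushing/receiving TD,
# 4 per passing TD, 0.5 per reception, 2 per two-point conversion, minus 2 per interception or lost fumble.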
rushing_score = player.rushing_yds * 0.1 + player.rushing_tds * 6 + player.rushing_twoptm * 2
passing_score = player.passing_yds * 0.04 + player.passing_tds * 4 + player.passing_twoptm * 2
receiving_score = player.receiving_yds * 0.1 + player.receiving_tds * 6 + player.receiving_rec * 0.5 + player.receiving_twoptm * 2
negative_scores = player.passing_ints * 2 + player.fumbles_lost * 2
return rushing_score + passing_score + receiving_score - negative_scores
def get_player_to_points(year, week, custom_id_to_info):
"""
Gets a map of player ID to a tuple of the player's points and position
"""
player_id_to_points = {}
games = nflgame.games(int(year), week=int(week))
players = nflgame.combine_game_stats(games)
for player in players:
custom_id = player.name + "-" + player.team
if (custom_id in custom_id_to_info):
points = calculate_player_points(player)
player_id_to_points[custom_id] = (points, custom_id_to_info[custom_id])
print (player_id_to_points)
return player_id_to_points
def get_points(rbs, wrs, qbs, tes, roster_count):
"""
Gets the number of points a set of players makes up given the roster counts
"""
flex = rbs[roster_count['rb']:] + \
wrs[roster_count['wr']:] + \
tes[roster_count['te']:]
flex.sort(reverse=True)
return sum(rbs[:roster_count['rb']]) + \
sum(wrs[:roster_count['wr']]) + \
sum(qbs[:roster_count['qb']]) + \
sum(tes[:roster_count['te']]) + \
sum(flex[:roster_count['flex']])
def get_owner_to_score(owner_to_roster, player_to_points, roster_count):
"""
Gets a map of the owner to their fantasy score
"""
owner_to_score = {}
for owner in owner_to_roster:
rbs = []
wrs = []
qbs = []
tes = []
for player in owner_to_roster[owner]:
if player in player_to_points:
points, position = player_to_points[player]
if position == 'RB':
rbs.append(points)
elif position == 'WR':
wrs.append(points)
elif position == 'QB':
qbs.append(points)
elif position == 'TE':
tes.append(points)
rbs.sort(reverse=True)
wrs.sort(reverse=True)
qbs.sort(reverse=True)
tes.sort(reverse=True)
owner_to_score[owner] = get_points(rbs, wrs, qbs, tes, roster_count)
return owner_to_score
def get_owner_to_weekly_record(matchup_id_to_owners, final_owner_to_score):
"""
Gets a map of the owner to their best ball record
"""
owner_to_record = {}
for matchup_id in matchup_id_to_owners:
owner_1 = matchup_id_to_owners[matchup_id][0]
owner_2 = matchup_id_to_owners[matchup_id][1]
score_1 = final_owner_to_score[owner_1]
score_2 = final_owner_to_score[owner_2]
if score_1 > score_2:
owner_to_record[owner_1] = [1, 0, 0]
owner_to_record[owner_2] = [0, 1, 0]
elif score_1 == score_2:
owner_to_record[owner_1] = [0, 0, 1]
owner_to_record[owner_2] = [0, 0, 1]
else:
owner_to_record[owner_1] = [0, 1, 0]
owner_to_record[owner_2] = [1, 0, 0]
return owner_to_record
def parse_args():
parser = argparse.ArgumentParser(
description='Get Sleeper App Best Ball Scores')
parser.add_argument(
'-i', '--league_id', help='The ID of your Sleeper League', required=True
)
parser.add_argument(
'-y','--year', help='Which year to work with (i.e. 2018).',
required=True)
parser.add_argument(
'-w', '--week',
help='Which week to work with (i.e. 1), for full season leave blank',
required=False)
parser.add_argument(
'-e', '--end_week',
help='Sum of all weeks till the end week. Default to 13 for 13 week season.',
required=False, default=13, type=int)
parser.add_argument(
'-b', '--num_rb',
help='Number of Starting Running Backs in your league (Default 2)',
required=False, default=2, type=int)
parser.add_argument(
'-r', '--num_wr',
help='Number of Starting Wide Receivers in your league (Default 2)',
required=False, default=2, type=int)
parser.add_argument(
'-q', '--num_qb',
help='Number of Starting Quarterbacks in your league (Default 1)',
required=False, default=1, type=int)
parser.add_argument(
'-t', '--num_te',
help='Number of Starting Tight Ends in your league (Default 1)',
required=False, default=1, type=int)
parser.add_argument(
'-f', '--num_flex',
help='Number of Starting Flex(WR/RB/TE) in your league (Default 2)',
required=False, default=2, type=int)
parser.add_argument(
'-s', '--sort_by',
help='Sort by score, record, rank, top6. (Default Score)',
required=False, default='score', type=str)
return vars(parser.parse_args())
if __name__ == "__main__":
"Parses all the arguments into variables"
args = parse_args()
league_id = args['league_id']
year = args['year']
week = args['week']
end_week = args['end_week']
roster_count = {}
roster_count['rb'] = args['num_rb']
roster_count['wr'] = args['num_wr']
roster_count['qb'] = args['num_qb']
roster_count['te'] = args['num_te']
roster_count['flex'] = args['num_flex']
# Gets a map of the user id to the owner team name
user_id_to_team_name = get_user_id_to_team_name(league_id)
# Gets a map of the roster id to the owner team name
roster_id_to_owner = get_roster_id_to_owner(user_id_to_team_name, league_id)
# Gets a map of each player id to their name and position
custom_id_to_info, player_id_to_custom_id = get_custom_id_to_info()
# A map to track the owner name to its best ball score
final_owner_to_score = {}
# A map of each owner to their best ball record
final_owner_to_record = {}
# A map of each owner to their best ball rank
final_owner_to_rank = {}
# A map of each owner to number of top 6 best ball performances
final_owner_to_top_half_or_bottom = {}
num_teams = len(user_id_to_team_name)
if week:
# If we are getting it for an individual week, calculate that data
# Get the number of fantasy points each player scored that week
player_to_points = get_player_to_points(year, week, custom_id_to_info)
# Gets the map of each owner to their players and which two teams are playing each other
owner_to_roster, matchup_id_to_owners = get_owner_to_roster(
player_id_to_custom_id, roster_id_to_owner, league_id, week)
# Gets the best ball score for each owner
final_owner_to_score = get_owner_to_score(owner_to_roster, player_to_points, roster_count)
# Gets the best ball record for each owner
final_owner_to_record = get_owner_to_weekly_record(
matchup_id_to_owners, final_owner_to_score)
# Sorts the teams by score and determines if they are top 6
sorted_by_score = sorted(final_owner_to_score.items(), key=lambda kv: kv[1])
for i in range(len(sorted_by_score)):
owner = sorted_by_score[i][0]
final_owner_to_rank[owner] = [num_teams-i]
if(i >= 6):
final_owner_to_top_half_or_bottom[owner] = 1
else:
# If we are getting it for the whole season, calculate that data for each week
for week in range(1, end_week + 1):
# Get the number of fantasy points each player scored that week
player_to_points = get_player_to_points(year, week, custom_id_to_info)
# Gets the map of each owner to their players and which two teams are playing each other
owner_to_roster, matchup_id_to_owners = get_owner_to_roster(
player_id_to_custom_id, roster_id_to_owner, league_id, week)
# Gets the best ball score for each owner
owner_to_score = get_owner_to_score(owner_to_roster, player_to_points, roster_count)
# Gets the best ball record for each owner
owner_to_record = get_owner_to_weekly_record(
matchup_id_to_owners, owner_to_score)
# Adds the total scores and records for each team
for owner in owner_to_score:
if owner in final_owner_to_score:
final_owner_to_score[owner] += owner_to_score[owner]
records = final_owner_to_record[owner]
new_record = owner_to_record[owner]
final_owner_to_record[owner] = [sum(x) for x in zip(records, new_record)]
else:
final_owner_to_score[owner] = owner_to_score[owner]
final_owner_to_record[owner] = owner_to_record[owner]
# Creates list of tuple of (owner, score) sorted by score
sorted_by_score = sorted(final_owner_to_score.items(), key=lambda kv: kv[1])
# Sorts the teams by score and determines if they are top 6
for i in range(num_teams):
owner = sorted_by_score[i][0]
if owner in final_owner_to_rank:
final_owner_to_rank[owner].append(num_teams-i)
else:
final_owner_to_rank[owner] = [num_teams-i]
if(i >= 6):
if owner in final_owner_to_top_half_or_bottom:
final_owner_to_top_half_or_bottom[owner] += 1
else:
final_owner_to_top_half_or_bottom[owner] = 1
# Prints out all the information sorted as the user wants
for owner in final_owner_to_record:
final_owner_to_record[owner] = ("-").join([str(elem) for elem in final_owner_to_record[owner]])
final_owner_to_rank[owner] = round(float(sum(final_owner_to_rank[owner])) / len(final_owner_to_rank[owner]), 2)
if owner not in final_owner_to_top_half_or_bottom:
final_owner_to_top_half_or_bottom[owner] = 0
if args['sort_by'] == 'record':
sorted_records = final_owner_to_record.items()
sorted_records = sorted(sorted_records, key=lambda tup: int(tup[1].split("-")[0])) # sort by the records
print("{0:<20}{1:<20}{2:<20}{3:<20}{4:<20}".format('Team', 'Record(W-L-T)', 'Score', 'Top 6 Performances', 'Average Rank'))
for record in sorted_records:
owner = record[0]
print("{0:<20}{1:<20}{2:<20}{3:<20}{4:<20}".format(owner, record[1], final_owner_to_score[owner], final_owner_to_top_half_or_bottom[owner], final_owner_to_rank[owner]))
elif args['sort_by'] == 'rank':
sorted_rank = final_owner_to_rank.items()
sorted_rank = sorted(sorted_rank, key=lambda tup: tup[1], reverse=True) # sort by the ranks
print("{0:<20}{1:<20}{2:<20}{3:<20}{4:<20}".format('Team', 'Average Rank', 'Score', 'Record(W-L-T)', 'Top 6 Performances'))
for rank in sorted_rank:
owner = rank[0]
print("{0:<20}{1:<20}{2:<20}{3:<20}{4:<20}".format(owner, rank[1], final_owner_to_score[owner], final_owner_to_record[owner], final_owner_to_top_half_or_bottom[owner]))
elif args['sort_by'] == 'top6':
sorted_top6 = final_owner_to_top_half_or_bottom.items()
sorted_top6 = sorted(sorted_top6, key=lambda tup: tup[1]) # sort by the top 6 performances
print("{0:<20}{1:<20}{2:<20}{3:<20}{4:<20}".format('Team', 'Top 6 Performances', 'Score', 'Record(W-L-T)', 'Average Rank'))
for top6 in sorted_top6:
owner = top6[0]
print("{0:<20}{1:<20}{2:<20}{3:<20}{4:<20}".format(owner, top6[1], final_owner_to_score[owner], final_owner_to_record[owner], final_owner_to_rank[owner]))
elif args['sort_by'] == 'score':
sorted_scores = final_owner_to_score.items()
sorted_scores = sorted(sorted_scores, key=lambda tup: tup[1]) # sort by the scores
print("{0:<20}{1:<20}{2:<20}{3:<20}{4:<20}".format('Team', 'Score', 'Record(W-L-T)', 'Top 6 Performances', 'Average Rank'))
for score in sorted_scores:
owner = score[0]
print("{0:<20}{1:<20}{2:<20}{3:<20}{4:<20}".format(owner, score[1], final_owner_to_record[owner], final_owner_to_top_half_or_bottom[owner], final_owner_to_rank[owner]))
else:
print("Please enter either 'score', 'record', 'rank', or 'top6' for the sort option. %s isn't recognized" % args['sort_by'])
| 47.256798
| 180
| 0.637642
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 4,208
| 0.269019
|
fb593493c97b14b708bc0b8b5a7f5e7166948d28
| 10,489
|
py
|
Python
|
Kelp/kelp.py
|
trondkr/particleDistributions
|
1f5be088150db92c985c00210951ab62521bf694
|
[
"MIT"
] | null | null | null |
Kelp/kelp.py
|
trondkr/particleDistributions
|
1f5be088150db92c985c00210951ab62521bf694
|
[
"MIT"
] | 1
|
2019-07-11T15:02:32.000Z
|
2019-07-11T16:57:03.000Z
|
Kelp/kelp.py
|
trondkr/particleDistributions
|
1f5be088150db92c985c00210951ab62521bf694
|
[
"MIT"
] | 1
|
2019-12-23T06:49:29.000Z
|
2019-12-23T06:49:29.000Z
|
#!/usr/bin/env python
from datetime import datetime, timedelta
import numpy as np
from opendrift.readers import reader_basemap_landmask
from opendrift.readers import reader_ROMS_native
from kelp.kelpClass import PelagicPlanktonDrift
from opendrift.readers import reader_netCDF_CF_generic
import logging
import gdal
import os
from netCDF4 import Dataset, datetime, date2num,num2date
from numpy.random import RandomState
import random
import glob
import matplotlib.pyplot as plt
try:
import ogr
import osr
except Exception as e:
print(e)
raise ValueError('OGR library is needed to read shapefiles.')
def setupSeed(hoursBetweenTimestepInROMSFiles,startTime,endTime,startReleaseTime,endReleaseTime,releaseParticles):
##################################################
# Create seed variation as function of day
##################################################
# Make datetime array from start to end at 3 hour interval
#interval = timedelta(hours=hoursBetweenTimestepInROMSFiles)
difference=endTime-startTime
hoursOfSimulation=divmod(difference.total_seconds(), 3600)
difference=endReleaseTime-startReleaseTime
hoursOfRelease=divmod(difference.total_seconds(), 3600)
#startSimulationJD=startTime.timetuple().tm_yday
#endSimulationJD=endTime.timetuple().tm_yday
timeStepsSimulation=int(int(hoursOfSimulation[0])/hoursBetweenTimestepInROMSFiles)
#startReleaseJD=startReleaseTime.timetuple().tm_yday
#endReleaseJD=endReleaseTime.timetuple().tm_yday
#timeStepsRelease=int(int(hoursOfRelease[0])/hoursBetweenTimestepInROMSFiles)
print("=>SIMULATION: Drift simulation will run for %s simulation hours" %(timeStepsSimulation))
print("=>Release: Simulated Release will run for %s simulation hours\n initiated on %s and ending on %s"%(timeStepsSimulation,startReleaseTime,endReleaseTime))
interval = timedelta(hours=6)
hoursPerRelease=divmod(interval.total_seconds(), 3600) #hours per Release event
timeStepsRelease=int(int(hoursOfRelease[0])/int(hoursPerRelease[0])) #number of Release timesteps
ReleaseTimes = [startReleaseTime + interval*n for n in range(timeStepsRelease)] #times of Release
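# Draw the particle count for each release event from a normal distribution centred on releaseParticles, then sort so releases ramp up over time.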
num=np.random.normal(releaseParticles,int(releaseParticles/2)-1, size=len(ReleaseTimes)).astype(int)
num=np.sort(num) #sort particles in increasing order
#num=np.concatenate((num[len(num)%2::2],num[::-2]),axis=0) #release the highest number of particles at the midpoint of the Release period
print("Release: Simulated Release will release %s kelp particles"%(np.sum(num)))
return num, ReleaseTimes
def kelpProperties(num):
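# Give each release event a randomly chosen kelp type, stored as (weight, volume, area, density).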
kelpProps=np.zeros((len(num), 4))
mylist=[0,1,2]
weights=[0.5069324, 0.4485244,0.10138648]
volumes=[0.000140226, 0.000753867, 2.80452E-05]
areas=[0.1149934, 0.05, 0.02299868]
densities=[2000,1100,300]
for i in range(len(num)):
ind=random.choice(mylist)
kelpProps[i,0]=weights[ind]
kelpProps[i,1]=volumes[ind]
kelpProps[i,2]=areas[ind]
# Calculate the density of the kelp
density=weights[ind]/volumes[ind]
kelpProps[i,3]=densities[ind]
return kelpProps
def createOutputFilenames(startTime,endTime,polygonIndex,shapefile,verticalBehavior):
startDate=''
if startTime.day<10:
startDate+='0%s'%(startTime.day)
else:
startDate+='%s'%(startTime.day)
if startTime.month<10:
startDate+='0%s'%(startTime.month)
else:
startDate+='%s'%(startTime.month)
startDate+='%s'%(startTime.year)
endDate=''
if endTime.day<10:
endDate+='0%s'%(endTime.day)
else:
endDate+='%s'%(endTime.day)
if endTime.month<10:
endDate+='0%s'%(endTime.month)
else:
endDate+='%s'%(endTime.month)
endDate+='%s'%(endTime.year)
# Special file naming for KINO. Each layer has the name 'species.shp' and we want the species name only.
head,tail=os.path.split(shapefile)
specie="Kelp"
if verticalBehavior:
outputFilename='results/%s_polygon_%s_kelp_opendrift_%s_to_%s_vertical.nc'%(specie,polygonIndex,startDate,endDate)
animationFilename='figures/%s_polygon_%s_kelp_animation_%s_to_%s_vertical.mp4'%(specie,polygonIndex,startDate,endDate)
plotFilename='figures/%s_polygon_%s_kelp_plot_%s_to_%s_vertical.png'%(specie,polygonIndex,startDate,endDate)
else:
outputFilename='results/%s_polygon_%s_kelp_opendrift_%s_to_%s_novertical.nc'%(specie,polygonIndex,startDate,endDate)
animationFilename='figures/%s_polygon_%s_kelp_animation_%s_to_%s_novertical.mp4'%(specie,polygonIndex,startDate,endDate)
plotFilename='figures/%s_polygon_%s_kelp_plot_%s_to_%s_novertical.png'%(specie,polygonIndex,startDate,endDate)
if not os.path.exists('figures'):
os.makedirs('figures')
if not os.path.exists('results'):
os.makedirs('results')
return outputFilename, animationFilename, plotFilename
def createAndRunSimulation(lowDepth,highDepth,endTime,layer,polygonIndex,shapefile,outputFilename,animationFilename,plotFilename,releaseParticles,kinoDirectory,pattern_kino,svimDirectory,pattern_svim,verticalBehavior):
# Setup a new simulation
o = PelagicPlanktonDrift(loglevel=0) # Set loglevel to 0 for debug information
#######################
# Preparing readers
#######################
reader_basemap = reader_basemap_landmask.Reader(
llcrnrlon=16, llcrnrlat=68,
urcrnrlon=20, urcrnrlat=72,
resolution='f', projection='merc')
o.add_reader([reader_basemap]) #Do not include basemap when stranding is deactivated
print([s for s in pattern_kino])
reader_kino = reader_ROMS_native.Reader([s for s in pattern_kino])
reader_kino.interpolation = 'linearNDFast' #linearND
reader_svim = reader_ROMS_native.Reader(svimDirectory+pattern_svim)
reader_svim.interpolation = 'linearNDFast' #linearND
#reader_arome = reader_netCDF_CF_generic.Reader('http://thredds.met.no/thredds/dodsC/arome25/arome_metcoop_default2_5km_latest.nc')
o.add_reader([reader_kino,reader_svim])
num, ReleaseTimes = setupSeed(hoursBetweenTimestepInROMSFiles,startTime,endTime,startReleaseTime,endReleaseTime,releaseParticles)
#######################
#Adjusting configuration
#######################
o.set_config('processes:turbulentmixing', True)
o.set_config('turbulentmixing:diffusivitymodel','environment')
o.set_config('turbulentmixing:timestep', 30) # seconds
o.set_config('turbulentmixing:verticalresolution', 1) # default is 1 meter, but since we have longer timestep we justify it
o.set_config('processes:verticaladvection', True)
o.set_config('turbulentmixing:TSprofiles', True)
# o.set_config('drift:scheme', 'euler')
o.set_config('drift:scheme', 'runge-kutta')
#del o.fallback_values['x_sea_water_velocity']
#del o.fallback_values['y_sea_water_velocity']
o.set_config('general:coastline_action', 'stranding')  # Strand particles that hit the coastline
#######################
# Seed kelp particles
#######################
kelpProps=kelpProperties(num)
for i, nums in enumerate(num):
if nums <= 0:
continue
print("Running i=%s num=%s and polygon=%s"%(i,nums,polygonIndex))
o.seed_from_shapefile(shapefile, nums,featurenum=[polygonIndex],
z=np.random.randint(low=lowDepth, high=highDepth, size=np.shape(nums)),
weight=kelpProps[i,0],
volume=kelpProps[i,1],
diameter=kelpProps[i,2],
density=kelpProps[i,3],
time=ReleaseTimes[i])
#reader_basemap.plot()
#########################
# Run the model
#########################
o.run(end_time=endTime, time_step=timedelta(hours=2),
outfile=outputFilename)
#export_variables=['lon', 'lat', 'z','temp','length','weight','survival'])
print(o)
#########################
# SETUP FOR KELP PROJECT
#########################
startTime=datetime(2016,4,10,12,0,0)
endTime=datetime(2016,5,26,23,0,0)
startReleaseTime=startTime
endReleaseTime=datetime(2016,4,12,12,0,0)
releaseParticles=4 # Per timestep multiplied by gaussian bell (so maximum is releaseParticles and minimum is close to zero)
lowDepth, highDepth = -7, -2 # in negative meters
verticalBehavior=False
hoursBetweenTimestepInROMSFiles=1
#kinoDirectory='/work/users/trondk/KINO/FORWARD/Run/RESULTS/'+str(startTime.year)+'/'
kinoDirectory='/work/shared/nn9297k/Nordfjord/'
kinoDirectory='/imr/vol1/NorFjords5/Malangen-160m_AUG2015-AUG2016/'
svimDirectory='/work/shared/imr/SVIM/'+str(startTime.year)+'/'
firstkino = int(date2num(startTime,units="days since 1948-01-01 00:00:00",calendar="standard"))
lastkino = int(date2num(endTime,units="days since 1948-01-01 00:00:00",calendar="standard"))
apattern = 'norfjords_160m_his.nc4_%s*'%(startTime.year)
argument="%s%s"%(kinoDirectory,apattern)
pattern_kino = glob.glob(argument)
pattern_kino.sort()
print(pattern_kino)
pattern_svim='ocean_avg_*.nc'
shapefile='/work/shared/nn9297k/Kelp/Shapefile/KelpExPol_utenNASAland.shp'
print("=> Using shapefile %s"%(shapefile))
s = ogr.Open(shapefile)
for layer in s:
polygons=[x+1 for x in range(layer.GetFeatureCount()-1)]
#polygons=[1,2,3,4,7] #N.Trench,Dogger bank C, Dogger bank, German bight, Viking bank
#polygons=[2] #N.Trench,Dogger bank C, Dogger bank, German bight, Viking bank
for polygonIndex in polygons:
feature = layer.GetFeature(polygonIndex-1)
print("Area",feature.GetGeometryRef().GetArea())
geom = feature.GetGeometryRef()
points = geom.GetGeometryCount()
ring = geom.GetGeometryRef(0)
print("jj",polygonIndex, points)
if ring.GetPointCount() > 3:
outputFilename, animationFilename, plotFilename = createOutputFilenames(startTime,endTime,polygonIndex,shapefile,verticalBehavior)
print("Result files will be stored as:\nnetCDF=> %s\nmp4=> %s"%(outputFilename,animationFilename))
createAndRunSimulation(lowDepth,highDepth,endTime,
layer,polygonIndex,shapefile,
outputFilename,animationFilename,plotFilename,releaseParticles,
kinoDirectory,pattern_kino,svimDirectory,pattern_svim,verticalBehavior)
| 38.992565
| 218
| 0.695586
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 3,701
| 0.352846
|
fb59a435d0311305f0e15444f804e1c503ccd050
| 6,818
|
py
|
Python
|
evernotebot/bot/storage.py
|
AuroraDysis/evernote-telegram-bot
|
eca7b7c53d2e034e366f1e715211dbe98b4991f7
|
[
"MIT"
] | 1
|
2021-03-29T07:31:22.000Z
|
2021-03-29T07:31:22.000Z
|
evernotebot/bot/storage.py
|
AuroraDysis/evernote-telegram-bot
|
eca7b7c53d2e034e366f1e715211dbe98b4991f7
|
[
"MIT"
] | null | null | null |
evernotebot/bot/storage.py
|
AuroraDysis/evernote-telegram-bot
|
eca7b7c53d2e034e366f1e715211dbe98b4991f7
|
[
"MIT"
] | null | null | null |
import json
import sqlite3
import typing
from typing import Optional, Dict
from copy import deepcopy
from contextlib import suppress
from bson.objectid import ObjectId
from pymongo import MongoClient
from pymongo.errors import ConfigurationError
class MongoStorageException(Exception):
pass
class Mongo:
def __init__(self, connection_string, *, collection=None, db_name=None):
if collection is None:
raise MongoStorageException('`collection` is required')
self._driver = MongoClient(connection_string)
with suppress(ConfigurationError):
db = self._driver.get_database(db_name)
if db is None:
raise MongoStorageException(
'You have to specify database name '
'either in connection string or as `db_name` parameter')
self._collection = db.get_collection(collection)
def create(self, data: dict, auto_generate_id=False):
data = deepcopy(data)
if "id" in data:
data["_id"] = data["id"]
del data["id"]
elif not auto_generate_id:
raise MongoStorageException("`id` required")
object_id = self._collection.insert_one(data).inserted_id
if isinstance(object_id, ObjectId):
object_id = str(object_id)
return object_id
def get(self, object_id, fail_if_not_exists=False):
query = object_id if isinstance(object_id, dict) else {"_id": object_id}
data = self._collection.find_one(query)
if fail_if_not_exists and not data:
raise MongoStorageException(f"Object not found. Query: {query}")
if data:
data["id"] = data["_id"]
del data["_id"]
return data
def get_all(self, query):
for document in self._collection.find(query):
document["id"] = document["_id"]
del document["_id"]
yield document
def save(self, data: dict):
object_id = data.get("id")
if object_id:
data["_id"] = object_id
del data["id"]
query = {"_id": object_id}
result = self._collection.update_one(query, {"$set": data})
if result.matched_count == 0:
raise MongoStorageException(f"Object `{object_id}` not found")
data["id"] = object_id
else:
object_id = str(self._collection.insert_one(data).inserted_id)
if isinstance(object_id, ObjectId):
object_id = str(object_id)
data["id"] = object_id
return object_id
def delete(self, object_id, check_deleted_count=True):
result = self._collection.delete_one({"_id": object_id})
if check_deleted_count and result.deleted_count != 1:
raise MongoStorageException(f"Object `{object_id}` not found")
def close(self):
self._driver.close()
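# Minimal usage sketch (the connection string and collection name below are illustrative):
#   users = Mongo('mongodb://localhost:27017/evernotebot', collection='users')
#   users.create({'id': 1, 'name': 'alice'})
#   user = users.get(1, fail_if_not_exists=True)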
class Sqlite:
def __init__(self, dirpath: str, *, collection: str = None, db_name: str = None) -> None:
db_filepath = f'{dirpath}/{db_name}'
self._connection = sqlite3.connect(db_filepath)
self._table_name = collection
self.__execute_sql(
f'CREATE TABLE IF NOT EXISTS {collection}'
'(id INTEGER PRIMARY KEY AUTOINCREMENT, data TEXT)'
)
def __execute_sql(self, sql: str, *args) -> sqlite3.Cursor:
sql = sql.strip().upper()
cursor = self._connection.execute(sql, args)
if not sql.startswith('SELECT'):
self._connection.commit()
return cursor
def create(self, data: dict, auto_generate_id: bool = False) -> int:
table = self._table_name
if auto_generate_id:
if 'id' in data:
del data['id']
sql = f'INSERT INTO {table}(data) VALUES(?)'
cursor = self.__execute_sql(sql, json.dumps(data))
else:
object_id = data['id']
if object_id <= 0:
raise Exception(f'Invalid id `{object_id}`. Id must be >= 0')
cursor = self.__execute_sql(f'INSERT INTO {table}(id, data) VALUES(?, ?)',
object_id, json.dumps(data))
return cursor.lastrowid
def get(self, object_id: int, fail_if_not_exists: bool = False) -> Dict:
query = object_id if isinstance(object_id, dict) else {'id': object_id}
objects = self.get_all(query)
result = list(objects)
if fail_if_not_exists and not result:
raise Exception(f'Object not found. Query: {query}')
return result and result[0]
def get_all(self, query: Optional[Dict] = None) -> typing.Generator:
if query is None:
query = {}
table = self._table_name
args = tuple()
if 'id' in query:
sql = f'SELECT id, data FROM {table} WHERE id=?'
args = (query['id'],)
else:
sql = f'SELECT id, data FROM {table}'
cursor = self.__execute_sql(sql, *args)
objects = cursor.fetchall()
if not objects:
return tuple()
for object_id, json_data in objects:
data = json.loads(json_data)
data['id'] = object_id
if self._check_query(data, query):
yield data
def _check_query(self, document: dict, query: dict) -> bool:
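# Mongo-style matching: dotted keys ("a.b") walk nested dicts, dict-valued query entries recurse,
# and scalar values are compared for equality.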
matched = True
for k, query_value in query.items():
key_value = document
for name in k.split('.'):
key_value = key_value.get(name) if isinstance(key_value, dict) else None
if key_value is None:
break
if isinstance(query_value, dict):
matched = self._check_query(key_value, query_value)
else:
matched = key_value == query_value
if not matched:
return False
return matched
def save(self, data: dict) -> int:
object_id = data['id']
if not object_id:
object_id = self.create(data, auto_generate_id=True)
else:
table = self._table_name
sql = f'UPDATE {table} SET data=? WHERE id=?'
cursor = self.__execute_sql(sql, json.dumps(data), object_id)
if cursor.rowcount == 0:
raise Exception(f'Object `{object_id}` not found')
return object_id
def delete(self, object_id: int, check_deleted_count: bool = True) -> None:
table = self._table_name
sql = f'DELETE FROM {table} WHERE id=?'
cursor = self.__execute_sql(sql, object_id)
if check_deleted_count and cursor.rowcount != 1:
raise Exception(f'Object `{object_id}` not found')
def close(self) -> None:
self._connection.commit()
self._connection.close()
| 37.668508
| 93
| 0.589909
| 6,563
| 0.962599
| 871
| 0.12775
| 0
| 0
| 0
| 0
| 851
| 0.124817
|
fb59b4889f363415f77eaf0d9d1624d307371014
| 13,486
|
py
|
Python
|
tests/models/programdb/opstress/opstress_integration_test.py
|
TahaEntezari/ramstk
|
f82e5b31ef5c4e33cc02252263247b99a9abe129
|
[
"BSD-3-Clause"
] | 26
|
2019-05-15T02:03:47.000Z
|
2022-02-21T07:28:11.000Z
|
tests/models/programdb/opstress/opstress_integration_test.py
|
TahaEntezari/ramstk
|
f82e5b31ef5c4e33cc02252263247b99a9abe129
|
[
"BSD-3-Clause"
] | 815
|
2019-05-10T12:31:52.000Z
|
2022-03-31T12:56:26.000Z
|
tests/models/programdb/opstress/opstress_integration_test.py
|
TahaEntezari/ramstk
|
f82e5b31ef5c4e33cc02252263247b99a9abe129
|
[
"BSD-3-Clause"
] | 9
|
2019-04-20T23:06:29.000Z
|
2022-01-24T21:21:04.000Z
|
# pylint: skip-file
# type: ignore
# -*- coding: utf-8 -*-
#
# tests.models.opstress.opstress_integration_test.py is part of The RAMSTK Project
#
# All rights reserved.
# Copyright since 2007 Doyle "weibullguy" Rowland doyle.rowland <AT> reliaqual <DOT> com
"""Class for testing operating stress integrations."""
# Third Party Imports
import pytest
from pubsub import pub
from treelib import Tree
# RAMSTK Package Imports
from ramstk.models import RAMSTKOpStressRecord, RAMSTKOpStressTable
@pytest.fixture(scope="class")
def test_tablemodel(test_program_dao):
"""Get a data manager instance for each test class."""
# Create the device under test (dut) and connect to the database.
dut = RAMSTKOpStressTable()
dut.do_connect(test_program_dao)
dut.do_select_all(
{
"revision_id": 1,
"hardware_id": 1,
"mode_id": 6,
"mechanism_id": 3,
"load_id": 3,
}
)
yield dut
# Unsubscribe from pypubsub topics.
pub.unsubscribe(dut.do_get_attributes, "request_get_opstress_attributes")
pub.unsubscribe(dut.do_set_attributes, "request_set_opstress_attributes")
pub.unsubscribe(dut.do_set_attributes, "wvw_editing_opstress")
pub.unsubscribe(dut.do_update, "request_update_opstress")
pub.unsubscribe(dut.do_select_all, "selected_revision")
pub.unsubscribe(dut.do_get_tree, "request_get_opstress_tree")
pub.unsubscribe(dut.do_delete, "request_delete_opstress")
pub.unsubscribe(dut.do_insert, "request_insert_opstress")
# Delete the device under test.
del dut
@pytest.mark.usefixtures("test_attributes", "test_tablemodel")
class TestSelectMethods:
"""Class for testing data manager select_all() and select() methods."""
def on_succeed_select_all(self, tree):
assert isinstance(tree, Tree)
assert isinstance(tree.get_node(1).data["opstress"], RAMSTKOpStressRecord)
print("\033[36m\nsucceed_retrieve_opstress topic was broadcast.")
@pytest.mark.integration
def test_do_select_all_populated_tree(self, test_attributes, test_tablemodel):
"""do_select_all() should return a Tree() object populated with
RAMSTKOpStressRecord instances on success."""
pub.subscribe(self.on_succeed_select_all, "succeed_retrieve_opstress")
pub.sendMessage("selected_revision", attributes=test_attributes)
pub.unsubscribe(self.on_succeed_select_all, "succeed_retrieve_opstress")
@pytest.mark.usefixtures("test_attributes", "test_tablemodel")
class TestInsertMethods:
"""Class for testing the data manager insert() method."""
def on_succeed_insert_sibling(self, node_id, tree):
assert node_id == 5
assert isinstance(tree, Tree)
assert isinstance(tree.get_node(5).data["opstress"], RAMSTKOpStressRecord)
print("\033[36m\nsucceed_insert_opstress topic was broadcast.")
def on_fail_insert_no_parent(self, error_message):
assert error_message == (
"do_insert: Database error when attempting to add a record. Database "
"returned:\n\tKey (fld_load_id)=(100) is not present in table "
'"ramstk_op_load".'
)
print("\033[35m\nfail_insert_opstress topic was broadcast.")
@pytest.mark.integration
def test_do_insert_sibling(self, test_attributes, test_tablemodel):
"""should send success message, add record to record tree and update
last_id."""
pub.subscribe(self.on_succeed_insert_sibling, "succeed_insert_opstress")
pub.sendMessage("request_insert_opstress", attributes=test_attributes)
assert test_tablemodel.last_id == 5
pub.unsubscribe(self.on_succeed_insert_sibling, "succeed_insert_opstress")
@pytest.mark.integration
def test_do_insert_no_parent(self, test_attributes, test_tablemodel):
"""should send the fail message when load ID does not exist."""
pub.subscribe(self.on_fail_insert_no_parent, "fail_insert_opstress")
test_attributes["load_id"] = 100
pub.sendMessage("request_insert_opstress", attributes=test_attributes)
assert test_tablemodel.last_id == 5
pub.unsubscribe(self.on_fail_insert_no_parent, "fail_insert_opstress")
@pytest.mark.usefixtures("test_tablemodel")
class TestDeleteMethods:
"""Class for testing the data manager delete() method."""
def on_succeed_delete(self, tree):
assert isinstance(tree, Tree)
print(
"\033[36m\nsucceed_delete_opstress topic was broadcast when deleting "
"a failure mode."
)
def on_fail_delete_non_existent_id(self, error_message):
assert error_message == ("Attempted to delete non-existent Opstress ID 300.")
print("\033[35m\nfail_delete_opstress topic was broadcast.")
def on_fail_delete_not_in_tree(self, error_message):
assert error_message == ("Attempted to delete non-existent Opstress ID 4.")
print("\033[35m\nfail_delete_opstress topic was broadcast.")
@pytest.mark.integration
def test_do_delete(self, test_tablemodel):
"""should remove the record from the record tree and update last_id."""
pub.subscribe(self.on_succeed_delete, "succeed_delete_opstress")
pub.sendMessage("request_delete_opstress", node_id=3)
assert test_tablemodel.last_id == 4
assert test_tablemodel.tree.get_node(3) is None
pub.unsubscribe(self.on_succeed_delete, "succeed_delete_opstress")
@pytest.mark.integration
def test_do_delete_non_existent_id(self):
"""should send the fail message when stress ID does not exist."""
pub.subscribe(self.on_fail_delete_non_existent_id, "fail_delete_opstress")
pub.sendMessage("request_delete_opstress", node_id=300)
pub.unsubscribe(self.on_fail_delete_non_existent_id, "fail_delete_opstress")
@pytest.mark.integration
def test_do_delete_not_in_tree(self, test_tablemodel):
"""should send the fail message record does not exist in record tree."""
pub.subscribe(self.on_fail_delete_not_in_tree, "fail_delete_opstress")
test_tablemodel.tree.remove_node(4)
pub.sendMessage("request_delete_opstress", node_id=4)
pub.unsubscribe(self.on_fail_delete_not_in_tree, "fail_delete_opstress")
@pytest.mark.usefixtures("test_tablemodel")
class TestUpdateMethods:
"""Class for testing update() and update_all() methods."""
def on_succeed_update(self, tree):
assert isinstance(tree, Tree)
assert tree.get_node(3).data["opstress"].description == (
"Test failure opstress"
)
assert tree.get_node(3).data["opstress"].measurable_parameter == "Parameter"
print("\033[36m\nsucceed_update_opstress topic was broadcast")
def on_succeed_update_all(self):
print("\033[36m\nsucceed_update_all topic was broadcast")
def on_fail_update_wrong_data_type(self, error_message):
assert error_message == (
"do_update: The value for one or more attributes for opstress ID 3 was "
"the wrong type."
)
print("\033[35m\nfail_update_opstress topic was broadcast on wrong data type.")
def on_fail_update_root_node_wrong_data_type(self, error_message):
assert error_message == ("do_update: Attempting to update the root node 0.")
print("\033[35m\nfail_update_opstress topic was broadcast on root node.")
def on_fail_update_non_existent_id(self, error_message):
assert error_message == (
"do_update: Attempted to save non-existent opstress with opstress ID 100."
)
print("\033[35m\nfail_update_opstress topic was broadcast on non-existent ID.")
def on_fail_update_no_data_package(self, error_message):
assert error_message == ("do_update: No data package found for opstress ID 3.")
print("\033[35m\nfail_update_opstress topic was broadcast on no data package.")
@pytest.mark.integration
def test_do_update(self, test_tablemodel):
"""do_update() should return a zero error code on success."""
pub.subscribe(self.on_succeed_update, "succeed_update_opstress")
test_tablemodel.tree.get_node(3).data[
"opstress"
].description = "Test failure opstress"
test_tablemodel.tree.get_node(3).data[
"opstress"
].measurable_parameter = "Parameter"
pub.sendMessage("request_update_opstress", node_id=3, table="opstress")
pub.unsubscribe(self.on_succeed_update, "succeed_update_opstress")
@pytest.mark.integration
def test_do_update_all(self, test_tablemodel):
"""do_update_all() should broadcast the succeed message on success."""
pub.subscribe(self.on_succeed_update_all, "succeed_update_all")
pub.sendMessage("request_update_all_opstresss")
pub.unsubscribe(self.on_succeed_update_all, "succeed_update_all")
@pytest.mark.integration
def test_do_update_wrong_data_type(self, test_tablemodel):
"""do_update() should return a non-zero error code when passed a Requirement ID
that doesn't exist."""
pub.subscribe(self.on_fail_update_wrong_data_type, "fail_update_opstress")
_opstress = test_tablemodel.do_select(3)
_opstress.measurable_parameter = {1: 2}
pub.sendMessage("request_update_opstress", node_id=3, table="opstress")
pub.unsubscribe(self.on_fail_update_wrong_data_type, "fail_update_opstress")
@pytest.mark.integration
def test_do_update_root_node_wrong_data_type(self, test_tablemodel):
"""do_update() should return a non-zero error code when passed a Requirement ID
that doesn't exist."""
pub.subscribe(
self.on_fail_update_root_node_wrong_data_type, "fail_update_opstress"
)
_opstress = test_tablemodel.do_select(3)
_opstress.measurable_parameter = {1: 2}
pub.sendMessage("request_update_opstress", node_id=0, table="opstress")
pub.unsubscribe(
self.on_fail_update_root_node_wrong_data_type, "fail_update_opstress"
)
@pytest.mark.integration
def test_do_update_non_existent_id(self, test_tablemodel):
"""do_update() should return a non-zero error code when passed an OpStress ID
that doesn't exist."""
pub.subscribe(self.on_fail_update_non_existent_id, "fail_update_opstress")
pub.sendMessage("request_update_opstress", node_id=100, table="opstress")
pub.unsubscribe(self.on_fail_update_non_existent_id, "fail_update_opstress")
@pytest.mark.integration
def test_do_update_no_data_package(self, test_tablemodel):
"""do_update() should return a non-zero error code when passed a FMEA ID that
has no data package."""
pub.subscribe(self.on_fail_update_no_data_package, "fail_update_opstress")
test_tablemodel.tree.get_node(3).data.pop("opstress")
pub.sendMessage("request_update_opstress", node_id=3, table="opstress")
pub.unsubscribe(self.on_fail_update_no_data_package, "fail_update_opstress")
@pytest.mark.usefixtures("test_tablemodel")
class TestGetterSetter:
"""Class for testing methods that get or set."""
def on_succeed_get_attributes(self, attributes):
assert isinstance(attributes, dict)
assert attributes["load_id"] == 3
assert attributes["description"] == ""
print("\033[36m\nsucceed_get_opstress_attributes topic was broadcast.")
def on_succeed_get_data_manager_tree(self, tree):
assert isinstance(tree, Tree)
assert isinstance(tree.get_node(3).data["opstress"], RAMSTKOpStressRecord)
print("\033[36m\nsucceed_get_opstress_tree topic was broadcast")
def on_succeed_set_attributes(self, tree):
assert isinstance(tree, Tree)
assert (
tree.get_node(3).data["opstress"].description == "Big test operating load."
)
print("\033[36m\nsucceed_get_opstress_tree topic was broadcast")
@pytest.mark.integration
def test_do_get_attributes(self, test_tablemodel):
"""do_get_attributes() should return a dict of mode attributes on success."""
pub.subscribe(self.on_succeed_get_attributes, "succeed_get_opstress_attributes")
pub.sendMessage("request_get_opstress_attributes", node_id=3, table="opstress")
pub.unsubscribe(
self.on_succeed_get_attributes, "succeed_get_opstress_attributes"
)
@pytest.mark.integration
def test_on_get_tree_data_manager(self, test_tablemodel):
"""on_get_tree() should return the PoF treelib Tree."""
pub.subscribe(
self.on_succeed_get_data_manager_tree, "succeed_get_opstress_tree"
)
pub.sendMessage("request_get_opstress_tree")
pub.unsubscribe(
self.on_succeed_get_data_manager_tree, "succeed_get_opstress_tree"
)
@pytest.mark.integration
def test_do_set_attributes(self, test_tablemodel):
"""do_set_attributes() should return None when successfully setting operating
load attributes."""
pub.subscribe(self.on_succeed_set_attributes, "succeed_get_opstress_tree")
pub.sendMessage(
"request_set_opstress_attributes",
node_id=[3],
package={"description": "Big test operating load."},
)
pub.unsubscribe(self.on_succeed_set_attributes, "succeed_get_opstress_tree")
| 39.899408
| 88
| 0.710811
| 11,613
| 0.861115
| 1,068
| 0.079193
| 12,970
| 0.961738
| 0
| 0
| 5,383
| 0.399155
|
fb5a0434cd62419bd4c146c19aafa8c01ca37afb
| 1,159
|
py
|
Python
|
rssant_async/views.py
|
landlordlycat/rssant
|
12d9182154a3ffaa35310f1258de4be4822cf1e6
|
[
"BSD-3-Clause"
] | null | null | null |
rssant_async/views.py
|
landlordlycat/rssant
|
12d9182154a3ffaa35310f1258de4be4822cf1e6
|
[
"BSD-3-Clause"
] | null | null | null |
rssant_async/views.py
|
landlordlycat/rssant
|
12d9182154a3ffaa35310f1258de4be4822cf1e6
|
[
"BSD-3-Clause"
] | null | null | null |
import os
from validr import T
from aiohttp.web import json_response
from aiohttp.web_request import Request
from rssant_common import timezone
from rssant_common.image_token import ImageToken, ImageTokenDecodeError
from rssant_config import CONFIG
from .rest_validr import ValidrRouteTableDef
from .image_proxy import image_proxy
routes = ValidrRouteTableDef()
@routes.get('/image/proxy')
async def image_proxy_view_v2(
request: Request,
token: T.str,
url: T.url.maxlen(4096),
):
try:
image_token = ImageToken.decode(
token, secret=CONFIG.image_token_secret,
expires=CONFIG.image_token_expires)
except ImageTokenDecodeError as ex:
return json_response({'message': str(ex)}, status=400)
response = await image_proxy(request, url, image_token.referrer)
return response
@routes.get('/image/_health')
async def get_health(request):
build_id = os.getenv('RSSANT_BUILD_ID')
commit_id = os.getenv('RSSANT_COMMIT_ID')
now = timezone.now().isoformat()
info = dict(
build_id=build_id,
commit_id=commit_id,
now=now,
)
return json_response(info)
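# A minimal usage sketch, assuming ValidrRouteTableDef registers its decorated
# handlers the same way as aiohttp's RouteTableDef; illustrative only, not part
# of the original rssant module.
if __name__ == '__main__':
    from aiohttp import web
    app = web.Application()
    app.add_routes(routes)
    web.run_app(app)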
| 26.340909
| 71
| 0.724763
| 0
| 0
| 0
| 0
| 787
| 0.679034
| 729
| 0.628991
| 74
| 0.063848
|
fb5bb09cbb3eed2ec9bb972fd01943b3b7af90ee
| 2,785
|
py
|
Python
|
tests/test_matching.py
|
grickly-nyu/grickly
|
39fbf796ea5918d0183b3aa1b3ae23dcb3d84f22
|
[
"MIT"
] | 3
|
2021-02-04T02:53:35.000Z
|
2021-07-22T01:09:36.000Z
|
tests/test_matching.py
|
grickly-nyu/grickly
|
39fbf796ea5918d0183b3aa1b3ae23dcb3d84f22
|
[
"MIT"
] | 17
|
2021-02-19T23:25:29.000Z
|
2021-05-16T04:18:00.000Z
|
tests/test_matching.py
|
grickly-nyu/grickly
|
39fbf796ea5918d0183b3aa1b3ae23dcb3d84f22
|
[
"MIT"
] | null | null | null |
from testing_config import BaseTestConfig
from application.models import User
from application.models import Chatroom
import json
from application.utils import auth
class TestMatch(BaseTestConfig):
test_group = {
"name": "test_group",
"tag": "Poker",
}
test_group2 = {
"name": "test_group2",
"tag": "Study",
}
tag_p = {"query_tag": "Poker"}
tag_s = {"query_tag": "Study"}
tag_o = {"query_tag": "Outdoor"}
tag_l = {"query_tag": "Life"}
tag_t = {"query_tag": "Test"}
testrm_1 = {"room_id": 1}
testrm_2 = {"room_id": 185}
testrm_3 = {"room_id": 4}
def test_get_suggestions(self):
res = self.app.post(
"/api/get_suggestions",
data=json.dumps(self.tag_p),
content_type='application/json'
)
res1 = self.app.post(
"/api/get_suggestions",
data=json.dumps(self.tag_s),
content_type='application/json'
)
res2 = self.app.post(
"/api/get_suggestions",
data=json.dumps(self.tag_o),
content_type='application/json'
)
self.assertEqual(res.status_code,200)
self.assertEqual(res1.status_code,200)
self.assertEqual(res2.status_code,200)
res3 = self.app.post(
"/api/get_suggestions",
data=json.dumps(self.tag_t),
content_type='application/json'
)
self.assertEqual(res2.status_code,200)
def test_create_group(self):
res = self.app.post(
"/api/create_group",
data=json.dumps(self.test_group),
content_type='application/json'
)
self.assertEqual(json.loads(res.data.decode("utf-8"))["results"], 2)
self.assertEqual(res.status_code, 200)
res = self.app.post(
"/api/create_group",
data=json.dumps(self.test_group2),
content_type='application/json'
)
self.assertEqual(json.loads(res.data.decode("utf-8"))["results"], 3)
# def test_join_chatroom(self):
# res = self.app.post(
# "/api/join_chatroom",
# data=json.dumps(self.testrm_1),
# content_type='application/json'
# )
# res1 = self.app.post(
# "/api/join_chatroom",
# data=json.dumps(self.testrm_2),
# content_type='application/json'
# )
# res2 = self.app.post(
# "/api/join_chatroom",
# data=json.dumps(self.testrm_3),
# content_type='application/json'
# )
# self.assertEqual(res.status_code,201)
# self.assertEqual(res1.status_code,201)
# self.assertEqual(res2.status_code,201)
| 29.315789
| 76
| 0.560144
| 2,614
| 0.9386
| 0
| 0
| 0
| 0
| 0
| 0
| 994
| 0.356912
|
fb5d367b92efd326d4327262afe891263095720b
| 1,633
|
py
|
Python
|
examples/pycaffe/layers/aggregation_cross_entropy_layer.py
|
HannaRiver/all-caffe
|
eae31715d903c1e3ef7035702d66b23d9cdf45c3
|
[
"BSD-2-Clause"
] | null | null | null |
examples/pycaffe/layers/aggregation_cross_entropy_layer.py
|
HannaRiver/all-caffe
|
eae31715d903c1e3ef7035702d66b23d9cdf45c3
|
[
"BSD-2-Clause"
] | null | null | null |
examples/pycaffe/layers/aggregation_cross_entropy_layer.py
|
HannaRiver/all-caffe
|
eae31715d903c1e3ef7035702d66b23d9cdf45c3
|
[
"BSD-2-Clause"
] | null | null | null |
import sys
sys.path.insert(0, '/home/hena/caffe-ocr/buildcmake/install/python')
sys.path.insert(0, '/home/hena/tool/protobuf-3.1.0/python')
import caffe
import math
import numpy as np
def SoftMax(net_ans):
tmp_net = [math.exp(i) for i in net_ans]
sum_exp = sum(tmp_net)
return [i/sum_exp for i in tmp_net]
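# For example, SoftMax([1.0, 2.0, 3.0]) returns three probabilities that sum to 1,
# with the largest weight on the last element.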
class AggregationCrossEntropyLayer(caffe.Layer):
"""
    Compute the Aggregation Cross Entropy loss for the OCR recognition pipeline.
"""
def setup(self, bottom, top):
print("==============================================================Hi")
self.dict_size = 1220
if len(bottom) != 2:
raise Exception("Need two inputs to computer loss.")
def reshape(self, bottom, top):
self.diff = np.zeros_like(bottom[0].data, dtype=np.float32)
# top[0].reshape(*bottom[0].data.shape)
def forward(self, bottom, top):
print("==============================================================Hi1")
# score = bottom[0].data
# label = bottom[1].data
# print(score)
# print(type(score))
# print(score.shape)
# T_ = len(score)
self.diff[...] = bottom[0].data - bottom[1].data
top[0].data[...] = np.sum(self.diff**2) / bottom[0].num / 2.
def backward(self, top, propagate_down, bottom):
for i in range(2):
if not propagate_down[i]:
continue
if i == 0:
sign = 1
else:
sign = -1
bottom[i].diff[...] = sign * self.diff / bottom[i].num
def get_n_k(self, label):
pass
| 29.160714
| 82
| 0.515615
| 1,302
| 0.797306
| 0
| 0
| 0
| 0
| 0
| 0
| 487
| 0.298224
|
fb5dda247bf82e8dba4c4c4fbaea1e533adc2c8f
| 2,530
|
py
|
Python
|
pyroms/sta_hgrid.py
|
ChuningWang/pyroms2
|
090a1a6d614088612f586f80b335ddb0dc0077a2
|
[
"MIT"
] | null | null | null |
pyroms/sta_hgrid.py
|
ChuningWang/pyroms2
|
090a1a6d614088612f586f80b335ddb0dc0077a2
|
[
"MIT"
] | null | null | null |
pyroms/sta_hgrid.py
|
ChuningWang/pyroms2
|
090a1a6d614088612f586f80b335ddb0dc0077a2
|
[
"MIT"
] | null | null | null |
"""
Tools for creating and working with Line (Station) Grids
"""
from typing import Union
import pyproj
import numpy as np
_atype = Union[type(None), np.ndarray]
_ptype = Union[type(None), pyproj.Proj]
class StaHGrid:
"""
Stations Grid
EXAMPLES:
--------
>>> x = arange(8)
>>> y = arange(8)*2-1
>>> grd = pyroms.grid.StaHGrid(x, y)
    >>> print(grd.x)
    [0 1 2 3 4 5 6 7]
"""
def __init__(self, x: np.ndarray, y: np.ndarray, angle: _atype = None):
assert x.ndim == 1 and y.ndim == 1 and x.shape == y.shape, \
            'x and y must be 1D arrays of the same size.'
mask = np.isnan(x) | np.isnan(y)
if np.any(mask):
x = np.ma.masked_where(mask, x)
y = np.ma.masked_where(mask, y)
self.spherical = False
self._x, self._y = x, y
if angle is None:
self.angle = np.zeros(len(self.y))
else:
self.angle = angle
return
x = property(lambda self: self._x)
y = property(lambda self: self._y)
class StaHGridGeo(StaHGrid):
"""
Stations Grid
EXAMPLES:
--------
>>> lon = arange(8)
>>> lat = arange(8)*2-1
    >>> proj = pyproj.Proj(...)
    >>> grd = pyroms.grid.StaHGridGeo(lon, lat, proj=proj)
    >>> print(grd.x)
[xxx, xxx, xxx, xxx, xxx, xxx, xxx, xxx]
"""
def __init__(self, lon: np.ndarray, lat: np.ndarray,
x: _atype = None, y: _atype = None,
angle: _atype = None, proj: _ptype = None):
self.spherical = True
self._lon, self._lat = lon, lat
self.proj = proj
if x is not None and y is not None:
super(StaHGridGeo, self).__init__(x, y, angle)
self.spherical = True
else:
if proj is not None:
self._x, self._y = proj(lon, lat)
else:
raise ValueError('Projection transformer must be ' +
'provided if x/y are missing.')
return
@property
def lon(self):
return self._lon
@lon.setter
def lon(self, lon):
if self.proj is not None:
self.__init__(lon, self._lat, angle=self.angle, proj=self.proj)
else:
self._lon = lon
@property
def lat(self):
return self._lat
@lat.setter
def lat(self, lat):
if self.proj is not None:
self.__init__(self._lon, lat, angle=self.angle, proj=self.proj)
else:
self._lat = lat
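# A small usage sketch for the Cartesian station grid; the values below are
# illustrative only.
if __name__ == '__main__':
    _x = np.arange(8)
    _y = np.arange(8) * 2 - 1
    _grd = StaHGrid(_x, _y)
    print(_grd.x)
    print(_grd.angle)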
| 24.326923
| 75
| 0.527273
| 2,319
| 0.916601
| 0
| 0
| 480
| 0.189723
| 0
| 0
| 626
| 0.247431
|
fb5e69fb9347917ede848ed32aab714b5ee1edac
| 3,364
|
py
|
Python
|
lstchain/visualization/camera.py
|
misabelber/cta-lstchain
|
08fc4dccfe8a05a77fa46fb4ffb6e26f439c0a93
|
[
"BSD-3-Clause"
] | null | null | null |
lstchain/visualization/camera.py
|
misabelber/cta-lstchain
|
08fc4dccfe8a05a77fa46fb4ffb6e26f439c0a93
|
[
"BSD-3-Clause"
] | null | null | null |
lstchain/visualization/camera.py
|
misabelber/cta-lstchain
|
08fc4dccfe8a05a77fa46fb4ffb6e26f439c0a93
|
[
"BSD-3-Clause"
] | null | null | null |
import numpy as np
from ..reco.disp import disp_vector
import astropy.units as u
import matplotlib.pyplot as plt
from ctapipe.visualization import CameraDisplay
__all__ = [
'overlay_disp_vector',
'overlay_hillas_major_axis',
'overlay_source',
'display_dl1_event',
]
def display_dl1_event(event, camera_geometry, tel_id=1, axes=None, **kwargs):
"""
Display a DL1 event (image and pulse time map) side by side
Parameters
----------
    event: ctapipe event
    camera_geometry: `ctapipe.instrument.CameraGeometry`
    tel_id: int
axes: list of `matplotlib.pyplot.axes` of shape (2,) or None
kwargs: kwargs for `ctapipe.visualization.CameraDisplay`
Returns
-------
axes: `matplotlib.pyplot.axes`
"""
if axes is None:
fig, axes = plt.subplots(1, 2, figsize=(12, 5))
image = event.dl1.tel[tel_id].image
peak_time = event.dl1.tel[tel_id].peak_time
if image is None or peak_time is None:
raise Exception(f"There is no calibrated image or pulse time map for telescope {tel_id}")
d1 = CameraDisplay(camera_geometry, image, ax=axes[0], **kwargs)
d1.add_colorbar(ax=axes[0])
d2 = CameraDisplay(camera_geometry, peak_time, ax=axes[1], **kwargs)
d2.add_colorbar(ax=axes[1])
return axes
def overlay_source(display, source_pos_x, source_pos_y, **kwargs):
"""
Display the source (event) position in the camera
Parameters
----------
display: `ctapipe.visualization.CameraDisplay`
source_pos_x: `astropy.units.Quantity`
source_pos_y: `astropy.units.Quantity`
kwargs: args for `matplotlib.pyplot.scatter`
Returns
-------
`matplotlib.pyplot.axes`
"""
kwargs['marker'] = 'x' if 'marker' not in kwargs else kwargs['marker']
kwargs['color'] = 'red' if 'color' not in kwargs else kwargs['color']
display.axes.scatter(source_pos_x, source_pos_y, **kwargs)
def overlay_disp_vector(display, disp, hillas, **kwargs):
"""
Overlay disp vector on a CameraDisplay
Parameters
----------
display: `ctapipe.visualization.CameraDisplay`
disp: `DispContainer`
hillas: `ctapipe.containers.HillasParametersContainer`
kwargs: args for `matplotlib.pyplot.quiver`
"""
assert np.isfinite([hillas.x.value, hillas.y.value]).all()
if not np.isfinite([disp.dx.value, disp.dy.value]).all():
disp_vector(disp)
display.axes.quiver(hillas.x, hillas.y,
disp.dx, disp.dy,
units='xy', scale=1*u.m,
angles='xy',
**kwargs,
)
display.axes.quiver(hillas.x.value, hillas.y.value, disp.dx.value, disp.dy.value, units='xy', scale=1)
def overlay_hillas_major_axis(display, hillas, **kwargs):
"""
Overlay hillas ellipse major axis on a CameraDisplay.
Parameters
----------
display: `ctapipe.visualization.CameraDisplay`
    hillas: `ctapipe.containers.HillasParametersContainer`
kwargs: args for `matplotlib.pyplot.plot`
"""
kwargs['color'] = 'black' if 'color' not in kwargs else kwargs['color']
length = hillas.length * 2
x = -length + 2 * length * np.arange(10) / 10
display.axes.plot(hillas.x + x * np.cos(hillas.psi.to(u.rad).value),
hillas.y + x * np.sin(hillas.psi.to(u.rad).value),
**kwargs,
)
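# A minimal usage sketch for overlay_source, assuming ctapipe ships the 'LSTCam'
# geometry via CameraGeometry.from_name; the fake image and source position are
# illustrative only.
if __name__ == '__main__':
    from ctapipe.instrument import CameraGeometry
    geom = CameraGeometry.from_name('LSTCam')
    fake_image = np.random.default_rng(0).poisson(5, geom.n_pixels).astype(float)
    display = CameraDisplay(geom, fake_image)
    display.add_colorbar()
    overlay_source(display, 0.2, 0.1, color='white')
    plt.show()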
| 29.769912
| 106
| 0.633472
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,443
| 0.428954
|
fb5ee7c913a1ddd435fb481e4af6d53922603786
| 14,537
|
py
|
Python
|
QFlow-2.0/QFlow/Process_Data.py
|
jpzwolak/QFlow-suite
|
d34d74d8690908137adbce0e71587884758b5ecf
|
[
"MIT"
] | null | null | null |
QFlow-2.0/QFlow/Process_Data.py
|
jpzwolak/QFlow-suite
|
d34d74d8690908137adbce0e71587884758b5ecf
|
[
"MIT"
] | null | null | null |
QFlow-2.0/QFlow/Process_Data.py
|
jpzwolak/QFlow-suite
|
d34d74d8690908137adbce0e71587884758b5ecf
|
[
"MIT"
] | 1
|
2022-02-16T22:25:22.000Z
|
2022-02-16T22:25:22.000Z
|
import numpy as np
import random
from scipy.stats import skew as scipy_skew
from skimage.transform import resize as skimage_resize
from QFlow import config
## set of functions for loading and preparing a dataset for training.
def get_num_min_class(labels):
'''
Get the number of the minimum represented class in label vector.
Used for resampling data.
input:
labels: np.ndarray of labels
outputs:
num_samples: int number of samples for minimum class
'''
# use argmax as example's class
argmax_labels = np.argmax(labels, axis=-1)
# max of num_samples is all one label
num_samples = labels.shape[0]
for i in range(labels.shape[-1]):
lab_elems = np.sum(argmax_labels==i)
if lab_elems < num_samples:
num_samples = lab_elems
return num_samples
def resample_data(features, state_labels, labels=None, seed=None):
'''
Resample data to be evenly distributed across classes in labels by cutting
number of examples for each class to be equal to the number of examples
in the least represented class. (classes assumed to be last axis of
labels). Shuffles after resampling.
inputs:
features: ndarray of features to be resampled. Resample along first axis.
state_labels: ndarray of labels to be used for resampling
        labels: ndarray of labels to be resampled. If None, the resampled
            state_labels are returned in their place.
        seed: Seed of random number generator for shuffling idxs during resample
            and for shuffling resampled features and labels.
outputs:
features: list of resampled features
labels: list of resampled labels
'''
rng = np.random.default_rng(seed)
num_samples = get_num_min_class(state_labels)
features_resamp = []; state_labels_resamp = []; labels_resamp = []
for i in range(state_labels.shape[-1]):
s_idxs = state_labels.argmax(axis=-1)==i
# first get full array of single state
features_s_full = features[s_idxs]
state_labels_s_full = state_labels[s_idxs]
if labels is not None:
labels_s_full = labels[s_idxs]
        # then build the index list (0 .. len-1), shuffle, and slice to num_samples
# shuffle idxs to be sure labels and features are shuffled together
idxs = list(range(features_s_full.shape[0]))
rng.shuffle(idxs)
features_resamp.append(features_s_full[idxs[:num_samples]])
state_labels_resamp.append(state_labels_s_full[idxs[:num_samples]])
if labels is not None:
labels_resamp.append(labels_s_full[idxs[:num_samples]])
features_resamp_arr = np.concatenate(features_resamp, axis=0)
state_labels_resamp_arr = np.concatenate(state_labels_resamp, axis=0)
if labels is not None:
labels_resamp_arr = np.concatenate(labels_resamp, axis=0)
idxs = list(range(features_resamp_arr.shape[0]))
rng.shuffle(idxs)
if labels is not None:
return features_resamp_arr[idxs], labels_resamp_arr[idxs]
elif labels is None:
return features_resamp_arr[idxs], state_labels_resamp_arr[idxs]
def noise_mag_to_class(state_labels, noise_mags,
low_thresholds=None, high_thresholds=None):
'''
Function to convert noise magnitudes to noise classes.
Noise class thresholds are defined here. Thresholds for states
order is: no dot, left dot, central dot, right dot, double dot
    Default low thresholds are the linear extrapolation to 100 % accuracy
    of an average noisy-trained model vs. noise_mag. Default high
    thresholds are the linear extrapolation to 0 % accuracy of an
    average noisy-trained model vs. noise_mag.
inputs:
state_labels: list of state labels. shape assumed to be
(num_examples, num_states).
noise_mags: list of float noise_mags for state_labels. shape assumed
to be (num_examples, ).
        low_thresholds: list of floats of shape (num_state, ) specifying
            the high/moderate signal to noise class thresholds.
        high_thresholds: list of floats of shape (num_state, ) specifying
            the moderate/low signal to noise class thresholds.
'''
# set number of noise classes and states.
# length of thresholds must be equal to num_states.
# no num_quality_classes != 3 are supported.
num_quality_classes = config.NUM_QUALITY_CLASSES
num_states = config.NUM_STATES
# set default thresholds
if high_thresholds is None:
high_thresholds = [1.22, 1.00, 1.21, 0.68, 2.00]
if low_thresholds is None:
low_thresholds = [0.31, 0.32, 0.41, 0.05, 0.47]
low_thresholds = np.array(low_thresholds)
high_thresholds = np.array(high_thresholds)
quality_classes = np.zeros(noise_mags.shape+(num_quality_classes,))
# use fractional labels by taking weighted average after
# applying thresholds
num_states = state_labels.shape[-1]
# get per state classes then sum across last axis later
per_state_classes = np.zeros(
noise_mags.shape + (num_quality_classes,) + (num_states,))
# use boolean indexing to define classes from noise mags/threshold arrays
for i in range(num_states):
per_state_classes[noise_mags <= low_thresholds[i],0, i] = 1
per_state_classes[(noise_mags > low_thresholds[i]) &\
(noise_mags <= high_thresholds[i]), 1, i] = 1
per_state_classes[noise_mags > high_thresholds[i], 2, i] = 1
# multiply each first axis element then sum across last axes
quality_classes = np.einsum('ijk,ik->ij', per_state_classes, state_labels)
return quality_classes
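# For example, with the default thresholds a pure double-dot example (state
# index 4) with noise_mag 1.0 lies between 0.47 and 2.00, so it is assigned the
# middle (moderate signal-to-noise) quality class.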
def get_data(f, train_test_split=0.9,
dat_key='sensor', label_key='state',
resample=True, seed=None,
low_thresholds=None, high_thresholds=None):
'''
Reads in the subregion data and converts it to a format useful for training
Note that the data is shuffled after reading in.
inputs:
f: one of:
str path to .npz file containing cropped data
dict of cropped data.
train_test_split: float fraction of data to use for training.
resample: bool specifying whether to resample data to get even state
representation.
seed: int random seed for file shuffling.
label_key: string key for data used for the label. One of:
'data_quality', 'noise_mag_factor', 'state'.
low_threshold: list of noise levels to use for high/moderate signal
to noise ratio threshold.
high_threshold: list of noise levels to use for moderate/low signal
to noise ratio threshold.
outputs:
train_data: np.ndarray of training data.
train_labels: np.ndarray of training labels.
eval_data: np.ndarray of training data.
eval_labels: np.ndarray of training labels.
'''
# treat f as path, or if TypeError treat as dict.
try:
dict_of_dicts = np.load(f, allow_pickle = True)
file_on_disk = True
except TypeError:
dict_of_dicts = f
file_on_disk = False
files = list(dict_of_dicts.keys())
random.Random(seed).shuffle(files)
inp = []
oup_state = []
# if we want a nonstate label load it so we can resample
if label_key!='state':
oup_labels = []
else:
oup_labels = None
train_labels = None
eval_labels = None
# if label is noise class, we need to get noise mag labels first
# then process to turn the mag into a class label
if label_key == 'data_quality':
data_quality = True
label_key = 'noise_mag_factor'
else:
data_quality = False
for file in files:
# for compressed data, file is the key of the dict of dicts
if file_on_disk:
data_dict = dict_of_dicts[file].item()
else:
data_dict = dict_of_dicts[file]
dat = data_dict[dat_key]
# generates a list of arrays
inp.append(dat.reshape(config.SUB_SIZE,config.SUB_SIZE,1))
oup_state.append(data_dict['state']) # generates a list of arrays
if oup_labels is not None:
oup_labels.append(data_dict[label_key])
inp = np.array(inp) # converts the list to np.array
oup_state = np.array(oup_state) # converts the list to np.array
if oup_labels is not None:
oup_labels = np.array(oup_labels)
    # split data into train and evaluation data/labels
n_samples = inp.shape[0]
print("Total number of samples :", n_samples)
n_train = int(train_test_split * n_samples)
train_data = inp[:n_train]
print("Training data info:", train_data.shape)
train_states = oup_state[:n_train]
if oup_labels is not None:
train_labels = oup_labels[:n_train]
eval_data = inp[n_train:]
print("Evaluation data info:", eval_data.shape)
eval_states = oup_state[n_train:]
if oup_labels is not None:
eval_labels = oup_labels[n_train:]
# convert noise mag to class before resampling/getting noise mags if
    # needed because resampling doesn't return state labels
if data_quality:
train_labels = noise_mag_to_class(
train_states, train_labels,
low_thresholds=low_thresholds,
high_thresholds=high_thresholds,
)
eval_labels = noise_mag_to_class(
eval_states, eval_labels,
low_thresholds=low_thresholds,
high_thresholds=high_thresholds,
)
# resample to make state representation even
if resample:
train_data, train_labels = resample_data(
train_data, train_states, train_labels)
eval_data, eval_labels = resample_data(
eval_data, eval_states, eval_labels)
elif not resample and label_key=='state':
train_labels = train_states
eval_labels = eval_states
# expand dim of labels to make sure that they have proper shape
    if oup_labels is not None and len(train_labels.shape)==1:
        train_labels = np.expand_dims(train_labels, 1)
    if oup_labels is not None and len(eval_labels.shape)==1:
        eval_labels = np.expand_dims(eval_labels, 1)
return train_data, train_labels, eval_data, eval_labels
## preprocess functions
def gradient(x):
'''
Take gradient of an ndarray in specified direction. Thin wrapper around
np.gradient(). Also note that x -> axis=1 and y-> axis=0
input:
x: An numpy ndarray to take the gradient of
output:
numpy ndarray containing gradient in x direction.
'''
return np.gradient(x, axis=1)
def apply_threshold(x, threshold_val=10, threshold_to=0):
'''
    Thresholds a numpy ndarray, zeroing out values below a given percentile.
Args:
x = numpy array with data to be filtered
threshold_val = percentile below which to set values to zero
'''
x[x < np.abs(np.percentile(x.flatten(),threshold_val))] = threshold_to
return x
def apply_clipping(x, clip_val=3, clip_to='clip_val'):
'''
Clip input symmetrically at clip_val number of std devs.
Do not zscore norm x, but apply thresholds using normed x
'''
x_clipped = np.copy(x)
mean = np.mean(x)
std = np.std(x)
norm_x = (x - mean) / std
# set clipped values to either the mean or clip threshold
if clip_to.lower() == 'clip_val':
x_clipped[norm_x < -clip_val] = -clip_val * std + mean
x_clipped[norm_x > clip_val] = clip_val * std + mean
elif clip_to.lower() == 'mean':
x_clipped[norm_x < -clip_val] = mean
x_clipped[norm_x > clip_val] = mean
else:
raise KeyError('"clip_to" option not valid: ' +str(clip_to) +\
'Valid options: clip_val, mean')
return x_clipped
def autoflip_skew(data):
'''
Autoflip a numpy ndarray based on the skew of the values
(effective for gradient data).
'''
skew_sign = np.sign(scipy_skew(np.ravel(data)))
return data*skew_sign
def zscore_norm(x):
'''
Takes a numpy ndarray and returns a z-score normalized version
'''
return (x-x.mean())/x.std()
class Preprocessor():
def __init__(self, autoflip=False, denoising=[],
clip_val=None, thresh_val=None):
'''
Class for doing preprocessing of data.
inputs:
autoflip: bool specifying whether to autoflip data.
denoising: list of str specifying denoising to apply to data.
clip_val: value for clipping denoising. Unused if 'clip' not in
denoising.
            thresh_val: percentile value for threshold denoising. Unused if
                'threshold' not in denoising.
'''
self.autoflip = autoflip
valid_denoising = ['threshold', 'clip']
if not set(denoising).issubset(valid_denoising):
raise ValueError(
'invalid denoising ', denoising,
' Valid values:', valid_denoising)
self.denoising = denoising
self.clip_val = clip_val
self.thresh_val = thresh_val
def proc_subimage(self, x):
'''
Takes the gradient of the measured data, applies denoising if specified,
normalizes, autoflips if specified,
and then adjusts the size (if necessary)
Args:
x = an array with data
'''
# take gradient
x = gradient(x)
        # apply thresholding; the constructor stores the percentile as
        # self.thresh_val, and the result is fed back into x for later steps
        if 'threshold' in self.denoising:
            if self.thresh_val is not None:
                x = apply_threshold(x, self.thresh_val)
            else:
                x = apply_threshold(x)
        # apply clipping
        if 'clip' in self.denoising:
            if self.clip_val is not None:
                x = apply_clipping(x, self.clip_val)
            else:
                x = apply_clipping(x)
# normalize with zscore normalization
x = zscore_norm(x)
# autoflip by skew of image gradient
if self.autoflip:
x = autoflip_skew(x)
target_shape = (config.SUB_SIZE, config.SUB_SIZE, 1)
if x.shape != target_shape:
x = skimage_resize(x, target_shape)
return x
def proc_subimage_set(self, x_arr):
'''
Loop through subimages and apply preprocessing to each one.
inputs:
x: full dataset of images. First axis assumed to be example index.
returns:
Full dataset of images with same shape, processed.
'''
return np.array([self.proc_subimage(x) for x in x_arr])
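# A short usage sketch of the preprocessing pipeline on random data standing in
# for real sub-images; illustrative only.
if __name__ == '__main__':
    _rng = np.random.default_rng(0)
    _batch = _rng.normal(size=(4, config.SUB_SIZE, config.SUB_SIZE, 1))
    _pre = Preprocessor(autoflip=True, denoising=['clip'], clip_val=3)
    _processed = _pre.proc_subimage_set(_batch)
    print(_processed.shape)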
| 35.982673
| 81
| 0.65323
| 2,380
| 0.16372
| 0
| 0
| 0
| 0
| 0
| 0
| 6,623
| 0.455596
|
fb5eea86a746925440911830c3d41121077c7f7a
| 472
|
py
|
Python
|
vimfiles/bundle/vim-python/submodules/pylint/tests/functional/s/star/star_needs_assignment_target_py35.py
|
ciskoinch8/vimrc
|
5bf77a7e7bc70fac5173ab2e9ea05d7dda3e52b8
|
[
"MIT"
] | 463
|
2015-01-15T08:17:42.000Z
|
2022-03-28T15:10:20.000Z
|
vimfiles/bundle/vim-python/submodules/pylint/tests/functional/s/star/star_needs_assignment_target_py35.py
|
ciskoinch8/vimrc
|
5bf77a7e7bc70fac5173ab2e9ea05d7dda3e52b8
|
[
"MIT"
] | 52
|
2015-01-06T02:43:59.000Z
|
2022-03-14T11:15:21.000Z
|
vimfiles/bundle/vim-python/submodules/pylint/tests/functional/s/star/star_needs_assignment_target_py35.py
|
ciskoinch8/vimrc
|
5bf77a7e7bc70fac5173ab2e9ea05d7dda3e52b8
|
[
"MIT"
] | 249
|
2015-01-07T22:49:49.000Z
|
2022-03-18T02:32:06.000Z
|
"""
Test PEP 0448 -- Additional Unpacking Generalizations
https://www.python.org/dev/peps/pep-0448/
"""
# pylint: disable=superfluous-parens, unnecessary-comprehension
UNPACK_TUPLE = (*range(4), 4)
UNPACK_LIST = [*range(4), 4]
UNPACK_SET = {*range(4), 4}
UNPACK_DICT = {'a': 1, **{'b': '2'}}
UNPACK_DICT2 = {**UNPACK_DICT, "x": 1, "y": 2}
UNPACK_DICT3 = {**{'a': 1}, 'a': 2, **{'a': 3}}
UNPACK_IN_COMP = {elem for elem in (*range(10))} # [star-needs-assignment-target]
| 29.5
| 81
| 0.641949
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 222
| 0.470339
|
fb5ffc354d2d854524531b1d4f70227336db8f87
| 238
|
py
|
Python
|
src/ikazuchi/errors.py
|
t2y/ikazuchi
|
7023111e92fa47360c50cfefd1398c554475f2c6
|
[
"Apache-2.0"
] | null | null | null |
src/ikazuchi/errors.py
|
t2y/ikazuchi
|
7023111e92fa47360c50cfefd1398c554475f2c6
|
[
"Apache-2.0"
] | null | null | null |
src/ikazuchi/errors.py
|
t2y/ikazuchi
|
7023111e92fa47360c50cfefd1398c554475f2c6
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
class IkazuchiError(Exception):
""" ikazuchi root exception """
pass
class TranslatorError(IkazuchiError):
""" ikazuchi translator exception """
pass
class NeedApiKeyError(TranslatorError): pass
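# A tiny, illustrative sketch: the hierarchy lets callers catch any translator
# failure through the shared IkazuchiError root.
if __name__ == '__main__':
    try:
        raise NeedApiKeyError('an API key is required for this translator')
    except IkazuchiError as err:
        print('{0}: {1}'.format(type(err).__name__, err))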
| 19.833333
| 44
| 0.693277
| 208
| 0.87395
| 0
| 0
| 0
| 0
| 0
| 0
| 91
| 0.382353
|
fb607c62040621d2cd1122da4b43413ea79de0be
| 4,331
|
py
|
Python
|
engine/resources.py
|
gerizim16/MP2_GRP19
|
591fbb47fec6c5471d4e63151f494641452b4cb7
|
[
"CC0-1.0"
] | 1
|
2020-09-25T02:46:00.000Z
|
2020-09-25T02:46:00.000Z
|
engine/resources.py
|
gerizim16/MP2_GRP19
|
591fbb47fec6c5471d4e63151f494641452b4cb7
|
[
"CC0-1.0"
] | null | null | null |
engine/resources.py
|
gerizim16/MP2_GRP19
|
591fbb47fec6c5471d4e63151f494641452b4cb7
|
[
"CC0-1.0"
] | null | null | null |
import pyglet
print('Loading resources')
def center_image(image):
"""Sets an image's anchor point to its center"""
image.anchor_x = image.width / 2
image.anchor_y = image.height / 2
# Tell pyglet where to find the resources
pyglet.resource.path = ['./resources', './resources/backgrounds']
pyglet.resource.reindex()
images = list()
# Load the image resources and get them to draw centered
tank_body_img = pyglet.resource.image('tank_body.png')
images.append(tank_body_img)
tank_head_img = pyglet.resource.image('tank_head.png')
images.append(tank_head_img)
boxlife_img = pyglet.resource.image('boxlife.png')
images.append(boxlife_img)
boxlife_dead_img = pyglet.resource.image('boxlife_dead.png')
images.append(boxlife_dead_img)
wheel_img = pyglet.resource.image('wheel.png')
images.append(wheel_img)
thread_img = pyglet.resource.image('thread.png')
images.append(thread_img)
motorbike_chassis_img = pyglet.resource.image('motorbike_chassis.png')
images.append(motorbike_chassis_img)
mb_wheel_img = pyglet.resource.image('mb_wheel.png')
images.append(mb_wheel_img)
mb_holder_img = pyglet.resource.image('mb_holder.png')
images.append(mb_holder_img)
vbv_chassis_img = pyglet.resource.image('vbv_chassis.png')
images.append(vbv_chassis_img)
vbv_wheels_img = pyglet.resource.image('vbv_wheels.png')
images.append(vbv_wheels_img)
vbv_platform_img = pyglet.resource.image('vbv_platform.png')
images.append(vbv_platform_img)
vb_net_img = pyglet.resource.image('vb_net.png')
images.append(vb_net_img)
vb_ball_img = pyglet.resource.image('vb_ball.png')
images.append(vb_ball_img)
game1_button_img = pyglet.resource.image('game1.png')
images.append(game1_button_img)
game1_button_hover_img = pyglet.resource.image('game1_hover.png')
images.append(game1_button_hover_img)
game2_button_img = pyglet.resource.image('game2.png')
images.append(game2_button_img)
game2_button_hover_img = pyglet.resource.image('game2_hover.png')
images.append(game2_button_hover_img)
game3_button_img = pyglet.resource.image('game3.png')
images.append(game3_button_img)
game3_button_hover_img = pyglet.resource.image('game3_hover.png')
images.append(game3_button_hover_img)
game1_hs_button_img = pyglet.resource.image('game1_hs.png')
images.append(game1_hs_button_img)
game1_hs_button_hover_img = pyglet.resource.image('game1_hs_hover.png')
images.append(game1_hs_button_hover_img)
game2_hs_button_img = pyglet.resource.image('game2_hs.png')
images.append(game2_hs_button_img)
game2_hs_button_hover_img = pyglet.resource.image('game2_hs_hover.png')
images.append(game2_hs_button_hover_img)
menu_button_img = pyglet.resource.image('menu.png')
images.append(menu_button_img)
gravity_button_img = pyglet.resource.image('gravity.png')
images.append(gravity_button_img)
fullscreen_button_img = pyglet.resource.image('fullscreen.png')
images.append(fullscreen_button_img)
restart_button_img = pyglet.resource.image('restart_button.png')
images.append(restart_button_img)
enter_button_img = pyglet.resource.image('enter_button.png')
images.append(enter_button_img)
enter_button_hover_img = pyglet.resource.image('enter_button_hover.png')
images.append(enter_button_hover_img)
circle_meter_img = pyglet.resource.image('circle_meter.png')
images.append(circle_meter_img)
pointer_img = pyglet.resource.image('pointer.png')
images.append(pointer_img)
finishflag_img = pyglet.resource.image('finishflag.png')
images.append(finishflag_img)
goal_meter_img = pyglet.resource.image('goal_meter.png')
images.append(goal_meter_img)
bg_goal_meter_img = pyglet.resource.image('bg_goal_meter.png')
images.append(bg_goal_meter_img)
background_img = pyglet.resource.image('background.png')
images.append(background_img)
for image in images:
center_image(image)
# load backgrounds
parallax_bgs = list()
layer_counts = (3, 2, 2, 2, 3, 4)
for bg_i, layer_count in enumerate(layer_counts):
bg_set = list()
for layer_i in range(layer_count):
bg_set.append(pyglet.resource.image('{}layer_{}.png'.format(bg_i, layer_i)))
parallax_bgs.append(tuple(bg_set))
parallax_bgs = tuple(parallax_bgs)
# Load sfx without streaming
engine_sfx = pyglet.media.load('./resources/engine_sfx.wav', streaming=False)
bg_music = pyglet.media.load('./resources/bg_music.wav', streaming=False)
print('Resource loading successful')
| 30.286713
| 84
| 0.803048
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 915
| 0.211268
|
fb61944ce32d6c5a99c9e008904e108e5bfd2d77
| 2,517
|
py
|
Python
|
search_for_similar_images__perceptual_hash__phash/ui/SelectDirBox.py
|
DazEB2/SimplePyScripts
|
1dde0a42ba93fe89609855d6db8af1c63b1ab7cc
|
[
"CC-BY-4.0"
] | 117
|
2015-12-18T07:18:27.000Z
|
2022-03-28T00:25:54.000Z
|
search_for_similar_images__perceptual_hash__phash/ui/SelectDirBox.py
|
DazEB2/SimplePyScripts
|
1dde0a42ba93fe89609855d6db8af1c63b1ab7cc
|
[
"CC-BY-4.0"
] | 8
|
2018-10-03T09:38:46.000Z
|
2021-12-13T19:51:09.000Z
|
search_for_similar_images__perceptual_hash__phash/ui/SelectDirBox.py
|
DazEB2/SimplePyScripts
|
1dde0a42ba93fe89609855d6db8af1c63b1ab7cc
|
[
"CC-BY-4.0"
] | 28
|
2016-08-02T17:43:47.000Z
|
2022-03-21T08:31:12.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
# SOURCE: https://github.com/gil9red/VideoStreamingWithEncryption/blob/37cf7f501460a286ec44a20db7b2403e8cb05d97/server_GUI_Qt/inner_libs/gui/SelectDirBox.py
import os
from PyQt5.QtWidgets import QWidget, QLineEdit, QLabel, QPushButton, QHBoxLayout, QFileDialog, QStyle
from PyQt5.QtCore import pyqtSignal
class SelectDirBox(QWidget):
valueChanged = pyqtSignal(str)
valueEdited = pyqtSignal(str)
def __init__(self, value='', visible_label=True):
super().__init__()
self._label = QLabel('Directory:')
self._label.setVisible(visible_label)
self._value = QLineEdit()
self._value.textChanged.connect(self.valueChanged.emit)
self._value.textEdited.connect(self.valueEdited.emit)
icon_open_dir = self.style().standardIcon(QStyle.SP_DirOpenIcon)
action_open_dir = self._value.addAction(icon_open_dir, QLineEdit.TrailingPosition)
action_open_dir.setToolTip('Open directory')
action_open_dir.triggered.connect(self._on_open_dir)
self._button_select_path = QPushButton('...')
self._button_select_path.setFixedWidth(24)
self._button_select_path.setToolTip('Select directory')
self._button_select_path.clicked.connect(self._on_select_path)
self.setValue(value)
layout = QHBoxLayout()
layout.setSpacing(5)
layout.setContentsMargins(0, 0, 0, 0)
layout.addWidget(self._label)
layout.addWidget(self._value, stretch=1)
layout.addWidget(self._button_select_path)
self.setLayout(layout)
def setValue(self, value: str):
self._value.setText(value)
self._value.setToolTip(value)
def getValue(self) -> str:
return self._value.text()
def _on_select_path(self):
path = QFileDialog.getExistingDirectory(self, None, self._value.text())
if not path:
return
self.setValue(path)
def _on_open_dir(self):
path = self._value.text()
if os.path.isdir(path):
os.startfile(path)
def resizeEvent(self, event):
super().resizeEvent(event)
self._button_select_path.setFixedHeight(self._value.height())
if __name__ == '__main__':
from PyQt5.QtWidgets import QApplication
app = QApplication([])
mw = SelectDirBox()
mw.valueChanged.connect(
lambda value: print(f'Selected directory: {value}')
)
mw.show()
app.exec()
| 28.931034
| 156
| 0.684148
| 1,881
| 0.747318
| 0
| 0
| 0
| 0
| 0
| 0
| 304
| 0.120779
|
fb623387f3a45681b01c77927c90b4d6cbbd3ef4
| 3,503
|
py
|
Python
|
ImageStabilizer.py
|
arthurscholz/UMN-AFM-Scripts
|
86b4d11f9f70f378200899c930d1fa38ad393c66
|
[
"MIT"
] | null | null | null |
ImageStabilizer.py
|
arthurscholz/UMN-AFM-Scripts
|
86b4d11f9f70f378200899c930d1fa38ad393c66
|
[
"MIT"
] | null | null | null |
ImageStabilizer.py
|
arthurscholz/UMN-AFM-Scripts
|
86b4d11f9f70f378200899c930d1fa38ad393c66
|
[
"MIT"
] | null | null | null |
import numpy as np
import picoscript
import cv2
import HeightTracker
import atexit
print "Setting Parameters..."
zDacRange = 0.215 # Sensor specific number
windowSize = 3e-6 # window size in meters
windowBLHCX = 3.5e-6 # window bottom left hand corner X-axis in meters
windowBLHCY = 3.5e-6 # window bottom left hand corner Y-axis in meters
imageBuffer = 0 # buffer for tracking image (0-7)
binary = True
servoRange = picoscript.GetServoTopographyRange()
imageRange = servoRange * zDacRange
MAX_SHORT = 2**15
def PlaneLevel(im):
x, y = np.indices(im.shape)
A = np.vstack([x.flatten(), y.flatten(), np.ones(im.size)]).T
z = im.flatten()
a,b,c = np.linalg.lstsq(A, z)[0]
plane = a*x + b*y + c
return im - plane
def LineLevel(im):
output = np.empty_like(im)
for i in range(im.shape[0]):
output[i] = im[i] - np.median(im[i])
return output
def LoadBuffer(i):
xSize = picoscript.GetScanXPixels()
ySize = picoscript.GetScanYPixels()
# Read in image data buffer
im = np.asarray(picoscript.ReadImageDataBuffer(imageBuffer))
im = im.reshape(ySize,xSize)
im = im * imageRange / MAX_SHORT
return im
# Calculates scan offset for new image. Takes an image, roi template and bottom
# left hand corner
def CalculateOffset(im,template,blhc):
res = cv2.matchTemplate(im.astype('float32'),template.astype('float32'),cv2.TM_CCORR_NORMED)
min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
# returns vector to feature
return np.asarray(max_loc)-blhc
def Stabilize():
position = picoscript.GetStatusApproachPosition()
im = LoadBuffer(0)
im = PlaneLevel(im)
im = LineLevel(im)
ref_image = im
im_size = picoscript.GetScanSize()
x0 = int(windowBLHCX / im_size * im.shape[1])
y0 = int(windowBLHCY / im_size * im.shape[0])
x1 = int((windowBLHCX + windowSize)/im_size * im.shape[1])
y1 = int((windowBLHCY + windowSize)/im_size * im.shape[0])
template = ref_image[y0:y1,x0:x1]
while(position == picoscript.GetStatusApproachPosition()):
picoscript.ScanStartDown()
picoscript.WaitForStatusScanning(True)
picoscript.WaitForStatusScanning(False)
im = LoadBuffer(0)
im = PlaneLevel(im)
im = LineLevel(im)
offset = CalculateOffset(im,template,np.array([x0,y0]))
x_offset = picoscript.GetScanXOffset() + offset[0]*im_size/im.shape[1]
y_offset = picoscript.GetScanYOffset() + offset[1]*im_size/im.shape[0]
picoscript.SetScanXOffset(x_offset)
picoscript.SetScanYOffset(y_offset)
x0_new = offset[0] + x0
x1_new = offset[0] + x1
y0_new = offset[1] + y0
y1_new = offset[1] + y1
print '{0}, {1}'.format(x_offset, y_offset)
template = im[y0_new:y1_new,x0_new:x1_new]
if __name__ == "__main__":
atexit.register(picoscript.Disconnect)
heighttrack = HeightTracker.Track()
heighttrack.start()
RunStabilize = True
print "Waiting for current scan to end..."
picoscript.WaitForStatusScanning(False)
print "Starting stabilization..."
while True:
if RunStabilize:
Stabilize()
position = picoscript.GetStatusApproachPosition()
picoscript.ScanStartDown()
picoscript.WaitForStatusScanning(True)
picoscript.WaitForStatusScanning(False)
RunStabilize = position == picoscript.GetStatusApproachPosition()
| 30.198276
| 96
| 0.663717
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 453
| 0.129318
|
fb6262762a9edf203b455a0bed2e167c184ce590
| 1,947
|
py
|
Python
|
Twitter Data Extraction.py
|
scottblender/twitter-covid-19-vaccine-analysis
|
a4d273b8b885fc33db075dfc910fa39645fa3789
|
[
"MIT"
] | null | null | null |
Twitter Data Extraction.py
|
scottblender/twitter-covid-19-vaccine-analysis
|
a4d273b8b885fc33db075dfc910fa39645fa3789
|
[
"MIT"
] | null | null | null |
Twitter Data Extraction.py
|
scottblender/twitter-covid-19-vaccine-analysis
|
a4d273b8b885fc33db075dfc910fa39645fa3789
|
[
"MIT"
] | null | null | null |
import snscrape.modules.twitter as sntwitter
import pandas as pd
# Creating list to append tweet data to
tweets_list2 = []
# Using TwitterSearchScraper to scrape data and append tweets to list
for i,tweet in enumerate(sntwitter.TwitterSearchScraper('covid vaccine until:2021-05-24').get_items()):
if i>100000:
break
tweets_list2.append([tweet.date, tweet.id, tweet.content, tweet.user.username, tweet.user.verified, tweet.user.followersCount, tweet.user.friendsCount, tweet.likeCount, tweet.retweetCount, tweet.quoteCount, tweet.user.created, tweet.user.location, tweet.user.displayname, tweet.lang, tweet.coordinates, tweet.place])
# Creating a dataframe from the tweets list above
tweets_df2 = pd.DataFrame(tweets_list2, columns=['Datetime', 'Tweet Id', 'Text', 'Username', 'Verified', 'Followers Count', 'Friends Count', 'Like Count', 'Retweet Count', 'Quote Count', 'Created','Location','Display Name', 'Language', 'Coordinates', 'Place'])
tweets_df2.to_csv('First Extract.csv')
# Creating list to append tweet data to
tweets_list2 = []
# Using TwitterSearchScraper to scrape data and append tweets to list
for i,tweet in enumerate(sntwitter.TwitterSearchScraper('covid vaccine until:2021-05-13').get_items()):
if i>100000:
break
tweets_list2.append([tweet.date, tweet.id, tweet.content, tweet.user.username, tweet.user.verified, tweet.user.followersCount, tweet.user.friendsCount, tweet.likeCount, tweet.retweetCount, tweet.quoteCount, tweet.user.created, tweet.user.location, tweet.user.displayname, tweet.lang, tweet.coordinates, tweet.place])
# Creating a dataframe from the tweets list above
tweets_df3 = pd.DataFrame(tweets_list2, columns=['Datetime', 'Tweet Id', 'Text', 'Username', 'Verified', 'Followers Count', 'Friends Count', 'Like Count', 'Retweet Count', 'Quote Count', 'Created','Location','Display Name', 'Language', 'Coordinates', 'Place'])
tweets_df3.to_csv('Second Extract.csv')
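# A possible follow-up step (illustrative only): merge both extracts and drop
# duplicate tweets by Tweet Id before analysis.
combined_df = pd.concat([tweets_df2, tweets_df3], ignore_index=True)
combined_df = combined_df.drop_duplicates(subset='Tweet Id')
combined_df.to_csv('Combined Extract.csv', index=False)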
| 69.535714
| 320
| 0.757062
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 779
| 0.400103
|
fb62f3f5a5769a80a5d13a6f4d1ccd457d5f9675
| 138
|
py
|
Python
|
hugs/__init__.py
|
Bogdanp/hugs
|
e7f16f15369fbe3da11d89882d76c7ef432f3709
|
[
"BSD-3-Clause"
] | 22
|
2017-07-20T18:02:27.000Z
|
2021-06-10T13:06:22.000Z
|
hugs/__init__.py
|
Bogdanp/hugs
|
e7f16f15369fbe3da11d89882d76c7ef432f3709
|
[
"BSD-3-Clause"
] | null | null | null |
hugs/__init__.py
|
Bogdanp/hugs
|
e7f16f15369fbe3da11d89882d76c7ef432f3709
|
[
"BSD-3-Clause"
] | 2
|
2019-12-11T20:44:08.000Z
|
2021-02-02T04:37:04.000Z
|
from .repository import Repository
from .manager import Manager
__all__ = ["Manager", "Repository", "__version__"]
__version__ = "0.2.0"
| 23
| 50
| 0.746377
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 41
| 0.297101
|
fb62f6a8cb550f9476912173180ad44a3f1fe7d0
| 44,876
|
py
|
Python
|
Source/Git/wb_git_project.py
|
barry-scott/git-workbench
|
9f352875ab097ce5e45f85bf255b1fa02a196807
|
[
"Apache-2.0"
] | 24
|
2017-03-23T06:24:02.000Z
|
2022-03-19T13:35:44.000Z
|
Source/Git/wb_git_project.py
|
barry-scott/scm-workbench
|
5607f12056f8245e0178816603e4922b7f5805ac
|
[
"Apache-2.0"
] | 14
|
2016-06-21T10:06:27.000Z
|
2020-07-25T11:56:23.000Z
|
Source/Git/wb_git_project.py
|
barry-scott/git-workbench
|
9f352875ab097ce5e45f85bf255b1fa02a196807
|
[
"Apache-2.0"
] | 11
|
2016-12-25T12:36:16.000Z
|
2022-03-23T14:25:25.000Z
|
'''
====================================================================
Copyright (c) 2016-2017 Barry A Scott. All rights reserved.
This software is licensed as described in the file LICENSE.txt,
which you should have received as part of this distribution.
====================================================================
wb_git_project.py
'''
import sys
import os
import pathlib
import wb_annotate_node
import wb_platform_specific
import wb_git_callback_server
import git
import git.exc
import git.index
GitCommandError = git.exc.GitCommandError
def gitInit( app, progress_handler, wc_path ):
progress = Progress( progress_handler )
try:
git.repo.Repo.init( str(wc_path) )
return True
except GitCommandError:
for line in progress.allErrorLines():
app.log.error( line )
return False
__callback_server = None
git_extra_environ = {}
def initCallbackServer( app ):
    # pylint: disable=global-statement
global __callback_server
assert __callback_server is None, 'Cannot call initCallbackServer twice'
__callback_server = wb_git_callback_server.WbGitCallbackServer( app )
__callback_server.start()
if sys.platform == 'win32':
callback = wb_platform_specific.getAppDir() / 'scm-workbench-git-callback.exe'
if not callback.exists():
app.log.info( 'Cannot find %s' % (callback,) )
# assume in development environment
callback = wb_platform_specific.getAppDir() / 'scm_workbench_git_callback.cmd'
else:
callback = wb_platform_specific.getAppDir() / 'scm-workbench-git-callback'
if not callback.exists():
app.log.error( 'Cannot find %s' % (callback,) )
return
if 'GIT_ASKPASS' in os.environ:
app.log.info( "Using user's GIT_ASKPASS program %s" % (os.environ[ 'GIT_ASKPASS' ],) )
else:
git_extra_environ['GIT_ASKPASS'] = '"%s" askpass' % (str(callback),)
app.log.info( "Using Workbench's GIT_ASKPASS program" )
git_extra_environ['GIT_SEQUENCE_EDITOR'] = '"%s" sequence-editor' % (str(callback),)
git_extra_environ['GIT_EDITOR'] = '"%s" editor' % (str(callback),)
app.log.info( "Setup Workbench's GIT callback program" )
def setCallbackCredentialsHandler( handler ):
__callback_server.setCallbackCredentialsHandler( handler )
def setCallbackRebaseSequenceHandler( handler ):
__callback_server.setCallbackRebaseSequenceHandler( handler )
def setCallbackRebaseEditorHandler( handler ):
__callback_server.setCallbackRebaseEditorHandler( handler )
def setCallbackReply( code, value ):
__callback_server.setReply( code, value )
class GitProject:
def __init__( self, app, prefs_project, ui_components ):
self.app = app
self.ui_components = ui_components
self.debugLog = self.app.debug_options.debugLogGitProject
self.debugLogTree = self.app.debug_options.debugLogGitUpdateTree
self.prefs_project = prefs_project
        # repo will be set up on demand - this speeds up start-up, especially on macOS
self.__repo = None
self.index = None
self.tree = GitProjectTreeNode( self, prefs_project.name, pathlib.Path( '.' ) )
self.flat_tree = GitProjectTreeNode( self, prefs_project.name, pathlib.Path( '.' ) )
self.all_file_state = {}
self.__stale_index = False
self.__num_staged_files = 0
self.__num_modified_files = 0
def getMasterBranchName( self ):
if self.prefs_project.master_branch_name is None:
return 'master'
else:
return self.prefs_project.master_branch_name
def setMasterBranchName( self, master_branch_name ):
if master_branch_name == 'master':
self.prefs_project.master_branch_name = None
else:
self.prefs_project.master_branch_name = master_branch_name
def cloneFrom( self, url, progress_handler ):
assert self.__repo is None
progress = Progress( progress_handler )
try:
self.__repo = git.repo.Repo.clone_from( url, str(self.prefs_project.path), progress )
return True
except GitCommandError:
for line in progress.allErrorLines():
self.app.log.error( line )
return False
def repo( self ):
# setup repo on demand
if self.__repo is None:
self.__repo = git.Repo( str( self.prefs_project.path ) )
self.__repo.git.update_environment( **git_extra_environ )
return self.__repo
def scmType( self ):
return 'git'
# return a new GitProject that can be used in another thread
def newInstance( self ):
return GitProject( self.app, self.prefs_project, self.ui_components )
def isNotEqual( self, other ):
return self.prefs_project.name != other.prefs_project.name
def __repr__( self ):
return '<GitProject: %s (id:%d>' % (self.prefs_project.name, id(self))
def pathForGit( self, path ):
assert isinstance( path, pathlib.Path )
# return abs path
return str( self.projectPath() / path )
def pathForWb( self, str_path ):
assert type( str_path ) == str
wb_path = pathlib.Path( str_path )
if wb_path.is_absolute():
wb_path = wb_path.relative_to( self.projectPath() )
return wb_path
def hasCommits( self ):
try:
self.repo().head.ref.commit
return True
except ValueError:
return False
def projectName( self ):
return self.prefs_project.name
def projectPath( self ):
return pathlib.Path( self.prefs_project.path )
def configReader( self, level ):
return self.repo().config_reader( level )
def configWriter( self, level ):
return self.repo().config_writer( level )
def addRemote( self, name, url ):
self.repo().create_remote( name, url )
def getHeadCommit( self ):
return self.repo().head.ref.commit
def getShortCommitId( self, commit_id, size=7 ):
return self.repo().git.rev_parse( commit_id, short=size )
def switchToBranch( self, branch ):
self.cmdCheckout( branch )
def getBranchName( self ):
return self.repo().head.ref.name
def getAllBranchNames( self ):
all_branch_names = sorted( [b.name for b in self.repo().branches] )
# detect the case of a new, empty git repo
if len(all_branch_names) == 0:
all_branch_names = [self.getBranchName()]
return all_branch_names
def getTrackingBranchName( self ):
tracking_branch = self.repo().head.ref.tracking_branch()
return tracking_branch.name if tracking_branch is not None else None
def getTrackingBranchCommit( self ):
tracking_branch = self.repo().head.ref.tracking_branch()
if tracking_branch is None:
return None
return tracking_branch.commit
def getRemote( self, name ):
try:
return self.repo().remote( name )
except ValueError:
return None
def cmdAddRemote( self, name, url ):
git.Remote.create( self.repo(), name, url )
def cmdDeleteRemote( self, name ):
git.Remote.remove( self.repo(), name )
def cmdUpdateRemote( self, name, url ):
remote = self.getRemote( name )
remote.set_url( url, remote.url )
def numStagedFiles( self ):
return self.__num_staged_files
def numModifiedFiles( self ):
return self.__num_modified_files
def saveChanges( self ):
self.debugLog( 'saveChanges() __stale_index %r' % (self.__stale_index,) )
if self.__stale_index:
self.updateState( 'QQQ' )
self.__stale_index = False
def updateState( self, tree_leaf ):
self.debugLog( 'updateState( %r ) repo=%s' % (tree_leaf, self.projectPath()) )
# rebuild the tree
self.tree = GitProjectTreeNode( self, self.prefs_project.name, pathlib.Path( '.' ) )
self.flat_tree = GitProjectTreeNode( self, self.prefs_project.name, pathlib.Path( '.' ) )
if not self.projectPath().exists():
self.app.log.error( T_('Project %(name)s folder %(folder)s has been deleted') %
{'name': self.projectName()
,'folder': self.projectPath()} )
self.all_file_state = {}
else:
self.__calculateStatus()
for path in self.all_file_state:
self.__updateTree( path )
self.dumpTree()
def __calculateStatus( self ):
self.all_file_state = {}
repo_root = self.projectPath()
git_dir = repo_root / '.git'
all_folders = set( [repo_root] )
while len(all_folders) > 0:
folder = all_folders.pop()
for filename in folder.iterdir():
abs_path = folder / filename
repo_relative = abs_path.relative_to( repo_root )
if abs_path.is_dir():
if abs_path != git_dir:
all_folders.add( abs_path )
self.all_file_state[ repo_relative ] = WbGitFileState( self, repo_relative )
self.all_file_state[ repo_relative ].setIsDir()
else:
self.all_file_state[ repo_relative ] = WbGitFileState( self, repo_relative )
# ----------------------------------------
# can only get info from the index if there is at least 1 commit
self.index = git.index.IndexFile( self.repo() )
if self.hasCommits():
head_vs_index = self.index.diff( self.repo().head.commit )
index_vs_working = self.index.diff( None )
else:
head_vs_index = []
index_vs_working = []
# each ref to self.repo().untracked_files creates a new object
# cache the value once/update
untracked_files = self.repo().untracked_files
for entry in self.index.entries.values():
filepath = pathlib.Path( entry.path )
if filepath not in self.all_file_state:
# filepath has been deleted
self.all_file_state[ filepath ] = WbGitFileState( self, filepath )
self.all_file_state[ filepath ].setIndexEntry( entry )
self.__num_staged_files = 0
for diff in head_vs_index:
self.__num_staged_files += 1
filepath = pathlib.Path( diff.b_path )
if filepath not in self.all_file_state:
self.all_file_state[ filepath ] = WbGitFileState( self, filepath )
if diff.renamed:
self.all_file_state[ pathlib.Path( diff.rename_from ) ]._addStaged( diff )
else:
self.all_file_state[ filepath ]._addStaged( diff )
self.__num_modified_files = 0
for diff in index_vs_working:
self.__num_modified_files += 1
filepath = pathlib.Path( diff.a_path )
if filepath not in self.all_file_state:
self.all_file_state[ filepath ] = WbGitFileState( self, filepath )
self.all_file_state[ filepath ]._addUnstaged( diff )
for path in untracked_files:
filepath = pathlib.Path( path )
if filepath not in self.all_file_state:
self.all_file_state[ filepath ] = WbGitFileState( self, filepath )
self.all_file_state[ filepath ]._setUntracked()
def __updateTree( self, path ):
assert isinstance( path, pathlib.Path ), 'path %r' % (path,)
self.debugLogTree( '__updateTree path %r' % (path,) )
node = self.tree
self.debugLogTree( '__updateTree path.parts %r' % (path.parts,) )
for index, name in enumerate( path.parts[0:-1] ):
self.debugLogTree( '__updateTree name %r at node %r' % (name,node) )
if not node.hasFolder( name ):
node.addFolder( name, GitProjectTreeNode( self, name, pathlib.Path( *path.parts[0:index+1] ) ) )
node = node.getFolder( name )
self.debugLogTree( '__updateTree addFile %r to node %r' % (path, node) )
node.addFileByName( path )
self.flat_tree.addFileByPath( path )
def dumpTree( self ):
if self.debugLogTree.isEnabled():
self.tree._dumpTree( 0 )
#------------------------------------------------------------
#
# functions to retrive interesting info from the repo
#
#------------------------------------------------------------
def hasFileState( self, filename ):
assert isinstance( filename, pathlib.Path )
return filename in self.all_file_state
def getFileState( self, filename ):
assert isinstance( filename, pathlib.Path )
        # status only has entries for non-CURRENT status files
return self.all_file_state[ filename ]
def getReportStagedFiles( self ):
all_staged_files = []
for filename, file_state in self.all_file_state.items():
if file_state.isStagedNew():
all_staged_files.append( (T_('New file'), filename, None) )
elif file_state.isStagedModified():
all_staged_files.append( (T_('Modified'), filename, None) )
elif file_state.isStagedDeleted():
all_staged_files.append( (T_('Deleted'), filename, None) )
elif file_state.isStagedRenamed():
all_staged_files.append( (T_('Renamed'), file_state.renamedFromFilename(), file_state.renamedToFilename()) )
return all_staged_files
def getReportUntrackedFiles( self ):
all_untracked_files = []
for filename, file_state in self.all_file_state.items():
if file_state.isUncontrolled():
all_untracked_files.append( (T_('New file'), filename) )
elif file_state.isUnstagedModified():
all_untracked_files.append( (T_('Modified'), filename) )
elif file_state.isUnstagedDeleted():
all_untracked_files.append( (T_('Deleted'), filename) )
return all_untracked_files
def canPush( self ):
if not self.hasCommits():
return False
remote_commit = self.getTrackingBranchCommit()
if remote_commit is None:
return False
head_commit = self.repo().head.ref.commit
return head_commit != remote_commit
def canPull( self ):
return self.repo().head.ref.tracking_branch() is not None
def getUnpushedCommits( self ):
tracking_commit = self.getTrackingBranchCommit()
if tracking_commit is None:
return []
last_pushed_commit_id = tracking_commit.hexsha
all_unpushed_commits = []
for commit in self.repo().iter_commits( None ):
commit_id = commit.hexsha
if last_pushed_commit_id == commit_id:
break
all_unpushed_commits.append( commit )
return all_unpushed_commits
#------------------------------------------------------------
#
# all functions starting with "cmd" are like the git <cmd> in behavior
#
#------------------------------------------------------------
def cmdCheckout( self, branch_name ):
try:
branch = self.repo().branches[ branch_name ]
branch.checkout()
except GitCommandError as e:
self.app.log.error( str(e) )
def cmdStage( self, filename ):
self.debugLog( 'cmdStage( %r )' % (filename,) )
self.repo().git.add( filename )
self.__stale_index = True
def cmdUnstage( self, rev, filename ):
self.debugLog( 'cmdUnstage( %r )' % (filename,) )
self.repo().git.reset( 'HEAD', filename, mixed=True )
self.__stale_index = True
def cmdRevert( self, rev, file_state ):
self.debugLog( 'cmdRevert( %r, %s:%r )' % (rev, file_state.relativePath(), file_state) )
try:
if file_state.isStagedRenamed():
self.debugLog( 'cmdRevert renamedFromFilename %r renamedToFilename %r' %
(file_state.renamedFromFilename(), file_state.renamedToFilename()) )
self.repo().git.reset( rev, file_state.renamedFromFilename(), mixed=True )
self.repo().git.reset( rev, file_state.renamedToFilename(), mixed=True )
self.repo().git.checkout( rev, file_state.renamedFromFilename() )
self.cmdDelete( file_state.renamedToFilename() )
elif( file_state.isStagedNew()
or file_state.isStagedModified() ):
self.repo().git.reset( rev, file_state.relativePath(), mixed=True )
else:
self.repo().git.checkout( rev, file_state.relativePath() )
except GitCommandError as e:
if e.stderr is not None:
                # stderr unfortunately is prefixed with "\n stderr: '"
self.app.log.error( e.stderr.split( "'", 1 )[1][:-1] )
else:
self.app.log.error( str(e) )
self.__stale_index = True
def cmdDelete( self, filename ):
(self.prefs_project.path / filename).unlink()
self.__stale_index = True
def cmdRename( self, filename, new_filename ):
filestate = self.getFileState( filename )
if filestate.isControlled():
self.repo().git.mv( filename, new_filename )
else:
abs_path = filestate.absolutePath()
new_abs_path = self.prefs_project.path / new_filename
try:
abs_path.rename( new_abs_path )
except IOError as e:
                self.app.log.error( 'Rename failed - %s' % (e,) )
self.__stale_index = True
def cmdRebase( self, commit_id, all_rebase_commands, new_commit_message=None ):
all_text = []
for command in all_rebase_commands:
all_text.append( ' '.join( command ) )
all_text.append( '' )
rebase_commands = '\n'.join( all_text )
def rebaseHandler( filename ):
if self.debugLog.isEnabled():
with open( filename, 'r', encoding='utf-8' ) as f:
for line in f:
self.debugLog( 'Old Rebase: %r' % (line,) )
with open( filename, 'w', encoding='utf-8' ) as f:
if self.debugLog.isEnabled():
for line in all_text:
self.debugLog( 'New Rebase: %r' %(line,) )
f.write( rebase_commands )
return 0, ''
def newCommitMessage( filename ):
if self.debugLog.isEnabled():
with open( filename, 'r', encoding='utf-8' ) as f:
for line in f:
self.debugLog( 'Old Commit Message: %r' % (line,) )
with open( filename, 'w', encoding='utf-8' ) as f:
if self.debugLog.isEnabled():
for line in new_commit_message.split('\n'):
self.debugLog( 'New Commit Message: %r' % (line,) )
f.write( new_commit_message )
return 0, ''
def unexpectedCallback( filename ):
return 1, 'Unexpected callback with %r' % (filename,)
setCallbackRebaseSequenceHandler( rebaseHandler )
if new_commit_message is None:
setCallbackRebaseEditorHandler( unexpectedCallback )
else:
setCallbackRebaseEditorHandler( newCommitMessage )
rc, stdout, stderr = self.repo().git.execute(
[git.Git.GIT_PYTHON_GIT_EXECUTABLE, 'rebase', '--interactive', '%s^1' % (commit_id,)],
with_extended_output=True,
with_exceptions=False,
universal_newlines=False, # GitPython bug will TB if true
stdout_as_string=True )
self.debugLog( '%s rebase --interactive %s -> rc %d' %
(git.Git.GIT_PYTHON_GIT_EXECUTABLE, commit_id, rc) )
if rc != 0:
# assume need to abort rebase on failure
self.repo().git.execute(
[git.Git.GIT_PYTHON_GIT_EXECUTABLE, 'rebase', '--abort'],
with_extended_output=True,
with_exceptions=False,
universal_newlines=False, # GitPython bug will TB if true
stdout_as_string=True )
setCallbackRebaseSequenceHandler( None )
setCallbackRebaseEditorHandler( None )
return rc, stdout.replace( '\r', '' ).split('\n'), stderr.replace( '\r', '' ).split('\n')
def cmdCreateTag( self, tag_name, ref ):
self.repo().create_tag( tag_name, ref=ref )
def cmdDiffFolder( self, folder, head, staged ):
if head and staged:
return self.repo().git.diff( 'HEAD', self.pathForGit( folder ), staged=staged )
elif staged:
return self.repo().git.diff( self.pathForGit( folder ), staged=True )
elif head:
return self.repo().git.diff( 'HEAD', self.pathForGit( folder ), staged=False )
else:
return self.repo().git.diff( self.pathForGit( folder ), staged=False )
def cmdDiffWorkingVsCommit( self, filename, commit ):
return self.repo().git.diff( commit, self.pathForGit( filename ), staged=False )
def cmdDiffStagedVSCommit( self, filename, commit ):
return self.repo().git.diff( commit, self.pathForGit( filename ), staged=True )
def cmdDiffCommitVsCommit( self, filename, old_commit, new_commit ):
return self.repo().git.diff( old_commit, new_commit, '--', self.pathForGit( filename ) )
def cmdShow( self, what ):
return self.repo().git.show( what )
def getTextLinesForCommit( self, filepath, commit_id ):
assert isinstance( filepath, pathlib.Path ), 'expecting pathlib.Path got %r' % (filepath,)
        # git show wants a posix path; it does not work with '\' path separators
git_filepath = pathlib.PurePosixPath( filepath )
text = self.cmdShow( '%s:%s' % (commit_id, git_filepath) )
all_lines = text.split('\n')
if all_lines[-1] == '':
return all_lines[:-1]
else:
return all_lines
def cmdCommit( self, message ):
self.__stale_index = True
return self.index.commit( message )
def cmdCommitLogAfterCommitId( self, commit_id ):
if not self.hasCommits():
return []
all_commit_logs = []
for commit in self.repo().iter_commits( None ):
if commit.hexsha == commit_id:
break
all_commit_logs.append( GitCommitLogNode( commit ) )
return all_commit_logs
def cmdCommitLogForRepository( self, progress_callback, limit=None, since=None, until=None, rev=None, paths='' ):
if not self.hasCommits():
return []
all_commit_logs = []
kwds = {}
if limit is not None:
kwds['max_count'] = limit
if since is not None:
kwds['since'] = since
        if until is not None:
kwds['until'] = until
for commit in self.repo().iter_commits( rev, paths, **kwds ):
all_commit_logs.append( GitCommitLogNode( commit ) )
total = len(all_commit_logs)
progress_callback( 0, total )
self.__addCommitChangeInformation( progress_callback, all_commit_logs )
progress_callback( total, total )
return all_commit_logs
def cmdCommitLogForFile( self, progress_callback, filename, limit=None, since=None, until=None, rev=None ):
return self.cmdCommitLogForRepository( progress_callback, paths=filename, limit=limit, since=since, until=until, rev=rev )
def cmdTagsForRepository( self ):
tag_name_by_id = {}
for tag in self.repo().tags:
try:
tag_name_by_id[ tag.commit.hexsha ] = tag.name
except ValueError:
                # cannot get the tag - may be a detached ref
pass
return tag_name_by_id
def doesTagExist( self, tag_name ):
return tag_name in self.repo().tags
def __addCommitChangeInformation( self, progress_callback, all_commit_logs ):
# now calculate what was added, deleted and modified in each commit
total = len(all_commit_logs)
for offset in range( total ):
progress_callback( offset, total )
all_files = all_commit_logs[ offset ].commitStats().files
new_tree = all_commit_logs[ offset ].commitTree()
old_tree = all_commit_logs[ offset ].commitPreviousTree()
all_new = {}
self.__treeToDict( all_files, new_tree, all_new )
new_set = set(all_new)
if old_tree is None:
all_commit_logs[ offset ]._addChanges( new_set, set(), [], set() )
else:
all_old = {}
self.__treeToDict( all_files, old_tree, all_old )
old_set = set(all_old)
all_added = new_set - old_set
all_deleted = old_set - new_set
all_renamed = []
# look for renames
if len(all_added) > 0 and len(all_deleted) > 0:
all_old_id_to_name = {}
for name in all_deleted:
all_old_id_to_name[ all_old[ name ] ] = name
for name in list(all_added):
id_ = all_new[ name ]
if id_ in all_old_id_to_name:
old_name = all_old_id_to_name[ id_ ]
# converted svn repos can have trees that cannot
# be used to figure out the rename
# for example when the checkin deletes a folder
# which cannot be expressed in git trees
if( old_name in all_added
and old_name in all_deleted ):
all_added.remove( name )
all_deleted.remove( old_name )
all_renamed.append( (name, old_name) )
all_modified = set()
for key in all_new:
if( key in all_old
and all_new[ key ] != all_old[ key ] ):
all_modified.add( key )
all_commit_logs[ offset ]._addChanges( all_added, all_deleted, all_renamed, all_modified )
def __treeToDict( self, all_files, tree, all_entries ):
for file in all_files:
all_parts = file.split('/')
node = tree
# walk down the tree (aka folders) until we have
# the tree that has the blob (aka file) in it
# tree.path is the full name of the folder
for index in range(1, len(all_parts)):
prefix = '/'.join( all_parts[:index] )
for child in node.trees:
if child.path == prefix:
node = child
break
# blob.path is the full path to the file
for blob in node:
if blob.path == file:
all_entries[ blob.path ] = blob.hexsha
break
def cmdAnnotationForFile( self, filename, rev=None ):
if rev is None:
rev = 'HEAD'
all_annotate_nodes = []
line_num = 0
for commit, all_lines in self.repo().blame( rev, self.pathForGit( filename ) ):
commit_id = commit.hexsha
for line_text in all_lines:
line_num += 1
all_annotate_nodes.append(
wb_annotate_node.AnnotateNode( line_num, line_text, commit_id ) )
return all_annotate_nodes
def cmdCommitLogForAnnotateFile( self, filename, all_commit_ids ):
all_commit_logs = {}
for commit_id in all_commit_ids:
commit = self.repo().commit( commit_id )
all_commit_logs[ commit_id ] = GitCommitLogNode( commit )
return all_commit_logs
def cmdPull( self, progress_callback, info_callback ):
tracking_branch = self.repo().head.ref.tracking_branch()
remote = self.repo().remote( tracking_branch.remote_name )
progress = Progress( progress_callback )
try:
for info in remote.pull( progress=progress ):
info_callback( info )
for line in progress.allDroppedLines():
self.app.log.info( line )
except GitCommandError:
for line in progress.allErrorLines():
self.app.log.error( line )
raise
def cmdPush( self, progress_callback, info_callback ):
progress = Progress( progress_callback )
tracking_branch = self.repo().head.ref.tracking_branch()
remote = self.repo().remote( tracking_branch.remote_name )
try:
for info in remote.push( progress=progress ):
info_callback( info )
for line in progress.allDroppedLines():
self.app.log.info( line )
except GitCommandError:
for line in progress.allErrorLines():
self.app.log.error( line )
raise
def cmdStashSave( self, message=None ):
cmd = [git.Git.GIT_PYTHON_GIT_EXECUTABLE, 'stash', 'push']
if message is not None:
cmd.append( '--message' )
cmd.append( message )
rc, stdout, stderr = self.repo().git.execute(
cmd,
with_extended_output=True,
with_exceptions=False,
universal_newlines=False, # GitPython bug will TB if true
stdout_as_string=True )
self.debugLog( '%s stash save -> rc %d' % (git.Git.GIT_PYTHON_GIT_EXECUTABLE, rc) )
if rc != 0:
for line in stderr.split( '\n' ):
line = line.strip()
self.app.log.error( line )
return rc == 0
def cmdStashPop( self, stash_id ):
cmd = [git.Git.GIT_PYTHON_GIT_EXECUTABLE, 'stash', 'pop', '--quiet', stash_id]
self.debugLog( 'cmdStashPop: %r' % (cmd,) )
rc, stdout, stderr = self.repo().git.execute(
cmd,
with_extended_output=True,
with_exceptions=False,
universal_newlines=False, # GitPython bug will TB if true
stdout_as_string=True )
self.debugLog( '%s stash apply %s -> rc %d' % (git.Git.GIT_PYTHON_GIT_EXECUTABLE, stash_id, rc) )
for line in stdout.split( '\n' ):
line = line.strip()
self.app.log.info( line )
if rc != 0:
for line in stderr.split( '\n' ):
line = line.strip()
self.app.log.error( line )
return rc == 0
def cmdStashList( self ):
rc, stdout, stderr = self.repo().git.execute(
[git.Git.GIT_PYTHON_GIT_EXECUTABLE, 'stash', 'list'],
with_extended_output=True,
with_exceptions=False,
universal_newlines=False, # GitPython bug will TB if true
stdout_as_string=True )
self.debugLog( '%s stash list -> rc %d' % (git.Git.GIT_PYTHON_GIT_EXECUTABLE, rc) )
if rc != 0:
for line in stderr.split( '\n' ):
line = line.strip()
self.app.log.error( line )
return []
all_stashes = []
for line in stdout.split( '\n' ):
line = line.strip()
if line == '':
continue
stash_id, stash_branch, stash_message = line.split( ': ', 2 )
for branch_prefix in ('WIP on ', 'On '):
if stash_branch.startswith( branch_prefix ):
stash_branch = stash_branch[len(branch_prefix):]
break
all_stashes.append( WbGitStashInfo( stash_id, stash_branch, stash_message ) )
return all_stashes
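# Illustrative sketch only (not used by the classes in this module): the stash
# list parsing performed in cmdStashList() applied to one typical line of
# "git stash list" output. The sample line in the trailing comment is an
# assumption for the example.
def _exampleParseStashLine( line ):
    stash_id, stash_branch, stash_message = line.strip().split( ': ', 2 )
    for branch_prefix in ('WIP on ', 'On '):
        if stash_branch.startswith( branch_prefix ):
            stash_branch = stash_branch[len(branch_prefix):]
            break
    return stash_id, stash_branch, stash_message
# _exampleParseStashLine( 'stash@{0}: WIP on main: 1a2b3c4 fix typo' )
# returns ('stash@{0}', 'main', '1a2b3c4 fix typo')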
class WbGitStashInfo:
def __init__( self, stash_id, stash_branch, stash_message ):
self.stash_id = stash_id
self.stash_branch = stash_branch
self.stash_message = stash_message
def __repr__( self ):
return ('<WbGitStashInfo: id=%s branch=%s msg=%s>' %
(self.stash_id, self.stash_branch, self.stash_message))
class WbGitFileState:
def __init__( self, project, filepath ):
assert isinstance( project, GitProject ),'expecting GitProject got %r' % (project,)
assert isinstance( filepath, pathlib.Path ), 'expecting pathlib.Path got %r' % (filepath,)
self.__project = project
self.__filepath = filepath
self.__is_dir = False
self.__index_entry = None
self.__unstaged_diff = None
self.__staged_diff = None
self.__untracked = False
# from the above calculate the following
self.__state_calculated = False
self.__staged_is_modified = False
self.__unstaged_is_modified = False
self.__staged_abbrev = None
self.__unstaged_abbrev = None
self.__head_blob = None
self.__staged_blob = None
def __repr__( self ):
        return ('<WbGitFileState: calc %r, S=%r, U=%r>' %
(self.__state_calculated, self.__staged_abbrev, self.__unstaged_abbrev))
def relativePath( self ):
return self.__filepath
def absolutePath( self ):
return self.__project.projectPath() / self.__filepath
def renamedToFilename( self ):
assert self.isStagedRenamed()
return pathlib.Path( self.__staged_diff.rename_from )
def renamedFromFilename( self ):
assert self.isStagedRenamed()
return pathlib.Path( self.__staged_diff.rename_to )
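    # Note: renamedToFilename()/renamedFromFilename() above return rename_from
    # and rename_to crossed over, and __calculateState() below maps deleted_file
    # to 'A' and new_file to 'D'; this appears to compensate for the staged diff
    # being generated with the index and HEAD sides swapped.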
def setIsDir( self ):
self.__is_dir = True
def isDir( self ):
return self.__is_dir
def setIndexEntry( self, index_entry ):
self.__index_entry = index_entry
def _addStaged( self, diff ):
self.__state_calculated = False
self.__staged_diff = diff
def _addUnstaged( self, diff ):
self.__state_calculated = False
self.__unstaged_diff = diff
def _setUntracked( self ):
self.__untracked = True
# from the provided info work out
    # interesting properties
def __calculateState( self ):
if self.__state_calculated:
return
if self.__staged_diff is None:
self.__staged_abbrev = ''
else:
if self.__staged_diff.renamed:
self.__staged_abbrev = 'R'
elif self.__staged_diff.deleted_file:
self.__staged_abbrev = 'A'
elif self.__staged_diff.new_file:
self.__staged_abbrev = 'D'
else:
self.__staged_abbrev = 'M'
self.__staged_is_modified = True
self.__head_blob = self.__staged_diff.b_blob
self.__staged_blob = self.__staged_diff.a_blob
if self.__unstaged_diff is None:
self.__unstaged_abbrev = ''
else:
if self.__unstaged_diff.deleted_file:
self.__unstaged_abbrev = 'D'
elif self.__unstaged_diff.new_file:
self.__unstaged_abbrev = 'A'
else:
self.__unstaged_abbrev = 'M'
self.__unstaged_is_modified = True
if self.__head_blob is None:
self.__head_blob = self.__unstaged_diff.a_blob
self.__state_calculated = True
def getStagedAbbreviatedStatus( self ):
self.__calculateState()
return self.__staged_abbrev
def getUnstagedAbbreviatedStatus( self ):
self.__calculateState()
return self.__unstaged_abbrev
#------------------------------------------------------------
def isControlled( self ):
if self.__staged_diff is not None:
return True
return self.__index_entry is not None
def isUncontrolled( self ):
return self.__untracked
def isIgnored( self ):
if self.__staged_diff is not None:
return False
if self.__index_entry is not None:
return False
        # untracked files have had ignored files stripped out
if self.__untracked:
return False
return True
# ------------------------------
def isStagedNew( self ):
self.__calculateState()
return self.__staged_abbrev == 'A'
def isStagedModified( self ):
self.__calculateState()
return self.__staged_abbrev == 'M'
def isStagedDeleted( self ):
self.__calculateState()
return self.__staged_abbrev == 'D'
def isStagedRenamed( self ):
self.__calculateState()
return self.__staged_abbrev == 'R'
def isUnstagedModified( self ):
self.__calculateState()
return self.__unstaged_abbrev == 'M'
def isUnstagedDeleted( self ):
self.__calculateState()
return self.__unstaged_abbrev == 'D'
# ------------------------------------------------------------
def canCommit( self ):
return self.__staged_abbrev != ''
def canStage( self ):
return self.__unstaged_abbrev != '' or self.__untracked
def canUnstage( self ):
return self.__staged_abbrev != ''
def canRevert( self ):
return (self.isUnstagedDeleted()
or self.isUnstagedModified()
or self.isStagedNew()
or self.isStagedRenamed()
or self.isStagedDeleted()
or self.isStagedModified())
# ------------------------------------------------------------
def canDiffHeadVsStaged( self ):
self.__calculateState()
return self.__staged_is_modified
def canDiffStagedVsWorking( self ):
self.__calculateState()
return self.__unstaged_is_modified and self.__staged_is_modified
def canDiffHeadVsWorking( self ):
self.__calculateState()
return self.__unstaged_is_modified
def getTextLinesWorking( self ):
path = self.absolutePath()
with path.open( encoding='utf-8' ) as f:
all_lines = f.read().split( '\n' )
if all_lines[-1] == '':
return all_lines[:-1]
else:
return all_lines
def getTextLinesHead( self ):
return self.__getTextLinesFromBlob( self.getHeadBlob() )
def getTextLinesStaged( self ):
return self.__getTextLinesFromBlob( self.getStagedBlob() )
def __getTextLinesFromBlob( self, blob ):
data = blob.data_stream.read()
text = data.decode( 'utf-8' )
all_lines = text.split('\n')
if all_lines[-1] == '':
return all_lines[:-1]
else:
return all_lines
def getTextLinesForCommit( self, commit_id ):
git_filepath = pathlib.PurePosixPath( self.__filepath )
text = self.__project.cmdShow( '%s:%s' % (commit_id, git_filepath) )
all_lines = text.split('\n')
if all_lines[-1] == '':
return all_lines[:-1]
else:
return all_lines
def getHeadBlob( self ):
return self.__head_blob
def getStagedBlob( self ):
return self.__staged_blob
class GitCommitLogNode:
def __init__( self, commit ):
self.__commit = commit
self.__all_changes = []
def _addChanges( self, all_added, all_deleted, all_renamed, all_modified ):
for name in all_added:
self.__all_changes.append( ('A', name, '' ) )
for name in all_deleted:
self.__all_changes.append( ('D', name, '' ) )
for name, old_name in all_renamed:
self.__all_changes.append( ('R', name, old_name ) )
for name in all_modified:
self.__all_changes.append( ('M', name, '' ) )
def commitStats( self ):
return self.__commit.stats
def commitTree( self ):
return self.__commit.tree
def commitPreviousTree( self ):
if len(self.__commit.parents) == 0:
return None
previous_commit = self.__commit.parents[0]
return previous_commit.tree
def commitId( self ):
return self.__commit.hexsha
def commitIdString( self ):
return self.__commit.hexsha
def commitAuthor( self ):
return self.__commit.author.name
def commitAuthorEmail( self ):
return self.__commit.author.email
def commitDate( self ):
return self.__commit.committed_datetime
def commitMessage( self ):
return self.__commit.message
def commitMessageHeadline( self ):
return self.__commit.message.split('\n')[0]
def commitFileChanges( self ):
return self.__all_changes
class GitProjectTreeNode:
def __init__( self, project, name, path ):
self.project = project
self.name = name
self.is_by_path = False
self.__path = path
self.__all_folders = {}
self.__all_files = {}
def __repr__( self ):
return '<GitProjectTreeNode: project %r, path %s>' % (self.project, self.__path)
def updateTreeNode( self ):
pass
def isByPath( self ):
return self.is_by_path
def addFileByName( self, path ):
assert path.name != ''
self.__all_files[ path.name ] = path
def addFileByPath( self, path ):
assert path.name != ''
self.is_by_path = True
path = path
self.__all_files[ path ] = path
def getAllFileNames( self ):
return self.__all_files.keys()
def addFolder( self, name, node ):
assert type(name) == str and name != '', 'name %r, node %r' % (name, node)
assert isinstance( node, GitProjectTreeNode )
self.__all_folders[ name ] = node
def getFolder( self, name ):
assert type(name) == str
return self.__all_folders[ name ]
def getAllFolderNodes( self ):
return self.__all_folders.values()
def getAllFolderNames( self ):
return self.__all_folders.keys()
def hasFolder( self, name ):
assert type(name) == str
return name in self.__all_folders
def _dumpTree( self, indent ):
self.project.debugLog( 'dump: %*s%r' % (indent, '', self) )
for file in sorted( self.__all_files ):
self.project.debugLog( 'dump %*s file: %r' % (indent, '', file) )
for folder in sorted( self.__all_folders ):
self.__all_folders[ folder ]._dumpTree( indent+4 )
def isNotEqual( self, other ):
return (self.relativePath() != other.relativePath()
or self.project.isNotEqual( other.project ))
def __lt__( self, other ):
return self.name < other.name
def relativePath( self ):
return self.__path
def absolutePath( self ):
return self.project.projectPath() / self.__path
def getStatusEntry( self, name ):
path = self.__all_files[ name ]
if path in self.project.all_file_state:
entry = self.project.all_file_state[ path ]
else:
entry = WbGitFileState( self.project, path )
return entry
class Progress(git.RemoteProgress):
def __init__( self, progress_call_back ):
self.progress_call_back = progress_call_back
super().__init__()
self.__all_dropped_lines = []
all_update_stages = {
git.RemoteProgress.COUNTING: 'Counting',
git.RemoteProgress.COMPRESSING: 'Compressing',
git.RemoteProgress.WRITING: 'Writing',
git.RemoteProgress.RECEIVING: 'Receiving',
git.RemoteProgress.RESOLVING: 'Resolving',
git.RemoteProgress.FINDING_SOURCES: 'Finding Sources',
git.RemoteProgress.CHECKING_OUT: 'Checking Out',
}
def update( self, op_code, cur_count, max_count=None, message='' ):
stage_name = self.all_update_stages.get( op_code&git.RemoteProgress.OP_MASK, 'Unknown' )
is_begin = op_code&git.RemoteProgress.BEGIN != 0
is_end = op_code&git.RemoteProgress.END != 0
self.progress_call_back( is_begin, is_end, stage_name, cur_count, max_count, message )
def line_dropped( self, line ):
if line.startswith( 'POST git-upload-pack' ):
return
self.__all_dropped_lines.append( line )
def allErrorLines( self ):
return self.error_lines + self.__all_dropped_lines
def allDroppedLines( self ):
return self.__all_dropped_lines
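# Illustrative sketch only (not part of the module): a progress_callback with
# the signature Progress.update() expects above. The print format is arbitrary;
# real callers such as cmdPull()/cmdPush() supply their own callback.
def exampleProgressCallback( is_begin, is_end, stage_name, cur_count, max_count, message ):
    phase = 'begin' if is_begin else ('end' if is_end else '...')
    total = max_count if max_count is not None else '?'
    print( '%s %s %s/%s %s' % (stage_name, phase, cur_count, total, message) )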
| 33.741353
| 130
| 0.586661
| 42,194
| 0.940235
| 0
| 0
| 0
| 0
| 0
| 0
| 4,444
| 0.099028
|
fb64c2c423679d3b9a605145467c5cb4184c77b4
| 443
|
py
|
Python
|
stackflowCrawl/spiders/stackoverflow/constants/consult.py
|
matheuslins/stackflowCrawl
|
b6adacc29bfc2e6210a24968f691a54854952b2e
|
[
"MIT"
] | null | null | null |
stackflowCrawl/spiders/stackoverflow/constants/consult.py
|
matheuslins/stackflowCrawl
|
b6adacc29bfc2e6210a24968f691a54854952b2e
|
[
"MIT"
] | 2
|
2021-03-31T19:47:59.000Z
|
2021-12-13T20:41:06.000Z
|
stackflowCrawl/spiders/stackoverflow/constants/consult.py
|
matheuslins/stackflowCrawl
|
b6adacc29bfc2e6210a24968f691a54854952b2e
|
[
"MIT"
] | null | null | null |
XPAHS_CONSULT = {
'jobs_urls': '//div[contains(@class, "listResults")]//div[contains(@data-jobid, "")]//h2//a/@href',
'results': '//span[@class="description fc-light fs-body1"]//text()',
'pagination_indicator': '//a[contains(@class, "s-pagination--item")][last()]//span[contains(text(), "next")]',
'pagination_url': '//a[contains(@class, "s-pagination--item")][last()]/@href',
}
START_URL = 'https://stackoverflow.com/jobs/'
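# Illustrative sketch only (not part of the constants module): applying these
# XPaths with parsel, the selector library Scrapy's own selectors build on.
# The html argument stands for whatever response body the spider downloaded.
def example_extract_job_urls(html):
    from parsel import Selector
    selector = Selector(text=html)
    return selector.xpath(XPAHS_CONSULT['jobs_urls']).getall()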
| 44.3
| 114
| 0.629797
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 376
| 0.848758
|
fb6699684cb8142168142ff3619e29cd5107fcf6
| 3,676
|
py
|
Python
|
mainSample.py
|
snipeso/sample_psychopy
|
332cd34cf2c584f9ba01302050964649dd2e5367
|
[
"Linux-OpenIB"
] | null | null | null |
mainSample.py
|
snipeso/sample_psychopy
|
332cd34cf2c584f9ba01302050964649dd2e5367
|
[
"Linux-OpenIB"
] | 3
|
2021-06-02T00:56:48.000Z
|
2021-09-08T01:35:53.000Z
|
mainSample.py
|
snipeso/sample_psychopy
|
332cd34cf2c584f9ba01302050964649dd2e5367
|
[
"Linux-OpenIB"
] | null | null | null |
import logging
import os
import random
import time
import datetime
import sys
import math
from screen import Screen
from scorer import Scorer
from trigger import Trigger
from psychopy import core, event, sound
from psychopy.hardware import keyboard
from pupil_labs import PupilCore
from datalog import Datalog
from config.configSample import CONF
#########################################################################
######################################
# Initialize screen, logger and inputs
logging.basicConfig(
level=CONF["loggingLevel"],
format='%(asctime)s-%(levelname)s-%(message)s',
) # This is a log for debugging the script, and prints messages to the terminal
# needs to be first, so that if it doesn't succeed, it doesn't freeze everything
eyetracker = PupilCore(ip=CONF["pupillometry"]
["ip"], port=CONF["pupillometry"]["port"], shouldRecord=CONF["recordEyetracking"])
trigger = Trigger(CONF["trigger"]["serial_device"],
CONF["sendTriggers"], CONF["trigger"]["labels"])
screen = Screen(CONF)
datalog = Datalog(OUTPUT_FOLDER=os.path.join(
'output', CONF["participant"] + "_" + CONF["session"],
datetime.datetime.now().strftime("%Y-%m-%d")), CONF=CONF) # This is for saving data
kb = keyboard.Keyboard()
mainClock = core.MonotonicClock() # starts clock for timestamping events
alarm = sound.Sound(os.path.join('sounds', CONF["instructions"]["alarm"]),
stereo=True)
questionnaireReminder = sound.Sound(os.path.join(
'sounds', CONF["instructions"]["questionnaireReminder"]), stereo=True)
scorer = Scorer()
logging.info('Initialization completed')
#########################################################################
def quitExperimentIf(shouldQuit):
"Quit experiment if condition is met"
if shouldQuit:
trigger.send("Quit")
scorer.getScore()
logging.info('quit experiment')
eyetracker.stop_recording()
trigger.reset()
sys.exit(2)
def onFlip(stimName, logName):
"send trigger on flip, set keyboard clock, and save timepoint"
trigger.send(stimName)
kb.clock.reset() # this starts the keyboard clock as soon as stimulus appears
datalog[logName] = mainClock.getTime()
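# Typical registration (illustrative only): schedule onFlip to run on the next
# screen refresh, e.g. via PsychoPy's Window.callOnFlip. Whether Screen exposes
# its window object, and under what attribute name, is an assumption here.
#   screen.window.callOnFlip(onFlip, "Stim", "stimStart")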
##############
# Introduction
##############
# Display overview of session
screen.show_overview()
core.wait(CONF["timing"]["overview"])
# Optionally, display instructions
if CONF["showInstructions"]:
screen.show_instructions()
key = event.waitKeys()
quitExperimentIf(key[0] == 'q')
eyetracker.start_recording(os.path.join(
CONF["participant"], CONF["session"], CONF["task"]["name"]))
# Blank screen for initial rest
screen.show_blank()
logging.info('Starting blank period')
trigger.send("StartBlank")
core.wait(CONF["timing"]["rest"])
trigger.send("EndBlank")
# Cue start of the experiment
screen.show_cue("START")
trigger.send("Start")
core.wait(CONF["timing"]["cue"])
#################
# Main experiment
#################
# customize
datalog["trialID"] = trigger.sendTriggerId()
eyetracker.send_trigger("Stim", {"id": 1, "condition": "sample"})
datalog["pupilSize"] = eyetracker.getPupildiameter()
# save data to file
datalog.flush()
###########
# Conclusion
###########
# End main experiment
screen.show_cue("DONE!")
trigger.send("End")
core.wait(CONF["timing"]["cue"])
# Blank screen for final rest
screen.show_blank()
logging.info('Starting blank period')
trigger.send("StartBlank")
core.wait(CONF["timing"]["rest"])
trigger.send("EndBlank")
logging.info('Finished')
scorer.getScore()
trigger.reset()
eyetracker.stop_recording()
questionnaireReminder.play()
core.wait(2)
| 24.344371
| 105
| 0.658052
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,563
| 0.42519
|
fb67712141abf405660b20968e896ccaf386184f
| 3,696
|
py
|
Python
|
src/commercetools/services/inventory.py
|
labd/commercetools-python-sdk
|
d8ec285f08d56ede2e4cad45c74833f5b609ab5c
|
[
"MIT"
] | 15
|
2018-11-02T14:35:52.000Z
|
2022-03-16T07:51:44.000Z
|
src/commercetools/services/inventory.py
|
lime-green/commercetools-python-sdk
|
63b77f6e5abe43e2b3ebbf3cdbbe00c7cf80dca6
|
[
"MIT"
] | 84
|
2018-11-02T12:50:32.000Z
|
2022-03-22T01:25:54.000Z
|
src/commercetools/services/inventory.py
|
lime-green/commercetools-python-sdk
|
63b77f6e5abe43e2b3ebbf3cdbbe00c7cf80dca6
|
[
"MIT"
] | 13
|
2019-01-03T09:16:50.000Z
|
2022-02-15T18:37:19.000Z
|
# DO NOT EDIT! This file is automatically generated
import typing
from commercetools.helpers import RemoveEmptyValuesMixin
from commercetools.platform.models.inventory import (
InventoryEntry,
InventoryEntryDraft,
InventoryEntryUpdate,
InventoryEntryUpdateAction,
InventoryPagedQueryResponse,
)
from commercetools.typing import OptionalListStr
from . import abstract, traits
class _InventoryEntryQuerySchema(
traits.ExpandableSchema,
traits.SortableSchema,
traits.PagingSchema,
traits.QuerySchema,
):
pass
class _InventoryEntryUpdateSchema(traits.ExpandableSchema, traits.VersionedSchema):
pass
class _InventoryEntryDeleteSchema(traits.VersionedSchema, traits.ExpandableSchema):
pass
class InventoryEntryService(abstract.AbstractService):
"""Inventory allows you to track stock quantities."""
def get_by_id(self, id: str, *, expand: OptionalListStr = None) -> InventoryEntry:
params = self._serialize_params({"expand": expand}, traits.ExpandableSchema)
return self._client._get(
endpoint=f"inventory/{id}", params=params, response_class=InventoryEntry
)
def query(
self,
*,
expand: OptionalListStr = None,
sort: OptionalListStr = None,
limit: int = None,
offset: int = None,
with_total: bool = None,
where: OptionalListStr = None,
predicate_var: typing.Dict[str, str] = None,
) -> InventoryPagedQueryResponse:
"""Inventory allows you to track stock quantities."""
params = self._serialize_params(
{
"expand": expand,
"sort": sort,
"limit": limit,
"offset": offset,
"with_total": with_total,
"where": where,
"predicate_var": predicate_var,
},
_InventoryEntryQuerySchema,
)
return self._client._get(
endpoint="inventory",
params=params,
response_class=InventoryPagedQueryResponse,
)
def create(
self, draft: InventoryEntryDraft, *, expand: OptionalListStr = None
) -> InventoryEntry:
"""Inventory allows you to track stock quantities."""
params = self._serialize_params({"expand": expand}, traits.ExpandableSchema)
return self._client._post(
endpoint="inventory",
params=params,
data_object=draft,
response_class=InventoryEntry,
)
def update_by_id(
self,
id: str,
version: int,
actions: typing.List[InventoryEntryUpdateAction],
*,
expand: OptionalListStr = None,
force_update: bool = False,
) -> InventoryEntry:
params = self._serialize_params({"expand": expand}, _InventoryEntryUpdateSchema)
update_action = InventoryEntryUpdate(version=version, actions=actions)
return self._client._post(
endpoint=f"inventory/{id}",
params=params,
data_object=update_action,
response_class=InventoryEntry,
force_update=force_update,
)
def delete_by_id(
self,
id: str,
version: int,
*,
expand: OptionalListStr = None,
force_delete: bool = False,
) -> InventoryEntry:
params = self._serialize_params(
{"version": version, "expand": expand}, _InventoryEntryDeleteSchema
)
return self._client._delete(
endpoint=f"inventory/{id}",
params=params,
response_class=InventoryEntry,
force_delete=force_delete,
)
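# Illustrative sketch only (this file is generated; the lines below are not part
# of it). `service` stands for an InventoryEntryService obtained from a
# configured commercetools client; the variable and predicate placeholder names
# are assumptions.
#
#   response = service.query(
#       where="sku = :wantedSku",
#       predicate_var={"wantedSku": "SKU-123"},
#       limit=20,
#   )
#   for entry in response.results:
#       print(entry.sku, entry.quantity_on_stock)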
| 30.545455
| 88
| 0.623106
| 3,287
| 0.88934
| 0
| 0
| 0
| 0
| 0
| 0
| 387
| 0.104708
|
fb685e91f9d3ddb25b69ea95c37b26cc21ab500f
| 8,008
|
py
|
Python
|
qmla/remote_model_learning.py
|
flynnbr11/QMD
|
ac8cfe1603658ee9b916452f29b99460ee5e3d44
|
[
"MIT"
] | 9
|
2021-01-08T12:49:01.000Z
|
2021-12-29T06:59:32.000Z
|
qmla/remote_model_learning.py
|
flynnbr11/QMD
|
ac8cfe1603658ee9b916452f29b99460ee5e3d44
|
[
"MIT"
] | 2
|
2021-02-22T20:42:25.000Z
|
2021-02-22T22:22:59.000Z
|
qmla/remote_model_learning.py
|
flynnbr11/QMD
|
ac8cfe1603658ee9b916452f29b99460ee5e3d44
|
[
"MIT"
] | 9
|
2021-02-15T14:18:48.000Z
|
2021-12-17T04:02:07.000Z
|
from __future__ import print_function # so print doesn't show brackets
import copy
import numpy as np
import time as time
import matplotlib.pyplot as plt
import pickle
import redis
import qmla.model_for_learning
import qmla.redis_settings
import qmla.logging
pickle.HIGHEST_PROTOCOL = 4
plt.switch_backend("agg")
__all__ = ["remote_learn_model_parameters"]
def remote_learn_model_parameters(
name,
model_id,
branch_id,
exploration_rule,
qmla_core_info_dict=None,
remote=False,
host_name="localhost",
port_number=6379,
qid=0,
log_file="rq_output.log",
):
"""
Standalone function to perform Quantum Hamiltonian Learning on individual models.
Used in conjunction with redis databases so this calculation can be
performed without any knowledge of the QMLA instance.
Given model ids and names are used to instantiate
the ModelInstanceForLearning class, which is then used
    for learning the model's parameters.
    QMLA info is unpickled from a redis database, containing
true operator, params etc.
Once parameters are learned, we pickle the results to dictionaries
held on a redis database which can be accessed by other actors.
:param str name: model name string
:param int model_id: unique model id
:param int branch_id: QMLA branch where the model was generated
:param str exploration_rule: string corresponding to a unique exploration strategy,
        used by get_exploration_class to generate an
        ExplorationStrategy (or subclass) instance.
:param dict qmla_core_info_dict: crucial data for QMLA, such as number
of experiments/particles etc. Default None: core info is stored on the
redis database so can be retrieved there on a server; if running locally,
can be passed to save pickling.
:param bool remote: whether QMLA is running remotely via RQ workers.
:param str host_name: name of host server on which redis database exists.
:param int port_number: this QMLA instance's unique port number,
on which redis database exists.
:param int qid: QMLA id, unique to a single instance within a run.
Used to identify the redis database corresponding to this instance.
:param str log_file: Path of the log file.
"""
def log_print(to_print_list):
qmla.logging.print_to_log(
to_print_list=to_print_list,
log_file=log_file,
log_identifier="RemoteLearnModel {}".format(model_id),
)
log_print(["Starting QHL for Model {} on branch {}".format(model_id, branch_id)])
time_start = time.time()
num_redis_retries = 5
# Access databases
redis_databases = qmla.redis_settings.get_redis_databases_by_qmla_id(
host_name, port_number, qid
)
qmla_core_info_database = redis_databases["qmla_core_info_database"]
learned_models_info_db = redis_databases["learned_models_info_db"]
learned_models_ids = redis_databases["learned_models_ids"]
active_branches_learning_models = redis_databases["active_branches_learning_models"]
any_job_failed_db = redis_databases["any_job_failed"]
if qmla_core_info_dict is not None:
# for local runs, qmla_core_info_dict passed, with probe_dict included
# in it.
probe_dict = qmla_core_info_dict["probe_dict"]
else:
qmla_core_info_dict = pickle.loads(qmla_core_info_database["qmla_settings"])
probe_dict = pickle.loads(qmla_core_info_database["probes_system"])
true_model_terms_matrices = qmla_core_info_dict["true_oplist"]
qhl_plots = qmla_core_info_dict["qhl_plots"]
plots_directory = qmla_core_info_dict["plots_directory"]
long_id = qmla_core_info_dict["long_id"]
# Generate model instance
qml_instance = qmla.model_for_learning.ModelInstanceForLearning(
model_id=model_id,
model_name=name,
qid=qid,
log_file=log_file,
exploration_rule=exploration_rule,
host_name=host_name,
port_number=port_number,
)
try:
# Learn parameters
update_timer_start = time.time()
qml_instance.update_model()
log_print(
["Time for update alone: {}".format(time.time() - update_timer_start)]
)
# Evaluate learned parameterisation
# qml_instance.compute_likelihood_after_parameter_learning()
except NameError:
log_print(
[
"Model learning failed. QHL failed for model id {}. Setting job failure model_building_utilities.".format(
model_id
)
]
)
any_job_failed_db.set("Status", 1)
raise
except BaseException:
log_print(
[
"Model learning failed. QHL failed for model id {}. Setting job failure model_building_utilities.".format(
model_id
)
]
)
any_job_failed_db.set("Status", 1)
raise
if qhl_plots:
log_print(["Drawing plots for QHL"])
try:
if len(true_model_terms_matrices) == 1: # TODO buggy
qml_instance.plot_distribution_progression(
save_to_file=str(
plots_directory
+ "qhl_distribution_progression_"
+ str(long_id)
+ ".png"
)
)
qml_instance.plot_distribution_progression(
renormalise=False,
save_to_file=str(
plots_directory
+ "qhl_distribution_progression_uniform_"
+ str(long_id)
+ ".png"
),
)
except BaseException:
pass
# Throw away model instance; only need to store results.
updated_model_info = copy.deepcopy(qml_instance.learned_info_dict())
compressed_info = pickle.dumps(updated_model_info, protocol=4)
# Store the (compressed) result set on the redis database.
for k in range(num_redis_retries):
try:
learned_models_info_db.set(str(model_id), compressed_info)
log_print(
[
"learned_models_info_db added to db for model {} after {} attempts".format(
str(model_id), k
)
]
)
break
except Exception as e:
if k == num_redis_retries - 1:
log_print(
["Model learning failed at the storage stage. Error: {}".format(e)]
)
any_job_failed_db.set("Status", 1)
pass
# Update databases to record that this model has finished.
for k in range(num_redis_retries):
try:
active_branches_learning_models.incr(int(branch_id), 1)
learned_models_ids.set(str(model_id), 1)
log_print(
[
"Updated model/branch learned on redis db {}/{}".format(
model_id, branch_id
)
]
)
break
except Exception as e:
if k == num_redis_retries - 1:
log_print(["Model learning failed to update branch info. Error: ", e])
any_job_failed_db.set("Status", 1)
if remote:
del updated_model_info
del compressed_info
del qml_instance
log_print(
[
"Learned model; remote time:",
str(np.round((time.time() - time_start), 2)),
]
)
return None
else:
return updated_model_info
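# Illustrative sketch only (not part of this module): how another actor can
# fetch the result set stored by remote_learn_model_parameters(). The database
# and key names follow the code above; the function itself is hypothetical.
def example_fetch_learned_model(model_id, host_name="localhost", port_number=6379, qid=0):
    dbs = qmla.redis_settings.get_redis_databases_by_qmla_id(
        host_name, port_number, qid
    )
    raw = dbs["learned_models_info_db"].get(str(model_id))
    return pickle.loads(raw) if raw is not None else None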
| 35.591111
| 123
| 0.604146
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 3,127
| 0.390485
|
fb69066846fdd4ee95649e7481b0ff3dce03d604
| 9,390
|
py
|
Python
|
pylinex/quantity/CompiledQuantity.py
|
CU-NESS/pylinex
|
b6f342595b6a154e129eb303782e5268088f34d5
|
[
"Apache-2.0"
] | null | null | null |
pylinex/quantity/CompiledQuantity.py
|
CU-NESS/pylinex
|
b6f342595b6a154e129eb303782e5268088f34d5
|
[
"Apache-2.0"
] | null | null | null |
pylinex/quantity/CompiledQuantity.py
|
CU-NESS/pylinex
|
b6f342595b6a154e129eb303782e5268088f34d5
|
[
"Apache-2.0"
] | null | null | null |
"""
File: pylinex/quantity/CompiledQuantity.py
Author: Keith Tauscher
Date: 3 Sep 2017
Description: File containing a class representing a list of Quantities to be
evaluated with the same (or overlapping) arguments. When it is
called, each underlying Quantity is called.
"""
from ..util import int_types, sequence_types, Savable, Loadable
from .Quantity import Quantity
from .AttributeQuantity import AttributeQuantity
from .ConstantQuantity import ConstantQuantity
from .FunctionQuantity import FunctionQuantity
try:
# this runs with no issues in python 2 but raises error in python 3
basestring
except:
# this try/except allows for python 2/3 compatible string type checking
basestring = str
class CompiledQuantity(Quantity, Savable, Loadable):
"""
Class representing a list of Quantities to be evaluated with the same (or
overlapping) arguments. When it is called, each underlying Quantity is
called.
"""
def __init__(self, name, *quantities):
"""
        Initializes a new CompiledQuantity made of the given quantities.
name: string identifier of this Quantity
*quantities: unpacked list of quantities to call when this object is
called
"""
Quantity.__init__(self, name)
self.quantities = quantities
@property
def quantities(self):
"""
Property storing the individual quantities underlying this
CompiledQuantity.
"""
if not hasattr(self, '_quantities'):
raise AttributeError("quantities was referenced before it was " +\
"set.")
return self._quantities
@quantities.setter
def quantities(self, value):
"""
Setter for the quantities property.
value: must be a sequence of Quantity objects
"""
if type(value) in sequence_types:
if all([isinstance(quantity, Quantity) for quantity in value]):
self._quantities = value
else:
raise TypeError("Not all elements of the sequence to " +\
"which quantities was set were Quantity " +\
"objects.")
else:
raise TypeError("quantities was set to a non-sequence.")
@property
def num_quantities(self):
"""
Property storing the number of quantities compiled in this object.
"""
if not hasattr(self, '_num_quantities'):
self._num_quantities = len(self.quantities)
return self._num_quantities
@property
def names(self):
"""
Property storing the list of names of the quantities underlying this
object.
"""
if not hasattr(self, '_names'):
self._names = [quantity.name for quantity in self.quantities]
return self._names
@property
def index_dict(self):
"""
Property storing a dictionary that connects the names of quantities to
their indices in the list of Quantity objects underlying this object.
"""
if not hasattr(self, '_index_dict'):
self._index_dict = {}
for (iquantity, quantity) in enumerate(self.quantities):
self._index_dict[quantity.name] = iquantity
return self._index_dict
@property
def can_index_by_string(self):
"""
Property storing a Boolean describing whether this Quantity can be
indexed by string. This essentially checks whether the names of the
quantities underlying this object are unique.
"""
if not hasattr(self, '_can_index_by_string'):
self._can_index_by_string =\
(self.num_quantities == len(set(self.names)))
return self._can_index_by_string
def __getitem__(self, index):
"""
Gets the quantity associated with the index.
index: if index is a string, it is assumed to be the name of one of the
Quantity objects underlying this object.
(index can only be a string if the
can_index_by_string property is True.)
if index is an int, it is taken to be the index of one of the
Quantity objects underlying this one.
returns: the Quantity object described by the given index
"""
if type(index) in int_types:
return self.quantities[index]
elif isinstance(index, basestring):
if self.can_index_by_string:
return self.quantities[self.index_dict[index]]
else:
raise TypeError("CompiledQuantity can only be indexed by " +\
"string if the names of the quantities " +\
"underlying it are unique.")
else:
raise IndexError("CompiledQuantity can only be indexed by an " +\
"integer index or a string quantity name.")
def __add__(self, other):
"""
Appends other to this CompiledQuantity.
other: CompiledQuantity (or some other Quantity)
        returns: if other is another CompiledQuantity, the names and quantity
                 lists of both CompiledQuantity objects are combined
otherwise, other must be a Quantity object. It will be added
to the quantity list of this CompiledQuantity
(whose name won't change)
"""
if isinstance(other, CompiledQuantity):
new_name = '{0!s}+{1!s}'.format(self.name, other.name)
new_quantities = self.quantities + other.quantities
elif isinstance(other, Quantity):
new_name = self.name
new_quantities = [quantity for quantity in self.quantities]
new_quantities.append(other)
else:
raise TypeError("Only Quantity objects can be added to " +\
"compiled quantities.")
return CompiledQuantity(new_name, *new_quantities)
def __call__(self, *args, **kwargs):
"""
        Finds the values of all of the Quantity objects underlying this object.
args: list of arguments to pass on to the constituent Quantity objects
kwargs: list of keyword arguments to pass on to the constituent
Quantity objects
returns: list containing the values of all of the Quantity objects
underlying this one
"""
return [quantity(*args, **kwargs) for quantity in self.quantities]
def __contains__(self, key):
"""
Checks if a quantity with the given name exists in this
CompiledQuantity.
key: string name of Quantity to check for
returns: True if there exists at least one Quantity named key
"""
return any([(quantity.name == key) for quantity in self.quantities])
def fill_hdf5_group(self, group, exclude=[]):
"""
Fills given hdf5 file group with data about this CompiledQuantity.
        group: hdf5 file group to fill with data about this CompiledQuantity
        exclude: list of names of quantities to skip when saving
        """
iquantity = 0
group.attrs['name'] = self.name
group.attrs['class'] = 'CompiledQuantity'
for quantity in self.quantities:
excluded = (quantity.name in exclude)
savable = isinstance(quantity, Savable)
if (not excluded) and savable:
subgroup = group.create_group('quantity_{}'.format(iquantity))
if isinstance(quantity, Savable):
quantity.fill_hdf5_group(subgroup)
else:
raise TypeError("This CompiledQuantity cannot be saved " +\
"because it contains Quantity objects which cannot " +\
"be saved.")
iquantity += 1
@staticmethod
def load_from_hdf5_group(group):
"""
Loads a CompiledQuantity from the given hdf5 group.
group: hdf5 file group from which to load a CompiledQuantity
returns: CompiledQuantity loaded from given hdf5 file group
"""
try:
assert group.attrs['class'] == 'CompiledQuantity'
except:
raise TypeError("This hdf5 file group does not seem to contain " +\
"a CompiledQuantity.")
name = group.attrs['name']
iquantity = 0
quantities = []
while 'quantity_{}'.format(iquantity) in group:
subgroup = group['quantity_{}'.format(iquantity)]
try:
class_name = subgroup.attrs['class']
cls = eval(class_name)
except:
raise TypeError("One of the quantities in this " +\
"CompiledQuantity could not be loaded; its class was " +\
"not recognized.")
quantities.append(cls.load_from_hdf5_group(subgroup))
iquantity += 1
return CompiledQuantity(name, *quantities)
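# Illustrative usage sketch (not part of the module). _ExampleQuantity is a
# stand-in defined only for this comment; real code would compile e.g.
# ConstantQuantity / FunctionQuantity / AttributeQuantity objects instead.
#
#   class _ExampleQuantity(Quantity):
#       def __call__(self, *args, **kwargs):
#           return len(args)
#
#   compiled = CompiledQuantity('example', _ExampleQuantity('a'), _ExampleQuantity('b'))
#   compiled(1, 2, 3)   # -> [3, 3], one value per underlying quantity
#   compiled['a']       # string indexing works because the quantity names are unique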
| 39.620253
| 79
| 0.583174
| 8,650
| 0.921193
| 0
| 0
| 3,638
| 0.387433
| 0
| 0
| 4,831
| 0.514483
|
fb6a3d12a6011f130cb0dca995f5e63b890b615a
| 184
|
py
|
Python
|
Desafios/desafio48.py
|
ArthurBrito1/MY-SCRIPTS-PYTHON
|
86967fe293715a705ac50e908d3369fa3257b5a2
|
[
"MIT"
] | 1
|
2019-11-21T02:08:58.000Z
|
2019-11-21T02:08:58.000Z
|
Desafios/desafio48.py
|
ArthurBrito1/MY-SCRIPTS-PYTHON
|
86967fe293715a705ac50e908d3369fa3257b5a2
|
[
"MIT"
] | null | null | null |
Desafios/desafio48.py
|
ArthurBrito1/MY-SCRIPTS-PYTHON
|
86967fe293715a705ac50e908d3369fa3257b5a2
|
[
"MIT"
] | null | null | null |
s = 0
cont = 0
for c in range(1, 501, 2):
if c % 3 == 0:
s = s + c
cont = cont + 1
print('The sum of all {} requested values is {}'.format(cont, s))
| 20.444444
| 72
| 0.483696
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 49
| 0.264865
|
fb6d1002b4582dd7b3bb100c3a8d3d43f66ca13b
| 8,588
|
py
|
Python
|
tests/test_k8s_cronjob.py
|
riconnon/kubernetes-py
|
42a4537876985ed105ee44b6529763ba5d57c179
|
[
"Apache-2.0"
] | null | null | null |
tests/test_k8s_cronjob.py
|
riconnon/kubernetes-py
|
42a4537876985ed105ee44b6529763ba5d57c179
|
[
"Apache-2.0"
] | null | null | null |
tests/test_k8s_cronjob.py
|
riconnon/kubernetes-py
|
42a4537876985ed105ee44b6529763ba5d57c179
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# This file is subject to the terms and conditions defined in
# file 'LICENSE.md', which is part of this source code package.
#
import time
import uuid
from kubernetes.K8sCronJob import K8sCronJob
from kubernetes.K8sPod import K8sPod
from kubernetes.models.v2alpha1.CronJob import CronJob
from kubernetes.K8sExceptions import CronJobAlreadyRunningException
from tests import _constants
from tests import _utils
from tests.BaseTest import BaseTest
class K8sCronJobTests(BaseTest):
def setUp(self):
_utils.cleanup_cronjobs()
_utils.cleanup_jobs()
_utils.cleanup_pods()
def tearDown(self):
_utils.cleanup_cronjobs()
_utils.cleanup_jobs()
_utils.cleanup_pods()
# --------------------------------------------------------------------------------- init
def test_init_no_args(self):
try:
K8sCronJob()
self.fail("Should not fail.")
except SyntaxError:
pass
except IOError:
pass
except Exception as err:
self.fail("Unhandled exception: [ {0} ]".format(err))
def test_init_with_invalid_config(self):
config = object()
with self.assertRaises(SyntaxError):
K8sCronJob(config=config)
def test_init_with_invalid_name(self):
name = object()
with self.assertRaises(SyntaxError):
_utils.create_cronjob(name=name)
def test_init_with_name(self):
name = "yomama"
rc = _utils.create_cronjob(name=name)
self.assertIsNotNone(rc)
self.assertIsInstance(rc, K8sCronJob)
self.assertEqual(rc.name, name)
# --------------------------------------------------------------------------------- containers
def test_containers(self):
c_name = "redis"
c_image = "redis:latest"
c_image_2 = "redis:3.2.3"
container = _utils.create_container(name=c_name, image=c_image)
name = "job-{}".format(uuid.uuid4())
cj = _utils.create_cronjob(name=name)
cj.add_container(container)
self.assertEqual(1, len(cj.containers))
self.assertIn(c_name, cj.container_image)
self.assertEqual(c_image, cj.container_image[c_name])
container = _utils.create_container(name=c_name, image=c_image_2)
cj.add_container(container)
self.assertEqual(1, len(cj.containers))
self.assertEqual(c_image_2, cj.container_image[c_name])
# --------------------------------------------------------------------------------- imagePullSecrets
def test_add_image_pull_secrets(self):
cfg = _utils.create_config()
cfg.pull_secret = [
{'name': 'secret-name'},
{'name': 'other-secret-name'},
{'name': 'secret-name'} # duplicate
]
cj = _utils.create_cronjob(config=cfg, name="yo")
self.assertEqual(2, len(cj.image_pull_secrets)) # duplicate not present
# --------------------------------------------------------------------------------- api - create
def test_api_create(self):
name = "job-{}".format(uuid.uuid4())
job = CronJob(_constants.scheduledjob())
k8s_cronjob = _utils.create_cronjob(name=name)
k8s_cronjob.model = job
if _utils.is_reachable(k8s_cronjob.config):
k8s_cronjob.create()
self.assertIsInstance(k8s_cronjob, K8sCronJob)
def test_api_create_long_running_with_concurrency(self):
name = "job-{}".format(uuid.uuid4())
job = CronJob(_constants.scheduledjob_90())
k8s_cronjob = _utils.create_cronjob(name=name)
k8s_cronjob.model = job
k8s_cronjob.concurrency_policy = "Allow"
if _utils.is_reachable(k8s_cronjob.config):
k8s_cronjob.create()
self.assertIsInstance(k8s_cronjob, K8sCronJob)
self.assertEqual('Allow', k8s_cronjob.concurrency_policy)
def test_api_create_long_running_no_concurrency(self):
name = "job-{}".format(uuid.uuid4())
job = CronJob(_constants.scheduledjob_90())
k8s_cronjob = _utils.create_cronjob(name=name)
k8s_cronjob.model = job
k8s_cronjob.concurrency_policy = "Forbid"
k8s_cronjob.starting_deadline_seconds = 10
if _utils.is_reachable(k8s_cronjob.config):
k8s_cronjob.create()
self.assertIsInstance(k8s_cronjob, K8sCronJob)
self.assertEqual('Forbid', k8s_cronjob.concurrency_policy)
self.assertEqual(10, k8s_cronjob.starting_deadline_seconds)
# --------------------------------------------------------------------------------- api - list
def test_list(self):
name = "job-{}".format(uuid.uuid4())
job = CronJob(_constants.scheduledjob_90())
k8s_cronjob = _utils.create_cronjob(name=name)
k8s_cronjob.model = job
k8s_cronjob.concurrency_policy = "Forbid"
k8s_cronjob.starting_deadline_seconds = 10
if _utils.is_reachable(k8s_cronjob.config):
k8s_cronjob.create()
crons = k8s_cronjob.list()
for c in crons:
self.assertIsInstance(c, K8sCronJob)
# --------------------------------------------------------------------------------- api - last scheduled time
def test_last_schedule_time(self):
name = "job-{}".format(uuid.uuid4())
job = CronJob(_constants.scheduledjob_90())
k8s_cronjob = _utils.create_cronjob(name=name)
k8s_cronjob.model = job
k8s_cronjob.concurrency_policy = "Forbid"
k8s_cronjob.starting_deadline_seconds = 10
if _utils.is_reachable(k8s_cronjob.config):
k8s_cronjob.create()
while not k8s_cronjob.last_schedule_time:
k8s_cronjob.get()
time.sleep(2)
lst = k8s_cronjob.last_schedule_time
self.assertIsNotNone(lst)
self.assertIsInstance(lst, str)
# --------------------------------------------------------------------------------- api - pod
def test_pod(self):
name = "job-{}".format(uuid.uuid4())
model = CronJob(_constants.scheduledjob_90())
cj = _utils.create_cronjob(name=name)
cj.model = model
cj.concurrency_policy = "Forbid"
cj.starting_deadline_seconds = 10
if _utils.is_reachable(cj.config):
cj.create()
while not cj.last_schedule_time:
cj.get()
time.sleep(2)
pod = cj.pod
self.assertIsInstance(pod, K8sPod)
# --------------------------------------------------------------------------------- api - run
def test_run_already_running(self):
name = "job-{}".format(uuid.uuid4())
model = CronJob(_constants.scheduledjob_90())
cj = _utils.create_cronjob(name=name)
cj.model = model
cj.concurrency_policy = "Forbid"
cj.starting_deadline_seconds = 10
if _utils.is_reachable(cj.config):
cj.create()
while not cj.last_schedule_time:
cj.get()
time.sleep(2)
with self.assertRaises(CronJobAlreadyRunningException):
cj.run()
def test_run(self):
name = "job-{}".format(uuid.uuid4())
model = CronJob(_constants.scheduledjob_90())
cj = _utils.create_cronjob(name=name)
cj.model = model
cj.concurrency_policy = "Forbid"
cj.starting_deadline_seconds = 10
if _utils.is_reachable(cj.config):
cj.create()
self.assertFalse(cj.suspend)
cj.run()
self.assertFalse(cj.suspend)
# --------------------------------------------------------------------------------- api - activeDeadlineSeconds
def test_active_deadline_seconds(self):
ads = 50
cfg = _utils.create_config()
cj = CronJob(_constants.cronjob())
k8s = K8sCronJob(config=cfg, name="yo")
k8s.model = cj
self.assertIsNone(k8s.active_deadline_seconds)
k8s.active_deadline_seconds = ads
self.assertIsNotNone(k8s.active_deadline_seconds)
self.assertEqual(k8s.active_deadline_seconds, ads)
def test_observe_active_deadline_seconds(self):
cfg = _utils.create_config()
cj = CronJob(_constants.cronjob_exit_1())
k8s = K8sCronJob(config=cfg, name="yo")
k8s.model = cj
if _utils.is_reachable(cfg):
k8s.create()
self.assertIsInstance(k8s, K8sCronJob)
| 35.053061
| 115
| 0.577317
| 8,087
| 0.941663
| 0
| 0
| 0
| 0
| 0
| 0
| 1,389
| 0.161737
|
fb6d4b7447432a3d88c9b0ce1e3fc024eb47008f
| 9,054
|
py
|
Python
|
code/nn/optimization.py
|
serced/rcnn
|
1c5949c7ae5652a342b359e9defa72b2a6a6666b
|
[
"Apache-2.0"
] | 372
|
2016-01-26T02:41:51.000Z
|
2022-03-31T02:03:13.000Z
|
code/nn/optimization.py
|
serced/rcnn
|
1c5949c7ae5652a342b359e9defa72b2a6a6666b
|
[
"Apache-2.0"
] | 17
|
2016-08-23T17:28:02.000Z
|
2020-05-11T15:54:50.000Z
|
code/nn/optimization.py
|
serced/rcnn
|
1c5949c7ae5652a342b359e9defa72b2a6a6666b
|
[
"Apache-2.0"
] | 143
|
2016-01-13T05:33:33.000Z
|
2021-12-10T16:48:42.000Z
|
'''
This file implements various optimization methods, including
-- SGD with gradient norm clipping
-- AdaGrad
-- AdaDelta
-- Adam
Transparent to switch between CPU / GPU.
@author: Tao Lei (taolei@csail.mit.edu)
'''
import random
from collections import OrderedDict
import numpy as np
import theano
import theano.tensor as T
from theano.sandbox.cuda.basic_ops import HostFromGpu
from theano.sandbox.cuda.var import CudaNdarraySharedVariable
from theano.printing import debugprint
from .initialization import default_mrng
def create_optimization_updates(
cost, params, method="sgd",
max_norm=5, updates=None, gradients=None,
lr=0.01, eps=None, rho=0.99, gamma=0.999,
beta1=0.9, beta2=0.999, momentum=0.0):
_momentum = momentum
lr = theano.shared(np.float64(lr).astype(theano.config.floatX))
rho = theano.shared(np.float64(rho).astype(theano.config.floatX))
beta1 = theano.shared(np.float64(beta1).astype(theano.config.floatX))
beta2 = theano.shared(np.float64(beta2).astype(theano.config.floatX))
momentum = theano.shared(np.float64(momentum).astype(theano.config.floatX))
gamma = theano.shared(np.float64(gamma).astype(theano.config.floatX))
if eps is None:
eps = 1e-8 if method.lower() != "esgd" else 1e-4
eps = np.float64(eps).astype(theano.config.floatX)
gparams = T.grad(cost, params) if gradients is None else gradients
g_norm = 0
for g in gparams:
g_norm = g_norm + g.norm(2)**2
g_norm = T.sqrt(g_norm)
# max_norm is useful for sgd
if method != "sgd": max_norm = None
if max_norm is not None and max_norm is not False:
max_norm = theano.shared(np.float64(max_norm).astype(theano.config.floatX))
shrink_factor = T.minimum(max_norm, g_norm + eps) / (g_norm + eps)
gparams_clipped = [ ]
for g in gparams:
g = shrink_factor * g
gparams_clipped.append(g)
gparams = gparams_clipped
if updates is None:
updates = OrderedDict()
gsums = create_accumulators(params) if method != "sgd" or _momentum > 0.0 else \
[ None for p in params ]
xsums = create_accumulators(params) if method != "sgd" and method != "adagrad" else None
if method == "sgd":
create_sgd_updates(updates, params, gparams, gsums, lr, momentum)
elif method == "adagrad":
create_adagrad_updates(updates, params, gparams, gsums, lr, eps)
elif method == "adadelta":
create_adadelta_updates(updates, params, gparams, gsums, xsums, lr, eps, rho)
elif method == "adam":
create_adam_updates(updates, params, gparams, gsums, xsums, lr, eps, beta1, beta2)
elif method == "esgd":
create_esgd_updates(updates, params, gparams, gsums, xsums, lr, eps, gamma, momentum)
else:
raise Exception("Unknown optim method: {}\n".format(method))
if method == "adadelta":
lr = rho
return updates, lr, g_norm, gsums, xsums, max_norm
def is_subtensor_op(p):
if hasattr(p, 'owner') and hasattr(p.owner, 'op'):
return isinstance(p.owner.op, T.AdvancedSubtensor1) or \
isinstance(p.owner.op, T.Subtensor)
return False
def get_subtensor_op_inputs(p):
origin, indexes = p.owner.inputs
if hasattr(origin, 'owner') and hasattr(origin.owner, 'op') and \
isinstance(origin.owner.op, HostFromGpu):
origin = origin.owner.inputs[0]
assert isinstance(origin, CudaNdarraySharedVariable)
return origin, indexes
def get_similar_subtensor(matrix, indexes, param_op):
'''
    So far only two possible subtensor operations are used.
'''
if isinstance(param_op.owner.op, T.AdvancedSubtensor1):
return matrix[indexes]
else:
# indexes is start index in this case
return matrix[indexes:]
def create_accumulators(params):
accums = [ ]
for p in params:
if is_subtensor_op(p):
origin, _ = get_subtensor_op_inputs(p)
acc = theano.shared(np.zeros_like(origin.get_value(borrow=True), \
dtype=theano.config.floatX))
else:
acc = theano.shared(np.zeros_like(p.get_value(borrow=True), \
dtype=theano.config.floatX))
accums.append(acc)
return accums
def create_sgd_updates(updates, params, gparams, gsums, lr, momentum):
has_momentum = momentum.get_value() > 0.0
for p, g, acc in zip(params, gparams, gsums):
if is_subtensor_op(p):
origin, indexes = get_subtensor_op_inputs(p)
if has_momentum:
acc_slices = get_similar_subtensor(acc, indexes, p)
new_acc = acc_slices*momentum + g
updates[acc] = T.set_subtensor(acc_slices, new_acc)
else:
new_acc = g
updates[origin] = T.inc_subtensor(p, - lr * new_acc)
else:
if has_momentum:
new_acc = acc*momentum + g
updates[acc] = new_acc
else:
new_acc = g
updates[p] = p - lr * new_acc
def create_adagrad_updates(updates, params, gparams, gsums, lr, eps):
for p, g, acc in zip(params, gparams, gsums):
if is_subtensor_op(p):
origin, indexes = get_subtensor_op_inputs(p)
#acc_slices = acc[indexes]
acc_slices = get_similar_subtensor(acc, indexes, p)
new_acc = acc_slices + g**2
updates[acc] = T.set_subtensor(acc_slices, new_acc)
updates[origin] = T.inc_subtensor(p, \
- lr * (g / T.sqrt(new_acc + eps)))
else:
new_acc = acc + g**2
updates[acc] = new_acc
updates[p] = p - lr * (g / T.sqrt(new_acc + eps))
#updates[p] = p - lr * (g / (T.sqrt(new_acc) + eps))
# which one to use?
def create_adadelta_updates(updates, params, gparams, gsums, xsums,\
lr, eps, rho):
for p, g, gacc, xacc in zip(params, gparams, gsums, xsums):
if is_subtensor_op(p):
origin, indexes = get_subtensor_op_inputs(p)
gacc_slices = gacc[indexes]
xacc_slices = xacc[indexes]
new_gacc = rho * gacc_slices + (1.0-rho) * g**2
d = -T.sqrt((xacc_slices + eps)/(new_gacc + eps)) * g
new_xacc = rho * xacc_slices + (1.0-rho) * d**2
updates[gacc] = T.set_subtensor(gacc_slices, new_gacc)
updates[xacc] = T.set_subtensor(xacc_slices, new_xacc)
updates[origin] = T.inc_subtensor(p, d)
else:
new_gacc = rho * gacc + (1.0-rho) * g**2
d = -T.sqrt((xacc + eps)/(new_gacc + eps)) * g
new_xacc = rho * xacc + (1.0-rho) * d**2
updates[gacc] = new_gacc
updates[xacc] = new_xacc
updates[p] = p + d
def create_adam_updates(updates, params, gparams, gsums, xsums, \
lr, eps, beta1, beta2):
i = theano.shared(np.float64(0.0).astype(theano.config.floatX))
i_t = i + 1.0
omb1_t = 1.0 - beta1**i_t
omb2_t = 1.0 - beta2**i_t
lr_t = lr * (T.sqrt(omb2_t) / omb1_t)
for p, g, m, v in zip(params, gparams, gsums, xsums):
if is_subtensor_op(p):
origin, indexes = get_subtensor_op_inputs(p)
m_sub = m[indexes]
v_sub = v[indexes]
m_t = beta1*m_sub + (1.0-beta1)*g
v_t = beta2*v_sub + (1.0-beta2)*T.sqr(g)
g_t = m_t / (T.sqrt(v_t) + eps)
updates[m] = T.set_subtensor(m_sub, m_t)
updates[v] = T.set_subtensor(v_sub, v_t)
updates[origin] = T.inc_subtensor(p, -lr_t*g_t)
else:
m_t = beta1*m + (1.0-beta1)*g
v_t = beta2*v + (1.0-beta2)*T.sqr(g)
g_t = m_t / (T.sqrt(v_t) + eps)
updates[m] = m_t
updates[v] = v_t
updates[p] = p - lr_t*g_t
updates[i] = i_t
def create_esgd_updates(updates, params, gparams, gsums, xsums, lr, eps, gamma, momentum):
has_momentum = momentum.get_value() > 0.0
samples = [ default_mrng.normal(size=p.shape, avg=0, std=1,
dtype=theano.config.floatX) for p in params ]
HVs = T.Lop(gparams, params, samples)
i = theano.shared(np.float64(0.0).astype(theano.config.floatX))
i_t = i + 1.0
omg_t = 1.0 - gamma**i_t
for p, g, m, D, Hv in zip(params, gparams, gsums, xsums, HVs):
if is_subtensor_op(p):
raise Exception("ESGD subtensor update not implemented!")
else:
D_t = D * gamma + T.sqr(Hv) * (1.0-gamma)
if has_momentum:
m_t = m*momentum + g
updates[m] = m_t
else:
m_t = g
g_t = m_t / ( T.sqrt(D_t/omg_t + eps) )
#g_t = m_t / ( T.sqrt(D_t + eps) )
updates[D] = D_t
updates[p] = p - lr*g_t
updates[i] = i_t
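# A minimal usage sketch of create_optimization_updates (assumes Theano is
# installed; the toy least-squares model, shapes and the choice of Adam are
# illustrative only and not part of this module's API).
def _example_usage():
    x = T.matrix("x")
    y = T.vector("y")
    W = theano.shared(np.zeros(16, dtype=theano.config.floatX), name="W")
    b = theano.shared(np.zeros(1, dtype=theano.config.floatX), name="b")
    pred = T.dot(x, W) + b[0]          # simple linear predictor
    cost = T.mean((pred - y) ** 2)     # mean squared error
    updates, lr, g_norm, gsums, xsums, max_norm = create_optimization_updates(
        cost, [W, b], method="adam", lr=0.001
    )
    # Each call to the compiled function performs one Adam step on W and b.
    return theano.function([x, y], cost, updates=updates)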
| 37.882845
| 93
| 0.587254
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 707
| 0.078087
|
fb704422d64cb57af346521f6ec226890742b70a
| 883
|
py
|
Python
|
projectlaika/looseWindow.py
|
TheSgtPepper23/LaikaIA
|
fc73aa17f74462b211c4a4159b663ed7c3cdb1bd
|
[
"MIT"
] | null | null | null |
projectlaika/looseWindow.py
|
TheSgtPepper23/LaikaIA
|
fc73aa17f74462b211c4a4159b663ed7c3cdb1bd
|
[
"MIT"
] | null | null | null |
projectlaika/looseWindow.py
|
TheSgtPepper23/LaikaIA
|
fc73aa17f74462b211c4a4159b663ed7c3cdb1bd
|
[
"MIT"
] | null | null | null |
import sys
from PyQt5.QtWidgets import QMainWindow
from PyQt5.QtGui import QPixmap, QIcon
from PyQt5 import uic
from internationalization import LANGUAGE
class Loose(QMainWindow):
def __init__(self, lang):
QMainWindow.__init__(self)
uic.loadUi("windows/Looser.ui", self)
self.lang = lang
self.reload_text()
self.loser = QPixmap("resources/loser.png")
self.lose_button.clicked.connect(self.end_game)
self.lose_image.setPixmap(self.loser)
def reload_text(self):
"""Change the language of the window according to the chosen previously"""
self.language=LANGUAGE.get(self.lang)
self.setWindowTitle(self.language["lose_title"])
self.lose_label.setText(self.language["lose_text"])
self.lose_button.setText(self.language["return_to_menu"])
def end_game(self):
self.close()
| 35.32
| 82
| 0.698754
| 728
| 0.824462
| 0
| 0
| 0
| 0
| 0
| 0
| 153
| 0.173273
|
fb705b9a868266542cc3de66cc5408c3859e9bcd
| 617
|
py
|
Python
|
messenger/helper/http/post.py
|
gellowmellow/python-messenger-bot
|
01aaba569add8a6ed1349fc4774e3c7e64439dc0
|
[
"MIT"
] | null | null | null |
messenger/helper/http/post.py
|
gellowmellow/python-messenger-bot
|
01aaba569add8a6ed1349fc4774e3c7e64439dc0
|
[
"MIT"
] | null | null | null |
messenger/helper/http/post.py
|
gellowmellow/python-messenger-bot
|
01aaba569add8a6ed1349fc4774e3c7e64439dc0
|
[
"MIT"
] | null | null | null |
import requests
class Post:
def __init__(self, page_access_token, **kwargs):
self.page_access_token = page_access_token
        super().__init__(**kwargs)
def send(self, url, json):
try:
request_session = requests.Session()
params = {'access_token': self.page_access_token}
request = requests.Request('POST', url = url, params = params, json = json)
prepare = request.prepare()
response = request_session.send(prepare)
finally:
request_session.close()
return response.text
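# A minimal usage sketch: the access token and recipient id are placeholders,
# and the Graph API endpoint shown is only an assumption about how this helper
# is typically called.
def _example_send():
    poster = Post(page_access_token="<PAGE_ACCESS_TOKEN>")
    payload = {"recipient": {"id": "<PSID>"}, "message": {"text": "hello"}}
    return poster.send("https://graph.facebook.com/v2.6/me/messages", json=payload)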
| 30.85
| 87
| 0.593193
| 599
| 0.96769
| 0
| 0
| 0
| 0
| 0
| 0
| 20
| 0.03231
|
fb70843b616618f2f4796598ec6f5433ecaca7a0
| 2,424
|
py
|
Python
|
scripts/Feature_Generation/calculateSREstrengthDifferenceBetweenWTandMUT_perCluster.py
|
JayKu4418/Computational-Experimental-framework-for-predicting-effects-of-variants-on-alternative-splicing
|
4cb33b94bb8a864bc63fd5a3c96dae547914b20f
|
[
"CC0-1.0"
] | null | null | null |
scripts/Feature_Generation/calculateSREstrengthDifferenceBetweenWTandMUT_perCluster.py
|
JayKu4418/Computational-Experimental-framework-for-predicting-effects-of-variants-on-alternative-splicing
|
4cb33b94bb8a864bc63fd5a3c96dae547914b20f
|
[
"CC0-1.0"
] | null | null | null |
scripts/Feature_Generation/calculateSREstrengthDifferenceBetweenWTandMUT_perCluster.py
|
JayKu4418/Computational-Experimental-framework-for-predicting-effects-of-variants-on-alternative-splicing
|
4cb33b94bb8a864bc63fd5a3c96dae547914b20f
|
[
"CC0-1.0"
] | null | null | null |
import argparse
import Functions_Features.functionsToDetermineMotifStrength as fdm
import pandas as pd
parser = argparse.ArgumentParser()
parser.add_argument("-w","--tmpfolder",type=str,help="Input the upperlevel folder containing folder to Write to")
parser.add_argument("-t","--foldertitle",type=str,help="Input the title of the mutation file")
parser.add_argument("-m","--mutationfile",type=str,help="Input a mutation file")
parser.add_argument("-q","--quantile",nargs='?',default=0.95,type=float,help="Input a quantile value to set a threshold strength score for each motif cluster, default is 0.95")
args = parser.parse_args()
TMPfolder=args.tmpfolder
folderTitle=args.foldertitle
MUTATION_FILE=args.mutationfile
QUANTILE=args.quantile
dict_NumCluster={"ESE":8,"ESS":7,"ISE":7,"ISS":8}
strength_threshold_dict=fdm.createSREclusterThresholdDictionary(TMPfolder,dict_NumCluster,QUANTILE)
with open(MUTATION_FILE) as f:
#with open("../data/MAPT_MUTs_ToTest.tsv") as f:
mutations=[line.strip().split("\t") for line in f]
#mutsToIgnore=["Mut3","Mut10","Mut33"]
to_write = []
# Go through each mutation
for mut in mutations:
mutID=mut[0]
ESE_motifStrengths = fdm.getSumOfMotifScoreDiffsPerSRECluster(TMPfolder,folderTitle,mutID,"ESE",dict_NumCluster["ESE"],strength_threshold_dict)
ESS_motifStrengths = fdm.getSumOfMotifScoreDiffsPerSRECluster(TMPfolder,folderTitle,mutID,"ESS",dict_NumCluster["ESS"],strength_threshold_dict)
ISE_motifStrengths = fdm.getSumOfMotifScoreDiffsPerSRECluster(TMPfolder,folderTitle,mutID,"ISE",dict_NumCluster["ISE"],strength_threshold_dict)
ISS_motifStrengths = fdm.getSumOfMotifScoreDiffsPerSRECluster(TMPfolder,folderTitle,mutID,"ISS",dict_NumCluster["ISS"],strength_threshold_dict)
motifStrengths_forMut = [mutID]+ESE_motifStrengths+ESS_motifStrengths+ISE_motifStrengths+ISS_motifStrengths
to_write.append(motifStrengths_forMut)
with open(TMPfolder+MUTATION_FILE.split("/")[2].split(".")[0]+"_SREstrengthsDifferences_perCluster.tsv","w") as fw:
#with open(TMPfolder+motifType+"_MUTsToTest_ScoreDifferences.tsv","w") as fw:
fw.write("MutID")
for motifType in ["ESE","ESS","ISE","ISS"]:
for cluster in range(1,dict_NumCluster[motifType]+1):
fw.write("\t")
fw.write(motifType+"_Cluster"+str(cluster))
fw.write("\n")
for i in to_write:
fw.write("\t".join(i))
fw.write("\n")
| 47.529412
| 176
| 0.757426
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 650
| 0.268152
|
fb7155177920bff87f0b52005b1ab66f25856784
| 964
|
py
|
Python
|
asciinema/asciicast.py
|
alex/asciinema
|
ff23896174c07719d3b2ace6320a193934a0ac71
|
[
"MIT"
] | 1
|
2015-11-08T13:00:51.000Z
|
2015-11-08T13:00:51.000Z
|
asciinema/asciicast.py
|
alex/asciinema
|
ff23896174c07719d3b2ace6320a193934a0ac71
|
[
"MIT"
] | null | null | null |
asciinema/asciicast.py
|
alex/asciinema
|
ff23896174c07719d3b2ace6320a193934a0ac71
|
[
"MIT"
] | null | null | null |
import os
import subprocess
import time
class Asciicast(object):
def __init__(self, env=os.environ):
self.command = None
self.title = None
self.shell = env.get('SHELL', '/bin/sh')
self.term = env.get('TERM')
self.username = env.get('USER')
@property
def meta_data(self):
lines = int(get_command_output(['tput', 'lines']))
columns = int(get_command_output(['tput', 'cols']))
return {
'username' : self.username,
'duration' : self.duration,
'title' : self.title,
'command' : self.command,
'shell' : self.shell,
'term' : {
'type' : self.term,
'lines' : lines,
'columns': columns
}
}
def get_command_output(args):
process = subprocess.Popen(args, stdout=subprocess.PIPE)
return process.communicate()[0].strip()
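# A small usage sketch: __init__ does not set `duration`, so callers are
# expected to assign it (the value below is made up) before reading meta_data.
def _example_meta_data():
    cast = Asciicast()
    cast.title = "demo recording"
    cast.duration = 1.5
    return cast.meta_data   # runs `tput` to pick up the terminal geometry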
| 26.054054
| 60
| 0.520747
| 784
| 0.813278
| 0
| 0
| 534
| 0.553942
| 0
| 0
| 124
| 0.128631
|
fb71ff02d4840f857aab0f05feb1b65683b1dfad
| 88
|
py
|
Python
|
software_engineering-project/project/admin.py
|
mahdiieh/software_engineering_PROJECT
|
f0c40ccf0452f6da83fbb253050848b49c4f6153
|
[
"MIT"
] | null | null | null |
software_engineering-project/project/admin.py
|
mahdiieh/software_engineering_PROJECT
|
f0c40ccf0452f6da83fbb253050848b49c4f6153
|
[
"MIT"
] | null | null | null |
software_engineering-project/project/admin.py
|
mahdiieh/software_engineering_PROJECT
|
f0c40ccf0452f6da83fbb253050848b49c4f6153
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from .models import Movie
admin.site.register(Movie)
| 14.666667
| 32
| 0.806818
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
fb731e02437da6274e8e54fa035f9eeb59f57f17
| 17,808
|
py
|
Python
|
datasets/alt/alt.py
|
NihalHarish/datasets
|
67574a8d74796bc065a8b9b49ec02f7b1200c172
|
[
"Apache-2.0"
] | 9
|
2021-04-26T14:43:52.000Z
|
2021-11-08T09:47:24.000Z
|
datasets/alt/alt.py
|
NihalHarish/datasets
|
67574a8d74796bc065a8b9b49ec02f7b1200c172
|
[
"Apache-2.0"
] | null | null | null |
datasets/alt/alt.py
|
NihalHarish/datasets
|
67574a8d74796bc065a8b9b49ec02f7b1200c172
|
[
"Apache-2.0"
] | 1
|
2021-03-24T18:33:32.000Z
|
2021-03-24T18:33:32.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Asian Language Treebank (ALT) Project"""
from __future__ import absolute_import, division, print_function
import os
import datasets
_CITATION = """\
@inproceedings{riza2016introduction,
title={Introduction of the asian language treebank},
author={Riza, Hammam and Purwoadi, Michael and Uliniansyah, Teduh and Ti, Aw Ai and Aljunied, Sharifah Mahani and Mai, Luong Chi and Thang, Vu Tat and Thai, Nguyen Phuong and Chea, Vichet and Sam, Sethserey and others},
booktitle={2016 Conference of The Oriental Chapter of International Committee for Coordination and Standardization of Speech Databases and Assessment Techniques (O-COCOSDA)},
pages={1--6},
year={2016},
organization={IEEE}
}
"""
_HOMEPAGE = "https://www2.nict.go.jp/astrec-att/member/mutiyama/ALT/"
_DESCRIPTION = """\
The ALT project aims to advance the state-of-the-art Asian natural language processing (NLP) techniques through the open collaboration for developing and using ALT. It was first conducted by NICT and UCSY as described in Ye Kyaw Thu, Win Pa Pa, Masao Utiyama, Andrew Finch and Eiichiro Sumita (2016). Then, it was developed under ASEAN IVO as described in this Web page. The process of building ALT began with sampling about 20,000 sentences from English Wikinews, and then these sentences were translated into the other languages. ALT now has 13 languages: Bengali, English, Filipino, Hindi, Bahasa Indonesia, Japanese, Khmer, Lao, Malay, Myanmar (Burmese), Thai, Vietnamese, Chinese (Simplified Chinese).
"""
_URLs = {
"alt": "https://www2.nict.go.jp/astrec-att/member/mutiyama/ALT/ALT-Parallel-Corpus-20191206.zip",
"alt-en": "https://www2.nict.go.jp/astrec-att/member/mutiyama/ALT/English-ALT-20170107.zip",
"alt-jp": "https://www2.nict.go.jp/astrec-att/member/mutiyama/ALT/Japanese-ALT-20170330.zip",
"alt-my": "https://www2.nict.go.jp/astrec-att/member/mutiyama/ALT/my-alt-190530.zip",
"alt-my-transliteration": "https://www2.nict.go.jp/astrec-att/member/mutiyama/ALT/my-en-transliteration.zip",
"alt-my-west-transliteration": "https://www2.nict.go.jp/astrec-att/member/mutiyama/ALT/western-myanmar-transliteration.zip",
"alt-km": "https://www2.nict.go.jp/astrec-att/member/mutiyama/ALT/km-nova-181101.zip",
}
_SPLIT = {
"train": "https://www2.nict.go.jp/astrec-att/member/mutiyama/ALT/URL-train.txt",
"dev": "https://www2.nict.go.jp/astrec-att/member/mutiyama/ALT/URL-dev.txt",
"test": "https://www2.nict.go.jp/astrec-att/member/mutiyama/ALT/URL-test.txt",
}
_WIKI_URL = "https://www2.nict.go.jp/astrec-att/member/mutiyama/ALT/ALT-Parallel-Corpus-20191206/URL.txt"
class AltParallelConfig(datasets.BuilderConfig):
"""BuilderConfig for ALT."""
def __init__(self, languages, **kwargs):
"""BuilderConfig for ALT.
        Args:
            languages: the languages to include in the parallel translation feature;
                each must be one of the languages in `available_langs` below.
            **kwargs: keyword arguments forwarded to super.
"""
name = "alt-parallel"
description = "ALT Parallel Corpus"
super(AltParallelConfig, self).__init__(
name=name,
description=description,
version=datasets.Version("1.0.0", ""),
**kwargs,
)
available_langs = set(
["bg", "en", "en_tok", "fil", "hi", "id", "ja", "khm", "lo", "ms", "my", "th", "vi", "zh"]
)
for language in languages:
assert language in available_langs
self.languages = languages
class Alt(datasets.GeneratorBasedBuilder):
"""Asian Language Treebank (ALT) Project"""
BUILDER_CONFIGS = [
AltParallelConfig(
languages=["bg", "en", "en_tok", "fil", "hi", "id", "ja", "khm", "lo", "ms", "my", "th", "vi", "zh"]
),
datasets.BuilderConfig(name="alt-en", version=datasets.Version("1.0.0"), description="English ALT"),
datasets.BuilderConfig(name="alt-jp", version=datasets.Version("1.0.0"), description="Japanese ALT"),
datasets.BuilderConfig(name="alt-my", version=datasets.Version("1.0.0"), description="Myanmar ALT"),
datasets.BuilderConfig(name="alt-km", version=datasets.Version("1.0.0"), description="Khmer ALT"),
datasets.BuilderConfig(
name="alt-my-transliteration",
version=datasets.Version("1.0.0"),
description="Myanmar-English Transliteration Dataset",
),
datasets.BuilderConfig(
name="alt-my-west-transliteration",
version=datasets.Version("1.0.0"),
description="Latin-Myanmar Transliteration Dataset",
),
]
DEFAULT_CONFIG_NAME = "alt-parallel"
def _info(self):
if self.config.name.startswith("alt-parallel"):
features = datasets.Features(
{
"SNT.URLID": datasets.Value("string"),
"SNT.URLID.SNTID": datasets.Value("string"),
"url": datasets.Value("string"),
"translation": datasets.features.Translation(languages=self.config.languages),
}
)
elif self.config.name == "alt-en":
features = datasets.Features(
{
"SNT.URLID": datasets.Value("string"),
"SNT.URLID.SNTID": datasets.Value("string"),
"url": datasets.Value("string"),
"status": datasets.Value("string"),
"value": datasets.Value("string"),
}
)
elif self.config.name == "alt-jp":
features = datasets.Features(
{
"SNT.URLID": datasets.Value("string"),
"SNT.URLID.SNTID": datasets.Value("string"),
"url": datasets.Value("string"),
"status": datasets.Value("string"),
"value": datasets.Value("string"),
"word_alignment": datasets.Value("string"),
"jp_tokenized": datasets.Value("string"),
"en_tokenized": datasets.Value("string"),
}
)
elif self.config.name == "alt-my":
features = datasets.Features(
{
"SNT.URLID": datasets.Value("string"),
"SNT.URLID.SNTID": datasets.Value("string"),
"url": datasets.Value("string"),
"value": datasets.Value("string"),
}
)
elif self.config.name == "alt-my-transliteration":
features = datasets.Features(
{
"en": datasets.Value("string"),
"my": datasets.Sequence(datasets.Value("string")),
}
)
elif self.config.name == "alt-my-west-transliteration":
features = datasets.Features(
{
"en": datasets.Value("string"),
"my": datasets.Sequence(datasets.Value("string")),
}
)
elif self.config.name == "alt-km":
features = datasets.Features(
{
"SNT.URLID": datasets.Value("string"),
"SNT.URLID.SNTID": datasets.Value("string"),
"url": datasets.Value("string"),
"km_pos_tag": datasets.Value("string"),
"km_tokenized": datasets.Value("string"),
}
)
        else:
            raise ValueError(f"Unsupported config name: {self.config.name}")
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=features,
supervised_keys=None,
homepage=_HOMEPAGE,
citation=_CITATION,
)
def _split_generators(self, dl_manager):
if self.config.name.startswith("alt-parallel"):
data_path = dl_manager.download_and_extract(_URLs["alt"])
else:
data_path = dl_manager.download_and_extract(_URLs[self.config.name])
if self.config.name == "alt-my-transliteration" or self.config.name == "alt-my-west-transliteration":
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
gen_kwargs={"basepath": data_path, "split": None},
)
]
else:
data_split = {}
for k in _SPLIT:
data_split[k] = dl_manager.download_and_extract(_SPLIT[k])
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
gen_kwargs={"basepath": data_path, "split": data_split["train"]},
),
datasets.SplitGenerator(
name=datasets.Split.VALIDATION,
gen_kwargs={"basepath": data_path, "split": data_split["dev"]},
),
datasets.SplitGenerator(
name=datasets.Split.TEST,
gen_kwargs={"basepath": data_path, "split": data_split["test"]},
),
]
def _generate_examples(self, basepath, split=None):
allow_urls = {}
if split is not None:
with open(split, encoding="utf-8") as fin:
for line in fin:
sp = line.strip().split("\t")
urlid = sp[0].replace("URL.", "")
allow_urls[urlid] = {"SNT.URLID": urlid, "url": sp[1]}
data = {}
if self.config.name.startswith("alt-parallel"):
files = self.config.languages
data = {}
for lang in files:
file_path = os.path.join(basepath, "ALT-Parallel-Corpus-20191206", f"data_{lang}.txt")
fin = open(file_path, encoding="utf-8")
for line in fin:
line = line.strip()
sp = line.split("\t")
_, urlid, sntid = sp[0].split(".")
if urlid not in allow_urls:
continue
if sntid not in data:
data[sntid] = {}
data[sntid]["SNT.URLID"] = urlid
data[sntid]["SNT.URLID.SNTID"] = sntid
data[sntid]["url"] = allow_urls[urlid]["url"]
data[sntid]["translation"] = {}
# Note that Japanese and Myanmar texts have empty sentence fields in this release.
if len(sp) >= 2:
data[sntid]["translation"][lang] = sp[1]
fin.close()
elif self.config.name == "alt-en":
data = {}
for fname in ["English-ALT-Draft.txt", "English-ALT-Reviewed.txt"]:
file_path = os.path.join(basepath, "English-ALT-20170107", fname)
fin = open(file_path, encoding="utf-8")
for line in fin:
line = line.strip()
sp = line.split("\t")
_, urlid, sntid = sp[0].split(".")
if urlid not in allow_urls:
continue
d = {
"SNT.URLID": urlid,
"SNT.URLID.SNTID": sntid,
"url": allow_urls[urlid]["url"],
"status": None,
"value": None,
}
d["value"] = sp[1]
if fname == "English-ALT-Draft.txt":
d["status"] = "draft"
else:
d["status"] = "reviewed"
data[sntid] = d
fin.close()
elif self.config.name == "alt-jp":
data = {}
for fname in ["Japanese-ALT-Draft.txt", "Japanese-ALT-Reviewed.txt"]:
file_path = os.path.join(basepath, "Japanese-ALT-20170330", fname)
fin = open(file_path, encoding="utf-8")
for line in fin:
line = line.strip()
sp = line.split("\t")
_, urlid, sntid = sp[0].split(".")
if urlid not in allow_urls:
continue
d = {
"SNT.URLID": urlid,
"SNT.URLID.SNTID": sntid,
"url": allow_urls[urlid]["url"],
"value": None,
"status": None,
"word_alignment": None,
"en_tokenized": None,
"jp_tokenized": None,
}
d["value"] = sp[1]
if fname == "Japanese-ALT-Draft.txt":
d["status"] = "draft"
else:
d["status"] = "reviewed"
data[sntid] = d
fin.close()
keys = {
"word_alignment": "word-alignment/data_ja.en-ja",
"en_tokenized": "word-alignment/data_ja.en-tok",
"jp_tokenized": "word-alignment/data_ja.ja-tok",
}
for k in keys:
file_path = os.path.join(basepath, "Japanese-ALT-20170330", keys[k])
fin = open(file_path, encoding="utf-8")
for line in fin:
line = line.strip()
sp = line.split("\t")
# Note that Japanese and Myanmar texts have empty sentence fields in this release.
if len(sp) < 2:
continue
_, urlid, sntid = sp[0].split(".")
if urlid not in allow_urls:
continue
if sntid in data:
data[sntid][k] = sp[1]
fin.close()
elif self.config.name == "alt-my":
data = {}
for fname in ["data"]:
file_path = os.path.join(basepath, "my-alt-190530", fname)
fin = open(file_path, encoding="utf-8")
for line in fin:
line = line.strip()
sp = line.split("\t")
_, urlid, sntid = sp[0].split(".")
if urlid not in allow_urls:
continue
data[sntid] = {
"SNT.URLID": urlid,
"SNT.URLID.SNTID": sntid,
"url": allow_urls[urlid]["url"],
"value": sp[1],
}
fin.close()
elif self.config.name == "alt-km":
data = {}
for fname in ["data_km.km-tag.nova", "data_km.km-tok.nova"]:
file_path = os.path.join(basepath, "km-nova-181101", fname)
fin = open(file_path, encoding="utf-8")
for line in fin:
line = line.strip()
sp = line.split("\t")
_, urlid, sntid = sp[0].split(".")
if urlid not in allow_urls:
continue
k = "km_pos_tag" if fname == "data_km.km-tag.nova" else "km_tokenized"
if sntid in data:
data[sntid][k] = sp[1]
else:
data[sntid] = {
"SNT.URLID": urlid,
"SNT.URLID.SNTID": sntid,
"url": allow_urls[urlid]["url"],
"km_pos_tag": None,
"km_tokenized": None,
}
data[sntid][k] = sp[1]
fin.close()
elif self.config.name == "alt-my-transliteration":
file_path = os.path.join(basepath, "my-en-transliteration", "data.txt")
# Need to set errors='ignore' because of the unknown error
# UnicodeDecodeError: 'utf-8' codec can't decode byte 0xff in position 0: invalid start byte
            # It might be due to some issues related to Myanmar alphabets
fin = open(file_path, encoding="utf-8", errors="ignore")
_id = 0
for line in fin:
line = line.strip()
# I don't know why there are \x00 between |||. They don't show in the editor.
line = line.replace("\x00", "")
sp = line.split("|||")
                # When reading the data, there appear to be empty sentences between the actual sentences; the reason is unknown.
if len(sp) < 2:
continue
data[_id] = {"en": sp[0].strip(), "my": [sp[1].strip()]}
_id += 1
fin.close()
elif self.config.name == "alt-my-west-transliteration":
file_path = os.path.join(basepath, "western-myanmar-transliteration", "321.txt")
# Need to set errors='ignore' because of the unknown error
# UnicodeDecodeError: 'utf-8' codec can't decode byte 0xff in position 0: invalid start byte
            # It might be due to some issues related to Myanmar alphabets
fin = open(file_path, encoding="utf-8", errors="ignore")
_id = 0
for line in fin:
line = line.strip()
line = line.replace("\x00", "")
sp = line.split("|||")
data[_id] = {"en": sp[0].strip(), "my": [k.strip() for k in sp[1].split("|")]}
_id += 1
fin.close()
_id = 1
for k in data:
yield _id, data[k]
_id += 1
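# A short illustration of how a builder script like this is typically consumed,
# assuming it is registered under the name "alt" in the `datasets` library; the
# config name matches one of the BUILDER_CONFIGS declared above.
def _example_load():
    from datasets import load_dataset
    return load_dataset("alt", "alt-en", split="train")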
| 41.901176
| 706
| 0.500898
| 15,142
| 0.850292
| 8,591
| 0.482424
| 0
| 0
| 0
| 0
| 6,039
| 0.339117
|
fb747216a8e33dd3a7c21862ae471a10d4ad3882
| 246
|
py
|
Python
|
setup.py
|
whistlebee/awis-py
|
01793c72b369e5e41c4d11b7ba67f71e47cee3ef
|
[
"Apache-2.0"
] | 1
|
2020-09-04T18:50:32.000Z
|
2020-09-04T18:50:32.000Z
|
setup.py
|
whistlebee/awis-py
|
01793c72b369e5e41c4d11b7ba67f71e47cee3ef
|
[
"Apache-2.0"
] | 1
|
2020-09-06T05:51:43.000Z
|
2020-09-19T09:27:56.000Z
|
setup.py
|
whistlebee/awis-py
|
01793c72b369e5e41c4d11b7ba67f71e47cee3ef
|
[
"Apache-2.0"
] | null | null | null |
from setuptools import setup, find_packages
setup(
name='awis-py',
version='0.0.2',
url='https://github.com/whistlebee/awis-py',
packages=find_packages(),
install_requires=['requests', 'lxml'],
python_requires='>=3.6'
)
| 20.5
| 48
| 0.658537
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 78
| 0.317073
|
fb74a99668bbeadd3a3026fa2344b01e7a173609
| 17,918
|
py
|
Python
|
src/validation/aux_functions.py
|
christianhilscher/dynasim
|
881cfd3bd9d4b9291d289d703ec7da4a617a479a
|
[
"MIT"
] | null | null | null |
src/validation/aux_functions.py
|
christianhilscher/dynasim
|
881cfd3bd9d4b9291d289d703ec7da4a617a479a
|
[
"MIT"
] | 2
|
2020-08-06T10:01:59.000Z
|
2021-05-17T12:14:44.000Z
|
src/validation/aux_functions.py
|
christianhilscher/dynasim
|
881cfd3bd9d4b9291d289d703ec7da4a617a479a
|
[
"MIT"
] | 2
|
2020-08-19T06:52:09.000Z
|
2021-12-10T08:57:54.000Z
|
import sys
from pathlib import Path
import numpy as np
import pandas as pd
from bokeh.models import ColumnDataSource
from bokeh.io import export_png
from bokeh.plotting import figure
def plot_lifetime(df, type, path):
df = df.copy()
palette = ["#c9d9d3", "#718dbf", "#e84d60", "#648450"]
ylist = []
list0 = []
list1 = []
list2 = []
list3 = []
interv = np.sort(df["age_real"].unique())
for a in interv:
df_rel = df[df["age_real"]==a]
n = len(df_rel)
status0 = sum(df_rel["employment_status_" + type] == 0)/n
status1 = sum(df_rel["employment_status_" + type] == 1)/n
status2 = sum(df_rel["employment_status_" + type] == 2)/n
status3 = sum(df_rel["employment_status_" + type] == 3)/n
ylist.append(str(a))
list0.append(status0)
list1.append(status1)
list2.append(status2)
list3.append(status3)
dici = {"age": ylist,
"0": list0,
"1": list1,
"2": list2,
"3": list3}
#alllist = ["0", "1", "2", "3"]
#labels = ["N.E.", "Rente", "Teilzeit", "Vollzeit"]
alllist = ["3", "2", "0", "1"]
labels = ["Vollzeit", "Teilzeit", "N.E.", "Rente"]
p = figure(x_range=ylist, plot_height=250, plot_width=1500, title="Employment Status by age: West Germany / type: " + type)
p.vbar_stack(alllist, x='age', width=0.9, color=palette, source=dici,
legend_label=labels)
p.y_range.start = 0
p.x_range.range_padding = 0.1
p.xgrid.grid_line_color = None
p.axis.minor_tick_line_color = None
p.outline_line_color = None
p.legend.location = "bottom_left"
p.legend.orientation = "horizontal"
str_path = "employment_" + type + ".png"
export_png(p, filename=str(path/ str_path))
def var_by_method(dataf, variable):
dataf_out = pd.DataFrame()
dataf_out["pid"] = dataf["pid"]
dataf_out["year"] = dataf["year"]
dataf_out["hid"] = dataf["hid_real"]
dataf_out["age"] = dataf["age_real"]
for m in ["real", "ext"]:
dataf_out[m] = dataf[variable + "_" + m]
return dataf_out
def plot_mean_by_age(dataf, m_list, variable, path):
m_list = ["real", "ext"]
dataf = dataf.copy()
df = var_by_method(dataf, variable)
df_plot = df.groupby("age")[m_list].mean()
fig_title = variable
file_title = variable + ".png"
# return df
plot_age(df_plot, fig_title, file_title, path)
def make_pretty(p):
p.xgrid.grid_line_color = None
p.yaxis.minor_tick_line_width=0
p.xaxis.minor_tick_line_width=0
# p.legend.location = "bottom_right"
return p
def plot_employment_status_by_age(dataf, employment_status, path, female=None, east=None):
dataf = dataf.copy()
dataf_rest = rest_dataf(dataf, female, east)
status_list = ["N_E", "Rente", "Teilzeit", "Vollzeit"]
status = status_list[employment_status]
df_tmp = var_by_method(dataf_rest, "employment_status")
tmp = df_tmp[["real", "ext"]] == employment_status
df_plot = pd.concat([df_tmp["age"], tmp], axis=1)
df_plot = df_plot.groupby("age").mean()
# Plotting
fig_title, file_title = get_titles(female, east, status)
plot_age(df_plot, fig_title, file_title, path, interv=1)
def plot_age(dataf, fig_title, file_title, path, interv=0):
source = ColumnDataSource(dataf)
if interv==1:
p = figure(title = fig_title, y_range=(0, 1))
else:
p = figure(title = fig_title)
p.line(x="age", y="real", source=source,
line_color="black", line_dash = "solid", line_width=2,
legend_label = "Real")
p.line(x="age", y="ext", source=source,
line_color="black", line_dash = "dotted", line_width=2,
legend_label = "Ext")
p.xaxis.axis_label = "Age"
p = make_pretty(p)
export_png(p, filename=str(path/ file_title))
def plot_year(dataf, fig_title, file_title, path, interv=0):
source = ColumnDataSource(dataf)
if interv==1:
p = figure(title = fig_title, y_range=(0, 1))
else:
p = figure(title = fig_title)
p.line(x="year", y="real", source=source,
line_color="black", line_dash = "solid", line_width=2,
legend_label = "Real")
p.line(x="year", y="ext", source=source,
line_color="black", line_dash = "dotted", line_width=2,
legend_label = "Ext")
p.xaxis.axis_label = "Year"
p = make_pretty(p)
export_png(p, filename=str(path/ file_title))
def plot_year_age(ploto, by="year"):
source = ColumnDataSource(ploto.df_plot)
if ploto.y_range is None:
p = figure(title = ploto.fig_title)
else:
p = figure(title = ploto.fig_title, y_range=ploto.y_range)
p.line(x=by, y="real", source=source,
line_color="black", line_dash = "solid", line_width=2,
legend_label = "Real")
p.line(x=by, y="ext", source=source,
line_color="black", line_dash = "dotted", line_width=2,
legend_label = "Ext")
if by == "year":
p.xaxis.axis_label = "Year"
elif by == "age":
p.xaxis.axis_label = "Age"
p = make_pretty(p)
export_png(p, filename=str(ploto.path/ ploto.file_title))
def rest_dataf(dataf, female, east):
dataf = dataf.copy()
method = "real" # Gender and East do not change during the simulation
# Including either all people, or only male and female
if female == 1:
condition_female = dataf["female_" + method] == 1
elif female == 0:
condition_female = dataf["female_" + method] == 0
else:
condition_female = np.ones(len(dataf))
# Including either all people, or only east or west germans
if east == 1:
condition_east = dataf["east_" + method] == 1
elif east == 0:
condition_east = dataf["east_" + method] == 0
else:
condition_east = np.ones(len(dataf))
# Output is then sum of both conditions
final_condition = (condition_female).astype(int) \
+ (condition_east).astype(int)
df_out = dataf[final_condition == 2]
return df_out
def get_titles(female, east, status):
title = ""
shorttitle = status
if (female==None) & (east==None):
title = "Employment status: " + status + "; all people"
shorttitle += "_mfew.png"
elif (female==None) & (east==0):
title = "Employment status: " + status + "; all genders, west Germany"
shorttitle += "_mfw.png"
elif (female==None) & (east==1):
title = "Employment status: " + status + "; all genders, east Germany"
shorttitle += "_mfe.png"
elif (female==0) & (east==None):
title = "Employment status: " + status + "; male, whole Germany"
shorttitle += "_mew.png"
elif (female==1) & (east==None):
title = "Employment status: " + status + "; female, whole Germany"
shorttitle += "_few.png"
elif (female==0) & (east==0):
title = "Employment status: " + status + "; male, west Germany"
shorttitle += "_mw.png"
elif (female==0) & (east==1):
title = "Employment status: " + status + "; male, east Germany"
shorttitle += "_me.png"
elif (female==1) & (east==0):
title = "Employment status: " + status + "; female, west Germany"
shorttitle += "_fw.png"
elif (female==1) & (east==1):
title = "Employment status: " + status + "; female, east Germany"
shorttitle += "_fe.png"
return title, shorttitle
def get_titles_incomes(suffix, variable, working, female, fulltime, measure):
w_string = ""
f_string = ""
t_string = ""
if working==1:
w_string = "_working"
else:
pass
if female==1:
f_string = "_female"
elif female==0:
f_string = "_male"
else:
pass
if fulltime==1:
t_string = "_fulltime"
elif fulltime==0:
t_string = "_parttime"
else:
pass
fig_title = suffix + measure + "_" + variable + w_string + f_string + t_string
file_title = fig_title + ".png"
return fig_title, file_title
def wrap_employment_plots(dataf, path):
dataf = dataf.copy()
for emp in np.arange(4):
# All people, all employment status
plot_employment_status_by_age(dataf, emp, path)
# Males, all employment status
plot_employment_status_by_age(dataf, emp, path, female=0)
# Females, all employment status
plot_employment_status_by_age(dataf, emp, path, female=1)
# All_people, east Germany, all employment status
plot_employment_status_by_age(dataf, emp, path, east=1)
# All_people, west Germany, all employment status
plot_employment_status_by_age(dataf, emp, path, east=0)
# Males, east Germany, all employment status
plot_employment_status_by_age(dataf, emp, path, female=0, east=1)
# Males, west Germany, all employment status
plot_employment_status_by_age(dataf, emp, path, female=0, east=0)
# Females, east Germany, all employment status
plot_employment_status_by_age(dataf, emp, path, female=1, east=1)
# Females, west Germany, all employment status
plot_employment_status_by_age(dataf, emp, path, female=1, east=0)
def condition_by_type(dataf, method, working=False, female=None, fulltime=None):
dataf = dataf.copy()
# Condition to include all or only working people
if working:
condition_work = dataf["working_" + method] == 1
else:
condition_work = np.ones(len(dataf))
# Including either all people, or only male and female
if female == 1:
condition_female = dataf["female_" + method] == 1
elif female == 0:
condition_female = dataf["female_" + method] == 0
else:
condition_female = np.ones(len(dataf))
    # Including either all people, or only fulltime or parttime workers
if fulltime == 1:
condition_fulltime = dataf["fulltime_" + method] == 1
elif fulltime == 0:
condition_fulltime = dataf["parttimetime_" + method] == 1
else:
condition_fulltime = np.ones(len(dataf))
    # Output is then the sum of all three conditions
final_condition = (condition_female).astype(int) \
+ (condition_work).astype(int) \
+ (condition_fulltime).astype(int)
df_out = dataf[final_condition == 3]
return df_out
def restrict(dataf, working=False, female=None, fulltime=None):
dataf = dataf.copy()
out_dici = {"real": condition_by_type(dataf, "real", working, female, fulltime),
"ext": condition_by_type(dataf, "ext", working, female, fulltime)}
return out_dici
def var_by_method_dici(dici, variable, group, measure):
tmp = {}
m_list = ["real", "ext"]
for m in m_list:
if measure == "mean":
tmp[m] = dici[m].groupby(group)[variable + "_" + m].mean()
elif measure == "median":
tmp[m] = dici[m].groupby(group)[variable + "_" + m].median()
elif measure == "p90p50":
p90 = dici[m].groupby(group)[variable + "_" + m].quantile(0.9)
p50 = dici[m].groupby(group)[variable + "_" + m].quantile(0.5)
tmp[m] = p90/p50
elif measure == "p90p10":
p90 = dici[m].groupby(group)[variable + "_" + m].quantile(0.9)
p10 = dici[m].groupby(group)[variable + "_" + m].quantile(0.1)
tmp[m] = p90/p10
elif measure == "p50p10":
p50 = dici[m].groupby(group)[variable + "_" + m].quantile(0.5)
p10 = dici[m].groupby(group)[variable + "_" + m].quantile(0.1)
tmp[m] = p50/p10
elif measure == "gini":
tmp[m] = dici[m].groupby(group)[variable + "_" + m].agg(gini_coefficient)
df_out = pd.DataFrame(tmp)
return df_out
def plot_income_age(dataf, variable, path, working=None, female=None, fulltime=None, measure="mean"):
dataf = dataf.copy()
dici = restrict(dataf, working, female, fulltime)
df_plot = var_by_method_dici(dici, variable, group="age_real", measure=measure)
df_plot = df_plot.fillna(0)
df_plot.reset_index(inplace=True)
df_plot.rename(columns={"age_real": "age"}, inplace=True)
fig_title, file_title = get_titles_incomes("age_", variable, working, female, fulltime, measure)
plot_age(df_plot, fig_title, file_title, path)
def wrap_income_age_plots(dataf, path):
dataf = dataf.copy()
variables = ["gross_earnings", "hours"]
for var in variables:
for m in ["mean", "median"]:
# All people
plot_income_age(dataf, var, path=path, measure=m)
plot_income_age(dataf, var, path=path, female=0, measure=m)
plot_income_age(dataf, var, path=path, female=1, measure=m)
# Conditional on working
plot_income_age(dataf, var, path=path, working=1, measure=m)
plot_income_age(dataf, var, path=path, working=1, female=0, measure=m)
plot_income_age(dataf, var, path=path, working=1, female=1, measure=m)
# Conditional on fulltime
plot_income_age(dataf, var, path=path, fulltime=1, measure=m)
plot_income_age(dataf, var, path=path, fulltime=1, female=0, measure=m)
plot_income_age(dataf, var, path=path, fulltime=1, female=1, measure=m)
def plot_income_year(dataf, variable, path, working=None, female=None, fulltime=None, measure="mean"):
dataf = dataf.copy()
dici = restrict(dataf, working, female, fulltime)
df_plot = var_by_method_dici(dici, variable, group="year", measure=measure)
df_plot = df_plot.fillna(0)
df_plot.reset_index(inplace=True)
fig_title, file_title = get_titles_incomes("year_", variable, working, female, fulltime, measure)
plot_year(df_plot, fig_title, file_title, path)
def plot_income_year2(ploto, measure="mean"):
dici = restrict(ploto.data, ploto.working, ploto.female, ploto.fulltime)
df_plot = var_by_method_dici(dici, ploto.var, group="year", measure=measure)
df_plot = df_plot.fillna(0)
df_plot.reset_index(inplace=True)
fig_title, file_title = get_titles_incomes("year_", ploto.var, ploto.working, ploto.female, ploto.fulltime, measure)
plot_year2(df_plot, fig_title, file_title, ploto)
def wrap_income_year_plots(dataf, path):
dataf = dataf.copy()
variables = ["gross_earnings", "hours"]
for var in variables:
for m in ["mean", "median"]:
# All people
plot_income_year(dataf, var, path=path, measure=m)
plot_income_year(dataf, var, path=path, female=0, measure=m)
plot_income_year(dataf, var, path=path, female=1, measure=m)
# Conditional on working
plot_income_year(dataf, var, path=path, working=1, measure=m)
plot_income_year(dataf, var, path=path, working=1, female=0, measure=m)
plot_income_year(dataf, var, path=path, working=1, female=1, measure=m)
# Conditional on fulltime
plot_income_year(dataf, var, path=path, fulltime=1, measure=m)
plot_income_year(dataf, var, path=path, fulltime=1, female=0, measure=m)
plot_income_year(dataf, var, path=path, fulltime=1, female=1, measure=m)
def plot_inequality_year(dataf, variable, path, working=None, female=None, fulltime=None, measure="mean"):
dataf = dataf.copy()
dici = restrict(dataf, working, female, fulltime)
df_plot = var_by_method_dici(dici, variable, group="year", measure=measure)
df_plot = df_plot.fillna(0)
df_plot.reset_index(inplace=True)
fig_title, file_title = get_titles_incomes("ineq_", variable, working, female, fulltime, measure)
plot_year(df_plot, fig_title, file_title, path)
def wrap_inequality_year_plots(dataf, path):
dataf = dataf.copy()
var = ["gross_earnings", "hours"]
for v in var:
for m in ["p90p50", "p90p10", "p50p10", "gini"]:
plot_inequality_year(dataf, v, path, working=1, measure=m)
plot_inequality_year(dataf, v, path, working=1, female=0, measure=m)
plot_inequality_year(dataf, v, path, working=1, female=1, measure=m)
def gini_coefficient(x):
"""Compute Gini coefficient of array of values"""
diffsum = 0
for i, xi in enumerate(x[:-1], 1):
diffsum += np.sum(np.abs(xi - x[i:]))
return diffsum / (len(x)**2 * np.mean(x))
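# Quick worked checks of the formula above (each unordered pair is counted once,
# so dividing by n**2 * mean matches the usual all-pairs / (2 * n**2 * mean) form):
#   gini_coefficient(np.array([1.0, 1.0, 1.0]))  -> 0.0   # perfect equality
#   gini_coefficient(np.array([0.0, 1.0]))       -> 0.5   # 1 / (2**2 * 0.5)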
def make_quantile(dataf, var, m_list, q):
dataf = dataf.copy()
for m in m_list:
variable = var + "_" + m
real_q = dataf.groupby(["year"])[variable].quantile(q).to_frame()
real_q.rename(columns={variable: "var"}, inplace=True)
dataf = pd.merge(dataf, real_q, how="left", on="year")
dataf.loc[dataf[variable]>dataf["var"], variable] = dataf["var"]
dataf.drop("var", axis=1, inplace=True)
return dataf
def cap_outliers(dataf, m_list):
dataf = dataf.copy()
# # Hours
# dataf = make_quantile(dataf, "hours", m_list, 0.99)
# dataf = make_quantile(dataf, "hours_t1", m_list, 0.99)
# dataf = make_quantile(dataf, "hours_t2", m_list, 0.99)
# Gross earnings
dataf = make_quantile(dataf, "gross_earnings", m_list, 0.95)
dataf = make_quantile(dataf, "gross_earnings_t1", m_list, 0.95)
dataf = make_quantile(dataf, "gross_earnings_t2", m_list, 0.95)
return dataf
| 34.194656
| 127
| 0.60509
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 3,012
| 0.168099
|
fb7583ba835e078f93bcf270c20be6606ba135d8
| 98
|
py
|
Python
|
test.py
|
krithikV/vaccineregistration
|
5d9aa52c7d8c9b196e23a73525dbaaf1e791e3e2
|
[
"Apache-2.0"
] | null | null | null |
test.py
|
krithikV/vaccineregistration
|
5d9aa52c7d8c9b196e23a73525dbaaf1e791e3e2
|
[
"Apache-2.0"
] | null | null | null |
test.py
|
krithikV/vaccineregistration
|
5d9aa52c7d8c9b196e23a73525dbaaf1e791e3e2
|
[
"Apache-2.0"
] | null | null | null |
from multiprocessing import Process
# 'app' is assumed to be a WSGI application (e.g. a Flask instance) defined elsewhere
server = Process(target=app.run)
server.start()      # run the development server in a child process
server.terminate()  # and stop it again
| 19.6
| 38
| 0.734694
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 6
| 0.061224
|
fb7811b122904a7fba10519297aa03213ea6aa2e
| 755
|
py
|
Python
|
deep-rl/lib/python2.7/site-packages/OpenGL/GLES2/vboimplementation.py
|
ShujaKhalid/deep-rl
|
99c6ba6c3095d1bfdab81bd01395ced96bddd611
|
[
"MIT"
] | 210
|
2016-04-09T14:26:00.000Z
|
2022-03-25T18:36:19.000Z
|
deep-rl/lib/python2.7/site-packages/OpenGL/GLES2/vboimplementation.py
|
ShujaKhalid/deep-rl
|
99c6ba6c3095d1bfdab81bd01395ced96bddd611
|
[
"MIT"
] | 72
|
2016-09-04T09:30:19.000Z
|
2022-03-27T17:06:53.000Z
|
deep-rl/lib/python2.7/site-packages/OpenGL/GLES2/vboimplementation.py
|
ShujaKhalid/deep-rl
|
99c6ba6c3095d1bfdab81bd01395ced96bddd611
|
[
"MIT"
] | 64
|
2016-04-09T14:26:49.000Z
|
2022-03-21T11:19:47.000Z
|
from OpenGL.arrays import vbo
from OpenGL.GLES2.VERSION import GLES2_2_0
from OpenGL.GLES2.OES import mapbuffer
class Implementation( vbo.Implementation ):
"""OpenGL-based implementation of VBO interfaces"""
def __init__( self ):
        for name in self.EXPORTED_NAMES:
            found = False  # reset per name so the assert below catches unresolved entry points
for source in [ GLES2_2_0, mapbuffer ]:
for possible in (name,name+'OES'):
try:
setattr( self, name, getattr( source, possible ))
except AttributeError as err:
pass
else:
found = True
assert found, name
if GLES2_2_0.glBufferData:
self.available = True
Implementation.register()
| 35.952381
| 73
| 0.561589
| 615
| 0.81457
| 0
| 0
| 0
| 0
| 0
| 0
| 56
| 0.074172
|
fb79110d81706eec2a558890fdef6435d3ebf1bb
| 8,457
|
py
|
Python
|
tests/test_model.py
|
zeta1999/OpenJij
|
0fe03f07af947f519a32ad58fe20423919651634
|
[
"Apache-2.0"
] | null | null | null |
tests/test_model.py
|
zeta1999/OpenJij
|
0fe03f07af947f519a32ad58fe20423919651634
|
[
"Apache-2.0"
] | null | null | null |
tests/test_model.py
|
zeta1999/OpenJij
|
0fe03f07af947f519a32ad58fe20423919651634
|
[
"Apache-2.0"
] | 1
|
2021-04-09T09:13:56.000Z
|
2021-04-09T09:13:56.000Z
|
import unittest
import numpy as np
import openjij as oj
import cxxjij as cj
def calculate_ising_energy(h, J, spins):
energy = 0.0
for (i, j), Jij in J.items():
energy += Jij*spins[i]*spins[j]
for i, hi in h.items():
energy += hi * spins[i]
return energy
def calculate_qubo_energy(Q, binary):
energy = 0.0
for (i, j), Qij in Q.items():
energy += Qij*binary[i]*binary[j]
return energy
class VariableTypeTest(unittest.TestCase):
def test_variable_type(self):
spin = oj.cast_var_type('SPIN')
self.assertEqual(spin, oj.SPIN)
binary = oj.cast_var_type('BINARY')
self.assertEqual(binary, oj.BINARY)
class ModelTest(unittest.TestCase):
def setUp(self):
self.h = {0: 1, 1: -2}
self.J = {(0, 1): -1, (1, 2): -3, (2, 3): 0.5}
self.spins = {0: 1, 1: -1, 2: 1, 3: 1}
self.Q = {(0, 0): 1, (1, 2): -1, (2, 0): -0.2, (1, 3): 3}
self.binaries = {0: 0, 1: 1, 2: 1, 3: 0}
def test_bqm_constructor(self):
# Test BinaryQuadraticModel constructor
bqm = oj.BinaryQuadraticModel(self.h, self.J)
self.assertEqual(type(bqm.interaction_matrix()), np.ndarray)
self.assertEqual(bqm.vartype, oj.SPIN)
dense_graph = bqm.get_cxxjij_ising_graph(sparse=False)
self.assertTrue(isinstance(dense_graph, cj.graph.Dense))
bqm_qubo = oj.BinaryQuadraticModel.from_qubo(Q=self.Q)
self.assertEqual(bqm_qubo.vartype, oj.BINARY)
def test_interaction_matrix(self):
bqm = oj.BinaryQuadraticModel(self.h, self.J)
ising_matrix = np.array([
[1, -1, 0, 0],
[-1, -2, -3, 0],
[0, -3, 0, 0.5],
[0, 0, 0.5, 0]
])
np.testing.assert_array_equal(
bqm.interaction_matrix(), ising_matrix
)
# check Hij = Jij + Jji
J = self.J.copy()
J[0, 1] /= 3
J[1, 0] = J[0, 1] * 2
bqm = oj.BinaryQuadraticModel(self.h, J)
np.testing.assert_array_equal(bqm.interaction_matrix(), ising_matrix)
def test_transfer_to_cxxjij(self):
bqm = oj.BinaryQuadraticModel(self.h, self.J)
# to Dense
ising_graph = bqm.get_cxxjij_ising_graph(sparse=False)
self.assertEqual(ising_graph.size(), len(bqm.indices))
for i in range(len(bqm.indices)):
for j in range(len(bqm.indices)):
if i != j:
self.assertAlmostEqual(bqm.interaction_matrix()[i,j], ising_graph.get_interactions()[i, j])
else:
# i == j
self.assertAlmostEqual(bqm.interaction_matrix()[i,j], ising_graph.get_interactions()[i, len(bqm.indices)])
self.assertAlmostEqual(bqm.interaction_matrix()[i,j], ising_graph.get_interactions()[len(bqm.indices), i])
self.assertEqual(ising_graph.get_interactions()[i,i], 0)
self.assertEqual(ising_graph.get_interactions()[len(bqm.indices),len(bqm.indices)], 1)
# to Sparse
ising_graph = bqm.get_cxxjij_ising_graph(sparse=True)
self.assertEqual(ising_graph.size(), len(bqm.indices))
for i in range(ising_graph.size()):
for j in ising_graph.adj_nodes(i):
self.assertEqual(bqm.interaction_matrix()[i,j], ising_graph[i,j])
def test_bqm_calc_energy(self):
# Test to calculate energy
# Test Ising energy
bqm = oj.BinaryQuadraticModel(self.h, self.J)
ising_energy_bqm = bqm.energy(self.spins)
true_ising_e = calculate_ising_energy(self.h, self.J, self.spins)
self.assertEqual(ising_energy_bqm, true_ising_e)
# Test QUBO energy
bqm = oj.BinaryQuadraticModel.from_qubo(Q=self.Q)
qubo_energy_bqm = bqm.energy(self.binaries)
true_qubo_e = calculate_qubo_energy(self.Q, self.binaries)
self.assertEqual(qubo_energy_bqm, true_qubo_e)
# QUBO == Ising
spins = {0: 1, 1: 1, 2: -1, 3: 1}
binary = {0: 1, 1: 1, 2: 0, 3: 1}
qubo_bqm = oj.BinaryQuadraticModel.from_qubo(Q=self.Q)
# ising_mat = qubo_bqm.ising_interactions()
# h, J = {}, {}
# for i in range(len(ising_mat)-1):
# for j in range(i, len(ising_mat)):
# if i == j:
# h[i] = ising_mat[i][i]
# else:
# J[(i, j)] = ising_mat[i][j]
qubo_energy = qubo_bqm.energy(binary)
self.assertEqual(qubo_energy, qubo_bqm.energy(spins, convert_sample=True))
def test_energy_consistency(self):
bqm = oj.BinaryQuadraticModel(self.h, self.J, var_type='SPIN')
dense_ising_graph = bqm.get_cxxjij_ising_graph(sparse=False)
sparse_ising_graph = bqm.get_cxxjij_ising_graph(sparse=True)
spins = {0: -1, 1: -1, 2: -1, 3: -1}
self.assertAlmostEqual(dense_ising_graph.calc_energy([spins[i] for i in range(len(spins))]), bqm.energy(spins))
self.assertAlmostEqual(sparse_ising_graph.calc_energy([spins[i] for i in range(len(spins))]), bqm.energy(spins))
def test_bqm(self):
h = {}
J = {(0, 1): -1.0, (1, 2): -3.0}
bqm = oj.BinaryQuadraticModel(h, J)
self.assertEqual(J, bqm.get_quadratic())
self.assertEqual(type(bqm.interaction_matrix()), np.ndarray)
correct_mat = np.array([[0, -1, 0, ], [-1, 0, -3], [0, -3, 0]])
np.testing.assert_array_equal(
            bqm.interaction_matrix(), correct_mat.astype(float))
def test_chimera_converter(self):
h = {}
J = {(0, 4): -1.0, (6, 2): -3.0, (16, 0): 4}
chimera = oj.ChimeraModel(h, J, offset=0, unit_num_L=2)
self.assertEqual(chimera.chimera_coordinate(
4, unit_num_L=2), (0, 0, 4))
self.assertEqual(chimera.chimera_coordinate(
12, unit_num_L=2), (0, 1, 4))
self.assertEqual(chimera.chimera_coordinate(
16, unit_num_L=2), (1, 0, 0))
def test_chimera(self):
h = {}
J = {(0, 4): -1.0, (6, 2): -3.0}
bqm = oj.ChimeraModel(h, J, offset=0, unit_num_L=3)
self.assertTrue(bqm.validate_chimera())
J = {(0, 1): -1}
bqm = oj.ChimeraModel(h, J, unit_num_L=3)
with self.assertRaises(ValueError):
bqm.validate_chimera()
J = {(4, 12): -1}
bqm = oj.ChimeraModel(h, J, unit_num_L=2)
self.assertTrue(bqm.validate_chimera())
J = {(0, 4): -1, (5, 13): 1, (24, 8): 2,
(18, 20): 1, (16, 0): 0.5, (19, 23): -2}
h = {13: 2}
chimera = oj.ChimeraModel(h, J, unit_num_L=2)
self.assertEqual(chimera.to_index(1, 1, 1, unit_num_L=2), 25)
self.assertTrue(chimera.validate_chimera())
def test_ising_dict(self):
Q = {(0, 4): -1.0, (6, 2): -3.0}
bqm = oj.ChimeraModel.from_qubo(Q=Q, unit_num_L=3)
def test_king_graph(self):
h = {}
J = {(0, 1): -1.0, (1, 2): -3.0}
king_interaction = [[0, 0, 1, 0, -1.0], [1, 0, 2, 0, -3.0]]
king_graph = oj.KingGraph(machine_type="ASIC", linear=h, quadratic=J)
correct_mat = np.array([[0, -1, 0, ], [-1, 0, -3], [0, -3, 0]])
np.testing.assert_array_equal(
            king_graph.interaction_matrix(), correct_mat.astype(float))
self.assertCountEqual(king_interaction, king_graph._ising_king_graph)
king_graph = oj.KingGraph(
machine_type="ASIC", king_graph=king_interaction)
np.testing.assert_array_equal(
king_interaction, king_graph._ising_king_graph)
king_graph = oj.KingGraph.from_qubo(Q={(0, 1): -1}, machine_type='ASIC')
king_interaction = [[0, 0, 0, 0, -0.25],
[0, 0, 1, 0, -0.25], [1, 0, 1, 0, -0.25]]
self.assertCountEqual(king_interaction, king_graph._ising_king_graph)
def test_get_chimera_graph(self):
c_model = oj.ChimeraModel.from_qubo(Q={(0, 4): -1, (1, 1): -1, (1, 5): 1}, unit_num_L=2)
chimera = c_model.get_cxxjij_ising_graph()
self.assertIsInstance(chimera, cj.graph.Chimera)
c_model = oj.ChimeraModel.from_qubo(Q={((0, 0, 1), (0, 0, 4)): -1, ((0, 0, 4), (0, 0, 2)): -1},
unit_num_L=2)
chimera = c_model.get_cxxjij_ising_graph()
self.assertIsInstance(chimera, cj.graph.Chimera)
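# A compact illustration of the model API exercised by the tests above; the
# coefficients are arbitrary and the energy follows the same convention as
# calculate_ising_energy at the top of this file.
def _example_bqm_energy():
    h = {0: 1, 1: -2}
    J = {(0, 1): -1}
    bqm = oj.BinaryQuadraticModel(h, J)
    return bqm.energy({0: 1, 1: -1})   # 1*1 + (-2)*(-1) + (-1)*1*(-1) = 4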
if __name__ == '__main__':
unittest.main()
| 37.255507
| 126
| 0.578101
| 7,962
| 0.941469
| 0
| 0
| 0
| 0
| 0
| 0
| 462
| 0.054629
|
fb795f78cbeba316633c5e08d2759b19e5be4e41
| 18,273
|
py
|
Python
|
src/bgp-acl-agent/bgp-acl-agent.py
|
jbemmel/srl-bgp-acl
|
18d2b625ea24cc1a269513798e0e58e84f3eaca8
|
[
"Apache-2.0"
] | 1
|
2022-01-25T16:03:02.000Z
|
2022-01-25T16:03:02.000Z
|
src/bgp-acl-agent/bgp-acl-agent.py
|
jbemmel/srl-bgp-acl
|
18d2b625ea24cc1a269513798e0e58e84f3eaca8
|
[
"Apache-2.0"
] | null | null | null |
src/bgp-acl-agent/bgp-acl-agent.py
|
jbemmel/srl-bgp-acl
|
18d2b625ea24cc1a269513798e0e58e84f3eaca8
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# coding=utf-8
import grpc
from datetime import datetime
import sys
import logging
import socket
import os
from ipaddress import ip_network, ip_address, IPv4Address
import json
import signal
import traceback
import re
from concurrent.futures import ThreadPoolExecutor
import sdk_service_pb2
import sdk_service_pb2_grpc
import config_service_pb2
# To report state back
import telemetry_service_pb2
import telemetry_service_pb2_grpc
from pygnmi.client import gNMIclient, telemetryParser
from logging.handlers import RotatingFileHandler
############################################################
## Agent will start with this name
############################################################
agent_name='bgp_acl_agent'
acl_sequence_start=1000 # Default ACL sequence number base, can be configured
acl_count=0 # Number of ACL entries created/managed
############################################################
## Open a GRPC channel to connect to sdk_mgr on the dut
## sdk_mgr will be listening on 50053
############################################################
channel = grpc.insecure_channel('unix:///opt/srlinux/var/run/sr_sdk_service_manager:50053')
# channel = grpc.insecure_channel('127.0.0.1:50053')
metadata = [('agent_name', agent_name)]
stub = sdk_service_pb2_grpc.SdkMgrServiceStub(channel)
############################################################
## Subscribe to required event
## This agent only needs Config notifications, so only the
## 'cfg' subscription below is registered
############################################################
def Subscribe(stream_id, option):
op = sdk_service_pb2.NotificationRegisterRequest.AddSubscription
if option == 'cfg':
entry = config_service_pb2.ConfigSubscriptionRequest()
entry.key.js_path = '.' + agent_name # filter out .commit.end notifications
request = sdk_service_pb2.NotificationRegisterRequest(op=op, stream_id=stream_id, config=entry)
subscription_response = stub.NotificationRegister(request=request, metadata=metadata)
print('Status of subscription response for {}:: {}'.format(option, subscription_response.status))
############################################################
## Subscribe to all the events that Agent needs
############################################################
def Subscribe_Notifications(stream_id):
'''
Agent will receive notifications to what is subscribed here.
'''
if not stream_id:
logging.info("Stream ID not sent.")
return False
# Subscribe to config changes, first
Subscribe(stream_id, 'cfg')
##################################################################
## Proc to process the config notifications received by bgp_acl_agent
## At present it processes config with js_path = .bgp_acl_agent
##################################################################
def Handle_Notification(obj):
if obj.HasField('config'):
logging.info(f"GOT CONFIG :: {obj.config.key.js_path}")
if obj.config.key.js_path == ".bgp_acl_agent":
logging.info(f"Got config for agent, now will handle it :: \n{obj.config}\
Operation :: {obj.config.op}\nData :: {obj.config.data.json}")
if obj.config.op == 2:
logging.info(f"Delete bgp-acl-agent cli scenario")
# if file_name != None:
# Update_Result(file_name, action='delete')
response=stub.AgentUnRegister(request=sdk_service_pb2.AgentRegistrationRequest(), metadata=metadata)
logging.info('Handle_Config: Unregister response:: {}'.format(response))
else:
json_acceptable_string = obj.config.data.json.replace("'", "\"")
data = json.loads(json_acceptable_string)
                if 'acl_sequence_start' in data:
                    global acl_sequence_start  # update the module-level setting, not a local copy
                    acl_sequence_start = data['acl_sequence_start']['value']
                    logging.info(f"Got init sequence :: {acl_sequence_start}")
return 'acl_sequence_start' in data
else:
logging.info(f"Unexpected notification : {obj}")
return False
def Gnmi_subscribe_bgp_changes():
subscribe = {
'subscription': [
{
# 'path': '/srl_nokia-network-instance:network-instance[name=*]/protocols/srl_nokia-bgp:bgp/neighbor[peer-address=*]/admin-state',
# Possible to subscribe without '/admin-state', but then too many events
# Like this, no 'delete' is received when the neighbor is deleted
# Also, 'enable' event is followed by 'disable' - broken
# 'path': '/network-instance[name=*]/protocols/bgp/neighbor[peer-address=*]/admin-state',
# This leads to too many events, hitting the max 60/minute gNMI limit
# 10 events per CLI change to a bgp neighbor, many duplicates
# 'path': '/network-instance[name=*]/protocols/bgp/neighbor[peer-address=*]',
'path': '/network-instance[name=*]/protocols/bgp/neighbor[peer-address=*]',
'mode': 'on_change',
# 'heartbeat_interval': 10 * 1000000000 # ns between, i.e. 10s
# Mode 'sample' results in polling
# 'mode': 'sample',
# 'sample_interval': 10 * 1000000000 # ns between samples, i.e. 10s
},
{ # Also monitor dynamic-neighbors sections
'path': '/network-instance[name=*]/protocols/bgp/dynamic-neighbors/accept/match[prefix=*]',
'mode': 'on_change',
}
],
'use_aliases': False,
'mode': 'stream',
'encoding': 'json'
}
_bgp = re.compile( r'^network-instance\[name=([^]]*)\]/protocols/bgp/neighbor\[peer-address=([^]]*)\]/admin-state$' )
_dyn = re.compile( r'^network-instance\[name=([^]]*)\]/protocols/bgp/dynamic-neighbors/accept/match\[prefix=([^]]*)\]/peer-group$' )
# with Namespace('/var/run/netns/srbase-mgmt', 'net'):
with gNMIclient(target=('unix:///opt/srlinux/var/run/sr_gnmi_server',57400),
username="admin",password="admin",
insecure=True, debug=False) as c:
telemetry_stream = c.subscribe(subscribe=subscribe)
for m in telemetry_stream:
try:
if m.HasField('update'): # both update and delete events
# Filter out only toplevel events
parsed = telemetryParser(m)
logging.info(f"gNMI change event :: {parsed}")
update = parsed['update']
if update['update']:
path = update['update'][0]['path'] # Only look at top level
neighbor = _bgp.match( path )
if neighbor:
net_inst = neighbor.groups()[0]
ip_prefix = neighbor.groups()[1] # plain ip
peer_type = "static"
logging.info(f"Got neighbor change event :: {ip_prefix}")
else:
dyn_group = _dyn.match( path )
if dyn_group:
net_inst = dyn_group.groups()[0]
ip_prefix = dyn_group.groups()[1] # ip/prefix
peer_type = "dynamic"
logging.info(f"Got dynamic-neighbor change event :: {ip_prefix}")
else:
logging.info(f"Ignoring gNMI change event :: {path}")
continue
# No-op if already exists
Add_ACL(c,ip_prefix.split('/'),net_inst,peer_type)
else: # pygnmi does not provide 'path' for delete events
handleDelete(c,m)
except Exception as e:
traceback_str = ''.join(traceback.format_tb(e.__traceback__))
logging.error(f'Exception caught in gNMI :: {e} m={m} stack:{traceback_str}')
logging.info("Leaving BGP event loop")
def handleDelete(gnmi,m):
logging.info(f"handleDelete :: {m}")
for e in m.update.delete:
for p in e.elem:
# TODO dynamic-neighbors, also modify of prefix in dynamic-neighbors
if p.name == "neighbor":
for n,v in p.key.items():
logging.info(f"n={n} v={v}")
if n=="peer-address":
peer_ip = v
Remove_ACL(gnmi,peer_ip)
return # XXX could be multiple peers deleted in 1 go?
#
# Checks if this is an IPv4 or IPv6 address, and normalizes host prefixes
#
def checkIP( ip_prefix ):
try:
v = 4 if type(ip_address(ip_prefix[0])) is IPv4Address else 6
prefix = ip_prefix[1] if len(ip_prefix)>1 else ('32' if v==4 else '128')
return v, ip_prefix[0], prefix
except ValueError:
return None
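# Illustrative examples (added, not in the original source):
#   checkIP(['10.1.1.1'])         -> (4, '10.1.1.1', '32')     # host address, /32 assumed
#   checkIP(['192.0.2.0', '24'])  -> (4, '192.0.2.0', '24')
#   checkIP(['2001:db8::1'])      -> (6, '2001:db8::1', '128')  # host address, /128 assumed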
def Add_Telemetry(js_path, dict):
telemetry_stub = telemetry_service_pb2_grpc.SdkMgrTelemetryServiceStub(channel)
telemetry_update_request = telemetry_service_pb2.TelemetryUpdateRequest()
telemetry_info = telemetry_update_request.state.add()
telemetry_info.key.js_path = js_path
telemetry_info.data.json_content = json.dumps(dict)
logging.info(f"Telemetry_Update_Request :: {telemetry_update_request}")
telemetry_response = telemetry_stub.TelemetryAddOrUpdate(request=telemetry_update_request, metadata=metadata)
logging.info(f"TelemetryAddOrUpdate response:{telemetry_response}")
return telemetry_response
def Update_ACL_Counter(delta):
global acl_count
acl_count += delta
_ts = datetime.now().strftime("%Y-%m-%dT%H:%M:%SZ")
Add_Telemetry( ".bgp_acl_agent", { "acl_count" : acl_count,
"last_change" : _ts } )
def Add_ACL(gnmi,ip_prefix,net_inst,peer_type):
seq, next_seq, v, ip, prefix = Find_ACL_entry(gnmi,ip_prefix) # Also returns next available entry
if seq is None:
acl_entry = {
"created-by-bgp-acl-agent": datetime.now().strftime("%Y-%m-%d %H:%M:%S UTC"),
"description": f"BGP ({peer_type}) peer in network-instance {net_inst}",
"match": {
("protocol" if v==4 else "next-header"): "tcp",
"source-ip": { "prefix": ip + '/' + prefix },
"destination-port": { "operator": "eq", "value": 179 }
},
"action": { "accept": { } },
}
path = f'/acl/cpm-filter/ipv{v}-filter/entry[sequence-id={next_seq}]'
logging.info(f"Update: {path}={acl_entry}")
gnmi.set( encoding='json_ietf', update=[(path,acl_entry)] )
# Need to set state separately, not via gNMI. Uses underscores in path
# Tried extending ACL entries, but system won't accept these updates
# js_path = (f'.acl.cpm_filter.ipv{v}_filter.entry' +
# '{.sequence_id==' + str(next_seq) + '}.bgp_acl_agent_state')
# js_path = '.bgp_acl_agent.entry{.ip=="'+peer_ip+'"}'
# Add_Telemetry( js_path, { "sequence_id": next_seq } )
Update_ACL_Counter( +1 )
def Remove_ACL(gnmi,peer_ip):
seq, next_seq, v, ip, prefix = Find_ACL_entry(gnmi,[peer_ip])
if seq is not None:
logging.info(f"Remove_ACL: Deleting ACL entry :: {seq}")
path = f'/acl/cpm-filter/ipv{v}-filter/entry[sequence-id={seq}]'
gnmi.set( encoding='json_ietf', delete=[path] )
Update_ACL_Counter( -1 )
else:
logging.info(f"Remove_ACL: No entry found for peer_ip={peer_ip}")
#
# Because it is possible that ACL entries get saved to 'startup', the agent may
# not have a full map of sequence number to peer_ip. Therefore, we perform a
# lookup based on IP address each time
# Since 'prefix' is not a key, we have to loop through all entries with a prefix
#
def Find_ACL_entry(gnmi,ip_prefix):
v, ip, prefix = checkIP( ip_prefix )
#
# Can do it like this and add custom state, but then we cannot find the next
# available sequence number we can use
# path = f"/bgp-acl-agent/entry[ip={peer_ip}]"
path = f'/acl/cpm-filter/ipv{v}-filter/entry/match/'
# could add /source-ip/prefix but then we cannot check for dest-port
# Could filter like this to reduce #entries, limits to max 999 entries
# path = '/acl/cpm-filter/ipv4-filter/entry[sequence-id=1*]/match
# Interestingly, datatype='config' is required to see custom config state
# The default datatype='all' does not show it
acl_entries = gnmi.get( encoding='json_ietf', path=[path] )
logging.info(f"Find_ACL_entry({ip_prefix}): GOT GET response :: {acl_entries}")
searched = ip + '/' + prefix
next_seq = acl_sequence_start
for e in acl_entries['notification']:
try:
if 'update' in e:
logging.info(f"GOT Update :: {e['update']}")
for u in e['update']:
for j in u['val']['entry']:
logging.info(f"Check ACL entry :: {j}")
match = j['match']
# Users could change acl_sequence_start
if 'source-ip' in match: # and j['sequence-id'] >= acl_sequence_start:
src_ip = match['source-ip']
if 'prefix' in src_ip:
if (src_ip['prefix'] == searched):
logging.info(f"Find_ACL_entry: Found matching entry :: {j}")
# Perform extra sanity check
if ('destination-port' in match
and 'value' in match['destination-port']
and match['destination-port']['value'] == 179):
return (j['sequence-id'],None,v,ip,prefix)
else:
logging.info( "Source IP match but not BGP port" )
if j['sequence-id']==next_seq:
logging.info( f"Increment next_seq (={next_seq})" )
next_seq += 1
else:
logging.info( "No source-ip in entry" )
except Exception as e:
logging.error(f'Exception caught in Find_ACL_entry :: {e}')
logging.info(f"Find_ACL_entry: no match for searched={searched} next_seq={next_seq}")
return (None,next_seq,v,ip,prefix)
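# Added note: Find_ACL_entry returns (sequence_id, None, version, ip, prefix) when a
# matching BGP ACL entry already exists, and (None, next_free_sequence_id, version, ip, prefix)
# when it does not, so callers can either delete the existing entry or create a new one.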
##################################################################################################
## This is the main proc where all processing for the bgp_acl_agent starts.
## Agent registration, notification registration, subscription to notifications.
## Waits on the subscribed notifications; once any config is received, handles that config.
## If there are critical errors, unregisters the agent gracefully.
##################################################################################################
def Run():
sub_stub = sdk_service_pb2_grpc.SdkNotificationServiceStub(channel)
response = stub.AgentRegister(request=sdk_service_pb2.AgentRegistrationRequest(), metadata=metadata)
logging.info(f"Registration response : {response.status}")
request=sdk_service_pb2.NotificationRegisterRequest(op=sdk_service_pb2.NotificationRegisterRequest.Create)
create_subscription_response = stub.NotificationRegister(request=request, metadata=metadata)
stream_id = create_subscription_response.stream_id
logging.info(f"Create subscription response received. stream_id : {stream_id}")
Subscribe_Notifications(stream_id)
stream_request = sdk_service_pb2.NotificationStreamRequest(stream_id=stream_id)
stream_response = sub_stub.NotificationStream(stream_request, metadata=metadata)
# Gnmi_subscribe_bgp_changes()
executor = ThreadPoolExecutor(max_workers=1)
executor.submit(Gnmi_subscribe_bgp_changes)
try:
for r in stream_response:
logging.info(f"NOTIFICATION:: \n{r.notification}")
for obj in r.notification:
Handle_Notification(obj)
except grpc._channel._Rendezvous as err:
logging.info(f'GOING TO EXIT NOW: {err}')
except Exception as e:
logging.error(f'Exception caught :: {e}')
#if file_name != None:
# Update_Result(file_name, action='delete')
try:
response = stub.AgentUnRegister(request=sdk_service_pb2.AgentRegistrationRequest(), metadata=metadata)
logging.error(f'Run try: Unregister response:: {response}')
except grpc._channel._Rendezvous as err:
logging.info(f'GOING TO EXIT NOW: {err}')
sys.exit()
return True
sys.exit()
return True
############################################################
## Gracefully handle SIGTERM signal
## When called, will unregister Agent and gracefully exit
############################################################
def Exit_Gracefully(signum, frame):
logging.info("Caught signal :: {}\n will unregister bgp acl agent".format(signum))
try:
response=stub.AgentUnRegister(request=sdk_service_pb2.AgentRegistrationRequest(), metadata=metadata)
logging.error('try: Unregister response:: {}'.format(response))
sys.exit()
except grpc._channel._Rendezvous as err:
logging.info('GOING TO EXIT NOW: {}'.format(err))
sys.exit()
##################################################################################################
## Main from where the Agent starts
## Log file is written to: /var/log/srlinux/stdout/bgp_acl_agent.log
## Signals handled for graceful exit: SIGTERM
##################################################################################################
if __name__ == '__main__':
# hostname = socket.gethostname()
stdout_dir = '/var/log/srlinux/stdout' # PyTEnv.SRL_STDOUT_DIR
signal.signal(signal.SIGTERM, Exit_Gracefully)
if not os.path.exists(stdout_dir):
os.makedirs(stdout_dir, exist_ok=True)
log_filename = f'{stdout_dir}/{agent_name}.log'
logging.basicConfig(
handlers=[RotatingFileHandler(log_filename, maxBytes=3000000,backupCount=5)],
format='%(asctime)s,%(msecs)03d %(name)s %(levelname)s %(message)s',
datefmt='%H:%M:%S', level=logging.INFO)
logging.info("START TIME :: {}".format(datetime.now()))
if Run():
logging.info('Agent unregistered')
else:
logging.info('Should not happen')
| 46.614796
| 150
| 0.591747
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 8,594
| 0.470311
|
fb7b00cf08bb11ddac21a6f98e99ab8e31ed948a
| 1,003
|
py
|
Python
|
tests/test_35_cfgrib_.py
|
shoyer/cfgrib
|
fe11a1b638b1779e51da87eaa30f1f12b2d0911c
|
[
"Apache-2.0"
] | null | null | null |
tests/test_35_cfgrib_.py
|
shoyer/cfgrib
|
fe11a1b638b1779e51da87eaa30f1f12b2d0911c
|
[
"Apache-2.0"
] | null | null | null |
tests/test_35_cfgrib_.py
|
shoyer/cfgrib
|
fe11a1b638b1779e51da87eaa30f1f12b2d0911c
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import absolute_import, division, print_function, unicode_literals
import os.path
import pytest
xr = pytest.importorskip('xarray') # noqa
from cfgrib import cfgrib_
SAMPLE_DATA_FOLDER = os.path.join(os.path.dirname(__file__), 'sample-data')
TEST_DATA = os.path.join(SAMPLE_DATA_FOLDER, 'era5-levels-members.grib')
def test_CfGribDataStore():
datastore = cfgrib_.CfGribDataStore(TEST_DATA, encode_cf=())
expected = {'number': 10, 'dataDate': 2, 'dataTime': 2, 'level': 2, 'values': 7320}
assert datastore.get_dimensions() == expected
def test_xarray_open_dataset():
datastore = cfgrib_.CfGribDataStore(TEST_DATA, encode_cf=(), lock=cfgrib_.SerializableLock())
res = xr.open_dataset(datastore)
assert res.attrs['GRIB_edition'] == 1
assert res['t'].attrs['GRIB_gridType'] == 'regular_ll'
assert res['t'].attrs['GRIB_units'] == 'K'
assert res['t'].dims == ('number', 'dataDate', 'dataTime', 'level', 'values')
assert res['t'].mean() > 0.
| 31.34375
| 97
| 0.705882
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 207
| 0.206381
|
fb7b2416a898fcaefa5788048c29968341cc3540
| 4,578
|
py
|
Python
|
apps/stream_ty_gn_threaded/camera_processor.py
|
MichelleLau/ncappzoo
|
d222058e9bf16fbfa2670ff686d11fe521a976e0
|
[
"MIT"
] | 1
|
2018-11-23T01:48:59.000Z
|
2018-11-23T01:48:59.000Z
|
apps/stream_ty_gn_threaded/camera_processor.py
|
MichelleLau/ncappzoo
|
d222058e9bf16fbfa2670ff686d11fe521a976e0
|
[
"MIT"
] | 1
|
2018-03-25T19:36:41.000Z
|
2018-03-25T19:53:27.000Z
|
apps/stream_ty_gn_threaded/camera_processor.py
|
MichelleLau/ncappzoo
|
d222058e9bf16fbfa2670ff686d11fe521a976e0
|
[
"MIT"
] | 1
|
2020-10-01T15:38:04.000Z
|
2020-10-01T15:38:04.000Z
|
#! /usr/bin/env python3
# Copyright(c) 2017 Intel Corporation.
# License: MIT See LICENSE file in root directory.
# NPS
# Pulls images from the camera device and places them in a Queue.
# If the queue is full, it will start to skip camera frames.
#import numpy as np
import cv2
import queue
import threading
import time
class camera_processor:
# initializer for the class
# Parameters:
# output_queue is an instance of queue.Queue in which the camera
# images will be placed
# queue_put_wait_max is the max number of seconds to wait when putting
# images into the output queue.
# camera_index is the index of the camera in the system. if only one camera
# it will typically be index 0
# request_video_width is the width to request for the camera stream
# request_video_height is the height to request for the camera stream
# queue_full_sleep_seconds is the number of seconds to sleep when the
# output queue is full.
def __init__(self, output_queue, queue_put_wait_max = 0.01, camera_index = 0,
request_video_width=640, request_video_height = 480,
queue_full_sleep_seconds = 0.1):
self._queue_full_sleep_seconds = queue_full_sleep_seconds
self._queue_put_wait_max = queue_put_wait_max
self._camera_index = camera_index
self._request_video_width = request_video_width
self._request_video_height = request_video_height
# create the camera device
self._camera_device = cv2.VideoCapture(self._camera_index)
if ((self._camera_device == None) or (not self._camera_device.isOpened())):
print('\n\n')
print('Error - could not open camera. Make sure it is plugged in.')
print('Also, if you installed python opencv via pip or pip3 you')
print('need to uninstall it and install from source with -D WITH_V4L=ON')
print('Use the provided script: install-opencv-from_source.sh')
print('\n\n')
return
# Request the dimensions
self._camera_device.set(cv2.CAP_PROP_FRAME_WIDTH, self._request_video_width)
self._camera_device.set(cv2.CAP_PROP_FRAME_HEIGHT, self._request_video_height)
# save the actual dimensions
self._actual_camera_width = self._camera_device.get(cv2.CAP_PROP_FRAME_WIDTH)
self._actual_camera_height = self._camera_device.get(cv2.CAP_PROP_FRAME_HEIGHT)
print('actual camera resolution: ' + str(self._actual_camera_width) + ' x ' + str(self._actual_camera_height))
self._output_queue = output_queue
self._worker_thread = threading.Thread(target=self._do_work, args=())
# the width of the images that will be put in the queue
def get_actual_camera_width(self):
return self._actual_camera_width
# the height of the images that will be put in the queue
def get_actual_camera_height(self):
return self._actual_camera_height
# start reading from the camera and placing images in the output queue
def start_processing(self):
self._end_flag = False
self._worker_thread.start()
# stop reading from camera and placing images in the output queue
def stop_processing(self):
self._end_flag = True
self._worker_thread.join()
    # Thread target. When start_processing is called, this function runs
    # in its own thread. It keeps working until stop_processing is called
    # or an error is encountered.
def _do_work(self):
print('in camera_processor worker thread')
if (self._camera_device == None):
print('camera_processor camera_device is None, returning.')
return
while (not self._end_flag):
try:
ret_val, input_image = self._camera_device.read()
if (not ret_val):
print("No image from camera, exiting")
break
self._output_queue.put(input_image, True, self._queue_put_wait_max)
except queue.Full:
# the camera is probably way faster than the processing
# so if our output queue is full sleep a little while before
# trying the next image from the camera.
time.sleep(self._queue_full_sleep_seconds)
print('exiting camera_processor worker thread')
# should be called once for each class instance when finished with it.
def cleanup(self):
# close camera
self._camera_device.release()
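# Minimal usage sketch (added illustration, not part of the original module; assumes a
# working camera at index 0 and an OpenCV build with V4L support as described above):
if __name__ == '__main__':
    frame_queue = queue.Queue(maxsize=10)
    cam = camera_processor(frame_queue)
    cam.start_processing()
    time.sleep(1.0)  # let a few frames accumulate in the queue
    cam.stop_processing()
    cam.cleanup()
    print('frames captured: ' + str(frame_queue.qsize()))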
| 42
| 118
| 0.676933
| 4,262
| 0.930974
| 0
| 0
| 0
| 0
| 0
| 0
| 2,033
| 0.44408
|
fb7bbc55fb1c9399f6c93d62c5a66100a843787f
| 175
|
py
|
Python
|
examples/domainby.py
|
ipfinder/ip-finder-python
|
48ba093801d244c12a4583c138d62c94355baf28
|
[
"Apache-2.0"
] | 8
|
2019-07-12T22:20:49.000Z
|
2022-03-01T09:03:58.000Z
|
examples/domainby.py
|
ipfinder/ip-finder-python
|
48ba093801d244c12a4583c138d62c94355baf28
|
[
"Apache-2.0"
] | 2
|
2019-08-29T23:24:57.000Z
|
2021-02-01T15:15:16.000Z
|
examples/domainby.py
|
ipfinder/ip-finder-python
|
48ba093801d244c12a4583c138d62c94355baf28
|
[
"Apache-2.0"
] | 5
|
2019-07-12T23:01:03.000Z
|
2021-07-07T11:11:44.000Z
|
import ipfinder
con = ipfinder.config('f67f788f8a02a188ec84502e0dff066ed4413a85') # YOUR_TOKEN_GOES_HERE
# domain name
by = 'DZ';
dby = con.getDomainBy(by)
print(dby.all)
| 15.909091
| 88
| 0.771429
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 81
| 0.462857
|
fb7be7756110402e4a2ea628f2c6bc51fd0dd0f4
| 139
|
py
|
Python
|
manager.py
|
thangbk2209/pretraining_auto_scaling_ng
|
0b98b311c75ec4b87b3e8168f93eeb53ed0d16f5
|
[
"MIT"
] | null | null | null |
manager.py
|
thangbk2209/pretraining_auto_scaling_ng
|
0b98b311c75ec4b87b3e8168f93eeb53ed0d16f5
|
[
"MIT"
] | null | null | null |
manager.py
|
thangbk2209/pretraining_auto_scaling_ng
|
0b98b311c75ec4b87b3e8168f93eeb53ed0d16f5
|
[
"MIT"
] | null | null | null |
"""
Author: bkc@data_analysis
Project: autoencoder_ng
Created: 7/29/20 10:51
Purpose: START SCRIPT FOR AUTOENCODER_NG PROJECT
"""
| 19.857143
| 50
| 0.726619
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 138
| 0.992806
|
fb7dc85f21a97ece3e0b036a3c4e6d6962f9001a
| 49
|
py
|
Python
|
netvisor_api_client/schemas/sales_payments/__init__.py
|
kiuru/netvisor-api-client
|
2af3e4ca400497ace5a86d0a1807ec3b9c530cf4
|
[
"MIT"
] | 5
|
2019-04-17T08:10:47.000Z
|
2021-11-27T12:26:15.000Z
|
netvisor_api_client/schemas/sales_payments/__init__.py
|
kiuru/netvisor-api-client
|
2af3e4ca400497ace5a86d0a1807ec3b9c530cf4
|
[
"MIT"
] | 7
|
2019-06-25T17:02:50.000Z
|
2021-07-21T10:14:38.000Z
|
netvisor_api_client/schemas/sales_payments/__init__.py
|
kiuru/netvisor-api-client
|
2af3e4ca400497ace5a86d0a1807ec3b9c530cf4
|
[
"MIT"
] | 10
|
2019-06-25T15:37:33.000Z
|
2021-10-16T19:40:37.000Z
|
from .list import SalesPaymentListSchema # noqa
| 24.5
| 48
| 0.816327
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 6
| 0.122449
|
fb7ec52a5b793917a80604774ec9ccdc87a89f1d
| 858
|
py
|
Python
|
derive_cubic.py
|
vuonglv1612/page-dewarp
|
68063db040ba97964a22f68a6056467dacd2952f
|
[
"MIT"
] | 9
|
2021-05-15T21:18:03.000Z
|
2022-03-31T16:56:36.000Z
|
derive_cubic.py
|
vuonglv1612/page-dewarp
|
68063db040ba97964a22f68a6056467dacd2952f
|
[
"MIT"
] | 5
|
2021-04-23T17:59:23.000Z
|
2021-05-23T17:03:40.000Z
|
derive_cubic.py
|
vuonglv1612/page-dewarp
|
68063db040ba97964a22f68a6056467dacd2952f
|
[
"MIT"
] | 3
|
2022-02-22T12:09:49.000Z
|
2022-03-16T21:33:49.000Z
|
import numpy as np
from matplotlib import pyplot as plt
from sympy import symbols, solve
a, b, c, d, x, α, β = symbols("a b c d x α β")
# polynomial function f(x) = ax³ + bx² + cx + d
f = a * x ** 3 + b * x ** 2 + c * x + d
fp = f.diff(x) # derivative f'(x)
# evaluate both at x=0 and x=1
f0, f1 = [f.subs(x, i) for i in range(2)]
fp0, fp1 = [fp.subs(x, i) for i in range(2)]
# we want a, b, c, d such that the following conditions hold:
#
# f(0) = 0
# f(1) = 0
# f'(0) = α
# f'(1) = β
S = solve([f0, f1, fp0 - α, fp1 - β], [a, b, c, d])
# print the analytic solution and plot a graphical example
coeffs = []
num_α = 0.3
num_β = -0.03
for key in [a, b, c, d]:
print(key, "=", S[key])
coeffs.append(S[key].subs(dict(α=num_α, β=num_β)))
xvals = np.linspace(0, 1, 101)
yvals = np.polyval(coeffs, xvals)
plt.plot(xvals, yvals)
plt.show()
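# Optional numeric sanity check (added illustration): with the symbolic solution above,
# the cubic should vanish at x=0 and x=1 and have slopes num_α and num_β there.
float_coeffs = [float(c_) for c_ in coeffs]
deriv_coeffs = np.polyder(float_coeffs)
assert abs(np.polyval(float_coeffs, 0.0)) < 1e-12
assert abs(np.polyval(float_coeffs, 1.0)) < 1e-12
assert abs(np.polyval(deriv_coeffs, 0.0) - num_α) < 1e-12
assert abs(np.polyval(deriv_coeffs, 1.0) - num_β) < 1e-12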
| 21.45
| 61
| 0.581585
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 285
| 0.326087
|
fb80aeb666b891b4a2d73d6188ae90784a764de1
| 13,614
|
py
|
Python
|
test/test_admin.py
|
image72/browserscope
|
44a63558ee376704d996851099bc7703128201cc
|
[
"Apache-2.0"
] | 22
|
2015-10-26T15:20:37.000Z
|
2022-03-11T06:38:17.000Z
|
test/test_admin.py
|
image72/browserscope
|
44a63558ee376704d996851099bc7703128201cc
|
[
"Apache-2.0"
] | 10
|
2016-01-22T18:46:19.000Z
|
2019-07-19T12:49:51.000Z
|
test/test_admin.py
|
mcauer/browserscope
|
a9c0e1a250774f14689e06f93ad274d0b9d725e4
|
[
"Apache-2.0"
] | 12
|
2015-10-17T09:40:44.000Z
|
2019-06-08T19:54:36.000Z
|
#!/usr/bin/python2.5
#
# Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the 'License')
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test admin_rankers."""
__author__ = 'slamm@google.com (Stephen Lamm)'
import datetime
import logging
import unittest
import mock_data
import settings
from django.test.client import Client
from django.utils import simplejson
from google.appengine.api import memcache
from google.appengine.ext import db
from categories import all_test_sets
from models import result_stats
from models.result import ResultParent
from models.result import ResultTime
from models.user_agent import UserAgent
from third_party import mox
from base import admin
USER_AGENT_STRINGS = {
'Firefox 3.0.6': ('Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.6) '
'Gecko/2009011912 Firefox/3.0.6'),
'Firefox 3.5': ('Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.6) '
'Gecko/2009011912 Firefox/3.5'),
'Firefox 3.0.9': ('Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.6) '
'Gecko/2009011912 Firefox/3.0.9'),
}
class TestConfirmUa(unittest.TestCase):
def setUp(self):
self.client = Client()
ua_string = ('Mozilla/5.0 (X11 U Linux armv6l de-DE rv:1.9a6pre) '
'Gecko/20080606 '
'Firefox/3.0a1 Tablet browser 0.3.7 '
'RX-34+RX-44+RX-48_DIABLO_4.2008.23-14')
self.ua = UserAgent.factory(ua_string)
def testConfirmBasic(self):
params = {
'submit': 1,
'ac_%s' % self.ua.key(): 'confirm',
'cht_%s' % self.ua.key(): '',
'csrf_token': self.client.get('/get_csrf').content,
}
response = self.client.get('/admin/confirm-ua', params)
self.assertEqual(302, response.status_code)
self.assertTrue(self.ua.get(self.ua.key()).confirmed)
class TestDataDump(unittest.TestCase):
def setUp(self):
self.test_set = mock_data.MockTestSet()
all_test_sets.AddTestSet(self.test_set)
self.client = Client()
def tearDown(self):
all_test_sets.RemoveTestSet(self.test_set)
def testNoParamsGivesError(self):
params = {}
response = self.client.get('/admin/data_dump', params)
self.assertEqual(400, response.status_code)
def testNoModelGivesError(self):
params = {'keys': 'car,home,heart'}
response = self.client.get('/admin/data_dump', params)
self.assertEqual(400, response.status_code)
def testNonExistentKeyIsMarkedLost(self):
for model in ('ResultParent', 'UserAgent'):
params = {
'keys': 'agt1YS1wcm9maWxlcnIRCxIJVXNlckFnZW50GN6JIgw',
'model': model}
response = self.client.get('/admin/data_dump', params)
self.assertEqual(200, response.status_code)
response_params = simplejson.loads(response.content)
expected_data = [{
'model_class': model,
'lost_key': 'agt1YS1wcm9maWxlcnIRCxIJVXNlckFnZW50GN6JIgw',
}]
self.assertEqual(expected_data, response_params['data'])
def testDumpAll(self):
keys = []
for scores in ((1, 4, 50), (1, 1, 20), (0, 2, 30), (1, 0, 10), (1, 3, 10)):
result = ResultParent.AddResult(
self.test_set, '1.2.2.5', mock_data.GetUserAgentString('Firefox 3.5'),
'apple=%s,banana=%s,coconut=%s' % scores)
keys.append(str(result.key()))
params = {
'model': 'ResultParent',
'keys': ','.join(keys),
}
response = self.client.get('/admin/data_dump', params)
self.assertEqual(200, response.status_code)
response_params = simplejson.loads(response.content)
self.assertEqual(20, len(response_params['data'])) # 5 parents + 15 times
class TestDataDumpKeys(unittest.TestCase):
def setUp(self):
self.test_set = mock_data.MockTestSet()
all_test_sets.AddTestSet(self.test_set)
self.client = Client()
def tearDown(self):
all_test_sets.RemoveTestSet(self.test_set)
def testCreated(self):
created_base = datetime.datetime(2009, 9, 9, 9, 9, 0)
keys = []
for scores in ((0, 10, 100), (1, 20, 200)):
ip = '1.2.2.%s' % scores[1]
result = ResultParent.AddResult(
self.test_set, ip, mock_data.GetUserAgentString('Firefox 3.5'),
'apple=%s,banana=%s,coconut=%s' % scores,
created=created_base + datetime.timedelta(seconds=scores[1]))
keys.append(str(result.key()))
params = {
'model': 'ResultParent',
'created': created_base + datetime.timedelta(seconds=15),
}
response = self.client.get('/admin/data_dump_keys', params)
self.assertEqual(200, response.status_code)
response_params = simplejson.loads(response.content)
self.assertEqual(None, response_params['bookmark'])
self.assertEqual(keys[1:], response_params['keys'])
def testBookmarkRestart(self):
expected_keys = []
for scores in ((1, 4, 50), (1, 1, 20), (0, 2, 30), (1, 0, 10), (1, 3, 10)):
result = ResultParent.AddResult(
self.test_set, '1.2.2.5', mock_data.GetUserAgentString('Firefox 3.5'),
'apple=%s,banana=%s,coconut=%s' % scores)
expected_keys.append(str(result.key()))
params = {
'model': 'ResultParent',
'fetch_limit': '3'
}
response = self.client.get('/admin/data_dump_keys', params)
keys = []
self.assertEqual(200, response.status_code)
response_params = simplejson.loads(response.content)
self.assertNotEqual(None, response_params['bookmark'])
keys.extend(response_params['keys'])
self.assertEqual(3, len(keys))
del response_params['keys']
response = self.client.get('/admin/data_dump_keys', response_params)
self.assertEqual(200, response.status_code)
response_params = simplejson.loads(response.content)
self.assertEqual(None, response_params['bookmark'])
keys.extend(response_params['keys'])
self.assertEqual(sorted(expected_keys), sorted(keys))
class TestUploadCategoryBrowsers(unittest.TestCase):
def setUp(self):
self.client = Client()
self.manager = result_stats.CategoryBrowserManager
self.mox = mox.Mox()
def tearDown(self):
self.mox.UnsetStubs()
def testNoBrowsersGivesError(self):
params = {}
response = self.client.get('/admin/upload_category_browsers', params)
self.assertTrue('Must set "browsers"' in response.content)
self.assertEqual(500, response.status_code)
def testNoCategoryGivesError(self):
params = {
'version_level': 4,
'browsers': 'Firefox,IE',
}
response = self.client.get('/admin/upload_category_browsers', params)
self.assertEqual('Must set "category".', response.content)
self.assertEqual(500, response.status_code)
def testBadVersionLevelGivesError(self):
params = {
'category': 'network',
'version_level': 4,
'browsers': 'Firefox,IE',
}
response = self.client.get('/admin/upload_category_browsers', params)
self.assertTrue('Version level' in response.content)
self.assertEqual(500, response.status_code)
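  # Note (added comment): the method below re-uses the name testNoBrowsersGivesError,
  # so it shadows the definition above; only this second version is collected and
  # run by unittest.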
def testNoBrowsersGivesError(self):
params = {
'category': 'network',
'version_level': 0,
}
response = self.client.get('/admin/upload_category_browsers', params)
self.assertTrue('Must set "browsers"' in response.content)
self.assertEqual(500, response.status_code)
def testBasic(self):
self.mox.StubOutWithMock(
result_stats.CategoryBrowserManager, 'UpdateSummaryBrowsers')
categories = [ts.category for ts in all_test_sets.GetVisibleTestSets()]
# Use mox to make sure UpdateSummaryBrowsers gets called.
result_stats.CategoryBrowserManager.UpdateSummaryBrowsers(categories)
self.mox.ReplayAll()
params = {
'category': 'network',
'version_level': 0,
'browsers': 'IE,Firefox',
}
response = self.client.get('/admin/upload_category_browsers', params)
self.assertEqual('Success.', response.content)
self.assertEqual(200, response.status_code)
self.assertEqual(['Firefox', 'IE'], self.manager.GetBrowsers('network', 0))
self.mox.VerifyAll()
class TestUpdateStatsCache(unittest.TestCase):
def setUp(self):
self.client = Client()
self.mox = mox.Mox()
self.manager = result_stats.CategoryStatsManager
def tearDown(self):
self.mox.UnsetStubs()
def testNoCategoryGivesError(self):
params = {
'browsers': 'Firefox,IE',
}
response = self.client.get('/admin/update_stats_cache', params)
self.assertEqual('Must set "category".', response.content)
self.assertEqual(500, response.status_code)
def testNoBrowsersGivesError(self):
params = {
'category': 'network',
}
response = self.client.get('/admin/update_stats_cache', params)
self.assertTrue('Must set "browsers"' in response.content)
self.assertEqual(500, response.status_code)
def testBasic(self):
self.mox.StubOutWithMock(self.manager, 'UpdateStatsCache')
self.manager.UpdateStatsCache('network', ['IE']).InAnyOrder()
self.manager.UpdateStatsCache('network', ['Firefox']).InAnyOrder()
params = {
'category': 'network',
'browsers': 'IE,Firefox',
}
self.mox.ReplayAll()
response = self.client.get('/admin/update_stats_cache', params)
self.mox.VerifyAll()
self.assertEqual('Success.', response.content)
self.assertEqual(200, response.status_code)
class TestUpdateAllUncachedStats(unittest.TestCase):
def setUp(self):
self.test_set_1 = mock_data.MockTestSet(category='foo')
self.test_set_2 = mock_data.MockTestSet(category='bar')
all_test_sets.AddTestSet(self.test_set_1)
all_test_sets.AddTestSet(self.test_set_2)
self.client = Client()
def tearDown(self):
all_test_sets.RemoveTestSet(self.test_set_1)
all_test_sets.RemoveTestSet(self.test_set_2)
def testUpdateAllUncachedStats(self):
category_browsers = {
self.test_set_1: ('Firefox 2.5.1', 'Firefox 3.0.7', 'Firefox 3.1.7',
'Firefox 3.1.8', 'Firefox 3.5', 'IE 7.0'),
self.test_set_2: ('Firefox 2.5.1', 'Firefox 3.5', 'IE 7.0'),
}
for test_set, browsers in category_browsers.items():
for browser in browsers:
ua = mock_data.GetUserAgentString(browser)
result = ResultParent.AddResult(
test_set, '1.2.2.5', ua, 'apple=1,banana=1,coconut=1')
params = {'categories': 'foo,bar'}
response = self.client.get('/admin/update_all_uncached_stats', params)
# Instead of checking actual stats, I tried to mock out UpdateStatsCache
# as is done in testBasic. However, it did not work for some unknown reason.
# it would not verify the calls. VerifyAll succeeded no matter what I called
# or did not call. Grrr.
expected_stats = {
'summary_display': '3',
'total_runs': 5,
'summary_score': 104,
'results': {
'apple': {'score': 100, 'raw_score': 1, 'display': 'yes'},
'banana': {'score': 2, 'raw_score': 1, 'display': 'd:2'},
'coconut': {'score': 2, 'raw_score': 1, 'display': 'd:2'},
}
}
self.assertEqual(
expected_stats,
memcache.get('Firefox',
**result_stats.CategoryStatsManager.MemcacheParams('foo')))
class TestUpdateAllStatsCache(unittest.TestCase):
def setUp(self):
self.test_set_1 = mock_data.MockTestSet(category='foo')
self.test_set_2 = mock_data.MockTestSet(category='bar')
all_test_sets.AddTestSet(self.test_set_1)
all_test_sets.AddTestSet(self.test_set_2)
self.client = Client()
def tearDown(self):
all_test_sets.RemoveTestSet(self.test_set_1)
all_test_sets.RemoveTestSet(self.test_set_2)
def testUpdateAllStatsCache(self):
category_browsers = {
self.test_set_1: ('Firefox 2.5.1', 'Firefox 3.0.7', 'Firefox 3.1.7',
'Firefox 3.1.8', 'Firefox 3.5', 'IE 7.0'),
self.test_set_2: ('Firefox 2.5.1', 'Firefox 3.5', 'IE 7.0'),
}
for test_set, browsers in category_browsers.items():
for browser in browsers:
ua = mock_data.GetUserAgentString(browser)
result = ResultParent.AddResult(
test_set, '1.2.2.5', ua, 'apple=1,banana=1,coconut=1')
params = {'categories': 'foo,bar'}
response = self.client.get('/admin/update_all_stats_cache', params)
# Instead of checking actual stats, I tried to mock out UpdateStatsCache
# as is done in testBasic. However, it did not work for some unknown reason.
# it would not verify the calls. VerifyAll succeeded no matter what I called
# or did not call. Grrr.
expected_stats = {
'summary_display': '3',
'total_runs': 5,
'summary_score': 104,
'results': {
'apple': {'score': 100, 'raw_score': 1, 'display': 'yes'},
'banana': {'score': 2, 'raw_score': 1, 'display': 'd:2'},
'coconut': {'score': 2, 'raw_score': 1, 'display': 'd:2'},
}
}
self.assertEqual(
expected_stats,
memcache.get('Firefox',
**result_stats.CategoryStatsManager.MemcacheParams('foo')))
| 36.40107
| 80
| 0.659027
| 12,023
| 0.883135
| 0
| 0
| 0
| 0
| 0
| 0
| 3,935
| 0.289041
|
fb8120f79917456a521cb4d10307f0c3faeada82
| 3,198
|
py
|
Python
|
ml/rl/models/example_sequence_model.py
|
ccphillippi/Horizon
|
a18d8941f663eea55488781c804e6305a36f1b58
|
[
"BSD-3-Clause"
] | 1
|
2020-07-30T06:15:20.000Z
|
2020-07-30T06:15:20.000Z
|
ml/rl/models/example_sequence_model.py
|
ccphillippi/Horizon
|
a18d8941f663eea55488781c804e6305a36f1b58
|
[
"BSD-3-Clause"
] | null | null | null |
ml/rl/models/example_sequence_model.py
|
ccphillippi/Horizon
|
a18d8941f663eea55488781c804e6305a36f1b58
|
[
"BSD-3-Clause"
] | 1
|
2019-06-05T15:52:18.000Z
|
2019-06-05T15:52:18.000Z
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import logging
from dataclasses import dataclass
from typing import Dict, List
import torch
import torch.nn as nn
from ml.rl import types as rlt
from ml.rl.models.base import ModelBase
logger = logging.getLogger(__name__)
HISTORY_LENGTH = 5
@dataclass
class VideoIDFeatures(rlt.IdFeatureBase):
page_id: rlt.ValueType
@classmethod
def get_feature_config(cls) -> Dict[str, rlt.IdFeatureConfig]:
return {"page_id": rlt.IdFeatureConfig(feature_id=2002, id_mapping_name="page")}
@dataclass
class WatchedVideoSequence(rlt.SequenceFeatureBase):
id_features: VideoIDFeatures
@classmethod
def get_max_length(cls) -> int:
return HISTORY_LENGTH
@classmethod
def get_float_feature_infos(cls) -> List[rlt.FloatFeatureInfo]:
return [
rlt.FloatFeatureInfo(name="f{}".format(f_id), feature_id=f_id)
for f_id in [1001, 1002]
]
@dataclass
class SequenceFeatures(rlt.SequenceFeatures):
"""
The whole class hierarchy can be created dynamically from config.
Another diff will show this.
"""
watched_videos: WatchedVideoSequence
@dataclass
class ExampleSequenceModelOutput:
value: torch.Tensor
class ExampleSequenceModel(ModelBase):
def __init__(self, state_dim):
super().__init__()
self.state_dim = state_dim
self.embedding_dim = 4
self.history_length = HISTORY_LENGTH
self.embedding_size = 20
self.page_embedding = nn.Embedding(self.embedding_size, self.embedding_dim)
self.hidden_size = 10
# ONNX cannot export batch_first=True
self.gru = nn.GRU(
self.embedding_dim + len(WatchedVideoSequence.get_float_feature_infos()),
self.hidden_size,
)
self.linear = nn.Linear(10 + self.state_dim, 1)
def input_prototype(self):
return rlt.StateInput(
state=rlt.FeatureVector(
float_features=torch.randn(1, self.state_dim),
sequence_features=SequenceFeatures.prototype(),
)
)
def feature_config(self):
return rlt.ModelFeatureConfig(
id_mapping_config={
"page": rlt.IdMapping(ids=list(range(100, 100 + self.embedding_size)))
},
sequence_features_type=SequenceFeatures,
)
def forward(self, state):
page_embedding = self.page_embedding(
state.state.sequence_features.watched_videos.id_features.page_id
)
gru_input = torch.cat(
(
page_embedding,
state.state.sequence_features.watched_videos.float_features,
),
dim=2,
).transpose(0, 1)
h_0 = torch.zeros(1, gru_input.shape[1], self.hidden_size)
gru_output, h_n = self.gru(gru_input, h_0)
last_gru_output = gru_output[-1, :, :]
float_features = state.state.float_features
linear_input = torch.cat((float_features, last_gru_output), dim=1)
value = self.linear(linear_input)
return ExampleSequenceModelOutput(value=value)
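# Shape walk-through for forward() above (added comment; B = batch size, T = HISTORY_LENGTH):
#   page_id ids -> page_embedding                      : [B, T, 4]
#   concat with the 2 float features per step          : [B, T, 6]
#   GRU (batch_first=False, so input is [T, B, 6]), last output slice : [B, hidden_size=10]
#   concat with state float_features [B, state_dim] -> linear         : [B, 1]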
| 29.072727
| 88
| 0.6601
| 2,797
| 0.874609
| 0
| 0
| 943
| 0.294872
| 0
| 0
| 270
| 0.084428
|
fb8171ca82d4da70cd7cdba0a82012d267002dc5
| 4,227
|
py
|
Python
|
compuG/transformacionesPCarte copia.py
|
alejoso76/Computaci-n-gr-fica
|
474a498a328b8951aa0bfa1db2d0d1f3d8cc914b
|
[
"MIT"
] | null | null | null |
compuG/transformacionesPCarte copia.py
|
alejoso76/Computaci-n-gr-fica
|
474a498a328b8951aa0bfa1db2d0d1f3d8cc914b
|
[
"MIT"
] | null | null | null |
compuG/transformacionesPCarte copia.py
|
alejoso76/Computaci-n-gr-fica
|
474a498a328b8951aa0bfa1db2d0d1f3d8cc914b
|
[
"MIT"
] | null | null | null |
import pygame
import math
# Draws a triangle and rotates it with the keyboard (left/right arrow keys)
ANCHO=600
ALTO=480
def dibujarPlano(o, pantalla):
pygame.draw.line(pantalla, [0, 255, 0], [o[0], 0], [o[0], 480] )
pygame.draw.line(pantalla, [0, 255, 0], [0, o[1]], [640, o[1]] )
def dibujarTriangulo(a, b, c, plano):
'''
pygame.draw.line(plano, [0, 255, 0], [a[0], a[1]], [b[0], b[1]] )
pygame.draw.line(plano, [0, 255, 0], [b[0], b[1]], [c[0], c[1]] )
pygame.draw.line(plano, [0, 255, 0], [c[0], c[1]], [a[0], a[1]] )
'''
pygame.draw.polygon(plano, [0, 255, 0], [a,b,c])
pygame.display.flip()
return a, b, c
def mostrarPos():
pos=pygame.mouse.get_pos()
return pos
def escalarPunto (a, factor):
x=a[0]*factor
y=a[1]*factor
return x, y
def rotacionHoraria(a):
    #x*cos + y*sin, -x*sin + y*cos
x=int(a[0]*math.cos(math.pi/2) + a[1]*math.sin(math.pi/2))
y=int(-(a[0]*math.sin(math.pi/2)) + a[1]*math.cos(math.pi/2))
return x, y
def rotacionAntiHoraria(a):
    #x*cos - y*sin, x*sin + y*cos
x=int(a[0]*math.cos(math.pi/2) - a[1]*math.sin(math.pi/2))
y=int(a[0]*math.sin(math.pi/2) + a[1]*math.cos(math.pi/2))
return x, y
def calcularPosPlano(o, pos):
x=pos[0]-o[0]
y=-1*(pos[1]-o[1])
return x, y
def calcularPosPantalla(o, p):
x=o[0]+p[0]
y=o[1]-p[1]
return x, y
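# Worked example (added comment): with origin o = [300, 240], the screen point (310, 230)
# maps to the Cartesian plane point (10, 10) via calcularPosPlano, and
# calcularPosPantalla maps (10, 10) back to the screen point (310, 230).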
if __name__ == '__main__':
pygame.init()
pantalla=pygame.display.set_mode([ANCHO, ALTO]) #Crea la ventana
o=[ANCHO/2, ALTO/2]
dibujarPlano(o, pantalla)
pygame.display.flip()
print 'Funciona'
cont=0
lista=[]
fin=False
while not fin:
for event in pygame.event.get():
if event.type == pygame.QUIT:
fin=True
if event.type == pygame.MOUSEBUTTONDOWN:
cont+=1
lista.append(mostrarPos())
if event.type == pygame.KEYDOWN:
if event.key==pygame.K_RIGHT:
pantalla.fill([0, 0, 0])
dibujarPlano(o, pantalla)
pygame.display.flip()
puntos=lista
puntos[0]=calcularPosPlano(o, puntos[0])
puntos[1]=calcularPosPlano(o, puntos[1])
puntos[2]=calcularPosPlano(o, puntos[2])
'''
print 'Puntos iniciales:'
print puntos
'''
puntos[0]=rotacionHoraria(puntos[0])
puntos[1]=rotacionHoraria(puntos[1])
puntos[2]=rotacionHoraria(puntos[2])
puntos[0]=calcularPosPantalla(o, puntos[0])
puntos[1]=calcularPosPantalla(o, puntos[1])
puntos[2]=calcularPosPantalla(o, puntos[2])
                    '''
print 'Puntos finales:'
print puntos
'''
dibujarTriangulo(puntos[0], puntos[1], puntos[2], pantalla)
if event.key==pygame.K_LEFT:
pantalla.fill([0, 0, 0])
dibujarPlano(o, pantalla)
pygame.display.flip()
puntos=lista
puntos[0]=calcularPosPlano(o, puntos[0])
puntos[1]=calcularPosPlano(o, puntos[1])
puntos[2]=calcularPosPlano(o, puntos[2])
'''
print 'Puntos iniciales:'
print puntos
'''
puntos[0]=rotacionAntiHoraria(puntos[0])
puntos[1]=rotacionAntiHoraria(puntos[1])
puntos[2]=rotacionAntiHoraria(puntos[2])
puntos[0]=calcularPosPantalla(o, puntos[0])
puntos[1]=calcularPosPantalla(o, puntos[1])
puntos[2]=calcularPosPantalla(o, puntos[2])
'''
print 'Puntos finales:'
print puntos
'''
dibujarTriangulo(puntos[0], puntos[1], puntos[2], pantalla)
if cont==3:
dibujarTriangulo(lista[0], lista[1], lista[2], pantalla)
cont=0
#lista=[]
| 32.767442
| 79
| 0.492548
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 774
| 0.183109
|
fb81ef1eb1f00a9abc20cdf31b290c4e33722c10
| 10,692
|
py
|
Python
|
eval.py
|
CLT29/pvse
|
bf5232148396ee5051564ef68a48538de0ddbc84
|
[
"MIT"
] | 119
|
2019-06-18T19:10:04.000Z
|
2022-03-25T02:24:26.000Z
|
eval.py
|
CLT29/pvse
|
bf5232148396ee5051564ef68a48538de0ddbc84
|
[
"MIT"
] | 18
|
2019-08-28T09:32:24.000Z
|
2021-09-08T15:25:01.000Z
|
eval.py
|
CLT29/pvse
|
bf5232148396ee5051564ef68a48538de0ddbc84
|
[
"MIT"
] | 19
|
2019-07-11T08:19:18.000Z
|
2022-02-07T12:59:05.000Z
|
from __future__ import print_function
import os, sys
import pickle
import time
import glob
import numpy as np
import torch
from model import PVSE
from loss import cosine_sim, order_sim
from vocab import Vocabulary
from data import get_test_loader
from logger import AverageMeter
from option import parser, verify_input_args
ORDER_BATCH_SIZE = 100
def encode_data(model, data_loader, use_gpu=False):
"""Encode all images and sentences loadable by data_loader"""
# switch to evaluate mode
model.eval()
use_mil = model.module.mil if hasattr(model, 'module') else model.mil
# numpy array to keep all the embeddings
img_embs, txt_embs = None, None
for i, data in enumerate(data_loader):
img, txt, txt_len, ids = data
if torch.cuda.is_available():
img, txt, txt_len = img.cuda(), txt.cuda(), txt_len.cuda()
# compute the embeddings
img_emb, txt_emb, _, _, _, _ = model.forward(img, txt, txt_len)
del img, txt, txt_len
# initialize the output embeddings
if img_embs is None:
if use_gpu:
emb_sz = [len(data_loader.dataset), img_emb.size(1), img_emb.size(2)] \
if use_mil else [len(data_loader.dataset), img_emb.size(1)]
img_embs = torch.zeros(emb_sz, dtype=img_emb.dtype, requires_grad=False).cuda()
txt_embs = torch.zeros(emb_sz, dtype=txt_emb.dtype, requires_grad=False).cuda()
else:
emb_sz = (len(data_loader.dataset), img_emb.size(1), img_emb.size(2)) \
if use_mil else (len(data_loader.dataset), img_emb.size(1))
img_embs = np.zeros(emb_sz)
txt_embs = np.zeros(emb_sz)
# preserve the embeddings by copying from gpu and converting to numpy
img_embs[ids] = img_emb if use_gpu else img_emb.data.cpu().numpy().copy()
txt_embs[ids] = txt_emb if use_gpu else txt_emb.data.cpu().numpy().copy()
return img_embs, txt_embs
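# Added note: the returned embeddings are 3-D ([N, K, D]) when the model uses multiple
# instance learning (use_mil), and 2-D ([N, D]) otherwise; the i2t/t2i routines below
# branch on len(shape) accordingly.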
def i2t(images, sentences, nreps=1, npts=None, return_ranks=False, order=False, use_gpu=False):
"""
Images->Text (Image Annotation)
Images: (nreps*N, K) matrix of images
Captions: (nreps*N, K) matrix of sentences
"""
if use_gpu:
assert not order, 'Order embedding not supported in GPU mode'
if npts is None:
npts = int(images.shape[0] / nreps)
index_list = []
ranks, top1 = np.zeros(npts), np.zeros(npts)
for index in range(npts):
# Get query image
im = images[nreps * index]
im = im.reshape((1,) + im.shape)
# Compute scores
if use_gpu:
if len(sentences.shape) == 2:
sim = im.mm(sentences.t()).view(-1)
else:
_, K, D = im.shape
sim_kk = im.view(-1, D).mm(sentences.view(-1, D).t())
sim_kk = sim_kk.view(im.size(0), K, sentences.size(0), K)
sim_kk = sim_kk.permute(0,1,3,2).contiguous()
sim_kk = sim_kk.view(im.size(0), -1, sentences.size(0))
sim, _ = sim_kk.max(dim=1)
sim = sim.flatten()
else:
if order:
if index % ORDER_BATCH_SIZE == 0:
mx = min(images.shape[0], nreps * (index + ORDER_BATCH_SIZE))
im2 = images[nreps * index:mx:nreps]
sim_batch = order_sim(torch.Tensor(im2).cuda(), torch.Tensor(sentences).cuda())
sim_batch = sim_batch.cpu().numpy()
sim = sim_batch[index % ORDER_BATCH_SIZE]
else:
sim = np.tensordot(im, sentences, axes=[2, 2]).max(axis=(0,1,3)).flatten() \
if len(sentences.shape) == 3 else np.dot(im, sentences.T).flatten()
if use_gpu:
_, inds_gpu = sim.sort()
inds = inds_gpu.cpu().numpy().copy()[::-1]
else:
inds = np.argsort(sim)[::-1]
index_list.append(inds[0])
# Score
rank = 1e20
for i in range(nreps * index, nreps * (index + 1), 1):
tmp = np.where(inds == i)[0][0]
if tmp < rank:
rank = tmp
ranks[index] = rank
top1[index] = inds[0]
# Compute metrics
r1 = 100.0 * len(np.where(ranks < 1)[0]) / len(ranks)
r5 = 100.0 * len(np.where(ranks < 5)[0]) / len(ranks)
r10 = 100.0 * len(np.where(ranks < 10)[0]) / len(ranks)
medr = np.floor(np.median(ranks)) + 1
meanr = ranks.mean() + 1
if return_ranks:
return (r1, r5, r10, medr, meanr), (ranks, top1)
else:
return (r1, r5, r10, medr, meanr)
def t2i(images, sentences, nreps=1, npts=None, return_ranks=False, order=False, use_gpu=False):
"""
Text->Images (Image Search)
Images: (nreps*N, K) matrix of images
Captions: (nreps*N, K) matrix of sentences
"""
if use_gpu:
assert not order, 'Order embedding not supported in GPU mode'
if npts is None:
npts = int(images.shape[0] / nreps)
if use_gpu:
ims = torch.stack([images[i] for i in range(0, len(images), nreps)])
else:
ims = np.array([images[i] for i in range(0, len(images), nreps)])
ranks, top1 = np.zeros(nreps * npts), np.zeros(nreps * npts)
for index in range(npts):
# Get query sentences
queries = sentences[nreps * index:nreps * (index + 1)]
# Compute scores
if use_gpu:
if len(sentences.shape) == 2:
sim = queries.mm(ims.t())
else:
sim_kk = queries.view(-1, queries.size(-1)).mm(ims.view(-1, ims.size(-1)).t())
sim_kk = sim_kk.view(queries.size(0), queries.size(1), ims.size(0), ims.size(1))
sim_kk = sim_kk.permute(0,1,3,2).contiguous()
sim_kk = sim_kk.view(queries.size(0), -1, ims.size(0))
sim, _ = sim_kk.max(dim=1)
else:
if order:
if nreps * index % ORDER_BATCH_SIZE == 0:
mx = min(sentences.shape[0], nreps * index + ORDER_BATCH_SIZE)
sentences_batch = sentences[nreps * index:mx]
sim_batch = order_sim(torch.Tensor(images).cuda(),
torch.Tensor(sentences_batch).cuda())
sim_batch = sim_batch.cpu().numpy()
sim = sim_batch[:, (nreps * index) % ORDER_BATCH_SIZE:(nreps * index) % ORDER_BATCH_SIZE + nreps].T
else:
sim = np.tensordot(queries, ims, axes=[2, 2]).max(axis=(1,3)) \
if len(sentences.shape) == 3 else np.dot(queries, ims.T)
inds = np.zeros(sim.shape)
for i in range(len(inds)):
if use_gpu:
_, inds_gpu = sim[i].sort()
inds[i] = inds_gpu.cpu().numpy().copy()[::-1]
else:
inds[i] = np.argsort(sim[i])[::-1]
ranks[nreps * index + i] = np.where(inds[i] == index)[0][0]
top1[nreps * index + i] = inds[i][0]
# Compute metrics
r1 = 100.0 * len(np.where(ranks < 1)[0]) / len(ranks)
r5 = 100.0 * len(np.where(ranks < 5)[0]) / len(ranks)
r10 = 100.0 * len(np.where(ranks < 10)[0]) / len(ranks)
medr = np.floor(np.median(ranks)) + 1
meanr = ranks.mean() + 1
if return_ranks:
return (r1, r5, r10, medr, meanr), (ranks, top1)
else:
return (r1, r5, r10, medr, meanr)
def convert_old_state_dict(x, model, multi_gpu=False):
params = model.state_dict()
prefix = ['module.img_enc.', 'module.txt_enc.'] \
if multi_gpu else ['img_enc.', 'txt_enc.']
for i, old_params in enumerate(x):
for key, val in old_params.items():
key = prefix[i] + key.replace('module.','').replace('our_model', 'pie_net')
assert key in params, '{} not found in model state_dict'.format(key)
params[key] = val
return params
def evalrank(model, args, split='test'):
print('Loading dataset')
data_loader = get_test_loader(args, vocab)
print('Computing results... (eval_on_gpu={})'.format(args.eval_on_gpu))
img_embs, txt_embs = encode_data(model, data_loader, args.eval_on_gpu)
n_samples = img_embs.shape[0]
nreps = 5 if args.data_name == 'coco' else 1
print('Images: %d, Sentences: %d' % (img_embs.shape[0] / nreps, txt_embs.shape[0]))
# 5fold cross-validation, only for MSCOCO
mean_metrics = None
if args.data_name == 'coco':
results = []
for i in range(5):
r, rt0 = i2t(img_embs[i*5000:(i + 1)*5000], txt_embs[i*5000:(i + 1)*5000],
nreps=nreps, return_ranks=True, order=args.order, use_gpu=args.eval_on_gpu)
r = (r[0], r[1], r[2], r[3], r[3] / n_samples, r[4], r[4] / n_samples)
print("Image to text: %.2f, %.2f, %.2f, %.2f (%.2f), %.2f (%.2f)" % r)
ri, rti0 = t2i(img_embs[i*5000:(i + 1)*5000], txt_embs[i*5000:(i + 1)*5000],
nreps=nreps, return_ranks=True, order=args.order, use_gpu=args.eval_on_gpu)
if i == 0:
rt, rti = rt0, rti0
ri = (ri[0], ri[1], ri[2], ri[3], ri[3] / n_samples, ri[4], ri[4] / n_samples)
print("Text to image: %.2f, %.2f, %.2f, %.2f (%.2f), %.2f (%.2f)" % ri)
ar = (r[0] + r[1] + r[2]) / 3
ari = (ri[0] + ri[1] + ri[2]) / 3
rsum = r[0] + r[1] + r[2] + ri[0] + ri[1] + ri[2]
print("rsum: %.2f ar: %.2f ari: %.2f" % (rsum, ar, ari))
results += [list(r) + list(ri) + [ar, ari, rsum]]
mean_metrics = tuple(np.array(results).mean(axis=0).flatten())
print("-----------------------------------")
print("Mean metrics from 5-fold evaluation: ")
print("rsum: %.2f" % (mean_metrics[-1] * 6))
print("Average i2t Recall: %.2f" % mean_metrics[-3])
print("Image to text: %.2f %.2f %.2f %.2f (%.2f) %.2f (%.2f)" % mean_metrics[:7])
print("Average t2i Recall: %.2f" % mean_metrics[-2])
print("Text to image: %.2f %.2f %.2f %.2f (%.2f) %.2f (%.2f)" % mean_metrics[7:14])
# no cross-validation, full evaluation
r, rt = i2t(img_embs, txt_embs, nreps=nreps, return_ranks=True, use_gpu=args.eval_on_gpu)
ri, rti = t2i(img_embs, txt_embs, nreps=nreps, return_ranks=True, use_gpu=args.eval_on_gpu)
ar = (r[0] + r[1] + r[2]) / 3
ari = (ri[0] + ri[1] + ri[2]) / 3
rsum = r[0] + r[1] + r[2] + ri[0] + ri[1] + ri[2]
r = (r[0], r[1], r[2], r[3], r[3] / n_samples, r[4], r[4] / n_samples)
ri = (ri[0], ri[1], ri[2], ri[3], ri[3] / n_samples, ri[4], ri[4] / n_samples)
print("rsum: %.2f" % rsum)
print("Average i2t Recall: %.2f" % ar)
print("Image to text: %.2f %.2f %.2f %.2f (%.2f) %.2f (%.2f)" % r)
print("Average t2i Recall: %.2f" % ari)
print("Text to image: %.2f %.2f %.2f %.2f (%.2f) %.2f (%.2f)" % ri)
return mean_metrics
if __name__ == '__main__':
multi_gpu = torch.cuda.device_count() > 1
args = verify_input_args(parser.parse_args())
opt = verify_input_args(parser.parse_args())
# load vocabulary used by the model
with open('./vocab/%s_vocab.pkl' % args.data_name, 'rb') as f:
vocab = pickle.load(f)
args.vocab_size = len(vocab)
# load model and options
assert os.path.isfile(args.ckpt)
model = PVSE(vocab.word2idx, args)
if torch.cuda.is_available():
model = torch.nn.DataParallel(model).cuda() if multi_gpu else model
torch.backends.cudnn.benchmark = True
model.load_state_dict(torch.load(args.ckpt))
# evaluate
metrics = evalrank(model, args, split='test')
| 36.742268
| 107
| 0.607931
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,693
| 0.158343
|
fb82fbf2f9c2c62b4641f184634a4204b573ebe0
| 13,286
|
py
|
Python
|
paddlex/ppdet/modeling/heads/detr_head.py
|
xiaolao/PaddleX
|
4fa58cd0e485418ba353a87414052bd8a19204e5
|
[
"Apache-2.0"
] | 3,655
|
2020-03-28T09:19:50.000Z
|
2022-03-31T13:28:39.000Z
|
paddlex/ppdet/modeling/heads/detr_head.py
|
BDGZhengzhou/PaddleX
|
a07b54c6eaa363cb8008b26cba83eed734c52044
|
[
"Apache-2.0"
] | 829
|
2020-03-28T04:03:18.000Z
|
2022-03-31T14:34:30.000Z
|
paddlex/ppdet/modeling/heads/detr_head.py
|
BDGZhengzhou/PaddleX
|
a07b54c6eaa363cb8008b26cba83eed734c52044
|
[
"Apache-2.0"
] | 738
|
2020-03-28T03:56:46.000Z
|
2022-03-31T13:11:03.000Z
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
from paddlex.ppdet.core.workspace import register
import pycocotools.mask as mask_util
from ..initializer import linear_init_, constant_
from ..transformers.utils import inverse_sigmoid
__all__ = ['DETRHead', 'DeformableDETRHead']
class MLP(nn.Layer):
def __init__(self, input_dim, hidden_dim, output_dim, num_layers):
super().__init__()
self.num_layers = num_layers
h = [hidden_dim] * (num_layers - 1)
self.layers = nn.LayerList(
nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim]))
self._reset_parameters()
def _reset_parameters(self):
for l in self.layers:
linear_init_(l)
def forward(self, x):
for i, layer in enumerate(self.layers):
x = F.relu(layer(x)) if i < self.num_layers - 1 else layer(x)
return x
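# Added illustration: MLP(256, 256, 4, num_layers=3) stacks Linear(256, 256) ->
# Linear(256, 256) -> Linear(256, 4), with ReLU after every layer except the last;
# this is the shape used by the box-regression heads below (hidden_dim -> 4 box params).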
class MultiHeadAttentionMap(nn.Layer):
"""This is a 2D attention module, which only returns the attention softmax (no multiplication by value)"""
def __init__(self,
query_dim,
hidden_dim,
num_heads,
dropout=0.0,
bias=True):
super().__init__()
self.num_heads = num_heads
self.hidden_dim = hidden_dim
self.dropout = nn.Dropout(dropout)
weight_attr = paddle.ParamAttr(
initializer=paddle.nn.initializer.XavierUniform())
bias_attr = paddle.framework.ParamAttr(
initializer=paddle.nn.initializer.Constant()) if bias else False
self.q_proj = nn.Linear(query_dim, hidden_dim, weight_attr, bias_attr)
self.k_proj = nn.Conv2D(
query_dim,
hidden_dim,
1,
weight_attr=weight_attr,
bias_attr=bias_attr)
self.normalize_fact = float(hidden_dim / self.num_heads)**-0.5
def forward(self, q, k, mask=None):
q = self.q_proj(q)
k = self.k_proj(k)
bs, num_queries, n, c, h, w = q.shape[0], q.shape[1], self.num_heads,\
self.hidden_dim // self.num_heads, k.shape[-2], k.shape[-1]
qh = q.reshape([bs, num_queries, n, c])
kh = k.reshape([bs, n, c, h, w])
# weights = paddle.einsum("bqnc,bnchw->bqnhw", qh * self.normalize_fact, kh)
qh = qh.transpose([0, 2, 1, 3]).reshape([-1, num_queries, c])
kh = kh.reshape([-1, c, h * w])
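        # Added comment: the bmm below reproduces the commented-out einsum
        # "bqnc,bnchw->bqnhw" by folding the batch and head dims together
        # ([bs*n, num_queries, c] x [bs*n, c, h*w]) and reshaping back afterwards.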
weights = paddle.bmm(qh * self.normalize_fact, kh).reshape(
[bs, n, num_queries, h, w]).transpose([0, 2, 1, 3, 4])
if mask is not None:
weights += mask
        # fix a potential bug: https://github.com/facebookresearch/detr/issues/247
weights = F.softmax(weights.flatten(3), axis=-1).reshape(weights.shape)
weights = self.dropout(weights)
return weights
class MaskHeadFPNConv(nn.Layer):
"""
Simple convolutional head, using group norm.
Upsampling is done using a FPN approach
"""
def __init__(self, input_dim, fpn_dims, context_dim, num_groups=8):
super().__init__()
inter_dims = [input_dim,
] + [context_dim // (2**i) for i in range(1, 5)]
weight_attr = paddle.ParamAttr(
initializer=paddle.nn.initializer.KaimingUniform())
bias_attr = paddle.framework.ParamAttr(
initializer=paddle.nn.initializer.Constant())
self.conv0 = self._make_layers(input_dim, input_dim, 3, num_groups,
weight_attr, bias_attr)
self.conv_inter = nn.LayerList()
for in_dims, out_dims in zip(inter_dims[:-1], inter_dims[1:]):
self.conv_inter.append(
self._make_layers(in_dims, out_dims, 3, num_groups,
weight_attr, bias_attr))
self.conv_out = nn.Conv2D(
inter_dims[-1],
1,
3,
padding=1,
weight_attr=weight_attr,
bias_attr=bias_attr)
self.adapter = nn.LayerList()
for i in range(len(fpn_dims)):
self.adapter.append(
nn.Conv2D(
fpn_dims[i],
inter_dims[i + 1],
1,
weight_attr=weight_attr,
bias_attr=bias_attr))
def _make_layers(self,
in_dims,
out_dims,
kernel_size,
num_groups,
weight_attr=None,
bias_attr=None):
return nn.Sequential(
nn.Conv2D(
in_dims,
out_dims,
kernel_size,
padding=kernel_size // 2,
weight_attr=weight_attr,
bias_attr=bias_attr),
nn.GroupNorm(num_groups, out_dims),
nn.ReLU())
def forward(self, x, bbox_attention_map, fpns):
x = paddle.concat([
x.tile([bbox_attention_map.shape[1], 1, 1, 1]),
bbox_attention_map.flatten(0, 1)
], 1)
x = self.conv0(x)
for inter_layer, adapter_layer, feat in zip(self.conv_inter[:-1],
self.adapter, fpns):
feat = adapter_layer(feat).tile(
[bbox_attention_map.shape[1], 1, 1, 1])
x = inter_layer(x)
x = feat + F.interpolate(x, size=feat.shape[-2:])
x = self.conv_inter[-1](x)
x = self.conv_out(x)
return x
@register
class DETRHead(nn.Layer):
__shared__ = ['num_classes', 'hidden_dim', 'use_focal_loss']
__inject__ = ['loss']
def __init__(self,
num_classes=80,
hidden_dim=256,
nhead=8,
num_mlp_layers=3,
loss='DETRLoss',
fpn_dims=[1024, 512, 256],
with_mask_head=False,
use_focal_loss=False):
super(DETRHead, self).__init__()
# add background class
self.num_classes = num_classes if use_focal_loss else num_classes + 1
self.hidden_dim = hidden_dim
self.loss = loss
self.with_mask_head = with_mask_head
self.use_focal_loss = use_focal_loss
self.score_head = nn.Linear(hidden_dim, self.num_classes)
self.bbox_head = MLP(hidden_dim,
hidden_dim,
output_dim=4,
num_layers=num_mlp_layers)
if self.with_mask_head:
self.bbox_attention = MultiHeadAttentionMap(hidden_dim, hidden_dim,
nhead)
self.mask_head = MaskHeadFPNConv(hidden_dim + nhead, fpn_dims,
hidden_dim)
self._reset_parameters()
def _reset_parameters(self):
linear_init_(self.score_head)
@classmethod
def from_config(cls, cfg, hidden_dim, nhead, input_shape):
return {
'hidden_dim': hidden_dim,
'nhead': nhead,
'fpn_dims': [i.channels for i in input_shape[::-1]][1:]
}
@staticmethod
def get_gt_mask_from_polygons(gt_poly, pad_mask):
out_gt_mask = []
for polygons, padding in zip(gt_poly, pad_mask):
height, width = int(padding[:, 0].sum()), int(padding[0, :].sum())
masks = []
for obj_poly in polygons:
rles = mask_util.frPyObjects(obj_poly, height, width)
rle = mask_util.merge(rles)
masks.append(
paddle.to_tensor(mask_util.decode(rle)).astype('float32'))
masks = paddle.stack(masks)
masks_pad = paddle.zeros(
[masks.shape[0], pad_mask.shape[1], pad_mask.shape[2]])
masks_pad[:, :height, :width] = masks
out_gt_mask.append(masks_pad)
return out_gt_mask
def forward(self, out_transformer, body_feats, inputs=None):
r"""
Args:
out_transformer (Tuple): (feats: [num_levels, batch_size,
num_queries, hidden_dim],
memory: [batch_size, hidden_dim, h, w],
src_proj: [batch_size, h*w, hidden_dim],
src_mask: [batch_size, 1, 1, h, w])
body_feats (List(Tensor)): list[[B, C, H, W]]
inputs (dict): dict(inputs)
"""
feats, memory, src_proj, src_mask = out_transformer
outputs_logit = self.score_head(feats)
outputs_bbox = F.sigmoid(self.bbox_head(feats))
outputs_seg = None
if self.with_mask_head:
bbox_attention_map = self.bbox_attention(feats[-1], memory,
src_mask)
fpn_feats = [a for a in body_feats[::-1]][1:]
outputs_seg = self.mask_head(src_proj, bbox_attention_map,
fpn_feats)
outputs_seg = outputs_seg.reshape([
feats.shape[1], feats.shape[2], outputs_seg.shape[-2],
outputs_seg.shape[-1]
])
if self.training:
assert inputs is not None
assert 'gt_bbox' in inputs and 'gt_class' in inputs
gt_mask = self.get_gt_mask_from_polygons(
inputs['gt_poly'],
inputs['pad_mask']) if 'gt_poly' in inputs else None
return self.loss(
outputs_bbox,
outputs_logit,
inputs['gt_bbox'],
inputs['gt_class'],
masks=outputs_seg,
gt_mask=gt_mask)
else:
return (outputs_bbox[-1], outputs_logit[-1], outputs_seg)
@register
class DeformableDETRHead(nn.Layer):
__shared__ = ['num_classes', 'hidden_dim']
__inject__ = ['loss']
def __init__(self,
num_classes=80,
hidden_dim=512,
nhead=8,
num_mlp_layers=3,
loss='DETRLoss'):
super(DeformableDETRHead, self).__init__()
self.num_classes = num_classes
self.hidden_dim = hidden_dim
self.nhead = nhead
self.loss = loss
self.score_head = nn.Linear(hidden_dim, self.num_classes)
self.bbox_head = MLP(hidden_dim,
hidden_dim,
output_dim=4,
num_layers=num_mlp_layers)
self._reset_parameters()
def _reset_parameters(self):
linear_init_(self.score_head)
constant_(self.score_head.bias, -4.595)
constant_(self.bbox_head.layers[-1].weight)
bias = paddle.zeros_like(self.bbox_head.layers[-1].bias)
bias[2:] = -2.0
self.bbox_head.layers[-1].bias.set_value(bias)
@classmethod
def from_config(cls, cfg, hidden_dim, nhead, input_shape):
return {'hidden_dim': hidden_dim, 'nhead': nhead}
def forward(self, out_transformer, body_feats, inputs=None):
r"""
Args:
out_transformer (Tuple): (feats: [num_levels, batch_size,
num_queries, hidden_dim],
memory: [batch_size,
\sum_{l=0}^{L-1} H_l \cdot W_l, hidden_dim],
reference_points: [batch_size, num_queries, 2])
body_feats (List(Tensor)): list[[B, C, H, W]]
inputs (dict): dict(inputs)
"""
feats, memory, reference_points = out_transformer
reference_points = inverse_sigmoid(reference_points.unsqueeze(0))
outputs_bbox = self.bbox_head(feats)
        # Equivalent to "outputs_bbox[:, :, :, :2] += reference_points", but the
        # in-place slice assignment produces incorrect gradients in Paddle, so the
        # two slices are concatenated instead.
outputs_bbox = paddle.concat(
[
outputs_bbox[:, :, :, :2] + reference_points,
outputs_bbox[:, :, :, 2:]
],
axis=-1)
outputs_bbox = F.sigmoid(outputs_bbox)
outputs_logit = self.score_head(feats)
if self.training:
assert inputs is not None
assert 'gt_bbox' in inputs and 'gt_class' in inputs
return self.loss(outputs_bbox, outputs_logit, inputs['gt_bbox'],
inputs['gt_class'])
else:
return (outputs_bbox[-1], outputs_logit[-1], None)
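# --- Illustrative sketch (not part of the original file) ---
# The comment above the concat in DeformableDETRHead.forward notes that the
# reference points are added to the raw cx/cy offsets before the final sigmoid.
# A minimal NumPy stand-in for that arithmetic is shown below; _inverse_sigmoid
# is re-implemented here under the usual log(p / (1 - p)) definition and is an
# assumption about what ppdet's helper does (the real one may differ in details).
import numpy as np

def _inverse_sigmoid(p, eps=1e-6):
    p = np.clip(p, eps, 1 - eps)
    return np.log(p / (1 - p))

ref = np.array([0.3, 0.7])     # normalized reference point (cx, cy)
delta = np.array([0.1, -0.2])  # raw cx/cy offsets predicted by bbox_head
cxcy = 1.0 / (1.0 + np.exp(-(delta + _inverse_sigmoid(ref))))
# With delta == 0 this reproduces the reference point exactly, which is why the
# head shifts the logits by inverse_sigmoid(reference_points) before the sigmoid.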
| 37.215686
| 110
| 0.553967
| 12,228
| 0.920367
| 0
| 0
| 6,977
| 0.525139
| 0
| 0
| 2,321
| 0.174695
|
fb83c2db2b3565baeaebf20f605f38a7b225d465
| 3,911
|
py
|
Python
|
python/raspberryPi.py
|
FirewallRobotics/Vinnie-2019
|
2bc74f9947d41960ffe06e39bfc8dbe321ef2ae1
|
[
"BSD-3-Clause"
] | null | null | null |
python/raspberryPi.py
|
FirewallRobotics/Vinnie-2019
|
2bc74f9947d41960ffe06e39bfc8dbe321ef2ae1
|
[
"BSD-3-Clause"
] | null | null | null |
python/raspberryPi.py
|
FirewallRobotics/Vinnie-2019
|
2bc74f9947d41960ffe06e39bfc8dbe321ef2ae1
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python3
import json
import time
import sys
#import numpy as np
import cv2
from cscore import CameraServer, VideoSource, CvSource, VideoMode, CvSink, UsbCamera
from networktables import NetworkTablesInstance
def Track(frame, sd):
Lower = (0,0,0)
Upper = (0,0,0)
if sd.getNumber("Track", 0):
Lower = (0,103,105)
Upper = (150,255,255) #hatch panel
sd.putNumber("Tracking", 0)
elif sd.getNumber("Track", 1):
Lower = (16,18,108) #Tape
Upper = (32,52,127)
sd.putNumber("Tracking", 1)
else:
print("Could not get smartdashboard value, using hatch panel")
Lower = (0,103,105)
Upper = (150,255,255) #none selected using hatch
sd.putNumber("Tracking", 2)
#frame = cv2.flip(frame, 1)
#Blur out the Image
#blurred = cv2.GaussianBlur(frame, (11,11), 0)
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    #Make a mask for the pixels that meet the HSV filter,
    #then run a series of dilations and
    #erosions to remove any small blobs still in the mask
mask = cv2.inRange(hsv, Lower, Upper)
mask = cv2.erode(mask, None, iterations = 2)
mask = cv2.dilate(mask, None, iterations = 2)
    #find the contours in the mask and initialize the
    #current (x,y) center of the target
a, cnts, b = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
center = None
    #only proceed if at least one contour was found
if len(cnts) > 0:
#find the largest contour in the mask, then use it
#to compute the minimum enclosing circle and centroid
c = max(cnts, key=cv2.contourArea)
((x,y), radius) = cv2.minEnclosingCircle(c)
M = cv2.moments(c)
        center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))
        #if the detected contour has a large enough radius, publish it
if radius > 15:
#draw a circle around the target and publish values to smart dashboard
cv2.circle(frame, (int(x), int(y)), int(radius), (255,255,8), 2)
cv2.circle(frame, center, 3, (0,0,225), -1)
sd.putNumber('X',x)
sd.putNumber('Y',y)
sd.putNumber('R', radius)
print("X: " + repr(round(x, 1)) + " Y: " + repr(round(y, 1)) + " Radius: " + repr(round(radius, 1)))
else:
print("WTF")
#let the RoboRio Know no target has been detected with -1
sd.putNumber('X', -1)
sd.putNumber('Y', -1)
sd.putNumber('R', -1)
cap1 = cv2.VideoCapture(0)
cap2 = cv2.VideoCapture(1)
#HatchPanel = HatchPanelPipeline()
team = None  # TODO: set to the FRC team number; startClientTeam(None) will fail
ntinst = NetworkTablesInstance.getDefault()
ntinst.startClientTeam(team)
SmartDashBoardValues = ntinst.getTable('SmartDashboard')
while(True):
# Capture frame-by-frame
if SmartDashBoardValues.getNumber("Camera to Use", 0):
ret, frame = cap1.read() #use camera 0
SmartDashBoardValues.putNumber("Using Camera", 0)
elif SmartDashBoardValues.getNumber("Camera to Use", 1):
ret, frame = cap2.read() #use camera 1
SmartDashBoardValues.putNumber("Using Camera", 1)
else:
print("No camera selected using camera 0")
ret, frame = cap1.read() #found no value for camera to use, using cam 0
SmartDashBoardValues.putNumber("Using Camera", 2)
# Our operations on the frame come here
Track(frame, SmartDashBoardValues)
cv2.imshow('frame',frame)
#print(type(mask))
#res = cv2.bitwise_and(frame,frame, mask=mask)
#cv2.imshow('frame',frame)
#cv2.imshow('mask',mask)
#cv2.imshow('res',res)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
# When everything done, release the capture
cap1.release()
cap2.release()
cv2.destroyAllWindows()
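# --- Illustrative sketch (not part of the original script) ---
# Track() computes the contour centroid from image moments without guarding
# against a zero area, which raises ZeroDivisionError for degenerate contours.
# A defensive variant of that step could look like this; safe_centroid is a
# hypothetical helper, not something the original code defines.
import cv2  # already imported above; repeated so the sketch stands alone

def safe_centroid(contour):
    M = cv2.moments(contour)
    if M["m00"] == 0:
        return None  # degenerate contour, no usable centroid
    return (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))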
| 36.212963
| 112
| 0.604449
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,384
| 0.353874
|
fb8406595f2e94d2539b01b5e80bd41db3092f8b
| 1,667
|
py
|
Python
|
soccer_stats_calc/soccer_var_ana.py
|
steffens21/diesunddas
|
35222ff2ddac0b115dfd2e5b6764c6878af8d228
|
[
"MIT"
] | null | null | null |
soccer_stats_calc/soccer_var_ana.py
|
steffens21/diesunddas
|
35222ff2ddac0b115dfd2e5b6764c6878af8d228
|
[
"MIT"
] | null | null | null |
soccer_stats_calc/soccer_var_ana.py
|
steffens21/diesunddas
|
35222ff2ddac0b115dfd2e5b6764c6878af8d228
|
[
"MIT"
] | null | null | null |
import sys
from collections import deque
import soccer_toolbox
import csv_tools
def fileToStats(csvfile, stat, nbrAgg):
header, listRawData = csv_tools.loadData(csvfile)
dictCol = csv_tools.getColumns(header)
setTeams = csv_tools.getTeams(listRawData, dictCol['nColHomeTeam'], dictCol['nColAwayTeam'])
resultDict = {team: list() for team in setTeams}
teamDataDict = {team: list() for team in setTeams}
for data in listRawData:
teamDataDict[data[dictCol['nColHomeTeam']]].append(data)
teamDataDict[data[dictCol['nColAwayTeam']]].append(data)
for team in teamDataDict:
currentData = deque()
for data in teamDataDict[team]:
currentData.append(data)
if len(currentData) <= nbrAgg:
continue
else:
currentData.popleft()
                # compute aggregated stats over the current window
statsDict = soccer_toolbox.getStatsForTeam(team, dictCol, list(currentData))
resultDict[team].append(statsDict[stat])
if currentData:
statsDict = soccer_toolbox.getStatsForTeam(team, dictCol, currentData)
resultDict[team].append(statsDict[stat])
return resultDict
def main():
if len(sys.argv) > 1:
csvfile = sys.argv[1]
else:
csvfile = 'D1.csv'
stat = 'Rating'
if len(sys.argv) > 2:
stat = sys.argv[2]
nbrAgg = 1
if len(sys.argv) > 3:
nbrAgg = int(sys.argv[3])
dictTeamStats = fileToStats(csvfile, stat, nbrAgg)
for name, val in sorted(dictTeamStats.items()):
        print(','.join([name] + list(map(str, val))))
if __name__ == "__main__":
main()
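# --- Illustrative sketch (not part of the original module) ---
# fileToStats aggregates match rows with a fixed-size sliding window built on a
# deque: rows are appended until the window holds nbrAgg items, after which the
# oldest row is dropped before each new stat is computed. The reduced example
# below mirrors that loop with plain numbers; sum() stands in for
# soccer_toolbox.getStatsForTeam.
from collections import deque  # already imported above; repeated for self-containment

def sliding_sums(values, window):
    out = []
    current = deque()
    for v in values:
        current.append(v)
        if len(current) <= window:
            continue
        current.popleft()
        out.append(sum(current))
    return out

# sliding_sums([1, 2, 3, 4, 5], 2) -> [5, 7, 9]  (sums over [2,3], [3,4], [4,5])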
| 29.245614
| 96
| 0.619076
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 91
| 0.054589
|
fb8546895b2c19d3192fb1d824c0ca8782071aeb
| 895
|
py
|
Python
|
HelloWorldWebsite/searchTest/views.py
|
404NotFound-401/DjangoTutorial
|
8218b5308245b309c7cb36386306152378602b6d
|
[
"MIT"
] | null | null | null |
HelloWorldWebsite/searchTest/views.py
|
404NotFound-401/DjangoTutorial
|
8218b5308245b309c7cb36386306152378602b6d
|
[
"MIT"
] | 10
|
2019-09-07T20:30:34.000Z
|
2019-09-08T19:22:11.000Z
|
HelloWorldWebsite/searchTest/views.py
|
404NotFound-401/DjangoTutorial
|
8218b5308245b309c7cb36386306152378602b6d
|
[
"MIT"
] | 1
|
2019-09-08T19:38:54.000Z
|
2019-09-08T19:38:54.000Z
|
from __future__ import unicode_literals
from django.shortcuts import render, redirect
from django.template import loader
from django.http import HttpResponse
from django.views import generic
from .models import Movie
from . import searchapi
from django.urls import reverse
# Create your views here.
class Search(generic.DetailView):
model = Movie
template_name = 'searchTest.html'
def get(self, request, *args, **kwargs):
context = {'movieId':'Please enter the film name'}
return render(request, self.template_name, context)
def post(self, request, *args, **kwargs):
if 'movieName' in request.POST:
print("Get movie name")
context = searchapi.getName(request.POST['movieName'])
else:
print("Wrong act")
return redirect('mainpage')
return render(request, "home/result.html", context)
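# --- Illustrative sketch (not part of the original file) ---
# One way this class-based view could be wired into a URLconf. The app's actual
# routes are not shown in this file, so the path and route name below are
# assumptions; the 'mainpage' route used by redirect() above is likewise assumed
# to be defined elsewhere in the project.
from django.urls import path

urlpatterns = [
    path('search/', Search.as_view(), name='search'),
]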
| 28.870968
| 66
| 0.684916
| 590
| 0.659218
| 0
| 0
| 0
| 0
| 0
| 0
| 156
| 0.174302
|
fb8662bca964b254e5cebea057da8c25555e063b
| 27,693
|
py
|
Python
|
mapreduce/handlers.py
|
igeeker/v2ex
|
9fa81f7c82aa7d162a924d357494b241eb8a6207
|
[
"BSD-3-Clause"
] | 161
|
2019-07-23T06:53:45.000Z
|
2022-03-24T01:07:19.000Z
|
mapreduce/handlers.py
|
igeeker/v2ex
|
9fa81f7c82aa7d162a924d357494b241eb8a6207
|
[
"BSD-3-Clause"
] | null | null | null |
mapreduce/handlers.py
|
igeeker/v2ex
|
9fa81f7c82aa7d162a924d357494b241eb8a6207
|
[
"BSD-3-Clause"
] | 26
|
2019-08-05T06:09:38.000Z
|
2021-07-08T02:05:13.000Z
|
#!/usr/bin/env python
#
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Defines executor tasks handlers for MapReduce implementation."""
# Disable "Invalid method name"
# pylint: disable-msg=C6409
import datetime
import logging
import math
import os
from mapreduce.lib import simplejson
import time
from google.appengine.api import memcache
from google.appengine.api.labs import taskqueue
from google.appengine.ext import db
from mapreduce import base_handler
from mapreduce import context
from mapreduce import quota
from mapreduce import model
from mapreduce import util
# TODO(user): Make this a product of the reader or in quotas.py
_QUOTA_BATCH_SIZE = 20
# The amount of time to perform scanning in one slice. New slice will be
# scheduled as soon as current one takes this long.
_SLICE_DURATION_SEC = 15
# Delay between consecutive controller callback invocations.
_CONTROLLER_PERIOD_SEC = 2
class Error(Exception):
"""Base class for exceptions in this module."""
class NotEnoughArgumentsError(Error):
"""Required argument is missing."""
class NoDataError(Error):
"""There is no data present for a desired input."""
class MapperWorkerCallbackHandler(base_handler.BaseHandler):
"""Callback handler for mapreduce worker task.
Request Parameters:
mapreduce_spec: MapreduceSpec of the mapreduce serialized to json.
shard_id: id of the shard.
slice_id: id of the slice.
"""
def __init__(self, time_function=time.time):
"""Constructor.
Args:
time_function: time function to use to obtain current time.
"""
base_handler.BaseHandler.__init__(self)
self._time = time_function
def post(self):
"""Handle post request."""
spec = model.MapreduceSpec.from_json_str(
self.request.get("mapreduce_spec"))
self._start_time = self._time()
shard_id = self.shard_id()
# TODO(user): Make this prettier
logging.debug("post: shard=%s slice=%s headers=%s",
shard_id, self.slice_id(), self.request.headers)
shard_state, control = db.get([
model.ShardState.get_key_by_shard_id(shard_id),
model.MapreduceControl.get_key_by_job_id(spec.mapreduce_id),
])
if not shard_state:
      # We're letting this task die. It's up to the controller code to
      # reinitialize and restart the task.
logging.error("State not found for shard ID %r; shutting down",
shard_id)
return
if control and control.command == model.MapreduceControl.ABORT:
logging.info("Abort command received by shard %d of job '%s'",
shard_state.shard_number, shard_state.mapreduce_id)
shard_state.active = False
shard_state.result_status = model.ShardState.RESULT_ABORTED
shard_state.put()
model.MapreduceControl.abort(spec.mapreduce_id)
return
input_reader = self.input_reader(spec.mapper)
if spec.mapper.params.get("enable_quota", True):
quota_consumer = quota.QuotaConsumer(
quota.QuotaManager(memcache.Client()),
shard_id,
_QUOTA_BATCH_SIZE)
else:
quota_consumer = None
ctx = context.Context(spec, shard_state)
context.Context._set(ctx)
try:
# consume quota ahead, because we do not want to run a datastore
# query if there's not enough quota for the shard.
if not quota_consumer or quota_consumer.check():
scan_aborted = False
entity = None
# We shouldn't fetch an entity from the reader if there's not enough
# quota to process it. Perform all quota checks proactively.
if not quota_consumer or quota_consumer.consume():
for entity in input_reader:
if isinstance(entity, db.Model):
shard_state.last_work_item = repr(entity.key())
else:
shard_state.last_work_item = repr(entity)[:100]
scan_aborted = not self.process_entity(entity, ctx)
# Check if we've got enough quota for the next entity.
if (quota_consumer and not scan_aborted and
not quota_consumer.consume()):
scan_aborted = True
if scan_aborted:
break
else:
scan_aborted = True
if not scan_aborted:
logging.info("Processing done for shard %d of job '%s'",
shard_state.shard_number, shard_state.mapreduce_id)
          # We consumed an extra quota item at the end of the for loop,
          # so be nice and give it back.
if quota_consumer:
quota_consumer.put(1)
shard_state.active = False
shard_state.result_status = model.ShardState.RESULT_SUCCESS
          # TODO(user): Mike said we don't want this to happen in case of an
          # exception while scanning. Figure out when it's appropriate to skip.
ctx.flush()
finally:
context.Context._set(None)
if quota_consumer:
quota_consumer.dispose()
# Rescheduling work should always be the last statement. It shouldn't happen
# if there were any exceptions in code before it.
if shard_state.active:
self.reschedule(spec, input_reader)
def process_entity(self, entity, ctx):
"""Process a single entity.
Call mapper handler on the entity.
Args:
entity: an entity to process.
ctx: current execution context.
Returns:
True if scan should be continued, False if scan should be aborted.
"""
ctx.counters.increment(context.COUNTER_MAPPER_CALLS)
handler = ctx.mapreduce_spec.mapper.handler
if util.is_generator_function(handler):
for result in handler(entity):
if callable(result):
result(ctx)
else:
try:
if len(result) == 2:
logging.error("Collectors not implemented yet")
else:
logging.error("Got bad output tuple of length %d", len(result))
except TypeError:
logging.error(
"Handler yielded type %s, expected a callable or a tuple",
result.__class__.__name__)
else:
handler(entity)
if self._time() - self._start_time > _SLICE_DURATION_SEC:
logging.debug("Spent %s seconds. Rescheduling",
self._time() - self._start_time)
return False
return True
def shard_id(self):
"""Get shard unique identifier of this task from request.
Returns:
shard identifier as string.
"""
return str(self.request.get("shard_id"))
def slice_id(self):
"""Get slice unique identifier of this task from request.
Returns:
slice identifier as int.
"""
return int(self.request.get("slice_id"))
def input_reader(self, mapper_spec):
"""Get the reader from mapper_spec initialized with the request's state.
Args:
mapper_spec: a mapper spec containing the immutable mapper state.
Returns:
An initialized InputReader.
"""
input_reader_spec_dict = simplejson.loads(
self.request.get("input_reader_state"))
return mapper_spec.input_reader_class().from_json(
input_reader_spec_dict)
@staticmethod
def worker_parameters(mapreduce_spec,
shard_id,
slice_id,
input_reader):
"""Fill in mapper worker task parameters.
    The returned parameter map is used as the task payload; it contains all the
    data required by the mapper worker to perform its function.
Args:
mapreduce_spec: specification of the mapreduce.
shard_id: id of the shard (part of the whole dataset).
slice_id: id of the slice (part of the shard).
input_reader: InputReader containing the remaining inputs for this
shard.
Returns:
string->string map of parameters to be used as task payload.
"""
return {"mapreduce_spec": mapreduce_spec.to_json_str(),
"shard_id": shard_id,
"slice_id": str(slice_id),
"input_reader_state": input_reader.to_json_str()}
@staticmethod
def get_task_name(shard_id, slice_id):
"""Compute single worker task name.
Args:
shard_id: id of the shard (part of the whole dataset) as string.
slice_id: id of the slice (part of the shard) as int.
Returns:
task name which should be used to process specified shard/slice.
"""
# Prefix the task name with something unique to this framework's
# namespace so we don't conflict with user tasks on the queue.
return "appengine-mrshard-%s-%s" % (shard_id, slice_id)
def reschedule(self, mapreduce_spec, input_reader):
"""Reschedule worker task to continue scanning work.
Args:
mapreduce_spec: mapreduce specification.
input_reader: remaining input reader to process.
"""
MapperWorkerCallbackHandler.schedule_slice(
self.base_path(), mapreduce_spec, self.shard_id(),
self.slice_id() + 1, input_reader)
@classmethod
def schedule_slice(cls,
base_path,
mapreduce_spec,
shard_id,
slice_id,
input_reader,
queue_name=None,
eta=None,
countdown=None):
"""Schedule slice scanning by adding it to the task queue.
Args:
base_path: base_path of mapreduce request handlers as string.
mapreduce_spec: mapreduce specification as MapreduceSpec.
shard_id: current shard id as string.
slice_id: slice id as int.
input_reader: remaining InputReader for given shard.
queue_name: Optional queue to run on; uses the current queue of
execution or the default queue if unspecified.
eta: Absolute time when the MR should execute. May not be specified
if 'countdown' is also supplied. This may be timezone-aware or
timezone-naive.
countdown: Time in seconds into the future that this MR should execute.
Defaults to zero.
"""
task_params = MapperWorkerCallbackHandler.worker_parameters(
mapreduce_spec, shard_id, slice_id, input_reader)
task_name = MapperWorkerCallbackHandler.get_task_name(shard_id, slice_id)
queue_name = os.environ.get("HTTP_X_APPENGINE_QUEUENAME",
queue_name or "default")
try:
taskqueue.Task(url=base_path + "/worker_callback",
params=task_params,
name=task_name,
eta=eta,
countdown=countdown).add(queue_name)
except (taskqueue.TombstonedTaskError, taskqueue.TaskAlreadyExistsError), e:
logging.warning("Task %r with params %r already exists. %s: %s",
task_name, task_params, e.__class__, e)
class ControllerCallbackHandler(base_handler.BaseHandler):
"""Supervises mapreduce execution.
  It is also responsible for aggregating execution status from the shards.
  This task runs "continuously" by re-adding itself to the task queue while the
  mapreduce is still active.
"""
def __init__(self, time_function=time.time):
"""Constructor.
Args:
time_function: time function to use to obtain current time.
"""
base_handler.BaseHandler.__init__(self)
self._time = time_function
def post(self):
"""Handle post request."""
spec = model.MapreduceSpec.from_json_str(
self.request.get("mapreduce_spec"))
# TODO(user): Make this logging prettier.
logging.debug("post: id=%s headers=%s",
spec.mapreduce_id, self.request.headers)
state, control = db.get([
model.MapreduceState.get_key_by_job_id(spec.mapreduce_id),
model.MapreduceControl.get_key_by_job_id(spec.mapreduce_id),
])
if not state:
logging.error("State not found for mapreduce_id '%s'; skipping",
spec.mapreduce_id)
return
shard_states = model.ShardState.find_by_mapreduce_id(spec.mapreduce_id)
if state.active and len(shard_states) != spec.mapper.shard_count:
# Some shards were lost
logging.error("Incorrect number of shard states: %d vs %d; "
"aborting job '%s'",
len(shard_states), spec.mapper.shard_count,
spec.mapreduce_id)
state.active = False
state.result_status = model.MapreduceState.RESULT_FAILED
model.MapreduceControl.abort(spec.mapreduce_id)
active_shards = [s for s in shard_states if s.active]
failed_shards = [s for s in shard_states
if s.result_status == model.ShardState.RESULT_FAILED]
aborted_shards = [s for s in shard_states
if s.result_status == model.ShardState.RESULT_ABORTED]
if state.active:
state.active = bool(active_shards)
state.active_shards = len(active_shards)
state.failed_shards = len(failed_shards)
state.aborted_shards = len(aborted_shards)
if (not state.active and control and
control.command == model.MapreduceControl.ABORT):
# User-initiated abort *after* all shards have completed.
logging.info("Abort signal received for job '%s'", spec.mapreduce_id)
state.result_status = model.MapreduceState.RESULT_ABORTED
if not state.active:
state.active_shards = 0
if not state.result_status:
# Set final result status derived from shard states.
if [s for s in shard_states
if s.result_status != model.ShardState.RESULT_SUCCESS]:
state.result_status = model.MapreduceState.RESULT_FAILED
else:
state.result_status = model.MapreduceState.RESULT_SUCCESS
logging.info("Final result for job '%s' is '%s'",
spec.mapreduce_id, state.result_status)
# We don't need a transaction here, since we change only statistics data,
# and we don't care if it gets overwritten/slightly inconsistent.
self.aggregate_state(state, shard_states)
poll_time = state.last_poll_time
state.last_poll_time = datetime.datetime.utcfromtimestamp(self._time())
if not state.active:
# This is the last execution.
# Enqueue done_callback if needed.
def put_state(state):
state.put()
done_callback = spec.params.get(
model.MapreduceSpec.PARAM_DONE_CALLBACK)
if done_callback:
taskqueue.Task(
url=done_callback,
headers={"Mapreduce-Id": spec.mapreduce_id}).add(
spec.params.get(
model.MapreduceSpec.PARAM_DONE_CALLBACK_QUEUE,
"default"),
transactional=True)
db.run_in_transaction(put_state, state)
return
else:
state.put()
processing_rate = int(spec.mapper.params.get(
"processing_rate") or model._DEFAULT_PROCESSING_RATE_PER_SEC)
self.refill_quotas(poll_time, processing_rate, active_shards)
ControllerCallbackHandler.reschedule(
self.base_path(), spec, self.serial_id() + 1)
def aggregate_state(self, mapreduce_state, shard_states):
"""Update current mapreduce state by aggregating shard states.
Args:
mapreduce_state: current mapreduce state as MapreduceState.
shard_states: all shard states (active and inactive). list of ShardState.
"""
processed_counts = []
mapreduce_state.counters_map.clear()
for shard_state in shard_states:
mapreduce_state.counters_map.add_map(shard_state.counters_map)
processed_counts.append(shard_state.counters_map.get(
context.COUNTER_MAPPER_CALLS))
mapreduce_state.set_processed_counts(processed_counts)
def refill_quotas(self,
last_poll_time,
processing_rate,
active_shard_states):
"""Refill quotas for all active shards.
Args:
last_poll_time: Datetime with the last time the job state was updated.
processing_rate: How many items to process per second overall.
active_shard_states: All active shard states, list of ShardState.
"""
if not active_shard_states:
return
quota_manager = quota.QuotaManager(memcache.Client())
current_time = int(self._time())
last_poll_time = time.mktime(last_poll_time.timetuple())
total_quota_refill = processing_rate * max(0, current_time - last_poll_time)
quota_refill = int(math.ceil(
1.0 * total_quota_refill / len(active_shard_states)))
if not quota_refill:
return
# TODO(user): use batch memcache API to refill quota in one API call.
for shard_state in active_shard_states:
quota_manager.put(shard_state.shard_id, quota_refill)
def serial_id(self):
"""Get serial unique identifier of this task from request.
Returns:
serial identifier as int.
"""
return int(self.request.get("serial_id"))
@staticmethod
def get_task_name(mapreduce_spec, serial_id):
"""Compute single controller task name.
Args:
mapreduce_spec: specification of the mapreduce.
serial_id: id of the invocation as int.
Returns:
task name which should be used to process specified shard/slice.
"""
# Prefix the task name with something unique to this framework's
# namespace so we don't conflict with user tasks on the queue.
return "appengine-mrcontrol-%s-%s" % (
mapreduce_spec.mapreduce_id, serial_id)
@staticmethod
def controller_parameters(mapreduce_spec, serial_id):
"""Fill in controller task parameters.
    The returned parameter map is used as the task payload; it contains all the
    data required by the controller to perform its function.
Args:
mapreduce_spec: specification of the mapreduce.
serial_id: id of the invocation as int.
Returns:
string->string map of parameters to be used as task payload.
"""
return {"mapreduce_spec": mapreduce_spec.to_json_str(),
"serial_id": str(serial_id)}
@classmethod
def reschedule(cls, base_path, mapreduce_spec, serial_id, queue_name=None):
"""Schedule new update status callback task.
Args:
base_path: mapreduce handlers url base path as string.
mapreduce_spec: mapreduce specification as MapreduceSpec.
serial_id: id of the invocation as int.
queue_name: The queue to schedule this task on. Will use the current
queue of execution if not supplied.
"""
task_name = ControllerCallbackHandler.get_task_name(
mapreduce_spec, serial_id)
task_params = ControllerCallbackHandler.controller_parameters(
mapreduce_spec, serial_id)
if not queue_name:
queue_name = os.environ.get("HTTP_X_APPENGINE_QUEUENAME", "default")
try:
taskqueue.Task(url=base_path + "/controller_callback",
name=task_name, params=task_params,
countdown=_CONTROLLER_PERIOD_SEC).add(queue_name)
except (taskqueue.TombstonedTaskError, taskqueue.TaskAlreadyExistsError), e:
logging.warning("Task %r with params %r already exists. %s: %s",
task_name, task_params, e.__class__, e)
class KickOffJobHandler(base_handler.BaseHandler):
"""Taskqueue handler which kicks off a mapreduce processing.
Request Parameters:
mapreduce_spec: MapreduceSpec of the mapreduce serialized to json.
input_readers: List of InputReaders objects separated by semi-colons.
"""
def post(self):
"""Handles kick off request."""
spec = model.MapreduceSpec.from_json_str(
self._get_required_param("mapreduce_spec"))
input_readers_json = simplejson.loads(
self._get_required_param("input_readers"))
queue_name = os.environ.get("HTTP_X_APPENGINE_QUEUENAME", "default")
mapper_input_reader_class = spec.mapper.input_reader_class()
input_readers = [mapper_input_reader_class.from_json_str(reader_json)
for reader_json in input_readers_json]
KickOffJobHandler._schedule_shards(
spec, input_readers, queue_name, self.base_path())
ControllerCallbackHandler.reschedule(
self.base_path(), spec, queue_name=queue_name, serial_id=0)
def _get_required_param(self, param_name):
"""Get a required request parameter.
Args:
param_name: name of request parameter to fetch.
Returns:
parameter value
Raises:
NotEnoughArgumentsError: if parameter is not specified.
"""
value = self.request.get(param_name)
if not value:
raise NotEnoughArgumentsError(param_name + " not specified")
return value
@classmethod
def _schedule_shards(cls, spec, input_readers, queue_name, base_path):
"""Prepares shard states and schedules their execution.
Args:
spec: mapreduce specification as MapreduceSpec.
input_readers: list of InputReaders describing shard splits.
queue_name: The queue to run this job on.
base_path: The base url path of mapreduce callbacks.
"""
# Note: it's safe to re-attempt this handler because:
# - shard state has deterministic and unique key.
# - schedule_slice will fall back gracefully if a task already exists.
shard_states = []
for shard_number, input_reader in enumerate(input_readers):
shard = model.ShardState.create_new(spec.mapreduce_id, shard_number)
shard.shard_description = str(input_reader)
shard_states.append(shard)
    # Retrieves already existing shards.
existing_shard_states = db.get(shard.key() for shard in shard_states)
existing_shard_keys = set(shard.key() for shard in existing_shard_states
if shard is not None)
# Puts only non-existing shards.
db.put(shard for shard in shard_states
if shard.key() not in existing_shard_keys)
for shard_number, input_reader in enumerate(input_readers):
shard_id = model.ShardState.shard_id_from_number(
spec.mapreduce_id, shard_number)
MapperWorkerCallbackHandler.schedule_slice(
base_path, spec, shard_id, 0, input_reader, queue_name=queue_name)
class StartJobHandler(base_handler.JsonHandler):
"""Command handler starts a mapreduce job."""
def handle(self):
"""Handles start request."""
# Mapper spec as form arguments.
mapreduce_name = self._get_required_param("name")
mapper_input_reader_spec = self._get_required_param("mapper_input_reader")
mapper_handler_spec = self._get_required_param("mapper_handler")
mapper_params = self._get_params(
"mapper_params_validator", "mapper_params.")
params = self._get_params(
"params_validator", "params.")
# Set some mapper param defaults if not present.
mapper_params["processing_rate"] = int(mapper_params.get(
"processing_rate") or model._DEFAULT_PROCESSING_RATE_PER_SEC)
queue_name = mapper_params["queue_name"] = mapper_params.get(
"queue_name", "default")
# Validate the Mapper spec, handler, and input reader.
mapper_spec = model.MapperSpec(
mapper_handler_spec,
mapper_input_reader_spec,
mapper_params,
int(mapper_params.get("shard_count", model._DEFAULT_SHARD_COUNT)))
mapreduce_id = type(self)._start_map(
mapreduce_name,
mapper_spec,
params,
base_path=self.base_path(),
queue_name=queue_name,
_app=mapper_params.get("_app"))
self.json_response["mapreduce_id"] = mapreduce_id
def _get_params(self, validator_parameter, name_prefix):
"""Retrieves additional user-supplied params for the job and validates them.
Args:
validator_parameter: name of the request parameter which supplies
validator for this parameter set.
name_prefix: common prefix for all parameter names in the request.
Raises:
Any exception raised by the 'params_validator' request parameter if
the params fail to validate.
"""
params_validator = self.request.get(validator_parameter)
user_params = {}
for key in self.request.arguments():
if key.startswith(name_prefix):
values = self.request.get_all(key)
adjusted_key = key[len(name_prefix):]
if len(values) == 1:
user_params[adjusted_key] = values[0]
else:
user_params[adjusted_key] = values
if params_validator:
resolved_validator = util.for_name(params_validator)
resolved_validator(user_params)
return user_params
def _get_required_param(self, param_name):
"""Get a required request parameter.
Args:
param_name: name of request parameter to fetch.
Returns:
parameter value
Raises:
NotEnoughArgumentsError: if parameter is not specified.
"""
value = self.request.get(param_name)
if not value:
raise NotEnoughArgumentsError(param_name + " not specified")
return value
@classmethod
def _start_map(cls, name, mapper_spec,
mapreduce_params,
base_path="/mapreduce",
queue_name="default",
eta=None,
countdown=None,
_app=None):
# Check that handler can be instantiated.
mapper_spec.get_handler()
mapper_input_reader_class = mapper_spec.input_reader_class()
mapper_input_readers = mapper_input_reader_class.split_input(mapper_spec)
if not mapper_input_readers:
raise NoDataError("Found no mapper input readers to process.")
mapper_spec.shard_count = len(mapper_input_readers)
state = model.MapreduceState.create_new()
mapreduce_spec = model.MapreduceSpec(
name,
state.key().id_or_name(),
mapper_spec.to_json(),
mapreduce_params)
state.mapreduce_spec = mapreduce_spec
state.active = True
state.active_shards = mapper_spec.shard_count
if _app:
state.app_id = _app
# TODO(user): Initialize UI fields correctly.
state.char_url = ""
state.sparkline_url = ""
def schedule_mapreduce(state, mapper_input_readers, eta, countdown):
state.put()
readers_json = [reader.to_json_str() for reader in mapper_input_readers]
taskqueue.Task(
url=base_path + "/kickoffjob_callback",
params={"mapreduce_spec": state.mapreduce_spec.to_json_str(),
"input_readers": simplejson.dumps(readers_json)},
eta=eta, countdown=countdown).add(queue_name, transactional=True)
# Point of no return: We're actually going to run this job!
db.run_in_transaction(
schedule_mapreduce, state, mapper_input_readers, eta, countdown)
return state.key().id_or_name()
class CleanUpJobHandler(base_handler.JsonHandler):
"""Command to kick off tasks to clean up a job's data."""
def handle(self):
# TODO(user): Have this kick off a task to clean up all MapreduceState,
# ShardState, and MapreduceControl entities for a job ID.
self.json_response["status"] = "This does nothing yet."
class AbortJobHandler(base_handler.JsonHandler):
"""Command to abort a running job."""
def handle(self):
model.MapreduceControl.abort(self.request.get("mapreduce_id"))
self.json_response["status"] = "Abort signal sent."
| 35.188056
| 80
| 0.680605
| 26,198
| 0.946015
| 0
| 0
| 8,739
| 0.315567
| 0
| 0
| 10,991
| 0.396887
|
fb886601d83ea5836e86da12edc2cb8f001f8166
| 382
|
py
|
Python
|
radiomicsfeatureextractionpipeline/backend/src/logic/entities/ct_series.py
|
Maastro-CDS-Imaging-Group/SQLite4Radiomics
|
e3a7afc181eec0fe04c18da00edc3772064e6758
|
[
"Apache-2.0"
] | null | null | null |
radiomicsfeatureextractionpipeline/backend/src/logic/entities/ct_series.py
|
Maastro-CDS-Imaging-Group/SQLite4Radiomics
|
e3a7afc181eec0fe04c18da00edc3772064e6758
|
[
"Apache-2.0"
] | 6
|
2021-06-09T19:39:27.000Z
|
2021-09-30T16:41:40.000Z
|
radiomicsfeatureextractionpipeline/backend/src/logic/entities/ct_series.py
|
Maastro-CDS-Imaging-Group/SQLite4Radiomics
|
e3a7afc181eec0fe04c18da00edc3772064e6758
|
[
"Apache-2.0"
] | null | null | null |
"""
This module is used to represent a CTSeries object from the DICOMSeries table in the database.
Inherits SeriesWithImageSlices module.
"""
from logic.entities.series_with_image_slices import SeriesWithImageSlices
class CtSeries(SeriesWithImageSlices):
"""
This class stores all information about a CT-series from the DICOMSeries table in the database.
"""
pass
| 29.384615
| 99
| 0.780105
| 163
| 0.426702
| 0
| 0
| 0
| 0
| 0
| 0
| 252
| 0.659686
|
fb88b8dcfd3fd4a86eaad1ea35d9e6acff02b1b6
| 8,108
|
py
|
Python
|
models/pointSIFT_pointnet_age.py
|
KelvinTao/pointSIFT_Age2
|
b0684ee989b5f6f3dca25e9bbf15b3c5fd8cf1cf
|
[
"Apache-2.0"
] | null | null | null |
models/pointSIFT_pointnet_age.py
|
KelvinTao/pointSIFT_Age2
|
b0684ee989b5f6f3dca25e9bbf15b3c5fd8cf1cf
|
[
"Apache-2.0"
] | null | null | null |
models/pointSIFT_pointnet_age.py
|
KelvinTao/pointSIFT_Age2
|
b0684ee989b5f6f3dca25e9bbf15b3c5fd8cf1cf
|
[
"Apache-2.0"
] | null | null | null |
import os
import sys
import tensorflow as tf
import tf_utils.tf_util as tf_util
from tf_utils.pointSIFT_util import pointSIFT_module, pointSIFT_res_module, pointnet_fp_module, pointnet_sa_module
def placeholder_inputs(batch_size,num_point,num_class):
#num_class=numofclasses*2
pointclouds_pl = tf.placeholder(tf.float32, shape=(batch_size, num_point, 3))
labels_pl = tf.placeholder(tf.int32, shape=(batch_size, num_class//2,2))
smpws_pl = tf.placeholder(tf.float32, shape=(batch_size))
return pointclouds_pl, labels_pl, smpws_pl
def get_model(point_cloud, is_training, num_class, bn_decay=None, feature=None):
""" Semantic segmentation PointNet, input is B x N x 3, output B x num_class """
end_points = {}
l0_xyz = point_cloud
l0_points = feature
end_points['l0_xyz'] = l0_xyz
# c0: 1024*128
c0_l0_xyz, c0_l0_points, c0_l0_indices = pointSIFT_res_module(l0_xyz, l0_points, radius=0.15, out_channel=64, is_training=is_training, bn_decay=bn_decay, scope='layer0_c0', merge='concat')
l1_xyz, l1_points, l1_indices = pointnet_sa_module(c0_l0_xyz, c0_l0_points, npoint=1024, radius=0.1, nsample=32, mlp=[64,128], mlp2=None, group_all=False, is_training=is_training, bn_decay=bn_decay, scope='layer1')
# c1: 256*256
c0_l1_xyz, c0_l1_points, c0_l1_indices = pointSIFT_res_module(l1_xyz, l1_points, radius=0.25, out_channel=128, is_training=is_training, bn_decay=bn_decay, scope='layer1_c0')
l2_xyz, l2_points, l2_indices = pointnet_sa_module(c0_l1_xyz, c0_l1_points, npoint=256, radius=0.2, nsample=32, mlp=[128,256], mlp2=None, group_all=False, is_training=is_training, bn_decay=bn_decay, scope='layer2')
# c2: 256*512
c0_l2_xyz, c0_l2_points, c0_l2_indices = pointSIFT_res_module(l2_xyz, l2_points, radius=0.5, out_channel=256, is_training=is_training, bn_decay=bn_decay, scope='layer2_c0')
c1_l2_xyz, c1_l2_points, c1_l2_indices = pointSIFT_res_module(c0_l2_xyz, c0_l2_points, radius=0.5, out_channel=512, is_training=is_training, bn_decay=bn_decay, scope='layer2_c1', same_dim=True)
l2_cat_points = tf.concat([c0_l2_points, c1_l2_points], axis=-1)
fc_l2_points = tf_util.conv1d(l2_cat_points, 512, 1, padding='VALID', bn=True, is_training=is_training, scope='layer2_conv_c2', bn_decay=bn_decay)
# c3: 64*512
l3_xyz, l3_points, l3_indices = pointnet_sa_module(c1_l2_xyz, fc_l2_points, npoint=64, radius=0.4, nsample=32, mlp=[512,512], mlp2=None, group_all=False, is_training=is_training, bn_decay=bn_decay, scope='layer3')
    # FC layers: 64x512 -> 64x256 -> 64x128, flattened to 8192
net = tf_util.conv1d(l3_points, 256, 1, padding='VALID', bn=True, is_training=is_training, scope='layer4_conv', bn_decay=bn_decay)
#net = tf_util.dropout(net, keep_prob=0.7, is_training=is_training, scope='dp1')
net = tf_util.conv1d(net, 128, 1, padding='VALID', bn=True, is_training=is_training, scope='layer5_conv', bn_decay=bn_decay)
##flatten:B*8192
flat = tf.reshape(net, [-1,64*128])
##dense layer:4096
dense = tf_util.fully_connected(flat,4096,scope='layer6_dense',bn=True,bn_decay=bn_decay,is_training=is_training)
dense = tf_util.fully_connected(dense,4096,scope='layer7_dense',bn=True,bn_decay=bn_decay,is_training=is_training)
dense = tf_util.dropout(dense, keep_prob=0.5, is_training=is_training, scope='dp')
logits = tf_util.fully_connected(dense,num_class,scope='layer8_dense',activation_fn=None,bn=True,bn_decay=bn_decay,is_training=is_training)#logits
return logits, end_points
'''
l2_points = pointnet_fp_module(l2_xyz, l3_xyz, l2_points, l3_points, [512,512], is_training, bn_decay, scope='fa_layer2')
_, l2_points_1, _ = pointSIFT_module(l2_xyz, l2_points, radius=0.5, out_channel=512, is_training=is_training, bn_decay=bn_decay, scope='fa_layer2_c0')
_, l2_points_2, _ = pointSIFT_module(l2_xyz, l2_points, radius=0.5, out_channel=512, is_training=is_training, bn_decay=bn_decay, scope='fa_layer2_c1')
_, l2_points_3, _ = pointSIFT_module(l2_xyz, l2_points, radius=0.5, out_channel=512, is_training=is_training, bn_decay=bn_decay, scope='fa_layer2_c2')
l2_points = tf.concat([l2_points_1, l2_points_2, l2_points_3], axis=-1)
l2_points = tf_util.conv1d(l2_points, 512, 1, padding='VALID', bn=True, is_training=is_training, scope='fa_2_fc', bn_decay=bn_decay)
l1_points = pointnet_fp_module(l1_xyz, l2_xyz, l1_points, l2_points, [256,256], is_training, bn_decay, scope='fa_layer3')
_, l1_points_1, _ = pointSIFT_module(l1_xyz, l1_points, radius=0.25, out_channel=256, is_training=is_training, bn_decay=bn_decay, scope='fa_layer3_c0')
_, l1_points_2, _ = pointSIFT_module(l1_xyz, l1_points_1, radius=0.25, out_channel=256, is_training=is_training, bn_decay=bn_decay, scope='fa_layer3_c1')
l1_points = tf.concat([l1_points_1, l1_points_2], axis=-1)
l1_points = tf_util.conv1d(l1_points, 256, 1, padding='VALID', bn=True, is_training=is_training, scope='fa_1_fc', bn_decay=bn_decay)
l0_points = pointnet_fp_module(l0_xyz, l1_xyz, l0_points, l1_points, [128,128,128], is_training, bn_decay, scope='fa_layer4')
_, l0_points, _ = pointSIFT_module(l0_xyz, l0_points, radius=0.1, out_channel=128, is_training=is_training, bn_decay=bn_decay, scope='fa_layer4_c0')
# FC layers
net = tf_util.conv1d(l0_points, 128, 1, padding='VALID', bn=True, is_training=is_training, scope='fc1', bn_decay=bn_decay)
net = tf_util.dropout(net, keep_prob=0.5, is_training=is_training, scope='dp1')
net = tf_util.conv1d(net, num_class, 1, padding='VALID', activation_fn=None, scope='fc2')
return net, end_points
'''
def get_loss(logits,labels,num_class,smpws=1):
"""
:param logits: Bx(C*2)--Bx200(100*2)
:param labels: BxCx2--Bx100x2
:param smpw: B ; sample weight
:num_class:200 --class_number*2
"""
labels=tf.cast(labels, tf.float32)
part_logits=tf.reshape(logits,[-1,num_class//2,2])
classify_loss=tf.reduce_mean(tf.multiply(tf.reduce_sum(tf.nn.softmax_cross_entropy_with_logits(logits=part_logits, labels=labels),1),smpws))
#classify_loss0=classify_loss
#tf.summary.scalar('classify loss 0', classify_loss0)
###attention!!!
#regularization_loss=tf.reduce_mean(tf.contrib.slim.losses.get_regularization_losses())
#regularization_loss=tf.reduce_mean(tf.losses.get_regularization_losses(scope))
#reg=1;classify_loss+=reg*regularization_loss ##scalar
#tf.summary.scalar('classify loss', classify_loss)
tf.summary.scalar('part logits shape 0', part_logits.shape[0])
tf.summary.scalar('part logits shape 1', part_logits.shape[1])
tf.summary.scalar('part logits shape 2', part_logits.shape[2])
tf.summary.scalar('labels shape 0', labels.shape[0])
tf.summary.scalar('labels shape 1', labels.shape[1])
tf.summary.scalar('labels shape 2', labels.shape[2])
tf.add_to_collection('losses', classify_loss)
return classify_loss
def eval_pred(logits,input_labels,num_class,wt=1):  # evaluate prediction results
"""
:param logits: Bx(C*2)--Bx200(100*2);>< age_thresh
"""
input_labels=tf.cast(input_labels, tf.float32)
wt=tf.cast(wt, tf.float32)
part_logits=tf.reshape(logits,[-1,num_class//2,2])
part_logits1=tf.map_fn(lambda x:x[:,0],tf.nn.softmax(part_logits))
pred=tf.cast(tf.reduce_sum(part_logits1,1),tf.float32)
labb=tf.reduce_sum(tf.map_fn(lambda x:x[:,0],input_labels),1)
#mae_wt=tf.cast(tf.reduce_mean(tf.multiply(tf.abs(pred-labb),wt)), tf.float64)
mae_wt=tf.reduce_mean(tf.multiply(tf.abs(pred-labb),wt))
#mae=tf.cast(tf.reduce_mean(tf.abs(pred-labb)), tf.float64)
mae=tf.reduce_mean(tf.abs(pred-labb))
#tf.summary.scalar('Test set MAE', mae)
#tf.summary.scalar('Test set MAE_weighted', mae_wt)
return pred,mae,mae_wt
'''
def get_loss(pred, label, smpw):
"""
:param pred: BxNxC
:param label: BxN
:param smpw: BxN
:return:
"""
classify_loss = tf.losses.sparse_softmax_cross_entropy(labels=label, logits=pred, weights=smpw)
tf.summary.scalar('classify loss', classify_loss)
tf.add_to_collection('losses', classify_loss)
return classify_loss
'''
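# --- Illustrative sketch (not part of the original model file) ---
# eval_pred treats the Bx(num_class) logits as num_class//2 independent binary
# "older than threshold i" classifiers and sums the probability of the first
# column to get an age estimate. NumPy stands in for TensorFlow below.
import numpy as np

def _softmax(x, axis=-1):
    e = np.exp(x - x.max(axis=axis, keepdims=True))
    return e / e.sum(axis=axis, keepdims=True)

logits_np = np.random.randn(4, 100 * 2)          # batch of 4, 100 age thresholds
pairs = logits_np.reshape(-1, 100, 2)            # (B, 100, 2) binary classifiers
p_older = _softmax(pairs, axis=-1)[:, :, 0]      # P(age > threshold_i)
pred_age = p_older.sum(axis=1)                   # estimated age per sample, shape (4,)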
| 57.503546
| 218
| 0.741737
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 3,916
| 0.482266
|
fb89161fb05f2325ee9a0854f9561e3db343bc07
| 89
|
py
|
Python
|
cwl_airflow_parser/operators/__init__.py
|
lrodri29/cwl-airflow-parser
|
3854022fc7a5c62cfd92e93fdb7a97d528918239
|
[
"Apache-2.0"
] | 14
|
2018-05-01T01:31:07.000Z
|
2019-09-02T15:41:06.000Z
|
cwl_airflow_parser/operators/__init__.py
|
lrodri29/cwl-airflow-parser
|
3854022fc7a5c62cfd92e93fdb7a97d528918239
|
[
"Apache-2.0"
] | 1
|
2018-08-06T17:28:51.000Z
|
2018-08-27T16:05:10.000Z
|
cwl_airflow_parser/operators/__init__.py
|
lrodri29/cwl-airflow-parser
|
3854022fc7a5c62cfd92e93fdb7a97d528918239
|
[
"Apache-2.0"
] | 8
|
2018-08-06T16:47:31.000Z
|
2020-05-12T20:21:01.000Z
|
from .cwljobdispatcher import CWLJobDispatcher
from .cwljobgatherer import CWLJobGatherer
| 44.5
| 46
| 0.898876
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|