| blob_id (stringlengths 40–40) | directory_id (stringlengths 40–40) | path (stringlengths 2–616) | content_id (stringlengths 40–40) | detected_licenses (listlengths 0–69) | license_type (stringclasses, 2 values) | repo_name (stringlengths 5–118) | snapshot_id (stringlengths 40–40) | revision_id (stringlengths 40–40) | branch_name (stringlengths 4–63) | visit_date (timestamp[us]) | revision_date (timestamp[us]) | committer_date (timestamp[us]) | github_id (int64, 2.91k–686M, nullable ⌀) | star_events_count (int64, 0–209k) | fork_events_count (int64, 0–110k) | gha_license_id (stringclasses, 23 values) | gha_event_created_at (timestamp[us]) | gha_created_at (timestamp[us]) | gha_language (stringclasses, 213 values) | src_encoding (stringclasses, 30 values) | language (stringclasses, 1 value) | is_vendor (bool, 2 classes) | is_generated (bool, 2 classes) | length_bytes (int64, 2–10.3M) | extension (stringclasses, 246 values) | content (stringlengths 2–10.3M) | authors (listlengths 1–1) | author_id (stringlengths 0–212) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
7df10e5c2b530b80c44a49b4fad39d8fac7471b0
|
48cc68cfc98a74df8765b38e6d75a25dec962bc9
|
/API/tools.py
|
ea7ff1474a55b7f6a22d87f36fef608407eb60fe
|
[
"MIT"
] |
permissive
|
gaozhangyang/DecST
|
2c0f7a884771ca28328d5dc064622b2571909528
|
116ce9efa28a07793900d09345abab4cb512db98
|
refs/heads/master
| 2023-05-08T20:11:33.469488
| 2021-05-27T09:00:51
| 2021-05-27T09:00:51
| 371,293,589
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,652
|
py
|
import numpy as np
import torch
class EarlyStopping:
def __init__(self, patience=7, verbose=False, delta=0):
self.patience = patience
self.verbose = verbose
self.counter = 0
self.best_score = None
self.early_stop = False
self.val_loss_min = np.Inf
self.delta = delta
def __call__(self, val_loss, model, path):
score = -val_loss
if self.best_score is None:
self.best_score = score
self.save_checkpoint(val_loss, model, path)
elif score < self.best_score + self.delta:
self.counter += 1
print(f'EarlyStopping counter: {self.counter} out of {self.patience}')
if self.counter >= self.patience:
self.early_stop = True
else:
self.best_score = score
self.save_checkpoint(val_loss, model, path)
self.counter = 0
def save_checkpoint(self, val_loss, model, path):
if self.verbose:
print(f'Validation loss decreased ({self.val_loss_min:.6f} --> {val_loss:.6f}). Saving model ...')
torch.save(model.state_dict(), path+'/'+'checkpoint.pth')
self.val_loss_min = val_loss
class dotdict(dict):
"""dot.notation access to dictionary attributes"""
__getattr__ = dict.get
__setattr__ = dict.__setitem__
__delattr__ = dict.__delitem__
class StandardScaler():
def __init__(self, mean, std):
self.mean = mean
self.std = std
def transform(self, data):
return (data - self.mean) / self.std
def inverse_transform(self, data):
return (data * self.std) + self.mean
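# A minimal usage sketch (added for illustration; not part of the original
# module): drives EarlyStopping with a toy torch model and a synthetic loss
# curve, then round-trips StandardScaler. Names below are hypothetical.
if __name__ == "__main__":
    import tempfile
    net = torch.nn.Linear(4, 1)
    stopper = EarlyStopping(patience=2, verbose=True)
    ckpt_dir = tempfile.mkdtemp()
    for val_loss in [1.0, 0.8, 0.9, 0.95]:
        stopper(val_loss, net, ckpt_dir)  # saves checkpoint.pth on improvement
        if stopper.early_stop:
            print("early stop triggered")
            break
    scaler = StandardScaler(mean=0.5, std=2.0)
    x = np.array([0.5, 2.5, 4.5])
    assert np.allclose(scaler.inverse_transform(scaler.transform(x)), x)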
|
[
"tancheng@westlake.edu.cn"
] |
tancheng@westlake.edu.cn
|
0eb99176513f06fb8781196ee8014d649bd70d86
|
9df2fb0bc59ab44f026b0a2f5ef50c72b2fb2ceb
|
/sdk/containerservice/azure-mgmt-containerservice/azure/mgmt/containerservice/v2023_04_02_preview/operations/_trusted_access_roles_operations.py
|
4f35fd2f4bca3e2242277391b6aa357f6e55d86e
|
[
"MIT",
"LGPL-2.1-or-later",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
openapi-env-test/azure-sdk-for-python
|
b334a2b65eeabcf9b7673879a621abb9be43b0f6
|
f61090e96094cfd4f43650be1a53425736bd8985
|
refs/heads/main
| 2023-08-30T14:22:14.300080
| 2023-06-08T02:53:04
| 2023-06-08T02:53:04
| 222,384,897
| 1
| 0
|
MIT
| 2023-09-08T08:38:48
| 2019-11-18T07:09:24
|
Python
|
UTF-8
|
Python
| false
| false
| 7,294
|
py
|
# pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Iterable, Optional, TypeVar
import urllib.parse
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
from ..._serialization import Serializer
from .._vendor import _convert_request, _format_url_section
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_list_request(location: str, subscription_id: str, **kwargs: Any) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-02-preview"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/subscriptions/{subscriptionId}/providers/Microsoft.ContainerService/locations/{location}/trustedAccessRoles",
) # pylint: disable=line-too-long
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
"location": _SERIALIZER.url("location", location, "str", min_length=1),
}
_url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
class TrustedAccessRolesOperations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.containerservice.v2023_04_02_preview.ContainerServiceClient`'s
:attr:`trusted_access_roles` attribute.
"""
models = _models
def __init__(self, *args, **kwargs):
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
@distributed_trace
def list(self, location: str, **kwargs: Any) -> Iterable["_models.TrustedAccessRole"]:
"""List supported trusted access roles.
List supported trusted access roles.
:param location: The name of Azure region. Required.
:type location: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either TrustedAccessRole or the result of cls(response)
:rtype:
~azure.core.paging.ItemPaged[~azure.mgmt.containerservice.v2023_04_02_preview.models.TrustedAccessRole]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-02-preview"))
cls: ClsType[_models.TrustedAccessRoleListResult] = kwargs.pop("cls", None)
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_list_request(
location=location,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.list.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
# make call to next link with the client's api-version
_parsed_next_link = urllib.parse.urlparse(next_link)
_next_request_params = case_insensitive_dict(
{
key: [urllib.parse.quote(v) for v in value]
for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
}
)
_next_request_params["api-version"] = self._config.api_version
request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("TrustedAccessRoleListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem) # type: ignore
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(get_next, extract_data)
list.metadata = {
"url": "/subscriptions/{subscriptionId}/providers/Microsoft.ContainerService/locations/{location}/trustedAccessRoles"
}
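# A hedged usage sketch (added for illustration; not part of the generated
# file). Per the class docstring above, the operations class is reached
# through the client's `trusted_access_roles` attribute rather than
# instantiated directly; credential and subscription are placeholders:
#
#     from azure.identity import DefaultAzureCredential
#     from azure.mgmt.containerservice import ContainerServiceClient
#
#     client = ContainerServiceClient(DefaultAzureCredential(), "<subscription-id>")
#     for role in client.trusted_access_roles.list(location="westus2"):
#         print(role.name)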
|
[
"noreply@github.com"
] |
openapi-env-test.noreply@github.com
|
cf6780a47474b33842de25b95ce811cf72dda6fe
|
01d53c2792f448380b7a5b6d1234cabfc41a423c
|
/RFFlow.py
|
043e41e114640cebf831db85ae3363b0c7d40263
|
[] |
no_license
|
jgratsova/DNA
|
c75bc3afeba4f089828ebda389ca167ea911f544
|
906ff90204a1ba4c3827b2a4fe47b035d4ad080b
|
refs/heads/master
| 2020-03-22T05:19:28.745557
| 2018-09-03T15:47:13
| 2018-09-03T15:47:13
| 139,557,799
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 15,849
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Fri Jul 13 12:38:00 2018
@author: JGratsova
"""
# Import libraries
from sklearn import preprocessing
from pandas import DataFrame
import pandas as pd
import numpy as np
import pickle as pk
import time
#import sqlalchemy as sa
from matplotlib import pyplot
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import RandomizedSearchCV
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix
from sklearn.metrics import cohen_kappa_score
from sklearn.metrics import classification_report
from sklearn.neural_network import MLPClassifier
from scipy.stats import randint as sp_randInt
from scipy.stats import uniform as sp_randFloat
from pandas.plotting import scatter_matrix
import multiprocessing as mp
dataset = pd.read_csv("1to1_train_ready_rt7460.csv", header=None)
X = dataset.iloc[:354085, 1:805].values
y = dataset.iloc[:354085, 0].values
# Splitting the dataset into the Training set and Test set
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.4,
random_state = 0)
# show number of ones
y_test_count_1 = np.count_nonzero(y_test)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
KFold = 3
model_list = []
cv_outcomes = []
description = []
###############################################################################
################ Manual tuning of parameter settings -PS1 ##################
###############################################################################
RF_1 = RandomForestClassifier(n_estimators = 10, criterion = 'gini',
max_depth = 6, min_samples_split = 2,
min_samples_leaf = 1,
min_weight_fraction_leaf = 0.0,
max_features = 'auto',max_leaf_nodes = None,
min_impurity_decrease = 0.0,
min_impurity_split = None, bootstrap = True,
oob_score = False, n_jobs = 10,
random_state = None, verbose = 1,
warm_start = False, class_weight = None)
model = RF_1
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Cross Validation
cv_results = cross_val_score(model, X_train, y_train,
cv = KFold, scoring = 'accuracy',
n_jobs = 10)
cv_outcomes.append(cv_results)
description.append('RF_1')
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Cross Validation Results
print("\n%s: " % ('Random Forest Algorithm: PS-1'))
prt_string = "CV Mean Accuracy: %f (Std: %f)"% (
cv_results.mean(), cv_results.std())
print(prt_string)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Train the model
trained_model = model.fit(X_train, y_train)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Evaluate performance of the trained model
pred_class = trained_model.predict(X_test)
accuracy = accuracy_score(y_test, pred_class)
conf_matrix = confusion_matrix(y_test, pred_class)
class_report = classification_report(y_test, pred_class)
kappa_score = cohen_kappa_score(y_test, pred_class)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Collect performance results
model_list.append(('RF_1', 'Random Forest Algorithm: PS-1',
trained_model, accuracy, conf_matrix,
class_report, kappa_score))
###############################################################################
################ Manual tuning of parameter settings -PS2 ##################
###############################################################################
RF_2 = RandomForestClassifier(n_estimators = 100, criterion = 'entropy',
max_depth = 8, min_samples_split = 2,
min_samples_leaf = 1,
min_weight_fraction_leaf = 0.0,
max_features = None,max_leaf_nodes = None,
min_impurity_decrease = 0.0,
min_impurity_split = None, bootstrap = True,
oob_score = False, n_jobs = 10,
random_state = None, verbose = 1,
warm_start = False, class_weight = None)
model = RF_2
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Cross Validation
cv_results = cross_val_score(model, X_train, y_train,
cv = KFold, scoring = 'accuracy',
n_jobs = 10)
cv_outcomes.append(cv_results)
description.append('RF_2')
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Cross Validation Results
print("\n%s: " % ('Random Forest Algorithm: PS-2'))
prt_string = "CV Mean Accuracy: %f (Std: %f)"% (
cv_results.mean(), cv_results.std())
print(prt_string)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Train the model
trained_model = model.fit(X_train, y_train)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Evaluate performance of the trained model
pred_class = trained_model.predict(X_test)
accuracy = accuracy_score(y_test, pred_class)
conf_matrix = confusion_matrix(y_test, pred_class)
class_report = classification_report(y_test, pred_class)
kappa_score = cohen_kappa_score(y_test, pred_class)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Collect performance results
model_list.append(('RF_2', 'Random Forest Algorithm: PS-2',
trained_model, accuracy, conf_matrix,
class_report, kappa_score))
###############################################################################
################ Manual tuning of parameter settings -PS3 ##################
###############################################################################
RF_3 = RandomForestClassifier(n_estimators = 1000, criterion = 'gini',
max_depth = 10, min_samples_split = 2,
min_samples_leaf = 1,
min_weight_fraction_leaf = 0.0,
max_features = 'log2',max_leaf_nodes = None,
min_impurity_decrease = 0.0,
min_impurity_split = None, bootstrap = True,
oob_score = False, n_jobs = 10,
random_state = None, verbose = 1,
warm_start = False, class_weight = None)
model = RF_3
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Cross Validation
cv_results = cross_val_score(model, X_train, y_train,
cv = KFold, scoring = 'accuracy',
n_jobs = 10)
cv_outcomes.append(cv_results)
description.append('RF_3')
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Cross Validation Results
print("\n%s: " % ('Random Forest Algorithm: PS-3'))
prt_string = "CV Mean Accuracy: %f (Std: %f)"% (
cv_results.mean(), cv_results.std())
print(prt_string)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Train the model
trained_model = model.fit(X_train, y_train)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Evaluate performance of the trained model
pred_class = trained_model.predict(X_test)
accuracy = accuracy_score(y_test, pred_class)
conf_matrix = confusion_matrix(y_test, pred_class)
class_report = classification_report(y_test, pred_class)
kappa_score = cohen_kappa_score(y_test, pred_class)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Collect performance results
model_list.append(('RF_3', 'Random Forest Algorithm: PS-3',
trained_model, accuracy, conf_matrix,
class_report, kappa_score))
###############################################################################
######### Automatic tuning of parameter settings using GridSearchCV ########
###############################################################################
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Model setup
model = RandomForestClassifier()
parameters = {'max_depth' : [6, 10, 50],
'criterion' : ['gini', 'entropy'],
'max_features' : ['auto', 'sqrt', 'log2'],
'n_estimators' : [100, 500, 1000]
}
grid = GridSearchCV(estimator = model, param_grid = parameters,
cv = KFold, verbose = 1, n_jobs = 10)
grid.fit(X_train, y_train)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Grid search results
print("\n =========================================================")
print(" Grid Search Results ")
print("============================================================")
print("\n The best estimator :\n",
grid.best_estimator_)
print("\n The best score :\n",
grid.best_score_)
print("\n The best parameters :\n",
grid.best_params_)
print("\n =========================================================")
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Set up model using grid search results
model = grid.best_estimator_
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Cross validation
cv_results = cross_val_score(model, X_train, y_train,
cv = KFold, scoring = 'accuracy',
verbose = 1, n_jobs = 10)
cv_outcomes.append(cv_results)
description.append('RF_4')
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Cross Validation Results
print("\n%s: " % ('Random Forest Algorithm: PS-4'))
prt_string = "CV Mean Accuracy: %f (Std: %f)"% (
cv_results.mean(), cv_results.std())
print(prt_string)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Train the model
trained_model = model.fit(X_train, y_train)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Evaluate performance of the trained model
pred_class = trained_model.predict(X_test)
accuracy = accuracy_score(y_test, pred_class)
conf_matrix = confusion_matrix(y_test, pred_class)
class_report = classification_report(y_test, pred_class)
kappa_score = cohen_kappa_score(y_test, pred_class)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Collect performance results
model_list.append(('RF_4', 'Random Forest Algorithm: PS-4',
trained_model, accuracy, conf_matrix,
class_report, kappa_score))
###############################################################################
###### Automatic tuning of parameter settings using RandomizedSearchCV #####
###############################################################################
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Model setup
model = RandomForestClassifier()
parameters = {'max_depth' : sp_randInt(4, 10),
'criterion' : ['gini', 'entropy'],
'max_features' : ['auto', 'sqrt', 'log2'],
'n_estimators' : sp_randInt(100, 1000),
'min_impurity_decrease' : sp_randFloat(),
}
random = RandomizedSearchCV(estimator = model,
param_distributions = parameters,
cv = KFold, n_iter = 10,
verbose = 1, n_jobs = 10)
random.fit(X_train, y_train)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Randomized search results
print("\n =========================================================")
print(" Random Search Results ")
print("============================================================")
print("\n The best estimator :\n",
grid.best_estimator_)
print("\n The best score :\n",
grid.best_score_)
print("\n The best parameters :\n",
grid.best_params_)
print("\n =========================================================")
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Set up model using randomized search results
model = random.best_estimator_
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Cross validation
cv_results = cross_val_score(model, X_train, y_train,
cv = KFold, scoring = 'accuracy',
verbose = 1, n_jobs = 10)
cv_outcomes.append(cv_results)
description.append('RF_5')
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Cross Validation Results
print("\n%s: " % ('RAndom Forest Algorithm: PS-5'))
prt_string = "CV Mean Accuracy: %f (Std: %f)"% (
cv_results.mean(), cv_results.std())
print(prt_string)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Train the model
trained_model = model.fit(X_train, y_train)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Evaluate performance of the trained model
pred_class = trained_model.predict(X_test)
accuracy = accuracy_score(y_test, pred_class)
conf_matrix = confusion_matrix(y_test, pred_class)
class_report = classification_report(y_test, pred_class)
kappa_score = cohen_kappa_score(y_test, pred_class)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Collect performance results
model_list.append(('RF_5', 'Random Forest Algorithm: PS-5',
trained_model, accuracy, conf_matrix,
class_report, kappa_score))
###############################################################################
############## Visualisation of results from Cross Validation ##############
###############################################################################
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Plot the results
fig = pyplot.figure()
fig.suptitle('Algorithm Comparison : Cross Validation Results')
ax = fig.add_subplot(111)
pyplot.boxplot(cv_outcomes, vert = False)
ax.set_yticklabels(description)
pyplot.show()
###############################################################################
################# Trained Models : Evaluation and Reporting ################
###############################################################################
print('\n Trained Models : Evaluation and Reporting ... ... ...')
for shtDes, des, model, accu, cm, rept, kappa in model_list:
prt_ = "\nModel:{M}\nAccuracy:{A}\tKappa:{K}\nReport:\n{R}".format(
M = des, A = round(accu, 2), K = round(kappa, 2), R = rept)
prt_cm = "\nConfusion Matrix:\n{CM}".format(CM = cm)
print(prt_, prt_cm)
# Save the trained model
with open('model_'+shtDes+'.pickle', 'wb') as f:
pk.dump(model, f)
print("\n\nTrained models are saved ... Done ...")
|
[
"noreply@github.com"
] |
jgratsova.noreply@github.com
|
f5ad9ba79254d89cf80f04861ff07ab84b5a23f0
|
6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4
|
/Nda8BQHhZSajpnt5z_11.py
|
8e91b160e475d4c630be68a1759dc717d57213f7
|
[] |
no_license
|
daniel-reich/ubiquitous-fiesta
|
26e80f0082f8589e51d359ce7953117a3da7d38c
|
9af2700dbe59284f5697e612491499841a6c126f
|
refs/heads/master
| 2023-04-05T06:40:37.328213
| 2021-04-06T20:17:44
| 2021-04-06T20:17:44
| 355,318,759
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 183
|
py
|
def GCD(lst):
ans = lst[0]
for x in lst[1:]:
ans = gcd(ans, x)
return ans
def gcd(a, b):
if(b==0):
return a
else:
return gcd(b, a%b)
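# A hedged usage sketch (added for illustration): GCD folds the recursive
# Euclidean gcd across the list, so GCD([12, 18, 24]) evaluates
# gcd(gcd(12, 18), 24) == 6.
if __name__ == "__main__":
    print(GCD([12, 18, 24]))  # 6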
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
d6f49c02d34b8328f80de8c17cb33ce28336cfa2
|
e6abfb46e3b9a271a714de165c3164bbdd2c99d0
|
/Dictionary.py
|
046013ef24ceb8eedc7b7d40fdd2bcd7a34e311e
|
[] |
no_license
|
chandu17297/python3
|
14a8011a475a4da55e41c7640e8dfcf4ffaaa478
|
83acf7ff15717cda45bf2a1eee2585645c27db91
|
refs/heads/master
| 2021-04-29T10:19:04.026793
| 2017-10-17T02:36:18
| 2017-10-17T02:36:18
| 77,645,446
| 0
| 0
| null | 2017-08-10T13:38:11
| 2016-12-29T23:12:53
|
Python
|
UTF-8
|
Python
| false
| false
| 869
|
py
|
# coding: utf-8
# In[1]:
#dictionary
chandra = {'1':'shekar'}
# In[2]:
chandra
# In[3]:
#getting the value stored in particular key
chandra['1']
# In[7]:
#a dictionary can store a list of values
chan={'k1':'1' , 'k2':'2' , 'k3':'3' , 'k4':[9,2,5,3]}
# In[9]:
chan['k4']
# In[13]:
chan['k4'][2]
# In[14]:
#stored values can be changed, but not the keys
chan['k4'][0]+2
# In[20]:
chan1={'k1':'a','k2':'A','k3':['D','A','W','N']}
# In[21]:
chan1['k3'][3]
# In[22]:
#change of case in dictionary
chan1['k3'][3].lower()
# In[26]:
chan1['k3'][1]='ok'
# In[27]:
chan1['k3'][1]
# In[29]:
chan1['k1']=[1,2,3]
# In[30]:
chan1
# In[38]:
#nested dictionary
chan2={'a1':{'b1':[8,'a',4]}}
# In[39]:
chan2['a1']['b1']
# In[34]:
chan1.keys()
# In[36]:
chan1.values()
# In[37]:
chan1.items()
# In[ ]:
# In[ ]:
# In[ ]:
|
[
"noreply@github.com"
] |
chandu17297.noreply@github.com
|
11a4af1ebb9b8c45a9af5a409fdf1fd4888f7aa1
|
dd5dba3bb000cb2afd63a8576674d310623a110c
|
/dartmun/models_logistics.py
|
9a2f8d8748dea7cd6541185bde0d94cdadef72f8
|
[] |
no_license
|
hdk23/conference
|
2442df001a2f02ac4c1ce866781aead437b7bd08
|
1056ac6b519a2a02296184c61c18f0c8df02a377
|
refs/heads/master
| 2023-05-01T06:14:55.944734
| 2021-05-17T02:05:44
| 2021-05-17T02:05:44
| 334,558,055
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 397
|
py
|
from django.db import models
class Session(models.Model):
"""Session model to track conference sessions"""
number = models.IntegerField()
start_time = models.DateTimeField(null=True)
end_time = models.DateTimeField(null=True)
counts = models.BooleanField(default=True)
def __str__(self):
return f"Session {self.number} from {self.start_time} to {self.end_time}"
|
[
"henry.dohyun.kim.23@dartmouth.edu"
] |
henry.dohyun.kim.23@dartmouth.edu
|
cff00fb5a231213908efe2f7703b82cbe8fb099f
|
99d79ada2d3b7746573f071823ec61f5f853d7a3
|
/tests/test_circuit/test_debug_circuit.py
|
59178bc03538bc5a75e0fb44366b964d44ebf307
|
[
"MIT"
] |
permissive
|
phanrahan/magma
|
d8062c6163e2c2c2cedef82317dc8cc40038220a
|
b05fe5303ed17e668c6ec2ec3558cd5a52eff787
|
refs/heads/master
| 2023-08-23T18:08:22.494869
| 2023-08-08T18:53:05
| 2023-08-17T16:16:44
| 84,332,281
| 227
| 21
|
NOASSERTION
| 2023-09-14T21:32:19
| 2017-03-08T14:57:09
|
Python
|
UTF-8
|
Python
| false
| false
| 557
|
py
|
import magma as m
def test_debug_circuit():
assert m.config.get_debug_mode() is False
class Foo(m.DebugCircuit):
assert m.config.get_debug_mode() is True
io = m.IO(I=m.In(m.Bit))
assert m.config.get_debug_mode() is False
def test_debug_generator():
assert m.config.get_debug_mode() is False
class Foo(m.DebugGenerator2):
def __init__(self, n: int):
assert m.config.get_debug_mode() is True
self.io = m.IO(I=m.In(m.Bits[n]))
Foo(4)
assert m.config.get_debug_mode() is False
|
[
"noreply@github.com"
] |
phanrahan.noreply@github.com
|
e08dc6236ff37d62a7a4b40d92ab988d73f14b87
|
e41cb36f0c05fdb23cc516bceda2847a484adc27
|
/data_downloader.py
|
92585ee9873d03c05db8ba4cec6f94874604cda4
|
[
"Apache-2.0"
] |
permissive
|
josiahls/Lowes-Product-Classifier
|
fcbbc4d9d6bc9f7aae36f0ec8c1017454ab50337
|
c763861b9a40741d2b6529704cedad78af009ad1
|
refs/heads/master
| 2020-04-11T05:25:38.414207
| 2019-02-18T15:56:15
| 2019-02-18T15:56:15
| 146,519,300
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,068
|
py
|
import requests
def download_file_from_google_drive(id, destination):
URL = "https://docs.google.com/uc?export=download"
session = requests.Session()
response = session.get(URL, params = { 'id' : id }, stream = True)
token = get_confirm_token(response)
if token:
params = { 'id' : id, 'confirm' : token }
response = session.get(URL, params = params, stream = True)
save_response_content(response, destination)
def get_confirm_token(response):
for key, value in response.cookies.items():
if key.startswith('download_warning'):
return value
return None
def save_response_content(response, destination):
CHUNK_SIZE = 32768
with open(destination, "wb") as f:
for chunk in response.iter_content(CHUNK_SIZE):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
if __name__ == "__main__":
file_id = 'ZIqgvWjVpHd69fU8WquZ_FBJ4Isdd2q9'
destination = 'DESTINATION FILE ON YOUR DISK'
download_file_from_google_drive(file_id, destination)
|
[
"jokellum@northstate.net"
] |
jokellum@northstate.net
|
d68550af572f2294aa94df50fb8d6463e7f5fc4c
|
3eca77db17a407b357162a1eee316ddc931c1d6b
|
/tms_208/common_task.py
|
73c42f642675b0a27b343580936184e5dfdef3e6
|
[] |
no_license
|
dongxiaobing/python_automation
|
567519113327bdae9299eba424b88147e02920a8
|
7dcad903b5299c2a50d2778d7833d0bccd775c18
|
refs/heads/master
| 2016-09-10T00:38:59.938946
| 2015-06-24T13:58:10
| 2015-06-24T13:58:10
| 37,986,028
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,218
|
py
|
#!/usr/bin/env python2.6
#encoding=utf-8
import json
import base64
import os
import sys
if not os.path.join( os.getcwd(), '../..' ) in sys.path:
sys.path.append(os.path.join( os.getcwd(), '../..' ))
import utils.tmslog
from HostedREST.hosted_basetask import HostedBaseTask
from HostedREST.hosted_taskqueue import HostedTaskQueue
from notification_server.server_internal_exception import ServerInternalException
class CommonTask(HostedBaseTask):
'''
class CommonTask
This class implements the task information and operations for enrollment decision task queue
Attributes:
None
Notes:
All sections in the basetask list as follows:
owner, priority, enqueue_timestamp, account, name, tags, http_last_modified, task_retries, payload_base64
tags and payload_base64 would be parsed in the subclass.
'''
def __init__(self):
self.tqconn = HostedTaskQueue()
self.tags = []
pass
def do_add(self, tqname=None, account=1, creator=1, type=1, evt=None):
"""
To do:
Insert one task into task queue by task queue name.
"""
jevt = json.loads(evt)
tags = []
if tqname == 'usergroup':
tags.append("reties=0")
tags.append("type=" + str(type))
tags.append("creator=" + str(creator))
else:
tags.append("creator=" + str(creator))
tags.append("device=" + jevt['Udid'])
tags.append("owner=" + jevt['EnrollmentEmailAddress'])
tags.append("type=" + str(type))
tags.append("status=1")
payload = self.do_parse_payload(evt)
result = self.tqconn.do_add(tqname = tqname, namespace='mobile', account=account, payload=payload, tags=tags, priority='High')
if result.code == 200 or result.code == 201:
self.tags = tags
utils.tmslog.log('Add task success!')
else:
utils.tmslog.log('Add task failed!')
raise ServerInternalException('Failed to add task!')
return result.code
pass
def do_getTask(self, tqname=None, account=1,tags=None, tasknum=10):
"""
TODO:
Get out one task out from a task queue.
Params:
tqname: Name of task queue.
namespace: Namespace of the task queue.
account: Account ID.
tasknum: Number of the tasks that expected.
tags: Search conditions.
Return:
Instance object of class Task;
Exception message
"""
tasknum = tasknum
tags = self.tags
result = self.tqconn.do_get_multi(tqname=tqname, account=account, tasknum=tasknum, tags=tags)
self.m_tasks_list = []
self.m_tasks_num = 0
self.do_parse(result.content)
index = self.m_tasks_num - 1
#print self.m_tasks_list[index]
return self.m_tasks_list[index]
def do_getEventFromTask(self, task):
"""
"""
task = task
event = task['payload_base64']
event = base64.decodestring(event)
#print event
return event
def do_parse_payload(self, evt):
try:
jevt = json.loads(evt)
payload = {}
if not jevt.has_key('Udid'):
raise ValueError('UDID missing in the event')
if not jevt.has_key('EnrollmentEmailAddress'):
raise ValueError('EnrollmentEmailAddress missing in the event')
if not jevt.has_key('EnrollmentUserName'):
raise ValueError('EnrollmentUserName missing in the event')
payload_base64 = (base64.encodestring(evt)).replace("\n", '')
return payload_base64
except ValueError, e:
utils.tmslog.log('Event parse error, %s' % e)
raise ValueError(e)
pass
#end of class CommonTask
def do_test():
evt = {
"EventId":148,
"MACAddress":"System.Byte[]",
"EventTime":"/Date(1368076195887)/",
"ComplianceStatus":"Compliant",
"Udid":"",
"EventType":"MDMEnrollmentComplete",
"SerialNumber":"861348SXA4S",
"CompromisedStatus":"",
"EnrollmentEmailAddress":"hdu@websense.com",
"DeviceFriendlyName":"iPhone3,1-42a65c8289b7ddbcc8ab0fd342bb237534ba60c9",
"PhoneNumber":"+8613426192820",
"DeviceId":616,
"EnrollmentStatus":"Enrolled",
"EnrollmentUserName":"Hang",
"CompromisedTimeStamp":"/Date(1368076202626)/"
}
evt = json.dumps(evt)
task = CommonTask()
task.do_add(account=86, tqname='enrollment', creator=3, type=1, evt=evt)
pass
if __name__=='__main__':
do_test()
pass
|
[
"380678439@qq.com"
] |
380678439@qq.com
|
d736dc8955fe9948061b9abce982ca9a4b9990dc
|
3644d8e16f74829490292915bca85ae20d686a99
|
/webscraperapp/webScrapa/models.py
|
3f1705089343b805c2d8719ac8805f98db0f014a
|
[] |
no_license
|
Laptic/WebScraperWebsite
|
4fabc2eda1bfb5cf6655b8c411d11c2e66942088
|
5c9f1dc2d6db29e6fe9654e88ea8e0cb1b0a9cef
|
refs/heads/master
| 2022-10-31T14:21:56.372820
| 2019-08-09T03:18:47
| 2019-08-09T03:18:47
| 200,905,199
| 0
| 1
| null | 2022-10-14T22:57:13
| 2019-08-06T18:36:56
|
Python
|
UTF-8
|
Python
| false
| false
| 5,662
|
py
|
# This is an auto-generated Django model module.
# You'll have to do the following manually to clean this up:
# * Rearrange models' order
# * Make sure each model has one field with primary_key=True
# * Make sure each ForeignKey has `on_delete` set to the desired behavior.
# * Remove `managed = False` lines if you wish to allow Django to create, modify, and delete the table
# Feel free to rename the models, but don't rename db_table values or field names.
from django.db import models
from django.urls import reverse
class AuthGroup(models.Model):
name = models.CharField(unique=True, max_length=150)
class Meta:
managed = False
db_table = 'auth_group'
class AuthGroupPermissions(models.Model):
group = models.ForeignKey(AuthGroup, models.DO_NOTHING)
permission = models.ForeignKey('AuthPermission', models.DO_NOTHING)
class Meta:
managed = False
db_table = 'auth_group_permissions'
unique_together = (('group', 'permission'),)
class AuthPermission(models.Model):
name = models.CharField(max_length=255)
content_type = models.ForeignKey('DjangoContentType', models.DO_NOTHING)
codename = models.CharField(max_length=100)
class Meta:
managed = False
db_table = 'auth_permission'
unique_together = (('content_type', 'codename'),)
class AuthUser(models.Model):
password = models.CharField(max_length=128)
last_login = models.DateTimeField(blank=True, null=True)
is_superuser = models.IntegerField()
username = models.CharField(unique=True, max_length=150)
first_name = models.CharField(max_length=30)
last_name = models.CharField(max_length=150)
email = models.CharField(max_length=254)
is_staff = models.IntegerField()
is_active = models.IntegerField()
date_joined = models.DateTimeField()
class Meta:
managed = False
db_table = 'auth_user'
class AuthUserGroups(models.Model):
user = models.ForeignKey(AuthUser, models.DO_NOTHING)
group = models.ForeignKey(AuthGroup, models.DO_NOTHING)
class Meta:
managed = False
db_table = 'auth_user_groups'
unique_together = (('user', 'group'),)
class AuthUserUserPermissions(models.Model):
user = models.ForeignKey(AuthUser, models.DO_NOTHING)
permission = models.ForeignKey(AuthPermission, models.DO_NOTHING)
class Meta:
managed = False
db_table = 'auth_user_user_permissions'
unique_together = (('user', 'permission'),)
class DjangoAdminLog(models.Model):
action_time = models.DateTimeField()
object_id = models.TextField(blank=True, null=True)
object_repr = models.CharField(max_length=200)
action_flag = models.PositiveSmallIntegerField()
change_message = models.TextField()
content_type = models.ForeignKey('DjangoContentType', models.DO_NOTHING, blank=True, null=True)
user = models.ForeignKey(AuthUser, models.DO_NOTHING)
class Meta:
managed = False
db_table = 'django_admin_log'
class DjangoContentType(models.Model):
app_label = models.CharField(max_length=100)
model = models.CharField(max_length=100)
class Meta:
managed = False
db_table = 'django_content_type'
unique_together = (('app_label', 'model'),)
class DjangoMigrations(models.Model):
app = models.CharField(max_length=255)
name = models.CharField(max_length=255)
applied = models.DateTimeField()
class Meta:
managed = False
db_table = 'django_migrations'
class DjangoSession(models.Model):
session_key = models.CharField(primary_key=True, max_length=40)
session_data = models.TextField()
expire_date = models.DateTimeField()
class Meta:
managed = False
db_table = 'django_session'
class GpuParts(models.Model):
name = models.CharField(max_length=200, blank=True, null=True)
price = models.DecimalField(max_digits=7, decimal_places=2, blank=True, null=True)
website = models.CharField(max_length=20, blank=True, null=True)
id = models.IntegerField(blank=True, null=False,primary_key=True)
class Meta:
managed = False
db_table = 'gpu_parts'
def __str__(self):
return self.name
def get_absolute_url(self):
"""Returns the url to access a particular instance of MyModelName."""
return reverse('model-detail-view', args=[str(self.id)])
class MotherboardParts(models.Model):
name = models.CharField(max_length=200, blank=True, null=True)
price = models.DecimalField(max_digits=7, decimal_places=2, blank=True, null=True)
website = models.CharField(max_length=20, blank=True, null=True)
id = models.IntegerField(blank=True, null=False,primary_key=True)
class Meta:
managed = False
db_table = 'motherboard_parts'
def __str__(self):
return self.name
def get_absolute_url(self):
"""Returns the url to access a particular instance of MyModelName."""
return reverse('model-detail-view', args=[str(self.id)])
class RamParts(models.Model):
name = models.CharField(max_length=200, blank=True, null=True)
price = models.DecimalField(max_digits=7, decimal_places=2, blank=True, null=True)
website = models.CharField(max_length=20, blank=True, null=True)
id = models.IntegerField(blank=True, null=False,primary_key=True)
class Meta:
managed = False
db_table = 'ram_parts'
def __str__(self):
return self.name
def get_absolute_url(self):
"""Returns the url to access a particular instance of MyModelName."""
return reverse('model-detail-view', args=[str(self.id)])
|
[
"johnhenrymartinez@gmail.com"
] |
johnhenrymartinez@gmail.com
|
2d82e388ebb339a48bbc7999031f9d55f4c07e87
|
9099ed0407521ac40b88f3b92872307f66c57bf9
|
/codes/contest/leetcode/largest-number-at-least-twice-of-others.py
|
6fd96179b228b56580003f6f2896c304213f659c
|
[] |
no_license
|
jiluhu/dirtysalt.github.io
|
0cea3f52d2c4adf2bbf5c23b74f4cb1070025816
|
c026f2969c784827fac702b34b07a9268b70b62a
|
refs/heads/master
| 2020-08-31T09:32:05.273168
| 2019-10-29T01:53:45
| 2019-10-29T01:53:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 389
|
py
|
#!/usr/bin/env python
# coding:utf-8
# Copyright (C) dirlt
class Solution:
def dominantIndex(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
xs = list(enumerate(nums))
xs.sort(key=lambda x: x[1])
if len(xs) <= 1:
return 0
if xs[-1][1] >= 2 * xs[-2][1]:
return xs[-1][0]
return -1
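# A hedged usage sketch (added for illustration): after sorting by value, the
# largest entry (6, at original index 1) is at least twice the runner-up
# (2 * 3), so its original index is returned.
if __name__ == "__main__":
    print(Solution().dominantIndex([3, 6, 1, 0]))  # 1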
|
[
"dirtysalt1987@gmail.com"
] |
dirtysalt1987@gmail.com
|
6899a10308c145e1ffeac2269256163600414fba
|
16b355291e05660035646d05bc384f229b4d1738
|
/ConsoleAdvance2.py
|
f269ad76dc2af8a29832ecebf97efa18e1fe1e80
|
[] |
no_license
|
gamgoon/python3_test
|
a672d96052d20e0e64cadf5831b7e465c98bdbbb
|
3bacb388f94c829fa20aea179e121f9d0dddf63c
|
refs/heads/master
| 2021-01-10T11:14:21.946155
| 2016-04-13T12:21:13
| 2016-04-13T12:21:13
| 55,353,386
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 121
|
py
|
from urllib.request import urlopen
file = urlopen('http://www.korea.kr')
htmlcontents = file.read()
print(htmlcontents)
|
[
"gamgoon@gmail.com"
] |
gamgoon@gmail.com
|
123b0499d9d833e0d1562cee75e5ce421952ad6b
|
f82929ae1127f641915b70811927280d94531367
|
/code_fest.py
|
533cf11cda3ebe358ce0b44138ce721f3e7f8689
|
[] |
no_license
|
CodeDragoon/SpreadHope
|
408f7bd31d99e2350139bdfe54d8540332997707
|
0aa8dde3bfc395086bea76d2f8ce987b68d397d2
|
refs/heads/master
| 2021-04-15T13:04:48.773951
| 2018-03-24T23:00:47
| 2018-03-24T23:00:47
| 126,646,996
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,866
|
py
|
# coding: utf-8
# In[6]:
import sys
import itertools
import re
import tweepy
from tweepy import OAuthHandler
from textblob import TextBlob
import matplotlib.pyplot as plt
import numpy as np
# get_ipython().magic('matplotlib inline')
class TwitterClient(object):
'''
Generic Twitter Class for sentiment analysis.
'''
def __init__(self):
'''
Class constructor or initialization method.
'''
consumer_key = 'q5KDm8mFYGuX5ofde3hGhbk13'
consumer_secret = 'cpES4YMGDhX1n1qLfjbqtjL4iYvdbzQYLm5nJWFzbgSJJ4JUgQ'
access_token = '4690057452-8oNgkeUKaIHOZPdwSK9jNPEdSVTkbEMSNHaHJhg'
access_token_secret = 'mQtDVFE7vX8suAywV6cFQPAJOvSVeoLJ8cRqiaXRvP3nF'
# attempt authentication
try:
self.auth = OAuthHandler(consumer_key, consumer_secret)
self.auth.set_access_token(access_token, access_token_secret)
self.api = tweepy.API(self.auth)
except:
print("Error: Authentication Failed")
def clean_tweet(self, tweet):
'''
Utility function to clean tweet text by removing links, special characters
using simple regex statements.
'''
return ' '.join(re.sub("(@[A-Za-z0-9]+)|([^0-9A-Za-z \t])|(\w+:\/\/\S+)", " ", tweet).split())
def get_tweet_sentiment(self, tweet):
'''
Utility function to classify sentiment of passed tweet
using textblob's sentiment method
'''
# create TextBlob object of passed tweet text
analysis = TextBlob(self.clean_tweet(tweet))
dic={}
dic['text']=tweet
dic['score']=analysis.sentiment.polarity
return analysis.sentiment.polarity
"""
if analysis.sentiment.polarity > 0:
return 'positive'
elif analysis.sentiment.polarity == 0:
return 'neutral'
else:
return 'negative'
"""
def get_user_timeline(self,username):
tweets=[]
try:
fetched_tweets=self.api.user_timeline(screen_name=username,count=50)
#print(type(fetched_tweets))
#print(fetched_tweets[0].created_at)
#print("hjjfvfe")
count=0
score_values=[]
for tweet in fetched_tweets:
parsed_tweet = {}
count=count+1
parsed_tweet['text'] = tweet.text
w=self.get_tweet_sentiment(tweet.text)
score_values.append(w)
if w > 0:
parsed_tweet['sentiment']='positive'
elif w == 0:
parsed_tweet['sentiment']='neutral'
else :
parsed_tweet['sentiment']='negative'
if tweet.retweet_count > 0:
if parsed_tweet not in tweets:
tweets.append(parsed_tweet)
else:
tweets.append(parsed_tweet)
#print(count)
#print(type(tweets))
#print(len(tweets))
return score_values , tweets
except tweepy.TweepError as e:
print("Error : " + str(e))
plot1= [ ]
scores1 = []
def main():
# creating object of TwitterClient Class
api = TwitterClient()
# calling function to get tweets
#tweets = api.get_tweets(query = 'Donald Trump', count = 20)
# scores , tweets = api.get_user_timeline(sys.argv[1])
scores , tweets = api.get_user_timeline('ChesterBe')
#for tweet in tweets:
#print(tweet)
# picking positive tweets from tweets
ptweets = [tweet for tweet in tweets if tweet['sentiment'] == 'positive']
print("Positive tweets percentage: {} %".format(100*len(ptweets)/len(tweets)))
ntweets = [tweet for tweet in tweets if tweet['sentiment'] == 'negative']
print("Negative tweets percentage: {} %".format(100*len(ntweets)/len(tweets)))
print("Neutral tweets percentage: {} %".format((100* (len(tweets)- len(ntweets) -len(ptweets))/len(tweets))))
# printing first 5 positive tweets
stor=[]
#print("lentgth is " , len(tweets) , len(scores))
for tweet , sco in zip(tweets, scores) :
plot1.append(sco)
if tweet['sentiment'] == 'positive':
#print("positive tweet score is " , sco )
#print("tweet is " , tweet)
stor.append(1)
elif tweet['sentiment']=='negative':
#print("negative tweet score is " , sco )
#print("tweet is " , tweet)
stor.append(-1)
else:
stor.append(0)
#print("neutral tweet score is " , sco )
#print("tweet is " , tweet)
#print(stor)
#plt.ylabel('some numbers')
#plt.plot(stor)
#plt.show()
#plt.savefig('books_read.png')
"""
print("\n\nPositive tweets:")
for tweet in ptweets[:10]:
print(tweet['text'])
# printing first 5 negative tweets
print("\n\nNegative tweets:")
for tweet in ntweets[:10]:
print(tweet['text'])
"""
poly_deg = 18
plt.ylabel('Emotional condition')
#plt.plot(t, t, 'r--', t, t**2, 'bs', t, t**3, 'g^')
y=plot1
y=y[::-1]
x = np.arange(0, 2*(np.pi), 0.1)[0:len(y)]
y_knots = y
x_knots=x
coefs = np.polyfit(x_knots, y_knots, poly_deg)
y_poly = np.polyval(coefs, x)
#plt.plot(x_knots, y_knots, "o", label="data points")
plt.plot(x, y_poly, label="polynomial fit")
plt.fill_between(x_knots, 0, y_poly)
plt.ylabel( 'Emotional Condition' )
plt.xlabel('Time')
plt.savefig('plotted_graph.png')
if __name__ == "__main__":
# calling main function
main()
|
[
"rishabhnigam1@gmail.com"
] |
rishabhnigam1@gmail.com
|
65a1bc12d39fbe707d976c84edaebc72d5d8a80b
|
240f9eafd114f6c76dcbaa1f149c2960b6e2279e
|
/48.py
|
c367b605b3511e5180c3a5d1e54c1e0738a94ccf
|
[] |
no_license
|
Teajtet/Project-Euler
|
01282ee229167809a5c03b5ab8f113532b2c6bee
|
b742a97c0220006d7ddd8cac7b8cd7fe749d518b
|
refs/heads/master
| 2021-09-06T02:53:55.801496
| 2018-02-01T20:30:29
| 2018-02-01T20:30:29
| 119,885,315
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 209
|
py
|
'''The series, 1^(1) + 2^(2) + 3^(3) + ... + 10^(10) = 10405071317.
Find the last ten digits of the series, 1^(1) + 2^(2) + 3^(3) + ... + 1000^(1000).
'''
print sum(map(lambda x: x**x, range(1,1001)))
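# A hedged note (added for illustration): since only the last ten digits are
# required, each term can be reduced modulo 10**10 with three-argument pow,
# avoiding the full ~3,000-digit sum:
#     print sum(pow(i, i, 10**10) for i in range(1, 1001)) % 10**10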
|
[
"noreply@github.com"
] |
Teajtet.noreply@github.com
|
c8448b80686c47ce5f7c0b97003a13128fdedbb1
|
8d2dc937fa17d9200f0f04687c4b6f88f0ea6f11
|
/libs/utils/my_math.py
|
d4bcf6c298c61b5aba459d276bd5e1e7bd7caf95
|
[
"MIT"
] |
permissive
|
fyabc/NMT
|
550590407e8c2a5ad2fcddb715bd6f4fadfdfcd0
|
7fa10737967424d80c3c23d597a0648df1ed4015
|
refs/heads/master
| 2021-01-19T04:05:26.116052
| 2017-04-10T14:04:23
| 2017-04-10T14:04:23
| 84,425,937
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,609
|
py
|
#! /usr/bin/python
# -*- encoding: utf-8 -*-
########
# Math #
########
from __future__ import print_function
import numpy as np
import theano.tensor as T
from ..utils.basic import fX
__author__ = 'fyabc'
def average(sequence):
if sequence is None:
return 0.0
if len(sequence) == 0:
return 0.0
return sum(sequence) / len(sequence)
def get_rank(a):
"""Get the rank of numpy array a.
>>> import numpy as np
>>> get_rank(np.array([10, 15, -3, 9, 1]))
array([3, 4, 0, 2, 1])
"""
temp = a.argsort()
ranks = np.empty_like(a)
ranks[temp] = np.arange(len(a))
return ranks
# Parameter initializers
def orthogonal_weight(ndim):
W = np.random.randn(ndim, ndim)
u, _, _ = np.linalg.svd(W)
return u.astype(fX)
def normal_weight(n_in, n_out=None, scale=0.01, orthogonal=True):
n_out = n_in if n_out is None else n_out
if n_in == n_out and orthogonal:
W = orthogonal_weight(n_in)
else:
W = scale * np.random.randn(n_in, n_out)
return W.astype(fX)
def uniform_weight(n_in, n_out=None, scale=0.01):
if n_out is None:
n_out = n_in
return np.random.uniform(-1. * scale, 1. * scale, (n_in, n_out)).astype(fX)
def orthogonal_weight_1xb(n_in, b):
init_Ws = [[] for _ in xrange(b)]
for i in xrange(b):
init_Ws[i] = orthogonal_weight(n_in)
return np.concatenate(init_Ws, axis=1)
def orthogonal_weight_axb(nin, a, b):
initWs = np.zeros((nin * a, nin * b), dtype='float32')
for i in xrange(a):
for j in xrange(b):
initWs[i * nin: (i + 1) * nin, j * nin: (j + 1) * nin] = orthogonal_weight(nin)
return initWs
def normal_vector(n_in, scale=0.01):
return (scale * np.random.randn(n_in)).astype(fX)
def concatenate(tensors, axis=0):
"""
Alternative implementation of `theano.tensor.concatenate`.
This function does exactly the same thing, but contrary to Theano's own
implementation, the gradient is implemented on the GPU.
Back-propagating through `theano.tensor.concatenate` yields slowdowns
because the inverse operation (splitting) needs to be done on the CPU.
This implementation does not have that problem.
:usage:
>>> import theano.tensor as T
>>> x, y = T.matrices('x', 'y')
>>> concatenate([x, y], axis=1)
IncSubtensor{Set;::, int64:int64:}.0
:parameters:
- tensors : list
list of Theano tensor expressions that should be concatenated.
- axis : int
the tensors will be joined along this axis.
:returns:
- out : tensor
the concatenated tensor expression.
"""
concat_size = sum(t.shape[axis] for t in tensors)
output_shape = ()
for k in range(axis):
output_shape += (tensors[0].shape[k],)
output_shape += (concat_size,)
for k in range(axis + 1, tensors[0].ndim):
output_shape += (tensors[0].shape[k],)
out = T.zeros(output_shape)
offset = 0
for t in tensors:
indices = [slice(None) for _ in range(axis)] + [slice(offset, offset + t.shape[axis])] + \
[slice(None) for _ in range(axis + 1, tensors[0].ndim)]
out = T.set_subtensor(out[indices], t)
offset += t.shape[axis]
return out
# Activations
tanh = T.tanh
linear = lambda x: x
relu = T.nnet.relu
sigmoid = T.nnet.sigmoid
__all__ = [
'average',
'get_rank',
'orthogonal_weight',
'orthogonal_weight_1xb',
'orthogonal_weight_axb',
'normal_weight',
'uniform_weight',
'normal_vector',
'concatenate',
]
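# A hedged note (added for illustration): orthogonal_weight returns the left
# singular vectors of a square Gaussian matrix, so the result has orthonormal
# columns up to float32 precision:
#     W = orthogonal_weight(4)
#     np.allclose(W.T.dot(W), np.eye(4), atol=1e-5)  # True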
|
[
"fyabc@mail.ustc.edu.cn"
] |
fyabc@mail.ustc.edu.cn
|
ec21fc56278fd83a73cc0815a93a2bd88c81b98f
|
33ebb65e94cccf81df4d96d9321d45a2e1dffaa0
|
/packages/extra/iota.py
|
83b79c7c0ea05375262bd25f9d74d25f99bbf69b
|
[] |
no_license
|
stephenmcnicholas/PythonExamples
|
6f8130c974a8460df4201d37fc0d06dcec06180c
|
3592534392f6078aeade636c09b886621ca5aaf5
|
refs/heads/main
| 2023-08-25T00:20:04.723155
| 2021-10-06T16:55:25
| 2021-10-06T16:55:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 157
|
py
|
#! /usr/bin/env python3
""" example module: extra.iota """
def funI():
return "Iota"
if __name__ == "__main__":
print("I prefer to be a module")
|
[
"stephenmcnicholas@hotmail.com"
] |
stephenmcnicholas@hotmail.com
|
fa74ef84b822c411b9e0d1584cb7e6c721ceb128
|
8ccdbf554e63b194de9ed19765ba72a8a65f004f
|
/app/apps/blog/test_views.py
|
2a6eaa51bfb5839fc448de6139c30a5b6ca32e9e
|
[] |
no_license
|
JTarball/codewheel-backend
|
cda0cee25da347a29d1ad5eb189269071d335bf7
|
3815056ce1210e5c44823ec22beefc40a8137a14
|
refs/heads/master
| 2020-04-18T11:11:10.599798
| 2016-09-09T21:20:49
| 2016-09-09T21:20:49
| 65,942,456
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 22,397
|
py
|
"""
blog.test_views.py
==================
Test Views for Blog App
"""
import logging
import datetime
from django.core.urlresolvers import reverse
from django_dynamic_fixture import G
from rest_framework import status
from rest_framework.test import APIClient, APITestCase
from accounts.models import AccountsUser
from blog.models import Post
from blog.serializers import PostSerializer
logger = logging.getLogger('test_logger')
class TestPostList(APITestCase):
def setUp(self):
self.user = G(AccountsUser, is_superuser=False, is_staff=False)
self.staff = G(AccountsUser, is_superuser=False, is_staff=True)
self.superadmin = G(AccountsUser, is_superuser=True, is_staff=True)
self.superadmin_not_staff = G(AccountsUser, is_superuser=True, is_staff=False)
self.client = APIClient()
self.url = reverse('blog:list')
def test_create_post_pass_permissions_superadmin(self):
""" Test creation of post for superadmin. """
# Create Post, change slug (has to be unique) so can use it for post
post = G(Post, author=self.user)
count = Post.objects.count()
post.slug = 'different-slug'
serializer = PostSerializer(post)
# Force Authentication and Post
self.client.force_authenticate(user=self.superadmin)
response = self.client.post(self.url, serializer.data, format='json')
# Basic check: slug is the same, created & object count increased
self.assertEquals(response.status_code, status.HTTP_201_CREATED, "%s" % response.data)
self.assertEquals(Post.objects.count(), count + 1)
self.assertEquals(response.data['slug'], post.slug, response.data)
def test_create_post_pass_permissions_staff(self):
""" Test create permissions for staff. """
# Testing permissions don't care about data so just generate it first
post = G(Post, author=self.user)
count = Post.objects.count()
post.slug = 'different-slug'
serializer = PostSerializer(post)
# Force Authentication and Post
self.client.force_authenticate(user=self.staff)
response = self.client.post(self.url, serializer.data, format='json')
# Basic check: slug is the same, created & object count increased
self.assertEquals(response.status_code, status.HTTP_201_CREATED)
self.assertEquals(Post.objects.count(), count + 1)
self.assertEquals(response.data['slug'], post.slug)
def test_create_post_pass_permissions_superadmin_not_staff(self):
""" Test create permissions for a superadmin who is not staff. """
# Testing permissions don't care about data so just generate it first
post = G(Post, author=self.user)
count = Post.objects.count()
post.slug = 'different-slug'
serializer = PostSerializer(post)
# Force Authentication and Post
self.client.force_authenticate(user=self.superadmin_not_staff)
response = self.client.post(self.url, serializer.data, format='json')
# Basic check: slug is the same, created & object count increased
self.assertEquals(response.status_code, status.HTTP_201_CREATED)
self.assertEquals(Post.objects.count(), count + 1)
self.assertEquals(response.data['slug'], post.slug)
def test_create_post_fail_permissions_user(self):
""" Test create permissions fail for authenticated users - posts can only be created by staff/superadmin. """
# Testing permissions don't care about data so just generate it first
post = G(Post, author=self.user)
count = Post.objects.count()
serializer = PostSerializer(post)
# Force Authentication and Post
self.client.force_authenticate(user=self.user)
response = self.client.post(self.url, serializer.data, format='json')
# Basic check: slug is the same, created & object count increased
self.assertEquals(response.status_code, status.HTTP_403_FORBIDDEN)
self.assertEquals(Post.objects.count(), count)
self.assertEquals(Post.objects.get(pk=post.pk).slug, post.slug)
def test_get_published_posts_anonymous_user(self):
""" Tests getting a list of published posts only for anonymous users. """
G(Post, author=self.user, published=True)
G(Post, author=self.user, published=True)
G(Post, author=self.user, published=True)
G(Post, author=self.user, published=False)
response = self.client.get(self.url, format='json')
self.assertEquals(response.status_code, status.HTTP_200_OK)
self.assertEquals(len(response.data), 3, "The list of posts retrieved should only include published posts.")
def test_get_published_posts_normal_authenticated_user(self):
""" Tests getting a list of published posts only for authenticated users. """
G(Post, author=self.user, published=True)
G(Post, author=self.user, published=True)
G(Post, author=self.user, published=True)
G(Post, author=self.user, published=False)
self.client.force_authenticate(user=self.user)
response = self.client.get(self.url, format='json')
self.assertEquals(response.status_code, status.HTTP_200_OK)
self.assertEquals(len(response.data), 3, "The list of posts retrieved should only include published posts.")
def test_get_all_posts_superadmin(self):
""" Test getting a list of all posts for superadmins. """
G(Post, author=self.user, published=True)
G(Post, author=self.user, published=True)
G(Post, author=self.user, published=True)
G(Post, author=self.user, published=False)
self.client.force_authenticate(user=self.superadmin_not_staff)
response = self.client.get(self.url, format='json')
self.assertEquals(response.status_code, status.HTTP_200_OK)
self.assertEquals(len(response.data), 4, "The method should retrieve all posts (published & not published).")
def test_get_all_posts_staff(self):
""" Tests getting a list of all posts for staff users. """
G(Post, author=self.user, published=True)
G(Post, author=self.user, published=True)
G(Post, author=self.user, published=True)
G(Post, author=self.user, published=False)
self.client.force_authenticate(user=self.staff)
response = self.client.get(self.url, format='json')
self.assertEquals(response.status_code, status.HTTP_200_OK)
self.assertEquals(len(response.data), 4, "The method should retrieve all posts (published & not published).")
class TestPostListByYear(APITestCase):
def setUp(self):
self.user = G(AccountsUser, is_superuser=False, is_staff=False)
self.staff = G(AccountsUser, is_superuser=False, is_staff=True)
self.superadmin = G(AccountsUser, is_superuser=True, is_staff=True)
self.superadmin_not_staff = G(AccountsUser, is_superuser=True, is_staff=False)
self.client = APIClient()
self.year = "%s" % datetime.datetime.now().year
self.url = reverse('blog:list_year', kwargs={'year': self.year})
def test_post_posts_forbidden_normal_user(self):
""" Test post action is forbidden for an normal user. """
G(Post, author=self.user, published=True, updated_at=datetime.date(2014, 3, 13))
G(Post, author=self.user, published=True)
# Force Authentication and Post
self.client.force_authenticate(user=self.user)
post = G(Post, author=self.user)
serializer = PostSerializer(post)
response = self.client.post(self.url, serializer.data, format='json')
self.assertEquals(response.status_code, status.HTTP_403_FORBIDDEN)
def test_put_posts_forbidden(self):
""" Test all posts are retrieved for anonymous user. """
G(Post, author=self.user, published=True, updated_at=datetime.date(2014, 3, 13))
G(Post, author=self.user, published=True)
post = G(Post, author=self.user)
serializer = PostSerializer(post)
response = self.client.put(self.url, serializer.data, format='json')
self.assertEquals(response.status_code, status.HTTP_403_FORBIDDEN)
def test_get_published_posts_by_year(self):
""" Test published posts are retrieved. """
G(Post, author=self.user, published=False, updated_at=datetime.date(2014, 3, 13))
G(Post, author=self.user, published=True)
G(Post, author=self.user, published=True)
response = self.client.get(self.url, format='json')
self.assertEquals(response.status_code, status.HTTP_200_OK)
self.assertEquals(len(response.data), 2)
class TestPostListByUser(APITestCase):
def setUp(self):
self.user = G(AccountsUser, is_superuser=False, is_staff=False)
self.staff = G(AccountsUser, is_superuser=False, is_staff=True)
self.client = APIClient()
self.url = reverse('blog:list_user', kwargs={'user': self.user})
def test_posts_patch_method_not_allowed(self):
""" Tests list_user is not allowed for patch method. """
G(Post, author=self.user, published=True, updated_at=datetime.date(2014, 3, 13))
G(Post, author=self.user, published=True)
post = G(Post, author=self.user)
serializer = PostSerializer(post)
self.client.force_authenticate(user=self.staff)
response = self.client.patch(self.url, serializer.data, format='json')
self.assertEquals(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
def test_posts_post_method_not_allowed(self):
""" Tests list_user is not allowed for post method. """
G(Post, author=self.user, published=True, updated_at=datetime.date(2014, 3, 13))
G(Post, author=self.user, published=True)
post = G(Post, author=self.user)
serializer = PostSerializer(post)
self.client.force_authenticate(user=self.staff)
response = self.client.post(self.url, serializer.data, format='json')
self.assertEquals(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
def test_posts_put_method_not_allowed(self):
""" Tests list_user is not allowed for put method. """
G(Post, author=self.user, published=True, updated_at=datetime.date(2014, 3, 13))
G(Post, author=self.user, published=True)
post = G(Post, author=self.user)
serializer = PostSerializer(post)
self.client.force_authenticate(user=self.staff)
response = self.client.put(self.url, serializer.data, format='json')
self.assertEquals(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
def test_get_posts_live_by_user_staff(self):
""" Test all posts for a specific author are returned for staff. """
G(Post, author=self.user, published=False, updated_at=datetime.date(2014, 3, 13))
G(Post, author=self.user, published=True)
G(Post, author=self.user, published=True)
self.client.force_authenticate(user=self.staff)
response = self.client.get(self.url, format='json')
self.assertEquals(response.status_code, status.HTTP_200_OK)
self.assertEquals(len(response.data), 3)
def test_get_posts_by_user(self):
""" Test published posts for a specific author are returned for anonymous users. """
G(Post, author=self.user, published=False, updated_at=datetime.date(2014, 3, 13))
G(Post, author=self.user, published=True)
G(Post, author=self.user, published=True)
logger.info("%s" % self.url)
response = self.client.get(self.url, format='json')
self.assertEquals(response.status_code, status.HTTP_200_OK)
self.assertEquals(len(response.data), 2)
class TestPostListByTag(APITestCase):
def setUp(self):
self.user = G(AccountsUser, is_superuser=False, is_staff=False)
self.staff = G(AccountsUser, is_superuser=False, is_staff=True)
self.tag = 'tag1'
self.client = APIClient()
self.url = reverse('blog:list_tag', kwargs={'tag': self.tag})
def test_posts_patch_method_not_allowed(self):
""" Tests list_tag is not allowed for patch method. """
G(Post, author=self.user, published=True, tags=[self.tag])
G(Post, author=self.user, published=True)
post = G(Post, author=self.user)
serializer = PostSerializer(post)
self.client.force_authenticate(user=self.staff)
response = self.client.patch(self.url, serializer.data, format='json')
self.assertEquals(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
def test_post_post_method_not_allowed(self):
""" Tests list_tag is not allowed for post method. """
G(Post, author=self.user, published=True, tags=[self.tag])
G(Post, author=self.user, published=True)
post = G(Post, author=self.user)
serializer = PostSerializer(post)
self.client.force_authenticate(user=self.staff)
response = self.client.post(self.url, serializer.data, format='json')
self.assertEquals(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
def test_posts_put_method_not_allowed(self):
""" Tests list_tag is not allowed for put method. """
G(Post, author=self.user, published=True, tags=[self.tag])
G(Post, author=self.user, published=True)
post = G(Post, author=self.user)
serializer = PostSerializer(post)
self.client.force_authenticate(user=self.staff)
response = self.client.put(self.url, serializer.data, format='json')
self.assertEquals(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
def test_get_posts_live_by_tag_staff(self):
""" Test all posts for a specific author are returned for staff. """
G(Post, author=self.user, published=False, tags=[self.tag])
G(Post, author=self.user, published=True, tags=[self.tag])
G(Post, author=self.user, published=True, tags=[self.tag])
self.client.force_authenticate(user=self.staff)
response = self.client.get(self.url, format='json')
self.assertEquals(response.status_code, status.HTTP_200_OK)
self.assertEquals(len(response.data), 3)
def test_get_posts_by_tag(self):
""" Test published posts for a specific author are returned for anonymous users. """
G(Post, author=self.user, published=False, tags=[self.tag])
G(Post, author=self.user, published=True, tags=[self.tag])
G(Post, author=self.user, published=True, tags=[self.tag])
response = self.client.get(self.url, format='json')
self.assertEquals(response.status_code, status.HTTP_200_OK)
self.assertEquals(len(response.data), 2)
class TestPostDetail(APITestCase):
def setUp(self):
self.user = G(AccountsUser, is_superuser=False, is_staff=False)
self.staff = G(AccountsUser, is_superuser=False, is_staff=True)
self.superadmin = G(AccountsUser, is_superuser=True, is_staff=True)
self.superadmin_not_staff = G(AccountsUser, is_superuser=True, is_staff=False)
self.client = APIClient()
def test_patch_fail_post_user(self):
""" Tests patch method is forbidden for a normal user. """
post = G(Post, author=self.user)
slug = post.slug
url = reverse('blog:detail', kwargs={'slug': slug})
slug = 'patching'
self.client.force_authenticate(user=self.user)
response = self.client.patch(url, {'slug': slug}, format='json')
self.assertEquals(response.status_code, status.HTTP_403_FORBIDDEN)
def test_patch_post_staff(self):
""" Test patch method for staff is successful. """
post = G(Post, author=self.user, published=True)
slug = post.slug
logger.info("%s" % slug)
url = reverse('blog:detail', kwargs={'slug': slug})
slug = 'patching'
self.client.force_authenticate(user=self.staff)
response = self.client.patch(url, {'slug': slug}, format='json')
self.assertEquals(response.status_code, status.HTTP_200_OK)
self.assertEquals(response.data['slug'], slug)
def test_patch_post_superadmin(self):
""" Test patch method for superadmin is successful. """
post = G(Post, author=self.user)
slug = post.slug
url = reverse('blog:detail', kwargs={'slug': slug})
slug = 'patching'
self.client.force_authenticate(user=self.superadmin_not_staff)
response = self.client.patch(url, {'slug': slug}, format='json')
self.assertEquals(response.status_code, status.HTTP_200_OK)
self.assertEquals(response.data['slug'], slug)
def test_put_post_superadmin(self):
""" Test put method is successful for superadmin . """
post = G(Post, author=self.user)
slug = post.slug
url = reverse('blog:detail', kwargs={'slug': slug})
post.slug = 'putting'
serializer = PostSerializer(post)
self.client.force_authenticate(user=self.superadmin_not_staff)
response = self.client.put(url, serializer.data, format='json')
self.assertEquals(response.status_code, status.HTTP_200_OK)
self.assertEquals(response.data['slug'], serializer.data['slug'])
def test_put_post_staff(self):
""" Test put method is successful for staff. """
post = G(Post, author=self.user)
slug = post.slug
url = reverse('blog:detail', kwargs={'slug': slug})
post.slug = 'putting'
serializer = PostSerializer(post)
        self.client.force_authenticate(user=self.staff)
response = self.client.put(url, serializer.data, format='json')
self.assertEquals(response.status_code, status.HTTP_200_OK)
self.assertEquals(response.data['slug'], serializer.data['slug'])
def test_put_fail_not_published_post_user(self):
""" Test put method fails for normal user on non published post. """
post = G(Post, author=self.user, published=False)
slug = post.slug
url = reverse('blog:detail', kwargs={'slug': slug})
serializer = PostSerializer(post)
logger.info("fdsfdsfd")
self.client.force_authenticate(user=None)
self.client.force_authenticate(user=self.user)
response = self.client.put(url, serializer.data, format='json')
self.assertEquals(response.status_code, status.HTTP_403_FORBIDDEN)
def test_put_fail_published_post_user(self):
""" Test put method fails for normal user on published post. """
post = G(Post, author=self.user, published=True)
slug = post.slug
url = reverse('blog:detail', kwargs={'slug': slug})
serializer = PostSerializer(post)
self.client.force_authenticate(user=None)
self.client.force_authenticate(user=self.user)
response = self.client.put(url, serializer.data, format='json')
self.assertEquals(response.status_code, status.HTTP_403_FORBIDDEN)
def test_delete_fail_post_user(self):
""" Test delete method fails for authenticated users. """
post = G(Post, author=self.user)
slug = post.slug
url = reverse('blog:detail', kwargs={'slug': slug})
self.client.force_authenticate(user=self.user)
response = self.client.delete(url, format='json')
self.assertEquals(response.status_code, status.HTTP_403_FORBIDDEN)
def test_delete_post_staff(self):
""" Test delete method is successful for staff. """
post = G(Post, author=self.user)
slug = post.slug
url = reverse('blog:detail', kwargs={'slug': slug})
self.client.force_authenticate(user=self.staff)
response = self.client.delete(url, format='json')
self.assertEquals(response.status_code, status.HTTP_204_NO_CONTENT)
def test_delete_post_superadmin(self):
""" Test delete method is successful for superadmin. """
post = G(Post, author=self.user)
slug = post.slug
url = reverse('blog:detail', kwargs={'slug': slug})
self.client.force_authenticate(user=self.superadmin_not_staff)
response = self.client.delete(url, format='json')
self.assertEquals(response.status_code, status.HTTP_204_NO_CONTENT)
def test_get_post_anonymous_user(self):
""" Test get method is successful for an anonymous user. """
post = G(Post, author=self.user, published=True)
slug = post.slug
url = reverse('blog:detail', kwargs={'slug': slug})
serializer = PostSerializer(post)
response = self.client.get(url, format='json')
self.assertEquals(response.status_code, status.HTTP_200_OK)
self.assertEquals(response.data, serializer.data)
def test_get_post_404_for_non_published_anonymous_user(self):
"""
Test get post only get published posts for an anonymous user.
create non published posts -> get it -> 404.
"""
post = G(Post, author=self.user, published=False)
slug = post.slug
url = reverse('blog:detail', kwargs={'slug': slug})
response = self.client.get(url, format='json')
self.assertEquals(response.status_code, status.HTTP_404_NOT_FOUND)
def test_get_post_not_published_staff(self):
""" Test get method on non published post by staff is successful. """
post = G(Post, author=self.user, published=False)
slug = post.slug
url = reverse('blog:detail', kwargs={'slug': slug})
serializer = PostSerializer(post)
self.client.force_authenticate(user=self.staff)
response = self.client.get(url, format='json')
self.assertEquals(response.status_code, status.HTTP_200_OK)
self.assertEquals(response.data, serializer.data)
def test_get_post_not_published_superadmin(self):
""" Test get method on non published post by superadmin is successful. """
post = G(Post, author=self.user, published=False)
slug = post.slug
url = reverse('blog:detail', kwargs={'slug': slug})
serializer = PostSerializer(post)
self.client.force_authenticate(user=self.superadmin_not_staff)
response = self.client.get(url, format='json')
self.assertEquals(response.status_code, status.HTTP_200_OK)
self.assertEquals(response.data, serializer.data)
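

# --- Added example (not part of the original test module) ---
# The permission behaviour these tests pin down (reads open to anyone,
# writes restricted to staff and superadmins) could be implemented with
# a DRF permission class along these lines. The class name is invented
# here for illustration; the project's actual permission code may differ.
from rest_framework import permissions


class IsStaffOrSuperuserOrReadOnly(permissions.BasePermission):
    """Allow safe methods for everyone; writes only for staff/superusers."""

    def has_permission(self, request, view):
        if request.method in permissions.SAFE_METHODS:
            return True
        user = request.user
        # AnonymousUser exposes is_staff/is_superuser as False, so no
        # separate authentication check is needed here.
        return bool(user and (user.is_staff or user.is_superuser))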
|
[
"james.tarball@gmail.com"
] |
james.tarball@gmail.com
|
7f40064fcf354778acb6ef64a0415bdb13c70b60
|
b11dfe48751754857c07b7b9d6c5583ad0b55091
|
/project_3/Linked_List.py
|
79dfbca5c5dd4e88819005bd829527076b57942b
|
[] |
no_license
|
ignatius525/datastructures
|
bf9d2e32f0faf7359cc5e9bab38451da5a1c3cbc
|
ad1dab1f27adfdfa78209026cd3df17398eeac79
|
refs/heads/master
| 2020-12-23T08:07:41.445343
| 2020-03-29T21:48:59
| 2020-03-29T21:48:59
| 237,093,151
| 0
| 1
| null | 2020-10-09T17:05:16
| 2020-01-29T22:15:11
|
Python
|
UTF-8
|
Python
| false
| false
| 6,685
|
py
|
class Linked_List:
class __Node:
def __init__(self, val):
self.val = val
self.next = None
self.prev = None
def __init__(self):
self.__header = self.__Node(None)
self.__trailer = self.__Node(None)
self.__header.next = self.__trailer
self.__trailer.prev = self.__header
self.__size = 0
def __len__(self):
return self.__size
def append_element(self, val):
newest = self.__Node(val)
newest.next = self.__trailer
newest.prev = self.__trailer.prev
self.__trailer.prev = newest
newest.prev.next = newest
self.__size +=1
def insert_element_at(self, val, index):
if index >= self.__size or index < 0:
raise IndexError
newest = self.__Node(val)
if index == 0:
cur = self.__header
elif index < (self.__size / 2):
count = 1
cur = self.__header.next
while count < index:
cur = cur.next
count += 1
else:
cur = self.__trailer.prev
count = self.__size
while count > index :
cur = cur.prev
count -= 1
newest.prev = cur.next.prev
cur.next.prev = newest
newest.next = cur.next
cur.next = newest
self.__size +=1
def remove_element_at(self, index):
if index >= self.__size or index < 0: #out of bounds
raise IndexError
elif index == 0: #remove at beginning
to_return = self.__header.next.val
self.__header.next = self.__header.next.next
self.__header.next.prev = self.__header
elif index == self.__size-1: #remove at end
to_return = self.__trailer.prev.val
self.__trailer.prev.prev.next = self.__trailer
self.__trailer.prev = self.__trailer.prev.prev
else:
if index < self.__size / 2:
cur = self.__header.next
count = 1
while count < index:
cur = cur.next
count += 1
else:
cur = self.__trailer.prev
count = self.__size
while count > index:
cur = cur.prev
count -= 1
to_return = cur.next.val
cur.next = cur.next.next
cur.next.prev = cur
self.__size -=1
return to_return
def get_element_at(self, index):
if index >= self.__size or index < 0:
raise IndexError
        if index < (self.__size + 1) / 2:  # walk from whichever end is closer
cur = self.__header.next
count = 0
while(count != index):
count +=1
cur = cur.next
return cur.val
else:
cur = self.__trailer.prev
count = self.__size - 1
while(count != index):
count -= 1
cur = cur.prev
return cur.val
def rotate_left(self):
if self.__size == 0 or self.__size == 1:
return
current = self.__trailer.prev
this = self.__header.next
current.next = self.__header.next
self.__header.next.prev = current
self.__header.next = this.next
self.__header.next.prev = self.__header
this.next = self.__trailer
def __str__(self):
if self.__size == 0:
return '[ ]'
cur = self.__header.next
list_str = '[ '
while cur is not self.__trailer:
list_str = list_str + str(cur.val)
if cur.next is not self.__trailer:
list_str = list_str + ', '
cur = cur.next
list_str = list_str + ' ]'
return list_str
def __iter__(self):
self.__iter_index = 0
return self
def __next__(self):
if self.__iter_index == self.__size:
raise StopIteration
to_return = self.get_element_at(self.__iter_index)
self.__iter_index = self.__iter_index + 1
return to_return
if __name__ == '__main__':
my_list = Linked_List()
print(my_list) #list should be empty
print('My list has ' + str(len(my_list)) + ' elements') #0 elements
try:
my_list.append_element(4)
my_list.append_element(-3)
my_list.append_element(8)
my_list.append_element(-1)
except MemoryError:
print("append dont work")
print(my_list) #list should be 4, -3, 8, -1
print('My list has ' + str(len(my_list)) + ' elements') #should have 4 elements
try:
my_list.insert_element_at(14, 2)
except IndexError:
print("This message should not pop up, otherwise insert in middle doenst work")
print(my_list) #list should be 4 -3 14 8 -1
print('My list has ' + str(len(my_list)) + ' elements') #should have 5 elements
try:
my_list.insert_element_at(-7,8)
except IndexError:
print("Index error caught correctly, index out of bounds") #index error should occur, index 8 is out of bounds
print(my_list)
try:
my_list.insert_element_at(4, len(my_list))
except IndexError:
print("Caught correctly, cannot append with insert element") #error should result
print(my_list)
try:
print(my_list.remove_element_at(3)) #removes 8, list becomes 4 -3 14 -1, should also print 8
except IndexError:
print("Should not pop up, crash with remove in middle")
print(my_list)
print('My list has ' + str(len(my_list)) + ' elements') # 4 elements
try:
my_list.remove_element_at(6)
except IndexError:
print("Working fine, caught index out of bounds for remove") #error should be caught
print(my_list)
try:
print(my_list.remove_element_at(0)) #removes correctly at beginning, should also print 4, new list is -3 14 -1
except IndexError:
print("Something is wrong, check removing at beginning of list")
print(my_list) #list should be -3 14 -1
print('My list has ' + str(len(my_list)) + ' elements')
try:
print(my_list.remove_element_at(len(my_list)-1)) #removes correctly at end, should also print -1
except IndexError:
print("Should not pop up, error with removing at tail")
print(my_list) #list should be -3 14
print('My list has ' + str(len(my_list)) + ' elements') # 2 elements
try:
my_list.append_element(15)
my_list.insert_element_at(-5,2)
my_list.append_element(2)
my_list.insert_element_at(6,3)
except IndexError:
print("something is broken with either append or insert")
print(my_list) # new list is -3 14 -5 6 15 2
print('My list has ' + str(len(my_list)) + ' elements') # 6 elements
try:
print(my_list.get_element_at(2)) #get element should be -5
except IndexError:
print("Get element is working incorrectly")
try:
print(my_list.get_element_at(8))
except IndexError:
print('Index error caught correctly for get element') #index out of bounds should be raised
print(my_list)
print('Testing rotate left')
my_list.rotate_left()
print(my_list) # list should be 14 -5 6 15 2 -3
print('Testing iterator') # prints 14 -5 6 15 2 -3 on a new line every time
for val in my_list:
print(val)
    print()
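    # --- Added example (not part of the original assignment code) ---
    # Cross-check a few of the operations above against a built-in list;
    # a quick way to gain confidence in append/insert/remove/iteration.
    ll, ref = Linked_List(), []
    for v in (4, -3, 8, -1):
        ll.append_element(v)
        ref.append(v)
    ll.insert_element_at(14, 2)
    ref.insert(2, 14)
    ll.remove_element_at(0)
    ref.pop(0)
    assert list(ll) == ref, 'linked list should mirror the reference list'
    print('cross-check passed:', list(ll))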
|
[
"iamiagkov@email.wm.edu"
] |
iamiagkov@email.wm.edu
|
2a1031a83f820cd38e52ee39ce1c89288fd92826
|
eee232b13da70df371b313c5cd05c296865f4b5a
|
/BloodBank/migrations/0010_auto_20180926_0929.py
|
f0f010f8b36efb0b6960a85c5cbb005480849fbb
|
[] |
no_license
|
skpatil99/BloodBank
|
b08916e4eb14d5a59e854163df8ebe6602e4525e
|
cb28bef5aab117e17133701b550d251fc5d731d3
|
refs/heads/master
| 2020-08-27T08:13:11.541663
| 2019-10-24T12:09:01
| 2019-10-24T12:09:01
| 217,296,425
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 659
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('BloodBank', '0009_auto_20180926_0856'),
]
operations = [
migrations.AddField(
model_name='hospital',
name='id',
field=models.AutoField(auto_created=True, primary_key=True, default=999, serialize=False, verbose_name='ID'),
preserve_default=False,
),
migrations.AlterField(
model_name='hospital',
name='Hospital_Code',
field=models.IntegerField(),
),
]
|
[
"skpatil705@gmail.com"
] |
skpatil705@gmail.com
|
e1944b56e5d696165ca83a72b2378fd5e4e60c21
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02960/s447701610.py
|
97f90df1566f9bd8c2610aadad855e5e44bb4218
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 417
|
py
|
MOD=10**9+7
S=input()
dp=[]
for i in range(len(S)+1):
dp.append([0]*13)
dp[0][0]=1
#print(dp)
for i in range(1,len(S)+1):
if S[i-1]=="?":
for k in range(13):
for j in range(10):
dp[i][(10*k+j)%13]+=dp[i-1][k]
dp[i][(10*k+j)%13]%=MOD
else:
si=int(S[i-1])
for k in range(13):
dp[i][(10*k+si)%13]+=dp[i-1][k]
dp[i][(10*k+si)%13]%=MOD
#print(dp)
print(dp[-1][5])
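
# --- Added example (not part of the submission) ---
# For a short S the DP answer can be cross-checked by brute force:
# enumerate every way to fill the '?' digits and count the numbers
# congruent to 5 mod 13. Only feasible for a handful of '?'s.
def brute_force(s):
    from itertools import product
    holes = [i for i, c in enumerate(s) if c == '?']
    count = 0
    for digits in product('0123456789', repeat=len(holes)):
        t = list(s)
        for pos, d in zip(holes, digits):
            t[pos] = d
        count += int(''.join(t)) % 13 == 5
    return count % MOD
# e.g. brute_force('?3?') should match the DP rerun with S = '?3?'.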
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
b5000bb331d1d4ffab68056f8972762375e1977e
|
a9280f11ef5ee9b9aad63b723d8c3c8af1113526
|
/worms/tests/test_search.py
|
84fd44071e61d0b4c0b7f3577951ce8c4eb13958
|
[
"Apache-2.0"
] |
permissive
|
clrichar/worms
|
98680ede92d8571ae2158119723cd554a326376a
|
0a92cd64a97be162ace3cfb6c02a5ffff152c7e6
|
refs/heads/master
| 2020-05-26T15:48:46.447684
| 2018-11-05T23:24:44
| 2018-11-05T23:24:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 22,439
|
py
|
import pytest
import _pickle as pickle
import numpy as np
from homog import hrot, htrans, axis_angle_of, axis_ang_cen_of
from concurrent.futures import ThreadPoolExecutor, ProcessPoolExecutor
from worms import *
import time
from worms.tests import only_if_pyrosetta, only_if_pyrosetta_distributed
from worms.util import residue_sym_err
@only_if_pyrosetta
def test_grow_cycle(c1pose):
helix = Spliceable(c1pose, sites=[(1, 'N'), ('-4:', 'C')])
segments = ([
Segment([helix], exit='C'),
] + [Segment([helix], 'N', 'C')] * 3 + [Segment([helix], entry='N')])
worms = grow(segments, Cyclic('C2', lever=20), thresh=20)
assert 0.1411 < np.min(worms.scores) < 0.1412
@only_if_pyrosetta
def test_grow_cycle_thread_pool(c1pose):
helix = Spliceable(c1pose, sites=[(1, 'N'), ('-4:', 'C')])
segments = ([
Segment([helix], exit='C'),
] + [Segment([helix], 'N', 'C')] * 3 + [Segment([helix], entry='N')])
worms = grow(
segments,
Cyclic('C2', lever=20),
executor=ThreadPoolExecutor,
max_workers=2)
assert 0.1411 < np.min(worms.scores) < 0.1412
assert np.sum(worms.scores < 0.1412) == 4
@only_if_pyrosetta
def test_sym_bug(c1pose, c2pose):
helix = Spliceable(
c1pose, sites=[((1, 2, 3), 'N'), ((9, 10, 11, 13), 'C')])
dimer = Spliceable(
c2pose, sites=[((1, 2, 3), 'N', 1), ('1,-1:', 'C'), ('2,-1:', 'C')])
segdimer = Segment([dimer], entry='N', exit='C')
segments = [
Segment([helix], exit='C'),
Segment([helix], entry='N', exit='C'),
Segment([dimer], entry='N', exit='C'),
Segment([helix], entry='N', exit='C'),
Segment([helix], entry='N'),
]
wnc = grow(segments, Cyclic(3, lever=200), thresh=1, verbosity=1)
assert len(wnc) == 3
print(wnc.scores)
p = wnc.pose(0, align=1, end=1)
# vis.showme(p)
# show_with_axis(wnc, 0)
# assert 0
# q = wnc.pose(4)
# vis.showme(p, name='carterr')
# vis.showme(q, name='angerr')
assert residue_sym_err(wnc.pose(0, end=True), 120, 2, 46, 6) < 1.0
@only_if_pyrosetta_distributed
def test_grow_cycle_process_pool(c1pose):
helix = Spliceable(c1pose, sites=[(1, 'N'), ('-4:', 'C')])
segments = ([
Segment([helix], exit='C'),
] + [Segment([helix], 'N', 'C')] * 3 + [Segment([helix], entry='N')])
worms = grow(
segments,
Cyclic('C2', lever=20),
executor=ProcessPoolExecutor,
max_workers=2)
assert 0.1411 < np.min(worms.scores) < 0.1412
assert np.sum(worms.scores < 0.1412) == 4
@only_if_pyrosetta
def test_grow_errors(c1pose):
nsplice = SpliceSite(sele=[1, 2, 3, 4, 5, 6], polarity='N')
csplice = SpliceSite(
sele=[
13,
], polarity='C')
spliceable1 = Spliceable(body=c1pose, sites=[nsplice, csplice])
spliceable2 = Spliceable(body=c1pose, sites=[nsplice, csplice])
spliceables = [spliceable1]
segments = ([
Segment(spliceables, exit='C'),
] + [
Segment(spliceables, 'N', 'C'),
] * 3 + [
Segment(spliceables, entry='N'),
])
checkc3 = Cyclic('C2', from_seg=0, to_seg=-1)
# make sure incorrect begin/end throws error
with pytest.raises(ValueError):
grow(segments[:2], criteria=checkc3)
with pytest.raises(ValueError):
grow(segments[1:], criteria=checkc3)
segments_polarity_mismatch = [
Segment(spliceables, exit='C'),
Segment(spliceables, entry='C'),
]
with pytest.raises(ValueError):
grow(segments_polarity_mismatch, criteria=checkc3)
@only_if_pyrosetta
def test_memsize(c1pose):
helix = Spliceable(c1pose, sites=[((1, 2), 'N'), ('-2:', 'C')])
segments = ([
Segment([helix], exit='C'),
] + [Segment([helix], 'N', 'C')] * 3 + [Segment([helix], entry='N')])
beg = 3
for i in range(beg, 7):
w1 = grow(segments, Cyclic('c2'), memsize=10**i, thresh=30)
assert i == beg or len(w0.scores) == len(w1.scores)
assert i == beg or np.allclose(w0.scores, w1.scores)
w0 = w1
@only_if_pyrosetta
def test_pose_alignment_0(c1pose):
helix = Spliceable(c1pose, sites=[(1, 'N'), ('-4:', 'C')])
segments = ([
Segment([helix], exit='C'),
] + [Segment([helix], 'N', 'C')] * 3 + [Segment([helix], entry='N')])
w = grow(segments, Cyclic('c2'), thresh=1)
assert len(w)
print(w.indices)
for i in range(4):
assert tuple(w.indices[i]) in ((0, 2, 1, 2, 0), (2, 1, 2, 0, 0),
(1, 2, 0, 2, 0), (2, 0, 2, 1, 0))
pose = w.pose(0, align=1, end=1)
assert util.no_overlapping_residues(pose)
# vis.showme(pose)
xyz0 = np.array([pose.residue(1).xyz(2)[i] for i in (0, 1, 2)] + [1])
    # residue 42 happens to be the symmetrically related one for this solution
xyz1 = np.array([pose.residue(42).xyz(2)[i] for i in (0, 1, 2)] + [1])
xyz1 = hrot([0, 0, 1], 180) @ xyz1
assert np.sum((xyz1 - xyz0)**2) < 0.1
@only_if_pyrosetta
def test_last_body_same_as(c1pose):
helix = Spliceable(c1pose, sites=[(1, 'N'), ('-4:', 'C')])
segments = ([
Segment([helix, helix], exit='C'),
] + [Segment([helix], 'N', 'C')] * 3 +
[Segment([helix, helix], entry='N')])
w = grow(segments, Cyclic('c2'), thresh=1)
for i, s in zip(w.indices, w.scores):
assert segments[0].bodyid[i[0]] == segments[-1].bodyid[i[-1]]
assert len(w) == 8
ref = [(1, 2, 0, 2, 0), (5, 2, 0, 2, 1), (2, 0, 2, 1, 0), (6, 0, 2, 1, 1),
(0, 2, 1, 2, 0), (4, 2, 1, 2, 1), (2, 1, 2, 0, 0), (6, 1, 2, 0, 1)]
for i in range(8):
assert tuple(w.indices[i]) in ref
@only_if_pyrosetta
def test_multichain_match_reveres_pol(c1pose, c2pose):
helix = Spliceable(
c1pose, sites=[((
1,
2,
3,
), 'N'), ((9, 10, 11, 13), 'C')])
dimer = Spliceable(
c2pose,
sites=[('1,:1', 'N'), ('1,-1:', 'C'), ('2,:2', 'N'), ('2,-1:', 'C')])
segments = [
Segment([helix], exit='C'),
Segment([helix], entry='N', exit='C'),
Segment([dimer], entry='N', exit='C'),
Segment([helix], entry='N', exit='C'),
Segment([helix], entry='N'),
]
wnc = grow(segments, Cyclic('C3', lever=20), thresh=1)
assert len(wnc)
assert wnc.scores[0] < 0.25
segments = [
Segment([helix], exit='N'),
Segment([helix], entry='C', exit='N'),
Segment([dimer], entry='C', exit='N'),
Segment([helix], entry='C', exit='N'),
Segment([helix], entry='C'),
]
wcn = grow(segments, Cyclic('C3', lever=20), thresh=1)
# assert residue_sym_err(wcn.pose(0), 120, 22, 35, 8) < 0.5
# N-to-C and C-to-N construction should be same
assert np.allclose(wnc.scores, wcn.scores, atol=1e-3)
@only_if_pyrosetta
def test_splicepoints(c1pose, c2pose, c3pose):
helix = Spliceable(
c1pose, sites=[((
1,
2,
3,
), 'N'), ((9, 10, 11, 13), 'C')])
dimer = Spliceable(
c2pose,
sites=[('1,:1', 'N'), ('1,-1:', 'C'), ('2,:2', 'N'), ('2,-1:', 'C')])
segments = [
Segment([helix], exit='C'),
Segment([helix], entry='N', exit='C'),
Segment([dimer], entry='N', exit='C'),
Segment([helix], entry='N', exit='C'),
Segment([helix], entry='N'),
]
w = grow(segments, Cyclic('C3', lever=20), thresh=1)
assert len(w) == 17
assert w.scores[0] < 0.25
assert w.splicepoints(0) == [11, 19, 27, 37]
w.pose(0, cyclic_permute=0)
assert w.splicepoints(0) == [10, 20, 42]
helix = Spliceable(c1pose, [(':4', 'N'), ('-4:', 'C')])
dimer = Spliceable(
c2pose,
sites=[('1,:2', 'N'), ('1,-1:', 'C'), ('2,:2', 'N'), ('2,-1:', 'C')])
trimer = Spliceable(
c3pose,
sites=[('1,:1', 'N'), ('1,-2:', 'C'), ('2,:2', 'N'), ('2,-2:', 'C'),
('3,:1', 'N'), ('3,-2:', 'C')])
segments = [
Segment([trimer], exit='C'),
Segment([helix], entry='N', exit='C'),
Segment([helix], entry='N', exit='C'),
Segment([helix], entry='N', exit='C'),
Segment([dimer], entry='N')
]
w = grow(segments, D3(c2=-1, c3=0), thresh=1)
assert len(w) == 90
assert w.splicepoints(0) == [8, 16, 25, 34]
actual_chains = list(w.pose(0, join=0).split_by_chain())
for i, splice in enumerate(w.splices(0)):
ib1, ic1, ir1, ib2, ic2, ir2, dr = splice
pose1 = w.segments[i].spliceables[ib1].chains[ic1]
pose2 = w.segments[i + 1].spliceables[ib2].chains[ic2]
seq1 = str(util.subpose(pose1, 1, ir1 - 1).sequence())
seq2 = str(util.subpose(pose2, ir2).sequence())
# print(i, '1', seq1, str(actual_chains[i].sequence()))
# print(i, '2', seq2, str(actual_chains[i + 1].sequence()))
assert seq1.endswith(str(actual_chains[i].sequence()))
assert seq2.startswith(str(actual_chains[i + 1].sequence()))
@only_if_pyrosetta
def test_cyclic_permute_beg_end(c1pose, c2pose):
helix = Spliceable(
c1pose, sites=[((
1,
2,
3,
), 'N'), ((9, 10, 11, 13), 'C')])
dimer = Spliceable(
c2pose,
sites=[('1,:1', 'N'), ('1,-1:', 'C'), ('2,:2', 'N'), ('2,-1:', 'C')])
segments = [
Segment([helix], exit='N'),
Segment([helix], entry='C', exit='N'),
Segment([dimer], entry='C', exit='N'),
Segment([helix], entry='C', exit='N'),
Segment([helix], entry='C'),
]
w = grow(segments, Cyclic('C3', lever=50), thresh=1)
# vis.showme(w.pose(0))
p = w.pose(0, cyclic_permute=1)
assert p.sequence() == 'YTAFLAAIPAINAAAAAAAGAAAAAGAAAAAAAGAAAAAFLAAIPAIN'
assert p.chain(30) == 1
assert util.no_overlapping_residues(p)
segments = [
Segment([helix], '_C'),
Segment([helix], 'NC'),
Segment([dimer], 'NC'),
Segment([helix], 'NC'),
Segment([helix], 'N_'),
]
w = grow(segments, Cyclic('C3', lever=50), thresh=1)
p = w.pose(0, cyclic_permute=1)
assert p.sequence() == 'YTAFLAAIPAIAAAAAAAAAAAAAAGAAAAAAAGAAATAFLAAIPAIN'
assert p.chain(len(p)) == 1
assert util.no_overlapping_residues(p)
# print(w.scores)
# vis.showme(w.pose(0, cyclic_permute=0), name='reg')
# print('------------------------')
# vis.showme(w.pose(0, end=1, join=False), name='end')
# print('------------------------')
# vis.showme(w.pose(0, cyclic_permute=1), name='cp')
# print('------------------------')
# assert 0
@only_if_pyrosetta
def test_cyclic_permute_mid_end(c1pose, c2pose, c3hetpose):
helix0 = Spliceable(c1pose, [([2], 'N'), ([11], "C")])
helix = Spliceable(c1pose, [([1, 3, 4], 'N'), ([
12,
], "C")])
dimer = Spliceable(
c2pose, sites=[('1,-1:', 'C'), ('2,-1:', 'C')], allowed_pairs=[(0, 1)])
c3het = Spliceable(
c3hetpose,
sites=[('1,2:2', 'N'), ('2,2:2', 'N'), ('3,2:2', 'N')],
allowed_pairs=[(0, 1), (1, 0)])
segments = [
Segment([helix0], '_C'),
Segment([helix0], 'NC'),
Segment([helix0], 'NC'),
Segment([c3het], 'NN'),
Segment([helix], 'CN'),
Segment([dimer], 'CC'),
Segment([helix], 'NC'),
Segment([helix], 'NC'),
Segment([c3het], 'N_'),
]
w = grow(segments, Cyclic(3, from_seg=3), thresh=1)
p, sc = w.sympose(0, score=True)
assert sc < 4
assert len(p) == 312
assert p.chain(306) == 9
assert util.no_overlapping_residues(p)
assert len(w) == 1
@only_if_pyrosetta
def test_multichain_mixed_pol(c2pose, c3pose, c1pose):
helix = Spliceable(c1pose, [(':4', 'N'), ((10, 12, 13), 'C')])
dimer = Spliceable(
c2pose,
sites=[('1,:2', 'N'), ('1,-1:', 'C'), ('2,:2', 'N'), ('2,-1:', 'C')])
trimer = Spliceable(
c3pose,
sites=[('1,:1', 'N'), ('1,-2:', 'C'), ('2,:2', 'N'), ('2,-2:', 'C'),
('3,:1', 'N'), ('3,-2:', 'C')])
segments = [
Segment([helix], exit='C'),
Segment([dimer], entry='N', exit='N'),
Segment([helix], entry='C', exit='N'),
Segment([trimer], entry='C', exit='C'),
Segment([helix], entry='N')
]
w = grow(segments, Cyclic('C3'), thresh=1)
assert len(w) == 24
p = w.pose(0, end=True, cyclic_permute=0)
assert util.no_overlapping_residues(p)
# vis.show_with_axis(w, 0)
# vis.showme(p)
# print(residue_sym_err(p, 120, 2, 62, 7))
assert 0.3 > residue_sym_err(p, 120, 2, 62, 7)
@only_if_pyrosetta
def test_multichain_db(c2pose, c1pose):
helix = Spliceable(c1pose, [(':4', 'N'), ('-4:', "C")])
dimer = Spliceable(c2pose, sites=[('1,-1:', 'C'), ('2,-1:', 'C')])
segments = [
Segment([helix], exit='N'),
Segment([dimer], entry='C', exit='C'),
Segment([helix], entry='N')
]
with pytest.raises(ValueError):
w = grow(segments, Cyclic('C4'), thresh=20)
@only_if_pyrosetta
def test_score0_sym(c2pose, c3pose, c1pose):
helix = Spliceable(c1pose, [(':1', 'N'), ((-4, -3, -2), 'C')])
dimer = Spliceable(
c2pose, sites=[
((2, ), 'N'),
('1,-1:', 'C'),
])
trimer = Spliceable(
c3pose, sites=[
('1,:1', 'N'),
((2, ), 'C'),
])
segments = ([Segment([dimer], '_C')] + [Segment([helix], 'NC')] * 4 +
[Segment([trimer], 'N_')])
w = grow(segments, D3(c3=-1, c2=0), thresh=2)
assert len(w) == 2
i, err, pose, score0 = w[0]
# vis.showme(w.pose(1, fullatom=True))
# show_with_z_axes(w, 1)
assert 22.488 < score0 < 22.4881
assert util.no_overlapping_residues(pose)
if hasattr(pose, '__getstate__'):
t = time.time()
ps1 = w.sympose(range(len(w)), score=1)
t = time.time() - t
print(t)
t = time.time()
ps2 = w.sympose(range(len(w)), score=1, parallel=True)
t = time.time() - t
print(t)
assert np.allclose([x[1] for x in ps1], [x[1] for x in ps2])
@only_if_pyrosetta
def test_chunk_speed(c2pose, c3pose, c1pose):
helix = Spliceable(c1pose, [(':1', 'N'), ('-2:', 'C')])
dimer = Spliceable(
c2pose, sites=[
('1,:2', 'N'),
('1,-1:', 'C'),
])
trimer = Spliceable(
c3pose, sites=[
('1,:1', 'N'),
('1,-2:', 'C'),
])
nseg = 11
segments = ([Segment([dimer], exit='C')] +
[Segment([helix], entry='N', exit='C')] *
(nseg - 2) + [Segment([trimer], entry='N')])
# w = grow(segments, Tetrahedral(c3=-1, c2=0), thresh=5)
t1 = time.time()
w1 = grow(segments, Octahedral(c3=-1, c2=0), thresh=1, memsize=0)
t1 = time.time() - t1
t2 = time.time()
w2 = grow(segments, Octahedral(c3=-1, c2=0), thresh=1, memsize=1e7)
t2 = time.time() - t2
print('chunksize', w1.detail['chunksize'], 'time', t1)
print('chunksize', w2.detail['chunksize'], 'time', t2)
print('speedup:', t1 / t2)
assert t1 / t2 > 10.0 # conservative, but still sketchy...
@only_if_pyrosetta
def test_splice_compatibility_check(c1pose, c2pose):
helix = Spliceable(c1pose, [(':1', 'N'), ('-2:', 'C')])
dimer = Spliceable(
c2pose, sites=[
('1,:2', 'N'),
('2,:2', 'N'),
])
segments = [
Segment([helix], '_C'),
Segment([dimer], 'NN'),
Segment([helix], 'C_'),
]
with pytest.raises(ValueError):
w = grow(segments, Cyclic(), thresh=1)
@only_if_pyrosetta
def test_invalid_splices_seg_too_small(c1pose):
helix = Spliceable(c1pose, [('8:8', 'N'), ('7:7', 'C')])
with pytest.raises(ValueError):
segments = [
Segment([helix], '_C'),
Segment([helix], 'NC'),
Segment([helix], 'N_')
]
helix = Spliceable(c1pose, [('7:8', 'N'), ('7:8', 'C')])
segments = [
Segment([helix], '_C'),
Segment([helix], 'NC'),
Segment([helix], 'N_')
]
w = grow(segments, Cyclic('C3'), thresh=9e9)
assert len(w) == 12
helix = Spliceable(c1pose, [('7:8', 'N'), ('7:8', 'C')], min_seg_len=2)
segments = [
Segment([helix], '_C'),
Segment([helix], 'NC'),
Segment([helix], 'N_')
]
w = grow(segments, Cyclic('C3'), thresh=9e9)
assert len(w) == 4
@only_if_pyrosetta
def test_invalid_splices_site_overlap_2(c1pose, c2pose):
helix = Spliceable(c1pose, [(':1', 'N'), ('-1:', 'C')])
dimer = Spliceable(
c2pose,
sites=[
('1,:1', 'N'),
('2,:1', 'N'),
('1,-1:', 'C'),
('2,-1:', 'C'),
])
segments = [
Segment([helix], '_C'),
Segment([dimer], 'NN'),
Segment([helix], 'CN'),
Segment([dimer], 'CC'),
Segment([helix], 'N_'),
]
w = grow(segments, Cyclic(3), thresh=9e9)
assert len(w) == 4
for i in range(len(w)):
assert (w.segments[1].entrysiteid[w.indices[i, 1]] !=
w.segments[1].exitsiteid[w.indices[i, 1]])
assert (w.segments[3].entrysiteid[w.indices[i, 3]] !=
w.segments[3].exitsiteid[w.indices[i, 3]])
@only_if_pyrosetta
def test_invalid_splices_site_overlap_3(c1pose, c3pose):
helix = Spliceable(c1pose, [(':1', 'N'), ('-1:', 'C')])
trimer = Spliceable(
c3pose,
sites=[
('1,:1', 'N'),
('1,-1:', 'C'),
('2,:1', 'N'),
('2,-1:', 'C'),
('3,:1', 'N'),
('3,-1:', 'C'),
])
segments = [
Segment([helix], '_C'),
Segment([trimer], 'NN'),
Segment([helix], 'CN'),
Segment([trimer], 'CC'),
Segment([helix], 'NC'),
Segment([trimer], 'N_'),
]
w = grow(segments, Cyclic(3, from_seg=1), thresh=9e9)
assert len(w)
for i in range(len(w)):
assert (w.segments[1].entrysiteid[w.indices[i, 1]] !=
w.segments[1].exitsiteid[w.indices[i, 1]])
assert (w.segments[1].entrysiteid[w.indices[i, 1]] !=
w.segments[5].entrysiteid[w.indices[i, 5]])
assert (w.segments[1].exitsiteid[w.indices[i, 1]] !=
w.segments[5].entrysiteid[w.indices[i, 5]])
@only_if_pyrosetta
def test_provenance(c1pose):
sites = [(':1', 'N'), ('-4:', 'C')]
segments = [
Segment([Spliceable(c1pose.clone(), sites)], '_C'),
Segment([Spliceable(c1pose.clone(), sites)], 'NC'),
Segment([Spliceable(c1pose.clone(), sites)], 'NC'),
Segment([Spliceable(c1pose.clone(), sites)], 'NC'),
Segment([Spliceable(c1pose.clone(), sites)], 'NC'),
Segment([Spliceable(c1pose.clone(), sites)], 'NC'),
Segment([Spliceable(c1pose.clone(), sites)], 'NC'),
Segment([Spliceable(c1pose.clone(), sites)], 'N_')
]
w = grow(segments, Cyclic(6), thresh=2, expert=True)
assert len(w)
for i in range(len(w)):
# pose, score, srcpose, srcres = w.sympose(
# i, score=True, provenance=True)
pose, prov = w.pose(i, provenance=True)
for i, prv in enumerate(prov):
lb, ub, src_pose, src_lb, src_ub = prv
assert src_pose is segments[i].spliceables[0].body
assert src_pose is not c1pose
srcseq = src_pose.sequence()[src_lb - 1:src_ub]
seq = pose.sequence()[lb - 1:ub]
assert srcseq == seq
assert len(prov) == len(segments) - 1
@only_if_pyrosetta
def test_extra_chain_handling_noncyclic(c1pose, c2pose, c3pose, c3hetpose):
helix = Spliceable(c1pose, [(':4', 'N'), ('-4:', 'C')])
dimer = Spliceable(c2pose, sites=[('1,:1', 'N'), ('1,-1:', 'C')])
trimer = Spliceable(c3pose, sites=[('1,:1', 'N'), ('1,-2:', 'C')])
hettri = Spliceable(c3hetpose, sites=[('1,:1', 'N'), ('1,-1:', 'C')])
segments = [
Segment([trimer], exit='C'),
Segment([helix], entry='N', exit='C'),
Segment([helix], entry='N', exit='C'),
Segment([hettri], entry='N', exit='C'),
Segment([helix], entry='N', exit='C'),
Segment([dimer], entry='N')
]
w = grow(segments, D3(c2=-1, c3=0), thresh=1)
# vis.showme(w.sympose(0, fullatom=1))
assert len(w) == 4
assert w.pose(0, only_connected='auto').num_chains() == 3
assert w.pose(0, only_connected=0).num_chains() == 6
assert w.pose(0, only_connected=1).num_chains() == 1
hettri = Spliceable(c3hetpose, sites=[('1,:1', 'N'), ('2,-1:', 'C')])
segments = [
Segment([trimer], exit='C'),
Segment([helix], entry='N', exit='C'),
Segment([helix], entry='N', exit='C'),
Segment([hettri], entry='N', exit='C'),
Segment([helix], entry='N', exit='C'),
Segment([dimer], entry='N')
]
w = grow(segments, D3(c2=-1, c3=0), thresh=1)
assert len(w) == 1
assert w.pose(0, only_connected='auto').num_chains() == 3
assert w.pose(0, only_connected=0).num_chains() == 6
assert w.pose(0, only_connected=1).num_chains() == 2
@only_if_pyrosetta
def test_max_results(c1pose, c2pose, c3pose):
helix = Spliceable(c1pose, [(':4', 'N'), ('-4:', 'C')])
dimer = Spliceable(
c2pose,
sites=[('1,:2', 'N'), ('1,-1:', 'C'), ('2,:2', 'N'), ('2,-1:', 'C')])
trimer = Spliceable(
c3pose,
sites=[('1,:1', 'N'), ('1,-2:', 'C'), ('2,:2', 'N'), ('2,-2:', 'C'),
('3,:1', 'N'), ('3,-2:', 'C')])
segments = [
Segment([trimer], exit='C'),
Segment([helix], entry='N', exit='C'),
Segment([helix], entry='N', exit='C'),
Segment([helix], entry='N', exit='C'),
Segment([dimer], entry='N')
]
wref = grow(segments, D3(c2=-1, c3=0), thresh=1)
assert len(wref) == 90
s = wref.scores[:]
s.sort()
i = np.argmin(s[1:] - s[:-1])
wtst = grow(segments, D3(c2=-1, c3=0), thresh=1, max_results=90)
assert len(wtst) == 90
assert np.all(wref.indices == wtst.indices)
@only_if_pyrosetta
def test_max_samples(c2pose, c3pose, c1pose):
helix = Spliceable(c1pose, [(':1', 'N'), ('-4:', 'C')])
nseg = 39
segments = ([Segment([helix], exit='C')] +
[Segment([helix], entry='N', exit='C')] *
(nseg - 2) + [Segment([helix], entry='N')])
with pytest.raises(ValueError):
grow(segments, Octahedral(c3=-1, c2=0), thresh=1, max_samples=1000000)
|
[
"willsheffler@gmail.com"
] |
willsheffler@gmail.com
|
dcdd1a039d1cc9aac27c51e8620db92b5d4541b9
|
af63f2da6b3b080d81eff6e50343122c2c68b72a
|
/dbscan/tests/test_dbscan.py
|
c1bac7c15ad03dd39d99bf700a32f976450e4bdd
|
[] |
no_license
|
Prev/ITE4005
|
9dcf06ff05c398e7c6f8ee6fde5158dd53ef3441
|
ffff7b6ac9958aa7fe6075b532461348251d9e4e
|
refs/heads/master
| 2021-04-12T03:59:10.356278
| 2018-06-10T06:23:52
| 2018-06-10T06:23:52
| 125,726,988
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,078
|
py
|
from clustering import Cluster, Point
def test_point_dist():
""" Unit Test of Point.dist()
"""
p1 = Point(1, 3.0, 4.0)
p2 = Point(2, 0.0, 0.0)
assert p1.dist(p2) == 5
assert p2.dist(p1) == 5
sample_points = [
Point(0, 0, 0), Point(1, 0, 1), Point(2, 0, 5),
Point(3, 1, 0), Point(4, 1, 1), Point(5, 1, 5),
Point(6, 2, 0), Point(7, 2, 1), Point(8, 6, 0),
Point(9, 6, 1), Point(10, 6, 2), Point(11, 6, 3),
Point(12, 7, 0), Point(13, 7, 1), Point(14, 7, 2),
Point(15, 7, 3),
]
def test_neighbors():
""" Unit Test of Cluster._neighbors()
"""
c = Cluster(sample_points, 2, 1, 3)
assert c._neighbors(sample_points[0]) == [sample_points[1], sample_points[3]]
c = Cluster(sample_points, 2, 1.5, 3)
assert c._neighbors(sample_points[0]) == [sample_points[1], sample_points[3], sample_points[4]]
def test_cluster():
""" Test of Cluster.clusters()
"""
c = Cluster(sample_points, 2, 1, 3)
found = c.clusters()
cluster1 = [0, 1, 3, 4, 6, 7]
cluster2 = [8, 9, 10, 11, 12, 13, 14, 15]
assert (found == [cluster1, cluster2]) or (found == [cluster2, cluster1])
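
# --- Added note (not part of the original tests) ---
# The _neighbors expectations above amount to: q is a neighbor of p when
# p.dist(q) <= eps and q is not p (eps being the radius that changes from
# 1 to 1.5 between the two cases). A standalone sketch of that predicate;
# the Cluster internals themselves are not shown here:
def neighbors(points, p, eps):
    return [q for q in points if q is not p and p.dist(q) <= eps]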
|
[
"prevdev@gmail.com"
] |
prevdev@gmail.com
|
27cf96bf94386b76476d9de0f92daa86bb2dfbe1
|
e4e89b6a4ff43dd8803772e92ed93ad44b01912f
|
/manage.py
|
61deea776c819d49651a17a91463290074bd7eb4
|
[] |
no_license
|
DiegoDigo/EstouAqui
|
1d4cd5a5bcbd44de194c7b27697248ef0d3d8650
|
ae358ff6456638a59fe953fbc9063a46ae794037
|
refs/heads/master
| 2021-01-11T14:49:19.539685
| 2017-03-13T15:15:28
| 2017-03-13T15:15:28
| 80,224,930
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 807
|
py
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "EstouAqui.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
|
[
"di3g0d0ming05@gmail.com"
] |
di3g0d0ming05@gmail.com
|
1d3a4206b0e289f15bf431e2cc38cc2da76a091c
|
3061e54c8b5f3fb8d9cfd4f8d584913190cb82b7
|
/IC_Problems/91_level_order_graph.py
|
ea83eb855faf5e1e41c9c2d6f345f1feb0e99b95
|
[] |
no_license
|
litakgit/DSAlgo
|
e362068ce456d4c8fe1a8c47aa9ae3d679c2f876
|
c9cb47571e85a992891abc7dd249747270b6d985
|
refs/heads/master
| 2023-01-04T11:49:26.470861
| 2020-11-04T16:17:02
| 2020-11-04T16:17:02
| 295,679,905
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 683
|
py
|
import collections
def get_level_order_nodes(g, start):
"""
Lessons:
- Missed the param in defaultdict.
- visited.append was missing.
"""
visited = []
Q = collections.deque()
Q.append(start)
visited.append(start)
res = []
while Q:
item = Q.popleft()
res.append(item)
for nei in g[item]:
if nei not in visited:
visited.append(nei)
Q.append(nei)
return res
if __name__ == "__main__":
g = collections.defaultdict(list)
g[1].append(2)
g[1].append(3)
g[2].append(4)
g[3].append(5)
print (g)
print (get_level_order_nodes(g, 1))
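
# --- Added example (not part of the original file) ---
# Variant: group nodes level by level instead of returning a flat BFS
# order. Tracking the frontier size per iteration is enough; a set is
# used here for O(1) visited checks.
def get_nodes_by_level(g, start):
    visited = {start}
    Q = collections.deque([start])
    levels = []
    while Q:
        level = []
        for _ in range(len(Q)):
            node = Q.popleft()
            level.append(node)
            for nei in g[node]:
                if nei not in visited:
                    visited.add(nei)
                    Q.append(nei)
        levels.append(level)
    return levels
# For the graph built above, get_nodes_by_level(g, 1) -> [[1], [2, 3], [4, 5]]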
|
[
"tilakadhya@gmail.com"
] |
tilakadhya@gmail.com
|
a7c957893a041107c26db7072406f1e9400ea32e
|
0cf8e8f663ae910e6be374810e5f3de18cf00c12
|
/su3/lib/fitting.py
|
39949c277944c187cd412ab47e56048226baae91
|
[] |
no_license
|
ShaneDrury/pyon-qed
|
126c3e76b9fb9ef4c9ebb63bd3b39b12b4edc0d5
|
5182f024007c8485916affa32a24360f5343547e
|
refs/heads/master
| 2020-05-20T22:24:32.501321
| 2014-09-05T14:23:07
| 2014-09-05T14:23:07
| 19,281,790
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,899
|
py
|
from functools import partial
from pyon.lib.fitting.common import Fitter, ErrorGenerator, \
ChiSqFitObjectGenerator
from pyon.lib.resampling import Jackknife
from pyon.lib.structs.errors import OneErrorGeneratorBase
from delmsq.lib.fitting.minuit import MinuitFitMethod
def fit_chi2_minuit_delmsq(jackknife_data, x_range=None, fit_func=None,
initial_value=None, errs=None, fit_func_params=None,
avg_fit_func_params=None,
central_data=None):
resampler = Jackknife(n=1) # TODO: Maybe change this to a dummy resampler
fit_object_generator = ChiSqFitObjectGenerator()
one_err_generator = FrozenOneErrorGenerator(errs)
error_generator = ErrorGenerator(one_err_generator, frozen=True)
fitter = DelMSqFitter(jackknife_data, x_range, fit_func, initial_value, None,
resampler, error_generator, fit_object_generator,
MinuitFitMethod(fit_func), fit_func_params, avg_fit_func_params,
central_data)
return fitter
# def fit_chi2_minuit_light_masses(targets, fit_func, initial_values,
# fit_func_params, avg_fit_func_params):
# resampler = Jackknife(n=1)
# fit_object_generator = LightMassesFitObjectGenerator()
# fitter = LightMassesFitter()
# return fitter
#
#
# class LightMassesFitObjectGenerator(FitObjectGeneratorBase):
# def generate(self, data, errors, fit_func, x_range):
# return Chi2LightMasses(data, errors, x_range, fit_func)
#
#
# class Chi2LightMasses(GenericChi2):
# def __call__(self, *args, **kwargs):
# # Gets called with (mu, md, ms)
# ff = self.fit_func(self.x_range, *args, **kwargs)
# return sum(((self.data - ff) / self.errors)**2)
class FrozenOneErrorGenerator(OneErrorGeneratorBase):
def __init__(self, err):
self._err = err
def generate(self, data):
return self._err
# class LightMassesFitter(FitterBase):
# def __init__(self):
# pass
#
# def fit(self):
# pass
class DelMSqFitter(Fitter):
"""
This is a refinement of the existing Fitter class.
It uses a different fit function for each jackknife sample.
TODO: This is pretty different from the regular Fitter so maybe just
make a new class?
"""
def __init__(self, data, x_range, fit_func, initial_value, bounds,
resampler, error_generator, fit_object_generator, fit_method,
fit_func_params, avg_fit_func_params, central_data):
self._fit_func_params = fit_func_params
self._avg_fit_func_params = avg_fit_func_params
self._central_data = central_data
super().__init__(data, x_range, fit_func, initial_value, bounds,
resampler, error_generator, fit_object_generator,
fit_method)
def _prepare_fit_funcs(self):
self._fit_funcs = [partial(self._fit_func_base, **kwargs)
for kwargs in self._fit_func_params]
# avg_fit_func_params = {k: np.average([sample[k] for sample
# in self._fit_func_params])
# for k in self._fit_func_params[0].keys()}
self._central_fit_func = partial(self._fit_func_base,
**self._avg_fit_func_params)
def _prepare_data(self):
pass # Don't need to do x_range in this case
def _prepare_central_fit_obj(self):
self._central_fit_obj = self._fit_obj_gen.generate(
self._central_data,
self._err_gen.generate_central_error(self._central_data),
self._central_fit_func, self._x_range)
def _prepare_fit_objs(self):
"""
Note, ave_resampled isn't used, but self._data is instead.
"""
self._fit_objects = [self._fit_obj_gen.generate(sample, err,
ff,
self._x_range)
for sample, err, ff in zip(self._data,
self._errors,
self._fit_funcs)]
def _prepare_ave_resampled(self):
self._ave_resampled = self._central_data
# class DelMSqFitter(FitterBase):
#
# def __init__(self, data=None, x_range=None, fit_range=None, fit_func=None,
# initial_value=None, gen_err_func=None, gen_fit_obj=None,
# fit_method=None, resampler=None, bounds=None, frozen=True):
# self.data = np.array(data)
# self.x_range = x_range
# self.fit_range = fit_range
# self.fit_func = fit_func
# self.initial_value = initial_value
# self.gen_err_func = gen_err_func
# self.gen_fit_obj = gen_fit_obj
# self.fit_method = fit_method
# self.resampler = resampler
# self.frozen = frozen
# self.errors = self._gen_errs()
# self.bounds = bounds
#
# def _gen_errs(self):
# errors = self.gen_err_func(self.data)
# return errors
#
# def _get_average_params(self):
# average_fit_obj = self.gen_fit_obj(self.data,
# self.errors,
# self.x_range,
# self.fit_func,
# fit_range=self.fit_range)
# self.average_fit_obj = average_fit_obj
# average_params = self.fit_method.fit(average_fit_obj,
# self.initial_value, self.bounds)
# average_params['chi_sq_dof'] = self._chi_sq_dof(average_params['chi_sq_dof'])
# return average_params
|
[
"shane.r.drury@gmail.com"
] |
shane.r.drury@gmail.com
|
75f9c1d0f476271eeed6afa3983f8849c627964e
|
ccfad4f25e6e624fa8715f5a0793377bf4642167
|
/python/fizzBuzz.py
|
b7e121b13fa2fb4ffc4b3849d246d62766bb98b0
|
[] |
no_license
|
ashenoy95/leetcode
|
92721569b5adb7e06eba429a649286736d6fd692
|
385b41ce2f3ad48d361eafd93d7a900577047032
|
refs/heads/master
| 2021-06-14T10:51:06.299083
| 2021-03-22T21:12:02
| 2021-03-22T21:12:02
| 94,832,974
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 501
|
py
|
class Solution(object):
def fizzBuzz(self, n):
"""
:type n: int
:rtype: List[str]
"""
fizzbuzz = []
for i in range(1, n+1):
            if i % 15 == 0:
                fizzbuzz.append('FizzBuzz')
            elif i % 3 == 0:
                fizzbuzz.append('Fizz')
            elif i % 5 == 0:
                fizzbuzz.append('Buzz')
else:
fizzbuzz.append(str(i))
return fizzbuzz
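
# --- Added usage sketch (not part of the original solution) ---
#     Solution().fizzBuzz(5)   -> ['1', '2', 'Fizz', '4', 'Buzz']
#     Solution().fizzBuzz(15)[-1] == 'FizzBuzz'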
|
[
"noreply@github.com"
] |
ashenoy95.noreply@github.com
|
dd502191be9ebece86e545397882664929e30344
|
3a686fc3df3fd45918734a03a3d78ac1b2f70fc5
|
/one8.py
|
28f6585e5b92d0608954b0ac79c5d0d145d31219
|
[] |
no_license
|
MAYANK095/Artificial-Intelligence
|
ff58b76bfca8706da7c546ec62defbd504c5579b
|
908870cc62da9f9c3a6e4f8287a28a0ba1531895
|
refs/heads/master
| 2020-03-29T23:33:16.391130
| 2018-10-04T16:01:35
| 2018-10-04T16:01:35
| 150,478,203
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 303
|
py
|
import matplotlib.pyplot as plt
activities=['eat','sleep','rave','repeat']
slices=[5,6,7,8]
colors=['r','g','b','y']
plt.xlabel("X axis")
plt.ylabel("Y axis")
plt.pie(slices, labels=activities, colors=colors, startangle=90, shadow=False,
        explode=(1, 1, 1, 1), autopct='%1.1f%%')  # note: explode=1 pushes each wedge out a full radius; 0.1 gives the usual slight separation
plt.legend()  # legend must come after pie() so the labeled wedges exist
plt.show()
|
[
"mittalmayank095@gmail.com"
] |
mittalmayank095@gmail.com
|
a7840b8bba658b4a16ba1645c9b158442e77a010
|
e03e59d67c96c1afa0a1c76e62235a3e3f639976
|
/django_test3_session/manage.py
|
8406ae7d3c317b1b2fb7cdc48035d4c9b4206720
|
[] |
no_license
|
kangmihee/EX_python
|
10a63484802e6ff5454f12f7ade7e277dbf3df97
|
0a8dafe667f188cd89ef7f021823f6b4a9033dc0
|
refs/heads/master
| 2020-07-02T00:23:05.465127
| 2019-09-03T07:49:46
| 2019-09-03T07:49:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 661
|
py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'django_test3_session.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
|
[
"acorn@acorn-PC"
] |
acorn@acorn-PC
|
4ac53e1bbd8fa6738ef3017466446ced5b4b64c7
|
4488e3c26de4291da447d8251c491b43cb810f7c
|
/smart_portal_project_issue/models.py
|
4c2a687a1fbaee977492c0e890e8b290a238ea0f
|
[] |
no_license
|
smart-solution/odoo-crm-80
|
b19592ce6e374c9c7b0a3198498930ffb1283018
|
85dfd0cc37f81bcba24d2a0091094708a262fe2c
|
refs/heads/master
| 2016-09-06T06:04:35.191924
| 2015-07-14T12:48:28
| 2015-07-14T12:48:28
| 33,174,511
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,085
|
py
|
# -*- coding: utf-8 -*-
from openerp import models, fields, api
class res_users(models.Model):
_inherit = 'res.users'
portal_project_id = fields.Many2one('project.project','Default Project for Portal Issues')
portal_customer_id = fields.Many2one('res.partner','Customer for Portal Issues',domain=[('is_company','=',True),('customer','=',True)])
class res_partner(models.Model):
_inherit = 'res.partner'
project_ids = fields.One2many('project.project', 'partner_id', 'Projects')
class project_issue(models.Model):
_inherit = 'project.issue'
in_warranty = fields.Boolean('Under Warranty')
estimated_time = fields.Float('Estimated Duration')
project_ids = fields.One2many('project.project', 'partner_id', string='Projects')
stage_name = fields.Char(related='stage_id.name')
def create(self, cr, uid, vals, context=None):
"""Fills the issue project with the user default project for portal issues"""
if ('project_id' not in vals or 'project_id' in vals and not vals['project_id']) and 'partner_id' in vals and vals['partner_id']:
user_id = self.pool.get('res.users').search(cr, uid, [('partner_id','=',vals['partner_id'])])
if user_id:
user = self.pool.get('res.users').browse(cr, uid, user_id[0])
vals['project_id'] = user.portal_project_id and user.portal_project_id.id or False
return super(project_issue, self).create(cr, uid, vals, context=context)
def search(self, cr, uid, args, offset=0, limit=None, order=None,
context=None, count=False):
if context is None:
context = {}
if 'portal' in context:
user = self.pool.get('res.users').browse(cr, uid, uid)
project_ids = self.pool.get('project.project').search(cr, uid, [('partner_id','=',user.portal_customer_id.id)])
args.append(['project_id','in',project_ids])
return super(project_issue, self).search(cr, uid, args=args, offset=offset, limit=limit, order=order,
context=context, count=count)
@api.one
def action_to_plan(self, vals):
stage_id = self.env['project.task.type'].search([('name', '=', 'To plan (Smart)')])
issue = self.env['project.issue'].sudo().search([('id', '=', self.id)])
return issue.write({'stage_id': stage_id[0].id})
@api.one
def action_to_qualify(self, vals):
stage_id = self.env['project.task.type'].search([('name', '=', 'To qualify (Smart)')])
issue = self.env['project.issue'].sudo().search([('id', '=', self.id)])
return issue.write({'stage_id': stage_id[0].id})
@api.one
def action_to_release(self, vals):
stage_id = self.env['project.task.type'].search([('name', '=', 'To release (Smart)')])
issue = self.env['project.issue'].sudo().search([('id', '=', self.id)])
return issue.write({'stage_id': stage_id[0].id})
@api.one
def action_in_progress(self, vals):
stage_id = self.env['project.task.type'].search([('name', '=', 'In progress (Smart)')])
issue = self.env['project.issue'].sudo().search([('id', '=', self.id)])
return issue.write({'stage_id': stage_id[0].id})
@api.one
def action_to_close(self, vals):
stage_id = self.env['project.task.type'].search([('name', '=', 'Done')])
issue = self.env['project.issue'].sudo().search([('id', '=', self.id)])
return issue.write({'stage_id': stage_id[0].id})
@api.one
def action_cancel(self, vals):
stage_id = self.env['project.task.type'].search([('name', '=', 'Cancelled')])
issue = self.env['project.issue'].sudo().search([('id', '=', self.id)])
return issue.write({'stage_id': stage_id[0].id})
@api.one
def action_hold(self, vals):
stage_id = self.env['project.task.type'].search([('name', '=', 'On hold')])
issue = self.env['project.issue'].sudo().search([('id', '=', self.id)])
return issue.write({'stage_id': stage_id[0].id})
|
[
"fabian.semal@smartsolution.be"
] |
fabian.semal@smartsolution.be
|
c1b0e2b0873eb797c6642ac67c053c2f680e80fa
|
3630ffad1ac3497e71c2a53f0c34ee2cd76f3fec
|
/lock_manager.py
|
e775a9ab006aef99713af3119cefc010b80ac73d
|
[] |
no_license
|
AlexShein/just_code
|
35568c93c6354cb175ec7a182ad90ba0f17e2728
|
16eec2eefabeb3ea72d5344db18f7cd25be7ffec
|
refs/heads/master
| 2022-05-26T20:51:15.875256
| 2022-05-25T07:50:42
| 2022-05-25T07:50:42
| 135,574,252
| 0
| 0
| null | 2018-07-24T11:35:07
| 2018-05-31T11:23:51
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 1,180
|
py
|
import logging
from redis.exceptions import LockError
from redis.lock import Lock
from app.client.redis import redis_client
log = logging.getLogger(__name__)
class LockManager():
def __init__(self, lock_name, lock_timeout=10):
self.lock_name = lock_name
self.lock_timeout = lock_timeout
self.is_lock_free = False
def __enter__(self):
self.lock = Lock(
redis_client,
self.lock_name,
blocking_timeout=1,
timeout=self.lock_timeout,
)
try:
self.is_lock_free = self.lock.acquire(blocking=False)
except LockError:
log.error(
'lock acquire error',
extra={'data': {'lock_name': self.lock_name}},
exc_info=True,
)
return self
def __exit__(self, type, value, traceback):
if self.is_lock_free:
try:
self.lock.release()
except LockError:
log.error(
'lock release error',
extra={'data': {'lock_name': self.lock_name}},
exc_info=True,
)
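# Usage sketch (editor's illustration, not part of the original module): how a
# job function might take the lock. The lock name and the guarded work are
# hypothetical; it assumes a reachable Redis behind redis_client.
def run_exclusively():
    with LockManager('nightly-report', lock_timeout=30) as manager:
        if not manager.is_lock_free:
            log.info('lock is held by another process, skipping this run')
            return
        # exclusive section: only one process gets here at a time
        log.info('doing the guarded work')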
|
[
"alexandr.shein@ostrovok.ru"
] |
alexandr.shein@ostrovok.ru
|
047541157c2cef47af33bedc6b8856366ac9037d
|
eac9f1e10d689de4a4bb1475f61b76ea622a7036
|
/prepro/OPPOA59PreProcess.py
|
c8b924f8a446c1abb56c71121ddc84228591a033
|
[] |
no_license
|
panational/apptest
|
f2a17163c17f318f46860b03d984caf86e76f6aa
|
1d96bd0fce34e1d333c29648d1564c542f8b1d43
|
refs/heads/master
| 2020-03-08T08:44:11.453799
| 2018-04-25T02:50:39
| 2018-04-25T02:50:39
| 128,029,235
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,898
|
py
|
#!/usr/bin/python
# -*- coding:utf-8 -*-
from BaseDevicePreProcess import *
class OPPOA59PreProcess (BaseDevicePreProcess):
def __init__(self,tester):
super(OPPOA59PreProcess, self).__init__(tester)
def install_app(self):
cmd = "adb -s %s install -r %s" % (self.tester.device.deviceid,DataProvider.testapk)
subprocess.call(cmd,shell=True)
def install_process(self):
        Log.logger.info(u"Device %s: handling the various pop-ups during install" % self.tester.device.devicename)
try:
time.sleep(5)
if self.tester.is_element_exist(u'静默安装拦截',10):
self.tester.find_element_by_id_and_tap('com.kingroot.kinguser:id/checkbox_remember')
self.tester.find_element_by_id_and_tap('com.kingroot.kinguser:id/button_right')
self.tester.find_element_by_id_and_tap('com.android.packageinstaller:id/btn_allow_once',60)
        except TimeoutException as e:
traceback.print_exc()
finally:
try:
self.tester.find_element_by_id_and_tap('com.android.packageinstaller:id/bottom_button_two')
self.tester.find_element_by_id_and_tap('com.android.packageinstaller:id/bottom_button_two')
self.tester.find_element_by_id_and_tap('oppo:id/remember_cb')
self.tester.find_element_by_id_and_tap('android:id/button1')
            except Exception as e:
traceback.print_exc()
DriverManager.quit_driver(self.tester.device.deviceid)
def login_success_process(self):
        Log.logger.info(u"Device %s: handling the automatic pop-ups after a successful login" % self.tester.device.devicename)
try:
self.tester.find_element_by_id_and_tap('android:id/button1')
self.tester.find_element_by_id_and_tap('com.nice.main:id/btn_cancel')
        except Exception as e:
traceback.print_exc()
DriverManager.quit_driver(self.tester.device.deviceid)
def get_permission_process(self):
        Log.logger.info(u"Device %s: acquiring camera and microphone permissions" % self.tester.device.devicename)
try:
self.tester.find_element_by_id_and_tap('com.nice.main:id/btnCamera')
self.tester.find_element_by_id_and_tap('com.nice.main:id/camera_tv')
            # camera permission
            self.tester.find_element_by_id_and_tap('oppo:id/remember_cb')
            self.tester.find_element_by_id_and_tap('android:id/button1')
            # microphone (recording) permission
            self.tester.find_element_by_id_and_tap('oppo:id/remember_cb')
            self.tester.find_element_by_id_and_tap('android:id/button1')
            # close the viewfinder
            self.tester.find_element_by_id_and_tap('com.nice.main:id/titlebar_return')
        except Exception as e:
traceback.print_exc()
DriverManager.quit_driver(self.tester.device.deviceid)
|
[
"panguoqing@gaosiedu.com"
] |
panguoqing@gaosiedu.com
|
d5264b046b476f73c2aaa45f7e453e8870be24c7
|
a2b3bad36d6060bec5a7bed9e95b54891965b513
|
/ebidding/wsgi.py
|
1b9f52f20f6d69fc2b76e4049f13ba6b220122cb
|
[] |
no_license
|
keshav4118/eBidding
|
639ef40fa529853965125d6ef833142407a9e731
|
4fa15389a71c25a007991e60ba42d07a5a261daa
|
refs/heads/main
| 2023-04-11T13:36:22.477861
| 2021-04-14T13:46:28
| 2021-04-14T13:46:28
| 352,671,762
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 393
|
py
|
"""
WSGI config for ebidding project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'ebidding.settings')
application = get_wsgi_application()
|
[
"keshav4118@gmail.com"
] |
keshav4118@gmail.com
|
c8860d7e661c66d397e01606f87ec5ef47806624
|
0e6b756e9878b4d743d3c1c86f10717fdd2bb860
|
/dHydra/core/Worker.py
|
f4c46ba5ceab0a0870cc3e6500dbcb5025dc5b43
|
[] |
no_license
|
yanjlee/dHydra
|
e027080210899ffa7f140a73462f6f4df075cf4a
|
b22878bd191040eccff3d51cbd683f53a0f5b4fd
|
refs/heads/master
| 2021-01-12T21:37:31.392618
| 2016-08-31T12:00:18
| 2016-08-31T12:00:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,892
|
py
|
# -*- coding: utf-8 -*-
"""
Abstract Worker class
@author: Wen Gu
@contact: emptyset110@gmail.com
"""
import multiprocessing
import threading
import time
import logging
import redis
import pymongo
import json
import copy
import dHydra.core.util as util
from dHydra.console import *
from datetime import datetime
from datetime import timedelta
from abc import ABCMeta
import signal
import sys
import os
class Worker(multiprocessing.Process):
__metaclass__ = ABCMeta
def __init__( self
, singleton = True # 单例模式
, nickname = None # Worker的自定义名字
, description = "No Description" # 备注说明
, log_level = "INFO" # "DEBUG","INFO","WARNING"
, heart_beat_interval = 3 # 默认3秒心跳
, **kwargs
):
self.__token__ = util.generate_token()
if nickname is None:
self.__nickname__ = self.__class__.__name__ + "Default"
else:
self.__nickname__ = nickname
self.nickname = self.__nickname__
self.name = self.__nickname__
self.__singleton__ = singleton
self.__description__ = description
self.__heart_beat_interval__ = heart_beat_interval
        self.__threads__ = dict() # threads being monitored
        self.__data_feeder__ = set() # channels this Worker subscribes to
self.__follower__ = set() # Follower
        self.__error_msg__ = None # last error message, if any
        self.__stop_info__ = None # reason the Worker stopped
        self.__stop_time__ = None # time the Worker stopped
self.__status__ = "init" # "init", "error_exit", "suspended", "user_stopped", "normal"
self.redis_key = "dHydra.Worker."+self.__class__.__name__+"."+self.__nickname__+"."
self.channel_pub = self.redis_key + "Pub"
"""
        self.__threads__ = {
            "nickname": {
                "description" : "notes on what this thread does",
                "name" : "the thread's name",
                "target" : "the thread's target",
                "restart_mode" : "restart mode, one of manual/auto/remove; manual lets an administrator restart the thread by command, auto restarts it as soon as it stops, remove drops it from the monitor list once it ends",
                "restart_func" : "the method called on an automatic/manual restart",
},
}
"""
self.logger = self.get_logger( level = log_level )
if self.check_prerequisites() is True:
super().__init__()
self.daemon = True
else:
sys.exit(0)
self.shutdown_signals = [
            signal.SIGQUIT, # quit signal
            signal.SIGINT, # keyboard interrupt
            signal.SIGHUP, # hangup (e.g. from nohup)
            signal.SIGTERM, # kill command
]
for s in self.shutdown_signals:
            # the single shutdown hook invoked once an exit signal is caught
signal.signal(s, self.__on_termination__)
def __is_unique__( self ):
info = self.__redis__.hgetall( self.redis_key+"Info" )
if "token" in info:
if info["token"] != self.__token__:
if "heart_beat" in info:
if ( datetime.now() - datetime.strptime( info["heart_beat"], '%Y-%m-%d %H:%M:%S.%f' ) ) < timedelta(seconds = self.__heart_beat_interval__):
return False
return True
def __auto_restart_thread__( self ):
        # the Worker's built-in default method for auto-restarting threads
pass
def __command_handler__(self, msg_command):
# cli is a dict with the following structure:
"""
        msg_command = {
            "type" : "sys/customized",
            "operation_name" : "the name of the operation (a method on this Worker) to invoke",
"kwargs" : "suppose that the operation is a function, we need to pass some arguments",
"token" : "the token is used to verify the authentication of the operation"
}
"""
print(msg_command)
msg_command = json.loads( msg_command.replace("None","\"None\"").replace("\'","\"") )
if msg_command["type"] == "sys":
str_kwargs = ""
for k in msg_command["kwargs"].keys():
str_kwargs += (k + "=" + "\'"+msg_command["kwargs"][k] + "\'" + "," )
try:
eval( "self."+msg_command["operation_name"]+"("+ str_kwargs +")" )
except Exception as e:
self.logger.error(e)
def monitor_add_thread( self, thread, description = "No Description", restart_mode = "manual", restart_func = None ):
        # add the thread to the administrator's monitoring scope
pass
def monitor_remove_thread(self, thread):
        # stop the administrator's monitoring of the given thread
pass
def check_prerequisites(self):
"""
        Check whether the prerequisites for starting the process are met
"""
        # check the redis and mongodb connections
try:
self.__redis__ = get_vendor("DB").get_redis()
self.__redis__.client_list()
self.__listener__ = self.__redis__.pubsub()
self.__listener__.subscribe(["dHydra"])
except redis.ConnectionError:
self.logger.error("Cannot connect to redis")
return False
self.mongo = get_vendor("DB").get_mongodb()
if self.mongo is False:
self.logger.error("Cannot connect to mongodb")
return False
        # if running as a singleton, check whether a duplicate instance is already up
return True
def __listen_command__(self):
#
self.command_listener = self.__redis__.pubsub()
channel_name = self.redis_key + "Command"
self.command_listener.subscribe( [channel_name] )
while True:
msg_command = self.command_listener.get_message()
if msg_command:
if msg_command["type"] == "message" or msg_command["type"] == "pmessage":
self.__command_handler__(msg_command["data"])
else:
time.sleep(0.01)
def __heart_beat__(self):
# flush status infomation to redis
status = dict()
status["heart_beat"] = datetime.now()
status["nickname"] = self.__nickname__
status["error_msg"] = self.__error_msg__
status["stop_info"] = self.__stop_info__
status["stop_time"] = self.__stop_time__
status["status"] = self.__status__
status["threads"] = copy.deepcopy( self.__threads__ )
status["data_feeder"] = self.__data_feeder__
status["pid"] = self.pid
status["follower"] = self.__follower__
status["token"] = self.__token__
self.__redis__.hmset( self.redis_key + "Info", status )
def __producer__(self):
"""
        Producer thread, meant to be overridden in subclasses.
        If it is not overridden, the thread exits right after starting.
"""
pass
def __consumer__(self):
"""
        The default consumer thread.
        It starts together with the Worker process.
"""
while True:
data = self.__listener__.get_message()
if data is not None:
self.__data_handler__( data )
else:
time.sleep(0.001)
    # data-handling method that must be overridden in subclasses
def __data_handler__(self, msg):
"""
        Data-handling method to be overridden in subclasses;
        the msg it receives is the raw data picked up from the Redis listener.
"""
pass
def __before_termination__(self, sig):
pass
def __on_termination__(self, sig, frame):
self.__before_termination__(sig)
self.__status__ = "terminated"
self.__heart_beat__() # The last heart_beat, sad...
sys.exit(0)
def publish(self, data):
# publish data to redis
try:
self.__redis__.publish( self.channel_pub , data )
except Exception as e:
self.logger.warning(e)
def run(self):
"""
        Initialize the Worker
"""
        # first, check whether an identical process has already been started
if self.__is_unique__():
self.__status__ = "started"
else:
self.error_msg = "Duplicated Process"
self.logger.warning(self.error_msg)
sys.exit(0)
return False
        # start the command-listening thread
self.__thread_listen_command__ = threading.Thread( target = self.__listen_command__ )
self.__thread_listen_command__.setDaemon(True)
self.monitor_add_thread( thread = self.__thread_listen_command__, description = "Listening Command Channel", restart_mode = "auto", restart_func = self.__auto_restart_thread__ )
self.__thread_listen_command__.start()
        # check the initialization settings and start threads as needed
        #### PUB thread
self.__thread_pub__ = threading.Thread( target = self.__producer__ )
self.__thread_pub__.setDaemon(True)
self.monitor_add_thread( thread = self.__thread_pub__, description = "DATA PUBLISHER", restart_mode = "auto", restart_func = self.__auto_restart_thread__ )
self.__thread_pub__.start()
#### LISTENER
self.__thread_sub__ = threading.Thread( target = self.__consumer__ )
self.__thread_sub__.setDaemon(True)
self.monitor_add_thread( thread = self.__thread_sub__, description = "DATA CONSUMER", restart_mode = "auto", restart_func = self.__auto_restart_thread__ )
self.__thread_sub__.start()
while True:
# heart beat
self.__heart_beat__()
time.sleep(self.__heart_beat_interval__)
def get_logger(self, level):
logger = logging.getLogger(self.__class__.__name__)
        # note: compare strings with ==, not the identity operator `is`
        if level == "DEBUG":
            logger.setLevel(10)
        elif level == "INFO":
            logger.setLevel(20)
        elif level == "WARNING":
            logger.setLevel(30)
        elif level == "ERROR":
            logger.setLevel(40)
        elif level == "CRITICAL":
            logger.setLevel(50)
        else:
            logger.setLevel(20)
return logger
def subscribe(self, worker_name = None, nickname = None ):
"""
        Subscribe to a Worker
"""
if (worker_name is not None) and (nickname is None):
            # subscribe to all Workers of this class
            self.__listener__.psubscribe("dHydra.Worker."+worker_name+".*.Pub")
            self.logger.info( "About to subscribe the Workers of worker_name: {}, pattern:{}".format(worker_name, "dHydra.Worker."+worker_name+".*.Pub") )
elif (nickname is not None):
            # subscribe by nickname
self.__listener__.psubscribe("dHydra.Worker.*."+nickname+".Pub")
self.logger.info( "About to subscribe the Worker of nickname: {}, pattern:{}".format(nickname,"dHydra.Worker.*."+nickname+".Pub") )
else:
            self.logger.warning("invalid combination of nickname/worker_name arguments")
def unsubscribe(self, worker_name = None, nickname = None):
"""
        Unsubscribe from a Worker
"""
if (worker_name is not None) and (nickname is None):
            # unsubscribe from all Workers of this class
            self.__listener__.punsubscribe("dHydra.Worker."+worker_name+".*.Pub")
            self.logger.info( "About to unsubscribe the Workers of worker_name: {}, pattern:{}".format(worker_name, "dHydra.Worker."+worker_name+".*.Pub") )
elif (nickname is not None):
            # unsubscribe by nickname
            self.__listener__.punsubscribe("dHydra.Worker.*."+nickname+".Pub")
            self.logger.info( "About to unsubscribe the Worker of nickname: {}, pattern:{}".format(nickname, "dHydra.Worker.*."+nickname+".Pub") )
else:
            self.logger.warning("invalid combination of nickname/worker_name arguments")
|
[
"21324784@qq.com"
] |
21324784@qq.com
|
bcc798c2091bc11c084fec7ee129eacf1fca76f7
|
43613720a21df07f0ab4c04053dbdd8b380dccb7
|
/resources/subviews/search.py
|
b75f5fb24474bbe980a35bd4d13e809441844c8c
|
[] |
no_license
|
m-shihata/netflix
|
02d364fd5b6f01463b3c89d705199754fdc9461c
|
87fd82d8cf0ffd05a4a6f116a8e1e32d701d0598
|
refs/heads/master
| 2023-04-19T05:03:59.917293
| 2021-04-27T02:11:45
| 2021-04-27T02:11:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,952
|
py
|
import random
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework import status
from resources.models import Tvshows, Movies
from django.db.models import Q
from itertools import chain
from resources.serializers import MovieSerializer, TvshowsSerializer, TvshowsdetailedSerializer, MovieInfoSerializer
class Search(APIView):
permission_classes = [IsAuthenticated, ]
def get(self, request, *args, **kwargs):
try:
data = request.GET
name = data.get('name', '')
genre = data.get('genre', None)
movies = Movies.objects.filter(Q(name__icontains=name) | Q(genres__genre=genre)).distinct()
tv_shows = Tvshows.objects.filter(Q(name__icontains=name) | Q(genres__genre=genre)).distinct()
serializer1 = TvshowsSerializer(instance=tv_shows, many=True)
serializer2 = MovieSerializer(instance=movies, many=True)
            combined = list(chain(serializer1.data, serializer2.data))  # avoid shadowing the builtin all()
            random.shuffle(combined)
            return Response(combined)
except Exception as e:
return Response({"detail": str(e)}, status=404)
class MoreInfo(APIView):
permission_classes = [IsAuthenticated, ]
def get(self, request, *args, **kwargs):
data = request.GET
print(data.get('name'))
try:
if data.get('type') == 'tv_show':
tv_show = Tvshows.objects.get(id=int(data.get('id')))
serializer = TvshowsdetailedSerializer(instance=tv_show)
else:
movie = Movies.objects.get(id=data.get('id'))
serializer = MovieInfoSerializer(instance=movie)
return Response(data=serializer.data, status=status.HTTP_200_OK)
except Exception as e:
return Response({"detail": str(e)}, status=404)
|
[
"53663886+yomnaosamaAlsharqawy@users.noreply.github.com"
] |
53663886+yomnaosamaAlsharqawy@users.noreply.github.com
|
e48564eed20ae6a6fd6774c90b624416cca50aff
|
216da2a4daff108e08ccf76742fd13e16b8dc36b
|
/Latest Draft - Flask/scrape_SONGS_to_MONGO.py
|
56c345f0b2c32d432ef829eea6840d5df0082492
|
[] |
no_license
|
powest3/Music-through-the-Years
|
81686302993a1c1f2c2aea3cdf988de625edc7ed
|
c1d9a71ace1aa0ed5dd1231c4980faf5044caa1a
|
refs/heads/master
| 2020-12-22T17:49:39.708201
| 2020-02-06T22:35:18
| 2020-02-06T22:35:18
| 236,879,658
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,130
|
py
|
from splinter import Browser
from bs4 import BeautifulSoup
import pandas as pd
from pprint import pprint
#----------------------------------------------------------------------
import pymongo
#----------------------------------------------------------------------
# def scrape():
# for Windows:
executable_path = {'executable_path': 'chromedriver.exe'}  # must be a dict, as in the Mac example below
# for Mac:
#executable_path = {'executable_path': '/usr/local/bin/chromedriver'}
browser = Browser('chrome', **executable_path)
# return results
#----------------------------------------------------------------------
#----------------------------------------------------------------------
#######################################################################
def get_billboard_table(browser_input, year_input, url_input):
# print('\n' + '\n' + '\n' + '\n' + url_input)
    browser_input.visit(url_input)
    html = browser_input.html
song_soup = BeautifulSoup(html, 'html.parser')
result = song_soup.find('table', class_='archive-table')
rows = result.findChildren(['th', 'tr'])
result_list = []
condensed_results = []
row_count = 0
date_value = ''
results = {}
for row in rows:
results = {
"year": year_input,
"issue_date": [],
"title" : "",
"artist": ""
}
td_count = 0
cells = row.findChildren('td')
for cell in cells:
td_count = td_count + 1
value = cell.string
if (td_count == 1):
results["issue_date"].append(value)
date_value = value
if (td_count == 2):
results["title"] = value
if (td_count == 3):
results["artist"] = value
row_count = row_count + 1
condensed_results.append(results)
#print("The value in this cell is %s" % value)
result_list.append(results)
if (results["title"] == '') & (row_count > 0):
index_row = row_count - 1
condensed_results[index_row]["issue_date"].append(date_value)
#--------------------------------------------------------------------
# for each_con_res in range(len(condensed_results)):
# #print(str(each_con_res))
# print(condensed_results[each_con_res])
# print('\n')
# #print(condensed_results[each_con_res]["issue_date"])
return condensed_results
#######################################################################
#######################################################################
years_list = range(1958, 2019, 1)
billboard_years_url_list = []
list_of_song_dicts = []
for year in years_list:
#print(year)
billboard_url = "https://www.billboard.com/archive/charts/" + str(year) + "/HSI"
#print(billboard_url)
#billboard_years_url_list.append(billboard_url)
list_of_song_dicts.append(get_billboard_table(browser, year, billboard_url))
print('\n')
print("List_of_song_dicts - LENGTH: " + str(len(list_of_song_dicts)) + '\n')
#######################################################################
browser.quit()
#######################################################################
#######################################################################
# Initialize PyMongo to work with MongoDBs
conn = 'mongodb://localhost:27017'
client = pymongo.MongoClient(conn)
db = client.billboard_db
#######################################################################
collection = db.all_number_one_songs
for iteration in range(len(list_of_song_dicts)):
collection.insert_many(list_of_song_dicts[iteration])
#######################################################################
# collection = db.first_ten_years_top_hits
# for iteration in range(0, 10):
# # print(list_of_song_dicts[iteration])
# # print('\n')
# collection.insert_many(list_of_song_dicts[iteration])
#######################################################################
#######################################################################
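# Verification sketch (editor's illustration): quick sanity queries against the
# collection populated above; field names match the dicts built in
# get_billboard_table.
# print(db.all_number_one_songs.count_documents({}))
# print(db.all_number_one_songs.find_one({"year": 1958}))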
|
[
"noreply@github.com"
] |
powest3.noreply@github.com
|
9e5d495620303d919cdf634b801667ae516a9527
|
75a2c50c813eeb4099da45eef6159780f036cf1d
|
/app/run.py
|
58eace4273d267ca1870e8da54dabd8f2f8a444f
|
[] |
no_license
|
jpthewes/DSND_Disaster_Response_Pipeline
|
f1ba742caabf013be59f804042ca2711c1938033
|
d8c3a1203b390c42863277d421a0b74cb11f6e37
|
refs/heads/master
| 2023-01-07T20:06:55.047640
| 2020-11-05T08:12:51
| 2020-11-05T08:12:51
| 308,960,040
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,292
|
py
|
import json
import plotly
import pandas as pd
from nltk.stem import WordNetLemmatizer
from nltk.tokenize import word_tokenize
from flask import Flask
from flask import render_template, request, jsonify
from plotly.graph_objs import Bar
from sklearn.externals import joblib
from sqlalchemy import create_engine
app = Flask(__name__)
def tokenize(text):
'''
Tokenizes and lemmatizes a given input text.
Arguments:
text: string to be tokenized.
Returns:
clean_tokens: list of tokenized words.
'''
tokens = word_tokenize(text)
lemmatizer = WordNetLemmatizer()
clean_tokens = []
for tok in tokens:
clean_tok = lemmatizer.lemmatize(tok).lower().strip()
clean_tokens.append(clean_tok)
return clean_tokens
# load data
engine = create_engine('sqlite:///../data/DisasterResponse.db')
df = pd.read_sql_table('messages_categories', engine)
# load model
model = joblib.load("../models/classifier.pkl")
# index webpage displays cool visuals and receives user input text for model
@app.route('/')
@app.route('/index')
def index():
# extract data needed for visuals
# TODO: Below is an example - modify to extract data for your own visuals
genre_counts = df.groupby('genre').count()['message']
genre_names = list(genre_counts.index)
classes = df.columns[4:]
class_counts = []
for name in df.columns[4:]:
class_counts.append(df[name].sum())
# create visuals
graphs = [
{
'data': [
Bar(
x=genre_names,
y=genre_counts
)
],
'layout': {
'title': 'Distribution of Message Genres',
'yaxis': {
'title': "Count"
},
'xaxis': {
'title': "Genre"
}
}
},
{
'data': [
Bar(
x=classes,
y=class_counts
)
],
'layout': {
'title': 'Distribution of Message Classes',
'yaxis': {
'title': "Count"
},
'xaxis': {
'title': "Class"
}
}
}
]
# encode plotly graphs in JSON
ids = ["graph-{}".format(i) for i, _ in enumerate(graphs)]
graphJSON = json.dumps(graphs, cls=plotly.utils.PlotlyJSONEncoder)
# render web page with plotly graphs
return render_template('master.html', ids=ids, graphJSON=graphJSON)
# web page that handles user query and displays model results
@app.route('/go')
def go():
# save user input in query
query = request.args.get('query', '')
# use model to predict classification for query
classification_labels = model.predict([query])[0]
classification_results = dict(zip(df.columns[4:], classification_labels))
# This will render the go.html Please see that file.
return render_template(
'go.html',
query=query,
classification_result=classification_results
)
def main():
app.run(host='0.0.0.0', port=3001, debug=True)
if __name__ == '__main__':
main()
|
[
"jpthewes@gmail.com"
] |
jpthewes@gmail.com
|
fcfbe8b352f3c2c44feb97ded66d062cbedbd910
|
eb71f02be8fdb55455a455d4c273ebaa07553746
|
/PoseData.py
|
70c653dd1dc70a686631e4591a2a65c74f25c5f6
|
[] |
no_license
|
Bryan-bai/PoseNet
|
509148431ccfb2b7f451f86f5cfe72911f86a89a
|
457ec72df4098c37ab90a882129e0773abbabc7c
|
refs/heads/master
| 2020-04-02T13:28:39.646308
| 2017-08-22T01:04:57
| 2017-08-22T01:04:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,186
|
py
|
import torch.utils.data as data
from PIL import Image
import os
import os.path
import numpy as np
import torch
# Get image path and pose from dataset_train.txt.
# There is an invalid value in dataset_train.txt,
# so you have to delete it manually.
def make_dataset(dir, train=True):
# It needs to be optimized more.
if train:
paths = np.genfromtxt(os.path.join(dir, 'dataset_train.txt'),
dtype=str, delimiter=' ', skip_header=3,
usecols=[0])
poses = np.genfromtxt(os.path.join(dir, 'dataset_train.txt'),
dtype=np.float32, delimiter=' ', skip_header=3,
usecols=[1, 2, 3, 4, 5, 6, 7])
else:
paths = np.genfromtxt(os.path.join(dir, 'dataset_test.txt'),
dtype=str, delimiter=' ', skip_header=3,
usecols=[0])
poses = np.genfromtxt(os.path.join(dir, 'dataset_test.txt'),
dtype=np.float32, delimiter=' ', skip_header=3,
usecols=[1, 2, 3, 4, 5, 6, 7])
# sort by path name
order = paths.argsort()
paths = paths[order]
poses = poses[order]
return paths, poses
def default_loader(path):
return Image.open(path).convert('RGB')
class PoseData(data.Dataset):
def __init__(self, root, transform=None, target_transform=None,
loader=default_loader, train=True):
paths, poses = make_dataset(root, train)
self.root = root
self.paths = paths
self.poses = poses
self.transform = transform
self.target_transform = target_transform
self.loader = loader
def __getitem__(self, index):
path = self.paths[index]
target = self.poses[index]
img = self.loader(os.path.join(self.root, path))
if self.transform is not None:
img = self.transform(img)
if self.target_transform is not None:
target = self.target_transform(target)
target = torch.from_numpy(target)
return img, target
def __len__(self):
return len(self.paths)
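# Usage sketch (editor's illustration): wiring PoseData into a DataLoader.
# The dataset root ('KingsCollege') and the transform pipeline are assumptions,
# not part of the original file.
if __name__ == '__main__':
    from torchvision import transforms
    transform = transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
    ])
    dataset = PoseData('KingsCollege', transform=transform, train=True)
    loader = data.DataLoader(dataset, batch_size=32, shuffle=True)
    images, poses = next(iter(loader))
    print(images.shape, poses.shape)  # expect [32, 3, 224, 224] and [32, 7]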
|
[
"bellatoris@snu.ac.kr"
] |
bellatoris@snu.ac.kr
|
f6065438421f77d504aa8b1cc4a9e9942525a7ff
|
c82b85a57ebb6f019c060fe015d21f8ab5a155d3
|
/data_processing_resize_cyr.py
|
ce7104e30e538462f017458b5d86fce09edfd60e
|
[] |
no_license
|
buckeye76guy/ECE_FINAL
|
9bc11c59c86d4132c8ffd9e5b3eea6a4f5817e2c
|
c316b35a83d34445d7105313958264acfede418e
|
refs/heads/master
| 2021-01-20T09:51:41.534621
| 2017-05-04T21:25:20
| 2017-05-04T21:25:20
| 90,293,776
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,992
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 28 17:05:44 2017
@author: Josiah Hounyo
This script will create the training and testing sets.
I wish to use 75% of the number of pictures in each folder
for training and 25% for testing
"""
from scipy.misc import imresize
import scipy.ndimage as nd
import numpy as np
import os
# current directory has two folders
flders = ['Cyrillic']
# get the number of observations to be placed in the train
# and test set
nrow_tr, nrow_te = 0, 0
for fld in flders:
os.chdir(fld)
# inside each directory is a list of directories
subflders = os.listdir() # subfolders
for subfld in subflders:
nrow = len(os.listdir(subfld)) # number of photos in current subfolder
add_tr = int(np.floor(75*nrow/100)) # to be added to nrow_tr
nrow_tr += add_tr
nrow_te += nrow - add_tr
os.chdir('..') # get back to parent
x_train = np.zeros((nrow_tr, 28, 28)) # I already know size of images
# y_train = np.zeros((nrow_tr, 1))
y_train = [] # because they are characters! use int mapping for each :)
x_test = np.zeros((nrow_te, 28, 28))
# y_test = np.zeros((nrow_te, 1))
y_test = []
# it will be assumed that all photos have the same shape.
# instead of using gaussian filter, I will resize to 28 by 28
ind_tr, ind_te = 0, 0 # cursor to keep track of location in arrays
for fld in flders:
os.chdir(fld)
subflders = os.listdir()
for subfld in subflders:
nrow = len(os.listdir(subfld))
add_tr = int(np.floor(75*nrow/100))
# y_train/test is the name of the folder
os.chdir(subfld)
k = 0 # number of items to add to train set
pics = os.listdir() # pictures in folder
while k < len(pics):
img = nd.imread(pics[k])
if sorted(img.shape) != [4, 278, 278]:
k += 1 # only this needs be increased
# this way, ind_tr and ind_te will be such that only ending
# rows in our matrices are all 0's. easily removed
continue
img = img.reshape((278, 278*4))
img = imresize(img, (28,28))
if k < add_tr:
x_train[ind_tr,:,:] = img
y_train.append(subfld) # subfld is a character
ind_tr += 1
else:
x_test[ind_te,:,:] = img
y_test.append(subfld)
ind_te += 1
k += 1
os.chdir('..')
os.chdir('..')
# process y_train and y_test: this will allow us to save them as integers
chars = list(set(y_train))
char_to_ix = {ch: i for i, ch in enumerate(chars)}
y_train = list(map(lambda x: char_to_ix.get(x), y_train))
y_test = list(map(lambda x: char_to_ix.get(x), y_test))
np.save('xtrain_cyr.npy', x_train)
np.save('xtest_cyr.npy', x_test)
np.save('ytrain_cyr.npy', np.array(y_train))
np.save('ytest_cyr.npy', np.array(y_test))
# some characters in both Latin and Cyrillic are the same ...
np.save('char_ix_cyr.npy', char_to_ix)
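# Reload sketch (editor's illustration): reading back the arrays saved above.
# The dict round-trips through np.save as a 0-d object array, hence
# allow_pickle and .item().
# x_train = np.load('xtrain_cyr.npy')
# y_train = np.load('ytrain_cyr.npy')
# char_to_ix = np.load('char_ix_cyr.npy', allow_pickle=True).item()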
|
[
"hounyo.1@osu.edu"
] |
hounyo.1@osu.edu
|
0743a1ec37db60e406db3a45d7a67ddd7d484795
|
9fd628cfc4427e438c8ae312deea2c893c9a78d0
|
/TP3/TSP_aprox/euler.py
|
6bc3e2ad121fbe02a3f3e2b5cdcbe4420397cfa9
|
[
"Apache-2.0"
] |
permissive
|
GFibrizo/TPS_7529
|
c1ba3f99f433b024dd3985fe9740df816e1e3094
|
47daadf38002ea5c41c13584af37a24757b9c9fe
|
refs/heads/master
| 2021-01-14T12:40:41.373105
| 2016-12-05T03:05:11
| 2016-12-05T03:05:11
| 68,769,050
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 561
|
py
|
def sub(visited, _cur, graph):
if not graph:
return visited + [_cur]
for i, edge in enumerate(graph):
cur, nex, weight = edge
if _cur not in edge:
continue
_graph = graph[:]
del _graph[i]
if _cur == cur:
res = sub(visited + [cur], nex, _graph)
else:
res = sub(visited + [nex], cur, _graph)
if res:
return res
def find_eulerian_tour(graph):
head, tail = graph[0], graph[1:]
prev, nex, weight = head
return sub([prev], nex, tail)
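# Quick check (editor's illustration): edges are (node, node, weight) triples,
# matching how sub() unpacks them; this triangle graph has the tour [1, 2, 3, 1].
if __name__ == '__main__':
    print(find_eulerian_tour([(1, 2, 1), (2, 3, 1), (3, 1, 1)]))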
|
[
"ereyes@summasolutions.net"
] |
ereyes@summasolutions.net
|
1cd3cdd1325d86f6c7b9cccec91002b3e8240015
|
ddf4c15d6553a2c4d1941478ee2b28f50072c8ba
|
/zad2.py
|
40a3b8fffd6bce686536c13e7998f943825376fa
|
[] |
no_license
|
dyju1992/zpiotdd
|
cc9972a6ac84fab39c8659045542da981bae0a66
|
84fd3d399902453b607b44cca65f04c502ad89cf
|
refs/heads/master
| 2021-01-10T12:27:22.319743
| 2015-06-05T11:52:09
| 2015-06-05T11:52:09
| 36,930,194
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 273
|
py
|
__author__ = 'dyju'
Wynik=[1,1]
suma=0
for i in range (2,40):
Wynik.append(Wynik[i-1]+Wynik[i-2])
if Wynik[i]<4000000:
print (i, ". ", Wynik[i])
for Wyraz in Wynik:
if Wyraz<4000000:
if Wyraz%2==0:
suma+=Wyraz
print("suma: ",suma)
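# Editor's note: the loop above sums the even-valued Fibonacci terms below
# 4,000,000 (the classic Project Euler problem 2); the printed "suma" should
# come out as 4613732.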
|
[
"dyju1992@gmail.com"
] |
dyju1992@gmail.com
|
3c337d3af4af5b0f08e6591629f083887ff89ed3
|
2c8d92f484836f9c810aa96033c85cd2fd570c6b
|
/project/프로그래머스/파이썬을 파이썬 답게/문자열 정렬하기.py
|
07f2745416b1588e4922f89ed4a0cb816a1a6fb4
|
[] |
no_license
|
Donghyun-34/KUCIS
|
92814e188d51fd2bed9060a884b4be48a7694d74
|
33bd389d885d5167b3b6f1fd4d2adb816d4f068a
|
refs/heads/master
| 2022-12-22T14:46:33.668159
| 2020-09-16T07:25:37
| 2020-09-16T07:25:37
| 284,490,018
| 1
| 0
| null | 2020-09-16T07:25:38
| 2020-08-02T15:36:04
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 207
|
py
|
"""
String output alignment: ljust, center, rjust
"""
str = " sdfasdf adf "
print(str.ljust(30)) # left-aligned
print(str.center(30)) # centered
print(str.rjust(30)) # right-aligned
|
[
"noreply@github.com"
] |
Donghyun-34.noreply@github.com
|
1e9037f5197cc1f70db138d0bff33cfe5bf9c30a
|
127c63dfc7da3799d075f1dcb046e0210c7711ac
|
/Class_problems/Problem_14new_FullTwoPhaseComplex/IMPES.py
|
1bb24c83a396fae1ab233dccfe433964d26df785
|
[] |
no_license
|
mashadab/Reservoir-Simulator
|
db994da3c274e666b141f3ff46de645fc4c18977
|
2be0b8bc994096699133a315f79b63bb67a3bb2b
|
refs/heads/master
| 2023-04-16T20:54:08.674622
| 2022-05-25T17:13:48
| 2022-05-25T17:13:48
| 312,726,032
| 11
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,379
|
py
|
"""
reservoir simulation assignment 10
1D reservoir simulation Q10: Main file (Implicit pressure explicit saturation)
Author: Mohammad Afzal Shadab
Email: mashadab@utexas.edu
Date modified: 12/4/2020
"""
#import inbuilt libraries
import numpy as np
from scipy.sparse import lil_matrix, csr_matrix, identity
from scipy.sparse.linalg import inv
from scipy.sparse.linalg import spsolve
from scipy.sparse.construct import eye
import matplotlib.pyplot as plt
import time as timer
from math import floor, ceil
import warnings
warnings.filterwarnings("ignore")
#importing personal libraries
from input_file_2D import inputfile
from myarrays import myarrays
from updatewells import updatewells
from rel_perm import rel_perm
from spdiaginv import spdiaginv
from init_plot import initial_plot
from postprocess import postprocess
#making simulation and IC classes
class numerical:
def __init__(self):
self.Bw = []
class reservoir:
def __init__(self):
self.dt = []
class fluid:
def __init__(self):
self.dt = []
class grid:
def __init__(self):
self.xmin = []
class BC:
def __init__(self):
self.xmin = []
class IC:
def __init__(self):
self.xmin = []
class petro:
def __init__(self):
self.xmin = []
class well:
def __init__(self):
self.xmin = []
tprogstart = timer.perf_counter()  # time.clock() was removed in Python 3.8
#loading inputfile
inputfile(fluid,reservoir,petro,numerical,BC,IC,well)
#Implicit pressure and explicit saturation for update
#looping through time
t = np.empty((100000)) #dimensional time
t[0]= 0
t_D = np.empty((100000))#non dimensional time
t_D[0]= 0
k = 0
PV = 0
P = np.copy(IC.P)
Pw = np.copy(IC.Pw)
Sw = np.array(np.copy(IC.Sw))
Sw_hyst=np.empty((numerical.N,2))
Sw_hyst[:,0]=Sw[:,0]
nmax = ceil(numerical.tfinal / numerical.dt)
fw = np.empty((nmax +1 )) #fractional flow of wetting phase
fw[0]= 0
P_plot= np.zeros((numerical.N,nmax + 1)) #matrix to save pressure
P_plot[:,0] = IC.P[:,0]
Sw_plot= np.zeros((numerical.N, nmax + 1)) #matrix to save pressure
Sw_plot[:,0]= IC.Sw[:,0]
well.typetime = np.kron(np.ones((nmax,1)),np.transpose(well.type))
well.constrainttime = np.kron(np.ones((nmax,1)),np.transpose(well.constraint))
well.fw = np.kron(np.ones((nmax,1)),np.transpose(well.constraint))
while (t[k] < numerical.tfinal): #non dimensional time marching
if(k == nmax/4 or k == nmax/2 or k == nmax*3/4 or k == nmax): print(k, t[k],Sw,P)
P_old = np.copy(P) #Placeholdering the old array
Sw_old= np.copy(Sw) #Placeholdering the old array
#Switching according to the problem
##############################################################################
#Switching after 500 days
if t[k] > numerical.tswitch:
well.type= [[2],[2],[2],[2],[2],[2],[2],[2],[1],[1]] # 1 for rate, 2 for BHP
well.constraint = [[502.5],[502.5],[502.5], [502.5], [502.5], [502.5], [502.5], [502.5],[500*5.61], [500*5.61]] # rate = scf/day (+ for injector); BHP = psi (always +ve)
elif k > 0:
#Well 1
if Q[well.block[0][0],0] > 0: #injector well
# Well 1 BHP
if well.constraint[0][0] / well.Jwvec[0][0] + P[well.block[0][0],0] <= 502.5:
well.type[0][0] = 2
well.constraint[0][0] = 502.5
else: #producer well
# Well 1 BHP
if well.fw[0,k] * well.constraint[0][0] / well.Jwvec[0][0] + P[well.block[0][0],0] <= 502.5:
well.type[0][0] = 2
well.constraint[0][0] = 502.5
# Well 2
if Q[well.block[1][0],0] > 0: #injector well
# Well 2 BHP
if well.constraint[1][0] / well.Jwvec[1][0] + P[well.block[1][0],0] <= 502.5:
well.type[1][0] = 2
well.constraint[1][0] = 502.5
else: #producer well
# Well 2 BHP
if well.fw[1,k] * well.constraint[1][0] / well.Jwvec[1][0] + P[well.block[1][0],0] <= 502.5:
well.type[1][0] = 2
well.constraint[1][0] = 502.5
well.typetime[k,:] = np.copy(np.transpose(well.type[:][:]))
well.constrainttime[k,:] = np.copy(np.transpose(well.constraint[:][:]))
###############################################
#Calculating the arrays
Tw, To, T, d11, d12, d21, d22, D, G, Pc, Pw = myarrays(fluid,reservoir,petro,numerical,BC,P,Sw,Sw_hyst)
#updating the wells
well, Qw, Qo, Jw, Jo = updatewells(reservoir,fluid,numerical,petro,P,Sw,well)
J = -d22 @ ( spdiaginv(d12) @ Jw ) + Jo
Q = -d22 @ ( spdiaginv(d12) @ Qw ) + Qo + 800.0 * J @ np.ones((numerical.N,1)) #Pwf = 800 psi
if numerical.method == 'IMPES':
IM = T + J + D #implicit part coefficient in Eq. 3.44
EX = D @ P_old + Q + G #explicit part or RHS of Eq. 3.44
P = np.transpose([spsolve(IM,EX)]) #solving IM*P = EX or Ax=B
Sw = Sw + spdiaginv(d12) @ (-Tw @ (P - (fluid.rhow/144.0) * numerical.D - Pc) - d11 @ (P - P_old) + Qw + Jw @ (800.0 - P)) #explicit saturation
for i in range(0, numerical.N):
if Sw[i,0] > Sw_old[i,0] and Sw_hyst[i,1] == 0: # [i,1] is a flag
Sw_hyst[i,0] = Sw[i,0]
Sw_hyst[i,1] = 1.0
elif Sw[i,0] < Sw_old[i,0]:
Sw_hyst[i,0] = Sw[i,0]
k = k+1
P_plot[:,k] = P[:,0]
Sw_plot[:,k]= np.array(Sw)[:,0]
t[k]= t[k-1] + numerical.dt
t_D[k]= well.constraint[0][0]*t[k-1]/(reservoir.L*reservoir.W*reservoir.h*reservoir.phi[0,0])
for i in range(0,len(well.x)):
kblock = well.block[i][0]
krw,kro = rel_perm(petro,Sw[kblock,0])
M = (kro*fluid.muw[kblock,0])/(krw*fluid.muo[kblock,0])
well.fw[i,k] = 1/(1+M)
P_plot[np.argwhere(reservoir.permx < 0.01)] = np.nan
tprogend = timer.perf_counter()
print('Time elapsed in the program', tprogend - tprogstart)
np.savez(f'Project2_n{numerical.N}', P_plot = P_plot, Sw_plot = Sw_plot, Nx = numerical.Nx, Ny = numerical.Ny,fw =fw,t = t, x1 = numerical.x1, y1 = numerical.y1)
#post process
P_plot[np.argwhere(numerical.D==0),:] = np.nan
Sw_plot[np.argwhere(numerical.D ==0),:] = np.nan
'''
#Create the plots
initial_plot(reservoir,numerical,P_plot[:,0],t[0],'P')
initial_plot(reservoir,numerical,P_plot[:,500],t[500],'P')
initial_plot(reservoir,numerical,P_plot[:,750],t[750],'P')
initial_plot(reservoir,numerical,P_plot[:,nmax],t[nmax],'P')
initial_plot(reservoir,numerical,Sw_plot[:,0],t[0],'Sw')
initial_plot(reservoir,numerical,Sw_plot[:,500],t[500],'Sw')
initial_plot(reservoir,numerical,Sw_plot[:,750],t[750],'Sw')
initial_plot(reservoir,numerical,Sw_plot[:,nmax],t[nmax],'Sw')
postprocess(P_plot,numerical,well,t)
'''
if numerical.Ny==1:
plt.figure()
plt.plot(numerical.xc/reservoir.L,Sw_plot[:,47],label=r'$t_D=0.1$')
plt.plot(numerical.xc/reservoir.L,Sw_plot[:,47*2],label=r'$t_D=0.2$')
plt.plot(numerical.xc/reservoir.L,Sw_plot[:,47*3],label=r'$t_D=0.3$')
plt.xlabel(r'$x_D$')
plt.ylabel(r'$S_w$')
plt.legend(loc='best', shadow=False, fontsize='medium')
plt.savefig('SwvsT.png',bbox_inches='tight', dpi = 600)
plt.figure()
plt.plot(t_D[0:k],fw[0:k])
plt.ylabel(r'Water cut')
plt.xlabel(r'Pore volumes injected')
plt.savefig('watercutvsPVI.png',bbox_inches='tight', dpi = 600)
|
[
"mashadab@mit.edu"
] |
mashadab@mit.edu
|
36bf0dad8f9fa3750a9ba89a0d3034ff7da0ef39
|
faca1cdb2aa759bde965203d023e8a38cbfcb68f
|
/edibot/urls.py
|
c4d7304c7b091b9b19f9d2f374d8eb2f288253f1
|
[] |
no_license
|
ejesse/edibot
|
c446649abb8f3fed06a6bd1bfda9c28bdd360270
|
1dd7596e3b8fcb5d67661fd02cdb358c22a356ff
|
refs/heads/master
| 2021-01-09T21:46:09.732090
| 2016-04-06T16:52:49
| 2016-04-06T16:52:49
| 55,466,647
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 762
|
py
|
"""edibot URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
urlpatterns = [
url(r'^admin/', admin.site.urls),
]
|
[
"jesse@jesseemery.com"
] |
jesse@jesseemery.com
|
1de0228910154b836dfe841a7bb5ac8a6618d63c
|
9010bd6f1e4d0725c34886123aca16e49ec666ff
|
/backend/NGSI_Interpreter/Tests_Getting_and_Saving_Entities.py
|
ef7f17c6dde5f50d8448479a4cf6e958b6bcedd1
|
[] |
no_license
|
salahallali/ngsildCollectorDBApplication-1-ab261dc66f757ccd2656d35c9952b3e9edd8eb93
|
bdbc562c5f4fb3dc71699a425853d3363c4782fe
|
5c7c19d7e552acb4cd14b9502e0acbfb9e9a0c24
|
refs/heads/master
| 2023-06-11T16:07:27.982892
| 2021-07-05T12:20:17
| 2021-07-05T12:20:17
| 383,131,372
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,255
|
py
|
import sys
from backend.NGSI_Interpreter.Get_Entities import Get_entity_By_ID
from backend.NGSI_Interpreter.Parse_NGSILD_Response import Parse_NGSILD_to_JSON
from backend.NGSI_Interpreter.Save_Entity_To_DB import Save_Entity_To_Db
try:
########### tests ##########################
id ='urn:ngsi-ld:WaterQualityObserved:waterqualityobserved:Sevilla:D1'
id_air = 'urn:ngsi-ld:AirQualityObserved:Airqualityobserved:Sevilla:D1'
req = '?type={}'.format('WaterQualityObserved')
###############################################################
# Example of getting Entity By ID (id or id_air)
NGSI_Response = Get_entity_By_ID(id_air)
# Example of getting Entity By Type
#NGSI_Response = Get_entity_By_Type(req)
print("Context Broker's Response: ", NGSI_Response)
#print(NGSI_Response.status_code, NGSI_Response.text)
entity= Parse_NGSILD_to_JSON(NGSI_Response)
print(entity)
    #setting Entity ID in the Response file to "_id" instead of "id" to avoid redundancy in the Database
entity['_id'] = entity.pop('id')
#Saving Entity to MongoDB
save_entity = Save_Entity_To_Db(entity)
print('[+] entity saved..........')
except Exception:
    print("Unexpected error:", sys.exc_info()[1])
|
[
"salahallali12.as@gmail.com"
] |
salahallali12.as@gmail.com
|
1b5eb86e468c6370750cbef2d8b60a6198216f70
|
41e237ae3dc7001b5132eee47fd9a5996cb56839
|
/python_socket_server/server.py
|
2c7b23966e30f4127e7c09280acf7950c79f95c7
|
[] |
no_license
|
EndruK/scribbles
|
9086fccffc8877112f036e71c29be8d1baef6e47
|
3e424888e777270d4e51f62cbfe060b975db7404
|
refs/heads/master
| 2020-08-15T06:33:54.204041
| 2020-02-19T12:06:06
| 2020-02-19T12:06:06
| 215,293,944
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,950
|
py
|
#!/usr/bin/python3
import socket
HOST = '127.0.0.1'
PORT = 8101
class Server:
s: socket.socket
def __init__(self, host, port) -> None:
self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.s.bind((host, port))
def start_server(self) -> None:
self.s.listen()
print("server started and listening on port ", self.s.getsockname())
def send_message(self, connection, message) -> None:
# append $EOS$
message += "\n$EOS$"
connection.sendall(str.encode(message))
connection.close()
def get_message(self):
conn, addr = self.s.accept()
print("connection received by ", addr)
data = conn.recv(1024)
return data, conn
if __name__ == "__main__":
s = Server(HOST, PORT)
s.start_server()
while True:
data, conn = s.get_message()
print(bytes.decode(data))
text = """Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ullamcorper a lacus vestibulum sed arcu non odio. Ut tellus elementum sagittis vitae et. Interdum posuere lorem ipsum dolor sit amet. Ut pharetra sit amet aliquam. A arcu cursus vitae congue mauris rhoncus aenean. Tristique senectus et netus et malesuada fames. Arcu bibendum at varius vel pharetra vel turpis nunc eget. Ut faucibus pulvinar elementum integer enim neque. In mollis nunc sed id semper risus in. Auctor eu augue ut lectus arcu bibendum. Orci eu lobortis elementum nibh tellus. Magna etiam tempor orci eu. Diam volutpat commodo sed egestas egestas fringilla phasellus faucibus. Maecenas accumsan lacus vel facilisis volutpat est velit.
Quam adipiscing vitae proin sagittis nisl. In nisl nisi scelerisque eu. Duis ultricies lacus sed turpis tincidunt id aliquet risus. Sit amet aliquam id diam maecenas ultricies mi. Id porta nibh venenatis cras. Congue quisque egestas diam in. Odio ut enim blandit volutpat maecenas volutpat blandit. Quam adipiscing vitae proin sagittis nisl rhoncus mattis. Orci porta non pulvinar neque laoreet suspendisse interdum consectetur libero. In massa tempor nec feugiat nisl pretium. Id interdum velit laoreet id donec.
Sollicitudin aliquam ultrices sagittis orci a scelerisque purus semper. Nam aliquam sem et tortor consequat id porta nibh venenatis. Condimentum lacinia quis vel eros donec ac odio tempor orci. In egestas erat imperdiet sed euismod nisi porta lorem. Id diam vel quam elementum pulvinar etiam non quam. Aliquam ut porttitor leo a diam sollicitudin. Sed elementum tempus egestas sed sed risus pretium. Bibendum ut tristique et egestas quis ipsum suspendisse. Maecenas sed enim ut sem viverra aliquet eget. Semper viverra nam libero justo. Vitae suscipit tellus mauris a diam maecenas sed enim. A iaculis at erat pellentesque adipiscing commodo elit at imperdiet. Vestibulum rhoncus est pellentesque elit ullamcorper. Sed odio morbi quis commodo odio. Arcu cursus vitae congue mauris. Aliquet nibh praesent tristique magna sit amet purus gravida. Neque volutpat ac tincidunt vitae semper quis lectus nulla at. Habitant morbi tristique senectus et netus et malesuada fames ac. Vestibulum morbi blandit cursus risus at. Nibh mauris cursus mattis molestie a iaculis at erat pellentesque.
Id consectetur purus ut faucibus pulvinar elementum integer enim neque. Eu lobortis elementum nibh tellus molestie nunc. Orci porta non pulvinar neque laoreet suspendisse interdum consectetur. Faucibus et molestie ac feugiat sed lectus vestibulum mattis ullamcorper. Suspendisse sed nisi lacus sed viverra. Morbi blandit cursus risus at ultrices mi tempus. Vel eros donec ac odio. Urna nec tincidunt praesent semper feugiat nibh sed pulvinar. Diam maecenas sed enim ut sem. Non nisi est sit amet facilisis magna. Condimentum vitae sapien pellentesque habitant morbi tristique senectus et netus.
Mi bibendum neque egestas congue quisque egestas diam. Integer quis auctor elit sed vulputate mi sit amet. Sed risus ultricies tristique nulla aliquet enim tortor. Malesuada fames ac turpis egestas integer eget aliquet. Neque sodales ut etiam sit amet nisl purus in. Nisl nisi scelerisque eu ultrices vitae auctor eu augue ut. Luctus accumsan tortor posuere ac ut consequat semper viverra nam. Vel elit scelerisque mauris pellentesque pulvinar pellentesque. In cursus turpis massa tincidunt dui ut ornare lectus. Eu non diam phasellus vestibulum lorem sed risus ultricies tristique. In fermentum posuere urna nec tincidunt praesent. Semper auctor neque vitae tempus quam pellentesque nec. Placerat in egestas erat imperdiet sed euismod nisi. Gravida quis blandit turpis cursus in hac. Lectus nulla at volutpat diam ut. Faucibus a pellentesque sit amet porttitor eget dolor morbi non. Amet aliquam id diam maecenas ultricies mi eget mauris. Risus nec feugiat in fermentum. Elementum pulvinar etiam non quam lacus.
"""
s.send_message(conn, text)
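# Companion client sketch (editor's illustration, not part of the original
# file): reads until the "$EOS$" sentinel that send_message appends, mirroring
# the framing used above.
# import socket
# with socket.create_connection((HOST, PORT)) as c:
#     c.sendall(b"hello server")
#     buf = b""
#     while not buf.endswith(b"$EOS$"):
#         part = c.recv(1024)
#         if not part:
#             break
#         buf += part
#     print(buf.decode()[:-len("\n$EOS$")])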
|
[
"andre.karge@uni-weimar.de"
] |
andre.karge@uni-weimar.de
|
d138477107990d05465e949d4b0d8539eba30e6b
|
26df5f85b549a5a14369e59188943e6c849de49e
|
/5_order_frequency/order_frequency.py
|
543a4de1a941151c895a558b5ddec29004998dee
|
[] |
no_license
|
abakir/NGS-2.0
|
3aefbcbe6cbb7dbb7563c17077653c83e9ba1592
|
49071c1423b23bbd6c767cb0aa85c9e948b46abe
|
refs/heads/master
| 2023-06-21T20:10:42.938128
| 2016-01-13T23:23:13
| 2016-01-13T23:23:13
| 48,489,495
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,330
|
py
|
#!/home/cloudera/local/lib/python2.6/site-packages/bin/python
# Author : Sai Sree Kamineni
# Date created : Jan 10, 2016
# Execution frequency : Weekly
# Inputs refresh frequency : Weekly
# Input : data_input/shopify/export_orders.csv
# Output : data_output/order_frequency.csv
# Purpose : Gives the count of customers whose average days between orders fall in the given ranges
import pandas as pd
import numpy as np
import yaml
import logging
import time
import os
with open("config.yaml", 'r') as ymlfile:
cfg = yaml.load(ymlfile)
def make_sure_path_exists(path):
if (os.path.isdir(path) == False):
os.makedirs(path)
make_sure_path_exists(cfg['root'] + cfg['dir_logs'])
make_sure_path_exists(cfg['root']+cfg['dir_data_output'])
# create logger
logger = logging.getLogger(cfg['log_order_frequency'])
logger.setLevel(logging.DEBUG)
# create console handler and set level to debug
ch = logging.FileHandler(cfg['root'] + cfg['dir_logs'] + cfg['log_order_frequency'] + "_" + time.strftime("%Y_%m_%d_%H_%M_%S") + ".log" )
ch.setLevel(logging.DEBUG)
# create formatter
formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
# add formatter to ch
ch.setFormatter(formatter)
# add ch to logger
logger.addHandler(ch)
df = pd.read_csv(cfg['root']+cfg['dir_data_shopify']+cfg["input_orders"],low_memory=False)
logger.debug("Data Frame df created")
# take required fields
df1=df[['Email', 'Created at' ]]
#let there be only one row for each order
df1=df1.drop_duplicates().reset_index().drop('index',1)
#getting only date from created at field
df1['Created at'] = df1.apply(lambda x: x['Created at'][:10], axis = 1)
df1=df1.drop_duplicates().reset_index().drop('index',1)
df1.columns=['Email', 'Date']
df1.loc[:, 'Difference'] = 0
df1.loc[:, 'Count'] = 0
df1.loc[:, 'Date'] =pd.to_datetime(df1.Date)
df1=df1.sort_values(by = ['Email', 'Date']).reset_index().drop('index',1)
n = 1
for i in range(0,max(df1.index)):
if(df1.iloc[i,0]==df1.iloc[i+1,0]):
n = n + 1 #count total number of orders
df1.iloc[i+1,2]=(df1.iloc[i+1,1]-df1.iloc[i,1]).days #count days between orders
if (i == max(df1.index) - 1):
df1.iloc[i + 1,3] = n
else:
df1.iloc[i,3] = n
n = 1
df1 = df1.groupby('Email', axis=0, as_index=False).sum()
#calculate average days between orders
df1.loc[:, 'Average'] = df1.apply(lambda x: x['Difference']/float(x['Count']), axis = 1)
df2 = df1[['Average']]
df2 = df2[df2.Average != 0] #retain average > 0
df2 = df2.reset_index().drop('index',1)
a = df2['Average'].tolist()
a = np.asarray(a)
df4 = pd.DataFrame(columns = ['Days between orders', 'Customers'])
df4['Days between orders'] = pd.Series(['1 to 4', '4 to 7', '7 to 15', '15 to 30', '30 to 60', '60 to 100', '100+'])
#count orders in given range
df4.iloc[0, 1] = np.compress((0 < a) & (a < 4), a).size
df4.iloc[1, 1] = np.compress((4 <= a) & (a < 7), a).size
df4.iloc[2, 1] = np.compress((7 <= a) & (a < 15), a).size
df4.iloc[3, 1] = np.compress((15 <= a) & (a < 30), a).size
df4.iloc[4, 1] = np.compress((30 <= a) & (a < 60), a).size
df4.iloc[5, 1] = np.compress((60 <= a) & (a < 100), a).size
df4.iloc[6, 1] = np.compress((100 <= a), a).size
df4.to_csv(cfg['root']+cfg['dir_data_output']+cfg['output_order_frequency'], index = False)
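# Output shape (editor's note): order_frequency.csv ends up with one row per
# bucket, e.g.
#   Days between orders,Customers
#   1 to 4,N
#   4 to 7,N
#   ...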
|
[
"ksaisree849@gmail.com"
] |
ksaisree849@gmail.com
|
6e17eeda6e12e4dafa17ee97a4013966065550aa
|
69ee2329d61509dd31620b7447c923e0a1301a00
|
/monop/asgi.py
|
76a57b883ac156b0f78e9dc7adc7a735fb7548c5
|
[] |
no_license
|
uglycitrus/monop
|
225980d4b925bdfe8d2279ceb2a097a63aa24c09
|
ca0109a4e84a3a4b0f746534af45b020c4d59369
|
refs/heads/master
| 2023-01-02T16:27:49.410893
| 2020-09-28T13:41:38
| 2020-09-28T13:41:38
| 259,382,130
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 387
|
py
|
"""
ASGI config for monop project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'monop.settings')
application = get_asgi_application()
|
[
"womeara@capsulerx.com"
] |
womeara@capsulerx.com
|
48950a1fbb45120f7db66684448037b3ec034878
|
c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c
|
/cases/synthetic/sieve-big-7010.py
|
4241a7b8992e2ec8e59cd03759bbc0c0eb761042
|
[] |
no_license
|
Virtlink/ccbench-chocopy
|
c3f7f6af6349aff6503196f727ef89f210a1eac8
|
c7efae43bf32696ee2b2ee781bdfe4f7730dec3f
|
refs/heads/main
| 2023-04-07T15:07:12.464038
| 2022-02-03T15:42:39
| 2022-02-03T15:42:39
| 451,969,776
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 31,744
|
py
|
# A resizable list of integers
class Vector(object):
items: [int] = None
size: int = 0
def __init__(self:"Vector"):
self.items = [0]
# Returns current capacity
def capacity(self:"Vector") -> int:
return len(self.items)
# Increases capacity of vector by one element
def increase_capacity(self:"Vector") -> int:
self.items = self.items + [0]
return self.capacity()
# Appends one item to end of vector
def append(self:"Vector", item: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends many items to end of vector
def append_all(self:"Vector", new_items: [int]) -> object:
item:int = 0
for item in new_items:
self.append(item)
# Removes an item from the middle of vector
def remove_at(self:"Vector", idx: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Retrieves an item at a given index
def get(self:"Vector", idx: int) -> int:
return self.items[idx]
# Retrieves the current size of the vector
def length(self:"Vector") -> int:
return self.size
# A resizable list of integers
class Vector2(object):
items: [int] = None
items2: [int] = None
size: int = 0
size2: int = 0
def __init__(self:"Vector2"):
self.items = [0]
# Returns current capacity
def capacity(self:"Vector2") -> int:
return len(self.items)
# Returns current capacity
def capacity2(self:"Vector2") -> int:
return len(self.items)
# Increases capacity of vector by one element
def increase_capacity(self:"Vector2") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity2(self:"Vector2") -> int:
self.items = self.items + [0]
return self.capacity()
# Appends one item to end of vector
def append(self:"Vector2", item: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append2(self:"Vector2", item: int, item2: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends many items to end of vector
def append_all(self:"Vector2", new_items: [int]) -> object:
item:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all2(self:"Vector2", new_items: [int], new_items2: [int]) -> object:
item:int = 0
item2:int = 0
for item in new_items:
self.append(item)
# Removes an item from the middle of vector
def remove_at(self:"Vector2", idx: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at2(self:"Vector2", idx: int, idx2: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Retrieves an item at a given index
def get(self:"Vector2", idx: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get2(self:"Vector2", idx: int, idx2: int) -> int:
return self.items[idx]
# Retrieves the current size of the vector
def length(self:"Vector2") -> int:
return self.size
# Retrieves the current size of the vector
def length2(self:"Vector2") -> int:
return self.size
# A resizable list of integers
class Vector3(object):
items: [int] = None
items2: [int] = None
items3: [int] = None
size: int = 0
size2: int = 0
size3: int = 0
def __init__(self:"Vector3"):
self.items = [0]
# Returns current capacity
def capacity(self:"Vector3") -> int:
return len(self.items)
# Returns current capacity
def capacity2(self:"Vector3") -> int:
return len(self.items)
# Returns current capacity
def capacity3(self:"Vector3") -> int:
return len(self.items)
# Increases capacity of vector by one element
def increase_capacity(self:"Vector3") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity2(self:"Vector3") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity3(self:"Vector3") -> int:
self.items = self.items + [0]
return self.capacity()
# Appends one item to end of vector
def append(self:"Vector3", item: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append2(self:"Vector3", item: int, item2: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append3(self:"Vector3", item: int, item2: int, item3: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends many items to end of vector
def append_all(self:"Vector3", new_items: [int]) -> object:
item:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all2(self:"Vector3", new_items: [int], new_items2: [int]) -> object:
item:int = 0
item2:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all3(self:"Vector3", new_items: [int], new_items2: [int], new_items3: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
for item in new_items:
self.append(item)
# Removes an item from the middle of vector
def remove_at(self:"Vector3", idx: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at2(self:"Vector3", idx: int, idx2: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at3(self:"Vector3", idx: int, idx2: int, idx3: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Retrieves an item at a given index
def get(self:"Vector3", idx: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get2(self:"Vector3", idx: int, idx2: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get3(self:"Vector3", idx: int, idx2: int, idx3: int) -> int:
return self.items[idx]
# Retrieves the current size of the vector
def length(self:"Vector3") -> int:
return self.size
# Retrieves the current size of the vector
def length2(self:"Vector3") -> int:
return self.size
# Retrieves the current size of the vector
def length3(self:"Vector3") -> int:
return self.size
# A resizable list of integers
class Vector4(object):
items: [int] = None
items2: [int] = None
items3: [int] = None
items4: [int] = None
size: int = 0
size2: int = 0
size3: int = 0
size4: int = 0
def __init__(self:"Vector4"):
self.items = [0]
# Returns current capacity
def capacity(self:"Vector4") -> int:
return len(self.items)
# Returns current capacity
def capacity2(self:"Vector4") -> int:
return len(self.items)
# Returns current capacity
def capacity3(self:"Vector4") -> int:
return len(self.items)
# Returns current capacity
def capacity4(self:"Vector4") -> int:
return len(self.items)
# Increases capacity of vector by one element
def increase_capacity(self:"Vector4") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity2(self:"Vector4") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity3(self:"Vector4") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity4(self:"Vector4") -> int:
self.items = self.items + [0]
return self.capacity()
# Appends one item to end of vector
def append(self:"Vector4", item: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append2(self:"Vector4", item: int, item2: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append3(self:"Vector4", item: int, item2: int, item3: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append4(self:"Vector4", item: int, item2: int, item3: int, item4: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends many items to end of vector
def append_all(self:"Vector4", new_items: [int]) -> object:
item:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all2(self:"Vector4", new_items: [int], new_items2: [int]) -> object:
item:int = 0
item2:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all3(self:"Vector4", new_items: [int], new_items2: [int], new_items3: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all4(self:"Vector4", new_items: [int], new_items2: [int], new_items3: [int], new_items4: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
item4:int = 0
for item in new_items:
self.append(item)
# Removes an item from the middle of vector
def remove_at(self:"Vector4", idx: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at2(self:"Vector4", idx: int, idx2: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at3(self:"Vector4", idx: int, idx2: int, idx3: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at4(self:"Vector4", idx: int, idx2: int, idx3: int, idx4: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Retrieves an item at a given index
def get(self:"Vector4", idx: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get2(self:"Vector4", idx: int, idx2: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get3(self:"Vector4", idx: int, idx2: int, idx3: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get4(self:"Vector4", idx: int, idx2: int, idx3: int, idx4: int) -> int:
return self.items[idx]
# Retrieves the current size of the vector
def length(self:"Vector4") -> int:
return self.size
# Retrieves the current size of the vector
def length2(self:"Vector4") -> int:
return self.size
# Retrieves the current size of the vector
def length3(self:"Vector4") -> int:
return self.size
# Retrieves the current size of the vector
def length4(self:"Vector4") -> int:
return self.size
# A resizable list of integers
class Vector5(object):
items: [int] = None
items2: [int] = None
items3: [int] = None
items4: [int] = None
items5: [int] = None
size: int = 0
size2: int = 0
size3: int = 0
size4: int = 0
size5: int = 0
def __init__(self:"Vector5"):
self.items = [0]
# Returns current capacity
def capacity(self:"Vector5") -> int:
return len(self.items)
# Returns current capacity
def capacity2(self:"Vector5") -> int:
return len(self.items)
# Returns current capacity
def capacity3(self:"Vector5") -> int:
return len(self.items)
# Returns current capacity
def capacity4(self:"Vector5") -> int:
return len(self.items)
# Returns current capacity
def capacity5(self:"Vector5") -> int:
return len(self.items)
# Increases capacity of vector by one element
def increase_capacity(self:"Vector5") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity2(self:"Vector5") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity3(self:"Vector5") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity4(self:"Vector5") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity5(self:"Vector5") -> int:
self.items = self.items + [0]
return self.capacity()
# Appends one item to end of vector
def append(self:"Vector5", item: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append2(self:"Vector5", item: int, item2: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append3(self:"Vector5", item: int, item2: int, item3: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append4(self:"Vector5", item: int, item2: int, item3: int, item4: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append5(self:"Vector5", item: int, item2: int, item3: int, item4: int, item5: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends many items to end of vector
def append_all(self:"Vector5", new_items: [int]) -> object:
item:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all2(self:"Vector5", new_items: [int], new_items2: [int]) -> object:
item:int = 0
item2:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all3(self:"Vector5", new_items: [int], new_items2: [int], new_items3: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all4(self:"Vector5", new_items: [int], new_items2: [int], new_items3: [int], new_items4: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
item4:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all5(self:"Vector5", new_items: [int], new_items2: [int], new_items3: [int], new_items4: [int], new_items5: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
item4:int = 0
item5:int = 0
for item in new_items:
self.append(item)
# Removes an item from the middle of vector
def remove_at(self:"Vector5", idx: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at2(self:"Vector5", idx: int, idx2: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at3(self:"Vector5", idx: int, idx2: int, idx3: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at4(self:"Vector5", idx: int, idx2: int, idx3: int, idx4: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at5(self:"Vector5", idx: int, idx2: int, idx3: int, idx4: int, idx5: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Retrieves an item at a given index
def get(self:"Vector5", idx: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get2(self:"Vector5", idx: int, idx2: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get3(self:"Vector5", idx: int, idx2: int, idx3: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get4(self:"Vector5", idx: int, idx2: int, idx3: int, idx4: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get5(self:"Vector5", idx: int, idx2: int, idx3: int, idx4: int, idx5: int) -> int:
return self.items[idx]
# Retrieves the current size of the vector
def length(self:"Vector5") -> int:
return self.size
# Retrieves the current size of the vector
def length2(self:"Vector5") -> int:
return self.size
# Retrieves the current size of the vector
def length3(self:"Vector5") -> int:
return self.size
# Retrieves the current size of the vector
def length4(self:"Vector5") -> int:
return self.size
# Retrieves the current size of the vector
def length5(self:"Vector5") -> int:
return self.size
# A faster (but more memory-consuming) implementation of vector
class DoublingVector(Vector):
doubling_limit:int = 1000
# Overriding to do fewer resizes
def increase_capacity(self:"DoublingVector") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
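# Side note on why doubling helps: growing from capacity 1 to 1024 takes
# 10 doublings instead of 1023 one-slot resizes, keeping append
# amortized O(1) until doubling_limit is reached.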
# A faster (but more memory-consuming) implementation of vector
class DoublingVector2(Vector):
doubling_limit:int = 1000
doubling_limit2:int = 1000
# Overriding to do fewer resizes
def increase_capacity(self:"DoublingVector2") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity2(self:"DoublingVector2") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# A faster (but more memory-consuming) implementation of vector
class DoublingVector3(Vector):
doubling_limit:int = 1000
doubling_limit2:int = 1000
doubling_limit3:int = 1000
# Overriding to do fewer resizes
def increase_capacity(self:"DoublingVector3") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity2(self:"DoublingVector3") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity3(self:"DoublingVector3") -> int:
        if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# A faster (but more memory-consuming) implementation of vector
class DoublingVector4(Vector):
doubling_limit:int = 1000
doubling_limit2:int = 1000
doubling_limit3:int = 1000
doubling_limit4:int = 1000
# Overriding to do fewer resizes
def increase_capacity(self:"DoublingVector4") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity2(self:"DoublingVector4") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity3(self:"DoublingVector4") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity4(self:"DoublingVector4") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# A faster (but more memory-consuming) implementation of vector
class DoublingVector5(Vector):
doubling_limit:int = 1000
doubling_limit2:int = 1000
doubling_limit3:int = 1000
doubling_limit4:int = 1000
doubling_limit5:int = 1000
# Overriding to do fewer resizes
def increase_capacity(self:"DoublingVector5") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity2(self:"DoublingVector5") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity3(self:"DoublingVector5") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity4(self:"DoublingVector5") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity5(self:"DoublingVector5") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Makes a vector in the range [i, j)
def vrange(i:int, j:int) -> Vector:
v:Vector = None
v = DoublingVector()
while i < j:
v.append(i)
i = i + 1
return v
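# e.g. vrange(2, 6) yields a Vector holding 2, 3, 4, 5 (the upper
# bound j is exclusive).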
def vrange2(i:int, j:int, i2:int, j2:int) -> Vector:
v:Vector = None
v2:Vector = None
v = DoublingVector()
while i < j:
v.append(i)
i = i + 1
return v
def vrange3(i:int, j:int, i2:int, j2:int, i3:int, j3:int) -> Vector:
v:Vector = None
v2:Vector = None
v3:Vector = None
v = DoublingVector()
while i < j:
v.append(i)
i = i + 1
return v
def vrange4(i:int, j:int, i2:int, j2:int, i3:int, j3:int, i4:int, j4:int) -> Vector:
v:Vector = None
v2:Vector = None
v3:Vector = None
v4:Vector = None
v = DoublingVector()
while i < j:
v.append(i)
i = i + 1
return v
def vrange5(i:int, j:int, i2:int, j2:int, i3:int, j3:int, i4:int, j4:int, i5:int, j5:int) -> Vector:
v:Vector = None
v2:Vector = None
v3:Vector = None
v4:Vector = None
v5:Vector = None
v = DoublingVector()
while i < j:
v.append(i)
i = i + 1
return v
# Sieve of Eratosthenes (not really)
def sieve(v:Vector) -> object:
i:int = 0
j:int = 0
k:int = 0
while i < v.length():
k = v.get(i)
j = i + 1
while j < v.length():
if v.get(j) % k == 0:
v.remove_at(j)
else:
j = j + 1
i = i + 1
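# Worked example: sieve(vrange(2, 10)) leaves 2, 3, 5, 7. Each pass fixes
# k = v.get(i) and removes every later element divisible by k, so only
# primes survive. This is O(n^2) trial division, not the true
# O(n log log n) Sieve of Eratosthenes the name hints at.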
def sieve2(v:Vector, v2:Vector) -> object:
i:int = 0
i2:int = 0
j:int = 0
j2:int = 0
k:int = 0
k2:int = 0
while i < v.length():
k = v.get(i)
j = i + 1
while j < v.length():
if v.get(j) % k == 0:
v.remove_at(j)
else:
j = j + 1
i = i + 1
def sieve3(v:Vector, v2:Vector, v3:Vector) -> object:
i:int = 0
i2:int = 0
i3:int = 0
j:int = 0
j2:int = 0
j3:int = 0
k:int = 0
k2:int = 0
k3:int = 0
while i < v.length():
k = v.get(i)
j = i + 1
while j < v.length():
if v.get(j) % k == 0:
v.remove_at(j)
else:
j = j + 1
i = i + 1
def sieve4(v:Vector, v2:Vector, v3:Vector, v4:Vector) -> object:
i:int = 0
i2:int = 0
i3:int = 0
i4:int = 0
j:int = 0
j2:int = 0
j3:int = 0
j4:int = 0
k:int = 0
k2:int = 0
k3:int = 0
k4:int = 0
while i < v.length():
k = v.get(i)
j = i + 1
while j < v.length():
if v.get(j) % k == 0:
v.remove_at(j)
else:
j = j + 1
i = i + 1
def sieve5(v:Vector, v2:Vector, v3:Vector, v4:Vector, v5:Vector) -> object:
i:int = 0
i2:int = 0
i3:int = 0
i4:int = 0
i5:int = 0
j:int = 0
j2:int = 0
j3:int = 0
j4:int = 0
j5:int = 0
k:int = 0
k2:int = 0
k3:int = 0
k4:int = 0
k5:int = 0
while i < v.length():
k = v.get(i)
j = i + 1
while j < v.length():
if v.get(j) % k == 0:
v.remove_at(j)
else:
j = j + 1
i = i + 1
# Input parameter
n:int = 50
n2:int = 50
n3:int = 50
n4:int = 50
n5:int = 50
# Data
v:Vector = None
v2:Vector = None
v3:Vector = None
v4:Vector = None
v5:Vector = None
i:int = 0
i2:int = 0
i3:int = 0
i4:int = 0
i5:int = 0
# Crunch
v = vrange(2, n)
v2 = vrange(2, n)
v3 = vrange(2, n)
v4 = vrange(2, n)
v5 = vrange(2, n)
sieve(v)
# Print
while i < v.length():
print(v.get(i))
i = i + 1
|
[
"647530+Virtlink@users.noreply.github.com"
] |
647530+Virtlink@users.noreply.github.com
|
27e9e221b3544c5710fd52ced93157ca85f70088
|
f17410fd0d23142a40fee177247a4e5ed7379e2e
|
/lists/views.py
|
b7da508593c66bb40a324a5b0af3fa06463f62f2
|
[] |
no_license
|
nmaiese/TDDwithPython
|
fcfffee2dd8dd34632d43e75fff828f600304170
|
42e3f547e234c1a58206765b79636921140c0d0c
|
refs/heads/master
| 2021-01-25T11:39:32.811582
| 2017-06-11T09:14:48
| 2017-06-11T09:14:48
| 93,937,496
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,044
|
py
|
from django.http import HttpResponse
from django.shortcuts import render, redirect
from django.core.exceptions import ValidationError
from lists.models import Item, List
def home_page(request):
return render(request, 'home.html')
def view_list(request, list_id):
list_ = List.objects.get(id=list_id)
error = None
if request.method == 'POST':
try:
item = Item.objects.create(text=request.POST['item_text'], list=list_)
item.full_clean()
item.save()
return redirect(list_)
except ValidationError:
error = "You can't have an empty list item"
item.delete()
return render(request, 'list.html', {'list': list_, 'error': error})
def new_list(request):
list_ = List.objects.create()
item = Item.objects.create(text=request.POST['item_text'], list=list_)
try:
item.full_clean()
item.save()
except ValidationError:
error = "You can't have an empty list item"
list_.delete()
return render(request, 'home.html', {'error':error})
return redirect(list_)
|
[
"aniello.maiese@mecglobal.com"
] |
aniello.maiese@mecglobal.com
|
0208003b54ef571bc02f0182ccee88ef56f31c0e
|
09e5cfe06e437989a2ccf2aeecb9c73eb998a36c
|
/modules/cctbx_project/cctbx/geometry_restraints/standard_cif_links.py
|
54974b9f43166f409f00a3224dfd0b97adfe5340
|
[
"BSD-3-Clause-LBNL",
"BSD-3-Clause"
] |
permissive
|
jorgediazjr/dials-dev20191018
|
b81b19653624cee39207b7cefb8dfcb2e99b79eb
|
77d66c719b5746f37af51ad593e2941ed6fbba17
|
refs/heads/master
| 2020-08-21T02:48:54.719532
| 2020-01-25T01:41:37
| 2020-01-25T01:41:37
| 216,089,955
| 0
| 1
|
BSD-3-Clause
| 2020-01-25T01:41:39
| 2019-10-18T19:03:17
|
Python
|
UTF-8
|
Python
| false
| false
| 4,096
|
py
|
from __future__ import absolute_import, division, print_function
standard_cif_links = [
['link_ACE_C-N', 'Taken from Monomer Library or GeoStd'],
['link_AHT-ALA', 'Taken from Monomer Library or GeoStd'],
['link_ALPHA1-2', 'Taken from Monomer Library or GeoStd'],
['link_ALPHA1-3', 'Taken from Monomer Library or GeoStd'],
['link_ALPHA1-4', 'Taken from Monomer Library or GeoStd'],
['link_ALPHA1-6', 'Taken from Monomer Library or GeoStd'],
['link_ALPHA2-3', 'Taken from Monomer Library or GeoStd'],
['link_ALPHA2-6', 'Taken from Monomer Library or GeoStd'],
['link_BETA1-2', 'Taken from Monomer Library or GeoStd'],
['link_BETA1-3', 'Taken from Monomer Library or GeoStd'],
['link_BETA1-4', 'Taken from Monomer Library or GeoStd'],
['link_BETA1-6', 'Taken from Monomer Library or GeoStd'],
['link_BETA2-3', 'Taken from Monomer Library or GeoStd'],
['link_BOC_C-N', 'Taken from Monomer Library or GeoStd'],
['link_BR-C5', 'Taken from Monomer Library or GeoStd'],
['link_CH2-N2', 'Taken from Monomer Library or GeoStd'],
['link_CH3-N1', 'Taken from Monomer Library or GeoStd'],
['link_CH3-O2*', 'Taken from Monomer Library or GeoStd'],
['link_CIS', 'Taken from Monomer Library or GeoStd'],
['link_CYS-MPR', 'Taken from Monomer Library or GeoStd'],
['link_DFO-NME', 'Taken from Monomer Library or GeoStd'],
['link_DFO_C-N', 'Taken from Monomer Library or GeoStd'],
['link_DFO_DFO', 'Taken from Monomer Library or GeoStd'],
['link_DFO_N-C', 'Taken from Monomer Library or GeoStd'],
['link_DFO_STA', 'Taken from Monomer Library or GeoStd'],
['link_DM1-CH2', 'Taken from Monomer Library or GeoStd'],
['link_FE-CYS', 'Taken from Monomer Library or GeoStd'],
['link_FOR-LYZ', 'Taken from Monomer Library or GeoStd'],
['link_FOR_C-C', 'Taken from Monomer Library or GeoStd'],
['link_FOR_C-N', 'Taken from Monomer Library or GeoStd'],
['link_ILG_CD-N', 'Taken from Monomer Library or GeoStd'],
['link_ILG_CD-p', 'Taken from Monomer Library or GeoStd'],
['link_IVA_C-N', 'Taken from Monomer Library or GeoStd'],
['link_LINK_C-N', 'Taken from Monomer Library or GeoStd'],
['link_LINK_CNp', 'Taken from Monomer Library or GeoStd'],
['link_LINK_CpN', 'Taken from Monomer Library or GeoStd'],
['link_MAN-ASN', 'Taken from Monomer Library or GeoStd'],
['link_MAN-SER', 'Taken from Monomer Library or GeoStd'],
['link_MAN-THR', 'Taken from Monomer Library or GeoStd'],
['link_MG-O1P', 'Taken from Monomer Library or GeoStd'],
['link_MG-O2P', 'Taken from Monomer Library or GeoStd'],
['link_MPR-CYS', 'Taken from Monomer Library or GeoStd'],
['link_NAG-ASN', 'Taken from Monomer Library or GeoStd'],
['link_NAG-SER', 'Taken from Monomer Library or GeoStd'],
['link_NAG-THR', 'Taken from Monomer Library or GeoStd'],
['link_NH2_CTERM', 'Taken from Monomer Library or GeoStd'],
['link_NMCIS', 'Taken from Monomer Library or GeoStd'],
['link_NME_N-C', 'Taken from Monomer Library or GeoStd'],
['link_NMTRANS', 'Taken from Monomer Library or GeoStd'],
['link_PCIS', 'Taken from Monomer Library or GeoStd'],
['link_PEPTIDE-PLANE', 'Taken from Monomer Library or GeoStd'],
['link_PTRANS', 'Taken from Monomer Library or GeoStd'],
['link_SFN-TYR', 'Taken from Monomer Library or GeoStd'],
['link_SS', 'Taken from Monomer Library or GeoStd'],
['link_STA-NME', 'Taken from Monomer Library or GeoStd'],
['link_STA_C-N', 'Taken from Monomer Library or GeoStd'],
['link_STA_DFO', 'Taken from Monomer Library or GeoStd'],
['link_STA_N-C', 'Taken from Monomer Library or GeoStd'],
['link_STA_STA', 'Taken from Monomer Library or GeoStd'],
['link_TRANS', 'Taken from Monomer Library or GeoStd'],
['link_XYS-ASN', 'Taken from Monomer Library or GeoStd'],
['link_XYS-SER', 'Taken from Monomer Library or GeoStd'],
['link_XYS-THR', 'Taken from Monomer Library or GeoStd'],
['link_ZN-CYS', 'Taken from Monomer Library or GeoStd'],
['link_gap', 'Taken from Monomer Library or GeoStd'],
['link_p', 'Taken from Monomer Library or GeoStd'],
['link_symmetry', 'Taken from Monomer Library or GeoStd'],
]
|
[
"jorge7soccer@gmail.com"
] |
jorge7soccer@gmail.com
|
e57f9089ea211b93963ff796954afc25fc5f9716
|
a9d8f739c58b879ccc16faaecb09ea703561e805
|
/quiz_list/urls.py
|
438d09f73ab0d7dbda85e614511707c71ecdf3e9
|
[] |
no_license
|
quanpham0805/QuizMaker
|
875226682381b513154d56a47f7a1d68972b8005
|
9c9733c706625b3e8231ee74db0261477fc2d210
|
refs/heads/main
| 2023-03-03T16:21:16.493302
| 2021-02-18T21:24:08
| 2021-02-18T21:24:08
| 338,889,956
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 496
|
py
|
# pages/urls.py
from django.urls import path
from .views import QuizListView, AddQuizView, CustomizeWordView, deleteWord, doQuiz, finishQuiz
urlpatterns = [
path('quizlist/', QuizListView.as_view(), name='quizlist'),
path('addquiz/', AddQuizView.as_view(), name='addquiz'),
path('customizeword/', CustomizeWordView.as_view(), name='customize_word'),
path('deleteword/', deleteWord, name='delete_word'),
path('doquiz/', doQuiz, name='do_quiz'),
path('finish/', finishQuiz, name='finish'),
]
|
[
"shiroemon95@gmail.com"
] |
shiroemon95@gmail.com
|
bd25afb2da442f243f43333f07eb75d7b9defe1a
|
21d0c531e058ba53734b06b197bf1a5d95bc067a
|
/src/MySite/urls.py
|
8c95c9cf0ea0f2bb71986a338b3ac3563a952a78
|
[] |
no_license
|
wesamalnobani/Poll-Application---WebSite
|
6d402762d996a63719d5f9fa74eeeb6f85fa159f
|
88c1730d7f48404e1fb533518df2b11595cb9e9d
|
refs/heads/master
| 2020-04-22T06:24:32.480962
| 2019-02-11T19:40:50
| 2019-02-11T19:40:50
| 170,116,040
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 799
|
py
|
"""MySite URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path,include
urlpatterns = [
path('admin/', admin.site.urls),
path('polls/', include('polls.urls')),
]
|
[
"wesam.alnobani@gmail.com"
] |
wesam.alnobani@gmail.com
|
7bad057c961b87f15afad89073740bd16f80514d
|
2c51e883f9583e39cbcb214c14306fd8215afe1c
|
/maple.py
|
85714ad514fb15529cab15c260668eb3922be556
|
[] |
no_license
|
xcapri/Cmap
|
8ff3d1f3e41a747d24ba2b4537e18e0dda90117a
|
65ab17158abe667b4ed3ce4060b90ba471ba9d8f
|
refs/heads/main
| 2023-05-28T08:03:27.789996
| 2021-06-14T17:27:39
| 2021-06-14T17:27:39
| 377,009,099
| 1
| 0
| null | 2021-06-15T02:09:45
| 2021-06-15T02:09:44
| null |
UTF-8
|
Python
| false
| false
| 2,847
|
py
|
from selenium import webdriver
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.support.select import Select
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from barnum import gen_data
from time import sleep
import names
import random
import string
# install all the required modules first
# run with: python maple.py
def makeCode():
pondev = webdriver.Chrome("D:/bot/chromedriver_win32/chromedriver")
    # set the path to your local chromedriver binary
letters = string.ascii_lowercase
mailRand = ''.join(random.choice(letters) for i in range(6))+'@mailsac.com'
    # generate a random mailsac.com address
urlreg = 'https://www.maplesoft.com/products/maple/free-trial/'
getlink = 'https://mailsac.com/inbox/'
    # inbox URL used to receive the Maple activation link
pondev.get(urlreg)
pondev.find_element_by_xpath("//input[@id='FirstName']").send_keys(names.get_first_name())
pondev.find_element_by_xpath("//input[@id='LastName']").send_keys(names.get_last_name())
pondev.find_element_by_xpath("//input[@id='EmailAddress']").send_keys(mailRand)
pondev.find_element_by_xpath("//input[@id='Company']").send_keys(gen_data.create_company_name())
pondev.find_element_by_xpath("//input[@id='JobTitle']").send_keys(gen_data.create_job_title())
CountryDropDownList=Select(pondev.find_element_by_xpath("//*[@id='CountryDropDownList']"))
CountryDropDownList.select_by_visible_text("United States")
pondev.implicitly_wait(5)
    # wait up to 5 seconds for the region list to load
pondev.find_element_by_xpath("//option[@value='CA ']").click()
pondev.find_element_by_xpath("//*[@id='SegmentRadioButtonList_3']").click()
pondev.find_element_by_xpath("//*[@id='chkAgreeToGDPR']").click()
pondev.find_element_by_xpath("//*[@id='txtInstructorName']").send_keys(names.get_full_name(gender='male'))
pondev.find_element_by_xpath("//*[@id='txtCourse']").send_keys('Algebra')
pondev.find_element_by_xpath("//*[@id='SubmitButton']").click()
pondev.implicitly_wait(10)
pondev.get(getlink+mailRand)
    # open the email inbox
pondev.find_element_by_xpath("/html/body/div[1]/div[3]/div[1]/div/div[2]/div/table/tbody/tr[2]/td[3]").click()
    # open the message containing the activation link and read out the activation code
lastopen = pondev.find_element_by_xpath("/html/body/div[1]/div[3]/div[1]/div/div[2]/div/table/tbody/tr[2]/td[2]/div[2]/p[3]/a").get_attribute("innerHTML").splitlines()[0]
pondev.get(lastopen)
pondev.get(lastopen)
sleep(3)
exp = pondev.find_element_by_xpath("//*[@id='evaluationExpiry']").text
Acode = pondev.find_element_by_xpath("//span[@id='evaluationPurchaseCode']").text
print('\n')
print('Activation code : '+Acode)
print('Your evaluation will expire in '+str(exp))
print('\n')
makeCode()
|
[
"noreply@github.com"
] |
xcapri.noreply@github.com
|
675ba165820731279f29a74d43ec3abcf76a78cb
|
5bf0d556772495482f204c8f82510ede4627cb69
|
/bbs/wsgi.py
|
8a263700beeb25f0512c01e6c90664c3242ea7d4
|
[
"Apache-2.0"
] |
permissive
|
pythonzongyi/demo
|
b59768534c29045b7bb95bc2f2fd8424c3fd0f59
|
a331567b4fa556e8e1cb34795bb2d61962af345d
|
refs/heads/master
| 2022-11-26T00:33:48.658980
| 2018-07-27T12:40:03
| 2018-07-27T12:40:03
| 142,539,585
| 2
| 0
|
Apache-2.0
| 2022-11-22T01:06:25
| 2018-07-27T06:54:46
|
Python
|
UTF-8
|
Python
| false
| false
| 384
|
py
|
"""
WSGI config for bbs project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "bbs.settings")
application = get_wsgi_application()
|
[
"309471409@qq.com"
] |
309471409@qq.com
|
dd9e4b3c565ee6ce2b2e625dcc9320d29ccab90b
|
1dbad085e6c7fe6454a2a80f2e3d7cea50aaca3d
|
/Testing_Your_Code/test_name_function.py
|
6fa85b588768e4875094b9a922bdf4d481b560bd
|
[] |
no_license
|
aliadolfo/Classic-Problems-in-Python
|
c4265326c79b532148c53282d4121cc58cfac5f9
|
6fc2fb7dc03751db580dd969bb33e936547b64e1
|
refs/heads/master
| 2022-11-25T21:00:18.620410
| 2020-08-01T02:54:21
| 2020-08-01T02:54:21
| 273,827,145
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 635
|
py
|
import unittest
from name_function import get_formatted_name
class NamesTestCase(unittest.TestCase):
# Test for name_function.py
def test_first_last_name(self):
"""Do names like 'Janis Joplin' work?"""
formatted_name = get_formatted_name('janis', 'joplin')
self.assertEqual(formatted_name, 'Janis Joplin')
def test_first_last_middle_name(self):
"""Do names like 'Wolfgang Amadeus Mozart' work?"""
formatted_name = get_formatted_name('wolfgang', 'mozart', 'amadeus')
self.assertEqual(formatted_name, 'Wolfgang Amadeus Mozart')
if '__main__'==__name__:
unittest.main()
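# For reference, a minimal formatter that satisfies both test cases above
# (a sketch; the real name_function.py is not shown in this file):
def get_formatted_name_sketch(first, last, middle=''):
    """Return a neatly title-cased full name."""
    if middle:
        full_name = '%s %s %s' % (first, middle, last)
    else:
        full_name = '%s %s' % (first, last)
    return full_name.title()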
|
[
"aliadolfo@aliadolfo.com"
] |
aliadolfo@aliadolfo.com
|
a61004120a3076ad49ed178bd5d07d0e28954429
|
e0a27d8431c30bf16ad86d2145a07a58ff0814f7
|
/session_2/extra/.bitfana/lib/python2.7/site-packages/exchanges/okcoin.py
|
653d85534a7d9284c2eb17b1363abe9ea09d3012
|
[] |
no_license
|
rvegas/master-class-docker
|
8e1c623e2f2df8ff18a69c68f14039f1f54abcbe
|
94510ec2a465eac12782a1d41df26e2d660486c4
|
refs/heads/master
| 2021-05-06T18:11:10.760733
| 2018-01-30T23:07:20
| 2018-01-30T23:07:20
| 111,947,200
| 5
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,817
|
py
|
import datetime
from decimal import Decimal
import dateutil.parser
import requests
from exchanges.base import Exchange, FuturesExchange, date_stamp, time_stamp
from exchanges.helpers import get_response, get_datetime
class OKCoin(Exchange):
TICKER_URL = 'https://www.okcoin.com/api/ticker.do?ok=1'
@classmethod
def _current_price_extractor(cls, data):
return data.get('ticker', {}).get('last')
@classmethod
def _current_bid_extractor(cls, data):
return data.get('ticker', {}).get('buy')
@classmethod
def _current_ask_extractor(cls, data):
return data.get('ticker', {}).get('sell')
class OKCoinFutures(Exchange):
@classmethod
def get_data(cls):
symbols = []
dates = []
bids = []
asks = []
last = []
contract = []
for i in ['this_week', 'next_week', 'month', 'quarter']:
response = requests.get(
'https://www.okcoin.com/api/future_ticker.do',
params={
'symbol': 'btc_usd',
'contractType': i
}
)
data = response.json()['ticker'][0]
d = datetime.date(
int(str(data['contractId'])[0:4]),
int(str(data['contractId'])[4:6]),
int(str(data['contractId'])[6:8])
)
dates.append(date_stamp(d))
bids.append(data['buy'])
asks.append(data['sell'])
last.append(data['last'])
contract.append('XBT')
return {
'contract' : contract,
'dates': dates,
'bids' : [Decimal(str(x)) for x in bids],
'asks' : [Decimal(str(x)) for x in asks],
'last' : [Decimal(str(x)) for x in last]
}
|
[
"ricardovegas@gmail.com"
] |
ricardovegas@gmail.com
|
590367367923c4ad35e9778a3db3edbf6573568f
|
20f08b25df4f5d177236e54368ba080e18b1b91f
|
/sorting.py
|
0d662fcb1bdf1d4121481a162b475eda2d2c4324
|
[] |
no_license
|
rolph-recto/interview
|
0dbf8c9ded831a7312b39c53b4f1b082cf64d171
|
dee9e998c25fdc80bb14b16ef5bff97094fe537a
|
refs/heads/master
| 2021-12-11T22:23:29.172899
| 2021-10-31T05:32:05
| 2021-10-31T05:32:05
| 49,737,060
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,251
|
py
|
#!/usr/bin/env python
# sorting.py
# implementation of various sorting algorithms
import random
import time
# bubblesort
# general idea: iteratively swap adjacent inversions
# pros: very simple
# cons: O(n^2) worst case and average case runtime
# when to use: never, basically. use insertsort for simple sorts
def bubblesort(v):
i = 0
n = 1
iterations = 0
while n < len(v):
# only swap inversions up to what we haven't done
while i < len(v)-n:
# swap inversion!
if v[i] > v[i+1]:
a = v[i]
v[i] = v[i+1]
v[i+1] = a
i += 1
iterations += 1
i = 0
n += 1
iterations += 1
return v, iterations
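# e.g. bubblesort([3, 1, 2]) returns ([1, 2, 3], 5): two swaps on the
# first pass, one comparison on the second, plus one count per pass.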
# insertsort
# general idea: insert each element into the sorted sublist in front of it
# pros: simple, runs well for small n
# cons: O(n^2) worst case runtime
# when to use: for small n (or as a small-n sub-method inside quicksort)
def insertsort(v):
i = 1
iterations = 0
while i < len(v):
j = i
while j > 0 and v[j-1] > v[j]:
# swap
a = v[j-1]
v[j-1] = v[j]
v[j] = a
j -= 1
iterations += 1
i += 1
iterations += 1
return v, iterations
# quicksort
# basic idea: recursively move lt / gt elements around pivot
# pro: very fast in practice (n log n average time), in place
# con: quadratic worst case time
# lomuto's partition
def lomuto(v, first, last):
pivot = v[first]
h = first
iterations = 0
for k in range(first+1, last):
# preserve invariant: move lt value to left of pivot
if v[k] < pivot:
h += 1
a = v[h]
v[h] = v[k]
v[k] = a
iterations += 1
# swap pivot with h
v[first] = v[h]
v[h] = pivot
iterations += 1
return h, iterations
# hoare's partition
# TODO: implement this!
def hoare(v, first, last):
pass
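# A minimal sketch of Hoare's partition to fill in the TODO above (an
# assumption, not the original author's code). It returns a split index j
# such that every element of v[first..j] is <= every element of
# v[j+1..last-1]. Unlike lomuto, j is not the pivot's final position, so a
# quicksort built on it must recurse on (first, j+1) and (j+1, last)
# instead of excluding a pivot index.
def hoare_sketch(v, first, last):
    pivot = v[first]
    i = first - 1
    j = last
    iterations = 0
    while True:
        # scan right for an element >= pivot
        i += 1
        while v[i] < pivot:
            i += 1
        # scan left for an element <= pivot
        j -= 1
        while v[j] > pivot:
            j -= 1
        # pointers crossed: v[first..j] <= v[j+1..last-1]
        if i >= j:
            return j, iterations
        v[i], v[j] = v[j], v[i]
        iterations += 1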
def quicksort(v, first=-999, last=-999):
if first == -999: first = 0
if last == -999: last = len(v)
# recursive case
if first < last:
# partition function must return position
# of pivot to determine recursive cases
        pivot, iterp = lomuto(v, first, last)
        # `last` is exclusive here, so the left half is [first, pivot)
        _, iterl = quicksort(v, first, pivot)
        _, iterr = quicksort(v, pivot+1, last)
return v, iterp+iterl+iterr
else:
return v, 0
# mergesort
# basic idea: merge sublists that are sorted recursively
# pro: O(n log n) worst case, stable sort
# con: not in-place for recursive implementation, runs worse than quicksort
# use when: memory is not an issue
def split(v):
l = len(v) / 2
return v[:l], v[l:]
# conquer phase
def merge(a, b):
new = []
ai = 0
bi = 0
iterations = 0
while ai < len(a) and bi < len(b):
# a[i] goes in sorted list
if a[ai] <= b[bi]:
new.append(a[ai])
ai += 1
# b[i] goes in sorted list
elif a[ai] > b[bi]:
new.append(b[bi])
bi += 1
iterations += 1
# one of the lists has extra elems at the end
# since we know the list itself is already sorted
# and we can't compare it to elems of the other list
# (since all of those have already been sorted)
# we just append the end of the list to the new list
# note that it cannot be the case that both a and b
# have leftovers, given the guard of the while loop
    if ai < len(a):
        new += a[ai:]
    elif bi < len(b):
        new += b[bi:]
return new, iterations
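# e.g. merge([1, 4], [2, 3]) returns ([1, 2, 3, 4], 3): three comparisons
# in the main loop, then the leftover tail [4] is appended.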
def mergesort(v):
# singleton list; nothing to do here
if len(v) == 1:
return v, 0
# list of 2; might have to reverse it
elif len(v) == 2:
return (v, 1) if v[0] < v[1] else (v[::-1],1)
# recursive case
else:
a, b = split(v)
# divide
newa, itera = mergesort(a)
newb, iterb = mergesort(b)
# conquer
new, iterm = merge(newa,newb)
return new, iterm+itera+iterb
# heapsort
# basic idea: repeatedly pop the top of a maxheap to build the sorted list
# pros: O(n log n) worst case runtime, in place
# cons: slower than quicksort in practice (larger constants)
# use when: whenever quicksort sucks
# make subarray into a maxheap
def heapify(l,i,heap_size=-1):
if heap_size == -1:
heap_size = len(l)
if i < 1 or i > heap_size/2:
return l, 0
# there is a -1 at the end because the heap algo
# assumes an index starting at 1 while python
# lists have starting index 0
# note that the assumption that starting index is 1
# is need for heap algo (ex. if index is 0, then left = 2*0 = 0)
iterations = 0
left = 2*i
right = 2*i + 1
largest = i
if left <= heap_size and l[largest-1] < l[left-1]:
largest = left
if right <= heap_size and l[largest-1] < l[right-1]:
largest = right
# swap head with largest child, make sure child's subarray is a maxheap
if largest != i:
a = l[i-1]
l[i-1] = l[largest-1]
l[largest-1] = a
iterations += 1
_, new_iters = heapify(l, largest, heap_size)
iterations += new_iters
return l, iterations
else:
return l, 0
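# e.g. heapify([1, 3, 2], 1) swaps the root with its larger child, giving
# [3, 1, 2] and counting one swap.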
# repeated heapify list from the bottom up
# to make maxheap
def make_maxheap(l):
iterations = 0
for i in range(len(l)/2, -1, -1):
_, new_iters = heapify(l,i+1)
iterations += new_iters
return l, iterations
# pop top of maxheap, re-heapify, and
# keep popping the top until sorted
def heapsort(l):
heap_size = len(l)
_, iterations = make_maxheap(l)
while heap_size > 1:
a = l[1-1]
l[1-1] = l[heap_size-1]
l[heap_size-1] = a
heap_size -= 1
_, new_iters = heapify(l,1,heap_size)
iterations += new_iters
return l, iterations
def main():
n = 100
l = [random.randint(1,n) for i in range(n)]
print 'Original list: ', l
bubble_start = time.clock()
bubble_list, bubble_iter = bubblesort(l[:])
bubble_end = time.clock()
print 'time: {}s'.format(bubble_end - bubble_start)
print '# iters: {}'.format(bubble_iter)
print 'bubblesorted: ', bubble_list
insert_start = time.clock()
insert_list, insert_iter = insertsort(l[:])
insert_end = time.clock()
print 'time: {}s'.format(insert_end - insert_start)
print '# iters: {}'.format(insert_iter)
print 'insertsorted: ', insert_list
merge_start = time.clock()
merge_list, merge_iter = mergesort(l[:])
merge_end = time.clock()
print 'time: {}s'.format(merge_end - merge_start)
print '# iters: {}'.format(merge_iter)
print 'mergesorted: ', merge_list
quick_start = time.clock()
quick_list, quick_iter = quicksort(l[:], 0, len(l))
quick_end = time.clock()
print 'time: {}s'.format(quick_end - quick_start)
print '# iters: {}'.format(quick_iter)
print 'quicksorted: ', quick_list
heap_start = time.clock()
heap_list, heap_iter = heapsort(l[:])
heap_end = time.clock()
print 'time: {}s'.format(heap_end - heap_start)
print '# iters: {}'.format(heap_iter)
print 'heapsorted: ', heap_list
if __name__ == '__main__':
main()
|
[
"rolph.recto@gmail.com"
] |
rolph.recto@gmail.com
|
ab7427dce3398636e63be469e2b1f952ea720eb2
|
792a4f0f162640d2f383cf9aaae77e1c5d0c0750
|
/sprite_base.py
|
8989d86af6851471c875c2c72aea9f64c68bb43b
|
[] |
no_license
|
vigov5/mario_game
|
0a8a3006558634959db6210fc7c5052d3f55dca8
|
b643558bdaddc03a5041be988c64e3ba117aee8b
|
refs/heads/develop
| 2021-01-19T08:28:47.461645
| 2014-04-02T15:53:20
| 2014-04-02T15:53:20
| 16,535,362
| 8
| 9
| null | 2014-04-02T15:53:20
| 2014-02-05T04:48:32
|
Python
|
UTF-8
|
Python
| false
| false
| 5,304
|
py
|
import pygame
import config
SECRET = 1
HIDE = 72
BLANK = 2
class SpriteBase(pygame.sprite.Sprite):
FRAME_WIDTH = 0
FRAME_HEIGHT = 0
PADDING = 1
img_file = ""
opacity = 255
GRAVITY = 0.4
MAX_VX = 3
MAX_VY = 20
vx = 0
vy = 0
# vertical and horizontal state
h_state = "standing"
v_state = "resting"
# vertical and horizontal facing
v_facing = "up"
h_facing = "right"
# general state
state = ""
frames_sizes = None
FRAMES = []
frame_index = 0
rect = None
dead = False
def init_image_and_position(self, index, location):
self.set_sprite(index)
self.rect = self.image.get_rect()
self.rect.topleft = location
def __init__(self, index, location, *groups):
if groups:
self.group = groups[0]
super(SpriteBase, self).__init__(*groups)
else:
super(SpriteBase, self).__init__()
self.init_image_and_position(index, location)
def update(self, dt, game):
"""loop through all frames and change sprite"""
if game.time_step % self.ANIMATION_INTERVAL == 0:
self.frame_index = (self.frame_index + 1) % len(self.FRAMES)
self.set_sprite(self.FRAMES[self.frame_index])
def draw(self, screen):
screen.blit(self.image, self.rect.topleft)
def get_clip_rect(self, index):
left = 0
        if self.frames_sizes is None:
left = (self.FRAME_WIDTH + self.PADDING) * index
width, height = self.FRAME_WIDTH, self.FRAME_HEIGHT
else:
for i in range(index):
left += self.frames_sizes[i][0] + self.PADDING
width, height = self.frames_sizes[index]
return pygame.Rect(left, 0, width, height)
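    # e.g. with FRAME_WIDTH=16, FRAME_HEIGHT=16 and PADDING=1 (illustrative
    # values; real sizes come from the subclass), index 2 clips
    # Rect(34, 0, 16, 16): each frame advances by FRAME_WIDTH + PADDING.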
def set_sprite(self, index):
self.image = None
img, cached = config.get_image_and_sprite(self.img_file)
key_name = "_".join(map(str, [index, self.opacity]))
if key_name not in cached.keys():
clip_rect = self.get_clip_rect(index)
_surface = pygame.Surface(clip_rect.size, pygame.SRCALPHA)
_surface.blit(img, (0, 0), clip_rect)
# this works on images with per pixel alpha too
_surface.fill((255, 255, 255, self.opacity), None, pygame.BLEND_RGBA_MULT)
cached[key_name] = _surface
self.image = cached[key_name]
if self.rect and self.rect.size != self.image.get_rect().size:
self.rect.size = self.image.get_rect().size
# flip image if needed
if self.h_facing == "left":
self.image = pygame.transform.flip(self.image, True, False)
def hit_platform_from_top(self, last, new, game):
pass
def hit_platform_from_bottom(self, last, new, game):
pass
def hit_platform_from_left(self, last, new, game):
pass
def hit_platform_from_right(self, last, new, game):
pass
def apply_gravity(self):
dy = self.vy
dx = self.vx
self.vy += self.GRAVITY
self.rect = self.rect.move(dx, dy)
def collision_with_platform(self, last, new, game):
for cell in game.tilemap.layers['triggers'].collide(new, 'blockers'):
if last.bottom <= cell.top and new.bottom > cell.top \
and not (last.left == cell.right or last.right == cell.left):
new.bottom = cell.top
self.vy = 0
self.hit_platform_from_bottom(last, new, game)
if last.top >= cell.bottom and new.top < cell.bottom \
and not (last.left == cell.right or last.right == cell.left):
new.top = cell.bottom
self.vy = 0
self.hit_platform_from_top(last, new, game)
if last.right <= cell.left and new.right > cell.left and last.bottom != cell.top:
new.right = cell.left
self.hit_platform_from_right(last, new, game)
if last.left >= cell.right and new.left < cell.right and last.bottom != cell.top:
new.left = cell.right
self.hit_platform_from_left(last, new, game)
def turn_with_speed(self, direction, speed):
self.h_facing = direction
self.vx = speed
def hit_v_reversed_triggers(self, last, new, game):
for cell in game.tilemap.layers['triggers'].collide(self.rect, 'v_reverse'):
if self.h_facing == "left":
self.turn_with_speed("right", 1)
elif self.h_facing == "right":
self.turn_with_speed("left", -1)
break
def set_blockers(self, game, value):
cells = game.tilemap.layers['triggers'].get_in_region(
self.rect.centerx, self.rect.centery, self.rect.centerx, self.rect.centery
)
for cell in cells:
if getattr(cell, "tile"):
if value:
cell.properties["blockers"] = value
elif cell.properties.get("blockers"):
del cell.properties["blockers"]
break
def get_self_rect(self):
ox, oy = self.group[0].position
sx, sy = self.rect.topleft
return pygame.Rect(sx - ox, sy - oy, self.rect.width, self.rect.height)
|
[
"nguyenanhtien2210@gmail.com"
] |
nguyenanhtien2210@gmail.com
|
47f1f23ff1453064ad01d1d6458dc787214f803d
|
d04df72019e21ad8904149dd94b6f4e226d85d31
|
/I0320045_exercise7.10.py
|
adc54c858d161ce38c854020e013297824f18640
|
[] |
no_license
|
HarryBrahmana/Harry-Brahmana_I0320045_Andhika-Pratama-Putra_Tugas7
|
f6f626324685e578f76015737f916fe1e59ba0c6
|
f890e9ed999fee65086f3f5fd08fa3b26ab7cc0c
|
refs/heads/main
| 2023-04-06T06:40:27.576993
| 2021-04-16T14:24:04
| 2021-04-16T14:24:04
| 358,573,785
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 86
|
py
|
str = 'Harry Brahmana'
print(str.endswith('Brahmana'))
print(str.endswith('Brahma'))
|
[
"noreply@github.com"
] |
HarryBrahmana.noreply@github.com
|
a23bd370086a13161b3f892ff3dfdee1049360d2
|
de468d3ec6b7b69664678789e5fa71b613b29687
|
/scine_heron/tests/depth_view_test.py
|
0d90a5f68a180cf2be0271f588b1f9b6f95c78a8
|
[
"BSD-3-Clause"
] |
permissive
|
qcscine/heron
|
dc566bf8bfdd5b5271ed79faed249a6552390d0d
|
688d2a510fda9f6bfaf5ef3af91fa3b988703a28
|
refs/heads/master
| 2023-04-06T23:31:14.931706
| 2022-08-31T05:40:15
| 2022-08-31T05:40:15
| 526,650,129
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,617
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__copyright__ = """ This code is licensed under the 3-clause BSD license.
Copyright ETH Zurich, Laboratory of Physical Chemistry, Reiher Group.
See LICENSE.txt for details.
"""
from typing import Tuple, List
import pytest
from vtk import (
vtkArray,
vtkArrayData,
VTK_FLOAT,
vtkMolecule,
vtkPolyData,
vtkPoints
)
from scine_heron.molecule import depth_view_algorithms as dva
def get_algorithm(filter_radius: float, zscale: float) -> dva.DepthProjectionAlgorithm:
algorithm = dva.DepthProjectionAlgorithm(filter_radius=filter_radius, zscale=zscale)
return algorithm
# These functions are used to prepare the data to test the algorithm
def prepare_haptic_pointer_data(position: Tuple[float, float, float]) -> vtkPolyData:
    haptic_pointer_pos = vtkPoints()
    haptic_pointer_pos.InsertPoint(0, *position)
    haptic_pointer_data = vtkPolyData()
    haptic_pointer_data.SetPoints(haptic_pointer_pos)
return haptic_pointer_data
def prepare_molecule(data: List[Tuple[int, float, float, float]]) -> vtkMolecule:
molecule = vtkMolecule()
for a, x, y, z in data:
atom = molecule.AppendAtom()
atom.SetAtomicNumber(a)
atom.SetPosition(x, y, z)
return molecule
def prepare_camera_data(depth_direction: Tuple[float, float, float]) -> vtkArrayData:
depth_vector = vtkArray.CreateArray(vtkArray.DENSE, VTK_FLOAT)
depth_vector.Resize(3)
_ = [depth_vector.SetValue(i, x) for i, x in enumerate(depth_direction)]
camera_data = vtkArrayData()
camera_data.AddArray(depth_vector)
return camera_data
@pytest.fixture(name="out_molecule_1", scope="session") # type: ignore[misc]
def get_out_molecule1() -> vtkMolecule:
"""
A small molecule where an atom is out of range.
The algorithm has reasonable filter range.
The haptic pointer is relatively close the the molecule.
"""
algorithm = get_algorithm(filter_radius=5, zscale=3)
haptic_pointer_data = prepare_haptic_pointer_data((0.0, -1.0, 0.0))
molecule = prepare_molecule(
[
(1, 0.1, 0.0, 0.1),
(2, 0.2, 2.0, 0.2),
(4, 0.0, 4.0, 1.0), # This won't be visible
(3, 0.3, 3.9, 0.3),
]
)
camera_data = prepare_camera_data(depth_direction=(0.0, 1.0, 0.0))
out_molecule = vtkMolecule()
algorithm._core_algorithm(haptic_pointer_data, molecule, camera_data, out_molecule)
return out_molecule
def test_filtering_natoms(out_molecule_1: vtkMolecule) -> None:
assert out_molecule_1.GetNumberOfAtoms() == 3
@pytest.mark.parametrize("iatom, expected", zip(range(3), [1, 2, 3])) # type: ignore[misc]
def test_atom_species(iatom: int, expected: int, out_molecule_1: vtkMolecule) -> None:
atom = out_molecule_1.GetAtom(iatom)
assert atom.GetAtomicNumber() == expected
@pytest.mark.parametrize("iatom", range(3)) # type: ignore[misc]
def test_xy_zero(iatom: int, out_molecule_1: vtkMolecule) -> None:
atom = out_molecule_1.GetAtom(iatom)
position = atom.GetPosition()
assert position[0] == 0
assert position[1] == 0
@pytest.mark.parametrize("iatom", range(3)) # type: ignore[misc]
def test_z_limited(iatom: int, out_molecule_1: vtkMolecule) -> None:
atom = out_molecule_1.GetAtom(iatom)
position = atom.GetPosition()
assert -3 < position[2] < 3
assert position[2] != 0
@pytest.fixture(name="out_molecule_2", scope="session") # type: ignore[misc]
def get_out_molecule2() -> vtkMolecule:
"""
The algorithm has huge filter range.
No atom is out of range.
The haptic pointer is really far from the molecule
"""
algorithm = get_algorithm(filter_radius=5000, zscale=3)
haptic_pointer_data = prepare_haptic_pointer_data((0.0, -2000.0, 0.0))
molecule = prepare_molecule(
[
(1, 0.0, 0.0, 0.0),
(2, 0.0, 2.0, 0.0),
(4, 0.0, 4.0, 1.0),
(3, 0.0, 3.9, 0.0),
]
)
camera_data = prepare_camera_data(depth_direction=(0.0, 1.0, 0.0))
out_molecule = vtkMolecule()
algorithm._core_algorithm(haptic_pointer_data, molecule, camera_data, out_molecule)
return out_molecule
def test_all_atoms_are_visible(out_molecule_2: vtkMolecule) -> None:
assert out_molecule_2.GetNumberOfAtoms() == 4
@pytest.mark.parametrize("iatom", range(4)) # type: ignore[misc]
def test_all_atoms_are_very_close_to_max(
iatom: int, out_molecule_2: vtkMolecule
) -> None:
atom = out_molecule_2.GetAtom(iatom)
position = atom.GetPosition()
assert position[2] == pytest.approx(3)
|
[
"scine@phys.chem.ethz.ch"
] |
scine@phys.chem.ethz.ch
|
806d57d8d2281630f7174477bb6424649c581cdc
|
1c16ea72d1d051ec7e3dca5e23f77aff627e00cd
|
/backend/quiz/models.py
|
cd9b8dccb8398baac0bc238bd69ed5843582486c
|
[
"MIT"
] |
permissive
|
mahmoud-batman/quizz-app
|
28fc2bf51c7b8885043a1a051ae5d70f4f59bae0
|
bebeff8d055ea769773cd1c749f42408aa83f5b9
|
refs/heads/main
| 2023-02-16T23:03:21.105541
| 2021-01-21T07:43:03
| 2021-01-21T07:43:03
| 306,298,341
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,709
|
py
|
from django.db import models
from django.contrib.auth import get_user_model
from django.template.defaultfilters import slugify
from django.db.models.signals import pre_save, post_save
from django.dispatch import receiver
from core.utils.unique_slug import unique_slug_generator
import uuid
import os
class Subject(models.Model):
name = models.CharField(max_length=30)
description = models.CharField(max_length=70, null=True, blank=True)
def __str__(self):
return self.name
class Quiz(models.Model):
owner = models.ForeignKey(
get_user_model(), on_delete=models.CASCADE, related_name="quizes"
)
subject = models.ForeignKey(
'Subject', on_delete=models.CASCADE)
name = models.CharField(max_length=255)
questions_count = models.IntegerField(default=0)
# listening_questions_count = models.IntegerField(default=0)
description = models.CharField(max_length=70, null=True, blank=True)
created = models.DateTimeField(auto_now_add=True, null=True, blank=True)
slug = models.SlugField(unique=True, null=True, blank=True)
roll_out = models.BooleanField(default=False)
training = models.BooleanField(default=False)
time = models.IntegerField(default=1)
class Meta:
ordering = ['created', ]
verbose_name_plural = 'Quizzes'
def __str__(self):
return self.name
@receiver(pre_save, sender=Quiz)
def slugify_title(sender, instance, *args, **kwargs):
if not instance.slug:
instance.slug = unique_slug_generator(instance)
def user_directory_path(instance, filename):
# file will be uploaded to MEDIA_ROOT/user_<id>/<filename>
return 'user_{0}/{1}/{2}'.format(instance.quiz.owner.user_id, instance.quiz.slug, filename.lower())
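# e.g. an owner with user_id 7, a quiz slugged "algebra-basics" and an
# upload named "Q1.MP3" map to "user_7/algebra-basics/q1.mp3" (the
# filename is lowercased; values here are illustrative).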
class Question(models.Model):
quiz = models.ForeignKey(
'Quiz', on_delete=models.CASCADE, related_name='questions')
text = models.TextField('Question', blank=True, default="")
# text = models.CharField('Question', max_length=255, blank=True, default="")
file = models.FileField(upload_to=user_directory_path, null=True, blank=True)
time = models.IntegerField(default=0)
created = models.DateTimeField(auto_now_add=True, null=True, blank=True)
# slug = models.SlugField(unique=True, null=True, blank=True)
# order = models.IntegerField(default=0) # auto increment
class Meta:
ordering = ['created', ]
    def filename(self):
return os.path.basename(self.file.name)
def __str__(self):
return '{0}{1}'.format(self.text,self.file)
# return self.text | self.file.name
class ListeningQuestion(models.Model):
quiz = models.ForeignKey(
'Quiz', on_delete=models.CASCADE, related_name='listening_questions')
file = models.FileField(upload_to=user_directory_path)
    # time: should be less than the quiz time
def __str__(self):
return '{0}'.format(self.file)
class Answer(models.Model):
question = models.ForeignKey(
'Question', on_delete=models.CASCADE, related_name="answers")
text = models.CharField('Answer', max_length=255)
is_correct = models.BooleanField('Correct answer', default=False)
# slug = models.SlugField(unique=True, null=True, blank=True)
def __str__(self):
return self.text
class QuizTakers(models.Model):
user = models.ForeignKey(get_user_model(), on_delete=models.CASCADE)
quiz = models.ForeignKey(
'Quiz', related_name="quiz_takers", on_delete=models.CASCADE)
correct_answers = models.IntegerField(default=0)
online = models.BooleanField(default=False)
completed = models.BooleanField(default=False)
timestamp = models.DateTimeField(auto_now_add=True)
def __str__(self):
return self.user.fullname
class Meta:
unique_together = ['user', 'quiz']
class Response(models.Model):
quiztaker = models.ForeignKey(
'QuizTakers', related_name="responses", on_delete=models.CASCADE)
question = models.ForeignKey('Question', on_delete=models.CASCADE)
answer = models.ForeignKey(
Answer, on_delete=models.CASCADE, null=True, blank=True)
def __str__(self):
return '{0}{1}'.format(self.question.text,self.question.file)
@receiver(post_save, sender=Quiz)
def set_default_quiz(sender, instance, created, **kwargs):
quiz = Quiz.objects.filter(id=instance.id)
quiz.update(questions_count=instance.questions.filter(
quiz=instance.pk).count())
@receiver(post_save, sender=Question)
def set_default(sender, instance, created, **kwargs):
quiz = Quiz.objects.filter(id=instance.quiz.id)
quiz.update(questions_count=instance.quiz.questions.filter(
quiz=instance.quiz.pk).count())
|
[
"mahmoud148043@gmail.com"
] |
mahmoud148043@gmail.com
|
2afaefa85fe1879b328a694d2c618f88e8594d98
|
0756849a70923c752b00df3b75435596e72a0356
|
/conftest.py
|
d1dab9d9bf437588f2278a1d20fb7d3cb8bbdc5b
|
[] |
no_license
|
qiaoshengyu/ktp_UI_test
|
2eeebd0a70d8f55fb668ea1838115546356a7f11
|
0c1c42c603038254efa3a925e20ec904e39965ba
|
refs/heads/master
| 2022-10-03T04:43:07.564027
| 2020-06-02T02:55:23
| 2020-06-02T02:55:23
| 268,543,356
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,789
|
py
|
"""
====================================
Author:樵夫
Time:2020/5/31 18:51
====================================
"""
import pytest
from common.handle_config import config
from selenium import webdriver
from page.page_login import PageLogin
from page.page_index import PageIndex
from page.page_letter import PageLetter
from page.page_class import PageClass
from page.page_work import PageWork
def get_option():
"""浏览器模式配置"""
if config.get("env", "headless") == "True":
"""设置浏览启动的选项:无头模式"""
opt = webdriver.ChromeOptions()
opt.add_argument("--headless")
return opt
else:
return None
def login_success_fixture():
"""测试账号登录"""
driver = webdriver.Chrome(options=get_option())
# 获取登录页面
login_page = PageLogin(driver)
login_page.login(name=config.get("test_data", "name"), pwd=config.get("test_data", "pwd"))
return driver
@pytest.fixture(scope="class")
def work_fixture():
    # Log in with the test account
    # NOTE: this fixture is shadowed by the second work_fixture definition
    # below, so pytest never uses this first one.
    login_success_fixture()
@pytest.fixture(scope="class")
def letter_fixture():
    # Log in with the test account
    driver = login_success_fixture()
    # Get the private-message page object
    letter_page = PageLetter(driver)
    # Get the home page object
    index_page = PageIndex(driver)
    # Click to open the private-message page
    index_page.click_letter()
    # Switch to the private-message window
driver.switch_to.window(driver.window_handles[-1])
yield letter_page
driver.quit()
@pytest.fixture(scope="class")
def course_fixture():
    # Log in with the test account
    driver = login_success_fixture()
    # Get the course (home) page object
    index_page = PageIndex(driver)
    # Get the class page object
class_page = PageClass(driver)
yield index_page, class_page
driver.quit()
@pytest.fixture(scope="class")
def work_fixture():
    # Log in with the test account
    driver = login_success_fixture()
    # Get the home page object
    index_page = PageIndex(driver)
    # Click "join course"
    index_page.click_join_course()
    # Enter the course verification code
    index_page.input_course_code(config.get("test_data", "code"))
    # Click the join button
    index_page.add()
    # Click to enter the class
    index_page.click_course()
    # Get the class page object
    class_page = PageClass(driver)
    # Click into the class homework page
    class_page.click_get_work()
    # Get the homework page object
    work_page = PageWork(driver)
    yield class_page, work_page
    # Click the course "more" button
    index_page.click_course_more()
    # Click the drop-course button
    index_page.click_course_drop()
    # Enter the login password
    index_page.input_drop_course_pwd(config.get("test_data", "pwd"))
    # Click the drop button
index_page.click_drop_course()
driver.quit()
|
[
"qiaoshengyu@tianchuangsec.com"
] |
qiaoshengyu@tianchuangsec.com
|
18920693a6e576a1daab10c89bfdfd5e651d735c
|
a00c8a827cac629e2d6319609090ae74663fe701
|
/biu/db/rvisUtils.py
|
08eef87a62b8a98ed90c131c2e94f1910d5308c2
|
[] |
no_license
|
thiesgehrmann/BIU
|
78a7278665733eaa5f283296ee8a3cb61c9c7cb6
|
c6031b24541ae8b1ee1870c8249f8c4929ff12d1
|
refs/heads/master
| 2021-10-07T17:11:19.214872
| 2021-09-27T11:44:05
| 2021-09-27T11:44:05
| 145,121,659
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,587
|
py
|
from ..structures import fileManager as fm
from ..structures import resourceManager as rm
from .. import utils
import os
###############################################################################
# https://www.biostars.org/p/71737/
def urlFileIndex():
files = {}
files["original"] = ("http://genic-intolerance.org/data/GenicIntolerance_v3_12Mar16.txt", "rvis_v9.tsv", {})
files["exAc"] = ("http://genic-intolerance.org/data/RVIS_Unpublished_ExAC_May2015.txt", "rvis_exac.tsv", {})
files["exAc2"] = ("http://genic-intolerance.org/data/RVIS_Unpublished_ExACv2_March2017.txt", "rvis_exac2.tsv", {})
return { k : (u, 'rvis/%s' % (l), o) for (k, (u, l, o)) in files.items() }
#edef
###############################################################################
class RVIS(fm.FileManager):
_original = None
_exAc = None
_exAc2 = None
def __init__(self, **kwargs):
fm.FileManager.__init__(self, urlFileIndex(), objects=[ "_original", "_exAc", "_exAc2" ], skiprows=0, **kwargs)
self._original = rm.TSVResourceManager(self, "original", delimiter='\t')
self._exAc = rm.TSVResourceManager(self, "exAc", delimiter='\t')
self._exAc2 = rm.TSVResourceManager(self, "exAc2", delimiter='\t')
#edef
__getitem__colMap = {
"CCDS_r9" : "GENE",
"ALL_0.1%" : "RVIS",
"%ALL_0.1%" : "RVIS_p",
"%ExAC_0.05%popn" : "RVIS_exAc_p",
"LoF-FDR[ExAC]" : "LoF_p",
"CCDSr20" : "GENE",
"RVIS[pop_maf_0.05%(any)]" : "RVIS_gnomad",
"%RVIS[pop_maf_0.05%(any)]" : "RVIS_gnomad_p",
"Edge_case_RVIS[pop_maf_0.05%(any)]" : "edge_case",
"%OE-ratio_[ExAC v2]" : "edge_case_oe_ratio"
}
def __getitem__(self, geneSymbols):
if geneSymbols is None:
filtFunc = lambda g: True
else:
if isinstance(geneSymbols, str):
geneSymbols = [ geneSymbols ]
#fi
geneSymbols = set(geneSymbols)
filtFunc = lambda g: g in geneSymbols
#fi
# RVIS %ExAC RVIS ExAC LoF FDR %ExAC v2 RVIS Edge Case (%OE-ratio)
relO = self._original[self._original.GENE.apply(filtFunc)][["GENE", "ALL_0.1%", "%ALL_0.1%", "%ExAC_0.05%popn", "LoF-FDR[ExAC]"]]
relG = self._exAc2[self._exAc2.CCDSr20.apply(filtFunc)][["CCDSr20", "RVIS[pop_maf_0.05%(any)]", "%RVIS[pop_maf_0.05%(any)]", "Edge_case_RVIS[pop_maf_0.05%(any)]", "%OE-ratio_[ExAC v2]" ]]
relO = relO.rename(columns=self.__getitem__colMap)
relG = relG.rename(columns=self.__getitem__colMap)
return relO.join(relG.set_index("GENE"), how="left", on="GENE")
#edef
#eclass
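# A hypothetical usage sketch (gene symbols below are illustrative):
# rvis = RVIS()
# scores = rvis[["TTN", "BRCA2"]]   # joined RVIS / ExAC v2 scores for two genes
# all_scores = rvis[None]           # None selects every gene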
|
[
"tgehrmann@shark.researchlumc.nl"
] |
tgehrmann@shark.researchlumc.nl
|
7a0c56fd183c3e76c8d7dddb4b5bf2061163769e
|
cec46b85b48af9c222b98884c85f498513a28cad
|
/hazirFonksiyonlar.py
|
1f3f09b698d52d2567f2785fe6be3e5c8b5fedeb
|
[] |
no_license
|
suleymanguven/python_fonksiyonlar
|
e5a876282b896f1066833974796476efef4186a1
|
15decfe91b00f9109bed3cd7c19ee3f5c0c40b7e
|
refs/heads/master
| 2021-05-22T22:54:07.770755
| 2020-04-05T01:26:02
| 2020-04-05T01:26:02
| 253,132,199
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 595
|
py
|
from math import *
from random import *
import datetime
import locale
locale.setlocale(locale.LC_ALL,'italian')
""""
sayi=int(input("sayı giriniz:"))
#karekok=sqrt(sayi)
print(sqrt(sayi))
print(cos(sayi))
print(sin(sayi))
z=pow(4,5)
print(z)
print(random())
print(randint(20,50))
print(randrange(60))
"""
simdi=datetime.datetime.now()
print(simdi)
#print(simdi.year)
#print(simdi.month)
suan=datetime.datetime.strftime(simdi,'%X')
print(suan)
suan1=datetime.datetime.strftime(simdi,'%A')
print(suan1)
ay=datetime.datetime.strftime(simdi,'%B')
print(ay)
|
[
"noreply@github.com"
] |
suleymanguven.noreply@github.com
|
124ac32751df4eea549b989e47705f5681ff4376
|
58d842f60886ec8080e4f25f68bd0cf620dd0e95
|
/formacionciudadana/wsgi.py
|
c65dba33d8dacfaffdbcbb7fc0e8658f8057bd90
|
[] |
no_license
|
pedroIgnacioM/formacion-ciudadana-1
|
f760a4df6d93458a34970c0880f3e10fd1b136ba
|
dc38b760fc1145b680196b3919cfe8c173c8b52a
|
refs/heads/master
| 2020-04-05T09:25:32.603380
| 2018-05-18T00:37:22
| 2018-05-18T00:37:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 413
|
py
|
"""
WSGI config for formacionciudadana project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "formacionciudadana.settings")
application = get_wsgi_application()
|
[
"johann.aevt@gmail.com"
] |
johann.aevt@gmail.com
|
a0aabd5065203de0a376d25f30d93fe9fa20ea61
|
b1abf03f1ec189e55b0ccbdc2878ef3c7d361349
|
/data/iqfeed/iqfeed_client.py
|
e84d2093ef3e7c631ae83b47c7d56635762a6d54
|
[] |
no_license
|
fagan2888/Python_Trading_Snippets
|
086d0db286e4f405984a414788d001c68bbcff57
|
0b0346999f82877d156b908f8ae9290077112c92
|
refs/heads/master
| 2021-01-07T22:16:43.760389
| 2020-02-02T23:34:40
| 2020-02-02T23:34:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,326
|
py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import socket
from collections import namedtuple
from enum import Enum
import logging
from . import parsers as parser_table  # relative import, consistent with .tools below
from .tools import retry
log = logging.getLogger(__name__)
Bar = namedtuple('IQFeedBar', ['datetime', 'open', 'high', 'low', 'close', 'volume'])
TickBar = namedtuple('IQFeedTickBar', ['datetime', 'last', 'last_size', 'volume', 'bid', 'ask', 'ticket_id'])
class DataType(Enum):
DAY = 0
MINUTE = 1
TICK = 2
PARSERS = {
DataType.DAY: parser_table.parse_day,
DataType.MINUTE: parser_table.parse_minute,
DataType.TICK: parser_table.parse_tick,
}
BEGIN_TIME_FILTER = '093000'
END_TIME_FILTER = '160000'
BARS_PER_MINUTE = 60
HOST = 'localhost'
PORT = 9100
TIMEOUT = 10.0
DATE_FORMAT = '%Y%m%d %H%M%S'
@retry(5, delay=2)
def get_data(instrument, start_time, end_time, data_type):
if data_type is DataType.TICK:
raise NotImplementedError('Not implemented')
    # Create the socket before the try block so the finally clause can always
    # close it, even if the strftime calls were to raise
    socket_ = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        start_time = start_time.strftime(DATE_FORMAT)
        end_time = end_time.strftime(DATE_FORMAT)
        socket_.connect((HOST, PORT))
socket_.settimeout(TIMEOUT)
socket_.sendall(_get_request_string(data_type, instrument, start_time, end_time))
data = _read_historical_data_socket(socket_)
finally:
socket_.close()
return PARSERS[data_type](data)
def _read_historical_data_socket(sock, receive_buffer=4096):
"""
Read the information from the socket, in a buffered
fashion, receiving only 4096 bytes at a time.
Parameters:
sock - The socket object
recv_buffer - Amount in bytes to receive per read
"""
data_buffer = ""
while True:
data = sock.recv(receive_buffer)
data_buffer += data
# Check if the end message string arrives
if "!ENDMSG!" in data_buffer:
break
# Remove the end message string
data_buffer = data_buffer[:-12]
return data_buffer
def _get_request_string(data_type, instrument, start_date, end_date):
if data_type is DataType.DAY:
return 'HDT,{0},{1},{2},,,,1\n'.format(instrument, start_date, end_date)
elif data_type is DataType.MINUTE:
return 'HIT,{0},{1},{2},{3},,{4},{5},1\n'\
.format(instrument, BARS_PER_MINUTE, start_date, end_date, BEGIN_TIME_FILTER, END_TIME_FILTER)
elif data_type is DataType.TICK:
return 'HTT,{0},{1},{2},,{3},{4},1,,\n'\
.format(instrument, start_date, end_date, BEGIN_TIME_FILTER, END_TIME_FILTER)
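# A minimal usage sketch, assuming a local IQFeed lookup socket is listening
# on localhost:9100 (symbol and dates are illustrative):
# import datetime
# bars = get_data('SPY', datetime.datetime(2019, 1, 2),
#                 datetime.datetime(2019, 1, 31), DataType.DAY)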
|
[
"oanufriyev@gmail.com"
] |
oanufriyev@gmail.com
|
60c18ac15a27a0f4c975d0aa3c2bc2a573e180e0
|
0e4a2ca6f92c60000a916a024d5ea5de5c2ecd53
|
/5 while.py
|
ee1013bb46ecb16d500073935c3a746c1c3cf227
|
[] |
no_license
|
Akshay7016/Python-codes
|
c5cfd9eab6138f8c0a1c991eaf7443b3235cca16
|
b67d9d621d7067a0e9abf4ed586add1f7375db77
|
refs/heads/main
| 2023-06-04T17:27:14.428380
| 2021-06-30T14:32:54
| 2021-06-30T14:32:54
| 381,731,082
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 503
|
py
|
i = 1
while i <= 5:
    print("Akshay", end=" ")
    j = 1
    while j <= 4:
        print("Rocks", end=" ")
        j = j + 1
    i = i + 1
    print()
# PS C:\Users\Akshay\Desktop\Python codes> & C:/Python/Python37-32/python.exe "c:/Users/Akshay/Desktop/Python codes/5 while.py"
# Akshay Rocks Rocks Rocks Rocks
# Akshay Rocks Rocks Rocks Rocks
# Akshay Rocks Rocks Rocks Rocks
# Akshay Rocks Rocks Rocks Rocks
# Akshay Rocks Rocks Rocks Rocks
# PS C:\Users\Akshay\Desktop\Python codes>
|
[
"noreply@github.com"
] |
Akshay7016.noreply@github.com
|
4424ad241465f4c714e3af89886aee118cb9c1c6
|
c56ca0e905aff3041e6ebab78dde8f90126f3ee8
|
/utils/apply_transformation.py
|
ad8af1cd3952c762928932ae968c4c6080d3f9b5
|
[] |
no_license
|
ohadmen/python_common
|
65ea9962ab29b0626a5e640a7bf492f1152953be
|
a4229ad86d38f8860f75ee3dbbabebccfdbcc4ef
|
refs/heads/master
| 2023-03-16T06:16:08.975430
| 2023-03-05T20:48:16
| 2023-03-05T20:48:16
| 291,151,286
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,037
|
py
|
import numpy as np
def apply_transformation(t: np.ndarray, pcl: np.ndarray):
"""
:param t: input transformation, should be of sie dim+1
:param pcl: point to tranformed, could be Nxdim or MxNxdim
:return: transformed points, same size as pcl
"""
if t.shape == (4, 4):
dim = 3
elif t.shape == (3, 3):
dim = 2
else:
raise RuntimeError("Bad transformation size {}".format(t.shape))
    pcl = pcl.copy()
if len(pcl.shape) == 2:
if pcl.shape[1] != dim:
raise RuntimeError("input size is not consistent with transformation")
pcl_out = (np.c_[pcl, np.ones((pcl.shape[0], 1))] @ t.T)[:, :dim]
elif len(pcl.shape) == 3:
if pcl.shape[2] != dim:
raise RuntimeError("input size is not consistent with transformation")
pcl_out = ((np.c_[pcl.reshape(-1, dim), np.ones((pcl.shape[0] * pcl.shape[1], 1))] @ t.T)[:, :dim]).reshape(
pcl.shape)
else:
raise RuntimeError("Unknown input size")
return pcl_out
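# A small usage sketch (illustrative values): rotating 2D points by 90 degrees
# with a 3x3 homogeneous transform, so dim = 2.
# t = np.array([[0., -1., 0.],
#               [1.,  0., 0.],
#               [0.,  0., 1.]])
# pts = np.array([[1., 0.], [0., 2.]])
# apply_transformation(t, pts)   # -> [[0., 1.], [-2., 0.]]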
|
[
"ohad.men@gmail.com"
] |
ohad.men@gmail.com
|
eff4c007beec988c88f4e2aac77216b085474a50
|
45f93a9d47204d76b8bf25a71dfb79403e75c33c
|
/Trees_and_Graphs/course-schedule-2.py
|
b4d5b19b8d116754156fc5f5b1a3d6861544c3f0
|
[] |
no_license
|
tahmid-tanzim/problem-solving
|
0173bce1973ac3e95441a76c10324c0e1b0a57c3
|
6ddb51de6772130f209474e76f39ca2938f444f0
|
refs/heads/master
| 2023-06-25T02:18:03.690263
| 2023-06-20T06:58:46
| 2023-06-20T06:58:46
| 137,173,850
| 4
| 1
| null | 2022-03-30T08:28:41
| 2018-06-13T06:44:25
|
Python
|
UTF-8
|
Python
| false
| false
| 2,879
|
py
|
#!/usr/bin/python3
# https://leetcode.com/problems/course-schedule-ii/
from typing import List, Tuple
"""
Topological Sort
"""
class Solution:
def __init__(self):
self.adjacencyList: List[List[int]] = list()
def isCyclicGraph(self, courseIdx: int, visited: List[bool], path: List[bool], result: List[int]) -> bool:
visited[courseIdx] = True
path[courseIdx] = True
for prerequisiteCourse in self.adjacencyList[courseIdx]:
if not visited[prerequisiteCourse]:
if self.isCyclicGraph(prerequisiteCourse, visited, path, result):
# cycle found
return True
elif path[prerequisiteCourse]:
# visited previously - cycle found
return True
# No cycle found - return to parent call stack & courseIdx is fully visited
path[courseIdx] = False
result.append(courseIdx)
return False
def findOrder(self, numCourses: int, prerequisites: List[Tuple[int, int]]) -> List[int]:
self.adjacencyList = [[] for _ in range(numCourses)]
for courseNumber, prerequisiteCourseNumber in prerequisites:
self.adjacencyList[courseNumber].append(prerequisiteCourseNumber)
visited = [False for _ in range(numCourses)] # visited list works as memoization
path = [False for _ in range(numCourses)] # used only inside DFS to check cycle
result = []
# DFS, depth first search
for course in range(numCourses):
if not visited[course]:
if self.isCyclicGraph(course, visited, path, result):
return []
return result
if __name__ == "__main__":
inputs = (
{
"numCourses": 2,
"prerequisites": [
(1, 0,),
],
"expected": [0, 1]
},
{
"numCourses": 4,
"prerequisites": [
(1, 0,),
(2, 0,),
(3, 1,),
(3, 2,),
],
"expected": [0, 1, 2, 3]
},
{
"numCourses": 1,
"prerequisites": [],
"expected": [0]
},
{
"numCourses": 2,
"prerequisites": [
(0, 1,),
(1, 0,),
],
"expected": []
},
)
test_passed = 0
obj = Solution()
for idx, val in enumerate(inputs):
output = obj.findOrder(val["numCourses"], val["prerequisites"])
if output == val['expected']:
print(f"{idx}. CORRECT Answer\nOutput: {output}\nExpected: {val['expected']}\n")
test_passed += 1
else:
print(f"{idx}. WRONG Answer\nOutput:{output}\nExpected:{val['expected']}\n")
print(f"Passed: {test_passed:3}/{idx + 1}\n")
|
[
"tahmid.tanzim@gmail.com"
] |
tahmid.tanzim@gmail.com
|
fe94b59c0b21a953d48b3a1c881316a5ee3f6119
|
df4ce79f977ea1d23660c65049039d917d5a5edc
|
/clfm_lib/fmincon.py
|
52e515fab96ed8b1c7f59b4f03975ffe77f47bdd
|
[
"MIT"
] |
permissive
|
JayceeLee/LyapunovLearner
|
9fe23c8d3310396c4821685a0f03486996dc2d51
|
a34155717f22b9715a05c4cc9df4fa6ce6efa018
|
refs/heads/master
| 2021-05-08T13:56:18.663356
| 2017-10-18T12:05:17
| 2017-10-18T12:05:17
| 120,049,050
| 5
| 2
| null | 2018-02-03T01:27:55
| 2018-02-03T01:27:55
| null |
UTF-8
|
Python
| false
| false
| 38,770
|
py
|
import sys
import numpy as np
import scipy as sp
import scipy.sparse as spr
from .learn_energy import matlength
# from .classify_bounds_on_vars import classifyBoundsOnVars
# def matlength(x):
# return np.max(x.shape)
def checkbounds(xin, lbin, ubin, nvars):
#CHECKBOUNDS Verify that the bounds are valid with respect to initial point.
#
# This is a helper function.
# [X,LB,UB,X,FLAG] = CHECKBOUNDS(X0,LB,UB,nvars)
# checks that the upper and lower
# bounds are valid (LB <= UB) and the same length as X (pad with -inf/inf
# if necessary); warn if too short or too long. Also make LB and UB vectors
# if not already. Finally, inf in LB or -inf in UB throws an error.
# Copyright 1990-2012 The MathWorks, Inc.
msg = np.array(())
# Turn into column vectors
lb = np.ravel(lbin)
ub = np.ravel(ubin)
xin = np.ravel(xin)
lenlb = matlength(lb);
lenub = matlength(ub);
# Check lb length
if lenlb > nvars:
sys.stdout.write('optimlib:checkbounds:IgnoringExtraLbs')
lb = lb[:nvars]
lenlb = nvars
elif lenlb < nvars: # includes empty lb case
if lenlb > 0:
# lb is non-empty and length(lb) < nvars.
sys.stdout.write('optimlib:checkbounds:PadLbWithMinusInf')
            lb = np.vstack((
                            lb,
                            -np.inf*np.ones((nvars-lenlb, 1))
                          ))
lenlb = nvars
else:
pass
# Check ub length
if lenub > nvars:
sys.stdout.write('optimlib:checkbounds:IgnoringExtraUbs')
ub = ub[:nvars]
lenub = nvars
elif lenub < nvars: # includes empty ub case
if lenub > 0:
# ub is non-empty and length(ub) < nvars.
sys.stdout.write('optimlib:checkbounds:PadUbWithInf')
            ub = np.vstack((ub,
                    np.inf*np.ones((nvars-lenub, 1))
                    ))
lenub = nvars
# Check feasibility of bounds
    length = np.min([lenlb, lenub])
    if np.any(lb[:length] > ub[:length]):
        count = np.count_nonzero(lb > ub)
        if count == 1:
            msg = ('Exiting due to infeasibility: 1 lower bound exceeds the'
                   ' corresponding upper bound.')
        else:
            msg = ('Exiting due to infeasibility: ' + str(count) + ' lower bounds exceed'
                   ' the corresponding upper bounds.')
# Check if -inf in ub or inf in lb
    if np.any(ub == -np.inf):
        sys.stdout.write('optimlib:checkbounds:MinusInfUb')
    elif np.any(lb == np.inf):
        sys.stdout.write('optimlib:checkbounds:PlusInfLb')
x = xin
return x, lb, ub, msg
def prepareOptionsForSolver(options, solverName):
    # Mirror TolFun into TolFunValue when both keys are present
    # (use .get to avoid a KeyError on missing keys)
    if isinstance(options, dict) and options.get('TolFun') and options.get('TolFunValue'):
        options['TolFunValue'] = options['TolFun']
def startx( ub, lb, xstart=np.array(()), xstartOutOfBounds_idx=np.array(()) ):
#STARTX Box-centered start point.
#
# This is a helper function.
# xstart = STARTX(ub,lb,xstart,xstartOutOfBounds_idx) sets the components
# that violate the bounds to a centered value.
    arg1 = np.logical_and((ub < np.inf), (lb == -np.inf))
    arg2 = np.logical_and((ub == np.inf), (lb > -np.inf))
    arg3 = np.logical_and((ub < np.inf), (lb > -np.inf))
    arg4 = np.logical_and((ub == np.inf), (lb == -np.inf))
    if xstart.size:
        # Only re-center the components flagged as out of bounds
        arg1 = np.logical_and(arg1, xstartOutOfBounds_idx)
        arg2 = np.logical_and(arg2, xstartOutOfBounds_idx)
        arg3 = np.logical_and(arg3, xstartOutOfBounds_idx)
        arg4 = np.logical_and(arg4, xstartOutOfBounds_idx)
    else:
        n = matlength(ub)
        xstart = np.zeros((n, 1))
    w = np.maximum(np.abs(ub), 1)
    xstart[arg1] = ub[arg1] - .5*w[arg1]
    ww = np.maximum(np.abs(lb), 1)
    xstart[arg2] = lb[arg2] + .5*ww[arg2]
    xstart[arg3] = (ub[arg3] + lb[arg3])/2
    xstart[arg4] = 1
return xstart
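# e.g. for ub = np.array([np.inf]), lb = np.array([0.]) and an out-of-bounds
# start, the component is reset to lb + 0.5*max(|lb|, 1) = 0.5 (illustrative).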
def classifyBoundsOnVars(lb, ub, nVar, findFixedVar):
#classifyBoundsOnVars Helper function that identifies variables
# that are fixed, and that have finite bounds.
# Set empty vector bounds to vectors of +Inf or -Inf
if not lb.size:
lb = -np.inf*np.ones((nVar,1))
if not ub.size:
ub = np.inf*np.ones((nVar,1))
# Check for NaN
if (np.any(np.isnan(lb)) or np.any(np.isnan(ub)) ):
print('optimlib:classifyBoundsOnVars:NaNBounds')
# Check for +Inf lb and -Inf ub
if np.any( lb == np.inf ):
print('optimlib:classifyBoundsOnVars:PlusInfLb')
if np.any(ub == -np.inf):
print('optimlib:classifyBoundsOnVars:MinusInfUb')
# Check for fixed variables equal to Inf or -Inf
if np.any( np.logical_and( (np.ravel(lb) == np.ravel(ub)), (np.isinf(np.ravel(lb))) ) ):
print('optimlib:classifyBoundsOnVars:FixedVarsAtInf')
    # Fixed variables
    xIndices = dict()
    if findFixedVar:
        xIndices['fixed'] = equalFloat(lb, ub, np.finfo(float).eps)
    else: # Do not attempt to detect fixed variables
        xIndices['fixed'] = np.zeros(nVar, dtype=bool)
    # Finite lower and upper bounds; exclude fixed variables
    xIndices['finiteLb'] = np.logical_and(
        np.logical_not(xIndices['fixed']), np.isfinite(np.ravel(lb))
    )
    xIndices['finiteUb'] = np.logical_and(
        np.logical_not(xIndices['fixed']), np.isfinite(np.ravel(ub))
    )
return xIndices
def equalFloat(v1,v2,tolerance):
# equalFloat Helper function that compares two vectors
# using a relative difference and returns a boolean
# vector.
# Indices for which both v1 and v2 are finite
finiteRange_idx = np.logical_and(np.isfinite(np.ravel(v1)), np.isfinite(np.ravel(v2)))
# Indices at which v1 and v2 are (i) finite and (ii) equal in a
# floating point sense
    isEqual_idx = np.logical_and(finiteRange_idx, np.abs(np.ravel(v1) - np.ravel(v2)) <= \
        tolerance * np.maximum(1, np.maximum(np.abs(np.ravel(v1)), np.abs(np.ravel(v2)))))
return isEqual_idx
def fmincon(FUN,X,NONLCON,options,*args):
#
# FMINCON finds a constrained minimum of a function of several variables.
# FMINCON attempts to solve problems of the form:
# min F(X) subject to: A*X <= B, Aeq*X = Beq (linear constraints)
# X C(X) <= 0, Ceq(X) = 0 (nonlinear constraints)
# LB <= X <= UB (bounds)
#
# FMINCON implements four different algorithms: interior point, SQP,
# active set, and trust region reflective. Choose one via the option
# Algorithm: for instance, to choose SQP, set OPTIONS =
# optimoptions('fmincon','Algorithm','sqp'), and then pass OPTIONS to
# FMINCON.
#
# X = FMINCON(FUN,X0,A,B) starts at X0 and finds a minimum X to the
# function FUN, subject to the linear inequalities A*X <= B. FUN accepts
# input X and returns a scalar function value F evaluated at X. X0 may be
# a scalar, vector, or matrix.
# # X = FMINCON(FUN,X0,A,B,Aeq,Beq) minimizes FUN subject to the linear
# equalities Aeq*X = Beq as well as A*X <= B. (Set A=[] and B=[] if no
# inequalities exist.)
# # X = FMINCON(FUN,X0,A,B,Aeq,Beq,LB,UB) defines a set of lower and upper
# bounds on the design variables, X, so that a solution is found in
# the range LB <= X <= UB. Use empty matrices for LB and UB
# if no bounds exist. Set LB(i) = -Inf if X(i) is unbounded below;
# set UB(i) = Inf if X(i) is unbounded above.
# # X = FMINCON(FUN,X0,A,B,Aeq,Beq,LB,UB,NONLCON) subjects the minimization
# to the constraints defined in NONLCON. The function NONLCON accepts X
# and returns the vectors C and Ceq, representing the nonlinear
# inequalities and equalities respectively. FMINCON minimizes FUN such
# that C(X) <= 0 and Ceq(X) = 0. (Set LB = [] and/or UB = [] if no bounds
# exist.)
# # X = FMINCON(FUN,X0,A,B,Aeq,Beq,LB,UB,NONLCON,OPTIONS) minimizes with
# the default optimization parameters replaced by values in OPTIONS, an
# argument created with the OPTIMOPTIONS function. See OPTIMOPTIONS for
# details. For a list of options accepted by FMINCON refer to the
# documentation.
#
# X = FMINCON(PROBLEM) finds the minimum for PROBLEM. PROBLEM is a
# structure with the function FUN in PROBLEM.objective, the start point
# in PROBLEM.x0, the linear inequality constraints in PROBLEM.Aineq
# and PROBLEM.bineq, the linear equality constraints in PROBLEM.Aeq and
# PROBLEM.beq, the lower bounds in PROBLEM.lb, the upper bounds in
# PROBLEM.ub, the nonlinear constraint function in PROBLEM.nonlcon, the
# options structure in PROBLEM.options, and solver name 'fmincon' in
# PROBLEM.solver. Use this syntax to solve at the command line a problem
# exported from OPTIMTOOL.
# # [X,FVAL] = FMINCON(FUN,X0,...) returns the value of the objective
# function FUN at the solution X.
# # [X,FVAL,EXITFLAG] = FMINCON(FUN,X0,...) returns an EXITFLAG that
# describes the exit condition. Possible values of EXITFLAG and the
# corresponding exit conditions are listed below. See the documentation
# for a complete description.
#
# All algorithms:
# 1 First order optimality conditions satisfied.
# 0 Too many function evaluations or iterations.
# -1 Stopped by output/plot function.
# -2 No feasible point found.
# Trust-region-reflective, interior-point, and sqp:
# 2 Change in X too small.
# Trust-region-reflective:
# 3 Change in objective function too small.
# Active-set only:
# 4 Computed search direction too small.
# 5 Predicted change in objective function too small.
# Interior-point and sqp:
# -3 Problem seems unbounded.
# # [X,FVAL,EXITFLAG,OUTPUT] = FMINCON(FUN,X0,...) returns a dictionary
# OUTPUT with information such as total number of iterations, and final
# objective function value. See the documentation for a complete list.
# # [X,FVAL,EXITFLAG,OUTPUT,LAMBDA] = FMINCON(FUN,X0,...) returns the
# Lagrange multipliers at the solution X: LAMBDA.lower for LB,
# LAMBDA.upper for UB, LAMBDA.ineqlin is for the linear inequalities,
# LAMBDA.eqlin is for the linear equalities, LAMBDA.ineqnonlin is for the
# nonlinear inequalities, and LAMBDA.eqnonlin is for the nonlinear
# equalities.
# # [X,FVAL,EXITFLAG,OUTPUT,LAMBDA,GRAD] = FMINCON(FUN,X0,...) returns the
# value of the gradient of FUN at the solution X.
# # [X,FVAL,EXITFLAG,OUTPUT,LAMBDA,GRAD,HESSIAN] = FMINCON(FUN,X0,...)
# returns the value of the exact or approximate Hessian of the Lagrangian
# at X.
# # Examples
# FUN can be specified using @:
# X = fmincon(@humps,...)
# In this case, F = humps(X) returns the scalar function value F of
# the HUMPS function evaluated at X.
# # FUN can also be an anonymous function:
# X = fmincon(@(x) 3*sin(x(1))+exp(x(2)),[1;1],[],[],[],[],[0 0])
# returns X = [0;0].
# # If FUN or NONLCON are parameterized, you can use anonymous functions to
# capture the problem-dependent parameters. Suppose you want to minimize
# the objective given in the function myfun, subject to the nonlinear
# constraint mycon, where these two functions are parameterized by their
# second argument a1 and a2, respectively. Here myfun and mycon are
# MATLAB file functions such as
# # function f = myfun(x,a1)
# f = x(1)^2 + a1*x(2)^2;
#
# function [c,ceq] = mycon(x,a2)
# c = a2/x(1) - x(2);
# ceq = [];
# # To optimize for specific values of a1 and a2, first assign the values
# to these two parameters. Then create two one-argument anonymous
# functions that capture the values of a1 and a2, and call myfun and
# mycon with two arguments. Finally, pass these anonymous functions to
# FMINCON:
# # a1 = 2; a2 = 1.5; define parameters first
# options = optimoptions('fmincon','Algorithm','interior-point'); run interior-point algorithm
# x = fmincon(@(x) myfun(x,a1),[1;2],[],[],[],[],[],[],@(x) mycon(x,a2),options)
# # See also OPTIMOPTIONS, OPTIMTOOL, FMINUNC, FMINBND, FMINSEARCH, @, FUNCTION_HANDLE.
# Copyright 1990-2015 The MathWorks, Inc.
# ported to python by Lekan Ogunmolu
# Date: August 06, 2017
    def check(x, idx):
        # Safely fetch the idx-th optional positional argument (None if absent)
        if idx < len(x) and x[idx] is not None:
            return x[idx]
        else:
            return None
    A, B, Aeq, Beq, LB, UB = check(args, 0), check(args, 1), check(args, 2), \
        check(args, 3), check(args, 4), check(args, 5)
    numberOfVariables = 1 # default; overridden below
    numberOfEqualities = 1
    numberOfInequalities = 1
    numberOfBounds = 1
defaultopt = {
'Algorithm': 'interior-point',
'AlwaysHonorConstraints': 'bounds',
'DerivativeCheck': 'off',
'Diagnostics': 'off',
'DiffMaxChange': np.inf,
'DiffMinChange': 0,
'Display': 'final',
'FinDiffRelStep': np.array(()),
'FinDiffType': 'forward',
'FunValCheck': 'off',
'GradConstr': 'off',
'GradObj': 'off',
'HessFcn': np.array(()),
'Hessian': np.array(()),
'HessMult': np.array(()),
'HessPattern': spr.csr_matrix(np.ones((numberOfVariables, numberOfVariables))),
'InitBarrierParam': 0.1,
'InitTrustRegionRadius': np.sqrt(numberOfVariables),
        'MaxFunEvals': np.array(()),
        'MaxIter': np.array(()),
        'MaxPCGIter': np.max([1, np.floor(numberOfVariables/2)]),
        'MaxProjCGIter': 2*(numberOfVariables-numberOfEqualities),
        'MaxSQPIter': 10*np.max([numberOfVariables, numberOfInequalities+numberOfBounds]),
        'ObjectiveLimit': -1e20,
        'OutputFcn': np.array(()),
        'PlotFcns': np.array(()),
        'PrecondBandWidth': 0,
'RelLineSrchBnd': np.array(()),
'RelLineSrchBndDuration': 1,
'ScaleProblem': None,
'SubproblemAlgorithm': 'ldl-factorization',
'TolCon': 1e-6,
'TolConSQP': 1e-6,
'TolFun': 1e-6,
'TolFunValue': 1e-6,
'TolPCG': 0.1,
'TolProjCG': 1e-2,
'TolProjCGAbs': 1e-10,
'TolX': np.array(()),
'TypicalX': np.ones((numberOfVariables,1)),
'UseParallel': False ,
}
# If just 'defaults' passed in, return the default options in X
    nargin = len(args)
    nargout = 7 # Python has no nargout; assume all seven outputs are requested
    if nargin == 1 and nargout <= 1 and FUN == 'defaults':
        X = defaultopt
if nargin < 10:
options = np.array(())
if nargin < 9:
NONLCON = np.array(())
if nargin < 8:
UB = np.array(())
if nargin < 7:
LB = np.array(())
if nargin < 6:
Beq = np.array(())
if nargin < 5:
Aeq = np.array(())
if nargin < 4:
B = np.array(())
if nargin < 3:
A = np.array(())
if nargin == 1:
if isinstance(FUN, dict):
templist = [FUN,X,A,B,Aeq,Beq,LB,UB,NONLCON,options]
for i in range(len(templist)):
                templist[i] = FUN[str(templist[i])]
else:
sys.stdout.write('optimlib:fmincon:InputArg')
if nargout > 4:
computeLambda = True
else :
computeLambda = False
activeSet = 'active-set'
sqp = 'sqp'
trustRegionReflective = 'trust-region-reflective'
interiorPoint = 'interior-point'
sqpLegacy = 'sqp-legacy'
XOUT = np.ravel(X)
sizes = dict()
sizes['xShape'] = X.shape;
sizes['nVar'] = np.max(XOUT.shape);
# Check for empty X
if sizes['nVar'] == 0:
sys.stdout.write('optimlib:fmincon:EmptyX')
display = defaultopt['Display']
# flags.detailedExitMsg = ~isempty(strfind(display,'detailed'));
# switcher {
# 'off',
# 'none' verbosity = 0;
# 'notify','notify-detailed'
# verbosity = 1;
# 'final','final-detailed'
# verbosity = 2;
# 'iter','iter-detailed'
# verbosity = 3;
# 'testing'
# verbosity = 4;
# otherwise
# verbosity = 2;
# } #display
# % Set linear constraint right hand sides to column vectors
# % (in particular, if empty, they will be made the correct
# % size, 0-by-1)
B = np.ravel(B)
Beq = np.ravel(Beq)
# % Check for consistency of linear constraints, before evaluating
# % (potentially expensive) user functions
# % Set empty linear constraint matrices to the correct size, 0-by-n
    if not Aeq.size:
        Aeq = Aeq.reshape(0, sizes['nVar'])
    if not A.size:
        A = A.reshape(0, sizes['nVar'])
lin_eq, Aeqcol = Aeq.shape
lin_ineq, Acol = A.shape
# These sizes checks assume that empty matrices have already been made the correct size
    if Aeqcol != sizes['nVar']:
        sys.stdout.write('optimlib:fmincon:WrongNumberOfColumnsInAeq %d' % sizes['nVar'])
    if lin_eq != matlength(Beq):
        sys.stdout.write('optimlib:fmincon:AeqAndBeqInconsistent')
    if Acol != sizes['nVar']:
        sys.stdout.write('optimlib:fmincon:WrongNumberOfColumnsInA %d' % sizes['nVar'])
    if lin_ineq != np.max(B.shape):
        sys.stdout.write('optimlib:fmincon:AeqAndBinInconsistent')
# End of linear constraint consistency check
Algorithm = defaultopt['Algorithm']
# Option needed for processing initial guess
AlwaysHonorConstraints = defaultopt['AlwaysHonorConstraints']
# Determine algorithm user chose via options. (We need this now
# to set OUTPUT['algorithm'] in case of early termination due to
# inconsistent bounds.)
    if Algorithm not in (activeSet, sqp, trustRegionReflective, interiorPoint, sqpLegacy):
sys.stdout.write('optimlib:fmincon:InvalidAlgorithm')
OUTPUT = dict()
OUTPUT['algorithm'] = Algorithm
    XOUT, l, u, msg = checkbounds(XOUT, LB, UB, sizes['nVar'])
    if np.size(msg): # bounds are infeasible: exit early with the message
EXITFLAG = -2
FVAL,LAMBDA,GRAD,HESSIAN = (np.array(()) for _ in range(4))
OUTPUT['iterations'] = 0
OUTPUT['funcCount'] = 0
OUTPUT['stepsize'] = np.array(())
        if OUTPUT['algorithm'] in (activeSet, sqp, sqpLegacy):
OUTPUT['lssteplength'] = np.array(())
else: #% trust-region-reflective, interior-point
OUTPUT['cgiterations'] = np.array(())
        if OUTPUT['algorithm'] in (interiorPoint, activeSet, sqp, sqpLegacy):
OUTPUT['constrviolation'] = np.array(())
OUTPUT['firstorderopt'] = np.array(())
OUTPUT['message'] = msg
X = XOUT
# if verbosity > 0
# disp(msg)
# end
# % Get logical list of finite lower and upper bounds
finDiffFlags = dict()
finDiffFlags['hasLBs'] = np.isfinite(l);
finDiffFlags['hasUBs'] = np.isfinite(u);
lFinite = l[finDiffFlags['hasLBs']]
uFinite = u[finDiffFlags['hasUBs']]
# Create structure of flags and initial values, initialize merit function
# type and the original shape of X.
flags, initVals = dict(), dict()
flags['meritFunction'] = 0;
initVals['xOrigShape'] = X;
    diagnostics = defaultopt['Diagnostics'] == 'on'
    funValCheck = defaultopt['FunValCheck'] == 'on'
    derivativeCheck = defaultopt['DerivativeCheck'] == 'on'
# Gather options needed for finite differences
# Write checked DiffMaxChange, DiffMinChage, FinDiffType, FinDiffRelStep,
# GradObj and GradConstr options back into struct for later use
options['DiffMinChange'] = defaultopt['DiffMinChange'] #optimget(options,'DiffMinChange',defaultopt,'fast');
options['DiffMaxChange'] = defaultopt['DiffMaxChange'] #optimget(options,'DiffMaxChange',defaultopt,'fast');
if options['DiffMinChange'] >= options['DiffMaxChange']:
        sys.stdout.write('optimlib:fmincon:DiffChangesInconsistent %0.5g, %0.5g'
                         % (options['DiffMinChange'], options['DiffMaxChange']))
# Read in and error check option TypicalX
typicalx = defaultopt['TypicalX'] = np.ones((sizes['nVar'], 1))
# checkoptionsize('TypicalX', size(typicalx), sizes.nVar);
options['TypicalX'] = typicalx
options['FinDiffType'] = defaultopt['FinDiffType'] #optimget(options,'FinDiffType',defaultopt,'fast');
# options = validateFinDiffRelStep(sizes.nVar,options,defaultopt);
options['GradObj'] = defaultopt['GradObj']
options['GradConstr'] = defaultopt['GradConstr']
    flags['grad'] = options['GradObj'] == 'on'
# Notice that defaultopt.Hessian = [], so the variable "hessian" can be empty
hessian = defaultopt['Hessian']
# If calling trust-region-reflective with an unavailable Hessian option value,
# issue informative error message
    hessianIsKnownString = isinstance(hessian, str) and \
        hessian in ('on', 'user-supplied', 'off', 'fin-diff-grads')
    if (OUTPUT['algorithm'] == trustRegionReflective) and \
            np.size(hessian) and not hessianIsKnownString:
        print('optimlib:fmincon:BadTRReflectHessianValue')
    if isinstance(hessian, str) and hessian in ('user-supplied', 'on'):
        flags['hess'] = True
    else:
        flags['hess'] = False
    # flags['constr'] records whether nonlinear constraints were supplied
    if NONLCON is not None and np.size(NONLCON):
        flags['constr'] = True
    else:
        flags['constr'] = False
# Process objective function
if FUN: # will detect empty string, empty matrix, empty cell array
# constrflag in optimfcnchk set to false because we're checking the objective, not constraint
        funfcn = optimfcnchk(FUN, 'fmincon', len(args), funValCheck, flags['grad'], flags['hess'], False, Algorithm)
else:
sys.stdout.write('optimlib:fmincon:InvalidFUN')
# Process constraint function
    if flags['constr']: # NONLCON is non-empty
        flags['gradconst'] = options['GradConstr'] == 'on'
        # hessflag in optimfcnchk set to False because hessian is never returned by nonlinear constraint
        # function
        #
        # constrflag in optimfcnchk set to True because we're checking the constraints
        confcn = optimfcnchk(NONLCON, 'fmincon', len(args), funValCheck, flags['gradconst'], False, True)
    else:
        flags['gradconst'] = False
        confcn = ['', '', '', '', '']
rowAeq,colAeq = Aeq.shape
    if OUTPUT['algorithm'] in (activeSet, sqp, sqpLegacy):
        # See if linear constraints are sparse and if user passed in Hessian
        if spr.issparse(Aeq) or spr.issparse(A):
            sys.stdout.write('optimlib:fmincon:ConvertingToFull %s' % Algorithm)
        if flags['hess']: # conflicting options
            flags['hess'] = False
            sys.stdout.write('optimlib:fmincon:HessianIgnoredForAlg %s' % Algorithm)
            # if strcmpi(funfcn{1},'fungradhess')
            #     funfcn{1}='fungrad';
            # elseif strcmpi(funfcn{1},'fun_then_grad_then_hess')
            #     funfcn{1}='fun_then_grad';
            # end
    elif OUTPUT['algorithm'] == trustRegionReflective:
# % Look at constraint type and supplied derivatives, and determine if
# % trust-region-reflective can solve problem
        isBoundedNLP = (not flags['constr']) and (not A.size) and (not Aeq.size) # problem has only bounds and no other constraints
        isLinEqNLP = ((not flags['constr']) and (not A.size) and (not lFinite.size)
                      and (not uFinite.size) and colAeq > rowAeq)
if isBoundedNLP and flags['grad']:
pass
#if only l and u then call sfminbx
elif isLinEqNLP and flags['grad']:
# if only Aeq beq and Aeq has more columns than rows, then call sfminle
pass
else:
# linkToDoc = addLink('Choosing the Algorithm', 'optim', 'helptargets.map', ...
# 'choose_algorithm', false);
# if ((not isBoundedNLP) and (not isLinEqNLP)):
# print('optimlib:fmincon:ConstrTRR', linkToDoc))
# else
# % The user has a problem that satisfies the TRR constraint
# % restrictions but they haven't supplied gradients.
# sys.stdout.write('optimlib:fmincon:GradOffTRR', linkToDoc)
pass
# % Process initial point
shiftedX0 = False # boolean that indicates if initial point was shifted
    if OUTPUT['algorithm'] in (activeSet, sqp, sqpLegacy):
        if OUTPUT['algorithm'] == sqpLegacy:
            # Classify variables: finite lower bounds, finite upper bounds
            xIndices = classifyBoundsOnVars(l, u, sizes['nVar'], False)
# xIndices = NotImplemented
# Check that initial point strictly satisfies the bounds on the variables.
violatedLowerBnds_idx = XOUT[finDiffFlags['hasLBs']] < l[finDiffFlags['hasLBs']]
violatedUpperBnds_idx = XOUT[finDiffFlags['hasUBs']] > u[finDiffFlags['hasUBs']]
if np.any(violatedLowerBnds_idx) or np.any(violatedUpperBnds_idx):
finiteLbIdx = np.nonzero(finDiffFlags['hasLBs'])
finiteUbIdx = np.nonzero(finDiffFlags['hasUBs'])
XOUT[finiteLbIdx[violatedLowerBnds_idx]] = l[finiteLbIdx[violatedLowerBnds_idx]];
XOUT[finiteUbIdx[violatedUpperBnds_idx]] = u[finiteUbIdx[violatedUpperBnds_idx]];
X = XOUT
shiftedX0 = True
    elif OUTPUT['algorithm'] == trustRegionReflective:
#
# If components of initial x not within bounds, set those components
# of initial point to a "box-centered" point
#
if not Aeq.size:
arg = (u >= 1e10); arg2 = (l <= -1e10);
u[arg] = np.inf;
l[arg2] = -np.inf;
            xinitOutOfBounds_idx = np.logical_or(XOUT < l, XOUT > u)
if np.any(xinitOutOfBounds_idx):
shiftedX0 = True
XOUT = startx(u,l,XOUT,xinitOutOfBounds_idx);
X = XOUT
else:
# Phase-1 for sfminle nearest feas. pt. to XOUT. Don't print a
# message for this change in X0 for sfminle.
XOUT = NotImplemented #feasibl(Aeq,Beq,XOUT)
X = XOUT
    elif OUTPUT['algorithm'] == interiorPoint:
# Variables: fixed, finite lower bounds, finite upper bounds
xIndices = classifyBoundsOnVars(l,u,sizes['nVar'],True);
# If honor bounds mode, then check that initial point strictly satisfies the
# simple inequality bounds on the variables and exactly satisfies fixed variable
# bounds.
if (AlwaysHonorConstraints=='bounds') or (AlwaysHonorConstraints=='bounds-ineqs'):
            violatedFixedBnds_idx = XOUT[xIndices['fixed']] != l[xIndices['fixed']]
            violatedLowerBnds_idx = XOUT[xIndices['finiteLb']] <= l[xIndices['finiteLb']]
            violatedUpperBnds_idx = XOUT[xIndices['finiteUb']] >= u[xIndices['finiteUb']]
if np.any(violatedLowerBnds_idx) or np.any(violatedUpperBnds_idx) or np.any(violatedFixedBnds_idx):
XOUT = NotImplemented #shiftInitPtToInterior(sizes['nVar'], XOUT, l, u, np.inf);
X = XOUT
shiftedX0 = True
# Evaluate function
initVals['g'] = np.zeros((sizes['nVar'],1))
    HESSIAN = np.array(())
    # Dispatch on the call type recorded by optimfcnchk (funfcn[0]) and
    # evaluate the objective (and requested derivatives) at the initial point
    if funfcn[0] == 'fun':
        initVals['f'] = funfcn[2](X, *args)
    elif funfcn[0] == 'fungrad':
        initVals['f'], initVals['g'] = funfcn[2](X, *args)
    elif funfcn[0] == 'fungradhess':
        initVals['f'], initVals['g'], HESSIAN = funfcn[2](X, *args)
    elif funfcn[0] == 'fun_then_grad':
        initVals['f'] = funfcn[2](X, *args)
        initVals['g'] = funfcn[3](X, *args)
# catch userFcn_ME
# optim_ME = MException('optimlib:fmincon:GradientError', ...
# getString(message('optimlib:fmincon:GradientError')));
# userFcn_ME = addCause(userFcn_ME,optim_ME);
# rethrow(userFcn_ME)
# end
# case 'fun_then_grad_then_hess'
# try
# initVals.f = feval(funfcn{3},X,varargin{:});
# catch userFcn_ME
# optim_ME = MException('optimlib:fmincon:ObjectiveError', ...
# getString(message('optimlib:fmincon:ObjectiveError')));
# userFcn_ME = addCause(userFcn_ME,optim_ME);
# rethrow(userFcn_ME)
# end
# try
# initVals.g = feval(funfcn{4},X,varargin{:});
# catch userFcn_ME
# optim_ME = MException('optimlib:fmincon:GradientError', ...
# getString(message('optimlib:fmincon:GradientError')));
# userFcn_ME = addCause(userFcn_ME,optim_ME);
# rethrow(userFcn_ME)
# end
# try
# HESSIAN = feval(funfcn{5},X,varargin{:});
# catch userFcn_ME
# optim_ME = MException('optimlib:fmincon:HessianError', ...
# getString(message('optimlib:fmincon:HessianError')));
# userFcn_ME = addCause(userFcn_ME,optim_ME);
# rethrow(userFcn_ME)
# end
# otherwise
# sys.stdout.write('optimlib:fmincon:UndefinedCallType'))
# end
# Check that the objective value is a scalar
    if np.size(initVals['f']) != 1:
sys.stdout.write('optimlib:fmincon:NonScalarObj')
# Check that the objective gradient is the right size
initVals['g'] = initVals['g'].flatten()
if initVals['g'].size != sizes['nVar']:
        print('optimlib:fmincon:InvalidSizeOfGradient '
              'optimlib:commonMsgs:InvalidSizeOfGradient', sizes['nVar'])
# Evaluate constraints
if confcn[0]=='fun':
        try:
            ctmp, ceqtmp = confcn[2](X, *args)
except:
pass
# if 'MATLAB:maxlhs'==userFcn_ME.identifier
# sys.stdout.write('optimlib:fmincon:InvalidHandleNonlcon')
# else
# optim_ME = MException('optimlib:fmincon:NonlconError', ...
# getString(message('optimlib:fmincon:NonlconError')));
# userFcn_ME = addCause(userFcn_ME,optim_ME);
# rethrow(userFcn_ME)
initVals['ncineq'] = ctmp.flatten()
initVals['nceq'] = ceqtmp.flatten()
initVals['gnc'] = np.zeros((sizes['nVar'],matlength(initVals['ncineq'])))
initVals['gnceq'] = np.zeros((sizes['nVar'],matlength(initVals['nceq'])))
elif confcn[0]=='fungrad':
try:
            ctmp, ceqtmp, initVals['gnc'], initVals['gnceq'] = confcn[2](X, *args)
except:
raise NotImplementedError
# catch userFcn_ME
# optim_ME = MException('optimlib:fmincon:NonlconError', ...
# getString(message('optimlib:fmincon:NonlconError')));
# userFcn_ME = addCause(userFcn_ME,optim_ME);
# rethrow(userFcn_ME)
# end
initVals['ncineq'] = ctmp.flatten()
initVals['nceq'] = ceqtmp.flatten()
elif confcn[0]== 'fun_then_grad':
try:
            ctmp, ceqtmp = confcn[2](X, *args)
        except RuntimeWarning:
            print('optimlib:fmincon:NonlconError '
                  'optimlib:fmincon:NonlconError')
initVals['ncineq'] = ctmp.flatten()
initVals['nceq'] = ceqtmp.flatten()
try:
            initVals['gnc'], initVals['gnceq'] = confcn[3](X, *args)
        except:
            print('optimlib:fmincon:NonlconFunOrGradError '
                  'optimlib:fmincon:NonlconFunOrGradError')
    elif not confcn[0]:
# No nonlinear constraints. Reshaping of empty quantities is done later
# in this file, where both cases, (i) no nonlinear constraints and (ii)
# nonlinear constraints that have one type missing (equalities or
# inequalities), are handled in one place
initVals['ncineq'] = np.array([])
initVals['nceq'] = np.array([])
initVals['gnc'] = np.array([])
initVals['gnceq'] = np.array([])
else:
        sys.stdout.write('optimlib:fmincon:UndefinedCallType')
# Check for non-double data typed values returned by user functions
# if not ( isoptimargdbl('FMINCON', {'f','g','H','c','ceq','gc','gceq'}, ...
# initVals.f, initVals.g, HESSIAN, initVals.ncineq, initVals.nceq, initVals.gnc, initVals.gnceq) )
# error('optimlib:fmincon:NonDoubleFunVal',getString(message('optimlib:commonMsgs:NonDoubleFunVal','FMINCON')));
# end
sizes['mNonlinEq'] = matlength(initVals['nceq'])
sizes['mNonlinIneq'] = matlength(initVals['ncineq'])
    # Make sure empty constraints and their derivatives have correct sizes (not 0-by-0):
    if not initVals['ncineq'].size:
        initVals['ncineq'] = initVals['ncineq'].reshape(0, 1)
    if not initVals['nceq'].size:
        initVals['nceq'] = initVals['nceq'].reshape(0, 1)
    if not initVals['gnc'].size:
        initVals['gnc'] = initVals['gnc'].reshape(sizes['nVar'], 0)
    if not initVals['gnceq'].size:
        initVals['gnceq'] = initVals['gnceq'].reshape(sizes['nVar'], 0)
    cgrow, cgcol = initVals['gnc'].shape
    ceqgrow, ceqgcol = initVals['gnceq'].shape
    if cgrow != sizes['nVar'] or cgcol != sizes['mNonlinIneq']:
        sys.stdout.write('optimlib:fmincon:WrongSizeGradNonlinIneq %d %d'
                         % (sizes['nVar'], sizes['mNonlinIneq']))
    if ceqgrow != sizes['nVar'] or ceqgcol != sizes['mNonlinEq']:
        sys.stdout.write('optimlib:fmincon:WrongSizeGradNonlinEq %d %d'
                         % (sizes['nVar'], sizes['mNonlinEq']))
# if diagnostics:
# # Do diagnostics on information so far
# diagnose('fmincon',OUTPUT,flags.grad,flags.hess,flags.constr,flags.gradconst,...
# XOUT,sizes.mNonlinEq,sizes.mNonlinIneq,lin_eq,lin_ineq,l,u,funfcn,confcn);
# Create default structure of flags for finitedifferences:
# This structure will (temporarily) ignore some of the features that are
# algorithm-specific (e.g. scaling and fault-tolerance) and can be turned
# on later for the main algorithm.
    finDiffFlags['fwdFinDiff'] = options['FinDiffType'] == 'forward'
finDiffFlags['scaleObjConstr'] = False # No scaling for now
finDiffFlags['chkFunEval'] = False # No fault-tolerance yet
finDiffFlags['chkComplexObj'] = False # No need to check for complex values
finDiffFlags['isGrad'] = True # Scalar objective
# For parallel finite difference (if needed) we need to send the function
# handles now to the workers. This avoids sending the function handles in
# every iteration of the solver. The output from 'setOptimFcnHandleOnWorkers'
# is a onCleanup object that will perform cleanup task on the workers.
UseParallel = defaultopt['UseParallel']
# cleanupObj = setOptimFcnHandleOnWorkers(UseParallel,funfcn,confcn); %#ok<NASGU>
# Check derivatives
if (derivativeCheck and \
# User wants to check derivatives...
(flags['grad'] or \
# of either objective or ...
flags['gradconst'] and sizes['mNonlinEq']+sizes['mNonlinIneq'] > 0)): # nonlinear constraint function.
# validateFirstDerivatives(funfcn,confcn,X, ...
pass
    # call algorithm
    # NOTE: the solver back-ends invoked below (nlconst, barrier, sqpInterface,
    # sqpLineSearch) and several helpers (getIpOptions, getSQPOptions, verbosity,
    # optionFeedback) come from the MATLAB original and are not ported in this file.
    if OUTPUT['algorithm'] == activeSet: # active-set
        defaultopt['MaxIter'] = 400; defaultopt['MaxFunEvals'] = 100*numberOfVariables
        defaultopt['TolX'] = 1e-6
        defaultopt['Hessian'] = 'off'
        problemInfo = np.array(()) # No problem-related data
        X, FVAL, LAMBDA, EXITFLAG, OUTPUT, GRAD, HESSIAN = \
            nlconst(funfcn, X, l, u, A, B, Aeq, Beq, confcn, options, defaultopt,
                    finDiffFlags, verbosity, flags, initVals, problemInfo, optionFeedback, *args)
# elseif OUTPUT['algorithm']==trustRegionReflective) # trust-region-reflective #TODO
# if (funfcn{1}, =='fun_then_grad_then_hess') || strcmpi(funfcn{1}, 'fungradhess'))
# Hstr = [];
# elseif (strcmpi(funfcn{1}, 'fun_then_grad') || strcmpi(funfcn{1}, 'fungrad'))
# n = length(XOUT);
# Hstr = optimget(options,'HessPattern',defaultopt,'fast');
# if ischar(Hstr)
# if strcmpi(Hstr,'sparse(ones(numberofvariables))')
# Hstr = sparse(ones(n));
# else
# sys.stdout.write('optimlib:fmincon:InvalidHessPattern')
# end
# end
# checkoptionsize('HessPattern', size(Hstr), n);
# end
#
# defaultopt.MaxIter = 400; defaultopt.MaxFunEvals = '100*numberofvariables'; defaultopt.TolX = 1e-6;
# defaultopt.Hessian = 'off';
# % Trust-region-reflective algorithm does not compute constraint
# % violation as it progresses. If the user requests the output structure,
# % we need to calculate the constraint violation at the returned
# % solution.
# if nargout > 3
# computeConstrViolForOutput = true;
# else
# computeConstrViolForOutput = false;
# end
#
# if isempty(Aeq)
# [X,FVAL,LAMBDA,EXITFLAG,OUTPUT,GRAD,HESSIAN] = ...
# sfminbx(funfcn,X,l,u,verbosity,options,defaultopt,computeLambda,initVals.f,initVals.g, ...
# HESSIAN,Hstr,flags.detailedExitMsg,computeConstrViolForOutput,optionFeedback,varargin{:});
# else
# [X,FVAL,LAMBDA,EXITFLAG,OUTPUT,GRAD,HESSIAN] = ...
# sfminle(funfcn,X,sparse(Aeq),Beq,verbosity,options,defaultopt,computeLambda,initVals.f, ...
# initVals.g,HESSIAN,Hstr,flags.detailedExitMsg,computeConstrViolForOutput,optionFeedback,varargin{:});
# end
    elif OUTPUT['algorithm'] == interiorPoint:
        defaultopt['MaxIter'] = 1000; defaultopt['MaxFunEvals'] = 3000; defaultopt['TolX'] = 1e-10
        defaultopt['Hessian'] = 'bfgs'
        mEq = lin_eq + sizes['mNonlinEq'] + np.count_nonzero(xIndices['fixed']) # number of equalities
        # Interior-point-specific options. Default values for lbfgs memory is 10, and
        # ldl pivot threshold is 0.01
        options = getIpOptions(options, sizes['nVar'], mEq, flags['constr'], defaultopt, 10, 0.01)
        X, FVAL, EXITFLAG, OUTPUT, LAMBDA, GRAD, HESSIAN = barrier(
            funfcn, X, A, B, Aeq, Beq, l, u, confcn, options['HessFcn'],
            initVals['f'], initVals['g'], initVals['ncineq'], initVals['nceq'],
            initVals['gnc'], initVals['gnceq'], HESSIAN,
            xIndices, options, optionFeedback, finDiffFlags, *args)
    elif OUTPUT['algorithm'] == sqp:
        defaultopt['MaxIter'] = 400; defaultopt['MaxFunEvals'] = 100*numberOfVariables
        defaultopt['TolX'] = 1e-6; defaultopt['Hessian'] = 'bfgs'
        # Validate options used by sqp
        options = getSQPOptions(options, defaultopt, sizes['nVar'])
        optionFeedback['detailedExitMsg'] = flags['detailedExitMsg']
        # Call algorithm
        X, FVAL, EXITFLAG, OUTPUT, LAMBDA, GRAD, HESSIAN = sqpInterface(
            funfcn, X, A, B, Aeq, Beq, l, u, confcn,
            initVals['f'], initVals['g'], initVals['ncineq'], initVals['nceq'],
            initVals['gnc'], initVals['gnceq'], sizes, options, finDiffFlags,
            verbosity, optionFeedback, *args)
    else: # sqpLegacy
        defaultopt['MaxIter'] = 400; defaultopt['MaxFunEvals'] = 100*numberOfVariables
        defaultopt['TolX'] = 1e-6; defaultopt['Hessian'] = 'bfgs'
        # Validate options used by sqp
        options = getSQPOptions(options, defaultopt, sizes['nVar'])
        optionFeedback['detailedExitMsg'] = flags['detailedExitMsg']
        # Call algorithm
        X, FVAL, EXITFLAG, OUTPUT, LAMBDA, GRAD, HESSIAN = sqpLineSearch(
            funfcn, X, A, B, Aeq, Beq, l, u, confcn,
            initVals['f'], initVals['g'], initVals['ncineq'], initVals['nceq'],
            initVals['gnc'], initVals['gnceq'], xIndices, options, finDiffFlags,
            verbosity, optionFeedback, *args)
    # The MATLAB original forces cleanup of an onCleanup handle object here;
    # the Python port has no cleanupObj, so there is nothing to clean up.
    return (X, FVAL, EXITFLAG, OUTPUT, LAMBDA, GRAD, HESSIAN)
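# A hypothetical usage sketch of this port (illustrative only; it can run only
# once the MATLAB solver back-ends referenced above are ported):
# import numpy as np
# x, fval, exitflag, output, lam, grad, hess = fmincon(
#     lambda x: x[0]**2 + 2*x[1]**2,  # objective, cf. the myfun example above
#     np.array([1.0, 2.0]),           # initial point X0
#     None,                           # NONLCON: no nonlinear constraints
#     {})                             # options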
|
[
"patlekano@gmail.com"
] |
patlekano@gmail.com
|
439c6864f92c55e7c8b75b59ee21c8437bc45269
|
08421afc1413bc8e6c0f9c8496b66e32efb75a00
|
/4-balance_and_prepeare.py
|
4cac2680b386ba4fef07f538e2c7a477410edeea
|
[] |
no_license
|
tpeet/siuts-thesis
|
90ecb839b65be753d743e899302bad422617ba44
|
21e8f546ec73d6dbbecfe0a15ee0405d10a66cc9
|
refs/heads/master
| 2021-01-12T02:49:05.367280
| 2017-01-12T17:35:54
| 2017-01-12T17:35:54
| 78,111,763
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,536
|
py
|
import time
import siuts
from os import listdir
from os.path import isfile, join
import pickle
import numpy as np
import warnings
import sklearn.utils.validation
import random
import operator
warnings.simplefilter('ignore', sklearn.utils.validation.DataConversionWarning)
def load_pickled_segments_from_file(filename, label, rec_id):
segments = siuts.load(filename)
segments_number = len(segments)
if segments_number == 0:
return np.empty([0]), np.empty([0]), np.empty([0])
labels = [label] * segments_number
rec_ids = [rec_id] * segments_number
return segments, labels, rec_ids
def join_segments(selected_recordings, segments_dir, data_filepath, labels_filepath, rec_ids_filepath):
selected_recordings_count = len(selected_recordings)
all_segments = []
all_labels = []
all_rec_ids = []
segments_count = {}
file_count = {}
if not isfile(data_filepath):
for counter, rec in enumerate(selected_recordings):
fname = rec.get_filename()
label = rec.label
rec_id = rec.id
rec_segments, labels, rec_ids = load_pickled_segments_from_file(segments_dir + fname + ".pickle", label,
rec_id)
if len(rec_segments) > 0 and len(labels) > 0:
processed_segments = siuts.scale_segments(rec_segments)
all_segments = all_segments + processed_segments
all_labels = all_labels + labels
all_rec_ids = all_rec_ids + rec_ids
specimen = rec.get_name()
if specimen in segments_count:
segments_count[specimen] += len(processed_segments)
file_count[specimen] += 1
else:
segments_count[specimen] = len(processed_segments)
file_count[specimen] = 1
if counter % 100 == 0:
print "{0}/{1}".format(counter, selected_recordings_count)
with open(data_filepath, 'wb') as f:
pickle.dump(np.array(all_segments), f, protocol=-1)
with open(labels_filepath, 'wb') as f:
pickle.dump(np.array(all_labels), f, protocol=-1)
with open(rec_ids_filepath, 'wb') as f:
pickle.dump(np.array(all_rec_ids), f, protocol=-1)
print "File count: " + str(file_count)
print
print "Segments count: " + str(segments_count)
def main():
plutof_recordings = siuts.load(siuts.plutof_metadata_path)
# count segments for each recording in testing data
for rec in plutof_recordings:
segments_path = siuts.plutof_segments_dir + rec.get_filename() + ".pickle"
if isfile(segments_path):
rec.segments_count = len(siuts.load(segments_path))
# separate testing and validation dataset
valid_recordings = []
test_recordings = []
segments_count = 0
for specimen in siuts.species_list:
recordings = sorted([x for x in plutof_recordings if x.get_name() == specimen and x.segments_count >= 2],
key=operator.attrgetter('segments_count'))
recordings.reverse()
sp_valid_recordings = []
sp_test_recordings = []
sp_valid_segments_count = 0
sp_test_segments_count = 0
for rec in recordings:
segments_count += rec.segments_count
if sp_valid_segments_count < sp_test_segments_count:
sp_valid_recordings.append(rec)
sp_valid_segments_count += rec.segments_count
else:
sp_test_recordings.append(rec)
sp_test_segments_count += rec.segments_count
valid_recordings = valid_recordings + sp_valid_recordings
test_recordings = test_recordings + sp_test_recordings
siuts.create_dir(siuts.dataset_dir)
training_segments_dir = siuts.xeno_segments_dir
testing_segments_dir = siuts.plutof_segments_dir
start = time.time()
print "Starting to join testing segments"
print
plutof_filenames = [x.split(".")[0] for x in listdir(testing_segments_dir) if isfile(join(testing_segments_dir, x))]
selected_testing_recordings = [x for x in test_recordings if x.get_filename() in plutof_filenames]
join_segments(selected_testing_recordings, testing_segments_dir, siuts.testing_data_filepath,
siuts.testing_labels_filepath, siuts.testing_rec_ids_filepath)
print
print "Joining testing segments took {0} seconds".format(time.time() - start)
print
start = time.time()
print
print "Starting to join validation segments"
selected_validation_recordings = [x for x in valid_recordings if x.get_filename() in plutof_filenames]
join_segments(selected_validation_recordings, testing_segments_dir, siuts.validation_data_filepath,
siuts.validation_labels_filepath, siuts.validation_rec_ids_filepath)
print
print "Joining validation segments took {0} seconds".format(time.time() - start)
print
start = time.time()
max_segments = 0
species_segments_count = {}
species_files_count = {}
print
print "Finding species from training set which has the maximum number of segments"
train_filenames = [x.split(".")[0] for x in listdir(training_segments_dir) if
isfile(join(training_segments_dir, x))]
species = siuts.species_list
training_recordings = siuts.load(siuts.xeno_metadata_path)
for specimen in species:
specimen_files = [x for x in training_recordings if
x.get_name() == specimen and x.get_filename() in train_filenames]
species_files_count[specimen] = len(specimen_files)
for rec in specimen_files:
fname = rec.get_filename()
segs = siuts.load(siuts.xeno_segments_dir + fname + ".pickle")
if specimen in species_segments_count:
species_segments_count[specimen] += len(segs)
else:
species_segments_count[specimen] = len(segs)
if species_segments_count[specimen] > max_segments:
max_segments = species_segments_count[specimen]
print "Species files count"
print species_files_count
print "Species segments count:"
print species_segments_count
print
print "Max segments for species: " + str(max_segments)
print
# join training segments
for specimen in species:
print ""
print "Joining training segments for {}".format(specimen)
specimen_files = [x for x in training_recordings if
x.get_name() == specimen and x.get_filename() in train_filenames]
specimen_files_count = len(specimen_files)
all_segments = np.array([])  # placeholder; replaced by the first batch of segments below (np.empty without parentheses assigned the function itself)
all_labels = []
all_rec_ids = []
filepath_prefix = "{0}{1}_".format(siuts.dataset_dir, specimen)
labels_fname = filepath_prefix + "labels.pickle"
rec_ids_fname = filepath_prefix + "rec_ids.pickle"
rec_segments, labels, rec_ids = [], [], []
if not (isfile(labels_fname) and isfile(rec_ids_fname)):
processed_segments = []
for counter, rec in enumerate(specimen_files):
fname = rec.get_filename()
label = rec.label
rec_id = rec.id
rec_segments, labels, rec_ids = load_pickled_segments_from_file(
siuts.xeno_segments_dir + fname + ".pickle", label, rec_id)
if len(rec_segments) > 0 and len(labels) > 0:
processed_segments = np.array(siuts.scale_segments(rec_segments))
all_labels = all_labels + labels
all_rec_ids = all_rec_ids + rec_ids
if len(all_segments) == 0:  # first non-empty recording for this species
all_segments = processed_segments
else:
all_segments = np.vstack((all_segments, processed_segments))
if counter % 100 == 0:
print "{0}/{1}".format(counter, specimen_files_count)
del rec_segments
del processed_segments
print "Saving joined files to disk"
np.random.shuffle(all_segments)  # random.shuffle can duplicate rows of multi-dimensional numpy arrays
nr_samples = len(all_segments)
# duplicating data in minority classes
if nr_samples < max_segments:
data_to_append = np.copy(all_segments)
for j in range(int(np.floor(max_segments / nr_samples)) - 1):
all_segments = np.concatenate((all_segments, data_to_append))
all_segments = np.concatenate((all_segments, data_to_append[:(max_segments - len(all_segments))]))
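# e.g. with max_segments=10 and nr_samples=4: the loop above appends
# floor(10/4) - 1 = 1 full copy (8 segments total), then the first
# 10 - 8 = 2 segments are appended once more to reach exactly max_segments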
nr_of_files = int(np.ceil(float(max_segments) / siuts.samples_in_file))
# save segments into splitted files
for i in range(nr_of_files):
with open("{0}/{1}-training_{2}.pickle".format(siuts.dataset_dir, specimen, i), 'wb') as f:
pickle.dump(all_segments[i * siuts.samples_in_file:(i + 1) * siuts.samples_in_file], f, protocol=-1)
print specimen + " segments saved"
with open(labels_fname, 'wb') as f:
pickle.dump(np.array(all_labels), f, protocol=-1)
with open(rec_ids_fname, 'wb') as f:
pickle.dump(np.array(all_rec_ids), f, protocol=-1)
print "Joining training segments took {0} seconds".format(time.time() - start)
if __name__ == "__main__":
main()
|
[
"saiber@gmail.com"
] |
saiber@gmail.com
|
e2b58784d211849ba20dae9c0a6d4c2df1abdd90
|
043baf7f2cd8e40150bbd4c178879a5dd340348d
|
/dinners/tests/factories.py
|
f645bc02cb2e650b3c6a48bf5aa538135afb98c4
|
[] |
no_license
|
tjguk/ironcage
|
1d6d70445b1da9642e1c70c72832c2738f9a942e
|
914b8e60819be7b449ecc77933df13f8b100adb0
|
refs/heads/master
| 2021-05-06T10:20:35.486184
| 2017-11-20T16:34:17
| 2017-11-20T16:34:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,094
|
py
|
from datetime import datetime, timezone
from accounts.tests.factories import create_user
from dinners.models import Booking
from dinners.menus import MENUS
def create_contributors_booking(user=None, venue='contributors'):
if user is None:
user = create_user()
menu = MENUS[venue]
return Booking.objects.create(
guest=user,
venue=venue,
starter=menu['starter'][0][0],
main=menu['main'][0][0],
pudding=menu['pudding'][0][0],
)
def create_paid_booking(user=None):
if user is None:
user = create_user()
venue = 'conference'
menu = MENUS[venue]
return Booking.objects.create(
guest=user,
venue=venue,
starter=menu['starter'][0][0],
main=menu['main'][0][0],
pudding=menu['pudding'][0][0],
stripe_charge_id='ch_abcdefghijklmnopqurstuvw',
stripe_charge_created=datetime.fromtimestamp(1495355163, tz=timezone.utc)
)
def create_all_bookings(venue):
for _ in range(MENUS[venue]['capacity']):
create_contributors_booking(venue=venue)
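# Usage sketch (hypothetical test code, not part of this module):
#   booking = create_paid_booking()
#   assert booking.stripe_charge_id.startswith('ch_')
#   create_all_bookings('contributors')  # fills the venue to capacity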
|
[
"peter.inglesby@gmail.com"
] |
peter.inglesby@gmail.com
|
f333d416d9163011ed8c06b1d0e2be8ba3de09b0
|
dd2217a5c7be79d708b1cfba14e997fa551769ac
|
/dirvenv/bin/symilar
|
145336d6663c3fd751207e342484f587b6b2b508
|
[] |
no_license
|
davigzzz/django_base
|
1df91d39d35fae080e10c2b6913697ee5aba86ef
|
e1b1cd10fe29d40b46b35d165a764cf023090f91
|
refs/heads/master
| 2022-05-25T22:36:04.039920
| 2020-05-01T08:53:00
| 2020-05-01T08:53:00
| 260,415,826
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 269
|
#!/home/davidguzman/Documentos/Python/directorio/dirvenv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from pylint import run_symilar
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(run_symilar())
|
[
"david.guzman@presidencia.gob.mx"
] |
david.guzman@presidencia.gob.mx
|
|
75391e7604dd44bcd5816846ec2d9e53b921ff19
|
12490ecaa1ab982bf93f72d664917e833fb6bfe5
|
/th_watchdog/record.py
|
814ae8f8ce3ddd87cc6d16796c9482f3e63525af
|
[
"MIT"
] |
permissive
|
hwjeremy/th-watchdog
|
750a17431279a64009c7953e6821953ed513ceb1
|
c32682f838fffa3396cabc3d83eeb4960c765fc9
|
refs/heads/master
| 2020-03-28T06:01:38.735891
| 2018-09-07T11:05:21
| 2018-09-07T11:05:21
| 147,809,718
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,340
|
py
|
"""
Thornleigh Farm - VPN Watchdog
Record Module
author: hugh@blinkybeach.com
"""
from th_watchdog.vpnstate import VPNState
from th_watchdog.emailstate import EmailState
from typing import TypeVar
from datetime import datetime
from typing import Optional
from typing import Type
import os
T = TypeVar('T', bound='Record')
class Record:
"""
A string record of VPN connection state
"""
TIME_FORMAT = '[%Y-%m-%d_%H:%M:%S]'
NULL_VALUE = 'null\n'
def __init__(
self,
time: datetime,
vpn: VPNState,
email: EmailState
) -> None:
# Example
# [2018-12-01_12:51:12],down,unsent
# time,vpnstate,emailsent
if not isinstance(time, datetime):
raise TypeError('time must be of type `datetime`')
if not isinstance(vpn, VPNState):
raise TypeError('vpn must be of type `VPNState`')
if not isinstance(email, EmailState):
raise TypeError('email must be of type `EmailState')
self._time = time
self._vpn = vpn
self._email = email
return
serialised = property(lambda s: s._serialise())
vpn = property(lambda s: s._vpn)
time = property(lambda s: s._time)
email = property(lambda s: s._email)
def _serialise(self) -> str:
"""
Return a string serialised record
"""
record = self._time.strftime(self.TIME_FORMAT)
record += ',' + self._vpn.value
record += ',' + self._email.value
record += '\n'
return record
@classmethod
def from_string(cls: Type[T], raw_string) -> Optional[T]:
"""
Return a new record
"""
if not isinstance(raw_string, str):
raise TypeError('raw_string must be of type `str`')
if raw_string == Record.NULL_VALUE:
return None
if raw_string[-1] == '\n':
raw_string = raw_string[:-1]
pieces = raw_string.split(',')
if len(pieces) != 3:
raise ValueError('Unexpected record format')
time = datetime.strptime(pieces[0], Record.TIME_FORMAT)
vpn = VPNState(pieces[1])
email = EmailState(pieces[2])
record = cls(time, vpn, email)
return record
@classmethod
def from_file(cls: Type[T], filename: str) -> Optional[T]:
"""
Return a Record from a file
"""
if not os.path.exists(filename):
return None
with open(filename, 'r') as rfile:
return cls.from_string(rfile.read())
@classmethod
def nominal(cls, filename: str) -> None:
"""
Write a nominal state
"""
assert isinstance(filename, str)
now = datetime.utcnow()
record = cls(now, VPNState.UP, EmailState.NOT_APPLICABLE)
record.write(filename)
return
@classmethod
def write_null(cls, filename: str) -> None:
"""
Write a null record to a file
"""
assert isinstance(filename, str)
with open(filename, 'w') as wfile:
wfile.write(Record.NULL_VALUE)
return
def write(self, filename: str) -> None:
"""
Write this record to a file
"""
with open(filename, 'w') as wfile:
wfile.write(self._serialise())
return
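if __name__ == '__main__':
    # Round-trip sketch (illustrative only; assumes VPNState and EmailState
    # are simple string-valued enums, as their usage above suggests).
    now = datetime.utcnow()
    demo = Record(now, VPNState.UP, EmailState.NOT_APPLICABLE)
    line = demo.serialised
    print(line, end='')
    assert Record.from_string(line).vpn is VPNState.UP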
|
[
"hugh.jeremy@gmail.com"
] |
hugh.jeremy@gmail.com
|
80afc5ec70fc015cd26b8dc0f31017c3f9d92911
|
9dd9cb60e69524c0cadf52e6805bdba787fe0097
|
/swich.py
|
7c9a4928081774df0ebe5ed95f072b1f3181a011
|
[] |
no_license
|
yoshinGO/study_click
|
5893cb18e6111b4f8ed718de46bb804336a8d329
|
385649b089ffba841dda874d8100b72c6df9fded
|
refs/heads/master
| 2020-04-08T19:06:03.591650
| 2018-11-29T09:18:15
| 2018-11-29T09:18:15
| 159,639,706
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 292
|
py
|
import click
@click.command()
@click.option('--upper', 'transformation', flag_value='upper')
@click.option('--lower', 'transformation', flag_value='lower', default=True)
def cmd(transformation):
click.echo(transformation)
def main():
cmd()
if __name__ == '__main__':
main()
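# Example shell session (illustrative):
#   $ python swich.py --upper
#   upper
#   $ python swich.py
#   lower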
|
[
"kamemygenki0124@gmail.com"
] |
kamemygenki0124@gmail.com
|
5c60e3f3bde17a701467d4d29a986a70446e7d9b
|
bbce6d829118cc81b7d20919c587df1fa83d87f5
|
/tool/utils.py
|
dc9983db10d7802a953956de8fdfb1f083a8e2e9
|
[] |
no_license
|
chilung/NCTU_Adv_DNN_HW2_2
|
02f63610bf45b89672fb978f0b8dbdfbc4e6566a
|
7018b6cd01b98ca88a30c0f7d0c4d2c188668d09
|
refs/heads/main
| 2023-01-19T07:19:36.302639
| 2020-11-22T08:24:25
| 2020-11-22T08:24:25
| 314,708,180
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,179
|
py
|
import sys
import os
import time
import math
import numpy as np
import itertools
import struct # get_image_size
import imghdr # get_image_size
def sigmoid(x):
return 1.0 / (np.exp(-x) + 1.)
def softmax(x):
x = np.exp(x - np.expand_dims(np.max(x, axis=1), axis=1))
x = x / np.expand_dims(x.sum(axis=1), axis=1)
return x
def bbox_iou(box1, box2, x1y1x2y2=True):
# print('iou box1:', box1)
# print('iou box2:', box2)
if x1y1x2y2:
mx = min(box1[0], box2[0])
Mx = max(box1[2], box2[2])
my = min(box1[1], box2[1])
My = max(box1[3], box2[3])
w1 = box1[2] - box1[0]
h1 = box1[3] - box1[1]
w2 = box2[2] - box2[0]
h2 = box2[3] - box2[1]
else:
w1 = box1[2]
h1 = box1[3]
w2 = box2[2]
h2 = box2[3]
mx = min(box1[0], box2[0])
Mx = max(box1[0] + w1, box2[0] + w2)
my = min(box1[1], box2[1])
My = max(box1[1] + h1, box2[1] + h2)
uw = Mx - mx
uh = My - my
cw = w1 + w2 - uw
ch = h1 + h2 - uh
carea = 0
if cw <= 0 or ch <= 0:
return 0.0
area1 = w1 * h1
area2 = w2 * h2
carea = cw * ch
uarea = area1 + area2 - carea
return carea / uarea
def nms_cpu(boxes, confs, nms_thresh=0.5, min_mode=False):
# print(boxes.shape)
x1 = boxes[:, 0]
y1 = boxes[:, 1]
x2 = boxes[:, 0] + boxes[:, 2]
y2 = boxes[:, 1] + boxes[:, 3]
areas = (x2 - x1) * (y2 - y1)
order = confs.argsort()[::-1]
keep = []
while order.size > 0:
idx_self = order[0]
idx_other = order[1:]
keep.append(idx_self)
xx1 = np.maximum(x1[idx_self], x1[idx_other])
yy1 = np.maximum(y1[idx_self], y1[idx_other])
xx2 = np.minimum(x2[idx_self], x2[idx_other])
yy2 = np.minimum(y2[idx_self], y2[idx_other])
w = np.maximum(0.0, xx2 - xx1)
h = np.maximum(0.0, yy2 - yy1)
inter = w * h
if min_mode:
over = inter / np.minimum(areas[order[0]], areas[order[1:]])
else:
over = inter / (areas[order[0]] + areas[order[1:]] - inter)
inds = np.where(over <= nms_thresh)[0]
order = order[inds + 1]
return np.array(keep)
def plot_boxes_cv2(img, boxes, savename=None, class_names=None, color=None):
import cv2
img = np.copy(img)
colors = np.array([[1, 0, 1], [0, 0, 1], [0, 1, 1], [0, 1, 0], [1, 1, 0], [1, 0, 0]], dtype=np.float32)
def get_color(c, x, max_val):
ratio = float(x) / max_val * 5
i = int(math.floor(ratio))
j = int(math.ceil(ratio))
ratio = ratio - i
r = (1 - ratio) * colors[i][c] + ratio * colors[j][c]
return int(r * 255)
width = img.shape[1]
height = img.shape[0]
for i in range(len(boxes)):
box = boxes[i]
x1 = int((box[0] - box[2] / 2.0) * width)
y1 = int((box[1] - box[3] / 2.0) * height)
x2 = int((box[0] + box[2] / 2.0) * width)
y2 = int((box[1] + box[3] / 2.0) * height)
if color:
rgb = color
else:
rgb = (255, 0, 0)
if len(box) >= 7 and class_names:
cls_conf = box[5]
cls_id = box[6]
print('%s: %f' % (class_names[cls_id], cls_conf))
classes = len(class_names)
offset = cls_id * 123457 % classes
red = get_color(2, offset, classes)
green = get_color(1, offset, classes)
blue = get_color(0, offset, classes)
if color is None:
rgb = (red, green, blue)
img = cv2.putText(img, class_names[cls_id], (x1, y1), cv2.FONT_HERSHEY_SIMPLEX, 1.2, rgb, 1)
img = cv2.rectangle(img, (x1, y1), (x2, y2), rgb, 1)
if savename:
print("save plot results to %s" % savename)
cv2.imwrite(savename, img)
return img
def read_truths(lab_path):
if not os.path.exists(lab_path):
return np.array([])
if os.path.getsize(lab_path):
truths = np.loadtxt(lab_path)
truths = truths.reshape(truths.size / 5, 5) # to avoid single truth problem
return truths
else:
return np.array([])
def load_class_names(namesfile):
class_names = []
with open(namesfile, 'r') as fp:
lines = fp.readlines()
for line in lines:
line = line.rstrip()
class_names.append(line)
return class_names
def post_processing(img, conf_thresh, nms_thresh, output):
# anchors = [12, 16, 19, 36, 40, 28, 36, 75, 76, 55, 72, 146, 142, 110, 192, 243, 459, 401]
# num_anchors = 9
# anchor_masks = [[0, 1, 2], [3, 4, 5], [6, 7, 8]]
# strides = [8, 16, 32]
# anchor_step = len(anchors) // num_anchors
# t1 = time.time()
if type(output).__name__ != 'ndarray':
output = output.cpu().detach().numpy()
# [batch, num, 4]
box_array = output[:, :, :4]
# [batch, num, num_classes]
confs = output[:, :, 4:]
# [batch, num, num_classes] --> [batch, num]
max_conf = np.max(confs, axis=2)
max_id = np.argmax(confs, axis=2)
# t2 = time.time()
bboxes_batch = []
for i in range(box_array.shape[0]):
argwhere = max_conf[i] > conf_thresh
l_box_array = box_array[i, argwhere, :]
l_max_conf = max_conf[i, argwhere]
l_max_id = max_id[i, argwhere]
keep = nms_cpu(l_box_array, l_max_conf, nms_thresh)
bboxes = []
if (keep.size > 0):
l_box_array = l_box_array[keep, :]
l_max_conf = l_max_conf[keep]
l_max_id = l_max_id[keep]
for j in range(l_box_array.shape[0]):
bboxes.append([l_box_array[j, 0], l_box_array[j, 1], l_box_array[j, 2], l_box_array[j, 3], l_max_conf[j], l_max_conf[j], l_max_id[j]])
bboxes_batch.append(bboxes)
# t3 = time.time()
# print('-----------------------------------')
# print(' max and argmax : %f' % (t2 - t1))
# print(' nms : %f' % (t3 - t2))
# print('Post processing total : %f' % (t3 - t1))
# print('-----------------------------------')
return bboxes_batch
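if __name__ == "__main__":
    # Toy sanity check for nms_cpu (boxes are x, y, w, h): the two heavily
    # overlapping boxes collapse to the higher-confidence one.
    demo_boxes = np.array([[0., 0., 10., 10.], [1., 1., 10., 10.], [20., 20., 5., 5.]])
    demo_confs = np.array([0.9, 0.8, 0.7])
    print(nms_cpu(demo_boxes, demo_confs, nms_thresh=0.5))  # -> [0 2]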
|
[
"chilung.cs06g@nctu.edu.tw"
] |
chilung.cs06g@nctu.edu.tw
|
37dd9f3ef68fbbb69865955ba2e5916c0ab0bdba
|
e63dfce78be34d128e6736a86c62e68302befc4e
|
/signin.py
|
993c432f58f40bf59d151c58375677d9d18a70dd
|
[] |
no_license
|
spheppner/Selenium-Practice
|
c230c20adf1bc312e06a74c26abf545ac15f5629
|
9747b797721e44cb36ddca21c84d4ec6effecc95
|
refs/heads/master
| 2021-10-19T07:44:16.864487
| 2019-02-19T09:37:58
| 2019-02-19T09:37:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,064
|
py
|
import unittest
from random import randint
from selenium import webdriver
from selenium.webdriver.support.ui import Select
from mimesis import Person
from mimesis import Address
from mimesis import Text
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
en = Address('en')
person = Person('en')
text = Text('en')
class SignIn(unittest.TestCase):
@classmethod
def setUpClass(inst):
inst.driver = webdriver.Chrome()
inst.driver.get('http://automationpractice.com/index.php')
@classmethod
def tearDownClass(inst):
inst.driver.quit()
def test_a_Enter_email(self):
driver = self.driver
driver.find_element_by_class_name('login').click()
assert 'Login' in self.driver.title
# invalid email
driver.find_element_by_name('email_create').send_keys('email@')
driver.find_element_by_id('SubmitCreate').click()
assert driver.find_element_by_id('create_account_error').is_enabled()
# valid email
enter_email = driver.find_element_by_name('email_create')
enter_email.clear()
global random_email
random_email = person.email()
enter_email.send_keys(random_email)
driver.find_element_by_id('SubmitCreate').click()
transition = WebDriverWait(self.driver, 10).until(EC.url_contains('#account-creation'))
assert transition
# try to register without required fields
def test_b_Required_fields(self):
driver = self.driver
driver.find_element_by_id('submitAccount').click()
alert = driver.find_element_by_css_selector('.alert.alert-danger')
assert alert
# sequential tests all required fields
# make sure that corresponding errors disappear
def test_c_First_Name(self):
driver = self.driver
driver.find_element_by_id('id_gender1').click()
first_name = driver.find_element_by_id('customer_firstname')
first_name.clear()
global first
first = person.name()
first_name.send_keys(first)
driver.find_element_by_id('submitAccount').click()
alert = driver.find_element_by_css_selector('.alert.alert-danger')
assert 'firstname' not in alert.text
def test_d_Last_Name(self):
driver = self.driver
last_name = driver.find_element_by_id('customer_lastname')
last_name.clear()
global last
last = person.surname()
last_name.send_keys(last)
driver.find_element_by_id('submitAccount').click()
alert = driver.find_element_by_css_selector('.alert.alert-danger')
assert 'lastname' not in alert.text
def test_e_Prefilled_Email(self):
driver = self.driver
email = driver.find_element_by_id('email')
assert email.get_attribute('value') == random_email
def test_f_Prefilled_Address_Name(self):
driver = self.driver
address_first_name = driver.find_element_by_id('firstname')
assert address_first_name.get_attribute('value') == first
address_last_name = driver.find_element_by_id('lastname')
assert address_last_name.get_attribute('value') == last
def test_g_Address(self):
driver = self.driver
driver.find_element_by_id('address1').send_keys(en.address())
driver.find_element_by_id('submitAccount').click()
alert = driver.find_element_by_css_selector('.alert.alert-danger')
assert 'address1 is required' not in alert.text
def test_h_City_and_State(self):
driver = self.driver
driver.find_element_by_id('city').send_keys(en.city())
driver.find_element_by_id('submitAccount').click()
alert = driver.find_element_by_css_selector('.alert.alert-danger')
assert 'city' not in alert.text
states_dropdown = driver.find_element_by_id('id_state')
state = Select(states_dropdown)
state.select_by_index(randint(0,52))
driver.find_element_by_id('submitAccount').click()
alert = driver.find_element_by_css_selector('.alert.alert-danger')
assert 'State' not in alert.text
def test_i_ZIP(self):
driver = self.driver
driver.find_element_by_id('postcode').send_keys(en.zip_code())
driver.find_element_by_id('submitAccount').click()
alert = driver.find_element_by_css_selector('.alert.alert-danger')
assert 'Zip/Postal' not in alert.text
def test_j_Mobile_Phone(self):
driver = self.driver
driver.find_element_by_id('phone_mobile').send_keys(person.telephone())
driver.find_element_by_id('submitAccount').click()
alert = driver.find_element_by_css_selector('.alert.alert-danger')
assert 'phone number' not in alert.text
def test_k_Change_Alias(self):
driver = self.driver
address_alias = driver.find_element_by_id('alias')
address_alias.clear()
address_alias.send_keys('Default')
# fill optional fields
def test_l_Day_of_Birth(self):
driver = self.driver
days_dropdown = driver.find_element_by_id('days')
Select(days_dropdown).select_by_index(randint(0,29))
driver.find_element_by_id('submitAccount').click()
alert = driver.find_element_by_css_selector('.alert.alert-danger')
assert 'Invalid date of birth' in alert.text
month_dropdown = driver.find_element_by_id('months')
Select(month_dropdown).select_by_index(randint(0,11))
driver.find_element_by_id('submitAccount').click()
alert = driver.find_element_by_css_selector('.alert.alert-danger')
assert 'Invalid date of birth' in alert.text
years_dropdown = driver.find_element_by_id('years')
Select(years_dropdown).select_by_value(str(randint(1900, 2019)))
driver.find_element_by_id('submitAccount').click()
alert = driver.find_element_by_css_selector('.alert.alert-danger')
assert 'Invalid date of birth' not in alert.text
def test_m_Other_fields(self):
driver = self.driver
driver.find_element_by_id('other').send_keys(text.text(quantity=3))
driver.find_element_by_id('phone').send_keys(person.telephone())
# need to come up with some assert
def test_n_Password_And_Finish_Account_Creation(self):
driver = self.driver
password = driver.find_element_by_id('passwd')
password.clear()
password.send_keys(person.password(4))
driver.find_element_by_id('submitAccount').click()
alert = driver.find_element_by_css_selector('.alert.alert-danger')
assert 'passwd is invalid' in alert.text
password = driver.find_element_by_id('passwd')
password.clear()
password.send_keys(person.password(5))
driver.find_element_by_id('submitAccount').click()
WebDriverWait(driver, 10).until(EC.url_contains('controller=my-account'))
assert driver.title == 'My account - My Store'
if __name__ == "__main__":
unittest.main()
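# The single-letter prefixes (test_a_ ... test_n_) exploit unittest's
# alphabetical ordering so the registration flow runs as one sequence.
# Run with Chrome and a matching chromedriver on PATH:
#   $ python signin.py -v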
|
[
"a.gulyayko@yandex.ru"
] |
a.gulyayko@yandex.ru
|
eab75d02bd9ae077bd5932dfa94e6cadcaba0a25
|
75335c06eea045b297ba7810eec45e487218a39e
|
/server.py
|
a7e7b22b0ca0548b0deb3a8202bd36f63909a9ad
|
[] |
no_license
|
bamejia/racing_game_server
|
be410f2f498015b6c964550b324753670d07f4e5
|
acf0342b696a96c5a00f2e0b720ea94d8f525fd9
|
refs/heads/master
| 2021-07-23T18:46:05.891050
| 2020-02-07T13:40:45
| 2020-02-07T13:40:45
| 238,778,359
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,504
|
py
|
import socket
from model.game_model import GameModel
from _thread import *
from threading import Lock
from online_multiplayer.game_thread import game_thread
import json
from model.direction import Dir
import global_variables as gv
def server():
hostname = socket.gethostname()
ip = socket.gethostbyname(hostname)
# print("Enter port: ", end="")
# port = sys.stdin.readline().split("\n")[0]
# port = input("Enter port: ")
port = 7777
# print(port)
# if not port.isdigit():
# port = 0
# else:
# port = int(port)
# print(int(ip))
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
""" tries to create socket at host ip address and port"""
try:
# s.bind((host_ip, port))
s.bind((ip, port))
except socket.error as err:
print(err)
return
port = s.getsockname()[1]
print(f'{ip}:{port}')
""" sets the socket to start listening for incoming connections """
s.listen(2)
def client_connection_thread(client_connection, player_index, game_id, has_ended_ref):
ready_sent = False
highest = 0
client_connection.send(str(player_index).encode()) # sends to client if they are player 1 or 2
while True:
try:
player_input = json.loads(client_connection.recv(11))
if not player_input:
print("NOT PLAYER")
games[game_id][2].acquire()
has_ended_ref[0] = True
print("Game has ended")
client_connection.sendall("none".encode())
games[game_id][2].release()
break
else:
player_input = Dir[player_input]
if game_id in games:
game = games[game_id]
# if game[0].ready and not ready_sent:
# print("sending ready")
# ready_sent = True
# client_connection.sendall("ready".encode())
game[2].acquire()
game[1][player_index] = player_input
output_vehicles = []
for vehicle in game[0].vehicles:
# output_vehicles.append(vehicle.car_type)
for i in gv.CAR_TYPES:
if gv.CAR_TYPES[i] == vehicle.car_type:
output_vehicles.append(i)
break
output_vehicles.append(vehicle.x)
output_vehicles.append(vehicle.y)
output_vehicles.append(vehicle.health)
# output_vehicles.append(gv.WINDOW_W)
# output_vehicles.append(gv.WINDOW_L+gv.PLAYER_LENGTH)
# output_vehicles.append(1000)
game[2].release()
# print(output_vehicles)
# game_model_dict = get_json(game[0])
# game_model_str = json.dumps(game_model_dict)
# game_model_str = json.dumps(game_model_dict, indent=4)
if has_ended_ref[0]:
print("Game has ended")
client_connection.sendall("none".encode())
break
x = client_connection.send(json.dumps(output_vehicles).encode())
# highest = max(highest, x)
# print(highest)
else:
print("No game found")
client_connection.sendall("none".encode())
break
except Exception as err:
print("ERROR in client thread:", err)
break
has_ended_ref[0] = True
print("Connection Lost")
try:
del games[game_id]
print("closing game:", game_id)
except Exception as err:
print("game closed")
client_connection.close()
player_id = 0
games = {}
while True:
client_connection, client_ip = s.accept()
print("Connected to:", client_ip)
game_id = player_id // 2
player_index = player_id % 2
if player_id % 2 == 0:
has_ended_ref = [False]
game_model = GameModel(ready=False, num_players=2)
games[game_id] = (game_model, [Dir.NONE, Dir.NONE], Lock(), has_ended_ref)
print(f"Creating game: {game_id}, waiting for player 2")
start_new_thread(game_thread, (games[game_id],))
else:
if game_id in games:
print("Game Start!")
games[game_id][0].ready = True
else:
player_id += 1
game_id = player_id // 2
player_index = player_id % 2
games[game_id] = (GameModel(ready=False, num_players=2), [Dir.NONE, Dir.NONE], Lock(), [False])
print(f"Creating game: {game_id}, waiting for player 2")
start_new_thread(game_thread, (games[game_id],))
start_new_thread(client_connection_thread, (client_connection, player_index, game_id, games[game_id][3]))
player_id += 1
if player_id >= 999999999:
print("After 999,999,999 iterations, the server stops")
break
server()
# if game_id >= 50:
# print("Server is full, please try again later")
# continue
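# Minimal client sketch (hypothetical; mirrors the protocol used by
# client_connection_thread above):
#   import socket, json
#   from model.direction import Dir
#   c = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#   c.connect(("<server ip>", 7777))
#   player_index = int(c.recv(1))               # server sends "0" or "1"
#   c.send(json.dumps(Dir.NONE.name).encode())  # fits the server's recv(11)
#   vehicles = json.loads(c.recv(4096))         # flat list: type, x, y, health per car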
|
[
"bamejia@udel.edu"
] |
bamejia@udel.edu
|
1085012854b55da261b558a7bb7160e20fb49c5f
|
cd64ad6c0f1b962d41c42a7225944c95c228566b
|
/groups/models.py
|
41c437e02ae94e89da1aab91f7e73d8ae3dff5a2
|
[] |
no_license
|
ejesse/worldcup
|
c5a2555b8be8707ded4cfceefb93a3283529376a
|
55bbe316795822bbf94a714de8e188f5806d0837
|
refs/heads/master
| 2021-01-22T04:54:03.907718
| 2015-06-18T03:23:09
| 2015-06-18T03:23:09
| 37,630,736
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,128
|
py
|
from collections import OrderedDict
from django.core.exceptions import ValidationError
from django.db import models
from teams.models import Team
# Create your models here.
class Fixture(models.Model):
WIN = 'WIN'
DRAW = 'DRAW'
LOSS = 'LOSS'
home_team = models.ForeignKey(Team, related_name='home_team')
away_team = models.ForeignKey(Team, related_name='away_team')
home_team_score = models.IntegerField(null=True, blank=True)
away_team_score = models.IntegerField(null=True, blank=True)
start_time = models.DateTimeField(null=True, blank=True)
def save(self, force_insert=False, force_update=False, using=None,
update_fields=None):
if self.home_team_score is not None and self.away_team_score is None:
raise ValidationError("Must have a value for both scores or neither, found 1: home_team_score: %s away_team_score %s" % (self.home_team_score, self.away_team_score))
if self.home_team_score is None and self.away_team_score is not None:
raise ValidationError("Must have a value for both scores or neither, found 1: home_team_score: %s away_team_score %s" % (self.home_team_score, self.away_team_score))
if self.home_team == self.away_team:
raise ValidationError("Self and away team are the same, a team cannot play itself!")
return models.Model.save(self, force_insert=force_insert, force_update=force_update, using=using, update_fields=update_fields)
def __unicode__(self):
if self.home_team_score is None:
return u'%s v %s' % (self.home_team, self.away_team)
else:
return u'%s %s : %s %s' % (self.home_team, self.home_team_score, self.away_team_score, self.away_team)
def __str__(self):
return self.__unicode__()
def team_in_fixture(self, team):
if team == self.home_team or team == self.away_team:
return True
return False
def get_result_for_team(self, team):
if not self.team_in_fixture(team):
raise ValueError("Team %s is not in this fixture" % team)
if self.home_team_score is None:
return None
if team == self.home_team:
if self.home_team_score > self.away_team_score:
return Fixture.WIN
elif self.home_team_score < self.away_team_score:
return Fixture.LOSS
return Fixture.DRAW
else:
if self.home_team_score < self.away_team_score:
return Fixture.WIN
elif self.home_team_score > self.away_team_score:
return Fixture.LOSS
return Fixture.DRAW
def get_points_for_team(self, team, win=3, draw=1):
if not self.team_in_fixture(team):
raise ValueError("Team %s is not in this fixture" % team)
points = 0
result = self.get_result_for_team(team)
if result is not None:
if result == Fixture.WIN:
points = points + win
elif result == Fixture.DRAW:
points = points + draw
return points
class Group(models.Model):
group_letter = models.CharField(max_length=1, unique=True)
group_size = models.IntegerField(default=4)
teams = models.ManyToManyField(Team)
fixtures = models.ManyToManyField(Fixture)
def save(self, force_insert=False, force_update=False, using=None,
update_fields=None):
if self.id is not None:
if self.teams.count() != self.group_size:
raise ValidationError("Number of teams must equal group size")
return models.Model.save(self, force_insert=force_insert, force_update=force_update, using=using, update_fields=update_fields)
def __unicode__(self):
return u'Group %s' % (self.group_letter)
def __str__(self):
return self.__unicode__()
def check_teams_in_group(self, teams):
if not isinstance(teams, list):
teams = [teams]
invalid_teams = []
for team in teams:
if team not in self.teams.all():
invalid_teams.append(team)
if len(invalid_teams) > 0:
raise ValueError("Teams %s is not in group %s" % (invalid_teams, self.group_letter))
def get_fixtures_for_team(self, team):
fixtures = []
for fixture in self.fixtures.all():
if fixture.team_in_fixture(team):
fixtures.append(fixture)
return fixtures
def standings(self):
pass
def get_points_for_team(self, team):
self.check_teams_in_group(team)
points = 0
for f in self.get_fixtures_for_team(team):
points = points + f.get_points_for_team(team)
return points
def get_goal_differential_for_team(self, team):
self.check_teams_in_group(team)
gd = 0
for fixture in self.get_fixtures_for_team(team):
if fixture.home_team_score is not None:
if fixture.home_team == team:
gd = gd + (fixture.home_team_score - fixture.away_team_score)
else:
gd = gd + (fixture.away_team_score - fixture.home_team_score)
return gd
def get_head_to_head_goal_differential(self, team1, team2):
self.check_teams_in_group([team1,team2])
gd = 0
head_to_head_fixtures = []
for fixture in self.get_fixtures_for_team(team1):
if fixture.team_in_fixture(team2):
head_to_head_fixtures.append(fixture)
for fixture in head_to_head_fixtures:
if fixture.home_team_score is not None:
if fixture.home_team == team1:
gd = gd + (fixture.home_team_score - fixture.away_team_score)
else:
gd = gd + (fixture.away_team_score - fixture.home_team_score)
return gd
def get_goals_for_team(self, team):
self.check_teams_in_group(team)
fixtures = self.get_fixtures_for_team(team)
goals = 0
for fixture in fixtures:
if fixture.home_team_score is not None:
if team == fixture.home_team:
goals = goals + fixture.home_team_score
else:
goals = goals + fixture.away_team_score
return goals
def get_counted_fixtures_from_subset(self, team, other_teams):
all_teams = [team]
all_teams.extend(other_teams)
self.check_teams_in_group(all_teams)
team_fixtures = self.get_fixtures_for_team(team)
counted_fixtures = []
for fixture in team_fixtures:
for other_team in other_teams:
if fixture.team_in_fixture(other_team):
counted_fixtures.append(fixture)
return counted_fixtures
def get_points_for_team_from_subset(self, team, other_teams):
points = 0
for fixture in self.get_counted_fixtures_from_subset(team, other_teams):
points = points + fixture.get_points_for_team(team)
return points
def get_goal_differential_for_team_from_subset(self, team, other_teams):
gd = 0
for fixture in self.get_counted_fixtures_from_subset(team, other_teams):
if team == fixture.home_team:
gd = gd + (fixture.home_team_score - fixture.away_team_score)
else:
gd = gd + (fixture.away_team_score - fixture.home_team_score)
return gd
def get_goals_for_team_from_subset(self, team, other_teams):
goals = 0
for fixture in self.get_counted_fixtures_from_subset(team, other_teams):
if team == fixture.home_team:
goals = goals + fixture.home_team_score
else:
goals = goals + fixture.away_team_score
return goals
def get_standings(self):
"""
FIFA standard ranking and tie breakers:
The ranking of each team in each group will be determined as follows:
- points obtained in all group matches;
- goal difference in all group matches;
- number of goals scored in all group matches;
- If two or more teams are equal on the basis of the above three criteria, their rankings will be determined as follows:
- points obtained in the group matches between the teams concerned;
- goal difference in the group matches between the teams concerned;
- number of goals scored in the group matches between the teams concerned;
- drawing of lots by the FIFA Organising Committee.
"""
standings = OrderedDict()
comparable_teams = []
for team in self.teams.all():
comparable_teams.append(ComparableTeam(team, self))
comparable_teams = sorted(comparable_teams, reverse=True)
for team in comparable_teams:
standings[team.team] = team.points
return standings
class ComparableTeam():
def __init__(self, team, group):
self.team = team
self.group = group
self.points = self.group.get_points_for_team(self.team)
self.goal_differential = self.group.get_goal_differential_for_team(self.team)
self.goals_for = self.group.get_goals_for_team(self.team)
self.tied_with = []
def __lt__(self, other):
if self.points < other.points:
return True
if self.points > other.points:
return False
if self.goal_differential < other.goal_differential:
return True
if self.goal_differential > other.goal_differential:
return False
if self.goals_for < other.goals_for:
return True
if self.goals_for > other.goals_for:
return False
self.tied_with.append(other)
return False
def __le__(self, other):
raise NotImplementedError
def __eq__(self, other):
if self.points != other.points:
return False
if self.goal_differential != other.goal_differential:
return False
if self.goals_for != other.goals_for:
return False
self.tied_with.append(other)
return True
def __ne__(self, other):
if self.points != other.points:
return True
if self.goal_differential != other.goal_differential:
return True
if self.goals_for != other.goals_for:
return True
return False
def __gt__(self, other):
if self.points > other.points:
return True
if self.points < other.points:
return False
if self.goal_differential > other.goal_differential:
return True
if self.goal_differential < other.goal_differential:
return False
if self.goals_for > other.goals_for:
return True
if self.goals_for < other.goals_for:
return False
self.tied_with.append(other)
return False
def __ge__(self, other):
raise NotImplementedError
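# Usage sketch (hypothetical; assumes a populated database):
#   group = Group.objects.get(group_letter='A')
#   for team, points in group.get_standings().items():
#       print(team, points)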
|
[
"jesse@jesseemery.com"
] |
jesse@jesseemery.com
|
f96605b5a8a628ee2a52f60e5a1ebcd3540dd4da
|
7294c38acb21e7d30236134b263cc93461f74629
|
/cellardoor/serializers/json_serializer.py
|
eec9b5bbc0836d5ac5e57f99b6b7bd7ea3aae901
|
[
"MIT"
] |
permissive
|
msabramo/cellardoor
|
42b97aa75f51620c01e321c24e2556d2fcfd26f2
|
1811dfa198228552e3a6440dc5a78ae0c265f27f
|
refs/heads/master
| 2023-09-02T10:17:16.661250
| 2015-01-29T20:58:08
| 2015-01-29T20:58:08
| 30,115,197
| 0
| 0
| null | 2015-01-31T14:54:28
| 2015-01-31T14:54:27
|
Python
|
UTF-8
|
Python
| false
| false
| 826
|
py
|
import re
import json
from datetime import datetime
from . import Serializer
class CellarDoorJSONEncoder(json.JSONEncoder):
def default(self, obj):
try:
iterable = iter(obj)
except TypeError:
pass
else:
return list(iterable)
if isinstance(obj, datetime):
return obj.isoformat()
return super(CellarDoorJSONEncoder, self).default(obj)
def as_date(obj):
if '_date' in obj:
return datetime(*map(int, re.split(r'[^\d]', obj['_date'])[:-1]))
else:
return obj
class JSONSerializer(Serializer):
mimetype = 'application/json'
def serialize(self, obj):
return json.dumps(obj, cls=CellarDoorJSONEncoder)
def unserialize(self, stream):
return json.load(stream, object_hook=as_date)
def unserialize_string(self, data):
return json.loads(data, object_hook=as_date)
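if __name__ == '__main__':
    # Illustrative round trip: datetimes serialize to ISO strings, and any
    # dict carrying a '_date' key is revived as a datetime by as_date (note
    # that its [:-1] slice drops the final time field).
    s = JSONSerializer()
    print(s.serialize({'created': datetime(2015, 1, 29, 20, 58)}))
    print(repr(s.unserialize_string('{"_date": "2015-01-29T20:58:00"}')))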
|
[
"elisha@elishacook.com"
] |
elisha@elishacook.com
|
248e9f4b355e1e50979d314d726f8a200955d5c9
|
af79cff43e96631344a6197a5525c666860bfb45
|
/7a.py
|
431c83b1270821af1b03349d15e5e312f4be6bcf
|
[] |
no_license
|
djaychela/aoc2019
|
d09bd6c2a757e3f8184d6ed0f11fd9eaff58d635
|
78637b2858eba8bf5fbdddcc497482be544a5425
|
refs/heads/master
| 2020-09-30T01:44:07.766972
| 2019-12-10T16:50:07
| 2019-12-10T16:50:07
| 227,169,563
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,040
|
py
|
import os
from itertools import permutations
path = os.path.join(os.getcwd(), "data", "input_7a.txt")
with open(path, "r") as f:
data = f.readlines()
puzzle_input = [int(d) for d in data[0].split(",")]
def run_intcodes(loc_puzzle_input, loc_input_values, debug=False):
def get_data(index, mode):
if mode == "0":
return loc_puzzle_input[loc_puzzle_input[index]]
else:
return loc_puzzle_input[index]
def get_index(index, mode):
if mode == "0":
return loc_puzzle_input[index]
else:
return index
idx = 0
output_value = 0
if debug:
print(f"idx:{idx} P:{loc_puzzle_input}")
while idx < len(loc_puzzle_input) - 1:
modes = f"{loc_puzzle_input[idx]:0>5}"
mode_a = modes[2]
mode_b = modes[1]
mode_c = modes[0]
opcode = modes[3:]
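# e.g. instruction 1002 pads to "01002": opcode "02" (multiply) with
# mode_a="0" (first param positional) and mode_b="1" (second param immediate)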
if debug:
print(f"current opcode: {opcode}")
if opcode == "01":
a = get_data(idx + 1, mode_a)
b = get_data(idx + 2, mode_b)
c = get_index(idx + 3, mode_c)
r = a + b
if debug:
print(f"Opcode {opcode}: Altered {loc_puzzle_input[c]} at {c} to {r}")
loc_puzzle_input[c] = r
increment = 4
elif opcode == "02":
a = get_data(idx + 1, mode_a)
b = get_data(idx + 2, mode_b)
c = get_index(idx + 3, mode_c)
r = a * b
if debug:
print(f"Opcode {opcode}: Altered {loc_puzzle_input[c]} at {c} to {r}")
loc_puzzle_input[c] = r
increment = 4
elif opcode == "03":
a = get_index(idx + 1, mode_a)
input_value = loc_input_values.pop()
if debug:
print(
f"Opcode {opcode}: idx{idx}: Altered {loc_puzzle_input[a]} at {a} to {input_value}"
)
loc_puzzle_input[a] = input_value
increment = 2
elif opcode == "04":
a = get_index(idx + 1, mode_a)
increment = 2
if debug:
print(f"Opcode {opcode}: Output is {loc_puzzle_input[a]}")
output_value = loc_puzzle_input[a]
elif opcode == "05":
a = get_data(idx + 1, mode_a)
b = get_data(idx + 2, mode_b)
if a != 0:
increment = 0
idx = b
else:
increment = 3
elif opcode == "06":
a = get_data(idx + 1, mode_a)
b = get_data(idx + 2, mode_b)
if debug:
print(f"Opcode 06: a:{a}, b:{b}")
if a == 0:
increment = 0
idx = b
else:
increment = 3
elif opcode == "07":
a = get_data(idx + 1, mode_a)
b = get_data(idx + 2, mode_b)
c = get_index(idx + 3, mode_c)
if a < b:
loc_puzzle_input[c] = 1
else:
loc_puzzle_input[c] = 0
increment = 4
elif opcode == "08":
a = get_data(idx + 1, mode_a)
b = get_data(idx + 2, mode_b)
c = get_index(idx + 3, mode_c)
if a == b:
loc_puzzle_input[c] = 1
else:
loc_puzzle_input[c] = 0
increment = 4
elif opcode == "99":
print(f"Opcode 99: END -> outputting {output_value}")
return output_value
idx += increment
# print(f"idx:{idx} P:{loc_puzzle_input}")
best_value = 0
for c in permutations([0, 1, 2, 3, 4], 5):
intcode_output = 0
for i in c:
print(f"***+++ running with values {i}, {intcode_output}")
intcode_output = run_intcodes(puzzle_input.copy(), [intcode_output, i])
print(f"Final Output: {intcode_output}")
if intcode_output > best_value:
best_value = intcode_output
best_combi = c
print(f"Best: {best_value} Combi: {best_combi}")
|
[
"djaychela@gmail.com"
] |
djaychela@gmail.com
|
decf9deb8bedf1afa643866f1ca62082cf70344d
|
d70054d9b828e88b8fe488102109421b02ce76a6
|
/rotate_image.py
|
737e6cf3063de9d4c0e9037691aa1ca26b051b5e
|
[] |
no_license
|
harpreet-singh/leetcode
|
be59ce41bf2cff26b309bd33e309228e7fffd172
|
aecfba452140488495203665319e1bed9444a5b8
|
refs/heads/master
| 2020-06-17T23:51:14.913800
| 2019-07-16T18:49:25
| 2019-07-16T18:49:25
| 196,103,415
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 489
|
py
|
class Solution:
def rotate(self, matrix):
"""
:type matrix: List[List[int]]
:rtype: void Do not return anything, modify matrix in-place instead.
"""
matrix.reverse()
#print(matrix)
#matrix = list(map(list,zip(*matrix)))
for i in range(len(matrix)):
for j in range(len(matrix[i])):
if i < j:
matrix[i][j], matrix[j][i] = matrix[j][i], matrix[i][j]
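if __name__ == "__main__":
    # Illustrative check (not part of the LeetCode submission): reverse the
    # rows, then transpose in place, to rotate 90 degrees clockwise.
    m = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
    Solution().rotate(m)
    print(m)  # [[7, 4, 1], [8, 5, 2], [9, 6, 3]]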
|
[
"noreply@github.com"
] |
harpreet-singh.noreply@github.com
|
185e24fe6926b4f8b5c58b8dc6342350316366fa
|
a1d1a09958e2b29b561f2af14724f1d0f5dc5785
|
/ledTest.py
|
e37fdf30917c7180a6821c4176f40b97df1e4793
|
[] |
no_license
|
jadenbh13/droneSwim
|
1745b5a705ea8f48eaec65d4d462030fa1638021
|
08528888293ac0c2dda94953769e340b9753ad12
|
refs/heads/main
| 2023-06-25T19:12:46.336417
| 2021-07-30T00:52:20
| 2021-07-30T00:52:20
| 380,055,469
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,296
|
py
|
import logging
import time
import cflib.crtp
from cflib.crazyflie import Crazyflie
from cflib.crazyflie.mem import MemoryElement
from cflib.crazyflie.syncCrazyflie import SyncCrazyflie
from cflib.utils import uri_helper
URI = uri_helper.uri_from_env(default='radio://0/80/2M/E7E7E7E7E7')
# Only output errors from the logging framework
logging.basicConfig(level=logging.ERROR)
def setLed(R, G, B):
# Initialize the low-level drivers
cflib.crtp.init_drivers()
with SyncCrazyflie(URI, cf=Crazyflie(rw_cache='./cache')) as scf:
cf = scf.cf
# Set virtual mem effect effect
cf.param.set_value('ring.effect', '13')
# Get LED memory and write to it
mem = cf.mem.get_mems(MemoryElement.TYPE_DRIVER_LED)
if len(mem) > 0:
mem[0].leds[0].set(r=R, g=G, b=B)
mem[0].leds[1].set(r=R, g=G, b=B)
mem[0].leds[2].set(r=R, g=G, b=B)
mem[0].leds[3].set(r=R, g=G, b=B)
mem[0].leds[4].set(r=R, g=G, b=B)
mem[0].leds[5].set(r=R, g=G, b=B)
mem[0].leds[6].set(r=R, g=G, b=B)
mem[0].leds[7].set(r=R, g=G, b=B)
mem[0].leds[8].set(r=R, g=G, b=B)
mem[0].leds[9].set(r=R, g=G, b=B)
mem[0].write_data(None)
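# Example usage (requires a Crazyflie with an LED ring at the configured URI):
#   setLed(255, 0, 0)  # solid red on all ten ring LEDs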
|
[
"jadenbhimani12@gmail.com"
] |
jadenbhimani12@gmail.com
|
1fdcf8560a17327ee5ec0f6e554da3ba299f1af9
|
61c9a90c46e7c63616902ef16721854c73be8ce5
|
/ImagesParserFromYandex/ImageParser.py
|
556ef18c7a8d1c8bf4bb02a7dab79ada3a11c87b
|
[] |
no_license
|
KozhevnikovAlexandr/zootopia-project
|
5e00518b82e947730079d97f1b24902f6e9f92a6
|
69a6b4e40c6968ddad8f42dcda78a62fe79ea0a4
|
refs/heads/main
| 2023-06-09T03:52:56.152635
| 2021-07-01T20:28:37
| 2021-07-01T20:28:37
| 351,718,317
| 0
| 2
| null | 2021-04-20T06:23:53
| 2021-03-26T08:51:31
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 2,949
|
py
|
import requests
import json
from fake_headers import Headers
from bs4 import BeautifulSoup as bs4
class Size:
def __init__(self):
self.large = 'large'
self.medium = 'medium'
self.small = 'small'
class Preview:
def __init__(self, url: str,
width: int,
height: int):
self.url = url
self.width = width
self.height = height
self.size = str(width) + '*' + str(height)
class Result:
def __init__(self, title: (str, None),
description: (str, None),
domain: str,
url: str,
width: int,
height: int,
preview: Preview):
self.title = title
self.description = description
self.domain = domain
self.url = url
self.width = width
self.height = height
self.size = str(width) + '*' + str(height)
self.preview = preview
class YandexImage:
def __init__(self):
self.size = Size()
self.headers = Headers(headers=True).generate()
self.version = '1.0-release'
self.about = 'Yandex Images Parser'
def search(self, query: str, sizes: str = 'large') -> list:
request = requests.get('https://yandex.ru/images/search/',
params={"text": query,
"nomisspell": 1,
"noreask": 1,
"isize": sizes
},
headers=self.headers)
soup = bs4(request.text, 'html.parser')
items_place = soup.find('div', {"class": "serp-list"})
output = list()
try:
items = items_place.find_all("div", {"class": "serp-item"})
except AttributeError:
return output
for item in items:
data = json.loads(item.get("data-bem"))
image = data['serp-item']['img_href']
image_width = data['serp-item']['preview'][0]['w']
image_height = data['serp-item']['preview'][0]['h']
snippet = data['serp-item']['snippet']
try:
title = snippet['title']
except KeyError:
title = None
try:
description = snippet['text']
except KeyError:
description = None
domain = snippet['domain']
preview = 'https:' + data['serp-item']['thumb']['url']
preview_width = data['serp-item']['thumb']['size']['width']
preview_height = data['serp-item']['thumb']['size']['height']
output.append(Result(title, description, domain, image,
image_width, image_height,
Preview(preview, preview_width, preview_height)))
return output
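# Example usage (illustrative; scraping depends on Yandex's current markup):
#   parser = YandexImage()
#   for result in parser.search("red panda", parser.size.medium):
#       print(result.title, result.url, result.size)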
|
[
"noreply@github.com"
] |
KozhevnikovAlexandr.noreply@github.com
|
f7521eb4d4aacfc2c35663ce5ee54c14b67ee656
|
63176b589ea621a88397633ff34df2b14fdebfb1
|
/venv/Practice42.py
|
5a59263886a7bf93cfd1f6f8339d6fc9ce268477
|
[] |
no_license
|
utkarshbhardwaj22/TRAINING1
|
3b610eb407884cf84e2816d06a4cb2a8e23510c9
|
1d1321c3c3d1537b730dfd702d8088a3e21f402c
|
refs/heads/master
| 2023-05-06T02:12:40.936069
| 2021-05-22T17:52:34
| 2021-05-22T17:52:34
| 341,592,444
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 319
|
py
|
import requests
import json
api_key = "your api key"
url = "http://newsapi.org/v2/everything?q=tesla&from=2021-02-12&sortBy=publishedAt&apiKey={}".format(api_key)
response = requests.get(url)
print(response.text)
print(type(response.text))
dict_data = json.loads(response.text)
print(dict_data)
print(type(dict_data))
|
[
"utkarsh2000bhard@gmail.com"
] |
utkarsh2000bhard@gmail.com
|
b2613b1a49c70cc10e0c7ed1ec31117526772ffb
|
e5e4f4f62c0da9c81ba858ea0c2caf9883040806
|
/core/mobile_devices/apps.py
|
4f7555fd101aa905c9c34fefa57982f2b919c2af
|
[
"MIT"
] |
permissive
|
intelligems/django-mobile-app
|
634a15aa48b1303d266081946086bb04dbb119b7
|
04c2e684bf77a4149deb1428fea921b753f0e2ec
|
refs/heads/master
| 2021-06-02T23:13:30.193901
| 2020-04-06T09:54:37
| 2020-04-06T09:54:37
| 106,053,117
| 64
| 14
|
MIT
| 2020-04-06T09:52:12
| 2017-10-06T21:49:56
|
Python
|
UTF-8
|
Python
| false
| false
| 107
|
py
|
from django.apps import AppConfig
class MobileDevicesConfig(AppConfig):
name = 'core.mobile_devices'
|
[
"koslibpro@gmail.com"
] |
koslibpro@gmail.com
|
93b3def8c56dfc5e858310cf3c60c71fde360968
|
0220352990bdfc8d0d0fd2216284a5d6ce47cacc
|
/Review/Weekend Review/calc.py
|
28859f4ff9b4ac92814a28fc49e5ab9d984fbb7c
|
[] |
no_license
|
baxter1707/CoffeeOrderSystem
|
a37907b9d72e397b9d63c971975d2d317a82f848
|
560cc3acb7bcbf9904ab21198d7c1994d406b0ab
|
refs/heads/master
| 2021-08-29T11:51:35.591689
| 2017-12-13T21:50:17
| 2017-12-13T21:50:17
| 114,172,579
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 413
|
py
|
import calcFunct as calc
first = float(raw_input("Enter first number."))
mathOp = (raw_input("Enter +, -, *, or /"))
second = float(raw_input("Enter second number."))
if mathOp == "+":
answer = calc.add(first, second)
elif mathOp == "-":
answer = calc.subtract(first, second)
elif mathOp == "*":
answer = calc.multiply(first, second)
elif mathOp == "/":
answer = calc.divide(first, second)
else:
    answer = "Invalid operator"
print answer
|
[
"msb@Michaels-MBP.localdomain"
] |
msb@Michaels-MBP.localdomain
|
1145456e163cf9f1577b7221b5411f88747e796c
|
062e5ea92fce23805335abd28d72dda8896c6906
|
/user/urls.py
|
157067e2ec7a41c0fa4f25a495fb2c10055364cf
|
[] |
no_license
|
FikretYilmaz/Django_Project3
|
bd65a196ace8b4959c03a2fecc848372f12aca51
|
9d704df72a16838db4c4ca2dcdc7250cd6eff8cc
|
refs/heads/master
| 2020-11-24T06:52:16.583950
| 2019-12-14T12:26:29
| 2019-12-14T12:26:29
| 228,017,665
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 285
|
py
|
from django.contrib import admin
from django.urls import path
from . import views
app_name ="user"
urlpatterns = [
path('register/', views.register, name = "register"),
path('login/', views.loginUser, name = "login"),
path('logout/', views.logoutUser, name = "logout"),
]
|
[
"yilmz.fikret@gmail.com"
] |
yilmz.fikret@gmail.com
|
b548353eabc30922d5a1654ad33ca127d8c65334
|
6b65a0254fd98e3add97533f5423b988d5cccbc9
|
/Survivors.py
|
a295c5090fec8de0e6222a9b835d0149a2d23b15
|
[] |
no_license
|
AnaFOliveira/Evolutionary-Computation
|
a4c8b84c49a5faa265c1eba56c997b69ac5b888d
|
c67bf88620bb4440b609b6e7152beb5020153e66
|
refs/heads/master
| 2020-03-19T05:35:40.874752
| 2020-01-08T17:42:57
| 2020-01-08T17:42:57
| 135,946,283
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 551
|
py
|
# Set of functions for selecting the survivors
from operator import itemgetter
# Elitism
def sel_survivors_elite(elite):
def elitism(parents,offspring):
size = len(parents)
comp_elite = int(size* elite)
offspring.sort(key=itemgetter(1),reverse=True)
parents.sort(key=itemgetter(1),reverse=True)
new_population = parents[:comp_elite] + offspring[:size - comp_elite]
return new_population
return elitism
# Generational
def survivors_generational(parents,offspring):
return offspring
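if __name__ == "__main__":
    # Illustrative check with toy data: individuals are (genotype, fitness)
    # pairs, so itemgetter(1) sorts by fitness.
    parents = [("a", 0.9), ("b", 0.2)]
    offspring = [("c", 0.5), ("d", 0.7)]
    elitism = sel_survivors_elite(0.5)  # keep the best 50% of the parents
    print(elitism(parents, offspring))  # [('a', 0.9), ('d', 0.7)]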
|
[
"anaf.oliveira95@gmail.com"
] |
anaf.oliveira95@gmail.com
|
6fd5ea6b3a66f9661d4d93a4fa3ee7fd8c7f69d8
|
107d985a67685e173ac23fe8d3aa6e1ab7eae91e
|
/release/scripts/mgear/rigbits/rbf_node.py
|
ebab9c7581e42b14386d3dbda5be7f7b15f5a3b5
|
[
"MIT"
] |
permissive
|
moChen0607/mgear4
|
83c67df26ebf8898dfb032c81563bf3fd89b4d6c
|
e340bd9ca95b9e99977ba9eedb0bf39c742683ff
|
refs/heads/master
| 2023-08-16T11:31:54.455569
| 2021-10-07T03:14:01
| 2021-10-07T03:14:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 32,421
|
py
|
#!/usr/bin/env python
"""rbf node to normalize the calls across any number of supported
rbf node types. First supported: "weightDriver" by Ingo Clemens / Brave Rabbit.
Attributes:
CTL_SUFFIX (str): name of the control suffix
DRIVEN_SUFFIX (str): suffix to be applied to driven group
DRIVER_CTL_ATTR_NAME (str): name of the attribute to store driver control
DRIVER_POSEINPUT_ATTR (str): name of attr to store control driver(holder)
DRIVER_POSES_INFO_ATTR (str): name of attr to store control
GENERIC_SUFFIX (str): generic suffix if one not provided by support module
RBF_SCALE_ATTR (str): name of attr applied to driven control
RBF_SETUP_ATTR (str): name of attr to store setup name for group of rbf
ROTATE_ATTRS (list): convenience list of transform attrs
SCALE_ATTRS (list): convenience list of transform attrs
SUPPORTED_RBF_NODES (tuple): currently supported rbf node types
TRANSFORM_SUFFIX (str): suffix of transform nodes for rbf nodes
TRANSLATE_ATTRS (list): convenience list of transform attrs
Notes - refactor as more supported rbf node types are added
__author__ = "Rafael Villar"
__email__ = "rav@ravrigs.com"
"""
# python
import ast
import math
# core
import maya.cmds as mc
import pymel.core as pm
import maya.OpenMaya as OpenMaya
# mgear
from mgear.core import transform, attribute
from mgear.core import anim_utils
from .six import PY2
# =============================================================================
# constants
# =============================================================================
DRIVEN_SUFFIX = "_driven"
DRIVEN_PAR_SUFFIX = "_drivenPar"
RBF_LOCATOR_SUFFIX = "_rbfLoc"
CTL_SUFFIX = "_ctl"
TRANSFORM_SUFFIX = "_trfm"
RBF_SETUP_ATTR = "rbf_setup_name"
TRANSLATE_ATTRS = ["translateX",
"translateY",
"translateZ"]
ROTATE_ATTRS = ["rotateX",
"rotateY",
"rotateZ"]
SCALE_ATTRS = ["scaleX",
"scaleY",
"scaleZ"]
SUPPORTED_RBF_NODES = ("weightDriver",)
GENERIC_SUFFIX = "_RBF"
DRIVER_CTL_ATTR_NAME = "driverControlName"
DRIVER_POSES_INFO_ATTR = "driverPosesInfo"
# DRIVER_POSEINPUT_ATTR = "poseInput"
RBF_SCALE_ATTR = "RBF_Multiplier"
# =============================================================================
# general functions
# =============================================================================
def getMultipleAttrs(node, attributes):
"""get multiple attrs and their values in a list, in order
Args:
node (str): name of node
attributes (list): of attrs to query
Returns:
list: of values
"""
valuesToReturn = []
for attr in attributes:
valuesToReturn.append(mc.getAttr("{}.{}".format(node, attr)))
return valuesToReturn
def copyInverseMirrorAttrs(srcNode, dstNode):
"""within mGear the concept of inverseAttrs, so that transforms can be
accurately mirrored, exists and this copys the relavent attrs from src
to dest
Args:
srcNode (str, pynode): source node
dstNode (str, pynode): destination to copy attrs to
"""
srcNode = pm.PyNode(srcNode)
dstNode = pm.PyNode(dstNode)
attrsToInv = anim_utils.listAttrForMirror(srcNode)
for attr in attrsToInv:
inAttr = anim_utils.getInvertCheckButtonAttrName(attr)
try:
val = mc.getAttr("{}.{}".format(srcNode, inAttr))
mc.setAttr("{}.{}".format(dstNode, inAttr), val)
except ValueError:
continue
def get_driven_group_name(node):
"""get the name of the driven group that would be created for the
provided node
Args:
node (str): name of node
Returns:
str: name of the driven group node
"""
node = pm.PyNode(node)
if node.endswith(CTL_SUFFIX):
drivenName = node.replace(CTL_SUFFIX, DRIVEN_SUFFIX)
else:
drivenName = "{}{}".format(node, DRIVEN_SUFFIX)
return drivenName
def addDrivenGroup(node, drivenName=None):
"""add driven group, pad, above the provided node for direct connection
Args:
node (str): name of node to add group above
Returns:
str: of node created
"""
node = pm.PyNode(node)
parentOfTarget = pm.listRelatives(node, p=True) or None
if parentOfTarget:
parentOfTarget = parentOfTarget[0]
drivenName = drivenName or get_driven_group_name(node)
if parentOfTarget is None:
parentOfTarget = pm.group(name=drivenName.replace(DRIVEN_SUFFIX,
DRIVEN_PAR_SUFFIX),
w=True,
em=True)
else:
parentOfTarget = pm.group(name=drivenName.replace(DRIVEN_SUFFIX,
DRIVEN_PAR_SUFFIX),
p=parentOfTarget,
em=True)
parentOfTarget.setMatrix(node.getMatrix(worldSpace=True),
worldSpace=True)
drivenName = pm.group(name=drivenName, p=parentOfTarget, em=True)
attribute.add_mirror_config_channels(pm.PyNode(drivenName))
if node.endswith(CTL_SUFFIX):
copyInverseMirrorAttrs(node, drivenName)
pm.parent(node, drivenName)
return drivenName.name()
def removeDrivenGroup(node):
"""remove driven group above desired node
Args:
node (str): name of node to check
"""
drivePar = parentOfTarget = mc.listRelatives(node, p=True) or None
if parentOfTarget and parentOfTarget[0].endswith(DRIVEN_PAR_SUFFIX):
parentOfTarget = mc.listRelatives(parentOfTarget, p=True) or None
childrenNode = mc.listRelatives(node, type="transform") or []
for child in childrenNode:
if parentOfTarget is None:
mc.parent(child, w=True)
else:
mc.parent(child, parentOfTarget[0])
if node.endswith(DRIVEN_SUFFIX):
mc.delete(node)
if drivePar and drivePar[0].endswith(DRIVEN_PAR_SUFFIX):
mc.delete(drivePar)
def compensateLocator(node):
"""Create a locator that parents under desired node, to manipulated
directly connected nodes.
Functionality that is disable, but for future use.
Args:
node (str): desired node
Returns:
str: name of the created locator
"""
mc.select(cl=True)
cLoc = mc.spaceLocator(n="{}{}".format(node, RBF_LOCATOR_SUFFIX))
mc.parent(cLoc, node, r=True)
return cLoc
def removeCompensateLocator(node):
"""remove the locator under the desired node if exists
Args:
node (str): name of the ndoe to look under.
"""
mc.select(cl=True)
cmpLoc = "{}{}".format(node, RBF_LOCATOR_SUFFIX)
if mc.objExists(cmpLoc):
cmpLoc_par = mc.listRelatives(cmpLoc, p=True)
if cmpLoc_par and cmpLoc_par[0] == node:
mc.delete(cmpLoc)
def decompMatrix(node, matrix):
'''
Decomposes an MMatrix. Returns a tuple of
translation, rotation, scale lists in world space.
Args:
node (str): name of node to query the rotate order from
matrix (MMatrix): MMatrix to decompose
Returns:
tuple: translation, rotation and scale as three lists
'''
# Rotate order of object
rotOrder = mc.getAttr("{}.rotateOrder".format(node))
# Puts matrix into transformation matrix
mTransformMtx = OpenMaya.MTransformationMatrix(matrix)
# Translation Values
trans = mTransformMtx.getTranslation(OpenMaya.MSpace.kPostTransform)
# Euler rotation value in radians
eulerRot = mTransformMtx.eulerRotation()
# Reorder rotation order based on ctrl.
eulerRot.reorderIt(rotOrder)
radian = 180.0 / math.pi
rotations = [rot * radian for rot in [eulerRot.x, eulerRot.y, eulerRot.z]]
# Find world scale of our object.
# for scale we need to utilize MScriptUtil to deal with the native
# double pointers
scaleUtil = OpenMaya.MScriptUtil()
scaleUtil.createFromList([0, 0, 0], 3)
scaleVec = scaleUtil.asDoublePtr()
mTransformMtx.getScale(scaleVec, OpenMaya.MSpace.kPostTransform)
scale = [OpenMaya.MScriptUtil.getDoubleArrayItem(scaleVec, i)
for i in range(0, 3)]
# Return Values
return [trans.x, trans.y, trans.z], rotations, scale
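# Hedged usage sketch for decompMatrix above. The default node name is
# illustrative only, not part of the original module; any scene transform
# would do. Uses the same OpenMaya 1.0 API the function already relies on.
def _exampleDecompose(nodeName="demo_ctl"):
    """return (translate, rotate, scale) of nodeName's world matrix."""
    sel = OpenMaya.MSelectionList()
    sel.add(nodeName)
    dagPath = OpenMaya.MDagPath()
    sel.getDagPath(0, dagPath)
    # inclusiveMatrix() is the node's world-space matrix
    return decompMatrix(nodeName, dagPath.inclusiveMatrix())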
def resetDrivenNodes(node):
"""use mgear convenience function to reset all available transform nodes
Args:
node (str): node to reset
"""
children = mc.listRelatives(node, type="transform")
controlNode = node.replace(DRIVEN_SUFFIX, CTL_SUFFIX)
otherNode = node.replace(DRIVEN_SUFFIX, "")
if mc.objExists(controlNode) and children and controlNode in children:
transform.resetTransform(pm.PyNode(controlNode))
elif mc.objExists("{}{}".format(node, RBF_LOCATOR_SUFFIX)):
        compensateLoc = pm.PyNode("{}{}".format(node, RBF_LOCATOR_SUFFIX))
        transform.resetTransform(compensateLoc)
elif mc.objExists(otherNode):
otherNode = pm.PyNode(otherNode)
transform.resetTransform(otherNode)
transform.resetTransform(pm.PyNode(node))
def __getResultingMatrix(drivenNode, parentNode, absoluteWorld=True):
"""convenience function, wrap. given two nodes, one parented under the
other
Args:
drivenNode (str): name of the drivenNode
parentNode (str): name of the parent node
absoluteWorld (bool, optional): calculate in world or check for local
differences
Returns:
mmaatrix: resulting matrix of driven and parent
"""
drivenNode = pm.PyNode(drivenNode)
nodeInverParMat = parentNode.getAttr("parentInverseMatrix")
drivenMat = drivenNode.getMatrix(worldSpace=True)
drivenMat_local = drivenNode.getMatrix(objectSpace=True)
defaultMat = OpenMaya.MMatrix()
if defaultMat.isEquivalent(drivenMat_local) and not absoluteWorld:
totalMatrix = defaultMat
print("Pose recorded in local.")
else:
totalMatrix = drivenMat * nodeInverParMat
return totalMatrix
def getDrivenMatrix(node, absoluteWorld=True):
"""check if there is a control node for the provided node(driven)
if so, collect the matrix information for both
Args:
node (pynode): driven group/driven node
absoluteWorld (bool, optional): get the world matrix or defaulted mat
if the control is zeroed out.
Returns:
MMatrix: of total position including the control
"""
children = mc.listRelatives(node, type="transform") or []
node = pm.PyNode(node)
controlNode = node.replace(DRIVEN_SUFFIX, CTL_SUFFIX)
otherNode = node.replace(DRIVEN_SUFFIX, "")
if mc.objExists(controlNode) and controlNode in children:
totalMatrix = __getResultingMatrix(controlNode,
node,
absoluteWorld=absoluteWorld)
elif mc.objExists(otherNode) and otherNode in children:
totalMatrix = __getResultingMatrix(otherNode,
node,
absoluteWorld=absoluteWorld)
elif mc.objExists("{}{}".format(node, RBF_LOCATOR_SUFFIX)):
        compensateLoc = pm.PyNode("{}{}".format(node, RBF_LOCATOR_SUFFIX))
        nodeInverParMat = node.getAttr("parentInverseMatrix")
        controlMat = compensateLoc.getMatrix(worldSpace=True)
totalMatrix = controlMat * nodeInverParMat
else:
totalMatrix = node.getMatrix(worldSpace=False)
return totalMatrix
def createRBFToggleAttr(node):
"""creates a node to toggle the rbf pose that drives the node
Args:
node (str): desired node to be tagged with attr
"""
try:
mc.addAttr(node,
ln=RBF_SCALE_ATTR,
at="float",
dv=1,
min=0,
max=1,
k=True)
except RuntimeError:
pass
def connectRBFToggleAttr(node, rbfNode, rbfEnableAttr):
"""connect the "envelope" attr with its corresponding rbfNode
Args:
node (str): node with attr
rbfNode (str): rbf node with receiving attr
rbfEnableAttr (str): targeted rbf node for disabling node
"""
nodeAttr = "{}.{}".format(node, RBF_SCALE_ATTR)
rbfAttr = "{}.{}".format(rbfNode, rbfEnableAttr)
mc.connectAttr(nodeAttr, rbfAttr, f=True)
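# Hedged example of the toggle wiring above. Node names are illustrative and
# the enable attr ("scale" here) depends on the rbf node type in use; the
# real name comes from that type's getRBFToggleAttr().
#
#   createRBFToggleAttr("arm_L0_ctl")
#   connectRBFToggleAttr("arm_L0_ctl", "arm_L0_wd", "scale")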
def deleteRBFToggleAttr(node):
"""remove the toggle attribute from the node
Args:
node (str): node to remove toggle attr from
"""
mc.setAttr("{}.{}".format(node, RBF_SCALE_ATTR), edit=True, lock=False)
try:
mc.deleteAttr("{}.{}".format(node, RBF_SCALE_ATTR))
except RuntimeError:
pass
def getConnectedRBFToggleNode(node, toggleAttr):
"""get the node connected to the rbf(node)
Args:
node (str): rbf node
toggleAttr (str): envelope attr to check
Returns:
str: connected node
"""
rbfAttr = "{}.{}".format(node, toggleAttr)
driverControl = mc.listConnections(rbfAttr)
if driverControl:
return driverControl[0]
return driverControl
def setToggleRBFAttr(node, value, toggleAttr):
"""Toggle rbfattr on or off (any value provided)
Args:
node (str): name of node with the attr to toggle rbf on/off
value (int, bool): on/off
toggleAttr (str): name of the attr to set
"""
attrPlug = "{}.{}".format(node, toggleAttr)
mc.setAttr(attrPlug, value)
def createDriverControlPoseAttr(node):
"""ensure the driverControlPoseAttr exists on the (RBF)node provided
Args:
node (str): name of the supported RBFNode
"""
try:
mc.addAttr(node, ln=DRIVER_POSES_INFO_ATTR, dt="string")
except RuntimeError:
pass
def setDriverControlPoseAttr(node, poseInfo):
"""set the driverControlPoseAttr with the poseInfo provided, as string
Args:
node (str): name of rbf node to set it on
poseInfo (dict): of pose information
"""
if not mc.attributeQuery(DRIVER_POSES_INFO_ATTR, n=node, ex=True):
createDriverControlPoseAttr(node)
mc.setAttr("{}.{}".format(node, DRIVER_POSES_INFO_ATTR),
str(poseInfo),
type="string")
def getDriverControlPoseAttr(node):
"""record the dict, stored as a str, holding driver control
pose information.
Args:
node (str): name of the RBFNode supported node to query
Returns:
dict: of attr:[value at index]
"""
try:
poseInfo = mc.getAttr("{}.{}".format(node, DRIVER_POSES_INFO_ATTR))
return ast.literal_eval(poseInfo)
except ValueError:
return {}
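# Shape of the stored poseInfo dict, with illustrative values only:
# one list of recorded values per attribute, indexed by pose.
#
#   {"translateX": [0.0, 1.25], "rotateZ": [0.0, -30.0]}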
def updateDriverControlPoseAttr(node, driverControl, poseIndex):
"""get the ControlPoseDict add any additionally recorded values to and set
Args:
node (str): name of the RBFNode supported node
driverControl (str): name of the control to queary attr info from
poseIndex (int): to add the collected pose information to
"""
# TODO future recording of all attrs goes here
poseInfo = getDriverControlPoseAttr(node)
attrsToUpdate = TRANSLATE_ATTRS + ROTATE_ATTRS + SCALE_ATTRS
attrsToUpdate = list(set(attrsToUpdate + list(poseInfo.keys())))
for attr in attrsToUpdate:
attrPoseIndices = poseInfo.get(attr, [])
lengthOfList = len(attrPoseIndices) - 1
newVal = mc.getAttr("{}.{}".format(driverControl, attr))
if not attrPoseIndices or lengthOfList < poseIndex:
attrPoseIndices.insert(poseIndex, newVal)
elif lengthOfList >= poseIndex:
attrPoseIndices[poseIndex] = newVal
poseInfo[attr] = attrPoseIndices
setDriverControlPoseAttr(node, poseInfo)
def recallDriverControlPose(driverControl, poseInfo, index):
"""set the driverControl to the index requested. Set as many attrs as is
provided in the poseInfo
Args:
driverControl (str): control to set poseAttr infomation on
poseInfo (dict): of poses
index (int): poseInfo[attrName]:[index]
"""
failed_attrs = []
for attr, values in poseInfo.items():
try:
# not to be bothered with locked, hidden, connected attrs
mc.setAttr("{}.{}".format(driverControl, attr), values[index])
except Exception:
failed_attrs.append(attr)
if failed_attrs:
failed_attrs.insert(0, driverControl)
msg = "Pose cannot be applied to the following attributes: \n{}".format(failed_attrs)
print(msg)
def createDriverControlAttr(node):
"""create the string attr where information will be stored for query
associated driver anim control
Args:
node (str): rbf node to tag with information
"""
try:
mc.addAttr(node, ln=DRIVER_CTL_ATTR_NAME, dt="string")
except RuntimeError:
pass
def getDriverControlAttr(node):
"""get the stored information from control attr
Args:
node (str): name of rbfNode
Returns:
str: contents of attr, animControl
"""
try:
return mc.getAttr("{}.{}".format(node, DRIVER_CTL_ATTR_NAME))
except ValueError:
return ""
def setDriverControlAttr(node, controlName):
""" create and set attr with the driver animControl string
Args:
node (str): name of rbfnode
controlName (str): name of animControl(usually)
"""
if not mc.attributeQuery(DRIVER_CTL_ATTR_NAME, n=node, ex=True):
createDriverControlAttr(node)
mc.setAttr("{}.{}".format(node, DRIVER_CTL_ATTR_NAME),
controlName,
type="string")
def getSceneRBFNodes():
"""get all rbf nodes in the scene of supported type
Returns:
list: of rbf nodes, see supported types
"""
return mc.ls(type=SUPPORTED_RBF_NODES) or []
def getSceneSetupNodes():
"""get rbf nodes with setups attributes
Returns:
list: of rbf nodes with setup information
"""
nodes = set(mc.ls(type=SUPPORTED_RBF_NODES))
return [rbf for rbf in nodes if mc.attributeQuery(RBF_SETUP_ATTR,
n=rbf,
ex=True)]
def getRbfSceneSetupsInfo(includeEmpty=True):
"""gather scene rbf nodes with setups in dict
Args:
includeEmpty (bool, optional): should rbf nodes with empty setup names
be included
Returns:
dict: setupName(str):list associated rbf nodes
"""
setups_dict = {"empty": []}
for rbfNode in getSceneSetupNodes():
setupName = mc.getAttr("{}.{}".format(rbfNode, RBF_SETUP_ATTR))
if setupName == "":
setups_dict["empty"].append(rbfNode)
continue
if setupName in setups_dict:
setups_dict[setupName].append(rbfNode)
else:
setups_dict[setupName] = [rbfNode]
if not includeEmpty:
setups_dict.pop("empty")
return setups_dict
def setSetupName(node, setupName):
"""set setup name on the specified node
Args:
node (str): name of rbf node to set
setupName (str): name of setup
"""
if not mc.attributeQuery(RBF_SETUP_ATTR, n=node, ex=True):
mc.addAttr(node, ln=RBF_SETUP_ATTR, dt="string")
mc.setAttr("{}.{}".format(node, RBF_SETUP_ATTR), setupName, type="string")
def getSetupName(node):
"""get setup name from specified rbf node
Args:
node (str): name of rbf node
Returns:
str: name of setup associated with node
"""
if not mc.attributeQuery(RBF_SETUP_ATTR, n=node, ex=True):
return None
return mc.getAttr("{}.{}".format(node, RBF_SETUP_ATTR))
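# Hedged example of setup tagging; the node and setup names below are
# illustrative, not taken from a real scene:
#
#   setSetupName("arm_L0_wd", "shoulder_L0")
#   assert getSetupName("arm_L0_wd") == "shoulder_L0"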
class RBFNode(object):
"""A class to normalize the function between different types of rbf nodes
that essentially perform the same task. Look to weightNode_io for examples
of normalized function calls to specific nodeType information with this
class.
Attributes:
name (str): name of the node that either exists or to be created
rbfType (str): nodeType to create node of supported type
transformNode (str): name of transform node
"""
def __init__(self, name):
self.name = name
self.transformNode = None
if mc.objExists(name) and mc.nodeType(name) in SUPPORTED_RBF_NODES:
self.rbfType = mc.nodeType(name)
self.transformNode = self.getTransformParent()
self.lengthenCompoundAttrs()
else:
self.create()
createDriverControlAttr(self.name)
def __repr__(self):
"""overwritten so that the RBFNode instance can be treated as a pymal
node. Convenience
Returns:
str: name of rbfNode node correctly formated
"""
return self.name
def __unicode__(self):
"""overwritten so that the RBFNode instance can be treated as a pymal
node. Convenience
Returns:
str: name of rbfNode node correctly formated
"""
if PY2:
return unicode(self.name).encode('utf-8')
return str(self.name).encode('utf-8')
def __str__(self):
"""overwritten so that the RBFNode instance can be treated as a pymal
node. Convenience
Returns:
str: name of rbfNode node correctly formated
"""
return str(self.name)
@staticmethod
def nodeType_suffix():
"""optional override with a module/node specific suffix for naming
"""
return GENERIC_SUFFIX
@staticmethod
def formatName(name, suffix):
"""standardized the naming of all rbf nodes for consistency
Returns:
str: name of all supported rbf nodes
"""
return "{}{}".format(name, suffix)
def create(self):
"""create an RBF node of type, defined by the subclassed module
Raises:
NotImplementedError: Description
"""
raise NotImplementedError()
def getPoseInfo(self):
"""get poseInfo dict
Raises:
NotImplementedError: each rbf node is unique, adhere here for
rbf manager ui support
"""
raise NotImplementedError()
def getNodeInfo(self):
"""get all the info for for the node in the form of a dict
Raises:
NotImplementedError: NotImplementedError: each rbf node is unique,
adhere here for rbf manager ui support
"""
raise NotImplementedError()
def lengthenCompoundAttrs(self):
"""convenience function, sanity check for zero'd compound attrs
"""
pass
def addPose(self, poseInput, poseValue, posesIndex=None):
"""add pose to the weightDriver node provided. Also used for editing
an existing pose, since you can specify the index. If non provided
assume new
Args:
node (str): weightedDriver
poseInput (list): list of the poseInput values
poseValue (list): of poseValue values
posesIndex (int, optional): at desired index, if none assume
latest/new
"""
if posesIndex is None:
posesIndex = len(self.getPoseInfo()["poseInput"])
self.updateDriverControlPoseAttr(posesIndex)
raise NotImplementedError()
def deletePose(self, indexToPop):
"""gather information on node, remove desired index and reapply
Args:
node (str): weightDriver
indexToPop (int): pose index to remove
"""
raise NotImplementedError()
def getDriverNode(self):
"""get nodes that are driving weightDriver node
Returns:
list: of driver nodes
"""
raise NotImplementedError()
def getDriverNodeAttributes(self):
"""get the connected attributes of the provided compound attr in order
of index - Sanity check
Returns:
list: of connected attrs in order
"""
raise NotImplementedError()
def getDrivenNode(self):
"""get driven nodes connected to weightDriver
Returns:
list: of driven nodes
"""
raise NotImplementedError()
def getDrivenNodeAttributes(self):
"""get the connected attributes of the provided compound attr in order
of index - Sanity check
Returns:
list: of connected attrs in order
"""
raise NotImplementedError()
def getSetupName(self):
"""get the name of the setup that the RBFNode belongs to
Returns:
str: skirt_L0, shoulder_R0
"""
return getSetupName(self.name)
def setSetupName(self, setupName):
"""set the name of the setup for the RBFNode
Args:
setupName (str): desired name
"""
setSetupName(str(self.name), setupName)
def setDriverNode(self, driverNode, driverAttrs):
"""set the node that will be driving the evaluation on our poses
Args:
node (str): name of weightDriver node
driverNode (str): name of driver node
driverAttrs (list): of attributes used to perform evaluations
"""
raise NotImplementedError()
def setDrivenNode(self, drivenNode, drivenAttrs, parent=True):
"""set the node to be driven by the weightDriver
Args:
node (str): weightDriver node
drivenNode (str): name of node to be driven
drivenAttrs (list): of attributes to be driven by weightDriver
"""
raise NotImplementedError()
    def getTransformParent(self):
        """get the transform node parented to/associated with the rbf node
        Returns:
            str: name of the transform node
        """
        raise NotImplementedError()
    def copyPoses(self, nodeB, emptyPoseValues=True):
        """Copy poses from this node to nodeB, with the option to copy blank
        pose values for syncing nodes
        Args:
            nodeB (str): name of weightedNode
            emptyPoseValues (bool, optional): should the copy just be the same
            number of poses but blank output value
        Returns:
            n/a: n/a
        """
        raise NotImplementedError()
def setDriverControlPoseAttr(self, poseInfo):
"""set the poseInfo as a string to the DriverControlPoseAttr
Args:
poseInfo (dict): of pose information to set, as a str
"""
setDriverControlPoseAttr(self.name, poseInfo)
def getDriverControlPoseAttr(self):
"""retrieve poseInfo from the driverControlPoseAttr as a dict
Returns:
dict: of pose information
"""
driverPoseInfoAttr = getDriverControlPoseAttr(self.name)
return driverPoseInfoAttr
def updateDriverControlPoseAttr(self, posesIndex):
"""update the driverControlPoseAttr at the specified index
Args:
posesIndex (int): update the pose information at the index
"""
driverControl = self.getDriverControlAttr()
updateDriverControlPoseAttr(self.name, driverControl, posesIndex)
def setDriverControlAttr(self, controlName):
""" create and set attr with the driver animControl string
Args:
controlName (str): name of animControl(usually)
"""
setDriverControlAttr(self.name, controlName)
def getDriverControlAttr(self):
"""get the driverControlAttr
Returns:
str: the name of the control set within the attr
"""
driverControl = getDriverControlAttr(self.name)
if driverControl == "":
driverControl = self.getDriverNode()[0]
return driverControl
def recallDriverPose(self, poseIndex):
"""recall the pose on the controlDriver with information at the
specified index
Args:
poseIndex (int): desired index, matches pose index on rbfNode
"""
driverControl = self.getDriverControlAttr()
poseInfo = getDriverControlPoseAttr(self.name)
recallDriverControlPose(driverControl, poseInfo, poseIndex)
def getPoseValues(self, resetDriven=True, absoluteWorld=True):
"""get all pose values from rbf node
        Args:
            resetDriven (bool, optional): reset driven animControl
            absoluteWorld (bool, optional): calculate in world space or check
            for local differences
        Returns:
            list: of poseValues
"""
attributeValue_dict = {}
drivenNode = self.getDrivenNode()[0]
drivenAttrs = self.getDrivenNodeAttributes()
if (mc.attributeQuery("matrix", n=drivenNode, ex=True) and
mc.attributeQuery("worldMatrix", n=drivenNode, ex=True)):
(trans,
rotate,
scale) = decompMatrix(drivenNode,
getDrivenMatrix(drivenNode,
absoluteWorld=absoluteWorld))
for attr in drivenAttrs:
if attr in TRANSLATE_ATTRS:
index = TRANSLATE_ATTRS.index(attr)
attributeValue_dict[attr] = trans[index]
elif attr in ROTATE_ATTRS:
index = ROTATE_ATTRS.index(attr)
attributeValue_dict[attr] = rotate[index]
elif attr in SCALE_ATTRS:
index = SCALE_ATTRS.index(attr)
attributeValue_dict[attr] = scale[index]
else:
nodePlug = "{}.{}".format(drivenNode, attr)
attributeValue_dict[attr] = mc.getAttr(nodePlug)
if resetDriven:
resetDrivenNodes(drivenNode)
poseValues = [attributeValue_dict[attr] for attr in drivenAttrs]
return poseValues
def forceEvaluation(self):
"""convenience function to force re evaluation on the rbf nodes
most nodes support this
"""
NotImplementedError()
def getRBFToggleAttr(self):
"""get the specific to the type, "envelope" attr for rbf node
"""
NotImplementedError()
# return "scale"
def deleteRBFToggleAttr(self):
"""convenience function to delete the connected "enevelope" from the
anim control node
Returns:
TYPE: Description
"""
driverControl = self.getConnectedRBFToggleNode()
if not driverControl:
return
deleteRBFToggleAttr(driverControl)
def setToggleRBFAttr(self, value):
"""Toggle rbfattr on or off (any value provided)
Args:
            value (int, bool): on/off
"""
driverControl = self.getConnectedRBFToggleNode()
setToggleRBFAttr(driverControl, value, RBF_SCALE_ATTR)
def getConnectedRBFToggleNode(self):
"""return the node connected to the RBFNodes toggle attr
Returns:
str: name of node
"""
return getConnectedRBFToggleNode(self.name, self.getRBFToggleAttr())
def syncPoseIndices(self, srcNode):
raise NotImplementedError()
def applyDefaultPose(self, posesIndex=0):
"""apply default pose, WARNING. Applying default on more than one index
will result in rbf decomp error.
Args:
posesIndex (int, optional): index to default values
"""
driverNode = self.getDriverNode()[0]
driverAttrs = self.getDriverNodeAttributes()
poseInputs = getMultipleAttrs(driverNode, driverAttrs)
drivenAttrs = self.getDrivenNodeAttributes()
newPoseValues = []
for attr in drivenAttrs:
if attr in SCALE_ATTRS:
newPoseValues.append(1.0)
else:
newPoseValues.append(0.0)
self.addPose(poseInput=poseInputs,
poseValue=newPoseValues,
posesIndex=posesIndex)
    def compensateForDirectConnect(self):
        """add a compensation locator under the driven node when transform
        attrs are directly connected and no driven group is present
        """
drivenNode = self.getDrivenNode()[0]
if (mc.nodeType(drivenNode) not in ["transform", "joint"] or
mc.objExists("{}{}".format(drivenNode, RBF_LOCATOR_SUFFIX)) or
drivenNode.endswith(DRIVEN_SUFFIX)):
return
transformAttrs = set(TRANSLATE_ATTRS + ROTATE_ATTRS + SCALE_ATTRS)
drivenAttrs = set(self.getDrivenNodeAttributes())
if not drivenAttrs.intersection(transformAttrs):
return
cmpLoc = compensateLocator(drivenNode)
|
[
"miquel.campos@gmail.com"
] |
miquel.campos@gmail.com
|
93113329cab60289329c039bbed8550468933686
|
a51e408379db9cea0ae82f196ee2bc96a93dd0e1
|
/Python/CALCULADORAPITAGORAS.py
|
1492ddff1f349de1491b64f4c1a331a34d6f06d7
|
[] |
no_license
|
DiegoSalazar02/CURSO-PINOLES
|
5e659cf5dad344e15c5c4e265ecf957773a7df82
|
2e25457e77867ce9b287cbc7ed9998f5ac8bec2b
|
refs/heads/master
| 2020-05-25T18:28:12.787721
| 2019-07-24T00:13:12
| 2019-07-24T00:13:12
| 187,930,365
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 801
|
py
|
print("Si no tienes un dato, dejalo en blanco")
print("Dame tu cateto 'a'")
a = input()
print("Dame tu cateto 'b'")
b=input()
print("Dame tu hipotenusa")
c=input()
if(not c):
c=(int(a)*int(a))+(int(b)*int(b))
c=pow(c,0.5)
if(int(a)+int(b)<int(c)):
print("Tu triangulo esta bien raro chamaco")
else:
print("Tu hipotenusa es igual a: " + str(c))
elif(not a):
a=(int(c)*int(c))-(int(b)*int(b))
a=pow(a,0.5)
if(int(a)+int(b)<int(c)):
print("Tu triangulo esta bien raro chamaco")
else:
print("Tu cateto 'a' es igual a: "+str(a))
elif(not b):
b=(int(c)*int(c))-(int(a)*int(a))
b=pow(b,0.5)
if(int(a)+int(b)<int(c)):
print("Tu triangulo esta bien raro chamaco")
else:
print("Tu cateto 'b' es igual a: " +str(c))
|
[
"50892111+DiegoSalazar02@users.noreply.github.com"
] |
50892111+DiegoSalazar02@users.noreply.github.com
|
52b12868ecc92457a61a17446a5a58f85755197b
|
c025aac7d3288a84d2c2f2a12dcc4ac8068e920d
|
/formatString.py
|
d7b706ce4cac6ff7f303a5d84d5d5d85e3c03601
|
[] |
no_license
|
thinkinghs/pyalgorithm
|
f81eb317c906a91e00fa04edd65ae5c4396b8cee
|
1b6371c68ecdf940e54e8d0f890f2e671bd0baa9
|
refs/heads/master
| 2020-04-15T14:52:44.220382
| 2019-06-25T09:36:43
| 2019-06-25T09:36:43
| 164,771,742
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 534
|
py
|
# https://www.codewars.com/kata/format-a-string-of-names-like-bart-lisa-and-maggie/python
# there are shorter solutions I still have to study, but mine is easy to read
def namelist(names):
if not names:
return ''
if len(names) == 1:
return names[0]['name']
    # everyone but the last name is comma-separated; the last gets '&'
    head = ', '.join(n['name'] for n in names[:-1])
    return head + ' & ' + names[-1]['name']
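# Example from the kata description:
#   namelist([{'name': 'Bart'}, {'name': 'Lisa'}, {'name': 'Maggie'}])
#   -> 'Bart, Lisa & Maggie'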
|
[
"noreply@github.com"
] |
thinkinghs.noreply@github.com
|
36176d02cf0a6adeafba9c114b7c7921e64ecd21
|
677118d53be00096610a5d881348f9f6464d8a40
|
/MyProject/api/views.py
|
49a6637d88eb2bcc645daf226d857a8b3245a7b8
|
[] |
no_license
|
sandipan898/django-rest-sample-project
|
9ded79516461f4b8a1d4cc9bbc8f45e652b810f5
|
2097d2dbf8ef9caf914dbe18098f8c6dc2bdb003
|
refs/heads/master
| 2023-02-13T14:45:43.877953
| 2020-12-30T18:56:00
| 2020-12-30T18:56:00
| 324,568,744
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,853
|
py
|
from django.shortcuts import render
from django.http import HttpResponse, JsonResponse
from rest_framework import serializers
from rest_framework.parsers import JSONParser
from rest_framework.serializers import Serializer
from .models import Article
from .serializers import ArticleSerializer
from django.views.decorators.csrf import csrf_exempt
from rest_framework.decorators import api_view
from rest_framework.response import Response
from rest_framework import status
from rest_framework.views import APIView
from rest_framework import generics
from rest_framework import mixins
from rest_framework.authentication import BasicAuthentication, SessionAuthentication, TokenAuthentication
from rest_framework.permissions import IsAuthenticated
from rest_framework import viewsets
from django.shortcuts import get_object_or_404
# Create your views here.
""""""""""""""""""""" Class Based Views """""""""""""""""""""
class ArticleModelViewSet(viewsets.ModelViewSet):
"""
Inherits from Model viewsets where by defaults all the methods are defined
"""
serializer_class = ArticleSerializer
queryset = Article.objects.all()
class ArticleGenericViewSet(viewsets.GenericViewSet, mixins.ListModelMixin, mixins.CreateModelMixin,
mixins.UpdateModelMixin, mixins.RetrieveModelMixin, mixins.DestroyModelMixin):
"""
Inherits from generic viewsets whwere we can just implements the generic
operatios just by inheriting the mixins classes
"""
serializer_class = ArticleSerializer
queryset = Article.objects.all()
########################################################
class ArticleViewSet(viewsets.ViewSet):
"""
This is a basic ViewSet class.
Here we have to implement the methods on our own
"""
def list(self, request):
articles = Article.objects.all()
serializer = ArticleSerializer(articles, many=True) # serializing all articles into objects
return Response(serializer.data) # takes serialized data and sends response in JSON format
def create(self, request):
        serializer = ArticleSerializer(data=request.data) # serializing incoming request data
if serializer.is_valid(): # checking validity
serializer.save() # saving the serialized data into model instance
return Response(serializer.data, status=status.HTTP_201_CREATED) # takes serialized data and sends response in JSON format
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST) # returning error
def retrieve(self, request, pk=None):
queryset = Article.objects.all()
article = get_object_or_404(queryset, pk=pk)
serializer = ArticleSerializer(article) # serializing all articles into objects
return Response(serializer.data) # takes serialized data and sends response in JSON format
def update(self, request, pk=None):
article = Article.objects.get(pk=pk)
serializer = ArticleSerializer(article, data=request.data) # serializing the article object with the parsed data
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
########################################################
class GenericArticleView(generics.GenericAPIView, mixins.ListModelMixin, mixins.CreateModelMixin,
mixins.UpdateModelMixin, mixins.RetrieveModelMixin, mixins.DestroyModelMixin):
serializer_class = ArticleSerializer
queryset = Article.objects.all()
lookup_field = "id"
# authentication_classes = [SessionAuthentication, BasicAuthentication]
authentication_classes = [TokenAuthentication]
permission_classes = [IsAuthenticated]
def get(self, request, id=None):
if id:
return self.retrieve(request)
else:
return self.list(request)
def post(self, request):
return self.create(request)
def put(self, request, id=None):
return self.update(request, id)
def delete(self, request, id):
return self.destroy(request, id)
########################################################
class ArticleAPIView(APIView):
def get(self, request):
articles = Article.objects.all()
serializer = ArticleSerializer(articles, many=True) # serializing all articles into objects
return Response(serializer.data) # takes serialized data and sends response in JSON format
def post(self, request):
        serializer = ArticleSerializer(data=request.data) # serializing incoming request data
if serializer.is_valid(): # checking validity
serializer.save() # saving the serialized data into model instance
return Response(serializer.data, status=status.HTTP_201_CREATED) # takes serialized data and sends response in JSON format
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST) # returning error
class ArticleDetailsView(APIView):
def get_object(self, id):
try:
return Article.objects.get(id=id) # trying to find the article object matching the 'id' passed
except Article.DoesNotExist:
return HttpResponse(status=status.HTTP_404_NOT_FOUND)
def get(self, request, id):
""" GET Method to Get or Read the article from database """
article = self.get_object(id)
serializer = ArticleSerializer(article) # serializing the article object found
return Response(serializer.data) # returning serialized data into JSON format
def put(self, request, id):
""" PUT Method to Update the Article """
article = self.get_object(id)
serializer = ArticleSerializer(article, data=request.data) # serializing the article object with the parsed data
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
def delete(self, request, id):
article = self.get_object(id)
article.delete() # deleting the article object found
return Response(status=status.HTTP_204_NO_CONTENT)
""""""""""""""""""""" Function Based Views """""""""""""""""""""
# @csrf_exempt #allows post requests without csrf token
@api_view(['GET', 'POST']) # decorating views with django-rest-framework api views to add special functionalities
def article_list(request):
if request.method == 'GET':
"""If method is GET"""
articles = Article.objects.all()
serializer = ArticleSerializer(articles, many=True) # serializing all articles into objects
return Response(serializer.data) # takes serialized data and sends response in JSON format
elif request.method == "POST":
"""If method is POST"""
        # data = JSONParser().parse(request) # Parse JSON formatted data from incoming request object
        serializer = ArticleSerializer(data=request.data) # serializing incoming request data
if serializer.is_valid(): # checking validity
serializer.save() # saving the serialized data into model instance
return Response(serializer.data, status=status.HTTP_201_CREATED) # takes serialized data and sends response in JSON format
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST) # returning error
# @csrf_exempt
@api_view(['GET', 'PUT', 'DELETE']) # decorating views with django-rest-framework api views to add special functionalities
def article_detail(request, pk):
try:
article = Article.objects.get(pk=pk) # trying to find the article object matching the 'pk' passed
except Article.DoesNotExist:
return HttpResponse(status=status.HTTP_404_NOT_FOUND)
if request.method == "GET":
serializer = ArticleSerializer(article) # serializing the article object found
return Response(serializer.data) # returning serialized data into JSON format
elif request.method == "PUT":
        # data = JSONParser().parse(request) # Parse JSON formatted data from incoming request object
serializer = ArticleSerializer(article, data=request.data) # serializing the article object with the parsed data
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
elif request.method == 'DELETE':
article.delete() # deleting the article object found
return Response(status=status.HTTP_204_NO_CONTENT)
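# Hedged wiring sketch for the views above, as it might appear in a urls.py.
# The route paths and basename are assumptions, not taken from this project:
#
#   from django.urls import path, include
#   from rest_framework.routers import DefaultRouter
#   from . import views
#
#   router = DefaultRouter()
#   router.register(r"articles", views.ArticleModelViewSet, basename="article")
#
#   urlpatterns = [
#       path("", include(router.urls)),
#       path("fbv/articles/", views.article_list),
#       path("fbv/articles/<int:pk>/", views.article_detail),
#   ]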
|
[
"sandipan.das898@gmail.com"
] |
sandipan.das898@gmail.com
|
cec1987aca842ac02625d72b387932b0b6678ec4
|
e292531fa72c3b5b9a3d5d1433dfb4a7f69a3471
|
/app/public/views.py
|
0b3b946b5dda554cafcd717270ddf926b172b973
|
[
"MIT"
] |
permissive
|
MunifNagi/whiteboard
|
ea0d74543c7e0e289bf80195ac229c7e5de90be2
|
f706ed5977a25447c1cb938313fd8a7328811749
|
refs/heads/master
| 2023-01-06T17:27:22.183060
| 2019-10-31T18:05:53
| 2019-10-31T18:05:53
| 218,827,710
| 0
| 0
|
MIT
| 2023-01-04T23:52:05
| 2019-10-31T17:57:56
|
Python
|
UTF-8
|
Python
| false
| false
| 2,135
|
py
|
# -*- coding: utf-8 -*-
"""Public section, including homepage and signup."""
from flask import (
Blueprint,
current_app,
flash,
redirect,
render_template,
request,
url_for,
)
from flask_login import login_required, login_user, logout_user
from app.extensions import login_manager
from app.public.forms import LoginForm
from app.user.forms import RegisterForm
from app.user.models import User
from app.utils import flash_errors
blueprint = Blueprint("public", __name__, static_folder="../static")
@login_manager.user_loader
def load_user(user_id):
"""Load user by ID."""
return User.get_by_id(int(user_id))
@blueprint.route("/", methods=["GET", "POST"])
def home():
"""Home page."""
form = LoginForm(request.form)
current_app.logger.info("Hello from the home page!")
# Handle logging in
if request.method == "POST":
if form.validate_on_submit():
login_user(form.user)
flash("You are logged in.", "success")
redirect_url = request.args.get("next") or url_for("user.members")
return redirect(redirect_url)
else:
flash_errors(form)
return render_template("public/home.html", form=form)
@blueprint.route("/logout/")
@login_required
def logout():
"""Logout."""
logout_user()
flash("You are logged out.", "info")
return redirect(url_for("public.home"))
@blueprint.route("/register/", methods=["GET", "POST"])
def register():
"""Register new user."""
form = RegisterForm(request.form)
if form.validate_on_submit():
User.create(
username=form.username.data,
email=form.email.data,
password=form.password.data,
active=True,
)
flash("Thank you for registering. You can now log in.", "success")
return redirect(url_for("public.home"))
else:
flash_errors(form)
return render_template("public/register.html", form=form)
@blueprint.route("/about/")
def about():
"""About page."""
form = LoginForm(request.form)
return render_template("public/about.html", form=form)
|
[
"mnagi@records.nyc.gov"
] |
mnagi@records.nyc.gov
|
3a907909b113fd65076489fe8def626540095d6a
|
c7be03a4a8160750a3260609abe23a115a29848b
|
/Problems/117. Crawler Log Folder/Crawler_Log_Folder.py
|
78505fdf449bb4b756ba1afae3f4397d333329a5
|
[] |
no_license
|
kenlee0305/Leetcode-Automation
|
9399a362d0b97f8ee9226a74c847e37d50d34d2b
|
fc89cb7cc3f6f136200ca9acf3c6db10ad972ca4
|
refs/heads/main
| 2023-06-03T03:03:20.068567
| 2021-06-16T07:31:36
| 2021-06-16T07:31:36
| 424,152,427
| 1
| 0
| null | 2021-11-03T08:55:25
| 2021-11-03T08:55:24
| null |
UTF-8
|
Python
| false
| false
| 376
|
py
|
from typing import List


class Solution:
    def minOperations(self, logs: List[str]) -> int:
        # track folder depth: '../' moves up (never above the main folder),
        # './' stays put, anything else moves one folder down
        depth = 0
        for op in logs:
            if op == '../':
                depth = max(depth - 1, 0)
            elif op != './':
                depth += 1
        return depth
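# Example (LeetCode sample): ["d1/", "d2/", "../", "d21/", "./"] -> 2
#   Solution().minOperations(["d1/", "d2/", "../", "d21/", "./"])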
|
[
"aj97389@gmail.com"
] |
aj97389@gmail.com
|
a847bbcf01fba6154f6ca550379a6ffc499e3ea0
|
260d095d0422d7783d125830fa9d98529b5c60f1
|
/zajecia_12/kartkowka4.py
|
1e58b8733dbed65062c4f35139245a97cd93526f
|
[] |
no_license
|
loafe123/pp3
|
a9b8be328764ad16c6d7cdaedc2519ff95876d41
|
647f44bb888f763207ded8874cec2a07ee3c4c85
|
refs/heads/master
| 2022-03-30T18:30:14.246307
| 2020-01-28T12:10:23
| 2020-01-28T12:10:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 395
|
py
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
'''
123456
This is the module docstring.
'''
def rot13(text):
    """
    ROT13 cipher function.
    This is the function docstring.
    """
    encoded = ''
    for letter in text:
        if 'A' <= letter <= 'Z':
            base = ord('A')
        elif 'a' <= letter <= 'z':
            base = ord('a')
        else:
            encoded += letter
            continue
        encoded += chr((ord(letter) - base + 13) % 26 + base)
    return encoded
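# Example: ROT13 is its own inverse.
#   rot13("HELLO") -> "URYYB"
#   rot13(rot13("HELLO")) -> "HELLO"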
|
[
"abukaj@users.noreply.github.com"
] |
abukaj@users.noreply.github.com
|
3c42e5bc139bba19c06e3aab07ee8c2d17c0dc7f
|
e34a44c07adb818a15dd0742761a4c2cf4258336
|
/src/final_exam/q_pie/pie_chart.py
|
bc21630f94e0f0987d182b714255e2164e929a9a
|
[
"MIT"
] |
permissive
|
acc-cosc-1336/cosc-1336-spring-2018-jjmareck
|
629e9cdb3a0f091e440e6dccbd2bc23341df4a2c
|
7abfd79cb9a63192c965f828a185ccd981820bae
|
refs/heads/master
| 2021-05-16T14:08:06.763135
| 2018-05-12T03:33:17
| 2018-05-12T03:33:17
| 118,071,035
| 0
| 0
|
MIT
| 2018-02-25T23:29:58
| 2018-01-19T03:22:07
|
Python
|
UTF-8
|
Python
| false
| false
| 164
|
py
|
class PieChart:
def __init__(self, arclist):
self.arclist = arclist
def draw(self):
for arc in self.arclist:
arc.draw()
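# Hedged usage sketch: PieChart expects arc-like objects exposing draw().
# This stub Arc is hypothetical, standing in for the course's graphics arc.
#
#   class Arc:
#       def draw(self):
#           print("drawing arc")
#
#   PieChart([Arc(), Arc()]).draw()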
|
[
"noreply@github.com"
] |
acc-cosc-1336.noreply@github.com
|
75eb1c002ed77b3efca306601ab05f1729052e77
|
1fabf3e10cd201cc0cf9a3e82590b5f68133b46b
|
/main.py
|
33247a65f5e0140e3018e908e8290707cdddffc3
|
[] |
no_license
|
gauthamzz/get_github_emails
|
5777c5c61e06eebe4904b1f55757c8a7c7956254
|
a5a462c7574b1e3fbb7e539b213112496bdd2be4
|
refs/heads/master
| 2023-01-14T03:34:07.840350
| 2020-11-21T11:58:07
| 2020-11-21T11:58:07
| 314,109,696
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,948
|
py
|
import typer
from src import github, utils
from typing import Optional
app = typer.Typer()
@app.command()
def contributors(owner: str, repo: str, uname: Optional[str] = typer.Argument(None)):
"""Get contributors of a repo
"""
typer.echo(github.find_contributors_from_repo(owner=owner, repo=repo, uname=uname))
@app.command()
def org(
organisation: str, threads: int = 2, uname: Optional[str] = typer.Argument(None)
):
"""Get list of users of an Organisation
"""
usernames = github.find_users_from_organisation(
organisation=organisation, uname=uname
)
result = []
for username in usernames:
result.append(github.find_email_from_username(username=username))
typer.echo(result)
@app.command()
def username(username: str, uname: Optional[str] = typer.Argument(None)):
"""Get email from username
"""
typer.echo(github.find_email_from_username(username))
@app.command()
def repo(owner: str, repo: str, uname: Optional[str] = typer.Argument(None)):
"""
    returns emails of the contributors of a repo
"""
typer.echo(github.find_emails_from_repo(username=owner, repo=repo, uname=uname))
@app.command()
def stargazzers(
owner: str,
repo: str,
info: bool = False,
uname: Optional[str] = typer.Argument(None),
):
"""
returns list of people who starred this repo
--info to get info of user
"""
usernames = github.find_stargazzers_from_repo(owner=owner, repo=repo, uname=uname)
results = []
for username in usernames:
results.append(github.find_email_from_username(username=username))
if not info:
return typer.echo(results)
typer.echo(utils.get_info_for_usernames(results=results, uname=uname))
@app.command()
def info(username: str, uname: Optional[str] = typer.Argument(None)):
typer.echo(github.get_profile_info_from_user(username=username, uname=uname))
if __name__ == "__main__":
app()
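# Hedged CLI examples (assuming this module is invoked as main.py; the
# GitHub names below are illustrative only):
#   python main.py username octocat
#   python main.py repo octocat hello-world
#   python main.py stargazzers octocat hello-world --info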
|
[
"thabeatsz@gmail.com"
] |
thabeatsz@gmail.com
|
d4ede4b1be60941155c4a1d06dc3b4754347282b
|
adc1a5ea987f1a8b50a806e65ad3bb188b6a270f
|
/exchangeSearch.py
|
ab17afe927da477503e0ee49a5466edc9b83fe76
|
[] |
no_license
|
M-aljawaheri/CalenderFill
|
4ea8f175e11073fb22e225afb781255092eaf191
|
4d321adb4426ebffc466318943e99b7fd34e6686
|
refs/heads/master
| 2022-06-14T22:46:52.778761
| 2020-05-03T18:07:17
| 2020-05-03T18:07:17
| 228,091,705
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,417
|
py
|
# Exchange email deadline Extraction
# By : Mohammed Al-jawaheri
# Email : Mobj@cmu.edu -- m_aljawaheri@outlook.com
# Student -- CarnegieMellon University
from exchangelib import Credentials, Account
from re import search
#
## Handles all deadline collection. Will use RegEx through methods
## to extract information from account.inbox item objects
class deadline_collector:
def __init__(self, reg_ex):
self.search_regex = reg_ex # list of regexs specified by user (dates,etc)
# Use methods to check / extract deadline
def extract_deadline(self, email):
# search for every regex
for regex in self.search_regex:
result = search(regex, email.text_body)
if result and email.subject not in important_emails:
# add to important emails
important_emails[email.subject] = important_email_info(
email.sender, email.subject,
email.text_body, result)
else:
# if regex not in body search in subjectline
result = search(regex, email.subject)
if result and email.subject not in important_emails:
# add to important emails
important_emails[email.subject] = important_email_info(
email.sender, email.subject,
email.text_body, result)
#
## Class stores all important email-info per email. Subject line, dates and
## email body. Will store each object in an important emails dictionary
class important_email_info:
def __init__(self, sender, subjectLine, email_body, deadline):
self.sender = sender
self.subject = subjectLine
self.email_body = email_body
self.deadline = deadline
# dictionary to check if an email received hasn't already been processed
important_emails = {} # reset every 24 hrs
# dictionary to run DLcollector filter over to filter important emails
all_emails = {} # reset every 24 hrs
my_deadline_collector = deadline_collector(
    [r"January \d+",
     r"February \d+",
     r"March \d+",
     r"April \d+",
     r"May \d+",
     r"June \d+",
     r"July \d+",
     r"August \d+",
     r"September \d+",
     r"October \d+",
     r"November \d+",
     r"December \d+"])
def collect_important_emails():
# Collecting info from exchange email:
email = input("Enter your email: ")
password = input("Enter your password: ")
credentials = Credentials(email, password) # Note: later change this to user input, possibly tkinter interface ?
account = Account(email, credentials=credentials, autodiscover=True)
# Get emails
for item in account.inbox.all().order_by('-datetime_received')[:5]:
if item.subject not in all_emails and item.subject != None:
all_emails[item.subject] = item
# Use deadline collector to filter all emails and append to important emails
for email in all_emails:
my_deadline_collector.extract_deadline(all_emails[email])
return important_emails
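# Hedged usage sketch: prompts for Exchange credentials, then summarises the
# matched deadlines (attribute names as defined on important_email_info above).
#
#   for subject, info in collect_important_emails().items():
#       print(subject, "->", info.deadline.group(0))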
|
[
"mobj@cmu.edu"
] |
mobj@cmu.edu
|
46038361f24318dc1106ee990c5592e340ff7717
|
4ff67d9f3614fecf5d37bc1562638651e7346e28
|
/portfolio/portfolio/urls.py
|
a4b9aa5833f4632784d99ed2997e97f0fee35845
|
[] |
no_license
|
techjunaid/portfolio
|
d86fa0e7a6c5af509858e1fb9f51c5d45325f903
|
0cfaa517fe85cfd0b9eacb18bf83ba46b329afe8
|
refs/heads/master
| 2022-12-07T18:20:27.048859
| 2020-08-30T05:01:51
| 2020-08-30T05:01:51
| 291,402,615
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,001
|
py
|
"""portfolio URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path,include
from django.conf import settings
from django.conf.urls.static import static
import jobs.views
urlpatterns = [
path('admin/', admin.site.urls),
path('',jobs.views.home,name='home'),
path('blog/',include('blogs.urls')),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
[
"53346421+techjunaid@users.noreply.github.com"
] |
53346421+techjunaid@users.noreply.github.com
|