hexsha stringlengths 40 40 | size int64 4 1.02M | ext stringclasses 8 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 4 209 | max_stars_repo_name stringlengths 5 121 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 209 | max_issues_repo_name stringlengths 5 121 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 209 | max_forks_repo_name stringlengths 5 121 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 1.02M | avg_line_length float64 1.07 66.1k | max_line_length int64 4 266k | alphanum_fraction float64 0.01 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
d469ead197ca2f194f3552c9f555d20d3b161a42 | 1,731 | py | Python | setup.py | BhagyaDeepika/git | 28439efd97ffc2f1e4a8c54324ab67116471672e | [
"MIT"
] | null | null | null | setup.py | BhagyaDeepika/git | 28439efd97ffc2f1e4a8c54324ab67116471672e | [
"MIT"
] | null | null | null | setup.py | BhagyaDeepika/git | 28439efd97ffc2f1e4a8c54324ab67116471672e | [
"MIT"
] | null | null | null | from distutils.core import setup
setup(
    name='SAMPLE',  # How you named your package folder (MyLib)
    packages=['SAMPLE'],  # Choose the same as "name"
    version='0.1',  # Start with a small number and increase it with every change you make
    license='MIT',  # Choose a license from here: https://help.github.com/articles/licensing-a-repository
    description='testing ',  # Give a short description about your library
    author='Deepika',  # Type in your name
    author_email='mekalabhagyadeepika@gmail.com',  # Type in your E-Mail
    url='https://github.com/BhagyaDeepika/git.git',  # Link to the project's GitHub page or website
    # download_url = 'https://github.com/user/reponame/archive/v_01.tar.gz',  # tarball of a tagged release
    keywords=['SOME', 'MEANINGFULL', 'KEYWORDS'],  # Keywords that define your package best
    install_requires=[  # Runtime dependencies installed alongside the package
        'requests',
    ],
    classifiers=[
        'Development Status :: 3 - Alpha',  # Choose "3 - Alpha", "4 - Beta" or "5 - Production/Stable"
        'Intended Audience :: Developers',  # Define that your audience are developers
        'Topic :: Software Development :: Build Tools',
        'License :: OSI Approved :: MIT License',  # Again, pick a license
        'Programming Language :: Python :: 3',  # Specify which Python versions you want to support
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
    ],
) | 59.689655 | 147 | 0.636049 |
08549106478ff8941903eeb1db87a631abcbe7a8 | 287 | py | Python | demo-django/demo/urls.py | JonathanRowe/python-saml-master | 81dad60667191e6e1443a770bf779f175ed6beac | [
"MIT"
] | null | null | null | demo-django/demo/urls.py | JonathanRowe/python-saml-master | 81dad60667191e6e1443a770bf779f175ed6beac | [
"MIT"
] | 2 | 2020-06-06T00:11:05.000Z | 2021-06-10T22:19:47.000Z | demo-django/demo/urls.py | JonathanRowe/python-saml-master | 81dad60667191e6e1443a770bf779f175ed6beac | [
"MIT"
] | null | null | null | from django.conf.urls import url
from django.contrib import admin
from demo.views import index, attrs, metadata
# Populate the admin registry from every installed app.
admin.autodiscover()

# Route table for the SAML demo: landing page and SP metadata endpoint.
urlpatterns = [
    url(r'^$', index, name='index'),
    # url(r'^attrs/$', attrs, name='attrs'),
    url(r'^metadata/$', metadata, name='metadata')
]
| 22.076923 | 50 | 0.672474 |
8480c942bea87b58afed55bc7eb0d8924e345f8d | 598 | py | Python | app/mails.py | i3Cheese/MatBoy | 29dd65f07393087758179d14d4b40d5974816759 | [
"WTFPL"
] | 10 | 2020-04-24T02:39:22.000Z | 2021-07-22T13:12:55.000Z | app/mails.py | i3Cheese/MatBoy | 29dd65f07393087758179d14d4b40d5974816759 | [
"WTFPL"
] | null | null | null | app/mails.py | i3Cheese/MatBoy | 29dd65f07393087758179d14d4b40d5974816759 | [
"WTFPL"
] | 4 | 2020-05-31T12:34:55.000Z | 2020-06-25T17:35:43.000Z | from flask_mail import Message
from app import app, mail
from config import config
def send_message(msg):
    """Send a single, already-constructed message over email SMTP."""
    # mail.send requires an active Flask application context.
    with app.app_context():
        mail.send(msg)
def send_messages(subject, recipients, html, sender=config.MAIL_DEFAULT_SENDER):
    """Send the same HTML email to every recipient, one message per address."""
    # A single application context covers the whole batch of sends.
    with app.app_context():
        for address in recipients:
            message = Message(
                subject=subject,
                recipients=[address],
                sender=sender,
                html=html,
            )
            mail.send(message)
| 26 | 82 | 0.593645 |
52504304edf96b7653fda4c06ad60f94082a2716 | 4,999 | py | Python | sdk/storage/azure-mgmt-storagecache/azure/mgmt/storagecache/aio/operations/_skus_operations.py | praveenkuttappan/azure-sdk-for-python | 4b79413667b7539750a6c7dde15737013a3d4bd5 | [
"MIT"
] | 2,728 | 2015-01-09T10:19:32.000Z | 2022-03-31T14:50:33.000Z | sdk/storage/azure-mgmt-storagecache/azure/mgmt/storagecache/aio/operations/_skus_operations.py | v-xuto/azure-sdk-for-python | 9c6296d22094c5ede410bc83749e8df8694ccacc | [
"MIT"
] | 17,773 | 2015-01-05T15:57:17.000Z | 2022-03-31T23:50:25.000Z | sdk/storage/azure-mgmt-storagecache/azure/mgmt/storagecache/aio/operations/_skus_operations.py | v-xuto/azure-sdk-for-python | 9c6296d22094c5ede410bc83749e8df8694ccacc | [
"MIT"
] | 1,916 | 2015-01-19T05:05:41.000Z | 2022-03-31T19:36:44.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class SkusOperations:
    """SkusOperations async operations.

    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.

    :ivar models: Alias to model classes used in this operation group.
    :type models: ~storage_cache_management_client.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    models = _models

    def __init__(self, client, config, serializer, deserializer) -> None:
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config

    def list(
        self,
        **kwargs: Any
    ) -> AsyncIterable["_models.ResourceSkusResult"]:
        """Get the list of StorageCache.Cache SKUs available to this subscription.

        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either ResourceSkusResult or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~storage_cache_management_client.models.ResourceSkusResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ResourceSkusResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2021-09-01"
        accept = "application/json"

        # The three closures below implement the AsyncItemPaged protocol:
        # prepare_request builds each HTTP request, get_next executes it,
        # and extract_data turns a response into (continuation token, items).
        def prepare_request(next_link=None):
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

            if not next_link:
                # First page: expand the URL template from list.metadata.
                # Construct URL
                url = self.list.metadata['url']  # type: ignore
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

                request = self._client.get(url, query_parameters, header_parameters)
            else:
                # Follow-up page: the service supplies a complete next_link URL.
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request

        async def extract_data(pipeline_response):
            deserialized = self._deserialize('ResourceSkusResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)

        async def get_next(next_link=None):
            request = prepare_request(next_link)

            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)

            return pipeline_response

        return AsyncItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.StorageCache/skus'}  # type: ignore
| 45.862385 | 133 | 0.660932 |
3984fa51230c91c31fdc593b31478655ef2015d9 | 1,914 | py | Python | utils.py | flaviofilipe/Live-Divulgador | 5f02604efc5bb2ef7f994e35c866a993a4ea92ec | [
"MIT"
] | null | null | null | utils.py | flaviofilipe/Live-Divulgador | 5f02604efc5bb2ef7f994e35c866a993a4ea92ec | [
"MIT"
] | null | null | null | utils.py | flaviofilipe/Live-Divulgador | 5f02604efc5bb2ef7f994e35c866a993a4ea92ec | [
"MIT"
] | null | null | null | import os
import sys
import pandas as pd
import time
import requests
import shutil
from PIL import Image
# Absolute path of this file
abs_path = os.path.abspath(__file__)
# Path of the folder containing this file
dir_path = os.path.dirname(abs_path)
# Full path of the CSV file we want to access
FILE = os.path.join(dir_path, "streamers.csv")
def readStreamers():
    """Read the streamer table from streamers.csv into a DataFrame.

    Returns an empty DataFrame when the file exists but cannot be parsed;
    exits the program with status 1 when the file is missing.
    """
    # Guard clause: a missing file is fatal for the whole program.
    if not os.path.exists(FILE):
        print("O ficheiro "+FILE+" não existe!")
        sys.exit(1)
    try:
        return pd.read_csv(FILE, sep=",", encoding="latin-1")
    except Exception:
        # Narrowed from a bare "except:": still best-effort on parse errors,
        # but no longer swallows SystemExit/KeyboardInterrupt.
        return pd.DataFrame({})
def deleteExistStreamers(streamers, names):
    """Drop rows whose "Nome" is already present in the database.

    Vectorized replacement for the original per-name drop loop: a single
    isin() pass instead of one full-column scan and drop per name. The
    surviving rows keep their original index, as before.
    """
    return streamers[~streamers["Nome"].isin(names)]
def removeCmdsFromTitle(title):
    """Strip chat-command tokens (words starting with "!") from a stream title.

    Keeps the title concise by dropping bot commands the streamer embedded.
    """
    kept_words = [word for word in title.split() if not word.startswith("!")]
    return " ".join(kept_words)
def getImage(name):
    """Download the live-preview thumbnail of a stream and convert it to PNG.

    Returns (name, True) on success, (None, False) when the HTTP request
    does not return 200.
    """
    url = f"https://static-cdn.jtvnw.net/previews-ttv/live_user_{name}-1280x720.jpg"
    img_name = name + ".jpg"
    # Use the response as a context manager so the underlying connection is
    # always released (the original leaked it on every call).
    with requests.get(url, stream=True) as r:
        if r.status_code != 200:
            return None, False
        # Decode the raw stream so the downloaded file is not size 0.
        r.raw.decode_content = True
        with open(img_name, "wb") as fw:
            # Write the image to disk
            shutil.copyfileobj(r.raw, fw)
    # Convert the JPEG to PNG; the context manager closes the file handle
    # (the original kept it open).
    with Image.open(img_name) as img:
        img.save(name + ".png")
    return name, True
def updateCSV(streamers):
    """Persist the modified streamer table back to streamers.csv."""
    # NOTE(review): to_csv overwrites the file anyway, so this remove looks
    # redundant — confirm before relying on it.
    os.remove(FILE)
    streamers.to_csv(FILE, sep=",", index=False)
return | 23.060241 | 82 | 0.680773 |
9727e939fc968221611a201214ef866088ae9524 | 1,750 | py | Python | clancy_database/management/commands/export_clancy_lemmas.py | arthurian/visualizing_russian_tools | 65fd37839dc0650bb25d1f98904da5b79ae1a754 | [
"BSD-3-Clause"
] | null | null | null | clancy_database/management/commands/export_clancy_lemmas.py | arthurian/visualizing_russian_tools | 65fd37839dc0650bb25d1f98904da5b79ae1a754 | [
"BSD-3-Clause"
] | null | null | null | clancy_database/management/commands/export_clancy_lemmas.py | arthurian/visualizing_russian_tools | 65fd37839dc0650bb25d1f98904da5b79ae1a754 | [
"BSD-3-Clause"
] | null | null | null | from django.core.management.base import BaseCommand, CommandError
from django.conf import settings
from django.db.models import Q
import time
import csv
import sys
from clancy_database.models import Lemma
class Command(BaseCommand):
    """Management command that dumps lemma label/translation pairs to stdout."""

    help = 'Exports a list of lemmas from the database to standard output.'

    def add_arguments(self, parser):
        # Command-line flags: output format plus optional queryset filters.
        parser.add_argument("--format", help="CSV or TSV format", choices=("tsv", "csv"), default="tsv")
        parser.add_argument("--level", help="Filter by level")
        parser.add_argument("--pos", help="Filter by part of speech")
        parser.add_argument('--exclude-mwes', help="Exclude multiple word expressions", action='store_true', default=False)

    def handle(self, *args, **options):
        # Build the filtered queryset, then stream it to standard output.
        lemmas = self.build_queryset(
            level=options['level'],
            pos=options['pos'],
            exclude_mwes=options['exclude_mwes'],
        )
        self.export_lemmas(lemmas, format=options['format'])

    def build_queryset(self, level=None, pos=None, exclude_mwes=False):
        """Return a Lemma queryset narrowed by the optional filters."""
        qs = Lemma.objects.all()
        if level is not None:
            qs = qs.filter(level=level)
        if pos is not None:
            qs = qs.filter(pos=pos)
        if exclude_mwes:
            # A multi-word expression is a lemma containing a space.
            qs = qs.filter(~Q(lemma__contains=" "))
        return qs

    def export_lemmas(self, queryset, format=None):
        """Write one (label, translation) row per lemma to standard output."""
        delimiter = "\t" if format == "tsv" else ","
        writer = csv.writer(sys.stdout, delimiter=delimiter, quoting=csv.QUOTE_MINIMAL)
        for lemma in queryset:
            record = lemma.to_dict()
            writer.writerow([record["label"], record["translation"]])
| 38.043478 | 123 | 0.644571 |
803f3fac86087aef433994c561ac5ebc03de0718 | 1,297 | py | Python | demo/guide-python/sklearn_parallel.py | cnheider/xgboost | e7fbc8591fa7277ee4c474b7371c48c11b34cbde | [
"Apache-2.0"
] | 11,356 | 2017-12-08T19:42:32.000Z | 2022-03-31T16:55:25.000Z | demo/guide-python/sklearn_parallel.py | cnheider/xgboost | e7fbc8591fa7277ee4c474b7371c48c11b34cbde | [
"Apache-2.0"
] | 2,402 | 2017-12-08T22:31:01.000Z | 2022-03-28T19:25:52.000Z | demo/guide-python/sklearn_parallel.py | cnheider/xgboost | e7fbc8591fa7277ee4c474b7371c48c11b34cbde | [
"Apache-2.0"
] | 1,343 | 2017-12-08T19:47:19.000Z | 2022-03-26T11:31:36.000Z | import os
if __name__ == "__main__":
    # NOTE: on posix systems, this *has* to be here and in the
    # `__name__ == "__main__"` clause to run XGBoost in parallel processes
    # using fork, if XGBoost was built with OpenMP support. Otherwise, if you
    # build XGBoost without OpenMP support, you can use fork, which is the
    # default backend for joblib, and omit this.
    try:
        from multiprocessing import set_start_method
    except ImportError:
        raise ImportError("Unable to import multiprocessing.set_start_method."
                          " This example only runs on Python 3.4")
    # forkserver avoids fork()-ing a process that already holds OpenMP threads.
    set_start_method("forkserver")

    import numpy as np
    from sklearn.grid_search import GridSearchCV
    from sklearn.datasets import load_boston
    import xgboost as xgb

    rng = np.random.RandomState(31337)

    print("Parallel Parameter optimization")
    boston = load_boston()

    # Cap OpenMP threads so the two grid-search workers don't oversubscribe CPUs.
    os.environ["OMP_NUM_THREADS"] = "2"  # or to whatever you want
    y = boston['target']
    X = boston['data']
    xgb_model = xgb.XGBRegressor()
    # 3x3 parameter grid searched with 2 parallel worker processes.
    clf = GridSearchCV(xgb_model, {'max_depth': [2, 4, 6],
                                   'n_estimators': [50, 100, 200]}, verbose=1,
                       n_jobs=2)
    clf.fit(X, y)
    print(clf.best_score_)
| 36.027778 | 78 | 0.647648 |
6b36a89e6ed63a4e2b620d42affd2d07eac9ef6f | 3,896 | py | Python | experiments/helper.py | tennisonliu/fair-cocco | 010ad85dac7c844089af172c99e2eba95685edd7 | [
"MIT"
] | null | null | null | experiments/helper.py | tennisonliu/fair-cocco | 010ad85dac7c844089af172c99e2eba95685edd7 | [
"MIT"
] | null | null | null | experiments/helper.py | tennisonliu/fair-cocco | 010ad85dac7c844089af172c99e2eba95685edd7 | [
"MIT"
] | null | null | null | '''
Helper functions to compute accuracy, fairness metrics
'''
import sys
sys.path.append('.')
import torch
from fair_cocco import compute_cond_cocco, compute_indep_cocco
from config import DEVICE
from random import seed
SEED = 0
# Seed both Python's and torch's RNGs so experiment runs are reproducible.
seed(SEED)
torch.manual_seed(SEED)
def entropy_to_prob(entropy):
    """Convert an (N, 2) logit tensor to the probability of class 1.

    Uses torch.softmax, which is numerically stable; the original manual
    ``exp() / exp().sum()`` form overflows to nan for large logits while
    producing identical values in the stable range.
    """
    return torch.softmax(entropy, dim=1)[:, 1]
def calc_accuracy(outputs, Y):
    """Fraction of rows whose argmax class matches the label column Y."""
    preds = outputs.argmax(dim=1, keepdim=True)
    return (preds == Y).sum() / preds.size(0)
def compute_di(pt, p):
    """Disparate impact: ratio of positive-prediction rates between groups.

    Predictions are thresholded at 0.5; returns 0 when the pt==0 group has
    no positive predictions (to avoid division by zero).
    """
    thresh = 0.5
    pos_pred = p > thresh
    rate_g1 = ((pt == 1.) * pos_pred).sum().float() / (pt == 1).sum().float()
    rate_g0 = ((pt == 0.) * pos_pred).sum().float() / (pt == 0).sum().float()
    if rate_g0.item() == 0:
        return 0.
    return (rate_g1 / rate_g0).item()
def compute_deo(pt, p, target):
    """Difference in equality of opportunity: |TPR(group 1) - TPR(group 0)|."""
    thresh = 0.5
    pos_pred = p > thresh
    tpr_g1 = ((pt == 1.) * pos_pred * (target == 1)).sum().float() / ((pt == 1) * (target == 1)).sum().float()
    tpr_g0 = ((pt == 0.) * pos_pred * (target == 1)).sum().float() / ((pt == 0) * (target == 1)).sum().float()
    return (tpr_g1 - tpr_g0).abs().item()
def compute_group_calibration(pt, p, target):
    """Difference in calibration (positive predictive value) between groups."""
    thresh = 0.5
    pos_pred = p > thresh
    ppv_g1 = ((target == 1) * pos_pred * (pt == 1.)).sum().float() / (pos_pred * (pt == 1)).sum().float()
    ppv_g0 = ((target == 1) * pos_pred * (pt == 0.)).sum().float() / (pos_pred * (pt == 0)).sum().float()
    print(f'group calibration: {ppv_g1.item()}, {ppv_g0.item()}')
    return (ppv_g1 - ppv_g0).abs().item()
def results_on_test(model, criterion, x, y, prot, config, type='classification', fairness='eo', maxpoints=1000, true_outcome=None):
    '''
    Evaluates a trained model on the test split and returns a dict of
    accuracy and fairness metrics (loss, accuracy, balanced_acc, di, deo,
    plus the cocco / dc entries selected by the `fairness` criterion).

    NOTE(review): the parameter `type` shadows the builtin of the same name.
    '''
    model.eval()
    with torch.no_grad():
        # Move labels / protected attribute / features onto the experiment device.
        # Assumes x and y are pandas objects (have .values) — confirm with callers.
        if type == 'classification':
            target = torch.tensor(y.values).long().to(DEVICE)
        else:
            target = torch.tensor(y.values).float().to(DEVICE)
        pt = torch.tensor(prot).float().to(DEVICE)
        data = torch.tensor(x.values).float().to(DEVICE)
        outputs = model(data)
        loss = criterion(outputs, target)
        p = entropy_to_prob(outputs)

        ans = {}
        # Balanced accuracy: unweighted mean of the two per-group accuracies.
        balanced_acc = (calc_accuracy(outputs[pt==0],target[pt==0]) +
                        calc_accuracy(outputs[pt==1],target[pt==1]))/2
        ans['loss'] = loss.item()
        ans['accuracy'] = calc_accuracy(outputs, torch.unsqueeze(target, 1))
        ans['balanced_acc'] = balanced_acc
        ans['di'] = compute_di(pt, p)
        ans['deo'] = compute_deo(pt, p, target)

        # Column vectors expected by the cocco routines below.
        target = torch.unsqueeze(target, 1)
        pt = torch.unsqueeze(pt, 1)
        if fairness == 'eo':
            print('Evaluating model on Equalised Odds...')
            if true_outcome is not None:
                ## allows use of different set of ground truth labels
                print('Using separately supplied ground truth outcomes...')
                true_outcome = torch.tensor(true_outcome.values).float().to(DEVICE)
                true_outcome = torch.unsqueeze(true_outcome, 1)
                ans['cocco'] = compute_cond_cocco(outputs, pt, true_outcome, config.epsilon, config.tol, maxpoints).item()
            else:
                ans['cocco'] = compute_cond_cocco(outputs, pt, target.double(), config.epsilon, config.tol, maxpoints).item()
        if fairness == 'cal':
            print('Evaluating model on Calibration...')
            # For calibration the conditioning order is (prot, target, outputs).
            ans['cocco'] = compute_cond_cocco(pt, target.double(), outputs, config.epsilon, config.tol, maxpoints).item()
            ans['dc'] = compute_group_calibration(torch.squeeze(pt), p, torch.squeeze(target))
        if fairness == 'dp':
            print('Evaluating model on Demographic Parity...')
            ans['cocco'] = compute_indep_cocco(outputs, pt).item()
return ans | 36.411215 | 131 | 0.580082 |
28799f10dc7d8c1f09147d12cb2d83134073a423 | 53,056 | py | Python | woot/apps/catalog/migrations/0097_auto__add_space.py | Makeystreet/makeystreet | 761331de52207227baf6f8d161ab6df1747f8ef3 | [
"Apache-2.0"
] | 1 | 2015-06-27T13:25:28.000Z | 2015-06-27T13:25:28.000Z | woot/apps/catalog/migrations/0097_auto__add_space.py | Makeystreet/makeystreet | 761331de52207227baf6f8d161ab6df1747f8ef3 | [
"Apache-2.0"
] | 1 | 2015-07-02T20:18:53.000Z | 2015-07-02T20:18:53.000Z | woot/apps/catalog/migrations/0097_auto__add_space.py | Makeystreet/makeystreet | 761331de52207227baf6f8d161ab6df1747f8ef3 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Apply: create the Space model and its five many-to-many join tables."""
        # Adding model 'Space'
        db.create_table(u'catalog_space', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('added_time', self.gf('django.db.models.fields.DateTimeField')()),
            ('is_enabled', self.gf('django.db.models.fields.BooleanField')(default=True)),
            ('score', self.gf('django.db.models.fields.IntegerField')(default=0)),
            ('name', self.gf('django.db.models.fields.CharField')(max_length=200)),
            ('website', self.gf('django.db.models.fields.URLField')(max_length=200, null=True, blank=True)),
            ('description', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
            ('email', self.gf('django.db.models.fields.EmailField')(max_length=254, null=True, blank=True)),
            ('address', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
        ))
        db.send_create_signal('catalog', ['Space'])

        # Adding M2M table for field admins on 'Space'
        m2m_table_name = db.shorten_name(u'catalog_space_admins')
        db.create_table(m2m_table_name, (
            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
            ('space', models.ForeignKey(orm['catalog.space'], null=False)),
            ('user', models.ForeignKey(orm[u'auth.user'], null=False))
        ))
        db.create_unique(m2m_table_name, ['space_id', 'user_id'])

        # Adding M2M table for field members on 'Space'
        m2m_table_name = db.shorten_name(u'catalog_space_members')
        db.create_table(m2m_table_name, (
            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
            ('space', models.ForeignKey(orm['catalog.space'], null=False)),
            ('user', models.ForeignKey(orm[u'auth.user'], null=False))
        ))
        db.create_unique(m2m_table_name, ['space_id', 'user_id'])

        # Adding M2M table for field new_members on 'Space'
        m2m_table_name = db.shorten_name(u'catalog_space_new_members')
        db.create_table(m2m_table_name, (
            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
            ('space', models.ForeignKey(orm['catalog.space'], null=False)),
            ('newuser', models.ForeignKey(orm['catalog.newuser'], null=False))
        ))
        db.create_unique(m2m_table_name, ['space_id', 'newuser_id'])

        # Adding M2M table for field tools on 'Space'
        m2m_table_name = db.shorten_name(u'catalog_space_tools')
        db.create_table(m2m_table_name, (
            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
            ('space', models.ForeignKey(orm['catalog.space'], null=False)),
            ('product', models.ForeignKey(orm['catalog.product'], null=False))
        ))
        db.create_unique(m2m_table_name, ['space_id', 'product_id'])

        # Adding M2M table for field new_tools on 'Space'
        m2m_table_name = db.shorten_name(u'catalog_space_new_tools')
        db.create_table(m2m_table_name, (
            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
            ('space', models.ForeignKey(orm['catalog.space'], null=False)),
            ('newproduct', models.ForeignKey(orm['catalog.newproduct'], null=False))
        ))
        db.create_unique(m2m_table_name, ['space_id', 'newproduct_id'])
def backwards(self, orm):
    """Reverse this migration: drop the 'Space' model table and every
    auto-created M2M join table that forwards() added for it.

    ``orm`` is South's frozen ORM snapshot; it is unused here because raw
    table names are sufficient for the drops.
    """
    # Drop the base model table first.
    db.delete_table(u'catalog_space')
    # Drop each M2M join table (fields: admins, members, new_members,
    # tools, new_tools on 'Space'), in the same order forwards() created
    # them. shorten_name() mirrors the truncation applied at creation time.
    m2m_tables = (
        u'catalog_space_admins',
        u'catalog_space_members',
        u'catalog_space_new_members',
        u'catalog_space_tools',
        u'catalog_space_new_tools',
    )
    for raw_name in m2m_tables:
        db.delete_table(db.shorten_name(raw_name))
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'catalog.cfistoreitem': {
'Meta': {'object_name': 'CfiStoreItem'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'item': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['catalog.Product']", 'unique': 'True'}),
'likers': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'cfi_store_item_likes'", 'symmetrical': 'False', 'through': "orm['catalog.LikeCfiStoreItem']", 'to': u"orm['auth.User']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'catalog.comment': {
'Meta': {'object_name': 'Comment'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'body': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'likes_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.documentation': {
'Meta': {'object_name': 'Documentation'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '1000'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'catalog.emailcollect': {
'Meta': {'object_name': 'EmailCollect'},
'email': ('django.db.models.fields.EmailField', [], {'max_length': '30'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'catalog.image': {
'Meta': {'object_name': 'Image'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'comments': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['catalog.Comment']", 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'large_url': ('django.db.models.fields.URLField', [], {'max_length': '1000'}),
'likes_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'small_url': ('django.db.models.fields.URLField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'images'", 'null': 'True', 'to': u"orm['auth.User']"})
},
'catalog.like': {
'Meta': {'object_name': 'Like'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.likecfistoreitem': {
'Meta': {'unique_together': "(('user', 'cfi_store_item'),)", 'object_name': 'LikeCfiStoreItem'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'cfi_store_item': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.CfiStoreItem']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.likecomment': {
'Meta': {'unique_together': "(('user', 'comment'),)", 'object_name': 'LikeComment'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'comment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Comment']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.likeimage': {
'Meta': {'unique_together': "(('user', 'image'),)", 'object_name': 'LikeImage'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Image']"}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.likemakey': {
'Meta': {'unique_together': "(('user', 'makey'),)", 'object_name': 'LikeMakey'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'makey': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'makeylikes'", 'to': "orm['catalog.Makey']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.likenote': {
'Meta': {'unique_together': "(('user', 'note'),)", 'object_name': 'LikeNote'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'note': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Note']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.likeproduct': {
'Meta': {'unique_together': "(('user', 'product'),)", 'object_name': 'LikeProduct'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Product']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.likeproductdescription': {
'Meta': {'unique_together': "(('user', 'product_description'),)", 'object_name': 'LikeProductDescription'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'product_description': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.ProductDescription']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.likeproductimage': {
'Meta': {'unique_together': "(('user', 'image'),)", 'object_name': 'LikeProductImage'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.ProductImage']"}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Product']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.likeproducttutorial': {
'Meta': {'unique_together': "(('user', 'tutorial', 'product'),)", 'object_name': 'LikeProductTutorial'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Product']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'tutorial': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Tutorial']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.likeshop': {
'Meta': {'unique_together': "(('user', 'shop'),)", 'object_name': 'LikeShop'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'shop': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Shop']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.likevideo': {
'Meta': {'unique_together': "(('user', 'video'),)", 'object_name': 'LikeVideo'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'video': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Video']"})
},
'catalog.list': {
'Meta': {'object_name': 'List'},
'access': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'access'", 'symmetrical': 'False', 'to': u"orm['auth.User']"}),
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_private': ('django.db.models.fields.BooleanField', [], {}),
'items': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['catalog.ListItem']", 'symmetrical': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'owner'", 'to': u"orm['auth.User']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'catalog.listgroup': {
'Meta': {'object_name': 'ListGroup'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'lists': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['catalog.List']", 'symmetrical': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'catalog.listitem': {
'Meta': {'object_name': 'ListItem'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'createdby': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'note': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Product']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'catalog.location': {
'Meta': {'object_name': 'Location'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'catalog.logidenticalproduct': {
'Meta': {'object_name': 'LogIdenticalProduct'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'product1': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'product1'", 'to': "orm['catalog.Product']"}),
'product2': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'product2'", 'to': "orm['catalog.Product']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.makey': {
'Meta': {'object_name': 'Makey'},
'about': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'collaborators': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'collaborators'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['auth.User']"}),
'comments': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'makeycomments'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['catalog.Comment']"}),
'cover_pic': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Image']", 'null': 'True', 'blank': 'True'}),
'credits': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'documentations': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'makeydocumentations'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['catalog.Documentation']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'images': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'makeyimages'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['catalog.Image']"}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'mentors': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'new_parts': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'makeys_parts'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['catalog.NewProduct']"}),
'new_tools': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'makeys_tools'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['catalog.NewProduct']"}),
'new_users': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'makeys'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['catalog.NewUser']"}),
'notes': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'makeynotes'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['catalog.Note']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'status': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'videos': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'makeyvideos'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['catalog.Video']"}),
'votes': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'why': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
'catalog.makeyimage': {
'Meta': {'object_name': 'MakeyImage'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'makey_id': ('django.db.models.fields.IntegerField', [], {}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'catalog.newproduct': {
'Meta': {'object_name': 'NewProduct'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Image']", 'null': 'True', 'blank': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'catalog.newuser': {
'Meta': {'object_name': 'NewUser'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'catalog.note': {
'Meta': {'object_name': 'Note'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'body': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
'comments': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['catalog.Comment']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Image']", 'null': 'True', 'blank': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'likes_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '140', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.product': {
'Meta': {'object_name': 'Product'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'identicalto': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Product']", 'null': 'True', 'blank': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'likers': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'product_likes'", 'symmetrical': 'False', 'through': "orm['catalog.LikeProduct']", 'to': u"orm['auth.User']"}),
'makeys': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'partsused'", 'blank': 'True', 'to': "orm['catalog.Makey']"}),
'makeys_as_tools': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'tools_used'", 'blank': 'True', 'to': "orm['catalog.Makey']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'sku': ('django.db.models.fields.IntegerField', [], {}),
'tutorials': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'products'", 'blank': 'True', 'to': "orm['catalog.Tutorial']"})
},
'catalog.productdescription': {
'Meta': {'object_name': 'ProductDescription'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '100000'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'productdescriptions'", 'to': "orm['catalog.Product']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'shop': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Shop']", 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'blank': 'True'}),
'user_or_shop': ('django.db.models.fields.BooleanField', [], {})
},
'catalog.productimage': {
'Meta': {'object_name': 'ProductImage'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'productimages'", 'to': "orm['catalog.Product']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'shop': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Shop']", 'null': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'catalog.productreview': {
'Meta': {'object_name': 'ProductReview'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'product_reviews'", 'to': "orm['catalog.Product']"}),
'rating': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'review': ('django.db.models.fields.CharField', [], {'max_length': '100000'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'votes': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'catalog.productshopurl': {
'Meta': {'object_name': 'ProductShopUrl'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'productshopurls'", 'to': "orm['catalog.Product']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'shop': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Shop']"}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'})
},
'catalog.searchlog': {
'Meta': {'object_name': 'SearchLog'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'term': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'time': ('django.db.models.fields.DateTimeField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'catalog.shop': {
'Meta': {'object_name': 'Shop'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'images': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'shopimages'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['catalog.Image']"}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'likes': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'shop_likes'", 'symmetrical': 'False', 'through': "orm['catalog.LikeShop']", 'to': u"orm['auth.User']"}),
'location': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Location']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'})
},
'catalog.shopreview': {
'Meta': {'object_name': 'ShopReview'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'rating': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'review': ('django.db.models.fields.CharField', [], {'max_length': '100000'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'shop': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'shop_reviews'", 'to': "orm['catalog.Shop']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'votes': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'catalog.space': {
'Meta': {'object_name': 'Space'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'address': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'admins': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'space_admins'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['auth.User']"}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '254', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'space_members'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['auth.User']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'new_members': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'space_new_members'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['catalog.NewUser']"}),
'new_tools': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'space_new_tools'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['catalog.NewProduct']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'tools': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'space_tools'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['catalog.Product']"}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
'catalog.toindexstore': {
'Meta': {'object_name': 'ToIndexStore'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Location']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'})
},
'catalog.topmakeys': {
'Meta': {'object_name': 'TopMakeys'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'makey': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Makey']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'catalog.topproducts': {
'Meta': {'object_name': 'TopProducts'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Product']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'catalog.topshops': {
'Meta': {'object_name': 'TopShops'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'shop': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Shop']"})
},
'catalog.toptutorials': {
'Meta': {'object_name': 'TopTutorials'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'tutorial': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Tutorial']"})
},
'catalog.topusers': {
'Meta': {'object_name': 'TopUsers'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.tutorial': {
'Meta': {'object_name': 'Tutorial'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'images': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'tutorialimages'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['catalog.Image']"}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'votes': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'catalog.userflags': {
'Meta': {'object_name': 'UserFlags'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'show_maker_intro': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'show_makey_intro': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.userinteraction': {
'Meta': {'object_name': 'UserInteraction'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'event': ('django.db.models.fields.IntegerField', [], {}),
'event_id': ('django.db.models.fields.IntegerField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.userprofile': {
'Meta': {'object_name': 'UserProfile'},
'aboutme': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'blog_url': ('django.db.models.fields.URLField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
'college': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'facebook_url': ('django.db.models.fields.URLField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
'following': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'followers'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['catalog.UserProfile']"}),
'github_url': ('django.db.models.fields.URLField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instructables_url': ('django.db.models.fields.URLField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'linkedin_url': ('django.db.models.fields.URLField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
'location': ('django.db.models.fields.CharField', [], {'default': "'Bangalore, India'", 'max_length': '255'}),
'membership': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'patent': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'stackoverflow_url': ('django.db.models.fields.URLField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
'twitter_url': ('django.db.models.fields.URLField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'profile'", 'unique': 'True', 'to': u"orm['auth.User']"}),
'website_url': ('django.db.models.fields.URLField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
'yt_channel_url': ('django.db.models.fields.URLField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'})
},
'catalog.video': {
'Meta': {'object_name': 'Video'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'comments': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['catalog.Comment']", 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
'embed_url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'likes_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'site': ('django.db.models.fields.IntegerField', [], {}),
'thumb_url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'catalog.votemakey': {
'Meta': {'unique_together': "(('user', 'makey'),)", 'object_name': 'VoteMakey'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'makey': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Makey']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'vote': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'catalog.voteproductreview': {
'Meta': {'unique_together': "(('user', 'review'),)", 'object_name': 'VoteProductReview'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'review': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.ProductReview']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'vote': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'catalog.voteshopreview': {
'Meta': {'unique_together': "(('user', 'review'),)", 'object_name': 'VoteShopReview'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'review': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.ShopReview']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'vote': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'catalog.votetutorial': {
'Meta': {'unique_together': "(('user', 'tutorial'),)", 'object_name': 'VoteTutorial'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'tutorial': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Tutorial']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'vote': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['catalog'] | 78.718101 | 224 | 0.558825 |
3a40003ba9e044a1463528e270b5e7c0463392f7 | 1,329 | py | Python | plusseg/utils/registry.py | tonysy/SegmentationToolbox.PyTorch | 4d487dd81d0101bc5cdb7b2337776fdf1b5546ff | [
"MIT"
] | 13 | 2019-07-26T11:33:15.000Z | 2021-09-22T06:48:52.000Z | plusseg/utils/registry.py | tonysy/SegmentationToolbox.PyTorch | 4d487dd81d0101bc5cdb7b2337776fdf1b5546ff | [
"MIT"
] | 1 | 2018-11-05T14:07:07.000Z | 2018-11-05T14:07:07.000Z | plusseg/utils/registry.py | tonysy/SegmentationToolbox.PyTorch | 4d487dd81d0101bc5cdb7b2337776fdf1b5546ff | [
"MIT"
] | 2 | 2019-07-26T11:33:32.000Z | 2020-03-04T13:47:50.000Z | def _register_generic(module_dict, module_name, module):
assert module_name not in module_dict
module_dict[module_name] = module
class Registry(dict):
    """A dict subclass that manages registration of named modules.

    Create a registry:
        some_registry = Registry({"default": default_module})

    New modules can be registered in two ways.

    1) Direct function call:
        def foo():
            ...
        some_registry.register("foo_module", foo)

    2) As a decorator at declaration time (may be stacked for aliases):
        @some_registry.register("foo_module")
        @some_registry.register("foo_module_nickname")
        def foo():
            ...

    Lookup works like any dictionary:
        f = some_registry["foo_module"]
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def register(self, module_name, module=None):
        def _do_register(obj):
            _register_generic(self, module_name, obj)
            return obj

        # Decorator form: no module supplied yet, so hand back the hook that
        # will register the decorated object once it exists.
        if module is None:
            return _do_register

        # Direct-call form: register immediately (returns None, as before).
        _do_register(module)
a4f1cc2114ad27115c1ebaf7fb808e02476b962a | 839 | py | Python | images/migrations/0003_auto_20180618_2009.py | lucasLB7/PIckyBucket | 7e29d5934d6b3b04c5a86435a2b67e2f4009a682 | [
"Unlicense"
] | null | null | null | images/migrations/0003_auto_20180618_2009.py | lucasLB7/PIckyBucket | 7e29d5934d6b3b04c5a86435a2b67e2f4009a682 | [
"Unlicense"
] | null | null | null | images/migrations/0003_auto_20180618_2009.py | lucasLB7/PIckyBucket | 7e29d5934d6b3b04c5a86435a2b67e2f4009a682 | [
"Unlicense"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-06-18 17:09
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated schema migration (Django 1.11, 2018-06-18).

    Creates the ``Profile`` model and strips the vote-counter fields from
    ``Image``.
    """

    # Must run after the previous migration in the ``images`` app.
    dependencies = [
        ('images', '0002_auto_20180617_2004'),
    ]

    operations = [
        # New Profile model; currently holds only the implicit auto primary key.
        migrations.CreateModel(
            name='Profile',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
            ],
        ),
        # Drop the three vote-tracking columns from Image. Note: reversing this
        # migration re-adds the columns but the stored counts are lost.
        migrations.RemoveField(
            model_name='image',
            name='num_vote_down',
        ),
        migrations.RemoveField(
            model_name='image',
            name='num_vote_up',
        ),
        migrations.RemoveField(
            model_name='image',
            name='vote_score',
        ),
    ]
a6f4b21a6a167ac66765a1fbc336c89a9ebad6ab | 239 | py | Python | docs/user_guide/operation/scripts/examples/argus/extraction/jan/EvacPipette1.py | ASUPychron/pychron | dfe551bdeb4ff8b8ba5cdea0edab336025e8cc76 | [
"Apache-2.0"
] | 31 | 2016-03-07T02:38:17.000Z | 2022-02-14T18:23:43.000Z | docs/user_guide/operation/scripts/examples/argus/extraction/jan/EvacPipette1.py | ASUPychron/pychron | dfe551bdeb4ff8b8ba5cdea0edab336025e8cc76 | [
"Apache-2.0"
] | 1,626 | 2015-01-07T04:52:35.000Z | 2022-03-25T19:15:59.000Z | docs/user_guide/operation/scripts/examples/argus/extraction/jan/EvacPipette1.py | UIllinoisHALPychron/pychron | f21b79f4592a9fb9dc9a4cb2e4e943a3885ededc | [
"Apache-2.0"
def main():
    """Pychron extraction-line script: evacuate Pipette 1.

    NOTE(review): ``info``, ``open``, ``close`` and ``sleep`` are DSL commands
    injected by the pychron script runtime, not Python builtins -- this module
    is not runnable standalone.
    """
    info('Evacuate Pipette 1')
    # Route the microbone to both the turbo pump and the minibone first.
    for valve in ('Microbone to Turbo', 'Microbone to Minibone'):
        open(description=valve)
    # Isolate the inner pipette before pumping on the outer volume.
    close(description='Inner Pipette 1')
    sleep(1)
    open(description='Outer Pipette 1')
    sleep(15)
4fad0a576943eafb9a34ecd9c571f9741ee76369 | 813 | py | Python | tests/test_face.py | rmaglio/thumbor_rekognition | dd86cedbd989010ec0d7b9681dee7758c1f7fc91 | [
"MIT"
] | 8 | 2017-11-29T23:13:22.000Z | 2021-02-23T23:44:26.000Z | tests/test_face.py | rmaglio/thumbor_rekognition | dd86cedbd989010ec0d7b9681dee7758c1f7fc91 | [
"MIT"
] | 8 | 2018-12-20T20:19:17.000Z | 2022-03-11T23:25:42.000Z | tests/test_face.py | rmaglio/thumbor_rekognition | dd86cedbd989010ec0d7b9681dee7758c1f7fc91 | [
"MIT"
] | 6 | 2018-04-03T16:56:36.000Z | 2021-06-01T07:23:39.000Z | #!/usr/bin/env python
# standard library imports
from unittest import TestCase
# third party related imports
# local library imports
from thumbor_rekognition import Face
class TestFocalPoint(TestCase):
def test(self):
resp = {
"BoundingBox": {
"Height": 0.09666666388511658,
"Left": 0.32405567169189453,
"Top": 0.04333333298563957,
"Width": 0.1729622334241867
}
}
face = Face(resp)
focal_point = face.focal_point(885, 1582)
self.assertEqual(focal_point.x, 362)
self.assertEqual(focal_point.y, 144)
self.assertEqual(focal_point.width, 153)
self.assertEqual(focal_point.height, 152)
self.assertEqual(focal_point.origin, 'RekognitionDetector')
| 25.40625 | 67 | 0.622386 |
7f9a5715337291ff985699217996022426d5ef33 | 19,904 | py | Python | tabular/src/autogluon/tabular/learner/default_learner.py | willsmithorg/autogluon | 1e8c6a2f30fcc473411bf393c9827eb4713dcef6 | [
"Apache-2.0"
] | null | null | null | tabular/src/autogluon/tabular/learner/default_learner.py | willsmithorg/autogluon | 1e8c6a2f30fcc473411bf393c9827eb4713dcef6 | [
"Apache-2.0"
] | null | null | null | tabular/src/autogluon/tabular/learner/default_learner.py | willsmithorg/autogluon | 1e8c6a2f30fcc473411bf393c9827eb4713dcef6 | [
"Apache-2.0"
] | null | null | null | import copy
import logging
import math
import platform
import time
import numpy as np
import pandas as pd
from pandas import DataFrame
from autogluon.core.constants import BINARY, MULTICLASS, REGRESSION, QUANTILE, AUTO_WEIGHT, BALANCE_WEIGHT
from autogluon.core.data import LabelCleaner
from autogluon.core.data.cleaner import Cleaner
from autogluon.core.utils.time import sample_df_for_time_func, time_func
from autogluon.core.utils.utils import augment_rare_classes, extract_column
from .abstract_learner import AbstractLearner
from ..trainer import AutoTrainer
logger = logging.getLogger(__name__)
# TODO: Add functionality for advanced feature generators such as gl_code_matrix_generator (inter-row dependencies, apply to train differently than test, etc., can only run after train/test split, rerun for each cv fold)
# TODO: - Differentiate between advanced generators that require fit (stateful, gl_code_matrix) and those that do not (bucket label averaging in SCOT GC 2019)
# TODO: - Those that do not could be added to preprocessing function of model, but would then have to be recomputed on each model.
# TODO: Add cv / OOF generator option, so that AutoGluon can be used as a base model in an ensemble stacker
# Learner encompasses full problem, loading initial data, feature generation, model training, model prediction
class DefaultLearner(AbstractLearner):
def __init__(self, trainer_type=AutoTrainer, **kwargs):
super().__init__(**kwargs)
self.trainer_type = trainer_type
self.class_weights = None
self._time_fit_total = None
self._time_fit_preprocessing = None
self._time_fit_training = None
self._time_limit = None
self.preprocess_1_time = None # Time required to preprocess 1 row of data
# TODO: v0.1 Document trainer_fit_kwargs
def _fit(self, X: DataFrame, X_val: DataFrame = None, X_unlabeled: DataFrame = None, holdout_frac=0.1,
num_bag_folds=0, num_bag_sets=1, time_limit=None,
infer_limit=None, infer_limit_batch_size=None,
verbosity=2, **trainer_fit_kwargs):
""" Arguments:
X (DataFrame): training data
X_val (DataFrame): data used for hyperparameter tuning. Note: final model may be trained using this data as well as training data
X_unlabeled (DataFrame): data used for pretraining a model. This is same data format as X, without label-column. This data is used for semi-supervised learning.
holdout_frac (float): Fraction of data to hold out for evaluating validation performance (ignored if X_val != None, ignored if kfolds != 0)
num_bag_folds (int): kfolds used for bagging of models, roughly increases model training time by a factor of k (0: disabled)
num_bag_sets (int): number of repeats of kfold bagging to perform (values must be >= 1),
total number of models trained during bagging = num_bag_folds * num_bag_sets
"""
# TODO: if provided, feature_types in X, X_val are ignored right now, need to pass to Learner/trainer and update this documentation.
self._time_limit = time_limit
if time_limit:
logger.log(20, f'Beginning AutoGluon training ... Time limit = {time_limit}s')
else:
logger.log(20, 'Beginning AutoGluon training ...')
logger.log(20, f'AutoGluon will save models to "{self.path}"')
logger.log(20, f'AutoGluon Version: {self.version}')
logger.log(20, f'Python Version: {self._python_version}')
logger.log(20, f'Operating System: {platform.system()}')
logger.log(20, f'Train Data Rows: {len(X)}')
logger.log(20, f'Train Data Columns: {len([column for column in X.columns if column != self.label])}')
if X_val is not None:
logger.log(20, f'Tuning Data Rows: {len(X_val)}')
logger.log(20, f'Tuning Data Columns: {len([column for column in X_val.columns if column != self.label])}')
logger.log(20, f'Label Column: {self.label}')
time_preprocessing_start = time.time()
logger.log(20, 'Preprocessing data ...')
self._pre_X_rows = len(X)
if self.groups is not None:
num_bag_sets = 1
num_bag_folds = len(X[self.groups].unique())
X_og = None if infer_limit_batch_size is None else X
X, y, X_val, y_val, X_unlabeled, holdout_frac, num_bag_folds, groups = self.general_data_processing(X, X_val, X_unlabeled, holdout_frac, num_bag_folds)
if infer_limit_batch_size is not None:
X_og_1 = sample_df_for_time_func(df=X_og, sample_size=infer_limit_batch_size)
infer_limit_batch_size_actual = len(X_og_1)
self.preprocess_1_time = time_func(f=self.transform_features, args=[X_og_1]) / infer_limit_batch_size_actual
logger.log(20, f'\t{round(self.preprocess_1_time, 4)}s\t= Feature Preprocessing Time (1 row | {infer_limit_batch_size} batch size)')
if infer_limit is not None:
infer_limit_new = infer_limit - self.preprocess_1_time
logger.log(20, f'\t\tFeature Preprocessing requires {round(self.preprocess_1_time/infer_limit*100, 2)}% '
f'of the overall inference constraint ({infer_limit}s)\n'
f'\t\t{round(infer_limit_new, 4)}s inference time budget remaining for models...')
if infer_limit_new <= 0:
raise AssertionError('Impossible to satisfy inference constraint, budget is exceeded during data preprocessing!\n'
'Consider using fewer features, relaxing the inference constraint, or simplifying the feature generator.')
infer_limit = infer_limit_new
self._post_X_rows = len(X)
time_preprocessing_end = time.time()
self._time_fit_preprocessing = time_preprocessing_end - time_preprocessing_start
logger.log(20, f'Data preprocessing and feature engineering runtime = {round(self._time_fit_preprocessing, 2)}s ...')
if time_limit:
time_limit_trainer = time_limit - self._time_fit_preprocessing
else:
time_limit_trainer = None
trainer = self.trainer_type(
path=self.model_context,
problem_type=self.label_cleaner.problem_type_transform,
eval_metric=self.eval_metric,
num_classes=self.label_cleaner.num_classes,
quantile_levels=self.quantile_levels,
feature_metadata=self.feature_generator.feature_metadata,
low_memory=True,
k_fold=num_bag_folds, # TODO: Consider moving to fit call
n_repeats=num_bag_sets, # TODO: Consider moving to fit call
sample_weight=self.sample_weight,
weight_evaluation=self.weight_evaluation,
save_data=self.cache_data,
random_state=self.random_state,
verbosity=verbosity
)
self.trainer_path = trainer.path
if self.eval_metric is None:
self.eval_metric = trainer.eval_metric
self.save()
trainer.fit(
X=X,
y=y,
X_val=X_val,
y_val=y_val,
X_unlabeled=X_unlabeled,
holdout_frac=holdout_frac,
time_limit=time_limit_trainer,
infer_limit=infer_limit,
infer_limit_batch_size=infer_limit_batch_size,
groups=groups,
**trainer_fit_kwargs
)
self.save_trainer(trainer=trainer)
time_end = time.time()
self._time_fit_training = time_end - time_preprocessing_end
self._time_fit_total = time_end - time_preprocessing_start
logger.log(20, f'AutoGluon training complete, total runtime = {round(self._time_fit_total, 2)}s ... Best model: "{trainer.model_best}"')
# TODO: Add default values to X_val, X_unlabeled, holdout_frac, and num_bag_folds
def general_data_processing(self, X: DataFrame, X_val: DataFrame, X_unlabeled: DataFrame, holdout_frac: float, num_bag_folds: int):
""" General data processing steps used for all models. """
X = copy.deepcopy(X)
# TODO: We should probably uncomment the below lines, NaN label should be treated as just another value in multiclass classification -> We will have to remove missing, compute problem type, and add back missing if multiclass
# if self.problem_type == MULTICLASS:
# X[self.label] = X[self.label].fillna('')
# Remove all examples with missing labels from this dataset:
missinglabel_inds = [index for index, x in X[self.label].isna().iteritems() if x]
if len(missinglabel_inds) > 0:
logger.warning(f"Warning: Ignoring {len(missinglabel_inds)} (out of {len(X)}) training examples for which the label value in column '{self.label}' is missing")
X = X.drop(missinglabel_inds, axis=0)
if self.problem_type is None:
self.problem_type = self.infer_problem_type(X[self.label])
if self.quantile_levels is not None:
if self.problem_type == REGRESSION:
self.problem_type = QUANTILE
else:
raise ValueError("autogluon infers this to be classification problem for which quantile_levels "
"cannot be specified. If it is truly a quantile regression problem, "
"please specify:problem_type='quantile'")
if X_val is not None and self.label in X_val.columns:
holdout_frac = 1
if (self.eval_metric is not None) and (self.eval_metric.name in ['log_loss', 'pac_score']) and (self.problem_type == MULTICLASS):
if num_bag_folds > 0:
self.threshold = 2
if self.groups is None:
X = augment_rare_classes(X, self.label, threshold=2)
else:
self.threshold = 1
self.threshold, holdout_frac, num_bag_folds = self.adjust_threshold_if_necessary(X[self.label], threshold=self.threshold, holdout_frac=holdout_frac, num_bag_folds=num_bag_folds)
# Gets labels prior to removal of infrequent classes
y_uncleaned = X[self.label].copy()
self.cleaner = Cleaner.construct(problem_type=self.problem_type, label=self.label, threshold=self.threshold)
X = self.cleaner.fit_transform(X) # TODO: Consider merging cleaner into label_cleaner
X, y = self.extract_label(X)
self.label_cleaner = LabelCleaner.construct(problem_type=self.problem_type, y=y, y_uncleaned=y_uncleaned, positive_class=self._positive_class)
y = self.label_cleaner.transform(y)
X = self.set_predefined_weights(X, y)
X, w = extract_column(X, self.sample_weight)
X, groups = extract_column(X, self.groups)
if self.label_cleaner.num_classes is not None and self.problem_type != BINARY:
logger.log(20, f'Train Data Class Count: {self.label_cleaner.num_classes}')
if X_val is not None and self.label in X_val.columns:
X_val = self.cleaner.transform(X_val)
if len(X_val) == 0:
logger.warning('All X_val data contained low frequency classes, ignoring X_val and generating from subset of X')
X_val = None
y_val = None
w_val = None
else:
X_val, y_val = self.extract_label(X_val)
y_val = self.label_cleaner.transform(y_val)
X_val = self.set_predefined_weights(X_val, y_val)
X_val, w_val = extract_column(X_val, self.sample_weight)
else:
y_val = None
w_val = None
# TODO: Move this up to top of data before removing data, this way our feature generator is better
logger.log(20, f'Using Feature Generators to preprocess the data ...')
if X_val is not None:
# Do this if working with SKLearn models, otherwise categorical features may perform very badly on the test set
logger.log(15, 'Performing general data preprocessing with merged train & validation data, so validation performance may not accurately reflect performance on new test data')
X_super = pd.concat([X, X_val, X_unlabeled], ignore_index=True)
if self.feature_generator.is_fit():
logger.log(20, f'{self.feature_generator.__class__.__name__} is already fit, so the training data will be processed via .transform() instead of .fit_transform().')
X_super = self.feature_generator.transform(X_super)
self.feature_generator.print_feature_metadata_info()
else:
if X_unlabeled is None:
y_super = pd.concat([y, y_val], ignore_index=True)
else:
y_unlabeled = pd.Series(np.nan, index=X_unlabeled.index)
y_super = pd.concat([y, y_val, y_unlabeled], ignore_index=True)
X_super = self.fit_transform_features(X_super, y_super, problem_type=self.label_cleaner.problem_type_transform, eval_metric=self.eval_metric)
X = X_super.head(len(X)).set_index(X.index)
X_val = X_super.head(len(X)+len(X_val)).tail(len(X_val)).set_index(X_val.index)
if X_unlabeled is not None:
X_unlabeled = X_super.tail(len(X_unlabeled)).set_index(X_unlabeled.index)
del X_super
else:
X_super = pd.concat([X, X_unlabeled], ignore_index=True)
if self.feature_generator.is_fit():
logger.log(20, f'{self.feature_generator.__class__.__name__} is already fit, so the training data will be processed via .transform() instead of .fit_transform().')
X_super = self.feature_generator.transform(X_super)
self.feature_generator.print_feature_metadata_info()
else:
if X_unlabeled is None:
y_super = y.reset_index(drop=True)
else:
y_unlabeled = pd.Series(np.nan, index=X_unlabeled.index)
y_super = pd.concat([y, y_unlabeled], ignore_index=True)
X_super = self.fit_transform_features(X_super, y_super, problem_type=self.label_cleaner.problem_type_transform, eval_metric=self.eval_metric)
X = X_super.head(len(X)).set_index(X.index)
if X_unlabeled is not None:
X_unlabeled = X_super.tail(len(X_unlabeled)).set_index(X_unlabeled.index)
del X_super
X, X_val = self.bundle_weights(X, w, X_val, w_val) # TODO: consider not bundling sample-weights inside X, X_val
return X, y, X_val, y_val, X_unlabeled, holdout_frac, num_bag_folds, groups
def bundle_weights(self, X, w, X_val, w_val):
if w is not None:
X[self.sample_weight] = w
if X_val is not None:
if w_val is not None:
X_val[self.sample_weight] = w_val
elif not self.weight_evaluation:
nan_vals = np.empty((len(X_val),))
nan_vals[:] = np.nan
X_val[self.sample_weight] = nan_vals
else:
raise ValueError(f"sample_weight column '{self.sample_weight}' cannot be missing from X_val if weight_evaluation=True")
return X, X_val
def set_predefined_weights(self, X, y):
if self.sample_weight not in [AUTO_WEIGHT,BALANCE_WEIGHT] or self.problem_type not in [BINARY,MULTICLASS]:
return X
if self.sample_weight in X.columns:
raise ValueError(f"Column name '{self.sample_weight}' cannot appear in your dataset with predefined weighting strategy. Please change it and try again.")
if self.sample_weight == BALANCE_WEIGHT:
if self.class_weights is None:
class_counts = y.value_counts()
n = len(y)
k = len(class_counts)
self.class_weights = {c : n/(class_counts[c]*k) for c in class_counts.index}
logger.log(20, "Assigning sample weights to balance differences in frequency of classes.")
logger.log(15, f"Balancing classes via the following weights: {self.class_weights}")
w = y.map(self.class_weights)
elif self.sample_weight == AUTO_WEIGHT: # TODO: support more sophisticated auto_weight strategy
raise NotImplementedError(f"{AUTO_WEIGHT} strategy not yet supported.")
X[self.sample_weight] = w # TODO: consider not bundling sample weights inside X
return X
def adjust_threshold_if_necessary(self, y, threshold, holdout_frac, num_bag_folds):
new_threshold, new_holdout_frac, new_num_bag_folds = self._adjust_threshold_if_necessary(y, threshold, holdout_frac, num_bag_folds)
if new_threshold != threshold:
if new_threshold < threshold:
logger.warning(f'Warning: Updated label_count_threshold from {threshold} to {new_threshold} to avoid cutting too many classes.')
if new_holdout_frac != holdout_frac:
if new_holdout_frac > holdout_frac:
logger.warning(f'Warning: Updated holdout_frac from {holdout_frac} to {new_holdout_frac} to avoid cutting too many classes.')
if new_num_bag_folds != num_bag_folds:
logger.warning(f'Warning: Updated num_bag_folds from {num_bag_folds} to {new_num_bag_folds} to avoid cutting too many classes.')
return new_threshold, new_holdout_frac, new_num_bag_folds
def _adjust_threshold_if_necessary(self, y, threshold, holdout_frac, num_bag_folds):
new_threshold = threshold
num_rows = len(y)
holdout_frac = max(holdout_frac, 1 / num_rows + 0.001)
num_bag_folds = min(num_bag_folds, num_rows)
if num_bag_folds < 2:
minimum_safe_threshold = 1
else:
minimum_safe_threshold = 2
if minimum_safe_threshold > new_threshold:
new_threshold = minimum_safe_threshold
if self.problem_type in [REGRESSION, QUANTILE]:
return new_threshold, holdout_frac, num_bag_folds
class_counts = y.value_counts()
total_rows = class_counts.sum()
minimum_percent_to_keep = 0.975
minimum_rows_to_keep = math.ceil(total_rows * minimum_percent_to_keep)
minimum_class_to_keep = 2
num_classes = len(class_counts)
class_counts_valid = class_counts[class_counts >= new_threshold]
num_rows_valid = class_counts_valid.sum()
num_classes_valid = len(class_counts_valid)
if (num_rows_valid >= minimum_rows_to_keep) and (num_classes_valid >= minimum_class_to_keep):
return new_threshold, holdout_frac, num_bag_folds
num_classes_valid = 0
num_rows_valid = 0
new_threshold = None
for i in range(num_classes):
num_classes_valid += 1
num_rows_valid += class_counts.iloc[i]
new_threshold = class_counts.iloc[i]
if (num_rows_valid >= minimum_rows_to_keep) and (num_classes_valid >= minimum_class_to_keep):
break
return new_threshold, holdout_frac, num_bag_folds
def get_info(self, include_model_info=False, **kwargs):
learner_info = super().get_info(**kwargs)
trainer = self.load_trainer()
trainer_info = trainer.get_info(include_model_info=include_model_info)
learner_info.update({
'time_fit_preprocessing': self._time_fit_preprocessing,
'time_fit_training': self._time_fit_training,
'time_fit_total': self._time_fit_total,
'time_limit': self._time_limit,
})
learner_info.update(trainer_info)
return learner_info
| 55.753501 | 232 | 0.660068 |
bfa05a0ff9d9557661bb81e1cda683933be15475 | 3,280 | py | Python | project-base/applications/ecs_applications/beer_backend.py | joseafilho/aws-cdk-python-builders | 3ebaf5e32f0fcfe3807b7820f9916f294dc25993 | [
"Apache-2.0"
] | null | null | null | project-base/applications/ecs_applications/beer_backend.py | joseafilho/aws-cdk-python-builders | 3ebaf5e32f0fcfe3807b7820f9916f294dc25993 | [
"Apache-2.0"
] | null | null | null | project-base/applications/ecs_applications/beer_backend.py | joseafilho/aws-cdk-python-builders | 3ebaf5e32f0fcfe3807b7820f9916f294dc25993 | [
"Apache-2.0"
] | null | null | null | from aws_cdk import (
core as cdk
)
from aws_cdk.aws_ec2 import (
IVpc,
ISecurityGroup,
)
from aws_cdk.aws_elasticloadbalancingv2 import (
IApplicationListener
)
from libraries.utils.global_consts import Domains
from aws_cdk.aws_certificatemanager import ICertificate
from libraries.ecs.cluster.ecs_cluster_builder import ECSClusterBuilder
from libraries.security_group.security_group_builder import SecurityGroupBuilder
from libraries.ecs.ecr.ecr_builder import ECRBuilder
from libraries.ecs.task_definition.fargate_task_definition_builder import FargateTaskDefinitionBuilder
from libraries.ecs.service.ecs_service_builder import ECSServiceBuilder
from libraries.utils.alb_utils import ALBUtils
class BeerBackendResources(cdk.Construct):
    """CDK construct wiring up the 'beer' API backend: security group, ECR
    repository, ECS cluster, Fargate task definition, ECS service, and a
    target rule on an existing ALB listener."""
    @property
    def sg_beer_backend(self) -> ISecurityGroup:
        # Security group of the ECS service, exposed so other stacks can
        # reference it (e.g. to open database access).
        return self.__sg_service.sg
    def __init__(self, scope: cdk.Construct, id: str, vpc: IVpc, sg_alb: ISecurityGroup, alb_listener: IApplicationListener, certificate: ICertificate, **kwargs):
        # NOTE(review): `certificate` is accepted but never stored or used in
        # this construct -- confirm whether it can be dropped upstream.
        super().__init__(scope, id, **kwargs)
        self.__vpc = vpc
        self.__sg_alb = sg_alb
        self.__alb_listener = alb_listener
        # Consts
        # Port the Node/Express container listens on.
        self.__NODE_EXPRESS_PORT = 3000
        self.__create_backend()
    def __create_backend(self):
        # Orchestrates resource creation; order matters because later steps
        # reference objects created by earlier ones.
        self.__create_sg()
        self.__create_repository_images()
        self.__create_cluster()
        self.__create_task_definition()
        self.__create_service()
        self.__create_target_listener()
    def __create_sg(self):
        # Service security group; only the ALB's SG may reach the app port.
        self.__sg_service = SecurityGroupBuilder(
            self, 'beer-sg-ecs',
            vpc = self.__vpc,
            sg_description = 'SG dedicated to api beer.'
        )
        self.__sg_service.add_role(
            port = self.__NODE_EXPRESS_PORT,
            rule_description = 'Node Express port.',
            sg_parent = self.__sg_alb
        )
    def __create_repository_images(self):
        # ECR repository holding the backend container images.
        self.__ecr_beer = ECRBuilder(
            self, 'beer-ecr'
        )
    def __create_cluster(self):
        self.__ecs_cluster = ECSClusterBuilder(self, f'beer-cluster', self.__vpc)
    def __create_task_definition(self):
        # Fargate task: 0.5 vCPU / 1 GiB, always deploys the 'latest' tag.
        self.__task_definition = FargateTaskDefinitionBuilder(
            self, f'beer-task-definition',
            repository = self.__ecr_beer.repository,
            cpu = 512,
            memory_limit = 1024,
            port_mapping = self.__NODE_EXPRESS_PORT,
            tag_image = 'latest'
        )
    def __create_service(self):
        self.__beer_service = ECSServiceBuilder(
            self, f'beer-service',
            task_definition = self.__task_definition.definition,
            cluster = self.__ecs_cluster.cluster,
            sg = self.__sg_service.sg,
            desired_count = 1
        )
    def __create_target_listener(self):
        # Routes requests for the beer-api host header to the service,
        # health-checking on /ping.
        ALBUtils.AddTargetInListener(
            id = f'beer-ecs-service-tg',
            alb_listener = self.__alb_listener,
            target = self.__beer_service.service,
            port = self.__NODE_EXPRESS_PORT,
            priority = 1,
            health_check_path = '/ping',
            host_header = f'beer-api.{Domains.DOMAIN_COMPANY}'
        ) | 33.469388 | 162 | 0.653049 |
4d79ca4bc1e4b625a743805ddfb95f07e3d1730b | 424 | py | Python | tests/test_pyorient_native_load.py | sam-caldwell/pyorient_native | 2ebda0798f5aaa165cecf24ff4418558458f09f1 | [
"Apache-2.0"
] | null | null | null | tests/test_pyorient_native_load.py | sam-caldwell/pyorient_native | 2ebda0798f5aaa165cecf24ff4418558458f09f1 | [
"Apache-2.0"
] | null | null | null | tests/test_pyorient_native_load.py | sam-caldwell/pyorient_native | 2ebda0798f5aaa165cecf24ff4418558458f09f1 | [
"Apache-2.0"
] | null | null | null | """
This file contains a test to load pyorient_native
created by Sam Caldwell <mail@samcaldwell.net>
"""
import unittest
class TestPyorientNativeLoad(unittest.TestCase):
def setUp(self):
pass
def test_module_load(self):
try:
import pyorient_native
assert True
except Exception as e:
assert False, "failed to load pyorient_native, {}".format(e)
| 23.555556 | 73 | 0.643868 |
848b128aaddb0ec60987b3ea2ee021470c16f4b1 | 742 | py | Python | src/logger.py | TheMMpl/RaspberryPi-Remote-Thermometer- | a4c6dc77447cc6b488eeff094005aa63c0b37a05 | [
"MIT"
] | null | null | null | src/logger.py | TheMMpl/RaspberryPi-Remote-Thermometer- | a4c6dc77447cc6b488eeff094005aa63c0b37a05 | [
"MIT"
] | null | null | null | src/logger.py | TheMMpl/RaspberryPi-Remote-Thermometer- | a4c6dc77447cc6b488eeff094005aa63c0b37a05 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
from datetime import datetime
FILENAME = '/home/pi/temps.txt'  # rotating log of timestamped readings
MAXLINES = 100  # rotation threshold: beyond this the oldest entry is dropped
INPUT_FILENAME = '/home/pi/data_now.txt'  # written by the sensor reader; read as two integer lines below
def file_len(fname):
    """Return the number of lines in *fname*.

    The original left the file object dangling (``open`` without ``close``);
    a ``with`` block guarantees the handle is released.
    """
    with open(fname) as f:
        return sum(1 for _ in f)
def file_append(fname, entry, append = True):
    """Prepend *entry* (plus a newline) to *fname*.

    When *append* is False the file's last line is dropped first, so the
    file behaves as a fixed-size rotating log (see MAXLINES in the caller).
    ``with`` blocks replace the original manual open/close pairs, which
    leaked the handle if a read or write raised.
    """
    with open(fname, 'r') as f:
        lines = f.readlines()
    if (not append):
        lines = lines[:-1]
    with open(fname, 'w') as f:
        f.write('{}\n'.format(entry))
        f.writelines(lines)
# Read the two sensor values produced by the measuring script and push a
# timestamped entry onto the rotating log.
now = datetime.now().isoformat()
num_lines = file_len(FILENAME)
with open(INPUT_FILENAME, 'r') as data_file:
    entry = '{} {} {}'.format(now, int(data_file.readline()), int(data_file.readline()))
file_append(FILENAME, entry, num_lines < MAXLINES)
| 20.611111 | 74 | 0.638814 |
93960284d36750f272aa5ef38dc1b81dfbaf172f | 2,538 | py | Python | src/oci/key_management/models/backup_location_uri.py | Manny27nyc/oci-python-sdk | de60b04e07a99826254f7255e992f41772902df7 | [
"Apache-2.0",
"BSD-3-Clause"
] | 249 | 2017-09-11T22:06:05.000Z | 2022-03-04T17:09:29.000Z | src/oci/key_management/models/backup_location_uri.py | Manny27nyc/oci-python-sdk | de60b04e07a99826254f7255e992f41772902df7 | [
"Apache-2.0",
"BSD-3-Clause"
] | 228 | 2017-09-11T23:07:26.000Z | 2022-03-23T10:58:50.000Z | src/oci/key_management/models/backup_location_uri.py | Manny27nyc/oci-python-sdk | de60b04e07a99826254f7255e992f41772902df7 | [
"Apache-2.0",
"BSD-3-Clause"
] | 224 | 2017-09-27T07:32:43.000Z | 2022-03-25T16:55:42.000Z | # coding: utf-8
# Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from .backup_location import BackupLocation
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class BackupLocationURI(BackupLocation):
    """
    PreAuthenticated object storage URI to upload or download the backup
    """
    def __init__(self, **kwargs):
        """
        Initializes a new BackupLocationURI object with values from keyword arguments. The default value of the :py:attr:`~oci.key_management.models.BackupLocationURI.destination` attribute
        of this class is ``PRE_AUTHENTICATED_REQUEST_URI`` and it should not be changed.
        The following keyword arguments are supported (corresponding to the getters/setters of this class):
        :param destination:
            The value to assign to the destination property of this BackupLocationURI.
            Allowed values for this property are: "BUCKET", "PRE_AUTHENTICATED_REQUEST_URI"
        :type destination: str
        :param uri:
            The value to assign to the uri property of this BackupLocationURI.
        :type uri: str
        """
        # Maps used by the OCI SDK (de)serialization machinery; keys must
        # match the property names exactly.
        self.swagger_types = {
            'destination': 'str',
            'uri': 'str'
        }
        self.attribute_map = {
            'destination': 'destination',
            'uri': 'uri'
        }
        self._destination = None
        self._uri = None
        # The discriminator is fixed for this subclass: URI-based locations
        # are always of the pre-authenticated-request kind.
        self._destination = 'PRE_AUTHENTICATED_REQUEST_URI'
    @property
    def uri(self):
        """
        **[Required]** Gets the uri of this BackupLocationURI.
        :return: The uri of this BackupLocationURI.
        :rtype: str
        """
        return self._uri
    @uri.setter
    def uri(self, uri):
        """
        Sets the uri of this BackupLocationURI.
        :param uri: The uri of this BackupLocationURI.
        :type: str
        """
        self._uri = uri
    def __repr__(self):
        return formatted_flat_dict(self)
    def __eq__(self, other):
        # Structural equality over all instance attributes.
        if other is None:
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        return not self == other
| 32.961039 | 245 | 0.654846 |
b2375987022181b5cb91d2747c94d19c123e7e90 | 4,478 | py | Python | generator.py | twentyonepilotslive/site | 4987b1261530605f6d38c00ee245820f9ef74003 | [
"Unlicense"
] | null | null | null | generator.py | twentyonepilotslive/site | 4987b1261530605f6d38c00ee245820f9ef74003 | [
"Unlicense"
] | null | null | null | generator.py | twentyonepilotslive/site | 4987b1261530605f6d38c00ee245820f9ef74003 | [
"Unlicense"
] | null | null | null | #!/usr/bin/env python3
import argparse
import collections
import datetime
import json
import os
import sys
# HTML meta tag injected into every page through the !Generator! macro.
generator_tag = '<META CONTENT="twentyonepilots.live site generator" NAME="generator" />'
# Macros appear in templates as !Name! (see macro_transform).
macro_format = "!%s!"
def apply_macros(m, t):
    """Return template text *t* with every macro in mapping *m* expanded.

    Each key is wrapped in macro_format (e.g. "!Content!") and replaced by
    its value.  Iterating items() avoids the redundant per-key lookup of
    the original ``for key in m.keys()`` loop.
    """
    for key, value in m.items():
        t = t.replace(macro_transform(key, macro_format), value)
    return t
def combine_macros(*args):
    """Merge any number of mappings into a single OrderedDict.

    Later mappings override earlier ones; each key keeps the position of
    its first occurrence (standard dict.update semantics).  Updating in
    place replaces the original pattern of rebuilding a fresh OrderedDict
    per argument, which was quadratic in the total number of keys.
    """
    retval = collections.OrderedDict()
    for arg in args:
        retval.update(arg)
    return retval
def dates(data):
    """Return every [year, month, day] combination under data["concerts"]."""
    concerts = data["concerts"]
    return [
        [year, month, day]
        for year in concerts
        for month in concerts[year]
        for day in concerts[year][month]
    ]
def error(s):
    """Report *s* on stderr, prefixed with the program name, then exit(1)."""
    message = "%s: %s\n" % (sys.argv[0], s)
    sys.stderr.write(message)
    sys.exit(1)
def fileread(n):
    """Return the full contents of file *n*.

    Exits via error() when the file is missing.  The ``with`` block ensures
    the handle is closed even if the read itself raises (the original left
    the file open in that case).
    """
    try:
        with open(n) as f:
            return f.read()
    except FileNotFoundError as e:
        error(e)
def filewrite(n, c):
    """Write string *c* to file *n*.

    Exits via error() when the file cannot be opened; as in the original,
    only the open itself is guarded, so write failures still propagate.
    The ``with`` block guarantees the handle is closed on any write error.
    """
    try:
        f = open(n, "w")
    except Exception as e:
        error(e)
    with f:
        f.write(c)
    return
def generate_directory_structure(data):
    """Create the <year>/<month> output directory tree for every concert.

    Uses os.makedirs(..., exist_ok=True) with joined paths instead of the
    original os.chdir() dance, which relied on mutating the process-wide
    working directory and could leave it in the wrong place on failure.
    Behaves like an idempotent ``mkdir -p``.
    """
    for year in list(data["concerts"]):
        try:
            os.makedirs(year, exist_ok=True)
        except Exception as e:
            error("Error generating directory structure (os.mkdir): %s" % e)
        for month in list(data["concerts"][year]):
            try:
                os.makedirs(os.path.join(year, month), exist_ok=True)
            except Exception as e:
                error("Error generating directory structure (os.mkdir): %s" % e)
    return
def generate_index(data, template):
    """Render index.html: a linked <UL> list of every concert page."""
    items = []
    for date in dates(data):
        year, month, day = date
        venue = data["concerts"][year][month][day]["venue"]
        items.append('<LI><A HREF="/%s/%s/%s.html">%s-%s-%s: %s</A></LI>\n'
                     % (year, month, day, year, month, day, venue))
    content = "<UL>\n" + "".join(items) + "</UL>\n"
    page = apply_macros(
        combine_macros(
            macros(data), collections.OrderedDict([("Content", content)])
        ), template
    )
    filewrite("index.html", page)
def generate_pages(data, template):
    """Render one <year>/<month>/<day>.html page per concert date."""
    for date in dates(data):
        target = "%s/%s/%s.html" % tuple(date)
        page_macros = combine_macros(macros(data), macros_page(data, date))
        filewrite(target, apply_macros(page_macros, template))
    return
def macros(data):
    """Return the macros available on every generated page."""
    return collections.OrderedDict([("Generator", generator_tag)])
def macros_page(data, selection):
    """Return the per-page macros for one concert.

    data
        the full JSON object
    selection
        A list of [year (str, len 4), month (str, len 2), day (str, len 2)]
        identifying the performance.

    Exits via error() on an unknown date or embed type.  The embed dict is
    now read from the already-bound ``concert`` instead of re-walking
    data["concerts"][year][month][day] a second time.
    """
    year = selection[0]
    month = selection[1]
    day = selection[2]
    try:
        concert = data["concerts"][year][month][day]
    except KeyError as e:
        error("macros_page(): KeyError: %s" % e)
    # trivial date macros to start out
    macros = collections.OrderedDict([
        ("DY", year),
        ("DM", month),
        ("DD", day),
        ("Dp", datetime.date(int(year), int(month), int(day)).strftime("%d %B %Y"))
    ])
    macros["Venue"] = concert["venue"]
    # embed: "none" adds no macro, "iframe" embeds the linked player
    embed = concert["embed"]
    if embed["type"] == "none":
        pass
    elif embed["type"] == "iframe":
        macros["Embed"] = '''\
<IFRAME
    ALLOWFULLSCREEN
    FRAMEBORDER="0"
    HEIGHT="480"
    MOZALLOWFULLSCREEN="true"
    SRC="%s"
    WEBKITALLOWFULLSCREEN="true"
    WIDTH="640"
><P><A HREF="%s">iframe</A></P></IFRAME>''' % (embed["href"], embed["href"])
    else:
        error("What embed type is %s? (%s)"
              % (embed["type"], ('data["concerts"]["%s"]["%s"]["%s"]["embed"]["type"]'
                                 % (year, month, day))))
    # setlist ordered list
    setlist = "<OL>\n"
    for s in concert["setlist"]:
        setlist += "<LI>%s</LI>\n" % s
    setlist += "</OL>\n"
    macros["Setlist"] = setlist
    return macros
def macro_transform(s, fmt):
    """Embed macro name *s* in format *fmt*.

    Example: ("hello world", "!%s!") -> "!hello world!"
    """
    return fmt % s
def main(argc, argv):
    """Load data.json and the templates, then emit the whole site into docs/.

    Returns 0 on success; exits via error() on invalid JSON or missing files.
    (Also drops the stray C-style semicolon the original return carried.)
    """
    try:
        data = json.loads(fileread("data.json"))
    except json.decoder.JSONDecodeError as e:
        error("Invalid JSON file. (%s)" % e)
    template = {
        "index": fileread("index.html.template"),
        "pages": fileread("XX.html.template")
    }
    os.chdir("docs")
    generate_directory_structure(data)
    generate_index(data, template["index"])
    generate_pages(data, template["pages"])
    return 0
if __name__ == "__main__":
    # Propagate main()'s return value as the process exit status.
    sys.exit(main(len(sys.argv), sys.argv))
| 22.39 | 89 | 0.645377 |
9f2419382760eb3b1be23a778a0e3f07e75e9883 | 1,105 | py | Python | setup.py | fhoeb/py-tmps | b7eced582acb4042815a775090a59f569975e3be | [
"BSD-3-Clause"
] | 1 | 2020-02-21T20:03:44.000Z | 2020-02-21T20:03:44.000Z | setup.py | fhoeb/py-tmps | b7eced582acb4042815a775090a59f569975e3be | [
"BSD-3-Clause"
] | null | null | null | setup.py | fhoeb/py-tmps | b7eced582acb4042815a775090a59f569975e3be | [
"BSD-3-Clause"
] | 1 | 2020-02-21T19:56:53.000Z | 2020-02-21T19:56:53.000Z | # TODO Requirements are not minimal
from setuptools import setup, find_packages
# Package metadata for py-tmps (see the TODO above: install_requires is
# broader than strictly necessary; pytest is a test-time, not runtime, dep).
setup(name='py-tmps',
      version='1.0.1',
      description='Implementation of the tmps algorithm for real and imaginary time evolution of quantum states '
                  'represented by mps, mpo or pmps for chain and star geometries with a focus on impurity models. '
                  'For the chain geometry, the algorithm described by '
                  'Schollwoeck in Annals of Physics 326 (2011), 96-192; doi: 10.1016/j.aop.2010.09.012 is used. '
                  'For the star geometry, the algorithm is described by Hans Gerd Evertz in '
                  'DMRG for Multiband Impurity Solvers, Institute for Theoretical and Computational Physics. '
                  'Graz University of Technology, Austria, is used.',
      author='Fabian Hoeb, Ish Dhand, Alexander Nuesseler',
      install_requires=['numpy>=1.12', 'scipy>=0.19', 'mpnum>=1.0.3', 'pytest>=3.7.1'],
      author_email='fabian.hoeb@uni-ulm.de, ish.dhand@uni-ulm.de, alexander.nuesseler@uni-ulm.de',
      packages=find_packages(where='.'))
| 65 | 115 | 0.662443 |
5dbb2884b7b1dea007cd73c18443b4eb52eb7c0e | 25,140 | py | Python | stentseg/stentdirect/tests/test_stentgraph.py | almarklein/stentseg | 48255fffdc2394d1dc4ce2208c9a91e1d4c35a46 | [
"BSD-3-Clause"
] | 1 | 2020-08-28T16:34:10.000Z | 2020-08-28T16:34:10.000Z | stentseg/stentdirect/tests/test_stentgraph.py | almarklein/stentseg | 48255fffdc2394d1dc4ce2208c9a91e1d4c35a46 | [
"BSD-3-Clause"
] | null | null | null | stentseg/stentdirect/tests/test_stentgraph.py | almarklein/stentseg | 48255fffdc2394d1dc4ce2208c9a91e1d4c35a46 | [
"BSD-3-Clause"
] | 1 | 2021-04-25T06:59:36.000Z | 2021-04-25T06:59:36.000Z | from __future__ import print_function, division, absolute_import
import numpy as np
import networkx as nx
from visvis import ssdf
from stentseg.utils.new_pointset import PointSet
from stentseg.stentdirect.stentgraph import (StentGraph, check_path_integrity,
_get_pairs_of_neighbours, add_nodes_at_crossings,
_detect_corners, _add_corner_to_edge,
_pop_node, pop_nodes,
prune_very_weak, prune_weak,
prune_clusters, prune_redundant, prune_tails,)
class TestStentGraph:
    """Unit tests for the stentgraph pruning, packing and popping utilities."""
    def test_prune_redundant1(self):
        """ Test removing redundant edges on a graph with two triangles
        that are connected by a single edge.
        """
        # Create two triangles that are connected with a single edge
        graph = StentGraph()
        graph.add_edge(11, 12, cost=1, ctvalue=50)
        graph.add_edge(12, 13, cost=3, ctvalue=50)
        graph.add_edge(13, 11, cost=2, ctvalue=50)
        #
        graph.add_edge(21, 22, cost=2, ctvalue=60)
        graph.add_edge(22, 23, cost=3, ctvalue=60)
        graph.add_edge(23, 21, cost=1, ctvalue=60)
        #
        graph.add_edge(21, 11, cost=4, ctvalue=10)
        assert graph.number_of_nodes() == 6
        assert graph.number_of_edges() == 7
        # Threshold 55 removes one redundant edge (in the ct=50 triangle).
        prune_redundant(graph, 55)
        assert graph.number_of_nodes() == 6
        assert graph.number_of_edges() == 6
        # Pruning is idempotent at the same threshold.
        prune_redundant(graph, 55)
        assert graph.number_of_nodes() == 6
        assert graph.number_of_edges() == 6
        # Raising the threshold to 65 removes one more edge.
        prune_redundant(graph, 65)
        assert graph.number_of_nodes() == 6
        assert graph.number_of_edges() == 5
        prune_tails(graph, 2)
        assert graph.number_of_nodes() == 2
        assert graph.number_of_edges() == 1
    def test_prune_redundant2(self):
        """ Test removing redundant edges on a graph with two triangles
        that are connected by a two edges, twice.
        """
        # Create two triangles that are connected with a single edge
        graph = StentGraph()
        graph.add_edge(11, 12, cost=1, ctvalue=50)
        graph.add_edge(12, 13, cost=3, ctvalue=50)
        graph.add_edge(13, 11, cost=2, ctvalue=50)
        #
        graph.add_edge(21, 22, cost=2, ctvalue=60)
        graph.add_edge(22, 23, cost=3, ctvalue=60)
        graph.add_edge(23, 21, cost=1, ctvalue=60)
        #
        graph.add_edge(21, 1, cost=4, ctvalue=10)
        graph.add_edge(1, 11, cost=4, ctvalue=10)
        #
        graph.add_edge(22, 2, cost=4, ctvalue=10)
        graph.add_edge(2, 12, cost=4, ctvalue=10)
        assert graph.number_of_nodes() == 8
        assert graph.number_of_edges() == 10
        prune_redundant(graph, 55)
        assert graph.number_of_nodes() == 8
        assert graph.number_of_edges() == 10-1
        prune_redundant(graph, 55)
        assert graph.number_of_nodes() == 8
        assert graph.number_of_edges() == 10-1
        prune_redundant(graph, 65)
        assert graph.number_of_nodes() == 8
        assert graph.number_of_edges() == 10-2
        prune_tails(graph, 2)
        assert graph.number_of_nodes() == 8-2
        assert graph.number_of_edges() == 10-2-2
    def test_prune_tails(self):
        """prune_tails should remove dangling chains up to the given length."""
        graph = StentGraph()
        graph.add_edge(1, 2, cost=2, ctvalue=50)
        graph.add_edge(2, 3, cost=2, ctvalue=50)
        graph.add_edge(3, 1, cost=2, ctvalue=50)
        # Tail from 1
        graph.add_edge(1, 11, cost=3, ctvalue=50)
        graph.add_edge(11, 12, cost=3, ctvalue=50)
        graph.add_edge(12, 13, cost=3, ctvalue=50)
        graph.add_edge(13, 14, cost=3, ctvalue=50)
        # Tail from 2
        graph.add_edge(2, 21, cost=3, ctvalue=50)
        graph.add_edge(21, 22, cost=3, ctvalue=50)
        graph.add_edge(22, 23, cost=3, ctvalue=50)
        assert graph.number_of_nodes() == 3+4+3
        assert graph.number_of_edges() == 3+4+3
        # Length 3 removes only the shorter tail.
        prune_tails(graph, 3)
        assert graph.number_of_nodes() == 3+4
        assert graph.number_of_edges() == 3+4
        # Length 9 removes the remaining tail, leaving the triangle.
        prune_tails(graph, 9)
        assert graph.number_of_nodes() == 3
        assert graph.number_of_edges() == 3
    def test_prune_clusters(self):
        """prune_clusters should drop connected components below a minimum size."""
        # Create two small cliques
        graph = StentGraph()
        graph.add_edge(1, 2, cost=2, ctvalue=50)
        graph.add_edge(2, 3, cost=2, ctvalue=50)
        graph.add_edge(3, 1, cost=2, ctvalue=50)
        #
        graph.add_edge(4, 5, cost=2, ctvalue=50)
        graph.add_edge(5, 6, cost=2, ctvalue=50)
        graph.add_edge(6, 7, cost=2, ctvalue=50)
        graph.add_edge(7, 4, cost=2, ctvalue=50)
        # Connect them
        graph.add_edge(1, 4, cost=3, ctvalue=50)
        # Also add loose node
        graph.add_nodes_from([101, 102])
        # Remove cliques and check that nothing happened
        prune_clusters(graph, 4)
        assert graph.number_of_edges() == 8
        assert graph.number_of_nodes() == 7
        # Remove connection
        graph.remove_edge(1, 4)
        # Remove cliques and check that one clique is removed
        prune_clusters(graph, 4)
        assert graph.number_of_edges() == 4
        assert graph.number_of_nodes() == 4
        # Remove cliques and check that one clique is removed
        prune_clusters(graph, 5)
        assert graph.number_of_edges() == 0
        assert graph.number_of_nodes() == 0
    def test_very_weak(self):
        """prune_very_weak should drop every edge below the ct-value threshold."""
        # Create simple graph
        graph = StentGraph()
        graph.add_edge(1, 4, ctvalue=50)
        graph.add_edge(1, 5, ctvalue=40)
        graph.add_edge(1, 2, ctvalue=30)
        graph.add_edge(1, 3, ctvalue=20)
        # Remove weak edges
        th = 35
        prune_very_weak(graph, th)
        # Check result
        assert graph.number_of_edges() == 2
        for (n1, n2) in graph.edges_iter():
            assert graph[n1][n2]['ctvalue'] > th
    def test_weak1(self):
        """ 2
           / | \
          5 - 1 - 3
           \ | /
             4
        """
        # Test that indeed only weakest are removed
        graph = StentGraph()
        graph.add_edge(1, 2, cost=2, ctvalue=50)
        graph.add_edge(1, 3, cost=3, ctvalue=50) # gets removed
        graph.add_edge(1, 4, cost=4, ctvalue=50) # gets removed
        graph.add_edge(1, 5, cost=1, ctvalue=50)
        #
        graph.add_edge(2, 3, cost=1, ctvalue=50)
        graph.add_edge(3, 4, cost=1, ctvalue=50)
        graph.add_edge(4, 5, cost=1, ctvalue=50)
        graph.add_edge(5, 2, cost=1, ctvalue=50)
        prune_weak(graph, 2, 80)
        # Check result
        assert graph.number_of_edges() == 6
        for e in graph.edges_iter():
            assert e not in [(1, 3), (1, 4)]
    def test_weak2(self):
        """ 2     5
           / |    | \
          3 - 1 - 4 - 6
        """
        # Test that indeed only weakest are removed
        graph = StentGraph()
        graph.add_edge(1, 2, cost=2, ctvalue=50)
        graph.add_edge(2, 3, cost=2, ctvalue=50)
        graph.add_edge(3, 1, cost=2, ctvalue=50)
        #
        graph.add_edge(4, 5, cost=2, ctvalue=50)
        graph.add_edge(5, 6, cost=2, ctvalue=50)
        graph.add_edge(6, 4, cost=2, ctvalue=50)
        # Connect two subgraphs with weaker connection
        graph.add_edge(1, 4, cost=3, ctvalue=50)
        # Prune
        prune_weak(graph, 2, 80)
        # Check result
        assert graph.number_of_edges() == 6
        for e in graph.edges_iter():
            assert e not in [(1, 4)]
        # Again, now with lower cost (stronger connection)
        graph.add_edge(1, 4, cost=1, ctvalue=50)
        # Prune
        prune_weak(graph, 2, 80)
        # Check result
        assert graph.number_of_edges() == 7
        # Again, now with high ct value
        graph.add_edge(1, 4, cost=3, ctvalue=90)
        # Prune
        prune_weak(graph, 2, 80)
        # Check result
        assert graph.number_of_edges() == 7
    def test_weak3(self):
        """ 2     456
           / |     |
          3 - 1 - 0 - 789
        """
        # Test that indeed only weakest are removed
        graph = StentGraph()
        graph.add_edge(1, 2, cost=2, ctvalue=50)
        graph.add_edge(2, 3, cost=2, ctvalue=50)
        graph.add_edge(3, 1, cost=2, ctvalue=50)
        #
        graph.add_edge(4, 5, cost=2, ctvalue=50)
        graph.add_edge(5, 6, cost=2, ctvalue=50)
        graph.add_edge(6, 4, cost=2, ctvalue=50)
        #
        graph.add_edge(7, 8, cost=2, ctvalue=50)
        graph.add_edge(8, 9, cost=2, ctvalue=50)
        graph.add_edge(9, 7, cost=2, ctvalue=50)
        # Connect three subgraphs
        graph.add_edge(0, 1, cost=2, ctvalue=50)
        graph.add_edge(0, 4, cost=3, ctvalue=50) # gets removed
        graph.add_edge(0, 7, cost=2, ctvalue=50)
        # Prune
        prune_weak(graph, 2, 80)
        # Check result
        assert graph.number_of_edges() == 9+2
        for e in graph.edges_iter():
            assert e not in [(0, 4)]
        # Connect three subgraphs
        graph.add_edge(0, 1, cost=1, ctvalue=50)
        graph.add_edge(0, 4, cost=1, ctvalue=50)
        graph.add_edge(0, 7, cost=2, ctvalue=50) # gets removed
        # Prune
        prune_weak(graph, 2, 80)
        # Check result
        assert graph.number_of_edges() == 9+2
        for e in graph.edges_iter():
            assert e not in [(0, 7)]
        # Connect three subgraphs
        graph.add_edge(0, 1, cost=3, ctvalue=50)
        graph.add_edge(0, 4, cost=4, ctvalue=90) # None gets removed
        graph.add_edge(0, 7, cost=3, ctvalue=50)
        # Prune
        prune_weak(graph, 2, 80)
        # Check result
        assert graph.number_of_edges() == 9+3
# Custom stent
g = StentGraph(summary='dit is een stent!', lala=3)
g.add_node((10,20), foo=3)
g.add_node((30,40), foo=5)
g.add_edge((1,1), (2,2), bar=10)
g.add_edge((10,20),(1,1), bar=20)
fname = '/home/almar/test.ssdf'
ssdf.save(fname, g.pack())
g2 = StentGraph()
g2.unpack(ssdf.load(fname))
#print(nx.is_isomorphic(g, g2))
assert nx.is_isomorphic(g, g2)
def test_pack2(self):
# Auto generate
import random
n = 500
p=dict((i,(random.gauss(0,2),random.gauss(0,2))) for i in range(n))
g_ = nx.random_geometric_graph(n, 0.1, dim=3, pos=p)
g = StentGraph(summary='dit is een stent!', lala=3)
g.add_nodes_from(g_.nodes_iter())
g.add_edges_from(g_.edges_iter())
fname = '/home/almar/test.ssdf'
ssdf.save(fname, g.pack())
g2 = StentGraph()
g2.unpack(ssdf.load(fname))
#print(nx.is_isomorphic(g, g2))
assert nx.is_isomorphic(g, g2)
def test_pop_node(self):
# Create paths
path1 = PointSet(2)
path1.append(1, 11)
path1.append(1, 12)
path2 = PointSet(2)
path2.append(1, 12)
path2.append(1, 13)
#
path12 = PointSet(2)
path12.append(1, 11)
path12.append(1, 12)
path12.append(1, 13)
# create 4 nodes (6-7-8-9), remove 8
graph = StentGraph()
graph.add_edge(6, 7, cost=4, ctvalue=70)
graph.add_edge(7, 8, cost=2, ctvalue=50, path=path1)
graph.add_edge(8, 9, cost=3, ctvalue=60, path=path2)
# Pop
_pop_node(graph, 8)
# Check
assert graph.number_of_nodes() == 3
assert 8 not in graph.nodes()
assert graph.edge[7][9]['ctvalue'] == 50
assert graph.edge[7][9]['cost'] == 5
assert np.all(graph.edge[7][9]['path'] == path12)
# create 4 nodes (6-8-7-9), remove 7
graph = StentGraph()
graph.add_edge(6, 8, cost=4, ctvalue=70)
graph.add_edge(8, 7, cost=2, ctvalue=50, path=np.flipud(path1))
graph.add_edge(7, 9, cost=3, ctvalue=60, path=path2)
# Pop
_pop_node(graph, 7)
# Check
assert graph.number_of_nodes() == 3
assert 7 not in graph.nodes()
assert graph.edge[8][9]['ctvalue'] == 50
assert graph.edge[8][9]['cost'] == 5
assert np.all(graph.edge[8][9]['path'] == path12)
# create 4 nodes (7-8-6-9), remove 8
graph = StentGraph()
graph.add_edge(7, 8, cost=4, ctvalue=70, path=np.flipud(path2))
graph.add_edge(8, 6, cost=2, ctvalue=50, path=path1)
graph.add_edge(6, 9, cost=3, ctvalue=60)
# Pop
_pop_node(graph, 8)
# Check
assert graph.number_of_nodes() == 3
assert 8 not in graph.nodes()
assert graph.edge[6][7]['ctvalue'] == 50
assert graph.edge[6][7]['cost'] == 6
assert np.all(graph.edge[6][7]['path'] == path12)
# create 3 nodes in a cycle. It should remove all but one
graph = StentGraph()
graph.add_edge(7, 8, cost=4, ctvalue=70, path=path1)
graph.add_edge(8, 9, cost=2, ctvalue=50, path=path2)
graph.add_edge(9, 7, cost=3, ctvalue=60, path=path2)
# Pop
_pop_node(graph, 8)
# Check
assert graph.number_of_nodes() == 1
assert graph.number_of_edges() == 1
assert 8 not in graph.nodes()
n = graph.nodes()[0]
assert len(graph.edge[n][n]['path']) == 6-1
# create 3 nodes in a cycle, with one subbranch
graph = StentGraph()
graph.add_edge(7, 8, cost=4, ctvalue=70, path=path1)
graph.add_edge(8, 9, cost=2, ctvalue=50, path=path2)
graph.add_edge(9, 7, cost=3, ctvalue=60, path=path2)
graph.add_edge(7, 4, cost=3, ctvalue=60, path=path2)
# Pop
_pop_node(graph, 8)
# Check
assert graph.number_of_nodes() == 2
assert graph.number_of_edges() == 2
assert 8 not in graph.nodes()
assert len(graph.edge[7][7]['path']) == 6-1
def test_pop_nodes(self):
# Create dummy paths
path1 = PointSet(2)
path1.append(1, 11)
path1.append(1, 12)
# create 4 nodes (6-7-8-9), remove 8
graph = StentGraph()
graph.add_edge(6, 7, cost=4, ctvalue=70, path=path1)
graph.add_edge(7, 8, cost=2, ctvalue=50, path=path1)
graph.add_edge(8, 9, cost=3, ctvalue=60, path=path1)
graph0 = graph.copy()
# Pop straight line
graph = graph0.copy()
pop_nodes(graph)
assert graph.number_of_nodes() == 2
assert graph.number_of_edges() == 1
assert graph.edge[6][9]['path'].shape[0] == 3+1
# Pop cycle
graph = graph0.copy()
graph.add_edge(9, 6, cost=3, ctvalue=60, path=path1)
pop_nodes(graph)
assert graph.number_of_nodes() == 1
assert graph.number_of_edges() == 1
n = graph.nodes()[0]
assert graph.edge[n][n]['path'].shape[0] == 4+1+1 # cycle
# arbitrary what node stayed around
# Pop with one side branch popping
graph = graph0.copy()
graph.add_edge(7, 2, cost=3, ctvalue=60, path=path1)
pop_nodes(graph)
assert graph.number_of_nodes() == 4
assert graph.number_of_edges() == 3
assert graph.edge[7][9]['path'].shape[0] == 2+1
# Pop with one prevent popping
graph = graph0.copy()
graph.node[7]['nopop'] = True
pop_nodes(graph)
assert graph.number_of_nodes() == 3
assert graph.number_of_edges() == 2
assert graph.edge[7][9]['path'].shape[0] == 2+1
def test_detect_corners(self):
path = PointSet(3)
path.append(10, 2, 0)
path.append(11, 3, 0)
path.append(12, 4, 0)
path.append(13, 5, 0)
path.append(14, 6, 0)
path.append(15, 7, 0) # top
path.append(16, 6, 0)
path.append(17, 5, 0)
path.append(18, 4, 0)
path.append(19, 3, 0)
path.append(20, 2, 0) # bottom
path.append(21, 3, 0)
path.append(22, 4, 0)
path.append(23, 5, 0)
path.append(24, 6, 0)
path.append(25, 7, 0) # top
path.append(26, 6, 0)
path.append(27, 5, 0)
path.append(28, 4, 0)
path.append(29, 3, 0)
path0 = path
for i in range(3):
path = path0.copy()
path[:,2] = path[:,i]
path[:,i] = 0
# Test that _detect_corners detects the indices correctly
I = _detect_corners(path, smoothFactor=1)
assert I == [5, 10, 15]
# Test that _add_corner_to_edge constructs the graph and splits
# the path in the correct way
graph = StentGraph()
n1, n5 = tuple(path[0].flat), tuple(path[-1].flat)
n2, n3, n4 = tuple(path[5].flat), tuple(path[10].flat), tuple(path[15].flat)
graph.add_edge(n1, n5, path=path, cost=0, ctvalue=0)
_add_corner_to_edge(graph, n1, n5, smoothFactor=1)
assert graph.number_of_nodes() == 5
assert graph.number_of_edges() == 4
for n in [n1, n2, n3, n4, n5]:
assert n in graph.nodes()
path12, path23, path34, path45 = path[0:6], path[5:11], path[10:16], path[15:20]
if n1 > n2: path12 = np.flipud(path12)
if n2 > n3: path23 = np.flipud(path23)
if n3 > n4: path34 = np.flipud(path34)
if n4 > n5: path45 = np.flipud(path45)
assert np.all(graph.edge[n1][n2]['path'] == path12)
assert np.all(graph.edge[n2][n3]['path'] == path23)
assert np.all(graph.edge[n3][n4]['path'] == path34)
assert np.all(graph.edge[n4][n5]['path'] == path45)
def test_pairs(self):
graph = nx.Graph()
graph.add_edge(1, 2)
graph.add_edge(1, 3)
graph.add_edge(1, 4)
graph.add_edge(1, 5)
#
graph.add_edge(2, 6)
graph.add_edge(2, 7)
#
graph.add_edge(3, 8)
pairs1 = _get_pairs_of_neighbours(graph, 1)
assert pairs1 == [(2, 3), (2, 4), (2, 5), (3, 4), (3, 5), (4, 5)]
pairs2 = _get_pairs_of_neighbours(graph, 2)
assert pairs2 == [(1, 6), (1, 7), (6, 7)]
pairs3 = _get_pairs_of_neighbours(graph, 3)
assert pairs3 == [(1, 8)]
pairs4 = _get_pairs_of_neighbours(graph, 4)
assert pairs4 == []
def test_add_nodes_at_crossings1(self):
# N4---N1=====-------N2
# |
# N3
path1 = PointSet(3) # path from n1 to n2
path1.append(10, 2, 0)
path1.append(10, 3, 0)
path1.append(10, 4, 0)
path1.append(10, 5, 0)
#
path3 = path1.copy() # path to n3
#
path1.append(10, 6, 0)
path1.append(10, 7, 0)
path1.append(10, 8, 0)
#
path3.append(11, 5, 0)
path3.append(12, 5, 0)
path3.append(13, 5, 0)
#
path4 = PointSet(3) # path to n4
path4.append(10, 0, 0)
path4.append(10, 1, 0)
path4.append(10, 2, 0)
graph = nx.Graph()
n1 = tuple(path1[0].flat)
n2 = tuple(path1[-1].flat)
n3 = tuple(path3[-1].flat)
n4 = tuple(path4[0].flat)
graph.add_edge(n1, n2, path=path1, cost=3, ctvalue=3)
graph.add_edge(n1, n3, path=path3, cost=3, ctvalue=3)
graph.add_edge(n1, n4, path=path4, cost=3, ctvalue=3)
# Pre-check
assert len(graph.nodes()) == 4
for n in (n1, n2, n3, n4):
assert n in graph.nodes()
# Deal with crossongs
add_nodes_at_crossings(graph)
# Check result
check_path_integrity(graph)
assert len(graph.nodes()) == 5
added_node = 10, 5, 0
for n in (n1, n2, n3, n4, added_node):
assert n in graph.nodes()
def test_add_nodes_at_crossings2(self):
# N4---N1=====-------====N2
# | |
# N3 N5
path1 = PointSet(3) # path from n1 to n2
path1.append(10, 2, 0)
path1.append(10, 3, 0)
path1.append(10, 4, 0)
path1.append(10, 5, 0)
#
path3 = path1.copy() # path to n3
#
path1.append(10, 6, 0)
path1.append(10, 7, 0)
path1.append(10, 8, 0)
path1.append(10, 9, 0)
#
path3.append(11, 5, 0)
path3.append(12, 5, 0)
path3.append(13, 5, 0)
#
path4 = PointSet(3) # path to n4
path4.append(10, 0, 0)
path4.append(10, 1, 0)
path4.append(10, 2, 0)
#
path5 = PointSet(3) # path from n2 to n5 (note the order)
path5.append(10, 9, 0) # dup path1
path5.append(10, 8, 0) # dup path1
path5.append(10, 7, 0) # dup path1
path5.append(11, 7, 0)
path5.append(12, 7, 0)
path5.append(13, 7, 0)
graph = nx.Graph()
n1 = tuple(path1[0].flat)
n2 = tuple(path1[-1].flat)
n3 = tuple(path3[-1].flat)
n4 = tuple(path4[0].flat)
n5 = tuple(path5[-1].flat)
graph.add_edge(n1, n2, path=path1, cost=3, ctvalue=3)
graph.add_edge(n1, n3, path=path3, cost=3, ctvalue=3)
graph.add_edge(n1, n4, path=path4, cost=3, ctvalue=3)
graph.add_edge(n5, n2, path=path5, cost=3, ctvalue=3)
# Pre-check
assert len(graph.nodes()) == 5
for n in (n1, n2, n3, n4, n5):
assert n in graph.nodes()
# Deal with crossongs
add_nodes_at_crossings(graph)
# Check result
check_path_integrity(graph)
assert len(graph.nodes()) == 7
added_node1 = 10, 5, 0
added_node2 = 10, 7, 0
for n in (n1, n2, n3, n4, n5, added_node1, added_node2):
assert n in graph.nodes()
def test_add_nodes_at_crossings3(self):
# N4---N1>>>>>======-------N2
# | |
# N3 N5
path1 = PointSet(3) # path from n1 to n2
path1.append(10, 2, 0)
path1.append(10, 3, 0)
path1.append(10, 4, 0)
path1.append(10, 5, 0)
#
path3 = path1.copy() # path to n3
path3.append(11, 5, 0)
path3.append(12, 5, 0)
path3.append(13, 5, 0)
#
path1.append(10, 6, 0)
path1.append(10, 7, 0)
#
path5 = path1.copy()
path5.append(11, 7, 0)
path5.append(12, 7, 0)
path5.append(13, 7, 0)
#
path1.append(10, 8, 0)
path1.append(10, 9, 0)
#
path4 = PointSet(3) # path to n4
path4.append(10, 0, 0)
path4.append(10, 1, 0)
path4.append(10, 2, 0)
graph = nx.Graph()
n1 = tuple(path1[0].flat)
n2 = tuple(path1[-1].flat)
n3 = tuple(path3[-1].flat)
n4 = tuple(path4[0].flat)
n5 = tuple(path5[-1].flat)
graph.add_edge(n1, n2, path=path1, cost=3, ctvalue=3)
graph.add_edge(n1, n3, path=path3, cost=3, ctvalue=3)
graph.add_edge(n1, n4, path=path4, cost=3, ctvalue=3)
graph.add_edge(n1, n5, path=path5, cost=3, ctvalue=3)
# Pre-check
assert len(graph.nodes()) == 5
for n in (n1, n2, n3, n4, n5):
assert n in graph.nodes()
# Deal with crossongs
add_nodes_at_crossings(graph)
# Check result
check_path_integrity(graph)
assert len(graph.nodes()) == 7
added_node1 = 10, 5, 0
added_node2 = 10, 7, 0
for n in (n1, n2, n3, n4, n5, added_node1, added_node2):
assert n in graph.nodes()
if __name__ == "__main__":
    # Run test. Nose is acting weird. So wrote a little test runner myself:
    # call every test_* method on a fresh instance and print Ok/Fail/Error,
    # re-raising so the traceback is still shown.
    test = TestStentGraph()
    for m in dir(test):
        if m.startswith('test_'):
            print('Running %s ... ' % m, end='')
            try:
                getattr(test, m)()
            except AssertionError as err:  # NOTE(review): `err` is unused
                print('Fail')
                raise
            except Exception:
                print('Error')
                raise
            else:
                print("Ok")
    # Create simple graph
    # NOTE(review): the graph built below is not used in the lines shown;
    # looks like leftover interactive scratch code.
    graph = StentGraph()
    graph.add_edge(1, 4, cost=5)
    graph.add_edge(1, 5, cost=4)
    graph.add_edge(1, 2, cost=3)
    graph.add_edge(1, 3, cost=2)
| 32.355212 | 92 | 0.524781 |
738f4d744a0cca679a5e51cd25230515942d3ddf | 1,549 | py | Python | apps/relation/admin.py | tiger-fight-tonight/E-Server | 3939bc3f8c090441cc2af17f4e6cb777642fb792 | [
"Apache-2.0"
] | 6 | 2019-07-18T16:21:17.000Z | 2020-11-19T04:47:02.000Z | apps/relation/admin.py | tiger-fight-tonight/E-Server | 3939bc3f8c090441cc2af17f4e6cb777642fb792 | [
"Apache-2.0"
] | null | null | null | apps/relation/admin.py | tiger-fight-tonight/E-Server | 3939bc3f8c090441cc2af17f4e6cb777642fb792 | [
"Apache-2.0"
] | null | null | null | from django.contrib import admin
from relation.models import PaperQuestion, ReportInfo, AnswerCard, SystemCode
@admin.register(PaperQuestion)
class PaperQuestionAdmin(admin.ModelAdmin):
    """Admin configuration for the paper/question relation."""
    # Columns shown in the change-list table
    list_display = (
        'question', 'paper', 'score', 'create_time', 'update_time'
    )
    # Fields searchable from the admin search box
    search_fields = (
        'question', 'paper',
    )
    # Columns that link to the edit form
    list_display_links = (
        'question',
    )
    # Rows per change-list page
    list_per_page = 20
@admin.register(ReportInfo)
class ReportInfoAdmin(admin.ModelAdmin):
    """Admin configuration for exam report records."""
    # Columns shown in the change-list table
    list_display = (
        'examination', 'user', 'grade', 'submit_time', 'create_time',
    )
    # Fields searchable from the admin search box
    search_fields = (
        'examination', 'user',
    )
    # Columns that link to the edit form
    list_display_links = (
        'examination',
    )
    # Rows per change-list page
    list_per_page = 20
@admin.register(AnswerCard)
class AnswerCardAdmin(admin.ModelAdmin):
    """Admin configuration for answer-card records."""
    # Columns shown in the change-list table
    list_display = (
        'examination', 'question', 'user', 'result', 'create_time',
    )
    # Fields searchable from the admin search box
    search_fields = (
        'examination', 'question', 'user'
    )
    # Columns that link to the edit form
    list_display_links = (
        'examination',
    )
    # Rows per change-list page
    list_per_page = 20
@admin.register(SystemCode)
class SystemCodeAdmin(admin.ModelAdmin):
    """Admin configuration for system code/dictionary entries."""
    # Columns shown in the change-list table
    list_display = (
        'type', 'name', 'value', 'create_time', 'update_time',
    )
    # Fields searchable from the admin search box
    search_fields = (
        'type', 'name'
    )
    # Columns that link to the edit form
    list_display_links = (
        'name',
    )
    # Rows per change-list page
    list_per_page = 20
57391d766904dd151f703bddf96423f378c48e2e | 54,365 | py | Python | yt_dlp/extractor/vimeo.py | mrBliss/yt-dlp | aecd021656b672dbb617e5bae54a8986f9c4ebaf | [
"Unlicense"
] | 80 | 2021-05-25T11:33:49.000Z | 2022-03-29T20:36:53.000Z | yt_dlp/extractor/vimeo.py | mrBliss/yt-dlp | aecd021656b672dbb617e5bae54a8986f9c4ebaf | [
"Unlicense"
] | 53 | 2017-04-12T19:53:18.000Z | 2022-02-22T10:33:13.000Z | yt_dlp/extractor/vimeo.py | mrBliss/yt-dlp | aecd021656b672dbb617e5bae54a8986f9c4ebaf | [
"Unlicense"
] | 22 | 2021-05-07T05:01:27.000Z | 2022-03-26T19:10:54.000Z | # coding: utf-8
from __future__ import unicode_literals
import base64
import functools
import re
import itertools
from .common import InfoExtractor
from ..compat import (
compat_kwargs,
compat_HTTPError,
compat_str,
compat_urlparse,
)
from ..utils import (
clean_html,
determine_ext,
ExtractorError,
get_element_by_class,
HEADRequest,
js_to_json,
int_or_none,
merge_dicts,
OnDemandPagedList,
parse_filesize,
parse_iso8601,
parse_qs,
sanitized_Request,
smuggle_url,
std_headers,
str_or_none,
try_get,
unified_timestamp,
unsmuggle_url,
urlencode_postdata,
urljoin,
unescapeHTML,
urlhandle_detect_ext,
)
class VimeoBaseInfoExtractor(InfoExtractor):
_NETRC_MACHINE = 'vimeo'
_LOGIN_REQUIRED = False
_LOGIN_URL = 'https://vimeo.com/log_in'
    def _login(self):
        """Log into vimeo.com with the configured credentials.

        No-op when no credentials are available, unless the extractor
        requires login, in which case an ExtractorError is raised.
        """
        username, password = self._get_login_info()
        if username is None:
            if self._LOGIN_REQUIRED:
                raise ExtractorError('No login info available, needed for using %s.' % self.IE_NAME, expected=True)
            return
        webpage = self._download_webpage(
            self._LOGIN_URL, None, 'Downloading login page')
        # The login form needs an XSRF token and viewer id taken from the
        # login page itself.
        token, vuid = self._extract_xsrft_and_vuid(webpage)
        data = {
            'action': 'login',
            'email': username,
            'password': password,
            'service': 'vimeo',
            'token': token,
        }
        self._set_vimeo_cookie('vuid', vuid)
        try:
            self._download_webpage(
                self._LOGIN_URL, None, 'Logging in',
                data=urlencode_postdata(data), headers={
                    'Content-Type': 'application/x-www-form-urlencoded',
                    'Referer': self._LOGIN_URL,
                })
        except ExtractorError as e:
            # HTTP 418 is what Vimeo returns for bad credentials
            if isinstance(e.cause, compat_HTTPError) and e.cause.code == 418:
                raise ExtractorError(
                    'Unable to log in: bad username or password',
                    expected=True)
            raise ExtractorError('Unable to log in')
def _get_video_password(self):
password = self.get_param('videopassword')
if password is None:
raise ExtractorError(
'This video is protected by a password, use the --video-password option',
expected=True)
return password
    def _verify_video_password(self, url, video_id, password, token, vuid):
        """POST the video password and return the resulting webpage.

        Raises (via the 'Wrong password' fatal note) when the password
        is rejected.
        """
        if url.startswith('http://'):
            # vimeo only supports https now, but the user can give an http url
            url = url.replace('http://', 'https://')
        self._set_vimeo_cookie('vuid', vuid)
        return self._download_webpage(
            url + '/password', video_id, 'Verifying the password',
            'Wrong password', data=urlencode_postdata({
                'password': password,
                'token': token,
            }), headers={
                'Content-Type': 'application/x-www-form-urlencoded',
                'Referer': url,
            })
def _extract_xsrft_and_vuid(self, webpage):
xsrft = self._search_regex(
r'(?:(?P<q1>["\'])xsrft(?P=q1)\s*:|xsrft\s*[=:])\s*(?P<q>["\'])(?P<xsrft>.+?)(?P=q)',
webpage, 'login token', group='xsrft')
vuid = self._search_regex(
r'["\']vuid["\']\s*:\s*(["\'])(?P<vuid>.+?)\1',
webpage, 'vuid', group='vuid')
return xsrft, vuid
    def _extract_vimeo_config(self, webpage, video_id, *args, **kwargs):
        """Find and parse the `vimeo.config` JSON object embedded in a page.

        Returns the parsed dict, or None when no config is present.
        Extra args/kwargs are forwarded to _search_regex.
        """
        vimeo_config = self._search_regex(
            r'vimeo\.config\s*=\s*(?:({.+?})|_extend\([^,]+,\s+({.+?})\));',
            webpage, 'vimeo config', *args, **compat_kwargs(kwargs))
        if vimeo_config:
            return self._parse_json(vimeo_config, video_id)
    def _set_vimeo_cookie(self, name, value):
        # Convenience wrapper: all cookies are scoped to the vimeo.com domain.
        self._set_cookie('vimeo.com', name, value)
    def _vimeo_sort_formats(self, formats):
        """Sort formats with a Vimeo-specific preference order (no tbr)."""
        # Note: Bitrates are completely broken. Single m3u8 may contain entries in kbps and bps
        # at the same time without actual units specified.
        self._sort_formats(formats, ('quality', 'res', 'fps', 'hdr:12', 'source'))
    def _parse_config(self, config, video_id):
        """Build an info dict from a Vimeo player config JSON object.

        Extracts progressive HTTP, HLS and DASH formats, subtitles,
        thumbnails, chapters and uploader metadata.
        """
        video_data = config['video']
        video_title = video_data['title']
        live_event = video_data.get('live_event') or {}
        is_live = live_event.get('status') == 'started'
        request = config.get('request') or {}
        formats = []
        config_files = video_data.get('files') or request.get('files') or {}
        # Direct (progressive download) MP4 variants
        for f in (config_files.get('progressive') or []):
            video_url = f.get('url')
            if not video_url:
                continue
            formats.append({
                'url': video_url,
                'format_id': 'http-%s' % f.get('quality'),
                'source_preference': 10,
                'width': int_or_none(f.get('width')),
                'height': int_or_none(f.get('height')),
                'fps': int_or_none(f.get('fps')),
                'tbr': int_or_none(f.get('bitrate')),
            })
        # TODO: fix handling of 308 status code returned for live archive manifest requests
        sep_pattern = r'/sep/video/'
        # Segmented formats, one manifest per CDN; '/sep/video/' manifests
        # are also tried with the plain '/video/' variant.
        for files_type in ('hls', 'dash'):
            for cdn_name, cdn_data in (try_get(config_files, lambda x: x[files_type]['cdns']) or {}).items():
                manifest_url = cdn_data.get('url')
                if not manifest_url:
                    continue
                format_id = '%s-%s' % (files_type, cdn_name)
                sep_manifest_urls = []
                if re.search(sep_pattern, manifest_url):
                    for suffix, repl in (('', 'video'), ('_sep', 'sep/video')):
                        sep_manifest_urls.append((format_id + suffix, re.sub(
                            sep_pattern, '/%s/' % repl, manifest_url)))
                else:
                    sep_manifest_urls = [(format_id, manifest_url)]
                for f_id, m_url in sep_manifest_urls:
                    if files_type == 'hls':
                        formats.extend(self._extract_m3u8_formats(
                            m_url, video_id, 'mp4',
                            'm3u8' if is_live else 'm3u8_native', m3u8_id=f_id,
                            note='Downloading %s m3u8 information' % cdn_name,
                            fatal=False))
                    elif files_type == 'dash':
                        if 'json=1' in m_url:
                            # json=1 URLs return a JSON wrapper with the real URL
                            real_m_url = (self._download_json(m_url, video_id, fatal=False) or {}).get('url')
                            if real_m_url:
                                m_url = real_m_url
                        mpd_formats = self._extract_mpd_formats(
                            m_url.replace('/master.json', '/master.mpd'), video_id, f_id,
                            'Downloading %s MPD information' % cdn_name,
                            fatal=False)
                        formats.extend(mpd_formats)
        # Recording of a finished live event, if available
        live_archive = live_event.get('archive') or {}
        live_archive_source_url = live_archive.get('source_url')
        if live_archive_source_url and live_archive.get('status') == 'done':
            formats.append({
                'format_id': 'live-archive-source',
                'url': live_archive_source_url,
                'quality': 10,
            })
        subtitles = {}
        for tt in (request.get('text_tracks') or []):
            subtitles[tt['lang']] = [{
                'ext': 'vtt',
                'url': urljoin('https://vimeo.com', tt['url']),
            }]
        thumbnails = []
        if not is_live:
            for key, thumb in (video_data.get('thumbs') or {}).items():
                thumbnails.append({
                    'id': key,
                    'width': int_or_none(key),
                    'url': thumb,
                })
            thumbnail = video_data.get('thumbnail')
            if thumbnail:
                thumbnails.append({
                    'url': thumbnail,
                })
        owner = video_data.get('owner') or {}
        video_uploader_url = owner.get('url')
        duration = int_or_none(video_data.get('duration'))
        # Pair each chapter with its successor to derive end times; the
        # sentinel entry gives the last chapter the video duration as end.
        chapter_data = try_get(config, lambda x: x['embed']['chapters']) or []
        chapters = [{
            'title': current_chapter.get('title'),
            'start_time': current_chapter.get('timecode'),
            'end_time': next_chapter.get('timecode'),
        } for current_chapter, next_chapter in zip(chapter_data, chapter_data[1:] + [{'timecode': duration}])]
        if chapters and chapters[0]['start_time']:  # Chapters may not start from 0
            chapters[:0] = [{'title': '<Untitled>', 'start_time': 0, 'end_time': chapters[0]['start_time']}]
        return {
            'id': str_or_none(video_data.get('id')) or video_id,
            'title': video_title,
            'uploader': owner.get('name'),
            'uploader_id': video_uploader_url.split('/')[-1] if video_uploader_url else None,
            'uploader_url': video_uploader_url,
            'thumbnails': thumbnails,
            'duration': duration,
            'chapters': chapters or None,
            'formats': formats,
            'subtitles': subtitles,
            'is_live': is_live,
        }
    def _extract_original_format(self, url, video_id, unlisted_hash=None):
        """Try to obtain the original (source) file as a download format.

        First asks the page's load_download_config endpoint; if that does
        not yield a usable source file, falls back to the api.vimeo.com
        video endpoint using a viewer JWT. Returns a format dict or None.
        """
        query = {'action': 'load_download_config'}
        if unlisted_hash:
            query['unlisted_hash'] = unlisted_hash
        download_data = self._download_json(
            url, video_id, fatal=False, query=query,
            headers={'X-Requested-With': 'XMLHttpRequest'},
            expected_status=(403, 404)) or {}
        source_file = download_data.get('source_file')
        download_url = try_get(source_file, lambda x: x['download_url'])
        # Cold/defrosting sources are not directly downloadable
        if download_url and not source_file.get('is_cold') and not source_file.get('is_defrosting'):
            source_name = source_file.get('public_name', 'Original')
            if self._is_valid_url(download_url, video_id, '%s video' % source_name):
                ext = (try_get(
                    source_file, lambda x: x['extension'],
                    compat_str) or determine_ext(
                    download_url, None) or 'mp4').lower()
                return {
                    'url': download_url,
                    'ext': ext,
                    'width': int_or_none(source_file.get('width')),
                    'height': int_or_none(source_file.get('height')),
                    'filesize': parse_filesize(source_file.get('size')),
                    'format_id': source_name,
                    'quality': 1,
                }
        # Fallback: query the API with a freshly fetched viewer JWT
        jwt_response = self._download_json(
            'https://vimeo.com/_rv/viewer', video_id, note='Downloading jwt token', fatal=False) or {}
        if not jwt_response.get('jwt'):
            return
        headers = {'Authorization': 'jwt %s' % jwt_response['jwt']}
        original_response = self._download_json(
            f'https://api.vimeo.com/videos/{video_id}', video_id,
            headers=headers, fatal=False, expected_status=(403, 404)) or {}
        for download_data in original_response.get('download') or []:
            download_url = download_data.get('link')
            if not download_url or download_data.get('quality') != 'source':
                continue
            # Extension: from the URL's filename query arg, else via a HEAD request
            ext = determine_ext(parse_qs(download_url).get('filename', [''])[0].lower(), default_ext=None)
            if not ext:
                urlh = self._request_webpage(
                    HEADRequest(download_url), video_id, fatal=False, note='Determining source extension')
                ext = urlh and urlhandle_detect_ext(urlh)
            return {
                'url': download_url,
                'ext': ext or 'unknown_video',
                'format_id': download_data.get('public_name', 'Original'),
                'width': int_or_none(download_data.get('width')),
                'height': int_or_none(download_data.get('height')),
                'fps': int_or_none(download_data.get('fps')),
                'filesize': int_or_none(download_data.get('size')),
                'quality': 1,
            }
class VimeoIE(VimeoBaseInfoExtractor):
"""Information extractor for vimeo.com."""
# _VALID_URL matches Vimeo URLs
_VALID_URL = r'''(?x)
https?://
(?:
(?:
www|
player
)
\.
)?
vimeo(?:pro)?\.com/
(?!(?:channels|album|showcase)/[^/?#]+/?(?:$|[?#])|[^/]+/review/|ondemand/)
(?:[^/]+/)*?
(?:
(?:
play_redirect_hls|
moogaloop\.swf)\?clip_id=
)?
(?:videos?/)?
(?P<id>[0-9]+)
(?:/(?P<unlisted_hash>[\da-f]{10}))?
/?(?:[?&].*)?(?:[#].*)?$
'''
IE_NAME = 'vimeo'
_TESTS = [
{
'url': 'http://vimeo.com/56015672#at=0',
'md5': '8879b6cc097e987f02484baf890129e5',
'info_dict': {
'id': '56015672',
'ext': 'mp4',
'title': "youtube-dl test video - \u2605 \" ' \u5e78 / \\ \u00e4 \u21ad \U0001d550",
'description': 'md5:2d3305bad981a06ff79f027f19865021',
'timestamp': 1355990239,
'upload_date': '20121220',
'uploader_url': r're:https?://(?:www\.)?vimeo\.com/user7108434',
'uploader_id': 'user7108434',
'uploader': 'Filippo Valsorda',
'duration': 10,
'license': 'by-sa',
},
'params': {
'format': 'best[protocol=https]',
},
},
{
'url': 'http://vimeopro.com/openstreetmapus/state-of-the-map-us-2013/video/68093876',
'md5': '3b5ca6aa22b60dfeeadf50b72e44ed82',
'note': 'Vimeo Pro video (#1197)',
'info_dict': {
'id': '68093876',
'ext': 'mp4',
'uploader_url': r're:https?://(?:www\.)?vimeo\.com/openstreetmapus',
'uploader_id': 'openstreetmapus',
'uploader': 'OpenStreetMap US',
'title': 'Andy Allan - Putting the Carto into OpenStreetMap Cartography',
'description': 'md5:2c362968038d4499f4d79f88458590c1',
'duration': 1595,
'upload_date': '20130610',
'timestamp': 1370893156,
'license': 'by',
},
'params': {
'format': 'best[protocol=https]',
},
},
{
'url': 'http://player.vimeo.com/video/54469442',
'md5': '619b811a4417aa4abe78dc653becf511',
'note': 'Videos that embed the url in the player page',
'info_dict': {
'id': '54469442',
'ext': 'mp4',
'title': 'Kathy Sierra: Building the minimum Badass User, Business of Software 2012',
'uploader': 'Business of Software',
'uploader_url': r're:https?://(?:www\.)?vimeo\.com/businessofsoftware',
'uploader_id': 'businessofsoftware',
'duration': 3610,
'description': None,
},
'params': {
'format': 'best[protocol=https]',
},
},
{
'url': 'http://vimeo.com/68375962',
'md5': 'aaf896bdb7ddd6476df50007a0ac0ae7',
'note': 'Video protected with password',
'info_dict': {
'id': '68375962',
'ext': 'mp4',
'title': 'youtube-dl password protected test video',
'timestamp': 1371200155,
'upload_date': '20130614',
'uploader_url': r're:https?://(?:www\.)?vimeo\.com/user18948128',
'uploader_id': 'user18948128',
'uploader': 'Jaime Marquínez Ferrándiz',
'duration': 10,
'description': 'md5:dca3ea23adb29ee387127bc4ddfce63f',
},
'params': {
'format': 'best[protocol=https]',
'videopassword': 'youtube-dl',
},
},
{
'url': 'http://vimeo.com/channels/keypeele/75629013',
'md5': '2f86a05afe9d7abc0b9126d229bbe15d',
'info_dict': {
'id': '75629013',
'ext': 'mp4',
'title': 'Key & Peele: Terrorist Interrogation',
'description': 'md5:8678b246399b070816b12313e8b4eb5c',
'uploader_url': r're:https?://(?:www\.)?vimeo\.com/atencio',
'uploader_id': 'atencio',
'uploader': 'Peter Atencio',
'channel_id': 'keypeele',
'channel_url': r're:https?://(?:www\.)?vimeo\.com/channels/keypeele',
'timestamp': 1380339469,
'upload_date': '20130928',
'duration': 187,
},
'params': {'format': 'http-1080p'},
},
{
'url': 'http://vimeo.com/76979871',
'note': 'Video with subtitles',
'info_dict': {
'id': '76979871',
'ext': 'mp4',
'title': 'The New Vimeo Player (You Know, For Videos)',
'description': 'md5:2ec900bf97c3f389378a96aee11260ea',
'timestamp': 1381846109,
'upload_date': '20131015',
'uploader_url': r're:https?://(?:www\.)?vimeo\.com/staff',
'uploader_id': 'staff',
'uploader': 'Vimeo Staff',
'duration': 62,
'subtitles': {
'de': [{'ext': 'vtt'}],
'en': [{'ext': 'vtt'}],
'es': [{'ext': 'vtt'}],
'fr': [{'ext': 'vtt'}],
},
},
'expected_warnings': ['Ignoring subtitle tracks found in the HLS manifest'],
},
{
# from https://www.ouya.tv/game/Pier-Solar-and-the-Great-Architects/
'url': 'https://player.vimeo.com/video/98044508',
'note': 'The js code contains assignments to the same variable as the config',
'info_dict': {
'id': '98044508',
'ext': 'mp4',
'title': 'Pier Solar OUYA Official Trailer',
'uploader': 'Tulio Gonçalves',
'uploader_url': r're:https?://(?:www\.)?vimeo\.com/user28849593',
'uploader_id': 'user28849593',
},
},
{
# contains original format
'url': 'https://vimeo.com/33951933',
'md5': '53c688fa95a55bf4b7293d37a89c5c53',
'info_dict': {
'id': '33951933',
'ext': 'mp4',
'title': 'FOX CLASSICS - Forever Classic ID - A Full Minute',
'uploader': 'The DMCI',
'uploader_url': r're:https?://(?:www\.)?vimeo\.com/dmci',
'uploader_id': 'dmci',
'timestamp': 1324343742,
'upload_date': '20111220',
'description': 'md5:ae23671e82d05415868f7ad1aec21147',
},
},
{
'note': 'Contains original format not accessible in webpage',
'url': 'https://vimeo.com/393756517',
'md5': 'c464af248b592190a5ffbb5d33f382b0',
'info_dict': {
'id': '393756517',
'ext': 'mov',
'timestamp': 1582642091,
'uploader_id': 'frameworkla',
'title': 'Straight To Hell - Sabrina: Netflix',
'uploader': 'Framework Studio',
'description': 'md5:f2edc61af3ea7a5592681ddbb683db73',
'upload_date': '20200225',
},
},
{
# only available via https://vimeo.com/channels/tributes/6213729 and
# not via https://vimeo.com/6213729
'url': 'https://vimeo.com/channels/tributes/6213729',
'info_dict': {
'id': '6213729',
'ext': 'mp4',
'title': 'Vimeo Tribute: The Shining',
'uploader': 'Casey Donahue',
'uploader_url': r're:https?://(?:www\.)?vimeo\.com/caseydonahue',
'uploader_id': 'caseydonahue',
'channel_url': r're:https?://(?:www\.)?vimeo\.com/channels/tributes',
'channel_id': 'tributes',
'timestamp': 1250886430,
'upload_date': '20090821',
'description': 'md5:bdbf314014e58713e6e5b66eb252f4a6',
},
'params': {
'skip_download': True,
},
},
{
# redirects to ondemand extractor and should be passed through it
# for successful extraction
'url': 'https://vimeo.com/73445910',
'info_dict': {
'id': '73445910',
'ext': 'mp4',
'title': 'The Reluctant Revolutionary',
'uploader': '10Ft Films',
'uploader_url': r're:https?://(?:www\.)?vimeo\.com/tenfootfilms',
'uploader_id': 'tenfootfilms',
'description': 'md5:0fa704e05b04f91f40b7f3ca2e801384',
'upload_date': '20130830',
'timestamp': 1377853339,
},
'params': {
'skip_download': True,
},
'skip': 'this page is no longer available.',
},
{
'url': 'http://player.vimeo.com/video/68375962',
'md5': 'aaf896bdb7ddd6476df50007a0ac0ae7',
'info_dict': {
'id': '68375962',
'ext': 'mp4',
'title': 'youtube-dl password protected test video',
'uploader_url': r're:https?://(?:www\.)?vimeo\.com/user18948128',
'uploader_id': 'user18948128',
'uploader': 'Jaime Marquínez Ferrándiz',
'duration': 10,
},
'params': {
'format': 'best[protocol=https]',
'videopassword': 'youtube-dl',
},
},
{
'url': 'http://vimeo.com/moogaloop.swf?clip_id=2539741',
'only_matching': True,
},
{
'url': 'https://vimeo.com/109815029',
'note': 'Video not completely processed, "failed" seed status',
'only_matching': True,
},
{
'url': 'https://vimeo.com/groups/travelhd/videos/22439234',
'only_matching': True,
},
{
'url': 'https://vimeo.com/album/2632481/video/79010983',
'only_matching': True,
},
{
'url': 'https://vimeo.com/showcase/3253534/video/119195465',
'note': 'A video in a password protected album (showcase)',
'info_dict': {
'id': '119195465',
'ext': 'mp4',
'title': 'youtube-dl test video \'ä"BaW_jenozKc',
'uploader': 'Philipp Hagemeister',
'uploader_id': 'user20132939',
'description': 'md5:fa7b6c6d8db0bdc353893df2f111855b',
'upload_date': '20150209',
'timestamp': 1423518307,
},
'params': {
'format': 'best[protocol=https]',
'videopassword': 'youtube-dl',
},
},
{
# source file returns 403: Forbidden
'url': 'https://vimeo.com/7809605',
'only_matching': True,
},
{
'note': 'Direct URL with hash',
'url': 'https://vimeo.com/160743502/abd0e13fb4',
'info_dict': {
'id': '160743502',
'ext': 'mp4',
'uploader': 'Julian Tryba',
'uploader_id': 'aliniamedia',
'title': 'Harrisville New Hampshire',
'timestamp': 1459259666,
'upload_date': '20160329',
},
'params': {'skip_download': True},
},
{
'url': 'https://vimeo.com/138909882',
'info_dict': {
'id': '138909882',
'ext': 'mp4',
'title': 'Eastnor Castle 2015 Firework Champions - The Promo!',
'description': 'md5:5967e090768a831488f6e74b7821b3c1',
'uploader_id': 'fireworkchampions',
'uploader': 'Firework Champions',
'upload_date': '20150910',
'timestamp': 1441901895,
},
'params': {
'skip_download': True,
'format': 'Original',
},
},
{
'url': 'https://vimeo.com/channels/staffpicks/143603739',
'info_dict': {
'id': '143603739',
'ext': 'mp4',
'uploader': 'Karim Huu Do',
'timestamp': 1445846953,
'upload_date': '20151026',
'title': 'The Shoes - Submarine Feat. Blaine Harrison',
'uploader_id': 'karimhd',
'description': 'md5:8e2eea76de4504c2e8020a9bcfa1e843',
},
'params': {'skip_download': 'm3u8'},
},
{
# requires passing unlisted_hash(a52724358e) to load_download_config request
'url': 'https://vimeo.com/392479337/a52724358e',
'only_matching': True,
},
# https://gettingthingsdone.com/workflowmap/
# vimeo embed with check-password page protected by Referer header
]
    @staticmethod
    def _smuggle_referrer(url, referrer_url):
        """Attach referrer_url to url as a smuggled Referer HTTP header."""
        return smuggle_url(url, {'http_headers': {'Referer': referrer_url}})
@staticmethod
def _extract_urls(url, webpage):
urls = []
# Look for embedded (iframe) Vimeo player
for mobj in re.finditer(
r'<iframe[^>]+?src=(["\'])(?P<url>(?:https?:)?//player\.vimeo\.com/video/\d+.*?)\1',
webpage):
urls.append(VimeoIE._smuggle_referrer(unescapeHTML(mobj.group('url')), url))
PLAIN_EMBED_RE = (
# Look for embedded (swf embed) Vimeo player
r'<embed[^>]+?src=(["\'])(?P<url>(?:https?:)?//(?:www\.)?vimeo\.com/moogaloop\.swf.+?)\1',
# Look more for non-standard embedded Vimeo player
r'<video[^>]+src=(["\'])(?P<url>(?:https?:)?//(?:www\.)?vimeo\.com/[0-9]+)\1',
)
for embed_re in PLAIN_EMBED_RE:
for mobj in re.finditer(embed_re, webpage):
urls.append(mobj.group('url'))
return urls
@staticmethod
def _extract_url(url, webpage):
urls = VimeoIE._extract_urls(url, webpage)
return urls[0] if urls else None
    def _verify_player_video_password(self, url, video_id, headers):
        """Submit the video password to the player's check-password endpoint.

        Returns the player config JSON on success; raises on a wrong password.
        """
        password = self._get_video_password()
        # The endpoint expects the password base64-encoded
        data = urlencode_postdata({
            'password': base64.b64encode(password.encode()),
        })
        headers = merge_dicts(headers, {
            'Content-Type': 'application/x-www-form-urlencoded',
        })
        checked = self._download_json(
            url + '/check-password', video_id,
            'Verifying the password', data=data, headers=headers)
        if checked is False:
            raise ExtractorError('Wrong video password', expected=True)
        return checked
    def _real_initialize(self):
        """Log in (when credentials were supplied) before extraction starts."""
        self._login()
    def _extract_from_api(self, video_id, unlisted_hash=None):
        """Extract video info via api.vimeo.com using a viewer JWT.

        Used for unlisted videos (id/hash URLs) and as a fallback when no
        page config can be found on the watch page.
        """
        token = self._download_json(
            'https://vimeo.com/_rv/jwt', video_id, headers={
                'X-Requested-With': 'XMLHttpRequest'
            })['token']
        api_url = 'https://api.vimeo.com/videos/' + video_id
        if unlisted_hash:
            api_url += ':' + unlisted_hash
        video = self._download_json(
            api_url, video_id, headers={
                'Authorization': 'jwt ' + token,
            }, query={
                'fields': 'config_url,created_time,description,license,metadata.connections.comments.total,metadata.connections.likes.total,release_time,stats.plays',
            })
        # Formats etc. come from the player config referenced by the API
        info = self._parse_config(self._download_json(
            video['config_url'], video_id), video_id)
        self._vimeo_sort_formats(info['formats'])
        get_timestamp = lambda x: parse_iso8601(video.get(x + '_time'))
        info.update({
            'description': video.get('description'),
            'license': video.get('license'),
            'release_timestamp': get_timestamp('release'),
            'timestamp': get_timestamp('created'),
            'view_count': int_or_none(try_get(video, lambda x: x['stats']['plays'])),
        })
        connections = try_get(
            video, lambda x: x['metadata']['connections'], dict) or {}
        for k in ('comment', 'like'):
            info[k + '_count'] = int_or_none(try_get(connections, lambda x: x[k + 's']['total']))
        return info
    def _try_album_password(self, url):
        """If url points into a password-protected album (showcase),
        authenticate against it so subsequent page requests succeed.

        No-op for non-album URLs or unprotected albums.
        """
        album_id = self._search_regex(
            r'vimeo\.com/(?:album|showcase)/([^/]+)', url, 'album id', default=None)
        if not album_id:
            return
        viewer = self._download_json(
            'https://vimeo.com/_rv/viewer', album_id, fatal=False)
        if not viewer:
            # Fall back to the bootstrap data embedded in the album page
            webpage = self._download_webpage(url, album_id)
            viewer = self._parse_json(self._search_regex(
                r'bootstrap_data\s*=\s*({.+?})</script>',
                webpage, 'bootstrap data'), album_id)['viewer']
        jwt = viewer['jwt']
        album = self._download_json(
            'https://api.vimeo.com/albums/' + album_id,
            album_id, headers={'Authorization': 'jwt ' + jwt},
            query={'fields': 'description,name,privacy'})
        if try_get(album, lambda x: x['privacy']['view']) == 'password':
            password = self.get_param('videopassword')
            if not password:
                raise ExtractorError(
                    'This album is protected by a password, use the --video-password option',
                    expected=True)
            self._set_vimeo_cookie('vuid', viewer['vuid'])
            try:
                self._download_json(
                    'https://vimeo.com/showcase/%s/auth' % album_id,
                    album_id, 'Verifying the password', data=urlencode_postdata({
                        'password': password,
                        'token': viewer['xsrft'],
                    }), headers={
                        'X-Requested-With': 'XMLHttpRequest',
                    })
            except ExtractorError as e:
                # HTTP 401 is what the auth endpoint returns for a bad password
                if isinstance(e.cause, compat_HTTPError) and e.cause.code == 401:
                    raise ExtractorError('Wrong password', expected=True)
                raise
    def _real_extract(self, url):
        """Main extraction entry point for vimeo.com / player.vimeo.com URLs."""
        url, data = unsmuggle_url(url, {})
        headers = std_headers.copy()
        if 'http_headers' in data:
            headers.update(data['http_headers'])
        if 'Referer' not in headers:
            headers['Referer'] = url
        # Extract ID from URL
        mobj = self._match_valid_url(url).groupdict()
        video_id, unlisted_hash = mobj['id'], mobj.get('unlisted_hash')
        if unlisted_hash:
            # id/hash URLs are handled entirely through the API
            return self._extract_from_api(video_id, unlisted_hash)
        orig_url = url
        is_pro = 'vimeopro.com/' in url
        if is_pro:
            # some videos require portfolio_id to be present in player url
            # https://github.com/ytdl-org/youtube-dl/issues/20070
            url = self._extract_url(url, self._download_webpage(url, video_id))
            if not url:
                url = 'https://vimeo.com/' + video_id
        elif any(p in url for p in ('play_redirect_hls', 'moogaloop.swf')):
            # Legacy player URLs: normalize to the canonical watch page
            url = 'https://vimeo.com/' + video_id
        self._try_album_password(url)
        try:
            # Retrieve video webpage to extract further information
            webpage, urlh = self._download_webpage_handle(
                url, video_id, headers=headers)
            redirect_url = urlh.geturl()
        except ExtractorError as ee:
            if isinstance(ee.cause, compat_HTTPError) and ee.cause.code == 403:
                errmsg = ee.cause.read()
                if b'Because of its privacy settings, this video cannot be played here' in errmsg:
                    raise ExtractorError(
                        'Cannot download embed-only video without embedding '
                        'URL. Please call yt-dlp with the URL of the page '
                        'that embeds this video.',
                        expected=True)
            raise
        if '://player.vimeo.com/video/' in url:
            # Player pages carry the config inline; view == 4 marks a
            # password-protected embed.
            config = self._parse_json(self._search_regex(
                r'\bconfig\s*=\s*({.+?})\s*;', webpage, 'info section'), video_id)
            if config.get('view') == 4:
                config = self._verify_player_video_password(
                    redirect_url, video_id, headers)
            info = self._parse_config(config, video_id)
            self._vimeo_sort_formats(info['formats'])
            return info
        if re.search(r'<form[^>]+?id="pw_form"', webpage):
            # Password-protected watch page: authenticate and re-fetch
            video_password = self._get_video_password()
            token, vuid = self._extract_xsrft_and_vuid(webpage)
            webpage = self._verify_video_password(
                redirect_url, video_id, video_password, token, vuid)
        vimeo_config = self._extract_vimeo_config(webpage, video_id, default=None)
        if vimeo_config:
            seed_status = vimeo_config.get('seed_status') or {}
            if seed_status.get('state') == 'failed':
                raise ExtractorError(
                    '%s said: %s' % (self.IE_NAME, seed_status['title']),
                    expected=True)
        cc_license = None
        timestamp = None
        video_description = None
        info_dict = {}
        config_url = None
        channel_id = self._search_regex(
            r'vimeo\.com/channels/([^/]+)', url, 'channel id', default=None)
        if channel_id:
            config_url = self._html_search_regex(
                r'\bdata-config-url="([^"]+)"', webpage, 'config URL', default=None)
            video_description = clean_html(get_element_by_class('description', webpage))
            info_dict.update({
                'channel_id': channel_id,
                'channel_url': 'https://vimeo.com/channels/' + channel_id,
            })
        if not config_url:
            page_config = self._parse_json(self._search_regex(
                r'vimeo\.(?:clip|vod_title)_page_config\s*=\s*({.+?});',
                webpage, 'page config', default='{}'), video_id, fatal=False)
            if not page_config:
                # No inline page config at all: last resort is the API
                return self._extract_from_api(video_id)
            config_url = page_config['player']['config_url']
            cc_license = page_config.get('cc_license')
            clip = page_config.get('clip') or {}
            timestamp = clip.get('uploaded_on')
            video_description = clean_html(
                clip.get('description') or page_config.get('description_html_escaped'))
        config = self._download_json(config_url, video_id)
        video = config.get('video') or {}
        vod = video.get('vod') or {}
        def is_rented():
            # Heuristics for "user has rented/purchased this VOD title"
            if '>You rented this title.<' in webpage:
                return True
            if try_get(config, lambda x: x['user']['purchased']):
                return True
            for purchase_option in (vod.get('purchase_options') or []):
                if purchase_option.get('purchased'):
                    return True
                label = purchase_option.get('label_string')
                if label and (label.startswith('You rented this') or label.endswith(' remaining')):
                    return True
            return False
        if is_rented() and vod.get('is_trailer'):
            # We got the trailer page of a rented feature: redirect to the
            # feature itself (force_feature_id prevents a redirect loop).
            feature_id = vod.get('feature_id')
            if feature_id and not data.get('force_feature_id', False):
                return self.url_result(smuggle_url(
                    'https://player.vimeo.com/player/%s' % feature_id,
                    {'force_feature_id': True}), 'Vimeo')
        # Description fallbacks: page markup, meta tags, then (pro) the
        # original vimeopro page.
        if not video_description:
            video_description = self._html_search_regex(
                r'(?s)<div\s+class="[^"]*description[^"]*"[^>]*>(.*?)</div>',
                webpage, 'description', default=None)
        if not video_description:
            video_description = self._html_search_meta(
                ['description', 'og:description', 'twitter:description'],
                webpage, default=None)
        if not video_description and is_pro:
            orig_webpage = self._download_webpage(
                orig_url, video_id,
                note='Downloading webpage for description',
                fatal=False)
            if orig_webpage:
                video_description = self._html_search_meta(
                    'description', orig_webpage, default=None)
        if not video_description:
            self.report_warning('Cannot find video description')
        if not timestamp:
            timestamp = self._search_regex(
                r'<time[^>]+datetime="([^"]+)"', webpage,
                'timestamp', default=None)
        view_count = int_or_none(self._search_regex(r'UserPlays:(\d+)', webpage, 'view count', default=None))
        like_count = int_or_none(self._search_regex(r'UserLikes:(\d+)', webpage, 'like count', default=None))
        comment_count = int_or_none(self._search_regex(r'UserComments:(\d+)', webpage, 'comment count', default=None))
        formats = []
        # Source (original upload) file, if downloadable
        source_format = self._extract_original_format(
            'https://vimeo.com/' + video_id, video_id, video.get('unlisted_hash'))
        if source_format:
            formats.append(source_format)
        info_dict_config = self._parse_config(config, video_id)
        formats.extend(info_dict_config['formats'])
        self._vimeo_sort_formats(formats)
        json_ld = self._search_json_ld(webpage, video_id, default={})
        if not cc_license:
            cc_license = self._search_regex(
                r'<link[^>]+rel=["\']license["\'][^>]+href=(["\'])(?P<license>(?:(?!\1).)+)\1',
                webpage, 'license', default=None, group='license')
        info_dict.update({
            'formats': formats,
            'timestamp': unified_timestamp(timestamp),
            'description': video_description,
            'webpage_url': url,
            'view_count': view_count,
            'like_count': like_count,
            'comment_count': comment_count,
            'license': cc_license,
        })
        return merge_dicts(info_dict, info_dict_config, json_ld)
class VimeoOndemandIE(VimeoIE):
    """Extractor for Vimeo On Demand pages (https://vimeo.com/ondemand/...).

    All extraction logic is inherited from VimeoIE; only the URL pattern
    and test cases differ.
    """
    IE_NAME = 'vimeo:ondemand'
    _VALID_URL = r'https?://(?:www\.)?vimeo\.com/ondemand/(?:[^/]+/)?(?P<id>[^/?#&]+)'
    _TESTS = [{
        # ondemand video not available via https://vimeo.com/id
        'url': 'https://vimeo.com/ondemand/20704',
        'md5': 'c424deda8c7f73c1dfb3edd7630e2f35',
        'info_dict': {
            'id': '105442900',
            'ext': 'mp4',
            'title': 'המעבדה - במאי יותם פלדמן',
            'uploader': 'גם סרטים',
            'uploader_url': r're:https?://(?:www\.)?vimeo\.com/gumfilms',
            'uploader_id': 'gumfilms',
            'description': 'md5:4c027c965e439de4baab621e48b60791',
            'upload_date': '20140906',
            'timestamp': 1410032453,
        },
        'params': {
            'format': 'best[protocol=https]',
        },
        'expected_warnings': ['Unable to download JSON metadata'],
    }, {
        # requires Referer to be passed along with og:video:url
        'url': 'https://vimeo.com/ondemand/36938/126682985',
        'info_dict': {
            'id': '126584684',
            'ext': 'mp4',
            'title': 'Rävlock, rätt läte på rätt plats',
            'uploader': 'Lindroth & Norin',
            'uploader_url': r're:https?://(?:www\.)?vimeo\.com/lindrothnorin',
            'uploader_id': 'lindrothnorin',
            'description': 'md5:c3c46a90529612c8279fb6af803fc0df',
            'upload_date': '20150502',
            'timestamp': 1430586422,
        },
        'params': {
            'skip_download': True,
        },
        'expected_warnings': ['Unable to download JSON metadata'],
    }, {
        'url': 'https://vimeo.com/ondemand/nazmaalik',
        'only_matching': True,
    }, {
        'url': 'https://vimeo.com/ondemand/141692381',
        'only_matching': True,
    }, {
        'url': 'https://vimeo.com/ondemand/thelastcolony/150274832',
        'only_matching': True,
    }]
class VimeoChannelIE(VimeoBaseInfoExtractor):
    """
    Extractor for Vimeo channel pages. Produces a playlist of every video in
    the channel by walking the paginated HTML listing. Several other list
    extractors (user, group, watch-later, likes) subclass this and reuse the
    pagination machinery by overriding class attributes and _page_url().
    """
    IE_NAME = 'vimeo:channel'
    _VALID_URL = r'https://vimeo\.com/channels/(?P<id>[^/?#]+)/?(?:$|[?#])'
    # Presence of this pattern in a listing page means another page follows.
    _MORE_PAGES_INDICATOR = r'<a.+?rel="next"'
    # Fixed playlist title; when None, the title is scraped using _TITLE_RE.
    _TITLE = None
    _TITLE_RE = r'<link rel="alternate"[^>]+?title="(.*?)"'
    _TESTS = [{
        'url': 'https://vimeo.com/channels/tributes',
        'info_dict': {
            'id': 'tributes',
            'title': 'Vimeo Tributes',
        },
        'playlist_mincount': 25,
    }]
    _BASE_URL_TEMPL = 'https://vimeo.com/channels/%s'

    def _page_url(self, base_url, pagenum):
        # Build the URL (or Request) for one page of the listing.
        return '%s/videos/page:%d/' % (base_url, pagenum)

    def _extract_list_title(self, webpage):
        # Use the fixed title if a subclass defines one, else scrape it.
        return self._TITLE or self._html_search_regex(
            self._TITLE_RE, webpage, 'list title', fatal=False)

    def _title_and_entries(self, list_id, base_url):
        # Generator: yields the playlist title first (on page 1), then one
        # url_result per video, across all pages.
        for pagenum in itertools.count(1):
            page_url = self._page_url(base_url, pagenum)
            webpage = self._download_webpage(
                page_url, list_id,
                'Downloading page %s' % pagenum)

            if pagenum == 1:
                yield self._extract_list_title(webpage)

            # Try extracting href first since not all videos are available via
            # short https://vimeo.com/id URL (e.g. https://vimeo.com/channels/tributes/6213729)
            clips = re.findall(
                r'id="clip_(\d+)"[^>]*>\s*<a[^>]+href="(/(?:[^/]+/)*\1)(?:[^>]+\btitle="([^"]+)")?', webpage)
            if clips:
                for video_id, video_url, video_title in clips:
                    yield self.url_result(
                        compat_urlparse.urljoin(base_url, video_url),
                        VimeoIE.ie_key(), video_id=video_id, video_title=video_title)
            # More relaxed fallback
            else:
                for video_id in re.findall(r'id=["\']clip_(\d+)', webpage):
                    yield self.url_result(
                        'https://vimeo.com/%s' % video_id,
                        VimeoIE.ie_key(), video_id=video_id)

            if re.search(self._MORE_PAGES_INDICATOR, webpage, re.DOTALL) is None:
                break

    def _extract_videos(self, list_id, base_url):
        # The generator's first item is the title; the remainder are entries.
        title_and_entries = self._title_and_entries(list_id, base_url)
        list_title = next(title_and_entries)
        return self.playlist_result(title_and_entries, list_id, list_title)

    def _real_extract(self, url):
        channel_id = self._match_id(url)
        return self._extract_videos(channel_id, self._BASE_URL_TEMPL % channel_id)
class VimeoUserIE(VimeoChannelIE):
    """Extractor for a Vimeo user's uploads page (playlist of all videos)."""
    IE_NAME = 'vimeo:user'
    _VALID_URL = r'https://vimeo\.com/(?!(?:[0-9]+|watchlater)(?:$|[?#/]))(?P<id>[^/]+)(?:/videos|[#?]|$)'
    _TITLE_RE = r'<a[^>]+?class="user">([^<>]+?)</a>'
    _TESTS = [{
        'url': 'https://vimeo.com/nkistudio/videos',
        'info_dict': {
            'title': 'Nki',
            'id': 'nkistudio',
        },
        'playlist_mincount': 66,
    }]
    _BASE_URL_TEMPL = 'https://vimeo.com/%s'
class VimeoAlbumIE(VimeoBaseInfoExtractor):
    """
    Extractor for Vimeo albums/showcases. Album contents are fetched page
    by page from the Vimeo API using a JWT obtained from the viewer endpoint;
    password-protected albums are unlocked via the showcase auth endpoint.
    """
    IE_NAME = 'vimeo:album'
    _VALID_URL = r'https://vimeo\.com/(?:album|showcase)/(?P<id>\d+)(?:$|[?#]|/(?!video))'
    _TITLE_RE = r'<header id="page_header">\n\s*<h1>(.*?)</h1>'
    _TESTS = [{
        'url': 'https://vimeo.com/album/2632481',
        'info_dict': {
            'id': '2632481',
            'title': 'Staff Favorites: November 2013',
        },
        'playlist_mincount': 13,
    }, {
        'note': 'Password-protected album',
        'url': 'https://vimeo.com/album/3253534',
        'info_dict': {
            'title': 'test',
            'id': '3253534',
        },
        'playlist_count': 1,
        'params': {
            'videopassword': 'youtube-dl',
        }
    }]
    _PAGE_SIZE = 100

    def _fetch_page(self, album_id, authorization, hashed_pass, page):
        """
        Yield url_results for one API page of album videos.

        `page` is zero-based (OnDemandPagedList convention) while the Vimeo
        API is one-based, hence the +1. `authorization` is the viewer JWT;
        `hashed_pass` is the album password token, or None.
        """
        api_page = page + 1
        query = {
            'fields': 'link,uri',
            'page': api_page,
            'per_page': self._PAGE_SIZE,
        }
        if hashed_pass:
            query['_hashed_pass'] = hashed_pass
        try:
            videos = self._download_json(
                'https://api.vimeo.com/albums/%s/videos' % album_id,
                album_id, 'Downloading page %d' % api_page, query=query, headers={
                    'Authorization': 'jwt ' + authorization,
                })['data']
        except ExtractorError as e:
            # HTTP 400 signals a page past the end of the album: stop paging
            # quietly. Any other error must propagate; previously it was
            # silently swallowed here, which then raised a confusing
            # NameError on the undefined `videos` below.
            if isinstance(e.cause, compat_HTTPError) and e.cause.code == 400:
                return
            raise
        for video in videos:
            link = video.get('link')
            if not link:
                continue
            uri = video.get('uri')
            video_id = self._search_regex(r'/videos/(\d+)', uri, 'video_id', default=None) if uri else None
            yield self.url_result(link, VimeoIE.ie_key(), video_id)

    def _real_extract(self, url):
        album_id = self._match_id(url)
        # Obtain viewer info (JWT, xsrft token, vuid) from the API; fall back
        # to scraping the bootstrap data from the album webpage.
        viewer = self._download_json(
            'https://vimeo.com/_rv/viewer', album_id, fatal=False)
        if not viewer:
            webpage = self._download_webpage(url, album_id)
            viewer = self._parse_json(self._search_regex(
                r'bootstrap_data\s*=\s*({.+?})</script>',
                webpage, 'bootstrap data'), album_id)['viewer']
        jwt = viewer['jwt']
        album = self._download_json(
            'https://api.vimeo.com/albums/' + album_id,
            album_id, headers={'Authorization': 'jwt ' + jwt},
            query={'fields': 'description,name,privacy'})
        hashed_pass = None
        if try_get(album, lambda x: x['privacy']['view']) == 'password':
            password = self.get_param('videopassword')
            if not password:
                raise ExtractorError(
                    'This album is protected by a password, use the --video-password option',
                    expected=True)
            self._set_vimeo_cookie('vuid', viewer['vuid'])
            try:
                hashed_pass = self._download_json(
                    'https://vimeo.com/showcase/%s/auth' % album_id,
                    album_id, 'Verifying the password', data=urlencode_postdata({
                        'password': password,
                        'token': viewer['xsrft'],
                    }), headers={
                        'X-Requested-With': 'XMLHttpRequest',
                    })['hashed_pass']
            except ExtractorError as e:
                if isinstance(e.cause, compat_HTTPError) and e.cause.code == 401:
                    raise ExtractorError('Wrong password', expected=True)
                raise
        # Lazily paged playlist: pages are only downloaded when consumed.
        entries = OnDemandPagedList(functools.partial(
            self._fetch_page, album_id, jwt, hashed_pass), self._PAGE_SIZE)
        return self.playlist_result(
            entries, album_id, album.get('name'), album.get('description'))
class VimeoGroupsIE(VimeoChannelIE):
    """Extractor for Vimeo group pages (playlist of the group's videos)."""
    IE_NAME = 'vimeo:group'
    _VALID_URL = r'https://vimeo\.com/groups/(?P<id>[^/]+)(?:/(?!videos?/\d+)|$)'
    _TESTS = [{
        'url': 'https://vimeo.com/groups/meetup',
        'info_dict': {
            'id': 'meetup',
            'title': 'Vimeo Meetup!',
        },
        'playlist_mincount': 27,
    }]
    _BASE_URL_TEMPL = 'https://vimeo.com/groups/%s'
class VimeoReviewIE(VimeoBaseInfoExtractor):
    """Extractor for Vimeo review pages (vimeo.com/<user>/review/<id>/<hash>)."""
    IE_NAME = 'vimeo:review'
    IE_DESC = 'Review pages on vimeo'
    _VALID_URL = r'(?P<url>https://vimeo\.com/[^/]+/review/(?P<id>[^/]+)/[0-9a-f]{10})'
    _TESTS = [{
        'url': 'https://vimeo.com/user21297594/review/75524534/3c257a1b5d',
        'md5': 'c507a72f780cacc12b2248bb4006d253',
        'info_dict': {
            'id': '75524534',
            'ext': 'mp4',
            'title': "DICK HARDWICK 'Comedian'",
            'uploader': 'Richard Hardwick',
            'uploader_id': 'user21297594',
            'description': "Comedian Dick Hardwick's five minute demo filmed in front of a live theater audience.\nEdit by Doug Mattocks",
        },
    }, {
        'note': 'video player needs Referer',
        'url': 'https://vimeo.com/user22258446/review/91613211/13f927e053',
        'md5': '6295fdab8f4bf6a002d058b2c6dce276',
        'info_dict': {
            'id': '91613211',
            'ext': 'mp4',
            'title': 're:(?i)^Death by dogma versus assembling agile . Sander Hoogendoorn',
            'uploader': 'DevWeek Events',
            'duration': 2773,
            'thumbnail': r're:^https?://.*\.jpg$',
            'uploader_id': 'user22258446',
        },
        'skip': 'video gone',
    }, {
        'note': 'Password protected',
        'url': 'https://vimeo.com/user37284429/review/138823582/c4d865efde',
        'info_dict': {
            'id': '138823582',
            'ext': 'mp4',
            'title': 'EFFICIENT PICKUP MASTERCLASS MODULE 1',
            'uploader': 'TMB',
            'uploader_id': 'user37284429',
        },
        'params': {
            'videopassword': 'holygrail',
        },
        'skip': 'video gone',
    }]

    def _real_initialize(self):
        self._login()

    def _real_extract(self, url):
        page_url, video_id = self._match_valid_url(url).groups()
        # The review data endpoint mirrors the page URL with /review/data/.
        data = self._download_json(
            page_url.replace('/review/', '/review/data/'), video_id)
        if data.get('isLocked') is True:
            # Password-protected review: unlock it, then pull the player
            # config URL from the clip page configuration of the video page.
            video_password = self._get_video_password()
            viewer = self._download_json(
                'https://vimeo.com/_rv/viewer', video_id)
            webpage = self._verify_video_password(
                'https://vimeo.com/' + video_id, video_id,
                video_password, viewer['xsrft'], viewer['vuid'])
            clip_page_config = self._parse_json(self._search_regex(
                r'window\.vimeo\.clip_page_config\s*=\s*({.+?});',
                webpage, 'clip page config'), video_id)
            config_url = clip_page_config['player']['config_url']
            clip_data = clip_page_config.get('clip') or {}
        else:
            clip_data = data['clipData']
            config_url = clip_data['configUrl']
        config = self._download_json(config_url, video_id)
        info_dict = self._parse_config(config, video_id)
        # The original source file may be downloadable via the /action endpoint.
        source_format = self._extract_original_format(
            page_url + '/action', video_id)
        if source_format:
            info_dict['formats'].append(source_format)
        self._vimeo_sort_formats(info_dict['formats'])
        info_dict['description'] = clean_html(clip_data.get('description'))
        return info_dict
class VimeoWatchLaterIE(VimeoChannelIE):
    """Extractor for the authenticated user's Watch Later list."""
    IE_NAME = 'vimeo:watchlater'
    IE_DESC = 'Vimeo watch later list, "vimeowatchlater" keyword (requires authentication)'
    _VALID_URL = r'https://vimeo\.com/(?:home/)?watchlater|:vimeowatchlater'
    _TITLE = 'Watch Later'
    _LOGIN_REQUIRED = True
    _TESTS = [{
        'url': 'https://vimeo.com/watchlater',
        'only_matching': True,
    }]

    def _real_initialize(self):
        self._login()

    def _page_url(self, base_url, pagenum):
        # Returns a Request object rather than a plain URL (the base-class
        # pagination loop accepts either).
        url = '%s/page:%d/' % (base_url, pagenum)
        request = sanitized_Request(url)
        # Set the header to get a partial html page with the ids,
        # the normal page doesn't contain them.
        request.add_header('X-Requested-With', 'XMLHttpRequest')
        return request

    def _real_extract(self, url):
        return self._extract_videos('watchlater', 'https://vimeo.com/watchlater')
class VimeoLikesIE(VimeoChannelIE):
    """Extractor for the list of videos a Vimeo user has liked."""
    _VALID_URL = r'https://(?:www\.)?vimeo\.com/(?P<id>[^/]+)/likes/?(?:$|[?#]|sort:)'
    IE_NAME = 'vimeo:likes'
    IE_DESC = 'Vimeo user likes'
    _TESTS = [{
        'url': 'https://vimeo.com/user755559/likes/',
        'playlist_mincount': 293,
        'info_dict': {
            'id': 'user755559',
            'title': 'urza’s Likes',
        },
    }, {
        'url': 'https://vimeo.com/stormlapse/likes',
        'only_matching': True,
    }]

    def _page_url(self, base_url, pagenum):
        return '%s/page:%d/' % (base_url, pagenum)

    def _real_extract(self, url):
        user_id = self._match_id(url)
        return self._extract_videos(user_id, 'https://vimeo.com/%s/likes' % user_id)
class VHXEmbedIE(VimeoBaseInfoExtractor):
    """Extractor for VHX (Vimeo OTT) embed pages."""
    IE_NAME = 'vhx:embed'
    _VALID_URL = r'https?://embed\.vhx\.tv/videos/(?P<id>\d+)'

    @staticmethod
    def _extract_url(webpage):
        # Locate the first VHX embed iframe in the page, if any, and return
        # its unescaped src URL.
        match = re.search(
            r'<iframe[^>]+src="(https?://embed\.vhx\.tv/videos/\d+[^"]*)"', webpage)
        if match is None:
            return None
        return unescapeHTML(match.group(1))

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)
        # The player configuration URL lives in the window.OTTData JS object.
        ott_data = self._parse_json(
            self._search_regex(
                r'window\.OTTData\s*=\s*({.+})', webpage, 'ott data'),
            video_id, js_to_json)
        config = self._download_json(ott_data['config_url'], video_id)
        info = self._parse_config(config, video_id)
        info['id'] = video_id
        self._vimeo_sort_formats(info['formats'])
        return info
| 41.248103 | 166 | 0.526074 |
30a77ed8e870ba9a8f13217ca61e746362b9a56e | 72,901 | py | Python | code/rmgpy/molecule/draw.py | Molecular-Image-Recognition/Molecular-Image-Recognition | 413e74bb526f56077cd5f70bb41fb7a075636174 | [
"MIT"
] | 7 | 2017-10-04T16:04:14.000Z | 2021-03-27T21:54:41.000Z | code/rmgpy/molecule/draw.py | tiger-tiger/Molecular-Image-Recognition | 413e74bb526f56077cd5f70bb41fb7a075636174 | [
"MIT"
] | null | null | null | code/rmgpy/molecule/draw.py | tiger-tiger/Molecular-Image-Recognition | 413e74bb526f56077cd5f70bb41fb7a075636174 | [
"MIT"
] | 6 | 2017-10-04T15:37:05.000Z | 2021-12-29T06:50:16.000Z | #!/usr/bin/env python
# encoding: utf-8
################################################################################
#
# RMG - Reaction Mechanism Generator
#
# Copyright (c) 2002-2017 Prof. William H. Green (whgreen@mit.edu),
# Prof. Richard H. West (r.west@neu.edu) and the RMG Team (rmg_dev@mit.edu)
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the 'Software'),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
################################################################################
"""
This module provides functionality for automatic two-dimensional drawing of the
`skeletal formulae <http://en.wikipedia.org/wiki/Skeletal_formula>`_ of a wide
variety of organic and inorganic molecules. The general method for creating
these drawings is to utilize the :meth:`draw()` method of the :class:`Molecule`
you wish to draw; this wraps a call to :meth:`MoleculeDrawer.draw()`, where the
molecule drawing algorithm begins. Advanced use may require use of the
:class:`MoleculeDrawer` class directly.
The `Cairo <http://cairographics.org/>`_ 2D graphics library is used to create
the drawings. The :class:`MoleculeDrawer` class module will fail gracefully if
Cairo is not installed.
The implementation uses the 2D coordinate generation of rdKit to find coordinates,
then uses Cairo to render the atom.
"""
import math
import numpy
import os.path
import re
import logging
from rmgpy.qm.molecule import Geometry
from rdkit.Chem import AllChem
from numpy.linalg import LinAlgError
################################################################################
def createNewSurface(format, target=None, width=1024, height=768):
"""
Create a new surface of the specified `format`:
"png" for :class:`ImageSurface`
"svg" for :class:`SVGSurface`
"pdf" for :class:`PDFSurface`
"ps" for :class:`PSSurface`
The surface will be written to the `target` parameter , which can be a
path to save the surface to, or file-like object with a `write()` method.
You can also optionally specify the `width` and `height` of the generated
surface if you know what it is; otherwise a default size of 1024 by 768 is
used.
"""
try:
import cairocffi as cairo
except ImportError:
import cairo
format = format.lower()
if format == 'png':
surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, int(width), int(height))
elif format == 'svg':
surface = cairo.SVGSurface(target, width, height)
elif format == 'pdf':
surface = cairo.PDFSurface(target, width, height)
elif format == 'ps':
surface = cairo.PSSurface(target, width, height)
else:
raise ValueError('Invalid value "{0}" for type parameter; valid values are "png", "svg", "pdf", and "ps".'.format(type))
return surface
################################################################################
class MoleculeDrawer:
"""
This class provides functionality for drawing the skeletal formula of
molecules using the Cairo 2D graphics engine. The most common use case is
simply::
MoleculeDrawer().draw(molecule, format='png', path='molecule.png')
where ``molecule`` is the :class:`Molecule` object to draw. You can also
pass a dict of options to the constructor to affect how the molecules are
drawn.
"""
def __init__(self, options=None):
self.options = {
'fontFamily': 'sans',
'fontSizeNormal': 12,
'fontSizeSubscript': 8,
'bondLength': 24,
'padding': 2,
}
if options: self.options.update(options)
self.clear()
def clear(self):
self.molecule = None
self.cycles = None
self.ringSystems = None
self.coordinates = None
self.symbols = None
self.implicitHydrogens = None
self.left = 0.0
self.top = 0.0
self.right = 0.0
self.bottom = 0.0
self.surface = None
self.cr = None
    def draw(self, molecule, format, target=None):
        """
        Draw the given `molecule` using the given image `format` - pdf, svg,
        ps, or png. If `target` is given, the drawing is saved to that
        location on disk (a path) or written to it (a file-like object).
        Drawing options are the dict passed to the constructor.
        This function returns the Cairo surface and context used to create the
        drawing, as well as a bounding box for the molecule being drawn as the
        tuple (`left`, `top`, `width`, `height`). Returns None (or
        (None, None, None) on coordinate failure) if drawing is not possible.
        """
        # The Cairo 2D graphics library (and its Python wrapper) is required for
        # the molecule drawing algorithm
        try:
            import cairocffi as cairo
        except ImportError:
            try:
                import cairo
            except ImportError:
                logging.error('Cairo not found; molecule will not be drawn.')
                return

        # Make a copy of the molecule so we don't modify the original
        self.molecule = molecule.copy(deep=True)

        # Remove all unlabeled hydrogen atoms from the copied atoms and bonds, as
        # they are not drawn
        # However, if this would remove all atoms, then don't remove any
        atomsToRemove = []
        self.implicitHydrogens = {}
        for atom in self.molecule.atoms:
            if atom.isHydrogen() and atom.label == '': atomsToRemove.append(atom)
        if len(atomsToRemove) < len(self.molecule.atoms):
            for atom in atomsToRemove:
                # Count the removed hydrogens on their heavy-atom neighbors so
                # they can be rendered as part of the atom label (e.g. "CH3").
                for atom2 in atom.bonds:
                    try:
                        self.implicitHydrogens[atom2] += 1
                    except KeyError:
                        self.implicitHydrogens[atom2] = 1
                self.molecule.removeAtom(atom)

        # Generate information about any cycles present in the molecule, as
        # they will need special attention
        self.__findRingGroups()

        # Handle carbon monoxide special case
        if self.molecule.getFormula() == 'CO' and len(atomsToRemove) == 0:
            # RDKit does not accept atom type Ot
            self.molecule.removeAtom(self.molecule.atoms[-1])
            self.symbols = ['CO']
            self.molecule.atoms[0].charge = 0  # don't label the C as - if you're not drawing the O with a +
            self.coordinates = numpy.array([[0,0]], numpy.float64)
        else:
            # Generate the coordinates to use to draw the molecule
            try:
                # before getting coordinates, make all bonds single and then
                # replace the bonds after generating coordinates. This avoids
                # bugs with RDKit
                old_bond_dictionary = self.__make_single_bonds()
                self.__generateCoordinates()
                self.__replace_bonds(old_bond_dictionary)
                # Generate labels to use
                self.__generateAtomLabels()
            except (ValueError, numpy.linalg.LinAlgError), e:
                logging.error('Error while drawing molecule {0}: {1}'.format(molecule.toSMILES(), e))
                import sys, traceback
                exc_type, exc_value, exc_traceback = sys.exc_info()
                traceback.print_exc()
                return None, None, None
            except KeyError:
                logging.error('KeyError occured when drawing molecule, likely because' +\
                    ' the molecule contained non-standard bond orders in the' +\
                    ' getResonanceHybrid method. These cannot be drawn since' +\
                    ' they cannot be sent to RDKit for coordinate placing.')
                raise

        # Flip the vertical axis (Cairo's y axis grows downward) and scale
        # from unit bond lengths to pixels.
        self.coordinates[:,1] *= -1
        self.coordinates *= self.options['bondLength']

        # Handle some special cases
        if self.symbols == ['H','H']:
            # Render as H2 instead of H-H
            self.molecule.removeAtom(self.molecule.atoms[-1])
            self.symbols = ['H2']
            self.coordinates = numpy.array([[0,0]], numpy.float64)
        elif self.symbols == ['O', 'O']:
            # Render as O2 instead of O-O
            self.molecule.removeAtom(self.molecule.atoms[-1])
            self.molecule.atoms[0].radicalElectrons = 0
            self.symbols = ['O2']
            self.coordinates = numpy.array([[0,0]], numpy.float64)
        elif self.symbols == ['OH', 'O'] or self.symbols == ['O', 'OH']:
            # Render as HO2 instead of HO-O or O-OH
            self.molecule.removeAtom(self.molecule.atoms[-1])
            self.symbols = ['O2H']
            self.coordinates = numpy.array([[0,0]], numpy.float64)
        elif self.symbols == ['OH', 'OH']:
            # Render as H2O2 instead of HO-OH or O-OH
            self.molecule.removeAtom(self.molecule.atoms[-1])
            self.symbols = ['O2H2']
            self.coordinates = numpy.array([[0,0]], numpy.float64)
        elif self.symbols == ['O', 'C', 'O']:
            # Render as CO2 instead of O=C=O
            self.molecule.removeAtom(self.molecule.atoms[0])
            self.molecule.removeAtom(self.molecule.atoms[-1])
            self.symbols = ['CO2']
            self.coordinates = numpy.array([[0,0]], numpy.float64)

        # Create a dummy surface to draw to, since we don't know the bounding rect
        # We will copy this to another surface with the correct bounding rect
        surface0 = createNewSurface(format=format, target=None)
        cr0 = cairo.Context(surface0)

        # Render using Cairo; this first pass only computes the bounding box
        # (self.left/top/right/bottom).
        self.render(cr0)

        # Create the real surface with the appropriate size
        xoff = self.left
        yoff = self.top
        width = self.right - self.left
        height = self.bottom - self.top
        self.surface = createNewSurface(format=format, target=target, width=width, height=height)
        self.cr = cairo.Context(self.surface)

        # Draw white background
        self.cr.set_source_rgba(1.0, 1.0, 1.0, 1.0)
        self.cr.paint()

        # Second render pass, shifted so the drawing fits the surface.
        self.render(self.cr, offset=(-xoff,-yoff))

        if target is not None:
            # Finish Cairo drawing
            # Save PNG of drawing if appropriate
            if isinstance(target, str):
                ext = os.path.splitext(target)[1].lower()
                if ext == '.png':
                    self.surface.write_to_png(target)
                else:
                    self.surface.finish()
            else:
                self.surface.finish()

        return self.surface, self.cr, (xoff, yoff, width, height)
def __findRingGroups(self):
"""
Find all of the cycles in the current molecule, and group them into
sets of adjacent cycles.
"""
# Find all of the cycles in the molecule
self.cycles = self.molecule.getSmallestSetOfSmallestRings()
self.ringSystems = []
# If the molecule contains cycles, find them and group them
if len(self.cycles) > 0:
# Split the list of cycles into groups
# Each atom in the molecule should belong to exactly zero or one such groups
for cycle in self.cycles:
found = False
for ringSystem in self.ringSystems:
for ring in ringSystem:
if any([atom in ring for atom in cycle]) and not found:
ringSystem.append(cycle)
found = True
if not found:
self.ringSystems.append([cycle])
    def __generateCoordinates(self):
        """
        Generate the 2D coordinates to be used when drawing the current
        molecule, stored in `self.coordinates` as one (x, y) row per atom in
        `self.molecule.atoms`. Uncharged molecules are laid out via RDKit's
        2D coordinate generation; molecules carrying any formal charge use
        the legacy in-house backbone/ring layout instead (RDKit rejects some
        of the charged atom types, e.g. N5 types).
        """
        atoms = self.molecule.atoms
        Natoms = len(atoms)
        # Detect whether any atom carries a formal charge; if so, fall back
        # to the legacy coordinate algorithm below.
        flag_charge = 0
        for atom in self.molecule.atoms:
            if atom.charge != 0:  # atomType.label in ['N5s','N5d','N5dd','N5t','N5b']:
                flag_charge = 1
                break
        # Initialize array of coordinates
        self.coordinates = coordinates = numpy.zeros((Natoms, 2))
        if flag_charge == 1:
            # If there are only one or two atoms to draw, then determining the
            # coordinates is trivial
            if Natoms == 1:
                self.coordinates[0,:] = [0.0, 0.0]
                return self.coordinates
            elif Natoms == 2:
                self.coordinates[0,:] = [-0.5, 0.0]
                self.coordinates[1,:] = [0.5, 0.0]
                return self.coordinates
            if len(self.cycles) > 0:
                # Cyclic molecule
                backbone = self.__findCyclicBackbone()
                self.__generateRingSystemCoordinates(backbone)
                # Flatten backbone so that it contains a list of the atoms in the
                # backbone, rather than a list of the cycles in the backbone
                backbone = list(set([atom for cycle in backbone for atom in cycle]))
            else:
                # Straight chain molecule
                backbone = self.__findStraightChainBackbone()
                self.__generateStraightChainCoordinates(backbone)
            # If backbone is linear, then rotate so that the bond is parallel to the
            # horizontal axis
            vector0 = coordinates[atoms.index(backbone[1]),:] - coordinates[atoms.index(backbone[0]),:]
            for i in range(2, len(backbone)):
                vector = coordinates[atoms.index(backbone[i]),:] - coordinates[atoms.index(backbone[i-1]),:]
                if numpy.linalg.norm(vector - vector0) > 1e-4:
                    break
            else:
                # for-else: only executed when the loop never broke, i.e. all
                # successive backbone bond vectors are (nearly) identical.
                angle = math.atan2(vector0[0], vector0[1]) - math.pi / 2
                rot = numpy.array([[math.cos(angle), math.sin(angle)], [-math.sin(angle), math.cos(angle)]], numpy.float64)
                # NOTE(review): this rebinds the *local* name `coordinates` to a
                # new array; self.coordinates still references the unrotated
                # array, and draw() reads self.coordinates after calling this
                # method. Looks like the rotation may be lost -- verify.
                coordinates = numpy.dot(coordinates, rot)
            # Center backbone at origin
            xmin = numpy.min(coordinates[:,0])
            xmax = numpy.max(coordinates[:,0])
            ymin = numpy.min(coordinates[:,1])
            ymax = numpy.max(coordinates[:,1])
            xmid = 0.5 * (xmax + xmin)
            ymid = 0.5 * (ymax + ymin)
            for atom in backbone:
                index = atoms.index(atom)
                coordinates[index,0] -= xmid
                coordinates[index,1] -= ymid
            # We now proceed by calculating the coordinates of the functional groups
            # attached to the backbone
            # Each functional group is independent, although they may contain further
            # branching and cycles
            # In general substituents should try to grow away from the origin to
            # minimize likelihood of overlap
            self.__generateNeighborCoordinates(backbone)
            return coordinates
        else:
            # Use rdkit 2D coordinate generation:
            # Generate the RDkit molecule from the RDkit molecule, use geometry
            # in order to match the atoms in the rdmol with the atoms in the
            # RMG molecule (which is required to extract coordinates).
            self.geometry = Geometry(None, None, self.molecule, None)
            rdmol, rdAtomIdx = self.geometry.rd_build()
            AllChem.Compute2DCoords(rdmol)
            # Extract the coordinates from each atom.
            for atom in atoms:
                index = rdAtomIdx[atom]
                point = rdmol.GetConformer(0).GetAtomPosition(index)
                coordinates[index,:] = [point.x*0.6, point.y*0.6]
            # RDKit generates some molecules more vertically than horizontally,
            # Especially linear ones. This will reflect any molecule taller than
            # it is wide across the line y=x
            ranges = numpy.ptp(coordinates, axis = 0)
            if ranges[1] > ranges[0]:
                temp = numpy.copy(coordinates)
                coordinates[:,0] = temp[:,1]
                coordinates[:,1] = temp[:,0]
            return coordinates
def __findCyclicBackbone(self):
"""
Return a set of atoms to use as the "backbone" of the molecule. For
cyclics this is simply the largest ring system.
"""
count = [len(set([atom for ring in ringSystem for atom in ring])) for ringSystem in self.ringSystems]
index = 0
for i in range(1, len(self.ringSystems)):
if count[i] > count[index]:
index = i
return self.ringSystems[index]
def __findStraightChainBackbone(self):
"""
Return a set of atoms to use as the "backbone" of the molecule. For
non-cyclics this is the largest straight chain between atoms. If carbon
atoms are present, then we define the backbone only in terms of them.
"""
# Find the terminal atoms - those that only have one explicit bond
terminalAtoms = [atom for atom in self.molecule.atoms if len(atom.bonds) == 1]
assert len(terminalAtoms) >= 2
# Starting from each terminal atom, find the longest straight path to
# another terminal
# The longest found is the backbone
backbone = []
paths = []
for atom in terminalAtoms:
paths.extend(self.__findStraightChainPaths([atom]))
# Remove any paths that don't end in a terminal atom
# (I don't think this should remove any!)
paths = [path for path in paths if path[-1] in terminalAtoms]
# Remove all paths shorter than the maximum
length = max([len(path) for path in paths])
paths = [path for path in paths if len(path) == length]
# Prefer the paths with the most carbon atoms
carbons = [sum([1 for atom in path if atom.isCarbon()]) for path in paths]
maxCarbons = max(carbons)
paths = [path for path, carbon in zip(paths, carbons) if carbon == maxCarbons]
# At this point we could choose any remaining path, so simply choose the first
backbone = paths[0]
assert len(backbone) > 1
assert backbone[0] in terminalAtoms
assert backbone[-1] in terminalAtoms
return backbone
def __findStraightChainPaths(self, atoms0):
"""
Finds the paths containing the list of atoms `atoms0` in the
current molecule. The atoms are assumed to already be in a path, with
``atoms0[0]`` being a terminal atom.
"""
atom1 = atoms0[-1]
paths = []
for atom2 in atom1.bonds:
if atom2 not in atoms0:
atoms = atoms0[:]
atoms.append(atom2)
if not self.molecule.isAtomInCycle(atom2):
paths.extend(self.__findStraightChainPaths(atoms))
if len(paths) == 0:
paths.append(atoms0[:])
return paths
    def __generateRingSystemCoordinates(self, atoms):
        """
        For a ring system composed of the given cycles of `atoms` (a list of
        rings, each ring a list of atoms), update `self.coordinates` for each
        atom in the system. The largest ring is placed first as a regular
        polygon centered at the origin; remaining rings are then laid out one
        at a time, each positioned relative to the already-placed ring(s) it
        shares one or more atoms with.
        """
        coordinates = self.coordinates
        atoms = atoms[:]
        processed = []

        # Lay out largest cycle in ring system first
        cycle = atoms[0]
        for cycle0 in atoms[1:]:
            if len(cycle0) > len(cycle):
                cycle = cycle0
        # Regular polygon: circumradius chosen so edges have unit length.
        angle = - 2 * math.pi / len(cycle)
        radius = 1.0 / (2 * math.sin(math.pi / len(cycle)))
        for i, atom in enumerate(cycle):
            index = self.molecule.atoms.index(atom)
            coordinates[index,:] = [math.cos(math.pi / 2 + i * angle), math.sin(math.pi / 2 + i * angle)]
            coordinates[index,:] *= radius
        atoms.remove(cycle)
        processed.append(cycle)

        # If there are other cycles, then try to lay them out as well
        while len(atoms) > 0:

            # Find the largest cycle that shares one or two atoms with a ring that's
            # already been processed
            cycle = None
            for cycle0 in atoms:
                for cycle1 in processed:
                    count = sum([1 for atom in cycle0 if atom in cycle1])
                    if (count == 1 or count == 2):
                        if cycle is None or len(cycle0) > len(cycle): cycle = cycle0
                        # NOTE(review): the following assignment has no effect
                        # (cycle0 is the loop variable and is immediately
                        # replaced on the next iteration) -- looks vestigial.
                        cycle0 = cycle1
            # NOTE(review): if no unprocessed cycle shares 1-2 atoms with a
            # processed one, cycle stays None and the next line raises
            # ValueError -- presumably unreachable for connected ring systems.
            atoms.remove(cycle)

            # Shuffle atoms in cycle such that the common atoms come first
            # Also find the average center of the processed cycles that touch the
            # current cycles
            found = False
            commonAtoms = []
            count = 0
            center0 = numpy.zeros(2, numpy.float64)
            for cycle1 in processed:
                found = False
                for atom in cycle1:
                    if atom in cycle and atom not in commonAtoms:
                        commonAtoms.append(atom)
                        found = True
                if found:
                    center1 = numpy.zeros(2, numpy.float64)
                    for atom in cycle1:
                        # NOTE(review): indexes coordinates by the atom's
                        # position within cycle1, whereas every other lookup
                        # uses self.molecule.atoms.index(atom); this looks
                        # suspicious -- verify before relying on center0.
                        center1 += coordinates[cycle1.index(atom),:]
                    center1 /= len(cycle1)
                    center0 += center1
                    count += 1
            center0 /= count

            # Rotate the cycle list so the shared atoms are contiguous and
            # come first, in a consistent direction.
            if len(commonAtoms) > 1:
                index0 = cycle.index(commonAtoms[0])
                index1 = cycle.index(commonAtoms[1])
                if (index0 == 0 and index1 == len(cycle) - 1) or (index1 == 0 and index0 == len(cycle) - 1):
                    cycle = cycle[-1:] + cycle[0:-1]
                if cycle.index(commonAtoms[1]) < cycle.index(commonAtoms[0]):
                    cycle.reverse()
                index = cycle.index(commonAtoms[0])
                cycle = cycle[index:] + cycle[0:index]

            # Determine center of cycle based on already-assigned positions of
            # common atoms (which won't be changed)
            if len(commonAtoms) == 1 or len(commonAtoms) == 2:
                # Center of new cycle is reflection of center of adjacent cycle
                # across common atom or bond
                center = numpy.zeros(2, numpy.float64)
                for atom in commonAtoms:
                    center += coordinates[self.molecule.atoms.index(atom),:]
                center /= len(commonAtoms)
                vector = center - center0
                center += vector
                radius = 1.0 / (2 * math.sin(math.pi / len(cycle)))
            else:
                # Use any three points to determine the point equidistant from these
                # three; this is the center
                index0 = self.molecule.atoms.index(commonAtoms[0])
                index1 = self.molecule.atoms.index(commonAtoms[1])
                index2 = self.molecule.atoms.index(commonAtoms[2])
                A = numpy.zeros((2,2), numpy.float64)
                b = numpy.zeros((2), numpy.float64)
                A[0,:] = 2 * (coordinates[index1,:] - coordinates[index0,:])
                A[1,:] = 2 * (coordinates[index2,:] - coordinates[index0,:])
                b[0] = coordinates[index1,0]**2 + coordinates[index1,1]**2 - coordinates[index0,0]**2 - coordinates[index0,1]**2
                b[1] = coordinates[index2,0]**2 + coordinates[index2,1]**2 - coordinates[index0,0]**2 - coordinates[index0,1]**2
                center = numpy.linalg.solve(A, b)
                radius = numpy.linalg.norm(center - coordinates[index0,:])

            startAngle = 0.0; endAngle = 0.0
            if len(commonAtoms) == 1:
                # We will use the full 360 degrees to place the other atoms in the cycle
                startAngle = math.atan2(-vector[1], vector[0])
                endAngle = startAngle + 2 * math.pi
            elif len(commonAtoms) >= 2:
                # Divide other atoms in cycle equally among unused angle
                # NOTE(review): as above, these index coordinates by position
                # within `cycle` rather than via self.molecule.atoms.index();
                # looks suspicious -- verify.
                vector = coordinates[cycle.index(commonAtoms[-1]),:] - center
                startAngle = math.atan2(vector[1], vector[0])
                vector = coordinates[cycle.index(commonAtoms[0]),:] - center
                endAngle = math.atan2(vector[1], vector[0])

            # Place remaining atoms in cycle
            if endAngle < startAngle:
                endAngle += 2 * math.pi
                dAngle = (endAngle - startAngle) / (len(cycle) - len(commonAtoms) + 1)
            else:
                endAngle -= 2 * math.pi
                dAngle = (endAngle - startAngle) / (len(cycle) - len(commonAtoms) + 1)
            count = 1
            for i in range(len(commonAtoms), len(cycle)):
                angle = startAngle + count * dAngle
                index = self.molecule.atoms.index(cycle[i])
                # Check that we aren't reassigning any atom positions
                # This version assumes that no atoms belong at the origin, which is
                # usually fine because the first ring is centered at the origin
                if numpy.linalg.norm(coordinates[index,:]) < 1e-4:
                    vector = numpy.array([math.cos(angle), math.sin(angle)], numpy.float64)
                    coordinates[index,:] = center + radius * vector
                count += 1

            # We're done assigning coordinates for this cycle, so mark it as processed
            processed.append(cycle)
def __generateStraightChainCoordinates(self, atoms):
    """
    Update the coordinates for the linear straight chain of `atoms` in
    the current molecule.

    The first atom is placed at the origin and the second one unit away
    from it; each subsequent atom is placed one unit from its predecessor,
    rotating the direction vector by an angle that depends on the number
    of bonds at the predecessor, alternating sign via `rotatePositive` to
    produce the usual zig-zag skeletal pattern.
    """
    coordinates = self.coordinates
    # First atom goes at origin
    index0 = self.molecule.atoms.index(atoms[0])
    coordinates[index0,:] = [0.0, 0.0]
    # Second atom goes on x-axis (for now; this could be improved!)
    index1 = self.molecule.atoms.index(atoms[1])
    vector = numpy.array([1.0, 0.0], numpy.float64)
    if atoms[0].bonds[atoms[1]].isTriple():
        # Triple bonds are drawn linearly, so keep the horizontal direction
        rotatePositive = False
    else:
        # Start the zig-zag by tilting the first bond 30 degrees off horizontal
        rotatePositive = True
        rot = numpy.array([[math.cos(-math.pi / 6), math.sin(-math.pi / 6)], [-math.sin(-math.pi / 6), math.cos(-math.pi / 6)]], numpy.float64)
        vector = numpy.array([1.0, 0.0], numpy.float64)
        vector = numpy.dot(rot, vector)
    coordinates[index1,:] = coordinates[index0,:] + vector
    # Other atoms
    for i in range(2, len(atoms)):
        atom0 = atoms[i-2]
        atom1 = atoms[i-1]
        atom2 = atoms[i]
        index1 = self.molecule.atoms.index(atom1)
        index2 = self.molecule.atoms.index(atom2)
        bond0 = atom0.bonds[atom1]
        bond = atom1.bonds[atom2]
        # Angle of next bond depends on the number of bonds to the start atom
        numBonds = len(atom1.bonds)
        if numBonds == 2:
            if (bond0.isTriple() or bond.isTriple()) or (bond0.isDouble() and bond.isDouble()):
                # Rotate by 0 degrees towards horizontal axis (to get angle of 180)
                angle = 0.0
            else:
                # Rotate by 60 degrees towards horizontal axis (to get angle of 120)
                angle = math.pi / 3
        elif numBonds == 3:
            # Rotate by 60 degrees towards horizontal axis (to get angle of 120)
            angle = math.pi / 3
        elif numBonds == 4:
            # Rotate by 0 degrees towards horizontal axis (to get angle of 90)
            angle = 0.0
        elif numBonds == 5:
            # Rotate by 36 degrees towards horizontal axis (to get angle of 144)
            angle = math.pi / 5
        elif numBonds == 6:
            # Rotate by 0 degrees towards horizontal axis (to get angle of 180)
            angle = 0.0
        # NOTE(review): if numBonds falls outside 2..6, `angle` keeps its
        # value from the previous iteration (or is unbound on the first
        # pass) — confirm such valences cannot reach this code
        # Determine coordinates for atom
        if angle != 0:
            if not rotatePositive: angle = -angle
            rot = numpy.array([[math.cos(angle), math.sin(angle)], [-math.sin(angle), math.cos(angle)]], numpy.float64)
            vector = numpy.dot(rot, vector)
            rotatePositive = not rotatePositive
        coordinates[index2,:] = coordinates[index1,:] + vector
def __generateNeighborCoordinates(self, backbone):
    """
    Recursively update the coordinates for the atoms immediately adjacent
    to the atoms in the molecular `backbone`.

    For each backbone atom the angles of its already-placed bonds are
    collected. If those bonds are evenly spaced, remaining neighbors are
    placed by repeatedly rotating an existing bond vector into the first
    free slot; otherwise they are distributed evenly across the unused
    (reflex) angle. Each newly placed neighbor is then expanded via
    __generateFunctionalGroupCoordinates().
    """
    atoms = self.molecule.atoms
    coordinates = self.coordinates
    for i in range(len(backbone)):
        atom0 = backbone[i]
        index0 = atoms.index(atom0)
        # Determine bond angles of all previously-determined bond locations for
        # this atom
        bondAngles = []
        for atom1 in atom0.bonds:
            index1 = atoms.index(atom1)
            if atom1 in backbone:
                vector = coordinates[index1,:] - coordinates[index0,:]
                angle = math.atan2(vector[1], vector[0])
                bondAngles.append(angle)
        bondAngles.sort()
        # `regular` is True when every pair of known bond angles differs by
        # some integer multiple of the ideal spacing (2*pi / number of bonds)
        bestAngle = 2 * math.pi / len(atom0.bonds)
        regular = True
        for angle1, angle2 in zip(bondAngles[0:-1], bondAngles[1:]):
            if all([abs(angle2 - angle1 - (i+1) * bestAngle) > 1e-4 for i in range(len(atom0.bonds))]):
                regular = False
        if regular:
            # All the bonds around each atom are equally spaced
            # We just need to fill in the missing bond locations
            # Determine rotation angle and matrix
            rot = numpy.array([[math.cos(bestAngle), -math.sin(bestAngle)], [math.sin(bestAngle), math.cos(bestAngle)]], numpy.float64)
            # Determine the vector of any currently-existing bond from this atom
            vector = None
            for atom1 in atom0.bonds:
                index1 = atoms.index(atom1)
                if atom1 in backbone or numpy.linalg.norm(coordinates[index1,:]) > 1e-4:
                    vector = coordinates[index1,:] - coordinates[index0,:]
            # Iterate through each neighboring atom to this backbone atom
            # If the neighbor is not in the backbone and does not yet have
            # coordinates, then we need to determine coordinates for it
            # (coordinates still near the origin mean "not yet placed")
            for atom1 in atom0.bonds:
                if atom1 not in backbone and numpy.linalg.norm(coordinates[atoms.index(atom1),:]) < 1e-4:
                    occupied = True; count = 0
                    # Rotate vector until we find an unoccupied location
                    while occupied and count < len(atom0.bonds):
                        count += 1; occupied = False
                        vector = numpy.dot(rot, vector)
                        for atom2 in atom0.bonds:
                            index2 = atoms.index(atom2)
                            if numpy.linalg.norm(coordinates[index2,:] - coordinates[index0,:] - vector) < 1e-4:
                                occupied = True
                    coordinates[atoms.index(atom1),:] = coordinates[index0,:] + vector
                    self.__generateFunctionalGroupCoordinates(atom0, atom1)
        else:
            # The bonds are not evenly spaced (e.g. due to a ring)
            # We place all of the remaining bonds evenly over the reflex angle
            startAngle = max(bondAngles)
            endAngle = min(bondAngles)
            # Unwrap the interval so it spans the larger (reflex) side
            if 0.0 < endAngle - startAngle < math.pi: endAngle += 2 * math.pi
            elif 0.0 > endAngle - startAngle > -math.pi: startAngle -= 2 * math.pi
            dAngle = (endAngle - startAngle) / (len(atom0.bonds) - len(bondAngles) + 1)
            index = 1
            for atom1 in atom0.bonds:
                if atom1 not in backbone and numpy.linalg.norm(coordinates[atoms.index(atom1),:]) < 1e-4:
                    angle = startAngle + index * dAngle
                    index += 1
                    vector = numpy.array([math.cos(angle), math.sin(angle)], numpy.float64)
                    vector /= numpy.linalg.norm(vector)
                    coordinates[atoms.index(atom1),:] = coordinates[index0,:] + vector
                    self.__generateFunctionalGroupCoordinates(atom0, atom1)
def __generateFunctionalGroupCoordinates(self, atom0, atom1):
    """
    For the functional group starting with the bond from `atom0` to `atom1`,
    generate the coordinates of the rest of the functional group. `atom0` is
    treated as if a terminal atom. `atom0` and `atom1` must already have their
    coordinates determined. This method is designed to be recursive: it calls
    itself for every unplaced neighbor of `atom1`, and delegates to the
    ring-system methods when `atom1` belongs to a ring.
    """
    atoms = self.molecule.atoms
    coordinates = self.coordinates
    index0 = atoms.index(atom0)
    index1 = atoms.index(atom1)
    # Determine the vector of any currently-existing bond from this atom
    # (We use the bond to the previous atom here)
    vector = coordinates[index0,:] - coordinates[index1,:]
    bondAngle = math.atan2(vector[1], vector[0])
    # Check to see if atom1 is in any cycles in the molecule
    ringSystem = None
    for ringSys in self.ringSystems:
        if any([atom1 in ring for ring in ringSys]):
            ringSystem = ringSys
    if ringSystem is not None:
        # atom1 is part of a ring system, so we need to process the entire
        # ring system at once
        # Generate coordinates for all atoms in the ring system
        self.__generateRingSystemCoordinates(ringSystem)
        cycleAtoms = list(set([atom for ring in ringSystem for atom in ring]))
        coordinates_cycle = numpy.zeros_like(self.coordinates)
        for atom in cycleAtoms:
            coordinates_cycle[atoms.index(atom),:] = coordinates[atoms.index(atom),:]
        # Rotate the ring system coordinates so that the line connecting atom1
        # and the center of mass of the ring is parallel to that between
        # atom0 and atom1
        center = numpy.zeros(2, numpy.float64)
        for atom in cycleAtoms:
            center += coordinates_cycle[atoms.index(atom),:]
        center /= len(cycleAtoms)
        vector0 = center - coordinates_cycle[atoms.index(atom1),:]
        angle = math.atan2(vector[1] - vector0[1], vector[0] - vector0[0])
        rot = numpy.array([[math.cos(angle), -math.sin(angle)], [math.sin(angle), math.cos(angle)]], numpy.float64)
        coordinates_cycle = numpy.dot(coordinates_cycle, rot)
        # Translate the ring system coordinates to the position of atom1
        coordinates_cycle += coordinates[atoms.index(atom1),:] - coordinates_cycle[atoms.index(atom1),:]
        for atom in cycleAtoms:
            coordinates[atoms.index(atom),:] = coordinates_cycle[atoms.index(atom),:]
        # Generate coordinates for remaining neighbors of ring system,
        # continuing to recurse as needed
        self.__generateNeighborCoordinates(cycleAtoms)
    else:
        # atom1 is not in any rings, so we can continue as normal
        # Determine rotation angle and matrix
        numBonds = len(atom1.bonds)
        angle = 0.0
        if numBonds == 2:
            bond0, bond = atom1.bonds.values()
            if (bond0.isTriple() or bond.isTriple()) or (bond0.isDouble() and bond.isDouble()):
                # Linear arrangement for triple or cumulated double bonds
                angle = math.pi
            else:
                angle = 2 * math.pi / 3
                # Make sure we're rotating such that we move away from the origin,
                # to discourage overlap of functional groups
                # NOTE(review): vector1 and vector2 are computed but never
                # used below — confirm whether they were meant to feed the
                # direction choice
                rot1 = numpy.array([[math.cos(angle), -math.sin(angle)], [math.sin(angle), math.cos(angle)]], numpy.float64)
                rot2 = numpy.array([[math.cos(angle), math.sin(angle)], [-math.sin(angle), math.cos(angle)]], numpy.float64)
                vector1 = coordinates[index1,:] + numpy.dot(rot1, vector)
                vector2 = coordinates[index1,:] + numpy.dot(rot2, vector)
                if bondAngle < -0.5 * math.pi or bondAngle > 0.5 * math.pi:
                    angle = abs(angle)
                else:
                    angle = -abs(angle)
        else:
            angle = 2 * math.pi / numBonds
        rot = numpy.array([[math.cos(angle), -math.sin(angle)], [math.sin(angle), math.cos(angle)]], numpy.float64)
        # Iterate through each neighboring atom to this backbone atom
        # If the neighbor is not in the backbone, then we need to determine
        # coordinates for it
        # (Iterate over the keys directly rather than the Python 2-only
        # dict.iteritems(); the bond value was never used)
        for atom in atom1.bonds:
            if atom is not atom0:
                occupied = True; count = 0
                # Rotate vector until we find an unoccupied location
                while occupied and count < len(atom1.bonds):
                    count += 1; occupied = False
                    vector = numpy.dot(rot, vector)
                    for atom2 in atom1.bonds:
                        index2 = atoms.index(atom2)
                        if numpy.linalg.norm(coordinates[index2,:] - coordinates[index1,:] - vector) < 1e-4:
                            occupied = True
                coordinates[atoms.index(atom),:] = coordinates[index1,:] + vector
                # Recursively continue with functional group
                self.__generateFunctionalGroupCoordinates(atom1, atom)
def __generateAtomLabels(self):
    """
    Generate the labels to use for each atom in the drawing. In general,
    all atoms are labeled with their symbols except carbon. Some carbon
    atoms are also labeled in certain circumstances. The labels also
    contain any implicit hydrogen atoms (i.e. those hydrogen atoms not
    explicitly drawn in the skeletal formula).
    """
    atoms = self.molecule.atoms
    self.symbols = symbols = [atom.symbol for atom in atoms]
    # Blank out carbon labels (skeletal style), unless the molecule has
    # only one or two heavy atoms; terminal carbons with a radical or a
    # charge keep their label
    if len(symbols) > 2:
        for i, atom in enumerate(atoms):
            if symbols[i] == 'C' and (len(atom.bonds) > 1 or (atom.radicalElectrons == 0 and atom.charge == 0)):
                symbols[i] = ''
    # Restore the label on atoms whose bonds are all double/triple and
    # that touch at least one labeled neighbor; iterate to a fixed point
    changed = True
    while changed:
        changed = False
        for i, atom in enumerate(atoms):
            if symbols[i] != '':
                continue
            if not all([(bond.isDouble() or bond.isTriple()) for bond in atom.bonds.values()]):
                continue
            if any([symbols[atoms.index(neighbor)] != '' for neighbor in atom.bonds]):
                symbols[i] = atom.symbol
                changed = True
    # Append implicit hydrogen counts to the visible labels
    for i, atom in enumerate(atoms):
        if symbols[i] == '':
            continue
        try:
            Hcount = self.implicitHydrogens[atom]
        except KeyError:
            continue
        if Hcount == 1:
            symbols[i] = symbols[i] + 'H'
        elif Hcount > 1:
            symbols[i] = symbols[i] + 'H{0:d}'.format(Hcount)
    return symbols
def render(self, cr, offset=None):
    """
    Uses the Cairo graphics library to create a skeletal formula drawing of a
    molecule containing the list of `atoms` and dict of `bonds` to be drawn.
    The 2D position of each atom in `atoms` is given in the `coordinates` array.
    The symbols to use at each atomic position are given by the list `symbols`.
    You must specify the Cairo context `cr` to render to. `offset`, if given,
    is an (x, y) pair added to every atom coordinate before drawing.
    """
    try:
        import cairocffi as cairo
    except ImportError:
        import cairo
    coordinates = self.coordinates
    atoms = self.molecule.atoms
    symbols = self.symbols
    # Lone pairs are only drawn when the molecule contains nitrogen
    drawLonePairs = False
    for atom in atoms:
        if atom.isNitrogen():
            drawLonePairs = True
    # NOTE(review): these four locals appear unused — the bounding box is
    # tracked on self.left/self.top/self.right/self.bottom further down
    left = 0.0
    top = 0.0
    right = 0.0
    bottom = 0.0
    # Shift coordinates by offset value
    if offset is not None:
        coordinates[:,0] += offset[0]
        coordinates[:,1] += offset[1]
    # Draw bonds
    for atom1 in atoms:
        for atom2, bond in atom1.bonds.items():
            index1 = atoms.index(atom1)
            index2 = atoms.index(atom2)
            if index1 < index2:  # So we only draw each bond once
                self.__renderBond(index1, index2, bond, cr)
    # Draw aromatic bonds
    for cycle in self.cycles:
        cycleBonds = []
        for atom1, atom2 in zip(cycle[0:-1], cycle[1:]):
            cycleBonds.append(atom1.bonds[atom2])
        cycleBonds.append(cycle[0].bonds[cycle[-1]])
        if all([bond.isBenzene() for bond in cycleBonds]):
            # We've found an aromatic ring, so draw a circle in the center to represent the benzene bonds
            center = numpy.zeros(2, numpy.float64)
            for atom in cycle:
                index = atoms.index(atom)
                center += coordinates[index,:]
            center /= len(cycle)
            # Radius: distance from ring center to a bond midpoint, minus a
            # small margin so the circle stays inside the ring
            index1 = atoms.index(cycle[0])
            index2 = atoms.index(cycle[1])
            radius = math.sqrt(
                (center[0] - (coordinates[index1,0] + coordinates[index2,0]) / 2)**2 +
                (center[1] - (coordinates[index1,1] + coordinates[index2,1]) / 2)**2
            ) - 4
            cr.set_source_rgba(0.0, 0.0, 0.0, 1.0)
            cr.set_line_width(1.0)
            cr.set_line_cap(cairo.LINE_CAP_ROUND)
            cr.arc(center[0], center[1], radius, 0.0, 2 * math.pi)
            cr.stroke()
    # Draw atoms
    for i, atom in enumerate(atoms):
        symbol = symbols[i]
        index = atoms.index(atom)
        x0, y0 = coordinates[index,:]
        # The sum of the bond vectors decides on which side of the heavy
        # atom the rest of the label is written (heavyFirst)
        vector = numpy.zeros(2, numpy.float64)
        for atom2 in atom.bonds:
            vector += coordinates[atoms.index(atom2),:] - coordinates[index,:]
        heavyFirst = vector[0] <= 0
        if len(atoms) == 1 and atoms[0].symbol not in ['C', 'N'] and atoms[0].charge == 0 and atoms[0].radicalElectrons == 0:
            # This is so e.g. water is rendered as H2O rather than OH2
            heavyFirst = False
            cr.set_font_size(self.options['fontSizeNormal'])
            x0 += cr.text_extents(symbols[0])[2] / 2.0
        atomBoundingRect = self.__renderAtom(symbol, atom, x0, y0, cr, heavyFirst, drawLonePairs)
    # Add a small amount of whitespace on all sides
    padding = self.options['padding']
    self.left -= padding; self.top -= padding; self.right += padding; self.bottom += padding
def __drawLine(self, cr, x1, y1, x2, y2, dashed=False):
    """
    Draw a line on the given Cairo context `cr` from (`x1`, `y1`) to
    (`x2`, `y2`), and update the bounding rectangle
    (self.left/top/right/bottom) if necessary. If `dashed` is ``True``
    the line is drawn dashed, and the dash pattern is reset afterwards
    so subsequent calls draw solid lines.
    """
    try:
        import cairocffi as cairo
    except ImportError:
        import cairo
    # (Removed a stray no-op `cairo` expression statement that had no effect)
    cr.set_source_rgba(0.0, 0.0, 0.0, 1.0)  # black
    cr.set_line_width(1.0)
    if dashed:
        cr.set_dash([3.5])
    cr.set_line_cap(cairo.LINE_CAP_ROUND)
    cr.move_to(x1, y1)
    cr.line_to(x2, y2)
    cr.stroke()
    # Remove dashes for the next method call
    if dashed:
        cr.set_dash([])
    # Grow the bounding rectangle to include both endpoints
    self.left = min(self.left, x1, x2)
    self.right = max(self.right, x1, x2)
    self.top = min(self.top, y1, y2)
    self.bottom = max(self.bottom, y1, y2)
def __renderBond(self, atom1, atom2, bond, cr):
    """
    Render an individual `bond` between atoms with indices `atom1` and `atom2`
    on the Cairo context `cr`.

    Multiple bonds are drawn as parallel lines; fractional orders add a
    dashed line. When at least one endpoint is labeled the lines are
    centered on the bond axis; otherwise the extra lines are offset to
    one side of the skeleton line and shortened at each end. Fractional
    double-bond lines are suppressed inside aromatic rings, which
    render() draws as a circle instead.
    """
    try:
        import cairocffi as cairo
    except ImportError:
        import cairo
    # NOTE(review): `cairo` is imported here for parity with the other
    # drawing methods but is not referenced in this one
    bondLength = self.options['bondLength']
    # determine if aromatic: both atoms belong to a cycle in which every
    # bond is a benzene bond
    isAromatic = False
    for cycle in self.cycles:
        if self.molecule.atoms[atom1] in cycle and \
                self.molecule.atoms[atom2] in cycle:
            allBenzenes = True
            for index in range(len(cycle)):
                if not cycle[index - 1].bonds[cycle[index]].isBenzene():
                    allBenzenes = False
                    break
            if allBenzenes:
                isAromatic = True
                break
    x1, y1 = self.coordinates[atom1,:]
    x2, y2 = self.coordinates[atom2,:]
    angle = math.atan2(y2 - y1, x2 - x1)
    # (dx, dy) points along the bond; (du, dv) is the unit normal used to
    # offset the parallel lines of a multiple bond
    dx = x2 - x1; dy = y2 - y1
    du = math.cos(angle + math.pi / 2)
    dv = math.sin(angle + math.pi / 2)
    if (self.symbols[atom1] != '' or \
            self.symbols[atom2] != ''):
        # At least one endpoint carries a label: center the lines on the axis
        if bond.isTriple():
            # Draw triple bond centered on bond axis
            du *= 3; dv *= 3
            self.__drawLine(cr, x1 - du, y1 - dv, x2 - du, y2 - dv)
            self.__drawLine(cr, x1     , y1     , x2     , y2     )
            self.__drawLine(cr, x1 + du, y1 + dv, x2 + du, y2 + dv)
        elif bond.getOrderNum() > 2 and bond.getOrderNum() < 3:
            # Order between 2 and 3: two solid lines plus one dashed line
            du *= 3; dv *= 3
            self.__drawLine(cr, x1 - du, y1 - dv, x2 - du, y2 - dv)
            self.__drawLine(cr, x1     , y1     , x2     , y2     )
            self.__drawLine(cr, x1 + du, y1 + dv, x2 + du, y2 + dv, dashed = True)
        elif bond.isDouble():
            # Draw double bond centered on bond axis
            du *= 1.6; dv *= 1.6
            self.__drawLine(cr, x1 - du, y1 - dv, x2 - du, y2 - dv)
            self.__drawLine(cr, x1 + du, y1 + dv, x2 + du, y2 + dv)
        elif bond.getOrderNum() > 1 and bond.getOrderNum() < 2 and not isAromatic:
            # Draw dashed double bond centered on bond axis
            du *= 1.6; dv *= 1.6
            self.__drawLine(cr, x1 - du, y1 - dv, x2 - du, y2 - dv)
            self.__drawLine(cr, x1 + du, y1 + dv, x2 + du, y2 + dv, dashed=True)
        else:
            self.__drawLine(cr, x1, y1, x2, y2)
    else:
        # Draw bond on skeleton
        self.__drawLine(cr, x1, y1, x2, y2)
        # Draw other bonds offset to the side and shortened at both ends
        if bond.isDouble():
            du *= 3.2; dv *= 3.2; dx = 2 * dx / bondLength; dy = 2 * dy / bondLength
            self.__drawLine(cr, x1 + du + dx, y1 + dv + dy, x2 + du - dx, y2 + dv - dy)
        elif bond.isTriple():
            du *= 3; dv *= 3; dx = 2 * dx / bondLength; dy = 2 * dy / bondLength
            self.__drawLine(cr, x1 - du + dx, y1 - dv + dy, x2 - du - dx, y2 - dv - dy)
            self.__drawLine(cr, x1 + du + dx, y1 + dv + dy, x2 + du - dx, y2 + dv - dy)
        elif bond.getOrderNum() > 1 and bond.getOrderNum() < 2 and not isAromatic:
            du *= 3.2; dv *= 3.2; dx = 2 * dx / bondLength; dy = 2 * dy / bondLength
            self.__drawLine(cr, x1 + du + dx, y1 + dv + dy, x2 + du - dx, y2 + dv - dy, dashed=True)
        elif bond.getOrderNum() > 2 and bond.getOrderNum() < 3:
            du *= 3; dv *= 3; dx = 2 * dx / bondLength; dy = 2 * dy / bondLength
            self.__drawLine(cr, x1 - du + dx, y1 - dv + dy, x2 - du - dx, y2 - dv - dy)
            self.__drawLine(cr, x1 + du + dx, y1 + dv + dy, x2 + du - dx, y2 + dv - dy, dashed=True)
def __renderAtom(self, symbol, atom, x0, y0, cr, heavyFirst=True, drawLonePairs=False):
    """
    Render the `symbol` for an atom centered around the coordinates (`x0`, `y0`)
    onto the Cairo context `cr`. If `heavyFirst` is ``False``, then the order
    of the atoms will be reversed in the symbol. This method also causes
    radical electrons and charges to be drawn adjacent to the rendered symbol,
    and lone electron pairs when `drawLonePairs` is ``True``.
    Returns the label's bounding rectangle as [left, top, right, bottom].
    """
    try:
        import cairocffi as cairo
    except ImportError:
        import cairo
    atoms = self.molecule.atoms
    if symbol != '':
        heavyAtom = symbol[0]
        # Split label by atoms
        labels = re.findall('[A-Z][a-z]*[0-9]*', symbol)
        if not heavyFirst: labels.reverse()
        if 'C' not in symbol and 'O' not in symbol and len(atoms) == 1: labels.sort()
        symbol = ''.join(labels)
        # Determine positions of each character in the symbol
        coordinates = []
        cr.set_font_size(self.options['fontSizeNormal'])
        y0 += max([cr.text_extents(char)[3] for char in symbol if char.isalpha()]) / 2
        for i, label in enumerate(labels):
            for j, char in enumerate(label):
                cr.set_font_size(self.options['fontSizeSubscript' if char.isdigit() else 'fontSizeNormal'])
                xbearing, ybearing, width, height, xadvance, yadvance = cr.text_extents(char)
                if i == 0 and j == 0:
                    # Center heavy atom at (x0, y0)
                    x = x0 - width / 2.0 - xbearing
                    y = y0
                else:
                    # Left-justify other atoms (for now)
                    x = x0
                    y = y0
                if char.isdigit(): y += height / 2.0
                coordinates.append((x,y))
                x0 = x + xadvance
        # Compute the overall extent of the label from the per-character extents
        x = 1000000; y = 1000000; width = 0; height = 0
        startWidth = 0; endWidth = 0
        for i, char in enumerate(symbol):
            cr.set_font_size(self.options['fontSizeSubscript' if char.isdigit() else 'fontSizeNormal'])
            extents = cr.text_extents(char)
            if coordinates[i][0] + extents[0] < x: x = coordinates[i][0] + extents[0]
            if coordinates[i][1] + extents[1] < y: y = coordinates[i][1] + extents[1]
            width += extents[4] if i < len(symbol) - 1 else extents[2]
            if extents[3] > height: height = extents[3]
            if i == 0: startWidth = extents[2]
            if i == len(symbol) - 1: endWidth = extents[2]
        if not heavyFirst:
            # Shift the whole label left so the heavy atom stays centered
            for i in range(len(coordinates)):
                coordinates[i] = (coordinates[i][0] - (width - startWidth / 2 - endWidth / 2), coordinates[i][1])
            x -= width - startWidth / 2 - endWidth / 2
        # Background: a white rounded rectangle behind the label so bonds
        # do not show through the text
        x1 = x - 2; y1 = y - 2; x2 = x + width + 2; y2 = y + height + 2; r = 4
        cr.move_to(x1 + r, y1)
        cr.line_to(x2 - r, y1)
        cr.curve_to(x2 - r/2, y1, x2, y1 + r/2, x2, y1 + r)
        cr.line_to(x2, y2 - r)
        cr.curve_to(x2, y2 - r/2, x2 - r/2, y2, x2 - r, y2)
        cr.line_to(x1 + r, y2)
        cr.curve_to(x1 + r/2, y2, x1, y2 - r/2, x1, y2 - r)
        cr.line_to(x1, y1 + r)
        cr.curve_to(x1, y1 + r/2, x1 + r/2, y1, x1 + r, y1)
        cr.close_path()
        cr.save()
        cr.set_operator(cairo.OPERATOR_SOURCE)
        cr.set_source_rgba(1.0, 1.0, 1.0, 1.0)
        cr.fill()
        cr.restore()
        boundingRect = [x1, y1, x2, y2]
        # Set color for text, keyed on the heavy atom's element
        if heavyAtom == 'C': cr.set_source_rgba(0.0, 0.0, 0.0, 1.0)
        elif heavyAtom == 'N': cr.set_source_rgba(0.0, 0.0, 1.0, 1.0)
        elif heavyAtom == 'O': cr.set_source_rgba(1.0, 0.0, 0.0, 1.0)
        elif heavyAtom == 'F': cr.set_source_rgba(0.5, 0.75, 1.0, 1.0)
        elif heavyAtom == 'Si': cr.set_source_rgba(0.5, 0.5, 0.75, 1.0)
        elif heavyAtom == 'Al': cr.set_source_rgba(0.75, 0.5, 0.5, 1.0)
        elif heavyAtom == 'P': cr.set_source_rgba(1.0, 0.5, 0.0, 1.0)
        elif heavyAtom == 'S': cr.set_source_rgba(1.0, 0.75, 0.5, 1.0)
        elif heavyAtom == 'Cl': cr.set_source_rgba(0.0, 1.0, 0.0, 1.0)
        elif heavyAtom == 'Br': cr.set_source_rgba(0.6, 0.2, 0.2, 1.0)
        elif heavyAtom == 'I': cr.set_source_rgba(0.5, 0.0, 0.5, 1.0)
        else: cr.set_source_rgba(0.0, 0.0, 0.0, 1.0)
        # Text itself
        for i, char in enumerate(symbol):
            cr.set_font_size(self.options['fontSizeSubscript' if char.isdigit() else 'fontSizeNormal'])
            xbearing, ybearing, width, height, xadvance, yadvance = cr.text_extents(char)
            xi, yi = coordinates[i]
            cr.move_to(xi, yi)
            cr.show_text(char)
        x, y = coordinates[0] if heavyFirst else coordinates[-1]
    else:
        # Unlabeled atom: use a tiny bounding box around the position
        x = x0; y = y0; width = 0; height = 0
        boundingRect = [x0 - 0.5, y0 - 0.5, x0 + 0.5, y0 + 0.5]
        heavyAtom = ''
    # Draw radical electrons and charges
    # These will be placed either horizontally along the top or bottom of the
    # atom or vertically along the left or right of the atom
    orientation = ' '
    if len(atom.bonds) == 0:
        if len(symbol) == 1: orientation = 'r'
        else: orientation = 'l'
    elif len(atom.bonds) == 1:
        # Terminal atom - we require a horizontal arrangement if there are
        # more than just the heavy atom
        # next(iter(...)) instead of dict.keys()[0]: identical behavior under
        # Python 2, and also valid under Python 3 where keys() is a
        # non-indexable view
        atom1 = next(iter(atom.bonds))
        vector = self.coordinates[atoms.index(atom),:] - self.coordinates[atoms.index(atom1),:]
        if len(symbol) <= 1:
            angle = math.atan2(vector[1], vector[0])
            if 3 * math.pi / 4 <= angle or angle < -3 * math.pi / 4: orientation = 'l'
            elif -3 * math.pi / 4 <= angle < -1 * math.pi / 4: orientation = 'b'
            elif -1 * math.pi / 4 <= angle < 1 * math.pi / 4: orientation = 'r'
            else: orientation = 't'
        else:
            if vector[1] <= 0:
                orientation = 'b'
            else:
                orientation = 't'
    else:
        # Internal atom
        # First try to see if there is a "preferred" side on which to place the
        # radical/charge data, i.e. if the bonds are unbalanced
        vector = numpy.zeros(2, numpy.float64)
        for atom1 in atom.bonds:
            vector += self.coordinates[atoms.index(atom),:] - self.coordinates[atoms.index(atom1),:]
        if numpy.linalg.norm(vector) < 1e-4:
            # All of the bonds are balanced, so we'll need to be more shrewd
            angles = []
            for atom1 in atom.bonds:
                vector = self.coordinates[atoms.index(atom1),:] - self.coordinates[atoms.index(atom),:]
                angles.append(math.atan2(vector[1], vector[0]))
            # Try one more time to see if we can use one of the four sides
            # (due to there being no bonds in that quadrant)
            # We don't even need a full 90 degrees open (using 60 degrees instead)
            if all([ 1 * math.pi / 3 >= angle or angle >= 2 * math.pi / 3 for angle in angles]): orientation = 't'
            elif all([-2 * math.pi / 3 >= angle or angle >= -1 * math.pi / 3 for angle in angles]): orientation = 'b'
            elif all([-1 * math.pi / 6 >= angle or angle >= 1 * math.pi / 6 for angle in angles]): orientation = 'r'
            elif all([ 5 * math.pi / 6 >= angle or angle >= -5 * math.pi / 6 for angle in angles]): orientation = 'l'
            else:
                # If we still don't have it (e.g. when there are 4+ equally-
                # spaced bonds), just put everything in the top right for now
                orientation = 'tr'
        else:
            # There is an unbalanced side, so let's put the radical/charge data there
            angle = math.atan2(vector[1], vector[0])
            if 3 * math.pi / 4 <= angle or angle < -3 * math.pi / 4: orientation = 'l'
            elif -3 * math.pi / 4 <= angle < -1 * math.pi / 4: orientation = 'b'
            elif -1 * math.pi / 4 <= angle < 1 * math.pi / 4: orientation = 'r'
            else: orientation = 't'
    cr.set_font_size(self.options['fontSizeNormal'])
    extents = cr.text_extents(heavyAtom)
    # (xi, yi) mark the center of the space in which to place the radicals and charges
    if orientation[0] == 'l':
        xi = x - 3
        yi = y - extents[3]/2
    elif orientation[0] == 'b':
        xi = x + extents[0] + extents[2]/2
        yi = y - extents[3] - 4
    elif orientation[0] == 'r':
        xi = x + extents[0] + extents[2] + 4
        yi = y - extents[3]/2
    elif orientation[0] == 't':
        xi = x + extents[0] + extents[2]/2
        yi = y + 4
    # If we couldn't use one of the four sides, then offset the radical/charges
    # horizontally by a few pixels, in hope that this avoids overlap with an
    # existing bond
    if len(orientation) > 1: xi += 4
    # Get width and height
    cr.set_font_size(self.options['fontSizeSubscript'])
    width = 0.0; height = 0.0
    if orientation[0] == 'b' or orientation[0] == 't':
        if atom.radicalElectrons > 0:
            width += atom.radicalElectrons * 2 + (atom.radicalElectrons - 1)
            height = atom.radicalElectrons * 2
        text = ''
        if atom.radicalElectrons > 0 and atom.charge != 0: width += 1
        if atom.charge == 1: text = '+'
        elif atom.charge > 1: text = '{0:d}+'.format(atom.charge)
        elif atom.charge == -1: text = u'\u2013'
        elif atom.charge < -1: text = u'{0:d}\u2013'.format(abs(atom.charge))
        if text != '':
            extents = cr.text_extents(text)
            width += extents[2] + 1
            height = extents[3]
    elif orientation[0] == 'l' or orientation[0] == 'r':
        if atom.radicalElectrons > 0:
            height += atom.radicalElectrons * 2 + (atom.radicalElectrons - 1)
            width = atom.radicalElectrons * 2
        text = ''
        if atom.radicalElectrons > 0 and atom.charge != 0: height += 1
        if atom.charge == 1: text = '+'
        elif atom.charge > 1: text = '{0:d}+'.format(atom.charge)
        elif atom.charge == -1: text = u'\u2013'
        elif atom.charge < -1: text = u'{0:d}\u2013'.format(abs(atom.charge))
        if text != '':
            extents = cr.text_extents(text)
            height += extents[3] + 1
            width = extents[2]
    # Move (xi, yi) to top left corner of space in which to draw radicals and charges
    xi -= width / 2.0; yi -= height / 2.0
    # Update bounding rectangle if necessary
    if width > 0 and height > 0:
        if xi < boundingRect[0]:
            boundingRect[0] = xi
        if yi < boundingRect[1]:
            boundingRect[1] = yi
        if xi + width > boundingRect[2]:
            boundingRect[2] = xi + width
        if yi + height > boundingRect[3]:
            boundingRect[3] = yi + height
    if orientation[0] == 'b' or orientation[0] == 't':
        # Draw radical electrons first
        for i in range(atom.radicalElectrons):
            cr.new_sub_path()
            cr.arc(xi + 3 * i + 1, yi + height/2, 1, 0, 2 * math.pi)
            cr.set_source_rgba(0.0, 0.0, 0.0, 1.0)
            cr.fill()
        if atom.radicalElectrons > 0: xi += atom.radicalElectrons * 2 + (atom.radicalElectrons - 1) + 1
        # Draw charges second
        text = ''
        if atom.charge == 1: text = '+'
        elif atom.charge > 1: text = '{0:d}+'.format(atom.charge)
        elif atom.charge == -1: text = u'\u2013'
        elif atom.charge < -1: text = u'{0:d}\u2013'.format(abs(atom.charge))
        if text != '':
            extents = cr.text_extents(text)
            cr.move_to(xi, yi - extents[1])
            cr.set_source_rgba(0.0, 0.0, 0.0, 1.0)
            cr.show_text(text)
        # Draw lone electron pairs
        # Draw them for nitrogen containing molecules only
        if drawLonePairs:
            for i in range(atom.lonePairs):
                cr.new_sub_path()
                if i == 0:
                    x1lp = x-2
                    y1lp = y-8
                    x2lp = x+2
                    y2lp = y-12
                elif i == 1:
                    x1lp = x+12
                    y1lp = y-8
                    x2lp = x+8
                    y2lp = y-12
                elif i == 2:
                    x1lp = x-2
                    y1lp = y-1
                    x2lp = x+2
                    y2lp = y+3
                self.__drawLine(cr, x1lp, y1lp, x2lp, y2lp)
    elif orientation[0] == 'l' or orientation[0] == 'r':
        # Draw charges first
        text = ''
        if atom.charge == 1: text = '+'
        elif atom.charge > 1: text = '{0:d}+'.format(atom.charge)
        elif atom.charge == -1: text = u'\u2013'
        elif atom.charge < -1: text = u'{0:d}\u2013'.format(abs(atom.charge))
        if text != '':
            extents = cr.text_extents(text)
            cr.move_to(xi - extents[2]/2, yi - extents[1])
            cr.set_source_rgba(0.0, 0.0, 0.0, 1.0)
            cr.show_text(text)
        if atom.charge != 0: yi += extents[3] + 1
        # Draw radical electrons second
        for i in range(atom.radicalElectrons):
            cr.new_sub_path()
            cr.arc(xi + width/2, yi + 3 * i + 1, 1, 0, 2 * math.pi)
            cr.set_source_rgba(0.0, 0.0, 0.0, 1.0)
            cr.fill()
        # Draw lone electron pairs
        # Draw them for nitrogen species only
        if drawLonePairs:
            for i in range (atom.lonePairs):
                cr.new_sub_path()
                if i == 0:
                    x1lp = x-2
                    y1lp = y-8
                    x2lp = x+2
                    y2lp = y-12
                elif i == 1:
                    x1lp = x+12
                    y1lp = y-8
                    x2lp = x+8
                    y2lp = y-12
                elif i == 2:
                    x1lp = x-2
                    y1lp = y-1
                    x2lp = x+2
                    y2lp = y+3
                self.__drawLine(cr, x1lp, y1lp, x2lp, y2lp)
    # Update bounding rect to ensure atoms are included
    if boundingRect[0] < self.left:
        self.left = boundingRect[0]
    if boundingRect[1] < self.top:
        self.top = boundingRect[1]
    if boundingRect[2] > self.right:
        self.right = boundingRect[2]
    if boundingRect[3] > self.bottom:
        self.bottom = boundingRect[3]
    # Return the label's bounding rectangle; render() captures this value
    return boundingRect
def __make_single_bonds(self):
    """
    Convert every bond in the molecule to a single bond.

    Returns a dictionary mapping each modified Bond object to its
    original bond order, suitable for restoring the molecule later.
    """
    saved_orders = {}
    for atom in self.molecule.atoms:
        for neighbor, bond in atom.bonds.items():
            if bond.isSingle():
                continue
            saved_orders[bond] = bond.getOrderNum()
            bond.setOrderNum(1)
    return saved_orders
def __replace_bonds(self, bond_order_dictionary):
    """
    Restore the bond orders of self.molecule from `bond_order_dictionary`,
    a mapping of Bond object to numeric order as produced by
    __make_single_bonds().
    """
    for bond_object, saved_order in bond_order_dictionary.items():
        bond_object.setOrderNum(saved_order)
################################################################################
class ReactionDrawer:
"""
This class provides functionality for drawing chemical reactions using the
skeletal formula of each reactant and product molecule via the Cairo 2D
graphics engine. The most common use case is simply::
ReactionDrawer().draw(reaction, format='png', path='reaction.png')
where ``reaction`` is the :class:`Reaction` object to draw. You can also
pass a dict of options to the constructor to affect how the molecules are
drawn.
"""
def __init__(self, options=None):
    """
    Create a ReactionDrawer. `options`, if given, is a dict of drawing
    options that overrides the defaults (the MoleculeDrawer defaults
    plus an `arrowLength` of 36).
    """
    merged = MoleculeDrawer().options.copy()
    merged['arrowLength'] = 36
    if options:
        merged.update(options)
    self.options = merged
def draw(self, reaction, format, path=None):
"""
Draw the given `reaction` using the given image `format` - pdf, svg,
ps, or png. If `path` is given, the drawing is saved to that location
on disk.
This function returns the Cairo surface and context used to create the
drawing, as well as a bounding box for the molecule being drawn as the
tuple (`left`, `top`, `width`, `height`).
"""
# The Cairo 2D graphics library (and its Python wrapper) is required for
# the reaction drawing algorithm
try:
import cairocffi as cairo
except ImportError:
try:
import cairo
except ImportError:
logging.error('Cairo not found; molecule will not be drawn.')
return
from .molecule import Molecule
from rmgpy.species import Species
fontFamily = self.options['fontFamily']
fontSizeNormal = self.options['fontSizeNormal']
# First draw each of the reactants and products
reactants = []; products = []
for reactant in reaction.reactants:
if isinstance(reactant, Species):
molecule = reactant.molecule[0]
elif isinstance(reactant, Molecule):
molecule = reactant
reactants.append(MoleculeDrawer().draw(molecule, format))
for product in reaction.products:
if isinstance(product, Species):
molecule = product.molecule[0]
elif isinstance(product, Molecule):
molecule = product
products.append(MoleculeDrawer().draw(molecule, format))
# Next determine size required for surface
rxn_width = 0; rxn_height = 0; rxn_top = 0
for surface, cr, rect in reactants + products:
left, top, width, height = rect
rxn_width += width
if height > rxn_height: rxn_height = height
if height + top > rxn_top: rxn_top = height + top
rxn_top = 0.5 * rxn_height - rxn_top
# Also include '+' and reaction arrow in width
cr.set_font_size(fontSizeNormal)
plus_extents = cr.text_extents(' + ')
arrow_width = self.options['arrowLength']
rxn_width += (len(reactants)-1) * plus_extents[4] + arrow_width + (len(products)-1) * plus_extents[4]
# Now make the surface for the reaction and render each molecule on it
rxn_surface = createNewSurface(format, path, width=rxn_width, height=rxn_height)
rxn_cr = cairo.Context(rxn_surface)
# Draw white background
rxn_cr.set_source_rgba(1.0, 1.0, 1.0, 1.0)
rxn_cr.paint()
# Draw reactants
rxn_x = 0.0; rxn_y = 0.0
for index, reactant in enumerate(reactants):
surface, cr, rect = reactant
left, top, width, height = rect
if index > 0:
# Draw the "+" between the reactants
rxn_cr.save()
rxn_cr.set_font_size(fontSizeNormal)
rxn_y = rxn_top + 0.5 * (rxn_height - plus_extents[3])
rxn_cr.set_source_rgba(0.0, 0.0, 0.0, 1.0)
rxn_cr.move_to(rxn_x, rxn_y - plus_extents[1])
rxn_cr.show_text(' + ')
rxn_cr.restore()
rxn_x += plus_extents[4]
# Draw the reactant
rxn_y = top + rxn_top + 0.5 * rxn_height
if rxn_y < 0 : rxn_y = 0
rxn_cr.save()
rxn_cr.set_source_surface(surface, rxn_x, rxn_y)
rxn_cr.paint()
rxn_cr.restore()
rxn_x += width
# Draw reaction arrow
# Unfortunately Cairo does not have arrow drawing built-in, so we must
# draw the arrow head ourselves
rxn_cr.save()
rxn_cr.set_source_rgba(0.0, 0.0, 0.0, 1.0)
rxn_cr.set_line_width(1.0)
rxn_cr.move_to(rxn_x + 8, rxn_top + 0.5 * rxn_height)
rxn_cr.line_to(rxn_x + arrow_width - 8, rxn_top + 0.5 * rxn_height)
rxn_cr.move_to(rxn_x + arrow_width - 14, rxn_top + 0.5 * rxn_height - 3.0)
rxn_cr.line_to(rxn_x + arrow_width - 8, rxn_top + 0.5 * rxn_height)
rxn_cr.line_to(rxn_x + arrow_width - 14, rxn_top + 0.5 * rxn_height + 3.0)
rxn_cr.stroke()
rxn_cr.restore()
rxn_x += arrow_width
# Draw products
for index, product in enumerate(products):
surface, cr, rect = product
left, top, width, height = rect
if index > 0:
# Draw the "+" between the products
rxn_cr.save()
rxn_cr.set_font_size(fontSizeNormal)
rxn_y = rxn_top + 0.5 * (rxn_height - plus_extents[3])
rxn_cr.set_source_rgba(0.0, 0.0, 0.0, 1.0)
rxn_cr.move_to(rxn_x, rxn_y - plus_extents[1])
rxn_cr.show_text(' + ')
rxn_cr.restore()
rxn_x += plus_extents[4]
# Draw the product
rxn_y = top + rxn_top + 0.5 * rxn_height
if rxn_y < 0 : rxn_y = 0
rxn_cr.save()
rxn_cr.set_source_surface(surface, rxn_x, rxn_y)
rxn_cr.paint()
rxn_cr.restore()
rxn_x += width
# Finish Cairo drawing
if format == 'png':
rxn_surface.write_to_png(path)
else:
rxn_surface.finish()
| 45.994322 | 191 | 0.532599 |
57c7de33400307ab53eeae5c6fd2f6dfc36f363b | 8,679 | py | Python | hug_website/app.py | timothycrosley/hug_website | 2a086b72c9a17bb424b389dc6764603b5f646f03 | [
"MIT"
] | 2 | 2016-03-15T15:16:39.000Z | 2016-09-23T20:42:18.000Z | hug_website/app.py | timothycrosley/hug_website | 2a086b72c9a17bb424b389dc6764603b5f646f03 | [
"MIT"
] | 7 | 2016-03-18T21:49:37.000Z | 2019-04-12T23:27:09.000Z | hug_website/app.py | timothycrosley/hug_website | 2a086b72c9a17bb424b389dc6764603b5f646f03 | [
"MIT"
] | 7 | 2016-03-18T21:01:39.000Z | 2017-01-03T17:38:31.000Z | """hug's website: hug.rest"""
from functools import partial
import hug
from hug_website import controllers
# Serve HTML by default; any route requested with a '/js' suffix returns JSON.
dual_output = hug.output_format.suffix({'/js': hug.output_format.json}, hug.output_format.html)
# Base GET router: every endpoint gets a '/js' variant; invalid input 404s.
app = hug.get(output=dual_output, on_invalid=hug.redirect.not_found).suffixes('/js')
# Helper: apply an HTML template transform only to the non-'/js' variant.
html = partial(hug.transform.suffix, {'/js': None})
@hug.static('/static', cache=True)
def static_files():
    """Serve the site's static assets directory (browser-cacheable)."""
    return ('hug_website/static', )
@hug.not_found(transform=html(controllers.frame), output=dual_output)
def drop_bear():
    """404 handler: render the regular site frame with the not_found page."""
    return root('not_found')
@app.transform(html(controllers.frame), urls=('/', '/website/{page_name}/{section}', '/website/{page_name}'))
def root(page_name: hug.types.one_of(('home', 'contribute', 'quickstart', 'discuss', 'not_found', 'learn',
                                      'acknowledge', 'latest'))='home',
         section: hug.types.one_of(controllers.DOCUMENTATION_TEMPLATES.keys())=controllers.DOCUMENTATION[0][0]):
    """Render the requested site page inside the common HTML frame."""
    handler = globals()[page_name]
    # Only the 'learn' page is sectioned; every other page handler takes no
    # arguments beyond its defaults.
    content = handler(section) if page_name == 'learn' and section else handler()
    return {
        'label': 'hug',
        'version': hug.__version__,
        'content': content,
        'page': page_name,
    }
@app.transform(html(controllers.contribute))
def contribute():
    """Contribute page; all content lives in the template, no context needed."""
    return {}
@app.transform(html(controllers.latest))
def latest():
    """Latest-news page; all content lives in the template, no context needed."""
    return {}
@app.transform(html(controllers.acknowledge))
def acknowledge():
    """Acknowledgements page; all content lives in the template."""
    return {}
@app.transform(html(controllers.discuss))
def discuss():
    """Discussion page; all content lives in the template."""
    return {}
@app.transform(html(controllers.learn))
def learn(section: hug.types.one_of(controllers.DOCUMENTATION_TEMPLATES.keys())=controllers.DOCUMENTATION[0][0]):
    """Documentation page: provide the full section list and the active section."""
    return {'sections': controllers.DOCUMENTATION, 'section': section}
def not_found():
    """Template context for the 404 page rendered by ``drop_bear``."""
    context = {}
    context['not_found_header'] = '404 - BEWARE OF DROP BEARS'
    context['not_found_description'] = "You don't belong around these parts. Do yourself a favor: "
    context['home_link_description'] = 'GO HOME.'
    return context
@app.transform(html(controllers.quickstart))
def quickstart():
    """Template context for the quickstart tutorial page (headers + copy)."""
    return {'install_header': 'Installing hug',
            'install_description': 'The first step to get started is to install hug. hug has very minimal base system '
                                   'requirements - a local installation of Python3.3+, optionally inside a virtualenv. '
                                   'Additionally, pip is required, but this should be included with most Python3 '
                                   'installations by default. Once the base system is in good shape, run the following '
                                   'command to install the latest version of hug:',
            'first_header': 'First hug API',
            'first_description': 'To start off, we are going to make a simple API with local access only, but which '
                                 'demonstrates a couple of basic hug features: annotation-based validation and '
                                 'directives. Our first API will simply return a happy birthday message to the '
                                 'user, along with the time it took to generate the message:',
            'first_explaination': "In this example: hug's built-in type annotation automatically validates and "
                                  "converts incoming inputs while hug's directives automatically replace the hug_timer "
                                  'argument with a HugTimer object that keeps track of how long the function has been '
                                  'running. hug type annotations are, at their core, simply functions or objects which '
                                  'take a value as input, cast that value as something (raising on errors), and then '
                                  'return it. As a result of this, most built-in Python cast functions (int, str, etc.) '
                                  'are valid annotations in hug out of the box. You can also use Marshmallow schemas '
                                  'and types as hug type annotations without modification.',
            'http_header': 'Exposing our API as an HTTP micro-service',
            'http_description': 'To expose our API over HTTP, all we need to do is apply a hug HTTP route decorator to '
                                'the function, in addition to the local decorator. hug includes convience decorators '
                                'for all common HTTP methods (GET, POST, PUT, etc). In this case we will apply a get '
                                'decorator to specify that the function should return on an HTTP GET request. We will '
                                'also supply an example set of parameters to lead our users in the correct direction:',
            'cli_header': 'Enabling command line interaction',
            'cli_description': 'What if we want to allow users to interact with our API from the command line, as well? '
                               "No problem! All that's necessary is adding a hug.cli route decorator to our API "
                               'function:',
            'wsgi_header': 'Final step: Production HTTP use',
            'wsgi_description': "Finally, it's important to note that it's generally never a good idea to use a "
                                "development server (like hug's, Flask's, etc.) directly in production. Instead, "
                                'a WSGI-compatible server (such as uwsgi or Gunicorn) is recommended. Every hug API that '
                                'contains an http endpoint automatically exposes a `__hug_wsgi__` WSGI-compatible API '
                                '- making integration of our above example a breeze:'}
@app.transform(html(controllers.home))
def home():
    """Template context for the landing page; the final blurb embeds the
    currently-installed hug version."""
    return {'slogan': 'Embrace the APIs of the future',
            'introduction': 'Drastically simplify API development over multiple interfaces. With hug, design '
                            'and develop your API once, then expose it however your clients need to consume it. '
                            'Be it locally, over HTTP, or through the command line - hug is the fastest '
                            'and most modern way to create APIs on Python3.',
            'example_header': 'Obvious. Clean. Radically simple.',
            'performance_header': 'Unparalleled performance',
            'performance_description': 'hug has been built from the ground up with performance in mind. It is built '
                                       'to consume resources only when necessary and is then compiled with Cython '
                                       'to achieve amazing performance. As a result, hug consistently benchmarks as '
                                       'one of the fastest Python frameworks and without question takes the crown '
                                       'as the fastest high-level framework for Python 3.',
            'versioning_header': 'Built in version management',
            'versioning_description': 'hug makes it easy to expose multiple versions of your API. With hug you can '
                                      'simply specify what version or range of versions an endpoint supports and then '
                                      'automatically have that enforced and communicated to your API\'s users.',
            'documentation_header': 'Automatic documentation',
            'documentation_description': 'Python makes it easy to document your APIs well using doc strings and types '
                                         'annotations. hug uses this information to automatically generate '
                                         'documentation for users of your API so you don\'t have to.',
            'annotation_header': 'Annotation powered validation',
            'annotation_description': 'hug leverages Python 3 type annotations to enable simple per argument '
                                      'validation and transformation. This leads to explicit and easy to follow '
                                      'endpoints.',
            'reuse_header': 'Write once. Use everywhere.',
            'reuse_description': 'With hug your API and business logic is cleanly separated from the interface you\'re '
                                 'exposing it on, which means you can safely expose it over HTTP, CLI, and Python in '
                                 'one fell swoop.',
            'get_started_header': 'What are you waiting for?',
            'get_started_description': 'Start writing world class APIs on top of Python 3 in no time. Use hug {0} to '
                                       'radically simplify your code base.'.format(hug.__version__)}
| 62.891304 | 122 | 0.604448 |
949498da3fb713cd56a8d3f8807411623ada0d27 | 5,509 | py | Python | tests/factorization/test_implicit.py | BBiering/spotlight | e79c7eafbaff6e2ce2fabd7a348be3b93f5e6ee0 | [
"MIT"
] | 2,873 | 2017-07-12T00:39:49.000Z | 2022-03-29T11:34:15.000Z | tests/factorization/test_implicit.py | BBiering/spotlight | e79c7eafbaff6e2ce2fabd7a348be3b93f5e6ee0 | [
"MIT"
] | 138 | 2017-07-13T05:45:44.000Z | 2022-01-26T09:08:17.000Z | tests/factorization/test_implicit.py | BBiering/spotlight | e79c7eafbaff6e2ce2fabd7a348be3b93f5e6ee0 | [
"MIT"
] | 437 | 2017-07-13T06:36:24.000Z | 2022-03-17T12:35:26.000Z | import os
import numpy as np
import pytest
import torch
from spotlight.cross_validation import random_train_test_split
from spotlight.datasets import movielens
from spotlight.evaluation import mrr_score
from spotlight.factorization.implicit import ImplicitFactorizationModel
from spotlight.factorization.representations import BilinearNet
from spotlight.layers import BloomEmbedding
RANDOM_STATE = np.random.RandomState(42)  # fixed seed so train/test splits are reproducible
CUDA = bool(os.environ.get('SPOTLIGHT_CUDA', False))  # opt-in GPU testing via env var
# Acceptable variation in specific test runs
EPSILON = .005
def test_pointwise():
    """Pointwise loss on MovieLens 100K should reach a minimum MRR."""
    interactions = movielens.get_movielens_dataset('100K')
    train, test = random_train_test_split(interactions,
                                          random_state=RANDOM_STATE)
    model = ImplicitFactorizationModel(loss='pointwise',
                                       n_iter=10,
                                       batch_size=1024,
                                       learning_rate=1e-2,
                                       l2=1e-6,
                                       use_cuda=CUDA)
    model.fit(train)
    mrr = mrr_score(model, test, train=train).mean()
    # EPSILON absorbs run-to-run training noise around the threshold.
    assert mrr + EPSILON > 0.05
def test_bpr():
    """BPR loss on MovieLens 100K should reach a minimum MRR."""
    interactions = movielens.get_movielens_dataset('100K')
    train, test = random_train_test_split(interactions,
                                          random_state=RANDOM_STATE)
    model = ImplicitFactorizationModel(loss='bpr',
                                       n_iter=10,
                                       batch_size=1024,
                                       learning_rate=1e-2,
                                       l2=1e-6,
                                       use_cuda=CUDA)
    model.fit(train)
    mrr = mrr_score(model, test, train=train).mean()
    assert mrr + EPSILON > 0.07
def test_bpr_custom_optimizer():
    """A user-supplied optimizer factory (Adagrad) should train acceptably."""
    interactions = movielens.get_movielens_dataset('100K')
    train, test = random_train_test_split(interactions,
                                          random_state=RANDOM_STATE)
    def adagrad_optimizer(model_params,
                          lr=1e-2,
                          weight_decay=1e-6):
        # Factory matching the optimizer_func signature expected by the model.
        return torch.optim.Adagrad(model_params,
                                   lr=lr,
                                   weight_decay=weight_decay)
    model = ImplicitFactorizationModel(loss='bpr',
                                       n_iter=10,
                                       batch_size=1024,
                                       optimizer_func=adagrad_optimizer,
                                       use_cuda=CUDA)
    model.fit(train)
    mrr = mrr_score(model, test, train=train).mean()
    assert mrr + EPSILON > 0.05
def test_hinge():
    """Hinge loss on MovieLens 100K should reach a minimum MRR."""
    interactions = movielens.get_movielens_dataset('100K')
    train, test = random_train_test_split(interactions,
                                          random_state=RANDOM_STATE)
    model = ImplicitFactorizationModel(loss='hinge',
                                       n_iter=10,
                                       batch_size=1024,
                                       learning_rate=1e-2,
                                       l2=1e-6,
                                       use_cuda=CUDA)
    model.fit(train)
    mrr = mrr_score(model, test, train=train).mean()
    assert mrr + EPSILON > 0.07
def test_adaptive_hinge():
    """Adaptive hinge loss on MovieLens 100K should reach a minimum MRR."""
    interactions = movielens.get_movielens_dataset('100K')
    train, test = random_train_test_split(interactions,
                                          random_state=RANDOM_STATE)
    model = ImplicitFactorizationModel(loss='adaptive_hinge',
                                       n_iter=10,
                                       batch_size=1024,
                                       learning_rate=1e-2,
                                       l2=1e-6,
                                       use_cuda=CUDA)
    model.fit(train)
    mrr = mrr_score(model, test, train=train).mean()
    assert mrr + EPSILON > 0.07
@pytest.mark.parametrize('compression_ratio, expected_mrr', [
    (0.5, 0.03),
    (1.0, 0.04),
    (1.5, 0.045),
    (2.0, 0.045),
])
def test_bpr_bloom(compression_ratio, expected_mrr):
    """BPR with Bloom (hashed, compressed) embeddings should degrade
    gracefully: lower compression ratios tolerate a lower expected MRR.

    Fix: removed a stray debug ``print(model)`` left in the test body.
    """
    interactions = movielens.get_movielens_dataset('100K')
    train, test = random_train_test_split(interactions,
                                          random_state=RANDOM_STATE)
    # Compressed hashed embedding layers for users and items.
    user_embeddings = BloomEmbedding(interactions.num_users, 32,
                                     compression_ratio=compression_ratio,
                                     num_hash_functions=2)
    item_embeddings = BloomEmbedding(interactions.num_items, 32,
                                     compression_ratio=compression_ratio,
                                     num_hash_functions=2)
    network = BilinearNet(interactions.num_users,
                          interactions.num_items,
                          user_embedding_layer=user_embeddings,
                          item_embedding_layer=item_embeddings)
    model = ImplicitFactorizationModel(loss='bpr',
                                       n_iter=10,
                                       batch_size=1024,
                                       learning_rate=1e-2,
                                       l2=1e-6,
                                       representation=network,
                                       use_cuda=CUDA)
    model.fit(train)
    mrr = mrr_score(model, test, train=train).mean()
    assert mrr + EPSILON > expected_mrr
| 33.387879 | 73 | 0.514975 |
9d61b51ca388454b737a09d37e7df8a9f235c4c4 | 850 | py | Python | examples/delete_ports_vlan.py | open-switch/opx-docs | f448f3f3dc0de38822bbf16c1e173eb108925a40 | [
"CC-BY-4.0"
] | 122 | 2017-02-10T01:47:04.000Z | 2022-03-23T20:11:11.000Z | examples/delete_ports_vlan.py | open-switch/opx-docs | f448f3f3dc0de38822bbf16c1e173eb108925a40 | [
"CC-BY-4.0"
] | 37 | 2017-03-01T07:07:22.000Z | 2021-11-11T16:47:42.000Z | examples/delete_ports_vlan.py | open-switch/opx-docs | f448f3f3dc0de38822bbf16c1e173eb108925a40 | [
"CC-BY-4.0"
] | 39 | 2017-01-18T16:22:58.000Z | 2020-11-18T13:23:43.000Z | #Python code block to delete port to VLAN
import cps
import cps_object
#Create CPS object
cps_obj = \
cps_object.CPSObject('dell-base-if-cmn/if/interfaces/interface')
#Populate the VLAN attributes VLAN_ID='br100'
VLAN_ID = 'br100'
cps_obj.add_attr('if/interfaces/interface/name', VLAN_ID)
#Delete the untagged-ports from VLAN, include the ports which is needed in the if_port_list
if_port_list = ['e101-002-0']
cps_obj.add_attr('dell-if/if/interfaces/interface/untagged-ports',
if_port_list)
#Associate a CPS set operation with the CPS object
cps_update = {'change': cps_obj.get(), 'operation': 'set'}
#Add the CPS operation,obj pair to a new CPS transaction
transaction = cps.transaction([cps_update])
#Check for failure
if not transaction:
raise RuntimeError('Error in deleting ports to Vlan')
print 'successful'
| 29.310345 | 91 | 0.751765 |
ddfcb7e76e9ad5a7c61702aa9dfd6123acbb58ff | 1,780 | py | Python | Arquivo/2021-1/2021-1-uff-lab-jogos/PPlay/keyboard.py | joaog314/uff-projects | 417895d5b7c6fd88e9c67c925e7c6a4abb6bb6f4 | [
"MIT"
] | 3 | 2020-04-19T15:08:50.000Z | 2022-02-09T20:06:18.000Z | Mathias/PPlay/keyboard.py | AlanVilaca/Mathias | 2d7d0e69b6093422a8c99911478ecbd9fa88859f | [
"MIT"
] | null | null | null | Mathias/PPlay/keyboard.py | AlanVilaca/Mathias | 2d7d0e69b6093422a8c99911478ecbd9fa88859f | [
"MIT"
] | null | null | null | # coding= utf-8
import pygame
from pygame.locals import *
# Initializes pygame's modules
pygame.init()
class Keyboard():
"""
Returns True if the key IS pressed, it means
the press-check occurs every frame
"""
def key_pressed(self, key):
key = self.to_pattern(key)
keys = pygame.key.get_pressed()
if(keys[key]):
return True
return False
"""Shows the int code of the key"""
def show_key_pressed(self):
events = pygame.event.get()
for event in events:
if event.type == pygame.KEYDOWN:
print(event.key)
def to_pattern(self, key):
if((key=="LEFT") or (key=="left")):
return pygame.K_LEFT
elif((key=="RIGHT") or (key=="right")):
return pygame.K_RIGHT
elif((key=="UP") or (key=="up")):
return pygame.K_UP
elif((key=="DOWN") or (key=="down")):
return pygame.K_DOWN
elif((key=="ENTER") or (key=="enter") or
(key=="RETURN") or (key=="return")):
return pygame.K_RETURN
elif((key=="ESCAPE") or (key=="escape") or
(key=="ESC") or (key=="esc")):
return pygame.K_ESCAPE
elif((key=="SPACE") or (key=="space")):
return pygame.K_SPACE
elif((key=="LEFT_CONTROL") or (key=="left_control")):
return pygame.K_LCTRL
elif((key=="LEFT_SHIFT") or (key=="left_shift")):
return pygame.K_LSHIFT
elif(((key >= "A") and (key <= "Z")) or
((key >= "a") and (key <= "z"))):
return getattr(pygame, "K_" + key.lower())
elif((key >= "0") and (key <= "9")):
return getattr(pygame, "K_" + key)
return key
| 31.785714 | 61 | 0.513483 |
828e34a74cf3f6c4ea6e50b0fe01a8798c411e3a | 5,349 | py | Python | src/run_carla.py | PhuongLe/PilotNet | 28ebf1db0ed907d1ede219ec82dae657a947a12b | [
"MIT"
] | null | null | null | src/run_carla.py | PhuongLe/PilotNet | 28ebf1db0ed907d1ede219ec82dae657a947a12b | [
"MIT"
] | null | null | null | src/run_carla.py | PhuongLe/PilotNet | 28ebf1db0ed907d1ede219ec82dae657a947a12b | [
"MIT"
] | null | null | null | import tensorflow as tf
import carla
import scipy.misc
from nets.pilotNet import PilotNet
import cv2
from subprocess import call
import random
import time
import numpy as np
from multiprocessing import Queue
FLAGS = tf.app.flags.FLAGS
"""model from nvidia's training"""
# generated model after training
tf.app.flags.DEFINE_string(
'model', './data/models/model.ckpt',
"""Path to the model parameter file.""")
tf.app.flags.DEFINE_string(
'steer_image', './data/.logo/steering_wheel_image.jpg',
"""Steering wheel image to show corresponding steering wheel angle.""")
# OpenCV window layout (pixels).
WIN_MARGIN_LEFT = 240
WIN_MARGIN_TOP = 240
WIN_MARGIN_BETWEEN = 180
# Camera frame dimensions requested from the Carla sensor.
IM_WIDTH = 455
IM_HEIGHT = 256
# Number of frames produced so far by the sensor callback (process_img).
counter = 0
#counterMod = 0
# Bounded frame queue between the sensor thread and the main loop; the
# maxsize caps memory use for buffered frames.
imageQueue = Queue(maxsize = 100)
def process_img(image):
    """Carla camera callback: convert a raw 4-channel frame to 3 channels
    and enqueue it for the main loop.

    The flat ``image.raw_data`` buffer is reshaped to
    (IM_HEIGHT, IM_WIDTH, 4) and the 4th (alpha) channel is dropped before
    the frame is pushed onto ``imageQueue``. Because the queue is bounded,
    ``put`` blocks when it is full, throttling the sensor thread until the
    consumer catches up.

    Fix: removed dead commented-out debug code and the redundant
    ``global imageQueue`` declaration (the queue is only mutated via a
    method call, never rebound).
    """
    global counter  # rebound below; read by the main loop to pace consumption
    frame = np.array(image.raw_data).reshape((IM_HEIGHT, IM_WIDTH, 4))
    imageQueue.put(frame[:, :, :3])  # drop alpha -> 3-channel image
    counter += 1
# Actors spawned in the simulator; destroyed in the finally block below.
actor_list = []
if __name__ == '__main__':
    print("hello")
    # Grayscale steering-wheel image rotated each frame to visualize the angle.
    img = cv2.imread(FLAGS.steer_image, 0)
    rows,cols = img.shape
    # Visualization init
    cv2.namedWindow("Steering Wheel", cv2.WINDOW_NORMAL)
    cv2.moveWindow("Steering Wheel", WIN_MARGIN_LEFT, WIN_MARGIN_TOP)
    cv2.namedWindow("camera", cv2.WINDOW_AUTOSIZE)
    cv2.resizeWindow("camera", IM_WIDTH, IM_HEIGHT)
    cv2.moveWindow("camera", WIN_MARGIN_LEFT+cols+WIN_MARGIN_BETWEEN, WIN_MARGIN_TOP)
    try:
        # Connect to a locally running Carla server.
        client = carla.Client('localhost', 2000)
        client.set_timeout(2.0)
        world = client.get_world()
        blueprint_library = world.get_blueprint_library()
        bp = blueprint_library.filter('model3')[0]
        print(bp)
        # Spawn the vehicle at a random map spawn point, full throttle.
        spawn_point = random.choice(world.get_map().get_spawn_points())
        vehicle = world.spawn_actor(bp, spawn_point)
        vehicle.apply_control(carla.VehicleControl(throttle=1.0, steer=0.0))
        actor_list.append(vehicle)
        # get the blueprint for this sensor
        blueprint = blueprint_library.find('sensor.camera.rgb')
        # change the dimensions of the image
        blueprint.set_attribute('image_size_x', '{w}'.format(w=IM_WIDTH))
        blueprint.set_attribute('image_size_y', '{h}'.format(h=IM_HEIGHT))
        blueprint.set_attribute('fov', '110')
        #blueprint.set_attribute('sensor_tick', '0.1')
        # Adjust sensor relative to vehicle
        transform = carla.Transform(carla.Location(x=2.5, z=0.75))
        # spawn the sensor and attach to vehicle.
        sensor = world.spawn_actor(blueprint, transform, attach_to=vehicle)
        # add sensor to list of actors
        actor_list.append(sensor)
        # Feed every camera frame through process_img (runs on the sensor thread).
        #sensor.listen(lambda image: image.save_to_disk('output/%.6d.jpg' % image.frame))
        sensor.listen(lambda data: process_img(data))
        #time.sleep(0.1)
        with tf.Graph().as_default():
            smoothed_angle = 0
            i=0
            # construct model
            model = PilotNet()
            saver = tf.train.Saver()
            with tf.Session() as sess:
                # restore model variables
                saver.restore(sess, FLAGS.model)
                # Main loop: exit when 'q' is pressed in an OpenCV window.
                while(cv2.waitKey(10) != ord('q')):
                    # Busy-wait until the sensor thread has produced frame i.
                    while i>=counter:
                        time.sleep(0.01)
                    #full_image = scipy.misc.imread("./output1" + "/" + str(i) + ".jpg", mode="RGB")
                    full_image = imageQueue.get()
                    # Crop to the bottom 150 rows (road area) and resize to the
                    # network's 66x200 input, scaled to [0, 1].
                    image = scipy.misc.imresize(full_image[-150:], [66, 200]) / 255.0
                    steering = sess.run(
                        model.steering,
                        feed_dict={
                            model.image_input: [image],
                            model.keep_prob: 1.0
                        }
                    )
                    # Network output is in radians; convert to degrees.
                    degrees = steering[0][0] * 180.0 / scipy.pi
                    #degrees = 0.1
                    call("clear")
                    #print("Queue size = {0}. Rending image..{1}".format(imageQueue.qsize(), i))
                    print("Predicted steering angle: " + str(degrees) + " degrees")
                    # convert RGB due to dataset format
                    cv2.imshow("camera", cv2.cvtColor(full_image, cv2.COLOR_RGB2BGR))
                    # NOTE(review): the line below only prints the formatted
                    # string under Python 2; on Python 3 it raises
                    # AttributeError (print(...) returns None) — confirm the
                    # intended interpreter version.
                    print("camera image size: {} x {}").format(full_image.shape[0], full_image.shape[1])
                    # make smooth angle transitions by turning the steering wheel based on the difference of the current angle
                    # and the predicted988j98u4 angle
                    # NOTE(review): when degrees == smoothed_angle the divisor
                    # abs(degrees - smoothed_angle) is 0 -> ZeroDivisionError.
                    smoothed_angle += 0.2 * pow(abs((degrees - smoothed_angle)), 2.0 / 3.0) * (degrees - smoothed_angle) / abs(degrees - smoothed_angle)
                    M = cv2.getRotationMatrix2D((cols/2,rows/2), -smoothed_angle, 1)
                    dst = cv2.warpAffine(img,M,(cols,rows))
                    cv2.imshow("Steering Wheel", dst)
                    #if i>100:
                    vehicle.apply_control(carla.VehicleControl(throttle=1.0, steer=degrees))
                    i += 1
    finally:
        # Always clean up windows and simulator actors, even on error/Ctrl-C.
        cv2.destroyAllWindows()
        print('destroying actors')
        for actor in actor_list:
            actor.destroy()
        print('done.')
443c686ed283b1df9577b5a4508742bf799628c2 | 2,104 | py | Python | examples/ad_manager/v201808/content_metadata_key_hierarchy_service/get_all_content_metadata_key_hierarchies.py | khanhnhk/googleads-python-lib | 1e882141b8eb663b55dd582ce0f4fbf3cd2f672d | [
"Apache-2.0"
] | 1 | 2021-12-30T15:21:42.000Z | 2021-12-30T15:21:42.000Z | examples/ad_manager/v201808/content_metadata_key_hierarchy_service/get_all_content_metadata_key_hierarchies.py | benlistyg/googleads-python-lib | 1e882141b8eb663b55dd582ce0f4fbf3cd2f672d | [
"Apache-2.0"
] | null | null | null | examples/ad_manager/v201808/content_metadata_key_hierarchy_service/get_all_content_metadata_key_hierarchies.py | benlistyg/googleads-python-lib | 1e882141b8eb663b55dd582ce0f4fbf3cd2f672d | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
#
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example gets all content metadata key hierarchies.
"""
# Import appropriate modules from the client library.
from googleads import ad_manager
def main(client):
# Initialize appropriate service.
content_metadata_key_hierarchy_service = client.GetService(
'ContentMetadataKeyHierarchyService', version='v201808')
# Create a statement to select content metadata key hierarchies.
statement = ad_manager.StatementBuilder()
# Retrieve a small amount of content metadata key hierarchies at a time,
# paging through until all content metadata key hierarchies have been
# retrieved.
while True:
response = (
content_metadata_key_hierarchy_service
.getContentMetadataKeyHierarchiesByStatement(
statement.ToStatement()))
if 'results' in response and len(response['results']):
for content_metadata_key_hierarchy in response['results']:
# Print out some information for each content metadata key hierarchy.
print('Content metadata key hierarchy with ID "%d" and name "%s" was '
'found.\n' % (content_metadata_key_hierarchy['id'],
content_metadata_key_hierarchy['name']))
statement.offset += statement.limit
else:
break
print '\nNumber of results found: %s' % response['totalResultSetSize']
if __name__ == '__main__':
# Initialize client object.
ad_manager_client = ad_manager.AdManagerClient.LoadFromStorage()
main(ad_manager_client)
| 37.571429 | 78 | 0.73289 |
9d58739f5be7ae26deac7d1d5273fb84bcd7353c | 232 | py | Python | Spider/PocketLifeSpider/PocketLifeSpider/modify_src.py | wpwbb510582246/PocketFIlm | 356d057810fd48a77197fe0f00b1f2adccb02d39 | [
"MIT"
] | 17 | 2019-09-11T08:37:26.000Z | 2021-08-17T12:08:54.000Z | Spider/PocketLifeSpider/PocketLifeSpider/modify_src.py | wpwbb510582246/PocketFIlm | 356d057810fd48a77197fe0f00b1f2adccb02d39 | [
"MIT"
] | 5 | 2019-10-26T00:28:35.000Z | 2021-05-08T09:10:36.000Z | Spider/PocketLifeSpider/PocketLifeSpider/modify_src.py | wpwbb510582246/PocketFIlm | 356d057810fd48a77197fe0f00b1f2adccb02d39 | [
"MIT"
] | 8 | 2020-03-17T08:17:35.000Z | 2021-07-30T15:48:36.000Z | import sys
import os
curPath = os.path.abspath(os.path.dirname(__file__))
rootPath = os.path.split(curPath)[0]
sys.path.append(rootPath)
from PocketLifeSpider.util.CommonUtils import *
if __name__ == '__main__':
modify_src()
| 19.333333 | 52 | 0.75431 |
33a3ee095d9406fe660570aebe62ca788d5da6af | 207 | py | Python | modules/stage_1.py | ElijahBeach/C2-Server-Project | 89e6d418790493aa222cca5b6ea7f3f519e4e478 | [
"MIT"
] | 3 | 2022-01-14T01:55:25.000Z | 2022-03-30T01:08:16.000Z | modules/stage_1.py | ElijahBeach/C2-Server-Project | 89e6d418790493aa222cca5b6ea7f3f519e4e478 | [
"MIT"
] | null | null | null | modules/stage_1.py | ElijahBeach/C2-Server-Project | 89e6d418790493aa222cca5b6ea7f3f519e4e478 | [
"MIT"
] | 2 | 2022-01-15T14:30:55.000Z | 2022-01-15T16:04:15.000Z | import json
def run(**args):
print('[$] Enter stage 1')
basic_config =json.dumps([{"module" : "dir_lister"},{"module" : "enviro"},{"module" : "sleep"},{"module" : "stage_2_qrw"}])
return basic_config
| 29.571429 | 125 | 0.63285 |
9ad17eb978c349a1b2e9134b077176a1d801cdfd | 3,251 | py | Python | src/test/tests/databases/singlemulti.py | cstatz/visit | f352f3984fa77392e81acbaa6943778a779f0435 | [
"BSD-3-Clause"
] | null | null | null | src/test/tests/databases/singlemulti.py | cstatz/visit | f352f3984fa77392e81acbaa6943778a779f0435 | [
"BSD-3-Clause"
] | null | null | null | src/test/tests/databases/singlemulti.py | cstatz/visit | f352f3984fa77392e81acbaa6943778a779f0435 | [
"BSD-3-Clause"
] | 1 | 2020-03-18T23:17:43.000Z | 2020-03-18T23:17:43.000Z | # ----------------------------------------------------------------------------
# CLASSES: nightly
#
# Test Case: singlemulti.py
#
# Tests: mesh - 3D, curvilinear, single domain
# plots - Pseudocolor
# databases - Silo
#
# Purpose: This test case tests the ability of VisIt to have single
# time state files in the same window as multi time state
# windows and do the right thing when changing time states.
#
# Bugs: '4011
#
# Programmer: Brad Whitlock
# Date: Thu Mar 18 13:45:29 PST 2004
#
# Modifications:
#
# Mark C. Miller, Wed Jan 20 07:37:11 PST 2010
# Added ability to swtich between Silo's HDF5 and PDB data.
# ----------------------------------------------------------------------------
import string
#
# Look at the first few lines of the string representation of the
# WindowInformation to see the list of time sliders, etc.
#
def TestWindowInformation(testname):
    """Register a test named *testname* whose text is the first five lines
    of the WindowInformation dump (padded with blank lines when the dump is
    shorter) - enough to cover the time-slider list and active state."""
    info_lines = str(GetWindowInformation()).split("\n")
    head = info_lines[:5]
    # Each kept line gets a trailing newline; pad so exactly five newline
    # terminators are emitted, matching the fixed-size summary format.
    summary = "\n".join(head) + "\n"
    summary += "\n" * (5 - len(head))
    TestText(testname, summary)
# Create a Pseudocolor plot of wave by opening it up at a late time state.
# Create a Pseudocolor plot of wave (a multi time state database), opening it
# at a late time state (index 20).
OpenDatabase(silo_data_path("wave*.silo database"), 20)
AddPlot("Pseudocolor", "pressure")
DrawPlots()
# Set the view.
v0 = View3DAttributes()
v0.viewNormal = (-0.661743, 0.517608, 0.542382)
v0.focus = (5, 0.757692, 2.5)
v0.viewUp = (0.370798, 0.854716, -0.363277)
v0.viewAngle = 30
v0.parallelScale = 5.63924
v0.nearPlane = -11.2785
v0.farPlane = 11.2785
v0.imagePan = (0.00100868, 0.0454815)
v0.imageZoom = 1.17935
v0.perspective = 1
SetView3D(v0)
Test("singlemulti00")
# Convert the WindowInformation to a string and use that as a test case.
# The WindowInformation contains the list of time sliders, the active time
# state, and the states for each time slider.
TestWindowInformation("singlemulti01")
# Create a Pseudocolor plot of curv3d, a single time state database.
OpenDatabase(silo_data_path("curv3d.silo"))
AddPlot("Pseudocolor", "u")
AddOperator("Transform")
# Scale/translate curv3d so it sits next to the wave dataset in the scene.
t = TransformAttributes()
t.doScale = 1
t.scaleX = 0.5
t.scaleY = 0.5
t.scaleZ = 0.1667
t.doTranslate = 1
t.translateX = 5
t.translateY = 0
t.translateZ = -5
SetOperatorOptions(t)
DrawPlots()
# Set the view again
v1 = View3DAttributes()
v1.viewNormal = (-0.661743, 0.517608, 0.542382)
v1.focus = (5, 1.25, 0)
v1.viewUp = (0.370798, 0.854716, -0.363277)
v1.viewAngle = 30
v1.parallelScale = 7.1807
v1.nearPlane = -14.3614
v1.farPlane = 14.3614
v1.imagePan = (0.00100868, 0.0454815)
v1.imageZoom = 1.17935
v1.perspective = 1
SetView3D(v1)
Test("singlemulti02")
# Make sure there is still just one time slider.
TestWindowInformation("singlemulti03")
# Go to the middle time slider state.
# NOTE(review): '/' is integer division under Python 2 (this script uses the
# Py2-era `string` module elsewhere); use '//' if ported to Python 3.
SetTimeSliderState(TimeSliderGetNStates() / 2)
Test("singlemulti04")
# Check the time states
TestWindowInformation("singlemulti05")
# Go to the last time slider state.
SetTimeSliderState(TimeSliderGetNStates() - 1)
Test("singlemulti06")
# Check the time states
TestWindowInformation("singlemulti07")
Exit()
| 27.786325 | 78 | 0.663488 |
dcba915f17526b50162640137aaf5e1c805a35b4 | 9,639 | py | Python | litedram/frontend/dma.py | thirtythreeforty/litedram | db879ae3f7d591482e4665801c946241bb663bce | [
"OLDAP-2.6",
"OLDAP-2.3",
"OLDAP-2.7"
] | 237 | 2016-08-09T05:53:09.000Z | 2022-03-29T15:34:00.000Z | litedram/frontend/dma.py | thirtythreeforty/litedram | db879ae3f7d591482e4665801c946241bb663bce | [
"OLDAP-2.6",
"OLDAP-2.3",
"OLDAP-2.7"
] | 280 | 2016-12-17T13:44:35.000Z | 2022-03-31T10:17:57.000Z | litedram/frontend/dma.py | thirtythreeforty/litedram | db879ae3f7d591482e4665801c946241bb663bce | [
"OLDAP-2.6",
"OLDAP-2.3",
"OLDAP-2.7"
] | 84 | 2016-05-23T08:58:46.000Z | 2022-02-22T17:57:40.000Z | #
# This file is part of LiteDRAM.
#
# Copyright (c) 2016-2021 Florent Kermarrec <florent@enjoy-digital.fr>
# Copyright (c) 2015 Sebastien Bourdeauducq <sb@m-labs.hk>
# Copyright (c) 2018 John Sully <john@csquare.ca>
# Copyright (c) 2016 Tim 'mithro' Ansell <mithro@mithis.com>
# SPDX-License-Identifier: BSD-2-Clause
"""Direct Memory Access (DMA) reader and writer modules."""
from math import log2
from migen import *
from litex.soc.interconnect.csr import *
from litex.soc.interconnect import stream
from litedram.common import LiteDRAMNativePort
from litedram.frontend.axi import LiteDRAMAXIPort
# LiteDRAMDMAReader --------------------------------------------------------------------------------
class LiteDRAMDMAReader(Module, AutoCSR):
    """Read data from DRAM memory.
    For every address written to the sink, one DRAM word will be produced on
    the source.
    Parameters
    ----------
    port : port
        Port on the DRAM memory controller to read from (Native or AXI).
    fifo_depth : int
        How many request results the output FIFO can contain (and thus how many
        read requests can be outstanding at once).
    fifo_buffered : bool
        Implement FIFO in Block Ram.
    Attributes
    ----------
    sink : Record("address")
        Sink for DRAM addresses to be read.
    source : Record("data")
        Source for DRAM word results from reading.
    rsv_level: Signal()
        FIFO reservation level counter
    """
    def __init__(self, port, fifo_depth=16, fifo_buffered=False, with_csr=False):
        assert isinstance(port, (LiteDRAMNativePort, LiteDRAMAXIPort))
        self.port = port
        self.sink = sink = stream.Endpoint([("address", port.address_width)])
        self.source = source = stream.Endpoint([("data", port.data_width)])
        # # #
        # Native / AXI selection: pick the command / read-data channel pair
        # that matches the concrete port type.
        is_native = isinstance(port, LiteDRAMNativePort)
        is_axi = isinstance(port, LiteDRAMAXIPort)
        if is_native:
            (cmd, rdata) = port.cmd, port.rdata
        elif is_axi:
            (cmd, rdata) = port.ar, port.r
        else:
            raise NotImplementedError
        # Request issuance -------------------------------------------------------------------------
        # A read command is only issued when FIFO space has been reserved for
        # its result (request_enable), so returned data can never be dropped.
        request_enable = Signal()
        request_issued = Signal()
        if is_native:
            self.comb += cmd.we.eq(0)  # native port: this is a read access
        if is_axi:
            self.comb += cmd.size.eq(int(log2(port.data_width//8)))  # full-width beats
        self.comb += [
            cmd.addr.eq(sink.address),
            cmd.valid.eq(sink.valid & request_enable),
            sink.ready.eq(cmd.ready & request_enable),
            request_issued.eq(cmd.valid & cmd.ready)
        ]
        # FIFO reservation level counter -----------------------------------------------------------
        # incremented when data is planned to be queued
        # decremented when data is dequeued
        data_dequeued = Signal()
        self.rsv_level = rsv_level = Signal(max=fifo_depth+1)
        self.sync += [
            If(request_issued,
                # issue+dequeue in the same cycle cancel out (level unchanged)
                If(~data_dequeued, rsv_level.eq(self.rsv_level + 1))
            ).Elif(data_dequeued,
                rsv_level.eq(rsv_level - 1)
            )
        ]
        self.comb += request_enable.eq(rsv_level != fifo_depth)
        # FIFO -------------------------------------------------------------------------------------
        # Buffers returned read data until the consumer drains it from source.
        fifo = stream.SyncFIFO([("data", port.data_width)], fifo_depth, fifo_buffered)
        self.submodules += fifo
        self.comb += [
            rdata.connect(fifo.sink, omit={"id", "resp"}),
            fifo.source.connect(source),
            data_dequeued.eq(source.valid & source.ready)
        ]
        if with_csr:
            self.add_csr()
    def add_csr(self, default_base=0, default_length=0, default_enable=0, default_loop=0):
        """Add CSRs implementing a simple base/length(/loop) DMA engine that
        feeds the sink with sequential word addresses while enabled."""
        self._base = CSRStorage(32, reset=default_base)
        self._length = CSRStorage(32, reset=default_length)
        self._enable = CSRStorage(reset=default_enable)
        self._done = CSRStatus()
        self._loop = CSRStorage(reset=default_loop)
        self._offset = CSRStatus(32)
        # # #
        # base/length CSRs are in bytes; shift converts them to word addresses.
        shift = log2_int(self.port.data_width//8)
        base = Signal(self.port.address_width)
        offset = Signal(self.port.address_width)
        length = Signal(self.port.address_width)
        self.comb += base.eq(self._base.storage[shift:])
        self.comb += length.eq(self._length.storage[shift:])
        self.comb += self._offset.status.eq(offset)
        fsm = FSM(reset_state="IDLE")
        fsm = ResetInserter()(fsm)
        self.submodules.fsm = fsm
        # Clearing the enable CSR holds the FSM in reset (back to IDLE).
        self.comb += fsm.reset.eq(~self._enable.storage)
        fsm.act("IDLE",
            NextValue(offset, 0),
            NextState("RUN"),
        )
        fsm.act("RUN",
            self.sink.valid.eq(1),
            self.sink.last.eq(offset == (length - 1)),
            self.sink.address.eq(base + offset),
            If(self.sink.ready,
                NextValue(offset, offset + 1),
                If(self.sink.last,
                    If(self._loop.storage,
                        NextValue(offset, 0)
                    ).Else(
                        NextState("DONE")
                    )
                )
            )
        )
        fsm.act("DONE", self._done.status.eq(1))
# LiteDRAMDMAWriter --------------------------------------------------------------------------------
class LiteDRAMDMAWriter(Module, AutoCSR):
    """Write data to DRAM memory.
    Parameters
    ----------
    port : port
        Port on the DRAM memory controller to write to (Native or AXI).
    fifo_depth : int
        How many requests the input FIFO can contain (and thus how many write
        requests can be outstanding at once).
    fifo_buffered : bool
        Implement FIFO in Block Ram.
    Attributes
    ----------
    sink : Record("address", "data")
        Sink for DRAM addresses and DRAM data word to be written too.
    """
    def __init__(self, port, fifo_depth=16, fifo_buffered=False, with_csr=False):
        assert isinstance(port, (LiteDRAMNativePort, LiteDRAMAXIPort))
        self.port = port
        self.sink = sink = stream.Endpoint([("address", port.address_width),
                                            ("data", port.data_width)])
        # # #
        # Native / AXI selection -------------------------------------------------------------------
        # Pick the command / write-data channel pair for the concrete port type.
        is_native = isinstance(port, LiteDRAMNativePort)
        is_axi = isinstance(port, LiteDRAMAXIPort)
        if is_native:
            (cmd, wdata) = port.cmd, port.wdata
        elif is_axi:
            (cmd, wdata) = port.aw, port.w
        else:
            raise NotImplementedError
        # FIFO -------------------------------------------------------------------------------------
        # Holds data words whose write command has already been accepted.
        fifo = stream.SyncFIFO([("data", port.data_width)], fifo_depth, fifo_buffered)
        self.submodules += fifo
        if is_native:
            self.comb += cmd.we.eq(1)  # native port: this is a write access
        if is_axi:
            self.comb += cmd.size.eq(int(log2(port.data_width//8)))  # full-width beats
        # Command is issued and the data word queued in the same cycle: a sink
        # transaction requires both cmd.ready and FIFO space.
        self.comb += [
            cmd.addr.eq(sink.address),
            cmd.valid.eq(fifo.sink.ready & sink.valid),
            sink.ready.eq(fifo.sink.ready & cmd.ready),
            fifo.sink.valid.eq(sink.valid & cmd.ready),
            fifo.sink.data.eq(sink.data)
        ]
        # All byte lanes enabled: only full-word writes are generated.
        if is_native:
            self.comb += wdata.we.eq(2**(port.data_width//8)-1)
        if is_axi:
            self.comb += wdata.strb.eq(2**(port.data_width//8)-1)
        self.comb += [
            wdata.valid.eq(fifo.source.valid),
            fifo.source.ready.eq(wdata.ready),
            wdata.data.eq(fifo.source.data)
        ]
        if with_csr:
            self.add_csr()
    def add_csr(self, default_base=0, default_length=0, default_enable=0, default_loop=0):
        """Add CSRs implementing a base/length(/loop) DMA engine: data pushed
        into the (replaced, data-only) sink is written to sequential DRAM
        addresses; the original address+data sink is kept as ``_sink``."""
        self._sink = self.sink
        self.sink = stream.Endpoint([("data", self.port.data_width)])
        self._base = CSRStorage(32, reset=default_base)
        self._length = CSRStorage(32, reset=default_length)
        self._enable = CSRStorage(reset=default_enable)
        self._done = CSRStatus()
        self._loop = CSRStorage(reset=default_loop)
        self._offset = CSRStatus(32)
        # # #
        # base/length CSRs are in bytes; shift converts them to word addresses.
        shift = log2_int(self.port.data_width//8)
        base = Signal(self.port.address_width)
        offset = Signal(self.port.address_width)
        length = Signal(self.port.address_width)
        self.comb += base.eq(self._base.storage[shift:])
        self.comb += length.eq(self._length.storage[shift:])
        self.comb += self._offset.status.eq(offset)
        fsm = FSM(reset_state="IDLE")
        fsm = ResetInserter()(fsm)
        self.submodules.fsm = fsm
        # Clearing the enable CSR holds the FSM in reset (back to IDLE).
        self.comb += fsm.reset.eq(~self._enable.storage)
        fsm.act("IDLE",
            self.sink.ready.eq(1),
            NextValue(offset, 0),
            NextState("RUN"),
        )
        fsm.act("RUN",
            self._sink.valid.eq(self.sink.valid),
            self._sink.last.eq(offset == (length - 1)),
            self._sink.address.eq(base + offset),
            self._sink.data.eq(self.sink.data),
            self.sink.ready.eq(self._sink.ready),
            If(self.sink.valid & self.sink.ready,
                NextValue(offset, offset + 1),
                If(self._sink.last,
                    If(self._loop.storage,
                        NextValue(offset, 0)
                    ).Else(
                        NextState("DONE")
                    )
                )
            )
        )
        fsm.act("DONE", self._done.status.eq(1))
| 34.923913 | 100 | 0.541861 |
10396818e5c97b3de009b5375a01071b270e8345 | 11,951 | py | Python | yt/fields/derived_field.py | bkhamesra/yt-EinsteinToolkit | 576bf88b5cd706fd577c513c23b1db07ec5f4cd2 | [
"BSD-3-Clause-Clear"
] | 1 | 2021-11-29T21:59:06.000Z | 2021-11-29T21:59:06.000Z | yt/fields/derived_field.py | bkhamesra/yt-EinsteinToolkit | 576bf88b5cd706fd577c513c23b1db07ec5f4cd2 | [
"BSD-3-Clause-Clear"
] | null | null | null | yt/fields/derived_field.py | bkhamesra/yt-EinsteinToolkit | 576bf88b5cd706fd577c513c23b1db07ec5f4cd2 | [
"BSD-3-Clause-Clear"
] | null | null | null | """
Derived field base class.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2013, yt Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
import contextlib
import inspect
from yt.extern.six import string_types, PY2
from yt.funcs import \
ensure_list
from .field_exceptions import \
NeedsGridType, \
NeedsOriginalGrid, \
NeedsDataField, \
NeedsProperty, \
NeedsParameter, \
NeedsParameterValue, \
FieldUnitsError
from .field_detector import \
FieldDetector
from yt.units.unit_object import \
Unit
import yt.units.dimensions as ytdims
from yt.utilities.exceptions import \
YTFieldNotFound
def TranslationFunc(field_name):
    """Build an alias field function that returns a copy of ``data[field_name]``."""
    def _TranslationFunc(field, data):
        # Copy so that downstream in-place modifications cannot mutate the
        # aliased field's data.
        return data[field_name].copy()
    # Keep the inner function's name and this attribute stable: DerivedField's
    # __repr__ dispatches on both to label alias fields.
    _TranslationFunc.alias_name = field_name
    return _TranslationFunc
def NullFunc(field, data):
    # Placeholder function for on-disk fields: calling it directly is an error.
    raise YTFieldNotFound(field.name)
class DerivedField(object):
    """
    This is the base class used to describe a cell-by-cell derived field.
    Parameters
    ----------
    name : str
        is the name of the field.
    function : callable
        A function handle that defines the field. Should accept
        arguments (field, data)
    units : str
        A plain text string encoding the unit, or a query to a unit system of
        a dataset. Powers must be in python syntax (** instead of ^). If set
        to "auto" the units will be inferred from the units of the return
        value of the field function, and the dimensions keyword must also be
        set (see below).
    take_log : bool
        Describes whether the field should be logged
    validators : list
        A list of :class:`FieldValidator` objects
    particle_type : bool
        Is this a particle (1D) field?
    vector_field : bool
        Describes the dimensionality of the field. Currently unused.
    display_field : bool
        Governs its appearance in the dropdowns in Reason
    not_in_all : bool
        Used for baryon fields from the data that are not in all the grids
    display_name : str
        A name used in the plots
    output_units : str
        For fields that exist on disk, which we may want to convert to other
        fields or that get aliased to themselves, we can specify a different
        desired output unit than the unit found on disk.
    dimensions : str or object from yt.units.dimensions
        The dimensions of the field, only needed if units="auto" and only used
        for error checking.
    """
    def __init__(self, name, function, units=None,
                 take_log=True, validators=None,
                 particle_type=False, vector_field=False, display_field=True,
                 not_in_all=False, display_name=None, output_units=None,
                 dimensions=None, ds=None):
        self.name = name
        self.take_log = take_log
        self.display_name = display_name
        self.not_in_all = not_in_all
        self.display_field = display_field
        self.particle_type = particle_type
        self.vector_field = vector_field
        self.ds = ds
        self._function = function
        if validators:
            self.validators = ensure_list(validators)
        else:
            self.validators = []
        # handle units
        if units is None:
            self.units = ''
        elif isinstance(units, string_types):
            if units.lower() == 'auto':
                if dimensions is None:
                    raise RuntimeError("To set units='auto', please specify the dimensions "
                                       "of the field with dimensions=<dimensions of field>!")
                # None signals "infer units from the field function's result".
                self.units = None
            else:
                self.units = units
        elif isinstance(units, Unit):
            self.units = str(units)
        else:
            raise FieldUnitsError("Cannot handle units '%s' (type %s)." \
                                  "Please provide a string or Unit " \
                                  "object." % (units, type(units)) )
        if output_units is None:
            output_units = self.units
        self.output_units = output_units
        # A string dimension name is resolved against yt.units.dimensions.
        if isinstance(dimensions, string_types):
            dimensions = getattr(ytdims, dimensions)
        self.dimensions = dimensions
    def _copy_def(self):
        # Return constructor-style attributes as a dict, for cloning fields.
        dd = {}
        dd['name'] = self.name
        dd['units'] = self.units
        dd['take_log'] = self.take_log
        dd['validators'] = list(self.validators)
        dd['particle_type'] = self.particle_type
        dd['vector_field'] = self.vector_field
        dd['display_field'] = True
        dd['not_in_all'] = self.not_in_all
        dd['display_name'] = self.display_name
        return dd
    def get_units(self):
        # LaTeX representation of this field's units.
        u = Unit(self.units, registry=self.ds.unit_registry)
        return u.latex_representation()
    def get_projected_units(self):
        # Units after projection: multiplied by a length ('cm').
        u = Unit(self.units, registry=self.ds.unit_registry)*Unit('cm')
        return u.latex_representation()
    def check_available(self, data):
        """
        This raises an exception of the appropriate type if the set of
        validation mechanisms are not met, and otherwise returns True.
        """
        for validator in self.validators:
            validator(data)
        # If we don't get an exception, we're good to go
        return True
    def get_dependencies(self, *args, **kwargs):
        """
        This returns a list of names of fields that this field depends on.
        """
        e = FieldDetector(*args, **kwargs)
        if self._function.__name__ == '<lambda>':
            # Lambdas cannot be traced through the detector; just record the name.
            e.requested.append(self.name)
        else:
            # Evaluating via the detector records every field access it makes.
            e[self.name]
        return e
    _unit_registry = None
    @contextlib.contextmanager
    def unit_registry(self, data):
        # Temporarily adopt the unit registry of *data* (or of its dataset)
        # while the field function runs; the previous registry is restored.
        old_registry = self._unit_registry
        if hasattr(data, 'unit_registry'):
            ur = data.unit_registry
        elif hasattr(data, 'ds'):
            ur = data.ds.unit_registry
        else:
            ur = None
        self._unit_registry = ur
        yield
        self._unit_registry = old_registry
    def __call__(self, data):
        """ Return the value of the field in a given *data* object. """
        self.check_available(data)
        original_fields = data.keys() # Copy
        # NOTE(review): on a plain py3 dict, .keys() is a live view rather than
        # a copy; this relies on yt data containers returning a list -- confirm.
        if self._function is NullFunc:
            raise RuntimeError(
                "Something has gone terribly wrong, _function is NullFunc " +
                "for %s" % (self.name,))
        with self.unit_registry(data):
            dd = self._function(self, data)
        # Drop any intermediate fields the function cached on *data*.
        for field_name in data.keys():
            if field_name not in original_fields:
                del data[field_name]
        return dd
    def get_source(self):
        """
        Return a string containing the source of the function (if possible.)
        """
        return inspect.getsource(self._function)
    def get_label(self, projected=False):
        """
        Return a data label for the given field, including units.
        """
        name = self.name[1]
        if self.display_name is not None:
            name = self.display_name
        # Start with the field name
        data_label = r"$\rm{%s}" % name
        # Grab the correct units
        if projected:
            raise NotImplementedError
        else:
            units = Unit(self.units, registry=self.ds.unit_registry)
        # Add unit label
        if not units.is_dimensionless:
            data_label += r"\ \ (%s)" % (units.latex_representation())
        data_label += r"$"
        return data_label
    def __repr__(self):
        # Labels the field as on-disk, alias (see TranslationFunc) or derived,
        # dispatching on the wrapped function and its name.
        if PY2:
            func_name = self._function.func_name
        else:
            func_name = self._function.__name__
        if self._function == NullFunc:
            s = "On-Disk Field "
        elif func_name == "_TranslationFunc":
            s = "Alias Field for \"%s\" " % (self._function.alias_name,)
        else:
            s = "Derived Field "
        if isinstance(self.name, tuple):
            s += "(%s, %s): " % self.name
        else:
            s += "%s: " % (self.name)
        s += "(units: %s" % self.units
        if self.display_name is not None:
            s += ", display_name: '%s'" % (self.display_name)
        if self.particle_type:
            s += ", particle field"
        s += ")"
        return s
class FieldValidator(object):
    """Base class for the Validate* checks below; subclasses raise a Needs*
    exception from ``__call__`` when their requirement is not satisfied."""
    pass
class ValidateParameter(FieldValidator):
    def __init__(self, parameters, parameter_values=None):
        """
        Require that the dataset carries the given field parameter(s).
        When *parameter_values* is given, additionally require that the field
        works for every permutation of those parameter values.
        """
        FieldValidator.__init__(self)
        self.parameters = ensure_list(parameters)
        self.parameter_values = parameter_values
    def __call__(self, data):
        # During dependency detection, report the required value permutations.
        if self.parameter_values is not None and isinstance(data, FieldDetector):
            raise NeedsParameterValue(self.parameter_values)
        missing = [p for p in self.parameters
                   if not data.has_field_parameter(p)]
        if missing:
            raise NeedsParameter(missing)
        return True
class ValidateDataField(FieldValidator):
    def __init__(self, field):
        """
        Require that every field in *field* is stored in the output file.
        """
        FieldValidator.__init__(self)
        self.fields = ensure_list(field)
    def __call__(self, data):
        # Detection passes are exempt: the detector fabricates every field.
        if isinstance(data, FieldDetector):
            return True
        on_disk = data.index.field_list
        missing = [f for f in self.fields if f not in on_disk]
        if missing:
            raise NeedsDataField(missing)
        return True
class ValidateProperty(FieldValidator):
    def __init__(self, prop):
        """
        Require that the data object exposes the python attribute(s) *prop*.
        """
        FieldValidator.__init__(self)
        self.prop = ensure_list(prop)
    def __call__(self, data):
        missing = [p for p in self.prop if not hasattr(data, p)]
        if missing:
            raise NeedsProperty(missing)
        return True
class ValidateSpatial(FieldValidator):
    def __init__(self, ghost_zones=0, fields=None):
        """
        Require spatial (three-dimensional) data, carrying at least
        *ghost_zones* ghost zones.
        """
        FieldValidator.__init__(self)
        self.ghost_zones = ghost_zones
        self.fields = fields
    def __call__(self, data):
        # "Spatial" means the object exposes a three-dimensional grid
        # structure (its _spatial flag is set).
        spatial = getattr(data, '_spatial', False)
        if spatial and self.ghost_zones <= data._num_ghost_zones:
            return True
        raise NeedsGridType(self.ghost_zones, self.fields)
class ValidateGridType(FieldValidator):
    def __init__(self):
        """
        Require an actual AMR grid patch, not a covering grid of any kind.
        """
        FieldValidator.__init__(self)
    def __call__(self, data):
        # Detection passes and genuine grid patches are both acceptable.
        if isinstance(data, FieldDetector):
            return True
        if getattr(data, "_type_name", None) == 'grid':
            return True
        raise NeedsOriginalGrid()
| 34.440922 | 93 | 0.601038 |
bbf50150ff14d4574eb27424c1dd059df89ef687 | 79 | py | Python | fclpy/lispenv.py | fclpy/fclpy | 30808a2e3f212b676ba30f6194d60d02659c8b43 | [
"MIT"
] | 1 | 2021-09-14T11:23:57.000Z | 2021-09-14T11:23:57.000Z | fclpy/lispenv.py | fclpy/fclpy | 30808a2e3f212b676ba30f6194d60d02659c8b43 | [
"MIT"
] | null | null | null | fclpy/lispenv.py | fclpy/fclpy | 30808a2e3f212b676ba30f6194d60d02659c8b43 | [
"MIT"
] | null | null | null |
import fclpy.lisptype
# Module-level Environment instance, shared by every importer of this module.
current_environment = fclpy.lisptype.Environment()
6109ed76cc8ad255dad468e5ec52defead6b32c9 | 2,940 | py | Python | tests/test_environment.py | VeranosTech/alembic | 7c352124d089c138bb063c9241eb55f929a06742 | [
"MIT"
] | null | null | null | tests/test_environment.py | VeranosTech/alembic | 7c352124d089c138bb063c9241eb55f929a06742 | [
"MIT"
] | null | null | null | tests/test_environment.py | VeranosTech/alembic | 7c352124d089c138bb063c9241eb55f929a06742 | [
"MIT"
] | 6 | 2018-05-10T01:19:33.000Z | 2019-10-07T02:01:01.000Z | #!coding: utf-8
from alembic.environment import EnvironmentContext
from alembic.migration import MigrationContext
from alembic.script import ScriptDirectory
from alembic.testing import eq_
from alembic.testing import is_
from alembic.testing.assertions import expect_warnings
from alembic.testing.env import _no_sql_testing_config
from alembic.testing.env import _sqlite_file_db
from alembic.testing.env import clear_staging_env
from alembic.testing.env import staging_env
from alembic.testing.env import write_script
from alembic.testing.fixtures import TestBase
from alembic.testing.mock import call
from alembic.testing.mock import MagicMock
from alembic.testing.mock import Mock
class EnvironmentTest(TestBase):
def setUp(self):
staging_env()
self.cfg = _no_sql_testing_config()
def tearDown(self):
clear_staging_env()
def _fixture(self, **kw):
script = ScriptDirectory.from_config(self.cfg)
env = EnvironmentContext(self.cfg, script, **kw)
return env
def test_x_arg(self):
env = self._fixture()
self.cfg.cmd_opts = Mock(x="y=5")
eq_(env.get_x_argument(), "y=5")
def test_x_arg_asdict(self):
env = self._fixture()
self.cfg.cmd_opts = Mock(x=["y=5"])
eq_(env.get_x_argument(as_dictionary=True), {"y": "5"})
def test_x_arg_no_opts(self):
env = self._fixture()
eq_(env.get_x_argument(), [])
def test_x_arg_no_opts_asdict(self):
env = self._fixture()
eq_(env.get_x_argument(as_dictionary=True), {})
def test_tag_arg(self):
env = self._fixture(tag="x")
eq_(env.get_tag_argument(), "x")
def test_migration_context_has_config(self):
env = self._fixture()
env.configure(url="sqlite://")
ctx = env._migration_context
is_(ctx.config, self.cfg)
ctx = MigrationContext(ctx.dialect, None, {})
is_(ctx.config, None)
def test_warning_on_passing_engine(self):
env = self._fixture()
engine = _sqlite_file_db()
a_rev = "arev"
env.script.generate_revision(a_rev, "revision a", refresh=True)
write_script(
env.script,
a_rev,
"""\
"Rev A"
revision = '%s'
down_revision = None
from alembic import op
def upgrade():
pass
def downgrade():
pass
"""
% a_rev,
)
migration_fn = MagicMock()
def upgrade(rev, context):
migration_fn(rev, context)
return env.script._upgrade_revs(a_rev, rev)
with expect_warnings(
r"'connection' argument to configure\(\) is "
r"expected to be a sqlalchemy.engine.Connection "
):
env.configure(
connection=engine, fn=upgrade, transactional_ddl=False
)
env.run_migrations()
eq_(migration_fn.mock_calls, [call((), env._migration_context)])
| 26.972477 | 72 | 0.65068 |
4c48b08afd7e310ca960170f0604b17070c88093 | 4,780 | py | Python | ARGA/arga/layers.py | kaize0409/ARGA | cf06ae8a830bee142765395284e3b451d0c8e2d0 | [
"MIT"
] | 1 | 2020-07-25T08:25:49.000Z | 2020-07-25T08:25:49.000Z | ARGA/arga/layers.py | kaize0409/ARGA | cf06ae8a830bee142765395284e3b451d0c8e2d0 | [
"MIT"
] | null | null | null | ARGA/arga/layers.py | kaize0409/ARGA | cf06ae8a830bee142765395284e3b451d0c8e2d0 | [
"MIT"
] | null | null | null | from initializations import *
import tensorflow as tf
flags = tf.app.flags
FLAGS = flags.FLAGS
# global unique layer ID dictionary for layer name assignment
_LAYER_UIDS = {}
def get_layer_uid(layer_name=''):
    """Return the next 1-based unique id for *layer_name*.

    Each distinct name has its own counter in the module-level
    ``_LAYER_UIDS`` dict, so successive calls yield 1, 2, 3, ...
    """
    # dict.get collapses the first-call and repeat-call branches into one path.
    _LAYER_UIDS[layer_name] = _LAYER_UIDS.get(layer_name, 0) + 1
    return _LAYER_UIDS[layer_name]
def dropout_sparse(x, keep_prob, num_nonzero_elems):
    """Dropout for sparse tensors. Currently fails for very large sparse tensors (>1M elements)
    """
    # One Bernoulli draw per stored (nonzero) element of x.
    noise_shape = [num_nonzero_elems]
    random_tensor = keep_prob
    # keep_prob + U[0,1): floor() is 1 with probability keep_prob, else 0.
    random_tensor += tf.random_uniform(noise_shape)
    dropout_mask = tf.cast(tf.floor(random_tensor), dtype=tf.bool)
    # Drop masked-out entries, then rescale survivors to preserve expectation.
    pre_out = tf.sparse_retain(x, dropout_mask)
    return pre_out * (1./keep_prob)
class Layer(object):
    """Base layer class. Defines basic API for all layer objects.
    # Properties
        name: String, defines the variable scope of the layer.
    # Methods
        _call(inputs): Defines computation graph of layer
            (i.e. takes input, returns output)
        __call__(inputs): Wrapper for _call()
    """
    def __init__(self, **kwargs):
        allowed_kwargs = {'name', 'logging'}
        for kwarg in kwargs.keys():
            assert kwarg in allowed_kwargs, 'Invalid keyword argument: ' + kwarg
        name = kwargs.get('name')
        if not name:
            # Default name: lowercased class name plus a per-class unique id.
            layer = self.__class__.__name__.lower()
            name = layer + '_' + str(get_layer_uid(layer))
        self.name = name
        self.vars = {}  # trainable variables, filled in by subclasses
        logging = kwargs.get('logging', False)
        self.logging = logging
        self.issparse = False  # subclasses taking sparse inputs set this True
    def _call(self, inputs):
        # Identity by default; subclasses override with their computation.
        return inputs
    def __call__(self, inputs):
        # Run the subclass computation inside a TF name scope for readability.
        with tf.name_scope(self.name):
            outputs = self._call(inputs)
            return outputs
class GraphConvolution(Layer):
    """Basic graph convolution layer for undirected graphs without edge labels.
    Computes act(adj @ dropout(inputs) @ W) with a Glorot-initialized dense
    weight matrix and a sparse adjacency matrix.
    """
    def __init__(self, input_dim, output_dim, adj, dropout=0., act=tf.nn.relu, **kwargs):
        super(GraphConvolution, self).__init__(**kwargs)
        with tf.variable_scope(self.name + '_vars'):
            self.vars['weights'] = weight_variable_glorot(input_dim, output_dim, name="weights")
        self.dropout = dropout
        self.adj = adj  # consumed via sparse_tensor_dense_matmul below
        self.act = act
    def _call(self, inputs):
        x = inputs
        x = tf.nn.dropout(x, 1-self.dropout)
        x = tf.matmul(x, self.vars['weights'])
        # Aggregate neighbor features through the (sparse) adjacency matrix.
        x = tf.sparse_tensor_dense_matmul(self.adj, x)
        outputs = self.act(x)
        return outputs
class Binarize(Layer):
    """Dense transform act(dropout(inputs) @ W) -- unlike GraphConvolution
    there is no adjacency / neighbor-aggregation step.
    NOTE(review): the default act=tf.nn.softsign squashes outputs into
    (-1, 1), presumably the soft "binarization" the class name refers to --
    confirm intent with the model code.
    """
    def __init__(self, input_dim, output_dim, dropout=0., act=tf.nn.softsign, **kwargs):
        super(Binarize, self).__init__(**kwargs)
        with tf.variable_scope(self.name + '_vars'):
            self.vars['weights'] = weight_variable_glorot(input_dim, output_dim, name="weights")
        self.dropout = dropout
        self.act = act
    def _call(self, inputs):
        x = inputs
        x = tf.nn.dropout(x, 1-self.dropout)
        x = tf.matmul(x, self.vars['weights'])
        outputs = self.act(x)
        return outputs
class GraphConvolutionSparse(Layer):
    """Graph convolution layer for sparse inputs."""
    def __init__(self, input_dim, output_dim, adj, features_nonzero, dropout=0., act=tf.nn.relu, **kwargs):
        super(GraphConvolutionSparse, self).__init__(**kwargs)
        with tf.variable_scope(self.name + '_vars'):
            self.vars['weights'] = weight_variable_glorot(input_dim, output_dim, name="weights")
        self.dropout = dropout
        self.adj = adj
        self.act = act
        self.issparse = True  # inputs arrive as sparse tensors (see _call)
        self.features_nonzero = features_nonzero  # sizes dropout_sparse's mask
    def _call(self, inputs):
        x = inputs
        # Sparse-aware dropout (regular tf.nn.dropout handles dense only).
        x = dropout_sparse(x, 1-self.dropout, self.features_nonzero)
        x = tf.sparse_tensor_dense_matmul(x, self.vars['weights'])
        # Aggregate neighbor features through the adjacency matrix.
        x = tf.sparse_tensor_dense_matmul(self.adj, x)
        outputs = self.act(x)
        return outputs
class InnerProductDecoder(Layer):
    """Decoder model layer for link prediction."""
    def __init__(self, input_dim, dropout=0., act=tf.nn.sigmoid, **kwargs):
        # NOTE(review): input_dim is accepted but never used.
        super(InnerProductDecoder, self).__init__(**kwargs)
        self.dropout = dropout
        self.act = act
    def _call(self, inputs):
        inputs = tf.nn.dropout(inputs, 1-self.dropout)
        x = tf.transpose(inputs)
        # Gram matrix Z Z^T: entry (i, j) scores the edge between nodes i, j.
        x = tf.matmul(inputs, x)
        # Flatten to a vector of edge scores before the activation.
        x = tf.reshape(x, [-1])
        outputs = self.act(x)
        return outputs
d8810c41ea0be83ca19ddca06ff946ed8968fb6c | 468 | py | Python | octane/api_resources/meter.py | zomglings/octane-python | e8e1bfeb7146e8cd2314359f2c3ddd280bf105a7 | [
"MIT"
] | 1 | 2022-01-06T13:02:11.000Z | 2022-01-06T13:02:11.000Z | octane/api_resources/meter.py | zomglings/octane-python | e8e1bfeb7146e8cd2314359f2c3ddd280bf105a7 | [
"MIT"
] | null | null | null | octane/api_resources/meter.py | zomglings/octane-python | e8e1bfeb7146e8cd2314359f2c3ddd280bf105a7 | [
"MIT"
] | 1 | 2021-05-10T06:00:32.000Z | 2021-05-10T06:00:32.000Z | from __future__ import absolute_import, division, print_function
from octane.api_resources.abstract import CreateableAPIResource
from octane.api_resources.abstract import DeletableAPIResource
from octane.api_resources.abstract import ListableAPIResource
from octane.api_resources.abstract import UpdateableAPIResource
class Meter(
    CreateableAPIResource,
    DeletableAPIResource,
    ListableAPIResource,
    UpdateableAPIResource,
):
    """Octane "meter" API resource; create/delete/list/update behavior is
    inherited entirely from the abstract resource mixins."""
    # Resource identifier -- presumably used by the abstract bases to address
    # the API; confirm against octane.api_resources.abstract.
    OBJECT_NAME = "meter"
| 29.25 | 64 | 0.839744 |
6d7481c4e6ccb096c9f0012f2694340e3e6025e7 | 1,717 | py | Python | assessments/migrations/0001_initial.py | Lukmanhakim112/bikom | eda795e15a46f3edcbeb21f9d87e75def0494dc4 | [
"MIT"
] | null | null | null | assessments/migrations/0001_initial.py | Lukmanhakim112/bikom | eda795e15a46f3edcbeb21f9d87e75def0494dc4 | [
"MIT"
] | null | null | null | assessments/migrations/0001_initial.py | Lukmanhakim112/bikom | eda795e15a46f3edcbeb21f9d87e75def0494dc4 | [
"MIT"
] | null | null | null | # Generated by Django 3.2.11 on 2022-01-21 03:10
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated by Django (see file header). Only comments are added
    # here: editing generated migrations alters the recorded schema history.
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            # NOTE(review): "AssesmentModel" (sic) -- the misspelling is frozen
            # into migration history; renaming requires a follow-up migration.
            name='AssesmentModel',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=120, verbose_name='Assesment Name')),
            ],
        ),
        migrations.CreateModel(
            name='QuestionModel',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('question', models.CharField(max_length=120, verbose_name='Question')),
                ('img_question', models.ImageField(blank=True, null=True, upload_to='question_img/', verbose_name='Question Image')),
                ('assessment', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='assessments.assesmentmodel')),
            ],
        ),
        migrations.CreateModel(
            name='AnswerModel',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('answer', models.CharField(max_length=120, verbose_name='Answer')),
                # NOTE(review): field name/verbose_name look copy-pasted from
                # QuestionModel (uploads go to 'answer_img/') -- confirm intent.
                ('img_question', models.ImageField(blank=True, null=True, upload_to='answer_img/', verbose_name='Question Image')),
                ('question', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='assessments.questionmodel')),
            ],
        ),
    ]
| 41.878049 | 133 | 0.614444 |
e3c23e0bb6e757cd037df1f959ee862b868aebd1 | 5,230 | py | Python | src/poliastro/coordinates.py | nikita-astronaut/poliastro | 7f675d76da413618f3bcc25317de750d74ea667e | [
"MIT"
] | 1 | 2019-02-05T06:19:59.000Z | 2019-02-05T06:19:59.000Z | src/poliastro/coordinates.py | nikita-astronaut/poliastro | 7f675d76da413618f3bcc25317de750d74ea667e | [
"MIT"
] | null | null | null | src/poliastro/coordinates.py | nikita-astronaut/poliastro | 7f675d76da413618f3bcc25317de750d74ea667e | [
"MIT"
] | null | null | null | """Functions related to coordinate systems and transformations.
This module complements :py:mod:`astropy.coordinates`.
"""
from math import sin, cos, sqrt
import numpy as np
import astropy.units as u
from astropy.coordinates import get_body_barycentric_posvel, CartesianRepresentation, CartesianDifferential
from poliastro.constants import J2000
from poliastro.twobody.rv import rv2coe
from poliastro.util import transform as transform_vector
from poliastro.twobody.orbit import Orbit
def body_centered_to_icrs(r, v, source_body, epoch=J2000, rotate_meridian=False):
    """Convert position and velocity from a body-centered frame into ICRS.
    Parameters
    ----------
    r : ~astropy.units.Quantity
        Position vector expressed in the body-centered frame.
    v : ~astropy.units.Quantity
        Velocity vector expressed in the body-centered frame.
    source_body : Body
        Body whose rotational elements define the source frame.
    epoch : ~astropy.time.Time, optional
        Epoch of the transformation, default to J2000.
    rotate_meridian : bool, optional
        Whether to also undo the prime-meridian rotation W, default to False.
    Returns
    -------
    r, v : tuple (~astropy.units.Quantity)
        Position and velocity vectors in ICRS.
    """
    ra, dec, W = source_body.rot_elements_at_epoch(epoch)
    if rotate_meridian:
        r = transform_vector(r, -W, 'z')
        v = transform_vector(v, -W, 'z')
    def unrotate(vec):
        # Undo the x-tilt onto the body's equator, then the z-rotation to its
        # pole right ascension (inverse sense of icrs_to_body_centered).
        tilted = transform_vector(vec, -(90 * u.deg - dec), 'x')
        return transform_vector(tilted, -(90 * u.deg + ra), 'z')
    body_pos, body_vel = get_body_barycentric_posvel(source_body.name, time=epoch)
    r_icrs = body_pos.xyz + unrotate(r)
    v_icrs = body_vel.xyz + unrotate(v)
    return r_icrs.to(r.unit), v_icrs.to(v.unit)
def icrs_to_body_centered(r, v, target_body, epoch=J2000, rotate_meridian=False):
    """Convert position and velocity from ICRS into a body-centered frame.
    Parameters
    ----------
    r : ~astropy.units.Quantity
        Position vector in ICRS.
    v : ~astropy.units.Quantity
        Velocity vector in ICRS.
    target_body : Body
        Body whose rotational elements define the destination frame.
    epoch : ~astropy.time.Time, optional
        Epoch of the transformation, default to J2000.
    rotate_meridian : bool, optional
        Whether to also apply the prime-meridian rotation W, default to False.
    Returns
    -------
    r, v : tuple (~astropy.units.Quantity)
        Position and velocity vectors in the body-centered frame.
    """
    ra, dec, W = target_body.rot_elements_at_epoch(epoch)
    body_pos, body_vel = get_body_barycentric_posvel(target_body.name, time=epoch)
    def rotate(vec):
        # z-rotation to the body's pole right ascension, then x-tilt onto its
        # equator (forward sense of body_centered_to_icrs).
        turned = transform_vector(vec, (90 * u.deg + ra), 'z')
        return transform_vector(turned, (90 * u.deg - dec), 'x')
    r_body = rotate(r - body_pos.xyz)
    v_body = rotate(v - body_vel.xyz)
    if rotate_meridian:
        r_body = transform_vector(r_body, W, 'z')
        v_body = transform_vector(v_body, W, 'z')
    return r_body.to(r.unit), v_body.to(v.unit)
def inertial_body_centered_to_pqw(r, v, source_body):
    """Converts position and velocity from inertial body-centered frame to perifocal frame.
    Parameters
    ----------
    r : ~astropy.units.Quantity
        Position vector in a inertial body-centered reference frame.
    v : ~astropy.units.Quantity
        Velocity vector in a inertial body-centered reference frame.
    source_body : Body
        Source body.
    Returns
    -------
    r_pqw, v_pqw : tuple (~astropy.units.Quantity)
        Position and velocity vectors in the perifocal (PQW) frame.
    """
    # Strip units: rv2coe works on raw floats in km, km/s and km^3/s^2.
    r = r.to('km').value
    v = v.to('km/s').value
    k = source_body.k.to('km^3 / s^2').value
    # Classical orbital elements; only p, ecc and nu are used below.
    p, ecc, inc, _, _, nu = rv2coe(k, r, v)
    # Conic equation |r| = p / (1 + ecc*cos(nu)); the z-component is written
    # as 0 * nu so it broadcasts when nu is an array.
    r_pqw = (np.array([cos(nu), sin(nu), 0 * nu]) * p / (1 + ecc * cos(nu))).T * u.km
    # NOTE(review): here the z-component is a scalar 0 (not 0 * nu); fine for
    # scalar nu but it would not broadcast for array inputs — confirm intent.
    v_pqw = (np.array([-sin(nu), (ecc + cos(nu)), 0]) * sqrt(k / p)).T * u.km / u.s
    return r_pqw, v_pqw
def transform(orbit, frame_orig, frame_dest):
    """Transforms Orbit from one frame to another.
    Parameters
    ----------
    orbit : ~poliastro.bodies.Orbit
        Orbit to transform
    frame_orig : ~astropy.coordinates.BaseCoordinateFrame
        Initial frame
    frame_dest : ~astropy.coordinates.BaseCoordinateFrame
        Final frame
    Returns
    -------
    orbit: ~poliastro.bodies.Orbit
        Orbit in the new frame
    """
    # Wrap the orbit's Cartesian position and velocity components in the
    # origin frame.
    orbit_orig = frame_orig(x=orbit.r[0], y=orbit.r[1], z=orbit.r[2],
                            v_x=orbit.v[0], v_y=orbit.v[1], v_z=orbit.v[2],
                            representation=CartesianRepresentation,
                            differential_type=CartesianDifferential)
    # Let astropy's frame machinery do the conversion, evaluated at the
    # orbit's epoch.
    orbit_dest = orbit_orig.transform_to(frame_dest(obstime=orbit.epoch))
    orbit_dest.representation = CartesianRepresentation
    # Rebuild a poliastro Orbit (same attractor and epoch) from the converted
    # position and its per-second ('s') velocity differential.
    return Orbit.from_vectors(orbit.attractor,
                              orbit_dest.data.xyz,
                              orbit_dest.data.differentials['s'].d_xyz,
                              epoch=orbit.epoch)
| 31.317365 | 107 | 0.649713 |
c15bb58894af3df960404cc1089a71f2f8ea55ad | 7,322 | py | Python | parsing.py | Undo1/SmellDetector | 44dbecb150c32c928bc8721fcc311d868b2e8f44 | [
"Apache-2.0",
"MIT"
] | 3 | 2019-04-23T02:59:15.000Z | 2019-08-05T06:47:56.000Z | parsing.py | Undo1/SmellDetector | 44dbecb150c32c928bc8721fcc311d868b2e8f44 | [
"Apache-2.0",
"MIT"
] | null | null | null | parsing.py | Undo1/SmellDetector | 44dbecb150c32c928bc8721fcc311d868b2e8f44 | [
"Apache-2.0",
"MIT"
] | null | null | null | # coding=utf-8
# noinspection PyCompatibility
import regex
import globalvars
import datahandling
# Zero-width characters (ZWNJ, ZWSP) that chat clients sometimes inject into
# URLs; they are stripped before any parsing.
BAD_CHAR = "\u200c\u200b"
# noinspection PyMissingTypeHints
def rebuild_str(text):
    """Return *text* with all zero-width characters (BAD_CHAR) removed.

    Fix: the parameter was named ``str``, shadowing the builtin; renamed to
    ``text`` (all call sites in this module pass it positionally).
    """
    return ''.join(ch for ch in text if ch not in BAD_CHAR)
# noinspection PyBroadException,PyMissingTypeHints
def get_user_from_url(url):
    """Extract ``(user_id, site_domain)`` from a /users/<id> or /u/<id> URL; None on no match."""
    pattern = r"(?:https?:)?//([\w.]+)/u(?:sers)?/(\d+)(/(?:.+/?)?)?"
    found = regex.compile(pattern).search(url)
    if found is None:
        return None
    try:
        return found.group(2), found.group(1)
    except:
        return None
# noinspection PyBroadException
def get_api_sitename_from_url(url):
    """Map a site URL to its Stack Exchange API site name; None on no match."""
    found = regex.compile(r"(?:https?:)?(?://)?([\w.]+)/?").search(url)
    if found is None:
        return None
    try:
        domain = found.group(1)
        # mathoverflow.net is the one site the API addresses by full domain;
        # every other site uses its leftmost domain label.
        return domain if domain == 'mathoverflow.net' else domain.split('.')[0]
    except:
        return None
# noinspection PyBroadException,PyMissingTypeHints
def fetch_post_url_from_msg_content(content):
    """Pull the post URL out of a Smokey chat-report message; None if no match."""
    # Matches the whole report line:
    # "[ [SmokeDetector](...) ] <reason>: [title](url) by [user](link) on `site`".
    # Group 1 captures the post URL (long question form or /q|/a shortlink).
    search_regex = r"^\[ \[SmokeDetector\]\([^)]*\)(?: \| \[.+\]\(.+\))? \] [\w\s,:+\(\)-]+: \[.+]\(((?:http:)" \
                   r"?\/\/[\w.]+\/questions\/\d+(?:\/.*)?|(?:http:)?\/\/[\w.]+\/[qa]\/\d+/?)(?:\?smokeypost=true)?\)" \
                   r"\s?(?:\u270F\uFE0F|\U0001F437)?\sby \[?.*\]?\(?(?:.*)\)? on `[\w.]+`(?: \(@.+\))?" \
                   r"(?: \[.+\]\(.+\))?$"
    match = regex.compile(search_regex).search(content)
    if match is None:
        return None
    try:
        url = match.group(1)
        return url
    except:
        return None
# noinspection PyBroadException,PyUnusedLocal,PyRedundantParentheses,PyMissingTypeHints
def fetch_post_id_and_site_from_url(url):
    """Parse a question/answer URL into ``(post_id, site_domain, post_type)``.

    Handles long-form /questions/ URLs (with a "/<qid>#<aid>" fragment marking
    an answer) and /q/<id> | /a/<id> shortlinks. Returns None when *url* is
    None or matches neither form.

    Fixes: the bare ``except:`` clauses (which also swallowed SystemExit /
    KeyboardInterrupt) were narrowed to ``except Exception``, and the dead
    pre-initialisation of ``post_type``/``search_regex`` (always overwritten
    by both branches) was removed.
    """
    if url is None:
        return None
    # Strip zero-width characters that chat clients sometimes inject.
    trimmed_url = rebuild_str(url)
    # A trailing "/<qid>#<aid>" fragment marks an answer URL.
    if regex.compile(r"\/\d+#\d+$").search(trimmed_url):
        post_type = "answer"
        search_regex = r"^(?:https?:)?\/\/([\w.]+)\/questions\/\d+\/.+\/(\d+)#\d+$"
    else:
        post_type = "question"
        search_regex = r"^(?:https?:)?\/\/([\w.]+)/questions/(\d+)(?:/.*)?$"
    found = regex.compile(search_regex).search(trimmed_url)
    if found is not None:
        try:
            return (found.group(2), found.group(1), post_type)
        except Exception:
            return None
    # Fall back to the shortlink form: //site/q/<id> or //site/a/<id>.
    found = regex.compile(r"^(?:https?:)?\/\/([\w.]+)/(q|a)/(\d+)(?:/\d+)?/?").search(trimmed_url)
    if found is None:
        return None
    try:
        post_type = "question" if found.group(2) == "q" else "answer"
        return (found.group(3), found.group(1), post_type)
    except Exception:
        return None
# noinspection PyMissingTypeHints
def fetch_post_id_and_site_from_msg_content(content):
    """Extract the post URL from a report message, then parse it into (id, site, type)."""
    url = fetch_post_url_from_msg_content(content)
    return fetch_post_id_and_site_from_url(url)
# noinspection PyBroadException,PyMissingTypeHints
def fetch_owner_url_from_msg_content(content):
    """Pull the post owner's profile URL out of a report message; None if no match."""
    # Same report-line shape as fetch_post_url_from_msg_content, but group 1
    # captures the link target of the "by [user](link)" part instead.
    search_regex = r"^\[ \[SmokeDetector\]\([^)]*\)(?: \| \[.+\]\(.+\))? \] [\w\s,:+\(\)-]+: \[.+]\((?:(?:http:)" \
                   r"?\/\/[\w.]+\/questions\/\d+(?:\/.*)?|(?:http:)?\/\/[\w.]+\/[qa]\/\d+/?)\) by \[.+\]\((.+)\)" \
                   r" on `[\w.]+`(?: \(@.+\))?(?: \[.+\]\(.+\))?$"
    match = regex.compile(search_regex).search(content)
    if match is None:
        return None
    try:
        owner_url = match.group(1)
        return owner_url
    except:
        return None
# noinspection PyBroadException,PyMissingTypeHints
def fetch_title_from_msg_content(content):
    """Pull the post title (the [title](...) link text) out of a report message."""
    search_regex = r"^\[ \[SmokeDetector\]\([^)]*\)(?: \| \[.+\]\(.+\))? \] [\w\s,:+\(\)-]+: \[(.+)]\((?:(?:http:)" \
                   r"?\/\/[\w.]+\/questions\/\d+(?:\/.*)?|(?:http:)?\/\/[\w.]+\/[qa]\/\d+/?)\) by \[?.*\]?\(?.*\)?" \
                   r" on `[\w.]+`(?: \(@.+\))?(?: \[.+\]\(.+\))?$"
    match = regex.compile(search_regex).search(content)
    if match is None:
        return None
    try:
        title = match.group(1)
        return title
    except:
        return None
# noinspection PyBroadException,PyMissingTypeHints
def edited_message_after_postgone_command(content):
    """Return the report message with its whole [title](url) link replaced by *(gone)*.

    Used after a post is deleted so edited chat messages no longer link to it;
    returns None when the message does not look like a report.
    """
    # Group 1 captures the entire markdown link "[title](url)".
    search_regex = r"^\[ \[SmokeDetector\]\([^)]*\)(?: \| \[.+\]\(.+\))? \] [\w\s,:+\(\)-]+: (\[.+]\((?:(?:http:)" \
                   r"?\/\/[\w.]+\/questions\/\d+(?:\/.*)?|(?:http:)?\/\/[\w.]+\/[qa]\/\d+/?)\)) by \[?.*\]?\(?.*\)?" \
                   r" on `[\w.]+`(?: \(@.+\))?(?: \[.+\]\(.+\))?$"
    match = regex.compile(search_regex).search(content)
    if match is None:
        return None
    try:
        link = match.group(1)
        return content.replace(link, "*(gone)*")
    except:
        return None
# noinspection PyMissingTypeHints
def unescape_title(title_escaped):
    """HTML-unescape a post title and trim surrounding whitespace."""
    return globalvars.GlobalVars.parser.unescape(title_escaped).strip()
# noinspection PyMissingTypeHints
def escape_markdown(s):
    """Backslash-escape the chat-markdown control characters _ * ` [ ]."""
    return regex.sub(r"([_*`\[\]])", r"\\\1", s)
# noinspection PyMissingTypeHints
def sanitize_title(title_unescaped):
    """Escape markdown, replace newlines with U+23CE, and strip URL schemes from a title."""
    # NOTE(review): newlines are replaced with u'\u23CE' *before* the sub runs,
    # so the '\n' alternative in the pattern can never match — presumably
    # leftover from an earlier version; confirm before removing.
    return regex.sub('(https?://|\n)', '', escape_markdown(title_unescaped).replace('\n', u'\u23CE'))
# noinspection PyMissingTypeHints
def get_user_from_list_command(cmd): # for example, !!/addblu is a list command
    """Parse the argument of a user-list command into ``(uid, site)``.

    Accepts either a single user-profile URL or "<uid> <site>". Returns
    (-1, "") when parsing fails, and (-2, name) when the site alias exists
    but is not in canonical form.
    """
    # Collapse runs of whitespace so splitting on a single space is reliable.
    cmd_merged_spaces = regex.sub("\\s+", " ", cmd)
    cmd_parts = cmd_merged_spaces.split(" ")
    uid = -1
    site = ""
    if len(cmd_parts) == 1:
        # Single token: must be a full user-profile URL.
        uid_site = get_user_from_url(cmd_parts[0])
        if uid_site is not None:
            uid, site = uid_site
    elif len(cmd_parts) == 2:
        uid = cmd_parts[0]
        site = cmd_parts[1]
        digit_re = regex.compile("^[0-9]+$")
        site_re = regex.compile(r"^(\w+\.stackexchange\.com|\w+\.(com|net))$")
        if not digit_re.match(uid):
            # The uid must be purely numeric; otherwise reject the command.
            uid = -1
            site = ""
        elif not site_re.match(site):
            # Not a canonical domain: try to resolve it as a site alias.
            exists, name = datahandling.check_site_and_get_full_name(site)
            if exists:
                return uid, name
            else:
                return -2, name
    return uid, site
# noinspection PyMissingTypeHints
def url_to_shortlink(url):
    """Return the canonical short URL for a post; *url* unchanged if unparseable."""
    parsed = fetch_post_id_and_site_from_url(url)
    if parsed is None:
        return url
    post_id, site, post_type = parsed
    # "/questions" is used instead of "/q" because the protocol-relative form
    # of /q redirects to http even when the shortlink was https. Answers keep
    # /a because no long-form equivalent (e.g. /answers) exists.
    if post_type == "question":
        return "http://{}/questions/{}".format(site, post_id)
    return "http://{}/a/{}".format(site, post_id)
# noinspection PyMissingTypeHints
def user_url_to_shortlink(url):
    """Return the canonical short URL for a user profile; *url* unchanged if unparseable."""
    parsed = get_user_from_url(url)
    if parsed is None:
        return url
    user_id, site = parsed
    return "http://{}/users/{}".format(site, user_id)
# noinspection PyMissingTypeHints
def to_protocol_relative(url):
    """Strip the scheme from an absolute http(s) URL, yielding a protocol-relative one."""
    for scheme in ("http:", "https:"):
        if url.startswith(scheme + "//"):
            return url[len(scheme):]
    # Already protocol-relative, or some other scheme: leave untouched.
    return url
| 33.43379 | 119 | 0.561459 |
721b5cae75181033273f632c5d10427a3ed2625d | 1,229 | py | Python | test/test_forecast_api.py | chbndrhnns/ahoi-client | 8bd25f541c05af17c82904fa250272514b7971f2 | [
"MIT"
] | null | null | null | test/test_forecast_api.py | chbndrhnns/ahoi-client | 8bd25f541c05af17c82904fa250272514b7971f2 | [
"MIT"
] | null | null | null | test/test_forecast_api.py | chbndrhnns/ahoi-client | 8bd25f541c05af17c82904fa250272514b7971f2 | [
"MIT"
] | null | null | null | # coding: utf-8
"""
[AHOI cookbook](/ahoi/docs/cookbook/index.html) [Data Privacy](/sandboxmanager/#/privacy) [Terms of Service](/sandboxmanager/#/terms) [Imprint](https://sparkassen-hub.com/impressum/) © 2016‐2017 Starfinanz - Ein Unternehmen der Finanz Informatik # noqa: E501
OpenAPI spec version: 2.1.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import swagger_client
from swagger_client.api.forecast_api import ForecastApi # noqa: E501
from swagger_client.rest import ApiException
class TestForecastApi(unittest.TestCase):
    """ForecastApi unit test stubs (swagger-codegen scaffolding; bodies not yet implemented)."""
    def setUp(self):
        # A fresh API client instance for every test case.
        self.api = swagger_client.api.forecast_api.ForecastApi() # noqa: E501
    def tearDown(self):
        pass
    def test_get_forecast(self):
        """Test case for get_forecast
        Get balance forecast # noqa: E501
        """
        pass
    def test_get_forecast_transactions(self):
        """Test case for get_forecast_transactions
        Retrieve balance forecast for the end of the current month. # noqa: E501
        """
        pass
if __name__ == '__main__':
    unittest.main()
| 25.081633 | 277 | 0.685924 |
44ced8cbc784dcffd5b77e8a71e96e874d860ac3 | 788 | py | Python | dataent/patches/v11_0/fix_order_by_in_reports_json.py | dataent/dataent | c41bd5942ffe5513f4d921c4c0595c84bbc422b4 | [
"MIT"
] | null | null | null | dataent/patches/v11_0/fix_order_by_in_reports_json.py | dataent/dataent | c41bd5942ffe5513f4d921c4c0595c84bbc422b4 | [
"MIT"
] | 6 | 2020-03-24T17:15:56.000Z | 2022-02-10T18:41:31.000Z | dataent/patches/v11_0/fix_order_by_in_reports_json.py | dataent/dataent | c41bd5942ffe5513f4d921c4c0595c84bbc422b4 | [
"MIT"
] | null | null | null | from __future__ import unicode_literals
import dataent, json
def execute():
	"""Patch saved Report Builder reports: rewrite their JSON `order_by` into
	backtick-quoted `tab<DocType>`.`fieldname` form (preserving asc/desc)."""
	# Only non-standard Report Builder reports whose JSON does not already use
	# the `tab...` form (the 'not like' filter excludes migrated docs).
	reports_data = dataent.get_all('Report',
		filters={'json': ['not like', '%%%"order_by": "`tab%%%'],
		'report_type': 'Report Builder', 'is_standard': 'No'}, fields=['name'])
	for d in reports_data:
		doc = dataent.get_doc('Report', d.get('name'))
		if not doc.get('json'): continue
		json_data = json.loads(doc.get('json'))
		parts = []
		if ('order_by' in json_data) and ('.' in json_data.get('order_by')):
			parts = json_data.get('order_by').split('.')
			# e.g. "fieldname desc" -> ["fieldname", "desc"]
			sort_by = parts[1].split(' ')
			json_data['order_by'] = '`tab{0}`.`{1}`'.format(doc.ref_doctype, sort_by[0])
			# Re-append the sort direction if one was present.
			json_data['order_by'] += ' {0}'.format(sort_by[1]) if len(sort_by) > 1 else ''
		doc.json = json.dumps(json_data)
		doc.save()
| 29.185185 | 81 | 0.635787 |
d946ef5b90ce1d72772ef2490f107c7810d86268 | 1,201 | py | Python | app/errors/handlers.py | boerniee/project-mate | 072b0e871525d527d438f2ec0238fa94c4547f85 | [
"MIT"
] | 2 | 2019-12-18T09:42:18.000Z | 2019-12-20T13:16:52.000Z | app/errors/handlers.py | boerniee/project-mate | 072b0e871525d527d438f2ec0238fa94c4547f85 | [
"MIT"
] | 17 | 2019-12-18T12:45:30.000Z | 2021-02-06T14:44:36.000Z | app/errors/handlers.py | boerniee/project-mate | 072b0e871525d527d438f2ec0238fa94c4547f85 | [
"MIT"
] | null | null | null | from flask import render_template, request
from app import db
from app.errors import bp
from app.api.errors import error_response as api_error_response
def wants_json_response():
    """Return True when the client's Accept header prefers JSON over HTML (API clients)."""
    return request.accept_mimetypes['application/json'] >= \
        request.accept_mimetypes['text/html']
@bp.app_errorhandler(403)
def forbidden(error):
    # 403 Forbidden: JSON body for API clients, HTML error page otherwise.
    if wants_json_response():
        return api_error_response(403)
    return render_template('error/403.html'), 403
@bp.app_errorhandler(404)
def not_found_error(error):
    # 404 Not Found.
    # NOTE(review): a later handler in this module re-uses the name
    # `not_found_error` for 400, shadowing this module attribute. Flask still
    # registers both (registration happens at decoration time), but the
    # duplicate name should be fixed.
    if wants_json_response():
        return api_error_response(404)
    return render_template('error/404.html'), 404
@bp.app_errorhandler(400)
def bad_request_error(error):
    """Render a 400 Bad Request response (JSON for API clients, HTML otherwise).

    Renamed from ``not_found_error``: the original duplicated the 404
    handler's name, so the module attribute silently pointed at this 400
    handler. Flask registers handlers at decoration time, so the rename does
    not change routing behavior.
    """
    if wants_json_response():
        return api_error_response(400)
    return render_template('error/400.html'), 400
@bp.app_errorhandler(500)
def internal_error(error):
    # A 500 usually means a request died mid-way; roll back so the session
    # does not keep a half-finished transaction.
    db.session.rollback()
    if wants_json_response():
        return api_error_response(500)
    return render_template('error/500.html'), 500
@bp.app_errorhandler(413)
def payload_too_large_error(error):
    """Render a 413 Payload Too Large response; rolls back any pending DB work.

    Renamed from ``internal_error``: the original duplicated the 500
    handler's name. Flask registers handlers at decoration time, so the
    rename does not change routing behavior.
    """
    db.session.rollback()
    if wants_json_response():
        return api_error_response(413)
    return render_template('error/413.html'), 413
| 29.292683 | 63 | 0.742714 |
7218a0e1d5e7b250bc583b2e6c7890fbc62d3d74 | 11,200 | py | Python | sentiment_analysis_ml.py | ruanyangry/Sentiment_Analysis_cnn_lstm_cnnlstm_textcnn_bilstm | fb9f2563ff8594ad98ed8fb14073ce8be7ee9aca | [
"Apache-2.0"
] | 56 | 2019-01-19T13:34:39.000Z | 2022-03-29T13:55:29.000Z | sentiment_analysis_ml.py | ColionX/Sentiment_Analysis_cnn_lstm_cnnlstm_textcnn_bilstm | fb9f2563ff8594ad98ed8fb14073ce8be7ee9aca | [
"Apache-2.0"
] | 2 | 2019-11-24T13:56:26.000Z | 2020-12-23T02:40:12.000Z | sentiment_analysis_ml.py | ColionX/Sentiment_Analysis_cnn_lstm_cnnlstm_textcnn_bilstm | fb9f2563ff8594ad98ed8fb14073ce8be7ee9aca | [
"Apache-2.0"
] | 18 | 2019-03-04T14:37:46.000Z | 2022-02-12T10:00:33.000Z | # _*_ coding:utf-8 _*_
'''
@Author: Ruan Yang
@Date: 2018.12.16
@Purpose: 使用传统的机器学习的方法进行文本情感分析
'''
import codecs
import jieba
import numpy as np
from gensim.models.word2vec import Word2Vec
from sklearn.externals import joblib
from sklearn.svm import SVC
from sklearn.naive_bayes import GaussianNB
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.neural_network import MLPClassifier
from sklearn import neighbors
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import SGDClassifier
# Absolute paths of the labelled corpus and of the output artefacts.
datapaths=r"C:\Users\RY\Desktop\情感分析\SentimentAnalysis-master\data\\"
storedpaths=r"C:\Users\RY\Desktop\\"
# Tokenised sentences and their labels: 0 = positive, 1 = neutral, 2 = negative.
positive_data=[]
y_positive=[]
neutral_data=[]
y_neutral=[]
negative_data=[]
y_negative=[]
print("#------------------------------------------------------#")
print("加载数据集")
# One sentence per line; jieba segments it and tokens are re-joined with
# spaces, so each list entry is a whitespace-separated token string.
with codecs.open(datapaths+"pos.csv","r","utf-8") as f1,\
codecs.open(datapaths+"neutral.csv","r","utf-8") as f2,\
codecs.open(datapaths+"neg.csv","r","utf-8") as f3:
    for line in f1:
        positive_data.append(" ".join(i for i in jieba.lcut(line.strip(),cut_all=False)))
        #y_positive.append([1,0,0])
        y_positive.append([0])
    for line in f2:
        neutral_data.append(" ".join(i for i in jieba.lcut(line.strip(),cut_all=False)))
        #y_neutral.append([0,1,0])
        y_neutral.append([1])
    for line in f3:
        negative_data.append(" ".join(i for i in jieba.lcut(line.strip(),cut_all=False)))
        #y_negative.append([0,0,1])
        y_negative.append([2])
print("positive data:{}".format(len(positive_data)))
print("neutral data:{}".format(len(neutral_data)))
print("negative data:{}".format(len(negative_data)))
x_text=positive_data+neutral_data+negative_data
y_label=y_positive+y_neutral+y_negative
print("#------------------------------------------------------#")
print("\n")
# Shuffle the dataset and split it 80/20 into train/test.
shuffle_indices = np.random.permutation(np.arange(len(y_label)))
train_test_percent=0.2
x_train=[]
x_test=[]
y_train=[]
y_test=[]
# The last 20% of the shuffled indices become the test set; the rest train.
for i in shuffle_indices[:-(int(len(shuffle_indices)*train_test_percent))]:
    x_train.append(x_text[i])
    y_train.append(y_label[i])
for i in shuffle_indices[-(int(len(shuffle_indices)*train_test_percent)):]:
    x_test.append(x_text[i])
    y_test.append(y_label[i])
# Per-class counts, for reporting only (labels: 0 pos, 1 neu, 2 neg).
x_train_pos=0
x_train_neu=0
x_train_neg=0
x_test_pos=0
x_test_neu=0
x_test_neg=0
for i in y_train:
    if i[0] == 0:
        x_train_pos += 1
    elif i[0] == 1:
        x_train_neu += 1
    else:
        x_train_neg += 1
for i in y_test:
    if i[0] == 0:
        x_test_pos += 1
    elif i[0] == 1:
        x_test_neu += 1
    else:
        x_test_neg += 1
print("#------------------------------------------------------#")
print("保存标签数据")
np.save(storedpaths+"y_train.npy",np.array(y_train))
np.save(storedpaths+"y_test.npy",np.array(y_test))
print("训练集总数:{}".format(len(x_train)))
print("训练集正样本:{}".format(x_train_pos))
print("训练集中性样本:{}".format(x_train_neu))
print("训练集负样本:{}".format(x_train_neg))
print("测试集总数:{}".format(len(x_test)))
print("测试集正样本:{}".format(x_test_pos))
print("测试集中性样本:{}".format(x_test_neu))
print("测试集负样本:{}".format(x_test_neg))
print("#------------------------------------------------------#")
print("\n")
# Average all word vectors of a sentence into a single (1, size) vector.
# text: list of already-segmented tokens
# size: dimensionality of the word vectors
# word_vector_model: a trained word-vector lookup (e.g. a gensim Word2Vec
# model, or anything supporting model[word] -> ndarray raising KeyError).
def buildWordVector(text, size, word_vector_model):
    """Return the mean word vector of *text*; zeros when no token is known."""
    total = np.zeros((1, size))
    matched = 0
    for token in text:
        try:
            embedding = word_vector_model[token]
        except KeyError:
            # Out-of-vocabulary tokens are simply skipped.
            continue
        total += embedding.reshape((1, size))
        matched += 1
    return total / matched if matched else total
# Train word2vec on the whole corpus and cache sentence vectors to disk.
def get_train_vecs(x_train,x_test,n_dim):
    '''
    Train a Word2Vec model on train+test text, average each sentence's word
    vectors with buildWordVector, and save the resulting matrices and the
    model itself under `storedpaths`.

    x_train: training sentences (token lists)
    x_test: test sentences (token lists)
    n_dim: dimensionality of the trained word vectors
    '''
    n_dim=n_dim
    # Initialise the model and build the vocabulary over the full corpus.
    all_text=x_train+x_test
    text_w2v=Word2Vec(size=n_dim,min_count=5,workers=1)
    text_w2v.build_vocab(all_text)
    text_w2v.train(all_text,total_examples=text_w2v.corpus_count,epochs=5)
    # Sentence-level vectors for train and test sets (can be large).
    train_vecs=np.concatenate([buildWordVector(text,n_dim,text_w2v) for text in x_train])
    np.save(storedpaths+"train_vecs.npy",train_vecs)
    print("训练集数据的词向量维度:{}".format(train_vecs.shape))
    test_vecs=np.concatenate([buildWordVector(text,n_dim,text_w2v) for text in x_test])
    np.save(storedpaths+"test_vecs.npy",test_vecs)
    print("测试集数据的词向量维度:{}".format(test_vecs.shape))
    # Persist the word-vector model for later prediction use.
    text_w2v.save(storedpaths+"w2v_model.pkl")
# Load the cached sentence-vector matrices and label arrays back from disk.
def get_data():
    """Return (train_vecs, y_train, test_vecs, y_test) from the .npy caches."""
    names = ('train_vecs.npy', 'y_train.npy', 'test_vecs.npy', 'y_test.npy')
    train_vecs, y_train, test_vecs, y_test = (
        np.load(storedpaths + name) for name in names)
    return train_vecs, y_train, test_vecs, y_test
# Train an RBF-kernel SVM classifier.
def svm_train(train_vecs,y_train,test_vecs,y_test):
    """Fit an SVC on the training vectors, persist it, and return test accuracy."""
    clf=SVC(kernel='rbf',verbose=True)
    clf.fit(train_vecs,y_train)
    # NOTE(review): saved as the generic 'model.pkl' (loaded by svm_predict),
    # unlike the model_<algo>.pkl names used below — confirm this is intended.
    joblib.dump(clf,storedpaths+'model.pkl')
    test_scores=clf.score(test_vecs,y_test)
    return test_scores
# Train a Gaussian naive Bayes classifier.
def NB_train(train_vecs,y_train,test_vecs,y_test):
    """Fit GaussianNB, persist it, and return test accuracy."""
    gnb = GaussianNB()
    gnb.fit(train_vecs,y_train)
    joblib.dump(gnb,storedpaths+'model_gnb.pkl')
    test_scores=gnb.score(test_vecs,y_test)
    return test_scores
# Train a decision-tree classifier.
def decision_tree(train_vecs,y_train,test_vecs,y_test):
    """Fit a DecisionTreeClassifier, persist it, and return test accuracy."""
    clf=DecisionTreeClassifier(max_depth=10, min_samples_split=2,random_state=0)
    clf.fit(train_vecs,y_train)
    joblib.dump(clf,storedpaths+'model_dtree.pkl')
    test_scores=clf.score(test_vecs,y_test)
    return test_scores
# Train a random-forest ensemble.
def random_forest(train_vecs,y_train,test_vecs,y_test):
    """Fit a RandomForestClassifier, persist it, and return test accuracy."""
    clf = RandomForestClassifier(n_estimators=10, max_depth=10,min_samples_split=2,n_jobs=1,random_state=0)
    clf.fit(train_vecs,y_train)
    joblib.dump(clf,storedpaths+'model_randomforest.pkl')
    test_scores=clf.score(test_vecs,y_test)
    return test_scores
# Train an ExtraTreesClassifier ensemble.
def extract_tree(train_vecs,y_train,test_vecs,y_test):
    """Fit an ExtraTreesClassifier, persist it, and return test accuracy."""
    clf = ExtraTreesClassifier(n_estimators=10, max_depth=10,min_samples_split=2,n_jobs=1,random_state=0)
    clf.fit(train_vecs,y_train)
    joblib.dump(clf,storedpaths+'model_extracttree.pkl')
    test_scores=clf.score(test_vecs,y_test)
    return test_scores
# Train a gradient-boosted decision-tree (GBDT) classifier.
def gbdt_classifier(train_vecs,y_train,test_vecs,y_test):
    """Fit a GradientBoostingClassifier, persist it, and return test accuracy."""
    clf = GradientBoostingClassifier(n_estimators=100, learning_rate=1.0,max_depth=10,random_state=0)
    clf.fit(train_vecs,y_train)
    joblib.dump(clf,storedpaths+'model_gbdt.pkl')
    test_scores=clf.score(test_vecs,y_test)
    return test_scores
# Train a k-nearest-neighbours classifier.
def nn_classifier(n_neighbors,train_vecs,y_train,test_vecs,y_test):
    """Fit a KNeighborsClassifier with *n_neighbors*, persist it, and return test accuracy."""
    clf = neighbors.KNeighborsClassifier(n_neighbors, weights='uniform')
    clf.fit(train_vecs,y_train)
    joblib.dump(clf,storedpaths+'model_nn.pkl')
    test_scores=clf.score(test_vecs,y_test)
    return test_scores
# Train a LogisticRegression classifier.
def LR_classifier(train_vecs,y_train,test_vecs,y_test):
    """Fit an L1-penalised multinomial LogisticRegression, persist it, and return test accuracy."""
    clf = LogisticRegression(C=50. / len(y_train),multi_class='multinomial',\
        penalty='l1', solver='saga', tol=0.1)
    clf.fit(train_vecs,y_train)
    joblib.dump(clf,storedpaths+'model_lr.pkl')
    test_scores=clf.score(test_vecs,y_test)
    return test_scores
# Train a stochastic-gradient-descent (SGD) classifier.
def SGD_classifier(train_vecs,y_train,test_vecs,y_test):
    """Fit an SGDClassifier, persist it, and return test accuracy."""
    clf = SGDClassifier(alpha=0.001, max_iter=100)
    clf.fit(train_vecs,y_train)
    joblib.dump(clf,storedpaths+'model_sgd.pkl')
    test_scores=clf.score(test_vecs,y_test)
    return test_scores
# Train a multi-layer perceptron classifier.
def MP_classifier(train_vecs,y_train,test_vecs,y_test):
    """Fit a small MLPClassifier, persist it, and return test accuracy."""
    clf = MLPClassifier(solver='lbfgs', alpha=1e-5,hidden_layer_sizes=(5, 2), random_state=1)
    clf.fit(train_vecs,y_train)
    joblib.dump(clf,storedpaths+'model_mp.pkl')
    test_scores=clf.score(test_vecs,y_test)
    return test_scores
# Build the averaged word vector for a single raw sentence to be classified.
# The sentence is segmented with jieba first.
def get_predict_vecs(string,n_dim,w2v_model_path):
    '''
    string: the input sentence (untokenised)
    n_dim: word-vector dimensionality (must match the trained model)
    w2v_model_path: path of the pre-trained word2vec model
    '''
    n_dim = n_dim
    # Load the persisted word2vec model saved by get_train_vecs.
    text_w2v = Word2Vec.load(w2v_model_path)
    words=[i for i in jieba.cut(string,cut_all=False)]
    train_vecs = buildWordVector(words, n_dim,text_w2v)
    return train_vecs
# Predict the sentiment label of a raw sentence with a persisted classifier.
def svm_predict(string, trainmodelpath, n_dim=300, w2v_model_path=storedpaths + "w2v_model.pkl"):
    """Vectorise *string* and classify it with the model stored at *trainmodelpath*.

    Bug fix: the original called get_predict_vecs(string) with one argument,
    but that function requires (string, n_dim, w2v_model_path), so every call
    raised TypeError. The two missing arguments are now threaded through with
    defaults matching the training configuration (n_dim=300 and the word2vec
    model saved by get_train_vecs), keeping existing two-argument calls working.
    """
    words_vecs = get_predict_vecs(string, n_dim, w2v_model_path)
    clf = joblib.load(trainmodelpath)
    result = clf.predict(words_vecs)
    return result
# Train model: fit every classifier and report its held-out test accuracy.
n_dim=300
n_neighbors=10
# Word-vector extraction is expensive; uncomment to regenerate the .npy caches.
#get_train_vecs(x_train,x_test,n_dim)
train_vecs,y_train,test_vecs,y_test=get_data()
test_scores=svm_train(train_vecs,y_train,test_vecs,y_test)
print("#----------------------------------------#")
print("SVM测试集测试得分:{}".format(test_scores))
print("#----------------------------------------#")
test_scores=NB_train(train_vecs,y_train,test_vecs,y_test)
print("#----------------------------------------#")
print("NB测试集测试得分:{}".format(test_scores))
print("#----------------------------------------#")
test_scores=nn_classifier(n_neighbors,train_vecs,y_train,test_vecs,y_test)
print("#----------------------------------------#")
print("NN测试集测试得分:{}".format(test_scores))
print("#----------------------------------------#")
test_scores=LR_classifier(train_vecs,y_train,test_vecs,y_test)
print("#----------------------------------------#")
print("LR测试集测试得分:{}".format(test_scores))
print("#----------------------------------------#")
test_scores=SGD_classifier(train_vecs,y_train,test_vecs,y_test)
print("#----------------------------------------#")
print("SGD测试集测试得分:{}".format(test_scores))
print("#----------------------------------------#")
test_scores=decision_tree(train_vecs,y_train,test_vecs,y_test)
print("#----------------------------------------#")
print("TREE测试集测试得分:{}".format(test_scores))
print("#----------------------------------------#")
test_scores=random_forest(train_vecs,y_train,test_vecs,y_test)
print("#----------------------------------------#")
print("Random_Forest测试集测试得分:{}".format(test_scores))
print("#----------------------------------------#")
test_scores=extract_tree(train_vecs,y_train,test_vecs,y_test)
print("#----------------------------------------#")
print("Extract_Tree测试集测试得分:{}".format(test_scores))
print("#----------------------------------------#")
test_scores=gbdt_classifier(train_vecs,y_train,test_vecs,y_test)
print("#----------------------------------------#")
print("GBDT_Tree测试集测试得分:{}".format(test_scores))
print("#----------------------------------------#")
test_scores=MP_classifier(train_vecs,y_train,test_vecs,y_test)
print("#----------------------------------------#")
print("MP测试集测试得分:{}".format(test_scores))
print("#----------------------------------------#") | 32.844575 | 108 | 0.63625 |
2875d0b4b1352c9d1dc8c58e5b0b90127e9c27d5 | 604 | py | Python | setup.py | pbaksh/well_structured_repo | 016c4f72c2ebabbc820dc160ce88b2b09654d34c | [
"Apache-2.0"
] | null | null | null | setup.py | pbaksh/well_structured_repo | 016c4f72c2ebabbc820dc160ce88b2b09654d34c | [
"Apache-2.0"
] | null | null | null | setup.py | pbaksh/well_structured_repo | 016c4f72c2ebabbc820dc160ce88b2b09654d34c | [
"Apache-2.0"
] | null | null | null | from setuptools import setup, find_packages
# Read the long description for PyPI straight from the repository README.
with open("README.md", "r") as readme_file:
    readme = readme_file.read()
# Runtime dependencies installed alongside the package.
requirements = ["numpy"]
setup(
    name="well_structured_repo",
    version="0.0.1",
    author="PB",
    author_email="jpdbaksh@gmail.com",
    description="A template",
    long_description=readme,
    long_description_content_type="text/markdown",
    url="",
    packages=find_packages(),
    install_requires=requirements,
    classifiers=[
        "Programming Language :: Python :: 3.7",
        "License :: OSI Approved :: GNU General Public License v3 (GPLv3)",
    ],
)
| 25.166667 | 75 | 0.665563 |
46d18e456d14081ecfea2fd33bd34dd18834b88f | 636 | py | Python | Introducao ao Python - DIO/Aula02.py | eduardaalvess/RepositorioPython | e6778e851942b97cd61143c6bcc34d0d5941392b | [
"MIT"
] | 1 | 2021-08-31T22:48:53.000Z | 2021-08-31T22:48:53.000Z | Introducao ao Python - DIO/Aula02.py | eduardaalvess/RepositorioPython | e6778e851942b97cd61143c6bcc34d0d5941392b | [
"MIT"
] | null | null | null | Introducao ao Python - DIO/Aula02.py | eduardaalvess/RepositorioPython | e6778e851942b97cd61143c6bcc34d0d5941392b | [
"MIT"
] | null | null | null | a = int(input('Entre com o primeiro valor: '))
b = int(input('Entre com o segundo valor: '))
print(type(a))
# Basic arithmetic on the two values read above (`a` comes from the first input).
soma = a + b
subtracao = a - b
multiplicacao = a * b
divisao = a / b
resto = a % b
# Build the whole report string with keyword-based str.format placeholders.
resultado = ('Soma: {soma}. \nSubtração: {subtracao}. '
             '\nMultiplicação: {multiplicacao}'
             '\nDivisão: {divisao}'
             '\nResto: {resto}'.format(soma=soma,
                                       subtracao=subtracao,
                                       resto=resto,
                                       multiplicacao=multiplicacao,
                                       divisao=divisao))
print(resultado)
# x = '1'
# # soma2 = int(x) + 1
# # print(soma2) | 30.285714 | 60 | 0.512579 |
34738af3d56b057b8524b2c5df07f6ef3cde4377 | 1,185 | py | Python | benchmark_portals.py | EvilRedHorse/SkyLive | 698db51d1ffbe475c840a3283c675bc993f10064 | [
"MIT"
] | null | null | null | benchmark_portals.py | EvilRedHorse/SkyLive | 698db51d1ffbe475c840a3283c675bc993f10064 | [
"MIT"
] | null | null | null | benchmark_portals.py | EvilRedHorse/SkyLive | 698db51d1ffbe475c840a3283c675bc993f10064 | [
"MIT"
] | null | null | null | import time
from pubaccess import Pubaccess
import os
def time_to_str(value):
    """Map the sentinel benchmark codes to labels; real timings pass through."""
    # 20 is the configured upload timeout, 999 marks an unexpected error.
    labels = {20: 'timeout', 999: 'error'}
    return labels.get(value, value)
print('Starting benchmark')
# Generate a ~2 MB upload payload ('PubLive ' is 8 bytes; 262144 * 8 = 2 MiB).
# Uses `with` plus a single multiplication instead of the original quadratic
# `text += ...` loop and manual close.
with open("2MBfile.txt", "w+") as f:
    f.write('PubLive ' * 262144)
portals = [
    'https://scp.techandsupply.ca',
    'https://scprime.hashpool.eu',
]
results = []
for portal in portals:
    start_time = time.time()
    opts = type('obj', (object,), {
        'portal_url': portal,
        'timeout': 20
    })
    try:
        try:
            # Bug fix: the original referenced the undefined name `Skynet`,
            # so every upload raised NameError and was recorded as an error
            # (999). `Pubaccess` is what this module actually imports —
            # assumed to expose upload_file like the renamed skynet client;
            # confirm against the pubaccess SDK.
            Pubaccess.upload_file('2MBfile.txt', opts)
            uploadtime = round(time.time() - start_time, 2)
            current_result = [uploadtime, portal]
        except TimeoutError:
            current_result = [opts.timeout, portal]
    except Exception:
        current_result = [999, portal]
    results.append(current_result)
    print('Benchmarking', str(len(results)) + '/' + str(len(portals)), 'portal. Current:', time_to_str(current_result[0]), current_result[1])
print('\nRESULTS:\n')
results.sort(key=lambda x: x[0])
for elem in results:
    label = time_to_str(elem[0])  # renamed from `time`, which shadowed the module
    print(label, elem[1])
os.remove("2MBfile.txt")
| 21.160714 | 138 | 0.672574 |
9b2268facbdcae0415f4bff88d49195007828935 | 708 | py | Python | 02-work-queues/new_task.py | KirovVerst/rabbitmq-tutorial-async | 85c6b76ec554f1ac81994cb4cd62d609cb5dc518 | [
"MIT"
] | null | null | null | 02-work-queues/new_task.py | KirovVerst/rabbitmq-tutorial-async | 85c6b76ec554f1ac81994cb4cd62d609cb5dc518 | [
"MIT"
] | null | null | null | 02-work-queues/new_task.py | KirovVerst/rabbitmq-tutorial-async | 85c6b76ec554f1ac81994cb4cd62d609cb5dc518 | [
"MIT"
] | null | null | null | import asyncio
import sys
import aio_pika
# Queue and routing key share one name: messages go straight to the work queue
# via the default exchange.
ROUTING_KEY = 'task_queue'
QUEUE_NAME = 'task_queue'
async def main(msg: str) -> None:
    """Publish *msg* as a persistent task onto the work queue."""
    conn = await aio_pika.connect(host='localhost')
    async with conn:
        channel: aio_pika.Channel = await conn.channel()
        # durable=True: the queue definition survives a broker restart.
        await channel.declare_queue(QUEUE_NAME, durable=True)
        await channel.default_exchange.publish(
            aio_pika.Message(
                body=msg.encode(),
                # PERSISTENT: the message itself is written to disk too.
                delivery_mode=aio_pika.DeliveryMode.PERSISTENT
            ),
            routing_key=ROUTING_KEY,
        )
        print(f'[x] Sent {msg}')
if __name__ == '__main__':
    # Message text comes from the CLI arguments, with a friendly default.
    message = ' '.join(sys.argv[1:]) or 'Hello, World!'
    asyncio.run(main(message))
| 25.285714 | 62 | 0.628531 |
a9c740b025a32372731a6dbaeae47962a27dec2e | 1,305 | py | Python | virtual/lib/python3.9/site-packages/pyuploadcare/transformations/base.py | alex-mu/Neighborhood-watch | 13a4926a59a924f84c5560966ca686168efa054e | [
"MIT"
] | 85 | 2015-01-14T21:37:58.000Z | 2022-03-16T07:15:41.000Z | virtual/lib/python3.9/site-packages/pyuploadcare/transformations/base.py | alex-mu/Neighborhood-watch | 13a4926a59a924f84c5560966ca686168efa054e | [
"MIT"
] | 78 | 2015-01-15T23:44:15.000Z | 2022-03-21T12:05:26.000Z | virtual/lib/python3.9/site-packages/pyuploadcare/transformations/base.py | alex-mu/Neighborhood-watch | 13a4926a59a924f84c5560966ca686168efa054e | [
"MIT"
] | 34 | 2015-01-13T16:06:29.000Z | 2021-08-09T12:38:06.000Z | from enum import Enum
from typing import List, Optional, Union
class StrEnum(str, Enum):
    """Enum whose members are strings and stringify to their raw value."""

    def __str__(self) -> str:
        # str(member) yields the bare value instead of "ClassName.MEMBER".
        return str(self.value)
class BaseTransformation:
    """Accumulates URL transformation directives (effects).

    Effects are stored as plain strings and joined with ``/-/`` separators
    when rendered into a CDN file path.
    """

    def __init__(
        self, transformation: Optional[Union[str, "BaseTransformation"]] = None
    ):
        self._effects: List[str] = []
        # Another transformation object seeds this one with its rendered effects.
        if isinstance(transformation, BaseTransformation):
            transformation = transformation.effects
        if transformation:
            # Drop any trailing slash so joining stays uniform.
            self._effects.append(transformation.rstrip("/"))

    def set(
        self, transformation_name: str, parameters: List[str]
    ) -> "BaseTransformation":
        """Append one effect (its name plus slash-joined parameters); returns self."""
        parts = [transformation_name] + list(parameters or [])
        self._effects.append("/".join(parts))
        return self

    def _prefix(self, file_id: str) -> str:
        # Every rendered path begins with "<file_id>/".
        return f"{file_id}/"

    def __str__(self) -> str:
        return self.effects

    @property
    def effects(self) -> str:
        """All accumulated effects joined with '/-/', with a trailing '/' when non-empty."""
        joined = "/-/".join(self._effects)
        return joined + "/" if joined else joined

    def path(self, file_id: str) -> str:
        """Render '<file_id>/' plus '-/<effects>' when any effects are set."""
        rendered = self._prefix(file_id)
        if self.effects:
            rendered += "-/" + self.effects
        return rendered
| 25.096154 | 79 | 0.595402 |
c6f3e02010cb1913799b474d40bd9ee66b3a8e66 | 2,518 | py | Python | virtool_workflow/hooks.py | eroberts9789/virtool-workflow | 18219eec2b9b934cedd3770ac319f40305c165f2 | [
"MIT"
] | 5 | 2020-09-24T20:29:08.000Z | 2022-03-17T14:50:56.000Z | virtool_workflow/hooks.py | eroberts9789/virtool-workflow | 18219eec2b9b934cedd3770ac319f40305c165f2 | [
"MIT"
] | 126 | 2020-10-01T23:38:34.000Z | 2022-03-31T08:26:28.000Z | virtool_workflow/hooks.py | eroberts9789/virtool-workflow | 18219eec2b9b934cedd3770ac319f40305c165f2 | [
"MIT"
] | 5 | 2020-09-29T21:29:46.000Z | 2021-07-27T20:34:58.000Z | """
Hooks provide a way to do things when events happen during the workflow lifecycle.
"""
from concurrent import futures
import asyncio
from virtool_workflow.execution.hooks.fixture_hooks import FixtureHook
from virtool_workflow.execution.hooks.hooks import Hook
from virtool_workflow.execution.hooks.workflow_hooks import *
# --------------------------------------------------------------------------
# Job lifecycle hooks.  Each FixtureHook is triggered by the runtime at the
# corresponding point in a workflow's life; handlers are registered by using
# the hook object as a decorator (see the docstrings below).
# --------------------------------------------------------------------------
on_success = FixtureHook("on_success")
"""
Triggered when a job completes successfully.
Parameters supplied are the `Workflow` instance and the results dict.
.. code-block:: python
@on_success
async def perform_on_success(workflow: Workflow, results: Dict[str, Any]):
...
"""
on_failure = FixtureHook("on_failure")
"""
Triggered when a job fails to complete. The exception
which caused the failure will be found in the `error` fixture.
.. code-block:: python
@on_failure
async def perform_on_failure(error: Exception):
...
"""
on_finish = FixtureHook("on_finish")
"""
Triggered when a job finishes, regardless of success or failure.
.. code-block:: python
@on_finish
async def perform_on_finish(workflow: Workflow):
...
"""
on_finalize = FixtureHook("on_finalize")
"""
Triggered after job finishes, regardless of success or failure.
Intended for finalization actions such as closing the fixture scope.
"""
# NOTE: on_cancelled is not fired directly by the runtime; it is re-dispatched
# from on_failure when the failure is a cancellation (see below).
on_cancelled = FixtureHook("on_cancelled")
"""
Triggered when a job is cancelled.
.. code-block:: python
@on_cancelled
async def on_cancelled(error: asyncio.CancelledError):
...
"""
@on_failure
async def _trigger_on_cancelled(error: Exception, scope):
    """Re-dispatch cancellation failures to the ``on_cancelled`` hook.

    Runs on every failure but only forwards the error when it is a
    cancellation from either :mod:`asyncio` or :mod:`concurrent.futures`.
    """
    if isinstance(error, (asyncio.CancelledError, futures.CancelledError)):
        await on_cancelled.trigger(scope, error)
on_load_config = FixtureHook("on_load_config")
"""
Triggered after the config is loaded from the CLI arguments and environment variables. A SimpleNamespace object
is provided which has an attribute (sharing the same name as the fixture) for each configuration fixture in
:mod:`virtool_workflow_runtime.config.configuration`.
.. code-block:: python
@on_load_config
def use_config(dev_mode):
if dev_mode:
...
"""
before_result_upload = FixtureHook("before_result_upload")
"""Triggered after the result is ready to be uploaded, but before it is actually uploaded."""
# Public API of this module.  "on_result" and "on_update" are re-exported from
# the workflow_hooks star import above.
__all__ = [
    "on_result",
    "on_update",
    "on_success",
    "on_failure",
    "on_finalize",
    "on_finish",
    "on_load_config",
    "before_result_upload",
    "on_cancelled",
]
| 23.754717 | 111 | 0.726767 |
4653e2b4f796b80fe82482658fd8586c9b522752 | 7,831 | py | Python | titan/common/titan_rpc.py | GoogleCloudPlatform/titan | 150173af9b71bcc3a3946138bfd5a0c392eae9df | [
"Apache-2.0"
] | 19 | 2015-01-28T05:10:54.000Z | 2019-01-25T09:21:32.000Z | titan/common/titan_rpc.py | googlearchive/titan | 150173af9b71bcc3a3946138bfd5a0c392eae9df | [
"Apache-2.0"
] | 3 | 2015-10-03T17:03:46.000Z | 2020-04-20T21:32:28.000Z | titan/common/titan_rpc.py | googlearchive/titan | 150173af9b71bcc3a3946138bfd5a0c392eae9df | [
"Apache-2.0"
] | 10 | 2015-01-14T00:34:23.000Z | 2018-09-17T15:38:05.000Z | #!/usr/bin/env python
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Additional utility methods for appengine_rpc.HttpRpcServer.
This module is meant to be used as a base for Titan RPC clients, but can also be
used directly to make authenticated requests to an App Engine app.
Sample usage:
client = titan_rpc.TitanClient(
host=host, auth_function=auth_function, user_agent=user_agent,
source=source)
resp = client.fetch_url('/_titan/read?path=/foo/bar.txt')
print resp.content
"""
import copy
import getpass
import sys
import urllib2
from google.appengine.tools import appengine_rpc
USER_AGENT = 'TitanRpcClient/1.0'
SOURCE = '-'
class Error(Exception):
  """Base exception for errors raised by the Titan RPC client."""
class AuthenticationError(Error):
  """Raised when the stored credentials are rejected by the server."""
class RpcError(Error):
  """Raised for general RPC-level failures."""
def create_auth_func():
  """Return a callable supplying (email, password) credentials.

  Prompts on stdin only when it is attached to a terminal; otherwise the
  credentials are empty strings.
  """
  if sys.stdin.isatty():
    email = raw_input('Email: ')
    password = getpass.getpass('Password: ')
  else:
    email = ''
    password = ''
  return lambda: (email, password)
class TitanClient(appengine_rpc.HttpRpcServer):
  """RPC class to make authenticated requests to an App Engine app.
  NOTE: This class isn't thread-safe; avoid using the same instance of this
  object in threaded situations. A Copy() method is provided for convenience to
  make copies of instances that can be used in threads.
  """
  def __init__(self, *args, **kwargs):
    # self.method holds the HTTP method for the *current* request only; it is
    # consulted by _CreateRequest and reset after each fetch_url call.
    super(TitanClient, self).__init__(*args, **kwargs)
    self.method = None
    # Snapshot of the baseline headers; extra_headers is rebuilt from this on
    # every fetch_url call.
    self.orig_headers = self.extra_headers.copy()
  def copy(self):
    """Copies an instance of self."""
    obj = copy.copy(self)
    # The copy.copy() method constructs a new object and copies references into
    # it. As a result, we need to create shallow copies of self.extra_headers
    # and self.orig_headers so that the copied object doesn't retain references
    # to the original dicts.
    obj.extra_headers = self.extra_headers.copy()
    obj.orig_headers = self.orig_headers.copy()
    return obj
  def fetch_url(self, url, method=None, payload=None, headers=None, **kwargs):
    """Fetches a URL path and returns a Response object.
    Args:
      url: URL path (along with query params) to request.
      method: Optional HTTP method.
      payload: POST body.
      headers: Dict of headers to send with the request.
    Returns:
      A Response object.
    """
    # Set self.method to the HTTP method so that we can override urllib2's
    # "get_method" method with something other than GET or POST. As a result of
    # this "hack", this class/method isn't thread-safe; avoid using the same
    # instance of this object in threaded situations.
    self.method = method and method.upper()
    if self.method in ['PATCH', 'POST', 'PUT']:
      # Write methods are tunnelled via X-Http-Method-Override with a POST
      # body (the base class only does GET/POST natively).
      headers = headers or {}
      headers['X-Http-Method-Override'] = self.method
      payload = payload or ''
    else:
      payload = None
    # This is an attribute of AbstractRpcServer, used in self._CreateRequest.
    self.extra_headers = self.orig_headers.copy()
    if headers:
      self.extra_headers.update(headers)
    try:
      # content_type must unfortunately be given to the base class here,
      # not in a header.
      content_type = self.extra_headers.pop(
          'Content-Type', 'application/x-www-form-urlencoded')
      content = self.Send(url, payload=payload, content_type=content_type,
                          **kwargs)
      # Unset the self.method so it does not affect the next request.
      # This is horribly un-threadsafe, but works in practice because requests
      # are usually all of one method type except for authentication.
      self.method = None
      # NOTE: The status code might not actually be 200 (any 2xx status code
      # might be returned, but appengine_rpc doesn't exactly provide an
      # easy way to get this information.
      status_code = 200
    except urllib2.HTTPError, e:
      content = e.read()
      status_code = e.code
    # Convert any unicode strings to byte strings using utf-8 encoding.
    if isinstance(content, unicode):
      content = content.encode('utf-8')
    resp = Response(content=content, status_code=status_code)
    return resp
  def validate_client_auth(self):
    """Test the stored credentials, may raise AuthenticationError."""
    try:
      if self._host_is_dev_app_server():
        # Dev app server accepts any credentials; _DevAppServerAuthenticate
        # is provided by the appengine_rpc base class.
        self._DevAppServerAuthenticate()
        self.orig_headers.update(self.extra_headers)
        return
      self._Authenticate()
    except appengine_rpc.ClientLoginError, e:
      error = ('Error %d: %s %s' %
               (e.code, e.reason, e.info or e.msg or '')).strip()
      raise AuthenticationError('Invalid username or password. (%s)' % error)
  def _host_is_dev_app_server(self):
    """Make a single GET / request to see if the server is a dev_appserver."""
    # This exists because appserver_rpc doesn't nicely expose auth error paths.
    try:
      response = urllib2.urlopen('%s://%s/' % (self.scheme, self.host))
      server_header = response.headers.get('server', '')
    except urllib2.URLError, e:
      # HTTPError instances carry headers; other URLErrors do not and are
      # re-raised unchanged.
      if not hasattr(e, 'headers'):
        raise
      server_header = e.headers.get('server', '')
    if server_header.startswith('Development'):
      return True
    return False
  def _Authenticate(self):  # Must be non-PEP 8 style name.
    # Skip ClientLogin authentication when an "Authorization" header exists.
    if 'Authorization' not in self.extra_headers:
      super(TitanClient, self)._Authenticate()
  def _CreateRequest(self, url, data=None):  # Must be non-PEP 8 style name.
    """Overrides the base method to allow different HTTP methods to be used."""
    request = super(TitanClient, self)._CreateRequest(url, data=data)
    if self.method is not None:
      request.get_method = lambda: self.method
    return request
class Response(object):
  """A lightweight container for an urlfetch-style HTTP response.
  Attributes:
    content: The response body.
    status_code: The integer HTTP status code.
    headers: A dict of HTTP response headers.
  """
  def __init__(self, content='', status_code=200, headers=None):
    """Initializes the response; headers defaults to an empty dict."""
    self.headers = headers or {}
    self.status_code = status_code
    self.content = content
class AbstractRemoteFactory(object):
  """Abstract factory for creating Remote* objects."""
  def __init__(self, host, auth_function=None,
               create_auth_function=create_auth_func, user_agent=USER_AGENT,
               source=SOURCE, secure=True, _titan_client=None, **kwargs):
    # All connection parameters are stored; the TitanClient itself is created
    # lazily by the titan_client property (unless one is injected via
    # _titan_client, which is mainly useful for tests).
    self.host = host
    self.auth_function = auth_function
    self.create_auth_function = create_auth_function
    self.user_agent = user_agent
    self.source = source
    self.secure = secure
    self.kwargs = kwargs
    self._titan_client = _titan_client
  @property
  def titan_client(self):
    """Property for the current titan client."""
    # Lazily construct and memoize the client on first access.
    if not self._titan_client:
      self._titan_client = self._get_titan_client(
          host=self.host,
          auth_function=self.auth_function,
          user_agent=self.user_agent,
          source=self.source,
          secure=self.secure,
          **self.kwargs)
    return self._titan_client
  def _get_titan_client(self, **kwargs):
    """Builds a TitanClient, lazily resolving the auth function.
    The create_auth_function callback (which may prompt the user) is only
    invoked when no auth_function was supplied up front.
    """
    if not self.auth_function:
      self.auth_function = self.create_auth_function()
    return TitanClient(**kwargs)
  def validate_client_auth(self):
    """Checks credentials; may raise AuthenticationError (see TitanClient)."""
    self.titan_client.validate_client_auth()
| 35.274775 | 80 | 0.695569 |
500f58a05a89cb04cc73cf5412aaf578985f4ee7 | 2,621 | py | Python | utensor_cgen/transformer/pipeline.py | ufo2011/utensor_cgen | 210bb530e7a532fbb9898f7df65f692e2f48f046 | [
"Apache-2.0"
] | null | null | null | utensor_cgen/transformer/pipeline.py | ufo2011/utensor_cgen | 210bb530e7a532fbb9898f7df65f692e2f48f046 | [
"Apache-2.0"
] | null | null | null | utensor_cgen/transformer/pipeline.py | ufo2011/utensor_cgen | 210bb530e7a532fbb9898f7df65f692e2f48f046 | [
"Apache-2.0"
] | null | null | null | import re
from ast import literal_eval
from .base import Transformer
class TransformerPipeline(object):
  """An ordered sequence of graph transformers applied one after another."""

  # Registry mapping METHOD_NAME -> Transformer subclass; shared by all
  # pipeline instances.
  TRANSFORMER_MAP = {}

  # Matches a leading transformer name, optionally followed by '('.
  _trans_name_patrn = re.compile(r"(\w[\w]*)\(?")

  def __init__(self, methods):
    """
    methods : list
      list of tuples of type Tuple[Type[Transformer], dict] or a string
      expression of the transformer such as
      'dropout(name_pattern=r"(dropout[_\\w\\d]*)/.*")'
    """
    self._pipeline = []
    for spec in methods:
      self._pipeline.append(self._build_transformer(spec))

  def _build_transformer(self, spec):
    """Instantiate one transformer from a (cls, kwargs) tuple or string."""
    if isinstance(spec, str):
      method, kwargs = self._parse_expr(spec)
      trans_cls = self.TRANSFORMER_MAP.get(method, None)
      if trans_cls is None:
        raise ValueError("Unknown transformation method: {}".format(method))
    else:
      trans_cls, kwargs = spec
      if not issubclass(trans_cls, Transformer):
        raise TypeError(
          "expecting subclass of {}, get {}".format(Transformer, trans_cls))
    return trans_cls(**kwargs)

  def transform(self, ugraph):
    """Apply every transformer to ugraph in order and return the result."""
    for transformer in self._pipeline:
      ugraph = transformer.transform(ugraph)
    return ugraph

  @property
  def pipeline(self):
    """The list of instantiated transformers."""
    return self._pipeline

  @classmethod
  def all_transform_methods(cls):
    """Return the names of every registered transform method."""
    return list(cls.TRANSFORMER_MAP)

  @classmethod
  def register_transformer(cls, trans_cls=None, overwrite=False):
    """Register a Transformer subclass; usable directly or as a decorator."""
    def decorator(transformer_cls):
      if not issubclass(transformer_cls, Transformer):
        raise ValueError("expecting Transformer type, get %s" % transformer_cls)
      if not overwrite and transformer_cls.METHOD_NAME in cls.TRANSFORMER_MAP:
        raise RuntimeError("Registering existing transformer without overwriting")
      cls.TRANSFORMER_MAP[transformer_cls.METHOD_NAME] = transformer_cls
      return transformer_cls
    return decorator if trans_cls is None else decorator(trans_cls)

  @classmethod
  def _parse_expr(cls, expr):
    """Split a 'name(k=v, ...)' expression into (name, kwargs)."""
    trans_match = cls._trans_name_patrn.match(expr)
    if not trans_match:
      raise ValueError("Invalid args detected: {}".format(expr))
    trans_name = trans_match.group(1)
    end = trans_match.span()[1]
    if end == len(expr):
      return trans_name, {}
    if not expr.endswith(")"):
      raise ValueError("parentheses mismatch: {}".format(expr))
    return trans_name, cls._get_kwargs(expr[end:-1])

  @classmethod
  def _get_kwargs(cls, kws_str):
    """Parse a comma-separated 'name=literal' list into a dict."""
    kwargs = {}
    for kw_str in (part.strip() for part in kws_str.split(',')):
      name, v_str = kw_str.split('=')
      kwargs[name] = literal_eval(v_str)
    return kwargs
| 31.202381 | 90 | 0.677222 |
67a33c5d8c5c1415de6eeb19bb0744bc9e77d39d | 1,791 | py | Python | storm_control/test/test_buffered_1.py | jeffmoffitt/storm-control | 522add1e196e0b7964f574481fd90c20a74b575e | [
"MIT"
] | null | null | null | storm_control/test/test_buffered_1.py | jeffmoffitt/storm-control | 522add1e196e0b7964f574481fd90c20a74b575e | [
"MIT"
] | null | null | null | storm_control/test/test_buffered_1.py | jeffmoffitt/storm-control | 522add1e196e0b7964f574481fd90c20a74b575e | [
"MIT"
] | 1 | 2020-11-10T06:39:18.000Z | 2020-11-10T06:39:18.000Z | #!/usr/bin/env python
"""
A test of the BufferedFunctionality class.
"""
import sys
import time
from PyQt5 import QtCore, QtWidgets
import storm_control.sc_hardware.baseClasses.hardwareModule as hardwareModule
class TBWidget(QtWidgets.QWidget):
    """Test harness widget driving a BufferedFunctionality instance.

    A single-shot timer kicks off runTest() shortly after construction; the
    results come back asynchronously via the 'processed' signal and are
    checked in handleProcessed().
    """
    # Emitted with the edited string each time editString() completes.
    processed = QtCore.pyqtSignal(str)
    def __init__(self, **kwds):
        super().__init__(**kwds)
        self.bf = hardwareModule.BufferedFunctionality()
        self.strings = []
        # Single-shot timer so runTest() runs once the event loop is going.
        self.timer = QtCore.QTimer()
        self.timer.setInterval(10)
        self.timer.timeout.connect(self.runTest)
        self.timer.setSingleShot(True)
        self.timer.start()
        self.processed.connect(self.handleProcessed)
    def editString(self, string):
        # Slow task: the sleep makes later maybeRun() requests pile up so
        # that buffering behaviour can be observed.
        time.sleep(0.2)
        string = "0" + string
        return string
    def handleProcessed(self, string):
        """Collects results; verifies the expected sequence on 'end'."""
        self.strings.append(string)
        if "end" in string:
            # Expected: first request, the mustRun() request, and the last
            # maybeRun() request — intermediate maybeRun()s are dropped.
            try:
                assert(self.strings == ["0111", "0212", "0end"])
            except AssertionError as exc:
                print(self.strings, "is not expected")
                raise exc
            self.bf.wait()
            self.close()
    def runTest(self):
        """Queues a burst of maybeRun() tasks plus one mustRun() task."""
        for string in ["111", "222", "333", "444", "555", "end"]:
            self.bf.maybeRun(task = self.editString,
                             args = [string],
                             ret_signal = self.processed)
            if(string == "222"):
                self.bf.mustRun(task = self.editString,
                                args = ["212"],
                                ret_signal = self.processed)
def test_buffered():
    """Runs the TBWidget test inside a Qt event loop until it closes itself."""
    app = QtWidgets.QApplication(sys.argv)
    tb1 = TBWidget()
    # Blocks until TBWidget.handleProcessed() calls close().
    app.exec_()
    # Drop the reference so the QApplication can be destroyed.
    app = None
if (__name__ == "__main__"):
    test_buffered()
| 28.428571 | 77 | 0.553881 |
bbc518a026201abdf151736831e3f37bf2f62a8d | 3,177 | py | Python | orc8r/gateway/python/setup.py | nstng/magma | dec2691450f4bdd9e25d1e2eb0a38dc893dfeb7f | [
"BSD-3-Clause"
] | null | null | null | orc8r/gateway/python/setup.py | nstng/magma | dec2691450f4bdd9e25d1e2eb0a38dc893dfeb7f | [
"BSD-3-Clause"
] | null | null | null | orc8r/gateway/python/setup.py | nstng/magma | dec2691450f4bdd9e25d1e2eb0a38dc893dfeb7f | [
"BSD-3-Clause"
] | null | null | null | """
Copyright 2020 The Magma Authors.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree.
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
from setuptools import setup
# We can use an environment variable to pass in the package version during
# build. Since we don't distribute this on its own, we don't really care what
# version this represents. 'None' defaults to 0.0.0.
# PKG_VERSION is injected by the build; None makes setuptools default to 0.0.0.
VERSION = os.environ.get('PKG_VERSION', None)
setup(
    name='orc8r',
    version=VERSION,
    packages=[
        'magma.common',
        'magma.common.health',
        'magma.common.redis',
        'magma.configuration',
        'magma.directoryd',
        'magma.magmad',
        'magma.magmad.generic_command',
        'magma.magmad.check',
        'magma.magmad.check.kernel_check',
        'magma.magmad.check.machine_check',
        'magma.magmad.check.network_check',
        'magma.magmad.upgrade',
        'magma.state',
        'magma.eventd',
        'magma.ctraced',
    ],
    # CLI entry points installed onto the gateway.
    scripts=[
        'scripts/checkin_cli.py',
        'scripts/ctraced_cli.py',
        'scripts/directoryd_cli.py',
        'scripts/generate_lighttpd_config.py',
        'scripts/generate_nghttpx_config.py',
        'scripts/generate_service_config.py',
        'scripts/generate_fluent_bit_config.py',
        'scripts/health_cli.py',
        'scripts/magma_conditional_service.py',
        'scripts/magma_get_config.py',
        'scripts/magmad_cli.py',
        'scripts/service_util.py',
        'scripts/service303_cli.py',
        'scripts/show_gateway_info.py',
        'scripts/traffic_cli.py',
    ],
    # Versions are pinned deliberately; keep in sync with the LTE gateway
    # setup.py where noted below.
    install_requires=[
        'setuptools==49.6.0',
        'Cython>=0.29.1',
        'pystemd>=0.5.0',
        'docker>=4.0.2',
        'fire>=0.2.0',
        'glob2>=0.7',
        'aioh2>=0.2.2',
        'redis==3.5.3',  # redis-py (Python bindings to redis)
        'redis-collections==0.9.1',
        'python-redis-lock>=3.7.0',
        'aiohttp>=0.17.2',
        'grpcio>=1.16.1',
        'protobuf==3.19.0',
        'Jinja2>=2.8',
        'netifaces>=0.10.4',
        'pylint==2.14.0',
        'PyYAML>=3.12',
        'pytz>=2014.4',
        'prometheus_client==0.3.1',
        'sentry_sdk>=1.5.0',
        'snowflake>=0.0.3',
        'psutil==5.8.0',
        'cryptography>=1.9',
        'itsdangerous>=0.24',
        'click>=5.1',
        'pycares>=2.3.0',
        'python-dateutil>=1.4',
        # force same requests version as lte/gateway/python/setup.py
        'requests==2.22.0',
        'jsonpickle',
        'bravado-core==5.16.1',
        'jsonschema==3.1.0',
        "strict-rfc3339>=0.7",
        "rfc3987>=1.3.0",
        "webcolors>=1.11.1",
        'systemd-python>=234',
        "jsonpointer>=1.14",
    ],
    extras_require={
        'dev': [
            "lupa==1.10",
            "fakeredis[lua]==1.7.1",
        ],
    },
)
| 29.971698 | 77 | 0.583884 |
511a369d6b8e2c8202b1603d4d1eb28ef8d03d3b | 1,398 | py | Python | dispel4py/__main__.py | AndreiFrunze/wrangler | 076a07de00fc966dcf18ca6b6a6e804be5245ed9 | [
"Apache-2.0"
] | 22 | 2015-04-14T13:08:28.000Z | 2020-04-23T11:56:52.000Z | dispel4py/__main__.py | AndreiFrunze/wrangler | 076a07de00fc966dcf18ca6b6a6e804be5245ed9 | [
"Apache-2.0"
] | 4 | 2015-10-06T11:04:34.000Z | 2019-03-27T10:50:10.000Z | dispel4py/__main__.py | AndreiFrunze/wrangler | 076a07de00fc966dcf18ca6b6a6e804be5245ed9 | [
"Apache-2.0"
] | 19 | 2015-04-09T15:45:08.000Z | 2022-02-28T06:58:02.000Z | # Copyright (c) The University of Edinburgh 2014-2015
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from importlib import import_module
def main(args=None):
    """Parse the target platform and dispatch to that platform's ``main``.

    Args:
        args: Optional list of command-line argument strings.  When None,
            argparse falls back to ``sys.argv[1:]`` as before.  (Previously
            this parameter was accepted but silently ignored.)
    """
    parser = argparse.ArgumentParser(
        description='Submit a dispel4py graph for processing.')
    parser.add_argument('target', help='target execution platform')
    # parse_known_args: unrecognised options are left for the target module.
    args, remaining = parser.parse_known_args(args)
    try:
        from dispel4py.new import mappings
        # see if platform is in the mappings file as a simple name
        target = mappings.config[args.target]
    except (ImportError, KeyError):
        # Also catch ImportError so a missing mappings module degrades to
        # treating the target as a proper module name.
        target = args.target
    try:
        process = getattr(import_module(target), 'main')
    except (ImportError, AttributeError):
        # Was a bare "except:", which also swallowed SystemExit and
        # KeyboardInterrupt; only import/lookup failures mean the target
        # is unknown.
        print('Unknown target: %s' % target)
        return
    process()
if __name__ == "__main__":
    main()
| 31.772727 | 74 | 0.701717 |
4333c2bee3dfa32796e69d12bf0a5f10acfbc04c | 6,004 | py | Python | sb/Data/Packages/Default/Comment.py | luc4spas/SISGE-ACONCHEGO | 77096a81f09b286a3793c7cb9cee9941c1fae803 | [
"MIT"
] | null | null | null | sb/Data/Packages/Default/Comment.py | luc4spas/SISGE-ACONCHEGO | 77096a81f09b286a3793c7cb9cee9941c1fae803 | [
"MIT"
] | null | null | null | sb/Data/Packages/Default/Comment.py | luc4spas/SISGE-ACONCHEGO | 77096a81f09b286a3793c7cb9cee9941c1fae803 | [
"MIT"
] | null | null | null | import sublime, sublimeplugin
def advanceToFirstNonWhitespaceOnLine(view, pt):
	"""Return the position of the first character at or after pt that is
	not a space or tab."""
	while view.substr(sublime.Region(pt, pt + 1)) in (" ", "\t"):
		pt += 1
	return pt
def hasNonWhitespaceOnLine(view, pt):
	"""Return True if any non-whitespace character precedes the newline
	at or after pt."""
	while True:
		ch = view.substr(sublime.Region(pt, pt + 1))
		if ch not in (" ", "\t"):
			return ch != "\n"
		pt += 1
def buildCommentData(view, pt):
	"""Collect TM_COMMENT_* shell variables for the scope at pt.

	Returns (lineComments, blockComments): lineComments is a list of
	(start, disableIndent) tuples, blockComments a list of
	(start, end, disableIndent) tuples.  Each delimiter is recorded twice,
	once verbatim and once stripped of surrounding whitespace.
	"""
	shellVars = view.metaInfo("shellVariables", pt)
	if not shellVars:
		return ([], [])
	# Flatten the list of {'name': ..., 'value': ...} dicts into one mapping.
	allVars = {}
	for entry in shellVars:
		if 'name' in entry and 'value' in entry:
			allVars[entry['name']] = entry['value']
	lineComments = []
	blockComments = []
	# TM_COMMENT_START, then TM_COMMENT_START_1 ... TM_COMMENT_START_9.
	for suffix in [""] + ["_" + str(n) for n in range(1, 10)]:
		start = allVars.setdefault("TM_COMMENT_START" + suffix)
		end = allVars.setdefault("TM_COMMENT_END" + suffix)
		disableIndent = allVars.setdefault("TM_COMMENT_DISABLE_INDENT" + suffix)
		if start and end:
			blockComments.append((start, end, disableIndent == 'yes'))
			blockComments.append(
				(start.strip(), end.strip(), disableIndent == 'yes'))
		elif start:
			lineComments.append((start, disableIndent == 'yes'))
			lineComments.append((start.strip(), disableIndent == 'yes'))
	return (lineComments, blockComments)
class ToggleCommentCommand(sublimeplugin.TextCommand):
	"""Toggles line or block comments for each selected region."""

	def removeBlockComment(self, view, commentData, region):
		"""Strip a surrounding block comment; return True if one was found."""
		blockComments = commentData[1]
		wholeRegion = view.extractScope(region.begin())
		for start, end, disableIndent in blockComments:
			startRegion = sublime.Region(
				wholeRegion.begin(), wholeRegion.begin() + len(start))
			endRegion = sublime.Region(
				wholeRegion.end() - len(end), wholeRegion.end())
			if view.substr(startRegion) == start and view.substr(endRegion) == end:
				# It's faster to erase the start region first.
				view.erase(startRegion)
				endRegion = sublime.Region(
					endRegion.begin() - startRegion.size(),
					endRegion.end() - startRegion.size())
				view.erase(endRegion)
				return True
		return False

	def removeLineComment(self, view, commentData, region):
		"""Remove a leading line comment marker from each line of region."""
		lineComments = commentData[0]
		foundLineComment = False
		positions = [advanceToFirstNonWhitespaceOnLine(view, r.begin())
			for r in view.lines(region)]
		# Erase back-to-front so earlier positions stay valid.
		for pos in reversed(positions):
			for start, disableIndent in lineComments:
				commentRegion = sublime.Region(pos, pos + len(start))
				if view.substr(commentRegion) == start:
					view.erase(commentRegion)
					foundLineComment = True
					break
		return foundLineComment

	def isEntirelyLineCommented(self, view, commentData, region):
		"""True when every non-blank line in region starts with a marker."""
		lineComments = commentData[0]
		positions = [advanceToFirstNonWhitespaceOnLine(view, r.begin())
			for r in view.lines(region)]
		positions = [p for p in positions if hasNonWhitespaceOnLine(view, p)]
		if not positions:
			return False
		for pos in positions:
			if not any(
					view.substr(sublime.Region(pos, pos + len(start))) == start
					for start, disableIndent in lineComments):
				return False
		return True

	def blockCommentRegion(self, view, blockCommentData, region):
		"""Wrap region in block delimiters (end inserted first so offsets
		stay valid)."""
		start, end, disableIndent = blockCommentData
		view.insert(region.end(), end)
		view.insert(region.begin(), start)

	def lineCommentRegion(self, view, lineCommentData, region):
		"""Insert a line comment marker at the front of each line in region."""
		start, disableIndent = lineCommentData
		positions = [r.begin() for r in view.lines(region)]
		positions.reverse()
		# Blank lines make lining the markers up challenging, so they are
		# excluded from consideration...
		nonEmpty = [p for p in positions if hasNonWhitespaceOnLine(view, p)]
		# ...unless every line is blank, in which case just comment away.
		if nonEmpty:
			positions = nonEmpty
		if not disableIndent:
			# Align the markers at the smallest common indent.  This won't
			# work well with mixed spaces and tabs, but really, don't do that!
			minIndent = None
			for pos in positions:
				indent = advanceToFirstNonWhitespaceOnLine(view, pos) - pos
				if minIndent is None or indent < minIndent:
					minIndent = indent
			if minIndent is not None and minIndent > 0:
				positions = [p + minIndent for p in positions]
		for pos in positions:
			view.insert(pos, start)

	def addComment(self, view, commentData, preferBlock, region):
		"""Insert a comment at region, honouring preferBlock when possible."""
		lineComments, blockComments = commentData
		if not lineComments and not blockComments:
			return
		# Fall back to whichever comment style is actually available.
		if not blockComments:
			preferBlock = False
		if not lineComments:
			preferBlock = True
		if region.empty():
			# An empty selection comments out the line unless only block
			# comments exist for this language.
			if lineComments:
				self.lineCommentRegion(view, lineComments[0], region)
			else:
				self.blockCommentRegion(view, blockComments[0], region)
		elif preferBlock:
			self.blockCommentRegion(view, blockComments[0], region)
		else:
			self.lineCommentRegion(view, lineComments[0], region)

	def run(self, view, args):
		preferBlock = len(args) > 0 and args[0] == "block"
		for region in view.sel():
			commentData = buildCommentData(view, region.begin())
			if (region.end() != view.size() and
					buildCommentData(view, region.end()) != commentData):
				# region spans languages, nothing we can do
				continue
			if self.removeBlockComment(view, commentData, region):
				continue
			if self.isEntirelyLineCommented(view, commentData, region):
				self.removeLineComment(view, commentData, region)
				continue
			# Nothing to remove, so add a comment instead.
			self.addComment(view, commentData, preferBlock, region)
| 28.454976 | 77 | 0.701033 |
022d4565165dac2044573af270ffe573ddb50193 | 14,404 | py | Python | sample-demo/venv/Lib/site-packages/PyQt6/lupdate/translation_file.py | rupc/bsp-protos | 58833e7ab9ff53f3633708fb5f95edfdd152c5ea | [
"Apache-2.0"
] | null | null | null | sample-demo/venv/Lib/site-packages/PyQt6/lupdate/translation_file.py | rupc/bsp-protos | 58833e7ab9ff53f3633708fb5f95edfdd152c5ea | [
"Apache-2.0"
] | 20 | 2021-05-03T18:02:23.000Z | 2022-03-12T12:01:04.000Z | sample-demo/venv/Lib/site-packages/PyQt6/lupdate/translation_file.py | rupc/bsp-protos | 58833e7ab9ff53f3633708fb5f95edfdd152c5ea | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2021 Riverbank Computing Limited <info@riverbankcomputing.com>
#
# This file is part of PyQt6.
#
# This file may be used under the terms of the GNU General Public License
# version 3.0 as published by the Free Software Foundation and appearing in
# the file LICENSE included in the packaging of this file. Please review the
# following information to ensure the GNU General Public License version 3.0
# requirements will be met: http://www.gnu.org/copyleft/gpl.html.
#
# If you do not wish to use this file under the terms of the GPL version 3.0
# then you may purchase a commercial license. For more information contact
# info@riverbankcomputing.com.
#
# This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
# WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
import os
from xml.etree import ElementTree
from .user import User, UserException
class TranslationFile(User):
""" Encapsulate a translation file. """
    def __init__(self, ts_file, no_obsolete, no_summary, **kwargs):
        """ Initialise the translation file.

        Parses ts_file if it exists (otherwise starts from an empty .ts
        skeleton), indexes its messages by context and by source text, and
        detaches all <context> elements from the root so that write() can
        append them back in sorted order.
        """
        super().__init__(**kwargs)
        if os.path.isfile(ts_file):
            self.progress("Reading {0}...".format(ts_file))
            try:
                self._root = ElementTree.parse(ts_file).getroot()
            except Exception as e:
                raise UserException(
                        "{}: {}: {}".format(ts_file,
                                "invalid translation file", str(e)))
        else:
            # _EMPTY_TS is a module-level skeleton document (defined
            # elsewhere in this file).
            self._root = ElementTree.fromstring(_EMPTY_TS)
        self._ts_file = ts_file
        self._no_obsolete = no_obsolete
        self._no_summary = no_summary
        self._updated_contexts = {}
        # Create a dict of contexts keyed by the context name and having the
        # list of message elements as the value.
        self._contexts = {}
        # Also create a dict of existing translations so that they can be
        # re-used.
        self._translations = {}
        context_els = []
        for context_el in self._root:
            if context_el.tag != 'context':
                continue
            context_els.append(context_el)
            name = ''
            message_els = []
            for el in context_el:
                if el.tag == 'name':
                    name = el.text
                elif el.tag == 'message':
                    message_els.append(el)
            if name:
                self._contexts[name] = message_els
                # Index non-empty translations by their source text.
                for message_el in message_els:
                    source_el = message_el.find('source')
                    if source_el is None or not source_el.text:
                        continue
                    translation_el = message_el.find('translation')
                    if translation_el is None or not translation_el.text:
                        continue
                    self._translations[source_el.text] = translation_el.text
        # Remove the context elements but keep everything else in the root
        # (probably set by Linguist).
        for context_el in context_els:
            self._root.remove(context_el)
        # Clear the summary statistics.
        self._nr_new = 0
        self._nr_new_duplicates = 0
        self._nr_new_using_existing_translation = 0
        self._nr_existing = 0
        self._nr_kept_obsolete = 0
        self._nr_discarded_obsolete = 0
        self._nr_discarded_untranslated = 0
        # Remember all new messages so we can make the summary less confusing
        # than it otherwise might be.
        self._new_message_els = []
    def update(self, source):
        """ Update the translation file from a SourceFile object.

        Messages found in the source are moved (or newly added) to the
        per-context 'updated' lists; anything left in self._contexts after
        all sources are processed is considered obsolete by write().  A
        fresh <location> element is prepended to every message seen.
        """
        self.progress(
                "Updating {0} from {1}...".format(self._ts_file,
                        source.filename))
        for context in source.contexts:
            # Get the messages that we already know about for this context.
            try:
                message_els = self._contexts[context.name]
            except KeyError:
                message_els = []
            # Get the messages that have already been updated.
            updated_message_els = self._get_updated_message_els(context.name)
            for message in context.messages:
                message_el = self._find_message(message, message_els)
                if message_el is not None:
                    # Move the message to the updated list.
                    message_els.remove(message_el)
                    self._add_message_el(message_el, updated_message_els)
                else:
                    # See if this is a new message.  If not then we just have
                    # another location for an existing message.
                    message_el = self._find_message(message,
                            updated_message_els)
                    if message_el is None:
                        message_el = self._make_message_el(message)
                        updated_message_els.append(message_el)
                        self.progress(
                                "Added new message '{0}'".format(
                                        self.pretty(message.source)))
                        self._nr_new += 1
                    else:
                        self.progress(
                                "Updated message '{0}'".format(
                                        self.pretty(message.source)))
                        # Don't count another copy of a new message as an
                        # existing one.
                        if message_el in self._new_message_els:
                            self._nr_new_duplicates += 1
                        else:
                            self._nr_existing += 1
                message_el.insert(0, self._make_location_el(message))
    def write(self):
        """ Write the translation file back to the filesystem.

        Messages remaining in self._contexts (i.e. not matched by any
        update() call) are obsolete: translated ones are kept (marked
        'vanished') unless --no-obsolete was given, untranslated ones are
        always discarded.  Contexts are written back in sorted name order.
        """
        # If we are keeping obsolete messages then add them to the updated
        # message elements list.
        for name, message_els in self._contexts.items():
            updated_message_els = None
            for message_el in message_els:
                source = self.pretty(message_el.find('source').text)
                translation_el = message_el.find('translation')
                if translation_el is not None and translation_el.text:
                    if self._no_obsolete:
                        self.progress(
                                "Discarded obsolete message '{0}'".format(
                                        source))
                        self._nr_discarded_obsolete += 1
                    else:
                        translation_el.set('type', 'vanished')
                        if updated_message_els is None:
                            updated_message_els = self._get_updated_message_els(
                                    name)
                        self._add_message_el(message_el, updated_message_els)
                        self.progress(
                                "Kept obsolete message '{0}'".format(source))
                        self._nr_kept_obsolete += 1
                else:
                    self.progress(
                            "Discarded untranslated message '{0}'".format(
                                    source))
                    self._nr_discarded_untranslated += 1
        # Created the sorted context elements.
        for name in sorted(self._updated_contexts.keys()):
            context_el = ElementTree.Element('context')
            name_el = ElementTree.Element('name')
            name_el.text = name
            context_el.append(name_el)
            context_el.extend(self._updated_contexts[name])
            self._root.append(context_el)
        self.progress("Writing {0}...".format(self._ts_file))
        with open(self._ts_file, 'w', encoding='utf-8', newline='\n') as f:
            f.write('<?xml version="1.0" encoding="utf-8"?>\n')
            f.write('<!DOCTYPE TS>\n')
            # Python v3.9 and later.
            if hasattr(ElementTree, 'indent'):
                ElementTree.indent(self._root)
            ElementTree.ElementTree(self._root).write(f, encoding='unicode')
            f.write('\n')
        if not self._no_summary:
            self._summary()
@staticmethod
def _add_message_el(message_el, updated_message_els):
""" Add a message element to a list of updated message elements. """
# Remove all the location elements.
for location_el in message_el.findall('location'):
message_el.remove(location_el)
# Add the message to the updated list.
updated_message_els.append(message_el)
    @classmethod
    def _find_message(cls, message, message_els):
        """ Return the message element for a message from a list.

        A message matches an element only when its source text, comment,
        extra comments and extra-* fields are all equal.  Returns None
        when no element matches.
        """
        for message_el in message_els:
            source = ''
            comment = ''
            extra_comment = ''
            extras = []
            # Extract the data from the element.
            for el in message_el:
                if el.tag == 'source':
                    source = el.text
                elif el.tag == 'comment':
                    comment = el.text
                elif el.tag == 'extracomment':
                    extra_comment = el.text
                elif el.tag.startswith('extra-'):
                    # Record the field name without the 'extra-' prefix.
                    extras.append([el.tag[6:], el.text])
            # Compare with the message.
            if source != message.source:
                continue
            if comment != message.comment:
                continue
            if extra_comment != cls._get_message_extra_comments(message):
                continue
            if extras != message.embedded_comments.extras:
                continue
            return message_el
        return None
@staticmethod
def _get_message_extra_comments(message):
""" Return a message's extra comments as they appear in a .ts file. """
return ' '.join(message.embedded_comments.extra_comments)
def _get_updated_message_els(self, name):
""" Return the list of updated message elements for a context. """
try:
updated_message_els = self._updated_contexts[name]
except KeyError:
updated_message_els = []
self._updated_contexts[name] = updated_message_els
return updated_message_els
def _make_location_el(self, message):
""" Return a 'location' element. """
return ElementTree.Element('location',
filename=os.path.relpath(message.filename,
start=os.path.dirname(os.path.abspath(self._ts_file))),
line=str(message.line_nr))
    def _make_message_el(self, message):
        """ Return a new 'message' element for a message.

        The element is also appended to self._new_message_els as a
        side-effect, and the reuse counter is bumped when an existing
        translation for the same source text is found.
        """
        # Optional attributes of the <message> element itself.
        attrs = {}
        if message.embedded_comments.message_id:
            attrs['id'] = message.embedded_comments.message_id
        if message.numerus:
            attrs['numerus'] = 'yes'
        message_el = ElementTree.Element('message', attrs)
        source_el = ElementTree.Element('source')
        source_el.text = message.source
        message_el.append(source_el)
        # <comment> and <extracomment> are only emitted when present.
        if message.comment:
            comment_el = ElementTree.Element('comment')
            comment_el.text = message.comment
            message_el.append(comment_el)
        if message.embedded_comments.extra_comments:
            extracomment_el = ElementTree.Element('extracomment')
            extracomment_el.text = self._get_message_extra_comments(message)
            message_el.append(extracomment_el)
        # New messages always start in the 'unfinished' translation state.
        translation_el = ElementTree.Element('translation',
                type='unfinished')
        # Try and find another message with the same source and use its
        # translation if it has one.
        translation = self._translations.get(message.source)
        if translation:
            translation_el.text = translation
            self.progress(
                    "Reused existing translation for '{0}'".format(
                            self.pretty(message.source)))
            self._nr_new_using_existing_translation += 1
        # A numerus (plural) message gets an empty <numerusform> placeholder.
        if message.numerus:
            translation_el.append(ElementTree.Element(
                    'numerusform'))
        message_el.append(translation_el)
        # Preserve any 'extra-*' fields embedded in the message's comments.
        for field, value in message.embedded_comments.extras:
            el = ElementTree.Element('extra-' + field)
            el.text = value
            message_el.append(el)
        self._new_message_els.append(message_el)
        return message_el
    def _summary(self):
        """ Display the summary of changes to the user.

        Each change counter is reported only when non-zero; when nothing
        changed at all a single 'unchanged' line is printed instead.
        """
        summary_lines = []
        # Display a line of the summary and the heading if not already done.
        def summary(line):
            nonlocal summary_lines
            # The heading is emitted lazily so an unchanged file can be
            # reported with a single line instead.
            if not summary_lines:
                summary_lines.append(
                        "Summary of changes to {ts}:".format(ts=self._ts_file))
            summary_lines.append(" " + line)
        if self._nr_new:
            if self._nr_new_duplicates:
                summary("{0} new messages were added (and {1} duplicates)".format(
                        self._nr_new, self._nr_new_duplicates))
            else:
                summary("{0} new messages were added".format(self._nr_new))
        if self._nr_new_using_existing_translation:
            summary("{0} messages reused existing translations".format(
                    self._nr_new_using_existing_translation))
        if self._nr_existing:
            summary("{0} existing messages were found".format(
                    self._nr_existing))
        if self._nr_kept_obsolete:
            summary("{0} obsolete messages were kept".format(
                    self._nr_kept_obsolete))
        if self._nr_discarded_obsolete:
            summary("{0} obsolete messages were discarded".format(
                    self._nr_discarded_obsolete))
        if self._nr_discarded_untranslated:
            summary("{0} untranslated messages were discarded".format(
                    self._nr_discarded_untranslated))
        # No counter fired at all: report the file as unchanged.
        if not summary_lines:
            summary_lines.append("{ts} was unchanged".format(ts=self._ts_file))
        print(os.linesep.join(summary_lines))
# The XML of an empty .ts file. This is what a current lupdate will create
# with an empty C++ source file.
_EMPTY_TS = '''<TS version="2.1">
</TS>
'''
| 35.653465 | 82 | 0.568245 |
cd7d67ea848d74046d39420041c3e3c11ee0ac7f | 9,222 | py | Python | discord/ext/commands/cooldowns.py | Keith-Cancel/discord.py | aa55e762ae0ebbe2d6410c1275eddef9787ef4b5 | [
"MIT"
] | 3 | 2020-11-30T19:13:56.000Z | 2020-11-30T23:08:53.000Z | discord/ext/commands/cooldowns.py | Keith-Cancel/discord.py | aa55e762ae0ebbe2d6410c1275eddef9787ef4b5 | [
"MIT"
] | null | null | null | discord/ext/commands/cooldowns.py | Keith-Cancel/discord.py | aa55e762ae0ebbe2d6410c1275eddef9787ef4b5 | [
"MIT"
] | 1 | 2021-06-22T07:36:10.000Z | 2021-06-22T07:36:10.000Z | # -*- coding: utf-8 -*-
"""
The MIT License (MIT)
Copyright (c) 2015-present Rapptz
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
from discord.enums import Enum
import time
import asyncio
from collections import deque
from ...abc import PrivateChannel
from .errors import MaxConcurrencyReached
__all__ = (
'BucketType',
'Cooldown',
'CooldownMapping',
'MaxConcurrency',
)
class BucketType(Enum):
    """Scope of a cooldown bucket: decides which hashable key identifies
    the bucket a message belongs to.
    """

    default = 0
    user = 1
    guild = 2
    channel = 3
    member = 4
    category = 5
    role = 6

    def get_key(self, msg):
        """Return the bucket key for *msg* under this scope.

        ``default`` has no per-message key and falls through to ``None``.
        """
        if self is BucketType.user:
            return msg.author.id
        if self is BucketType.guild:
            # Outside a guild (DMs) the author stands in for the guild.
            return (msg.guild or msg.author).id
        if self is BucketType.channel:
            return msg.channel.id
        if self is BucketType.member:
            return ((msg.guild and msg.guild.id), msg.author.id)
        if self is BucketType.category:
            return (msg.channel.category or msg.channel).id
        if self is BucketType.role:
            # Private channels have no roles, so the channel id is used there;
            # this yields the same result as a guild with only the @everyone
            # role. NOTE: PrivateChannel itself has no ``id`` attribute, but
            # the DMChannel/GroupChannel subclasses that arrive here do.
            target = (msg.channel if isinstance(msg.channel, PrivateChannel)
                      else msg.author.top_role)
            return target.id
class Cooldown:
    """A token-bucket rate limit: at most ``rate`` uses per ``per`` seconds,
    scoped according to a BucketType.
    """
    __slots__ = ('rate', 'per', 'type', '_window', '_tokens', '_last')
    def __init__(self, rate, per, type):
        self.rate = int(rate)        # maximum uses per window
        self.per = float(per)        # window length, in seconds
        self.type = type             # BucketType deciding the key scope
        self._window = 0.0           # start time of the current window
        self._tokens = self.rate     # uses remaining in the current window
        self._last = 0.0             # time of the most recent update
        if not isinstance(self.type, BucketType):
            raise TypeError('Cooldown type must be a BucketType')
    def get_tokens(self, current=None):
        """Return the tokens available at time ``current`` without mutating
        any state. ``current`` defaults to ``time.time()``.
        """
        if not current:
            current = time.time()
        tokens = self._tokens
        # The window has expired, so the bucket refills completely.
        if current > self._window + self.per:
            tokens = self.rate
        return tokens
    def get_retry_after(self, current=None):
        """Return the seconds to wait before a token becomes available,
        or 0.0 when one is available now.
        """
        current = current or time.time()
        tokens = self.get_tokens(current)
        if tokens == 0:
            return self.per - (current - self._window)
        return 0.0
    def update_rate_limit(self, current=None):
        """Consume one token. Returns the retry-after delay (seconds) when
        the bucket is exhausted, otherwise returns None implicitly.
        """
        current = current or time.time()
        self._last = current
        self._tokens = self.get_tokens(current)
        # first token used means that we start a new rate limit window
        if self._tokens == self.rate:
            self._window = current
        # check if we are rate limited
        if self._tokens == 0:
            return self.per - (current - self._window)
        # we're not so decrement our tokens
        self._tokens -= 1
        # see if we got rate limited due to this token change, and if
        # so update the window to point to our current time frame
        if self._tokens == 0:
            self._window = current
    def reset(self):
        """Refill the bucket and clear the last-use timestamp."""
        self._tokens = self.rate
        self._last = 0.0
    def copy(self):
        """Return a fresh Cooldown with the same configuration; the window
        and token state are NOT copied.
        """
        return Cooldown(self.rate, self.per, self.type)
    def __repr__(self):
        return '<Cooldown rate: {0.rate} per: {0.per} window: {0._window} tokens: {0._tokens}>'.format(self)
class CooldownMapping:
    """Maps bucket keys (derived from messages) to per-key Cooldown copies
    of a prototype cooldown.
    """

    def __init__(self, original):
        self._cache = {}
        self._cooldown = original

    def copy(self):
        """Return a shallow copy: same prototype, copied cache dict."""
        clone = CooldownMapping(self._cooldown)
        clone._cache = self._cache.copy()
        return clone

    @property
    def valid(self):
        return self._cooldown is not None

    @classmethod
    def from_cooldown(cls, rate, per, type):
        """Build a mapping around a freshly constructed Cooldown."""
        return cls(Cooldown(rate, per, type))

    def _bucket_key(self, msg):
        return self._cooldown.type.get_key(msg)

    def _verify_cache_integrity(self, current=None):
        # Drop every cached bucket that has sat unused for a full cooldown
        # window, e.g. a 60s cooldown not used for 60s no longer needs its
        # cache entry.
        current = current or time.time()
        expired = [key for key, bucket in self._cache.items()
                   if current > bucket._last + bucket.per]
        for key in expired:
            del self._cache[key]

    def get_bucket(self, message, current=None):
        """Return (creating if needed) the bucket for *message*."""
        # The default scope shares the single prototype bucket globally.
        if self._cooldown.type is BucketType.default:
            return self._cooldown
        self._verify_cache_integrity(current)
        key = self._bucket_key(message)
        try:
            bucket = self._cache[key]
        except KeyError:
            bucket = self._cooldown.copy()
            self._cache[key] = bucket
        return bucket

    def update_rate_limit(self, message, current=None):
        """Consume a token from the message's bucket; returns its delay."""
        bucket = self.get_bucket(message, current)
        return bucket.update_rate_limit(current)
class _Semaphore:
    """This class is a version of a semaphore.
    If you're wondering why asyncio.Semaphore isn't being used,
    it's because it doesn't expose the internal value. This internal
    value is necessary because I need to support both `wait=True` and
    `wait=False`.
    An asyncio.Queue could have been used to do this as well -- but it is
    not as inefficient since internally that uses two queues and is a bit
    overkill for what is basically a counter.
    """
    __slots__ = ('value', 'loop', '_waiters')
    def __init__(self, number):
        self.value = number          # tokens currently available
        self.loop = asyncio.get_event_loop()
        self._waiters = deque()      # futures of coroutines parked in acquire()
    def __repr__(self):
        return '<_Semaphore value={0.value} waiters={1}>'.format(self, len(self._waiters))
    def locked(self):
        # No tokens left: a non-waiting acquire would return False.
        return self.value == 0
    def is_active(self):
        # True while at least one acquire() is parked on a future.
        return len(self._waiters) > 0
    def wake_up(self):
        # Wake the first waiter whose future is still pending; completed or
        # cancelled futures are discarded along the way.
        while self._waiters:
            future = self._waiters.popleft()
            if not future.done():
                future.set_result(None)
                return
    async def acquire(self, *, wait=False):
        """Take one token. With wait=False, returns False immediately when
        none is available; with wait=True, blocks until woken by release().
        """
        if not wait and self.value <= 0:
            # signal that we're not acquiring
            return False
        while self.value <= 0:
            future = self.loop.create_future()
            self._waiters.append(future)
            try:
                await future
            except:
                # Bare except on purpose: also covers CancelledError (which is
                # a BaseException on modern Python). Pass the wake-up on to
                # another waiter if a token became available meanwhile, then
                # re-raise.
                future.cancel()
                if self.value > 0 and not future.cancelled():
                    self.wake_up()
                raise
        self.value -= 1
        return True
    def release(self):
        """Return one token and wake a pending waiter, if any."""
        self.value += 1
        self.wake_up()
class MaxConcurrency:
    """Limits how many invocations may run at once, scoped per BucketType
    key, with optional waiting when the limit is reached.
    """
    __slots__ = ('number', 'per', 'wait', '_mapping')
    def __init__(self, number, *, per, wait):
        self._mapping = {}   # bucket key -> _Semaphore
        self.per = per       # BucketType deciding the key scope
        self.number = number # max concurrent holders per key
        self.wait = wait     # block until free instead of raising
        if number <= 0:
            raise ValueError('max_concurrency \'number\' cannot be less than 1')
        if not isinstance(per, BucketType):
            raise TypeError('max_concurrency \'per\' must be of type BucketType not %r' % type(per))
    def copy(self):
        """Return a new MaxConcurrency with the same settings (the semaphore
        mapping is NOT shared).
        """
        return self.__class__(self.number, per=self.per, wait=self.wait)
    def __repr__(self):
        return '<MaxConcurrency per={0.per!r} number={0.number} wait={0.wait}>'.format(self)
    def get_key(self, message):
        return self.per.get_key(message)
    async def acquire(self, message):
        """Take a slot for the message's bucket, creating the bucket's
        semaphore on first use. Raises MaxConcurrencyReached when full and
        wait is False.
        """
        key = self.get_key(message)
        try:
            sem = self._mapping[key]
        except KeyError:
            self._mapping[key] = sem = _Semaphore(self.number)
        acquired = await sem.acquire(wait=self.wait)
        if not acquired:
            raise MaxConcurrencyReached(self.number, self.per)
    async def release(self, message):
        # Technically there's no reason for this function to be async
        # But it might be more useful in the future
        key = self.get_key(message)
        try:
            sem = self._mapping[key]
        except KeyError:
            # ...? peculiar -- release without a matching acquire; ignore.
            return
        else:
            sem.release()
        # Drop the semaphore once it is fully free and nobody is waiting,
        # so idle buckets do not accumulate in the mapping.
        if sem.value >= self.number and not sem.is_active():
            del self._mapping[key]
| 31.474403 | 108 | 0.624702 |
695daa8e40fb497f10ced0665122eb83bd79346e | 5,282 | py | Python | start_regtest.py | sergeyboyko0791/qrc20_docker_regtest | b5001f31cc0e08434809cb36326ad19e177f1958 | [
"Apache-2.0"
] | null | null | null | start_regtest.py | sergeyboyko0791/qrc20_docker_regtest | b5001f31cc0e08434809cb36326ad19e177f1958 | [
"Apache-2.0"
] | null | null | null | start_regtest.py | sergeyboyko0791/qrc20_docker_regtest | b5001f31cc0e08434809cb36326ad19e177f1958 | [
"Apache-2.0"
] | null | null | null | import json
import os
import sys
import subprocess
from subprocess import PIPE
import time
import threading
class Node:
    """A single regtest QTUM node: generates its config file, starts the
    daemon, and runs qtum-cli commands against it with retries.
    """

    def __init__(self, node_root, bin_path, port, rpc_port):
        """
        :param node_root: data directory for this node (created by prepare_config)
        :param bin_path: directory containing the qtumd/qtum-cli binaries
        :param port: p2p port for the node
        :param rpc_port: JSON-RPC port for the node
        """
        self.node_root = node_root
        self.bin_path = bin_path
        self.conf = self.node_root + '/qtum.conf'
        self.port = port
        self.rpc_user = 'test'
        self.rpc_password = 'test'
        self.rpc_port = rpc_port

    def cli_cmd(self, command, args=None, attempts=4, interval_s=1):
        """Run a qtum-cli command, retrying on non-zero exit codes.

        Returns the last subprocess.CompletedProcess; callers must check its
        ``returncode``.

        Note: ``args=None`` replaces the former mutable default ``args=[]``;
        behaviour for callers is identical.
        """
        assert attempts > 0
        args = list(args) if args else []
        result = None
        for attempt in range(attempts):
            result = self.__cli_cmd_impl(command, args[:])
            if result.returncode == 0:
                return result
            attempts_left = attempts - attempt - 1
            print("Client command error [command: {}] [args: {}] [attempts left: {}].\nstdout: {}\nstderr: {}".format(command, args, attempts_left, result.stdout, result.stderr))
            # Bug fix: do not sleep after the final failed attempt; there is
            # no retry left to wait for.
            if attempts_left > 0:
                time.sleep(interval_s)
        return result

    def __cli_cmd_impl(self, command, args=None):
        """Invoke qtum-cli once, prefixing the rpc port/conf arguments."""
        if args is None:
            args = []
        args.insert(0, self.bin_path + '/qtum-cli')
        args.insert(1, '-rpcport=' + str(self.rpc_port))
        args.insert(2, '-conf=' + self.conf)
        args.insert(3, command)
        return subprocess.run(args, stdout=PIPE, stderr=PIPE, universal_newlines=True)

    def run(self, connect_to=None):
        """Start qtumd as a regtest daemon for this node; optionally add
        another node ('host:port') as a peer via -addnode.
        """
        qtumd = self.bin_path + '/qtumd'
        args = [qtumd, '-conf=' + self.conf,
                '-port=' + str(self.port), '-datadir=' + self.node_root,
                '-whitelist=127.0.0.1', '-regtest', '-daemon']
        if connect_to is not None:
            args.append('-addnode=' + connect_to)
        print("Run QTUM node [args={}]".format(args))
        return subprocess.run(args, stdout=PIPE, stderr=PIPE, universal_newlines=True)

    def prepare_config(self):
        """Create the node's data directory and write its qtum.conf."""
        os.mkdir(self.node_root)
        # Append mode creates the file when missing, so the former separate
        # open(...).close() "touch" call was redundant and has been removed.
        with open(self.conf, 'a') as conf:
            conf.write("server=1\n")
            conf.write("rpcuser=" + self.rpc_user + '\n')
            conf.write("rpcpassword=" + self.rpc_password + '\n')
            conf.write("rpcallowip=0.0.0.0/0\n")
            conf.write("logevents=1\n")
            conf.write("addressindex=1\n")
            conf.write("txindex=1\n")
            # NOTE(review): "[regtest]" is written twice below, preserving the
            # original output byte-for-byte; presumably a single section
            # header would suffice -- confirm before collapsing them.
            conf.write("[regtest]\n")
            conf.write("rpcport=" + str(self.rpc_port) + '\n')
            conf.write("[regtest]\n")
            conf.write("rpcbind=0.0.0.0\n")
# Fill the blockchain mempool by periodically sending transactions
# Fill the blockchain mempool by periodically sending transactions
def fill_mempool_loop(node):
    """Keep the mempool non-empty by sending a small payment forever.

    Runs until the process exits; intended to be started on a daemon thread.
    """
    print("Start fill_mempool_loop")
    while True:
        # Bug fix: the original asserted on the CompletedProcess object
        # itself, which is always truthy, so failed sends were silently
        # ignored. Check the exit code instead, as check_mempool_loop does.
        result = node.cli_cmd('sendtoaddress', ['qeUbAVgkPiF62syqd792VJeB9BaqMtLcZV', "0.01"])
        assert result.returncode == 0
        time.sleep(0.1)
def check_mempool_loop(node, interval_s):
    """Poll the node's mempool forever, printing its state every
    *interval_s* seconds. Intended to be started on a daemon thread.
    """
    print("Start check_mempool_loop")
    while True:
        response = node.cli_cmd('getmempoolinfo')
        assert response.returncode == 0
        mempool_info = json.loads(response.stdout)
        print(">>>> Mempool <<<<")
        print(mempool_info)
        time.sleep(interval_s)
# --- script entry: configure, start and drive the regtest cluster ---
ROOT = sys.path[0]
# Required environment: number of nodes, first RPC port, address label.
CLIENTS_TO_START = int(os.environ['CLIENTS'])
COIN_RPC_PORT = int(os.environ['COIN_RPC_PORT'])
ADDRESS_LABEL = os.environ['ADDRESS_LABEL']
# Optional feature toggles ('true'/'false', case-insensitive).
FILL_MEMPOOL = os.getenv('FILL_MEMPOOL', 'false').lower() == 'true'
CHECK_MEMPOOL = os.getenv('CHECK_MEMPOOL', 'false').lower() == 'true'
# Create one Node per client; p2p ports start at 6000, RPC ports at
# COIN_RPC_PORT, both incremented per node.
nodes = []
for i in range(CLIENTS_TO_START):
    node_root = ROOT + '/node_' + str(i) + '/'
    bin_path = ROOT + '/bin/'
    port = 6000 + i
    rpc_port = COIN_RPC_PORT + i
    node = Node(node_root, bin_path, port, rpc_port)
    node.prepare_config()
    nodes.append(node)
# Start the first node standalone; every other node peers with it.
first_node_address = None
for i, node in enumerate(nodes):
    if i == 0:
        first_node_address = '127.0.0.1:' + str(node.port)
        assert node.run().returncode == 0
    else:
        assert node.run(first_node_address).returncode == 0
time.sleep(2)
# Generate an address with the specified ADDRESS_LABEL that can be used to create token and send its tokens
print("\nNOTE: There will be several attempts to get a new address as the wallet may not have loaded yet\n")
# 20 attempts with a 0.5s interval, since the wallet may still be loading.
result = nodes[0].cli_cmd('getnewaddress', [ADDRESS_LABEL], 20, 0.5)
assert result.returncode == 0
address = result.stdout.splitlines()[0]
print('Generate to address ' + address)
# node[0] will be used by integration tests to send Qtum amounts and deploy smart contracts
# so we should generate more than 500 blocks for the given address.
# For some reason, the first 499 blocks don't give rewards.
nodes[0].cli_cmd('generatetoaddress', [str(600), address])
print('config is ready')
time.sleep(0.5)
# Spawn the 'fill_mempool_loop' if it is required
if FILL_MEMPOOL:
    threading.Thread(target=fill_mempool_loop, args=(nodes[0],), daemon=True).start()
if CHECK_MEMPOOL:
    threading.Thread(target=check_mempool_loop, args=(nodes[0], 5,), daemon=True).start()
# starting blocks creation on last node
result = nodes[-1].cli_cmd('getnewaddress')
assert result.returncode == 0
address = result.stdout.splitlines()[0]
print('Starting blocks generation to address ' + address)
# Main loop: mine one block every two seconds, forever.
while True:
    assert nodes[-1].cli_cmd('generatetoaddress', [str(1), address]).returncode == 0
    time.sleep(2)
| 36.937063 | 182 | 0.638016 |
4193ee1f3381733af777d1919dd4f8acee317bf7 | 4,475 | py | Python | src/warhammer_leveling/converters/converter.py | Peilonrayz/warhammer | 76610bf3c64418f914efb9ff3336daacdd6c5c4c | [
"MIT"
] | null | null | null | src/warhammer_leveling/converters/converter.py | Peilonrayz/warhammer | 76610bf3c64418f914efb9ff3336daacdd6c5c4c | [
"MIT"
] | null | null | null | src/warhammer_leveling/converters/converter.py | Peilonrayz/warhammer | 76610bf3c64418f914efb9ff3336daacdd6c5c4c | [
"MIT"
] | null | null | null | from datetime import datetime
from typing import (
Any,
Generic,
List,
Optional,
Tuple,
Type,
TypeVar,
Union,
get_type_hints,
)
__all__ = ["ron", "Converter", "Converters"]
T = TypeVar("T")
class BuilderObject:
    """Attribute sink used while building objects: reading an unset
    attribute auto-creates a nested BuilderObject, and all values live in
    one internal dict stored under the literal key "__values".
    """

    def __init__(self):
        # Bypass our own __setattr__ so the backing dict is stored directly
        # on the instance rather than inside itself.
        super().__setattr__("__values", {})

    def __getattr__(self, name):
        values = super().__getattribute__("__values")
        # Missing attributes spring into existence as empty builders so
        # arbitrarily deep assignment chains work.
        return values.setdefault(name, BuilderObject())

    def __setattr__(self, name, value):
        values = super().__getattribute__("__values")
        values[name] = value

    def __delattr__(self, name):
        values = super().__getattribute__("__values")
        del values[name]
def _build(base: Type[T], values: Union[BuilderObject, dict], exists_ok) -> T:
    """Recursively construct an instance of *base*, consulting its type
    hints to decide which nested values must be built first.
    """
    hints = get_type_hints(base)
    if isinstance(values, BuilderObject):
        # Reach past BuilderObject's attribute hooks to grab the raw dict.
        values = super(BuilderObject, values).__getattribute__("__values")
    for name in list(values):
        value = values[name]
        if isinstance(value, Converter):
            # Nested converters know how to build themselves.
            values[name] = value.build(exists_ok=exists_ok)
        elif isinstance(value, BuilderObject) and name in hints:
            values[name] = _build(hints[name], value, exists_ok)
    return base(**values)
def _get_args(obj: object, orig: Type) -> Optional[Tuple[Type]]:
"""Get args from obj, filtering by orig type"""
bases = getattr(type(obj), "__orig_bases__", [])
for b in bases:
if b.__origin__ is orig:
return b.__args__
return None
class Converter(Generic[T]):
    """Base class for builders of a concrete type ``T``.

    Attribute assignments accumulate on an internal BuilderObject until
    :meth:`build` materialises the real ``T`` instance.
    """

    _obj: T

    def __init__(self, **kwargs) -> None:
        self._obj = BuilderObject()
        for attr_name, attr_value in kwargs.items():
            setattr(self, attr_name, attr_value)

    def build(self, exists_ok: bool = False) -> T:
        """Construct (and cache) the underlying base object.

        Raises ValueError when the subclass does not parametrise Converter,
        and TypeError when the object was already built and ``exists_ok``
        is False.
        """
        type_args = _get_args(self, Converter)
        if type_args is None:
            raise ValueError("No base")
        base_cls = type_args[0]
        if isinstance(self._obj, base_cls):
            # Already materialised on an earlier call.
            if not exists_ok:
                raise TypeError("Base type has been built already.")
            return self._obj
        self._obj = _build(base_cls, self._obj, exists_ok)
        return self._obj

    @classmethod
    def from_(cls, b: T):
        """Wrap an already-built base object in a converter."""
        converter = cls()
        converter._obj = b
        return converter
def ron(obj: T) -> T:
    """Raise-on-null guard for attribute reads.

    An untouched BuilderObject means the attribute was auto-created by a
    read and never assigned a real value, so it is treated as missing.
    """
    never_assigned = isinstance(obj, BuilderObject)
    if never_assigned:
        raise AttributeError()
    return obj
# A path is either a dotted string ("a.b.c") or a pre-split list of segments.
TPath = Union[str, List[str]]
class Converters:
    """Factory helpers that build ``property`` descriptors reading/writing
    nested attributes under a converter's ``_obj``.
    """
    @staticmethod
    def _read_path(path: TPath) -> List[str]:
        """Convert from public path formats to internal one"""
        if isinstance(path, list):
            return path
        return path.split(".")
    @staticmethod
    def _get(obj: Any, path: List[str]) -> Any:
        """Helper for nested `getattr`s"""
        for segment in path:
            obj = getattr(obj, segment)
        return obj
    @classmethod
    def property(cls, path: TPath, *, get_fn=None, set_fn=None):
        """
        Allows getting data to and from `path`.
        You can convert/type check the data using `get_fn` and `set_fn`. Both take and return one value.
        """
        # All paths are rooted at the converter's internal '_obj'.
        p = ["_obj"] + cls._read_path(path)
        def get(self):
            # ron() rejects never-assigned (auto-created) values.
            value = ron(cls._get(self, p))
            if get_fn is not None:
                return get_fn(value)
            return value
        def set(self, value: Any) -> Any:
            if set_fn is not None:
                value = set_fn(value)
            setattr(cls._get(self, p[:-1]), p[-1], value)
        def delete(self: Any) -> Any:
            delattr(cls._get(self, p[:-1]), p[-1])
        # NOTE: although this classmethod is itself named 'property', method
        # bodies do not see class scope, so this call resolves to the
        # *builtin* property -- intentional, if subtle.
        return property(get, set, delete)
    @classmethod
    def to_datetime(cls, path: TPath, format: str):
        """Convert to and from the date format specified"""
        # Stored value is a datetime; the exposed value is its string form.
        def get_fn(value: datetime) -> str:
            return value.strftime(format)
        def set_fn(value: str) -> datetime:
            return datetime.strptime(value, format)
        return cls.property(path, get_fn=get_fn, set_fn=set_fn)
    @classmethod
    def from_datetime(cls, path: TPath, format: str):
        """Convert to and from the date format specified"""
        # Mirror image of to_datetime: stored string, exposed datetime.
        def get_fn(value: str) -> datetime:
            return datetime.strptime(value, format)
        def set_fn(value: datetime) -> str:
            return value.strftime(format)
        return cls.property(path, get_fn=get_fn, set_fn=set_fn)
| 28.322785 | 104 | 0.595978 |
d5d371cf4e43cd17844507759409b10b2a9fc44a | 9,269 | py | Python | Assignment_03/cs231n/data_utils.py | andrewlstewart/CS231n-Convolutional-Neural-Networks- | 3a6066d790bd654d5fe3ad670c2308e8b2c05d93 | [
"Unlicense"
] | null | null | null | Assignment_03/cs231n/data_utils.py | andrewlstewart/CS231n-Convolutional-Neural-Networks- | 3a6066d790bd654d5fe3ad670c2308e8b2c05d93 | [
"Unlicense"
] | 15 | 2020-11-18T23:05:49.000Z | 2022-03-12T00:35:03.000Z | Assignment_03/cs231n/data_utils.py | andrewlstewart/CS231n-Convolutional-Neural-Networks | 3a6066d790bd654d5fe3ad670c2308e8b2c05d93 | [
"Unlicense"
] | null | null | null | from __future__ import print_function
from builtins import range
from six.moves import cPickle as pickle
import numpy as np
import os
# from scipy.misc import imread
from PIL import Image
import platform
def load_pickle(f):
    """Unpickle from an open binary file, handling the Python 2/3 encoding
    difference ('latin1' keeps py2-pickled numpy arrays loadable on py3).
    """
    version = platform.python_version_tuple()
    major = version[0]
    if major == '2':
        return pickle.load(f)
    if major == '3':
        return pickle.load(f, encoding='latin1')
    raise ValueError("invalid python version: {}".format(version))
def load_CIFAR_batch(filename):
    """ Load a single pickled CIFAR-10 batch file.

    Returns (X, Y): X of shape (10000, 32, 32, 3) as float, Y a 1-D label
    array.
    """
    with open(filename, 'rb') as f:
        datadict = load_pickle(f)
    images = datadict['data']
    labels = datadict['labels']
    # Rows are 3072 bytes in channel-first order (3x32x32); reshape and move
    # channels last.
    images = images.reshape(10000, 3, 32, 32).transpose(0, 2, 3, 1).astype("float")
    return images, np.array(labels)
def load_CIFAR10(ROOT):
    """ Load all five CIFAR-10 training batches plus the test batch.

    Returns (Xtr, Ytr, Xte, Yte).
    """
    train_images = []
    train_labels = []
    for batch_nr in range(1, 6):
        batch_path = os.path.join(ROOT, 'data_batch_%d' % (batch_nr,))
        X, Y = load_CIFAR_batch(batch_path)
        train_images.append(X)
        train_labels.append(Y)
    Xtr = np.concatenate(train_images)
    Ytr = np.concatenate(train_labels)
    # Drop the references to the last per-batch arrays.
    del X, Y
    Xte, Yte = load_CIFAR_batch(os.path.join(ROOT, 'test_batch'))
    return Xtr, Ytr, Xte, Yte
def get_CIFAR10_data(num_training=49000, num_validation=1000, num_test=1000,
                     subtract_mean=True):
    """
    Load the CIFAR-10 dataset from disk and prepare it for classifiers:
    split into train/val/test, optionally subtract the training mean image,
    and transpose to channel-first layout. Same steps as used for the SVM,
    condensed into a single function.
    """
    # Load the raw CIFAR-10 data.
    cifar10_dir = 'cs231n/datasets/cifar-10-batches-py'
    X_train, y_train, X_test, y_test = load_CIFAR10(cifar10_dir)

    # Subsample: the validation split comes right after the training slice.
    val_idx = list(range(num_training, num_training + num_validation))
    X_val = X_train[val_idx]
    y_val = y_train[val_idx]
    train_idx = list(range(num_training))
    X_train = X_train[train_idx]
    y_train = y_train[train_idx]
    test_idx = list(range(num_test))
    X_test = X_test[test_idx]
    y_test = y_test[test_idx]

    # Zero-center every split with the training-set mean image.
    if subtract_mean:
        mean_image = np.mean(X_train, axis=0)
        X_train -= mean_image
        X_val -= mean_image
        X_test -= mean_image

    # Channels-first (N, C, H, W) layout for convolutional layers.
    X_train = X_train.transpose(0, 3, 1, 2).copy()
    X_val = X_val.transpose(0, 3, 1, 2).copy()
    X_test = X_test.transpose(0, 3, 1, 2).copy()

    # Package data into a dictionary.
    return {
        'X_train': X_train, 'y_train': y_train,
        'X_val': X_val, 'y_val': y_val,
        'X_test': X_test, 'y_test': y_test,
    }
def load_tiny_imagenet(path, dtype=np.float32, subtract_mean=True):
    """
    Load TinyImageNet. Each of TinyImageNet-100-A, TinyImageNet-100-B, and
    TinyImageNet-200 have the same directory structure, so this can be used
    to load any of them.
    Inputs:
    - path: String giving path to the directory to load.
    - dtype: numpy datatype used to load the data.
    - subtract_mean: Whether to subtract the mean training image.
    Returns: A dictionary with the following entries:
    - class_names: A list where class_names[i] is a list of strings giving the
      WordNet names for class i in the loaded dataset.
    - X_train: (N_tr, 3, 64, 64) array of training images
    - y_train: (N_tr,) array of training labels
    - X_val: (N_val, 3, 64, 64) array of validation images
    - y_val: (N_val,) array of validation labels
    - X_test: (N_test, 3, 64, 64) array of testing images.
    - y_test: (N_test,) array of test labels; if test labels are not available
      (such as in student code) then y_test will be None.
    - mean_image: (3, 64, 64) array giving mean training image
    """
    # First load wnids
    with open(os.path.join(path, 'wnids.txt'), 'r') as f:
        wnids = [x.strip() for x in f]
    # Map wnids to integer labels
    wnid_to_label = {wnid: i for i, wnid in enumerate(wnids)}
    # Use words.txt to get names for each class
    with open(os.path.join(path, 'words.txt'), 'r') as f:
        wnid_to_words = dict(line.split('\t') for line in f)
        # Replacing values during items() iteration is safe here because no
        # keys are added or removed.
        for wnid, words in wnid_to_words.items():
            wnid_to_words[wnid] = [w.strip() for w in words.split(',')]
    class_names = [wnid_to_words[wnid] for wnid in wnids]
    # Next load training data.
    X_train = []
    y_train = []
    for i, wnid in enumerate(wnids):
        if (i + 1) % 20 == 0:
            print('loading training data for synset %d / %d'
                  % (i + 1, len(wnids)))
        # To figure out the filenames we need to open the boxes file
        boxes_file = os.path.join(path, 'train', wnid, '%s_boxes.txt' % wnid)
        with open(boxes_file, 'r') as f:
            filenames = [x.split('\t')[0] for x in f]
        num_images = len(filenames)
        X_train_block = np.zeros((num_images, 3, 64, 64), dtype=dtype)
        y_train_block = wnid_to_label[wnid] * \
                        np.ones(num_images, dtype=np.int64)
        for j, img_file in enumerate(filenames):
            img_file = os.path.join(path, 'train', wnid, 'images', img_file)
            img = np.asarray(Image.open(img_file))
            if img.ndim == 2:
                ## grayscale file: give it an explicit single channel so the
                ## transpose below works.
                img.shape = (64, 64, 1)
            X_train_block[j] = img.transpose(2, 0, 1)
        X_train.append(X_train_block)
        y_train.append(y_train_block)
    # We need to concatenate all training data
    X_train = np.concatenate(X_train, axis=0)
    y_train = np.concatenate(y_train, axis=0)
    # Next load validation data
    with open(os.path.join(path, 'val', 'val_annotations.txt'), 'r') as f:
        img_files = []
        val_wnids = []
        for line in f:
            img_file, wnid = line.split('\t')[:2]
            img_files.append(img_file)
            val_wnids.append(wnid)
        num_val = len(img_files)
        y_val = np.array([wnid_to_label[wnid] for wnid in val_wnids])
        X_val = np.zeros((num_val, 3, 64, 64), dtype=dtype)
        for i, img_file in enumerate(img_files):
            img_file = os.path.join(path, 'val', 'images', img_file)
            img = np.asarray(Image.open(img_file))
            if img.ndim == 2:
                img.shape = (64, 64, 1)
            X_val[i] = img.transpose(2, 0, 1)
    # Next load test images
    # Students won't have test labels, so we need to iterate over files in the
    # images directory.
    img_files = os.listdir(os.path.join(path, 'test', 'images'))
    X_test = np.zeros((len(img_files), 3, 64, 64), dtype=dtype)
    for i, img_file in enumerate(img_files):
        img_file = os.path.join(path, 'test', 'images', img_file)
        img = np.asarray(Image.open(img_file))
        if img.ndim == 2:
            img.shape = (64, 64, 1)
        X_test[i] = img.transpose(2, 0, 1)
    y_test = None
    y_test_file = os.path.join(path, 'test', 'test_annotations.txt')
    if os.path.isfile(y_test_file):
        with open(y_test_file, 'r') as f:
            img_file_to_wnid = {}
            for line in f:
                line = line.split('\t')
                img_file_to_wnid[line[0]] = line[1]
        y_test = [wnid_to_label[img_file_to_wnid[img_file]]
                  for img_file in img_files]
        y_test = np.array(y_test)
    # Mean is always computed; subtraction is optional.
    mean_image = X_train.mean(axis=0)
    if subtract_mean:
        X_train -= mean_image[None]
        X_val -= mean_image[None]
        X_test -= mean_image[None]
    return {
        'class_names': class_names,
        'X_train': X_train,
        'y_train': y_train,
        'X_val': X_val,
        'y_val': y_val,
        'X_test': X_test,
        'y_test': y_test,
        # NOTE(review): 'class_names' appears twice in this literal; the
        # duplicate is harmless (last wins) but could be removed.
        'class_names': class_names,
        'mean_image': mean_image,
    }
def load_models(models_dir):
    """
    Load saved models from disk. Attempts to unpickle every file in the
    directory; files that fail to unpickle (such as README.txt) are
    silently skipped.

    Inputs:
    - models_dir: String giving the path to a directory containing model
      files. Each model file is a pickled dictionary with a 'model' field.

    Returns:
    A dictionary mapping model file names to models.
    """
    models = {}
    for filename in os.listdir(models_dir):
        full_path = os.path.join(models_dir, filename)
        with open(full_path, 'rb') as f:
            try:
                payload = load_pickle(f)
            except pickle.UnpicklingError:
                continue
            models[filename] = payload['model']
    return models
def load_imagenet_val(num=None):
    """Load a handful of validation images from ImageNet.

    Inputs:
    - num: Number of images to load (max of 25)

    Returns:
    - X: numpy array with shape [num, 224, 224, 3]
    - y: numpy array of integer image labels, shape [num]
    - class_names: dict mapping integer label to class name
    """
    imagenet_fn = 'cs231n/datasets/imagenet_val_25.npz'
    if not os.path.isfile(imagenet_fn):
        # Tell the user how to fetch the archive before bailing out.
        print('file %s not found' % imagenet_fn)
        print('Run the following:')
        print('cd cs231n/datasets')
        print('bash get_imagenet_val.sh')
        assert False, 'Need to download imagenet_val_25.npz'
    archive = np.load(imagenet_fn, allow_pickle=True)
    X = archive['X']
    y = archive['y']
    class_names = archive['label_map'].item()
    if num is not None:
        # Keep only the first `num` images and labels.
        X = X[:num]
        y = y[:num]
    return X, y, class_names
| 35.109848 | 79 | 0.615277 |
1676197584b75f7523916647cc75b6c4f830491e | 3,834 | py | Python | ticket/models.py | aman-roy/pune.pycon.org | f56cc948bd56767110d337c694ecbf5540bdf4b9 | [
"MIT"
] | 1 | 2021-02-14T13:02:19.000Z | 2021-02-14T13:02:19.000Z | ticket/models.py | aman-roy/pune.pycon.org | f56cc948bd56767110d337c694ecbf5540bdf4b9 | [
"MIT"
] | null | null | null | ticket/models.py | aman-roy/pune.pycon.org | f56cc948bd56767110d337c694ecbf5540bdf4b9 | [
"MIT"
] | null | null | null | import uuid
from django.db import models
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from django.utils.translation import ugettext as _
from symposion.conference.models import Conference
from cauth.models import EventUser
from root.models import Base
class Ticket(Base):
    """ Model to store the different types of ticket for a conference. """
    title = models.CharField(_("name"), max_length=255)
    # Maximum number of tickets available; presumably 0 means no limit --
    # confirm against the purchasing code.
    limits = models.PositiveIntegerField(_("limits"), default=0)
    price = models.PositiveIntegerField(_("price"), default=0, db_index=True)
    description = models.CharField(_("description"), max_length=255, null=True)
    # Base64-encoded image (title + payload) shown for this ticket type.
    image_base64_title = models.CharField(_("image title"), max_length=255,
                                          null=True, blank=True)
    image_base64_text = models.TextField(_("image url"), null=True, blank=True)
    conference = models.ForeignKey(Conference, verbose_name=_("conference"))
    # Denormalised flag set when `limits` has been exhausted.
    is_limit_reached = models.BooleanField(_("limit reached?"), default=False,
                                           db_index=True)
    disabled = models.BooleanField(_("disabled?"), default=False, db_index=True)
    class Meta:
        verbose_name = _("ticket")
        verbose_name_plural = _("tickets")
    def __str__(self):
        return u"%s: %s" % (self.conference.title, self.title)
class AuxiliaryTicket(Base):
    """ Model for the auxiliary tickets, which the user can
    buy in addition to the main ticket. Mirrors Ticket's fields. """
    title = models.CharField(_('name'), max_length=255)
    limits = models.PositiveIntegerField(_("limits"), default=0)
    price = models.PositiveIntegerField(_('price'), default=0)
    description = models.CharField(_("description"), max_length=255, null=True)
    image_base64_title = models.CharField(_("image title"), max_length=255,
                                          null=True, blank=True)
    image_base64_text = models.TextField(_("image url"), null=True, blank=True)
    conference = models.ForeignKey(Conference, verbose_name=_("conference"))
    is_limit_reached = models.BooleanField(_("limit reached?"), default=False,
                                           db_index=True)
    disabled = models.BooleanField(_("disabled?"), default=False, db_index=True)
class TicketAddons(Base):
    """ Model for the addons for the tickets, which need to be bought along
    with the ticket. """
    title = models.CharField(_('name'), max_length=255)
    # The main ticket this addon belongs to; addons vanish with their ticket.
    ticket = models.ForeignKey(Ticket, on_delete=models.CASCADE)
    price = models.PositiveIntegerField(_('price'), default=0)
class CouponCode(Base):
    """ Model for storing the coupon code, which can be applied on both the
    Ticket & TicketAddons.
    """
    code = models.CharField(_('coupon'), max_length=20)
    # Generic foreign key so one coupon model can point at either a Ticket
    # or a TicketAddons row (content_type + object_id -> content_object).
    content_type = models.ForeignKey(ContentType, on_delete=models.CASCADE)
    object_id = models.PositiveIntegerField()
    content_object = GenericForeignKey('content_type', 'object_id')
class UserTicket(Base):
    """ Model for maintaining the ticket entry for all the Users. """
    # Stable external identifier for this purchase (not user-editable).
    uuid = models.UUIDField(default=uuid.uuid4, editable=False)
    user = models.ForeignKey(EventUser, on_delete=models.CASCADE)
    ticket = models.ForeignKey(Ticket, on_delete=models.CASCADE)
    # Invoice reference; default of 0 (as a string) looks odd for a
    # CharField -- TODO confirm whether "" was intended.
    invoice = models.CharField(_('invoice'), max_length=255, default=0)
    # IDs of purchased AuxiliaryTicket rows, stored comma-separated.
    # NOTE(review): CommaSeparatedIntegerField is deprecated in newer
    # Django versions -- verify the target Django release.
    auxiliary_ticket_id = models.CommaSeparatedIntegerField(
        _('auxiliary ticket'),
        default=0,
        max_length=200
    )
    is_payment_done = models.BooleanField(
        _("payment done?"),
        default=False
    )
    class Meta:
        verbose_name = _("user ticket")
        verbose_name_plural = _("user tickets")
        # Newest purchases first ("timestamp" presumably comes from Base).
        ordering = ['-timestamp']
    def __str__(self):
        return u'%s:%s' % (self.user.username, self.ticket.title)
| 39.9375 | 80 | 0.681534 |
b9af5a8c105afde2d0dfa13e45bc7f725beebdc4 | 57 | py | Python | clumpy/py3.py | bkimmig/clump | 6c6c3f3ab26defc05965de10e6e50d9635a773e6 | [
"MIT"
] | null | null | null | clumpy/py3.py | bkimmig/clump | 6c6c3f3ab26defc05965de10e6e50d9635a773e6 | [
"MIT"
] | null | null | null | clumpy/py3.py | bkimmig/clump | 6c6c3f3ab26defc05965de10e6e50d9635a773e6 | [
"MIT"
] | null | null | null | import sys
# Provide a Python-2-style ``xrange`` alias when running on Python 3.
# Use sys.version_info rather than the version *string*: checking
# sys.version[0] == '3' silently breaks on Python 10+ (first char '1').
if sys.version_info[0] >= 3:
    xrange = range
| 11.4 | 25 | 0.596491 |
0cec6df670565034472da8c19b98bfdac4c58f2b | 9,699 | py | Python | tests/transforms/linear_test.py | Tennessee-Wallaceh/nflows | ac0bf432fc7904458a933ed14180f0ac26e3f93d | [
"MIT"
] | 522 | 2020-02-26T16:51:51.000Z | 2022-03-31T15:38:48.000Z | tests/transforms/linear_test.py | Tennessee-Wallaceh/nflows | ac0bf432fc7904458a933ed14180f0ac26e3f93d | [
"MIT"
] | 47 | 2020-03-24T18:36:59.000Z | 2022-03-25T09:47:18.000Z | tests/transforms/linear_test.py | Tennessee-Wallaceh/nflows | ac0bf432fc7904458a933ed14180f0ac26e3f93d | [
"MIT"
] | 77 | 2020-03-21T21:00:44.000Z | 2022-03-31T00:41:51.000Z | """Tests for linear transforms."""
import unittest
from unittest.mock import MagicMock
import torch
from nflows.transforms import linear
from nflows.transforms.linear import Linear
from nflows.utils import torchutils
from tests.transforms.transform_test import TransformTest
class LinearTest(TransformTest):
    """Tests the caching machinery of the abstract Linear transform.

    The abstract methods (forward_no_cache, inverse_no_cache, weight,
    weight_inverse, logabsdet) are replaced with MagicMocks so the tests
    can observe exactly when the cache is computed, reused or invalidated.
    """
    def setUp(self):
        features = 5
        batch_size = 10
        weight = torch.randn(features, features)
        inverse = torch.randn(features, features)
        logabsdet = torch.randn(1)
        self.transform = Linear(features)
        self.transform.bias.data = torch.randn(features)  # Just so bias isn't zero.
        self.inputs = torch.randn(batch_size, features)
        # Reference outputs consistent with the mocked weight/inverse.
        self.outputs_fwd = self.inputs @ weight.t() + self.transform.bias
        self.outputs_inv = (self.inputs - self.transform.bias) @ inverse.t()
        self.logabsdet_fwd = logabsdet * torch.ones(batch_size)
        self.logabsdet_inv = (-logabsdet) * torch.ones(batch_size)
        # Mocks for abstract methods.
        self.transform.forward_no_cache = MagicMock(
            return_value=(self.outputs_fwd, self.logabsdet_fwd)
        )
        self.transform.inverse_no_cache = MagicMock(
            return_value=(self.outputs_inv, self.logabsdet_inv)
        )
        self.transform.weight = MagicMock(return_value=weight)
        self.transform.weight_inverse = MagicMock(return_value=inverse)
        self.transform.logabsdet = MagicMock(return_value=logabsdet)
    def test_forward_default(self):
        # By default (cache disabled) forward delegates to forward_no_cache.
        outputs, logabsdet = self.transform(self.inputs)
        self.transform.forward_no_cache.assert_called_with(self.inputs)
        self.assertEqual(outputs, self.outputs_fwd)
        self.assertEqual(logabsdet, self.logabsdet_fwd)
        # Cache shouldn't be computed.
        self.assertFalse(self.transform.weight.called)
        self.assertFalse(self.transform.logabsdet.called)
    def test_inverse_default(self):
        outputs, logabsdet = self.transform.inverse(self.inputs)
        self.transform.inverse_no_cache.assert_called_with(self.inputs)
        self.assertEqual(outputs, self.outputs_inv)
        self.assertEqual(logabsdet, self.logabsdet_inv)
        # Cache shouldn't be computed.
        self.assertFalse(self.transform.weight_inverse.called)
        self.assertFalse(self.transform.logabsdet.called)
    def test_forward_cached(self):
        # Caching is only active in eval mode with use_cache() enabled.
        self.transform.eval()
        self.transform.use_cache()
        outputs, logabsdet = self.transform(self.inputs)
        self.assertTrue(self.transform.weight.called)
        self.assertTrue(self.transform.logabsdet.called)
        self.assertEqual(outputs, self.outputs_fwd)
        self.assertEqual(logabsdet, self.logabsdet_fwd)
    def test_inverse_cached(self):
        self.transform.eval()
        self.transform.use_cache()
        outputs, logabsdet = self.transform.inverse(self.inputs)
        self.assertTrue(self.transform.weight_inverse.called)
        self.assertTrue(self.transform.logabsdet.called)
        self.assertEqual(outputs, self.outputs_inv)
        self.assertEqual(logabsdet, self.logabsdet_inv)
    def test_forward_cache_is_used(self):
        self.transform.eval()
        self.transform.use_cache()
        self.transform(self.inputs)
        self.assertTrue(self.transform.weight.called)
        self.assertTrue(self.transform.logabsdet.called)
        # Reset call counters so the second pass can be observed in isolation.
        self.transform.weight.reset_mock()
        self.transform.logabsdet.reset_mock()
        outputs, logabsdet = self.transform(self.inputs)
        # Cached values should be used.
        self.assertFalse(self.transform.weight.called)
        self.assertFalse(self.transform.logabsdet.called)
        self.assertEqual(outputs, self.outputs_fwd)
        self.assertEqual(logabsdet, self.logabsdet_fwd)
    def test_inverse_cache_is_used(self):
        self.transform.eval()
        self.transform.use_cache()
        self.transform.inverse(self.inputs)
        self.assertTrue(self.transform.weight_inverse.called)
        self.assertTrue(self.transform.logabsdet.called)
        self.transform.weight_inverse.reset_mock()
        self.transform.logabsdet.reset_mock()
        outputs, logabsdet = self.transform.inverse(self.inputs)
        # Cached values should be used.
        self.assertFalse(self.transform.weight_inverse.called)
        self.assertFalse(self.transform.logabsdet.called)
        self.assertEqual(outputs, self.outputs_inv)
        self.assertEqual(logabsdet, self.logabsdet_inv)
    def test_forward_cache_not_used_while_training(self):
        self.transform.train()
        self.transform.use_cache()
        outputs, logabsdet = self.transform(self.inputs)
        self.transform.forward_no_cache.assert_called_with(self.inputs)
        self.assertEqual(outputs, self.outputs_fwd)
        self.assertEqual(logabsdet, self.logabsdet_fwd)
        # Cache shouldn't be computed.
        self.assertFalse(self.transform.weight.called)
        self.assertFalse(self.transform.logabsdet.called)
    def test_inverse_cache_not_used_while_training(self):
        self.transform.train()
        self.transform.use_cache()
        outputs, logabsdet = self.transform.inverse(self.inputs)
        self.transform.inverse_no_cache.assert_called_with(self.inputs)
        self.assertEqual(outputs, self.outputs_inv)
        self.assertEqual(logabsdet, self.logabsdet_inv)
        # Cache shouldn't be computed.
        self.assertFalse(self.transform.weight_inverse.called)
        self.assertFalse(self.transform.logabsdet.called)
    def test_forward_train_invalidates_cache(self):
        self.transform.eval()
        self.transform.use_cache()
        self.transform(self.inputs)
        self.assertTrue(self.transform.weight.called)
        self.assertTrue(self.transform.logabsdet.called)
        self.transform.weight.reset_mock()
        self.transform.logabsdet.reset_mock()
        self.transform.train()  # Cache should be invalidated here.
        self.assertTrue(
            self.transform.using_cache
        )  # Using cache should still be enabled.
        self.transform.eval()
        outputs, logabsdet = self.transform(self.inputs)
        # Values should be recomputed.
        self.assertTrue(self.transform.weight.called)
        self.assertTrue(self.transform.logabsdet.called)
        self.assertEqual(outputs, self.outputs_fwd)
        self.assertEqual(logabsdet, self.logabsdet_fwd)
    def test_inverse_train_invalidates_cache(self):
        self.transform.eval()
        self.transform.use_cache()
        self.transform.inverse(self.inputs)
        self.assertTrue(self.transform.weight_inverse.called)
        self.assertTrue(self.transform.logabsdet.called)
        self.transform.weight_inverse.reset_mock()
        self.transform.logabsdet.reset_mock()
        self.transform.train()  # Cache should be invalidated here.
        self.assertTrue(
            self.transform.using_cache
        )  # Using cache should still be enabled.
        self.transform.eval()
        outputs, logabsdet = self.transform.inverse(self.inputs)
        # Values should be recomputed.
        self.assertTrue(self.transform.weight_inverse.called)
        self.assertTrue(self.transform.logabsdet.called)
        self.assertEqual(outputs, self.outputs_inv)
        self.assertEqual(logabsdet, self.logabsdet_inv)
class NaiveLinearTest(TransformTest):
    """Numerical tests for the concrete NaiveLinear transform, checking
    its outputs against reference computations built directly from the
    transform's own weight matrix."""
    def setUp(self):
        self.features = 3
        self.transform = linear.NaiveLinear(features=self.features)
        # Reference quantities derived from the transform's raw weight.
        self.weight = self.transform._weight
        self.weight_inverse = torch.inverse(self.weight)
        self.logabsdet = torchutils.logabsdet(self.weight)
        self.eps = 1e-5
    def test_forward_no_cache(self):
        batch_size = 10
        inputs = torch.randn(batch_size, self.features)
        outputs, logabsdet = self.transform.forward_no_cache(inputs)
        # y = x W^T + b; logabsdet is the same scalar for every batch item.
        outputs_ref = inputs @ self.weight.t() + self.transform.bias
        logabsdet_ref = torch.full([batch_size], self.logabsdet.item())
        self.assert_tensor_is_good(outputs, [batch_size, self.features])
        self.assert_tensor_is_good(logabsdet, [batch_size])
        self.assertEqual(outputs, outputs_ref)
        self.assertEqual(logabsdet, logabsdet_ref)
    def test_inverse_no_cache(self):
        batch_size = 10
        inputs = torch.randn(batch_size, self.features)
        outputs, logabsdet = self.transform.inverse_no_cache(inputs)
        # x = (y - b) W^-T; logabsdet is negated for the inverse direction.
        outputs_ref = (inputs - self.transform.bias) @ self.weight_inverse.t()
        logabsdet_ref = torch.full([batch_size], -self.logabsdet.item())
        self.assert_tensor_is_good(outputs, [batch_size, self.features])
        self.assert_tensor_is_good(logabsdet, [batch_size])
        self.assertEqual(outputs, outputs_ref)
        self.assertEqual(logabsdet, logabsdet_ref)
    def test_weight(self):
        weight = self.transform.weight()
        self.assert_tensor_is_good(weight, [self.features, self.features])
        self.assertEqual(weight, self.weight)
    def test_weight_inverse(self):
        weight_inverse = self.transform.weight_inverse()
        self.assert_tensor_is_good(weight_inverse, [self.features, self.features])
        self.assertEqual(weight_inverse, self.weight_inverse)
    def test_logabsdet(self):
        logabsdet = self.transform.logabsdet()
        self.assert_tensor_is_good(logabsdet, [])
        self.assertEqual(logabsdet, self.logabsdet)
    def test_forward_inverse_are_consistent(self):
        batch_size = 10
        inputs = torch.randn(batch_size, self.features)
        self.assert_forward_inverse_are_consistent(self.transform, inputs)
# Run this module's tests when executed directly.
if __name__ == "__main__":
    unittest.main()
| 38.335968 | 84 | 0.700381 |
1073a7b047f32870df2d0639d6e5a6ae01b0f6da | 34,832 | py | Python | characters.py | theDrake/toads-adventure-py | d2871341a95f60261163f3cd297c3532ddcc4e70 | [
"MIT"
] | null | null | null | characters.py | theDrake/toads-adventure-py | d2871341a95f60261163f3cd297c3532ddcc4e70 | [
"MIT"
] | null | null | null | characters.py | theDrake/toads-adventure-py | d2871341a95f60261163f3cd297c3532ddcc4e70 | [
"MIT"
] | null | null | null | #-------------------------------------------------------------------------------
# Filename: characters.py
#
# Author: David C. Drake (https://davidcdrake.com)
#
# Description: Manages player and non-player characters for Toad's Adventure.
# Developed using Python 2.7 and Pygame 1.9.
#-------------------------------------------------------------------------------
import pygame
from pygame import display
import math
import random
import time
from config import *
class GameCharacter:
    """Represents a game character: movement, collision detection and
    stance/animation bookkeeping.

    Bug fixes relative to the previous revision:
      * ``apply_friction`` and ``move`` used ``self.dx == 0.0`` /
        ``self.dy == 0.0`` -- no-op *comparisons* instead of assignments --
        so velocity was never zeroed on collision or when friction should
        have stopped the character.  They now assign.
      * Integer divisions that relied on Python 2's ``/`` semantics
        (``round_up`` and the tile-midpoint math) now use ``//`` so the
        results stay integers on Python 3 as well (identical on Python 2).
    """

    def __init__(self, ID, max_speed_x, max_speed_y, accel_rate, tiles,
                 first_tile, stances, width_offset, height_offset, map, screen,
                 x, y, facing_right=True):
        """Initialize the character's basic stats.

        ID            -- ID number for the character's type.
        max_speed_x   -- maximum horizontal speed, pixels per frame.
        max_speed_y   -- maximum vertical speed, pixels per frame.
        accel_rate    -- acceleration per frame, in pixels.
        tiles         -- tileset used by all game characters.
        first_tile    -- tile number of the character's first tile.
        stances       -- number of different movement stances.
        width_offset  -- empty pixels beside the character in its tiles.
        height_offset -- empty pixels above the character in its tiles.
        map           -- the current map/level.
        screen        -- surface on which the game is displayed.
        x, y          -- initial coordinates of the upper-left pixel.
        facing_right  -- True if the character starts facing right.
        """
        self.ID = ID
        self.max_speed_x = max_speed_x
        self.max_speed_y = max_speed_y
        self.accel_rate = accel_rate
        self.tiles = tiles
        self.first_tile = first_tile
        self.stances = stances
        self.width_offset = width_offset
        self.height_offset = height_offset
        self.map = map
        self.screen = screen
        self.x = x
        self.y = y
        self.facing_right = facing_right
        self.dx = 0.0  # horizontal velocity
        self.dy = 0.0  # vertical velocity
        self.current_stance = 0  # will range from 0 to "self.stances - 1"
        self.pixels_moved = 0  # helps determine when stance changes occur
        self.num_stance_changes = 0
        self.is_crouching = False
        self.is_climbing = False
        self.is_flying = False

    def push_x(self, ddx):
        """Accelerate along the x-axis, clamping to +/- max_speed_x and
        updating the facing direction to match the new velocity sign."""
        self.dx += ddx
        if self.dx > 0.0:
            self.facing_right = True
            if self.dx > self.max_speed_x:
                self.dx = self.max_speed_x
        elif self.dx < 0.0:
            self.facing_right = False
            if self.dx < (self.max_speed_x * -1.0):
                self.dx = (self.max_speed_x * -1.0)

    def push_y(self, ddy):
        """Accelerate along the y-axis, clamping to +/- max_speed_y."""
        self.dy += ddy
        if self.dy > self.max_speed_y:
            self.dy = self.max_speed_y
        elif self.dy < (self.max_speed_y * -1.0):
            self.dy = (self.max_speed_y * -1.0)

    def move_left(self, modifier=1.0):
        """Accelerate leftward; 'modifier' optionally scales the rate."""
        self.push_x(self.accel_rate * modifier * -1.0)

    def move_right(self, modifier=1.0):
        """Accelerate rightward; 'modifier' optionally scales the rate."""
        self.push_x(self.accel_rate * modifier)

    def apply_friction(self):
        """Decay horizontal velocity while moving on solid ground.
        Friction is diminished on ice; flying characters are unaffected."""
        if self.is_flying:
            return
        elif self.on_ice():
            friction_per_frame = ICE_FRICTION_PER_FRAME
        else:
            friction_per_frame = DEFAULT_FRICTION_PER_FRAME
        if self.dx > 0.0:
            if self.dx - friction_per_frame < 0.0:
                self.dx = 0.0  # fixed: was a no-op "==" comparison
            else:
                self.push_x(friction_per_frame * -1.0)
        elif self.dx < 0.0:
            if self.dx + friction_per_frame > 0.0:
                self.dx = 0.0  # fixed: was a no-op "==" comparison
            else:
                self.push_x(friction_per_frame)

    def apply_gravity(self):
        """Apply downward acceleration unless climbing or flying."""
        if not self.is_climbing and not self.is_flying:
            self.push_y(GRAVITY_PER_FRAME)

    def jump(self):
        """Apply upward acceleration if the character is on the ground.
        (push_y clamps the result, so dy ends up at -max_speed_y.)"""
        if self.on_ground():
            self.push_y(self.max_speed_y * -2)

    def move(self):
        """Move the character pixel-by-pixel according to its velocity,
        stopping (and zeroing velocity) on collision, then advance the
        stance animation based on distance travelled."""
        # get horizontal movement values
        abs_x = self.round_up(abs(self.dx))
        sign_x = 1
        if self.dx < 0.0:
            sign_x = -1
        # get vertical movement values
        abs_y = self.round_up(abs(self.dy))
        sign_y = 1
        if self.dy < 0.0:
            sign_y = -1
        # perform horizontal movement
        pixel_count = 0
        for x in range(abs_x):
            if self.is_colliding(self.x + (1 * sign_x), self.y):
                self.dx = 0.0  # fixed: was a no-op "==" comparison
                break
            else:
                self.x += (1 * sign_x)
                pixel_count += 1
        # perform vertical movement
        for y in range(abs_y):
            if self.is_colliding(self.x, self.y + (1 * sign_y)):
                self.dy = 0.0  # fixed: was a no-op "==" comparison
                break
            else:
                self.y += (1 * sign_y)
        # adjust character's stance as appropriate
        if self.stances > 1:
            self.pixels_moved += pixel_count
            if self.pixels_moved > PIXELS_PER_STANCE_CHANGE:
                self.pixels_moved = 0
                self.num_stance_changes += 1
                if self.num_stance_changes < self.stances:
                    self.current_stance += 1
                    if self.current_stance >= self.stances:  # error check
                        self.current_stance = 0
                        self.num_stance_changes = 0
                    # handle ninji jump behavior here
                    if self.ID == NINJI and int(time.time()) % 2:
                        self.jump()
                else:
                    self.current_stance -= 1
                    if self.current_stance == 0:
                        self.num_stance_changes = 0
            if self.dx == 0.0:  # character is not moving horizontally
                self.current_stance = 0
                self.num_stance_changes = 0
                self.pixels_moved = 0
            if not self.on_ground() and not self.ID == ALBATOSS:  # in the air
                self.num_stance_changes = 0
                self.pixels_moved = 0
                if self.ID == PLAYER:
                    self.current_stance = PLAYER_JUMPING_STANCE
                elif self.ID == NINJI:
                    self.current_stance = 1

    def is_colliding(self, x, y):
        """Return True if the character, placed with its upper-left corner
        at (x, y), would overlap a solid tile on any side, or would land
        (from above) on a "top-solid" tile."""
        left = x + self.width_offset
        right = x + (CHARACTER_TILE_SIZE - 1) - self.width_offset
        top = y + self.height_offset
        bottom = y + (CHARACTER_TILE_SIZE - 1)
        mid_x = x + CHARACTER_TILE_SIZE // 2  # "//" keeps this an int on py3
        mid_y = y + CHARACTER_TILE_SIZE // 2
        if (self.map.is_solid_at(left, top) or
            self.map.is_solid_at(right, top) or
            self.map.is_solid_at(left, bottom) or
            self.map.is_solid_at(right, bottom) or
            self.map.is_solid_at(left, mid_y) or
            self.map.is_solid_at(right, mid_y) or
            self.map.is_solid_at(mid_x, top) or
            self.map.is_solid_at(mid_x, bottom)):
            return True
        elif (self.dy >= 0 and
              (self.map.get_tile_number_at(left, bottom) in TOP_SOLID_TILES or
               self.map.get_tile_number_at(right, bottom) in TOP_SOLID_TILES)):
            return True
        return False

    def overlaps(self, other):
        """Return True if this character's bounding box (trimmed by its
        width/height offsets) intersects 'other's bounding box."""
        self_left = self.x + self.width_offset
        self_right = self.x + (CHARACTER_TILE_SIZE - 1) - self.width_offset
        self_top = self.y + self.height_offset
        self_bottom = self.y + (CHARACTER_TILE_SIZE - 1)
        other_left = other.x + other.width_offset
        other_right = other.x + (CHARACTER_TILE_SIZE - 1) - other.width_offset
        other_top = other.y + other.height_offset
        other_bottom = other.y + (CHARACTER_TILE_SIZE - 1)
        return (self_right >= other_left and self_left <= other_right and
                self_top <= other_bottom and self_bottom >= other_top)

    def will_fall(self):
        """Return True if the character is on solid ground but the tile
        just past its leading edge (in the facing direction) is empty."""
        left = self.x
        right = self.x + CHARACTER_TILE_SIZE + 1  # - self.width_offset
        bottom = self.y + CHARACTER_TILE_SIZE + 1
        return self.on_ground() and (
            (self.facing_right and self.map.is_non_solid_at(right, bottom)) or
            (not self.facing_right and self.map.is_non_solid_at(left, bottom)))

    def on_ground(self):
        """Return True if the character's feet rest on a solid or
        top-solid tile (i.e. moving one pixel down would collide)."""
        return self.is_colliding(self.x, self.y + 1)

    def on_ice(self):
        """Return True if the tile directly below the character is icy."""
        return self.get_tile_number_below() in ICY_TILES

    def get_tile_number_behind(self):
        """Return the tile number of the map tile directly behind the
        character (the "background" tile at its lower-middle point)."""
        return self.map.get_tile_number_at(self.x + CHARACTER_TILE_SIZE // 2,
                                           self.y + CHARACTER_TILE_SIZE - 1)

    def get_tile_number_below(self):
        """Return the tile number of the map tile one pixel below the
        character's lowest pixel, at its horizontal midpoint."""
        return self.map.get_tile_number_at(self.x + CHARACTER_TILE_SIZE // 2,
                                           self.y + CHARACTER_TILE_SIZE + 1)

    def round_up(self, n):
        """Round a float up to the nearest multiple of
        MIN_PIXELS_PER_FRAME and return it as an int.

        Uses floor division so the result is an int on Python 3 as well
        (the old "/" relied on Python 2 integer division).
        """
        minimum = MIN_PIXELS_PER_FRAME
        return ((int(math.ceil(n)) + (minimum - 1)) // minimum) * minimum
#-------------------------------------------------------------------------------
# Class: PlayerCharacter
#
# Description: Represents the player character.
#
# Methods: __init__, game_logic, draw, is_big, is_small, grow, shrink,
# is_invincible, climb, climb_up, climb_down, can_climb,
# take_damage, pose_and_pause, victory_pose, die
#-------------------------------------------------------------------------------
class PlayerCharacter(GameCharacter):
    #---------------------------------------------------------------------------
    # Method: __init__
    #
    # Description: Initializes the player character's basic stats.
    #
    # Inputs:      tiles  - Tileset used by all game characters.
    #              map    - The current map/level.
    #              screen - The screen on which the game is displayed.
    #              x      - Initial x-coordinate for upper-left pixel.
    #              y      - Initial y-coordinate for upper-left pixel.
    #
    # Outputs:     None.
    #---------------------------------------------------------------------------
    def __init__(self, tiles, map, screen, x, y):
        # Player starts "big" (full health): big tile set and big hitbox.
        GameCharacter.__init__(self, PLAYER, PLAYER_MAX_SPEED_X,
                               PLAYER_MAX_SPEED_Y, PLAYER_ACCEL_RATE, tiles,
                               FIRST_PLAYER_TILE_BIG, PLAYER_STANCES,
                               PLAYER_WIDTH_OFFSET, PLAYER_HEIGHT_OFFSET_BIG,
                               map, screen, x, y)
        self.invincibility_timer = 0  # frames of post-damage invincibility left
        self.climbing_stance = 0  # alternates between 0 and 1 while climbing
    #---------------------------------------------------------------------------
    # Method:      game_logic
    #
    # Description: Determines player character's behavior according to keyboard
    #              input and interactions among all active game objects.
    #
    # Inputs:      keys     - Keys currently pressed down.
    #              new_keys - Keys that have just begun to be pressed down.
    #
    # Outputs:     None.
    #---------------------------------------------------------------------------
    def game_logic(self, keys, new_keys):
        # check for damage and death
        if self.get_tile_number_below() == SPIKE_TILE:
            self.take_damage()
        elif self.y + self.height_offset > (self.map.map_height *
                                            MAP_TILE_SIZE):
            self.die()  # fell below the bottom of the map
        # check invincibility timer
        if self.invincibility_timer > 0:
            self.invincibility_timer -= 1
        # check crouching/climbing status
        if (self.is_crouching and pygame.K_DOWN not in keys and
            pygame.K_s not in keys):
            self.is_crouching = False
        if self.is_climbing:
            if not self.can_climb():
                self.is_climbing = False
            else:
                self.dy = 0.0  # hold position on the climbable tile
        # horizontal acceleration (crouching halves it)
        if self.on_ground():
            self.apply_friction()
        if not self.is_crouching:
            if pygame.K_LEFT in keys or pygame.K_a in keys:
                self.move_left()
            if pygame.K_RIGHT in keys or pygame.K_d in keys:
                self.move_right()
        else:
            if pygame.K_LEFT in keys or pygame.K_a in keys:
                self.move_left(0.5)
            if pygame.K_RIGHT in keys or pygame.K_d in keys:
                self.move_right(0.5)
        # vertical acceleration
        self.apply_gravity()
        if pygame.K_SPACE in new_keys:  # new_keys so jumps aren't auto-repeated
            self.jump()
        # climbing
        if (pygame.K_UP in keys or pygame.K_w in keys) and self.can_climb():
            self.climb_up()
        elif (pygame.K_DOWN in keys or pygame.K_s in keys) and self.can_climb():
            self.climb_down()
        # crouching
        if pygame.K_DOWN in keys or pygame.K_s in keys:
            self.is_crouching = True
        # apply motion
        self.move()
#---------------------------------------------------------------------------
# Method: draw
#
# Description: Draws the player character on the screen.
#
# Inputs: position - Tuple containing (x, y) pixel coordinates.
#
# Outputs: None.
#---------------------------------------------------------------------------
def draw(self, position):
if self.is_invincible() and random.randint(0, 1): # flicker effect
return
elif self.is_climbing:
if self.is_big():
self.screen.blit(self.tiles.get_image(
FIRST_CLIMBING_TILE_BIG +
self.climbing_stance),
position)
else:
self.screen.blit(self.tiles.get_image(
FIRST_CLIMBING_TILE_SMALL +
self.climbing_stance),
position)
elif self.is_crouching:
if self.is_big():
self.screen.blit(self.tiles.get_image(CROUCHING_TILE_BIG),
position)
else:
self.screen.blit(self.tiles.get_image(CROUCHING_TILE_SMALL),
position)
elif self.facing_right:
self.screen.blit(self.tiles.get_image(self.first_tile +
self.current_stance),
position)
else: # facing left
self.screen.blit(self.tiles.get_image(self.first_tile +
self.stances +
self.current_stance),
position)
#---------------------------------------------------------------------------
# Method: is_big
#
# Description: Determines whether Toad is "big" (full health).
#
# Inputs: None.
#
# Outputs: 'True' if the character has full health, 'False' otherwise.
#---------------------------------------------------------------------------
def is_big(self):
return self.first_tile == FIRST_PLAYER_TILE_BIG
#---------------------------------------------------------------------------
# Method: is_small
#
# Description: Determines whether Toad is "small" (not full health).
#
# Inputs: None.
#
# Outputs: 'True' if the character's taken damage, 'False' otherwise.
#---------------------------------------------------------------------------
def is_small(self):
return self.first_tile == FIRST_PLAYER_TILE_SMALL
#---------------------------------------------------------------------------
# Method: grow
#
# Description: Makes Toad "big" (restores full health).
#
# Inputs: None.
#
# Outputs: None.
#---------------------------------------------------------------------------
def grow(self):
self.first_tile = FIRST_PLAYER_TILE_BIG
self.height_offset = PLAYER_HEIGHT_OFFSET_BIG
#---------------------------------------------------------------------------
# Method: shrink
#
# Description: Makes Toad "small" (one more hit = death).
#
# Inputs: None.
#
# Outputs: None.
#---------------------------------------------------------------------------
def shrink(self):
self.first_tile = FIRST_PLAYER_TILE_SMALL
self.height_offset = PLAYER_HEIGHT_OFFSET_SMALL
#---------------------------------------------------------------------------
# Method: is_invincible
#
# Description: Determines whether player is currently invincible.
#
# Inputs: None.
#
# Outputs: 'True' if the character's invincible.
#---------------------------------------------------------------------------
def is_invincible(self):
return self.invincibility_timer > 0
#---------------------------------------------------------------------------
# Method: climb
#
# Description: Causes player to move in a given vertical direction according
# to climbing rate.
#
# Inputs: direction - The direction in which to climb: positive for
# upward movement, negative for downward.
#
# Outputs: None.
#---------------------------------------------------------------------------
def climb(self, direction):
if not self.is_climbing:
self.is_climbing = True
self.dx = 0.0
self.dy = 0.0
self.dy = PLAYER_CLIMB_RATE * direction
if random.randint(0, 8) == 0:
self.climbing_stance += 1
if self.climbing_stance > 1:
self.climbing_stance = 0
#---------------------------------------------------------------------------
# Method: climb_up
#
# Description: Causes Toad to climb upwards.
#
# Inputs: None.
#
# Outputs: None.
#---------------------------------------------------------------------------
def climb_up(self):
self.climb(-1)
#---------------------------------------------------------------------------
# Method: climb_down
#
# Description: Causes Toad to climb downwards.
#
# Inputs: None.
#
# Outputs: None.
#---------------------------------------------------------------------------
def climb_down(self):
self.climb(1)
#---------------------------------------------------------------------------
# Method: can_climb
#
# Description: Determines whether Toad can climb based on the tile directly
# behind him.
#
# Inputs: None.
#
# Outputs: 'True' if Toad can climb, 'False' otherwise.
#---------------------------------------------------------------------------
def can_climb(self):
    """Return True when the tile directly behind the player is climbable."""
    tile_behind = self.get_tile_number_behind()
    return tile_behind in CLIMBABLE_TILES
#---------------------------------------------------------------------------
# Method: take_damage
#
# Description: Causes Toad to take damage. If Toad was big, he's now small
# and temporarily invincible. If Toad was small, he's now dead.
#
# Inputs: None.
#
# Outputs: None.
#---------------------------------------------------------------------------
def take_damage(self):
    """Apply one hit: small Toad dies; big Toad shrinks and becomes briefly invincible.

    Does nothing while the invincibility timer is running.
    """
    if self.is_invincible():
        return
    if self.is_small():
        self.die()
        return
    self.shrink()
    self.invincibility_timer = INVINCIBILITY_AFTER_DAMAGE
#---------------------------------------------------------------------------
# Method: pose_and_pause
#
# Description: Displays Toad in a given pose for a given number of seconds
# (during which everything will be paused).
#
# Inputs: pose - Tile number of desired pose.
# pause - Pause duration, in seconds.
#
# Outputs: None.
#---------------------------------------------------------------------------
def pose_and_pause(self, pose, pause):
    """Show tile `pose` at the player's screen position for `pause` seconds.

    Everything is frozen while this runs: the call blocks on time.sleep.
    """
    # draw map again to erase player's previous tile
    x = self.x - (self.map.screen_width / 2)
    y = self.y - (self.map.screen_height / 2)
    self.map.draw(x, y)
    # draw desired pose, then pause the game
    position = (self.map.screen_width / 2 - MAP_TILE_SIZE,
                self.map.screen_height / 2 - MAP_TILE_SIZE)
    self.screen.blit(self.tiles.get_image(pose), position)
    display.flip()
    time.sleep(pause)  # blocks the whole game loop for `pause` seconds
#---------------------------------------------------------------------------
# Method: victory_pose
#
# Description: Determines appropriate victory pose and temporarily pauses
# the game while displaying it.
#
# Inputs: None.
#
# Outputs: None.
#---------------------------------------------------------------------------
def victory_pose(self):
    """Pick the victory pose matching size/orientation and freeze on it for a second."""
    if self.is_big():
        pose = PLAYER_VICTORY_TILE_BIG      # big, facing 4th wall
    else:
        # small: right-facing tile, or the next tile over when facing left
        pose = PLAYER_VICTORY_TILE_SMALL if self.facing_right else PLAYER_VICTORY_TILE_SMALL + 1
    self.pose_and_pause(pose, 1)
#---------------------------------------------------------------------------
# Method: die
#
# Description: Handles Toad's death and respawns him at starting point.
#
# Inputs: None.
#
# Outputs: None.
#---------------------------------------------------------------------------
def die(self):
    """Show the death pose, then respawn Toad big, facing right, at the level start."""
    self.pose_and_pause(PLAYER_DEAD_TILE, 1)
    self.grow()
    self.facing_right = True
    (self.x, self.y) = self.map.player_start_location
    (self.dx, self.dy) = (0, 0)  # respawn with no momentum
#-------------------------------------------------------------------------------
# Class: NonPlayerCharacter
#
# Description: Represents an NPC.
#
# Methods: __init__, game_logic, draw
#-------------------------------------------------------------------------------
class NonPlayerCharacter(GameCharacter):
    """An enemy/NPC: walks (or flies) back and forth, reversing at obstacles."""
    #---------------------------------------------------------------------------
    # Method: __init__
    #
    # Description: Initializes the NPC's basic stats.
    #
    # Inputs:      ID - ID number for the NPC's type.
    #              tiles - Tileset used by all game characters.
    #              map - The current map/level.
    #              screen - The screen on which the game is displayed.
    #              x - Initial x-coordinate for upper-left pixel.
    #              y - Initial y-coordinate for upper-left pixel.
    #              facing_right - 'True' if character should be facing right.
    #
    # Outputs:     None.
    #---------------------------------------------------------------------------
    def __init__(self, ID, tiles, map, screen, x, y, facing_right=False):
        # Start from the shared NPC defaults, then specialize per type below.
        max_speed_x = DEFAULT_NPC_MAX_SPEED_X
        max_speed_y = DEFAULT_NPC_MAX_SPEED_Y
        accel_rate = DEFAULT_NPC_ACCEL_RATE
        width_offset = DEFAULT_WIDTH_OFFSET
        height_offset = DEFAULT_HEIGHT_OFFSET
        first_tile = NPC_FIRST_TILES[ID]
        stances = DEFAULT_STANCES
        if ID == SPARK:
            stances = SPARK_STANCES
        elif ID == ALBATOSS:
            stances = ALBATOSS_STANCES
        elif ID == PHANTO:
            stances = PHANTO_STANCES
        elif ID == FLURRY:
            max_speed_x *= 1.5  # Flurry gets a higher horizontal top speed
        GameCharacter.__init__(self, ID, max_speed_x, max_speed_y, accel_rate,
                               tiles, first_tile, stances, width_offset,
                               height_offset, map, screen, x, y, facing_right)
        # Flying NPCs ignore gravity-driven falls and bounce off map edges.
        if ID == SPARK or ID == ALBATOSS or ID == PHANTO:
            self.is_flying = True
    #---------------------------------------------------------------------------
    # Method: game_logic
    #
    # Description: Determines NPC behavior according to interactions among all
    #              active game objects.
    #
    # Inputs:      None.
    #
    # Outputs:     None.
    #---------------------------------------------------------------------------
    def game_logic(self):
        # check for change in orientation: turn around when hitting a wall,
        # when a flying NPC reaches the map edge, or (blue Shy Guy only) when
        # the next step would walk off a ledge.
        # NOTE(review): the width_offset * 3 (right probe) vs * 2 (edge probe)
        # asymmetry looks intentional sprite padding — confirm against tileset.
        if ((self.facing_right and
             self.is_colliding(self.x + CHARACTER_TILE_SIZE -
                               self.width_offset * 3, self.y)) or
            (not self.facing_right and self.is_colliding(self.x - 1, self.y)) or
            (self.is_flying and ((not self.facing_right and self.x - 1 <= 0) or
                                 (self.facing_right and self.x +
                                  CHARACTER_TILE_SIZE -
                                  self.width_offset * 2 >=
                                  self.map.get_size()[0] * MAP_TILE_SIZE))) or
            (self.ID == SHY_GUY_BLUE and self.will_fall())):
            self.facing_right = not self.facing_right
            self.dx *= -1.0
        # horizontal acceleration
        self.apply_friction()
        if self.facing_right:
            self.move_right()
        else:
            self.move_left()
        # vertical acceleration
        self.apply_gravity()
        # apply motion
        self.move()
    #---------------------------------------------------------------------------
    # Method: draw
    #
    # Description: Draws the NPC on the screen, if currently visible.
    #
    # Inputs:      map_x - Left-most map pixel currently displayed.
    #              map_y - Top-most map pixel currently displayed.
    #
    # Outputs:     None.
    #---------------------------------------------------------------------------
    def draw(self, map_x, map_y):
        # Skip NPCs that are entirely off the visible screen window.
        if (self.x < map_x or self.x > (map_x + self.map.screen_width) or
                self.y < map_y or self.y > (map_y + self.map.screen_height)):
            return
        position = (self.x - map_x - MAP_TILE_SIZE,
                    self.y - map_y - MAP_TILE_SIZE)
        # Left-facing frames live `stances` tiles after the right-facing ones;
        # Sparks use the same frames regardless of direction.
        if self.ID == SPARK or self.facing_right:
            self.screen.blit(self.tiles.get_image(self.first_tile +
                                                  self.current_stance),
                             position)
        else:  # facing left
            self.screen.blit(self.tiles.get_image(self.first_tile +
                                                  self.stances +
                                                  self.current_stance),
                             position)
| 40.691589 | 80 | 0.45151 |
ddee7a1ba74ef0ae6a3a15088f53b3ab35e44552 | 2,022 | py | Python | measures/coverage.py | markanewman/corpustools | 9a100396474c48579ef98ba0db6f44b4efdfc6c8 | [
"MIT"
] | null | null | null | measures/coverage.py | markanewman/corpustools | 9a100396474c48579ef98ba0db6f44b4efdfc6c8 | [
"MIT"
] | null | null | null | measures/coverage.py | markanewman/corpustools | 9a100396474c48579ef98ba0db6f44b4efdfc6c8 | [
"MIT"
] | null | null | null | import pathlib
from ..utils.csvfile import read_dictionary, write_dictionary
from ..utils.tarfile import file_in_corpus, read_lines_from_tar_file
from statistics import mean
def coverage(corpus, tokens, tokenizer=None):
    """
    Calculate the Zipf's-law coverage of a given set of tokens over the
    corpus on a document-by-document basis.

    Any previous ``coverage.csv`` next to the corpus tarball is removed and
    rewritten.

    Parameters
    ----------
    corpus : str
        The tarball containing the corpus.
    tokens : str
        The file containing the list of tokens to get a % coverage on.
    tokenizer : function, optional
        Function taking a line (str) and returning a list of tokens
        (list of str). Defaults to whitespace splitting + upper-casing.

    Returns
    -------
    tuple
        (path of the written coverage.csv, mean per-document coverage)

    Example
    ---------
    import corpustools.measure as ctm; ctm.coverage('d:/working/corpus.tar', 'd:/working/tokens.csv')
    """
    corpus = pathlib.Path(corpus)
    measures_file = corpus.parent.joinpath('./coverage.csv')
    if measures_file.exists():
        measures_file.unlink()
    # PEP 8: compare to None with `is`, never `==` (was `tokenizer == None`)
    if tokenizer is None:
        tokenizer = lambda line: [token.upper() for token in line.strip().split() if len(token) > 0]
    tokens = set(read_dictionary(tokens).keys())
    measures = _measures(corpus, tokens, tokenizer)
    _write_measures(measures_file, measures)
    return (measures_file, mean(measures.values()))
def _measures(corpus, tokens, tokenizer):
    """Compute per-document coverage: fraction of each document's tokens
    that appear in the reference `tokens` set.

    Returns a dict mapping each tar member name to a ratio rounded to
    8 decimal places.
    """
    print('Measuring Coverage...')
    measures = {}
    for (tar_info, tar_file) in file_in_corpus(corpus):
        total_tokens = 0
        total_coverage = 0
        for line in read_lines_from_tar_file(tar_file):
            line_tokens = tokenizer(line)
            total_tokens += len(line_tokens)
            # count this line's tokens that are in the reference set
            total_coverage += sum(1 for token in line_tokens if token in tokens)
        # NOTE(review): raises ZeroDivisionError for a zero-token document,
        # exactly as the original did.
        measures[tar_info.name] = round(total_coverage / total_tokens, 8)
    return measures
def _write_measures(file_name, measures):
    """Persist the per-document coverage dict to `file_name` via the csvfile helper."""
    print('Writing Measures...')
    write_dictionary(file_name, measures)
| 32.095238 | 108 | 0.656281 |
1b6897d34d934d3ce125ce99cf35292e0ec525ec | 14,305 | py | Python | exe/portable-python/App/Lib/test/test_sysconfig.py | jaredmusil/iawsc-data-toolbox | 65b97d45e13813935017f8b3c5726784027b065f | [
"MIT"
] | null | null | null | exe/portable-python/App/Lib/test/test_sysconfig.py | jaredmusil/iawsc-data-toolbox | 65b97d45e13813935017f8b3c5726784027b065f | [
"MIT"
] | 1 | 2018-04-15T22:59:15.000Z | 2018-04-15T22:59:15.000Z | exe/portable-python/App/Lib/test/test_sysconfig.py | jaredmusil/iawsc-data-toolbox | 65b97d45e13813935017f8b3c5726784027b065f | [
"MIT"
] | null | null | null | """Tests for sysconfig."""
import unittest
import sys
import os
import subprocess
import shutil
from copy import copy, deepcopy
from test.support import (run_unittest, TESTFN, unlink,
captured_stdout, skip_unless_symlink)
import sysconfig
from sysconfig import (get_paths, get_platform, get_config_vars,
get_path, get_path_names, _INSTALL_SCHEMES,
_get_default_scheme, _expand_vars,
get_scheme_names, get_config_var, _main)
import _osx_support
class TestSysConfig(unittest.TestCase):
    """Unit tests for sysconfig's path, scheme, config-var and platform helpers.

    setUp/tearDown snapshot and restore the interpreter-global state
    (os.name, sys.platform, sys.version, os.path helpers, the sysconfig
    config-var cache and os.environ) that these tests deliberately patch.
    """
    def setUp(self):
        """Make a copy of sys.path"""
        super(TestSysConfig, self).setUp()
        self.sys_path = sys.path[:]
        # patching os.uname
        if hasattr(os, 'uname'):
            self.uname = os.uname
            self._uname = os.uname()
        else:
            self.uname = None
            self._uname = None
        os.uname = self._get_uname
        # saving the environment
        self.name = os.name
        self.platform = sys.platform
        self.version = sys.version
        self.sep = os.sep
        self.join = os.path.join
        self.isabs = os.path.isabs
        self.splitdrive = os.path.splitdrive
        self._config_vars = copy(sysconfig._CONFIG_VARS)
        self.old_environ = deepcopy(os.environ)

    def tearDown(self):
        """Restore sys.path"""
        sys.path[:] = self.sys_path
        self._cleanup_testfn()
        if self.uname is not None:
            os.uname = self.uname
        else:
            del os.uname
        os.name = self.name
        sys.platform = self.platform
        sys.version = self.version
        os.sep = self.sep
        os.path.join = self.join
        os.path.isabs = self.isabs
        os.path.splitdrive = self.splitdrive
        sysconfig._CONFIG_VARS = copy(self._config_vars)
        # put back any environment variable the test changed or removed...
        for key, value in self.old_environ.items():
            if os.environ.get(key) != value:
                os.environ[key] = value
        # ...and drop any variable a test added
        for key in list(os.environ.keys()):
            if key not in self.old_environ:
                del os.environ[key]
        super(TestSysConfig, self).tearDown()

    def _set_uname(self, uname):
        # Set the fake uname tuple returned by the patched os.uname
        self._uname = uname

    def _get_uname(self):
        return self._uname

    def _cleanup_testfn(self):
        # Remove the scratch file/directory left behind by a test, if any
        path = TESTFN
        if os.path.isfile(path):
            os.remove(path)
        elif os.path.isdir(path):
            shutil.rmtree(path)

    def test_get_path_names(self):
        self.assertEqual(get_path_names(), sysconfig._SCHEME_KEYS)

    def test_get_paths(self):
        # get_paths() with no arguments must match the expanded default scheme
        scheme = get_paths()
        default_scheme = _get_default_scheme()
        wanted = _expand_vars(default_scheme, None)
        wanted = list(wanted.items())
        wanted.sort()
        scheme = list(scheme.items())
        scheme.sort()
        self.assertEqual(scheme, wanted)

    def test_get_path(self):
        # xxx make real tests here
        for scheme in _INSTALL_SCHEMES:
            for name in _INSTALL_SCHEMES[scheme]:
                res = get_path(name, scheme)

    def test_get_config_vars(self):
        cvars = get_config_vars()
        self.assertTrue(isinstance(cvars, dict))
        self.assertTrue(cvars)

    def test_get_platform(self):
        """Drive get_platform() through faked os.name/sys.version/uname combos."""
        # windows XP, 32bits
        os.name = 'nt'
        sys.version = ('2.4.4 (#71, Oct 18 2006, 08:34:43) '
                       '[MSC v.1310 32 bit (Intel)]')
        sys.platform = 'win32'
        self.assertEqual(get_platform(), 'win32')
        # windows XP, amd64
        os.name = 'nt'
        sys.version = ('2.4.4 (#71, Oct 18 2006, 08:34:43) '
                       '[MSC v.1310 32 bit (Amd64)]')
        sys.platform = 'win32'
        self.assertEqual(get_platform(), 'win-amd64')
        # windows XP, itanium
        os.name = 'nt'
        sys.version = ('2.4.4 (#71, Oct 18 2006, 08:34:43) '
                       '[MSC v.1310 32 bit (Itanium)]')
        sys.platform = 'win32'
        self.assertEqual(get_platform(), 'win-ia64')
        # macbook
        os.name = 'posix'
        sys.version = ('2.5 (r25:51918, Sep 19 2006, 08:49:13) '
                       '\n[GCC 4.0.1 (Apple Computer, Inc. build 5341)]')
        sys.platform = 'darwin'
        self._set_uname(('Darwin', 'macziade', '8.11.1',
                         ('Darwin Kernel Version 8.11.1: '
                          'Wed Oct 10 18:23:28 PDT 2007; '
                          'root:xnu-792.25.20~1/RELEASE_I386'), 'PowerPC'))
        _osx_support._remove_original_values(get_config_vars())
        get_config_vars()['MACOSX_DEPLOYMENT_TARGET'] = '10.3'
        get_config_vars()['CFLAGS'] = ('-fno-strict-aliasing -DNDEBUG -g '
                                       '-fwrapv -O3 -Wall -Wstrict-prototypes')
        # sys.maxsize distinguishes 32-bit ppc from 64-bit ppc64
        maxint = sys.maxsize
        try:
            sys.maxsize = 2147483647
            self.assertEqual(get_platform(), 'macosx-10.3-ppc')
            sys.maxsize = 9223372036854775807
            self.assertEqual(get_platform(), 'macosx-10.3-ppc64')
        finally:
            sys.maxsize = maxint
        self._set_uname(('Darwin', 'macziade', '8.11.1',
                         ('Darwin Kernel Version 8.11.1: '
                          'Wed Oct 10 18:23:28 PDT 2007; '
                          'root:xnu-792.25.20~1/RELEASE_I386'), 'i386'))
        _osx_support._remove_original_values(get_config_vars())
        get_config_vars()['MACOSX_DEPLOYMENT_TARGET'] = '10.3'
        get_config_vars()['CFLAGS'] = ('-fno-strict-aliasing -DNDEBUG -g '
                                       '-fwrapv -O3 -Wall -Wstrict-prototypes')
        maxint = sys.maxsize
        try:
            sys.maxsize = 2147483647
            self.assertEqual(get_platform(), 'macosx-10.3-i386')
            sys.maxsize = 9223372036854775807
            self.assertEqual(get_platform(), 'macosx-10.3-x86_64')
        finally:
            sys.maxsize = maxint
        # macbook with fat binaries (fat, universal or fat64)
        _osx_support._remove_original_values(get_config_vars())
        get_config_vars()['MACOSX_DEPLOYMENT_TARGET'] = '10.4'
        get_config_vars()['CFLAGS'] = ('-arch ppc -arch i386 -isysroot '
                                       '/Developer/SDKs/MacOSX10.4u.sdk '
                                       '-fno-strict-aliasing -fno-common '
                                       '-dynamic -DNDEBUG -g -O3')
        self.assertEqual(get_platform(), 'macosx-10.4-fat')
        _osx_support._remove_original_values(get_config_vars())
        get_config_vars()['CFLAGS'] = ('-arch x86_64 -arch i386 -isysroot '
                                       '/Developer/SDKs/MacOSX10.4u.sdk '
                                       '-fno-strict-aliasing -fno-common '
                                       '-dynamic -DNDEBUG -g -O3')
        self.assertEqual(get_platform(), 'macosx-10.4-intel')
        _osx_support._remove_original_values(get_config_vars())
        get_config_vars()['CFLAGS'] = ('-arch x86_64 -arch ppc -arch i386 -isysroot '
                                       '/Developer/SDKs/MacOSX10.4u.sdk '
                                       '-fno-strict-aliasing -fno-common '
                                       '-dynamic -DNDEBUG -g -O3')
        self.assertEqual(get_platform(), 'macosx-10.4-fat3')
        _osx_support._remove_original_values(get_config_vars())
        get_config_vars()['CFLAGS'] = ('-arch ppc64 -arch x86_64 -arch ppc -arch i386 -isysroot '
                                       '/Developer/SDKs/MacOSX10.4u.sdk '
                                       '-fno-strict-aliasing -fno-common '
                                       '-dynamic -DNDEBUG -g -O3')
        self.assertEqual(get_platform(), 'macosx-10.4-universal')
        _osx_support._remove_original_values(get_config_vars())
        get_config_vars()['CFLAGS'] = ('-arch x86_64 -arch ppc64 -isysroot '
                                       '/Developer/SDKs/MacOSX10.4u.sdk '
                                       '-fno-strict-aliasing -fno-common '
                                       '-dynamic -DNDEBUG -g -O3')
        self.assertEqual(get_platform(), 'macosx-10.4-fat64')
        for arch in ('ppc', 'i386', 'x86_64', 'ppc64'):
            _osx_support._remove_original_values(get_config_vars())
            get_config_vars()['CFLAGS'] = ('-arch %s -isysroot '
                                           '/Developer/SDKs/MacOSX10.4u.sdk '
                                           '-fno-strict-aliasing -fno-common '
                                           '-dynamic -DNDEBUG -g -O3' % arch)
            self.assertEqual(get_platform(), 'macosx-10.4-%s' % arch)
        # linux debian sarge
        os.name = 'posix'
        sys.version = ('2.3.5 (#1, Jul 4 2007, 17:28:59) '
                       '\n[GCC 4.1.2 20061115 (prerelease) (Debian 4.1.1-21)]')
        sys.platform = 'linux2'
        self._set_uname(('Linux', 'aglae', '2.6.21.1dedibox-r7',
                         '#1 Mon Apr 30 17:25:38 CEST 2007', 'i686'))
        self.assertEqual(get_platform(), 'linux-i686')
        # XXX more platforms to tests here

    def test_get_config_h_filename(self):
        config_h = sysconfig.get_config_h_filename()
        self.assertTrue(os.path.isfile(config_h), config_h)

    def test_get_scheme_names(self):
        wanted = ('nt', 'nt_user', 'os2', 'os2_home', 'osx_framework_user',
                  'posix_home', 'posix_prefix', 'posix_user')
        self.assertEqual(get_scheme_names(), wanted)

    @skip_unless_symlink
    def test_symlink(self):
        # On Windows, the EXE needs to know where pythonXY.dll is at so we have
        # to add the directory to the path.
        if sys.platform == "win32":
            os.environ["Path"] = "{};{}".format(
                os.path.dirname(sys.executable), os.environ["Path"])
        # Issue 7880
        def get(python):
            cmd = [python, '-c',
                   'import sysconfig; print(sysconfig.get_platform())']
            p = subprocess.Popen(cmd, stdout=subprocess.PIPE, env=os.environ)
            return p.communicate()
        real = os.path.realpath(sys.executable)
        link = os.path.abspath(TESTFN)
        os.symlink(real, link)
        try:
            # a symlinked interpreter must report the same platform
            self.assertEqual(get(real), get(link))
        finally:
            unlink(link)

    def test_user_similar(self):
        # Issue #8759: make sure the posix scheme for the users
        # is similar to the global posix_prefix one
        base = get_config_var('base')
        user = get_config_var('userbase')
        # the global scheme mirrors the distinction between prefix and
        # exec-prefix but not the user scheme, so we have to adapt the paths
        # before comparing (issue #9100)
        adapt = sys.prefix != sys.exec_prefix
        for name in ('stdlib', 'platstdlib', 'purelib', 'platlib'):
            global_path = get_path(name, 'posix_prefix')
            if adapt:
                global_path = global_path.replace(sys.exec_prefix, sys.prefix)
                base = base.replace(sys.exec_prefix, sys.prefix)
            user_path = get_path(name, 'posix_user')
            self.assertEqual(user_path, global_path.replace(base, user, 1))

    def test_main(self):
        # just making sure _main() runs and returns things in the stdout
        with captured_stdout() as output:
            _main()
        self.assertTrue(len(output.getvalue().split('\n')) > 0)

    @unittest.skipIf(sys.platform == "win32", "Does not apply to Windows")
    def test_ldshared_value(self):
        ldflags = sysconfig.get_config_var('LDFLAGS')
        ldshared = sysconfig.get_config_var('LDSHARED')
        self.assertIn(ldflags, ldshared)

    @unittest.skipUnless(sys.platform == "darwin", "test only relevant on MacOSX")
    def test_platform_in_subprocess(self):
        my_platform = sysconfig.get_platform()
        # Test without MACOSX_DEPLOYMENT_TARGET in the environment
        env = os.environ.copy()
        if 'MACOSX_DEPLOYMENT_TARGET' in env:
            del env['MACOSX_DEPLOYMENT_TARGET']
        with open('/dev/null', 'w') as devnull_fp:
            p = subprocess.Popen([
                    sys.executable, '-c',
                    'import sysconfig; print(sysconfig.get_platform())',
                ],
                stdout=subprocess.PIPE,
                stderr=devnull_fp,
                env=env)
            test_platform = p.communicate()[0].strip()
            test_platform = test_platform.decode('utf-8')
            status = p.wait()
        self.assertEqual(status, 0)
        self.assertEqual(my_platform, test_platform)
        # Test with MACOSX_DEPLOYMENT_TARGET in the environment, and
        # using a value that is unlikely to be the default one.
        env = os.environ.copy()
        env['MACOSX_DEPLOYMENT_TARGET'] = '10.1'
        p = subprocess.Popen([
                sys.executable, '-c',
                'import sysconfig; print(sysconfig.get_platform())',
            ],
            stdout=subprocess.PIPE,
            stderr=open('/dev/null'),
            env=env)
        test_platform = p.communicate()[0].strip()
        test_platform = test_platform.decode('utf-8')
        status = p.wait()
        self.assertEqual(status, 0)
        self.assertEqual(my_platform, test_platform)
class MakefileTests(unittest.TestCase):
    """Tests for sysconfig's Makefile discovery and parsing helpers."""
    @unittest.skipIf(sys.platform.startswith('win'),
                     'Test is not Windows compatible')
    def test_get_makefile_filename(self):
        makefile = sysconfig.get_makefile_filename()
        self.assertTrue(os.path.isfile(makefile), makefile)

    def test_parse_makefile(self):
        # Exercise variable expansion, $$ escaping and an invalid reference
        self.addCleanup(unlink, TESTFN)
        with open(TESTFN, "w") as makefile:
            print("var1=a$(VAR2)", file=makefile)
            print("VAR2=b$(var3)", file=makefile)
            print("var3=42", file=makefile)
            print("var4=$/invalid", file=makefile)
            print("var5=dollar$$5", file=makefile)
        vars = sysconfig._parse_makefile(TESTFN)
        self.assertEqual(vars, {
            'var1': 'ab42',
            'VAR2': 'b42',
            'var3': 42,
            'var4': '$/invalid',
            'var5': 'dollar$5',
        })
def test_main():
    # Run both test classes under the regression-test driver.
    run_unittest(TestSysConfig, MakefileTests)
if __name__ == "__main__":
    # Allow running this test module directly.
    test_main()
| 38.557951 | 97 | 0.561622 |
deac7816a6d9fa9eb9e1fadcd399f774fcf1e833 | 442 | py | Python | src/299. Bulls and Cows.py | xiaonanln/myleetcode-python | 95d282f21a257f937cd22ef20c3590a69919e307 | [
"Apache-2.0"
] | null | null | null | src/299. Bulls and Cows.py | xiaonanln/myleetcode-python | 95d282f21a257f937cd22ef20c3590a69919e307 | [
"Apache-2.0"
] | null | null | null | src/299. Bulls and Cows.py | xiaonanln/myleetcode-python | 95d282f21a257f937cd22ef20c3590a69919e307 | [
"Apache-2.0"
] | null | null | null | from itertools import izip
from collections import Counter
class Solution(object):
    def getHint(self, secret, guess):
        """
        :type secret: str
        :type guess: str
        :rtype: str
        """
        # Bulls: right digit in the right position. Everything else goes
        # into per-side leftover multisets.
        bulls = 0
        secret_rest = Counter()
        guess_rest = Counter()
        for s_ch, g_ch in zip(secret, guess):
            if s_ch == g_ch:
                bulls += 1
            else:
                secret_rest[s_ch] += 1
                guess_rest[g_ch] += 1
        # Cows: digits common to both leftovers, position-independent
        # (multiset intersection = per-digit min of the two counts).
        cows = sum((secret_rest & guess_rest).values())
        return '%dA%dB' % (bulls, cows)
print Solution().getHint('1807', '7810') | 15.241379 | 40 | 0.533937 |
212b1f74d559efa09c2081d2e79ce80e0137d265 | 4,900 | py | Python | CampusMap-master/main.py | divineflatus/ICM | 0d0aaa2a3433f9d25fc9c8c7fe90ccb970378032 | [
"MIT"
] | null | null | null | CampusMap-master/main.py | divineflatus/ICM | 0d0aaa2a3433f9d25fc9c8c7fe90ccb970378032 | [
"MIT"
] | null | null | null | CampusMap-master/main.py | divineflatus/ICM | 0d0aaa2a3433f9d25fc9c8c7fe90ccb970378032 | [
"MIT"
] | null | null | null | import sys
from PyQt5 import QtCore, QtGui, QtWidgets
from welcome import Ui_WelcomeWindow
from options import Ui_OptionWindow
from HomeScreen import Ui_HomeScreen
from MapScreen import Ui_MapScreen
from mapConstructor import MapConstructor
from event import Event
from webServer import WebServer
class Dialog(QtWidgets.QMainWindow):
    """Popup window confirming that events were successfully updated."""
    def __init__(self, parent=None):
        super(Dialog, self).__init__(parent)
        self.setupUi(self)

    def setupUi(self, Dialog):
        # Hand-written, Qt-Designer-style widget construction.
        Dialog.setObjectName("Dialog")
        Dialog.resize(320, 240)
        self.pushButton = QtWidgets.QPushButton(Dialog)
        self.pushButton.setGeometry(QtCore.QRect(100, 180, 121, 41))
        font = QtGui.QFont()
        font.setPointSize(12)
        self.pushButton.setFont(font)
        self.pushButton.setObjectName("pushButton")
        self.label = QtWidgets.QLabel(Dialog)
        self.label.setGeometry(QtCore.QRect(10, 70, 301, 71))
        self.label.setObjectName("label")
        self.retranslateUi(Dialog)
        QtCore.QMetaObject.connectSlotsByName(Dialog)
        # "OKBotton" (sic) is the established handler name; keep it.
        self.pushButton.clicked.connect(self.OKBotton)

    def retranslateUi(self, Dialog):
        _translate = QtCore.QCoreApplication.translate
        Dialog.setWindowTitle(_translate("Dialog", "Dialog"))
        self.pushButton.setText(_translate("Dialog", "OK"))
        self.label.setText(_translate(
            "Dialog", "<html><head/><body><p><span style=\" font-size:16pt;\">Events are successfully updated</span></p></body></html>"))

    def OKBotton(self):
        # OK just hides (not closes) the dialog.
        self.hide()
class Dialog2(QtWidgets.QMainWindow):
    """Popup window confirming that maps were successfully updated.

    NOTE(review): near-duplicate of Dialog — only the label text differs.
    """
    def __init__(self, parent=None):
        super(Dialog2, self).__init__(parent)
        self.setupUi(self)

    def setupUi(self, Dialog):
        Dialog.setObjectName("Dialog")
        Dialog.resize(320, 240)
        self.pushButton = QtWidgets.QPushButton(Dialog)
        self.pushButton.setGeometry(QtCore.QRect(100, 180, 121, 41))
        font = QtGui.QFont()
        font.setPointSize(12)
        self.pushButton.setFont(font)
        self.pushButton.setObjectName("pushButton")
        self.label = QtWidgets.QLabel(Dialog)
        self.label.setGeometry(QtCore.QRect(10, 70, 301, 71))
        self.label.setObjectName("label")
        self.retranslateUi(Dialog)
        QtCore.QMetaObject.connectSlotsByName(Dialog)
        self.pushButton.clicked.connect(self.OKBotton)

    def retranslateUi(self, Dialog):
        _translate = QtCore.QCoreApplication.translate
        Dialog.setWindowTitle(_translate("Dialog", "Dialog"))
        self.pushButton.setText(_translate("Dialog", "OK"))
        self.label.setText(_translate(
            "Dialog", "<html><head/><body><p><span style=\" font-size:16pt;\">Maps are successfully updated</span></p></body></html>"))

    def OKBotton(self):
        self.hide()
class Welcome(QtWidgets.QMainWindow, Ui_WelcomeWindow):
    """Initial window: routes to the home screen or the options window."""
    def __init__(self, parent=None):
        super(Welcome, self).__init__(parent)
        self.setupUi(self)
        self.HomeScreenBotton.clicked.connect(self.homeBotton)
        self.OptionsBotton.clicked.connect(self.optionsBotton)

    def homeBotton(self):
        # Replace this window with the full-screen home screen.
        self.homescreen = HomeScreen()
        self.homescreen.showFullScreen()
        self.hide()

    def optionsBotton(self):
        # Options opens on top; the welcome window stays visible.
        self.op = Options()
        self.op.show()
class Options(QtWidgets.QMainWindow, Ui_OptionWindow):
    """Maintenance window: regenerates event data and the HTML map."""
    def __init__(self, parent=None):
        super(Options, self).__init__(parent)
        self.setupUi(self)
        self.MapScreenBotton.clicked.connect(self.mapBotton)
        self.HTMLBotton.clicked.connect(self.HTML)

    def mapBotton(self):
        # Refresh events (download + image fetch), rebuild the event script,
        # then confirm with the "Events updated" dialog.
        self.dialog = Dialog()
        event = Event()
        event.getEvent()
        event.downloadImage()
        self.map = MapConstructor()
        self.map.scriptEvent()
        self.dialog.show()

    def HTML(self):
        # Rebuild the map HTML and its event script, then confirm with the
        # "Maps updated" dialog.
        self.dialog = Dialog2()
        self.map = MapConstructor()
        self.map.createMap()
        self.map.scriptEvent()
        self.dialog.show()
class HomeScreen(QtWidgets.QMainWindow, Ui_HomeScreen):
    """Full-screen home view with a button to launch the map."""
    def __init__(self, parent=None):
        super(HomeScreen, self).__init__(parent)
        self.setupUi(self)
        self.launchMapBotton.clicked.connect(self.mapBotton)

    def mapBotton(self):
        self.mapscreen = MapScreen()
        self.mapscreen.showFullScreen()
        self.hide()
class MapScreen(QtWidgets.QMainWindow, Ui_MapScreen):
    """Full-screen map view with a button back to the home screen."""
    def __init__(self, parent=None):
        super(MapScreen, self).__init__(parent)
        self.setupUi(self)
        self.HomeScreenBotton.clicked.connect(self.homeBotton)

    def homeBotton(self):
        self.homescreen = HomeScreen()
        # NOTE(review): hides before showing the new screen — opposite order
        # from HomeScreen.mapBotton; confirm the flicker behavior is intended.
        self.hide()
        self.homescreen.showFullScreen()
if __name__ == '__main__':
    # Start the background web server before launching the Qt UI.
    webserver = WebServer()
    webserver.startServer()
    app = QtWidgets.QApplication(sys.argv)
    w = Welcome()
    w.show()
    # Enter the Qt event loop; exit the process with its return code.
    sys.exit(app.exec_())
| 31.818182 | 137 | 0.665102 |
ae1b1414ab4a96d0bc679073517a2bd61c05aa76 | 1,339 | py | Python | genosha/main.py | marcioaug/genosha | 756e70c835b82c70201ebd2aa458bc95a39e9c69 | [
"MIT"
] | null | null | null | genosha/main.py | marcioaug/genosha | 756e70c835b82c70201ebd2aa458bc95a39e9c69 | [
"MIT"
] | null | null | null | genosha/main.py | marcioaug/genosha | 756e70c835b82c70201ebd2aa458bc95a39e9c69 | [
"MIT"
] | null | null | null | import os
import json
from hunor.main import Hunor
from hunor.args import Options
from genosha.utils import gen_export_dir
def main(config_file='/home/marcioaug/PycharmProjects/genosha/examples/relational/relational/config.json',
         mutants_dir='/home/marcioaug/PycharmProjects/genosha/examples/relational/mutants'):
    """Run hunor over every target listed in the configuration file.

    Args:
        config_file: Path to the JSON configuration listing the mutation
            targets. Defaults to the previously hard-coded example path,
            so existing ``main()`` calls behave exactly as before.
        mutants_dir: Directory holding the generated mutants; also defaulted
            for backward compatibility.
    """
    with open(config_file) as f:
        config = json.load(f)  # idiomatic: json.load reads the file object directly
    for target in config['targets']:
        run_hunor(
            config_file=config_file,
            mutants=gen_export_dir(
                mutants_path=mutants_dir,
                java_class=target['class'],
                class_method=target['method'],
                line_number=target['line'],
                target_id=target['id']),
            sut_class=target['class'],
            mutation_tool='major'
        )
def run_hunor(config_file, mutants, sut_class, mutation_tool):
    """Build the hunor Options for a single target and execute the run.

    Returns whatever ``Hunor.run()`` returns.
    """
    settings = dict(
        maven_home='/home/marcioaug/Tools/maven/current',
        java_home='/home/marcioaug/Tools/java/jdk1.8.0_181',
        config_file=config_file,
        mutants=mutants,
        sut_class=sut_class,
        mutation_tool=mutation_tool,
        is_randoop_disabled=True,
        output=os.path.join(mutants, 'hunor-output'),
    )
    return Hunor(options=Options(**settings)).run()
if __name__ == '__main__':
    # Script entry point: process every configured target with the defaults.
    main()
| 27.326531 | 102 | 0.643764 |
b0509be8069691fb4ed99bde92b1d01fcd045dfa | 945 | py | Python | scraper/storage_spiders/giaynamcaonet.py | chongiadung/choinho | d2a216fe7a5064d73cdee3e928a7beef7f511fd1 | [
"MIT"
] | null | null | null | scraper/storage_spiders/giaynamcaonet.py | chongiadung/choinho | d2a216fe7a5064d73cdee3e928a7beef7f511fd1 | [
"MIT"
] | 10 | 2020-02-11T23:34:28.000Z | 2022-03-11T23:16:12.000Z | scraper/storage_spiders/giaynamcaonet.py | chongiadung/choinho | d2a216fe7a5064d73cdee3e928a7beef7f511fd1 | [
"MIT"
] | 3 | 2018-08-05T14:54:25.000Z | 2021-06-07T01:49:59.000Z | # Auto generated by generator.py. Delete this line if you make modification.
from scrapy.spiders import Rule
from scrapy.linkextractors import LinkExtractor
# XPath selectors used to extract product fields from a crawled page.
# Empty values mean the field is not extracted for this site.
XPATH = {
    'name' : "//div[@class='infoProduct']/h1",
    'price' : "//span[@class='color-red font-bigger']/text()",
    'category' : "//ul[@class='bc-menu']/li/a",
    'description' : "//div[@class='_mota']/p",
    'images' : "//div[@class='thumnailProduct']//a/@data-image",
    'canonical' : "//link[@rel='canonical']/@href",
    'base_url' : "",
    'brand' : ""
}
# Spider identity and crawl entry points.
name = 'giaynamcao.net'
allowed_domains = ['giaynamcao.net']
start_urls = ['http://giaynamcao.net/']
tracking_url = ''
sitemap_urls = ['']
sitemap_rules = [('', 'parse_item')]
sitemap_follow = []
# Link-extraction rules: product pages go to parse_item, listing pages
# are followed via parse. The catch-all rule is deliberately disabled.
rules = [
    Rule(LinkExtractor(allow=['/[a-zA-Z0-9-]+-1-1-\d+\.html$']), 'parse_item'),
    Rule(LinkExtractor(allow=['/[a-zA-Z0-9-]+-2-1-\d+\.html$']), 'parse'),
    #Rule(LinkExtractor(), 'parse_item_and_links'),
]
| 35 | 79 | 0.621164 |
4f3de7e50db7522a212bee56c802121c8f92a70e | 22,956 | py | Python | src/manhole/__init__.py | jgarte/python-manhole | e71a91d138339aedf3778eebe8ccfd5b6c1a46f4 | [
"BSD-2-Clause"
] | 256 | 2015-02-11T08:06:43.000Z | 2022-03-26T04:17:36.000Z | src/manhole/__init__.py | jgarte/python-manhole | e71a91d138339aedf3778eebe8ccfd5b6c1a46f4 | [
"BSD-2-Clause"
] | 29 | 2015-06-15T01:58:22.000Z | 2022-01-04T04:54:30.000Z | src/manhole/__init__.py | jgarte/python-manhole | e71a91d138339aedf3778eebe8ccfd5b6c1a46f4 | [
"BSD-2-Clause"
] | 15 | 2015-06-15T01:53:49.000Z | 2022-03-12T00:44:54.000Z | from __future__ import print_function
import atexit
import code
import errno
import os
import signal
import socket
import struct
import sys
import traceback
from contextlib import closing
__version__ = '1.8.0'

# Optional signalfd support (Linux); None when the package is unavailable.
try:
    import signalfd
except ImportError:
    signalfd = None

# Python 2/3 compatibility aliases.
try:
    string = basestring
except NameError:  # python 3
    string = str

try:
    InterruptedError = InterruptedError
except NameError:  # python <= 3.2
    InterruptedError = OSError

try:
    BrokenPipeError = BrokenPipeError
except NameError:  # old python
    class BrokenPipeError(Exception):
        pass

# Thread switch-interval API renamed in Python 3.
if hasattr(sys, 'setswitchinterval'):
    setinterval = sys.setswitchinterval
    getinterval = sys.getswitchinterval
else:
    setinterval = sys.setcheckinterval
    getinterval = sys.getcheckinterval

# _get_original(mod, name): fetch the *unpatched* stdlib object even when
# eventlet or gevent has monkey-patched the module; plain getattr otherwise.
try:
    from eventlet.patcher import original as _original

    def _get_original(mod, name):
        return getattr(_original(mod), name)
except ImportError:
    try:
        from gevent.monkey import get_original as _get_original
    except ImportError:
        def _get_original(mod, name):
            return getattr(__import__(mod), name)

# Unpatched primitives captured once at import time.
_ORIGINAL_SOCKET = _get_original('socket', 'socket')
try:
    _ORIGINAL_ALLOCATE_LOCK = _get_original('thread', 'allocate_lock')
except ImportError:  # python 3
    _ORIGINAL_ALLOCATE_LOCK = _get_original('_thread', 'allocate_lock')
_ORIGINAL_THREAD = _get_original('threading', 'Thread')
_ORIGINAL_EVENT = _get_original('threading', 'Event')
_ORIGINAL__ACTIVE = _get_original('threading', '_active')
_ORIGINAL_SLEEP = _get_original('time', 'sleep')

PY3 = sys.version_info[0] == 3

# Best-effort pthread_setname_np via libpthread; falls back to a no-op when
# ctypes/libpthread or the symbol is unavailable.
try:
    import ctypes
    import ctypes.util

    libpthread_path = ctypes.util.find_library("pthread")
    if not libpthread_path:
        raise ImportError
    libpthread = ctypes.CDLL(libpthread_path)
    if not hasattr(libpthread, "pthread_setname_np"):
        raise ImportError
    _pthread_setname_np = libpthread.pthread_setname_np
    _pthread_setname_np.argtypes = [ctypes.c_void_p, ctypes.c_char_p]
    _pthread_setname_np.restype = ctypes.c_int

    def pthread_setname_np(ident, name):
        _pthread_setname_np(ident, name[:15])  # pthread names are limited; truncate
except ImportError:
    def pthread_setname_np(ident, name):
        pass

# Platform-specific socket option for retrieving peer credentials.
if sys.platform == 'darwin' or sys.platform.startswith("freebsd"):
    _PEERCRED_LEVEL = getattr(socket, 'SOL_LOCAL', 0)
    _PEERCRED_OPTION = getattr(socket, 'LOCAL_PEERCRED', 1)
else:
    _PEERCRED_LEVEL = socket.SOL_SOCKET
    # TODO: Is this missing on some platforms?
    _PEERCRED_OPTION = getattr(socket, 'SO_PEERCRED', 17)

# All SIG* numbers (excluding SIG_* special values, filtered by '_').
_ALL_SIGNALS = tuple(getattr(signal, sig) for sig in dir(signal)
                     if sig.startswith('SIG') and '_' not in sig)

# These (_LOG and _MANHOLE) will hold instances after install
_MANHOLE = None
_LOCK = _ORIGINAL_ALLOCATE_LOCK()
def force_original_socket(sock):
    """Rebuild `sock` as an unpatched socket.socket, closing the wrapper.

    Bypasses eventlet/gevent monkey-patched socket classes by transplanting
    the underlying file descriptor (py3 `detach`) or `_sock` (py2).
    """
    with closing(sock):
        if hasattr(sock, 'detach'):  # python 3: steal the fd from the wrapper
            return _ORIGINAL_SOCKET(sock.family, sock.type, sock.proto, sock.detach())
        else:
            assert hasattr(_ORIGINAL_SOCKET, '_sock')
            return _ORIGINAL_SOCKET(_sock=sock._sock)
def get_peercred(sock):
    """Gets the (pid, uid, gid) for the client on the given *connected* socket."""
    # SO_PEERCRED / LOCAL_PEERCRED yields three packed C ints ('3i').
    buf = sock.getsockopt(_PEERCRED_LEVEL, _PEERCRED_OPTION, struct.calcsize('3i'))
    return struct.unpack('3i', buf)
# Module-specific error types. The names describe the condition; they are
# raised by code outside this excerpt, so confirm exact semantics there.
class AlreadyInstalled(Exception):
    pass


class NotInstalled(Exception):
    pass


class ConfigurationConflict(Exception):
    pass


class SuspiciousClient(Exception):
    pass
class ManholeThread(_ORIGINAL_THREAD):
    """
    Thread that runs the infamous "Manhole". This thread is a `daemon` thread - it will exit if the main thread
    exits.
    On connect, a different, non-daemon thread will be started - so that the process won't exit while there's a
    connection to the manhole.
    Args:
        sigmask (list of signal numbers): Signals to block in this thread.
        start_timeout (float): Seconds to wait for the thread to start. Emits a message if the thread is not running
            when calling ``start()``.
        bind_delay (float): Seconds to delay socket binding. Default: `no delay`.
        daemon_connection (bool): The connection thread is daemonic (dies on app exit). Default: ``False``.
    """
    def __init__(self,
                 get_socket, sigmask, start_timeout, connection_handler,
                 bind_delay=None, daemon_connection=False):
        super(ManholeThread, self).__init__()
        self.daemon = True
        self.daemon_connection = daemon_connection
        self.name = "Manhole"
        self.psname = b"Manhole"  # OS-level name set via pthread_setname_np
        self.sigmask = sigmask
        self.serious = _ORIGINAL_EVENT()
        # time to wait for the manhole to get serious (to have a complete start)
        # see: http://emptysqua.re/blog/dawn-of-the-thread/
        self.start_timeout = start_timeout
        self.bind_delay = bind_delay
        self.connection_handler = connection_handler
        self.get_socket = get_socket
        self.should_run = False
    def stop(self):
        # Ask the accept loop in run() to exit after its current iteration.
        self.should_run = False
    def clone(self, **kwargs):
        """
        Make a fresh thread with the same options. This is usually used on dead threads.
        """
        return ManholeThread(
            self.get_socket, self.sigmask, self.start_timeout,
            connection_handler=self.connection_handler,
            daemon_connection=self.daemon_connection,
            **kwargs
        )
    def start(self):
        self.should_run = True
        super(ManholeThread, self).start()
        # Wait until run() signals readiness; warn (but keep going) on timeout.
        if not self.serious.wait(self.start_timeout):
            _LOG("WARNING: Waited %s seconds but Manhole thread didn't start yet :(" % self.start_timeout)
    def run(self):
        """
        Runs the manhole loop. Only accepts one connection at a time because:
        * This thread is a daemon thread (exits when main thread exists).
        * The connection need exclusive access to stdin, stderr and stdout so it can redirect inputs and outputs.
        """
        self.serious.set()
        if signalfd and self.sigmask:
            signalfd.sigprocmask(signalfd.SIG_BLOCK, self.sigmask)
        pthread_setname_np(self.ident, self.psname)
        if self.bind_delay:
            _LOG("Delaying UDS binding %s seconds ..." % self.bind_delay)
            _ORIGINAL_SLEEP(self.bind_delay)
        sock = self.get_socket()
        while self.should_run:
            _LOG("Waiting for new connection (in pid:%s) ..." % os.getpid())
            try:
                client = ManholeConnectionThread(sock.accept()[0], self.connection_handler, self.daemon_connection)
                client.start()
                client.join()
            except socket.timeout:
                continue
            except (InterruptedError, socket.error) as e:
                # accept() interrupted by a signal (EINTR) is expected and
                # retried; any other socket error is a real failure.
                if e.errno != errno.EINTR:
                    raise
                continue
            finally:
                # Drop the reference so the finished connection thread can be
                # garbage-collected between connections.
                client = None
class ManholeConnectionThread(_ORIGINAL_THREAD):
    """
    Manhole thread that handles the connection. This thread is a normal thread (non-daemon) - it won't exit if the
    main thread exits.
    """
    def __init__(self, client, connection_handler, daemon=False):
        super(ManholeConnectionThread, self).__init__()
        self.daemon = daemon
        # Re-wrap the accepted socket as an unpatched socket object.
        self.client = force_original_socket(client)
        self.connection_handler = connection_handler
        self.name = "ManholeConnectionThread"
        self.psname = b"ManholeConnectionThread"
    def run(self):
        _LOG('Started ManholeConnectionThread thread. Checking credentials ...')
        pthread_setname_np(self.ident, b"Manhole -------")
        # Rejects the client (raises SuspiciousClient) unless its UID is root
        # or matches our EUID.
        pid, _, _ = check_credentials(self.client)
        pthread_setname_np(self.ident, b"Manhole < PID:%d" % pid)
        try:
            self.connection_handler(self.client)
        except BaseException as exc:
            # Never let a connection failure propagate into the interpreter.
            _LOG("ManholeConnectionThread failure: %r" % exc)
def check_credentials(client):
    """Checks credentials for given socket.

    Returns the peer's ``(pid, uid, gid)``; raises ``SuspiciousClient`` when
    the peer UID is neither root nor the current effective UID.
    """
    pid, uid, gid = get_peercred(client)
    euid = os.geteuid()
    client_name = "PID:%s UID:%s GID:%s" % (pid, uid, gid)
    if uid in (0, euid):
        _LOG("Accepted connection on fd:%s from %s" % (client.fileno(), client_name))
        return pid, uid, gid
    raise SuspiciousClient("Can't accept client with %s. It doesn't match the current EUID:%s or ROOT." % (
        client_name, euid
    ))
def handle_connection_exec(client):
    """
    Alternate connection handler. No output redirection.
    Reads one line at a time from the client and executes it with
    ``_MANHOLE.locals`` as the local namespace; an ``exit()`` helper is
    exposed to leave the loop.
    """
    class ExitExecLoop(Exception):
        """Internal control-flow exception raised by exit()."""
        pass
    def exit():
        raise ExitExecLoop()
    client.settimeout(None)
    fh = client.makefile()
    with closing(client):
        with closing(fh):
            try:
                payload = fh.readline()
                while payload:
                    _LOG("Running: %r." % payload)
                    # SECURITY: this deliberately executes arbitrary code sent
                    # by the (credential-checked) client - that is the whole
                    # point of a manhole. Do not expose to untrusted peers.
                    eval(compile(payload, '<manhole>', 'exec'), {'exit': exit}, _MANHOLE.locals)
                    payload = fh.readline()
            except ExitExecLoop:
                _LOG("Exiting exec loop.")
def handle_connection_repl(client):
    """
    Handles connection.
    Redirects sys.stdin/stdout (and, when configured, stderr) to the client
    socket, runs the interactive REPL, then restores the original streams -
    restoration happens in reverse patch order from the ``backup`` list.
    """
    client.settimeout(None)
    # # disable this till we have evidence that it's needed
    # client.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, 0)
    # # Note: setting SO_RCVBUF on UDS has no effect, see: http://man7.org/linux/man-pages/man7/unix.7.html
    backup = []
    old_interval = getinterval()
    patches = [('r', ('stdin', '__stdin__')), ('w', ('stdout', '__stdout__'))]
    if _MANHOLE.redirect_stderr:
        patches.append(('w', ('stderr', '__stderr__')))
    try:
        for mode, names in patches:
            for name in names:
                backup.append((name, getattr(sys, name)))
                # Buffering: unbuffered (0) on Py2, line-buffered (1) on Py3.
                setattr(sys, name, client.makefile(mode, 1 if PY3 else 0))
        try:
            handle_repl(_MANHOLE.locals)
        except BrokenPipeError:
            _LOG("REPL client disconnected")
        except Exception as exc:
            _LOG("REPL failed with %r." % exc)
        _LOG("DONE.")
    finally:
        try:
            # Change the switch/check interval to something ridiculous. We don't want to have other thread try
            # to write to the redirected sys.__std*/sys.std* - it would fail horribly.
            setinterval(2147483647)
            for name, fh in backup:
                try:
                    getattr(sys, name).close()
                except IOError:
                    pass
                setattr(sys, name, fh)
            try:
                client.close()
            except IOError:
                pass
        finally:
            setinterval(old_interval)
            _LOG("Cleaned up.")
# Maps the string values accepted by configure(connection_handler=...) to the
# actual handler callables; unknown values are assumed to be callables.
_CONNECTION_HANDLER_ALIASES = {
    'repl': handle_connection_repl,
    'exec': handle_connection_exec
}
class ManholeConsole(code.InteractiveConsole):
    """InteractiveConsole whose error output honours the manhole's
    ``redirect_stderr`` setting."""
    def __init__(self, *args, **kw):
        code.InteractiveConsole.__init__(self, *args, **kw)
        # Tracebacks go wherever the manhole was told to send stderr.
        self.file = sys.stderr if _MANHOLE.redirect_stderr else sys.stdout
    def write(self, data):
        self.file.write(data)
def handle_repl(locals):
    """
    Dumps stacktraces and runs an interactive prompt (REPL).
    """
    dump_stacktraces()
    namespace = dict(
        dump_stacktraces=dump_stacktraces,
        sys=sys,
        os=os,
        socket=socket,
        traceback=traceback,
    )
    if locals:
        namespace.update(locals)
    try:
        ManholeConsole(namespace).interact()
    except SystemExit:
        pass
    finally:
        # The console stashes the last exception on sys; scrub it so we don't
        # keep frames (and everything they reference) alive.
        for attribute in ('last_type', 'last_value', 'last_traceback'):
            try:
                delattr(sys, attribute)
            except AttributeError:
                pass
class Logger(object):
    """
    Internal object used for logging.
    Initially this is not configured. Until you call ``manhole.install()``, this logger object won't work (will raise
    ``NotInstalled``).
    """
    time = _get_original('time', 'time')
    enabled = True
    destination = None
    def configure(self, enabled, destination):
        self.enabled = enabled
        self.destination = destination
    def release(self):
        # Back to the pristine state: enabled, but with no destination, so the
        # next __call__ raises NotInstalled until configure() runs again.
        self.enabled = True
        self.destination = None
    def __call__(self, message):
        """
        Fail-ignorant logging function.
        """
        if not self.enabled:
            return
        if self.destination is None:
            raise NotInstalled("Manhole is not installed!")
        try:
            full_message = "Manhole[%s:%.4f]: %s\n" % (os.getpid(), self.time(), message)
            if isinstance(self.destination, int):
                os.write(self.destination, full_message.encode('ascii', 'ignore'))
            else:
                self.destination.write(full_message)
        except Exception:
            # Logging must never break the host application.
            pass
_LOG = Logger()  # module-wide logger instance; configure()d/release()d by install()
class Manhole(object):
    """Holds the manhole's runtime state: the accept thread, signal handlers,
    fork patches and the UDS path. One instance is stored in ``_MANHOLE``."""
    # Manhole core configuration
    # These are initialized when manhole is installed.
    daemon_connection = False
    locals = None
    original_os_fork = None
    original_os_forkpty = None
    redirect_stderr = True
    reinstall_delay = 0.5
    should_restart = None
    sigmask = _ALL_SIGNALS
    socket_path = None
    start_timeout = 0.5
    connection_handler = None
    previous_signal_handlers = None
    _thread = None
    def configure(self,
                  patch_fork=True, activate_on=None, sigmask=_ALL_SIGNALS, oneshot_on=None, thread=True,
                  start_timeout=0.5, socket_path=None, reinstall_delay=0.5, locals=None, daemon_connection=False,
                  redirect_stderr=True, connection_handler=handle_connection_repl):
        """Apply the given options and start the manhole accordingly (thread,
        signal handlers and/or os.fork patches). See install() for the
        meaning of each parameter."""
        self.socket_path = socket_path
        self.reinstall_delay = reinstall_delay
        self.redirect_stderr = redirect_stderr
        self.locals = locals
        self.sigmask = sigmask
        self.daemon_connection = daemon_connection
        self.start_timeout = start_timeout
        self.previous_signal_handlers = {}
        # Accept either an alias string ('repl'/'exec') or a callable.
        self.connection_handler = _CONNECTION_HANDLER_ALIASES.get(connection_handler, connection_handler)
        if oneshot_on is None and activate_on is None and thread:
            self.thread.start()
            self.should_restart = True
        if oneshot_on is not None:
            # NOTE(review): `string` is presumably a str/basestring compat
            # alias defined earlier in this module - confirm.
            oneshot_on = getattr(signal, 'SIG' + oneshot_on) if isinstance(oneshot_on, string) else oneshot_on
            self.previous_signal_handlers.setdefault(oneshot_on, signal.signal(oneshot_on, self.handle_oneshot))
        if activate_on is not None:
            activate_on = getattr(signal, 'SIG' + activate_on) if isinstance(activate_on, string) else activate_on
            if activate_on == oneshot_on:
                raise ConfigurationConflict('You cannot do activation of the Manhole thread on the same signal '
                                            'that you want to do oneshot activation !')
            self.previous_signal_handlers.setdefault(activate_on, signal.signal(activate_on, self.activate_on_signal))
        atexit.register(self.remove_manhole_uds)
        if patch_fork:
            if activate_on is None and oneshot_on is None and socket_path is None:
                self.patch_os_fork_functions()
            else:
                if activate_on:
                    _LOG("Not patching os.fork and os.forkpty. Activation is done by signal %s" % activate_on)
                elif oneshot_on:
                    _LOG("Not patching os.fork and os.forkpty. Oneshot activation is done by signal %s" % oneshot_on)
                elif socket_path:
                    _LOG("Not patching os.fork and os.forkpty. Using user socket path %s" % socket_path)
    def release(self):
        """Undo configure(): stop the thread, remove the socket file, restore
        os.fork/os.forkpty and all replaced signal handlers."""
        if self._thread:
            self._thread.stop()
            self._thread = None
        self.remove_manhole_uds()
        self.restore_os_fork_functions()
        for sig, handler in self.previous_signal_handlers.items():
            signal.signal(sig, handler)
        self.previous_signal_handlers.clear()
    @property
    def thread(self):
        # Lazily create the ManholeThread on first access.
        if self._thread is None:
            self._thread = ManholeThread(
                self.get_socket, self.sigmask, self.start_timeout, self.connection_handler,
                daemon_connection=self.daemon_connection
            )
        return self._thread
    @thread.setter
    def thread(self, value):
        self._thread = value
    def get_socket(self):
        """Create, bind and listen on the manhole's unix domain socket
        (removing any stale socket file first)."""
        sock = _ORIGINAL_SOCKET(socket.AF_UNIX, socket.SOCK_STREAM)
        name = self.remove_manhole_uds()
        sock.bind(name)
        sock.listen(5)
        _LOG("Manhole UDS path: " + name)
        return sock
    def reinstall(self):
        """
        Reinstalls the manhole. Checks if the thread is running. If not, it starts it again.
        """
        with _LOCK:
            if not (self.thread.is_alive() and self.thread in _ORIGINAL__ACTIVE):
                self.thread = self.thread.clone(bind_delay=self.reinstall_delay)
                if self.should_restart:
                    self.thread.start()
    def handle_oneshot(self, _signum=None, _frame=None):
        """Signal handler for ``oneshot_on``: serve a single connection inline
        (no thread), then remove the socket again."""
        try:
            try:
                sock = self.get_socket()
                _LOG("Waiting for new connection (in pid:%s) ..." % os.getpid())
                client = force_original_socket(sock.accept()[0])
                check_credentials(client)
                self.connection_handler(client)
            finally:
                self.remove_manhole_uds()
        except BaseException as exc:  # pylint: disable=W0702
            # we don't want to let any exception out, it might make the application misbehave
            _LOG("Oneshot failure: %r" % exc)
    def remove_manhole_uds(self):
        # Unlink any stale socket file and return the path to (re)use.
        name = self.uds_name
        if os.path.exists(name):
            os.unlink(name)
        return name
    @property
    def uds_name(self):
        # Default path embeds the pid so each process gets its own socket.
        if self.socket_path is None:
            return "/tmp/manhole-%s" % os.getpid()
        return self.socket_path
    def patched_fork(self):
        """Fork a child process."""
        pid = self.original_os_fork()
        if not pid:
            # Only the child reinstalls; the parent's manhole is untouched.
            _LOG('Fork detected. Reinstalling Manhole.')
            self.reinstall()
        return pid
    def patched_forkpty(self):
        """Fork a new process with a new pseudo-terminal as controlling tty."""
        pid, master_fd = self.original_os_forkpty()
        if not pid:
            _LOG('Fork detected. Reinstalling Manhole.')
            self.reinstall()
        return pid, master_fd
    def patch_os_fork_functions(self):
        # Keep the originals so restore_os_fork_functions() can undo this.
        self.original_os_fork, os.fork = os.fork, self.patched_fork
        self.original_os_forkpty, os.forkpty = os.forkpty, self.patched_forkpty
        _LOG("Patched %s and %s." % (self.original_os_fork, self.original_os_forkpty))
    def restore_os_fork_functions(self):
        if self.original_os_fork:
            os.fork = self.original_os_fork
        if self.original_os_forkpty:
            os.forkpty = self.original_os_forkpty
    def activate_on_signal(self, _signum, _frame):
        # Signal handler for ``activate_on``: start the accept thread on demand.
        self.thread.start()
def install(verbose=True,
            verbose_destination=sys.__stderr__.fileno() if hasattr(sys.__stderr__, 'fileno') else sys.__stderr__,
            strict=True,
            **kwargs):
    """
    Installs the manhole.
    Args:
        verbose (bool): Set it to ``False`` to squelch the logging.
        verbose_destination (file descriptor or handle): Destination for verbose messages. Default is unbuffered stderr
            (stderr ``2`` file descriptor).
        patch_fork (bool): Set it to ``False`` if you don't want your ``os.fork`` and ``os.forkpty`` monkeypatched
        activate_on (int or signal name): set to ``"USR1"``, ``"USR2"`` or some other signal name, or a number if you
            want the Manhole thread to start when this signal is sent. This is desirable in case you don't want the
            thread active all the time.
        oneshot_on (int or signal name): Set to ``"USR1"``, ``"USR2"`` or some other signal name, or a number if you
            want the Manhole to listen for connection in the signal handler. This is desirable in case you don't want
            threads at all.
        thread (bool): Start the always-on ManholeThread. Default: ``True``. Automatically switched to ``False`` if
            ``oneshot_on`` or ``activate_on`` are used.
        sigmask (list of ints or signal names): Will set the signal mask to the given list (using
            ``signalfd.sigprocmask``). No action is done if ``signalfd`` is not importable.
            **NOTE**: This is done so that the Manhole thread doesn't *steal* any signals; Normally that is fine because
            Python will force all the signal handling to be run in the main thread but signalfd doesn't.
        socket_path (str): Use a specific path for the unix domain socket (instead of ``/tmp/manhole-<pid>``). This
            disables ``patch_fork`` as children cannot reuse the same path.
        reinstall_delay (float): Delay the unix domain socket creation *reinstall_delay* seconds. This
            alleviates cleanup failures when using fork+exec patterns.
        locals (dict): Names to add to manhole interactive shell locals.
        daemon_connection (bool): The connection thread is daemonic (dies on app exit). Default: ``False``.
        redirect_stderr (bool): Redirect output from stderr to manhole console. Default: ``True``.
        connection_handler (function): Connection handler to use. Use ``"exec"`` for simple implementation without
            output redirection or your own function. (warning: this is for advanced users). Default: ``"repl"``.
    """
    # pylint: disable=W0603
    global _MANHOLE
    with _LOCK:
        if _MANHOLE is None:
            _MANHOLE = Manhole()
        else:
            if strict:
                raise AlreadyInstalled("Manhole already installed!")
            else:
                # Non-strict reinstall: tear down the previous instance first.
                _LOG.release()
                _MANHOLE.release()  # Threads might be started here
    _LOG.configure(verbose, verbose_destination)
    _MANHOLE.configure(**kwargs)  # Threads might be started here
    return _MANHOLE
def dump_stacktraces():
    """
    Dumps thread ids and tracebacks to stdout.
    """
    chunks = []
    frames = sys._current_frames()  # pylint: disable=W0212
    for thread_id, stack in frames.items():
        chunks.append("\n######### ProcessID=%s, ThreadID=%s #########" % (
            os.getpid(), thread_id
        ))
        for filename, lineno, name, line in traceback.extract_stack(stack):
            chunks.append('File: "%s", line %d, in %s' % (filename, lineno, name))
            if line:
                chunks.append(" %s" % line.strip())
    chunks.append("#############################################\n\n")
    # Honour the manhole's stderr redirection setting for the dump itself.
    target = sys.stderr if _MANHOLE.redirect_stderr else sys.stdout
    print('\n'.join(chunks), file=target)
| 35.86875 | 120 | 0.629726 |
3b3abeb1720d9cd48c02c2f3a49e70bf66a2b883 | 7,951 | py | Python | graphs/basic_graphs.py | ELR424/Python | a212efee5b44312c8b4b626ae412bacc5f4117fd | [
"MIT"
] | 1,568 | 2019-04-25T11:54:45.000Z | 2022-03-31T23:35:23.000Z | graphs/basic_graphs.py | rayenough/Python | 2fc2ae3f32fad16226c88358cb7c9e4e5c790a8f | [
"MIT"
] | 58 | 2019-02-20T10:45:50.000Z | 2020-09-30T12:18:45.000Z | graphs/basic_graphs.py | rayenough/Python | 2fc2ae3f32fad16226c88358cb7c9e4e5c790a8f | [
"MIT"
] | 464 | 2019-04-17T04:57:16.000Z | 2022-03-31T04:12:57.000Z | from __future__ import print_function
# Python 2/3 compatibility shims: expose `raw_input` and `xrange` under the
# same names on both interpreter lines.
try:
    raw_input  # Python 2
except NameError:
    raw_input = input  # Python 3
try:
    xrange  # Python 2
except NameError:
    xrange = range  # Python 3
# Accept No. of Nodes and edges
n, m = map(int, raw_input().split(" "))
# Initialising Dictionary of edges
g = {}
for i in xrange(n):
    g[i + 1] = []  # vertices are numbered 1..n
# NOTE(review): all three edge-reading sections below execute one after the
# other when this script runs, so the m edges are prompted for three times in
# different formats - presumably only one section is meant to be active at a
# time; confirm intent before running as-is.
"""
--------------------------------------------------------------------------------
Accepting edges of Unweighted Directed Graphs
--------------------------------------------------------------------------------
"""
for _ in xrange(m):
    x, y = map(int, raw_input().split(" "))
    g[x].append(y)
"""
--------------------------------------------------------------------------------
Accepting edges of Unweighted Undirected Graphs
--------------------------------------------------------------------------------
"""
for _ in xrange(m):
    x, y = map(int, raw_input().split(" "))
    g[x].append(y)
    g[y].append(x)
"""
--------------------------------------------------------------------------------
Accepting edges of Weighted Undirected Graphs
--------------------------------------------------------------------------------
"""
for _ in xrange(m):
    x, y, r = map(int, raw_input().split(" "))
    g[x].append([y, r])
    g[y].append([x, r])
"""
--------------------------------------------------------------------------------
Depth First Search.
Args : G - Dictionary of edges
s - Starting Node
Vars : vis - Set of visited nodes
S - Traversal Stack
--------------------------------------------------------------------------------
"""
def dfs(G, s):
    """Iterative depth-first traversal of ``G`` starting at ``s``; prints
    every vertex the first time it is reached (preorder)."""
    visited = set([s])
    stack = [s]
    print(s)
    while stack:
        # Descend into the first unvisited neighbour of the stack top;
        # when there is none, backtrack by popping.
        for neighbour in G[stack[-1]]:
            if neighbour not in visited:
                visited.add(neighbour)
                stack.append(neighbour)
                print(neighbour)
                break
        else:
            stack.pop()
"""
--------------------------------------------------------------------------------
Breadth First Search.
Args : G - Dictionary of edges
s - Starting Node
Vars : vis - Set of visited nodes
Q - Traveral Stack
--------------------------------------------------------------------------------
"""
from collections import deque
def bfs(G, s):
    """Breadth-first traversal of ``G`` starting at ``s``; prints every
    vertex the first time it is discovered."""
    seen = set([s])
    queue = deque([s])
    print(s)
    while queue:
        current = queue.popleft()
        # Enqueue (and announce) each neighbour not yet discovered.
        for node in [v for v in G[current] if v not in seen]:
            seen.add(node)
            queue.append(node)
            print(node)
"""
--------------------------------------------------------------------------------
Dijkstra's shortest path Algorithm
Args : G - Dictionary of edges
s - Starting Node
Vars : dist - Dictionary storing shortest distance from s to every other node
known - Set of knows nodes
path - Preceding node in path
--------------------------------------------------------------------------------
"""
def dijk(G, s):
    """Dijkstra's single-source shortest paths from ``s`` over ``G`` (an
    adjacency dict of ``[node, weight]`` pairs); prints the distance to each
    reachable node other than ``s``.

    Fixes over the original:
    * uses ``float('inf')`` instead of the magic 100000 sentinel, so graphs
      with large weights are handled correctly;
    * breaks out of the loop when no unknown reachable node remains - the
      original reused a stale ``u`` and spun forever on disconnected graphs.
    """
    dist, known, path = {s: 0}, set(), {s: 0}
    while len(known) < len(G) - 1:
        # Select the closest not-yet-finalized node.
        mini = float('inf')
        u = None
        for node in dist:
            if node not in known and dist[node] < mini:
                mini = dist[node]
                u = node
        if u is None:
            # Remaining vertices are unreachable from s; stop cleanly.
            break
        known.add(u)
        # Relax every edge leaving u.
        for v, weight in G[u]:
            if v not in known and dist[u] + weight < dist.get(v, float('inf')):
                dist[v] = dist[u] + weight
                path[v] = u
    for node in dist:
        if node != s:
            print(dist[node])
"""
--------------------------------------------------------------------------------
Topological Sort
--------------------------------------------------------------------------------
"""
from collections import deque
def topo(G, ind=None, Q=None):
    """Topological sort (Kahn's algorithm, recursive); prints the vertices of
    the 1-based DAG ``G`` in topological order.

    ``ind`` (in-degree table) and ``Q`` (ready queue) are internal recursion
    arguments; callers just invoke ``topo(G)``.

    Fix: the original declared a mutable default ``Q=[1]`` - a plain list that
    is shared across calls and lacks ``popleft()``; it is replaced by ``None``
    and initialized together with ``ind`` on the first call.
    """
    if ind is None:
        # First call: compute in-degrees and seed the queue with the sources.
        ind = [0] * (len(G) + 1)  # index 0 unused; vertices are 1-based
        for u in G:
            for v in G[u]:
                ind[v] += 1
        Q = deque()
        for i in G:
            if ind[i] == 0:
                Q.append(i)
    if len(Q) == 0:
        return
    v = Q.popleft()
    print(v)
    for w in G[v]:
        ind[w] -= 1
        if ind[w] == 0:
            Q.append(w)
    topo(G, ind, Q)
"""
--------------------------------------------------------------------------------
Reading an Adjacency matrix
--------------------------------------------------------------------------------
"""
def adjm():
    """Read an adjacency matrix from stdin: first line is the node count n,
    then n whitespace-separated rows. Returns ``(matrix, n)``.

    Fixes: convert the row count to ``int`` and materialize each row as a
    list - under the Python 3 shims ``raw_input()`` returns a str and
    ``map()`` is lazy, so the original crashed on ``xrange(n)`` and on later
    row indexing in ``floy()``.
    """
    n = int(raw_input())
    a = []
    for _ in range(n):
        a.append(list(map(int, raw_input().split())))
    return a, n
"""
--------------------------------------------------------------------------------
Floyd Warshall's algorithm
Args : G - Dictionary of edges
s - Starting Node
Vars : dist - Dictionary storing shortest distance from s to every other node
known - Set of knows nodes
path - Preceding node in path
--------------------------------------------------------------------------------
"""
def floy(A_and_n):
    """Floyd-Warshall all-pairs shortest paths; prints the final distance
    matrix for the ``(A, n)`` tuple given (n x n weight matrix).

    Fixes over the original:
    * ``list(A)`` only copied the row references, so the caller's matrix was
      mutated in place - rows are now copied element-wise;
    * the intermediate vertex is recorded as ``path[i][j]`` (it was written
      to ``path[i][k]``, which is useless for path reconstruction).
    """
    (A, n) = A_and_n
    dist = [list(row) for row in A]  # deep-enough copy: fresh row lists
    path = [[0] * n for _ in range(n)]
    for k in range(n):
        for i in range(n):
            for j in range(n):
                if dist[i][j] > dist[i][k] + dist[k][j]:
                    dist[i][j] = dist[i][k] + dist[k][j]
                    path[i][j] = k  # i -> j improves by routing through k
    print(dist)
"""
--------------------------------------------------------------------------------
Prim's MST Algorithm
Args : G - Dictionary of edges
s - Starting Node
Vars : dist - Dictionary storing shortest distance from s to nearest node
known - Set of knows nodes
path - Preceding node in path
--------------------------------------------------------------------------------
"""
def prim(G, s):
    """Prim's minimum spanning tree over ``G`` (adjacency dict of
    ``[node, weight]`` pairs), grown from ``s``.

    Returns ``(dist, path)`` where ``dist[v]`` is the weight of the edge
    attaching ``v`` and ``path[v]`` its tree parent - the original computed
    these and then silently discarded them (no return, no print).  Also uses
    ``float('inf')`` instead of the 100000 sentinel and terminates cleanly on
    disconnected graphs (the original reused a stale ``u`` and looped forever).
    """
    dist, known, path = {s: 0}, set(), {s: 0}
    while len(known) < len(G) - 1:
        # Pick the cheapest fringe node not yet in the tree.
        mini = float('inf')
        u = None
        for node in dist:
            if node not in known and dist[node] < mini:
                mini = dist[node]
                u = node
        if u is None:
            break  # remaining vertices are unreachable from s
        known.add(u)
        # Update fringe costs using u's edges.
        for v, weight in G[u]:
            if v not in known and weight < dist.get(v, float('inf')):
                dist[v] = weight
                path[v] = u
    return dist, path
"""
--------------------------------------------------------------------------------
Accepting Edge list
Vars : n - Number of nodes
m - Number of edges
Returns : l - Edge list
n - Number of Nodes
--------------------------------------------------------------------------------
"""
def edglist():
    """Read an edge list from stdin: first line is ``"n m"``, followed by m
    lines of space-separated edge values. Returns ``(edges, n)``.

    Fix: each edge is materialized as a list - ``map()`` is lazy under the
    Python 3 shim, so ``krusk()``'s sorting/indexing of the edges would fail
    with the original code.
    """
    n, m = map(int, raw_input().split(" "))
    edges = []
    for _ in range(m):
        edges.append(list(map(int, raw_input().split(' '))))
    return edges, n
"""
--------------------------------------------------------------------------------
Kruskal's MST Algorithm
Args : E - Edge list
n - Number of Nodes
Vars : s - Set of all nodes as unique disjoint sets (initially)
--------------------------------------------------------------------------------
"""
def krusk(E_and_n):
    """Kruskal's MST demo for ``(edge_list, n)``: repeatedly takes the
    lightest remaining edge and unions the two components it connects,
    printing the forest (list of vertex sets) before each step.

    Fix: the loop now also stops when the edge list is exhausted, so a
    disconnected graph no longer raises ``IndexError`` from ``E.pop()``.
    """
    (E, n) = E_and_n
    # Sort edges on the basis of distance, descending, so pop() yields the
    # cheapest remaining edge.
    E.sort(reverse=True, key=lambda x: x[2])
    s = [set([i]) for i in range(1, n + 1)]
    while len(s) > 1 and E:
        print(s)
        x = E.pop()
        # Locate the components containing each endpoint.
        for i in range(len(s)):
            if x[0] in s[i]:
                break
        for j in range(len(s)):
            if x[1] in s[j]:
                if i == j:
                    break  # same component: the edge would create a cycle
                s[j].update(s[i])
                s.pop(i)
                break
def find_isolated_nodes(graph):
    """Return the nodes of ``graph`` whose adjacency list is empty."""
    return [node for node in graph if not graph[node]]
| 27.323024 | 86 | 0.353415 |
a9fa9643378a63b503aace876cc2c622a5e0de6f | 1,183 | py | Python | setup.py | Computational-Plant-Science/smart | 852f9651555cc24ff3603b5b44937bf07ffe9480 | [
"BSD-3-Clause"
] | 3 | 2020-11-23T18:41:21.000Z | 2020-11-24T22:13:06.000Z | setup.py | Computational-Plant-Science/smart | 852f9651555cc24ff3603b5b44937bf07ffe9480 | [
"BSD-3-Clause"
] | 3 | 2020-11-23T17:03:31.000Z | 2021-04-29T20:07:27.000Z | setup.py | Computational-Plant-Science/spg | 852f9651555cc24ff3603b5b44937bf07ffe9480 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
import setuptools
# Use the README as the long description shown on PyPI.
# Fix: read it with an explicit UTF-8 encoding - without it, the default
# locale encoding is used and installation fails on systems where that is
# not UTF-8 (the package is Python 3 only, so the keyword is safe).
with open("README.md", "r", encoding="utf-8") as fh:
    long_description = fh.read()
setuptools.setup(
    name='smart-arabidopsis-traits',
    version='0.5.2',
    description='Extract geometric traits from top-view images of plants.',
    long_description=long_description,
    long_description_content_type="text/markdown",
    author='Suxing Liu',
    author_email='suxing.liu@uga.edu',
    license='BSD-3-Clause',
    url='https://github.com/Computational-Plant-Science/SMART',
    packages=setuptools.find_packages(),
    include_package_data=True,
    entry_points={
        'console_scripts': [
            'smart = core.cli:cli'
        ]
    },
    python_requires='>=3.6.8',
    install_requires=[
        'click',
        'psutil',
        'numba',
        'pandas',
        'networkx',
        'skan',
        'tabulate',
        'imutils',
        'python-magic',
        'seaborn',
        'openpyxl',
        'opencv-python',
        'matplotlib',
        'scikit-learn',
        'scikit-image',
        'scikit-build',
        'scipy',
        'Pillow==8.4.0'
    ],
    setup_requires=['wheel'],
    tests_require=['pytest', 'coveralls'])
45e4280f02a61bdb05517c152f97a207280e10ec | 17,721 | py | Python | venv/Lib/site-packages/sklearn/impute/tests/test_knn.py | arnoyu-hub/COMP0016miemie | 59af664dcf190eab4f93cefb8471908717415fea | [
"MIT"
] | null | null | null | venv/Lib/site-packages/sklearn/impute/tests/test_knn.py | arnoyu-hub/COMP0016miemie | 59af664dcf190eab4f93cefb8471908717415fea | [
"MIT"
] | null | null | null | venv/Lib/site-packages/sklearn/impute/tests/test_knn.py | arnoyu-hub/COMP0016miemie | 59af664dcf190eab4f93cefb8471908717415fea | [
"MIT"
] | null | null | null | import numpy as np
import pytest
from sklearn import config_context
from sklearn.impute import KNNImputer
from sklearn.metrics.pairwise import nan_euclidean_distances
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.neighbors import KNeighborsRegressor
from sklearn.utils._testing import assert_allclose
@pytest.mark.parametrize("weights", ["uniform", "distance"])
@pytest.mark.parametrize("n_neighbors", range(1, 6))
def test_knn_imputer_shape(weights, n_neighbors):
    # Verify the shapes of the imputed matrix for different weights and
    # number of neighbors.
    n_rows = 10
    n_cols = 2
    X = np.random.rand(n_rows, n_cols)
    X[0, 0] = np.nan  # a single missing entry; no rows/columns may be dropped
    imputer = KNNImputer(n_neighbors=n_neighbors, weights=weights)
    X_imputed = imputer.fit_transform(X)
    assert X_imputed.shape == (n_rows, n_cols)
@pytest.mark.parametrize("na", [np.nan, -1])
def test_knn_imputer_default_with_invalid_input(na):
    # Test imputation with default values and invalid input
    # Test with inf present
    X = np.array(
        [
            [np.inf, 1, 1, 2, na],
            [2, 1, 2, 2, 3],
            [3, 2, 3, 3, 8],
            [na, 6, 0, 5, 13],
            [na, 7, 0, 7, 8],
            [6, 6, 2, 5, 7],
        ]
    )
    with pytest.raises(ValueError, match="Input contains (infinity|NaN)"):
        KNNImputer(missing_values=na).fit(X)
    # Test with inf present in matrix passed in transform()
    X = np.array(
        [
            [np.inf, 1, 1, 2, na],
            [2, 1, 2, 2, 3],
            [3, 2, 3, 3, 8],
            [na, 6, 0, 5, 13],
            [na, 7, 0, 7, 8],
            [6, 6, 2, 5, 7],
        ]
    )
    # Same data as X but with a finite value where X has np.inf, so that
    # fitting succeeds and only transform() sees the invalid input.
    X_fit = np.array(
        [
            [0, 1, 1, 2, na],
            [2, 1, 2, 2, 3],
            [3, 2, 3, 3, 8],
            [na, 6, 0, 5, 13],
            [na, 7, 0, 7, 8],
            [6, 6, 2, 5, 7],
        ]
    )
    imputer = KNNImputer(missing_values=na).fit(X_fit)
    with pytest.raises(ValueError, match="Input contains (infinity|NaN)"):
        imputer.transform(X)
    # negative n_neighbors
    with pytest.raises(ValueError, match="Expected n_neighbors > 0"):
        KNNImputer(missing_values=na, n_neighbors=0).fit(X_fit)
    # Test with missing_values=0 when NaN present
    imputer = KNNImputer(missing_values=0, n_neighbors=2, weights="uniform")
    X = np.array(
        [
            [np.nan, 0, 0, 0, 5],
            [np.nan, 1, 0, np.nan, 3],
            [np.nan, 2, 0, 0, 0],
            [np.nan, 6, 0, 5, 13],
        ]
    )
    msg = (
        r"Input contains NaN, infinity or a value too large for " r"dtype\('float64'\)"
    )
    with pytest.raises(ValueError, match=msg):
        imputer.fit(X)
    X = np.array(
        [
            [0, 0],
            [np.nan, 2],
        ]
    )
    # Test with a metric type without NaN support
    imputer = KNNImputer(metric="euclidean")
    bad_metric_msg = "The selected metric does not support NaN values"
    with pytest.raises(ValueError, match=bad_metric_msg):
        imputer.fit(X)
@pytest.mark.parametrize("na", [np.nan, -1])
def test_knn_imputer_removes_all_na_features(na):
    """A feature that is entirely missing at fit time must be dropped from
    the output - both for the training data and for new data."""
    X = np.array(
        [
            [1, 1, na, 1, 1, 1.0],
            [2, 3, na, 2, 2, 2],
            [3, 4, na, 3, 3, na],
            [6, 4, na, na, 6, 6],
        ]
    )
    knn = KNNImputer(missing_values=na, n_neighbors=2).fit(X)
    X_transform = knn.transform(X)
    assert not np.isnan(X_transform).any()
    assert X_transform.shape == (4, 5)
    X_test = np.arange(0, 12).reshape(2, 6)
    X_transform = knn.transform(X_test)
    # Column 2 (all-missing during fit) is removed even from complete data.
    assert_allclose(X_test[:, [0, 1, 3, 4, 5]], X_transform)
@pytest.mark.parametrize("na", [np.nan, -1])
def test_knn_imputer_zero_nan_imputes_the_same(na):
    # Test with an imputable matrix and compare with different missing_values
    # X_zero and X_nan encode the same missingness pattern with different
    # sentinels (0 vs na); imputation results must be identical.
    X_zero = np.array(
        [
            [1, 0, 1, 1, 1.0],
            [2, 2, 2, 2, 2],
            [3, 3, 3, 3, 0],
            [6, 6, 0, 6, 6],
        ]
    )
    X_nan = np.array(
        [
            [1, na, 1, 1, 1.0],
            [2, 2, 2, 2, 2],
            [3, 3, 3, 3, na],
            [6, 6, na, 6, 6],
        ]
    )
    X_imputed = np.array(
        [
            [1, 2.5, 1, 1, 1.0],
            [2, 2, 2, 2, 2],
            [3, 3, 3, 3, 1.5],
            [6, 6, 2.5, 6, 6],
        ]
    )
    imputer_zero = KNNImputer(missing_values=0, n_neighbors=2, weights="uniform")
    imputer_nan = KNNImputer(missing_values=na, n_neighbors=2, weights="uniform")
    assert_allclose(imputer_zero.fit_transform(X_zero), X_imputed)
    assert_allclose(
        imputer_zero.fit_transform(X_zero), imputer_nan.fit_transform(X_nan)
    )
@pytest.mark.parametrize("na", [np.nan, -1])
def test_knn_imputer_verify(na):
    """End-to-end checks of imputed values: a fully imputable matrix, the
    fallback to the fitted column mean when too few donors exist, and
    transform() on data different from the fit data."""
    # Test with an imputable matrix
    X = np.array(
        [
            [1, 0, 0, 1],
            [2, 1, 2, na],
            [3, 2, 3, na],
            [na, 4, 5, 5],
            [6, na, 6, 7],
            [8, 8, 8, 8],
            [16, 15, 18, 19],
        ]
    )
    X_imputed = np.array(
        [
            [1, 0, 0, 1],
            [2, 1, 2, 8],
            [3, 2, 3, 8],
            [4, 4, 5, 5],
            [6, 3, 6, 7],
            [8, 8, 8, 8],
            [16, 15, 18, 19],
        ]
    )
    imputer = KNNImputer(missing_values=na)
    assert_allclose(imputer.fit_transform(X), X_imputed)
    # Test when there is not enough neighbors
    X = np.array(
        [
            [1, 0, 0, na],
            [2, 1, 2, na],
            [3, 2, 3, na],
            [4, 4, 5, na],
            [6, 7, 6, na],
            [8, 8, 8, na],
            [20, 20, 20, 20],
            [22, 22, 22, 22],
        ]
    )
    # Not enough neighbors, use column mean from training
    X_impute_value = (20 + 22) / 2
    X_imputed = np.array(
        [
            [1, 0, 0, X_impute_value],
            [2, 1, 2, X_impute_value],
            [3, 2, 3, X_impute_value],
            [4, 4, 5, X_impute_value],
            [6, 7, 6, X_impute_value],
            [8, 8, 8, X_impute_value],
            [20, 20, 20, 20],
            [22, 22, 22, 22],
        ]
    )
    imputer = KNNImputer(missing_values=na)
    assert_allclose(imputer.fit_transform(X), X_imputed)
    # Test when data in fit() and transform() are different
    X = np.array([[0, 0], [na, 2], [4, 3], [5, 6], [7, 7], [9, 8], [11, 16]])
    X1 = np.array([[1, 0], [3, 2], [4, na]])
    # Mean of the 5 nearest donors' column-1 values from the fit data.
    X_2_1 = (0 + 3 + 6 + 7 + 8) / 5
    X1_imputed = np.array([[1, 0], [3, 2], [4, X_2_1]])
    imputer = KNNImputer(missing_values=na)
    assert_allclose(imputer.fit(X).transform(X1), X1_imputed)
@pytest.mark.parametrize("na", [np.nan, -1])
def test_knn_imputer_one_n_neighbors(na):
    """With n_neighbors=1 each missing value is copied from its single
    nearest donor."""
    X = np.array([[0, 0], [na, 2], [4, 3], [5, na], [7, 7], [na, 8], [14, 13]])
    X_imputed = np.array([[0, 0], [4, 2], [4, 3], [5, 3], [7, 7], [7, 8], [14, 13]])
    imputer = KNNImputer(n_neighbors=1, missing_values=na)
    assert_allclose(imputer.fit_transform(X), X_imputed)
@pytest.mark.parametrize("na", [np.nan, -1])
def test_knn_imputer_all_samples_are_neighbors(na):
    """Requesting as many neighbors as there are samples must give the same
    result as n_samples - 1 (the donor pool is capped by availability)."""
    X = np.array([[0, 0], [na, 2], [4, 3], [5, na], [7, 7], [na, 8], [14, 13]])
    X_imputed = np.array([[0, 0], [6, 2], [4, 3], [5, 5.5], [7, 7], [6, 8], [14, 13]])
    n_neighbors = X.shape[0] - 1
    imputer = KNNImputer(n_neighbors=n_neighbors, missing_values=na)
    assert_allclose(imputer.fit_transform(X), X_imputed)
    n_neighbors = X.shape[0]
    imputer_plus1 = KNNImputer(n_neighbors=n_neighbors, missing_values=na)
    assert_allclose(imputer_plus1.fit_transform(X), X_imputed)
@pytest.mark.parametrize("na", [np.nan, -1])
def test_knn_imputer_weight_uniform(na):
    """weights="uniform", a callable returning None and a callable returning
    all-ones must all produce the same (unweighted) imputation."""
    X = np.array([[0, 0], [na, 2], [4, 3], [5, 6], [7, 7], [9, 8], [11, 10]])
    # Test with "uniform" weight (or unweighted)
    X_imputed_uniform = np.array(
        [[0, 0], [5, 2], [4, 3], [5, 6], [7, 7], [9, 8], [11, 10]]
    )
    imputer = KNNImputer(weights="uniform", missing_values=na)
    assert_allclose(imputer.fit_transform(X), X_imputed_uniform)
    # Test with "callable" weight
    def no_weight(dist):
        return None
    imputer = KNNImputer(weights=no_weight, missing_values=na)
    assert_allclose(imputer.fit_transform(X), X_imputed_uniform)
    # Test with "callable" uniform weight
    def uniform_weight(dist):
        return np.ones_like(dist)
    imputer = KNNImputer(weights=uniform_weight, missing_values=na)
    assert_allclose(imputer.fit_transform(X), X_imputed_uniform)
@pytest.mark.parametrize("na", [np.nan, -1])
def test_knn_imputer_weight_distance(na):
    """Check distance-weighted imputation against two independent
    references: a manual inverse-distance average and KNeighborsRegressor.
    """
    X = np.array([[0, 0], [na, 2], [4, 3], [5, 6], [7, 7], [9, 8], [11, 10]])

    # Test with "distance" weight
    # Reference 1: KNeighborsRegressor trained on the complete rows,
    # predicting column 0 of the row with the missing value.
    nn = KNeighborsRegressor(metric="euclidean", weights="distance")
    X_rows_idx = [0, 2, 3, 4, 5, 6]
    nn.fit(X[X_rows_idx, 1:], X[X_rows_idx, 0])
    knn_imputed_value = nn.predict(X[1:2, 1:])[0]

    # Manual calculation
    # Reference 2: inverse-distance weighted average over the 5 nearest
    # donors (default n_neighbors=5).
    X_neighbors_idx = [0, 2, 3, 4, 5]
    dist = nan_euclidean_distances(X[1:2, :], X, missing_values=na)
    weights = 1 / dist[:, X_neighbors_idx].ravel()
    manual_imputed_value = np.average(X[X_neighbors_idx, 0], weights=weights)

    X_imputed_distance1 = np.array(
        [[0, 0], [manual_imputed_value, 2], [4, 3], [5, 6], [7, 7], [9, 8], [11, 10]]
    )

    # NearestNeighbor calculation
    X_imputed_distance2 = np.array(
        [[0, 0], [knn_imputed_value, 2], [4, 3], [5, 6], [7, 7], [9, 8], [11, 10]]
    )

    imputer = KNNImputer(weights="distance", missing_values=na)
    assert_allclose(imputer.fit_transform(X), X_imputed_distance1)
    assert_allclose(imputer.fit_transform(X), X_imputed_distance2)

    # Test with weights = "distance" and n_neighbors=2
    X = np.array(
        [
            [na, 0, 0],
            [2, 1, 2],
            [3, 2, 3],
            [4, 5, 5],
        ]
    )

    # neighbors are rows 1, 2, the nan_euclidean_distances are:
    # (the 3/2 factor compensates for the one missing coordinate)
    dist_0_1 = np.sqrt((3 / 2) * ((1 - 0) ** 2 + (2 - 0) ** 2))
    dist_0_2 = np.sqrt((3 / 2) * ((2 - 0) ** 2 + (3 - 0) ** 2))
    imputed_value = np.average([2, 3], weights=[1 / dist_0_1, 1 / dist_0_2])

    X_imputed = np.array(
        [
            [imputed_value, 0, 0],
            [2, 1, 2],
            [3, 2, 3],
            [4, 5, 5],
        ]
    )

    imputer = KNNImputer(n_neighbors=2, weights="distance", missing_values=na)
    assert_allclose(imputer.fit_transform(X), X_imputed)

    # Test with varying missingness patterns
    X = np.array(
        [
            [1, 0, 0, 1],
            [0, na, 1, na],
            [1, 1, 1, na],
            [0, 1, 0, 0],
            [0, 0, 0, 0],
            [1, 0, 1, 1],
            [10, 10, 10, 10],
        ]
    )

    # Get weights of donor neighbors
    dist = nan_euclidean_distances(X, missing_values=na)
    r1c1_nbor_dists = dist[1, [0, 2, 3, 4, 5]]
    r1c3_nbor_dists = dist[1, [0, 3, 4, 5, 6]]
    r1c1_nbor_wt = 1 / r1c1_nbor_dists
    r1c3_nbor_wt = 1 / r1c3_nbor_dists

    r2c3_nbor_dists = dist[2, [0, 3, 4, 5, 6]]
    r2c3_nbor_wt = 1 / r2c3_nbor_dists

    # Collect donor values
    # (masked arrays so that donors missing the target feature drop out)
    col1_donor_values = np.ma.masked_invalid(X[[0, 2, 3, 4, 5], 1]).copy()
    col3_donor_values = np.ma.masked_invalid(X[[0, 3, 4, 5, 6], 3]).copy()

    # Final imputed values
    r1c1_imp = np.ma.average(col1_donor_values, weights=r1c1_nbor_wt)
    r1c3_imp = np.ma.average(col3_donor_values, weights=r1c3_nbor_wt)
    r2c3_imp = np.ma.average(col3_donor_values, weights=r2c3_nbor_wt)

    X_imputed = np.array(
        [
            [1, 0, 0, 1],
            [0, r1c1_imp, 1, r1c3_imp],
            [1, 1, 1, r2c3_imp],
            [0, 1, 0, 0],
            [0, 0, 0, 0],
            [1, 0, 1, 1],
            [10, 10, 10, 10],
        ]
    )

    imputer = KNNImputer(weights="distance", missing_values=na)
    assert_allclose(imputer.fit_transform(X), X_imputed)

    # Larger, mostly-complete matrix; each missing cell is imputed from
    # its own neighbor slice of the pairwise distance matrix.
    X = np.array(
        [
            [0, 0, 0, na],
            [1, 1, 1, na],
            [2, 2, na, 2],
            [3, 3, 3, 3],
            [4, 4, 4, 4],
            [5, 5, 5, 5],
            [6, 6, 6, 6],
            [na, 7, 7, 7],
        ]
    )
    dist = pairwise_distances(
        X, metric="nan_euclidean", squared=False, missing_values=na
    )

    # Calculate weights
    r0c3_w = 1.0 / dist[0, 2:-1]
    r1c3_w = 1.0 / dist[1, 2:-1]
    r2c2_w = 1.0 / dist[2, (0, 1, 3, 4, 5)]
    r7c0_w = 1.0 / dist[7, 2:7]

    # Calculate weighted averages
    r0c3 = np.average(X[2:-1, -1], weights=r0c3_w)
    r1c3 = np.average(X[2:-1, -1], weights=r1c3_w)
    r2c2 = np.average(X[(0, 1, 3, 4, 5), 2], weights=r2c2_w)
    r7c0 = np.average(X[2:7, 0], weights=r7c0_w)

    X_imputed = np.array(
        [
            [0, 0, 0, r0c3],
            [1, 1, 1, r1c3],
            [2, 2, r2c2, 2],
            [3, 3, 3, 3],
            [4, 4, 4, 4],
            [5, 5, 5, 5],
            [6, 6, 6, 6],
            [r7c0, 7, 7, 7],
        ]
    )

    imputer_comp_wt = KNNImputer(missing_values=na, weights="distance")
    assert_allclose(imputer_comp_wt.fit_transform(X), X_imputed)
def test_knn_imputer_callable_metric():
    """A user-supplied callable metric must be used for neighbor search."""

    # Define callable metric that returns the l1 norm:
    def custom_callable(x, y, missing_values=np.nan, squared=False):
        x = np.ma.array(x, mask=np.isnan(x))
        y = np.ma.array(y, mask=np.isnan(y))
        dist = np.nansum(np.abs(x - y))
        return dist

    X = np.array([[4, 3, 3, np.nan], [6, 9, 6, 9], [4, 8, 6, 9], [np.nan, 9, 11, 10.0]])

    # Expected imputations: mean of the two l1-nearest donors per cell.
    X_0_3 = (9 + 9) / 2
    X_3_0 = (6 + 4) / 2
    X_imputed = np.array(
        [[4, 3, 3, X_0_3], [6, 9, 6, 9], [4, 8, 6, 9], [X_3_0, 9, 11, 10.0]]
    )

    imputer = KNNImputer(n_neighbors=2, metric=custom_callable)
    assert_allclose(imputer.fit_transform(X), X_imputed)
@pytest.mark.parametrize("working_memory", [None, 0])
@pytest.mark.parametrize("na", [-1, np.nan])
# Note that we use working_memory=0 to ensure that chunking is tested, even
# for a small dataset. However, it should raise a UserWarning that we ignore.
@pytest.mark.filterwarnings("ignore:adhere to working_memory")
def test_knn_imputer_with_simple_example(na, working_memory):
    """Uniform-weight imputation equals the plain mean of the 5 default
    nearest donors, regardless of the working_memory chunking setting."""
    X = np.array(
        [
            [0, na, 0, na],
            [1, 1, 1, na],
            [2, 2, na, 2],
            [3, 3, 3, 3],
            [4, 4, 4, 4],
            [5, 5, 5, 5],
            [6, 6, 6, 6],
            [na, 7, 7, 7],
        ]
    )

    # Hand-computed means over each cell's donor rows.
    r0c1 = np.mean(X[1:6, 1])
    r0c3 = np.mean(X[2:-1, -1])
    r1c3 = np.mean(X[2:-1, -1])
    r2c2 = np.mean(X[[0, 1, 3, 4, 5], 2])
    r7c0 = np.mean(X[2:-1, 0])

    X_imputed = np.array(
        [
            [0, r0c1, 0, r0c3],
            [1, 1, 1, r1c3],
            [2, 2, r2c2, 2],
            [3, 3, 3, 3],
            [4, 4, 4, 4],
            [5, 5, 5, 5],
            [6, 6, 6, 6],
            [r7c0, 7, 7, 7],
        ]
    )

    with config_context(working_memory=working_memory):
        imputer_comp = KNNImputer(missing_values=na)
        assert_allclose(imputer_comp.fit_transform(X), X_imputed)
@pytest.mark.parametrize("na", [-1, np.nan])
@pytest.mark.parametrize("weights", ["uniform", "distance"])
def test_knn_imputer_not_enough_valid_distances(na, weights):
    """When no donor has a valid distance, fall back to the column mean."""
    # Samples with needed feature has nan distance
    X1 = np.array([[na, 11], [na, 1], [3, na]])
    X1_imputed = np.array([[3, 11], [3, 1], [3, 6]])

    knn = KNNImputer(missing_values=na, n_neighbors=1, weights=weights)
    assert_allclose(knn.fit_transform(X1), X1_imputed)

    # transform() on new data uses the statistics learned at fit time.
    X2 = np.array([[4, na]])
    X2_imputed = np.array([[4, 6]])
    assert_allclose(knn.transform(X2), X2_imputed)
@pytest.mark.parametrize("na", [-1, np.nan])
def test_knn_imputer_drops_all_nan_features(na):
    """A feature that is entirely missing at fit time is dropped from the
    output, both in fit_transform and in later transform calls."""
    X1 = np.array([[na, 1], [na, 2]])
    knn = KNNImputer(missing_values=na, n_neighbors=1)
    # Column 0 is all-missing, so only column 1 survives.
    X1_expected = np.array([[1], [2]])
    assert_allclose(knn.fit_transform(X1), X1_expected)

    # transform drops the same column even if it has values now.
    X2 = np.array([[1, 2], [3, na]])
    X2_expected = np.array([[2], [1.5]])
    assert_allclose(knn.transform(X2), X2_expected)
@pytest.mark.parametrize("working_memory", [None, 0])
@pytest.mark.parametrize("na", [-1, np.nan])
def test_knn_imputer_distance_weighted_not_enough_neighbors(na, working_memory):
    """With fewer valid donors than n_neighbors, distance weighting must
    use whatever donors exist (rows 3 and 4 here) for every missing cell."""
    X = np.array([[3, na], [2, na], [na, 4], [5, 6], [6, 8], [na, 5]])

    dist = pairwise_distances(
        X, metric="nan_euclidean", squared=False, missing_values=na
    )

    # Only rows 3 and 4 are complete, so they are the donors everywhere.
    X_01 = np.average(X[3:5, 1], weights=1 / dist[0, 3:5])
    X_11 = np.average(X[3:5, 1], weights=1 / dist[1, 3:5])
    X_20 = np.average(X[3:5, 0], weights=1 / dist[2, 3:5])
    X_50 = np.average(X[3:5, 0], weights=1 / dist[5, 3:5])

    X_expected = np.array([[3, X_01], [2, X_11], [X_20, 4], [5, 6], [6, 8], [X_50, 5]])

    with config_context(working_memory=working_memory):
        # n_neighbors=3 and 4 both exceed the 2 available donors, so the
        # result must be identical.
        knn_3 = KNNImputer(missing_values=na, n_neighbors=3, weights="distance")
        assert_allclose(knn_3.fit_transform(X), X_expected)

        knn_4 = KNNImputer(missing_values=na, n_neighbors=4, weights="distance")
        assert_allclose(knn_4.fit_transform(X), X_expected)
@pytest.mark.parametrize("na, allow_nan", [(-1, False), (np.nan, True)])
def test_knn_tags(na, allow_nan):
    """The estimator advertises nan support only when np.nan is the
    configured missing-value marker."""
    tags = KNNImputer(missing_values=na)._get_tags()
    assert tags["allow_nan"] == allow_nan
| 31.364602 | 89 | 0.526776 |
4dbcdafc75999b826871b0bee9623aed58f2f3a0 | 11,804 | py | Python | scripts/scriptmgr.py | pgq/skytools | 8b7e6c118572a605d28b7a3403c96aeecfd0d272 | [
"0BSD"
] | 23 | 2017-01-18T03:33:56.000Z | 2021-02-15T18:51:23.000Z | scripts/scriptmgr.py | pgq/skytools | 8b7e6c118572a605d28b7a3403c96aeecfd0d272 | [
"0BSD"
] | 2 | 2017-08-29T00:05:24.000Z | 2018-07-23T13:33:52.000Z | scripts/scriptmgr.py | pgq/skytools | 8b7e6c118572a605d28b7a3403c96aeecfd0d272 | [
"0BSD"
] | 12 | 2016-11-24T10:17:00.000Z | 2020-06-30T01:18:32.000Z | #! /usr/bin/env python
"""Bulk start/stop of scripts.
Reads a bunch of config files and maps them to scripts, then handles those.
Config template:
[scriptmgr]
job_name = scriptmgr_cphdb5
config_list = ~/random/conf/*.ini
logfile = ~/log/%(job_name)s.log
pidfile = ~/pid/%(job_name)s.pid
#use_skylog = 1
# defaults for services
[DEFAULT]
cwd = ~/
args = -v
# service descriptions
[cube_dispatcher]
script = cube_dispatcher.py
[table_dispatcher]
script = table_dispatcher.py
[bulk_loader]
script = bulk_loader.py
[londiste]
script = londiste.py
args = replay
[pgqadm]
script = pgqadm.py
args = ticker
# services to be ignored
[log_checker]
disabled = 1
"""
import sys, os, signal, glob, ConfigParser, time
import pkgloader
pkgloader.require('skytools', '3.0')
import skytools
try:
import pwd
except ImportError:
pwd = None
command_usage = """
%prog [options] INI CMD [subcmd args]
Commands:
start -a | -t=service | jobname [...] start job(s)
stop -a | -t=service | jobname [...] stop job(s)
restart -a | -t=service | jobname [...] restart job(s)
reload -a | -t=service | jobname [...] send reload signal
status [-a | -t=service | jobname ...]
"""
def job_sort_cmp(j1, j2):
    """cmp-style comparator ordering jobs by (service, job_name)."""
    key1 = j1['service'] + j1['job_name']
    key2 = j2['service'] + j2['job_name']
    if key1 == key2:
        return 0
    return -1 if key1 < key2 else 1
def launch_cmd(job, cmd):
    """Run a shell command for a job, returning the os.system() status.

    When the job is configured with a target user, the command is wrapped
    in sudo: -n avoids a password prompt, -H sets $HOME to the target
    user's home directory.
    """
    if job['user']:
        cmd = 'sudo -nH -u "%s" %s' % (job['user'], cmd)
    return os.system(cmd)
def full_path(job, fn):
    """Like os.path.expanduser() but works for other users.

    Expands a leading '~' or '~user' using the named user's home
    directory (falling back to the job's configured user, then $HOME,
    then the current uid's passwd entry).  Any other relative path is
    resolved against the job's working directory.  Empty/None input is
    returned unchanged.
    """
    if not fn:
        return fn
    if fn[0] == '~':
        # Split '~user/rest' into its user and trailing-path parts.
        if fn.find('/') > 0:
            user, rest = fn.split('/', 1)
        else:
            user = fn
            rest = ''

        user = user[1:]
        if not user:
            user = job['user']

        # find home
        if user:
            home = pwd.getpwnam(user).pw_dir
        elif 'HOME' in os.environ:
            home = os.environ['HOME']
        else:
            # BUGFIX: was os.pwd.getpwuid() - 'os' has no 'pwd' attribute,
            # which raised AttributeError whenever $HOME was unset.
            home = pwd.getpwuid(os.getuid()).pw_dir

        if rest:
            return os.path.join(home, rest)
        else:
            return home

    # always return full path
    return os.path.join(job['cwd'], fn)
# Bulk manager for skytools daemon scripts: maps config files to service
# definitions and implements start/stop/restart/reload/status commands.
class ScriptMgr(skytools.DBScript):
    __doc__ = __doc__  # reuse the module docstring as the script help text
    # Class-level defaults; work() resets the job containers per run.
    svc_list = []       # list of service description dicts
    svc_map = {}        # service name -> service dict
    config_list = []    # expanded list of job config file paths
    job_map = {}        # job_name -> job dict
    job_list = []       # all discovered job dicts

    def init_optparse(self, p = None):
        """Extend the DBScript option parser with job-selection flags."""
        p = skytools.DBScript.init_optparse(self, p)
        p.add_option("-a", "--all", action="store_true", help="apply command to all jobs")
        p.add_option("-t", "--type", action="store", metavar="SVC", help="apply command to all jobs of this service type")
        p.add_option("-w", "--wait", action="store_true", help="wait for job(s) after signaling")
        p.set_usage(command_usage.strip())
        return p

    def load_jobs(self):
        """Read service sections from our own config, then scan the
        config_list globs and register every job whose config contains a
        known service section."""
        self.svc_list = []
        self.svc_map = {}
        self.config_list = []

        # load services
        svc_list = self.cf.sections()
        svc_list.remove(self.service_name)
        with_user = 0
        without_user = 0
        for svc_name in svc_list:
            cf = self.cf.clone(svc_name)
            disabled = cf.getboolean('disabled', 0)
            defscript = None
            if disabled:
                # placeholder path; disabled services are never launched
                defscript = '/disabled'
            svc = {
                'service': svc_name,
                'script': cf.getfile('script', defscript),
                'cwd': cf.getfile('cwd'),
                'disabled': disabled,
                'args': cf.get('args', ''),
                'user': cf.get('user', ''),
            }
            if svc['user']:
                with_user += 1
            else:
                without_user += 1
            self.svc_list.append(svc)
            self.svc_map[svc_name] = svc

        # sudo-based launching only works if it is used consistently
        if with_user and without_user:
            raise skytools.UsageError("Invalid config - some jobs have user=, some don't")

        # generate config list
        for tmp in self.cf.getlist('config_list'):
            tmp = os.path.expanduser(tmp)
            tmp = os.path.expandvars(tmp)
            for fn in glob.glob(tmp):
                self.config_list.append(fn)

        # read jobs
        for fn in self.config_list:
            raw = ConfigParser.SafeConfigParser({'job_name':'?', 'service_name':'?'})
            raw.read(fn)

            # skip its own config
            if raw.has_section(self.service_name):
                continue

            got = 0
            for sect in raw.sections():
                if sect in self.svc_map:
                    got = 1
                    self.add_job(fn, sect)

            if not got:
                self.log.warning('Cannot find service for %s', fn)

    def add_job(self, cf_file, service_name):
        """Register a single job from config file cf_file for the given
        service, merging service defaults with the job's own settings."""
        svc = self.svc_map[service_name]
        cf = skytools.Config(service_name, cf_file)
        disabled = svc['disabled']
        if not disabled:
            # a job can also be disabled individually
            disabled = cf.getboolean('disabled', 0)
        job = {
            'disabled': disabled,
            'config': cf_file,
            'cwd': svc['cwd'],
            'script': svc['script'],
            'args': svc['args'],
            'user': svc['user'],
            'service': svc['service'],
            'job_name': cf.get('job_name'),
            'pidfile': cf.get('pidfile', ''),
        }
        if job['pidfile']:
            # resolve ~ / relative pidfile paths for the job's user
            job['pidfile'] = full_path(job, job['pidfile'])
        self.job_list.append(job)
        self.job_map[job['job_name']] = job

    def cmd_status (self, jobs):
        """Print one status line per job; return the count of jobs that
        appear to be stopped."""
        err = 0
        for jn in jobs:
            try:
                job = self.job_map[jn]
            except KeyError:
                self.log.error ("Unknown job: %s", jn)
                continue
            pidfile = job['pidfile']
            name = job['job_name']
            svc = job['service']
            if job['disabled']:
                name += " (disabled)"

            if not pidfile:
                print(" pidfile? [%s] %s" % (svc, name))
            elif os.path.isfile(pidfile) and skytools.signal_pidfile(pidfile, 0):
                # signal 0 only probes whether the pid is alive
                print(" OK [%s] %s" % (svc, name))
            else:
                err += 1
                print(" STOPPED [%s] %s" % (svc, name))
        return err

    def cmd_info (self, jobs):
        """Dump the raw job dicts for the given job names."""
        for jn in jobs:
            try:
                job = self.job_map[jn]
            except KeyError:
                self.log.error ("Unknown job: %s", jn)
                continue
            print(job)

    def cmd_start(self, job_name):
        """Launch one job as a daemon; return 0 on success, 1 on failure."""
        job = self.get_job_by_name (job_name)
        if isinstance (job, int):
            return job # ret.code
        self.log.info('Starting %s', job_name)
        pidfile = job['pidfile']
        if not pidfile:
            self.log.warning("No pidfile for %s, cannot launch", job_name)
            return 0
        if os.path.isfile(pidfile):
            if skytools.signal_pidfile(pidfile, 0):
                self.log.warning("Script %s seems running", job_name)
                return 0
            else:
                self.log.info("Ignoring stale pidfile for %s", job_name)
        os.chdir(job['cwd'])
        cmd = "%(script)s %(config)s %(args)s -d" % job
        res = launch_cmd(job, cmd)
        self.log.debug(res)
        if res != 0:
            self.log.error('startup failed: %s', job_name)
            return 1
        else:
            return 0

    def cmd_stop(self, job_name):
        """Ask one job to stop by sending SIGINT."""
        job = self.get_job_by_name (job_name)
        if isinstance (job, int):
            return job # ret.code
        self.log.info('Stopping %s', job_name)
        self.signal_job(job, signal.SIGINT)

    def cmd_reload(self, job_name):
        """Ask one job to reload its config by sending SIGHUP."""
        job = self.get_job_by_name (job_name)
        if isinstance (job, int):
            return job # ret.code
        self.log.info('Reloading %s', job_name)
        self.signal_job(job, signal.SIGHUP)

    def get_job_by_name (self, job_name):
        """Return the job dict, or an int exit code: 1 for unknown job,
        0 for a disabled one (callers detect ints via isinstance)."""
        if job_name not in self.job_map:
            self.log.error ("Unknown job: %s", job_name)
            return 1
        job = self.job_map[job_name]
        if job['disabled']:
            self.log.info ("Skipping %s", job_name)
            return 0
        return job

    def wait_for_stop (self, job_name):
        """Poll the job's pidfile until the process is gone."""
        job = self.get_job_by_name (job_name)
        if isinstance (job, int):
            return job # ret.code
        msg = False
        while True:
            # signal 0 = existence probe; truthy while the pid is alive
            if skytools.signal_pidfile (job['pidfile'], 0):
                if not msg:
                    self.log.info ("Waiting for %s to stop", job_name)
                    msg = True
                time.sleep (0.1)
            else:
                return 0

    def signal_job(self, job, sig):
        """Deliver a signal to the job's process, via sudo when the job
        runs as a different user."""
        pidfile = job['pidfile']
        if not pidfile:
            self.log.warning("No pidfile for %s (%s)", job['job_name'], job['config'])
            return
        if os.path.isfile(pidfile):
            pid = int(open(pidfile).read())
            if job['user']:
                # run sudo + kill to avoid killing unrelated processes
                res = os.system("sudo -u %s kill %d" % (job['user'], pid))
                if res:
                    self.log.warning("Signaling %s failed", job['job_name'])
            else:
                # direct kill
                try:
                    os.kill(pid, sig)
                except Exception, det:
                    self.log.warning("Signaling %s failed: %s", job['job_name'], det)
        else:
            self.log.warning("Job %s not running", job['job_name'])

    def work(self):
        """Entry point: parse the command word and job selection from the
        positional args and dispatch to the cmd_* handlers."""
        self.set_single_loop(1)
        self.job_list = []
        self.job_map = {}
        self.load_jobs()
        self.job_list.sort(job_sort_cmp)

        if len(self.args) < 2:
            print("need command")
            sys.exit(1)

        cmd = self.args[1]
        jobs = self.args[2:]

        # bare "status"/"info" implies --all
        if cmd in ["status", "info"] and len(jobs) == 0 and not self.options.type:
            self.options.all = True

        if len(jobs) == 0 and self.options.all:
            for job in self.job_list:
                jobs.append(job['job_name'])

        if len(jobs) == 0 and self.options.type:
            for job in self.job_list:
                if job['service'] == self.options.type:
                    jobs.append(job['job_name'])

        if cmd == "status":
            err = self.cmd_status(jobs)
            if err > 0:
                self.log.error('some scripts are not running')
                sys.exit(1)
            return
        elif cmd == "info":
            self.cmd_info(jobs)
            return

        if len(jobs) == 0:
            print("no jobs given?")
            sys.exit(1)

        if cmd == "start":
            err = 0
            for n in jobs:
                err += self.cmd_start(n)
            if err > 0:
                self.log.error('some scripts failed')
                sys.exit(1)
        elif cmd == "stop":
            for n in jobs:
                self.cmd_stop(n)
            if self.options.wait:
                for n in jobs:
                    self.wait_for_stop(n)
        elif cmd == "restart":
            for n in jobs:
                self.cmd_stop(n)
            if self.options.wait:
                for n in jobs:
                    self.wait_for_stop(n)
            else:
                # give the old processes a moment to exit
                time.sleep(2)
            for n in jobs:
                self.cmd_start(n)
        elif cmd == "reload":
            for n in jobs:
                self.cmd_reload(n)
        else:
            print("unknown command: " + cmd)
            sys.exit(1)
# Script entry point: hand all CLI arguments to the manager and run it.
if __name__ == '__main__':
    script = ScriptMgr('scriptmgr', sys.argv[1:])
    script.start()
| 29.808081 | 122 | 0.503643 |
98b8df0ae930b976c438af445dd16e42fa6d5927 | 3,254 | py | Python | ftpsyncprogress.py | ashucg/SublimeText2-FTPSync | 5893073bf081a0c7d51dff26fac77f2163d8d71f | [
"MIT"
] | 178 | 2015-01-07T12:25:48.000Z | 2022-03-02T18:27:34.000Z | ftpsyncprogress.py | ashucg/SublimeText2-FTPSync | 5893073bf081a0c7d51dff26fac77f2163d8d71f | [
"MIT"
] | 106 | 2015-01-06T12:39:24.000Z | 2021-07-20T04:16:09.000Z | ftpsyncprogress.py | ashucg/SublimeText2-FTPSync | 5893073bf081a0c7d51dff26fac77f2163d8d71f | [
"MIT"
] | 47 | 2015-01-05T11:26:32.000Z | 2022-01-08T10:21:52.000Z | # -*- coding: utf-8 -*-
# Copyright (c) 2012 Jiri "NoxArt" Petruzelka
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
# @author Jiri "NoxArt" Petruzelka | petruzelka@noxart.cz | @NoxArt
# @copyright (c) 2012 Jiri "NoxArt" Petruzelka
# @link https://github.com/NoxArt/SublimeText2-FTPSync
# ==== Libraries ===========================================================================
# Python's built-in libraries
import math
# ==== Content =============================================================================
# Class implementing logic for progress bar
class Progress:
def __init__(self, current=0):
self.current = 0
self.entries = []
# Add unfinished entries to progress bar
#
# @type self: Progress
# @type entries: list
# @param entries: list of unfinished entries, usually strings
def add(self, entries):
for entry in entries:
if entry not in self.entries:
self.entries.append(entry)
# Return number of items in the progress
#
# @type self: Progress
#
# @return int
def getTotal(self):
return len(self.entries)
# Marks a certain number of entries as finished
#
# @type self: Progress
# @type by: integer
# @param by: number of finished items
def progress(self, by=1):
self.current += int(by)
if self.current > self.getTotal():
self.current = self.getTotal()
# Returns whether the process has been finished
#
# @type self: Progress
#
# @return bool
def isFinished(self):
return self.current >= self.getTotal()
# Get percentage of the progress bar, maybe rounded, see @return
#
# @type self: Progress
# @type division: integer
# @param division: rounding amount
#
# @return integer between 0 and 100 / division
def getPercent(self, division=5):
if division is 0:
division = 1
total = self.getTotal()
if total is 0:
total = self.current
if total is 0:
total = 1
percent = int(math.ceil(float(self.current) / float(total) * 100))
percent = math.ceil(percent / division)
return percent | 30.990476 | 92 | 0.632145 |
3b4edc6c1e78c3b6a4e19cd5f78516443ed0801c | 971 | py | Python | Python/statistics_with_Python/04_Exploring_Data_with_Graphs/Script_Files/07_linegraphs.py | snehilk1312/AppliedStatistics | 0e2b9ca45b004f38f796fa6506270382ca3c95a0 | [
"MIT"
] | null | null | null | Python/statistics_with_Python/04_Exploring_Data_with_Graphs/Script_Files/07_linegraphs.py | snehilk1312/AppliedStatistics | 0e2b9ca45b004f38f796fa6506270382ca3c95a0 | [
"MIT"
] | null | null | null | Python/statistics_with_Python/04_Exploring_Data_with_Graphs/Script_Files/07_linegraphs.py | snehilk1312/AppliedStatistics | 0e2b9ca45b004f38f796fa6506270382ca3c95a0 | [
"MIT"
] | null | null | null | import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
data = pd.read_csv('/home/atrides/Desktop/R/statistics_with_Python/04_Exploring_Data_with_Graphs/Data_Files/Hiccups.dat', sep='\s+')
print(data.head())
print(data.describe())
data['id'] = data.index
df = pd.melt(data, id_vars='id', value_vars=['Baseline', 'Tongue', 'Carotid', 'Rectum'], var_name='Intervention', value_name='Hiccups')
df.drop(['id'], axis=1, inplace=True)
print(df.head())
_ = sns.pointplot(x='Intervention', y='Hiccups', data=df,linestyles=['--'])
plt.show()
# When there are several independent variables
da = pd.read_csv('/home/atrides/Desktop/R/statistics_with_Python/04_Exploring_Data_with_Graphs/Data_Files/TextMessages.dat', sep='\t')
print(da.head())
da_ = pd.melt(da, id_vars='Group', value_vars=['Baseline', 'Six_months'], var_name='time', value_name='num_msgs')
print(da_.tail())
_ = sns.pointplot(x='time', y='num_msgs', data=da_, hue='Group')
plt.show()
| 29.424242 | 135 | 0.725026 |
5ea853db43cb8dc6f4f374efa5f8688addf6774c | 24,543 | py | Python | src/olympia/search/tests/test_search_ranking.py | fdintino/nginxconf-2018-mozilla-addons-server | 01643e0c8e24ae32a4de33ee99c0119e388a394b | [
"BSD-3-Clause"
] | null | null | null | src/olympia/search/tests/test_search_ranking.py | fdintino/nginxconf-2018-mozilla-addons-server | 01643e0c8e24ae32a4de33ee99c0119e388a394b | [
"BSD-3-Clause"
] | null | null | null | src/olympia/search/tests/test_search_ranking.py | fdintino/nginxconf-2018-mozilla-addons-server | 01643e0c8e24ae32a4de33ee99c0119e388a394b | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
import json
from olympia import amo
from olympia.amo.tests import (
APITestClient, ESTestCase, reverse_ns, create_switch)
class TestRankingScenarios(ESTestCase):
client_class = APITestClient
    def _check_scenario(self, query, expected, no_match=None):
        """Run a search for `query` against the addon-search API and check
        that the first results match `expected`, an ordered sequence of
        (name, score) pairs.  Names listed in `no_match` must be absent
        from the results entirely.
        """
        # Make sure things are properly flushed and searchable
        url = reverse_ns('addon-search')

        response = self.client.get(url, {'q': query})
        assert response.status_code == 200
        results = json.loads(response.content)['results']

        # We only check for greater or equal since we usually don't care
        # about what else ElasticSearch finds magically for any query.
        # We're mostly concerned about the first few results to check
        # our general ranking. In real-world the rest that follows matches
        # the general scoring idea.
        assert len(results) >= len(expected), (
            'Expected {} results but {} found for query "{}": {}'.format(
                len(expected), len(results), query,
                [x['name']['en-US'] for x in results]
            )
        )

        # Compare position, name and exact score for each expected hit.
        for idx, addon in enumerate(expected):
            expected_name = addon[0]
            expected_score = addon[1]
            found_name = results[idx]['name']['en-US']
            found_score = results[idx]['_score']

            assert found_name == expected_name, (
                'Expected "{}" to be on position {} with score {} but '
                '"{}" was found instead with score {} for query {}'
                .format(expected_name, idx, expected_score,
                        found_name, found_score, query)
            )

            assert found_score == expected_score, (
                'Expected "{}" to be on position {} with score {} but '
                '"{}" was found instead with score {} for query {}'
                .format(expected_name, idx, expected_score,
                        found_name, found_score, query)
            )

        if no_match is not None:
            for name in no_match:
                names = [item['name']['en-US'] for item in results]
                assert name not in names, (
                    'Expected "{}" not to exist in results for query {}'
                    .format(name, query)
                )
@classmethod
def setUpTestData(cls):
# For simplicity reasons, let's simply use the new algorithm
# we're most certainly going to put live anyway
# Also, this needs to be created before `setUpTestData`
# since we need that setting on index-creation time.
create_switch('es-use-classic-similarity')
super(TestRankingScenarios, cls).setUpTestData()
# Shouldn't be necessary, but just in case.
cls.empty_index('default')
# This data was taken from our production add-ons to test
# a few search scenarios. (2018-01-25)
# Note that it's important to set average_daily_users in every case,
# because it affects the ranking score and otherwise addon_factory()
# sets a random value.
amo.tests.addon_factory(
average_daily_users=18981,
description=None,
name='Tab Center Redux',
slug=u'tab-center-redux',
summary='Move your tabs to the side of your browser window.',
weekly_downloads=915)
amo.tests.addon_factory(
average_daily_users=468126,
description=None,
name='Tab Mix Plus',
slug=u'tab-mix-plus',
summary=(
'Tab Mix Plus enhances Firefox\'s tab browsing capabilities. '
'It includes such features as duplicating tabs, controlling '
'tab focus, tab clicking options, undo closed tabs and '
'windows, plus much more. It also includes a full-featured '
'session manager.'),
weekly_downloads=3985)
amo.tests.addon_factory(
average_daily_users=8838,
description=None,
name='Redux DevTools',
slug=u'remotedev',
summary=(
'DevTools for Redux with actions history, undo and replay.'),
weekly_downloads=1032)
amo.tests.addon_factory(
average_daily_users=482,
description=None,
name='Open Image in New Tab',
slug=u'open-image-new-tab',
summary='Adds a context menu to open images in a new tab.',
weekly_downloads=158)
amo.tests.addon_factory(
average_daily_users=2607,
description=None,
name='Open image in a new tab',
slug=u'open-image-in-a-new-tab',
summary='A context menu to open images in a new tab',
weekly_downloads=329)
amo.tests.addon_factory(
average_daily_users=27832,
description=None,
name='Open Bookmarks in New Tab',
slug=u'open-bookmarks-in-new-tab',
summary=(
'After you installed this addon to your Firefox, bookmarks '
'are opened in new tab always.'),
weekly_downloads=145)
amo.tests.addon_factory(
average_daily_users=528,
description=None,
name='Coinhive Blocker',
slug=u'coinhive-blocker',
summary='Coinhive mining blocker',
weekly_downloads=132)
amo.tests.addon_factory(
average_daily_users=3015,
description=None,
name='CoinBlock',
slug=u'coinblock',
summary=(
'With the rising popularity of coinminers in js form, this '
'extension attempts to block those hosted on coin-hive, and '
'cryptoloot.\nA multiple entry block list is planned.'),
weekly_downloads=658)
amo.tests.addon_factory(
average_daily_users=418,
description=None,
name='NoMiners',
slug=u'nominers',
summary=(
'NoMiners is an Add-on that tries to block cryptominers such '
'as coinhive.\n\nBlocking those pesky miner scripts will '
'relieve your CPU and BATTERY while browsing the web.'
'\n\nIt\'s open source, so feel free to check out the code '
'and submit improvements.'),
weekly_downloads=71)
amo.tests.addon_factory(
average_daily_users=399485,
description=None,
name='Privacy Badger',
slug=u'privacy-badger17',
summary=(
'Protects your privacy by blocking spying ads and invisible '
'trackers.'),
weekly_downloads=22931)
amo.tests.addon_factory(
average_daily_users=8728,
description=None,
name='Privacy Pass',
slug=u'privacy-pass',
summary=(
'Handles passes containing cryptographically blinded tokens '
'for bypassing challenge pages.'),
weekly_downloads=4599)
amo.tests.addon_factory(
average_daily_users=15406,
description=None,
name='Privacy Settings',
slug=u'privacy-settings',
summary=(
'Alter Firefox\'s built-in privacy settings easily with a '
'toolbar panel.'),
weekly_downloads=1492)
amo.tests.addon_factory(
average_daily_users=12857,
description=None,
name='Google Privacy',
slug=u'google-privacy',
summary=(
'Make some popular websites respect your privacy settings.\n'
'Please see the known issues below!'),
weekly_downloads=117)
amo.tests.addon_factory(
average_daily_users=70553,
description=None,
name='Blur',
slug=u'donottrackplus',
summary='Protect your Passwords, Payments, and Privacy.',
weekly_downloads=2224)
amo.tests.addon_factory(
average_daily_users=1009156,
description=None,
name='Ghostery',
slug=u'ghostery',
summary=(
u'See who’s tracking you online and protect your privacy with '
u'Ghostery.'),
weekly_downloads=49315)
amo.tests.addon_factory(
average_daily_users=954288,
description=None,
name='Firebug',
slug=u'firebug',
summary=(
'Firebug integrates with Firefox to put a wealth of '
'development tools at your fingertips while you browse. You '
'can edit, debug, and monitor CSS, HTML, and JavaScript live '
'in any web page...'),
weekly_downloads=21969)
amo.tests.addon_factory(
average_daily_users=10821,
description=None,
name='Firebug Autocompleter',
slug=u'firebug-autocompleter',
summary='Firebug command line autocomplete.',
weekly_downloads=76)
amo.tests.addon_factory(
average_daily_users=11992,
description=None,
name='Firefinder for Firebug',
slug=u'firefinder-for-firebug',
summary=(
'Finds HTML elements matching chosen CSS selector(s) or XPath '
'expression'),
weekly_downloads=358)
amo.tests.addon_factory(
average_daily_users=8200,
description=None,
name='Fire Drag',
slug=u'fire-drag',
summary='drag texts and links with/without e10s',
weekly_downloads=506)
amo.tests.addon_factory(
average_daily_users=61014,
description=None,
name='Menu Wizard',
slug=u's3menu-wizard',
summary=(
'Customizemenus=Helps removing, moving and renaming menus and '
'menu items\nColorize important menu for ease of use! (use '
'Style (CSS))\nChange or disable any of used keyboard '
'shortcutsnSuppor=Firefox, Thunderbird and SeaMonkey'),
weekly_downloads=927)
amo.tests.addon_factory(
average_daily_users=81237,
description=None,
name='Add-ons Manager Context Menu',
slug=u'am-context',
summary='Add more items to Add-ons Manager context menu.',
weekly_downloads=169)
amo.tests.addon_factory(
average_daily_users=51,
description=None,
name='Frame Demolition',
slug=u'frame-demolition',
summary=(
'Enabling route to load abstracted file layer in select '
'sites.'),
weekly_downloads=70)
amo.tests.addon_factory(
average_daily_users=99,
description=None,
name='reStyle',
slug=u're-style',
summary=(
'A user style manager which can load local files and apply UI '
'styles even in Firefox 57+'),
weekly_downloads=70)
amo.tests.addon_factory(
average_daily_users=150,
description=None,
name='MegaUpload DownloadHelper',
slug=u'megaupload-downloadhelper',
summary=(
'Download from MegaUpload.\nMegaUpload Download Helper will '
'start your download once ready.\nMegaUpload Download Helper '
'will monitor time limitations and will auto-start your '
'download.'),
weekly_downloads=77)
amo.tests.addon_factory(
average_daily_users=2830,
description=None,
name='RapidShare DownloadHelper',
slug=u'rapidshare-downloadhelper',
summary=(
'Note from Mozilla: This add-on has been discontinued. Try '
'<a rel="nofollow" href="https://addons.mozilla.org/firefox/'
'addon/rapidshare-helper/">Rapidshare Helper</a> instead.\n\n'
'RapidShare Download Helper will start your download once '
'ready.'),
weekly_downloads=125)
amo.tests.addon_factory(
average_daily_users=98716,
description=None,
name='Popup Blocker',
slug=u'popup_blocker',
summary=(
'Prevents your web browser from opening a new window on top '
'of the content or web site you are viewing. The Addon also '
'supresses unwanted advertisement windows on your screen. '
'The one deciding what consitutes a popup is the user.'),
weekly_downloads=3940)
amo.tests.addon_factory(
average_daily_users=8830,
description=None,
name='No Flash',
slug=u'no-flash',
summary=(
'Replace Youtube, Vimeo and Dailymotion Flash video players '
'embedded on third-party website by the HTML5 counterpart '
'when the content author still use the old style embed '
'(Flash).\n\nSource code at <a rel="nofollow" href="https://'
'outgoing.prod.mozaws.net/v1/14b404a3c05779fa94b24e0bffc0d710'
'6836f1d6b771367b065fb96e9c8656b9/https%3A//github.com/hfigui'
'ere/no-flash">https://github.com/hfiguiere/no-flash</a>'),
weekly_downloads=77)
amo.tests.addon_factory(
average_daily_users=547880,
description=None,
name='Download Flash and Video',
slug=u'download-flash-and-video',
summary=(
'Download Flash and Video is a great download helper tool '
'that lets you download Flash games and Flash videos '
'(YouTube, Facebook, Dailymotion, Google Videos and more) '
'with a single click.\nThe downloader is very easy to use.'),
weekly_downloads=65891)
amo.tests.addon_factory(
average_daily_users=158796,
description=None,
name='YouTube Flash Video Player',
slug=u'youtube-flash-video-player',
summary=(
'YouTube Flash Video Player is a powerful tool that will let '
'you choose Flash video player as default YouTube video '
'player.'),
weekly_downloads=12239)
amo.tests.addon_factory(
average_daily_users=206980,
description=None,
name='YouTube Flash Player',
slug=u'youtube-flash-player',
summary=(
u'A very lightweight add-on that allows you to watch YouTube™ '
u'videos using Flash® Player instead of the '
u'default HTML5 player. The Flash® Player will consume less '
u'CPU and RAM resources if your device doesn\'t easily '
u'support HTML5 videos. Try it!'),
weekly_downloads=21882)
amo.tests.addon_factory(
average_daily_users=5056, description=None,
name='Disable Hello, Pocket & Reader+',
slug=u'disable-hello-pocket-reader',
summary=(
'Turn off Pocket, Reader, Hello and WebRTC bloatware - keep '
'browser fast and clean'),
weekly_downloads=85)
amo.tests.addon_factory(
average_daily_users=26135,
description=None,
name='Reader',
slug=u'reader',
summary='Reader is the ultimate Reader tool for Firefox.',
weekly_downloads=2463)
amo.tests.addon_factory(
average_daily_users=53412,
description=None,
name='Disable WebRTC',
slug=u'happy-bonobo-disable-webrtc',
summary=(
'WebRTC leaks your actual IP addresses from behind your VPN, '
'by default.'),
weekly_downloads=10583)
amo.tests.addon_factory(
average_daily_users=12953,
description=None,
name='In My Pocket',
slug=u'in-my-pocket',
summary=(
'For all those who are missing the old Firefox Pocket addon, '
'and not satisfied with the new Pocket integration, here is '
'an unofficial client for the excellent Pocket service. '
'Hope you\'ll enjoy it!'),
weekly_downloads=1123)
amo.tests.addon_factory(
name='GrApple Yummy', type=amo.ADDON_EXTENSION,
average_daily_users=0, weekly_downloads=0)
amo.tests.addon_factory(
name='Delicious Bookmarks', type=amo.ADDON_EXTENSION,
average_daily_users=0, weekly_downloads=0)
# Some more or less Dummy data to test a few very specific scenarios
# e.g for exact name matching
amo.tests.addon_factory(
name='Merge Windows', type=amo.ADDON_EXTENSION,
average_daily_users=0, weekly_downloads=0),
amo.tests.addon_factory(
name='Merge All Windows', type=amo.ADDON_EXTENSION,
average_daily_users=0, weekly_downloads=0),
amo.tests.addon_factory(
name='All Downloader Professional', type=amo.ADDON_EXTENSION,
average_daily_users=0, weekly_downloads=0),
amo.tests.addon_factory(
name='test addon test11', type=amo.ADDON_EXTENSION,
average_daily_users=0, weekly_downloads=0),
amo.tests.addon_factory(
name='test addon test21', type=amo.ADDON_EXTENSION,
average_daily_users=0, weekly_downloads=0),
amo.tests.addon_factory(
name='test addon test31', type=amo.ADDON_EXTENSION,
average_daily_users=0, weekly_downloads=0),
amo.tests.addon_factory(
name='1-Click YouTube Video Download',
type=amo.ADDON_EXTENSION,
average_daily_users=566337, weekly_downloads=150000,
description=(
'button, click that button, 1-Click Youtube Video '
'Downloader is a click click great tool')),
amo.tests.addon_factory(
name='Amazon 1-Click Lock', type=amo.ADDON_EXTENSION,
average_daily_users=50, weekly_downloads=0),
cls.refresh()
def test_scenario_tab_center_redux(self):
    """Pinned ranking/scores for the query 'tab center redux'.

    The exact-title match is expected far above partial word matches.
    """
    self._check_scenario('tab center redux', (
        ['Tab Center Redux', 42.221684],
        ['Tab Mix Plus', 0.028214136],
        ['Redux DevTools', 0.020817358],
    ))
def test_scenario_open_image_new_tab(self):
    """Pinned ranking/scores for the query 'Open Image in New Tab'."""
    self._check_scenario('Open Image in New Tab', (
        ['Open Image in New Tab', 13.606365],
        ['Open image in a new tab', 2.6027737],
    ))
def test_scenario_coinhive(self):
    """Pinned ranking/scores for the query 'CoinHive'."""
    # TODO, should match "CoinBlock"
    self._check_scenario('CoinHive', (
        ['Coinhive Blocker', 1.8078496],
        ['NoMiners', 0.017527454],  # via description
        # ['CoinBlock', 0], # via prefix search
    ))
def test_scenario_privacy(self):
    """Pinned ranking/scores for the single-word query 'Privacy'."""
    self._check_scenario('Privacy', (
        ['Privacy Badger', 4.045193],
        ['Privacy Settings', 2.1114779],
        ['Google Privacy', 2.0161355],  # More users, summary
        ['Privacy Pass', 1.2800134],
        ['Ghostery', 0.0741566],  # Crazy amount of users, summary
        # summary + a lot of users, but not as many as ghostery
        ['Blur', 0.06171894],
    ))
def test_scenario_firebu(self):
    """Pinned ranking/scores for the partial query 'firebu' (prefix)."""
    self._check_scenario('firebu', (
        ['Firebug', 1.643983],
        # unclear why preference to Firebug Autocompleter,
        # weekly downloads + users?
        ['Firefinder for Firebug', 0.6100478],
        ['Firebug Autocompleter', 0.42855242],
        ['Fire Drag', 0.26174024],
    ))
def test_scenario_fireb(self):
    """Pinned ranking/scores for the shorter prefix query 'fireb'."""
    self._check_scenario('fireb', (
        ['Firebug', 1.643983],
        ['Firefinder for Firebug', 0.6100478],
        ['Firebug Autocompleter', 0.42855242],
        ['Fire Drag', 0.26174024],
    ))
def test_scenario_menu_wizzard(self):
    """Fuzzy matching: the deliberately misspelled 'Wizzard' must still
    find 'Menu Wizard'."""
    self._check_scenario('Menu Wizzard', (
        ['Menu Wizard', 0.044465635],  # (fuzzy, typo)
        # partial match + users
        ['Add-ons Manager Context Menu', 0.03403218],
    ))
def test_scenario_frame_demolition(self):
    """Exact two-word title match gets the only (and a high) score."""
    self._check_scenario('Frame Demolition', (
        ['Frame Demolition', 13.404683],
    ))
def test_scenario_demolition(self):
    """Fuzzy single-word query with a deliberate typo ('Demolation')."""
    # Find "Frame Demolition" via a typo
    self._check_scenario('Demolation', (
        ['Frame Demolition', 0.027297318],
    ))
def test_scenario_restyle(self):
    """Mixed-case single-word title ('reStyle') matches exactly."""
    self._check_scenario('reStyle', (
        ['reStyle', 17.824808],
    ))
def test_scenario_megaupload_downloadhelper(self):
    """Both query terms must match (AND semantics), excluding the
    'RapidShare DownloadHelper' addon."""
    # Doesn't find "RapidShare DownloadHelper" anymore
    # since we now query by "MegaUpload AND DownloadHelper"
    self._check_scenario('MegaUpload DownloadHelper', (
        ['MegaUpload DownloadHelper', 22.838007],
    ))
def test_scenario_downloadhelper(self):
    """Pinned ranking/scores for the query 'DownloadHelper'."""
    # No direct match, "Download Flash and Video" has
    # huge amount of users that puts it first here
    self._check_scenario('DownloadHelper', (
        ['RapidShare DownloadHelper', 1.5038899],
        ['MegaUpload DownloadHelper', 0.837598],
        ['Download Flash and Video', 0.7398568],
        ['1-Click YouTube Video Download', 0.55960035],
    ))
def test_scenario_megaupload(self):
    """Pinned ranking/scores for the query 'MegaUpload'."""
    self._check_scenario('MegaUpload', (
        ['MegaUpload DownloadHelper', 1.881489],
        ['Popup Blocker', 0.57406807],
    ))
def test_scenario_no_flash(self):
    """Same expected results for 'No Flash' regardless of query case."""
    self._check_scenario('No Flash', (
        ['No Flash', 42.063526],
        ['Download Flash and Video', 4.081107],
        ['YouTube Flash Player', 3.30791],
        ['YouTube Flash Video Player', 2.875178],
    ))

    # Case should not matter.
    self._check_scenario('no flash', (
        ['No Flash', 42.063526],
        ['Download Flash and Video', 4.081107],
        ['YouTube Flash Player', 3.30791],
        ['YouTube Flash Video Player', 2.875178],
    ))
def test_scenario_disable_hello_pocket_reader_plus(self):
    """Title with punctuation (',', '&', '+') still matches exactly."""
    self._check_scenario('Disable Hello, Pocket & Reader+', (
        ['Disable Hello, Pocket & Reader+', 28.746347],  # yeay!
    ))
def test_scenario_grapple(self):
    """Making sure this scenario works via the API,
    see `legacy_api.SearchTest` for various examples.
    """
    # Lower-case query against the 'GrApple Yummy' fixture addon.
    self._check_scenario('grapple', (
        ['GrApple Yummy', 0.45373428],
    ))
def test_scenario_delicious(self):
    """Making sure this scenario works via the API,
    see `legacy_api.SearchTest` for various examples.
    """
    # Single-word query against the 'Delicious Bookmarks' fixture addon.
    self._check_scenario('delicious', (
        ['Delicious Bookmarks', 0.5275579],
    ))
def test_score_boost_name_match(self):
    """Phrase matching with slop: adjacent-word phrases also pull in
    titles with one interleaved word, but not unrelated titles."""
    # Tests that we match directly "Merge Windows" and also find
    # "Merge All Windows" because of slop=1
    self._check_scenario('merge windows', (
        ['Merge Windows', 9.161506],
        ['Merge All Windows', 0.6894618],
    ), no_match=(
        'All Downloader Professional',
    ))

    self._check_scenario('merge all windows', (
        ['Merge All Windows', 9.34597],
        ['Merge Windows', 0.020408927],
        ['All Downloader Professional', 0.0073838094],
    ))
def test_score_boost_exact_match(self):
    """Test that we rank exact matches at the top."""
    # 'test addon test21' is one of several near-identical fixture names;
    # only the exact one is expected in the results.
    self._check_scenario('test addon test21', (
        ['test addon test21', 10.347969],
    ))
def test_score_boost_exact_match_description_hijack(self):
    """Test that we rank exact matches at the top."""
    # The other addon repeats query words in its description ("click"),
    # which must not outrank the exact title match.
    self._check_scenario('Amazon 1-Click Lock', (
        ['Amazon 1-Click Lock', 24.268158],
        ['1-Click YouTube Video Download', 0.12715381],
    ))
| 40.836938 | 79 | 0.572057 |
140275f981addc767a8ed9b4412d0ac9a12ec112 | 4,380 | py | Python | addons/io_sketchfab_plugin/pack_for_export.py | gorenje/blender-plugin | 2d4d65e77bb9c35965bfc9714c947f9a10819462 | [
"Apache-2.0"
] | 2 | 2020-04-16T22:12:40.000Z | 2022-01-22T17:18:45.000Z | addons/io_sketchfab_plugin/pack_for_export.py | gorenje/blender-plugin | 2d4d65e77bb9c35965bfc9714c947f9a10819462 | [
"Apache-2.0"
] | null | null | null | addons/io_sketchfab_plugin/pack_for_export.py | gorenje/blender-plugin | 2d4d65e77bb9c35965bfc9714c947f9a10819462 | [
"Apache-2.0"
] | 2 | 2019-05-16T04:01:09.000Z | 2020-08-25T11:42:26.000Z | """
Copyright 2019 Sketchfab
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import bpy
import json
import sys
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
from blender.blender_version import Version
SKETCHFAB_EXPORT_TEMP_DIR = sys.argv[7]
SKETCHFAB_EXPORT_DATA_FILE = os.path.join(SKETCHFAB_EXPORT_TEMP_DIR, "export-sketchfab.json")
# save a copy of the current blendfile
def save_blend_copy():
    """Save a timestamped copy of the open .blend into the export temp dir.

    Returns a ``(filepath, filename, size_in_bytes)`` tuple describing
    the written copy.
    """
    import time

    stamp = time.strftime("Sketchfab_%Y_%m_%d_%H_%M_%S.blend",
                          time.localtime(time.time()))
    target = os.path.join(SKETCHFAB_EXPORT_TEMP_DIR, stamp)

    # `copy=True` keeps the user's current file untouched.
    bpy.ops.wm.save_as_mainfile(filepath=target,
                                compress=True,
                                copy=True)

    return (target, stamp, os.path.getsize(target))
# change visibility statuses and pack images
def prepare_assets(export_settings):
    """Prepare the scene before saving the upload copy.

    When ``export_settings['selection']`` is set: collect every image
    referenced by mesh materials, hide/queue for removal all meshes that
    are not selected, then remove them. In all cases, pack collected
    external images into the .blend and purge unused meshes, materials
    and images so the saved copy stays small.
    """
    hidden = set()   # objects to delete before saving
    images = set()   # images that must be packed into the .blend

    # If we did not ask to export all models, do some cleanup
    if export_settings['selection']:
        for ob in bpy.data.objects:
            if ob.type == 'MESH':
                for mat_slot in ob.material_slots:
                    if not mat_slot.material:
                        continue

                    # Blender < 2.80: images are referenced via texture slots.
                    if bpy.app.version < (2, 80, 0):
                        for tex_slot in mat_slot.material.texture_slots:
                            if not tex_slot:
                                continue
                            tex = tex_slot.texture
                            if tex.type == 'IMAGE':
                                image = tex.image
                                if image is not None:
                                    images.add(image)

                    # Node-based materials: collect image texture nodes.
                    if mat_slot.material.use_nodes:
                        nodes = mat_slot.material.node_tree.nodes
                        for n in nodes:
                            if n.type == "TEX_IMAGE":
                                if n.image is not None:
                                    images.add(n.image)

            if export_settings['selection'] and ob.type == 'MESH':
                # Add relevant objects to the list of objects to remove
                if not Version.get_visible(ob):  # Not visible
                    hidden.add(ob)
                elif not Version.get_selected(ob):  # Visible but not selected
                    Version.set_visible(ob, False)
                    hidden.add(ob)

    for img in images:
        if not img.packed_file:
            try:
                img.pack()
            except Exception:
                # Packing can fail in rare cases (e.g. missing source file).
                # Keep the export best-effort, but log the error. Narrowed
                # from a bare `except:` so SystemExit/KeyboardInterrupt are
                # no longer swallowed here.
                import traceback
                traceback.print_exc()

    for ob in hidden:
        bpy.data.objects.remove(ob)

    # delete unused materials and associated textures (will remove unneeded packed images)
    for m in bpy.data.meshes:
        if m.users == 0:
            bpy.data.meshes.remove(m)

    for m in bpy.data.materials:
        if m.users == 0:
            bpy.data.materials.remove(m)

    for t in bpy.data.images:
        if t.users == 0:
            bpy.data.images.remove(t)
def prepare_file(export_settings):
    """Clean up the scene per *export_settings*, then save the upload copy.

    Returns the (filepath, filename, size) tuple from save_blend_copy().
    """
    prepare_assets(export_settings)
    return save_blend_copy()
def read_settings():
    """Load the export settings JSON written by the add-on process."""
    with open(SKETCHFAB_EXPORT_DATA_FILE, 'r') as stream:
        raw = stream.read()
    return json.loads(raw)
def write_result(filepath, filename, size):
    """Write the saved-copy info back to the shared JSON data file so the
    parent process can pick it up."""
    payload = {
        'filepath': filepath,
        'filename': filename,
        'size': size,
    }
    with open(SKETCHFAB_EXPORT_DATA_FILE, 'w') as stream:
        json.dump(payload, stream)
if __name__ == "__main__":
    # Entry point when this script is run as a Blender subprocess:
    # read settings, pack/save the copy, and report the result back via
    # the shared JSON file. Any failure is logged and signalled with a
    # non-zero exit code.
    try:
        export_settings = read_settings()
        filepath, filename, size = prepare_file(export_settings)
        write_result(filepath, filename, size)
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # propagate normally; genuine errors are still logged.
        import traceback
        traceback.print_exc()
        sys.exit(1)
| 33.692308 | 93 | 0.575114 |
b70946fb3838dd244b04287f8aab373e535e1d10 | 784 | py | Python | api/v1/list/tests/test_list_item.py | RyanNoelk/OpenEats | dd015570cf07961b03525d256cd6d175bdb28ea2 | [
"MIT"
] | 113 | 2016-10-05T19:33:10.000Z | 2020-12-28T15:25:02.000Z | api/v1/list/tests/test_list_item.py | RyanNoelk/OpenEats | dd015570cf07961b03525d256cd6d175bdb28ea2 | [
"MIT"
] | 151 | 2016-10-27T19:40:22.000Z | 2018-03-02T22:58:41.000Z | api/v1/list/tests/test_list_item.py | RyanNoelk/OpenEats | dd015570cf07961b03525d256cd6d175bdb28ea2 | [
"MIT"
] | 47 | 2016-10-26T16:42:34.000Z | 2020-05-25T12:38:34.000Z | #!/usr/bin/env python
# encoding: utf-8
from __future__ import unicode_literals
from django.test import TestCase
from django.contrib.auth.models import AnonymousUser, User
from rest_framework.test import APIRequestFactory
from v1.list import views
class ListTests(TestCase):
    """API tests for grocery-list items."""

    # Users, lists, and list items the test below relies on.
    fixtures = ['test/users.json', 'test/lists.json', 'test/list_items.json']

    def setUp(self):
        # DRF request factory used to build API requests in each test.
        self.factory = APIRequestFactory()

    def test_list_item_count(self):
        """Test to make sure the count is right for items"""
        view = views.GroceryItemViewSet.as_view({'get': 'list'})
        request = self.factory.get('/api/v1/list/items/?list=8')
        # Authenticate as the fixture user with pk=1.
        request.user = User.objects.get(pk=1)
        response = view(request)
        # Per the fixtures, list 8 is expected to hold exactly 7 items.
        self.assertEqual(response.data.get('count'), 7)
| 32.666667 | 77 | 0.700255 |
9968ad5140db830a55a8784aaf6b74a44c151153 | 12,794 | py | Python | Workshop1-2/ControlFlow.py | pythonsolidity/python-workshops | 865867883dedaaf8ebcedd51add2ba05688af91c | [
"MIT"
] | null | null | null | Workshop1-2/ControlFlow.py | pythonsolidity/python-workshops | 865867883dedaaf8ebcedd51add2ba05688af91c | [
"MIT"
] | null | null | null | Workshop1-2/ControlFlow.py | pythonsolidity/python-workshops | 865867883dedaaf8ebcedd51add2ba05688af91c | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# coding: utf-8
# # Introduction to Python Statements
# Let's create a simple statement that says:
# "If a is greater than b, assign 2 to a and 4 to b"
#
# if a>b:
# a = 2
# b = 4
# ## Indentation
# Note how Python is so heavily driven by code indentation and whitespace. This means that code readability is a core part of the design of the Python language.
#
# Now let's start diving deeper by coding these sort of statements in Python!
# # if, elif, else Statements
#
# <code>if</code> Statements in Python allows us to tell the computer to perform alternative actions based on a certain set of results.
#
# Verbally, we can imagine we are telling the computer:
#
# "Hey if this case happens, perform some action"
#
# We can then expand the idea further with <code>elif</code> and <code>else</code> statements, which allow us to tell the computer:
#
# "Hey if this case happens, perform some action. Else, if another case happens, perform some other action. Else, if *none* of the above cases happened, perform this action."
#
# Let's go ahead and look at the syntax format for <code>if</code> statements to get a better idea of this:
#
# if case1:
# perform action1
# elif case2:
# perform action2
# else:
# perform action3
# ## First Example
#
# Let's see a quick example of this:
# In[ ]:
var = 1
if var == 1:
print('It was true!')
# Let's add in some else logic:
# In[ ]:
x = 10
if x < 5:
print('Ignore x!')
else:
print('Cannot ignore x')
# Note how the nested <code>if</code> statements are each checked until a True boolean causes the nested code below it to run. You should also note that you can put in as many <code>elif</code> statements as you want before you close off with an <code>else</code>.
#
# Let's create two more simple examples for the <code>if</code>, <code>elif</code>, and <code>else</code> statements:
# In[ ]:
person = 'Alice'
if person == 'Alice':
print('Welcome Alice!')
else:
print("Welcome, what's your name?")
# In[ ]:
person = 'Bob'
if person == 'Alice':
print('Welcome Alice!')
elif person =='Bob':
print('Welcome Bob!')
else:
print("Welcome, what's your name?")
# # for Loops
#
# A <code>for</code> loop acts as an iterator in Python; it goes through items that are in a *sequence* or any other iterable item. Objects that we've learned about that we can iterate over include strings, lists, tuples, and even built-in iterables for dictionaries, such as keys or values.
#
# We've already seen the <code>for</code> statement a little bit in past lectures but now let's formalize our understanding.
#
# Here's the general format for a <code>for</code> loop in Python:
#
# for item in object:
# statements to do stuff
#
# The variable name used for the item is completely up to the coder, so use your best judgment for choosing a name that makes sense and you will be able to understand when revisiting your code. This item name can then be referenced inside your loop, for example if you wanted to use <code>if</code> statements to perform checks.
#
# Let's go ahead and work through several example of <code>for</code> loops using a variety of data object types. We'll start simple and build more complexity later on.
#
# ## Example 1
# Iterating through a list
# In[ ]:
# We'll learn how to automate this sort of list in the next lecture
list1 = [1,2,3,4,5,6,7,8,9,10]
# In[ ]:
for num in list1:
print(num)
# Great! Hopefully this makes sense. Now let's add an <code>if</code> statement to check for even numbers. We'll first introduce a new concept here--the modulo.
# ### Modulo
# The modulo allows us to get the remainder in a division and uses the % symbol. For example:
# In[ ]:
17 % 5
# This makes sense since 17 divided by 5 is 3 remainder 2. Let's see a few more quick examples:
# In[ ]:
# 2 no remainder
4 % 2
# Notice that if a number is fully divisible with no remainder, the result of the modulo call is 0. We can use this to test for even numbers, since if a number modulo 2 is equal to 0, that means it is an even number!
#
# Back to the <code>for</code> loops!
#
# ## Example 2
# Let's print only the even numbers from that list!
# In[ ]:
for num in list1:
if num % 2 == 0:
print(num)
# We could have also put an <code>else</code> statement in there:
# In[ ]:
for num in list1:
if num % 2 == 0:
print(num)
else:
print('Odd number')
# ## Example 3
# Another common idea during a <code>for</code> loop is keeping some sort of running tally during multiple loops. For example, let's create a <code>for</code> loop that sums up the list:
# In[ ]:
# Start sum at zero
list_sum = 0
for num in list1:
list_sum = list_sum + num
print(list_sum)
# Great! Read over the above cell and make sure you understand fully what is going on. Also we could have implemented a <code>+=</code> to perform the addition towards the sum. For example:
# In[ ]:
# Start sum at zero
list_sum = 0
for num in list1:
list_sum += num
print(list_sum)
# ## Example 4
# We've used <code>for</code> loops with lists, how about with strings? Remember strings are a sequence so when we iterate through them we will be accessing each item in that string.
# In[ ]:
for letter in 'This is a string.':
print(letter)
# ## Example 5: Dictionaries
# In[ ]:
d = {'k1':1,'k2':2,'k3':3}
# In[ ]:
for item in d:
print(item)
# Notice how this produces only the keys. So how can we get the values? Or both the keys and the values?
#
# We're going to introduce three new Dictionary methods: **.keys()**, **.values()** and **.items()**
#
# In Python each of these methods return a *dictionary view object*. It supports operations like membership test and iteration, but its contents are not independent of the original dictionary – it is only a view. Let's see it in action:
# In[ ]:
# Create a dictionary view object
d.items()
# Since the .items() method supports iteration, we can perform *dictionary unpacking* to separate keys and values just as we did in the previous examples.
# In[ ]:
# Dictionary unpacking
for k,v in d.items():
print(k)
print(v)
# If you want to obtain a true list of keys, values, or key/value tuples, you can *cast* the view as a list:
# In[ ]:
list(d.keys())
# Remember that dictionaries are unordered, and that keys and values come back in arbitrary order. You can obtain a sorted list using sorted():
# In[ ]:
sorted(d.values())
# # while Loops
#
# The <code>while</code> statement in Python is one of most general ways to perform iteration. A <code>while</code> statement will repeatedly execute a single statement or group of statements as long as the condition is true. The reason it is called a 'loop' is because the code statements are looped through over and over again until the condition is no longer met.
#
# The general format of a while loop is:
#
# while test:
# code statements
# else:
# final code statements
#
# Let’s look at a few simple <code>while</code> loops in action.
# In[ ]:
x = 0
while x < 10:
print('x is currently: ',x)
print(' x is still less than 10, adding 1 to x')
x+=1
# Notice how many times the print statements occurred and how the <code>while</code> loop kept going until the True condition was met, which occurred once x==10. It's important to note that once this occurred the code stopped. Let's see how we could add an <code>else</code> statement:
# # break, continue, pass
#
# We can use <code>break</code>, <code>continue</code>, and <code>pass</code> statements in our loops to add additional functionality for various cases. The three statements are defined by:
#
# break: Breaks out of the current closest enclosing loop.
# continue: Goes to the top of the closest enclosing loop.
# pass: Does nothing at all.
#
#
# Thinking about <code>break</code> and <code>continue</code> statements, the general format of the <code>while</code> loop looks like this:
#
# while test:
# code statement
# if test:
# break
# if test:
# continue
# else:
#
# <code>break</code> and <code>continue</code> statements can appear anywhere inside the loop’s body, but we will usually put them further nested in conjunction with an <code>if</code> statement to perform an action based on some condition.
#
# Let's go ahead and look at some examples!
# In[ ]:
x = 0
while x < 10:
print('x is currently: ',x)
print(' x is still less than 10, adding 1 to x')
x+=1
if x==3:
print('Breaking because x==3')
break
else:
print('continuing...')
continue
# Note how the loop stopped as soon as x reached 3 — the break statement exited the loop, so neither the remaining iterations nor any further 'continuing...' messages were printed!
#
# After these brief but simple examples, you should feel comfortable using <code>while</code> statements in your code.
#
# **A word of caution however! It is possible to create an infinitely running loop with <code>while</code> statements. For example:**
# In[ ]:
# DO NOT RUN THIS CODE!!!!
while True:
print("I'm stuck in an infinite loop!")
# A quick note: If you *did* run the above cell, click on the Kernel menu above to restart the kernel!
# # Useful Operators
#
# There are a few built-in functions and "operators" in Python that don't fit well into any category, so we will go over them in this lecture, let's begin!
# ## range
#
# The range function allows you to quickly *generate* a list of integers, this comes in handy a lot, so take note of how to use it! There are 3 parameters you can pass, a start, a stop, and a step size. Let's see some examples:
# In[ ]:
range(0,11)
# Note that this is a **generator** function, so to actually get a list out of it, we need to cast it to a list with **list()**. What is a generator? Its a special type of function that will generate information and not need to save it to memory. We haven't talked about functions or generators yet, so just keep this in your notes for now, we will discuss this in much more detail in later on in your training!
# In[ ]:
# Notice how 11 is not included, up to but not including 11, just like slice notation!
list(range(0,11))
# In[ ]:
list(range(0,12))
# In[ ]:
# Third parameter is step size!
# step size just means how big of a jump/leap/step you
# take from the starting number to get to the next number.
list(range(0,11,2))
# In[ ]:
list(range(0,101,10))
# ## For Loop with range( )
# In[ ]:
for i in range(0,11):
print(i**2)
# ## in operator
#
# We've already seen the **in** keyword during the for loop, but we can also use it to quickly check if an object is in a list
# In[ ]:
'x' in ['x','y','z']
# In[ ]:
'x' in [1,2,3]
# ## min and max
#
# Quickly check the minimum or maximum of a list with these functions.
# In[ ]:
mylist = [10,20,30,40,100]
# In[ ]:
min(mylist)
# In[ ]:
max(mylist)
# ## input ( )
# In[ ]:
input('Enter Something into this box: ')
# In[ ]:
# finding area of a rectangle
# input() always returns a string, so convert to numbers first —
# otherwise `length * width` raises TypeError (can't multiply str by str).
length = float(input('Enter the length: '))
width = float(input('Enter the width: '))
area = length * width
print('The area is: ', area)
# # List Comprehensions
#
# In addition to sequence operations and list methods, Python includes a more advanced operation called a list comprehension.
#
# List comprehensions allow us to build out lists using a different notation. You can think of it as essentially a one line <code>for</code> loop built inside of brackets. For a simple example:
# ## Example 1
# In[ ]:
# Creating a list of odd numbers between 7 and 200 - The Long Way
lst = []
for x in range(7, 200, 2):
lst.append(x)
print(lst)
# In[ ]:
# Creating a list of odd numbers between 7 and 200 - The Short Way
lst = [x for x in range(7, 200, 2)]
print(lst)
# ## Example 2
# In[ ]:
# Grab every letter in string
lst = [x for x in 'word']
# In[ ]:
# Check
lst
# This is the basic idea of a list comprehension. If you're familiar with mathematical notation this format should feel familiar for example: x^2 : x in { 0,1,2...10 }
#
# Let's see a few more examples of list comprehensions in Python:
# ## Example 3
# In[ ]:
# Square numbers in range and turn into list
lst = [x**2 for x in range(11)]
# In[ ]:
lst
# ## Example 4
# Let's see how to add in <code>if</code> statements:
# In[ ]:
# Check for even numbers in a range
lst = [x for x in range(11) if x % 2 == 0]
# In[ ]:
lst
# ## Example 5
# Can also do more complicated arithmetic:
# In[ ]:
# Convert Celsius to Fahrenheit
celsius = [0,10,20.1,34.5]
fahrenheit = [((9/5)*temp + 32) for temp in celsius ]
fahrenheit
| 23.389397 | 411 | 0.674769 |
e69887726933ed82daaeeaec610b72e7866a6b5f | 2,325 | py | Python | TSD_DeconvNet/data.py | KaiL4eK/keras_traffic_signs_localization | 38d5f9b2a11c5db04850ddba3ae91f18dcb98066 | [
"MIT"
] | 4 | 2019-01-03T18:21:39.000Z | 2022-01-25T00:24:07.000Z | TSD_DeconvNet/data.py | KaiL4eK/keras_traffic_signs_localization | 38d5f9b2a11c5db04850ddba3ae91f18dcb98066 | [
"MIT"
] | 1 | 2020-09-26T00:37:57.000Z | 2020-09-26T00:37:57.000Z | TSD_DeconvNet/data.py | KaiL4eK/keras_traffic_signs_localization | 38d5f9b2a11c5db04850ddba3ae91f18dcb98066 | [
"MIT"
] | null | null | null | import os
from os.path import isfile, isdir, join, dirname, exists
import pickle
from sklearn.model_selection import train_test_split
def image_preprocess(image):
    """Full preprocessing applied to an input image.

    Currently this is just intensity normalization (see normalize()).
    """
    return normalize(image)
def normalize(image):
    """Scale 8-bit pixel intensities from [0, 255] down to [0.0, 1.0]."""
    full_scale = 255.
    return image / full_scale
def create_training_instances(
        train_images,
        train_masks,
        valid_images,
        valid_masks,
        cache_name
):
    """Build (image_path, mask_path) pairs for training and validation.

    If *cache_name* points to an existing pickle, the cached split is
    returned as-is. Otherwise the train folders are paired by filename;
    if no validation folders are given, 20% of the train pairs are split
    off (fixed seed) as the validation set. The result is pickled to
    *cache_name* when one is provided.

    Returns a ``(train_entries, valid_entries)`` tuple of path-pair lists.
    """
    train_entries = []
    valid_entries = []

    # Fast path: reuse a previously pickled split.
    if cache_name and os.path.exists(cache_name):
        print('Loading data from .pkl file')
        with open(cache_name, 'rb') as handle:
            cache = pickle.load(handle)
        return cache['train'], cache['valid']

    if not train_images or not train_masks:
        print('Train folder is not set - exit')
        return train_entries, valid_entries

    # Pair every mask file with the same-named image, skipping orphans.
    train_entries = [(join(train_images, f), join(train_masks, f))
                     for f in os.listdir(train_masks)
                     if isfile(join(train_masks, f)) and exists(join(train_images, f))]

    if not valid_images or not valid_masks:
        print('Validation folder is not set - Splitting train set to generate valid set')
        train_entries, valid_entries = train_test_split(train_entries, test_size=0.2, random_state=42)
    else:
        valid_entries = [(join(valid_images, f), join(valid_masks, f))
                         for f in os.listdir(valid_masks)
                         if isfile(join(valid_masks, f)) and exists(join(valid_images, f))]

    # Report the final (post-split) sizes once; the original printed the
    # train count twice with an identical message.
    print('Found {} train imgs'.format(len(train_entries)))
    print('Found {} valid imgs'.format(len(valid_entries)))

    if cache_name:
        if not isdir(dirname(cache_name)):
            os.makedirs(dirname(cache_name))
        cache = {'train': train_entries, 'valid': valid_entries}
        with open(cache_name, 'wb') as handle:
            pickle.dump(cache, handle, protocol=pickle.HIGHEST_PROTOCOL)

    return train_entries, valid_entries
if __name__ == '__main__':
    # Ad-hoc smoke run against local dataset paths; no cache file is
    # written (cache_name=None) and the returned lists are discarded.
    train_im = '../../data_root/__signs/robofest_data/positive_bbox_class'
    train_msk = '../../data_root/__signs/robofest_data/positive_masks_train'
    valid_msk = '../../data_root/__signs/robofest_data/positive_masks_test'
    create_training_instances(train_im, train_msk, train_im, valid_msk, None)
8f4671ab87b1ed77d5e33a579e56f423ad62ad04 | 3,579 | py | Python | Exercise3/cfn_template_automation.py | n-fink/arcgis-python-serverless-example | 8e07233f2175e8ed66bae5cb46a0128749ad8fcd | [
"MIT"
] | null | null | null | Exercise3/cfn_template_automation.py | n-fink/arcgis-python-serverless-example | 8e07233f2175e8ed66bae5cb46a0128749ad8fcd | [
"MIT"
] | null | null | null | Exercise3/cfn_template_automation.py | n-fink/arcgis-python-serverless-example | 8e07233f2175e8ed66bae5cb46a0128749ad8fcd | [
"MIT"
] | null | null | null | #!/usr/bin/env python
""" A python script utilizing the Troposphere library to generate a CloudFormation template to deploy a serverless
ArcGIS data update stack
"""
__author__ = "Nick Fink"
__contact__ = "nicholas.fink@nltgis.com"
__copyright__ = "Copyright 2021 New Light Technologies, Inc."
__date__ = "2021/01/26"
__license__ = "MIT"
from troposphere import events, awslambda, iam, Template, GetAtt
# create a cloudformation template to
template = Template()
# set your variables
# NOTE(review): credentials are placeholder values hard-coded here; in real
# use they should come from environment variables or a secrets manager
# rather than being baked into the generated template.
portal_url = "https://YourOrg.maps.arcgis.com"
portal_user = "YourUser"
portal_password = "SuperSecurePassword"
data_url = "https://www3.septa.org/api/TrainView/"
frmt = "CSV"
id = ""  # portal item id (empty here) — note this shadows the builtin `id`
file = "my_awesome_api_test.csv"
cron = "rate(5 minutes)" # how often the service should update
timeout = 30 # lambda time out length in seconds
mem = 256 # lambda workspace memory size
bucket = "my-unique-bucket-name" # S3 bucket that hosts your zip archive
key = "arcgisLambda.zip" # the bucket key of your zip archive object

# these variables are just for naming the various stack pieces
# e.g. "my_awesome_api_test.csv" -> "MyAwesomeApiTest" -> "lambdaMyAwesomeApiTest"
name = str(file).split(".")[0]  # drop the file extension
name_split = name.split("_")
name_cap = [word.capitalize() for word in name_split]
new_name = "".join(name_cap)  # CamelCase suffix shared by all resources
lambda_name = "lambda"+new_name
rule_name = "rule"+new_name
perm_name = "perm"+new_name
role_name = "role"+new_name
# create a new IAM role for the lambda
arcgis_role = template.add_resource(iam.Role(
role_name,
Path="/",
Policies=[iam.Policy(
PolicyName="root",
PolicyDocument={
"Version": "2012-10-17",
"Statement": [{
"Action": ["logs:*"],
"Resource": "arn:aws:logs:*:*:*",
"Effect": "Allow"
}]
})],
AssumeRolePolicyDocument={
"Version": "2012-10-17",
"Statement": [{
"Action": ["sts:AssumeRole"],
"Effect": "Allow",
"Principal": {
"Service": ["lambda.amazonaws.com"]
}
}]
}))
# create a lambda to run the code
arcgis_lambda = template.add_resource(awslambda.Function(
lambda_name,
Code=awslambda.Code(
S3Bucket=bucket,
S3Key=key),
Description="Lambda task that updates {}".format(lambda_name),
FunctionName=lambda_name,
Handler="lambda.agol_update",
Runtime="python3.7",
Timeout=timeout,
Role=GetAtt(arcgis_role.title, "Arn"),
MemorySize=mem,
Environment=awslambda.Environment(
Variables={
"data_url": data_url,
"portal_url": portal_url,
"portal_user": portal_user,
"portal_password": portal_password,
"portal_item": id,
"file": file,
"format": frmt})
))
# create an EventBridge rule to kick off the lambda
arcgis_rule = template.add_resource(events.Rule(
rule_name,
ScheduleExpression=cron,
Description="My Lambda CloudWatch Event",
State="ENABLED",
Targets=[
events.Target(
"MyLambdaTarget",
Arn=GetAtt(arcgis_lambda.title, "Arn"),
Id="MyLambdaId"
)
]
))
# add permissions to the lambda to allow EventBridge to kick it off
arcgis_permission = template.add_resource(awslambda.Permission(
perm_name,
FunctionName=GetAtt(arcgis_lambda.title, 'Arn'),
Action='lambda:InvokeFunction',
Principal='events.amazonaws.com',
SourceArn=GetAtt(arcgis_rule.title, 'Arn'),
))
# write the json template to a yaml file
template.to_yaml()
fh = open("template.yaml", "w")
fh.writelines(template.to_yaml())
fh.close()
| 30.07563 | 114 | 0.649064 |
60152c13f13af904eddc7cbb8c9dba1a164c2f36 | 743 | py | Python | web_manage/member/urls.py | chicken-noodle/mysite | 6973edd1357a2490f7f47e5ce427af5b144fb50a | [
"bzip2-1.0.6"
] | 1 | 2019-09-02T02:14:51.000Z | 2019-09-02T02:14:51.000Z | web_manage/member/urls.py | chicken-noodle/mysite | 6973edd1357a2490f7f47e5ce427af5b144fb50a | [
"bzip2-1.0.6"
] | null | null | null | web_manage/member/urls.py | chicken-noodle/mysite | 6973edd1357a2490f7f47e5ce427af5b144fb50a | [
"bzip2-1.0.6"
] | 1 | 2019-09-02T03:33:06.000Z | 2019-09-02T03:33:06.000Z | from django.urls import path
from . import views
urlpatterns = [
path('com_manage/', views.com_manage, name='com_manage'),
path('set_com_status/', views.set_com_status, name='set_com_status'),
path('com_detail_manage/', views.com_detail_manage, name='com_detail_manage'),
path('com_edit/', views.com_edit, name='com_edit'),
path('add_com/', views.add_com, name='add_com'),
path('add_com_complete/', views.add_com_complete, name='add_com_complete'),
path('apply_application/', views.apply_application, name='apply_application'),
path('apply_application_agree/', views.apply_application_agree, name='apply_application_agree'),
path('apply_application_disagree/', views.apply_application_disagree, name='apply_application_disagree'),
]
| 49.533333 | 106 | 0.781965 |
e3fa68534e94629435d3104a3799bf7f921114e8 | 4,471 | py | Python | Network Simulations/Assignment 2/SW/client.py | neeladripal/bcse-lab | 915d2f535ae95a062438fc85980419646a3951ad | [
"MIT"
] | null | null | null | Network Simulations/Assignment 2/SW/client.py | neeladripal/bcse-lab | 915d2f535ae95a062438fc85980419646a3951ad | [
"MIT"
] | null | null | null | Network Simulations/Assignment 2/SW/client.py | neeladripal/bcse-lab | 915d2f535ae95a062438fc85980419646a3951ad | [
"MIT"
] | 1 | 2021-08-06T14:39:53.000Z | 2021-08-06T14:39:53.000Z | import socket
import select
from sender import Sender
from receiver import Receiver
# Function to handle client operations
def client():
    """Interactive stop-and-wait client.

    Connects to the coordination server, registers a name, then loops on
    a text menu: 1 = send a file to another client, 2 = wait for incoming
    data, anything else = disconnect. Actual data transfer is delegated
    to the Sender/Receiver classes.
    """
    # define server ip address
    SERVER_IP = '127.0.0.1'
    # define server port address
    SERVER_PORT = 12345

    # start the client socket
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as client:
        client.connect((SERVER_IP, SERVER_PORT))

        # receive connection acknowledgement from server
        msg = client.recv(1024).decode()
        print("From Server :", msg, end='')

        # take the client name as input and send it to server
        name = input()
        client.sendall(bytes(name, 'UTF-8'))

        # get this client's data-transfer port number from the server
        address = client.recv(1024).decode()
        senderAddress = int(address)

        # Loop until client wants to go offline
        while True:
            print('\nClient Name - ' + name + ' | You can 1.Send data 2.Receive data 3.Leave\n')
            choice = int(input('Enter option : '))
            if choice == 1:
                client.send(str.encode("request for sending"))
            elif choice != 2:
                client.send(str.encode("close"))
                break

            # initialize input and output event generators
            inputs = [client]
            output = []
            # wait until any input/output event or timeout (1 hour) occurs
            readable, writable, exceptionals = select.select(inputs, output, inputs, 3600)

            # if input event is generated (any data/signal from server), handle it
            for s in readable:
                # Receive and decode the data
                data = s.recv(1024).decode()

                # If no other client is connected with server, cancel sending request
                if data == "No client is available":
                    print(data)
                    break
                # if this client got sending permission from server
                elif choice == 1:
                    # data file name where data is stored
                    file_name = "data.txt"
                    # receive available receiver list from server
                    receiver_list = data.split('$')

                    # print the list and choose one of them for data transfer
                    print("\nAvailable clients----->")
                    for index in range(0, len(receiver_list)):
                        print((index + 1), '. ', receiver_list[index])
                    choice = int(input('\nEnter your choice : '))
                    choice -= 1
                    # ensure that the choice is valid
                    while choice not in range(0, len(receiver_list)):
                        choice = int(input('Correctly input your choice : '))
                        choice -= 1

                    # notify server about the choice
                    s.send(str.encode(str(choice)))
                    # get receiver port from server
                    receiverAddress = int(s.recv(1024).decode())

                    # BUGFIX: use the validated `choice`, not the stale
                    # `index` left over from the print loop (which always
                    # pointed at the LAST listed client).
                    my_sender = Sender(client, name, senderAddress,
                                       receiver_list[choice], receiverAddress,
                                       file_name)
                    # start transmission (using sender object)
                    my_sender.transmit()

                    # print notification from server about transfer completion
                    data = s.recv(1024)
                    data = data.decode()
                    print(data)
                # if this client got a receiving request
                else:
                    print('Receiving data ...')
                    # file name where received data will be stored
                    file_name = "test.txt"
                    # the payload is the sender's port number
                    receiverAddress = int(data)
                    s.send(bytes("start", 'utf-8'))
                    my_receiver = Receiver(client, name, senderAddress,
                                           receiverAddress, file_name)
                    # start data receiving through receiver object
                    my_receiver.startReceiving()

            # if nothing happened within the timeout, show the menu again
            if not (readable or writable or exceptionals):
                continue
# Run the interactive client when this module is executed directly.
if __name__=='__main__':
    client()
0d9d27da66edc0b91653b7da267479ce332fe392 | 4,688 | py | Python | process_raw_data.py | zadjii/github-analytics | 9f0f82320a2606e8c838c475379e1b2943ffab4d | [
"MIT"
] | 1 | 2021-05-12T13:01:35.000Z | 2021-05-12T13:01:35.000Z | process_raw_data.py | zadjii/github-analytics | 9f0f82320a2606e8c838c475379e1b2943ffab4d | [
"MIT"
] | null | null | null | process_raw_data.py | zadjii/github-analytics | 9f0f82320a2606e8c838c475379e1b2943ffab4d | [
"MIT"
] | null | null | null | import sys
import json
import re
import github
from github import Github
from common.Instance import get_github_token, get_github_repo, Instance
from models.Issue import Issue
from models.Comment import Comment
def usage():
print("python list_dupes.py ")
print(" <todo>")
def _do_issue(db, issue_model):
json_obj = json.loads(issue_model.raw_data)
# issue_model.num = json_obj["number"]
issue_model.title = json_obj["title"]
issue_model.state = json_obj["state"]
issue_model.body = json_obj["body"]
# num_comments = issue_model.comments.count()
# print(f"#{num} {title} ({num_comments} comments)")
# related_issues = []
# duplicate_of = []
# def process_body(body):
# # print(f'{body}')
# if body is None or body == "":
# return
# strong_dupes = re.findall(r"/dup((licate)|(e))?( of)? #(\d+)\s*", body)
# for match in strong_dupes:
# dupe_number = int(match[-1])
# duplicate_of.append(dupe_number)
# mentioned_issues = re.findall(r"#(\d+)\s*", body)
# # print(f"{mentioned_issues}")
# for match in mentioned_issues:
# mentioned_num = int(match)
# related_issues.append(mentioned_num)
# process_body(json_obj["body"])
db.session.add(issue_model)
for comment in issue_model.comments:
comment_json = json.loads(comment.raw_data)
comment.body = comment_json["body"]
# process_body(body)
db.session.add(comment)
print(f"Processed #{issue_model.number}")
# related_issues = list(set(related_issues))
# duplicate_of = list(set(duplicate_of))
# db.session.add(issue_model)
# for issue_num in related_issues:
# other = db.session.query(Issue).filter(Issue.number == issue_num).first()
# if other is not None:
# issue_model.mentioned_issues.append(other)
# db.session.add(other)
# for issue_num in duplicate_of:
# other = db.session.query(Issue).filter(Issue.number == issue_num).first()
# if other is not None:
# issue_model.duplicate_of.append(other)
# db.session.add(other)
# print(f"\tRelated to {list(set(related_issues))}")
# print(f"\tDuplicate of {list(set(duplicate_of))}")
# duplicates = [other.number for other in issue_model.duplicates]
# mentioned_by = [other.number for other in issue_model.mentioned_by]
# print(f"\tduplicates: {duplicates}")
# print(f"\tmentioned_by: {mentioned_by}")
# duplicate_of_results = [other.number for other in issue_model.duplicate_of]
# mentioned_issues_results = [other.number for other in issue_model.mentioned_issues]
# print(f"\tduplicate_of_results: {duplicate_of_results}")
# print(f"\tmentioned_issues_results: {mentioned_issues_results}")
db.session.commit()
def do_range(instance, min_num=None, max_num=None):
    """Process every stored issue whose number lies in [min_num, max_num].

    Either bound may be ``None`` to leave that side unbounded.
    """
    db = instance.get_db()
    all_issues = db.session.query(Issue).order_by(Issue.number).all()
    for issue in all_issues:
        below = min_num is not None and issue.number < min_num
        above = max_num is not None and issue.number > max_num
        if below or above:
            continue
        _do_issue(db, issue)
def print_all(instance):
    """Re-save every issue (clearing pass), then print each one.

    NOTE(review): ``_print_issue`` is not defined anywhere in this module —
    the second loop will raise NameError unless it exists elsewhere; confirm
    or restore that helper.
    """
    db = instance.get_db()
    for issue in db.session.query(Issue).order_by(Issue.number).all():
        # [issue.mentioned_by.remove(other) for other in issue.mentioned_by]
        # [issue.mentioned_issues.remove(other) for other in issue.mentioned_issues]
        # [issue.duplicates.remove(other) for other in issue.duplicates]
        # [issue.duplicate_of.remove(other) for other in issue.duplicate_of]
        # issue.mentioned_by = []
        # issue.mentioned_issues = []
        # issue.duplicates = []
        # issue.duplicate_of = []
        db.session.add(issue)
        # Commits once per issue — intentional per-item progress, not batched.
        db.session.commit()
        print(f"cleared issue #{issue.number}")
    for issue in db.session.query(Issue).order_by(Issue.number).all():
        _print_issue(db, issue)
def print_single(instance, number):
    """Print the issue with the given number, or a not-found message.

    NOTE(review): ``_print_issue`` is not defined in this module — confirm it
    exists elsewhere before calling this function.
    """
    db = instance.get_db()
    issue_model = db.session.query(Issue).filter(Issue.number == number).first()
    if issue_model is None:
        print(f"Issue #{number} is not in the DB")
    else:
        _print_issue(db, issue_model)
def main(argv):
    """Entry point: optional argv[1]/argv[2] give min/max issue numbers."""
    instance = Instance()
    # At most the first two positional arguments are used as integer bounds.
    bounds = [int(arg) for arg in argv[1:3]]
    do_range(instance, *bounds)
# Script entry point: forwards the CLI arguments (optional min/max numbers).
if __name__ == "__main__":
    main(sys.argv)
| 33.726619 | 90 | 0.627986 |
30fb96c18e497c359f6eaca0c9f3298ec777b572 | 1,532 | py | Python | hub/views/public/exporters.py | ecila7290/exporterhub-be | 07f5763586c10f8722fcb4c8411c0af097edc0d0 | [
"Apache-2.0"
] | 4 | 2020-11-25T00:27:30.000Z | 2020-12-04T03:54:20.000Z | hub/views/public/exporters.py | ralfyang/exporterhub-be | 47370f04207545f33949e2ce4c194ec24ad9d23c | [
"Apache-2.0"
] | null | null | null | hub/views/public/exporters.py | ralfyang/exporterhub-be | 47370f04207545f33949e2ce4c194ec24ad9d23c | [
"Apache-2.0"
] | 2 | 2020-11-25T00:27:31.000Z | 2020-12-03T05:51:52.000Z | import json
from django.views import View
from django.http import JsonResponse
from hub.models import Exporter
class MainView(View):
    """Read-only endpoint listing every exporter together with its releases."""

    def get(self, request):
        """Return all exporters ordered by id as JSON, or 400 on any failure."""
        try:
            exporters = (
                Exporter.objects
                .select_related('category', 'official')
                .prefetch_related('release_set')
                .order_by('id')
            )
            data = {
                "exporters": [self._to_dict(exporter) for exporter in exporters]
            }
            return JsonResponse(data, status=200)
        except Exception as e:
            # Broad catch mirrors original behavior: any error yields a 400.
            return JsonResponse({'message': f"{e}"}, status=400)

    def _to_dict(self, exporter):
        """Serialize one exporter (and its releases) to a JSON-ready dict."""
        releases = exporter.release_set.all()
        return {
            "exporter_id": exporter.id,
            "name": exporter.name,
            "logo_url": exporter.logo_url,
            "category": exporter.category.name,
            "official": exporter.official.name,
            "stars": exporter.stars,
            "repository_url": exporter.repository_url,
            "description": exporter.description,
            "recent_release": exporter.release_set.last().date if releases else '1970-01-01',
            "release": [
                {
                    "release_version": release.version,
                    "release_date": release.date,
                    "release_url": release.release_url,
                }
                for release in releases
            ],
        }
8afb40961a2e4795cdda20cb15b2ce4a2d5a33e8 | 1,893 | py | Python | fhir/resources/STU3/codeableconcept.py | cstoltze/fhir.resources | 52f99738935b7313089d89daf94d73ce7d167c9d | [
"BSD-3-Clause"
] | 144 | 2019-05-08T14:24:43.000Z | 2022-03-30T02:37:11.000Z | fhir/resources/STU3/codeableconcept.py | cstoltze/fhir.resources | 52f99738935b7313089d89daf94d73ce7d167c9d | [
"BSD-3-Clause"
] | 82 | 2019-05-13T17:43:13.000Z | 2022-03-30T16:45:17.000Z | fhir/resources/STU3/codeableconcept.py | cstoltze/fhir.resources | 52f99738935b7313089d89daf94d73ce7d167c9d | [
"BSD-3-Clause"
] | 48 | 2019-04-04T14:14:53.000Z | 2022-03-30T06:07:31.000Z | # -*- coding: utf-8 -*-
"""
Profile: http://hl7.org/fhir/StructureDefinition/CodeableConcept
Release: STU3
Version: 3.0.2
Revision: 11917
Last updated: 2019-10-24T11:53:00+11:00
"""
import typing
from pydantic import Field
from . import element, fhirtypes
class CodeableConcept(element.Element):
    """Disclaimer: Any field name ends with ``__ext`` doesn't part of
    Resource StructureDefinition, instead used to enable Extensibility feature
    for FHIR Primitive Data Types.

    Concept - reference to a terminology or just text.
    A concept that may be defined by a formal reference to a terminology or
    ontology or may be provided by text.
    """

    # Fixed FHIR element type name; ``const=True`` pins the value.
    resource_type = Field("CodeableConcept", const=True)

    # Zero or more codings; each points into a terminology system.
    coding: typing.List[fhirtypes.CodingType] = Field(
        None,
        alias="coding",
        title="Code defined by a terminology system",
        description="A reference to a code defined by a terminology system.",
        # if property is element of this resource.
        element_property=True,
    )

    # Free-text rendering of the concept as entered/selected by the user.
    text: fhirtypes.String = Field(
        None,
        alias="text",
        title="Plain text representation of the concept",
        description=(
            "A human language representation of the concept as "
            "seen/selected/uttered by the user who entered the data and/or which "
            "represents the intended meaning of the user."
        ),
        # if property is element of this resource.
        element_property=True,
    )
    # Companion extension field for the primitive ``text`` (serialized "_text").
    text__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
        None, alias="_text", title="Extension field for ``text``."
    )

    @classmethod
    def elements_sequence(cls):
        """returning all elements names from
        ``CodeableConcept`` according specification,
        with preserving original sequence order.
        """
        return ["id", "extension", "coding", "text"]
7462be138cbc44f9e4c9c8994dc9595ab5a57890 | 1,702 | py | Python | configs/_base_/datasets/coco_detection.py | ruiningTang/mmdetection | 100b0b5e0edddc45af0812b9f1474493c61671ef | [
"Apache-2.0"
] | null | null | null | configs/_base_/datasets/coco_detection.py | ruiningTang/mmdetection | 100b0b5e0edddc45af0812b9f1474493c61671ef | [
"Apache-2.0"
] | null | null | null | configs/_base_/datasets/coco_detection.py | ruiningTang/mmdetection | 100b0b5e0edddc45af0812b9f1474493c61671ef | [
"Apache-2.0"
] | null | null | null | dataset_type = 'CocoDataset'
data_root = '/home/amax/coco2017/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
samples_per_gpu=2,
workers_per_gpu=2,
train=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_train2017.json',
img_prefix=data_root + 'train2017/',
pipeline=train_pipeline),
val=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline))
evaluation = dict(interval=1, metric='bbox')
| 34.734694 | 77 | 0.62691 |
9c4c5ee997a1319b889e41ab9d83c48894e62ebb | 349 | py | Python | astropy/utils/setup_package.py | jayvdb/astropy | bc6d8f106dd5b60bf57a8e6e29c4e2ae2178991f | [
"BSD-3-Clause"
] | 445 | 2019-01-26T13:50:26.000Z | 2022-03-18T05:17:38.000Z | astropy/utils/setup_package.py | jayvdb/astropy | bc6d8f106dd5b60bf57a8e6e29c4e2ae2178991f | [
"BSD-3-Clause"
] | 242 | 2019-01-29T15:48:27.000Z | 2022-03-31T22:09:21.000Z | astropy/utils/setup_package.py | jayvdb/astropy | bc6d8f106dd5b60bf57a8e6e29c4e2ae2178991f | [
"BSD-3-Clause"
] | 31 | 2019-03-10T09:51:27.000Z | 2022-02-14T23:11:12.000Z | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from distutils.core import Extension
from os.path import dirname, join, relpath
ASTROPY_UTILS_ROOT = dirname(__file__)
def get_extensions():
return [
Extension('astropy.utils._compiler',
[relpath(join(ASTROPY_UTILS_ROOT, 'src', 'compiler.c'))])
]
| 24.928571 | 75 | 0.69341 |
87cfe878e415773590706e787c65e21aacfa1148 | 978 | py | Python | blasteroids/lib/client_messages/welcome.py | smallarmyofnerds/blasteroids | 082bc010ed6d2ec6098a8848edcbee433a5b6961 | [
"MIT"
] | null | null | null | blasteroids/lib/client_messages/welcome.py | smallarmyofnerds/blasteroids | 082bc010ed6d2ec6098a8848edcbee433a5b6961 | [
"MIT"
] | 7 | 2021-09-04T18:49:13.000Z | 2021-09-05T19:37:39.000Z | blasteroids/lib/client_messages/welcome.py | smallarmyofnerds/blasteroids | 082bc010ed6d2ec6098a8848edcbee433a5b6961 | [
"MIT"
] | null | null | null | from .message import Message
from ..constants import WELCOME_MESSAGE_ID
class WelcomeMessage(Message):
def __init__(self, world_width, world_height, boundary):
super(WelcomeMessage, self).__init__(WELCOME_MESSAGE_ID)
self.world_width = world_width
self.world_height = world_height
self.boundary = boundary
def __repr__(self):
return f'{super(WelcomeMessage, self).__repr__()}:{self.world_width}:{self.world_height}'
def encode(self, message_encoder):
super(WelcomeMessage, self).encode(message_encoder)
message_encoder.push_short(self.world_width)
message_encoder.push_short(self.world_height)
message_encoder.push_short(self.boundary)
def decode_body(encoded_message):
world_width = encoded_message.pop_short()
world_height = encoded_message.pop_short()
boundary = encoded_message.pop_short()
return WelcomeMessage(world_width, world_height, boundary)
| 37.615385 | 97 | 0.729039 |
059aeac8dbd988fc40efa7fa5e9ea760d8deab56 | 1,110 | py | Python | pandas-iris/{{ cookiecutter.repo_name }}/src/tests/test_run.py | kedro-org/kedro-starters | 6f22cc804bdc9ca7e800a950de4cade6d7b942af | [
"Apache-2.0"
] | 5 | 2022-01-21T07:33:52.000Z | 2022-03-06T11:32:10.000Z | pandas-iris/{{ cookiecutter.repo_name }}/src/tests/test_run.py | kedro-org/kedro-starters | 6f22cc804bdc9ca7e800a950de4cade6d7b942af | [
"Apache-2.0"
] | 15 | 2022-01-17T09:32:18.000Z | 2022-03-30T17:22:35.000Z | pandas-iris/{{ cookiecutter.repo_name }}/src/tests/test_run.py | kedro-org/kedro-starters | 6f22cc804bdc9ca7e800a950de4cade6d7b942af | [
"Apache-2.0"
] | 7 | 2022-01-10T10:03:54.000Z | 2022-03-28T07:27:00.000Z | """
This module contains an example test.
Tests should be placed in ``src/tests``, in modules that mirror your
project's structure, and in files named test_*.py. They are simply functions
named ``test_*`` which test a unit of logic.
To run the tests, run ``kedro test`` from the project root directory.
"""
from pathlib import Path
import pytest
from kedro.config import ConfigLoader
from kedro.framework.context import KedroContext
from kedro.framework.hooks import _create_hook_manager
@pytest.fixture
def config_loader():
    """Provide a ConfigLoader rooted at the current working directory."""
    conf_source = str(Path.cwd())
    return ConfigLoader(conf_source=conf_source)
@pytest.fixture
def project_context(config_loader):
    """Build a KedroContext for the templated package using config_loader."""
    context_kwargs = dict(
        package_name="{{ cookiecutter.python_package }}",
        project_path=Path.cwd(),
        config_loader=config_loader,
        hook_manager=_create_hook_manager(),
    )
    return KedroContext(**context_kwargs)
# The tests below are here for the demonstration purpose
# and should be replaced with the ones testing the project
# functionality
class TestProjectContext:
    """Smoke tests for the project's KedroContext."""

    def test_project_path(self, project_context):
        """The context must point at the directory the tests run from."""
        expected = Path.cwd()
        assert project_context.project_path == expected
3b561d32cb3b6724e6935e6978791bccbd4f9edb | 3,176 | py | Python | cli/setup.py | hanwen-pcluste/aws-parallelcluster | 9bc1fe4df528da0e81393e4eee9c736b52db54d4 | [
"Apache-2.0"
] | null | null | null | cli/setup.py | hanwen-pcluste/aws-parallelcluster | 9bc1fe4df528da0e81393e4eee9c736b52db54d4 | [
"Apache-2.0"
] | 34 | 2022-02-03T03:14:37.000Z | 2022-03-28T03:32:22.000Z | cli/setup.py | hanwen-pcluste/aws-parallelcluster | 9bc1fe4df528da0e81393e4eee9c736b52db54d4 | [
"Apache-2.0"
] | null | null | null | # Copyright 2013-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
# with the License. A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "LICENSE.txt" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, express or implied. See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
from setuptools import find_packages, setup
def readme():
    """Read the README file and use it as long description."""
    readme_path = os.path.join(os.path.dirname(__file__), "README")
    with open(readme_path) as f:
        return f.read()
# Package version, also published as the CLI version.
VERSION = "2.10.3"
REQUIRES = [
    "setuptools",
    "boto3>=1.16.14",
    # NOTE(review): setup() below declares python_requires=">=3.6", so the
    # "Python 3 and minor <= 4" sides of these conditionals are dead at
    # install time — confirm and simplify.
    "tabulate==0.8.5" if sys.version_info.major == 3 and sys.version_info.minor <= 4 else "tabulate>=0.8.2,<0.8.10",
    "ipaddress>=1.0.22",
    "PyYAML==5.2" if sys.version_info.major == 3 and sys.version_info.minor <= 4 else "PyYAML>=5.3.1",
    "jinja2==2.10.1" if sys.version_info.major == 3 and sys.version_info.minor <= 4 else "jinja2>=2.11.0",
]
# NOTE(review): dead branch under python_requires ">=3.6" — Python 2 backports.
if sys.version_info[0] == 2:
    REQUIRES.append("enum34>=1.1.6")
    REQUIRES.append("configparser>=3.5.0,<=3.8.1")
# Package metadata and console entry points for the pcluster / awsbatch CLIs.
setup(
    name="aws-parallelcluster",
    version=VERSION,
    author="Amazon Web Services",
    description="AWS ParallelCluster is an AWS supported Open Source cluster management tool to deploy "
    "and manage HPC clusters in the AWS cloud.",
    url="https://github.com/aws/aws-parallelcluster",
    license="Apache License 2.0",
    # All importable code lives under src/.
    package_dir={"": "src"},
    packages=find_packages("src"),
    python_requires=">=3.6",
    install_requires=REQUIRES,
    entry_points={
        # Each console script maps a command name to a module:function.
        "console_scripts": [
            "pcluster = pcluster.cli:main",
            "pcluster-config = pcluster_config.cli:main",
            "awsbqueues = awsbatch.awsbqueues:main",
            "awsbhosts = awsbatch.awsbhosts:main",
            "awsbstat = awsbatch.awsbstat:main",
            "awsbkill = awsbatch.awsbkill:main",
            "awsbsub = awsbatch.awsbsub:main",
            "awsbout = awsbatch.awsbout:main",
        ]
    },
    include_package_data=True,
    zip_safe=False,
    package_data={"": ["src/examples/config"]},
    long_description=readme(),
    classifiers=[
        "Development Status :: 5 - Production/Stable",
        "Environment :: Console",
        "Programming Language :: Python",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3.9",
        "Topic :: Scientific/Engineering",
        "License :: OSI Approved :: Apache Software License",
    ],
    project_urls={
        "Changelog": "https://github.com/aws/aws-parallelcluster/blob/develop/CHANGELOG.md",
        "Issue Tracker": "https://github.com/aws/aws-parallelcluster/issues",
        "Documentation": "https://docs.aws.amazon.com/parallelcluster/",
    },
)
| 37.809524 | 119 | 0.649874 |
74ae0981104d59638c188fd5c170dc14a8163916 | 519 | py | Python | profiles_api/urls.py | geraldini/profiles-rest-api | 104fb06da1af1de659bc0e85e802626775dd4253 | [
"MIT"
] | null | null | null | profiles_api/urls.py | geraldini/profiles-rest-api | 104fb06da1af1de659bc0e85e802626775dd4253 | [
"MIT"
] | null | null | null | profiles_api/urls.py | geraldini/profiles-rest-api | 104fb06da1af1de659bc0e85e802626775dd4253 | [
"MIT"
] | null | null | null | from django.urls import include
from django.urls import path
from rest_framework.routers import DefaultRouter
from profiles_api import views
# DRF router auto-generates list/detail routes for each registered ViewSet.
router = DefaultRouter()
# NOTE(review): ``base_name`` was renamed to ``basename`` in DRF 3.9 and the
# old alias was later removed — confirm the pinned DRF version accepts it.
router.register('hello-viewset', views.HelloViewSet, base_name='hello-viewset')
# These registrations omit base_name — presumably the ViewSets define a
# ``queryset`` the router derives it from; verify in profiles_api.views.
router.register('profile', views.UserProfileViewSet)
router.register('feed', views.UserProfileFeedViewSet)

urlpatterns = [
    path('hello-view', views.HelloAPIView.as_view()),
    path('login/', views.UserLoginApiView.as_view()),
    # Mount all router-generated routes at the app root.
    path('', include(router.urls)),
]
| 28.833333 | 79 | 0.770713 |
520ff401a7000cee0a99e69951e7d0e909021823 | 1,132 | py | Python | tests/flytekit/loadtests/dynamic_job.py | slai/flytekit | 9d73d096b748d263a638e6865d15db4880845305 | [
"Apache-2.0"
] | null | null | null | tests/flytekit/loadtests/dynamic_job.py | slai/flytekit | 9d73d096b748d263a638e6865d15db4880845305 | [
"Apache-2.0"
] | 2 | 2021-06-26T04:32:43.000Z | 2021-07-14T04:47:52.000Z | tests/flytekit/loadtests/dynamic_job.py | slai/flytekit | 9d73d096b748d263a638e6865d15db4880845305 | [
"Apache-2.0"
] | null | null | null | import time
from six.moves import range
from flytekit.sdk.tasks import dynamic_task, inputs, outputs, python_task
from flytekit.sdk.types import Types
from flytekit.sdk.workflow import Input, workflow_class
@inputs(value1=Types.Integer)
@outputs(out=Types.Integer)
@python_task(cpu_request="1", cpu_limit="1", memory_request="5G")
def dynamic_sub_task(workflow_parameters, value1, out):
    """Load-test leaf task: runs ~11 minutes, then emits value1 * 2."""
    # Deliberately long-running (11 * 60 one-second sleeps) to generate load.
    for i in range(11 * 60):
        print("This is load test task. I have been running for {} seconds.".format(i))
        time.sleep(1)
    output = value1 * 2
    print("Output: {}".format(output))
    out.set(output)
@inputs(tasks_count=Types.Integer)
@outputs(out=[Types.Integer])
@dynamic_task(cache_version="1")
def dynamic_task(workflow_parameters, tasks_count, out):
    """Fan out ``tasks_count`` dynamic_sub_task runs and collect their outputs."""
    res = []
    for i in range(0, tasks_count):
        # Yielding the task registers it with the dynamic-job executor.
        task = dynamic_sub_task(value1=i)
        yield task
        res.append(task.outputs.out)
    # Define how to set the final result of the task
    out.set(res)
@workflow_class
class FlyteDJOLoadTestWorkflow(object):
    """Dynamic-job load-test workflow: fans out ``tasks_count`` sub-tasks."""
    # Number of dynamic_sub_task copies to launch.
    tasks_count = Input(Types.Integer)
    dj = dynamic_task(tasks_count=tasks_count)
| 27.609756 | 86 | 0.719081 |
be86dd9e1c30547a9cfea3c021e5ca80ec9a5d5e | 609 | py | Python | rest-api-example-server/note_model.py | doctor-blue/retrofit-tutorial | 2f067d2b94b401cf6f2edd30aced0fc692405526 | [
"MIT"
] | null | null | null | rest-api-example-server/note_model.py | doctor-blue/retrofit-tutorial | 2f067d2b94b401cf6f2edd30aced0fc692405526 | [
"MIT"
] | null | null | null | rest-api-example-server/note_model.py | doctor-blue/retrofit-tutorial | 2f067d2b94b401cf6f2edd30aced0fc692405526 | [
"MIT"
] | null | null | null | class NoteModel:
def __init__(
self, note_id=0, title="", description=""
):
self.title = title
self.description = description
self.id = note_id
def set_data(self, data):
self.title = data["title"]
self.description = data["description"]
return self
def to_json(self, ip_address):
data = {
"id": self.id,
"title": self.title,
"description": self.description,
}
return data
def createNoteModel(note_id, data):
    """Factory: build a NoteModel from an id and a payload dict."""
    title = data["title"]
    description = data["description"]
    return NoteModel(note_id, title, description)
| 24.36 | 65 | 0.56486 |
8c8943d7324560263fb5cfa8f6c5938b8e2c37d9 | 10,710 | py | Python | sdks/python/apache_beam/dataframe/transforms.py | shitanshu-google/beam | 9cd959f61d377874ee1839c2de4bb8f65a948ecc | [
"Apache-2.0"
] | 1 | 2019-01-16T10:14:33.000Z | 2019-01-16T10:14:33.000Z | sdks/python/apache_beam/dataframe/transforms.py | shitanshu-google/beam | 9cd959f61d377874ee1839c2de4bb8f65a948ecc | [
"Apache-2.0"
] | 2 | 2018-10-17T23:20:08.000Z | 2019-09-25T02:30:43.000Z | sdks/python/apache_beam/dataframe/transforms.py | shitanshu-google/beam | 9cd959f61d377874ee1839c2de4bb8f65a948ecc | [
"Apache-2.0"
] | 1 | 2018-12-18T03:44:31.000Z | 2018-12-18T03:44:31.000Z | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from typing import TYPE_CHECKING
from typing import Any
from typing import Dict
from typing import List
from typing import Mapping
from typing import Tuple
from typing import TypeVar
from typing import Union
import pandas as pd
import apache_beam as beam
from apache_beam import transforms
from apache_beam.dataframe import expressions
from apache_beam.dataframe import frames # pylint: disable=unused-import
if TYPE_CHECKING:
# pylint: disable=ungrouped-imports
from apache_beam.pvalue import PCollection
T = TypeVar('T')
class DataframeTransform(transforms.PTransform):
  """A PTransform for applying function that takes and returns dataframes
  to one or more PCollections.

  For example, if pcoll is a PCollection of dataframes, one could write::

      pcoll | DataframeTransform(lambda df: df.group_by('key').sum(), proxy=...)

  To pass multiple PCollections, pass a tuple of PCollections which will be
  passed to the callable as positional arguments, or a dictionary of
  PCollections, in which case they will be passed as keyword arguments.
  """
  def __init__(self, func, proxy):
    # func: callable mapping deferred dataframes to deferred dataframes.
    # proxy: empty dataframe(s) with the schema of the input, mirroring the
    # (possibly nested) structure of the inputs passed at expand time.
    self._func = func
    self._proxy = proxy

  def expand(self, input_pcolls):
    # Avoid circular import.
    from apache_beam.dataframe import convert

    # Convert inputs to a flat dict keyed by their path in the nested input.
    input_dict = _flatten(input_pcolls)  # type: Dict[Any, PCollection]
    proxies = _flatten(self._proxy)
    input_frames = {
        k: convert.to_dataframe(pc, proxies[k])
        for k, pc in input_dict.items()
    }  # type: Dict[Any, DeferredFrame]

    # Apply the function, rebuilding the original nesting so the callable
    # receives the same shape the caller passed in.
    frames_input = _substitute(input_pcolls, input_frames)
    if isinstance(frames_input, dict):
      result_frames = self._func(**frames_input)
    elif isinstance(frames_input, tuple):
      result_frames = self._func(*frames_input)
    else:
      result_frames = self._func(frames_input)

    # Compute results as a tuple (to_pcollection evaluates them together).
    result_frames_dict = _flatten(result_frames)
    keys = list(result_frames_dict.keys())
    result_frames_tuple = tuple(result_frames_dict[key] for key in keys)
    result_pcolls_tuple = convert.to_pcollection(
        *result_frames_tuple, label='Eval', always_return_tuple=True)

    # Convert back to the structure returned by self._func.
    result_pcolls_dict = dict(zip(keys, result_pcolls_tuple))
    return _substitute(result_frames, result_pcolls_dict)
class _DataframeExpressionsTransform(transforms.PTransform):
  """Evaluates a set of deferred-dataframe expressions over input PCollections.

  Expressions are fused into "stages" of operations that can execute together,
  re-partitioning the data by index only where an expression requires it.
  """
  def __init__(self, outputs):
    # Mapping of user-facing keys to the expressions to evaluate.
    self._outputs = outputs

  def expand(self, inputs):
    return self._apply_deferred_ops(inputs, self._outputs)

  def _apply_deferred_ops(
      self,
      inputs,  # type: Dict[expressions.Expression, PCollection]
      outputs,  # type: Dict[Any, expressions.Expression]
  ):  # -> Dict[Any, PCollection]
    """Construct a Beam graph that evaluates a set of expressions on a set of
    input PCollections.

    :param inputs: A mapping of placeholder expressions to PCollections.
    :param outputs: A mapping of keys to expressions defined in terms of the
        placeholders of inputs.

    Returns a dictionary whose keys are those of outputs, and whose values are
    PCollections corresponding to the values of outputs evaluated at the
    values of inputs.

    Logically, `_apply_deferred_ops({x: a, y: b}, {f: F(x, y), g: G(x, y)})`
    returns `{f: F(a, b), g: G(a, b)}`.
    """
    class ComputeStage(beam.PTransform):
      """A helper transform that computes a single stage of operations.
      """
      def __init__(self, stage):
        self.stage = stage

      def default_label(self):
        return '%s:%s' % (self.stage.ops, id(self))

      def expand(self, pcolls):
        if self.stage.is_grouping:
          # Arrange such that partitioned_pcoll is properly partitioned:
          # shard every input by index hash, then co-group the shards.
          input_pcolls = {
              tag: pcoll | 'Flat%s' % tag >> beam.FlatMap(partition_by_index)
              for (tag, pcoll) in pcolls.items()
          }
          partitioned_pcoll = input_pcolls | beam.CoGroupByKey() | beam.MapTuple(
              lambda _, inputs: {tag: pd.concat(vs) for tag, vs in inputs.items()})
        else:
          # Already partitioned, or no partitioning needed.
          (k, pcoll), = pcolls.items()
          partitioned_pcoll = pcoll | beam.Map(lambda df: {k: df})

        # Actually evaluate the expressions: each partition becomes a Session
        # binding the stage's input expressions to concrete dataframes.
        def evaluate(partition, stage=self.stage):
          session = expressions.Session(
              {expr: partition[expr._id] for expr in stage.inputs})
          for expr in stage.outputs:
            yield beam.pvalue.TaggedOutput(expr._id, expr.evaluate_at(session))

        return partitioned_pcoll | beam.FlatMap(evaluate).with_outputs()

    class Stage(object):
      """Used to build up a set of operations that can be fused together.
      """
      def __init__(self, inputs, is_grouping):
        self.inputs = set(inputs)
        # Multiple inputs always force a (re-)partitioning by index.
        self.is_grouping = is_grouping or len(self.inputs) > 1
        self.ops = []
        self.outputs = set()

    # First define some helper functions.

    def output_is_partitioned_by_index(expr, stage):
      # True iff evaluating expr within this stage yields data that is
      # partitioned by index (either because the stage groups, or because
      # partitioning is preserved transitively through expr's arguments).
      if expr in stage.inputs:
        return stage.is_grouping
      elif expr.preserves_partition_by_index():
        if expr.requires_partition_by_index():
          return True
        else:
          return all(
              output_is_partitioned_by_index(arg, stage) for arg in expr.args())
      else:
        return False

    def partition_by_index(df, levels=None, parts=10):
      # Shard a dataframe into `parts` pieces keyed by a hash of its index.
      if levels is None:
        levels = list(range(df.index.nlevels))
      elif isinstance(levels, (int, str)):
        levels = [levels]
      hashes = sum(
          pd.util.hash_array(df.index.get_level_values(level))
          for level in levels)
      for key in range(parts):
        yield key, df[hashes % parts == key]

    def common_stages(stage_lists):
      # Set intersection, with a preference for earlier items in the list.
      if stage_lists:
        for stage in stage_lists[0]:
          if all(stage in other for other in stage_lists[1:]):
            yield stage

    @memoize
    def expr_to_stages(expr):
      assert expr not in inputs
      # First attempt to compute this expression as part of an existing stage,
      # if possible.
      #
      # If expr does not require partitioning, just grab any stage, else grab
      # the first stage where all of expr's inputs are partitioned as required.
      # In either case, use the first such stage because earlier stages are
      # closer to the inputs (have fewer intermediate stages).
      for stage in common_stages([expr_to_stages(arg) for arg in expr.args()
                                  if arg not in inputs]):
        if (not expr.requires_partition_by_index() or
            all(output_is_partitioned_by_index(arg, stage)
                for arg in expr.args())):
          break
      else:
        # Otherwise, compute this expression as part of a new stage.
        stage = Stage(expr.args(), expr.requires_partition_by_index())
        for arg in expr.args():
          if arg not in inputs:
            # For each non-input argument, declare that it is also available in
            # this new stage.
            expr_to_stages(arg).append(stage)
            # It also must be declared as an output of the producing stage.
            expr_to_stage(arg).outputs.add(arg)
      stage.ops.append(expr)
      # This is a list as given expression may be available in many stages.
      return [stage]

    def expr_to_stage(expr):
      # Any will do; the first requires the fewest intermediate stages.
      return expr_to_stages(expr)[0]

    # Ensure each output is computed.
    for expr in outputs.values():
      if expr not in inputs:
        expr_to_stage(expr).outputs.add(expr)

    @memoize
    def stage_to_result(stage):
      return {expr._id: expr_to_pcoll(expr)
              for expr in stage.inputs} | ComputeStage(stage)

    @memoize
    def expr_to_pcoll(expr):
      if expr in inputs:
        return inputs[expr]
      else:
        return stage_to_result(expr_to_stage(expr))[expr._id]

    # Now we can compute and return the result.
    return {k: expr_to_pcoll(expr) for k, expr in outputs.items()}
def memoize(f):
  """Cache f's results keyed by its positional arguments.

  The cache is unbounded and arguments must be hashable; unlike
  functools.lru_cache, keyword arguments are not supported.
  """
  # Local import keeps this fix self-contained (module has no functools import).
  from functools import wraps

  cache = {}

  # FIX: wrap with functools.wraps so the memoized function keeps the
  # original's __name__/__doc__ (useful in labels and debugging).
  @wraps(f)
  def wrapper(*args):
    if args not in cache:
      cache[args] = f(*args)
    return cache[args]

  return wrapper
def _dict_union(dicts):
result = {}
for d in dicts:
result.update(d)
return result
def _flatten(valueish, root=()):
  """Flatten a nested structure of dicts, tuples and lists into a flat dict.

  Keys of the result are the "paths" to the leaves; values are the leaves.
  For example ``{a: x, b: (y, z)}`` becomes
  ``{(a,): x, (b, 0): y, (b, 1): z}``.
  """
  if isinstance(valueish, dict):
    parts = [_flatten(sub, root + (key, )) for key, sub in valueish.items()]
  elif isinstance(valueish, (tuple, list)):
    parts = [_flatten(sub, root + (ix, )) for ix, sub in enumerate(valueish)]
  else:
    # A leaf: its path is simply the accumulated root.
    return {root: valueish}
  flat = {}
  for part in parts:
    flat.update(part)
  return flat
def _substitute(valueish, replacements, root=()):
"""Substitutes the values in valueish with those in replacements where the
keys are as in _flatten.
For example,
```
_substitute(
{a: x, b: (y, z)},
{(a,): X, (b, 0): Y, (b, 1): Z})
```
returns `{a: X, b: (Y, Z)}`.
"""
if isinstance(valueish, dict):
return type(valueish)({
k: _substitute(v, replacements, root + (k, ))
for (k, v) in valueish.items()
})
elif isinstance(valueish, (tuple, list)):
return type(valueish)((
_substitute(v, replacements, root + (ix, ))
for (ix, v) in enumerate(valueish)))
else:
return replacements[root]
| 34.326923 | 80 | 0.660971 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.