| max_stars_repo_path (string, 3-269 chars) | max_stars_repo_name (string, 4-119 chars) | max_stars_count (int64, 0-191k) | id (string, 1-7 chars) | content (string, 6-1.05M chars) | score (float64, 0.23-5.13) | int_score (int64, 0-5) |
|---|---|---|---|---|---|---|
| apps/accounts/migrations/0005_organisatons_for_user_changes.py | developersociety/commonslibrary | 4 | 12779151 |
<reponame>developersociety/commonslibrary<filename>apps/accounts/migrations/0005_organisatons_for_user_changes.py
# -*- coding: utf-8 -*-
# Generated by Django 1.11.10 on 2018-03-09 12:12
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('directory', '0001_initial'),
('accounts', '0004_organisations_remove_null'),
]
operations = [
migrations.RemoveField(
model_name='user',
name='organisations',
),
migrations.AddField(
model_name='user',
name='approved_organisations',
field=models.ManyToManyField(blank=True, related_name='approved_organisations', to='directory.Organisation'),
),
migrations.AddField(
model_name='user',
name='chosen_organisations',
field=models.ManyToManyField(blank=True, help_text='Organisations chosen on the registration.', to='directory.Organisation'),
),
]
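For reference, a minimal sketch (not taken from the repository) of the user model fields this migration leaves in place; the custom User base class is an assumption, while the field definitions mirror the AddField operations above:

from django.contrib.auth.models import AbstractUser
from django.db import models

class User(AbstractUser):  # assumption: the project defines a custom user model
    approved_organisations = models.ManyToManyField(
        'directory.Organisation', blank=True, related_name='approved_organisations')
    chosen_organisations = models.ManyToManyField(
        'directory.Organisation', blank=True,
        help_text='Organisations chosen on the registration.')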
| 1.539063 | 2 |
| scripts/compile.py | martinphellwig/brython_wf | 0 | 12779152 |
<reponame>martinphellwig/brython_wf<filename>scripts/compile.py
## execute this file via node.js
# $> nodejs node_bridge.js compile.py
#
# Author: <NAME>
# Date: 04/19/2013
# License: MIT
#
# This file can be used to compile python code to javascript code
# which can be used with brython.
import os
#import dis
#fixme os.path.join doesn't work (ie, import posixpath as path, does not work)
def os_path_join(a,b):
return "%s/%s" % (a,b)
class FileIO:
def __init__(self, filename, mode):
self._filename=filename
self._mode=mode
self._fs=JSObject(fs)
def read(self):
return self._fs.readFileSync(self._filename, 'utf8')
def write(self, data):
return self._fs.writeFileSync(self._filename, data, 'utf8')
def close(self):
pass
def compile_file(root, file):
print("compiling %s" % os_path_join(root, file))
_fp=FileIO(os_path_join(root, file), 'r')
_src=_fp.read()
_fp.close()
_js=__BRYTHON__.compile_python(_src,file);
if _js is not None:
_fp1=FileIO(os_path_join(root, file.replace('.py', '.pyj')), 'w')
_fp1.write(_js)
_fp1.close()
else:
print("error compiling %s" % os_path_join(root, file))
#fixme, todo: modify to os.walk once scope issue is fixed..
#for _root, _dirs, _files in os.walk('./src'):
_files=['errno.py', 'local_storage.py', 'keyword.py', 'os.py', 'datetime.py',
'sys.py', 'traceback.py', 'string.py', 'dis.py', 'pydom.py']
_files+=['_random.py', 'random.py', 'heapq.py', 'collections.py']
#_files+=['_sre.py', 're.py', 'sre_compile.py', 'sre_constants.py']
_root="../src/Lib"
for _file in _files:
compile_file(_root, _file)
| 2.5625 | 3 |
| week7/lecture13/test2.py | nobodysshadow/edX_MITx_6.00.1x | 622 | 12779153 |
<filename>week7/lecture13/test2.py
# -*- coding: utf-8 -*-
"""
Created on Sun Jun 12 07:17:17 2016
@author: ericgrimson
"""
#import numpy as np
import pylab as plt
mySamples = []
myLinear = []
myQuadratic = []
myCubic = []
myExponential = []
for i in range(0, 30):
mySamples.append(i)
myLinear.append(i)
myQuadratic.append(i**2)
myCubic.append(i**3)
myExponential.append(1.5**i)
# first trial
plt.plot(mySamples, myLinear)
plt.plot(mySamples, myQuadratic)
plt.plot(mySamples, myCubic)
plt.plot(mySamples, myExponential)
# second trial
#plt.figure('lin')
#plt.plot(mySamples, myLinear)
#plt.figure('quad')
#plt.plot(mySamples, myQuadratic)
#plt.figure('cube')
#plt.plot(mySamples, myCubic)
#plt.figure('expo')
#plt.plot(mySamples, myExponential)
# third trial
#plt.figure('lin')
#plt.xlabel('sample points')
#plt.ylabel('linear function')
#plt.plot(mySamples, myLinear)
#plt.figure('quad')
#plt.plot(mySamples, myQuadratic)
#plt.figure('cube')
#plt.plot(mySamples, myCubic)
#plt.figure('expo')
#plt.plot(mySamples, myExponential)
#plt.figure('quad')
#plt.ylabel('quadratic function')
# fourth trial
#plt.figure('lin')
#plt.plot(mySamples, myLinear)
#plt.figure('quad')
#plt.plot(mySamples, myQuadratic)
#plt.figure('cube')
#plt.plot(mySamples, myCubic)
#plt.figure('expo')
#plt.plot(mySamples, myExponential)
#plt.figure('lin')
#plt.title('Linear')
#plt.figure('quad')
#plt.title('Quadratic')
#plt.figure('cube')
#plt.title('Cubic')
#plt.figure('expo')
#plt.title('Exponential')
# fifth trial
#plt.figure('lin')
#plt.clf()
#plt.plot(mySamples, myLinear)
#plt.figure('quad')
#plt.clf()
#plt.plot(mySamples, myQuadratic)
#plt.figure('cube')
#plt.clf()
#plt.plot(mySamples, myCubic)
#plt.figure('expo')
#plt.clf()
#plt.plot(mySamples, myExponential)
#plt.figure('lin')
#plt.title('Linear')
#plt.figure('quad')
#plt.title('Quadratic')
#plt.figure('cube')
#plt.title('Cubic')
#plt.figure('expo')
#plt.title('Exponential')
# sixth trial
#plt.figure('lin')
#plt.clf()
#plt.ylim(0,1000)
#plt.plot(mySamples, myLinear)
#plt.figure('quad')
#plt.clf()
#plt.ylim(0,1000)
#plt.plot(mySamples, myQuadratic)
#plt.figure('lin')
#plt.title('Linear')
#plt.figure('quad')
#plt.title('Quadratic')
# seventh trial
#plt.figure('lin quad')
#plt.clf()
#plt.plot(mySamples, myLinear)
#plt.plot(mySamples, myQuadratic)
#plt.figure('cube exp')
#plt.clf()
#plt.plot(mySamples, myCubic)
#plt.plot(mySamples, myExponential)
#plt.figure('lin quad')
#plt.title('Linear vs. Quadratic')
#plt.figure('cube exp')
#plt.title('Cubic vs. Exponential')
# eighth trial
#plt.figure('lin quad')
#plt.clf()
#plt.plot(mySamples, myLinear, label = 'linear')
#plt.plot(mySamples, myQuadratic, label = 'quadratic')
#plt.legend(loc = 'upper left')
#plt.title('Linear vs. Quadratic')
#plt.figure('cube exp')
#plt.clf()
#plt.plot(mySamples, myCubic, label = 'cubic')
#plt.plot(mySamples, myExponential, label = 'exponential')
#plt.legend()
#plt.title('Cubic vs. Exponential')
# ninth trial
#plt.figure('lin quad')
#plt.clf()
#plt.plot(mySamples, myLinear, 'b-', label = 'linear')
#plt.plot(mySamples, myQuadratic,'ro', label = 'quadratic')
#plt.legend(loc = 'upper left')
#plt.title('Linear vs. Quadratic')
#plt.figure('cube exp')
#plt.clf()
#plt.plot(mySamples, myCubic, 'g^', label = 'cubic')
#plt.plot(mySamples, myExponential, 'r--',label = 'exponential')
#plt.legend()
#plt.title('Cubic vs. Exponential')
# tenth trial
#plt.figure('lin quad')
#plt.clf()
#plt.plot(mySamples, myLinear, 'b-', label = 'linear', linewidth = 2.0)
#plt.plot(mySamples, myQuadratic,'r', label = 'quadratic', linewidth = 3.0)
#plt.legend(loc = 'upper left')
#plt.title('Linear vs. Quadratic')
#plt.figure('cube exp')
#plt.clf()
#plt.plot(mySamples, myCubic, 'g--', label = 'cubic', linewidth = 4.0)
#plt.plot(mySamples, myExponential, 'r',label = 'exponential', linewidth = 5.0)
#plt.legend()
#plt.title('Cubic vs. Exponential')
# eleventh trial
#plt.figure('lin quad')
#plt.clf()
#plt.subplot(211)
#plt.ylim(0, 900)
#plt.plot(mySamples, myLinear, 'b-', label = 'linear', linewidth = 2.0)
#plt.subplot(212)
#plt.ylim(0, 900)
#plt.plot(mySamples, myQuadratic,'r', label = 'quadratic', linewidth = 3.0)
#plt.legend(loc = 'upper left')
#plt.title('Linear vs. Quadratic')
#plt.figure('cube exp')
#plt.clf()
#plt.subplot(121)
#plt.ylim(0, 140000)
#plt.plot(mySamples, myCubic, 'g--', label = 'cubic', linewidth = 4.0)
#plt.subplot(122)
#plt.ylim(0, 140000)
#plt.plot(mySamples, myExponential, 'r',label = 'exponential', linewidth = 5.0)
#plt.legend()
#plt.title('Cubic vs. Exponential')
# twelfth trial
#plt.figure('cube exp log')
#plt.clf()
#plt.plot(mySamples, myCubic, 'g--', label = 'cubic', linewidth = 2.0)
#plt.plot(mySamples, myExponential, 'r',label = 'exponential', linewidth = 4.0)
#plt.yscale('log')
#plt.legend()
#plt.title('Cubic vs. Exponential')
#plt.figure('cube exp linear')
#plt.clf()
#plt.plot(mySamples, myCubic, 'g--', label = 'cubic', linewidth = 2.0)
#plt.plot(mySamples, myExponential, 'r',label = 'exponential', linewidth = 4.0)
#plt.legend()
#plt.title('Cubic vs. Exponential')
| 2.796875 | 3 |
| cicd/fitnesse/fitnesseSettings.py | consag/build-and-deploy-informatica | 4 | 12779154 |
# MIT License
#
# Copyright (c) 2019 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
##
# fitnesseSettings
# @Since: 23-OCT-2019
# @Author: <NAME>
# @Version: 20191023.0 - JBE - Initial
##
import cicd.fitnesse.fitnesseConstants as constants
import supporting, os, logging
import supporting.generalSettings as generalsettings
from supporting.generalSettings import completePath
logger = logging.getLogger(__name__)
sourcefitnessedir = constants.DEFAULT_SOURCE_FITNESSEDIR
targetfitnessedir = constants.DEFAULT_TARGET_FITNESSEDIR
def getfitnesseenvvars():
thisproc="getfitnesseenvvars"
global fitnessedeploylist, sourcefitnessedir, targetfitnessedir
supporting.log(logger, logging.DEBUG, thisproc, 'started')
fitnessedeploylist = completePath(os.environ.get(constants.varFitNesseDeployList, constants.DEFAULT_FITNESSE_DEPLOYLIST), generalsettings.sourceDir)
sourcefitnessedir = completePath(os.environ.get(constants.varSourceFitNesseDir, constants.DEFAULT_SOURCE_FITNESSEDIR), generalsettings.sourceDir)
targetfitnessedir = completePath(os.environ.get(constants.varTargetFitNesseDir, constants.DEFAULT_TARGET_FITNESSEDIR), generalsettings.sourceDir)
def outfitnesseenvvars():
thisproc = "outfitnesseenvvars"
supporting.log(logger, logging.INFO, thisproc, 'fitnessedeploylist is >' + fitnessedeploylist + "<.")
supporting.log(logger, logging.INFO, thisproc, 'sourcefitnessedir is >' + sourcefitnessedir +"<.")
supporting.log(logger, logging.INFO, thisproc, 'targetfitnessedir is >' + targetfitnessedir +"<.")
| 1.445313 | 1 |
| evaluate.py | existentmember7/detectron2_revise | 0 | 12779155 |
import argparse
import time
import os
import numpy as np
import json
import cv2
import random
import torch
from ACID_test import test
# Setup detectron2 logger
import detectron2
from detectron2.utils.logger import setup_logger
setup_logger()
# import some common detectron2 utilities
from detectron2.model_zoo import model_zoo
from detectron2.engine import DefaultTrainer, DefaultPredictor
from detectron2.config import get_cfg
from detectron2.utils.visualizer import Visualizer, ColorMode
from detectron2.data import MetadataCatalog, DatasetCatalog, build_detection_test_loader
from detectron2.data.datasets import register_coco_instances
from detectron2.structures import BoxMode
from detectron2.evaluation import COCOEvaluator, inference_on_dataset
from detectron2.modeling import build_model
parser = argparse.ArgumentParser(description='ACID_Object_Detection_Train')
parser.add_argument('--dataset', default='ACID_dataset', type=str, help='name of dataset')
parser.add_argument('--file', default='/home/hteam/Documents/hao/Research/Dataset/ACID/ACID_train_augmentation', type=str, help='data file')
parser.add_argument('--label', default='/home/hteam/Documents/hao/Research/Dataset/ACID/ACID_train_augmentation.json', type=str, help='COCO format json')
parser.add_argument('--test_dataset', default='ACID_testing', type=str, help='name of testing dataset')
parser.add_argument('--test_file', default='/home/hteam/Documents/hao/Research/Dataset/ACID/ACID_testing', type=str, help='testing data file')
parser.add_argument('--test_label', default='/home/hteam/Documents/hao/Research/Dataset/ACID/ACID_test.json', type=str, help='testing json')
parser.add_argument('--model', default='COCO-Detection/faster_rcnn_X_101_32x8d_FPN_3x.yaml', type=str, help='model')
parser.add_argument('--weight', default='./output/model_final.pth', type=str, help='model weight')
parser.add_argument('--num_class', default=3, type=int, help='num of classes')
parser.add_argument('--iter', default=30000, type=int, help='max iter')
def main():
args = parser.parse_args()
register_coco_instances(args.dataset, {}, args.label, args.file) # training dataset
register_coco_instances(args.test_dataset, {}, args.test_label, args.test_file) # testing dataset
### set metadata
MetadataCatalog.get(args.test_dataset).evaluator_type="coco"
DatasetCatalog.get(args.test_dataset)
### cfg setting
cfg = get_cfg()
cfg.merge_from_file(model_zoo.get_config_file(args.model))
cfg.DATASETS.TRAIN = (args.dataset,)
cfg.MODEL.ROI_HEADS.NUM_CLASSES = args.num_class # excavator, dump_truck, cement_truck
cfg.MODEL.WEIGHTS = args.weight
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.7 # set the testing threshold for this model
cfg.DATASETS.TEST = (args.test_dataset,)
### trainner setting
trainer = DefaultTrainer(cfg)
trainer.resume_or_load(cfg.MODEL.WEIGHTS)
### evaluation setting
evaluator = COCOEvaluator(args.test_dataset, cfg, False, output_dir="./output/")
val_loader = build_detection_test_loader(cfg, args.test_dataset)
inference_on_dataset(trainer.model, val_loader, evaluator)
if __name__ == '__main__':
main()
| 1.96875 | 2 |
| jessy/stt/witai.py | isbm/jessie | 0 | 12779156 |
<reponame>isbm/jessie
# -*- coding: utf-8-*-
import os
import logging
import requests
import yaml
from jessy import jessypath
from jessy import diagnose
from jessy.stt import AbstractSTTEngine
from jessy.utils import _module_getter
def is_valid():
'''
Module validator.
'''
return True
class WitAiSTT(AbstractSTTEngine):
"""
Speech-To-Text implementation which relies on the Wit.ai Speech API.
This implementation requires a Wit.ai access token to be present in
profile.conf. Please sign up at https://wit.ai and copy your instance
token, which can be found under Settings in the Wit console, into your
profile.conf:
...
stt_engine: witai
witai-stt:
access_token: <PASSWORD>
"""
SLUG = "witai"
def __init__(self, access_token):
self._logger = logging.getLogger(__name__)
self.token = access_token
@classmethod
def get_config(cls, profile):
config = {}
if 'witai-stt' in profile and 'access_token' in profile['witai-stt']:
config['access_token'] = profile['witai-stt']['access_token']
return config
@property
def token(self):
return self._token
@token.setter
def token(self, value):
self._token = value
self._headers = {'Authorization': 'Bearer %s' % self.token,
'accept': 'application/json',
'Content-Type': 'audio/wav'}
@property
def headers(self):
return self._headers
def transcribe(self, fp):
data = fp.read()
r = requests.post('https://api.wit.ai/speech?v=20160526',
data=data,
headers=self.headers)
try:
r.raise_for_status()
text = r.json()['_text']
except requests.exceptions.HTTPError:
self._logger.critical('Request failed with response: %r',
r.text,
exc_info=True)
return []
except requests.exceptions.RequestException:
self._logger.critical('Request failed.', exc_info=True)
return []
except ValueError as e:
self._logger.critical('Cannot parse response: %s',
e.args[0])
return []
except KeyError:
self._logger.critical('Cannot parse response.',
exc_info=True)
return []
else:
transcribed = []
if text:
transcribed.append(text.upper())
self._logger.info('Transcribed: %r', transcribed)
return transcribed
@classmethod
def is_available(cls):
return diagnose.check_network_connection()
initiator = _module_getter(WitAiSTT)
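A minimal usage sketch, assuming a profile dict carrying a valid Wit.ai token and a WAV recording on disk (both are placeholders):

profile = {'witai-stt': {'access_token': 'YOUR_WIT_AI_TOKEN'}}  # placeholder token
stt = WitAiSTT(**WitAiSTT.get_config(profile))
with open('recording.wav', 'rb') as fp:  # placeholder path to a WAV recording
    print(stt.transcribe(fp))  # e.g. ['HELLO WORLD'], or [] if the request fails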
| 2.3125 | 2 |
| Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/openedx/core/djangoapps/user_api/migrations/0001_initial.py | osoco/better-ways-of-thinking-about-software | 3 | 12779157 |
import django.core.validators
import django.utils.timezone
import model_utils.fields
from django.conf import settings
from django.db import migrations, models
from opaque_keys.edx.django.models import CourseKeyField
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='UserCourseTag',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('key', models.CharField(max_length=255, db_index=True)),
('course_id', CourseKeyField(max_length=255, db_index=True)),
('value', models.TextField()),
('user', models.ForeignKey(related_name='+', to=settings.AUTH_USER_MODEL, on_delete=models.CASCADE)),
],
),
migrations.CreateModel(
name='UserOrgTag',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, verbose_name='created', editable=False)),
('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, verbose_name='modified', editable=False)),
('key', models.CharField(max_length=255, db_index=True)),
('org', models.CharField(max_length=255, db_index=True)),
('value', models.TextField()),
('user', models.ForeignKey(related_name='+', to=settings.AUTH_USER_MODEL, on_delete=models.CASCADE)),
],
),
migrations.CreateModel(
name='UserPreference',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('key', models.CharField(db_index=True, max_length=255, validators=[django.core.validators.RegexValidator('[-_a-zA-Z0-9]+')])),
('value', models.TextField()),
('user', models.ForeignKey(related_name='preferences', to=settings.AUTH_USER_MODEL, on_delete=models.CASCADE)),
],
),
migrations.AlterUniqueTogether(
name='userpreference',
unique_together={('user', 'key')},
),
migrations.AlterUniqueTogether(
name='userorgtag',
unique_together={('user', 'org', 'key')},
),
migrations.AlterUniqueTogether(
name='usercoursetag',
unique_together={('user', 'course_id', 'key')},
),
]
| 1.921875 | 2 |
| backend/Techfesia2019/payments/tests.py | masterashu/Techfesia2019 | 1 | 12779158 |
import datetime as dt
from json import dumps as json_dumps
from django.urls import reverse
from rest_framework import status
from rest_framework.test import APITestCase
from registration.models import User
from events.models import SoloEvent
from event_registrations.models import SoloEventRegistration
from payments.models import Transaction
class PaymentInitiateViewTestCase(APITestCase):
def setUp(self):
self.user1 = User.objects.create(username='test_user1',
first_name='test', last_name='user',
email='<EMAIL>', email_confirmed=True
)
self.user2 = User.objects.create(username='test_user2',
first_name='test', last_name='user',
email='<EMAIL>', email_confirmed=True
)
self.profile1 = self.user1.profile
self.profile2 = self.user2.profile
self.event1 = SoloEvent.objects.create(title='SoloEvent1',
start_date=dt.date(2019, 7, 19), end_date=dt.date(2019, 7, 19),
start_time=dt.time(12, 0, 0), end_time=dt.time(15, 0, 0),
fee=100, reserved_fee=80, reserved_slots=10, max_participants=20
)
self.event2 = SoloEvent.objects.create(title='SoloEvent2',
start_date=dt.date(2019, 7, 19), end_date=dt.date(2019, 7, 19),
start_time=dt.time(12, 0, 0), end_time=dt.time(15, 0, 0),
fee=100, reserved_fee=0, reserved_slots=10, max_participants=20
)
self.registration1 = SoloEventRegistration.objects.create(event=self.event1, profile=self.profile1)
self.registration2 = SoloEventRegistration.objects.create(event=self.event1, profile=self.profile2,
is_reserved=True)
self.registration3 = SoloEventRegistration.objects.create(event=self.event2, profile=self.profile2,
is_reserved=True, is_complete=True)
self.registration3_transaction = Transaction.objects.create(created_by=self.profile1,
solo_registration=self.registration1,
status='Failed')
self.registration3_transaction = Transaction.objects.create(created_by=self.profile2,
solo_registration=self.registration3,
status='Successful')
def test_payment_initiate_view_unauthenticated(self):
url = reverse('payment_initiate')
self.client.login(user=None)
response = self.client.post(url,
data=json_dumps({'eventPublicId': self.event1.public_id,
'registrationId': self.registration1.public_id}),
content_type='application/json'
)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_payment_initiate_view_wrong_user(self):
url = reverse('payment_initiate')
self.client.force_login(user=self.user2)
response = self.client.post(url,
data=json_dumps({'eventPublicId': self.event1.public_id,
'registrationId': self.registration1.public_id}),
content_type='application/json'
)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_payment_initiate_view_invalid_data(self):
url = reverse('payment_initiate')
self.client.force_login(user=self.user1)
response = self.client.post(url,
data=json_dumps({'eventPublicId': 'random_string',
'registrationId': self.registration1.public_id}),
content_type='application/json'
)
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
response = self.client.post(url,
data=json_dumps({'eventPublicId': self.event1.public_id,
'registrationId': 'random_string'}),
content_type='application/json'
)
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
def test_payment_initiate_view_missing_data(self):
url = reverse('payment_initiate')
self.client.force_login(user=self.user1)
response = self.client.post(url,
data=json_dumps({'eventPublicId': self.event1.public_id}),
content_type='application/json'
)
self.assertEqual(response.status_code, status.HTTP_422_UNPROCESSABLE_ENTITY)
response = self.client.post(url,
data=json_dumps({'registrationId': self.registration1.public_id}),
content_type='application/json'
)
self.assertEqual(response.status_code, status.HTTP_422_UNPROCESSABLE_ENTITY)
def test_payment_initiate_view_already_paid(self):
url = reverse('payment_initiate')
self.client.force_login(user=self.user2)
response = self.client.post(url,
data=json_dumps({'eventPublicId': self.event2.public_id,
'registrationId': self.registration3.public_id}),
content_type='application/json'
)
self.assertEqual(response.status_code, status.HTTP_422_UNPROCESSABLE_ENTITY)
def test_payment_initiate_view_again(self):
url = reverse('payment_initiate')
self.client.force_login(user=self.user2)
response = self.client.post(url,
data=json_dumps({'eventPublicId': self.event1.public_id,
'registrationId': self.registration2.public_id}),
content_type='application/json'
)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
def test_payment_initiate_view(self):
url = reverse('payment_initiate')
self.client.force_login(user=self.user1)
response = self.client.post(url,
data=json_dumps({'eventPublicId': self.event1.public_id,
'registrationId': self.registration1.public_id}),
content_type='application/json'
)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
| 2.0625 | 2 |
| 767.py | wilbertgeng/LeetCode_exercise | 0 | 12779159 |
<gh_stars>0
"""767. Reorganize String"""
class Solution(object):
def reorganizeString(self, S):
"""
:type S: str
:rtype: str
"""
res = []
s = Counter(S)
pq = []
for key, value in s.items():
pq.append((-value, key))
heapq.heapify(pq)
v_prev = 0
k_prev = ""
while pq: # two smallest heaps alternate
v, k = heapq.heappop(pq)
res += [k]
if v_prev < 0:
heapq.heappush(pq, (v_prev, k_prev))
v += 1
v_prev, k_prev = v, k
res = "".join(res)
return res if len(res) == len(S) else ""
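A quick sanity check against the standard LeetCode examples:

print(Solution().reorganizeString("aab"))   # "aba"
print(Solution().reorganizeString("aaab"))  # "" (no valid rearrangement exists)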
| 3.171875 | 3 |
| govuk_bank_holidays/bank_holidays.py | ministryofjustice/govuk-bank-holidays | 19 | 12779160 |
<reponame>ministryofjustice/govuk-bank-holidays
import datetime
import functools
import gettext
import json
import logging
import os
import requests
__all__ = ('BankHolidays',)
logger = logging.getLogger(__name__)
class BankHolidays:
"""
Tool to load UK bank holidays from GOV.UK (see https://www.gov.uk/bank-holidays)
NB: Bank holidays vary between parts of the UK so GOV.UK provide separate lists for different "divisions".
Methods of this class will default to only considering bank holidays common to *all* divisions
unless a specific division is provided.
"""
source_url = 'https://www.gov.uk/bank-holidays.json'
# division constants
ENGLAND_AND_WALES = 'england-and-wales'
SCOTLAND = 'scotland'
NORTHERN_IRELAND = 'northern-ireland'
@classmethod
def load_backup_data(cls):
with open(os.path.join(os.path.dirname(__file__), 'bank-holidays.json')) as f:
return json.load(f)
def __init__(self, locale=None, weekend=(5, 6), use_cached_holidays=False):
"""
Load UK bank holidays
:param locale: the locale into which holidays should be translated; defaults to no translation
:param weekend: days of the week that are never work days; defaults to Saturday and Sunday
:param use_cached_holidays: use the cached local copy of the holiday list
"""
self.weekend = set(weekend)
if use_cached_holidays:
data = self.load_backup_data()
else:
try:
logger.debug('Downloading bank holidays from %s' % self.source_url)
data = requests.get(self.source_url).json()
except (requests.RequestException, ValueError):
logger.warning('Using backup bank holiday data')
data = self.load_backup_data()
if locale:
trans = gettext.translation('messages', fallback=True, languages=[locale],
localedir=os.path.join(os.path.dirname(__file__), 'locale'))
else:
trans = gettext.NullTranslations()
trans = trans.ugettext if hasattr(trans, 'ugettext') else trans.gettext
def _(text):
if not text:
return text
return trans(text)
def map_holiday(holiday):
try:
return {
'title': _(holiday['title']),
'date': datetime.datetime.strptime(holiday['date'], '%Y-%m-%d').date(),
'notes': _(holiday.get('notes', '')),
'bunting': bool(holiday.get('bunting')),
}
except (KeyError, ValueError):
logger.warning('Holiday could not be parsed')
logger.debug(holiday, exc_info=True)
self.data = {
division: sorted(filter(None, map(map_holiday, item.get('events', []))),
key=lambda e: e['date'])
for division, item in data.items()
}
def __iter__(self):
"""
Iterates over the current year's holidays that are common to *all* divisions
:return: list of dicts with titles, dates, etc
"""
return iter(self.get_holidays(year=datetime.date.today().year))
def get_holidays(self, division=None, year=None):
"""
Gets a list of all known bank holidays, optionally filtered by division and/or year
NB: If no division is specified, only holidays common to *all* divisions are returned.
:param division: see division constants; defaults to common holidays
:param year: defaults to all available years
:return: list of dicts with titles, dates, etc
"""
if division:
holidays = self.data[division]
else:
holidays = self.data[self.ENGLAND_AND_WALES]
dates_in_common = functools.reduce(
set.intersection,
(
set(map(lambda holiday: holiday['date'], division_holidays))
for division, division_holidays in self.data.items()
)
)
holidays = filter(lambda holiday: holiday['date'] in dates_in_common, holidays)
if year:
holidays = filter(lambda holiday: holiday['date'].year == year, holidays)
return list(holidays)
@functools.lru_cache()
def _get_known_holiday_date_set(self, division=None):
"""
Returns an unordered set of all known bank holiday dates
NB: If no division is specified, only holidays common to *all* divisions are returned.
"""
return set(
holiday['date']
for holiday in self.get_holidays(division=division)
)
def is_holiday(self, date, division=None):
"""
True if the date is a known bank holiday
NB: If no division is specified, only holidays common to *all* divisions are returned.
:param date: the date to check
:param division: see division constants; defaults to common holidays
:return: bool
"""
return date in self._get_known_holiday_date_set(division=division)
def is_work_day(self, date, division=None):
"""
True if the date is not a weekend or a known bank holiday
NB: If no division is specified, only holidays common to *all* divisions are returned.
:param date: the date to check
:param division: see division constants; defaults to common holidays
:return: bool
"""
return date.weekday() not in self.weekend and date not in self._get_known_holiday_date_set(division=division)
def get_next_holiday(self, division=None, date=None):
"""
Returns the next known bank holiday
NB: If no division is specified, only holidays common to *all* divisions are returned.
:param division: see division constants; defaults to common holidays
:param date: search starting from this date; defaults to today
:return: dict or None
"""
date = date or datetime.date.today()
for holiday in self.get_holidays(division=division):
if holiday['date'] > date:
return holiday
def get_prev_holiday(self, division=None, date=None):
"""
Returns the previous known bank holiday
NB: If no division is specified, only holidays common to *all* divisions are returned.
:param division: see division constants; defaults to common holidays
:param date: search starting from this date; defaults to today
:return: dict or None
"""
date = date or datetime.date.today()
for holiday in reversed(self.get_holidays(division=division)):
if holiday['date'] < date:
return holiday
def get_next_work_day(self, division=None, date=None):
"""
Returns the next work day, skipping weekends and bank holidays
NB: If no division is specified, only holidays common to *all* divisions are returned.
:param division: see division constants; defaults to common holidays
:param date: search starting from this date; defaults to today
:return: datetime.date; NB: get_next_holiday returns a dict
"""
date = date or datetime.date.today()
one_day = datetime.timedelta(days=1)
while True:
date += one_day
if self.is_work_day(date, division=division):
return date
def get_prev_work_day(self, division=None, date=None):
"""
Returns the previous work day, skipping weekends and bank holidays
NB: If no division is specified, only holidays common to *all* divisions are returned.
:param division: see division constants; defaults to common holidays
:param date: search starting from this date; defaults to today
:return: datetime.date; NB: get_next_holiday returns a dict
"""
date = date or datetime.date.today()
one_day = datetime.timedelta(days=1)
while True:
date -= one_day
if self.is_work_day(date, division=division):
return date
def holidays_after(self, division=None, date=None):
"""
Yields known bank holidays in chronological order
NB: If no division is specified, only holidays common to *all* divisions are yielded.
:param division: see division constants; defaults to common holidays
:param date: starting after this date; defaults to today
"""
date = date or datetime.date.today()
holidays = self.get_holidays(division=division)
yield from filter(lambda holiday: holiday['date'] > date, holidays)
def holidays_before(self, division=None, date=None):
"""
Yields known bank holidays in reverse chronological order
NB: If no division is specified, only holidays common to *all* divisions are yielded.
:param division: see division constants; defaults to common holidays
:param date: starting before this date; defaults to today
"""
date = date or datetime.date.today()
holidays = reversed(self.get_holidays(division=division))
yield from filter(lambda holiday: holiday['date'] < date, holidays)
def work_days_after(self, division=None, date=None):
"""
Yields an infinite series of work days in chronological order skipping weekends and known bank holidays
NB: If no division is specified, only holidays common to *all* divisions are yielded.
:param division: see division constants; defaults to common holidays
:param date: starting after this date; defaults to today
"""
date = date or datetime.date.today()
one_day = datetime.timedelta(days=1)
while True:
date += one_day
if self.is_work_day(date, division=division):
yield date
def work_days_before(self, division=None, date=None):
"""
Yields an infinite series of work days in reverse chronological order skipping weekends and known bank holidays
NB: If no division is specified, only holidays common to *all* divisions are yielded.
:param division: see division constants; defaults to common holidays
:param date: starting before this date; defaults to today
"""
date = date or datetime.date.today()
one_day = datetime.timedelta(days=1)
while True:
date -= one_day
if self.is_work_day(date, division=division):
yield date
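A minimal usage sketch, using the bundled backup data so no network request is made (the 2019 dates assume that year is present in the cached list):

holidays = BankHolidays(use_cached_holidays=True)
for holiday in holidays.get_holidays(division=BankHolidays.ENGLAND_AND_WALES, year=2019):
    print(holiday['date'], holiday['title'])
print(holidays.is_work_day(datetime.date(2019, 12, 25)))              # False: Christmas Day
print(holidays.get_next_work_day(date=datetime.date(2019, 12, 25)))   # 2019-12-27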
| 3.109375 | 3 |
| chatterbox/settings.py | blitzagency/django-chatterbox | 8 | 12779161 |
<reponame>blitzagency/django-chatterbox
# DJANGO IMPORTS
from django.conf import settings
# Chatterbox settings, overridable from the project's Django settings module
AUTO_APPROVE = getattr(settings, "CHATTERBOX_AUTO_APPROVE", True)
SUCCESS_REDIRECT_URL = getattr(settings,
"CHATTERBOX_SUCCESS_REDIRECT_URL",
'/admin/chatterbox/key/')
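Both values can be overridden from the host project's own settings module, for example (the redirect URL below is a placeholder):

# settings.py of the host project
CHATTERBOX_AUTO_APPROVE = False
CHATTERBOX_SUCCESS_REDIRECT_URL = '/dashboard/chatterbox/keys/'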
| 1.40625 | 1 |
| setting.py | okboy32/anjukeSpider | 0 | 12779162 |
<reponame>okboy32/anjukeSpider<gh_stars>0
# MySQL configuration
DB = 'anjuke'
USER = 'root'
PASSWORD = '<PASSWORD>'
HOST = 'localhost'
PORT = 3306
# Cities to crawl (city name -> URL slug)
citys = {
# '苏州': 'suzhou',
# '北京': 'bj',
# '天津': 'tj',
# '大连': 'dl',
# '石家庄': 'sjz',
# '哈尔滨': 'heb',
# '沈阳': 'sy',
# '太原': 'ty',
# '长春': 'cc',
# '威海': 'weihai',
# '潍坊': 'wf',
# '呼和浩特': 'hhht',
# '包头': 'bt',
# '秦皇岛': 'qhd',
# '烟台': 'yt',
# '保定': 'bd',
# '上海': 'sh',
# '杭州': 'hz',
# '南京': 'nj',
# '无锡': 'wx',
# '济南': 'jn',
# '青岛': 'qd',
# '昆山': 'ks',
# '宁波': 'nb',
# '南昌': 'nc',
# '福州': 'fz',
# '合肥': 'hf',
# '徐州': 'xz',
# '淄博': 'zb',
# '南通': 'nt',
# '常州': 'cz',
# '湖州': 'huzhou',
# '深圳': 'sz',
# '广州': 'gz',
# '佛山': 'fs',
# '长沙': 'cs',
# '三亚': 'sanya',
# '惠州': 'huizhou',
# '东莞': 'dg',
# '海口': 'hk',
# '珠海': 'zh',
# '中山': 'zs',
# '厦门': 'xm',
# '南宁': 'nn',
# '泉州': 'qz',
'柳州': 'liuzhou',
'成都': 'cd',
'重庆': 'cq',
'武汉': 'wuhan',
'郑州': 'zz',
'西安': 'xa',
'昆明': 'km',
'贵阳': 'gy',
'洛阳': 'ly',
}
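A sketch of how these settings might be consumed; pymysql is an assumption here, as the spider's actual database client is not shown in this file:

import pymysql  # assumption: may not be the client the spider actually uses

connection = pymysql.connect(host=HOST, port=PORT, user=USER,
                             password=PASSWORD, db=DB, charset='utf8mb4')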
| 1.453125 | 1 |
| utils/send_data.py | e0xextazy/dog_breeds_clf | 1 | 12779163 |
import requests
image = {'image': open('data/test_photo.jpeg', 'rb').read()}
r1 = requests.get("http://0.0.0.0:5000/")
print(r1.text)
r2 = requests.post("http://localhost:5000/get_prob", files=image)
print(r2.text)  # predicted class probabilities returned by /get_prob
| 2.9375 | 3 |
| phishing/phishing-HTML-linter.py | H1d3r/Penetration-Testing-Tools-1 | 6 | 12779164 |
<reponame>H1d3r/Penetration-Testing-Tools-1
#!/usr/bin/python3
import os, sys, re
import string
import argparse
import yaml
import textwrap
import json
from urllib import parse
from bs4 import BeautifulSoup
options = {
'format' : 'text',
}
executable_extensions = [
'.exe',
'.dll',
'.lnk',
'.scr',
'.sys',
'.ps1',
'.bat',
'.js',
'.jse',
'.vbs',
'.vba',
'.vbe',
'.wsl',
'.cpl',
]
options = {
'debug': False,
'verbose': False,
'nocolor' : False,
'log' : sys.stderr,
'format' : 'text',
}
class Logger:
colors_map = {
'red': 31,
'green': 32,
'yellow': 33,
'blue': 34,
'magenta': 35,
'cyan': 36,
'white': 37,
'grey': 38,
}
colors_dict = {
'error': colors_map['red'],
'trace': colors_map['magenta'],
'info ': colors_map['green'],
'debug': colors_map['grey'],
'other': colors_map['grey'],
}
options = {}
def __init__(self, opts = None):
self.options.update(Logger.options)
if opts != None and len(opts) > 0:
self.options.update(opts)
@staticmethod
def with_color(c, s):
return "\x1b[%dm%s\x1b[0m" % (c, s)
def colored(self, txt, col):
if self.options['nocolor']:
return txt
return Logger.with_color(Logger.colors_map[col], txt)
# Invocation:
# def out(txt, mode='info ', fd=None, color=None, noprefix=False, newline=True):
@staticmethod
def out(txt, fd, mode='info ', **kwargs):
if txt == None or fd == 'none':
return
elif fd == None:
raise Exception('[ERROR] Logging descriptor has not been specified!')
args = {
'color': None,
'noprefix': False,
'newline': True,
'nocolor' : False
}
args.update(kwargs)
if type(txt) != str:
txt = str(txt)
txt = txt.replace('\t', ' ' * 4)
if args['nocolor']:
col = ''
elif args['color']:
col = args['color']
if type(col) == str and col in Logger.colors_map.keys():
col = Logger.colors_map[col]
else:
col = Logger.colors_dict.setdefault(mode, Logger.colors_map['grey'])
prefix = ''
if mode:
mode = '[%s] ' % mode
if not args['noprefix']:
if args['nocolor']:
prefix = mode.upper()
else:
prefix = Logger.with_color(Logger.colors_dict['other'], '%s'
% (mode.upper()))
nl = ''
if 'newline' in args:
if args['newline']:
nl = '\n'
if 'force_stdout' in args:
fd = sys.stdout
if type(fd) == str:
with open(fd, 'a') as f:
prefix2 = ''
if mode:
prefix2 = '%s' % (mode.upper())
f.write(prefix2 + txt + nl)
f.flush()
else:
if args['nocolor']:
fd.write(prefix + txt + nl)
else:
fd.write(prefix + Logger.with_color(col, txt) + nl)
# Info shall be used as an ordinary logging facility, for every desired output.
def info(self, txt, forced = False, **kwargs):
kwargs['nocolor'] = self.options['nocolor']
if forced or (self.options['verbose'] or \
self.options['debug'] ) \
or (type(self.options['log']) == str and self.options['log'] != 'none'):
Logger.out(txt, self.options['log'], 'info', **kwargs)
def text(self, txt, **kwargs):
kwargs['noprefix'] = True
kwargs['nocolor'] = self.options['nocolor']
Logger.out(txt, self.options['log'], '', **kwargs)
def dbg(self, txt, **kwargs):
if self.options['debug']:
kwargs['nocolor'] = self.options['nocolor']
Logger.out(txt, self.options['log'], 'debug', **kwargs)
def err(self, txt, **kwargs):
kwargs['nocolor'] = self.options['nocolor']
Logger.out(txt, self.options['log'], 'error', **kwargs)
def fatal(self, txt, **kwargs):
kwargs['nocolor'] = self.options['nocolor']
Logger.out(txt, self.options['log'], 'error', **kwargs)
os._exit(1)
logger = Logger(options)
class PhishingMailParser:
#
# Based on:
# https://journeys.autopilotapp.com/blog/email-spam-trigger-words/
# https://www.activecampaign.com/blog/spam-words
# https://blog.hubspot.com/blog/tabid/6307/bid/30684/the-ultimate-list-of-email-spam-trigger-words.aspx
#
Suspicious_Words = {
'Manipulative': (
'creating unnecessary urgency or pressure',
(
"Act now", "Action", "Apply now", "Apply online", "Buy", "Buy direct", "Call", "Call now", "Click here",
"Clearance", "Click here", "Do it today", "Don't delete", "Drastically reduced", "Exclusive deal", "Expire",
"Get", "Get it now", "Get started now", "Important information regarding", "Instant", "Limited time",
"New customers only", "Now only", "Offer expires", "Once in a lifetime", "Order now", "Please read",
"Special promotion", "Take action", "This won't last", "Urgent", "While stocks last"
)
),
'Needy' : (
'sounding desperate or exaggerated claims',
(
"All-new", "Bargain", "Best price", "Bonus", "Email marketing", "Free", "For instant access", "Free gift",
"Free trial", "Have you been turned down?", "Great offer", "Join millions of Americans", "Incredible deal",
"Prize", "Satisfaction guaranteed", "Will not believe your eyes"
)
),
'Sleazy' : (
'being too pushy',
(
"As seen on", "Click here", "Click below", "Deal", "Direct email", "Direct marketing", "Do it today",
"Order now", "Order today", "Unlimited", "What are you waiting for?", "Visit our website"
)
),
'Cheap' : (
'no pre-qualifications, everybody wins',
(
"Acceptance", "Access", "Avoid bankruptcy", "Boss", "Cancel", "Card accepted", "Certified",
"Cheap", "Compare", "Compare rates", "Congratulations", "Credit card offers", "Cures", "Dear ",
"Dear friend", "Drastically reduced", "Easy terms", "Free grant money", "Free hosting", "Free info",
"Free membership", "Friend", "Get out of debt", "Giving away", "Guarantee", "Guaranteed",
"Have you been turned down?", "Hello", "Information you requested", "Join millions", "No age restrictions",
"No catch", "No experience", "No obligation", "No purchase necessary", "No questions asked",
"No strings attached", "Offer", "Opportunity", "Save big", "Winner", "Winning", "Won", "You are a winner!",
"You've been selected!"
)
),
'Far-fetched' : (
'statements that are too good to be true',
(
"Additional income", "All-natural", "Amazing", "Be your own boss", "Big bucks", "Billion",
"Billion dollars", "Cash", "Cash bonus", "Consolidate debt and credit", "Consolidate your debt",
"Double your income", "Earn", "Earn cash", "Earn extra cash", "Eliminate bad credit", "Eliminate debt",
"Extra", "Fantastic deal", "Financial freedom", "Financially independent", "Free investment", "Free money",
"Get paid", "Home", "Home-based", "Income", "Increase sales", "Increase traffic", "Lose", "Lose weight",
"Money back", "No catch", "No fees", "No hidden costs", "No strings attached", "Potential earnings",
"Pure profit", "Removes wrinkles", "Reverses aging", "Risk-free", "Serious cash", "Stop snoring",
"Vacation", "Vacation offers", "Weekend getaway", "Weight loss", "While you sleep", "Work from home"
)
),
'Exaggeration' : (
'exaggerated claims and promises',
(
"100% more", "100% free", "100% satisfied", "Additional income", "Be your own boss", "Best price",
"Big bucks", "Billion", "Cash bonus", "Cents on the dollar", "Consolidate debt", "Double your cash",
"Double your income", "Earn extra cash", "Earn money", "Eliminate bad credit", "Extra cash", "Extra income",
"Expect to earn", "Fast cash", "Financial freedom", "Free access", "Free consultation", "Free gift",
"Free hosting", "Free info", "Free investment", "Free membership", "Free money", "Free preview", "Free quote",
"Free trial", "Full refund", "Get out of debt", "Get paid", "Giveaway", "Guaranteed", "Increase sales",
"Increase traffic", "Incredible deal", "Lower rates", "Lowest price", "Make money", "Million dollars", "Miracle",
"Money back", "Once in a lifetime", "One time", "Pennies a day", "Potential earnings", "Prize",
"Promise", "Pure profit", "Risk-free", "Satisfaction guaranteed", "Save big money", "Save up to", "Special promotion",
)
),
'Urgency' : (
'create unnecessary urgency and pressure',
(
"Act now", "Apply now", "Become a member", "Call now", "Click below", "Click here", "Get it now",
"Do it today", "Don’t delete", "Exclusive deal", "Get started now", "Important information regarding",
"Information you requested", "Instant", "Limited time", "New customers only", "Order now", "Please read",
"See for yourself", "Sign up free", "Take action", "This won’t last", "Urgent", "What are you waiting for?",
"While supplies last", "Will not believe your eyes", "Winner", "Winning", "You are a winner", "You have been selected",
)
),
'Spammy' : (
'shady, spammy, or unethical behavior',
(
"Bulk email", "Buy direct", "Cancel at any time", "Check or money order", "Congratulations", "Confidentiality",
"Cures", "Dear friend", "Direct email", "Direct marketing", "Hidden charges", "Human growth hormone", "Internet marketing",
"Lose weight", "Mass email", "Meet singles", "Multi-level marketing", "No catch", "No cost", "No credit check",
"No fees", "No gimmick", "No hidden costs", "No hidden fees", "No interest", "No investment", "No obligation",
"No purchase necessary", "No questions asked", "No strings attached", "Not junk", "Notspam", "Obligation",
"Passwords", "Requires initial investment", "Social security number", "This isn’t a scam", "This isn’t junk",
"This isn’t spam", "Undisclosed", "Unsecured credit", "Unsecured debt", "Unsolicited", "Valium",
"Viagra", "Vicodin", "We hate spam", "Weight loss", "Xanax",
)
),
'Jargon' : (
'jargon or legalese',
(
"Accept credit cards", "All new", "As seen on", "Bargain", "Beneficiary", "Billing", "Bonus",
"Cards accepted", "Cash", "Certified", "Cheap", "Claims", "Clearance", "Compare rates", "Credit card offers",
"Deal", "Debt", "Discount", "Fantastic", "In accordance with laws", "Income", "Investment", "Join millions",
"Lifetime", "Loans", "Luxury", "Marketing solution", "Message contains", "Mortgage rates", "Name brand",
"Offer", "Online marketing", "Opt in", "Pre-approved", "Quote", "Rates", "Refinance", "Removal", "Reserves the right",
"Score", "Search engine", "Sent in compliance", "Subject to", "Terms and conditions", "Trial", "Unlimited",
"Warranty", "Web traffic", "Work from home",
)
),
'Shady' : (
'ethically or legally questionable behavior',
(
"Addresses", "Beneficiary", "Billing", "Casino", "Celebrity", "Collect child support", "Copy DVDs",
"Fast viagra delivery", "Hidden", "Human growth hormone", "In accordance with laws", "Investment",
"Junk", "Legal", "Life insurance", "Loan", "Lottery", "Luxury car", "Medicine", "Meet singles", "Message contains",
"Miracle", "Money", "Multi-level marketing", "Nigerian", "Offshore", "Online degree", "Online pharmacy", "Passwords",
"Refinance", "Request", "Rolex", "Score", "Social security number", "Spam", "This isn't spam", "Undisclosed recipient",
"University diplomas", "Unsecured credit", "Unsolicited", "US dollars", "Valium", "Viagra", "Vicodin",
"Warranty", "Xanax"
)
),
"Commerce" : (
"",
(
"As seen on", "Buy", "Buy direct", "Buying judgments", "Clearance", "Order", "Order status", "Orders shipped by shopper",
)
),
"Personal" : (
"",
(
"Dig up dirt on friends", "Meet singles", "Score with babes", "XXX", "Near you",
)
),
"Employment" : (
"",
(
"Additional income", "Be your own boss", "Compete for your business", "Double your", "Earn $", "Earn extra cash",
"Earn per week", "Expect to earn", "Extra income", "Home based", "Home employment", "Homebased business", "Income from home",
"Make $", "Make money", "Money making", "Online biz opportunity", "Online degree", "Opportunity",
"Potential earnings", "University diplomas", "While you sleep", "Work at home", "Work from home",
)
),
"Financial - General" : (
"",
(
"$$$", "Affordable", "Bargain", "Beneficiary", "Best price", "Big bucks", "Cash", "Cash bonus", "Cashcashcash",
"Cents on the dollar", "Cheap", "Check", "Claims", "Collect", "Compare rates", "Cost", "Credit", "Credit bureaus",
"Discount", "Earn", "Easy terms", "F r e e", "Fast cash", "For just $XXX", "Hidden assets", "hidden charges",
"Income", "Incredible deal", "Insurance", "Investment", "Loans", "Lowest price", "Million dollars", "Money",
"Money back", "Mortgage", "Mortgage rates", "No cost", "No fees", "One hundred percent free", "Only $", "Pennies a day",
"Price", "Profits", "Pure profit", "Quote", "Refinance", "Save $", "Save big money", "Save up to", "Serious cash",
"Subject to credit", "They keep your money — no refund!", "Unsecured credit", "Unsecured debt",
"US dollars", "Why pay more?",
)
),
"Financial - Business" : (
"",
(
"Accept credit cards", "Cards accepted", "Check or money order", "Credit card offers", "Explode your business",
"Full refund", "Investment decision", "No credit check", "No hidden Costs", "No investment",
"Requires initial investment", "Sent in compliance", "Stock alert", "Stock disclaimer statement", "Stock pick",
)
),
"Financial - Personal" : (
"",
(
"Avoice bankruptcy", "Calling creditors", "Collect child support", "Consolidate debt and credit",
"Consolidate your debt", "Eliminate bad credit", "Eliminate debt", "Financially independent",
"Get out of debt", "Get paid", "Lower interest rate", "Lower monthly payment", "Lower your mortgage rate",
"Lowest insurance rates", "Pre-approved", "Refinance home", "Social security number", "Your income",
)
),
"General" : (
"",
(
"Acceptance", "Accordingly", "Avoid", "Chance", "Dormant", "Freedom", "Here", "Hidden", "Home", "Leave",
"Lifetime", "Lose", "Maintained", "Medium", "Miracle", "Never", "Passwords", "Problem", "Remove", "Reverses",
"Sample", "Satisfaction", "Solution", "Stop", "Success", "Teen", "Wife",
)
),
"Greetings" : (
"",
(
"Dear ", "Friend", "Hello",
)
),
"Marketing" : (
"",
(
"Ad", "Auto email removal", "Bulk email", "Click", "Click below", "Click here", "Click to remove", "Direct email",
"Direct marketing", "Email harvest", "Email marketing", "Form", "Increase sales", "Increase traffic",
"Increase your sales", "Internet market", "Internet marketing", "Marketing", "Marketing solutions", "Mass email",
"Member", "Month trial offer", "More Internet Traffic", "Multi level marketing", "Notspam", "One time mailing",
"Online marketing", "Open", "Opt in", "Performance", "Removal instructions", "Sale", "Sales",
"Search engine listings", "Search engines", "Subscribe", "The following form", "This isn't junk", "This isn't spam",
"Undisclosed recipient", "Unsubscribe", "Visit our website", "We hate spam", "Web traffic", "Will not believe your eyes",
)
),
"Medical" : (
"",
(
"Cures baldness", "Diagnostic", "Fast Viagra delivery", "Human growth hormone", "Life insurance",
"Lose weight", "Lose weight spam", "Medicine", "No medical exams", "Online pharmacy", "Removes wrinkles",
"Reverses aging", "Stop snoring", "Valium", "Viagra", "Vicodin", "Weight loss", "Xanax",
)
),
"Numbers" : (
"",
(
"#1", "100% free", "100% satisfied", "4U", "50% off", "Billion", "Billion dollars", "Join millions",
"Join millions of Americans", "Million", "One hundred percent guaranteed", "Thousands",
)
),
"Offers" : (
"",
(
"Being a member", "Billing address", "Call", "Cannot be combined with any other offer",
"Confidentially on all orders", "Deal", "Financial freedom", "Gift certificate", "Giving away",
"Guarantee", "Have you been turned down?", "If only it were that easy", "Important information regarding",
"In accordance with laws", "Long distance phone offer", "Mail in order form", "Message contains",
"Name brand", "Nigerian", "No age restrictions", "No catch", "No claim forms", "No disappointment",
"No experience", "No gimmick", "No inventory", "No middleman", "No obligation", "No purchase necessary",
"No questions asked", "No selling", "No strings attached", "No-obligation", "Not intended",
"Obligation", "Off shore", "Offer", "Per day", "Per week", "Priority mail", "Prize", "Prizes",
"Produced and sent out", "Reserves the right", "Shopping spree", "Stuff on sale", "Terms and conditions",
"The best rates", "They’re just giving it away", "Trial", "Unlimited", "Unsolicited", "Vacation",
"Vacation offers", "Warranty", "We honor all", "Weekend getaway", "What are you waiting for?", "Who really wins?",
"Win", "Winner", "Winning", "Won", "You are a winner!", "You have been selected", "You’re a Winner!",
)
),
"Calls-to-Action" : (
"",
(
"Cancel at any time", "Compare", "Copy accurately", "Get", "Give it away", "Print form signature",
"Print out and fax", "See for yourself", "Sign up free today",
)
),
"Free" : (
"",
(
"Free", "Free access", "Free cell phone", "Free consultation", "Free DVD", "Free gift", "Free grant money",
"Free hosting", "Free installation", "Free Instant", "Free investment", "Free leads", "Free membership",
"Free money", "Free offer", "Free preview", "Free priority mail", "Free quote", "Free sample",
"Free trial", "Free website",
)
),
"Descriptions/Adjectives" : (
"",
(
"All natural", "All new", "Amazing", "Certified", "Congratulations", "Drastically reduced", "Fantastic deal",
"For free", "Guaranteed", "It’s effective", "Outstanding values", "Promise you", "Real thing",
"Risk free", "Satisfaction guaranteed",
)
),
"Sense of Urgency" : (
"",
(
"Access", "Act now!", "Apply now", "Apply online", "Call free", "Call now", "Can't live without", "Do it today",
"Don't delete", "Don't hesitate", "For instant access", "For Only", "For you", "Get it now", "Get started now",
"Great offer", "Info you requested", "Information you requested", "Instant", "Limited time", "New customers only",
"Now", "Now only", "Offer expires", "Once in lifetime", "One time", "Only", "Order now", "Order today",
"Please read", "Special promotion", "Supplies are limited", "Take action now", "Time limited", "Urgent",
"While supplies last",
)
),
"Nouns" : (
"",
(
"Addresses on CD", "Beverage", "Bonus", "Brand new pager", "Cable converter", "Casino", "Celebrity",
"Copy DVDs", "Laser printer", "Legal", "Luxury car", "New domain extensions", "Phone", "Rolex", "Stainless steel"
)
)
}
def __init__(self, options):
self.options = options
self.results = {}
def parse(self, html):
self.html = html
self.soup = BeautifulSoup(html, features="lxml")
self.results['Embedded Images'] = self.testEmbeddedImages()
self.results['Images without ALT'] = self.testImagesNoAlt()
self.results['Masqueraded Links'] = self.testMaskedLinks()
self.results['Use of underline tag <u>'] = self.testUnderlineTag()
self.results['HTML code in <a> link tags'] = self.testLinksWithHtmlCode()
self.results['<a href="..."> URL contained GET parameter'] = self.testLinksWithGETParams()
self.results['<a href="..."> URL contained GET parameter with URL'] = self.testLinksWithGETParamsBeingURLs()
self.results['<a href="..."> URL pointed to an executable file'] = self.testLinksWithDangerousExtensions()
self.results['Mail message contained suspicious words'] = self.testSuspiciousWords()
return {k: v for k, v in self.results.items() if v}
@staticmethod
def context(tag):
s = str(tag)
if len(s) < 100:
return s
beg = s[:50]
end = s[-50:]
return f'{beg}...{end}'
def testUnderlineTag(self):
links = self.soup('u')
if not links or len(links) == 0:
return []
desc = 'Underline tags are recognized by anti-spam filters and trigger an additional rule (Office365: 67856001), but on their own they should not impact the spam score.'
result = f'- Found {len(links)} <u> tags. This is not by itself an indication of spam, but is known to trigger some rules (like Office365: 67856001)\n'
context = ''
for i in range(len(links)):
context += str(links[i]) + '\n\n'
if i > 5: break
return {
'description' : desc,
'context' : context,
'analysis' : result
}
def testSuspiciousWords(self):
desc = '''
Input text message contained words considered suspicious in the context of e-mails.
You will therefore have a better chance of delivering your phishing e-mail if you get rid of them.
'''
context = ''
result = ''
text = self.html
foundWords = set()
totalChecked = 0
totalFound = 0
for title, words in PhishingMailParser.Suspicious_Words.items():
found = set()
for word in words[1]:
if word.lower() in foundWords:
continue
totalChecked += 1
if re.search(r'\b' + re.escape(word) + r'\b', text, re.I):
found.add(word.lower())
foundWords.add(word.lower())
pos = text.find(word.lower())
if pos != -1:
line = ''
N = 50
if pos > N:
line = text[pos-N:pos]
line += text[pos:pos+N]
pos2 = line.find(word.lower())
line = line[:pos2] + logger.colored(line[pos2:pos2+len(word)], "red") + line[pos2+len(word):]
line = line.replace('\n', '')
line = re.sub(r' {2,}', ' ', line)
context += '\n' + line + '\n'
if len(found) > 0:
totalFound += len(found)
result += f'- Found {logger.colored(len(found), "red")} {logger.colored(title, "yellow")} words {logger.colored(words[0], "cyan")}:\n'
for w in found:
result += f'\t- {w}\n'
result += '\n'
if totalFound == 0:
return {}
result += f'- Found in total {logger.colored(totalFound, "red")} suspicious words (out of {totalChecked} total checked).\n'
return {
'description' : desc,
'context' : context,
'analysis' : result
}
def testLinksWithHtmlCode(self):
links = self.soup('a')
desc = 'Links that contain HTML code within <a> ... </a> may increase Spam score heavily'
context = ''
result = ''
num = 0
embed = ''
for link in links:
text = str(link)
pos = text.find('>')
code = text[pos+1:]
m = re.search(r'(.+)<\s*/\s*a\s*>', code, re.I)
if m:
code = m.group(1)
suspicious = '<' in text and '>' in text
if suspicious:
num += 1
if num < 5:
N = 70
tmp = text[:N]
if len(text) > N:
tmp += ' ... ' + text[-N:]
context += tmp + '\n'
code2 = PhishingMailParser.context(code)
context += f"\n\t- {logger.colored('Code inside of <a> tag:','red')}\n\t\t" + logger.colored(code2, 'yellow') + '\n'
if num > 0:
result += f'- Found {num} <a> tags that contained HTML code inside!\n'
result += '\t Links conveying HTML code within <a> ... </a> may greatly increase message Spam score!\n'
if len(result) == 0:
return []
return {
'description' : desc,
'context' : context,
'analysis' : result
}
def testLinksWithGETParams(self):
links = self.soup('a')
desc = 'Links with URLs containing GET parameters will be noticed by anti-spam filters resulting in another rule triggering on message (Office365: 21615005).'
context = ''
result = ''
num = 0
embed = ''
for link in links:
try:
href = link['href']
except:
continue
text = link.getText().replace('\n', '').strip()
params = dict(parse.parse_qsl(parse.urlsplit(href).query))
if len(params) > 0:
num += 1
if num < 5:
context += PhishingMailParser.context(link) + '\n\n'
hr = href
pos = hr.find('?')
if pos != -1:
hr = hr[:pos] + logger.colored(hr[pos:], 'yellow')
hr = hr.replace('\n', '').strip()
context += f'\thref = "{hr}"\n\n'
f = ''
for k, v in params.items():
f += f'{k}={v[:5]}..., '
context += f'\tparams = {f}\n\n'
if num > 0:
result += f'- Found {logger.colored(num, "red")} <a> tags with href="..." {logger.colored("URLs containing GET params", "yellow")}.\n'
result += '\t Links with URLs that contain GET params might trigger anti-spam rule (Office365: 21615005)\n'
if len(result) == 0:
return []
return {
'description' : desc,
'context' : context,
'analysis' : result
}
def testLinksWithDangerousExtensions(self):
links = self.soup('a')
desc = 'Message contained <a> tags with href="..." links pointing to a file with dangerous extension (such as .exe)'
context = ''
result = ''
num = 0
embed = ''
for link in links:
try:
href = link['href']
except:
continue
text = link.getText()
parsed = parse.urlsplit(href)
if '.' not in parsed.path:
continue
pos = parsed.path.rfind('.')
if pos == -1:
continue
extension = parsed.path.lower()[pos:]
if extension in executable_extensions:
num += 1
if num < 5:
context += PhishingMailParser.context(link) + '\n'
hr = href[:90]
pos1 = hr.lower().find(extension.lower())
hr = logger.colored(hr[:pos1], 'yellow') + logger.colored(hr[pos1:pos1+len(extension)], 'red') + logger.colored(hr[pos1+len(extension):], 'yellow')
context += f'\thref = "{hr}"\n'
context += f'\ttext = "{text[:90]}"\n\n'
context += f'\tExtension matched: {logger.colored(extension, "red")}\n'
if num > 0:
result += f'- Found {num} <a> tags with href="..." URLs pointing to files with dangerous extensions (such as .exe).\n'
result += '\t Links with URLs that point to potentially executable files might trigger anti-spam rule (Office365: 460985005)\n'
if len(result) == 0:
return []
return {
'description' : desc,
'context' : context,
'analysis' : result
}
def testLinksWithGETParamsBeingURLs(self):
links = self.soup('a')
desc = 'Links with URLs that contain GET parameters pointing to another URL, will trigger two Office365 anti-spam rules (Office365: 45080400002).'
context = ''
result = ''
num = 0
embed = ''
for link in links:
try:
href = link['href']
except:
continue
text = link.getText()
params = dict(parse.parse_qsl(parse.urlsplit(href).query))
url = re.compile(r'((http|https)\:\/\/)?[a-zA-Z0-9\.\/\?\:@\-_=#]+\.([a-zA-Z]){2,6}([a-zA-Z0-9\.\&\/\?\:@\-_=#])*')
if len(params) > 0:
for k, v in params.items():
m = url.match(v)
if m:
urlmatched = m.group(1)
num += 1
if num < 5:
context += PhishingMailParser.context(link) + '\n'
hr = href[:90]
hr = logger.colored(hr, 'yellow')
context += f'\thref = "{hr}"\n'
context += f'\ttext = "{text[:90]}"\n\n'
context += f'\thref URL GET parameter contained another URL:\n\t\t' + logger.colored(v, "red") + '\n'
if num > 0:
result += f'- Found {num} <a> tags with href="..." URLs containing GET params containing another URL.\n'
result += '\t Links with URLs that contain GET params with another URL might trigger anti-spam rule (Office365: 45080400002)\n'
if len(result) == 0:
return []
return {
'description' : desc,
'context' : context,
'analysis' : result
}
def testMaskedLinks(self):
links = self.soup('a')
        desc = 'Links that masquerade their href= attribute by displaying a different link are considered harmful and will increase Spam score.'
context = ''
result = ''
num = 0
embed = ''
for link in links:
try:
href = link['href']
except:
continue
text = link.getText()
url = re.compile(r'((http|https)\:\/\/)?[a-zA-Z0-9\.\/\?\:@\-_=#]+\.([a-zA-Z]){2,6}([a-zA-Z0-9\.\&\/\?\:@\-_=#])*')
url2 = re.compile(r'((http|https)\:\/\/)[a-zA-Z0-9\.\/\?\:@\-_=#]+\.([a-zA-Z]){2,6}([a-zA-Z0-9\.\&\/\?\:@\-_=#])*')
m1 = url.match(href)
m2 = url2.search(text)
if m1 and m2:
num += 1
if num < 5:
context += PhishingMailParser.context(link) + '\n'
context += f'\thref = "{logger.colored(href[:90],"green")}"\n'
context += f'\ttext = "{logger.colored(text[:90],"red")}"\n\n'
if num > 0:
result += f'- Found {num} <a> tags that masquerade their href="" links with text!\n'
            result += '\t Links that try to hide the underlying URL are harmful and will be considered as Spam!\n'
if len(result) == 0:
return []
return {
'description' : desc,
'context' : context,
'analysis' : result
}
def testImagesNoAlt(self):
images = self.soup('img')
        desc = 'Images without ALT="value" attribute may increase Spam score.'
context = ''
result = ''
num = 0
embed = ''
for img in images:
src = img['src']
alt = ''
try:
alt = img['alt']
except:
pass
if alt == '':
num += 1
if num < 5:
context += PhishingMailParser.context(img) + '\n\n'
if num > 0:
result += f'- Found {num} <img> tags without ALT="value" attribute.\n'
result += '\t Images without alternate text set in their attribute may increase Spam score\n'
if len(result) == 0:
return []
return {
'description' : desc,
'context' : context,
'analysis' : result
}
def testEmbeddedImages(self):
images = self.soup('img')
x = '<img src="data:image/png;base64,<BLOB>"/>'
desc = f'Embedded images can increase Spam Confidence Level (SCL) in Office365. Embedded images are those with {logger.colored(x,"yellow")} . They should be avoided.'
context = ''
result = ''
num = 0
embed = ''
for img in images:
src = img['src']
alt = ''
try:
alt = img['alt']
except:
pass
if src.lower().startswith('data:image/'):
if len(embed) == 0:
embed = src[:30]
num += 1
if num < 5:
if len(alt) > 0:
context += f'- ALT="{alt}": ' + PhishingMailParser.context(img) + '\n'
else:
ctx = PhishingMailParser.context(img)
pos = ctx.find('data:')
pos2 = ctx.find('"', pos+1)
ctx = logger.colored(ctx[:pos], 'yellow') + logger.colored(ctx[pos:pos2], 'red') + logger.colored(ctx[pos2:], 'yellow')
context += ctx + '\n'
if num > 0:
result += f'- Found {logger.colored(num, "red")} <img> tags with embedded image ({logger.colored(embed, "yellow")}).\n'
result += '\t Embedded images increase Office365 SCL (Spam) level!\n'
if len(result) == 0:
return []
return {
'description' : desc,
'context' : context,
'analysis' : result
}
def printOutput(out):
if options['format'] == 'text':
width = 100
num = 0
for k, v in out.items():
num += 1
analysis = v['analysis'].strip()
context = v['context'].strip()
desc = '\n'.join(textwrap.wrap(
v['description'],
width = 80,
initial_indent = '',
subsequent_indent = ' '
)).strip()
analysis = analysis.replace('- ', '\t- ')
print(f'''
------------------------------------------
({num}) Test: {logger.colored(k, "cyan")}
{logger.colored("DESCRIPTION", "blue")}:
{desc}
{logger.colored("CONTEXT", "blue")}:
{context}
{logger.colored("ANALYSIS", "blue")}:
{analysis}
''')
elif options['format'] == 'json':
print(json.dumps(out))
def opts(argv):
global options
global headers
o = argparse.ArgumentParser(
usage = 'phishing-HTML-linter.py [options] <file.html>'
)
req = o.add_argument_group('Required arguments')
req.add_argument('file', help = 'Input HTML file')
args = o.parse_args()
options.update(vars(args))
return args
def main(argv):
args = opts(argv)
if not args:
return False
print('''
:: Phishing HTML Linter
Shows you bad smells in your HTML code that will get your mails busted!
<NAME> / mgeeky
''')
html = ''
with open(args.file, 'rb') as f:
html = f.read()
p = PhishingMailParser({})
ret = p.parse(html.decode())
if len(ret) > 0:
printOutput(ret)
else:
print('\n[+] Congrats! Your message does not have any known bad smells that could trigger anti-spam rules.\n')
if __name__ == '__main__':
main(sys.argv)
| 2.1875
| 2
|
gitvier/common.py
|
MasterOdin/gitvier
| 2
|
12779165
|
<filename>gitvier/common.py
import subprocess
def get_input(question, default=""):
add = "[{}] ".format(default) if default != "" else ""
user = input("{}: {}".format(question, add)).strip()
if user == "":
user = default
return user
def get_yes(question, yes=False):
user = input("{}: [{}] ".format(question, 'yes' if yes is True else 'no')).strip().lower()
return user in ('yes', 'y')
def call(command):
command = command.strip().split()
command = subprocess.call(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
return command
def output(line, level=0):
line = ("│ " * level) + line
print(line)
| 3.15625
| 3
|
gsem/config.py
|
andriykohut/gsem
| 13
|
12779166
|
<filename>gsem/config.py
import os
from gsem.utils import gnome_shell_version
EXTENSION_DIR = os.path.expanduser("~/.local/share/gnome-shell/extensions")
GNOME_SHELL_VERSION = gnome_shell_version()
API_ROOT = "https://extensions.gnome.org"
API_DETAIL = f"{API_ROOT}/ajax/detail"
API_SEARCH = f"{API_ROOT}/extension-query"
| 1.648438
| 2
|
modules/QQqt4/exprobot.py
|
earlybackhome/You-cannot-guess
| 21
|
12779167
|
#!/usr/bin/env python
# coding=utf-8
from PyQt4.QtCore import *
import requests
import re, os
from OCR import Image2txt
from PIL import Image
class backEnd(QThread):
finish_signal = pyqtSignal(str, bool)
def __init__(self, txt):
super(backEnd, self).__init__()
self.txt = txt
def run(self):
path = '../OCR/tempimg/'
if not os.path.exists(path):
os.mkdir(path)
url='https://image.baidu.com/search/flip?tn=baiduimage&ie=utf-8&word='+self.txt+'表情包'+'&ct=201326592&ic=0&lm=-1&width=&height=&v=flip'
#url='http://image.baidu.com/search/flip?tn=baiduimage&ipn=r&ct=201326592&cl=2&lm=-1&st=-1&fm=result&fr=&sf=1&fmq=1496141615672_R&pv=&ic=0&nc=1&z=&se=1&showtab=0&fb=0&width=&height=&face=0&istype=2&ie=utf-8&ctd=1496141615672%5E00_1524X790&word=%E8%A1%A8%E6%83%85%E5%8C%85'
html=requests.get(url).text
pic_url=re.findall('"objURL":"(.*?)",',html,re.S)
i = 0
imgnum = 0
for each in pic_url:
print(each)
try:
pic=requests.get(each,timeout=10)
except requests.exceptions.ConnectionError:
print('error')
continue
imgpath=path + str(i) + '.jpg'
fp = open(imgpath,'wb')
fp.write(pic.content)
fp.close()
try:
im = Image.open(imgpath)
(x,y) = im.size #read image size
x_s = 200 #define standard width
y_s = y * x_s // x #calc height based on standard width
out = im.resize((x_s,y_s),Image.ANTIALIAS) #resize image with high-quality
out.save(imgpath)
except OSError as e:
pass
i += 1
try:
pic = Image2txt.picture_ocr(imgpath)
txt = pic.get_crop_txt()
print(txt)
# if txt is ok
except AttributeError as e:
continue
if not txt:
                print('ocr failed, giving up')
continue
else:
imgnum += 1
self.finish_signal.emit(imgpath, True)
if imgnum >= 3:
return
| 2.53125
| 3
|
Machine Learning/Lab/lab02/knn/knn.py
|
jJayyyyyyy/USTC-2018-Smester-1
| 32
|
12779168
|
<filename>Machine Learning/Lab/lab02/knn/knn.py
'''
1. Preprocessing
    1.1 [done] Standardization
    1.2 [done] Randomly split into training and test sets by a given ratio
2. knn
    2.1 Take one test sample x as input
    2.2 Compute the distances between x and all train_x, and sort them
    2.3 Take the first k train_x (i.e. the k nearest ones) and count their labels (i.e. train_y)
    2.4 Take the label with the largest count as the label of x
'''
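# Cross-check sketch (not part of this lab solution): the same pipeline expressed with
# scikit-learn, handy for sanity-checking the hand-written KNN implemented below.
#
#   from sklearn.datasets import load_iris
#   from sklearn.model_selection import train_test_split
#   from sklearn.neighbors import KNeighborsClassifier
#   X, y = load_iris(return_X_y=True)
#   X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25)
#   print(KNeighborsClassifier(n_neighbors=3).fit(X_train, y_train).score(X_test, y_test))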
import numpy as np
class Dataset(object):
def __init__(self, train_x, train_y, test_x, test_y):
self.train_x = train_x
self.train_y = train_y
self.test_x = test_x
self.test_y = test_y
self.pred_y = None
def print_info(self):
print(self.train_x)
print(self.train_y)
print(self.test_x)
print(self.test_y)
print(self.pred_y)
class FeatureEngineer(object):
def __init__(self, iris):
self.x = iris.data[:]
self.y = iris.target[:]
# print(self.x)
# print(self.y)
def standardize(self, mean=0, std=1):
'''
        Standardize the dataset X
return: @self.std_x
'''
# std_x = np.zeros(self.x.shape)
mean = self.x.mean(axis = 0)
std = self.x.std(axis = 0)
std_x = []
for x in self.x:
record = []
size = len(x)
for i in range(size):
feature = x[i]
feature = (feature - mean[i]) / std[i]
record.append(feature)
std_x.append(record)
self.std_x = np.array(std_x)
# print(self.std_x)
return self.std_x
def standardize_with_sklearn(self):
from sklearn.preprocessing import StandardScaler
ss = StandardScaler()
std_x = ss.fit_transform(self.x)
self.std_x = std_x
# print(std_xx)
return self.std_x
def get_index_list(self, shuffle=True,seed=None):
index_list = np.arange(self.x.shape[0])
if shuffle:
if seed:
np.random.seed(seed)
np.random.shuffle(index_list)
# print(index_list)
return index_list
def get_train_and_test_data(self, train_ratio=0.8, shuffle=True, seed=1):
x = self.x
y = self.y
size = self.x.shape[0]
train_size = int(size * train_ratio)
test_size = size - train_size
index_list = self.get_index_list(shuffle=shuffle, seed=seed)
train_index = index_list[:train_size]
test_index = index_list[train_size:]
dataset = Dataset(x[train_index], y[train_index], x[test_index], y[test_index])
return dataset
class Neighbor(object):
def __init__(self, index, distance):
self.index = index
self.distance = distance
def print_info(self):
print(self.index, self.distance)
class KNN(object):
def __init__(self, dataset, k=5):
self.dataset = dataset
self.k = k
    # Compute the squared Euclidean distance between one sample and all samples in the training set
def get_euclidean_distance(self, one_x):
x = self.dataset.train_x
        # sum over each row (record) separately
distance = np.power(one_x - x, 2).sum(axis=1)
return distance
def get_sorted_neighbor_list(self, one_x):
distance = self.get_euclidean_distance(one_x)
neighbor_list = []
for i in range(distance.shape[0]):
neighbor = Neighbor(i, distance[i])
neighbor_list.append(neighbor)
sorted_neighbor_list = sorted(neighbor_list, key=lambda neighbor : neighbor.distance)
return sorted_neighbor_list
    # Get the class labels of the k nearest neighbors
def get_k_label_list(self, k_neighbor_list):
k_label_list = []
for neighbor in k_neighbor_list:
index = neighbor.index
label = self.dataset.train_y[index]
k_label_list.append(label)
return k_label_list
    # Count the labels; the label with the most votes is the predicted label for this test sample
def get_majority_vote(self, one_x):
sorted_neighbor_list = self.get_sorted_neighbor_list(one_x)
k_neighbor_list = sorted_neighbor_list[:self.k]
k_label_list = self.get_k_label_list(k_neighbor_list)
d = {}
for label in k_label_list:
if label in d:
d[label] += 1
else:
                d[label] = 1  # the first occurrence already counts as one vote
maxcnt = 0
maxlabel = k_label_list[0]
for key, value in d.items():
if maxcnt < value:
maxcnt = value
maxlabel = key
majority_vote = maxlabel
return majority_vote
def get_prediction(self):
train_x = self.dataset.train_x
train_y = self.dataset.train_y
test_x = self.dataset.test_x
pred_y = []
for one_x in test_x:
label = self.get_majority_vote(one_x)
pred_y.append(label)
self.dataset.pred_y = np.array(pred_y)
def get_accuracy(self):
test_y = self.dataset.test_y
pred_y = self.dataset.pred_y
accu = np.sum(test_y == pred_y) / test_y.shape[0]
        print('Model accuracy: ', accu)
def test_iris():
from sklearn import datasets
iris = datasets.load_iris()
fe = FeatureEngineer(iris)
fe.standardize(mean=0, std=1)
dataset = fe.get_train_and_test_data(train_ratio=0.75, seed=None)
knn = KNN(dataset, k=3)
knn.get_prediction()
knn.get_accuracy()
if __name__ == '__main__':
test_iris()
| 3.421875
| 3
|
cs15211/BulbSwitcher.py
|
JulyKikuAkita/PythonPrac
| 1
|
12779169
|
__source__ = 'https://leetcode.com/problems/bulb-switcher/description/'
# Time: O(1)
# Space: O(1)
#
# Description: Leetcode # 319. Bulb Switcher
#
# There are n bulbs that are initially off.
# You first turn on all the bulbs.
# Then, you turn off every second bulb.
# On the third round, you toggle every third bulb (turning on if it's off or turning off if it's on).
# For the ith round, you toggle every ith bulb. For the nth round, you only toggle the last bulb.
# Find how many bulbs are on after n rounds.
#
# Example:
#
# Given n = 3.
#
# At first, the three bulbs are [off, off, off].
# After first round, the three bulbs are [on, on, on].
# After second round, the three bulbs are [on, off, on].
# After third round, the three bulbs are [on, off, off].
#
# So you should return 1, because only one bulb is on.
# Related Topics
# Math Brainteaser
# Similar Questions
# Bulb Switcher II
#
import math
import unittest
class Solution(object):
def bulbSwitch(self, n):
"""
type n: int
rtype: int
"""
# The number of full squares.
return int(math.sqrt(n))
class TestMethods(unittest.TestCase):
def test_Local(self):
self.assertEqual(1, 1)
if __name__ == '__main__':
unittest.main()
Java = '''
#Thought: https://discuss.leetcode.com/topic/39558/share-my-o-1-solution-with-explanation
A bulb ends up on iff it is switched an odd number of times.
Call them bulb 1 to bulb n. Bulb i is switched in round d if and only if d divides i.
So bulb i ends up on if and only if it has an odd number of divisors.
Divisors come in pairs, like i=12 has divisors 1 and 12, 2 and 6, and 3 and 4.
Except when i is a square, like 36 has divisors 1 and 36, 2 and 18, 3 and 12, 4 and 9, and double divisor 6.
So bulb i ends up on if and only if i is a square.
So just count the square numbers.
Let R = int(sqrt(n)). That's the root of the largest square in the range [1,n]. And 1 is the smallest root.
So you have the roots from 1 to R, that's R roots. Which correspond to the R squares.
So int(sqrt(n)) is the answer. (C++ does the conversion to int automatically,
because of the specified return type).
#100% 0ms
public class Solution {
public int bulbSwitch(int n) {
return (int) Math.sqrt(n);
}
}
'''
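# A minimal brute-force sketch (not part of the original solution) that simulates the
# toggling for small n and can be checked against the int(sqrt(n)) formula derived above.
def bulb_switch_bruteforce(n):
    bulbs = [False] * n
    for round_ in range(1, n + 1):
        for i in range(round_ - 1, n, round_):  # toggle every round_-th bulb (0-indexed)
            bulbs[i] = not bulbs[i]
    return sum(bulbs)

# Example check (kept commented out so importing this module stays side-effect free):
#   for n in range(1, 50):
#       assert bulb_switch_bruteforce(n) == Solution().bulbSwitch(n)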
| 4.1875
| 4
|
curso em video - Phython/desafios/desafio 12.py
|
ThyagoHiggins/LP-Phython
| 0
|
12779170
|
preco = float(input('Informe o preço do produto: R$ '))
print(f'O produto com preço original de R${preco} com desconto de 5% passa a ser R${preco*0.95:.2f}')
| 3.65625
| 4
|
setup.py
|
locationlabs/confab
| 3
|
12779171
|
<filename>setup.py<gh_stars>1-10
#!/usr/bin/env python
import os
from setuptools import setup, find_packages
# Workaround for running "setup.py test"
# See: http://bugs.python.org/issue15881
try:
import multiprocessing # noqa
except ImportError:
pass
__version__ = '1.7.2'
# Jenkins will replace __build__ with a unique value.
__build__ = ''
here = os.path.abspath(os.path.dirname(__file__))
try:
with open(os.path.join(here, 'README.rst')) as f:
README = f.read()
with open(os.path.join(here, 'CHANGES.rst')) as f:
CHANGES = f.read()
except:
README = ''
CHANGES = ''
setup(name='confab',
version=__version__ + __build__,
description='Configuration management with Fabric and Jinja2.',
long_description=README + '\n\n' + CHANGES,
author='Location Labs',
author_email='<EMAIL>',
url='http://github.com/locationlabs/confab',
license='Apache2',
packages=find_packages(exclude=['*.tests']),
setup_requires=[
'nose>=1.0'
],
install_requires=[
'Fabric>=1.4',
'Jinja2>=2.4',
'python-magic',
'gusset>=1.2',
'paramiko<1.13',
],
tests_require=[
'mock==1.0.1'
],
test_suite='confab.tests',
entry_points={
'console_scripts': [
'confab = confab.main:main',
'confab-show = confab.diagnostics:main',
]
},
)
| 1.5625
| 2
|
setup.py
|
thiwankajayasiri/Airband-_V
| 0
|
12779172
|
from setuptools import setup
setup(
name='V16_API',
packages=['V16_API'],
include_package_data=True,
install_requires=[
'flask', 'flask-bootstrap', 'flask-nav', 'pyserial', 'flask_wtf', 'gunicorn'
],
)
| 1.109375
| 1
|
setup.py
|
typemytype/defcon
| 0
|
12779173
|
<reponame>typemytype/defcon
#!/usr/bin/env python
import sys
from distutils.core import setup
try:
import fontTools
except:
print "*** Warning: defcon requires FontTools, see:"
print " fonttools.sf.net"
try:
import robofab
except:
print "*** Warning: defcon requires RoboFab, see:"
print " robofab.com"
if "sdist" in sys.argv:
import os
import subprocess
import shutil
docFolder = os.path.join(os.getcwd(), "documentation")
# remove existing
doctrees = os.path.join(docFolder, "build", "doctrees")
if os.path.exists(doctrees):
shutil.rmtree(doctrees)
# compile
p = subprocess.Popen(["make", "html"], cwd=docFolder)
p.wait()
# remove doctrees
shutil.rmtree(doctrees)
setup(name="defcon",
version="0.1",
description="A set of flexible objects for representing UFO data.",
author="<NAME>",
author_email="<EMAIL>",
url="https://github.com/typesupply/defcon",
license="MIT",
packages=[
"defcon",
"defcon.objects",
"defcon.pens",
"defcon.test",
"defcon.tools"
],
package_dir={"":"Lib"}
)
| 1.992188
| 2
|
util.py
|
VictorZXY/function-autoencoder
| 0
|
12779174
|
import torch.nn as nn
from matplotlib import pyplot as plt
def MLP(input_dim, out_dims):
"""
Creates an MLP for the models.
:param input_dim: Integer containing the dimensions of the input (= x_dim + y_dim).
:param out_dims: An iterable containing the output sizes of the layers of the MLP.
:return: The MLP, defined as a PyTorch neural network module.
"""
# The MLP (last layer without a ReLU)
layers = [nn.Linear(input_dim, out_dims[0])]
if len(out_dims) > 1:
layers.append(nn.ReLU())
for i in range(1, len(out_dims) - 1):
layers.append(nn.Linear(out_dims[i - 1], out_dims[i]))
layers.append(nn.ReLU())
layers.append(nn.Linear(out_dims[-2], out_dims[-1]))
return nn.Sequential(*layers)
def plot_functions(target_x, target_y, context_x, context_y, pred_y, σ_y,
save_to_filepath=None):
"""
Plots the predicted mean and variance and the context points.
:param target_x: An array of shape [batch_size, num_targets, 1] that contains
the x values of the target points.
:param target_y: An array of shape [batch_size, num_targets, 1] that contains
the y values of the target points.
:param context_x: An array of shape [batch_size, num_contexts, 1] that contains
the x values of the context points.
:param context_y: An array of shape [batch_size, num_contexts, 1] that contains
the y values of the context points.
:param pred_y: An array of shape [batch_size, num_targets, 1] that contains
the predicted means of the y values at the target points in target_x.
:param σ: An array of shape [batch_size, num_targets, 1] that contains the
predicted std. dev. of the y values at the target points in target_x.
:param save_to_filepath: A string containing the path of the file where the
plot is to be saved.
"""
# Plot everything
plt.plot(target_x[0], pred_y[0], 'tab:blue', linewidth=2)
plt.plot(target_x[0], target_y[0], 'k', linewidth=2, alpha=0.25)
plt.plot(context_x[0], context_y[0], 'kP', markersize=6)
plt.fill_between(
target_x[0, :, 0],
pred_y[0, :, 0] - 1.96 * σ_y[0, :, 0],
pred_y[0, :, 0] + 1.96 * σ_y[0, :, 0],
alpha=0.2,
facecolor='tab:blue',
interpolate=True)
# Make the plot pretty
plt.yticks([-2, 0, 2], fontsize=12)
plt.xticks([-2, 0, 2], fontsize=12)
plt.ylim([-2, 2])
ax = plt.gca()
if save_to_filepath is not None:
plt.savefig(save_to_filepath, bbox_inches='tight')
plt.show()
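# Minimal usage sketch (not part of the original module): builds the MLP defined above
# with hypothetical dimensions and pushes a random batch through it.
if __name__ == '__main__':
    import torch
    net = MLP(input_dim=3, out_dims=[128, 128, 2])  # e.g. x_dim + y_dim = 3
    out = net(torch.randn(16, 3))
    print(out.shape)  # expected: torch.Size([16, 2])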
| 3.703125
| 4
|
kerasy/engine/base_layer.py
|
iwasakishuto/Keras-Imitation
| 4
|
12779175
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import numpy as np
from ..utils.generic_utils import get_uid
class Layer():
"""Abstract base layer class."""
def __init__(self, **kwargs):
self._trainable_weights = []
self._non_trainable_weights = []
self._grads = {} # (name, delta)
self._updates = {}
prefix = self.__class__.__name__.lower()
self.name = prefix + '_' + str(get_uid(prefix))
self.trainable = kwargs.get('trainable', True)
def compute_output_shape(self, input_shape):
"""Computes the output shape of the layer."""
output_shape = input_shape
self.output_shape = output_shape
return output_shape
def build(self, input_shape):
output_shape = self.compute_output_shape(input_shape)
return output_shape
def add_weight(self, shape=(), name=None, dtype=None, initializer=None, regularizer=None, constraint=None, trainable=True):
"""
@param shape : (tuple) The shape of the weight.
@param dtype : (dtype) The dtype of the weight.
@param initializer: (string) An Initializer instance.
@param regularizer: (string) A Regularizer instance.
@param trainable : (bool) A boolean, whether the weight should be trained via backprop or not.
@return weight : (ndarray) The created weights variable.
"""
weight = initializer(shape=shape, dtype=dtype)
if trainable:
self._trainable_weights.append(name)
else:
self._non_trainable_weights.append(name)
self._updates[name] = np.expand_dims(weight, axis=0) # shape=(z,x,y)
self._grads[name] = np.zeros_like(weight) # shape=(x,y)
return weight
def update(self, optimizer, batch_size):
if self.trainable and len(self._non_trainable_weights)>0:
self._trainable_weights += self._non_trainable_weights
self._non_trainable_weights = []
elif self.trainable == False and len(self._trainable_weights)>0:
self._non_trainable_weights += self._trainable_weights
self._trainable_weights = []
for name in self._trainable_weights:
weight = self.__dict__.get(name)
regularizer = self.__dict__.get(f"{name}_regularizer")
grad = self._grads[name]/batch_size + regularizer.diff(weight)
new_weight = optimizer.get_updates(
grad=grad,
curt_param=weight,
name=f"{self.name}_{name}"
)
self.__dict__[name] = new_weight # Update.
# self._updates[name] = np.r_[self._updates[name], np.expand_dims(new_weight, axis=0)]
self._grads[name] = np.zeros_like(new_weight)
def get_weights(self):
return []
def set_weights(self, weights):
pass
@property
def weights(self):
return self.get_weights()
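# Minimal sketch (hypothetical, not part of kerasy itself): a Layer subclass that registers
# a single trainable weight through add_weight, using a plain callable as the initializer.
class ToyDense(Layer):
    def __init__(self, units=4, **kwargs):
        super().__init__(**kwargs)
        self.units = units

    def build(self, input_shape):
        # add_weight records the weight name in _trainable_weights and allocates its gradient buffer.
        self.kernel = self.add_weight(
            shape=(input_shape[-1], self.units),
            name="kernel",
            initializer=lambda shape, dtype=None: np.zeros(shape, dtype=dtype),
        )
        return super().build(input_shape)

# Usage (illustrative): ToyDense(units=4).build((3,)) registers a (3, 4) "kernel" weight.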
| 2.796875
| 3
|
parkkeeper/tests/test_models.py
|
telminov/django-park-keeper
| 4
|
12779176
|
# coding: utf-8
from django.test import TestCase
from djutils.testrunner import TearDownTestCaseMixin
from parkkeeper import models
from parkkeeper import factories
class BaseTaskTestCase(TearDownTestCaseMixin, TestCase):
def tearDown(self):
self.tearDownMongo()
def test_get_task_model_monit(self):
monit_task = factories.MonitTask()
task_type = monit_task.get_task_type()
task_model = models.BaseTask.get_task_model(task_type)
self.assertEqual(
task_model,
models.MonitTask
)
def test_get_task_model_work(self):
work_task = factories.WorkTask()
task_type = work_task.get_task_type()
task_model = models.BaseTask.get_task_model(task_type)
self.assertEqual(
task_model,
models.WorkTask
)
| 2.1875
| 2
|
Application/cdpapp/views.py
|
Adi1222/Customer-Data-Protection
| 0
|
12779177
|
import simplejson
from MySQLdb._exceptions import IntegrityError
from django.shortcuts import render, redirect, reverse, get_object_or_404, get_list_or_404, _get_queryset
from django.contrib.auth import login, logout, authenticate
from .forms import *
from django.core import serializers
from django.http import HttpResponseRedirect, HttpResponse, StreamingHttpResponse, HttpResponseServerError, JsonResponse
from django.contrib.auth import update_session_auth_hash
from django.contrib.auth.forms import UserCreationForm, AuthenticationForm
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from .models import *
from django.core.files.storage import FileSystemStorage
from django.contrib import messages
from django.views.decorators.csrf import csrf_exempt
from datetime import date, datetime, time, timedelta
from django.views.decorators import gzip
from calendar import *
import urllib.request
import requests
import cv2
import numpy as np
from .camera import VideoCamera
def login_request(request):
if request.method == 'POST':
form = AuthenticationForm(request=request, data=request.POST)
if form.is_valid():
username = form.cleaned_data.get('username')
            password = form.cleaned_data.get('password')
user = authenticate(username=username, password=password)
if user is not None:
login(request, user)
messages.success(request, f"You are now logged in as {request.user.username}")
return redirect('/Dashboard')
else:
pass
else:
pass
form = AuthenticationForm()
return render(request, "cdpapp/b1.html", context={"form": form})
def register(request):
if request.method == 'POST':
return redirect('/Dashboard')
else:
return render(request, 'cdpapp/register.html')
def getNavigation(request):
activeuser = Appuser.objects.get(user=request.user)
print(activeuser)
# activeuser = 12
query = ' select sm.* ' \
' from Roledetail rd ' \
' inner join Submenu sm on sm.id = rd.submenu_id ' \
' inner join Role r on r.id = rd.role_id ' \
' inner join Appuser au on au.role_id = r.id ' \
' inner join auth_user atu on atu.id = au.user_id ' \
' inner join Menu m on m.id = sm.menu_id ' \
' where atu.username = "%s" AND sm.is_deleted = "N" ' % (activeuser)
print(query)
submenusvalue = Submenu.objects.raw(query)
print(Submenu.objects.all())
return submenusvalue
@csrf_exempt
def get_cameras(request):
cameras = Camera.objects.filter(cluster_id=request.POST.get('cluster_id', ''))
camera_obj = serializers.serialize('python', cameras)
return JsonResponse(camera_obj, safe=False)
@csrf_exempt
def get_clusters(request):
clusters = Cluster.objects.filter(customer_id=request.POST.get('customer_id', ''))
cluster_obj = serializers.serialize('python', clusters)
return JsonResponse(cluster_obj, safe=False)
@csrf_exempt
def get_agents(request):
agents = Agent.objects.filter(customer_id=request.POST.get('customer_id', ''))
agent_obj = serializers.serialize('python', agents)
return JsonResponse(agent_obj, safe=False)
@csrf_exempt
def customer_validation(request):
present = 0
is_present = Cust_org.objects.filter(cust_org=request.POST.get('customer', ''))
if is_present:
present = 1
else:
present = 0
data = {
'present':present
}
return JsonResponse(data)
# ******************** DASHBOARD **************#
def dashboard(request):
(multiple_people_today, mobile_detected_today, unidentified_person_today, camera_tampered_today) = (0, 0, 0, 0)
(multiple_people, mobile_detected, unidentified_person, camera_tampered) = (0, 0, 0, 0)
delta = timedelta(days=1)
start_date = date(2020, 3, 27)
end_date = date.today()
start = start_date
end = end_date
categories = ['Camera Tampered', 'Mobile Detected', 'Multiple People', 'Unidentified Person']
while start <= end:
for category in categories:
incidents = Incident.objects.filter(incident_date=start, category=category)
total = 0
for incident in incidents:
total += 1
if category == 'Camera Tampered':
camera_tampered += total
elif category == 'Mobile Detected':
mobile_detected += total
elif category == 'Multiple People':
multiple_people += total
else :
unidentified_person += total
start += delta
today = date.today()
for category in categories:
incidents = Incident.objects.filter(incident_date=today, category=category)
cur = 0
for incident in incidents:
cur += 1
if category == 'Camera Tampered':
camera_tampered_today += cur
elif category == 'Mobile Detected':
mobile_detected_today += cur
elif category == 'Multiple People':
multiple_people_today += cur
else :
unidentified_person_today += cur
context = {
'multiple_people_today': multiple_people_today,
'mobile_detected_today': mobile_detected_today,
'unidentified_person_today': unidentified_person_today,
'camera_tampered_today': camera_tampered_today,
'multiple_people': multiple_people,
'mobile_detected': mobile_detected,
'unidentified_person': unidentified_person,
'camera_tampered': camera_tampered,
'submenusvalue': getNavigation(request)
}
return render(request, 'cdpapp/main dashboard.html', context=context)
def visitor_details_dashboard(request):
if request.user.is_superuser:
customers = Cust_org.objects.filter(is_deleted='N').order_by("-created_on", "-modified_on")
clusters = Cluster.objects.filter(is_deleted='N').order_by("-created_on", "-modified_on")
cameras = Camera.objects.filter(is_deleted='N')
return render(request, 'cdpapp/visitors-count.html', {'clusters': clusters, 'cameras': cameras, 'customers': customers, 'submenusvalue': getNavigation(request)})
else:
activeuser = Appuser.objects.get(user=request.user)
cust_inst = Cust_org.objects.get(cust_org=activeuser.customer)
clusters = Cluster.objects.filter(is_deleted='N', customer=cust_inst).order_by("-created_on", "-modified_on")
# cluster_ids = set(map(lambda x: x.pk, clusters))
# cameras = list(Camera.objects.filter(cluster_id__in=cluster_ids))
return render(request, 'cdpapp/visitors-count.html', {'clusters': clusters, 'submenusvalue': getNavigation(request)})
def vehicle_details_dashboard(request):
if request.user.is_superuser:
customers = Cust_org.objects.filter(is_deleted='N').order_by("-created_on", "-modified_on")
clusters = Cluster.objects.filter(is_deleted='N').order_by("-created_on", "-modified_on")
cameras = Camera.objects.filter(is_deleted='N')
return render(request, 'cdpapp/vehicle-count.html', {'clusters': clusters, 'cameras': cameras, 'customers': customers, 'submenusvalue': getNavigation(request)})
else:
activeuser = Appuser.objects.get(user=request.user)
cust_inst = Cust_org.objects.get(cust_org=activeuser.customer)
clusters = Cluster.objects.filter(is_deleted='N', customer=cust_inst).order_by("-created_on", "-modified_on")
return render(request, 'cdpapp/vehicle-count.html', {'clusters': clusters, 'submenusvalue': getNavigation(request)})
def age_details_dashboard(request):
if request.user.is_superuser:
customers = Cust_org.objects.filter(is_deleted='N').order_by("-created_on", "-modified_on")
clusters = Cluster.objects.filter(is_deleted='N').order_by("-created_on", "-modified_on")
cameras = Camera.objects.filter(is_deleted='N')
return render(request, 'cdpapp/age_detail.html', {'clusters': clusters, 'cameras': cameras, 'customers':customers, 'submenusvalue': getNavigation(request)})
else:
activeuser = Appuser.objects.get(user=request.user)
cust_inst = Cust_org.objects.get(cust_org=activeuser.customer)
clusters = Cluster.objects.filter(is_deleted='N', customer=cust_inst).order_by("-created_on", "-modified_on")
# cluster_ids = set(map(lambda x: x.pk, clusters))
# cameras = list(Camera.objects.filter(cluster_id__in=cluster_ids))
return render(request, 'cdpapp/age_detail.html', {'clusters': clusters, 'submenusvalue': getNavigation(request)})
def gender_details_dashboard(request):
if request.user.is_superuser:
customers = Cust_org.objects.filter(is_deleted='N').order_by("-created_on", "-modified_on")
clusters = Cluster.objects.filter(is_deleted='N').order_by("-created_on", "-modified_on")
cameras = Camera.objects.filter(is_deleted='N')
return render(request, 'cdpapp/gender_detail.html', {'clusters': clusters, 'cameras': cameras, 'customers':customers, 'submenusvalue': getNavigation(request)})
else:
activeuser = Appuser.objects.get(user=request.user)
cust_inst = Cust_org.objects.get(cust_org=activeuser.customer)
clusters = Cluster.objects.filter(is_deleted='N', customer=cust_inst).order_by("-created_on", "-modified_on")
# cluster_ids = set(map(lambda x: x.pk, clusters))
# cameras = list(Camera.objects.filter(cluster_id__in=cluster_ids))
return render(request, 'cdpapp/gender_detail.html', {'clusters': clusters, 'submenusvalue': getNavigation(request)})
def repeat_vehicle_details_dashboard(request):
if request.user.is_superuser:
customers = Cust_org.objects.filter(is_deleted='N').order_by("-created_on", "-modified_on")
clusters = Cluster.objects.filter(is_deleted='N').order_by("-created_on", "-modified_on")
cameras = Camera.objects.filter(is_deleted='N')
return render(request, 'cdpapp/repeat-vehicle.html', {'clusters': clusters,'customers':customers, 'submenusvalue': getNavigation(request)})
else:
activeuser = Appuser.objects.get(user=request.user)
cust_inst = Cust_org.objects.get(cust_org=activeuser.customer)
clusters = Cluster.objects.filter(is_deleted='N', customer=cust_inst).order_by("-created_on", "-modified_on")
return render(request, 'cdpapp/repeat-vehicle.html', {'clusters': clusters, 'submenusvalue': getNavigation(request)})
def repeat_visitor_details_dashboard(request):
if request.user.is_superuser:
customers = Cust_org.objects.filter(is_deleted='N').order_by("-created_on", "-modified_on")
clusters = Cluster.objects.filter(is_deleted='N').order_by("-created_on", "-modified_on")
cameras = Camera.objects.filter(is_deleted='N')
return render(request, 'cdpapp/repeat-visitors.html', {'clusters': clusters, 'cameras': cameras, 'customers':customers, 'submenusvalue': getNavigation(request)})
else:
activeuser = Appuser.objects.get(user=request.user)
cust_inst = Cust_org.objects.get(cust_org=activeuser.customer)
clusters = Cluster.objects.filter(is_deleted='N', customer=cust_inst).order_by("-created_on", "-modified_on")
return render(request, 'cdpapp/repeat-visitors.html', {'clusters': clusters, 'submenusvalue': getNavigation(request)})
def camera_tampering_details_dashboard(request):
customers = Cust_org.objects.filter(is_deleted='N').order_by("-created_on", "-modified_on")
activeuser = Appuser.objects.get(user=request.user) # for a particular customer we will show only the customer that belong to same customer organization
cust_inst = Cust_org.objects.get(cust_org=activeuser.customer)
categories = [ 'Camera Tampered', 'Mobile Detected', 'Multiple People', 'Unidentified Person']
agents = Agent.objects.filter(customer=cust_inst)
agent_ids = set(map(lambda x: x.id, agents))
incident_list = list(Incident.objects.filter(id__in=agent_ids).order_by("-created_on", "-modified_on"))
return render(request, 'cdpapp/cameratampering_details.html', {'agents':agents, 'categories':categories, 'customers':customers, 'submenusvalue': getNavigation(request)})
def mobile_detected_details_dashboard(request):
customers = Cust_org.objects.filter(is_deleted='N').order_by("-created_on", "-modified_on")
activeuser = Appuser.objects.get(user=request.user) # for a particular customer we will show only the customer that belong to same customer organization
cust_inst = Cust_org.objects.get(cust_org=activeuser.customer)
categories = [ 'Camera Tampered', 'Mobile Detected', 'Multiple People', 'Unidentified Person']
agents = Agent.objects.filter(customer=cust_inst)
agent_ids = set(map(lambda x: x.id, agents))
incident_list = list(Incident.objects.filter(id__in=agent_ids).order_by("-created_on", "-modified_on"))
return render(request, 'cdpapp/mobiledetected_details.html', {'agents':agents, 'categories':categories, 'customers':customers, 'submenusvalue': getNavigation(request)})
def multiple_people_details_dashboard(request):
customers = Cust_org.objects.filter(is_deleted='N').order_by("-created_on", "-modified_on")
activeuser = Appuser.objects.get(user=request.user) # for a particular customer we will show only the customer that belong to same customer organization
cust_inst = Cust_org.objects.get(cust_org=activeuser.customer)
categories = ['Camera Tampered', 'Mobile Detected', 'Multiple People', 'Unidentified Person']
agents = Agent.objects.filter(customer=cust_inst)
agent_ids = set(map(lambda x: x.id, agents))
incident_list = list(Incident.objects.filter(id__in=agent_ids).order_by("-created_on", "-modified_on"))
return render(request, 'cdpapp/multiplepeople_details.html', {'agents':agents, 'categories':categories, 'customers':customers, 'submenusvalue': getNavigation(request)})
def unidentified_person_details_dashboard(request):
customers = Cust_org.objects.filter(is_deleted='N').order_by("-created_on", "-modified_on")
activeuser = Appuser.objects.get(user=request.user) # for a particular customer we will show only the customer that belong to same customer organization
cust_inst = Cust_org.objects.get(cust_org=activeuser.customer)
categories = ['Camera Tampered', 'Mobile Detected', 'Multiple People', 'Unidentified Person']
agents = Agent.objects.filter(customer=cust_inst)
agent_ids = set(map(lambda x: x.id, agents))
incident_list = list(Incident.objects.filter(id__in=agent_ids).order_by("-created_on", "-modified_on"))
return render(request, 'cdpapp/unidentifiedperson_details.html', {'agents':agents, 'categories':categories, 'customers':customers, 'submenusvalue': getNavigation(request)})
def profile(request):
if request.method == 'POST':
activeuser = Appuser.objects.get(user=request.user)
form1 = ProfileForm(request.POST, instance=request.user)
form2 = ProfileForm1(request.POST, request.FILES, instance=request.user)
if form1.is_valid() and form2.is_valid():
userform = form1.save()
# customeform = form2.save()
# customeform.user = userform
# customeform.save()
print(request.FILES)
data = form2.cleaned_data
print(data)
mobile = data["mobile"]
image = data["profile_pic"]
# profile_pic = 'profile_image/' + str(image)
# print(profile_pic)
if image != None:
Appuser.objects.filter(user=request.user).update(mobile=mobile, profile_pic=image)
print(image)
print(image.name)
fs = FileSystemStorage()
filename = fs.save(image.name, image)
uploaded_file_url = fs.url(filename)
print(uploaded_file_url)
else:
Appuser.objects.filter(user=request.user).update(mobile=mobile)
messages.success(request, f"Profile Updated Successfully")
return redirect('/Profile')
else:
messages.error(request, form2.errors)
return redirect('/Profile')
else:
form1 = ProfileForm(instance=request.user)
form2 = ProfileForm1(instance=request.user)
app_user = Appuser.objects.get(user=request.user)
return render(request, 'cdpapp/main-Profile.html', {'form1': form1, 'form2': form2, 'app_user': app_user, 'submenusvalue': getNavigation(request)})
def change_password(request):
if request.method == 'POST':
form = PasswordChangeCustomForm(user=request.user, data=request.POST)
if form.is_valid():
user = form.save()
update_session_auth_hash(request, user) # Important!
messages.success(request, 'Your password was successfully updated!')
return redirect('/Profile')
else:
form = PasswordChangeCustomForm(request.user)
app_user = Appuser.objects.get(user=request.user)
return render(request, 'cdpapp/change password.html', {'form': form, 'app_user': app_user, 'submenusvalue': getNavigation(request)})
# ********************* REPORT ******************************
def count_visitors(request):
if request.user.is_superuser:
customers = Cust_org.objects.filter(is_deleted='N').order_by("-created_on", "-modified_on")
return render(request, 'cdpapp/report visitors.html', {'customers': customers, 'submenusvalue': getNavigation(request)})
else:
activeuser = Appuser.objects.get(user=request.user)
cust_inst = Cust_org.objects.get(cust_org=activeuser.customer)
clusters = Cluster.objects.filter(is_deleted='N', customer=cust_inst).order_by("-created_on", "-modified_on")
return render(request, 'cdpapp/report visitors.html', {'clusters': clusters, 'submenusvalue': getNavigation(request)})
def age_gender(request):
if request.user.is_superuser:
customers = Cust_org.objects.filter(is_deleted='N').order_by("-created_on", "-modified_on")
return render(request, 'cdpapp/report age-gender count.html', {'customers': customers, 'submenusvalue': getNavigation(request)})
else:
activeuser = Appuser.objects.get(user=request.user)
cust_inst = Cust_org.objects.get(cust_org=activeuser.customer)
clusters = Cluster.objects.filter(is_deleted='N', customer=cust_inst).order_by("-created_on", "-modified_on")
return render(request, 'cdpapp/report age-gender count.html', {'clusters': clusters, 'submenusvalue': getNavigation(request)})
def count_vehicles(request):
if request.user.is_superuser:
customers = Cust_org.objects.filter(is_deleted='N').order_by("-created_on", "-modified_on")
return render(request, 'cdpapp/report vehicle.html', {'customers': customers})
else:
activeuser = Appuser.objects.get(user=request.user)
cust_inst = Cust_org.objects.get(cust_org=activeuser.customer)
clusters = Cluster.objects.filter(is_deleted='N', customer=cust_inst).order_by("-created_on", "-modified_on")
return render(request, 'cdpapp/report vehicle.html', {'clusters': clusters})
def repeat_vehicles_count(request):
if request.user.is_superuser:
customers = Cust_org.objects.filter(is_deleted='N').order_by("-created_on", "-modified_on")
return render(request, 'cdpapp/report rvehicle.html', {'customers': customers})
else:
activeuser = Appuser.objects.get(user=request.user)
cust_inst = Cust_org.objects.get(cust_org=activeuser.customer)
clusters = Cluster.objects.filter(is_deleted='N', customer=cust_inst).order_by("-created_on", "-modified_on")
return render(request, 'cdpapp/report rvehicle.html', {'clusters': clusters})
def repeat_visitors_count(request):
if request.user.is_superuser:
customers = Cust_org.objects.filter(is_deleted='N').order_by("-created_on", "-modified_on")
return render(request, 'cdpapp/report rvisitors.html', {'customers': customers})
else:
activeuser = Appuser.objects.get(user=request.user)
cust_inst = Cust_org.objects.get(cust_org=activeuser.customer)
clusters = Cluster.objects.filter(is_deleted='N', customer=cust_inst).order_by("-created_on", "-modified_on")
return render(request, 'cdpapp/report rvisitors.html', {'clusters': clusters})
def camera_tampering(request):
if request.user.is_superuser:
customers = Cust_org.objects.filter(is_deleted='N').order_by("-created_on", "-modified_on")
return render(request, 'cdpapp/report camera tempering.html', {'customers': customers, 'submenusvalue': getNavigation(request)})
else:
activeuser = Appuser.objects.get(user=request.user)
cust_inst = Cust_org.objects.get(cust_org=activeuser.customer)
clusters = Cluster.objects.filter(is_deleted='N', customer=cust_inst).order_by("-created_on", "-modified_on")
return render(request, 'cdpapp/report camera tempering.html', {'clusters': clusters, 'submenusvalue': getNavigation(request)})
# ********************* REPORT - DETAILS ******************************
@csrf_exempt
def visitor_details(request):
visitors = []
dates = []
if request.is_ajax():
x = request.POST.get('startdate', '')
y = request.POST.get('enddate', '')
camera = request.POST.get('camera', '')
print(x)
print(camera)
(month_start, date_start, month_end, date_end) = (0, 0, 0, 0)
temp_month_start = str(x.split("-")[1])
temp_date_start = str(x.split("-")[2])
if (len(temp_month_start) > 1):
month_start = int(temp_month_start)
else:
month_start = int(temp_month_start[1])
date_start = int(temp_date_start)
temp_month_end = str(y.split("-")[1])
temp_date_end = str(y.split("-")[2])
if (len(temp_month_end) > 1):
month_end = int(temp_month_end)
else:
month_end = int(temp_month_end[1])
date_end = int(temp_date_end)
startdate = date(2020, month_start, date_start)
enddate = date(2020, month_end, date_end)
start = startdate
end = enddate
url = "http://172.16.31.10:8000/GetData?chartType=People&cameraName={}&startDate={}&endDate={}".format(str(camera), str(startdate), str(enddate))
response = requests.get(url)
data1 = response.json()
l = len(data1)
i = 0
people = 0
delta = timedelta(days=1)
while start <= end and i < l:
people = data1[i][str(start)][str(camera)]
dates.append(start)
visitors.append(people)
# visitors.append({"date" : start,"people": people})
start += delta
i += 1
print(visitors)
print()
data = {
'dates': dates,
'visitors': visitors
}
return JsonResponse(data)
@csrf_exempt
def age_details(request):
dates = []
data_male = []
data_female = []
if request.is_ajax():
x = request.POST.get('startdate','')
y = request.POST.get('enddate','')
camera = request.POST.get('camera','')
print(x)
print(camera)
(month_start, date_start, month_end, date_end) = (0,0,0,0)
temp_month_start = str(x.split("-")[1])
temp_date_start = str(x.split("-")[2])
if(len(temp_month_start) > 1):
month_start = int(temp_month_start)
else:
month_start = int(temp_month_start[1])
date_start = int(temp_date_start)
temp_month_end = str(y.split("-")[1])
temp_date_end = str(y.split("-")[2])
if(len(temp_month_end) > 1):
month_end = int(temp_month_end)
else:
month_end = int(temp_month_end[1])
date_end = int(temp_date_end)
startdate = date(2020, month_start, date_start)
enddate = date(2020, month_end, date_end)
start = startdate
end = enddate
delta = timedelta(days=1)
while start<=end:
url = "http://3.93.246.89:8000/GetData?chartType=Gender&cameraName={}&startDate={}&endDate={}".format(str(camera), str(start), str(start))
response = requests.get(url)
data1 = response.json()
data_male.append(data1[0][str(camera)]['Male'])
data_female.append(data1[0][str(camera)]['Female'])
dates.append(start)
start += delta
data = {'dates': dates, 'data_male': data_male, 'data_female': data_female}
print(data_female)
print(data_male)
return JsonResponse(data)
@csrf_exempt
def vehicle_details(request):
vehicles = []
dates = []
if request.is_ajax():
x = request.POST.get('startdate', '')
y = request.POST.get('enddate', '')
camera = request.POST.get('camera', '')
print(x)
print(camera)
(month_start, date_start, month_end, date_end) = (0, 0, 0, 0)
temp_month_start = str(x.split("-")[1])
temp_date_start = str(x.split("-")[2])
if (len(temp_month_start) > 1):
month_start = int(temp_month_start)
else:
month_start = int(temp_month_start[1])
date_start = int(temp_date_start)
temp_month_end = str(y.split("-")[1])
temp_date_end = str(y.split("-")[2])
if (len(temp_month_end) > 1):
month_end = int(temp_month_end)
else:
month_end = int(temp_month_end[1])
date_end = int(temp_date_end)
startdate = date(2020, month_start, date_start)
enddate = date(2020, month_end, date_end)
start = startdate
end = enddate
url = "http://3.93.246.89:8000/GetData?chartType=Vehicle&cameraName=&startDate={}&endDate={}".format(str(startdate), str(enddate))
response = requests.get(url)
data1 = response.json()
l = len(data1)
i = 0
vehicle = 0
delta = timedelta(days=1)
while start <= end and i < l:
vehicle = data1[i][str(start)]['B3-Parking']
dates.append(start)
vehicles.append(vehicle)
start += delta
i += 1
print(vehicles)
print()
data = {
'dates': dates,
'vehicles': vehicles
}
return JsonResponse(data)
@csrf_exempt
def repeat_vehicle_details(request):
repeatvehicles = []
dates = []
if request.is_ajax():
x = request.POST.get('startdate', '')
y = request.POST.get('enddate', '')
camera = request.POST.get('camera', '')
print(x)
print(camera)
(month_start, date_start, month_end, date_end) = (0, 0, 0, 0)
temp_month_start = str(x.split("-")[1])
temp_date_start = str(x.split("-")[2])
if (len(temp_month_start) > 1):
month_start = int(temp_month_start)
else:
month_start = int(temp_month_start[1])
date_start = int(temp_date_start)
temp_month_end = str(y.split("-")[1])
temp_date_end = str(y.split("-")[2])
if (len(temp_month_end) > 1):
month_end = int(temp_month_end)
else:
month_end = int(temp_month_end[1])
date_end = int(temp_date_end)
startdate = date(2020, month_start, date_start)
enddate = date(2020, month_end, date_end)
start = startdate
end = enddate
url = "http://3.93.246.89:8000/GetData?chartType=Vehicle&cameraName=&startDate={}&endDate={}".format(str(startdate), str(enddate))
response = requests.get(url)
data1 = response.json()
l = len(data1)
i = 0
vehicle = 0
delta = timedelta(days=1)
while start <= end and i < l:
vehicle = data1[i][str(start)]['B3-Parking']
dates.append(start)
repeatvehicles.append(vehicle)
start += delta
i += 1
print(repeatvehicles)
print()
data = {
'dates': dates,
'repeatvehicles': repeatvehicles
}
return JsonResponse(data)
@csrf_exempt
def repeat_visitor_details(request):
repeatvisitors = []
dates = []
if request.is_ajax():
x = request.POST.get('startdate', '')
y = request.POST.get('enddate', '')
camera = request.POST.get('camera', '')
print(x)
print(camera)
(month_start, date_start, month_end, date_end) = (0, 0, 0, 0)
temp_month_start = str(x.split("-")[1])
temp_date_start = str(x.split("-")[2])
if (len(temp_month_start) > 1):
month_start = int(temp_month_start)
else:
month_start = int(temp_month_start[1])
date_start = int(temp_date_start)
temp_month_end = str(y.split("-")[1])
temp_date_end = str(y.split("-")[2])
if (len(temp_month_end) > 1):
month_end = int(temp_month_end)
else:
month_end = int(temp_month_end[1])
date_end = int(temp_date_end)
startdate = date(2020, month_start, date_start)
enddate = date(2020, month_end, date_end)
start = startdate
end = enddate
url = "http://3.93.246.89:8000/GetData?chartType=People&cameraName={}&startDate={}&endDate={}".format(str(camera), str(startdate), str(enddate))
response = requests.get(url)
data1 = response.json()
l = len(data1)
i = 0
people = 0
delta = timedelta(days=1)
while start <= end and i < l:
people = data1[i][str(start)][str(camera)]
dates.append(start)
repeatvisitors.append(people)
# visitors.append({"date" : start,"people": people})
start += delta
i += 1
print(repeatvisitors)
print()
data = {
'dates': dates,
'repeatvisitors': repeatvisitors
}
return JsonResponse(data)
@csrf_exempt
def camera_tampering_details(request):
pass
def agent_list(request):
activeuser = Appuser.objects.get(user=request.user)
cust_inst = Cust_org.objects.get(cust_org=activeuser.customer)
agent_list = Agent.objects.filter(is_deleted='N', customer=cust_inst).order_by("-created_on", "-modified_on")
page = request.GET.get('page', 1)
paginator = Paginator(agent_list, 5)
try:
agents = paginator.page(page)
except PageNotAnInteger:
agents = paginator.page(1)
except EmptyPage:
agents = paginator.page(paginator.num_pages)
return render(request, 'cdpapp/agent table.html', {'agents': agents, 'submenusvalue': getNavigation(request)})
def add_agent(request):
if request.method == 'POST':
activeuser = Appuser.objects.get(user=request.user)
cust_inst = Cust_org.objects.get(cust_org=activeuser.customer)
fname = request.POST['f_name']
lname = request.POST['l_name']
mac_id = request.POST['mac_id']
created_by = request.user.username
modified_by = request.user.username
lead = request.POST['lead']
manager = request.POST['manager']
u = User.objects.get(first_name=manager)
a = Appuser.objects.get(user=u)
user = User.objects.get(first_name=lead)
appuser = Appuser.objects.get(user=user)
new_agent = Agent(fname=fname, lname=lname, created_by=created_by, modified_by=modified_by,customer=cust_inst, lead=appuser, manager_id=a.pk, mac_id=mac_id)
new_agent.save()
return redirect('/Admin/Agent')
else:
if request.user.is_superuser:
leads = Appuser.objects.filter(designation='Lead')
managers = Appuser.objects.filter(designation='Manager')
return render(request, 'cdpapp/agent.html', {'leads':leads, 'managers':managers, 'submenusvalue': getNavigation(request)})
else:
activeuser = Appuser.objects.get(user=request.user)
cust_inst = Cust_org.objects.get(cust_org=activeuser.customer)
leads = Appuser.objects.filter(customer=cust_inst, designation='Lead')
managers = Appuser.objects.filter(customer=cust_inst, designation='Manager')
return render(request, 'cdpapp/agent.html', {'leads':leads, 'managers':managers, 'submenusvalue': getNavigation(request)})
def edit_agent(request, agent_id):
print(agent_id)
agent = Agent.objects.get(pk=agent_id)
if request.method == 'POST':
form = AgentForm(request.POST, instance=agent)
if form.is_valid():
form.save()
return redirect('/Admin/Agent')
else:
form = AgentForm(instance=agent)
return render(request, 'cdpapp/agent_edit.html', {'form': form, 'submenusvalue': getNavigation(request)})
def delete_agent(request, agent_id):
agent = get_object_or_404(Agent, pk=agent_id)
agent.is_deleted = 'Y'
agent.save()
return redirect('/Admin/Agent')
def mysql_query(request):
activeuser = Appuser.objects.get(user=request.user)
cust_inst = Cust_org.objects.get(cust_org=activeuser.customer)
query = "select ag.* from Agent ag inner join Incident ic on ag.id = ic.agent_id where ag.customer_id ={}".format(cust_inst.pk)
objs = Agent.objects.raw(query)
print(objs)
return objs
# ************ NOTIFICATIONS *********************
def notification_list(request):
activeuser = Appuser.objects.get(user=request.user)
cust_inst = Cust_org.objects.get(cust_org=activeuser.customer)
agents = Agent.objects.filter(customer=cust_inst)
agent_ids = set(map(lambda x: x.id, agents))
incident_list = list(Incident.objects.filter(id__in=agent_ids).order_by("-created_on", "-modified_on"))
print(incident_list)
page = request.GET.get('page', 1)
paginator = Paginator(incident_list, 5)
try:
incidents = paginator.page(page)
except PageNotAnInteger:
incidents = paginator.page(1)
except EmptyPage:
incidents = paginator.page(paginator.num_pages)
return render(request, 'cdpapp/notification_list.html', {"incidents":incidents, 'submenusvalue': getNavigation(request)})
def current_notifications(request):
customers = Cust_org.objects.filter(is_deleted='N').order_by("-created_on", "-modified_on")
activeuser = Appuser.objects.get(user=request.user) # for a particular customer we will show only the customer that belong to same customer organization
cust_inst = Cust_org.objects.get(cust_org=activeuser.customer)
agents = Agent.objects.filter(customer=cust_inst)
agent_ids = set(map(lambda x: x.id, agents))
incident_list = list(Incident.objects.filter(id__in=agent_ids).order_by("-created_on", "-modified_on"))
return render(request, 'cdpapp/notification_list_current.html', {'agents':agents, 'customers':customers, 'submenusvalue': getNavigation(request)})
def old_notifications(request):
customers = Cust_org.objects.filter(is_deleted='N').order_by("-created_on", "-modified_on")
activeuser = Appuser.objects.get(user=request.user) # for a particular customer we will show only the customer that belong to same customer organization
cust_inst = Cust_org.objects.get(cust_org=activeuser.customer)
agents = Agent.objects.filter(customer=cust_inst)
agent_ids = set(map(lambda x: x.id, agents))
incident_list = list(Incident.objects.filter(id__in=agent_ids).order_by("-created_on", "-modified_on"))
return render(request, 'cdpapp/notification_list_old.html', {'agents':agents, 'customers':customers, 'submenusvalue': getNavigation(request)})
def view_notification(request, notification_id):
notification = Incident.objects.get(id=notification_id)
notification.notification_status = 'Viewed'
notification.save()
return render(request, 'cdpapp/notification_detail.html', {"notification":notification, 'submenusvalue': getNavigation(request)})
def reports(request):
customers = Cust_org.objects.filter(is_deleted='N').order_by("-created_on", "-modified_on")
activeuser = Appuser.objects.get(user=request.user) # for a particular customer we will show only the customer that belong to same customer organization
cust_inst = Cust_org.objects.get(cust_org=activeuser.customer)
categories = ['All', 'Camera Tampered', 'Mobile Detected', 'Multiple People', 'Unidentified Person']
agents = Agent.objects.filter(customer=cust_inst)
agent_ids = set(map(lambda x: x.id, agents))
incident_list = Incident.objects.filter(id__in=agent_ids).order_by("-created_on", "-modified_on")
return render(request, 'cdpapp/reports.html', {'agents':agents, 'categories':categories, 'customers':customers, 'submenusvalue': getNavigation(request)})
@csrf_exempt
def report_data(request):
incident_data = []
dates = []
macids = []
agent_data = []
if request.is_ajax():
x = request.POST.get('startdate', '')
y = request.POST.get('enddate', '')
agentid = request.POST.get('agent_id', '')
category = request.POST.get('category', '')
print(x)
print(agentid)
print(category)
        # x and y arrive as "YYYY-MM-DD" strings; parse year, month and day directly
        # instead of hard-coding the year and branching into two identical assignments.
        year_start, month_start, date_start = (int(part) for part in x.split("-"))
        year_end, month_end, date_end = (int(part) for part in y.split("-"))
        startdate = date(year_start, month_start, date_start)
        enddate = date(year_end, month_end, date_end)
start = startdate
end = enddate
activeuser = Appuser.objects.get(user=request.user)
cust_inst = Cust_org.objects.get(cust_org=activeuser.customer)
delta = timedelta(days=1)
while start <= end :
if category == 'All' and agentid == 'All':
incidents = Incident.objects.filter(incident_date=start)
elif category == 'All':
incidents = Incident.objects.filter(incident_date=start, agent_id=agentid)
elif agentid == 'All':
incidents = Incident.objects.filter(incident_date=start, category=category)
else:
incidents = Incident.objects.filter(incident_date=start, category=category, agent_id=agentid)
for incident in incidents:
incident_data.append(incident.category)
dates.append(start)
macids.append(incident.mac_id)
agent_data.append(incident.agent.fname)
start += delta
print()
data = {
'dates': dates,
'macids' : macids,
'agent_data' : agent_data,
'incident_data' : incident_data
}
return JsonResponse(data)
@csrf_exempt
def current_notifications_data(request):
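    # AJAX endpoint: returns the still-open incidents (notification_status='Open') for
    # the selected customer/agent as parallel lists of ids, dates, MAC ids, agent names
    # and categories.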
incident_data = []
ids = []
dates = []
macids = []
agent_data = []
if request.is_ajax():
agentid = request.POST.get('agent_id', '')
if request.user.is_superuser:
customer_id = request.POST.get('customer_id', '')
cust_inst = Cust_org.objects.get(id=customer_id)
if agentid == 'All':
agents = Agent.objects.filter(customer=cust_inst)
agent_ids = set(map(lambda x: x.id, agents))
                incident_list = Incident.objects.filter(agent_id__in=agent_ids, notification_status='Open').order_by("-created_on", "-modified_on")
for incident in incident_list:
incident_data.append(incident.category)
ids.append(incident.id)
dates.append(incident.incident_date)
macids.append(incident.mac_id)
agent_data.append(incident.agent.fname)
else:
agents = Agent.objects.filter(customer=cust_inst, id=agentid)
agent_ids = set(map(lambda x: x.id, agents))
                incident_list = Incident.objects.filter(agent_id__in=agent_ids, notification_status='Open').order_by("-created_on", "-modified_on")
for incident in incident_list:
ids.append(int(incident.id))
incident_data.append(incident.category)
dates.append(incident.incident_date)
macids.append(incident.mac_id)
agent_data.append(incident.agent.fname)
else:
activeuser = Appuser.objects.get(user=request.user)
cust_inst = Cust_org.objects.get(cust_org=activeuser.customer)
if agentid == 'All':
agents = Agent.objects.filter(customer=cust_inst)
agent_ids = set(map(lambda x: x.id, agents))
                incident_list = Incident.objects.filter(agent_id__in=agent_ids, notification_status='Open').order_by("-created_on", "-modified_on")
for incident in incident_list:
ids.append(incident.id)
incident_data.append(incident.category)
dates.append(incident.incident_date)
macids.append(incident.mac_id)
agent_data.append(incident.agent.fname)
else:
agents = Agent.objects.filter(customer=cust_inst, id=agentid)
agent_ids = set(map(lambda x: x.id, agents))
                incident_list = Incident.objects.filter(agent_id__in=agent_ids, notification_status='Open').order_by("-created_on", "-modified_on")
for incident in incident_list:
ids.append(incident.id)
incident_data.append(incident.category)
dates.append(incident.incident_date)
macids.append(incident.mac_id)
agent_data.append(incident.agent.fname)
data = {
'ids': ids,
'dates': dates,
'macids' : macids,
'agent_data' : agent_data,
'incident_data' : incident_data
}
return JsonResponse(data)
@csrf_exempt
def old_notifications_data(request):
incident_data = []
ids = []
dates = []
macids = []
agent_data = []
if request.is_ajax():
x = request.POST.get('startdate', '')
agentid = request.POST.get('agent_id', '')
        # x arrives as a "YYYY-MM-DD" string; parse the year as well instead of hard-coding 2020.
        year_start, month_start, date_start = (int(part) for part in x.split("-"))
        startdate = date(year_start, month_start, date_start)
start = startdate
if request.user.is_superuser:
customer_id = request.POST.get('customer_id', '')
cust_inst = Cust_org.objects.get(id=customer_id)
if agentid == 'All':
agents = Agent.objects.filter(customer=cust_inst)
agent_ids = set(map(lambda x: x.id, agents))
                incident_list = Incident.objects.filter(agent_id__in=agent_ids, notification_status='Viewed', incident_date=start).order_by("-created_on", "-modified_on")
for incident in incident_list:
incident_data.append(incident.category)
ids.append(incident.id)
dates.append(incident.incident_date)
macids.append(incident.mac_id)
agent_data.append(incident.agent.fname)
else:
agents = Agent.objects.filter(customer=cust_inst, id=agentid)
agent_ids = set(map(lambda x: x.id, agents))
                incident_list = Incident.objects.filter(agent_id__in=agent_ids, notification_status='Viewed', incident_date=start).order_by("-created_on", "-modified_on")
for incident in incident_list:
ids.append(incident.id)
incident_data.append(incident.category)
dates.append(incident.incident_date)
macids.append(incident.mac_id)
agent_data.append(incident.agent.fname)
else:
activeuser = Appuser.objects.get(user=request.user)
cust_inst = Cust_org.objects.get(cust_org=activeuser.customer)
if agentid == 'All':
agents = Agent.objects.filter(customer=cust_inst)
agent_ids = set(map(lambda x: x.id, agents))
                incident_list = Incident.objects.filter(agent_id__in=agent_ids, notification_status='Viewed', incident_date=start).order_by("-created_on", "-modified_on")
for incident in incident_list:
ids.append(incident.id)
incident_data.append(incident.category)
dates.append(incident.incident_date)
macids.append(incident.mac_id)
agent_data.append(incident.agent.fname)
else:
agents = Agent.objects.filter(customer=cust_inst, id=agentid)
agent_ids = set(map(lambda x: x.id, agents))
                incident_list = Incident.objects.filter(agent_id__in=agent_ids, notification_status='Viewed', incident_date=start).order_by("-created_on", "-modified_on")
for incident in incident_list:
ids.append(incident.id)
incident_data.append(incident.category)
dates.append(incident.incident_date)
macids.append(incident.mac_id)
agent_data.append(incident.agent.fname)
data = {
'ids': ids,
'dates': dates,
'macids' : macids,
'agent_data' : agent_data,
'incident_data' : incident_data
}
return JsonResponse(data)
def user_list(request):
if request.user.is_superuser: # for super admin we want to show all the customers list
user_list = Appuser.objects.filter(is_deleted='N', is_superuser='N').order_by("-created_on", "-modified_on")
else:
activeuser = Appuser.objects.get(user=request.user) # for a particular customer we will show only the customer that belong to same customer organization
cust_inst = Cust_org.objects.filter(cust_org=activeuser.customer)[0]
user_list = Appuser.objects.filter(is_deleted='N', is_superuser='N', customer=cust_inst).order_by("-created_on", "-modified_on")
page = request.GET.get('page', 1)
paginator = Paginator(user_list, 5)
try:
users = paginator.page(page)
except PageNotAnInteger:
users = paginator.page(1)
except EmptyPage:
users = paginator.page(paginator.num_pages)
return render(request, 'cdpapp/userlist.html', {'users': users, 'submenusvalue': getNavigation(request)})
def add_user(request):
if request.method == 'POST':
designation = request.POST['desig']
user_form = UserForm(request.POST)
if request.user.is_superuser:
super_admin_user_form = SuperAdminUserForm(request.POST)
if user_form.is_valid() and super_admin_user_form.is_valid():
user = user_form.save()
user.save()
app_user = super_admin_user_form.save(commit=False)
app_user.user = user
app_user.created_by = '<NAME>'
app_user.modified_by = '<NAME>'
app_user.designation = designation
app_user.save()
messages.success(request, 'User saved successfully.')
return redirect('/Admin/User')
else:
print(user_form.errors)
print(super_admin_user_form.errors)
messages.error(request, user_form.errors)
messages.error(request, super_admin_user_form.errors)
return redirect('/Admin/User/Add_User')
        else:  # the user is not a superuser
other_user_form = OtherUserForm(request.POST)
if user_form.is_valid() and other_user_form.is_valid():
user = user_form.save()
user.save()
activeuser = Appuser.objects.get(user=request.user)
cust_inst = Cust_org.objects.get(cust_org=activeuser.customer)
app_user = other_user_form.save(commit=False)
app_user.customer = cust_inst
app_user.user = user
app_user.designation = designation
app_user.created_by = request.user.username
app_user.modified_by = request.user.username
app_user.save()
messages.success(request, 'User saved successfully.')
return redirect('/Admin/User')
else:
print(user_form.errors)
print(other_user_form.errors)
messages.error(request, user_form.errors)
messages.error(request, other_user_form.errors)
return redirect('/Admin/User/Add_User')
else:
roles = Role.objects.filter(is_deleted='N')
customers = Cust_org.objects.filter(is_deleted='N')
user_form = UserForm()
super_admin_user_form = SuperAdminUserForm()
other_user_form = OtherUserForm()
return render(request, 'cdpapp/add user.html',
{'roles': roles, 'customers': customers, 'user_form': user_form, 'super_admin_user_form': super_admin_user_form,
'other_user_form': other_user_form, 'submenusvalue': getNavigation(request)})
def edit_user(request, user_id):
user = User.objects.get(pk=user_id)
if request.method == 'POST':
form1 = EditUserForm1(request.POST, instance=user)
form2 = EditUserForm2(request.POST, instance=user)
if form1.is_valid() and form2.is_valid():
form1.save()
form2.save()
return redirect('/Admin/User')
else:
form1 = EditUserForm1(instance=user)
form2 = EditUserForm2(instance=user)
return render(request, 'cdpapp/user_edit.html', {'form1': form1, 'form2':form2, 'submenusvalue': getNavigation(request)})
def delete_user(request, user_id):
user = get_object_or_404(Appuser, pk=user_id)
user.is_deleted = 'Y'
user.save()
return redirect('/Admin/User')
def authorization(request):
return render(request, 'cdpapp/roles.html')
def gen(camera):
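    # Frame generator for an MJPEG stream: each JPEG frame is wrapped in a
    # multipart/x-mixed-replace boundary so StreamingHttpResponse can serve live video.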
while True:
frame = camera.get_frame()
yield (b'--frame\r\n'
b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n\r\n')
def view_feed(request):
if request.method == 'POST':
camera = request.POST.get('camera')
cluster = request.POST.get('clust')
print(cluster)
try:
camera_inst = Camera.objects.get(camname=camera)
except Camera.DoesNotExist:
messages.error(request, 'Select a Camera')
clusters = Cluster.objects.filter(is_deleted='N').order_by("-created_on", "-modified_on")
return render(request, 'cdpapp/view_feed.html', {'clusters': clusters})
id = camera_inst.pk
ip = camera_inst.camip
ip = str(ip)
print(ip)
print(camera)
print(cluster)
# cap = cv2.VideoCapture('rtsp://Aditya:1234@192.168.1.101:7777')
# cap = cv2.VideoCapture(ip)
opt = request.POST.get('submit')
if opt == 'view':
try:
return StreamingHttpResponse(gen(VideoCamera(ip)), content_type="multipart/x-mixed-replace;boundary=frame")
except HttpResponseServerError as e:
print("aborted")
'''while (True):
ret, frame = cap.read()
cv2.imshow('frame', frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
cap.release()'''
elif opt == 'stop':
pass
else:
if request.user.is_superuser:
clusters = Cluster.objects.filter(is_deleted='N').order_by("-created_on", "-modified_on")
cameras = Camera.objects.filter(is_deleted='N')
return render(request, 'cdpapp/view_feed.html', {'clusters': clusters, 'cameras': cameras})
else:
clusters = Cluster.objects.filter(is_deleted='N', created_by=request.user.username).order_by("-created_on", "-modified_on")
cluster_ids = set(map(lambda x: x.pk, clusters))
cameras = list(Camera.objects.filter(cluster_id__in=cluster_ids))
return render(request, 'cdpapp/view_feed.html', {'clusters': clusters, 'cameras': cameras})
def subscription(request):
return render(request, 'cdpapp/subscription.html', {'submenusvalue': getNavigation(request)})
def subscription_plan(request):
return render(request, 'cdpapp/subscription plan.html', {'submenusvalue': getNavigation(request)})
def summary(request):
return render(request, 'cdpapp/summary.html', {'submenusvalue': getNavigation(request)})
# **************************** Super-Admin *************************
def algo_list(request):
algo_list = Algo_master.objects.filter(is_deleted='N').order_by("-created_on", "-modified_on")
page = request.GET.get('page', 1)
paginator = Paginator(algo_list, 5)
try:
algorithms = paginator.page(page)
except PageNotAnInteger:
algorithms = paginator.page(1)
except EmptyPage:
algorithms = paginator.page(paginator.num_pages)
return render(request, 'cdpapp/algorithm table.html', {'algorithms': algorithms, 'submenusvalue': getNavigation(request)})
def add_algo(request):
if request.method == 'POST':
algo = request.POST['algo_name']
algo_desc = request.POST['descr']
# created_by = request.user.username
# modified_by = request.user.username
new_algo = Algo_master(algo=algo, algo_desc=algo_desc)
new_algo.save()
return redirect('/Super-Admin/algorithms')
else:
return render(request, 'cdpapp/algorithm.html', {'submenusvalue': getNavigation(request)})
def edit_algo(request, algo_id):
algo = Algo_master.objects.get(pk=algo_id)
if request.method == 'POST':
form = AlgoithmForm(request.POST, instance=algo)
if form.is_valid():
form.save()
return redirect('/Super-Admin/algorithms')
else:
form = AlgoithmForm(instance=algo)
return render(request, 'cdpapp/algo_edit.html', {'form': form, 'submenusvalue': getNavigation(request)})
def delete_algo(request, algo_id):
algo = get_object_or_404(Algo_master, pk=algo_id)
algo.is_deleted = 'Y'
algo.save()
return redirect('/Super-Admin/algorithms')
def customer_list(request):
customer_list = Cust_org.objects.filter(is_deleted='N').order_by("-created_on", "-modified_on")
page = request.GET.get('page', 1)
paginator = Paginator(customer_list, 5)
try:
customers = paginator.page(page)
except PageNotAnInteger:
customers = paginator.page(1)
except EmptyPage:
customers = paginator.page(paginator.num_pages)
return render(request, 'cdpapp/customer table.html', {'customers': customers, 'submenusvalue': getNavigation(request)})
def add_customer(request):
    if request.method == 'POST':
        cust_org = request.POST['cust_org']
        cust_org_acro = request.POST['cust_acro']
        created_by = request.user.username
        modified_by = request.user.username
        bill_plan = request.POST['bill']
        status = request.POST['status']
        date_str = request.POST['onboard']
        bill_plan_inst = Bill_plan.objects.get(billplan=bill_plan)
        temp_date = datetime.strptime(date_str, "%Y-%m-%d").date()
        new_customer = Cust_org(cust_org=cust_org, cust_org_acro=cust_org_acro, status=status, bill_plan=bill_plan_inst, onboard_date=temp_date, created_by=created_by, modified_by=modified_by)
        try:
            new_customer.save()
        except IntegrityError:
            # saving a duplicate organisation violates the unique constraint
            bills = Bill_plan.objects.filter(is_deleted='N')
            messages.error(request, "Customer Organization already Exists.")
            return render(request, 'cdpapp/customer.html', {'bills': bills, 'submenusvalue': getNavigation(request)})
        return redirect('/Super-Admin/Customers')
else:
bills = Bill_plan.objects.filter(is_deleted='N')
'''
try:
last_inserted = Cust_org.objects.order_by('-id')[0]
customerid = last_inserted.id
except IndexError:
customerid = 1
except Cust_org.DoesNotExist:
customerid = 1'''
return render(request, 'cdpapp/customer.html', {'bills': bills, 'submenusvalue': getNavigation(request)})
def edit_customer(request, customer_id):
customer = Cust_org.objects.get(pk=customer_id)
if request.method == 'POST':
form = CustomerForm(request.POST, instance=customer)
if form.is_valid():
form.save()
return redirect('/Super-Admin/Customers')
else:
form = CustomerForm(instance=customer)
return render(request, 'cdpapp/customer_edit.html', {'form': form, 'submenusvalue': getNavigation(request)})
def delete_customer(request, customer_id):
customer = get_object_or_404(Cust_org, pk=customer_id)
customer.is_deleted = 'Y'
customer.save()
return redirect('/Super-Admin/Customers')
def bill_list(request):
bill_list = Bill_plan.objects.filter(is_deleted='N').order_by("-created_on", "-modified_on")
page = request.GET.get('page', 1)
paginator = Paginator(bill_list, 5)
try:
bills = paginator.page(page)
except PageNotAnInteger:
bills = paginator.page(1)
except EmptyPage:
bills = paginator.page(paginator.num_pages)
return render(request, 'cdpapp/Bill Plan Table.html', {'bills': bills, 'submenusvalue': getNavigation(request)})
def add_bill(request):
if request.method == 'POST':
billplan = request.POST['billplan']
billplan_cd = request.POST['billplan_cd']
created_by = request.user.username
modified_by = request.user.username
new_bill = Bill_plan(billplan=billplan, billplan_cd=billplan_cd, created_by=created_by, modified_by=modified_by)
new_bill.save()
return redirect('/Super-Admin/Bill-Plan')
else:
return render(request, 'cdpapp/BillPlan.html', {'submenusvalue': getNavigation(request)})
def edit_bill(request, bill_id):
bill = Bill_plan.objects.get(pk=bill_id)
if request.method == 'POST':
form = BillPlanForm(request.POST, instance=bill)
if form.is_valid():
form.save()
return redirect('/Super-Admin/Bill-Plan')
else:
form = BillPlanForm(instance=bill)
return render(request, 'cdpapp/bill_edit.html', {'form': form, 'submenusvalue': getNavigation(request)})
def delete_bill(request, bill_id):
bill = get_object_or_404(Bill_plan, pk=bill_id)
bill.is_deleted = 'Y'
bill.save()
return redirect('/Super-Admin/Bill-Plan')
def menu_list(request):
menu_list = Menu.objects.filter(is_deleted='N').order_by("menu","-created_on", "-modified_on")
page = request.GET.get('page', 1)
paginator = Paginator(menu_list, 10)
try:
menus = paginator.page(page)
except PageNotAnInteger:
menus = paginator.page(1)
except EmptyPage:
menus = paginator.page(paginator.num_pages)
return render(request, 'cdpapp/Menu List.html', {'menus': menus, 'submenusvalue': getNavigation(request)})
def add_menu(request):
if request.method == 'POST':
menu = request.POST['menu']
created_by = request.user.username
modified_by = request.user.username
new_menu = Menu(menu=menu, created_by=created_by, modified_by=modified_by)
new_menu.save()
return redirect('/Super-Admin/Menu')
else:
return render(request, 'cdpapp/Menu.html', {'submenusvalue': getNavigation(request)})
def edit_menu(request, menu_id):
menu = Menu.objects.get(pk=menu_id)
if request.method == 'POST':
form = MenuForm(request.POST, instance=menu)
if form.is_valid():
form.save()
return redirect('/Super-Admin/Menu')
else:
form = MenuForm(instance=menu)
return render(request, 'cdpapp/menu_edit.html', {'form': form, 'submenusvalue': getNavigation(request)})
def delete_menu(request, menu_id):
menu = get_object_or_404(Menu, pk=menu_id)
menu.is_deleted = 'Y'
menu.save()
return redirect('/Super-Admin/Menu')
def submenu_list(request):
menu_list = Menu.objects.filter(is_deleted='N').order_by("menu","-created_on", "-modified_on")
submenus = Submenu.objects.filter(is_deleted='N')
page = request.GET.get('page', 1)
paginator = Paginator(menu_list, 3)
try:
menus = paginator.page(page)
except PageNotAnInteger:
menus = paginator.page(1)
except EmptyPage:
menus = paginator.page(paginator.num_pages)
return render(request, 'cdpapp/Sub Menu List.html', {'menus': menus, 'submenus' : submenus, 'submenusvalue': getNavigation(request)})
def add_submenu(request):
if request.method == 'POST':
submenu = request.POST['submenu']
menu = request.POST['menu']
created_by = request.user.username
modified_by = request.user.username
menu_inst = Menu.objects.get(menu=menu)
new_submenu = Submenu(submenu=submenu, menu=menu_inst, created_by=created_by, modified_by=modified_by)
new_submenu.save()
return redirect('/Super-Admin/SubMenu')
else:
menus = Menu.objects.filter(is_deleted='N')
return render(request, 'cdpapp/SubMenu.html', {'menus': menus, 'submenusvalue': getNavigation(request)})
def edit_submenu(request, submenu_id):
submenu = Submenu.objects.get(pk=submenu_id)
if request.method == 'POST':
form = SubMenuForm(request.POST, instance=submenu)
if form.is_valid():
form.save()
return redirect('/Super-Admin/SubMenu')
else:
form = SubMenuForm(instance=submenu)
return render(request, 'cdpapp/submenu_edit.html', {'form': form, 'submenusvalue': getNavigation(request)})
def delete_submenu(request, submenu_id):
submenu = get_object_or_404(Submenu, pk=submenu_id)
submenu.is_deleted = 'Y'
submenu.save()
return redirect('/Super-Admin/SubMenu')
# ************************ Admin *******************************
def role_list(request):
role_list = Role.objects.filter(is_deleted='N').order_by("-created_on", "-modified_on")
page = request.GET.get('page', 1)
paginator = Paginator(role_list, 5)
try:
roles = paginator.page(page)
except PageNotAnInteger:
roles = paginator.page(1)
except EmptyPage:
roles = paginator.page(paginator.num_pages)
return render(request, 'cdpapp/Role List.html', {'roles': roles, 'submenusvalue': getNavigation(request)})
def add_role(request):
if request.method == 'POST':
role = request.POST['role']
role_desc = request.POST['role_desc']
created_by = request.user.username
modified_by = request.user.username
new_role = Role(role=role, role_desc=role_desc, created_by=created_by, modified_by=modified_by)
new_role.save()
List = []
for menu in Menu.objects.all(): # Fetching the selected checkboxes' values
if menu.is_deleted == 'N':
x = str(menu) + '[]'
r = request.POST.getlist(x)
# print(r)
if (len(r) != 0):
List.append(r)
print(List)
for i in List:
menu_name = i.pop(0)
print(menu_name)
for j in i:
submenu_name = j
print(submenu_name)
menu_inst = Menu.objects.get(menu=menu_name)
for x in Submenu.objects.all():
if x.menu == menu_inst and x.submenu == submenu_name:
print(x)
new_role_detail = Roledetail(menu=menu_inst, submenu=x, role=new_role)
new_role_detail.save()
return redirect('/Admin/Roles')
else:
menus = Menu.objects.filter(is_deleted='N')
submenus = Submenu.objects.filter(is_deleted='N')
return render(request, 'cdpapp/Role.html', {'menus': menus, 'submenus': submenus, 'submenusvalue': getNavigation(request)})
def edit_role(request, role_id):
role = Role.objects.get(pk=role_id)
if request.method == 'POST':
form = RoleForm(request.POST, instance=role)
if form.is_valid():
form.save()
return redirect('/Admin/Roles')
else:
form = RoleForm(instance=role)
return render(request, 'cdpapp/role_edit.html', {'form': form, 'submenusvalue': getNavigation(request)})
def role_details(request, role_id):
role = Role.objects.get(pk=role_id)
if request.method == 'POST':
form = RoleDetailForm(request.POST, instance=role)
if form.is_valid():
form.save()
return redirect('/Admin/Roles')
else:
form = RoleDetailForm(instance=role)
return render(request, 'cdpapp/role_detail.html', {'form': form, 'submenusvalue': getNavigation(request)})
def delete_role(request, role_id):
role = get_object_or_404(Role, pk=role_id)
role.is_deleted = 'Y'
role.save()
return redirect('/Admin/Roles')
def cluster_list(request):
cluster_list = Cluster.objects.filter(is_deleted='N').order_by("-created_on", "-modified_on")
page = request.GET.get('page', 1)
paginator = Paginator(cluster_list, 5)
try:
clusters = paginator.page(page)
except PageNotAnInteger:
clusters = paginator.page(1)
except EmptyPage:
clusters = paginator.page(paginator.num_pages)
return render(request, 'cdpapp/cluster table.html', {'clusters': clusters, 'submenusvalue': getNavigation(request)})
def add_cluster(request):
if request.method == 'POST':
cluster_name = request.POST['cluster_name']
description = request.POST['descr']
created_by = request.user.username
        new_cluster = Cluster(cluster_name=cluster_name, description=description, created_by=created_by)
new_cluster.save()
return redirect('/Admin/Configuration/Cluster')
else:
return render(request, 'cdpapp/cluster.html', {'submenusvalue': getNavigation(request)})
def edit_cluster(request, cluster_id):
cluster = Cluster.objects.get(pk=cluster_id)
if request.method == 'POST':
form = ClusterForm(request.POST, instance=cluster)
if form.is_valid():
form.save()
cluster.modified_by = request.user.username
cluster.save()
return redirect('/Admin/Configuration/Cluster')
else:
form = ClusterForm(instance=cluster)
return render(request, 'cdpapp/cluster_edit.html', {'form': form, 'submenusvalue': getNavigation(request)})
def delete_cluster(request, cluster_id):
cluster = get_object_or_404(Cluster, pk=cluster_id)
cluster.is_deleted = 'Y'
cluster.save()
return redirect('/Admin/Configuration/Cluster')
def camera_table(request):
camera_list = Camera.objects.filter(is_deleted='N').order_by("-created_on", "-modified_on")
page = request.GET.get('page', 1)
paginator = Paginator(camera_list, 5)
try:
cameras = paginator.page(page)
except PageNotAnInteger:
cameras = paginator.page(1)
except EmptyPage:
cameras = paginator.page(paginator.num_pages)
return render(request, 'cdpapp/camera table.html', {'cameras': cameras, 'submenusvalue': getNavigation(request)})
def add_camera(request):
if request.method == 'POST':
camname = request.POST['camname']
camip = request.POST['camip']
x1_cord = request.POST['x1']
y1_cord = request.POST['y1']
x2_cord = request.POST['x2']
y2_cord = request.POST['y2']
cluster = request.POST['clust']
algo_type = request.POST['algo']
clusterinst = Cluster.objects.get(cluster_name=cluster)
new_camera = Camera(camname=camname, camip=camip, x1_cord=x1_cord, y1_cord=y1_cord, x2_cord=x2_cord, y2_cord=y2_cord, cluster=clusterinst,
algo_type=algo_type)
new_camera.save()
return redirect('/Admin/Configuration/Camera')
else:
clusters = Cluster.objects.filter(is_deleted='N')
return render(request, 'cdpapp/Camera.html', {'clusters': clusters, 'submenusvalue': getNavigation(request)})
def edit_camera(request, camera_id):
camera = Camera.objects.get(pk=camera_id)
if request.method == 'POST':
form = CameraForm(request.POST, instance=camera)
if form.is_valid():
form.save()
return redirect('/Admin/Configuration/Camera')
else:
form = CameraForm(instance=camera)
return render(request, 'cdpapp/camera_edit.html', {'form': form, 'submenusvalue': getNavigation(request)})
def delete_camera(request, camera_id):
camera = get_object_or_404(Camera, pk=camera_id)
camera.is_deleted = 'Y'
camera.save()
return redirect('/Admin/Configuration/Camera')
def other_config(request):
return render(request, 'cdpapp/other-config.html', {'submenusvalue': getNavigation(request)}) # REMAINING
def logout_request(request):
logout(request)
return redirect('/')
| 1.90625
| 2
|
setup.py
|
benlindsay/sim-tree
| 0
|
12779178
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017 <NAME> <<EMAIL>>
from setuptools import setup  # setuptools (not distutils) is needed for install_requires to take effect
setup(
name = 'sim-tree',
packages = ['sim_tree'], # this must be the same as the name above
    install_requires = ['pandas'],  # os, time and string are standard-library modules and must not be listed here
version = '0.6',
description = 'A module for automating hierarchical simulation studies',
author = '<NAME>',
author_email = '<EMAIL>',
url = 'https://github.com/benlindsay/sim-tree',
download_url = 'https://github.com/benlindsay/sim-tree/archive/0.6.tar.gz',
keywords = ['workflow', 'simulations'],
classifiers = [],
)
| 1.101563
| 1
|
atx/record/scene_detector.py
|
jamjven/ATX
| 1,132
|
12779179
|
#-*- encoding: utf-8 -*-
import os
import cv2
import yaml
import numpy as np
from collections import defaultdict
def find_match(img, tmpl, rect=None, mask=None):
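    # Template-match `tmpl` inside `img` (optionally restricted to `rect`, with masked
    # pixels zeroed). Matching runs per BGR channel with cv2.TM_CCOEFF_NORMED and the
    # channel scores are blended with weights (0.3, 0.3, 0.4).
    # Returns (confidence, rect) of the best match.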
if rect is not None:
h, w = img.shape[:2]
x, y, x1, y1 = rect
if x1 > w or y1 > h:
return 0, None
img = img[y:y1, x:x1, :]
if mask is not None:
img = img.copy()
img[mask!=0] = 0
tmpl = tmpl.copy()
tmpl[mask!=0] = 0
s_bgr = cv2.split(tmpl) # Blue Green Red
i_bgr = cv2.split(img)
weight = (0.3, 0.3, 0.4)
resbgr = [0, 0, 0]
for i in range(3): # bgr
resbgr[i] = cv2.matchTemplate(i_bgr[i], s_bgr[i], cv2.TM_CCOEFF_NORMED)
match = resbgr[0]*weight[0] + resbgr[1]*weight[1] + resbgr[2]*weight[2]
min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(match)
confidence = max_val
x, y = max_loc
h, w = tmpl.shape[:2]
if rect is None:
rect = (x, y, x+w, y+h)
# cv2.rectangle(img, (x,y), (x+w,y+h), (0,255,0) ,2)
# cv2.imshow('test', img)
# cv2.waitKey(20)
return confidence, rect
def get_mask(img1, img2, thresh=20):
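    # Build a 3-channel 0/255 mask marking pixels whose mean absolute difference
    # between the two equally-sized images exceeds `thresh`.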
if img1.shape != img2.shape:
return
diff = cv2.absdiff(img1, img2)
diff = np.mean(diff, axis=2)
diff[diff<=thresh] = 0
diff[diff>thresh] = 255
mask = np.dstack([diff]*3)
return mask
def get_match_confidence(img1, img2, mask=None):
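    # Confidence that two equally-sized images match: masked pixels are zeroed in both
    # images and cv2.matchTemplate with TM_CCOEFF_NORMED gives the score.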
if img1.shape != img2.shape:
return False
## first try, using absdiff
# diff = cv2.absdiff(img1, img2)
# h, w, d = diff.shape
# total = h*w*d
# num = (diff<20).sum()
# print 'is_match', total, num
# return num > total*0.90
if mask is not None:
img1 = img1.copy()
img1[mask!=0] = 0
img2 = img2.copy()
img2[mask!=0] = 0
## using match
match = cv2.matchTemplate(img1, img2, cv2.TM_CCOEFF_NORMED)
_, confidence, _, _ = cv2.minMaxLoc(match)
# print confidence
return confidence
class SceneDetector(object):
'''detect game scene from screen image'''
def __init__(self, scene_directory):
self.scene_touches = {}
self.scene_directory = scene_directory
self.build_tree(scene_directory)
def build_tree(self, directory):
'''build scene tree from images'''
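        # Scene images are named "a-b-c.png": each dash-separated part becomes a level
        # in the scene tree. config.yml (if present) carries a per-scene match rect and
        # an optional mask image.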
confile = os.path.join(directory, 'config.yml')
conf = {}
if os.path.exists(confile):
conf = yaml.load(open(confile).read())
class node(defaultdict):
name = ''
parent = None
tmpl = None
rect = None
mask = None
def __str__(self):
obj = self
names = []
while obj.parent is not None:
names.append(obj.name)
obj = obj.parent
return '-'.join(names[::-1])
def tree():
return node(tree)
root = tree()
for s in os.listdir(directory):
if not s.endswith('.png') or s.endswith('_mask.png'):
continue
obj = root
for i in s[:-4].split('-'):
obj[i].name = i
obj[i].parent = obj
obj = obj[i]
obj.tmpl = cv2.imread(os.path.join(directory, s))
obj.rect = conf.get(s[:-4], {}).get('rect')
maskimg = conf.get(s[:-4], {}).get('mask')
if maskimg is not None:
maskimg = os.path.join(directory, maskimg)
if os.path.exists(maskimg):
obj.mask = cv2.imread(maskimg)
self.tree = root
self.current_scene = []
self.confile = confile
self.conf = conf
def match_child(self, img, node):
c, s, r = (0, None, None)
for scene in node.itervalues():
if scene.tmpl is None:
continue
print str(scene), scene.rect, img.shape
confidence, rect = find_match(img, scene.tmpl, scene.rect, scene.mask)
# print scene.name, confidence, rect
if confidence > c:
c, s, r = (confidence, scene, rect)
if c > 0.95:
key = str(s)
if key not in self.conf:
self.conf[key] = {}
changed = False
if c > self.conf[key].get('confidence', 0):
s.rect = r
self.conf[key]['confidence'] = c
self.conf[key]['rect'] = list(r)
changed = True
if changed or s.mask is None:
x, y, x1, y1 = r
s.mask = get_mask(img[y:y1, x:x1, :], s.tmpl, 20)
maskimg = os.path.join(self.scene_directory, '%s_mask.png' % key)
cv2.imwrite(maskimg, s.mask)
self.conf[key]['mask'] = maskimg
changed = True
if changed:
self.save_config()
return c, s, r
def save_config(self):
print 'save config', self.conf
with open(self.confile, 'w') as f:
yaml.dump(self.conf, f)
def detect(self, img):
# check current scene path
# print 'checking current scene'
if self.current_scene:
for i in range(len(self.current_scene)):
s, r = self.current_scene[i]
x, y, x1, y1 = r
c = get_match_confidence(img[y:y1, x:x1, :], s.tmpl, s.mask)
if c < 0.75:
break
else:
# print 'current scene ok'
s = self.current_scene[-1][0]
if len(s.values()) == 0:
return s
self.current_scene = self.current_scene[:i]
# top scene has changed
if not self.current_scene:
c, s, r = self.match_child(img, self.tree)
if c < 0.75:
return
self.current_scene = [(s, r)]
s = self.current_scene[-1][0]
while True:
c, s, r = self.match_child(img, s)
if c < 0.75:
break
self.current_scene.append((s, r))
return s
| 2.453125
| 2
|
description/funcmap.py
|
shelljane/FastCGRA
| 4
|
12779180
|
<reponame>shelljane/FastCGRA
from networkx.algorithms.operators.unary import reverse
import xmltodict
import json
import networkx as nx
from networkx.algorithms import isomorphism as iso
import utils
from utils import Base
from protocols import *
class IsoMapper(Base):
def __init__(self, graph, units):
self._original = graph
self._units = units
self._patterns = {}
self._matched = []
self._map = {}
self._graph = Graph()
self._compat = {}
def match(self):
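        # Enumerate all subgraph isomorphisms between the application graph and every
        # unit pattern (nodes are compatible when their "function" attributes agree),
        # then greedily commit the largest non-overlapping matches, building the mapped
        # graph and the vertex -> compatible-unit table as we go.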
for uname, unit in self._units.items():
self._patterns[uname] = {}
for pname, patt in unit.patterns().items():
self._patterns[uname][pname] = patt.graph()
# print("Pattern: " + uname + "." + pname)
# print(self._patterns[uname][pname].info())
# print("Matching: " + uname + "." + pname)
g1 = self._original.toNX()
g2 = self._patterns[uname][pname].toNX()
matcher = iso.DiGraphMatcher(g1, g2, lambda x, y: x["attrs"]["function"] == y["attrs"]["function"])
# print(matcher.subgraph_is_isomorphic())
isomorphisms = matcher.subgraph_isomorphisms_iter()
for match in isomorphisms:
self._matched.append((uname, pname, match, ))
# print(match)
self._matched.sort(key=lambda x: (len(x[2]), -len(self._units[x[0]].patterns())), reverse=True)
# print(self._original.info())
# print(utils.list2str(self._matched))
used = set()
for match in self._matched:
uname = match[0]
pname = match[1]
info = match[2]
duplicated = False
for v1, v2 in info.items():
if v1 in used:
duplicated = True
break
if duplicated:
continue
for v1, v2 in info.items():
used.add(v1)
vertexName = ""
for v1, v2 in info.items():
if not "." in v1:
vertexName += v1 + "_"
vertexName = vertexName[:-1]
self._graph.addVertex(vertexName, {"unit": uname, "pattern": pname})
if not vertexName in self._compat:
self._compat[vertexName] = set()
self._compat[vertexName].add(uname)
for v1, v2 in info.items():
portName = ""
portType = ""
for key, value in self._units[uname].pattern(pname).portMap().items():
if value == v2:
portName = key
if portName in self._units[uname].inputs():
portType = "input"
elif portName in self._units[uname].outputs():
portType = "output"
else:
assert portName in self._units[uname].inputs() or portName in self._units[uname].outputs(), "IsoMapper: Invalid port: " + portName + " of " + uname
if portName != "":
temp = portName
portName = vertexName + "." + portName
self._graph.addVertex(portName, {"unit": uname + "." + temp})
self._map[v1] = portName
if portType == "input":
self._graph.addEdge(portName, vertexName, {})
elif portType == "output":
self._graph.addEdge(vertexName, portName, {})
# print(utils.dict2str(self._map))
if len(used) < len(self._original.vertices()):
print("IsoMapper: FAILED. ")
exit(1)
for vname, vertex in self._original.vertices().items():
if vname in self._map:
for edge in self._original.edgesOut()[vname]:
if edge.to() in self._map:
self._graph.addEdge(self._map[edge.fr()], self._map[edge.to()], {})
def graph(self):
return self._graph
def graphInfo(self):
return self._graph.info()
def compat(self):
return self._compat
def compatInfo(self):
info = ""
for vertex, compats in self._compat.items():
info += vertex
for compat in compats:
info += " " + compat
info += "\n"
return info
def trivial(ops, units):
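    # Fallback one-op-per-unit mapping: for every operation, list all units whose
    # function set (ignoring leading/trailing "__") contains the op type.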
result = ""
for opname in ops:
op = ops[opname]
optype = op.type()
found = False
mapped = []
for key in units:
unit = units[key]
funcs = unit.funcs()
for name in funcs:
func = funcs[name]
if func[0:2] == "__":
func = func[2:]
if func[-2:] == "__":
func = func[:-2]
if func == optype:
found = True
mapped.append(unit.name())
temp = opname
for unit in mapped:
temp += " " + unit
result += temp + "\n"
# print("Map " + opname + " to " + utils.list2str(mapped))
return result
#TODO
| 2.4375
| 2
|
run-format-converter.py
|
bruno-ferreira/run-format-converter
| 0
|
12779181
|
<reponame>bruno-ferreira/run-format-converter
#!/usr/bin/env python
"""docstring"""
import os
from xml.etree import ElementTree as ET
import argparse
# create a class that opens and reads any xml format
# pwx
# gpx
# etc
# json
class OpenXMLFile(object):
    """Generic reader for the XML-based run formats (pwx, gpx, ...)."""
    def __init__(self, path):
        self.path = path
    def open(self):
        # parse the XML file and return its root element
        tree = ET.parse(self.path)
        return tree.getroot()
def pwx_reader(a_file):
"""docstring"""
a_file = os.path.abspath(a_file)
tree = ET.parse(a_file)
root = tree.getroot()
print root
print root.tag # main tag
# print root.attrib # main attribute
# for child in root:
# print child.tag, child.attrib
# for neighbor in root.iter('sample'):
# print 'bla'
# print neighbor.attrib
#
# print root[0][8:]
for country in root.findall('pwx/workout'):
print 'bla'
element = root.find('pwx/workout/sample')
print element
def main():
"""docstring"""
# Not sure if this is really needed. TODO: check it
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Converts run files formats')
parser.add_argument('-f',
'--file',
required=True,
help='run file that will be converted')
parser.add_argument('-t',
'--type',
default='json',
help='output type of run file')
args = vars(parser.parse_args())
pwx_reader(args['file'])
| 3.3125
| 3
|
services/storage/src/simcore_service_storage/dsm.py
|
colinRawlings/osparc-simcore
| 25
|
12779182
|
<reponame>colinRawlings/osparc-simcore
# pylint: disable=no-value-for-parameter
# FIXME: E1120:No value for argument 'dml' in method call
# pylint: disable=protected-access
# FIXME: Access to a protected member _result_proxy of a client class
import asyncio
import logging
import os
import re
import tempfile
from collections import deque
from concurrent.futures import ThreadPoolExecutor
from pathlib import Path
from typing import Any, Dict, List, Optional, Tuple, Union
import aiobotocore
import attr
import botocore
import sqlalchemy as sa
from aiobotocore.client import AioBaseClient
from aiobotocore.session import AioSession, ClientCreatorContext
from aiohttp import web
from aiopg.sa import Engine
from aiopg.sa.result import ResultProxy, RowProxy
from servicelib.aiohttp.aiopg_utils import DBAPIError, PostgresRetryPolicyUponOperation
from servicelib.aiohttp.client_session import get_client_session
from servicelib.utils import fire_and_forget_task
from sqlalchemy.dialects.postgresql import insert as pg_insert
from sqlalchemy.sql.expression import literal_column
from tenacity import retry
from tenacity.before_sleep import before_sleep_log
from tenacity.retry import retry_if_exception_type, retry_if_result
from tenacity.stop import stop_after_delay
from tenacity.wait import wait_exponential
from yarl import URL
from .access_layer import (
AccessRights,
get_file_access_rights,
get_project_access_rights,
get_readable_project_ids,
)
from .constants import (
APP_CONFIG_KEY,
APP_DB_ENGINE_KEY,
APP_DSM_KEY,
APP_S3_KEY,
DATCORE_ID,
DATCORE_STR,
SIMCORE_S3_ID,
SIMCORE_S3_STR,
)
from .datcore_adapter import datcore_adapter
from .models import (
DatasetMetaData,
FileMetaData,
FileMetaDataEx,
file_meta_data,
get_location_from_id,
projects,
)
from .s3wrapper.s3_client import MinioClientWrapper
from .settings import Settings
from .utils import download_to_file_or_raise
logger = logging.getLogger(__name__)
postgres_service_retry_policy_kwargs = PostgresRetryPolicyUponOperation(logger).kwargs
def setup_dsm(app: web.Application):
async def _cleanup_context(app: web.Application):
cfg: Settings = app[APP_CONFIG_KEY]
with ThreadPoolExecutor(max_workers=cfg.STORAGE_MAX_WORKERS) as executor:
dsm = DataStorageManager(
s3_client=app.get(APP_S3_KEY),
engine=app.get(APP_DB_ENGINE_KEY),
loop=asyncio.get_event_loop(),
pool=executor,
simcore_bucket_name=cfg.STORAGE_S3.S3_BUCKET_NAME,
has_project_db=not cfg.STORAGE_TESTING,
app=app,
) # type: ignore
app[APP_DSM_KEY] = dsm
yield
assert app[APP_DSM_KEY].pool is executor # nosec
logger.info("Shuting down %s", dsm.pool)
# ------
app.cleanup_ctx.append(_cleanup_context)
def to_meta_data_extended(row: RowProxy) -> FileMetaDataEx:
assert row
meta = FileMetaData(**dict(row)) # type: ignore
meta_extended = FileMetaDataEx(
fmd=meta,
parent_id=str(Path(meta.object_name).parent),
) # type: ignore
return meta_extended
@attr.s(auto_attribs=True)
class DatCoreApiToken:
api_token: Optional[str] = None
api_secret: Optional[str] = None
def to_tuple(self):
return (self.api_token, self.api_secret)
@attr.s(auto_attribs=True)
class DataStorageManager: # pylint: disable=too-many-public-methods
"""Data storage manager
The dsm has access to the database for all meta data and to the actual backend. For now this
is simcore's S3 [minio] and the datcore storage facilities.
For all data that is in-house (simcore.s3, ...) we keep a synchronized database with meta information
for the physical files.
For physical changes on S3, that might be time-consuming, the db keeps a state (delete and upload mostly)
The dsm provides the following additional functionalities:
    - listing of folders for a given user, optionally filtered using a regular expression and optionally
sorted by one of the meta data keys
- upload/download of files
client -> S3 : presigned upload link
S3 -> client : presigned download link
datcore -> client: presigned download link
S3 -> datcore: local copy and then upload via their api
    minio/S3 and postgres can talk nicely with each other via Notifications using RabbitMQ which we already have.
See:
https://blog.minio.io/part-5-5-publish-minio-events-via-postgresql-50f6cc7a7346
https://docs.minio.io/docs/minio-bucket-notification-guide.html
"""
# TODO: perhaps can be used a cache? add a lifetime?
s3_client: MinioClientWrapper
engine: Engine
loop: object
pool: ThreadPoolExecutor
simcore_bucket_name: str
has_project_db: bool
session: AioSession = attr.Factory(aiobotocore.get_session)
datcore_tokens: Dict[str, DatCoreApiToken] = attr.Factory(dict)
app: Optional[web.Application] = None
def _create_aiobotocore_client_context(self) -> ClientCreatorContext:
assert hasattr(self.session, "create_client")
# pylint: disable=no-member
# SEE API in https://botocore.amazonaws.com/v1/documentation/api/latest/reference/services/s3.html
# SEE https://aiobotocore.readthedocs.io/en/latest/index.html
return self.session.create_client(
"s3",
endpoint_url=self.s3_client.endpoint_url,
aws_access_key_id=self.s3_client.access_key,
aws_secret_access_key=self.s3_client.secret_key,
)
def _get_datcore_tokens(self, user_id: str) -> Tuple[Optional[str], Optional[str]]:
# pylint: disable=no-member
token = self.datcore_tokens.get(user_id, DatCoreApiToken())
return token.to_tuple()
async def locations(self, user_id: str):
locs = []
simcore_s3 = {"name": SIMCORE_S3_STR, "id": SIMCORE_S3_ID}
locs.append(simcore_s3)
api_token, api_secret = self._get_datcore_tokens(user_id)
if api_token and api_secret and self.app:
if await datcore_adapter.check_user_can_connect(
self.app, api_token, api_secret
):
datcore = {"name": DATCORE_STR, "id": DATCORE_ID}
locs.append(datcore)
return locs
@classmethod
def location_from_id(cls, location_id: str):
return get_location_from_id(location_id)
# LIST/GET ---------------------------
# pylint: disable=too-many-arguments
# pylint: disable=too-many-branches
# pylint: disable=too-many-statements
async def list_files(
self, user_id: str, location: str, uuid_filter: str = "", regex: str = ""
) -> List[FileMetaDataEx]:
"""Returns a list of file paths
- Works for simcore.s3 and datcore
- Can filter on uuid: useful to filter on project_id/node_id
- Can filter upon regular expression (for now only on key: value pairs of the FileMetaData)
"""
data = deque()
if location == SIMCORE_S3_STR:
accesible_projects_ids = []
async with self.engine.acquire() as conn, conn.begin():
accesible_projects_ids = await get_readable_project_ids(
conn, int(user_id)
)
has_read_access = (
file_meta_data.c.user_id == user_id
) | file_meta_data.c.project_id.in_(accesible_projects_ids)
query = sa.select([file_meta_data]).where(has_read_access)
async for row in conn.execute(query):
d = FileMetaData(**dict(row))
dex = FileMetaDataEx(
fmd=d, parent_id=str(Path(d.object_name).parent)
)
data.append(dex)
if self.has_project_db:
uuid_name_dict = {}
# now parse the project to search for node/project names
try:
async with self.engine.acquire() as conn, conn.begin():
query = sa.select([projects]).where(
projects.c.uuid.in_(accesible_projects_ids)
)
async for row in conn.execute(query):
proj_data = dict(row.items())
uuid_name_dict[proj_data["uuid"]] = proj_data["name"]
wb = proj_data["workbench"]
for node in wb.keys():
uuid_name_dict[node] = wb[node]["label"]
except DBAPIError as _err:
logger.exception("Error querying database for project names")
if not uuid_name_dict:
# there seems to be no project whatsoever for user_id
return []
# only keep files from non-deleted project
clean_data = deque()
for dx in data:
d = dx.fmd
if d.project_id not in uuid_name_dict:
continue
#
# FIXME: artifically fills ['project_name', 'node_name', 'file_id', 'raw_file_path', 'display_file_path']
# with information from the projects table!
d.project_name = uuid_name_dict[d.project_id]
if d.node_id in uuid_name_dict:
d.node_name = uuid_name_dict[d.node_id]
d.raw_file_path = str(
Path(d.project_id) / Path(d.node_id) / Path(d.file_name)
)
d.display_file_path = d.raw_file_path
d.file_id = d.file_uuid
if d.node_name and d.project_name:
d.display_file_path = str(
Path(d.project_name) / Path(d.node_name) / Path(d.file_name)
)
# once the data was sync to postgres metadata table at this point
clean_data.append(dx)
data = clean_data
elif location == DATCORE_STR:
api_token, api_secret = self._get_datcore_tokens(user_id)
return await datcore_adapter.list_all_datasets_files_metadatas(
self.app, api_token, api_secret
)
if uuid_filter:
# TODO: incorporate this in db query!
_query = re.compile(uuid_filter, re.IGNORECASE)
filtered_data = deque()
for dx in data:
d = dx.fmd
if _query.search(d.file_uuid):
filtered_data.append(dx)
return list(filtered_data)
if regex:
_query = re.compile(regex, re.IGNORECASE)
filtered_data = deque()
for dx in data:
d = dx.fmd
_vars = vars(d)
for v in _vars.keys():
if _query.search(v) or _query.search(str(_vars[v])):
filtered_data.append(dx)
break
return list(filtered_data)
return list(data)
async def list_files_dataset(
self, user_id: str, location: str, dataset_id: str
) -> Union[List[FileMetaData], List[FileMetaDataEx]]:
# this is a cheap shot, needs fixing once storage/db is in sync
data = []
if location == SIMCORE_S3_STR:
data: List[FileMetaDataEx] = await self.list_files(
user_id, location, uuid_filter=dataset_id + "/"
)
elif location == DATCORE_STR:
api_token, api_secret = self._get_datcore_tokens(user_id)
# lists all the files inside the dataset
return await datcore_adapter.list_all_files_metadatas_in_dataset(
self.app, api_token, api_secret, dataset_id
)
return data
async def list_datasets(self, user_id: str, location: str) -> List[DatasetMetaData]:
"""Returns a list of top level datasets
Works for simcore.s3 and datcore
"""
data = []
if location == SIMCORE_S3_STR:
if self.has_project_db:
try:
async with self.engine.acquire() as conn, conn.begin():
readable_projects_ids = await get_readable_project_ids(
conn, int(user_id)
)
has_read_access = projects.c.uuid.in_(readable_projects_ids)
# FIXME: this DOES NOT read from file-metadata table!!!
query = sa.select([projects.c.uuid, projects.c.name]).where(
has_read_access
)
async for row in conn.execute(query):
dmd = DatasetMetaData(
dataset_id=row.uuid,
display_name=row.name,
)
data.append(dmd)
except DBAPIError as _err:
logger.exception("Error querying database for project names")
elif location == DATCORE_STR:
api_token, api_secret = self._get_datcore_tokens(user_id)
return await datcore_adapter.list_datasets(self.app, api_token, api_secret)
return data
async def list_file(
self, user_id: str, location: str, file_uuid: str
) -> Optional[FileMetaDataEx]:
if location == SIMCORE_S3_STR:
async with self.engine.acquire() as conn, conn.begin():
can: Optional[AccessRights] = await get_file_access_rights(
conn, int(user_id), file_uuid
)
if can.read:
query = sa.select([file_meta_data]).where(
file_meta_data.c.file_uuid == file_uuid
)
result = await conn.execute(query)
row = await result.first()
if not row:
return None
file_metadata = to_meta_data_extended(row)
if file_metadata.fmd.entity_tag is None:
# we need to update from S3 here since the database is not up-to-date
file_metadata = await self.update_database_from_storage(
file_metadata.fmd.file_uuid,
file_metadata.fmd.bucket_name,
file_metadata.fmd.object_name,
)
return file_metadata
            # FIXME: returns None in both cases: file does not exist or user has no access
logger.debug("User %s cannot read file %s", user_id, file_uuid)
return None
elif location == DATCORE_STR:
# FIXME: review return inconsistencies
# api_token, api_secret = self._get_datcore_tokens(user_id)
import warnings
warnings.warn("NOT IMPLEMENTED!!!")
return None
# UPLOAD/DOWNLOAD LINKS ---------------------------
async def upload_file_to_datcore(
self, _user_id: str, _local_file_path: str, _destination_id: str
):
import warnings
warnings.warn(f"NOT IMPLEMENTED!!! in {self.__class__}")
# uploads a locally available file to dat core given the storage path, optionally attached some meta data
# api_token, api_secret = self._get_datcore_tokens(user_id)
# await dcw.upload_file_to_id(destination_id, local_file_path)
async def update_database_from_storage(
self,
file_uuid: str,
bucket_name: str,
object_name: str,
silence_exception: bool = False,
) -> Optional[FileMetaDataEx]:
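        # Read size / last-modified / ETag for the object via S3 head_object and persist
        # them into file_meta_data, returning the refreshed row (None if the object is
        # missing or the update matched no row).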
try:
async with self._create_aiobotocore_client_context() as aioboto_client:
result = await aioboto_client.head_object(
Bucket=bucket_name, Key=object_name
) # type: ignore
file_size = result["ContentLength"] # type: ignore
last_modified = result["LastModified"] # type: ignore
entity_tag = result["ETag"].strip('"') # type: ignore
async with self.engine.acquire() as conn:
result: ResultProxy = await conn.execute(
file_meta_data.update()
.where(file_meta_data.c.file_uuid == file_uuid)
.values(
file_size=file_size,
last_modified=last_modified,
entity_tag=entity_tag,
)
.returning(literal_column("*"))
)
if not result:
return None
row: Optional[RowProxy] = await result.first()
if not row:
return None
return to_meta_data_extended(row)
except botocore.exceptions.ClientError:
if silence_exception:
logger.debug("Error happened while trying to access %s", file_uuid)
else:
logger.warning(
"Error happened while trying to access %s", file_uuid, exc_info=True
)
# the file is not existing or some error happened
return None
@retry(
stop=stop_after_delay(3600),
wait=wait_exponential(multiplier=0.1, exp_base=1.2, max=30),
retry=(
retry_if_exception_type() | retry_if_result(lambda result: result is None)
),
before_sleep=before_sleep_log(logger, logging.INFO),
)
async def auto_update_database_from_storage_task(
self, file_uuid: str, bucket_name: str, object_name: str
):
return await self.update_database_from_storage(
file_uuid, bucket_name, object_name, silence_exception=True
)
async def upload_link(self, user_id: str, file_uuid: str):
"""
Creates pre-signed upload link and updates metadata table when
        link is used and upload is successfully completed
SEE _metadata_file_updater
"""
async with self.engine.acquire() as conn:
can: Optional[AccessRights] = await get_file_access_rights(
conn, int(user_id), file_uuid
)
if not can.write:
logger.debug(
"User %s was not allowed to upload file %s", user_id, file_uuid
)
raise web.HTTPForbidden(
reason=f"User does not have enough access rights to upload file {file_uuid}"
)
@retry(**postgres_service_retry_policy_kwargs)
async def _init_metadata() -> Tuple[int, str]:
async with self.engine.acquire() as conn:
fmd = FileMetaData()
fmd.simcore_from_uuid(file_uuid, self.simcore_bucket_name)
fmd.user_id = user_id # NOTE: takes ownership of uploaded data
# if file already exists, we might want to update a time-stamp
# upsert file_meta_data
insert_stmt = pg_insert(file_meta_data).values(**vars(fmd))
do_nothing_stmt = insert_stmt.on_conflict_do_nothing(
index_elements=["file_uuid"]
)
await conn.execute(do_nothing_stmt)
return fmd.file_size, fmd.last_modified
await _init_metadata()
bucket_name = self.simcore_bucket_name
object_name = file_uuid
        # a parallel task is started which will update the metadata of the uploaded file
        # once the upload has finished.
fire_and_forget_task(
self.auto_update_database_from_storage_task(
file_uuid=file_uuid,
bucket_name=bucket_name,
object_name=object_name,
)
)
return self.s3_client.create_presigned_put_url(bucket_name, object_name)
async def download_link_s3(self, file_uuid: str, user_id: int) -> str:
# access layer
async with self.engine.acquire() as conn:
can: Optional[AccessRights] = await get_file_access_rights(
conn, int(user_id), file_uuid
)
if not can.read:
                # NOTE: this is tricky. A user with read access can download any data!
                # If write permission were required, then shared projects opened as views could not
                # recover data in nodes (e.g. jupyter cannot pull work data)
#
logger.debug(
"User %s was not allowed to download file %s", user_id, file_uuid
)
raise web.HTTPForbidden(
reason=f"User does not have enough rights to download {file_uuid}"
)
bucket_name = self.simcore_bucket_name
async with self.engine.acquire() as conn:
stmt = sa.select([file_meta_data.c.object_name]).where(
file_meta_data.c.file_uuid == file_uuid
)
object_name: Optional[str] = await conn.scalar(stmt)
if object_name is None:
raise web.HTTPNotFound(
reason=f"File '{file_uuid}' does not exists in storage."
)
link = self.s3_client.create_presigned_get_url(bucket_name, object_name)
return link
async def download_link_datcore(self, user_id: str, file_id: str) -> URL:
api_token, api_secret = self._get_datcore_tokens(user_id)
return await datcore_adapter.get_file_download_presigned_link(
self.app, api_token, api_secret, file_id
)
# COPY -----------------------------
async def copy_file_s3_s3(self, user_id: str, dest_uuid: str, source_uuid: str):
# FIXME: operation MUST be atomic
# source is s3, location is s3
to_bucket_name = self.simcore_bucket_name
to_object_name = dest_uuid
from_bucket = self.simcore_bucket_name
from_object_name = source_uuid
# FIXME: This is not async!
self.s3_client.copy_object(
to_bucket_name, to_object_name, from_bucket, from_object_name
)
# update db
async with self.engine.acquire() as conn:
fmd = FileMetaData()
fmd.simcore_from_uuid(dest_uuid, self.simcore_bucket_name)
fmd.user_id = user_id
ins = file_meta_data.insert().values(**vars(fmd))
await conn.execute(ins)
async def copy_file_s3_datcore(
self, user_id: str, dest_uuid: str, source_uuid: str
):
session = get_client_session(self.app)
# source is s3, get link and copy to datcore
bucket_name = self.simcore_bucket_name
object_name = source_uuid
filename = source_uuid.split("/")[-1]
        s3_download_link = self.s3_client.create_presigned_get_url(
bucket_name, object_name
)
with tempfile.TemporaryDirectory() as tmpdir:
# FIXME: connect download and upload streams
local_file_path = os.path.join(tmpdir, filename)
# Downloads S3 -> local
            await download_to_file_or_raise(session, s3_download_link, local_file_path)
# Uploads local -> DATCore
await self.upload_file_to_datcore(
_user_id=user_id,
_local_file_path=local_file_path,
_destination_id=dest_uuid,
)
async def copy_file_datcore_s3(
self,
user_id: str,
dest_uuid: str,
source_uuid: str,
filename_missing: bool = False,
):
session = get_client_session(self.app)
# 2 steps: Get download link for local copy, the upload link to s3
# TODO: This should be a redirect stream!
dc_link, filename = await self.download_link_datcore(
user_id=user_id, file_id=source_uuid
)
if filename_missing:
dest_uuid = str(Path(dest_uuid) / filename)
s3_upload_link = await self.upload_link(user_id, dest_uuid)
with tempfile.TemporaryDirectory() as tmpdir:
# FIXME: connect download and upload streams
local_file_path = os.path.join(tmpdir, filename)
# Downloads DATCore -> local
await download_to_file_or_raise(session, dc_link, local_file_path)
# Uploads local -> S3
s3_upload_link = URL(s3_upload_link)
async with session.put(
s3_upload_link,
data=Path(local_file_path).open("rb"),
raise_for_status=True,
) as resp:
logger.debug(
"Uploaded local -> SIMCore %s . Status %s",
s3_upload_link,
resp.status,
)
return dest_uuid
async def copy_file(
self,
user_id: str,
dest_location: str,
dest_uuid: str,
source_location: str,
source_uuid: str,
):
if source_location == SIMCORE_S3_STR:
if dest_location == DATCORE_STR:
await self.copy_file_s3_datcore(user_id, dest_uuid, source_uuid)
elif dest_location == SIMCORE_S3_STR:
await self.copy_file_s3_s3(user_id, dest_uuid, source_uuid)
elif source_location == DATCORE_STR:
if dest_location == DATCORE_STR:
raise NotImplementedError("copy files from datcore 2 datcore not impl")
if dest_location == SIMCORE_S3_STR:
await self.copy_file_datcore_s3(user_id, dest_uuid, source_uuid)
async def deep_copy_project_simcore_s3(
self,
user_id: str,
source_project: Dict[str, Any],
destination_project: Dict[str, Any],
node_mapping: Dict[str, str],
):
"""Parses a given source project and copies all related files to the destination project
Since all files are organized as
project_id/node_id/filename or links to datcore
this function creates a new folder structure
project_id/node_id/filename
and copies all files to the corresponding places.
Additionally, all external files from datcore are being copied and the paths in the destination
project are adapted accordingly
Lastly, the meta data db is kept in sync
"""
source_folder = source_project["uuid"]
dest_folder = destination_project["uuid"]
# access layer
async with self.engine.acquire() as conn, conn.begin():
source_access_rights = await get_project_access_rights(
conn, int(user_id), project_id=source_folder
)
dest_access_rights = await get_project_access_rights(
conn, int(user_id), project_id=dest_folder
)
if not source_access_rights.read:
logger.debug(
"User %s was not allowed to read from project %s",
user_id,
source_folder,
)
raise web.HTTPForbidden(
reason=f"User does not have enough access rights to read from project '{source_folder}'"
)
if not dest_access_rights.write:
logger.debug(
"User %s was not allowed to write to project %s",
user_id,
dest_folder,
)
raise web.HTTPForbidden(
reason=f"User does not have enough access rights to write to project '{dest_folder}'"
)
# build up naming map based on labels
uuid_name_dict = {}
uuid_name_dict[dest_folder] = destination_project["name"]
for src_node_id, src_node in source_project["workbench"].items():
new_node_id = node_mapping.get(src_node_id)
if new_node_id is not None:
uuid_name_dict[new_node_id] = src_node["label"]
async with self._create_aiobotocore_client_context() as aioboto_client:
logger.debug(
"Listing all items under %s:%s/",
self.simcore_bucket_name,
source_folder,
)
            # Step 1: list all objects for this project, replace their keys with the destination
            # object names, copy them over, and collect some names along the way
            # Note: the / at the end of the Prefix is VERY important, makes the listing several orders of magnitude faster
response = await aioboto_client.list_objects_v2(
Bucket=self.simcore_bucket_name, Prefix=f"{source_folder}/"
)
contents: List = response.get("Contents", [])
logger.debug(
"Listed %s items under %s:%s/",
len(contents),
self.simcore_bucket_name,
source_folder,
)
for item in contents:
source_object_name = item["Key"]
source_object_parts = Path(source_object_name).parts
if len(source_object_parts) != 3:
# This may happen once we have shared/home folders
# FIXME: this might cause problems
logger.info(
"Skipping copy of '%s'. Expected three parts path!",
source_object_name,
)
continue
old_node_id = source_object_parts[1]
new_node_id = node_mapping.get(old_node_id)
if new_node_id is not None:
old_filename = source_object_parts[2]
dest_object_name = str(
Path(dest_folder) / new_node_id / old_filename
)
copy_kwargs = dict(
CopySource={
"Bucket": self.simcore_bucket_name,
"Key": source_object_name,
},
Bucket=self.simcore_bucket_name,
Key=dest_object_name,
)
logger.debug("Copying %s ...", copy_kwargs)
# FIXME: if 5GB, it must use multipart upload Upload Part - Copy API
# SEE https://botocore.amazonaws.com/v1/documentation/api/latest/reference/services/s3.html#S3.Client.copy_object
await aioboto_client.copy_object(**copy_kwargs)
# Step 2: List all references in outputs that point to datcore and copy over
for node_id, node in destination_project["workbench"].items():
outputs: Dict = node.get("outputs", {})
for _, output in outputs.items():
source = output["path"]
if output.get("store") == DATCORE_ID:
destination_folder = str(Path(dest_folder) / node_id)
logger.info("Copying %s to %s", source, destination_folder)
destination = await self.copy_file_datcore_s3(
user_id=user_id,
dest_uuid=destination_folder,
source_uuid=source,
filename_missing=True,
)
assert destination.startswith(destination_folder) # nosec
output["store"] = SIMCORE_S3_ID
output["path"] = destination
elif output.get("store") == SIMCORE_S3_ID:
destination = str(Path(dest_folder) / node_id / Path(source).name)
output["store"] = SIMCORE_S3_ID
output["path"] = destination
fmds = []
async with self._create_aiobotocore_client_context() as aioboto_client:
# step 3: list files first to create fmds
            # Note: the / at the end of the Prefix is VERY important, makes the listing several orders of magnitude faster
response = await aioboto_client.list_objects_v2(
Bucket=self.simcore_bucket_name, Prefix=f"{dest_folder}/"
)
if "Contents" in response:
for item in response["Contents"]:
fmd = FileMetaData()
fmd.simcore_from_uuid(item["Key"], self.simcore_bucket_name)
fmd.project_name = uuid_name_dict.get(dest_folder, "Untitled")
fmd.node_name = uuid_name_dict.get(fmd.node_id, "Untitled")
fmd.raw_file_path = fmd.file_uuid
fmd.display_file_path = str(
Path(fmd.project_name) / fmd.node_name / fmd.file_name
)
fmd.user_id = user_id
fmd.file_size = item["Size"]
fmd.last_modified = str(item["LastModified"])
fmds.append(fmd)
# step 4 sync db
async with self.engine.acquire() as conn, conn.begin():
            # TODO: upsert ALL rows in one statement
for fmd in fmds:
query = sa.select([file_meta_data]).where(
file_meta_data.c.file_uuid == fmd.file_uuid
)
                # if the file already exists, delete the old row and re-insert the new metadata
rows = await conn.execute(query)
exists = await rows.scalar()
if exists:
delete_me = file_meta_data.delete().where(
file_meta_data.c.file_uuid == fmd.file_uuid
)
await conn.execute(delete_me)
ins = file_meta_data.insert().values(**vars(fmd))
await conn.execute(ins)
# DELETE -------------------------------------
async def delete_file(self, user_id: str, location: str, file_uuid: str):
"""Deletes a file given its fmd and location
Additionally requires a user_id for 3rd party auth
For internal storage, the db state should be updated upon completion via
Notification mechanism
For simcore.s3 we can use the file_name
For datcore we need the full path
"""
if location == SIMCORE_S3_STR:
# FIXME: operation MUST be atomic, transaction??
to_delete = []
async with self.engine.acquire() as conn, conn.begin():
can: Optional[AccessRights] = await get_file_access_rights(
conn, int(user_id), file_uuid
)
if not can.delete:
logger.debug(
"User %s was not allowed to delete file %s",
user_id,
file_uuid,
)
raise web.HTTPForbidden(
reason=f"User '{user_id}' does not have enough access rights to delete file {file_uuid}"
)
query = sa.select(
[file_meta_data.c.bucket_name, file_meta_data.c.object_name]
).where(file_meta_data.c.file_uuid == file_uuid)
async for row in conn.execute(query):
if self.s3_client.remove_objects(
row.bucket_name, [row.object_name]
):
to_delete.append(file_uuid)
await conn.execute(
file_meta_data.delete().where(
file_meta_data.c.file_uuid.in_(to_delete)
)
)
elif location == DATCORE_STR:
# FIXME: review return inconsistencies
api_token, api_secret = self._get_datcore_tokens(user_id)
await datcore_adapter.delete_file(
self.app, api_token, api_secret, file_uuid
)
async def delete_project_simcore_s3(
self, user_id: str, project_id: str, node_id: Optional[str] = None
) -> Optional[web.Response]:
"""Deletes all files from a given node in a project in simcore.s3 and updated db accordingly.
If node_id is not given, then all the project files db entries are deleted.
"""
# FIXME: operation MUST be atomic. Mark for deletion and remove from db when deletion fully confirmed
async with self.engine.acquire() as conn, conn.begin():
# access layer
can: Optional[AccessRights] = await get_project_access_rights(
conn, int(user_id), project_id
)
if not can.delete:
logger.debug(
"User %s was not allowed to delete project %s",
user_id,
project_id,
)
raise web.HTTPForbidden(
reason=f"User does not have delete access for {project_id}"
)
delete_me = file_meta_data.delete().where(
file_meta_data.c.project_id == project_id,
)
if node_id:
delete_me = delete_me.where(file_meta_data.c.node_id == node_id)
await conn.execute(delete_me)
async with self._create_aiobotocore_client_context() as aioboto_client:
            # Note: the / at the end of the Prefix is VERY important, makes the listing several orders of magnitude faster
response = await aioboto_client.list_objects_v2(
Bucket=self.simcore_bucket_name,
Prefix=f"{project_id}/{node_id}/" if node_id else f"{project_id}/",
)
objects_to_delete = []
for f in response.get("Contents", []):
objects_to_delete.append({"Key": f["Key"]})
if objects_to_delete:
response = await aioboto_client.delete_objects(
Bucket=self.simcore_bucket_name,
Delete={"Objects": objects_to_delete},
)
return response
# SEARCH -------------------------------------
async def search_files_starting_with(
self, user_id: int, prefix: str
) -> List[FileMetaDataEx]:
# Avoids using list_files since it accounts for projects/nodes
# Storage should know NOTHING about those concepts
files_meta = deque()
async with self.engine.acquire() as conn, conn.begin():
# access layer
can_read_projects_ids = await get_readable_project_ids(conn, int(user_id))
has_read_access = (
file_meta_data.c.user_id == str(user_id)
) | file_meta_data.c.project_id.in_(can_read_projects_ids)
stmt = sa.select([file_meta_data]).where(
file_meta_data.c.file_uuid.startswith(prefix) & has_read_access
)
async for row in conn.execute(stmt):
meta_extended = to_meta_data_extended(row)
files_meta.append(meta_extended)
return list(files_meta)
async def create_soft_link(
self, user_id: int, target_uuid: str, link_uuid: str
) -> FileMetaDataEx:
# validate link_uuid
async with self.engine.acquire() as conn:
            # TODO: select exists(select 1 from file_meta_data where file_uuid=12)
found = await conn.scalar(
sa.select([file_meta_data.c.file_uuid]).where(
file_meta_data.c.file_uuid == link_uuid
)
)
if found:
raise ValueError(f"Invalid link {link_uuid}. Link already exists")
# validate target_uuid
target = await self.list_file(str(user_id), SIMCORE_S3_STR, target_uuid)
if not target:
raise ValueError(
f"Invalid target '{target_uuid}'. File does not exists for this user"
)
# duplicate target and change the following columns:
target.fmd.file_uuid = link_uuid
target.fmd.file_id = link_uuid # NOTE: api-server relies on this id
target.fmd.is_soft_link = True
async with self.engine.acquire() as conn:
stmt = (
file_meta_data.insert()
.values(**attr.asdict(target.fmd))
.returning(literal_column("*"))
)
result = await conn.execute(stmt)
link = to_meta_data_extended(await result.first())
return link
async def synchronise_meta_data_table(
self, location: str, dry_run: bool
) -> Dict[str, Any]:
PRUNE_CHUNK_SIZE = 20
removed: List[str] = []
to_remove: List[str] = []
async def _prune_db_table(conn):
if not dry_run:
await conn.execute(
file_meta_data.delete().where(
file_meta_data.c.object_name.in_(to_remove)
)
)
logger.info(
"%s %s orphan items",
"Would have deleted" if dry_run else "Deleted",
len(to_remove),
)
removed.extend(to_remove)
to_remove.clear()
# ----------
assert ( # nosec
location == SIMCORE_S3_STR
), "Only with s3, no other sync implemented" # nosec
if location == SIMCORE_S3_STR:
# NOTE: only valid for simcore, since datcore data is not in the database table
# let's get all the files in the table
logger.warning(
"synchronisation of database/s3 storage started, this will take some time..."
)
async with self.engine.acquire() as conn, self._create_aiobotocore_client_context() as aioboto_client:
number_of_rows_in_db = await conn.scalar(file_meta_data.count()) or 0
logger.warning(
"Total number of entries to check %d",
number_of_rows_in_db,
)
assert isinstance(aioboto_client, AioBaseClient) # nosec
async for row in conn.execute(
sa.select([file_meta_data.c.object_name])
):
s3_key = row.object_name # type: ignore
# now check if the file exists in S3
# SEE https://www.peterbe.com/plog/fastest-way-to-find-out-if-a-file-exists-in-s3
response = await aioboto_client.list_objects_v2(
Bucket=self.simcore_bucket_name, Prefix=s3_key
)
if response.get("KeyCount", 0) == 0:
# this file does not exist in S3
to_remove.append(s3_key)
if len(to_remove) >= PRUNE_CHUNK_SIZE:
await _prune_db_table(conn)
if to_remove:
await _prune_db_table(conn)
assert len(to_remove) == 0 # nosec
assert len(removed) <= number_of_rows_in_db # nosec
logger.info(
"%s %d entries ",
"Would delete" if dry_run else "Deleting",
len(removed),
)
return {"removed": removed}
| 1.726563
| 2
|
services/traction/acapy_wrapper/models/cred_attr_spec.py
|
Open-Earth-Foundation/traction
| 12
|
12779183
|
# coding: utf-8
from __future__ import annotations
from datetime import date, datetime # noqa: F401
import re # noqa: F401
from typing import Any, Dict, List, Optional # noqa: F401
from pydantic import AnyUrl, BaseModel, EmailStr, validator # noqa: F401
class CredAttrSpec(BaseModel):
"""NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
Do not edit the class manually.
CredAttrSpec - a model defined in OpenAPI
mime_type: The mime_type of this CredAttrSpec [Optional].
name: The name of this CredAttrSpec.
value: The value of this CredAttrSpec.
"""
mime_type: Optional[str] = None
name: str
value: str
CredAttrSpec.update_forward_refs()
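# A minimal usage sketch (not part of the generated model): constructing a
# CredAttrSpec and serializing it; the attribute name and value below are
# illustrative assumptions.
if __name__ == "__main__":
    attr = CredAttrSpec(name="score", value="95", mime_type="text/plain")
    print(attr.dict())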
| 2.109375
| 2
|
devops_spt/external_dependency.py
|
dksmiffs/devops
| 0
|
12779184
|
<gh_stars>0
"""External dependency management module"""
from abc import ABC, abstractmethod
class ExternalDependency(ABC):
"""Define interface for managing external dependencies"""
@abstractmethod
def existing(self):
"""
Return installed version
OR, set existing = None in subclass if only update is desired
"""
@abstractmethod
def latest(self):
"""
Return latest version available
OR, set latest = None in subclass if only update is desired
"""
@abstractmethod
def update(self, verbose=False):
"""Update installed version to latest if necessary"""
| 2.9375
| 3
|
pbsmmapi/asset/models.py
|
WGBH/django-pbsmmapi
| 0
|
12779185
|
<reponame>WGBH/django-pbsmmapi<filename>pbsmmapi/asset/models.py
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import json
from django.db import models
from django.utils.translation import ugettext_lazy as _
from ..abstract.models import PBSMMGenericAsset
from .helpers import check_asset_availability
AVAILABILITY_GROUPS = (
('Station Members', 'station_members'), ('All Members', 'all_members'),
('Public', 'public')
)
# remember the closing slash
PBSMM_ASSET_ENDPOINT = 'https://media.services.pbs.org/api/v1/assets/'
PBSMM_LEGACY_ASSET_ENDPOINT = 'https://media.services.pbs.org/api/v1/assets/legacy/?tp_media_id='
YES_NO = (
(1, 'Yes'),
(0, 'No'),
)
class PBSMMAbstractAsset(PBSMMGenericAsset):
"""
These are fields unique to Assets.
Each object model has a *-Asset table, e.g., PBSMMEpisode has PBSMMEpisodeAsset,
PBSMMShow has PBSShowAsset, etc.
    Aside from the FK reference to the parent, each of these *-Asset models is identical in structure.
"""
# These fields are unique to Asset
legacy_tp_media_id = models.BigIntegerField(
_('COVE ID'),
null=True,
blank=True,
unique=True,
help_text='(Legacy TP Media ID)',
)
availability = models.TextField(
_('Availability'),
null=True,
blank=True,
help_text='JSON serialized Field',
)
duration = models.IntegerField(
_('Duration'),
null=True,
blank=True,
help_text="(in seconds)",
)
object_type = models.CharField( # This is 'clip', etc.
_('Object Type'),
max_length=40,
null=True, blank=True,
)
# CAPTIONS
has_captions = models.BooleanField(
_('Has Captions'),
default=False,
)
# TAGS, Topics
tags = models.TextField(
_('Tags'),
null=True,
blank=True,
help_text='JSON serialized field',
)
topics = models.TextField(
_('Topics'),
null=True,
blank=True,
help_text='JSON serialized field',
)
# PLAYER FIELDS
player_code = models.TextField(
_('Player Code'),
null=True,
blank=True,
)
# CHAPTERS
chapters = models.TextField(
_('Chapters'),
null=True,
blank=True,
help_text="JSON serialized field",
)
content_rating = models.CharField(
_('Content Rating'),
max_length=100,
null=True,
blank=True,
)
content_rating_description = models.TextField(
_('Content Rating Description'),
null=True,
blank=True,
)
# This is a custom field that lies outside of the API.
    # It allows the content producer to define WHICH Asset is shown on the parental object's Detail page.
    # Since the PBSMM API does not know how to distinguish multiple "clips" from one another, this is necessary
# to show a Promo vs. a Short Form video, etc.
#
# ... thanks PBS.
override_default_asset = models.PositiveIntegerField(
_('Override Default Asset'), null=False, choices=YES_NO, default=0
)
class Meta:
abstract = True
###
# Properties and methods
###
def __unicode__(self):
return "%d | %s (%d) | %s" % (
self.pk, self.object_id, self.legacy_tp_media_id, self.title
)
def __object_model_type(self):
"""
This handles the correspondence to the "type" field in the PBSMM JSON object.
Basically this just makes it easy to identify whether an object is an asset or not.
"""
return 'asset'
object_model_type = property(__object_model_type)
def asset_publicly_available(self):
"""
This is mostly for tables listing Assets in the Admin detail page for ancestral objects:
e.g., an Episode's page in the Admin has a list of the episode's assets, and this provides
a simple column to show availability in that list.
"""
if self.availability:
a = json.loads(self.availability)
p = a.get('public', None)
if p:
return check_asset_availability(start=p['start'], end=p['end'])[0]
return None
asset_publicly_available.short_description = 'Pub. Avail.'
asset_publicly_available.boolean = True
def __is_asset_publicly_available(self):
"""
Am I available to the public? True/False.
"""
return self.asset_publicly_available
is_asset_publicly_available = property(__is_asset_publicly_available)
def __duration_hms(self):
"""
Show the asset's duration as #h ##m ##s.
"""
if self.duration:
d = self.duration
hours = d // 3600
if hours > 0:
hstr = '%dh' % hours
else:
hstr = ''
d %= 3600
minutes = d // 60
if hours > 0:
mstr = '%02dm' % minutes
else:
if minutes > 0:
mstr = '%2dm' % minutes
else:
mstr = ''
seconds = d % 60
if minutes > 0:
sstr = '%02ds' % seconds
else:
sstr = '%ds' % seconds
return ' '.join((hstr, mstr, sstr))
return ''
duration_hms = property(__duration_hms)
def __formatted_duration(self):
"""
Show the Asset's duration as ##:##:##
"""
if self.duration:
seconds = self.duration
hours = seconds // 3600
seconds %= 3600
minutes = seconds // 60
seconds %= 60
return "%d:%02d:%02d" % (hours, minutes, seconds)
return ''
formatted_duration = property(__formatted_duration)
def __is_default(self):
"""
        Return True/False if the Asset is the "default" Asset for its parent.
"""
if self.override_default_asset:
return True
return False
is_default = property(__is_default)
| 2.1875
| 2
|
jwt_devices/middleware.py
|
poxip/drf-jwt-devices
| 13
|
12779186
|
from django.http.response import JsonResponse
from django.utils.translation import ugettext_lazy as _
from rest_framework import status
from jwt_devices import views
from jwt_devices.settings import api_settings
class PermittedHeadersMiddleware(object):
"""
    Middleware used to disallow sending the permanent_token header in any request other than the permanent token
    refresh, to make sure FE developers do not accidentally send the sensitive permanent token with each request.
"""
def __init__(self, get_response=None):
self.get_response = get_response
def __call__(self, request):
if self.get_response:
return self.get_response(request)
def process_view(self, request, view_func, view_args, view_kwargs):
view_cls = getattr(view_func, "cls", None)
if (view_cls and api_settings.JWT_PERMANENT_TOKEN_AUTH and request.META.get("HTTP_PERMANENT_TOKEN") and view_cls != views.DeviceRefreshJSONWebToken):
return JsonResponse({
"HTTP_PERMANENT_TOKEN": {
"details": _("Using the Permanent-Token header is disallowed for {}").format(type(view_cls))
}
}, status=status.HTTP_400_BAD_REQUEST)
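# A hypothetical settings.py sketch showing where this middleware could be
# registered; its exact position within MIDDLEWARE is an assumption.
#
# MIDDLEWARE = [
#     # ... default Django middleware ...
#     "jwt_devices.middleware.PermittedHeadersMiddleware",
# ]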
| 2.140625
| 2
|
reader/migrations/0005_categories.py
|
a-mere-peasant/MangAdventure
| 0
|
12779187
|
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [('reader', '0004_float_numbers')]
operations = [
migrations.CreateModel(
name='Category',
fields=[
('id', models.CharField(
auto_created=True, primary_key=True,
max_length=25, serialize=False
)),
('name', models.CharField(
help_text='The name of the category. '
'Must be unique and cannot be changed once set',
max_length=25, unique=True, serialize=False
)),
('description', models.CharField(
help_text='A description for the category.',
max_length=250
)),
],
options={'verbose_name_plural': 'categories'},
),
migrations.AddField(
model_name='series',
name='categories',
field=models.ManyToManyField(
blank=True, to='reader.Category'
),
),
]
| 2.0625
| 2
|
main/PythonTools/LoewnerRunFactory.py
|
ucapdak/loewner
| 0
|
12779188
|
from Constants import CONST_IDX, LINR_IDX, KAPPA_IDX, CALPHA_IDX, SQRTPLUS_IDX, EXACT_CUBIC_CONSTANT, STANDARD_IDXS, CUBIC_EXACT_IDXS, QUADRATIC_FORWARD_EXACT_IDXS, NOTORIGIN_IDXS
from LoewnerRun import LoewnerRun, ConstantLoewnerRun, LinearLoewnerRun, KappaLoewnerRun, CAlphaLoewnerRun, SqrtTPlusOneLoewnerRun
class LoewnerRunFactory():
def __init__(self, start_time, final_time, outer_points, inner_points, compile_modules = True, save_data = True, save_plot = True):
# Set the time parameters for the factory
self.start_time = start_time
self.final_time = final_time
# Set the resolution parameters for the factory
self.outer_points = outer_points
self.inner_points = inner_points
# Set the compilation setting for the factory
self.compile_modules = compile_modules
# Set the saving options for the factory
self.save_plot = save_plot
self.save_data = save_data
# Give default arguments for the extra parameters
self.kappa = 0
self.alpha = 0
self.constant = 0
def select_single_run(self,index,start_time=None,final_time=None,outer_points=None,inner_points=None,constant=None,kappa=None,alpha=None):
# Choose the class variables for the LoewnerRun object if no alternative is given
if start_time is None:
start_time = self.start_time
if final_time is None:
final_time = self.final_time
if outer_points is None:
outer_points = self.outer_points
if inner_points is None:
inner_points = self.inner_points
if kappa is None:
kappa = self.kappa
if constant is None:
constant = self.constant
if alpha is None:
alpha = self.alpha
# Create LoewnerRun object based on which driving function was chosen
if index == CONST_IDX:
return ConstantLoewnerRun(constant,start_time,final_time,outer_points,inner_points,self.compile_modules,self.save_data,self.save_plot)
if index == LINR_IDX:
return LinearLoewnerRun(start_time,final_time,outer_points,inner_points,self.compile_modules,self.save_data,self.save_plot)
if index == KAPPA_IDX:
if final_time > 1:
final_time = 1
return KappaLoewnerRun(kappa,start_time,final_time,outer_points,inner_points,self.compile_modules,self.save_data,self.save_plot)
if index == CALPHA_IDX:
return CAlphaLoewnerRun(alpha,start_time,final_time,outer_points,inner_points,self.compile_modules,self.save_data,self.save_plot)
if index == SQRTPLUS_IDX:
return SqrtTPlusOneLoewnerRun(start_time,final_time,outer_points,inner_points,self.compile_modules,self.save_data,self.save_plot)
# Create an ordinary LoewnerRun
return LoewnerRun(index,start_time,final_time,outer_points,inner_points,self.compile_modules,self.save_data,self.save_plot)
def create_standard_runs(self):
# Create a list of LoewnerRuns for driving functions that do not require additional arguments
return [self.select_single_run(index=i) for i in STANDARD_IDXS]
def create_wedge_runs(self):
        # Create a list of LoewnerRuns for driving functions that do not start at the origin, excluding Kappa
return [self.select_single_run(index=i,constant=1) for i in NOTORIGIN_IDXS]
def vary_kappa(self, kappas, outer_points=None, inner_points=None):
if outer_points is None:
outer_points=self.outer_points
if inner_points is None:
inner_points=self.inner_points
# Create a list of kappa-driving LoewnerRuns with different values for kappa
return [self.select_single_run(index=KAPPA_IDX, kappa=k, outer_points=outer_points, inner_points=inner_points) for k in kappas]
def vary_alpha(self, alphas):
# Create a list of calpha-driving LoewnerRuns with different values for alpha
return [self.select_single_run(index=CALPHA_IDX, alpha=a) for a in alphas]
def vary_inner_res(self, index, points, constant=None, kappa=None, alpha=None):
        # Create a list of LoewnerRuns with the same driving function and different values for the inner resolution
return [self.select_single_run(index=index, inner_points=p, constant=constant, kappa=kappa, alpha=alpha) for p in points]
def vary_final_time(self, index, times, constant=None, kappa=None, alpha=None):
# Create a list of LoewnerRuns with the same driving function and different values for the final time
return [self.select_single_run(index=index, final_time=t, constant=constant, kappa=kappa, alpha=alpha) for t in times]
def create_exact_cubic(self):
# Create a list of LoewnerRuns that have an exact cubic forward solution
return [self.select_single_run(index=i, constant=EXACT_CUBIC_CONSTANT) for i in CUBIC_EXACT_IDXS]
def create_exact_quadratic_forward(self):
# Create a list of LoewnerRuns that have an exact quadratic forward solution
return [self.select_single_run(index=i) for i in QUADRATIC_FORWARD_EXACT_IDXS]
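# A minimal usage sketch (all numeric values are illustrative): build a factory
# and create kappa-driven runs for a few kappa values without compiling modules.
if __name__ == "__main__":
    factory = LoewnerRunFactory(start_time=0, final_time=25, outer_points=1000,
                                inner_points=10, compile_modules=False)
    kappa_runs = factory.vary_kappa([2, 4, 6])
    print(len(kappa_runs), "kappa runs created")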
| 2.34375
| 2
|
bitcoin/exchange.py
|
darbik/work
| 0
|
12779189
|
<reponame>darbik/work
def buy_bitcoin(price):
amount = int(raw_input("How much do you want to buy?"))
if amount > 500 or amount < 5:
while amount > 500 or amount < 5:
amount = int(raw_input("Sorry please try another amount between $5 and $500."))
amount = ('buy', amount)
return amount
def sell_bitcoin(price):
amount = int(raw_input("How much do you want to sell?"))
if amount > 500 or amount < 5:
while amount > 500 or amount < 5:
amount = int(raw_input("Sorry please try another amount between $5 and $500."))
amount = ('sell', amount)
return amount
def calculate_fees(atmAmount):
additionalFee = (((atmAmount / 7500) - 1) * 0.35)
return additionalFee
def display_transaction(amount, price):
operation, amount = amount
if operation == 'buy':
print "You are buying %.8f bitcoin." % (amount / price * 0.95)
elif operation == 'sell':
print "You are selling %.8f bitcoin." % (amount / price * 0.95)
print "Your transaction costs %.8f bitcoin in fees." % (amount / price * 0.05)
def find_denominations(amount, availableBills):
operation, amount = amount
(fifties, twenties, tens, fives) = availableBills
value = amount
if operation == 'sell':
remainder = amount
nFifty = 0
nTwenty = 0
nTen = 0
nFive = 0
try:
if remainder % 50 < 50 and fifties != 0:
nFifty = amount / 50
if nFifty > fifties:
while nFifty > fifties:
nFifty -= 1
remainder -= (nFifty * 50)
amount = remainder
else:
remainder = remainder % 50
amount = remainder
except:
pass
try:
if remainder % 20 < 20 and twenties != 0:
nTwenty = amount / 20
if nTwenty > twenties:
while nTwenty > twenties:
nTwenty -= 1
remainder -= (nTwenty * 20)
amount = remainder
else:
remainder = remainder % 20
amount = remainder
except:
pass
try:
if remainder % 10 < 10 and tens != 0:
nTen = amount / 10
if nTen > tens:
while nTen > tens:
nTen -= 1
remainder -= (nTen * 10)
amount = remainder
else:
remainder = remainder % 10
amount = remainder
except:
pass
try:
if remainder % 5 < 5 and fives != 0:
nFive = amount / 5
if nFive > fives:
while nFive > fives:
nFive -= 1
remainder -= (nFive * 5)
amount = remainder
else:
remainder = remainder % 5
amount = remainder
except:
pass
if remainder != 0:
option1 = value - remainder
option2 = value + remainder
answer = int(raw_input("Sorry we can't create a transaction for %i, but we can make a transaction for %i or %i, which do you want?" % (value, option1, option2)))
if answer == option1:
denominations = find_denominations(('sell', answer), availableBills)
elif answer == option2:
denominations = find_denominations(('sell', answer), availableBills)
else:
denominations = (nFifty, nTwenty, nTen, nFive)
return denominations
| 3.859375
| 4
|
libraryproject/profiles/forms.py
|
elotgamu/libraryproject
| 0
|
12779190
|
<gh_stars>0
from django import forms
from .models import Visitor, Student, Librarian
class VisitorsForm(forms.ModelForm):
class Meta:
model = Visitor
fields = ('address',
'phone',
'id_card'
)
class StudentForm(forms.ModelForm):
class Meta:
model = Student
fields = ('address',
'phone',
'id_card',
'student_number',
'school_name',
)
class LibrarianForm(forms.ModelForm):
class Meta:
model = Librarian
fields = ('user',
'address',
'phone',
'id_card',
)
| 2.21875
| 2
|
2021_CPS_festival/test.py
|
yehyunchoi/Algorithm
| 0
|
12779191
|
<gh_stars>0
def f() :
for i in range(1, 10):
print((i * 2 + 5) * 50 +1771 - 1994)
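# Expanding the expression: (i*2 + 5)*50 + 1771 - 1994 = 100*i + 250 + 1771 - 1994
# = 100*i + 27, so each printed number is the digit i followed by the digits "27".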
| 2.828125
| 3
|
bot.py
|
pixley/discord-audio-pipe
| 1
|
12779192
|
<reponame>pixley/discord-audio-pipe
import sound
import discord
import logging
import config
from discord.ext import commands
class Dap_Bot(commands.Bot):
def __init__(self, command_prefix):
commands.Bot.__init__(self, command_prefix)
# discord.AudioSource stream
self.stream = None
# int device_id
self.device_id = -1
# discord.VoiceClient voice
self.voice = None
# boolean use_vban
self.use_vban = False
def apply_config(self):
self.use_vban = config.get_config_bool("Audio", "use_vban")
self.start_stream()
def start_stream(self):
if self.use_vban:
self.stream = sound.VBANStream()
else:
# device id
self.device_id = config.get_config_int("Audio", "device_id")
self.stream = sound.PCMStream()
self.stream.change_device(self.device_id)
# params: int new_id
# return boolean
def change_device(self, new_id):
if not self.use_vban:
if new_id != self.device_id:
# sounddevice.DeviceList device_list
device_list = sound.query_devices()
# int device_count
device_count = len(device_list)
if new_id >= 0 and new_id < device_count:
self.device_id = new_id
self.stream.change_device(new_id)
config.set_config("Audio", "device_id", new_id)
print("Device {} selected".format(new_id))
return True
else:
print("Error: invalid device id or no devices available!")
return False
# params: float volume
# return: boolean
def change_volume(self, volume):
if volume >= 0.0 and volume <= 2.0:
if self.voice is not None and self.voice.source is not None:
self.voice.source.volume = volume
config.set_config("Audio", "volume", volume)
return True
return False
#params: Discord.VoiceChannel channel
async def join_voice_channel(self, channel):
self.voice = await channel.connect()
self.voice.play(self.stream)
vol = config.get_config_float("Audio", "volume")
self.voice.source = discord.PCMVolumeTransformer(original=self.stream, volume=vol)
if self.use_vban:
self.stream.start_vban()
async def leave_voice_channel(self):
if self.voice is not None:
await self.voice.disconnect()
self.voice = None
def reset_stream(self):
if self.stream is not None:
self.stream.cleanup()
if self.voice is not None and self.voice.source is not None:
            self.start_stream()
            self.voice.source.original = self.stream
| 2.640625
| 3
|
Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/openedx/core/djangoapps/waffle_utils/testutils.py
|
osoco/better-ways-of-thinking-about-software
| 3
|
12779193
|
"""
Test utilities for waffle utilities.
"""
# Can be used with FilteredQueryCountMixin.assertNumQueries() to blacklist
# waffle tables. For example:
# QUERY_COUNT_TABLE_BLACKLIST = WAFFLE_TABLES
# with self.assertNumQueries(6, table_blacklist=QUERY_COUNT_TABLE_BLACKLIST):
WAFFLE_TABLES = [
"waffle_utils_waffleflagcourseoverridemodel",
"waffle_flag",
"waffle_switch",
"waffle_sample",
]
| 2.03125
| 2
|
LeetCode/Python/two_sum.py
|
tejeshreddy/competitive-programming
| 0
|
12779194
|
"""
Title: 0001 - Two Sum
Tags: Hash Table
Time: O(n)
Space: O(n)
Source: https://leetcode.com/problems/two-sum/
Difficulty: Easy
"""
from typing import List
class Solution:
def twoSum(self, nums: List[int], target: int) -> List[int]:
hmap = {}
for i, v in enumerate(nums):
diff = target - v
if diff not in hmap:
hmap[v] = i
else:
return [hmap[diff], i]
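# A minimal usage sketch (illustrative input): the expected output is [0, 1]
# because nums[0] + nums[1] == 9.
if __name__ == "__main__":
    print(Solution().twoSum([2, 7, 11, 15], 9))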
| 3.546875
| 4
|
services/gps/NmeaSerialGps.py
|
robisen1/AndroidWifiCracker
| 19
|
12779195
|
#!/usr/bin/env python
from serial import Serial
from threading import Thread
import time
from NmeaEvents import NmeaEventSource
import NmeaParser
import NmeaSentences
class NmeaSerialGps(NmeaEventSource):
"""A NMEA GPS device that is connected via a serial connection"""
def __init__(self, port, baudrate = 4800):
"""Create a new NMEA serial GPS
Arguments:
port -- the device that is the GPS serial port (i.e. /dev/ttyS0)
baudrate -- the baud rate of the serial connection. Set to 4800 by
default, since this is the baud rate specified by the NMEA
specification
"""
NmeaEventSource.__init__(self)
# Configure the serial port
self.__serialPort = Serial(port = port, baudrate = baudrate)
# Create a thread to read from the serial port
self.__thread = Thread(target = self.__threadLoop,
name = "NmeaSerialGps thread")
# Set a variable that lets us know when we have been requested to stop
self.__stopRequested = False
def __del__(self):
"""Destroy an NMEA serial GPS instance"""
self.__serialPort.close()
def start(self):
"""Start capturing data from the serial GPS device"""
self.__stopRequested = False
self.__thread.start()
def stop(self):
"""Stop capturing data from the serial GPS device"""
self.__stopRequested = True
self.__thread.join()
def __threadLoop(self):
"""Retrieve NMEA sentences from the device"""
while not self.__stopRequested:
nextSentence = self.__serialPort.readline()
if (nextSentence != None):
nmea = None
try:
# nmea will be None if the latest message is a type we
# don't understand
nmea = NmeaParser.NmeaParser.Parse(nextSentence)
except NmeaParser.InvalidNmeaSentence, e:
# Send an invalid NMEA sentence event
self.sendInvalidNmeaSentenceEvent(e)
except NmeaSentences.InvalidGpggaSentence, e:
# Send an invalid GPGGA sentence event
self.sendInvalidGpggaSentenceEvent(e)
if (nmea != None):
if isinstance(nmea, NmeaSentences.GpggaSentence):
# Send a GPGGA event
self.sendGpggaEvent(nmea)
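# A minimal usage sketch; "/dev/ttyUSB0" is an assumed device path and the
# 10-second capture window is illustrative only.
if __name__ == "__main__":
    gps = NmeaSerialGps("/dev/ttyUSB0")
    gps.start()
    time.sleep(10)
    gps.stop()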
| 3.203125
| 3
|
greatbigcrane/urls.py
|
pnomolos/greatbigcrane
| 3
|
12779196
|
"""
Copyright 2010 <NAME>, <NAME>, and <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from django.conf.urls.defaults import *
from django.conf import settings
from django.contrib import admin
admin.autodiscover()
handler500 # Pyflakes
urlpatterns = patterns('',
url('^$', 'project.views.index', name="dashboard_url"),
url('^about/$', 'project.views.about', name="about_url"),
(r'^notifications/', include('notifications.urls')),
(r'^projects/', include('project.urls')),
(r'^recipes/', include('recipes.urls')),
(r'^preferences/', include('preferences.urls')),
(r'^jobs/', include('job_queue.urls')),
)
if settings.DEBUG:
urlpatterns += patterns('',
(r'^media/(?P<path>.*)$', 'django.views.static.serve',
{'document_root': settings.MEDIA_ROOT}),
)
| 1.625
| 2
|
Makro Keyboard for PC/deviceConfig.py
|
erenterzioglu/Macro-Keyboard
| 3
|
12779197
|
from serial import *
import serial.tools.list_ports
#import serial.tools.list_ports
import jsonConfig as j
import time
#connected_devices=[""]
def serialConnection(values,device):
print("hi ")
print("serialConnection()")
print("values: {} , port: {}".format(values,device))
#c_number=getDeviceComNumber(device)
#print(c_number)
print("port is going to open")
    port = serial.Serial(getDeviceComNumber(device), 9600, timeout=1)  # serial port will change
time.sleep(3)
if(port is not None):
print("port openned")
for i in range(16):
binding_str= ''.join(j.getBindings(i,values))
print("str value: {} str type: {}".format(binding_str, type(binding_str)))
time.sleep(0.001)
if(len(binding_str)>-1):
port.write(bytes("{}".format(chr(i)),encoding="ascii"))
while(port.inWaiting()<1):
time.sleep(0.1)
print("Waiting data")
time.sleep(0.1)
print("Data comes: ")
print(port.readline().decode('ascii'))
port.write(bytes("{}".format(chr(len(binding_str))),encoding="ascii"))
while(port.inWaiting()<1):
time.sleep(0.1)
print("Waiting data")
#time.sleep(0.1)
print("Data need to send lenght: ")
print(port.readline().decode('ascii'))
#port.write(bytes(binding_str,encoding="ascii"))
print(binding_str.encode('iso8859_9'))
port.write(binding_str.encode('iso8859_9'))
while(port.inWaiting()<len(binding_str)):
time.sleep(0.1)
print("Waiting data")
time.sleep(0.1)
print("Data comes: ")
print(port.readline().decode('iso8859_9'))
else:
print("Bindings of the key error ")
port.close()
else:
print("Device cannot found")
def getDevices():
connected_devices=[]
connected_devices=serial.tools.list_ports.comports()
#print("connected_devices:")
#print(connected_devices)
#for i in connected_devices:
# print(i)
return connected_devices
def getDeviceComNumber(device):
number= device.split(' ')
#print(number[0])
return number[0]
| 3
| 3
|
a10sdk/core/authentication/authentication_console.py
|
deepfield/a10sdk-python
| 16
|
12779198
|
from a10sdk.common.A10BaseClass import A10BaseClass
class TypeCfg(A10BaseClass):
"""This class does not support CRUD Operations please use parent.
:param console_type: {"enum": ["ldap", "local", "radius", "tacplus"], "type": "string", "format": "enum-list"}
:param type: {"default": 0, "type": "number", "description": "The login authentication type", "format": "flag"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.b_key = "<KEY>"
self.DeviceProxy = ""
self.console_type = ""
self.A10WW_type = ""
for keys, value in kwargs.items():
setattr(self,keys, value)
class Console(A10BaseClass):
"""Class Description::
Configure console authentication type.
Class console supports CRUD Operations and inherits from `common/A10BaseClass`.
This class is the `"PARENT"` class for this module.`
:param uuid: {"description": "uuid of the object", "format": "string", "minLength": 1, "modify-not-allowed": 1, "optional": true, "maxLength": 64, "type": "string"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
URL for this object::
`https://<Hostname|Ip address>//axapi/v3/authentication/console`.
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.required=[]
self.b_key = "console"
self.a10_url="/axapi/v3/authentication/console"
self.DeviceProxy = ""
self.type_cfg = {}
self.uuid = ""
for keys, value in kwargs.items():
setattr(self,keys, value)
| 2.1875
| 2
|
auth_service/__init__.py
|
yoophi/auth-service
| 0
|
12779199
|
<gh_stars>0
import logging.config
from datetime import datetime
from flask import Flask
from flask_social_login import SQLAlchemyConnectionDatastore
from auth_service.database import db, migrate
from auth_service.extensions import cors, ma
from auth_service.oauth2 import config_oauth
from .config import config
from .social import social
from .models import User, Role, Connection
from .user import user_manager
def create_app(config_name="default", settings_override=None):
init_logging()
app = Flask(__name__)
app_config = config[config_name]
app.config.from_object(app_config)
app_config.init_app(app)
init_db(app)
user_manager.init_app(app, db, User)
app.extensions["login_manager"] = user_manager.login_manager
init_extensions(app)
init_social(app, db)
if settings_override:
app.config.update(settings_override)
init_blueprint(app)
init_commands(app)
config_oauth(app)
return app
def init_logging():
LOGGING = {
"version": 1,
"formatters": {
"brief": {"format": "%(message)s"},
"default": {
"format": "%(asctime)s %(levelname)-8s %(name)-15s %(message)s",
"datefmt": "%Y-%m-%d %H:%M:%S",
},
},
"handlers": {
"console": {
"class": "logging.StreamHandler",
"level": "DEBUG",
"formatter": "default",
"stream": "ext://sys.stdout",
},
"none": {"class": "logging.NullHandler"},
},
"loggers": {
"amqp": {"handlers": ["none"], "propagate": False},
"conf": {"handlers": ["none"], "propagate": False},
"": {
"handlers": [
"console",
],
"level": "DEBUG",
"propagate": False,
},
},
}
logging.config.dictConfig(LOGGING)
def init_db(app):
db.init_app(app)
migrate.init_app(app, db)
def init_extensions(app):
cors.init_app(
app,
resources={
r"/*": {"origins": "*"},
},
)
ma.init_app(app)
def init_social(app, db):
def wrapper(db, User, Connection):
def connection_not_found_handler(cv):
user = User()
user.active = True
if cv["email"]:
user.email = cv["email"]
user.email_confirmed_at = datetime.utcnow()
db.session.add(user)
connection = Connection()
connection.user = user
for k, v in cv.items():
setattr(connection, k, v)
db.session.add(connection)
db.session.commit()
return connection
return connection_not_found_handler
app.config["SOCIAL_CONNECTION_NOT_FOUND_HANDLER"] = wrapper(db, User, Connection)
datastore = SQLAlchemyConnectionDatastore(db, Connection)
social.init_app(app, datastore)
def init_blueprint(app):
from auth_service.api import api as api_bp
from auth_service.oauth import oauth2 as oauth_bp
from auth_service.swagger import swagger_bp
from auth_service.views import main as main_bp
app.register_blueprint(main_bp, url_prefix="/")
app.register_blueprint(api_bp, url_prefix="/api")
app.register_blueprint(oauth_bp, url_prefix="/oauth")
app.register_blueprint(swagger_bp, url_prefix="/swagger")
def init_commands(app):
@app.cli.command("init-db")
def init_database():
db.create_all()
# Create '<EMAIL>' user with no roles
if (
not db.session.query(User)
.filter(User.email == "<EMAIL>")
.first()
):
user = User(
email="<EMAIL>",
email_confirmed_at=datetime.utcnow(),
                password=user_manager.hash_password("<PASSWORD>"),
)
db.session.add(user)
db.session.commit()
# Create '<EMAIL>' user with 'Admin' and 'Agent' roles
if not db.session.query(User).filter(User.email == "<EMAIL>").first():
user = User(
email="<EMAIL>",
email_confirmed_at=datetime.utcnow(),
password=user_manager.hash_password("<PASSWORD>"),
)
user.roles.append(Role(name="Admin"))
user.roles.append(Role(name="Agent"))
db.session.add(user)
db.session.commit()
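# A minimal usage sketch (assuming the "default" configuration is complete),
# shown as comments since this module is imported as the package __init__:
#
# app = create_app("default")
# app.run(debug=True)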
| 2.046875
| 2
|
src/endpoints/email.py
|
alexfrunza/FiiPractic-Flask-API-2021
| 0
|
12779200
|
<reponame>alexfrunza/FiiPractic-Flask-API-2021<gh_stars>0
from flask import request, Blueprint, Response
from src.models.user import User
from src.utils.decorators import http_handling, session, is_authorized
email_bp = Blueprint('email', __name__, url_prefix="")
@email_bp.route('/email-confirmation', methods=["GET"])
@http_handling
@session
def activate_user(context):
token = request.args.get('token')
User.activate_user(context, token)
return Response(status=200, response="Account activated")
@email_bp.route('/resend-email-confirmation', methods=["POST"])
@http_handling
@session
@is_authorized
def resend_email_confirmation(context, user):
User.resend_email_confirmation(context, user)
return Response(status=200, response="Email confirmation sent")
| 2.46875
| 2
|
web/blog/comments/views.py
|
BumagniyPacket/django-blog
| 2
|
12779201
|
<filename>web/blog/comments/views.py
from django.contrib.auth.mixins import LoginRequiredMixin
from django.views.generic import CreateView, DeleteView, UpdateView
from blog.comments.forms import CommentForm
from blog.comments.models import Comment
class CommentApproveView(LoginRequiredMixin, UpdateView):
model = Comment
def get_success_url(self):
self.object.approve()
return self.object.article.get_absolute_url()
class CommentDeleteView(LoginRequiredMixin, DeleteView):
model = Comment
def get_success_url(self):
return self.object.article.get_absolute_url()
class CommentAddView(CreateView):
form_class = CommentForm
model = Comment
http_method_names = ['post']
def get_success_url(self):
return self.object.article.get_absolute_url()
| 2.015625
| 2
|
fromconfig_yarn/__init__.py
|
criteo/fromconfig-yarn
| 0
|
12779202
|
# pylint: disable=unused-import,missing-docstring
from fromconfig_yarn.launcher import YarnLauncher
| 1.078125
| 1
|
util/fung_metrics.py
|
eliagbayani/local_reference_taxonomy
| 8
|
12779203
|
<filename>util/fung_metrics.py
from org.opentreeoflife.taxa import Taxonomy
"""
Usage:
from org.opentreeoflife.taxa import Taxonomy
ott = Taxonomy.getTaxonomy('tax/ott2.8/')
import fung_metrics
fung = ott.select('Fungi')
fung.analyze()
fung_metrics.doit(fung)
"""
def doit(fung):
internal = 0
tips = 0
species = 0
fungorum = 0
ncbi = 0
gbif = 0
other = 0
extinct = 0
hidden = 0
for node in fung:
if node.children == None:
tips += 1
else:
internal += 1
if node.rank == 'species':
species += 1
if node.sourceIds != None:
source = node.sourceIds[0].getPrefix()
if source == "if":
fungorum += 1
elif source == "ncbi":
ncbi += 1
elif source == "gbif":
gbif += 1
else:
other += 1
if node.isHidden():
hidden += 1
elif node.isExtinct():
extinct += 1
print "Internal nodes: %s\nTips: %s\nSpecies: %s" % (internal, tips, species)
print " (The following counts are for species only)"
print "From IF: %s\nFrom NCBI but not IF: %s\nFrom GBIF but not NCBI or IF: %s\nFrom elsewhere: %s" % (fungorum, ncbi, gbif, other)
print "Incertae sedis and similar: %s\nExtinct and not incertae sedis: %s" % (hidden, extinct)
| 2.671875
| 3
|
nezzle/graphics/edges/edgeconverter.py
|
dwgoon/nezzle
| 2
|
12779204
|
<filename>nezzle/graphics/edges/edgeconverter.py
from typing import AnyStr
from typing import Union
from typing import Type
from nezzle.graphics.edges.baseedge import BaseEdge
from nezzle.graphics.edges.edgefactory import EdgeClassFactory
class EdgeConverter(object):
@staticmethod
def convert(edge: BaseEdge, edge_type: Union[Type, AnyStr]):
if isinstance(edge_type, str):
edge_type = EdgeClassFactory.create(edge_type)
if type(edge) == edge_type:
return
attr = edge.to_dict()
attr["ITEM_TYPE"] = edge_type.ITEM_TYPE
new_edge = edge_type.from_dict(attr=attr, source=edge.source, target=edge.target)
return new_edge
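# A minimal usage sketch; some_edge is assumed to be an existing BaseEdge
# instance and "CURVED_EDGE" an assumed item-type string known to
# EdgeClassFactory:
#
# converted = EdgeConverter.convert(some_edge, "CURVED_EDGE")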
| 2.265625
| 2
|
bb-master/sandbox/lib/python3.5/site-packages/buildbot/steps/download_secret_to_worker.py
|
Alecto3-D/testable-greeter
| 2
|
12779205
|
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from __future__ import absolute_import
from __future__ import print_function
from twisted.internet import defer
from buildbot.process.buildstep import FAILURE
from buildbot.process.buildstep import SUCCESS
from buildbot.process.buildstep import BuildStep
from buildbot.process.results import worst_status
from buildbot.steps.worker import CompositeStepMixin
class DownloadSecretsToWorker(BuildStep, CompositeStepMixin):
renderables = ['secret_to_be_populated']
def __init__(self, populated_secret_list, **kwargs):
super(DownloadSecretsToWorker, self).__init__(**kwargs)
self.secret_to_be_populated = populated_secret_list
@defer.inlineCallbacks
def runPopulateSecrets(self):
result = SUCCESS
for path, secretvalue in self.secret_to_be_populated:
if not isinstance(path, str):
raise ValueError("Secret path %s is not a string" % path)
self.secret_to_be_interpolated = secretvalue
res = yield self.downloadFileContentToWorker(path, self.secret_to_be_interpolated)
result = worst_status(result, res)
defer.returnValue(result)
@defer.inlineCallbacks
def run(self):
self._start_deferred = None
res = yield self.runPopulateSecrets()
defer.returnValue(res)
class RemoveWorkerFileSecret(BuildStep, CompositeStepMixin):
def __init__(self, populated_secret_list, logEnviron=False, **kwargs):
self.paths = []
for path, secret in populated_secret_list:
self.paths.append(path)
self.logEnviron = logEnviron
super(RemoveWorkerFileSecret, self).__init__(**kwargs)
@defer.inlineCallbacks
def runRemoveWorkerFileSecret(self):
all_results = []
for path in self.paths:
res = yield self.runRmFile(path, abandonOnFailure=False)
all_results.append(res)
if FAILURE in all_results:
result = FAILURE
else:
result = SUCCESS
defer.returnValue(result)
@defer.inlineCallbacks
def run(self):
self._start_deferred = None
res = yield self.runRemoveWorkerFileSecret()
defer.returnValue(res)
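# A minimal usage sketch (the worker path is illustrative and util.Secret
# interpolation is assumed to be configured with a secrets provider):
#
# factory.addStep(DownloadSecretsToWorker([("/tmp/my_secret", util.Secret("my_secret"))]))
# ... steps that read /tmp/my_secret ...
# factory.addStep(RemoveWorkerFileSecret([("/tmp/my_secret", util.Secret("my_secret"))]))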
| 1.789063
| 2
|
docs/conf.py
|
AleCandido/opale
| 0
|
12779206
|
from datetime import datetime
extensions = []
templates_path = ["_templates"]
source_suffix = ".rst"
master_doc = "index"
project = u"Opale"
year = datetime.now().year
copyright = u"%d <NAME> " % year
exclude_patterns = ["_build"]
html_theme = "opale"
html_sidebars = {
"**": [
"about.html",
"navigation.html",
"relations.html",
"searchbox.html",
"donate.html",
]
}
# html_logo = "_static/logo.png"
html_static_path = ["_static"]
html_theme_options = {
"logo": "logo.png",
"logo_name": True,
"logo_text_align": "center",
"description": "Dark theme based on Alabaster.",
"github_user": "AleCandido",
"github_repo": "opale",
"fixed_sidebar": True,
}
extensions.append("releases")
releases_github_path = "AleCandido/opale"
# Our pre-0.x releases are unstable / mix bugs+features
releases_unstable_prehistory = True
| 1.695313
| 2
|
anime/migrations/0004_auto_20210812_1612.py
|
AniLite/API-v1.2
| 2
|
12779207
|
<reponame>AniLite/API-v1.2
# Generated by Django 3.2.6 on 2021-08-12 10:42
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('anime', '0003_anime_episode_summary'),
]
operations = [
migrations.AlterModelOptions(
name='genre',
options={'ordering': ['name']},
),
migrations.AddField(
model_name='anime',
name='cover_image',
field=models.URLField(blank=True, max_length=255, null=True),
),
migrations.AddField(
model_name='anime',
name='poster_image',
field=models.URLField(blank=True, max_length=255, null=True),
),
]
| 1.898438
| 2
|
train.py
|
KimMeen/DCRNN
| 4
|
12779208
|
# -*- coding: utf-8 -*-
"""
Created on Mon Aug 10 22:27:03 2020
@author: <NAME>
"""
import math
import tqdm
import torch
import torch.nn as nn
import pandas as pd
import numpy as np
import utils
from net import DCRNNModel
# import sys
# sys.path.append("./xlwang_version")
# from dcrnn_model import DCRNNModel
"""
Hyperparameters
"""
batch_size = 64
enc_input_dim = 2
dec_input_dim = 1
hidden_dim = 64
output_dim = 1
diffusion_steps = 2
num_nodes = 207
rnn_layers = 2
seq_length = 12
horizon = 12
cl_decay_steps = 2000 # decrease teaching force ratio in global steps
filter_type = "dual_random_walk"
epochs = 100
lr = 0.01
weight_decay = 0.0
epsilon = 1.0e-3
amsgrad = True
lr_decay_ratio = 0.1
lr_decay_steps = [20, 30, 40, 50]
max_grad_norm = 5
checkpoints = './checkpoints/dcrnn.pt'
sensor_ids = './data/METR-LA/graph_sensor_ids.txt'
sensor_distance = './data/METR-LA/distances_la_2012.csv'
recording='data/processed/METR-LA'
"""
Dataset
"""
# read sensor IDs
with open(sensor_ids) as f:
sensor_ids = f.read().strip().split(',')
# read sensor distance
distance_df = pd.read_csv(sensor_distance, dtype={'from': 'str', 'to': 'str'})
# build adj matrix based on equation (10)
adj_mx = utils.get_adjacency_matrix(distance_df, sensor_ids)
data = utils.load_dataset(dataset_dir=recording, batch_size=batch_size, test_batch_size=batch_size)
train_data_loader = data['train_loader']
val_data_loader = data['val_loader']
test_data_loader = data['test_loader']
standard_scaler = data['scaler']
"""
Init model
"""
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = DCRNNModel(adj_mx,
diffusion_steps,
num_nodes,
batch_size,
enc_input_dim,
dec_input_dim,
hidden_dim,
output_dim,
rnn_layers,
filter_type).to(device)
# model = DCRNNModel(adj_mx,
# batch_size,
# enc_input_dim,
# dec_input_dim,
# diffusion_steps,
# num_nodes,
# rnn_layers,
# hidden_dim,
# horizon,
# output_dim,
# filter_type).to(device)
optimizer = torch.optim.Adam(model.parameters(),
lr=lr, eps=epsilon,
weight_decay=weight_decay,
                             amsgrad=amsgrad)
lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer,
milestones=lr_decay_steps,
gamma=lr_decay_ratio)
"""
DCRNN Training
"""
def compute_mae_loss(y_true, y_predicted, standard_scaler):
y_true = standard_scaler.inverse_transform(y_true)
y_predicted = standard_scaler.inverse_transform(y_predicted)
return utils.masked_mae_loss(y_predicted, y_true, null_val=0.0)
def eval_metrics(y_true_np, y_predicted_np, standard_scaler):
metrics = np.zeros(3)
y_true_np = standard_scaler.inverse_transform(y_true_np)
y_predicted_np = standard_scaler.inverse_transform(y_predicted_np)
mae = utils.masked_mae_np(y_predicted_np, y_true_np, null_val=0.0)
mape = utils.masked_mape_np(y_predicted_np, y_true_np, null_val=0.0)
rmse = utils.masked_rmse_np(y_predicted_np, y_true_np, null_val=0.0)
metrics[0] += mae
metrics[1] += mape
metrics[2] += rmse
return metrics
# some pre-calculated properties
num_train_iteration_per_epoch = math.ceil(data['x_train'].shape[0] / batch_size)
num_val_iteration_per_epoch = math.ceil(data['x_val'].shape[0] / batch_size)
num_test_iteration_per_epoch = math.ceil(data['x_test'].shape[0] / batch_size)
# start training
model_parameters = filter(lambda p: p.requires_grad, model.parameters())
params = sum([np.prod(p.size()) for p in model_parameters])
print("Total number of trainable parameters:", params)
print("Initialization complete. Start training... ==>", epochs, "epochs with", num_train_iteration_per_epoch, "batches per epoch.")
for epoch in range(1, epochs + 1):
model.train()
train_iterator = train_data_loader.get_iterator()
val_iterator = val_data_loader.get_iterator()
total_loss = 0.0
total_metrics = np.zeros(3) # Three matrics: MAE, MAPE, RMSE
total_val_metrics = np.zeros(3)
for batch_idx, (x, y) in enumerate(tqdm.tqdm(train_iterator)):
x = torch.FloatTensor(x)
y = torch.FloatTensor(y)
y_true = y[..., :output_dim] # delete time encoding to form as label
# x:[batch, seq_len, nodes, enc_input_dim]
# y:[batch, horizon, nodes, output_dim + 1]
x, y = x.to(device), y.to(device)
optimizer.zero_grad()
# compute teaching force ratio: decrease this gradually to 0
global_steps = (epoch - 1) * num_train_iteration_per_epoch + batch_idx
teaching_force_ratio = cl_decay_steps / (cl_decay_steps + math.exp(global_steps / cl_decay_steps))
# feedforward
y_hat = model(x, y, teaching_force_ratio) # [horizon, batch, nodes*output_dim]
y_hat = torch.transpose(torch.reshape(y_hat, (horizon, batch_size, num_nodes, output_dim)), 0, 1) # [batch, horizon, nodes, output_dim]
# back propagation
loss = compute_mae_loss(y_true, y_hat.cpu(), standard_scaler)
loss.backward()
# gradient clipping
nn.utils.clip_grad_norm_(model.parameters(), max_grad_norm)
optimizer.step()
# training statistics
total_loss += loss.item()
t_metrics = eval_metrics(y_true.numpy(), y_hat.detach().cpu().numpy(), standard_scaler)
total_metrics += t_metrics
# print('Batch_idx {:03d} | TF {:.4f} | Train MAE {:.5f} | Train MAPE {:.5f} | Train RMSE {:.5f}'.format(
# batch_idx, teaching_force_ratio, loss.item(), t_metrics[1], t_metrics[2]))
# validation after each epoch
model.eval()
with torch.no_grad():
for _, (val_x, val_y) in enumerate(tqdm.tqdm(val_iterator)):
val_x = torch.FloatTensor(val_x)
val_y = torch.FloatTensor(val_y)
val_y_true = val_y[..., :output_dim] # delete time encoding to form as label
# val_x:[batch, seq_len, nodes, enc_input_dim]
# val_y:[batch, horizon, nodes, output_dim + 1]
val_x, val_y = val_x.to(device), val_y.to(device)
val_y_hat = model(val_x, val_y, 0)
val_y_hat = torch.transpose(torch.reshape(val_y_hat, (horizon, batch_size, num_nodes, output_dim)), 0, 1) # [batch, horizon, nodes, output_dim]
total_val_metrics += eval_metrics(val_y_true.numpy(), val_y_hat.detach().cpu().numpy(), standard_scaler)
# learning rate scheduling
lr_scheduler.step()
# GPU mem usage
gpu_mem_alloc = torch.cuda.max_memory_allocated() / 1000000 if torch.cuda.is_available() else 0
# save model every epoch
torch.save(model.state_dict(), checkpoints)
# logging
val_metrics = (total_val_metrics / num_val_iteration_per_epoch).tolist()
    print('Epoch {:03d} | lr {:.6f} | Train loss {:.5f} | Val MAE {:.5f} | Val MAPE {:.5f} | Val RMSE {:.5f} | GPU {:.1f} MiB'.format(
epoch, optimizer.param_groups[0]['lr'], total_loss / num_train_iteration_per_epoch, val_metrics[0], val_metrics[1], val_metrics[2], gpu_mem_alloc))
print("Training complete.")
"""
DCRNN Testing
"""
print("\nmodel testing...")
test_iterator = test_data_loader.get_iterator()
total_test_metrics = np.zeros(3)
model.eval()
with torch.no_grad():
for _, (test_x, test_y) in enumerate(tqdm.tqdm(test_iterator)):
test_x = torch.FloatTensor(test_x)
test_y = torch.FloatTensor(test_y)
test_y_true = test_y[..., :output_dim] # delete time encoding to form as label
# test_x:[batch, seq_len, nodes, enc_input_dim]
# test_y:[batch, horizon, nodes, output_dim + 1]
test_x, test_y = test_x.to(device), test_y.to(device)
test_y_hat = model(test_x, test_y, 0)
test_y_hat = torch.transpose(torch.reshape(test_y_hat, (horizon, batch_size, num_nodes, output_dim)), 0, 1) # [batch, horizon, nodes, output_dim]
total_test_metrics += eval_metrics(test_y_true.numpy(), test_y_hat.detach().cpu().numpy(), standard_scaler)
test_metrics = (total_test_metrics / num_test_iteration_per_epoch).tolist()
print('Test MAE {:.5f} | Test MAPE {:.5f} | Test RMSE {:.5f}'.format(test_metrics[0], test_metrics[1], test_metrics[2]))
| 1.898438
| 2
|
setup.py
|
WesBAn/kivy_python_checkers
| 0
|
12779209
|
<filename>setup.py
import setuptools
from setuptools import setup
with open("README.md", "r") as fh:
long_description = fh.read()
setup(
name='kivy_python_checkers',
version='0.8',
url='https://github.com/WesBAn/kivy_python_checkers',
license='MIT License',
author='mcwesban',
author_email='<EMAIL>',
description='Simple checkers game realized with kivy + python3',
long_description=long_description,
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3.6",
"License :: OSI Approved :: MIT License",
"Development Status :: 4 - Beta",
"Environment :: MacOS X",
"Operating System :: MacOS :: MacOS X",
"Operating System :: Unix",
"Topic :: Games/Entertainment :: Board Games",
"Natural Language :: Russian"
],
)
| 1.476563
| 1
|
frocket/common/tasks/registration.py
|
DynamicYieldProjects/funnel-rocket
| 56
|
12779210
|
"""
Task request/response classes for the registration job (discovering, validating and storing metadata for a dataset)
"""
# Copyright 2021 The Funnel Rocket Maintainers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass
from enum import auto
from typing import Optional
from frocket.common.dataset import DatasetInfo, DatasetPartId, DatasetSchema
from frocket.common.serializable import SerializableDataClass, AutoNamedEnum, enveloped
from frocket.common.tasks.base import BaseTaskRequest, BaseTaskResult, BlobId, BaseJobResult, BaseApiResult
class DatasetValidationMode(AutoNamedEnum):
SINGLE = auto() # Only validate a single file in the dataset (meaning no cross-file consistency checks are done!)
FIRST_LAST = auto() # Validate only first and last files (by lexicographic sorting) and cross-check them
SAMPLE = auto() # Takes a sample of files, proportional to the number of files and up to a configured maximum.
REGISTER_DEFAULT_FILENAME_PATTERN = '*.parquet' # Ignore files such as '_SUCCESS' and the like in discovery
REGISTER_DEFAULT_VALIDATION_MODE = DatasetValidationMode.SAMPLE
REGISTER_DEFAULT_VALIDATE_UNIQUES = True
@dataclass(frozen=True)
class RegisterArgs(SerializableDataClass):
"""Parameters collected by the CLI / API server for the registration job"""
name: str
basepath: str
group_id_column: str
timestamp_column: str
pattern: str = REGISTER_DEFAULT_FILENAME_PATTERN
validation_mode: DatasetValidationMode = REGISTER_DEFAULT_VALIDATION_MODE
validate_uniques: bool = REGISTER_DEFAULT_VALIDATE_UNIQUES
@enveloped
@dataclass(frozen=True)
class RegistrationTaskRequest(BaseTaskRequest):
dataset: DatasetInfo
part_id: DatasetPartId
# If RegisterArgs.validate_uniques=true, task should return all group IDs in file
return_group_ids: bool
@enveloped
@dataclass(frozen=True)
class RegistrationTaskResult(BaseTaskResult):
dataset_schema: Optional[DatasetSchema] # None on failures
part_id: DatasetPartId
# If RegistrationTaskRequest.return_group_ids=true, a reference to the blob with the group IDs
group_ids_blob_id: Optional[BlobId]
@dataclass(frozen=True)
class RegistrationJobResult(BaseJobResult):
dataset: DatasetInfo
@dataclass(frozen=True)
class UnregisterApiResult(BaseApiResult):
dataset_found: bool
dataset_last_used: Optional[float]
| 2.046875
| 2
|
luna/interaction/fp/fingerprint.py
|
keiserlab/LUNA
| 2
|
12779211
|
import numpy as np
from rdkit.DataStructs.cDataStructs import ExplicitBitVect, SparseBitVect
from scipy.sparse import issparse, csr_matrix
from collections import defaultdict
from rdkit import DataStructs
from luna.util.exceptions import (BitsValueError, InvalidFingerprintType, IllegalArgumentError, FingerprintCountsError)
from luna.version import __version__
import logging
logger = logging.getLogger()
DEFAULT_FP_LENGTH = 2**32
DEFAULT_FOLDED_FP_LENGTH = 4096
DEFAULT_FP_DTYPE = np.int32
class Fingerprint:
"""A fingerprint that stores indices of "on" bits.
Parameters
----------
indices : array_like of int
Indices of "on" bits.
fp_length : int
The fingerprint length (total number of bits). The default value is :math:`2^{32}`.
unfolded_fp : `Fingerprint` or None
The unfolded version of this fingerprint.
If None, this fingerprint may have not been folded yet.
unfolding_map : dict, optional
A mapping between current indices and indices from the unfolded version of this fingerprint,
which makes it possible to trace folded bits back to the original shells (features).
props: dict, optional
Custom properties of the fingerprint, consisting of a string keyword and
some value. It can be used, for instance, to save the ligand name
and parameters used to generate shells (IFP features).
"""
def __init__(self, indices, fp_length=DEFAULT_FP_LENGTH, unfolded_fp=None, unfolding_map=None, props=None):
indices = np.asarray(indices, dtype=np.long)
if np.any(np.logical_or(indices < 0, indices >= fp_length)):
logger.exception("Provided indices are in a different bit scale.")
raise BitsValueError("Provided indices are in a different bit scale.")
self._indices = np.unique(indices)
self._fp_length = fp_length
self._unfolded_fp = unfolded_fp
self._unfolding_map = unfolding_map or {}
self._props = props or {}
self.version = __version__
@classmethod
def from_indices(cls, indices, fp_length=DEFAULT_FP_LENGTH, **kwargs):
"""Initialize from an array of indices.
Parameters
----------
indices : array_like of int
Indices of "on" bits.
fp_length : int
The fingerprint length (total number of bits). The default value is :math:`2^{32}`.
**kwargs : dict, optional
Extra arguments to `Fingerprint`. Refer to the documentation for a
list of all possible arguments.
Returns
-------
: `Fingerprint`
Examples
--------
>>> from luna.interaction.fp.fingerprint import Fingerprint
>>> import numpy as np
>>> np.random.seed(0)
>>> on_bits = 8
>>> fp_length = 32
>>> indices = np.random.randint(0, fp_length, on_bits)
>>> print(indices)
[12 15 21 0 3 27 3 7]
>>> fp = Fingerprint.from_indices(indices, fp_length=fp_length)
>>> print(fp.indices)
[ 0 3 7 12 15 21 27]
>>> print(fp.to_vector(compressed=False))
[1 0 0 1 0 0 0 1 0 0 0 0 1 0 0 1 0 0 0 0 0 1 0 0 0 0 0 1 0 0 0 0]
"""
return cls(indices, fp_length, **kwargs)
@classmethod
def from_vector(cls, vector, fp_length=None, **kwargs):
"""Initialize from a vector.
Parameters
----------
vector : :class:`numpy.ndarray` or :class:`scipy.sparse.csr_matrix`
Array of bits.
fp_length : int, optional
The fingerprint length (total number of bits).
If not provided, the fingerprint length will be defined based on the ``vector`` shape.
**kwargs : dict, optional
Extra arguments to `Fingerprint`. Refer to the documentation for a
list of all possible arguments.
Returns
-------
: `Fingerprint`
Examples
--------
>>> from luna.interaction.fp.fingerprint import Fingerprint
>>> import numpy as np
>>> np.random.seed(0)
>>> fp_length = 32
>>> vector = np.random.choice([0, 1], size=(fp_length,), p=[0.8, 0.2])
>>> print(vector)
[0 0 0 0 0 0 0 1 1 0 0 0 0 1 0 0 0 1 0 1 1 0 0 0 0 0 0 1 0 0 0 0]
>>> fp = Fingerprint.from_vector(vector)
>>> print(fp.indices)
[ 7 8 13 17 19 20 27]
>>> print(fp.fp_length)
32
"""
if fp_length is None:
try:
fp_length = vector.shape[1]
except IndexError:
fp_length = vector.shape[0]
if issparse(vector):
indices = vector.indices.astype(np.long)
else:
indices = np.asarray(np.where(vector), dtype=np.long).flatten()
return cls.from_indices(indices, fp_length, **kwargs)
@classmethod
def from_bit_string(cls, bit_string, fp_length=None, **kwargs):
"""Initialize from a bit string (e.g. '0010100110').
Parameters
----------
bit_string : str
String of 0s and 1s.
fp_length : int, optional
The fingerprint length (total number of bits).
If not provided, the fingerprint length will be defined based on the string length.
**kwargs : dict, optional
Extra arguments to `Fingerprint`. Refer to the documentation for a
list of all possible arguments.
Returns
-------
: `Fingerprint`
Examples
--------
>>> from luna.interaction.fp.fingerprint import Fingerprint
>>> fp = Fingerprint.from_bit_string("0010100110000010")
>>> print(fp.indices)
[ 2 4 7 8 14]
>>> print(fp.fp_length)
16
"""
indices = [i for i, char in enumerate(bit_string) if char != '0']
if fp_length is None:
fp_length = len(bit_string)
return cls.from_indices(indices, fp_length, **kwargs)
@classmethod
def from_rdkit(cls, rdkit_fp, **kwargs):
"""Initialize from an RDKit fingerprint.
Parameters
----------
rdkit_fp : :class:`~rdkit.DataStructs.cDataStructs.ExplicitBitVect` or :class:`~rdkit.DataStructs.cDataStructs.SparseBitVect`
An existing RDKit fingerprint.
**kwargs : dict, optional
Extra arguments to `Fingerprint`. Refer to the documentation for a
list of all possible arguments.
Returns
-------
: `Fingerprint`
"""
if not (isinstance(rdkit_fp, ExplicitBitVect) or isinstance(rdkit_fp, SparseBitVect)):
logger.exception("Invalid fingerprint type. RDKit only accepts a SparseBitVect or ExplicitBitVect object.")
raise TypeError("Invalid fingerprint type. RDKit only accepts a SparseBitVect or ExplicitBitVect object.")
fp_length = rdkit_fp.GetNumBits()
indices = np.asarray(rdkit_fp.GetOnBits(), dtype=np.long)
return cls.from_indices(indices, fp_length, **kwargs)
@classmethod
def from_fingerprint(cls, fp, **kwargs):
"""Initialize from an existing fingerprint.
Parameters
----------
fp : `Fingerprint`
An existing fingerprint.
**kwargs : dict, optional
Extra arguments to `Fingerprint`. Refer to the documentation for a
list of all possible arguments.
Returns
-------
: `Fingerprint`
"""
if not isinstance(fp, Fingerprint):
logger.exception("Informed fingerprint is not an instance of %s." % (cls.__class__))
raise InvalidFingerprintType("Informed fingerprint is not an instance of %s." % (cls.__class__))
unfolded_fp = fp.__class__.from_fingerprint(fp.unfolded_fp) if fp.unfolded_fp is not None else None
unfolding_map = dict(fp.unfolding_map)
props = dict(fp.props)
return cls.from_indices(fp.indices, fp.fp_length, unfolded_fp=unfolded_fp, unfolding_map=unfolding_map, props=props)
@property
def indices(self):
"""array_like of int, read-only: Indices of "on" bits."""
return self._indices
@property
def bit_count(self):
"""int, read-only: Number of "on" bits."""
return self.indices.shape[0]
@property
def density(self):
"""float, read-only: Proportion of "on" bits in fingerprint."""
return self.bit_count / self.fp_length
@property
def counts(self):
"""dict, read-only: Mapping between each index in ``indices`` to the number of counts, which is always 1 for bit fingerprints."""
return dict([(k, 1) for k in self.indices])
@property
def fp_length(self):
"""int, read-only: The fingerprint length (total number of bits)."""
return self._fp_length
@property
def unfolded_fp(self):
"""`Fingerprint` or None, read-only: The unfolded version of this fingerprint. If None, this fingerprint may have not been folded yet."""
if self._unfolded_fp is None:
logger.warning("This fingerprint was not previously folded.")
return None
return self._unfolded_fp
@property
def unfolded_indices(self):
"""array_like of int, read-only: Indices of "on" bits in the unfolded fingerprint."""
if self._unfolding_map is None:
logger.warning("This fingerprint was not previously folded.")
return None
return self.unfolded_fp.indices
@property
def unfolding_map(self):
"""dict, read-only: The mapping between current indices and indices from the unfolded version of this fingerprint
what makes it possible to trace folded bits back to the original shells (features)."""
if self._unfolding_map is None:
logger.warning("This fingerprint was not previously folded.")
return None
return self._unfolding_map
@property
def props(self):
"""dict, read-only: The custom properties of the fingerprint."""
return self._props
@property
def name(self):
"""str: The property 'name'. If it was not provided, then return an empty string."""
return self.props.get("name", "")
@name.setter
def name(self, name):
self.props["name"] = str(name)
@property
def num_levels(self):
"""int: The property 'num_levels' used to generate this fingerprint \
(see :class:`~luna.interaction.fp.shell.ShellGenerator`). \
If it was not provided, then return None."""
return self.props.get("num_levels", None)
@num_levels.setter
def num_levels(self, num_levels):
self.props["num_levels"] = str(num_levels)
@property
def radius_step(self):
"""float: The property 'radius_step' used to generate this fingerprint \
(see :class:`~luna.interaction.fp.shell.ShellGenerator`). \
If it was not provided, then return None."""
return self.props.get("radius_step", None)
@radius_step.setter
def radius_step(self, radius_step):
self.props["radius_step"] = str(radius_step)
@property
def num_shells(self):
"""int: The property 'num_shells' \
(see :class:`~luna.interaction.fp.shell.ShellGenerator`). \
If it was not provided, then return None."""
return self.props.get("num_shells", None)
@num_shells.setter
def num_shells(self, num_shells):
self.props["num_shells"] = str(num_shells)
def get_prop(self, key):
"""Get value of the property ``key``. If not set, raise KeyError."""
try:
return self.props[key]
except KeyError:
logger.warning("Key '%s' does not exist." % key)
return None
def set_prop(self, key, value):
"""Set value to the property ``key``."""
self.props[key] = value
def get_num_bits(self):
"""Get the fingerprint length (total number of bits)."""
return self.fp_length
def get_num_on_bits(self):
"""Get the number of "on" bits."""
return self.bit_count
def get_num_off_bits(self):
"""Get the number of "off" bits."""
return self.get_num_bits() - self.get_num_on_bits()
def get_bit(self, index):
"""Get the bit/count value at index ``index``.
Raises
------
BitsValueError
If the provided index is in a different bit scale.
"""
if index in self.counts:
return self.counts[index]
elif index >= 0 and index < self.fp_length:
return 0
else:
logger.exception("The provided index is in a different bit scale.")
raise BitsValueError("The provided index is in a different bit scale.")
def get_on_bits(self):
"""Get "on" bits.
Returns
-------
: :class:`numpy.ndarray`
"""
return np.array([k for (k, v) in self.counts.items() if v > 0])
def to_vector(self, compressed=True, dtype=DEFAULT_FP_DTYPE):
"""Convert this fingerprint to a vector of bits/counts.
.. warning::
This function may raise a `MemoryError` exception when using huge indices vectors.
If you found this issue, you may want to try a different data type
or apply a folding operation before calling `to_vector`.
Parameters
-------
compressed : bool
If True, build a compressed sparse matrix (scipy.sparse.csr_matrix).
dtype : data-type
The default value is np.int32.
Returns
-------
: :class:`numpy.ndarray` or :class:`scipy.sparse.csr_matrix`
Vector of bits/counts.
Return a compressed sparse matrix (`scipy.sparse.csr_matrix`) if ``compressed`` is True.
Otherwise, return a Numpy array (:class:`numpy.ndarray`)
Raises
------
BitsValueError
If some of the fingerprint indices are greater than the fingerprint length.
MemoryError
If the operation ran out of memory.
"""
data = [self.counts[i] for i in self.indices]
if compressed:
try:
row = np.zeros(self.bit_count)
col = self.indices
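# Builds a single-row sparse matrix: every stored count sits in row 0 at the column given by its bit index.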
vector = csr_matrix((data, (row, col)), shape=(1, self.fp_length), dtype=dtype)
except ValueError as e:
logger.exception(e)
raise BitsValueError("Sparse matrix construction failed. Invalid indices or input data.")
else:
try:
# This function is causing a MemoryError exception when using a 2**32 vector.
vector = np.zeros(self.fp_length, dtype=dtype)
except MemoryError as e:
logger.exception(e)
raise MemoryError("Huge indices vector detected. An operation ran out of memory. "
"Use a different data type or apply a folding operation.")
try:
vector[self.indices] = data
except IndexError as e:
logger.exception(e)
raise BitsValueError("Some of the provided indices are greater than the fingerprint length.")
return vector
def to_bit_vector(self, compressed=True):
"""Convert this fingerprint to a vector of bits.
.. warning::
This function may raise a `MemoryError` exception when using huge indices vectors.
If you found this issue, you may want to try a different data type
or apply a folding operation before calling `to_bit_vector`.
Parameters
-------
compressed : bool
If True, build a compressed sparse matrix (scipy.sparse.csr_matrix).
Returns
-------
: :class:`numpy.ndarray` or :class:`scipy.sparse.csr_matrix`
Vector of bits/counts.
Return a compressed sparse matrix (`scipy.sparse.csr_matrix`) if ``compressed`` is True.
Otherwise, return a Numpy array (:class:`numpy.ndarray`)
Raises
------
BitsValueError
If some of the fingerprint indices are greater than the fingerprint length.
MemoryError
If the operation ran out of memory.
"""
return self.to_vector(compressed=compressed, dtype=np.bool_).astype(np.int8)
def to_bit_string(self):
"""Convert this fingerprint to a string of bits.
.. warning::
This function may raise a `MemoryError` exception when using huge indices vectors.
If you found this issue, you may want to try a different data type
or apply a folding operation before calling `to_bit_string`.
Returns
-------
: str
Raises
------
MemoryError
If the operation ran out of memory.
"""
try:
# This function is causing a MemoryError exception when using a 2**32 vector.
bit_vector = self.to_bit_vector(compressed=False).astype(np.int8)
return "".join(map(str, bit_vector))
except MemoryError as e:
logger.exception(e)
raise MemoryError("Huge indices vector detected. An operation ran out of memory. "
"Use a different data type or apply a folding operation.")
def to_rdkit(self, rdkit_fp_cls=None):
"""Convert this fingerprint to an RDKit fingerprint.
.. note::
If the fingerprint length exceeds the maximum RDKit fingerprint length (:math:`2^{31} - 1`),
this fingerprint will be folded to length :math:`2^{31} - 1` before conversion.
Returns
-------
: :class:`~rdkit.DataStructs.cDataStructs.ExplicitBitVect` or :class:`~rdkit.DataStructs.cDataStructs.SparseBitVect`
If ``fp_length`` is less than :math:`1e5`, :class:`~rdkit.DataStructs.cDataStructs.ExplicitBitVect` is used.
Otherwise, :class:`~rdkit.DataStructs.cDataStructs.SparseBitVect` is used.
"""
if rdkit_fp_cls is None:
# Classes to store explicit bit vectors: ExplicitBitVect or SparseBitVect.
# ExplicitBitVect is most useful for situations where the size of the vector is
# relatively small (tens of thousands or smaller).
# For larger vectors, use the _SparseBitVect_ class instead.
if self.fp_length < 1e5:
rdkit_fp_cls = ExplicitBitVect
else:
rdkit_fp_cls = SparseBitVect
# RDKit data structure defines fingerprints as a std:set composed of ints (signed int).
# Since we always have values higher than 0 and since the data structure contains only signed ints,
# then the max length for a RDKit fingerprint is 2^31 - 1.
# C signed int (32 bit) ranges: [-2^31, 2^31-1].
max_rdkit_fp_length = 2**31 - 1
fp_length = self.fp_length
if max_rdkit_fp_length < fp_length:
logger.warning("The current fingerprint will be folded as its size is higher than the maximum "
"size accepted by RDKit, which is 2**31 - 1.")
fp_length = max_rdkit_fp_length
indices = self.indices % max_rdkit_fp_length
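# Taking indices modulo the RDKit maximum is the same modulo scheme fold() uses, just without keeping an unfolding map.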
rdkit_fp = rdkit_fp_cls(fp_length)
rdkit_fp.SetBitsFromList(indices.tolist())
return rdkit_fp
def fold(self, new_length=DEFAULT_FOLDED_FP_LENGTH):
"""Fold this fingerprint to size ``new_length``.
Parameters
----------
new_length : int
Length of the new fingerprint, ideally multiple of 2. The default value is 4096.
Returns
-------
: `Fingerprint`
Folded `Fingerprint`.
Raises
------
BitsValueError
If the new fingerprint length is not a multiple of 2 or is greater than the existing fingerprint length.
Examples
--------
>>> from luna.interaction.fp.fingerprint import Fingerprint
>>> import numpy as np
>>> np.random.seed(0)
>>> on_bits = 8
>>> fp_length = 32
>>> indices = np.random.randint(0, fp_length, on_bits)
>>> print(indices)
[12 15 21 0 3 27 3 7]
>>> fp = Fingerprint.from_indices(indices, fp_length=fp_length)
>>> print(fp.indices)
[ 0 3 7 12 15 21 27]
>>> print(fp.to_vector(compressed=False))
[1 0 0 1 0 0 0 1 0 0 0 0 1 0 0 1 0 0 0 0 0 1 0 0 0 0 0 1 0 0 0 0]
>>> folded_fp = fp.fold(8)
>>> print(folded_fp.indices)
[0 3 4 5 7]
>>> print(folded_fp.to_vector(compressed=False))
[1 0 0 1 1 1 0 1]
"""
if new_length > self.fp_length:
error_msg = ("The new fingerprint length must be smaller than the existing fingerprint length.")
logger.exception(error_msg)
raise BitsValueError(error_msg)
if not np.log2(self.fp_length / new_length).is_integer():
error_msg = ("It is not possible to fold the current fingerprint into the informed new length. "
"The current length divided by the new one is not a power of 2 number.")
logger.exception(error_msg)
raise BitsValueError(error_msg)
folded_indices = self.indices % new_length
unfolding_map = defaultdict(set)
for k, v in sorted(zip(folded_indices, self.indices)):
unfolding_map[k].add(v)
props = dict(self.props)
if "fp_length" in props:
props["fp_length"] = new_length
new_fp = self.__class__(indices=folded_indices, fp_length=new_length,
unfolded_fp=self, unfolding_map=unfolding_map, props=props)
return new_fp
def unfold(self):
"""Unfold this fingerprint and return its parent fingerprint.
Returns
-------
: `Fingerprint`
"""
return self.unfolded_fp
def union(self, other):
"""Return the union of indices of two fingerprints.
Returns
-------
: :class:`numpy.ndarray`
Raises
------
InvalidFingerprintType
If the informed fingerprint is not an instance of `Fingerprint`.
BitsValueError
If the fingerprints have different lengths.
"""
if not isinstance(other, Fingerprint):
logger.exception("The informed fingerprint is not an instance of %s." % (other.__class__))
raise InvalidFingerprintType("The informed fingerprint is not an instance of %s." % (other.__class__))
if self.fp_length != other.fp_length:
raise BitsValueError("Fingerprints are in a different bit scale")
return np.union1d(self.indices, other.indices)
def intersection(self, other):
"""Return the intersection between indices of two fingerprints.
Returns
-------
: :class:`numpy.ndarray`
Raises
------
InvalidFingerprintType
If the informed fingerprint is not an instance of `Fingerprint`.
BitsValueError
If the fingerprints have different lengths.
"""
if not isinstance(other, Fingerprint):
logger.exception("Informed fingerprint is not an instance of %s." % (other.__class__))
raise InvalidFingerprintType("Informed fingerprint is not an instance of %s." % (other.__class__))
if self.fp_length != other.fp_length:
raise BitsValueError("Fingerprints are in a different bit scale")
return np.intersect1d(self.indices, other.indices, assume_unique=True)
def difference(self, other):
"""Return indices in this fingerprint but not in ``other``.
Returns
-------
: :class:`numpy.ndarray`
Raises
------
InvalidFingerprintType
If the informed fingerprint is not an instance of `Fingerprint`.
BitsValueError
If the fingerprints have different lengths.
"""
if not isinstance(other, Fingerprint):
logger.exception("Informed fingerprint is not an instance of %s." % (other.__class__))
raise InvalidFingerprintType("Informed fingerprint is not an instance of %s." % (other.__class__))
if self.fp_length != other.fp_length:
raise BitsValueError("Fingerprints are in a different bit scale")
return np.setdiff1d(self.indices, other.indices, assume_unique=True)
def symmetric_difference(self, other):
"""Return indices in either this fingerprint or ``other`` but not both.
Returns
-------
: :class:`numpy.ndarray`
Raises
------
InvalidFingerprintType
If the informed fingerprint is not an instance of `Fingerprint`.
BitsValueError
If the fingerprints have different lengths.
"""
if not isinstance(other, Fingerprint):
logger.exception("Informed fingerprint is not an instance of %s." % (other.__class__))
raise InvalidFingerprintType("Informed fingerprint is not an instance of %s." % (other.__class__))
if self.fp_length != other.fp_length:
raise BitsValueError("Fingerprints are in a different bit scale")
return np.setxor1d(self.indices, other.indices, assume_unique=True)
def calc_similarity(self, other):
"""Calculates the Tanimoto similarity between this fingeprint and ``other``.
Returns
-------
: float
Examples
--------
>>> from luna.interaction.fp.fingerprint import Fingerprint
>>> fp1 = Fingerprint.from_bit_string("0010101110000010")
>>> fp2 = Fingerprint.from_bit_string("1010100110010010")
>>> print(fp1.calc_similarity(fp2))
0.625
"""
return DataStructs.FingerprintSimilarity(self.to_rdkit(), other.to_rdkit())
def __repr__(self):
return ("<%s: indices=%s length=%d>" %
(self.__class__, repr(self.indices).replace('\n', '').replace(' ', ''), self.fp_length))
def __eq__(self, other):
if isinstance(other, Fingerprint):
return (self.__class__ == other.__class__
and self.fp_length == other.fp_length
and self.bit_count == other.bit_count
and np.all(np.in1d(self.indices, other.indices, assume_unique=True)))
return False
def __ne__(self, other):
return not self.__eq__(other)
def __or__(self, other):
return self.union(other)
def __and__(self, other):
return self.intersection(other)
def __sub__(self, other):
return self.difference(other)
def __xor__(self, other):
return self.symmetric_difference(other)
class CountFingerprint(Fingerprint):
"""A fingerprint that stores the number of occurrences of each index.
Parameters
----------
indices : array_like of int, optional
Indices of "on" bits. It is optional if ``counts`` is provided.
counts : dict, optional
Mapping between each index in ``indices`` to the number of counts.
If not provided, the default count value of 1 will be used instead.
fp_length : int
The fingerprint length (total number of bits). The default value is :math:`2^{32}`.
unfolded_fp : `Fingerprint` or None
The unfolded version of this fingerprint.
If None, this fingerprint may have not been folded yet.
unfolding_map : dict, optional
A mapping between current indices and indices from the unfolded version of this fingerprint,
which makes it possible to trace folded bits back to the original shells (features).
props: dict, optional
Custom properties of the fingerprint, consisting of a string keyword and
some value. It can be used, for instance, to save the ligand name
and parameters used to generate shells (IFP features).
"""
def __init__(self, indices=None, counts=None, fp_length=DEFAULT_FP_LENGTH,
unfolded_fp=None, unfolding_map=None, props=None):
if indices is None and counts is None:
logger.exception("Indices or counts must be provided.")
raise IllegalArgumentError("Indices or counts must be provided.")
if indices is not None:
indices = np.asarray(indices, dtype=np.long)
if np.any(np.logical_or(indices < 0, indices >= fp_length)):
logger.exception("Provided indices are in a different bit scale.")
raise BitsValueError("Provided indices are in a different bit scale.")
if counts is None:
indices, counts = np.unique(indices, return_counts=True)
counts = dict(zip(indices, counts))
else:
indices = np.unique(indices)
if not np.all([x in indices for x in counts]):
logger.exception("At least one index from 'counts' is not in 'indices'.")
raise FingerprintCountsError("At least one index from 'counts' is not in 'indices'.")
if len(set(indices).symmetric_difference(counts)) > 0:
logger.exception("At least one index in 'indices' is not in 'counts'.")
raise FingerprintCountsError("At least one index in 'indices' is not in 'counts'.")
else:
indices = np.asarray(sorted(counts.keys()), dtype=np.long)
if np.any(np.logical_or(indices < 0, indices >= fp_length)):
logger.exception("Provided indices are in a different bit scale.")
raise BitsValueError("Provided indices are in a different bit scale.")
self._counts = counts
super().__init__(indices, fp_length, unfolded_fp, unfolding_map, props)
@classmethod
def from_indices(cls, indices=None, counts=None, fp_length=DEFAULT_FP_LENGTH, **kwargs):
"""Initialize from an array of indices.
Parameters
----------
indices : array_like of int, optional
Indices of "on" bits. It is optional if ``counts`` is provided.
counts : dict, optional
Mapping between each index in ``indices`` to the number of counts.
If not provided, the default count value of 1 will be used instead.
fp_length : int
The fingerprint length (total number of bits). The default value is :math:`2^{32}`.
**kwargs : dict, optional
Extra arguments to `CountFingerprint`. Refer to the documentation for a
list of all possible arguments.
Returns
-------
: `CountFingerprint`
Examples
--------
>>> from luna.interaction.fp.fingerprint import CountFingerprint
>>> import numpy as np
>>> np.random.seed(0)
>>> on_bits = 8
>>> fp_length = 32
>>> indices, counts = np.unique(np.random.randint(0, fp_length, on_bits), return_counts=True)
>>> counts = dict(zip(indices, counts))
>>> print(counts)
{0: 1, 3: 2, 7: 1, 12: 1, 15: 1, 21: 1, 27: 1}
>>> fp = CountFingerprint.from_indices(indices, counts=counts, fp_length=fp_length)
>>> print(fp.indices)
[ 0 3 7 12 15 21 27]
>>> print(fp.to_vector(compressed=False))
[1 0 0 2 0 0 0 1 0 0 0 0 1 0 0 1 0 0 0 0 0 1 0 0 0 0 0 1 0 0 0 0]
"""
return cls(indices=indices, counts=counts, fp_length=fp_length, **kwargs)
@classmethod
def from_counts(cls, counts, fp_length=DEFAULT_FP_LENGTH, **kwargs):
"""Initialize from a counting map.
Parameters
----------
counts : dict
Mapping between each index in ``indices`` to the number of counts.
fp_length : int
The fingerprint length (total number of bits). The default value is :math:`2^{32}`.
**kwargs : dict, optional
Extra arguments to `CountFingerprint`. Refer to the documentation for a
list of all possible arguments.
Returns
-------
: `CountFingerprint`
Examples
--------
>>> from luna.interaction.fp.fingerprint import CountFingerprint
>>> import numpy as np
>>> np.random.seed(0)
>>> on_bits = 8
>>> fp_length = 32
>>> counts = dict(zip(*np.unique(np.random.randint(0, fp_length, on_bits),
... return_counts=True)))
>>> print(counts)
{0: 1, 3: 2, 7: 1, 12: 1, 15: 1, 21: 1, 27: 1}
>>> fp = CountFingerprint.from_counts(counts=counts, fp_length=fp_length)
>>> print(fp.indices)
[ 0 3 7 12 15 21 27]
>>> print(fp.to_vector(compressed=False))
[1 0 0 2 0 0 0 1 0 0 0 0 1 0 0 1 0 0 0 0 0 1 0 0 0 0 0 1 0 0 0 0]
"""
return cls(counts=counts, fp_length=fp_length, **kwargs)
@classmethod
def from_bit_string(cls, bit_string, counts=None, fp_length=None, **kwargs):
"""Initialize from a bit string (e.g. '0010100110').
Parameters
----------
bit_string : str
String of 0s and 1s.
counts : dict, optional
Mapping between each index in ``indices`` to the number of counts.
If not provided, the default count value of 1 will be used instead.
fp_length : int, optional
The fingerprint length (total number of bits).
If not provided, the fingerprint length will be defined based on the string length.
**kwargs : dict, optional
Extra arguments to `Fingerprint`. Refer to the documentation for a
list of all possible arguments.
Returns
-------
: `CountFingerprint`
Examples
--------
>>> from luna.interaction.fp.fingerprint import CountFingerprint
>>> fp = CountFingerprint.from_bit_string("0010100110000010",
... counts={2: 5, 4: 1, 7: 3, 8: 1, 14: 2})
>>> print(fp.indices)
[ 2 4 7 8 14]
>>> print(fp.counts)
{2: 5, 4: 1, 7: 3, 8: 1, 14: 2}
"""
indices = [i for i, char in enumerate(bit_string) if char != '0']
if fp_length is None:
fp_length = len(bit_string)
return cls.from_indices(indices, counts, fp_length, **kwargs)
@classmethod
def from_vector(cls, vector, fp_length=None, **kwargs):
"""Initialize from a vector.
Parameters
----------
vector : :class:`numpy.ndarray` or :class:`scipy.sparse.csr_matrix`
Array of counts.
fp_length : int, optional
The fingerprint length (total number of bits).
If not provided, the fingerprint length will be defined based on the ``vector`` shape.
**kwargs : dict, optional
Extra arguments to `Fingerprint`. Refer to the documentation for a
list of all possible arguments.
Returns
-------
: `CountFingerprint`
Examples
--------
>>> from luna.interaction.fp.fingerprint import CountFingerprint
>>> import numpy as np
>>> np.random.seed(0)
>>> fp_length = 32
>>> vector = np.random.choice(5, size=(fp_length,), p=[0.76, 0.1, 0.1, 0.02, 0.02])
>>> print(vector)
[0 0 0 0 2 3 0 1 0 0 2 0 0 0 1 1 2 3 1 0 1 0 0 0 2 0 0 0 1 0 0 0]
>>> fp = CountFingerprint.from_vector(vector)
>>> print(fp.indices)
[ 4 5 7 10 14 15 16 17 18 20 24 28]
>>> print(fp.counts)
{4: 2, 5: 3, 7: 1, 10: 2, 14: 1, 15: 1, 16: 2, 17: 3, 18: 1, 20: 1, 24: 2, 28: 1}
"""
if fp_length is None:
try:
fp_length = vector.shape[1]
except IndexError:
fp_length = vector.shape[0]
if issparse(vector):
indices = vector.indices.astype(np.long)
counts = vector.data
else:
indices = np.asarray(np.where(vector), dtype=np.long).flatten()
counts = vector[indices]
counts = dict(zip(indices, counts))
return cls.from_indices(indices, counts, fp_length, **kwargs)
@classmethod
def from_fingerprint(cls, fp, **kwargs):
"""Initialize from an existing fingerprint.
Parameters
----------
fp : `Fingerprint`
An existing fingerprint.
**kwargs : dict, optional
Extra arguments to `Fingerprint`. Refer to the documentation for a
list of all possible arguments.
Returns
-------
: `CountFingerprint`
"""
if not isinstance(fp, Fingerprint):
logger.exception("Informed fingerprint is not an instance of %s." % (cls.__class__))
raise InvalidFingerprintType("Informed fingerprint is not an instance of %s." % (cls.__class__))
counts = dict([(i, c) for i, c in fp.counts.items() if c > 0])
unfolded_fp = fp.__class__.from_fingerprint(fp.unfolded_fp) if fp.unfolded_fp is not None else None
unfolding_map = dict(fp.unfolding_map)
props = dict(fp.props)
new_fp = cls.from_counts(counts, fp.fp_length, unfolded_fp=unfolded_fp,
unfolding_map=unfolding_map, props=props)
return new_fp
@property
def counts(self):
"""dict, read-only: Mapping between each index in ``indices`` to the number of counts."""
return self._counts
def get_count(self, index):
"""Get the count value at index ``index``. Return 0 if index is not in ``counts``."""
return self.counts.get(index, 0)
def fold(self, new_length=DEFAULT_FOLDED_FP_LENGTH):
"""Fold this fingerprint to size ``new_length``.
Parameters
----------
new_length : int
Length of the new fingerprint, ideally multiple of 2. The default value is 4096.
Returns
-------
: `Fingerprint`
Folded `Fingerprint`.
Raises
------
BitsValueError
If the new fingerprint length is not a multiple of 2 or is greater than the existing fingerprint length.
Examples
--------
>>> from luna.interaction.fp.fingerprint import CountFingerprint
>>> import numpy as np
>>> np.random.seed(0)
>>> on_bits = 8
>>> fp_length = 32
>>> indices, counts = np.unique(np.random.randint(0, fp_length, on_bits), return_counts=True)
>>> counts = dict(zip(indices, counts))
>>> print(counts)
{0: 1, 3: 2, 7: 1, 12: 1, 15: 1, 21: 1, 27: 1}
>>> fp = CountFingerprint.from_indices(indices, counts=counts, fp_length=fp_length)
>>> print(fp.indices)
[ 0 3 7 12 15 21 27]
>>> print(fp.to_vector(compressed=False))
[1 0 0 2 0 0 0 1 0 0 0 0 1 0 0 1 0 0 0 0 0 1 0 0 0 0 0 1 0 0 0 0]
>>> folded_fp = fp.fold(8)
>>> print(folded_fp.indices)
[0 3 4 5 7]
>>> print(folded_fp.to_vector(compressed=False))
[1 0 0 3 1 1 0 2]
"""
new_fp = super().fold(new_length)
new_fp._counts = dict([(folded_idx, sum([self.get_count(x) for x in unfolded_set]))
for folded_idx, unfolded_set in new_fp.unfolding_map.items()])
return new_fp
def __repr__(self):
return ("<%s: counts={%s} length=%d>" %
(self.__class__, tuple([(k, v) for k, v in self.counts.items()]), self.fp_length))
def __eq__(self, other):
if isinstance(other, Fingerprint):
return (self.__class__ == other.__class__
and self.counts == other.counts
and self.fp_length == other.fp_length
and np.all(np.in1d(self.indices, other.indices, assume_unique=True)))
return False
| 2.140625
| 2
|
testing/main.py
|
igit-cn/edgex-ui-go
| 0
|
12779212
|
from pages.driver import Driver
from pages.login import LoginPage
from pages.addNewDevice import AddNewDevice
from pages.devicesvc import DeviceService
from pages.appsvc import AppService
from pages.scheduler import Scheduler
from pages.notification import Notification
from pages.config import Config
import time
if __name__ == '__main__':
print('EdgeX GUI AUTO TESTING Starting...')
# driver = Driver()
# time.sleep(2)
lp = LoginPage()
lp.login()
time.sleep(2)
ad = AddNewDevice(lp.getDriver())
ad.addNewDevice()
# time.sleep(1)
# updateSvc = DeviceService(lp.getDriver())
# updateSvc.updateSvc()
# time.sleep(1)
# ap = AppService(lp.getDriver())
# ap.appSvcUpddate()
# time.sleep(1)
# sc = Scheduler(lp.getDriver())
# sc.addIntervalAndAction()
# time.sleep(1)
# noti = Notification(lp.getDriver())
# noti.addSub()
time.sleep(2)
lp.getDriver().quit()
| 2.0625
| 2
|
depthy/stereo/feature_methods.py
|
mfkiwl/depthy
| 14
|
12779213
|
<reponame>mfkiwl/depthy
import sys
import time as t
import numpy as np
from depthy.misc import Normalizer
def compute_census(img_l: np.ndarray = None, img_r: np.ndarray = None, offset: int = 7) -> (np.ndarray, np.ndarray):
"""
Census feature extraction (for more details see https://en.wikipedia.org/wiki/Census_transform)
:param img_l: left image
:param img_r: right image
:param offset: pixel offset on the four image borders
:return: lcensus_values, rcensus_values
"""
h, w, c = img_l.shape if len(img_l.shape) == 3 else img_l.shape + (1,)
# convert to float
img_l, img_r = Normalizer(img_l).norm_fun(), Normalizer(img_r).norm_fun()
lcensus_values = np.zeros(shape=(h, w), dtype=np.uint64)
rcensus_values = np.zeros(shape=(h, w), dtype=np.uint64)
print('\tLeft and right census...', end='')
sys.stdout.flush()
dawn = t.time()
# exclude pixels on the border (they will have no census values)
for y in range(offset, h-offset):
for x in range(offset, w-offset):
# extract left block region and subtract current pixel intensity as offset from it
image = img_l[y - offset:y + offset + 1, x - offset:x + offset + 1]
roi_offset = image - img_l[y, x]
# census calculation left image
lcensus_values[y, x] = vectorized_census(roi_offset)
# extract right block region and subtract current pixel intensity as offset from it
image = img_r[y - offset:y + offset + 1, x - offset:x + offset + 1]
roi_offset = image - img_r[y, x]
# census calculation right image
rcensus_values[y, x] = vectorized_census(roi_offset)
dusk = t.time()
print('\t(done in {:.2f}s)'.format(dusk - dawn))
return lcensus_values, rcensus_values
def vectorized_census(roi: np.ndarray = None) -> int:
"""
Compute census in a numpy-vectorized fashion.
:param roi: Region of Interest (RoI)
:return: census value
"""
if len(roi.shape) != 2:
raise Exception('Data must be 2-dimensional')
# binary census vector
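# roi already had the centre pixel subtracted, so roi < 0 marks neighbours darker than the centre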
b = np.array(roi < 0).flatten()
# remove central value
central_idx = (roi.shape[0]*roi.shape[1])//2
b = np.delete(b, central_idx)
# convert binary vector to integer
num = b.dot(1 << np.arange(b.size)[::-1])
return num
| 2.671875
| 3
|
Giveme5W1H/extractor/extractors/action_extractor.py
|
bkrrr/Giveme5W
| 410
|
12779214
|
<reponame>bkrrr/Giveme5W
import re
from nltk.tree import ParentedTree
from Giveme5W1H.extractor.candidate import Candidate
from Giveme5W1H.extractor.extractors.abs_extractor import AbsExtractor
class ActionExtractor(AbsExtractor):
"""
The ActionExtractor tries to extract the main actor and his action.
"""
def __init__(self, weights: (float, float, float) = (0.9, 0.095, 0.005), minimal_length_of_tokens: int = 3):
self._minimal_length_of_tokens = minimal_length_of_tokens
# weights used in the candidate evaluation:
# (position, frequency, named entity)
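# With the defaults, sentence position dominates (0.9), followed by mention frequency (0.095) and named-entity presence (0.005); scores are normalised by the sum of the weights in _evaluate_candidates.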
self.weights = weights
def _extract_candidates(self, document):
"""
Extracts possible agents/actions pairs from a given document.
Candidates are chosen if they belong to a coref-chain and are part of an NP-VP-NP pattern
:param document: The Document to be analyzed.
:type document: Document
:return: A List of Tuples containing all agents, actions and their position in the document.
"""
# retrieve results from preprocessing
corefs = document.get_corefs()
trees = document.get_trees()
candidates = []
for cluster in corefs:
for mention in corefs[cluster]:
# Check if mention is the subject of the sentence by matching the NP-VP-NP pattern.
#
# "One common way of defining the subject of a sentence S in English is as the noun phrase that is the
# child of S and the sibling of VP" (http://www.nltk.org/book/ch08.html)
for pattern in self._evaluate_tree(trees[mention['sentNum'] - 1]):
np_string = ''.join([p[0]['nlpToken']['originalText'] for p in pattern[0]])
if re.sub(r'\s+', '', mention['text']) in np_string:
candidate_object = Candidate()
candidate_object.set_sentence_index(pattern[2])
candidate_object.set_raw([pattern[0], pattern[1], cluster, mention['id']])
candidates.append(candidate_object)
document.set_candidates(self.get_id(), candidates)
def _evaluate_tree(self, sentence_root):
"""
Examines the passed syntactic tree to determine if it matches a NP-VP-NP pattern
This is executed per sentence
:param sentence_root: A tree to be analyzed
:type sentence_root: ParentedTree
:return: A list of Tuples containing the agent and the action described in the sentence.
"""
candidates = []
for subtree in sentence_root.subtrees():
if subtree.label() == 'NP' and subtree.parent().label() == 'S':
# Skip NPs containing a VP
if any(list(subtree.subtrees(filter=lambda t: t.label() == 'VP'))):
continue
# check siblings for VP
sibling = subtree.right_sibling()
while sibling is not None:
if sibling.label() == 'VP':
# this gives a tuple to find the way from sentence to leaf
# tree_position = subtree.leaf_treeposition(0)
entry = [subtree.pos(), self.cut_what(sibling, self._minimal_length_of_tokens).pos(),
sentence_root.stanfordCoreNLPResult['index']]
candidates.append(entry)
break
sibling = sibling.right_sibling()
return candidates
def _evaluate_candidates(self, document):
"""
Calculate a confidence score based on number of mentions, position in text and entailment of named entities
for extracted candidates.
:param document: The parsed document
:type document: Document
:param candidates: Extracted candidates to evaluate.
:type candidates:[([(String,String)], ([(String,String)])]
:return: A list of evaluated and ranked candidates
"""
ranked_candidates = []
doc_len = document.get_len()
doc_ner = document.get_ner()
doc_coref = document.get_corefs()
if any(doc_coref.values()):
# get length of longest coref chain for normalization
max_len = len(max(doc_coref.values(), key=len))
else:
max_len = 1
for candidate in document.get_candidates(self.get_id()):
candidateParts = candidate.get_raw()
verb = candidateParts[1][0][0]['nlpToken']['originalText'].lower()
# VPs beginning with say/said often contain no relevant action and are therefore skipped.
if verb.startswith('say') or verb.startswith('said'):
continue
coref_chain = doc_coref[candidateParts[2]]
# first parameter used for ranking is the number of mentions, we use the length of the coref chain
score = (len(coref_chain) / max_len) * self.weights[1]
representative = None
contains_ne = False
mention_type = ''
for mention in coref_chain:
if mention['id'] == candidateParts[3]:
mention_type = mention['type']
if mention['sentNum'] < doc_len:
# The position (sentence number) is another important parameter for scoring.
# This is inspired by the inverted pyramid.
score += ((doc_len - mention['sentNum'] + 1) / doc_len) * self.weights[0]
if mention['isRepresentativeMention']:
# The representative name for this chain has been found.
tmp = document._sentences[mention['sentNum'] - 1]['tokens'][mention['headIndex'] - 1]
representative = ((tmp['originalText'], tmp), tmp['pos'])
try:
# this doesn't work if some special characters are present
if representative[-1][1] == 'POS':
representative = representative[:-1]
except IndexError:
pass
if not contains_ne:
# If the current mention doesn't contain a named entity, check the other members of the chain
for token in doc_ner[mention['sentNum'] - 1][mention['headIndex'] - 1:mention['endIndex'] - 1]:
if token[1] in ['PERSON', 'ORGANIZATION', 'LOCATION']:
contains_ne = True
break
if contains_ne:
# the last important parameter is the entailment of a named entity
score += self.weights[2]
if score > 0:
# normalize the scoring
score /= sum(self.weights)
if mention_type == 'PRONOMINAL':
# use representing mention if the agent is only a pronoun
rp_format_fix = [(({'nlpToken': representative[0][1]}, representative[0][1]['pos']))]
ranked_candidates.append((rp_format_fix, candidateParts[1], score, candidate.get_sentence_index()))
else:
ranked_candidates.append((candidateParts[0], candidateParts[1], score, candidate.get_sentence_index()))
# split results
who = [(c[0], c[2], c[3]) for c in ranked_candidates]
what = [(c[1], c[2], c[3]) for c in ranked_candidates]
# Transform who to object oriented list
o_who = self._filterAndConvertToObjectOrientedList(who)
# Filter by text
o_who_clean = self._filter_candidate_dublicates(o_who)
document.set_answer('who', o_who_clean)
# Transform who to object oriented list
o_what = self._filterAndConvertToObjectOrientedList(what)
# Filter by text
o_what_clean = self._filter_candidate_dublicates(o_what)
document.set_answer('what', o_what_clean)
def _filterAndConvertToObjectOrientedList(self, candidate_list):
max = 0
candidates = self._filter_duplicates(candidate_list)
for candidate in candidates:
if candidate.get_score() > max:
max = candidate.get_score()
# normalize
for candidate in candidates:
score = candidate.get_score()
candidate.set_score(score / max)
# sort
candidates.sort(key=lambda x: x.get_score(), reverse=True)
return candidates
def cut_what(self, tree, min_length=0, length=0):
"""
This function is used to shorten verb phrases; it recursively traverses the parse tree depth-first.
:param tree: Tree to cut
:type tree: ParentedTree
:param min_length: Desired minimal length of tokens
:type min_length: Integer
:param length: Number of tokens already included by the upper level function
:type length: Integer
:return: A subtree
"""
if type(tree[0]) is not ParentedTree:
# we found a leaf
return ParentedTree(tree.label(), [tree[0]])
else:
children = []
for sub in tree:
child = self.cut_what(sub, min_length, length)
length += len(child.leaves())
children.append(child)
if sub.label() == 'NP':
sibling = sub.right_sibling()
if length < min_length and sibling is not None and sibling.label() == 'PP':
children.append(sibling.copy(deep=True))
break
return ParentedTree(tree.label(), children)
| 2.875
| 3
|
src/mcedit2/util/settings.py
|
elcarrion06/mcedit2
| 673
|
12779215
|
<gh_stars>100-1000
"""
settings
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import json
import os
from PySide import QtCore
import logging
from mcedit2.util import directories
log = logging.getLogger(__name__)
_settings = None
def Settings():
global _settings
if _settings is None:
_settings = MCESettings()
return _settings
class MCESettingsOption(QtCore.QObject):
def __init__(self, settings, key, valueType=None, default=None, *args, **kwargs):
super(MCESettingsOption, self).__init__(*args, **kwargs)
self.settings = settings
self.key = key
self.valueType = valueType
self.default = default
def value(self, default=None):
if default is None:
default = self.default
if self.valueType == "json":
value = self.settings.jsonValue(self.key, default)
else:
value = self.settings.value(self.key, default)
if self.valueType is bool:
if isinstance(value, basestring):
value = value.lower() == "true"
elif self.valueType:
value = self.valueType(value)
return value
def setValue(self, value):
if self.valueType == "json":
return self.settings.setJsonValue(self.key, value)
else:
return self.settings.setValue(self.key, value)
valueChanged = QtCore.Signal(object)
def jsonValue(self, default=None):
return self.settings.jsonValue(self.key, default)
def setJsonValue(self, value):
return self.settings.setJsonValue(self.key, value)
def connectAndCall(self, callback):
"""
Connect `callback` to this option's `valueChanged` signal, then call it with the value of this option.
:param callback:
:type callback:
:return:
:rtype:
"""
self.valueChanged.connect(callback)
callback(self.value())
class MCESettingsNamespace(object):
def __init__(self, rootSettings, prefix):
self.rootSettings = rootSettings
if not prefix.endswith("/"):
prefix = prefix + "/"
self.prefix = prefix
def getOption(self, key, type=None, default=None):
"""
Parameters
----------
key: str
type: bool | int | float | str
default: Any
Returns
-------
option: MCESettingsOption
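Example (hypothetical keys and callback)::
opt = Settings().getNamespace('blockview').getOption('showGrid', bool, True)
opt.connectAndCall(on_show_grid_changed)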
"""
return self.rootSettings.getOption(self.prefix + key, type, default)
class MCESettings(QtCore.QSettings):
def __init__(self, *args, **kwargs):
"""
Subclass of QSettings. Adds a `getOption` method which returns an individual option as its own object. Adds
one signal for each setting, emitted when its value is changed. Also provides json encoded methods to work
around a bug in PySide.
QSettings, under PySide, does not reliably infer that a settings value should be read as a QStringList.
jsonValue and setJsonValue methods are provided that will automatically encode/decode the given value to or from json
:rtype: MCESettings
"""
dataDir = directories.getUserFilesDirectory()
iniPath = os.path.join(dataDir, "mcedit2.ini")
log.info("Loading app settings from %s", iniPath)
super(MCESettings, self).__init__(iniPath, QtCore.QSettings.IniFormat, *args,
**kwargs)
self.options = {}
#= defaultdict(lambda: QtCore.Signal(object))
def getNamespace(self, prefix):
"""
Return an MCESettingsNamespace object which can be used to access settings whose keys are all prefixed by
the given prefix
:param prefix:
:type prefix:
:return:
:rtype:
"""
return MCESettingsNamespace(self, prefix)
def getSignal(self, key):
"""
Returns a signal to be triggered when the setting `key` is changed.
The signal handler receives one argument: the setting's new value.
:param key: Settings key
:type key: str
:rtype: None
"""
return self.getOption(key).valueChanged
def emitSignal(self, key, val):
option = self.options.get(key)
if option:
option.valueChanged.emit(val)
def setValue(self, key, val):
old = self.value(key)
if old != val:
log.info("Setting %r changed to (%.40r)(...) (was (%.40r)(...))", key, val, old)
super(MCESettings, self).setValue(key, val)
self.emitSignal(key, val)
def jsonValue(self, key, default=None):
value = self.value(key, None)
if value is not None:
try:
return json.loads(value)
except ValueError as e: # No JSON object could be decoded
log.error("Failed to decode setting %s: %s", key, e)
return default
else:
return default
def setJsonValue(self, key, value):
self.setValue(key, json.dumps(value))
def getOption(self, key, type=None, default=None):
"""
Return an object that represents the setting at 'key'. The object may be used to get and set the value and
get the value's valueChanged signal. Among other uses, the object's setValue attribute may be connected to the
valueChanged signal of an input field.
:param key:
:type key:
:return:
:rtype:
"""
option = self.options.get(key)
if option:
return option
option = MCESettingsOption(self, key, type, default)
self.options[key] = option
return option
| 2.21875
| 2
|
measurements/signals.py
|
nat64check/zaphod_backend
| 1
|
12779216
|
<reponame>nat64check/zaphod_backend<filename>measurements/signals.py
# ••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••
# Copyright (c) 2018, <NAME>. This software is licensed under the BSD
# 3-Clause License. Please see the LICENSE file in the project root directory.
# ••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••
from datetime import timedelta
from django.db.models.signals import post_save
from django.dispatch import receiver
from django.utils import timezone
from measurements.models import InstanceRun, InstanceRunResult, TestRun
from measurements.tasks import delegate_to_trillian
# noinspection PyUnusedLocal
@receiver(post_save, sender=InstanceRun, dispatch_uid='schedule_push')
def schedule_push(instance: InstanceRun, **kwargs):
# Schedule push to Trillian in the spooler
if instance.trillian_url or instance.finished:
return
delegate_to_trillian(instance.pk)
# noinspection PyUnusedLocal
@receiver(post_save, sender=InstanceRunResult, dispatch_uid='schedule_instancerunresult_analysis')
def schedule_instancerunresult_analysis(sender, instance: InstanceRunResult, **kwargs):
# When finished has changed, or if it hasn't been analysed after X minutes
if instance.tracker.has_changed('when') or instance.when < timezone.now() - timedelta(minutes=5):
instance.trigger_analysis()
# noinspection PyUnusedLocal
@receiver(post_save, sender=InstanceRun, dispatch_uid='schedule_instancerun_analysis')
def schedule_instancerun_analysis(sender, instance: InstanceRun, **kwargs):
if instance.tracker.has_changed('finished') or \
(instance.finished and instance.finished < timezone.now() - timedelta(minutes=5)):
instance.trigger_analysis()
# noinspection PyUnusedLocal
@receiver(post_save, sender=InstanceRun, dispatch_uid='schedule_instancerun_cleanup')
def schedule_instancerun_cleanup(sender, instance: InstanceRun, **kwargs):
if instance.tracker.has_changed('analysed') and instance.analysed:
instance.trigger_cleanup()
# noinspection PyUnusedLocal
@receiver(post_save, sender=TestRun, dispatch_uid='schedule_testrun_analysis')
def schedule_testrun_analysis(sender, instance: TestRun, **kwargs):
if instance.tracker.has_changed('finished') or \
(instance.finished and instance.finished < timezone.now() - timedelta(minutes=5)):
instance.trigger_analysis()
# noinspection PyUnusedLocal
@receiver(post_save, sender=InstanceRun, dispatch_uid='update_testrun_from_instancerun')
def update_testrun_from_instancerun(sender, instance: InstanceRun, **kwargs):
updated = []
if instance.started and (not instance.testrun.started or instance.testrun.started > instance.started):
instance.testrun.started = instance.started
updated.append('started')
finished = list(instance.testrun.instanceruns.values_list('finished', flat=True))
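# all(finished) is falsy while any instance run still has finished=None, so the test run only gets a finished timestamp once every instance run has one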
if all(finished):
instance.testrun.finished = max(finished)
updated.append('finished')
if updated:
instance.testrun.save(update_fields=updated)
| 1.835938
| 2
|
rhcephpkg/localbuild.py
|
red-hat-storage/rhcephpkg
| 2
|
12779217
|
<gh_stars>1-10
import math
from multiprocessing import cpu_count
import os
import re
import subprocess
from tambo import Transport
import rhcephpkg.log as log
import rhcephpkg.util as util
def setup_pbuilder_cache(pbuilder_cache, distro):
# Delete existing cache file if it is bogus (zero-length).
if os.path.isfile(pbuilder_cache):
if os.stat(pbuilder_cache).st_size == 0:
log.info('deleting 0 length %s', pbuilder_cache)
cmd = ['sudo', 'rm', pbuilder_cache]
subprocess.check_call(cmd)
# Set up the cache if it does not exist.
if not os.path.isfile(pbuilder_cache):
log.info('initializing pbuilder cache %s', pbuilder_cache)
cmd = ['sudo', 'pbuilder', 'create', '--debootstrapopts',
'--variant=buildd', '--basetgz', pbuilder_cache,
'--distribution', distro]
subprocess.check_call(cmd)
def get_distro():
"""
Automatically determine the distro to use, based on the dist-git branch
name.
"""
branch = util.current_branch()
branch = re.sub('^private-[^-]+-', '', branch)
parts = branch.split('-') # ['ceph', '3.0', 'ubuntu']
try:
distro = parts[2]
except IndexError:
log.error('could not parse distro from dist-git branch name "%s"' % branch)
log.error('try explicitly specifying a distro with --dist')
raise
if distro != 'ubuntu':
return distro
if branch.startswith('ceph-1.3'):
return 'trusty'
if branch.startswith('ceph-2'):
return 'xenial'
if branch.startswith('ceph-3'):
return 'xenial'
# TODO: add Ubuntu 18.04 codename here for ceph-4 when available.
log.error('unknown default distro for dist-git branch name "%s"' % branch)
raise NotImplementedError('specify --dist')
class Localbuild(object):
help_menu = 'build a package on the local system'
_help = """
Build a package on the local system, using pbuilder.
Options:
--dist "xenial" or "trusty". If unspecified, rhcephpkg will choose one
based on the current branch's name.
Rules for automatic distro selection:
1) If the branch suffix is an ubuntu distro name, use that.
eg "ceph-3.0-xenial".
2) If a branch has a version number starting with "1.3", return "trusty".
eg. "ceph-1.3-ubuntu"
3) If a branch has a version number starting with "2" return "xenial".
eg. "ceph-2-ubuntu"
4) If a branch has a version number starting with "3" return "xenial".
eg. "ceph-3.0-ubuntu"
5) Otherwise raise, because we need to add more rules.
"""
name = 'localbuild'
def __init__(self, argv):
self.argv = argv
self.options = ('--dist',)
def main(self):
self.parser = Transport(self.argv, options=self.options)
self.parser.catch_help = self.help()
self.parser.parse_args()
# Allow user to override the distro.
if self.parser.has('--dist'):
if self.parser.get('--dist') is None:
raise SystemExit('Specify a distro to --dist')
distro = self.parser.get('--dist')
else:
distro = get_distro()
if self.parser.unknown_commands:
log.error('unknown option %s',
' '.join(self.parser.unknown_commands))
return self.parser.print_help()
self._run(distro)
def help(self):
return self._help
def _run(self, distro):
""" Build a package on the local system, using pbuilder. """
pkg_name = util.package_name()
os.environ['BUILDER'] = 'pbuilder'
j_arg = self._get_j_arg(cpu_count())
pbuilder_cache = '/var/cache/pbuilder/base-%s-amd64.tgz' % distro
setup_pbuilder_cache(pbuilder_cache, distro)
util.setup_pristine_tar_branch()
# TODO: we should also probably check parent dir for leftovers and warn
# the user to delete them (or delete them ourselves?)
cmd = ['gbp', 'buildpackage', '--git-dist=%s' % distro,
'--git-arch=amd64', '--git-verbose', '--git-pbuilder', j_arg,
'-us', '-uc']
log.info('building %s with pbuilder', pkg_name)
subprocess.check_call(cmd)
def _get_j_arg(self, cpus, total_ram_gb=None):
"""
Returns a string like "-j4" or "-j8". j is the number of processors,
with a maximum of x, where x = TOTAL_RAM_GB / 4.
We want to use all our processors (a high "j" value), but the build
process will fail with an "out of memory" error if this j value is
too high.
An 8 GB system would have a maximum of -j2
A 16 GB system would have a maximum of -j4
A 32 GB system would have a maximum of -j8
"""
if total_ram_gb is None:
page_size = os.sysconf('SC_PAGE_SIZE')
mem_bytes = page_size * os.sysconf('SC_PHYS_PAGES')
# mem_gib is a decimal, eg. 7.707 on 8GB system
mem_gib = mem_bytes / (1024. ** 3)
# Round up to the nearest GB for our purposes.
total_ram_gb = math.ceil(mem_gib)
number = min(cpus, total_ram_gb / 4)
return '-j%d' % max(number, 1)
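# Hedged example (not part of the original module): _get_j_arg caps make
# parallelism at min(cpus, RAM_GB / 4). On a hypothetical 4-core, 8 GB machine
# that is min(4, 2) == 2, so gbp buildpackage would receive "-j2":
#   Localbuild([])._get_j_arg(cpus=4, total_ram_gb=8)  # -> '-j2'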
| 2.140625
| 2
|
Training - Testing/main_plus.py
|
beric7/material_segmentation
| 0
|
12779218
|
import torch.optim as optim
from sklearn.metrics import roc_auc_score, f1_score, jaccard_score
from model_plus import createDeepLabv3Plus
import sys
print(sys.version, sys.platform, sys.executable)
from trainer_plus import train_model
import datahandler_plus
import argparse
import os
import torch
import numpy
torch.cuda.empty_cache()
"""
Version requirements:
PyTorch Version: 1.2.0
Torchvision Version: 0.4.0a0+6b959ee
"""
parser = argparse.ArgumentParser()
parser.add_argument(
"-data_directory", help='Specify the dataset directory path')
parser.add_argument(
"-exp_directory", help='Specify the experiment directory where metrics and model weights shall be stored.')
parser.add_argument("--epochs", default=10, type=int)
parser.add_argument("--batchsize", default=2, type=int)
parser.add_argument("--output_stride", default=8, type=int)
parser.add_argument("--channels", default=4, type=int)
parser.add_argument("--pretrained", default='')
parser.add_argument("--class_weights", nargs='+', default=None)
parser.add_argument("--folder_structure", default='sep', help='sep or single')
args = parser.parse_args()
bpath = args.exp_directory
print('Export Directory: ' + bpath)
data_dir = args.data_directory
print('Data Directory: ' + data_dir)
epochs = args.epochs
print('Epochs: ' + str(epochs))
batchsize = args.batchsize
print('Batch size: ' + str(batchsize))
output_stride = args.output_stride
channels = args.channels
print('Number of classes: ' + str(channels))
class_weights = args.class_weights
print('Class weights: ' + str(class_weights))
folder_structure = args.folder_structure
print('folder structure: ' + folder_structure)
model_path = args.pretrained
print('loading pre-trained model from saved state: ' + model_path)
if not os.path.exists(bpath): # if it doesn't exist already
os.makedirs(bpath)
# Create the deeplabv3 resnet101 model which is pretrained on a subset of COCO train2017,
# on the 20 categories that are present in the Pascal VOC dataset.
if model_path != '':
try:
model = torch.load(model_path)
print('LOADED MODEL')
model.train()
except:
print('model path did not load')
model = createDeepLabv3Plus(outputchannels=channels, output_stride=output_stride)
else:
model = createDeepLabv3Plus(outputchannels=channels, output_stride=output_stride)
model.train()
# Specify the loss function
if class_weights is None:
print('class not weighted')
criterion = torch.nn.CrossEntropyLoss()
elif class_weights is not None and len(class_weights) == channels:
print('class weighted')
class_weights = numpy.array(class_weights).astype(float)
torch_class_weights = torch.FloatTensor(class_weights).cuda()
criterion = torch.nn.CrossEntropyLoss(weight=torch_class_weights)
else:
print('channels did not align with class weights - default applied')
print('class not weighted')
criterion = torch.nn.CrossEntropyLoss()
# Specify the optimizer with a lower learning rate
optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)
# Specify the evaluation metrics
metrics = {'f1_score': f1_score, 'jaccard_score': jaccard_score}
# Create the dataloader
if folder_structure == 'sep':
dataloaders = datahandler_plus.get_dataloader_sep_folder(data_dir, batch_size=batchsize)
else:
dataloaders = datahandler_plus.get_dataloader_single_folder(data_dir, batch_size=batchsize)
trained_model = train_model(model, criterion, dataloaders,
optimizer, bpath=bpath, metrics=metrics, num_epochs=epochs)
# Save the trained model
# torch.save({'model_state_dict':trained_model.state_dict()},os.path.join(bpath,'weights'))
torch.save(model, os.path.join(bpath, 'weights.pt'))
| 2.203125
| 2
|
app/celery_worker/tasks.py
|
newbieof410/dockerize-flask-celery
| 1
|
12779219
|
<gh_stars>1-10
import time
from app.celery_worker import celery
@celery.task
def long_time_task():
print('task begins')
time.sleep(10)
print('task finished')
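# Hedged usage sketch (assumes a running broker and Celery worker): the task is
# enqueued asynchronously from application code with:
#   long_time_task.delay()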
| 2.125
| 2
|
feishu/api_id.py
|
crisone/feishu-python-sdk
| 44
|
12779220
|
# coding: utf-8
from __future__ import absolute_import, division, print_function, unicode_literals
from typing import TYPE_CHECKING, Tuple
from feishu.exception import LarkInvalidArguments, OpenLarkException
if TYPE_CHECKING:
from feishu.api import OpenLark
# https://open.feishu.cn/document/ukTMukTMukTM/uIzMxEjLyMTMx4iMzETM
class APIIDMixin(object):
def email_to_id(self, email):
"""邮箱转 open_id 和 user_id
:type self: OpenLark
:param email: 用户的邮箱
:type email: str
:return: open_id, user_id
:rtype: Tuple[str, str]
根据用户邮箱获取用户 open_id 和 user_id。
user_id 需要申请 user_id 的权限才能获取到
https://open.feishu.cn/document/ukTMukTMukTM/uEDMwUjLxADM14SMwATN
"""
url = self._gen_request_url('/open-apis/user/v3/email2id')
body = {'email': email}
res = self._post(url, body, with_tenant_token=True)
open_id = res.get('open_id', '') # type: str
user_id = res.get('employee_id', '') # type: str
return open_id, user_id
def open_id_to_user_id(self, open_id):
"""open_id 转 user_id
:type self: OpenLark
:param open_id: open_id
:type open_id: str
:return: user_id
:rtype: str
"""
url = self._gen_request_url('/open-apis/exchange/v3/openid2uid/')
body = {'open_id': open_id}
res = self._post(url, body, with_tenant_token=True)
return res.get('user_id')
def user_id_to_open_id(self, user_id):
"""user_id 转 open_id
:type self: OpenLark
:param user_id: user_id
:type user_id: str
:return: open_id
:rtype: str
"""
url = self._gen_request_url('/open-apis/exchange/v3/uid2openid/')
body = {'user_id': user_id}
res = self._post(url, body, with_tenant_token=True)
return res.get('open_id')
def employee_id_to_user_id(self, employee_id):
"""employee_id 转 user_id
:type self: OpenLark
:param employee_id: employee_id
:type employee_id: str
:return: user_id
:rtype: str
"""
url = self._gen_request_url('/open-apis/exchange/v3/eid2uid/')
body = {'employee_id': employee_id}
res = self._post(url, body, with_tenant_token=True)
return res.get('user_id')
def user_id_to_employee_id(self, user_id):
"""user_id 转 employee_id
:type self: OpenLark
:param user_id: user_id
:type user_id: str
:return: employee_id
:rtype: str
"""
url = self._gen_request_url('/open-apis/exchange/v3/uid2eid/')
body = {'user_id': user_id}
res = self._post(url, body, with_tenant_token=True)
return res.get('employee_id')
def chat_id_to_open_chat_id(self, chat_id):
"""chat_id 转 open_chat_id
:type self: OpenLark
:param chat_id: chat_id
:type chat_id: str
:return: open_chat_id
:rtype: str
"""
url = self._gen_request_url('/open-apis/exchange/v3/cid2ocid/')
body = {'chat_id': chat_id}
res = self._post(url, body, with_tenant_token=True)
return res.get('open_chat_id')
def open_chat_id_to_chat_id(self, open_chat_id):
"""open_chat_id 转 chat_id
:type self: OpenLark
:param open_chat_id: open_chat_id
:type open_chat_id: str
:return: chat_id
:rtype: str
"""
url = self._gen_request_url('/open-apis/exchange/v3/ocid2cid/')
body = {'open_chat_id': open_chat_id}
res = self._post(url, body, with_tenant_token=True)
return res.get('chat_id')
def message_id_to_open_message_id(self, message_id):
"""message_id 转 open_message_id
:type self: OpenLark
:param message_id: message_id
:type message_id: str
:return: open_message_id
:rtype: str
"""
url = self._gen_request_url('/open-apis/exchange/v3/mid2omid/')
body = {'message_id': message_id}
res = self._post(url, body, with_tenant_token=True)
return res.get('open_message_id')
def open_message_id_to_message_id(self, open_message_id):
"""open_message_id 转 message_id
:type self: OpenLark
:param open_message_id: open_message_id
:type open_message_id: str
:return: message_id
:rtype: str
"""
url = self._gen_request_url('/open-apis/exchange/v3/omid2mid/')
body = {'open_message_id': open_message_id}
res = self._post(url, body, with_tenant_token=True)
return res.get('message_id')
def department_id_to_open_department_id(self, department_id):
"""department_id 转 open_department_id
:type self: OpenLark
:param department_id: department_id
:type department_id: str
:return: open_department_id
:rtype: str
"""
url = self._gen_request_url('/open-apis/exchange/v3/did2odid/')
body = {'department_id': department_id}
res = self._post(url, body, with_tenant_token=True)
return res.get('open_department_id')
def open_department_id_to_department_id(self, open_department_id):
"""open_department_id 转 department_id
:type self: OpenLark
:param open_department_id: open_department_id
:type open_department_id: str
:return: department_id
:rtype: str
"""
url = self._gen_request_url('/open-apis/exchange/v3/odid2did/')
body = {'open_department_id': open_department_id}
res = self._post(url, body, with_tenant_token=True)
return res.get('department_id')
def get_chat_id_between_user_bot(self, open_id='', user_id=''):
"""获取机器人和用户的 chat_id
:type self: OpenLark
:param open_id: open_id
:type open_id: str
:param user_id: user_id
:return: open_chat_id, chat_id
:rtype: Tuple[str, str]
https://lark-open.bytedance.net/document/ukTMukTMukTM/uYjMxEjL2ITMx4iNyETM
"""
if open_id:
url = self._gen_request_url('/open-apis/chat/v3/p2p/id?open_id={}'.format(open_id))
elif user_id:
url = self._gen_request_url('/open-apis/chat/v3/p2p/id?user_id={}'.format(user_id))
else:
raise OpenLarkException(msg='[get_chat_id_between_user_bot] empty open_id and user_id')
res = self._get(url, with_tenant_token=True)
open_chat_id = res.get('open_chat_id', '') # type: str
chat_id = res.get('chat_id', '') # type: str
return open_chat_id, chat_id
def get_chat_id_between_users(self, to_user_id,
open_id='',
user_id=''):
"""获取用户和用户的之前的 chat_id
:type self: OpenLark
:param to_user_id: 到谁的 open_id
:type to_user_id: str
:param open_id: 从谁来的 open_id
:type open_id: str
:param user_id: 从谁来的 user_id
:type user_id: str
:return: 两个人之间的 open_chat_id, chat_id
:rtype: Tuple[str, str]
仅头条内部用户可用 需要申请权限才能获取 @fanlv
open_id 和 user_id 传一个就行
https://lark-open.bytedance.net/document/ukTMukTMukTM/uYjMxEjL2ITMx4iNyETM
"""
if open_id:
url = self._gen_request_url('/open-apis/chat/v3/p2p/id?open_id={}&chatter={}'.format(open_id, to_user_id))
elif user_id:
url = self._gen_request_url('/open-apis/chat/v3/p2p/id?user_id={}&chatter={}'.format(user_id, to_user_id))
else:
raise LarkInvalidArguments(msg='[get_chat_id_between_users] empty open_id and user_id')
res = self._get(url, with_tenant_token=True)
open_chat_id = res.get('open_chat_id', '') # type: str
chat_id = res.get('chat_id', '') # type: str
return open_chat_id, chat_id
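# Hedged usage sketch: APIIDMixin is mixed into the OpenLark client (see
# feishu.api), so the conversions are called on a configured client instance.
# Client construction is omitted here because its arguments are not shown in
# this module:
#   open_id, user_id = lark.email_to_id('someone@example.com')
#   user_id = lark.open_id_to_user_id(open_id)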
| 2.140625
| 2
|
NewsPaperD7(final)/NewsPaper/News/migrations/0003_auto_20210415_2224.py
|
GregTMJ/django-files
| 1
|
12779221
|
<filename>NewsPaperD7(final)/NewsPaper/News/migrations/0003_auto_20210415_2224.py
# Generated by Django 3.2 on 2021-04-15 19:24
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('News', '0002_auto_20210415_2208'),
]
operations = [
migrations.RemoveField(
model_name='post',
name='category',
),
migrations.AddField(
model_name='post',
name='category',
field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to='News.category'),
preserve_default=False,
),
migrations.DeleteModel(
name='PostCategory',
),
]
| 1.632813
| 2
|
bin/Python27/Lib/site-packages/scipy/linalg/__init__.py
|
lefevre-fraser/openmeta-mms
| 0
|
12779222
|
"""
====================================
Linear algebra (:mod:`scipy.linalg`)
====================================
.. currentmodule:: scipy.linalg
Linear algebra functions.
.. seealso::
`numpy.linalg` for more linear algebra functions. Note that
although `scipy.linalg` imports most of them, identically named
functions from `scipy.linalg` may offer more or slightly differing
functionality.
Basics
======
.. autosummary::
:toctree: generated/
inv - Find the inverse of a square matrix
solve - Solve a linear system of equations
solve_banded - Solve a banded linear system
solveh_banded - Solve a Hermitian or symmetric banded system
solve_circulant - Solve a circulant system
solve_triangular - Solve a triangular matrix
solve_toeplitz - Solve a toeplitz matrix
det - Find the determinant of a square matrix
norm - Matrix and vector norm
lstsq - Solve a linear least-squares problem
pinv - Pseudo-inverse (Moore-Penrose) using lstsq
pinv2 - Pseudo-inverse using svd
pinvh - Pseudo-inverse of hermitian matrix
kron - Kronecker product of two arrays
tril - Construct a lower-triangular matrix from a given matrix
triu - Construct an upper-triangular matrix from a given matrix
orthogonal_procrustes - Solve an orthogonal Procrustes problem
LinAlgError
Eigenvalue Problems
===================
.. autosummary::
:toctree: generated/
eig - Find the eigenvalues and eigenvectors of a square matrix
eigvals - Find just the eigenvalues of a square matrix
eigh - Find the e-vals and e-vectors of a Hermitian or symmetric matrix
eigvalsh - Find just the eigenvalues of a Hermitian or symmetric matrix
eig_banded - Find the eigenvalues and eigenvectors of a banded matrix
eigvals_banded - Find just the eigenvalues of a banded matrix
Decompositions
==============
.. autosummary::
:toctree: generated/
lu - LU decomposition of a matrix
lu_factor - LU decomposition returning unordered matrix and pivots
lu_solve - Solve Ax=b using back substitution with output of lu_factor
svd - Singular value decomposition of a matrix
svdvals - Singular values of a matrix
diagsvd - Construct matrix of singular values from output of svd
orth - Construct orthonormal basis for the range of A using svd
cholesky - Cholesky decomposition of a matrix
cholesky_banded - Cholesky decomp. of a sym. or Hermitian banded matrix
cho_factor - Cholesky decomposition for use in solving a linear system
cho_solve - Solve previously factored linear system
cho_solve_banded - Solve previously factored banded linear system
polar - Compute the polar decomposition.
qr - QR decomposition of a matrix
qr_multiply - QR decomposition and multiplication by Q
qr_update - Rank k QR update
qr_delete - QR downdate on row or column deletion
qr_insert - QR update on row or column insertion
rq - RQ decomposition of a matrix
qz - QZ decomposition of a pair of matrices
ordqz - QZ decomposition of a pair of matrices with reordering
schur - Schur decomposition of a matrix
rsf2csf - Real to complex Schur form
hessenberg - Hessenberg form of a matrix
.. seealso::
`scipy.linalg.interpolative` -- Interpolative matrix decompositions
Matrix Functions
================
.. autosummary::
:toctree: generated/
expm - Matrix exponential
logm - Matrix logarithm
cosm - Matrix cosine
sinm - Matrix sine
tanm - Matrix tangent
coshm - Matrix hyperbolic cosine
sinhm - Matrix hyperbolic sine
tanhm - Matrix hyperbolic tangent
signm - Matrix sign
sqrtm - Matrix square root
funm - Evaluating an arbitrary matrix function
expm_frechet - Frechet derivative of the matrix exponential
expm_cond - Relative condition number of expm in the Frobenius norm
fractional_matrix_power - Fractional matrix power
Matrix Equation Solvers
=======================
.. autosummary::
:toctree: generated/
solve_sylvester - Solve the Sylvester matrix equation
solve_continuous_are - Solve the continuous-time algebraic Riccati equation
solve_discrete_are - Solve the discrete-time algebraic Riccati equation
solve_discrete_lyapunov - Solve the discrete-time Lyapunov equation
solve_lyapunov - Solve the (continuous-time) Lyapunov equation
Special Matrices
================
.. autosummary::
:toctree: generated/
block_diag - Construct a block diagonal matrix from submatrices
circulant - Circulant matrix
companion - Companion matrix
dft - Discrete Fourier transform matrix
hadamard - Hadamard matrix of order 2**n
hankel - Hankel matrix
helmert - Helmert matrix
hilbert - Hilbert matrix
invhilbert - Inverse Hilbert matrix
leslie - Leslie matrix
pascal - Pascal matrix
invpascal - Inverse Pascal matrix
toeplitz - Toeplitz matrix
tri - Construct a matrix filled with ones at and below a given diagonal
Low-level routines
==================
.. autosummary::
:toctree: generated/
get_blas_funcs
get_lapack_funcs
find_best_blas_type
.. seealso::
`scipy.linalg.blas` -- Low-level BLAS functions
`scipy.linalg.lapack` -- Low-level LAPACK functions
`scipy.linalg.cython_blas` -- Low-level BLAS functions for Cython
`scipy.linalg.cython_lapack` -- Low-level LAPACK functions for Cython
"""
from __future__ import division, print_function, absolute_import
from .linalg_version import linalg_version as __version__
from .misc import *
from .basic import *
from .decomp import *
from .decomp_lu import *
from .decomp_cholesky import *
from .decomp_qr import *
from ._decomp_qz import *
from .decomp_svd import *
from .decomp_schur import *
from ._decomp_polar import *
from .matfuncs import *
from .blas import *
from .lapack import *
from .special_matrices import *
from ._solvers import *
from ._procrustes import *
from ._decomp_update import *
__all__ = [s for s in dir() if not s.startswith('_')]
from numpy.dual import register_func
for k in ['norm', 'inv', 'svd', 'solve', 'det', 'eig', 'eigh', 'eigvals',
'eigvalsh', 'lstsq', 'cholesky']:
try:
register_func(k, eval(k))
except ValueError:
pass
try:
register_func('pinv', pinv2)
except ValueError:
pass
del k, register_func
from numpy.testing import Tester
test = Tester().test
bench = Tester().bench
| 2.375
| 2
|
Algorithm/BreadthFirstDirectedPaths.py
|
eroicaleo/LearningPython
| 1
|
12779223
|
<reponame>eroicaleo/LearningPython
#!/usr/bin/env python3
import math
from collections import deque
from Digraph import Digraph
class BreadthFirstDirectedPaths:
def __init__(self, G, sources):
self.marked = [0]*G.V
self.edgeTo = [0]*G.V
self.distTo = [math.inf]*G.V
self.validateVertices(sources)
self.bfs(G, sources)
def validateVertex(self, v):
V = len(self.marked)
assert 0 <= v < V, f'vertex {v} is not between 0 and {V-1}'
def validateVertices(self, sources):
if len(sources) == 0:
raise ValueError('validateVertices: sources is empty')
for v in sources:
self.validateVertex(v)
def bfs(self, G, sources):
queue = deque(sources)
for s in sources:
self.marked[s] = 1
self.distTo[s] = 0
while queue:
v = queue.popleft()
for w in G.getAdj(v):
if not self.marked[w]:
self.marked[w] = 1
self.edgeTo[w] = v
self.distTo[w] = self.distTo[v]+1
queue.append(w)
def hasPathTo(self, v):
self.validateVertex(v)
return self.marked[v]
def pathTo(self, v):
self.validateVertex(v)
if not self.hasPathTo(v):
return None
path = [v]
while self.distTo[v] > 0.0:
w = self.edgeTo[v]
path, v = path+[w], w
return path[::-1]
def getDistTo(self, v):
self.validateVertex(v)
return self.distTo[v]
if __name__ == '__main__':
# The test case can be downloaded from here
# https://algs4.cs.princeton.edu/42digraph/tinyDG.txt
# https://algs4.cs.princeton.edu/42digraph/mediumDG.txt
# https://algs4.cs.princeton.edu/42digraph/largeDG.txt
import sys
G = Digraph(sys.argv[1])
print(G)
sources = list(map(int, sys.argv[2:]))
bfs = BreadthFirstDirectedPaths(G, sources)
for v in range(G.V):
if bfs.hasPathTo(v):
print(f'{sources} to {v}: {"->".join(map(str, bfs.pathTo(v)))}')
else:
print(f'{sources} to {v}: not connected')
| 3.390625
| 3
|
Dataset/Leetcode/train/62/553.py
|
kkcookies99/UAST
| 0
|
12779224
|
<reponame>kkcookies99/UAST<filename>Dataset/Leetcode/train/62/553.py
class Solution:
def XXX(self, m: int, n: int) -> int:
ans = [[1 for _ in range(m)] for _ in range(n)]
for i in range(1,m):
for j in range(1,n):
ans[j][i] = ans[j][i-1] + ans[j-1][i]
return ans[-1][-1]
| 2.96875
| 3
|
tests/utils/singleton_provider.py
|
BoaVaga/boavaga_server
| 0
|
12779225
|
from dependency_injector.providers import Singleton
def singleton_provider(obj):
def clb():
return obj
return Singleton(clb)
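# Hedged usage sketch: the returned provider hands back the same object on
# every call.
#   cfg = object()
#   provider = singleton_provider(cfg)
#   assert provider() is provider() is cfg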
| 1.90625
| 2
|
1601-1700/1681-Minimum Incompatibility/1681-Minimum Incompatibility.py
|
jiadaizhao/LeetCode
| 49
|
12779226
|
import math
import itertools
from typing import List
class Solution:
def minimumIncompatibility(self, nums: List[int], k: int) -> int:
n = len(nums)
if k == n:
return 0
dp = [[math.inf] * n for _ in range(1 << n)]
nums.sort()
for i in range(n):
dp[1<<i][i] = 0
for mask in range(1<<n):
n_z_bits = [j for j in range(n) if mask & (1 << j)]
if len(n_z_bits) % (n // k) == 1:
for j, l in itertools.permutations(n_z_bits, 2):
dp[mask][l] = min(dp[mask][l], dp[mask ^ (1 << l)][j])
else:
for j, l in itertools.combinations(n_z_bits, 2):
if nums[j] != nums[l]:
dp[mask][j] = min(dp[mask][j], dp[mask ^ (1 << j)][l] + nums[l] - nums[j])
return min(dp[-1]) if min(dp[-1]) != math.inf else -1
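# Hedged example (standard LeetCode 1681 case): for nums = [1, 2, 1, 4] and
# k = 2, the best split is [1, 2] and [1, 4], incompatibility 1 + 3 = 4.
#   Solution().minimumIncompatibility([1, 2, 1, 4], 2)  # -> 4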
| 2.8125
| 3
|
tests/unit_tests/test_cli.py
|
JoelLefkowitz/poetry-pdf
| 1
|
12779227
|
import sys
from typing import List
from unittest.mock import patch
import pytest
from poetry_pdf.cli import parse_cli
from poetry_pdf.exceptions import InvalidCommand, InvalidSourcePath
@pytest.mark.parametrize(
"argv",
[
["poetry-pdf", "tests/fixtures/the_raven.txt"],
[
"poetry-pdf",
"tests/fixtures/the_raven.txt",
"--output-dir",
".",
],
[
"poetry-pdf",
"tests/fixtures/the_raven.txt",
"--author",
"<NAME>",
],
],
)
def test_parse_cli_valid_command(argv: List[str]) -> None:
with patch.object(sys, "argv", argv):
parse_cli()
def test_parse_cli_invalid_command() -> None:
argv = ["poetry-pdf", "tests/fixtures/the_raven.txt", "123"]
with patch.object(sys, "argv", argv), pytest.raises(
InvalidCommand
):
parse_cli()
def test_parse_cli_invalid_source() -> None:
argv = ["poetry-pdf", "tests/fixtures/not_the_raven.txt"]
with patch.object(sys, "argv", argv), pytest.raises(
InvalidSourcePath
):
parse_cli()
def test_parse_multiple_stylesheets() -> None:
argv = [
"poetry-pdf",
"tests/fixtures/the_raven.txt",
"--stylesheet",
"sheet1",
"--stylesheet",
"sheet2",
]
with patch.object(sys, "argv", argv):
stylesheets = parse_cli()[3]
assert stylesheets == ["sheet1", "sheet2"]
| 2.421875
| 2
|
intro.py
|
rmkaneko/pythonbirds-fatec
| 0
|
12779228
|
<reponame>rmkaneko/pythonbirds-fatec<filename>intro.py
def p():
"""
Essa funcao faz balh
:return: não retorna nada
"""
print('Renzo'.upper())
print(__name__)
if __name__ == '__main__':
print('Main')
def f(nome, sobrenome='<NAME>', idade=32):
return 'Olá %s %s. Minha idade é: %s' % (nome, sobrenome, idade)
print(f(idade=18, nome='Lucas'))
print(f('Renzo', '<NAME>'))
| 2.625
| 3
|
subreddit-gauge/grab.py
|
maybemaby/subreddit-gauge
| 1
|
12779229
|
#! python3
# Uses praw to pull data about subreddits.
import praw
from credentials import ( # create your own credentials file
client_id,
client_secret,
reddit_password,
useragent,
reddit_username,
)
# praw object
reddit = praw.Reddit(
client_id=client_id,
client_secret=client_secret,
user_agent=useragent,
username=reddit_username,
password=<PASSWORD>,
)
def get_subscribers(subreddit_, *args):
"""Gets current sub count for one or more subreddits.
Inputs
-------
str: Desired subreddit name(s)
Returns
-------
int: sub count or dict:{subreddit: int(sub count)}
"""
if len(args) > 0:
subreddit = reddit.subreddit(subreddit_)
subcount = {subreddit_: subreddit.subscribers}
for page in args:
subreddit = reddit.subreddit(page)
subcount[page] = subreddit.subscribers
return subcount
else:
subreddit = reddit.subreddit(subreddit_)
return subreddit.subscribers
def get_active(subreddit_, *args):
"""Gets amount of users actively on subreddit.
Inputs
-------
str: Desired subreddit name(s)
Returns
-------
int: Currently online user count or dict:{subreddit: int(active user count)}
"""
if len(args) > 0:
subreddit = reddit.subreddit(subreddit_)
usercounts = {subreddit_: subreddit.active_user_count}
for page in args:
subreddit = reddit.subreddit(page)
usercounts[page] = subreddit.active_user_count
return usercounts
else:
subreddit = reddit.subreddit(subreddit_)
subreddit._fetch()
return subreddit.active_user_count
def get_posts(subreddit_):
"""Gets amount of posts within last hour, upvotes, and number of comments.
Inputs
-----
str: Desired subreddit name.
Returns
------
dict: {'submission(id)': ['score','len(comments)']}
"""
subreddit = reddit.subreddit(subreddit_)
posts = dict()
for submission in subreddit.top(time_filter="hour", limit=None):
score = submission.score
posts[submission.id] = [score, len(submission.comments.list())]
return posts
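# Hedged usage sketch (requires valid values in credentials.py; the subreddit
# names are only examples):
#   print(get_subscribers('python', 'learnpython'))
#   print(get_active('python'))
#   print(get_posts('python'))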
| 3.078125
| 3
|
budgetee-server/src/common/helper.py
|
SPQE21-22/BSPQ22-E2
| 0
|
12779230
|
<filename>budgetee-server/src/common/helper.py
"""! @package common"""
import re
from uuid import UUID
under_pat = re.compile(r'_([a-z])')
def camelize(name):
"""! It gets a string with _ and it converts it into camelized
nombre_variable => nombreVariable
@param variable string
@return variable camelized
"""
return under_pat.sub(lambda x: x.group(1).upper(), name)
def is_valid_uuid(value, version=4):
"""! Checks if UUID is valid
@param UUID value
@return True if UUID is valid
"""
try:
uuid = UUID(str(value), version=version)
except ValueError:
return False
return str(uuid) == str(value)
def not_none(s, d):
"""! returns the not none value
@param s value 1
@param d value 2
@return not none value
"""
if s is None:
return d
return s
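# Hedged usage sketch:
#   camelize('nombre_variable')  # -> 'nombreVariable'
#   is_valid_uuid('123e4567-e89b-42d3-a456-426614174000')  # -> True
#   not_none(None, 'default')  # -> 'default'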
| 2.65625
| 3
|
Python/SCRIPT PYTHON/Media_Notas.py
|
guimaraesalves/material-python
| 0
|
12779231
|
print ('==================================')
print (' EXERCÍCIO MÉDIA DAS NOTAS ')
print ('==================================')
n1 = int (input('INFORME A PRIMEIRA NOTA: '))
n2 = int (input('INFORME A SEGUNDA NOTA: '))
n3 = int (input('INFORME A TERCEIRA NOTA: '))
n4 = int (input('INFORME A QUARTA NOTA: '))
print ('A MÉDIA DAS NOTAS É: ', ((n1 + n2 + n3 + n4) / 4))
print ('------------------------------------')
print (' METROS PARA CENTÍMETROS ')
print ('------------------------------------')
metros = int (input('Informe o valor em metros: '))
print ('O valor em centimetros é: ', metros * 100)
print ('-------------------------------------')
print (' ÁREA DA CIRCUNFERÊNCIA ')
print ('-------------------------------------')
raio = int (input ('Informe o raio da circunferência: '))
print ('A área da circunferência de raio', raio, 'é:', 2 * 3.1416 * raio**2)
| 3.796875
| 4
|
src/rstatmon/usermodel.py
|
git-ogawa/raspi-statmon
| 0
|
12779232
|
import sys
import shutil
import json
import subprocess
from typing import Union
from pathlib import Path
from jinja2 import Template
class UserModel():
"""Handles user-defined model described in python scripts.
"""
def __init__(self):
self.parent = Path(__file__).resolve().parent
self.dst = self.parent / "config/user_model/"
self.model_json = self.dst / "user_model.json"
self.user_model_key = "user_model"
self.current_model_key = "current_model"
self.models = []
self.import_models()
def register_model(self, src: str):
"""Register a specified model as new one.
Args:
src (str): The file path to register.
"""
src_abs = Path(src).resolve()
self.validate_model(src_abs)
shutil.copy(src_abs, self.dst)
self.set_current_model(src_abs)
self.update_json()
def delete_model(self, src: str):
"""Delete python file corresponding to model in the dst dir.
Args:
src (str): File name of python to delete
"""
path = self.parent / src
if path.exists():
path.unlink()
self.update_json()
def validate_model(self, path: Path) -> bool:
if path.suffix != ".py":
print(
"\033[31mThe file you're trying to add have no suffix 'py'\033[0m",
file=sys.stderr)
sys.exit(-1)
cmd = f"python {path}"
ret = subprocess.check_output(cmd.split()).decode("utf-8").strip("\n")
try:
f = float(ret)
int(f)
return True
except ValueError:
print(
"\033[31mThe type of return value isn't int or float\033[0m",
file=sys.stderr)
sys.exit(-1)
def update_json(self):
"""Update model json
Json consists of key and value. In case of "test.py" model, key and
value are as follows respectively.
test : test.py
"""
files = list(self.dst.glob("*.py"))
contens = {}
tmp = []
for i in files:
tmp.append(str(i.stem))
contens[self.user_model_key] = tmp
# load the current model in json
current = self.get_current_model()
if current:
contens["current_model"] = str(Path(current).stem)
else:
contens["current_model"] = ""
# write the registered models and current model into json
with open(str(self.model_json), "w") as f:
json.dump(contens, f, indent=4)
def set_current_model(self, model: Union[Path, str]):
if type(model) == str:
pass
else:
model = str(model.stem)
if self.model_json.exists():
with open(str(self.model_json), "r") as f:
j = json.load(f)
j[self.current_model_key] = model
with open(str(self.model_json), "w") as f:
json.dump(j, f, indent=4)
def remove_current_model(self):
if self.model_json.exists():
with open(str(self.model_json), "r") as f:
j = json.load(f)
j[self.current_model_key] = ""
with open(str(self.model_json), "w") as f:
json.dump(j, f, indent=4)
def import_models(self) -> list:
if self.model_json.exists():
with open(str(self.model_json), "r") as f:
j = json.load(f)
self.models = j[self.user_model_key]
return self.models
return []
def get_value(self):
f = self.get_current_model()
return self.execute(f)
def get_current_model(self) -> str:
"""Gets a file name corresponding to the model.
Returns:
str: The file name in the format of abs path.
"""
if self.model_json.exists():
with open(str(self.model_json), "r") as f:
j = json.load(f)
if self.current_model_key in j:
current_model = j[self.current_model_key]
if current_model:
self.py_file = self.parent / current_model
return str(self.py_file)
return None
def execute(self, pyfile: str):
"""Execute model file.
Args:
pyfile (str): The model file.
"""
if Path(pyfile).suffix != ".py":
pyfile += ".py"
cmd = f"python {pyfile}"
ret = subprocess.check_output(cmd.split()).decode("utf-8").strip("\n")
return ret
class JinjaTemplate():
def __init__(self, data: dict):
self.data = data
self.parent = Path(__file__).resolve().parent
self.model_prop = self.parent / "config/user_model/model_prop.json"
self.body = self.parent / "templates/jinja/body.html"
self.dst = self.parent / "templates/user_model.html"
self.org = self.parent / "templates/user_model.html.org"
def parse_dict(self):
dct = {}
colors = ["red", "blue", "yello", "green"]
it = iter(colors)
for key, value in self.data.items():
key_words = key.split("_")
if len(key_words) == 1 and not key_words[0] == "datasets":
dct[key_words[0]] = self.parse_int(value)
elif len(key_words) == 2:
if not key_words[0] in dct.keys():
dct[key_words[0]] = {}
dct[key_words[0]][key_words[1]] = self.parse_int(value)
if key_words[0] == "datasets":
dct[key_words[0]]["color"] = next(it)
else:
pass
if "add" in dct.keys():
dct.pop("add")
self.json_data = dct
with open(str(self.model_prop), "w") as f:
json.dump(self.json_data, f, indent=4)
def parse_int(self, data):
try:
return int(data)
except ValueError:
return data
def load_body(self):
with open(str(self.body), "r") as f:
s = f.read()
temp = Template(s)
body = {"body": temp.render(self.json_data)}
return body
def load_template(self, body: dict):
tmp = """
{%- raw %}
{% extends "layout.html" %}
{% block content %}
{%- endraw %}
{{ body }}
{%- raw %}
{% endblock %}
{%- endraw %}
"""
temp = Template(tmp)
html = temp.render(body)
return html
def make_template(self):
self.parse_dict()
body = self.load_body()
html = self.load_template(body)
UserModel().set_current_model(self.data["model"])
self.writer(html)
def writer(self, data: str):
with open(str(self.dst), "w") as f:
f.write(data)
def remove_model(self):
shutil.copy(self.org, self.dst)
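# Hedged usage sketch ('age_model.py' is a hypothetical user script that prints
# a single numeric value when executed):
#   um = UserModel()
#   um.register_model('age_model.py')
#   print(um.get_value())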
| 2.46875
| 2
|
bin/snv_filter_1000genome.py
|
ChiLoveChuan/iTuNES-dev
| 1
|
12779233
|
import pandas as pd
import numpy as np
import sys,getopt,os
import re
#####prepare fasta format input file for netMHC######
opts,args=getopt.getopt(sys.argv[1:],"hi:g:o:s:",["input_vcf_file","input_snv_1000G_file","out_dir","sample_id"])
input_vcf_file=""
input_snv_1000G_file =""
out_dir=""
sample_id=""
USAGE='''
This script filters out SNVs whose positions appear in the 1000 Genomes SNP position list
usage: python snv_filter_1000genome.py -i <input_vcf_file> -g <input_snv_1000G_file> -o <outdir> -s <sample_id>
required arguments:
-i | --input_vcf_file : input vcf file
-g | --input_snv_1000G_file : input 1000G snp position file
-o | --out_dir : output directory
-s | --sample_id : sample id
'''
for opt,value in opts:
if opt =="h":
print USAGE
sys.exit(2)
elif opt in ("-i","--input_vcf_file"):
input_vcf_file=value
elif opt in ("-g","--input_snv_1000G_file"):
input_snv_1000G_file=value
elif opt in ("-o","--out_dir"):
out_dir =value
elif opt in ("-s","--sample_id"):
sample_id =value
#print coverage
if (input_vcf_file =="" or out_dir =="" or sample_id==""):
print USAGE
sys.exit(2)
snv_1000G=[]
for line in open(input_snv_1000G_file):
snv_1000G.append(line.strip())
set_snv=set(snv_1000G)
f_out=open(out_dir+'/'+sample_id+"_SNVs_filter_1000.vcf",'w')
for line in open(input_vcf_file):
if line.startswith('#'):
f_out.write(line)
continue
else:
record=line.strip().split('\t')
pos=record[0]+':'+record[1]
if pos not in set_snv:
f_out.write(line)
else:
continue
f_out.close()
| 2.4375
| 2
|
config.py
|
SdeWit/stereotypes-cs
| 5
|
12779234
|
<reponame>SdeWit/stereotypes-cs
# pylint: disable=too-few-public-methods
"""
Module with different configuration options for the flask application
"""
import os
basedir = os.path.abspath(os.path.dirname(__file__))
class Config(object):
"""
Parent class for different configurations.
Defines default values for dependencies.
"""
DEBUG = False
TESTING = False
CSRF_ENABLED = True
PROPAGATE_EXCEPTIONS = True
SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL')
CLOUDINARY_URL = os.environ.get('CLOUDINARY_URL')
SECRET_KEY = os.environ.get('SECRET_KEY')
JWT_SECRET_KEY = os.environ.get('JWT_SECRET_KEY')
REDIS_URL = os.environ.get('REDIS_URL')
MAIL_DEBUG = True
class ProductionConfig(Config):
"""
Configuration for the production server
"""
DEBUG = False
class StagingConfig(Config):
"""
Configuration for the development server
"""
DEVELOPMENT = True
DEBUG = True
class TestingConfig(Config):
"""
Configuration for local testing
"""
TESTING = True
DEBUG = True
SQLALCHEMY_DATABASE_URI = 'postgresql://test:test@localhost:5432/test'
REDIS_URL = 'redis://:@localhost:6379/0'
SECRET_KEY = "test"
JWT_SECRET_KEY = "test"
MAIL_SUPPRESS_SEND = True
class CITestingConfig(TestingConfig):
"""
Configuration used for CI pipeline testing
"""
SQLALCHEMY_DATABASE_URI = 'postgresql://test:test@postgres:5432/test'
REDIS_URL = 'redis://:@redis:6379/0'
class DockerConfig(CITestingConfig):
SQLALCHEMY_DATABASE_URI = 'postgresql://postgres:password@db:5432/testdb'
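# Hedged usage sketch (assumes a Flask app object created elsewhere): one of the
# classes above is normally loaded with
#   app.config.from_object('config.TestingConfig')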
| 2.046875
| 2
|
tinylocker/utils/account.py
|
dragmz/tinylock_py
| 4
|
12779235
|
<filename>tinylocker/utils/account.py
from typing import Any, Dict, List
from algosdk import account, mnemonic
from algosdk.v2client.algod import AlgodClient
class Account:
"""Represents a private key and address for an Algorand account"""
def __init__(self, privateKey: str) -> None:
self.sk = privateKey
self.addr = account.address_from_private_key(privateKey)
def getAddress(self) -> str:
return self.addr
def getPrivateKey(self) -> str:
return self.sk
def getMnemonic(self) -> str:
return mnemonic.from_private_key(self.sk)
@classmethod
def FromMnemonic(cls, m: str) -> "Account":
return cls(mnemonic.to_private_key(m))
def getBalances(client: AlgodClient, account: str) -> Dict[int, int]:
balances: Dict[int, int] = dict()
accountInfo = client.account_info(account)
# set key 0 to Algo balance
balances[0] = accountInfo["amount"]
assets: List[Dict[str, Any]] = accountInfo.get("assets", [])
for assetHolding in assets:
assetID = assetHolding["asset-id"]
amount = assetHolding["amount"]
balances[assetID] = amount
return balances
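# Hedged usage sketch (the algod token, address and mnemonic are placeholders):
#   client = AlgodClient('<api-token>', 'http://localhost:4001')
#   acct = Account.FromMnemonic('<25-word mnemonic>')
#   print(getBalances(client, acct.getAddress()))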
| 2.90625
| 3
|
forum/migrations/0001_initial.py
|
mapehe/mlp-forum
| 0
|
12779236
|
# Generated by Django 3.2.7 on 2021-10-03 21:30
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name="Post",
fields=[
(
"id",
models.BigAutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("title", models.CharField(max_length=250)),
("url", models.URLField()),
("created_at", models.DateTimeField(auto_now_add=True)),
("updated_at", models.DateTimeField(auto_now=True)),
(
"created_by",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="post_created_by",
to=settings.AUTH_USER_MODEL,
),
),
(
"updated_by",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="post_updated_by",
to=settings.AUTH_USER_MODEL,
),
),
],
options={
"ordering": ["-created_at"],
},
),
migrations.CreateModel(
name="Comment",
fields=[
(
"id",
models.BigAutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("content", models.TextField()),
("edited", models.BooleanField(default="False")),
("created_at", models.DateTimeField(auto_now_add=True)),
("updated_at", models.DateTimeField(auto_now=True)),
(
"created_by",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="comment_created_by",
to=settings.AUTH_USER_MODEL,
),
),
(
"parent",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="comment_parent",
to="forum.post",
),
),
(
"updated_by",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="comment_updated_by",
to=settings.AUTH_USER_MODEL,
),
),
],
options={
"ordering": ["-created_at"],
},
),
]
| 1.867188
| 2
|
CRUDFilters/tests/views.py
|
timhaley94/django-crud-filters
| 5
|
12779237
|
from django.http import HttpResponse
from rest_framework import permissions
from CRUDFilters.views import CRUDFilterModelViewSet
from .models import TestClass
from .serializers import TestClassSerializer
class TestClassViewset(CRUDFilterModelViewSet):
serializer_class = TestClassSerializer
crud_model = TestClass
permission_classes = (permissions.AllowAny,)
def create(self, request, *args, **kwargs):
return HttpResponse("Everything's fine here", status=200)
def update(self, request, *args, **kwargs):
return HttpResponse("Everything's fine here", status=200)
def partial_update(self, request, *args, **kwargs):
return HttpResponse("Everything's fine here", status=200)
def destroy(self, request, *args, **kwargs):
return HttpResponse("Everything's fine here", status=200)
def patch(self, request, *args, **kwargs):
return HttpResponse("Everything's fine here", status=200)
| 2.1875
| 2
|
create_facehq.py
|
Manan1811/FaceNet-Model
| 1
|
12779238
|
import os
from PIL import Image
from models.model import model
import argparse
import numpy as np
import tensorflow as tf
import shutil
def create(args):
if args.pre_trained == 'facenet':
from models.Face_recognition import FR_model
FR = FR_model()
Model = tf.keras.models.load_model(args.save_path)
path = args.img_dir + '/'
names = os.listdir(path)
Add = []
Age = []
for idx, i in enumerate(names, 0):
curr_img = Image.open(path + i)
# print(path+i)
curr_img = curr_img.resize((args.img_size, args.img_size))
curr_img = np.asarray(curr_img)
curr_img = curr_img.astype('float64')
curr_img /= 127.5
curr_img = curr_img - 1
X = [curr_img]
X = np.asarray(X)
assert X.shape == (1, args.img_size, args.img_size, 3), 'check input image shape'
X = FR(X)
y = Model(X)
Add.append(i)
Age.append(y)
if (idx + 1) % args.log_step == 0:
print('{} no of images predicted'.format(idx + 1))
os.mkdir('Face-AHQ')
# path = '/content/data/celeba_hq/train/male/'
path = args.img_dir + '/'
for i in range(len(Add)):
ages = os.listdir('Face-AHQ')
age = (int)(Age[i])
add = path + Add[i]
# creates folder
if str(age) not in ages:
os.mkdir('Face-AHQ/{}'.format(age))
dest = 'Face-AHQ/{}/{}.png'.format(age, i)
shutil.move(add, dest)
if (i + 1) % args.log_step == 0:
print('{} no of images saved'.format(i + 1))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
# Model configuration.
parser.add_argument('--pre_trained', type=str, default = 'facenet', help='pre-trained model to be used')
parser.add_argument('--img_dir', type=str, default = 'data', help='pre-trained model to be used')
parser.add_argument('--img_size', type=int, default = 160, help='size of image to be fed to the model')
parser.add_argument('--log_step', type=int, default = 50, help='number of steps to be taken before logging')
parser.add_argument('--save_path', type=str, default = 'Model_checkpoint',
help = 'path of dir where model is to be saved')
args = parser.parse_args()
create(args)
| 2.46875
| 2
|
nodular/registry.py
|
hasgeek/nodular
| 0
|
12779239
|
# -*- coding: utf-8 -*-
"""
The node registry is a place to list the relationships between node types
and their views.
Nodular does *not* provide a global instance of :class:`NodeRegistry`. Since
the registry determines what is available in an app, registries should be
constructed as app-level globals.
"""
from inspect import isclass
from collections import OrderedDict, defaultdict
from werkzeug.routing import Map as UrlMap
from .node import Node
__all__ = ['NodeRegistry']
def dottedname(entity):
"""Return a dotted name to the given named entity"""
return entity.__module__ + '.' + entity.__name__
class RegistryItem(object):
"""Container for registry entry data"""
pass
class NodeRegistry(object):
"""
Registry for node types and node views.
"""
def __init__(self):
self.nodes = OrderedDict()
self.child_nodetypes = defaultdict(set)
self.nodeviews = defaultdict(list)
self.viewlist = {}
self.urlmaps = defaultdict(lambda: UrlMap(strict_slashes=False))
def register_node(self, model, view=None, itype=None, title=None,
child_nodetypes=None, parent_nodetypes=None):
"""
Register a node.
:param model: Node model.
:param view: View for this node type (optional).
:param string itype: Register the node model as an instance type (optional).
:param string title: Optional title for the instance type.
:param list child_nodetypes: Allowed child nodetypes.
None or empty implies no children allowed.
:param list parent_nodetypes: Nodetypes that this node can be a child of.
:type model: :class:`~nodular.node.Node`
:type view: :class:`~nodular.crud.NodeView`
The special value ``'*'`` in ``child_nodetypes`` implies that this node
is a generic container. ``'*'`` in ``parent_nodetypes`` implies that
this node can appear in any container that has ``'*'`` in
``child_nodetypes``.
"""
item = RegistryItem()
item.model = model
item.nodetype = itype or model.__type__
item.title = (title or model.__title__) if itype else model.__title__
self.nodes[item.nodetype] = item
if view is not None:
self.register_view(item.nodetype, view)
self._register_parentchild(item, child_nodetypes, parent_nodetypes)
def _register_parentchild(self, regitem, child_nodetypes=None, parent_nodetypes=None):
if child_nodetypes is not None:
self.child_nodetypes[regitem.nodetype].update(
[c.__type__ if isinstance(c, Node) else c for c in child_nodetypes])
for ptype in parent_nodetypes or []:
self.child_nodetypes[ptype.__type__ if isinstance(ptype, Node) else ptype].add(regitem.nodetype)
def register_view(self, nodetype, view):
"""
Register a view.
:param string nodetype: Node type that this view renders for.
:param view: View class.
:type view: :class:`~nodular.view.NodeView`
"""
if isclass(nodetype):
nodetype = nodetype.__type__
self.nodeviews[nodetype].append(view)
dotted_view_name = dottedname(view)
self.viewlist[dotted_view_name] = view
# Combine URL rules from across views for the same nodetype
for rule in view.url_map.iter_rules():
rule = rule.empty()
rule.endpoint = dotted_view_name + '/' + rule.endpoint
self.urlmaps[nodetype].add(rule)
self.urlmaps[nodetype].update()
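# Hedged usage sketch (MyNode and MyNodeView are hypothetical Node / NodeView
# subclasses, not part of this module):
#   registry = NodeRegistry()
#   registry.register_node(MyNode, view=MyNodeView,
#                          child_nodetypes=['*'], parent_nodetypes=['*'])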
| 2.46875
| 2
|
venv/Lib/site-packages/tqdm/_dist_ver.py
|
mintzer/pupillometry-rf-back
| 0
|
12779240
|
__version__ = '4.63.1'
| 1.085938
| 1
|
scanners/zap-advanced/scanner/tests/test_zap_spider_http.py
|
kevin-yen/secureCodeBox
| 488
|
12779241
|
#!/usr/bin/env python
# SPDX-FileCopyrightText: 2021 iteratec GmbH
#
# SPDX-License-Identifier: Apache-2.0
# -*- coding: utf-8 -*-
import pytest
from unittest.mock import MagicMock, Mock
from unittest import TestCase
from zapclient.configuration import ZapConfiguration
class ZapSpiderHttpTests(TestCase):
@pytest.mark.unit
def test_has_spider_configurations(self):
config = ZapConfiguration("./tests/mocks/context-with-overlay/", "https://www.secureCodeBox.io/")
self.assertIsNone(config.get_active_spider_config)
config = ZapConfiguration("./tests/mocks/scan-full-bodgeit-docker/", "http://bodgeit:8080/")
self.assertIsNotNone(config.get_active_spider_config)
| 2.015625
| 2
|
scan_cities.py
|
jo-wen/city_crawl
| 0
|
12779242
|
#!/usr/bin/env python3
"""
This script doesn't work as is (and is written dumbly), but has all the pieces I used to organize the data.
parsing a csv file from https://simplemaps.com/data/us-cities
checks if each city has:
1) a .gov site
2) a .us site
3) a .com site
4) includes http and https entries
5) includes www entries as well
for now this is just a giant text file to be gone through
"""
import csv
import urllib.request
csv_file = 'uscities.csv'
f_city = 'city_urls'
f_checked = 'checked_cities'
def get_all_cities():
all_cities = []
with open(csv_file) as csvfile:
reader = csv.reader(csvfile)
for row in reader:
all_cities.append(row[0])
csvfile.close()
return all_cities
def create_urls():
f_cities = open(f_city, 'a+')
for city in get_all_cities():
check_gov = "http://{}.gov".format(city.lower().replace(" ", ""))
check_us = "http://{}.us".format(city.lower().replace(" ", ""))
check_com = "http://{}.com".format(city.lower().replace(" ", ""))
check_gov_https = "https://{}.gov".format(city.lower().replace(" ", ""))
check_us_https = "https://{}.us".format(city.lower().replace(" ", ""))
check_com_https = "https://{}.com".format(city.lower().replace(" ", ""))
while f_cities:
f_cities.write(' '.join(map(str, check_gov)))
f_cities.write(' '.join(map(str, check_us)))
f_cities.write(' '.join(map(str, check_com)))
f_cities.write(' '.join(map(str, check_gov_https)))
f_cities.write(' '.join(map(str, check_us_https)))
f_cities.write(' '.join(map(str, check_com_https)))
f_cities.close()
csv_file = 'uscities.csv'
f_city = 'city_urls'
f_checked = 'checked_cities'
all_cities = []
with open(csv_file) as csvfile:
reader = csv.reader(csvfile)
for row in reader:
all_cities.append(row[0])
f_cities = open(f_city, 'a')
for city in all_cities:
check_gov = "{}.gov".format(city.lower().replace(" ", ""))
check_us = "{}.us".format(city.lower().replace(" ", ""))
check_com = "{}.com".format(city.lower().replace(" ", ""))
# check_gov_https = "https://{}.gov".format(city.lower().replace(" ", ""))
# check_us_https = "https://{}.us".format(city.lower().replace(" ", ""))
# check_com_https = "https://{}.com".format(city.lower().replace(" ", ""))
f_cities.write(check_gov + '\n')
f_cities.write(check_us + '\n')
f_cities.write(check_com + '\n')
# f_cities.write(check_gov_https + '\n')
# f_cities.write(check_us_https + '\n')
# f_cities.write(check_com_https + '\n')
# with www
for city in all_cities:
check_gov = "http://www.{}.gov".format(city.lower().replace(" ", ""))
check_us = "http://www.{}.us".format(city.lower().replace(" ", ""))
check_com = "http://www.{}.com".format(city.lower().replace(" ", ""))
check_gov_https = "https://www.{}.gov".format(city.lower().replace(" ", ""))
check_us_https = "https://www.{}.us".format(city.lower().replace(" ", ""))
check_com_https = "https://www.{}.com".format(city.lower().replace(" ", ""))
f_cities.write(check_gov + '\n')
f_cities.write(check_us + '\n')
f_cities.write(check_com + '\n')
f_cities.write(check_gov_https + '\n')
f_cities.write(check_us_https + '\n')
f_cities.write(check_com_https + '\n')
f_cities.close()
f_checked = open('checked_cities', 'a')
for line in open(f_city, 'r', newline="\n"):
print(line)
print(urllib.request.urlopen(line.strip()).reason)
break
gov = urllib.request.urlopen(check_gov)
us = urllib.request.urlopen(check_us)
com = urllib.request.urlopen(check_com)
gov_https = urllib.request.urlopen(check_gov_https)
us_https = urllib.request.urlopen(check_us_https)
com_https = urllib.request.urlopen(check_com_https)
gov_status = city, gov.url, gov.status, gov.reason
us_status = city, us.url, us.status, us.reason
com_status = city, com.url, com.status, com.reason
while f_cities:
f_cities.close()
| 3.90625
| 4
|
Project/proj.packaging/src/Unity.py
|
luxiaodong/Game
| 1
|
12779243
|
# -*- coding: utf-8 -*-
import sys
import os
UNITY_PATH = "/Applications/Unity/Hub/Editor/2019.4.18f1c1/Unity.app/Contents/MacOS/Unity"
class Unity(object):
# @staticmethod
# def SwitchPlatorm()
# @staticmethod
# def GeneratorWrapCode():
# Unity.ExecuteScript("CSObjectWrapEditor.Generator", "ClearAll")
# Unity.ExecuteScript("CSObjectWrapEditor.Generator", "GenAll")
# @staticmethod
# def ExportProject(platform):
# Unity.ExecuteScript("SwitchScene", "Export" + platform.capitalize() + "Release")
@staticmethod
def BuildAssetBundle(platform):
Unity.ExecuteScript("AssetsBundleBuilder", "BuildAssets" + platform.capitalize(), "BuildAssetBundle")
@staticmethod
def ExecuteScript(className, funcName, logName):
logFile = os.environ["BUILD_DIR"] + "/Log/" + logName + ".log";
args1 = UNITY_PATH
args2 = "-quit -batchmode"
args3 = "-logFile " + logFile
args4 = "-projectPath " + os.environ["GAME_DIR"]
args5 = "-nographics"
args6 = "-executeMethod " + className+"."+funcName
cmd = "'%s' %s %s %s %s %s" % (args1, args2, args3, args4, args5, args6)
os.system("echo "+cmd+" >> "+logFile+" 2>&1")
os.system(cmd + " >> "+logFile+" 2>&1")
# os.system(cmd)
# Log.AppendFile(tmpFile)
# @staticmethod
# def AppendFile(file):
# sys.stdin = open(file,"r")
# sys.stdout = open(os.environ["LOG_FILE"],"a")
# sys.stdout.write(sys.stdin.read())
# @staticmethod
# def Cmd(cmd):
# if os.environ.has_key("LOG_FILE"):
| 2.34375
| 2
|
nbs/matricula.py
|
ronaldokun/cpm-joao-XXIII
| 0
|
12779244
|
<gh_stars>0
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.3'
# jupytext_version: 0.8.6
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import re
# Access and edit Google Sheets by gspread
import gspread
# Module to transform gsheets to data frame
import gspread_dataframe as gs_to_df
from oauth2client.service_account import ServiceAccountCredentials
import datetime as dt
from pathlib import *
import sys
path = PurePath('__file__')
sys.path.insert(0, str(Path(path.parent).resolve().parent))
from cpm import functions as f
TEMPLATE = "Feedback_Template"
MATRICULA = "3. Planilha Matrículas 2019 - 1o sem"
MATR_ABA = "<NAME>"
MATR_CLEANED = "J23_Matrícula_2019-1S"
def check_feedback(gc, name):
aloc = gc.open(name)
# Convert gsheet to df
aloc = gs_to_df.get_as_dataframe(aloc, dtype=str)
# Transform String Dates to datetime
f = lambda x : dt.datetime.strptime(x, "%d/%m/%Y")
aloc['Data'] = aloc['Data'].map(f)
# correct 'nan' strings to ''
aloc.replace('nan', '', inplace=True)
def split_date_hour(col):
return pd.Series(col.split(" "))
def concat_names(x,y):
return x + " " + y
def split_celfone(col):
if type(col) == str:
pattern = ".*\(.*(\d{2})\).*(\d{5})(\d{4}).*"
split = re.split(pattern, col)
if len(split) >= 4:
return "(" + split[1] + ")" + " " + split[2] + "-" + split[3]
return col
return col
def split_fone(col):
if type(col) == str:
pattern = ".*\(.*(\d{2})\).*(\d{4}|\d{4})(\d{4}).*"
split = re.split(pattern, col)
if len(split) >= 4:
return "(" + split[1] + ")" + " " + split[2] + "-" + split[3]
return col
return col
def preprocess_df(df):
presencial = df["Data/Hora preenchimento"] == "Presencial"
espera = df["Data/Hora preenchimento"] == "Lista de Espera"
pre = df[~ presencial & ~ espera]["Data/Hora preenchimento"]
data_hora = pre.apply(split_date_hour)
data = pd.Series.append(df[presencial]["Data/Hora preenchimento"],
df[espera]["Data/Hora preenchimento"])
data = data.append(data_hora.iloc[:, 0]).sort_index()
hora = pd.Series.append(df[presencial]["Data/Hora preenchimento"],
df[espera]["Data/Hora preenchimento"])
hora = hora.append(data_hora.iloc[:, 1]).sort_index()
df.rename(columns={"Data/Hora preenchimento": "Data_Pré_Matrícula"},
inplace=True)
df["Data_Pré_Matrícula"] = data
df["Hora_Pré_Matrícula"] = hora
df["Nome"] = df["Nome"].apply(str.upper).apply(str.strip)
df["Sobrenome"] = df["Sobrenome"].apply(str.upper).apply(str.strip)
df["Nome Responsável"] = df["Nome Responsável"].apply(str.upper).apply(str.strip)
df["Sobrenome Responsável"] = df["Sobrenome Responsável"].apply(str.upper).apply(str.strip)
df["Nome Responsável"] = concat_names(df["Nome Responsável"],
df["Sobrenome Responsável"])
del df["Sobrenome Responsável"]
df["Nome"] = concat_names(df["Nome"], df["Sobrenome"])
del df["Sobrenome"]
df.rename(columns={"Telefone Celular ex: (011) 00000-0000": "Tel_Celular"},
inplace=True)
df["Tel_Celular"] = df["Tel_Celular"].apply(split_celfone)
df.rename(columns={"Telefone Fixo ex: (011) 000-0000": "Tel_Fixo"},
inplace=True)
df["Tel_Fixo"] = df["Tel_Fixo"].apply(split_fone)
df.rename(columns={"Celular do Responsável": "Celular_Responsável"},
inplace=True)
df["Celular_Responsável"] = df["Celular_Responsável"].apply(split_celfone)
df.rename(columns={"RG \n(apenas números)" : "RG"}, inplace=True)
return df
def main():
gc = f.authenticate()
wb = f.load_workbooks_from_drive()[MATRICULA]
df = f.load_sheet_from_workbook(wb, MATR_ABA, skiprows=[1,2])[1]
df = df.fillna('')
df = preprocess_df(df)
df.to_csv("matricula.csv", sep=",", index=False, columns=COLUNAS, na_rep='')
df = pd.read_csv("matricula.csv", dtype=str, na_values='')
matricula = gc.open(MATR_CLEANED)
wks = matricula.worksheet("JoãoXXIII")
wks.clear()
gs_to_df.set_with_dataframe(worksheet=wks, dataframe=df)
main()
| 2.4375
| 2
|
utool/util_tags.py
|
Erotemic/utool
| 8
|
12779245
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
import six
import re
import operator
from utool import util_inject
print, rrr, profile = util_inject.inject2(__name__)
def modify_tags(tags_list, direct_map=None, regex_map=None, regex_aug=None,
delete_unmapped=False, return_unmapped=False,
return_map=False):
import utool as ut
tag_vocab = ut.unique(ut.flatten(tags_list))
alias_map = ut.odict()
if regex_map is not None:
alias_map.update(**ut.build_alias_map(regex_map, tag_vocab))
if direct_map is not None:
alias_map.update(ut.odict(direct_map))
new_tags_list = tags_list
new_tags_list = ut.alias_tags(new_tags_list, alias_map)
if regex_aug is not None:
alias_aug = ut.build_alias_map(regex_aug, tag_vocab)
aug_tags_list = ut.alias_tags(new_tags_list, alias_aug)
new_tags_list = [ut.unique(t1 + t2) for t1, t2 in zip(new_tags_list, aug_tags_list)]
unmapped = list(set(tag_vocab) - set(alias_map.keys()))
if delete_unmapped:
new_tags_list = [ut.setdiff(tags, unmapped) for tags in new_tags_list]
toreturn = None
if return_map:
toreturn = (alias_map,)
if return_unmapped:
toreturn = toreturn + (unmapped,)
if toreturn is None:
toreturn = new_tags_list
else:
toreturn = (new_tags_list,) + toreturn
return toreturn
def tag_coocurrence(tags_list):
import utool as ut
co_occur_list = []
for tags in tags_list:
for combo in ut.combinations(tags, 2):
key = tuple(sorted(combo))
co_occur_list.append(key)
co_occur = ut.dict_hist(co_occur_list, ordered=True)
# co_occur[key] += 1
#co_occur = ut.odict(co_occur)
return co_occur
def tag_hist(tags_list):
import utool as ut
return ut.dict_hist(ut.flatten(tags_list), ordered=True)
def build_alias_map(regex_map, tag_vocab):
"""
Constructs explicit mapping. Order of items in regex map matters.
Items at top are given preference.
Example:
>>> # DISABLE_DOCTEST
        >>> import utool as ut
        >>> tags_list = [['t1', 't2'], [], ['t3'], ['t4', 't5']]
        >>> tag_vocab = ut.flat_unique(*tags_list)
        >>> regex_map = [('t[3-4]', 'A9'), ('t0', 'a0')]
        >>> alias_map = ut.build_alias_map(regex_map, tag_vocab)
        >>> unmapped = list(set(tag_vocab) - set(alias_map.keys()))
"""
import utool as ut
import re
alias_map = ut.odict([])
for pats, new_tag in reversed(regex_map):
pats = ut.ensure_iterable(pats)
for pat in pats:
flags = [re.match(pat, t) for t in tag_vocab]
for old_tag in ut.compress(tag_vocab, flags):
alias_map[old_tag] = new_tag
identity_map = ut.take_column(regex_map, 1)
for tag in ut.filter_Nones(identity_map):
alias_map[tag] = tag
return alias_map
def alias_tags(tags_list, alias_map):
"""
update tags to new values
Args:
tags_list (list):
alias_map (list): list of 2-tuples with regex, value
Returns:
list: updated tags
CommandLine:
python -m utool.util_tags alias_tags --show
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_tags import * # NOQA
>>> import utool as ut
>>> tags_list = [['t1', 't2'], [], ['t3'], ['t4', 't5']]
        >>> regex_map = [('t[3-4]', 'A9'), ('t0', 'a0')]
        >>> tag_vocab = ut.flat_unique(*tags_list)
        >>> alias_map = ut.build_alias_map(regex_map, tag_vocab)
>>> result = alias_tags(tags_list, alias_map)
>>> print(result)
"""
def _alias_dict(tags):
tags_ = [alias_map.get(t, t) for t in tags]
return list(set([t for t in tags_ if t is not None]))
tags_list_ = [_alias_dict(tags) for tags in tags_list]
return tags_list_
# def _fix_tags(tags):
# return {six.text_type(t.lower()) for t in tags}
# tags_list_ = list(map(_fix_tags, tags_list))
# re_list = [re.compile(pat) for pat, val in alias_map]
# val_list = ut.take_column(alias_map, 0)
# def _alias_regex(tags):
# new_tags = 0
# for t in tags:
# matched = [re_.match(t) is not None for re_ in re_list]
# matched_idx = ut.where(matched)
# assert len(matched_idx) <= 1, 'more than one tag in %r matched pattern' % (tags,)
# if len(matched_idx) > 0:
# repl_tags = ut.take(val_list, matched_idx)
# new_tags.extend(repl_tags)
# else:
# new_tags.append(t)
# return new_tags
# # tags_list_ = [_alias_regex(tags) for tags in tags_list_]
# return tags_list_
def filterflags_general_tags(tags_list, has_any=None, has_all=None,
has_none=None, min_num=None, max_num=None,
any_startswith=None, any_endswith=None,
in_any=None, any_match=None, none_match=None,
logic='and', ignore_case=True):
r"""
maybe integrate into utool? Seems pretty general
Args:
tags_list (list):
has_any (None): (default = None)
has_all (None): (default = None)
min_num (None): (default = None)
max_num (None): (default = None)
Notes:
in_any should probably be ni_any
TODO: make this function more natural
CommandLine:
python -m utool.util_tags --exec-filterflags_general_tags
python -m utool.util_tags --exec-filterflags_general_tags:0 --helpx
python -m utool.util_tags --exec-filterflags_general_tags:0
python -m utool.util_tags --exec-filterflags_general_tags:0 --none_match n
python -m utool.util_tags --exec-filterflags_general_tags:0 --has_none=n,o
python -m utool.util_tags --exec-filterflags_general_tags:1
python -m utool.util_tags --exec-filterflags_general_tags:2
Ignore:
>>> # ENABLE_DOCTEST
>>> from utool.util_tags import * # NOQA
>>> import utool as ut
>>> tags_list = [['v'], [], ['P'], ['P', 'o'], ['n', 'o'], [], ['n', 'N'], ['e', 'i', 'p', 'b', 'n'], ['q', 'v'], ['n'], ['n'], ['N']]
>>> kwargs = ut.argparse_dict(ut.get_kwdefaults2(filterflags_general_tags), type_hint=list)
>>> print('kwargs = %r' % (kwargs,))
>>> flags = filterflags_general_tags(tags_list, **kwargs)
>>> print(flags)
>>> result = ut.compress(tags_list, flags)
>>> print('result = %r' % (result,))
Ignore:
>>> # ENABLE_DOCTEST
>>> from utool.util_tags import * # NOQA
>>> import utool as ut
>>> tags_list = [['v'], [], ['P'], ['P'], ['n', 'o'], [], ['n', 'N'], ['e', 'i', 'p', 'b', 'n'], ['n'], ['n'], ['N']]
>>> has_all = 'n'
>>> min_num = 1
>>> flags = filterflags_general_tags(tags_list, has_all=has_all, min_num=min_num)
>>> result = ut.compress(tags_list, flags)
>>> print('result = %r' % (result,))
Ignore:
>>> # ENABLE_DOCTEST
>>> from utool.util_tags import * # NOQA
>>> import utool as ut
>>> tags_list = [['vn'], ['vn', 'no'], ['P'], ['P'], ['n', 'o'], [], ['n', 'N'], ['e', 'i', 'p', 'b', 'n'], ['n'], ['n', 'nP'], ['NP']]
>>> kwargs = {
>>> 'any_endswith': 'n',
>>> 'any_match': None,
>>> 'any_startswith': 'n',
>>> 'has_all': None,
>>> 'has_any': None,
>>> 'has_none': None,
>>> 'max_num': 3,
>>> 'min_num': 1,
>>> 'none_match': ['P'],
>>> }
>>> flags = filterflags_general_tags(tags_list, **kwargs)
>>> filtered = ut.compress(tags_list, flags)
>>> result = ('result = %s' % (ut.repr2(filtered),))
result = [['vn', 'no'], ['n', 'o'], ['n', 'N'], ['n'], ['n', 'nP']]
"""
import numpy as np
import utool as ut
def _fix_tags(tags):
if ignore_case:
return set([]) if tags is None else {six.text_type(t.lower()) for t in tags}
else:
            return set([]) if tags is None else {six.text_type(t) for t in tags}
if logic is None:
logic = 'and'
logic_func = {
'and': np.logical_and,
'or': np.logical_or,
}[logic]
default_func = {
'and': np.ones,
'or': np.zeros,
}[logic]
tags_list_ = [_fix_tags(tags_) for tags_ in tags_list]
    flags = default_func(len(tags_list_), dtype=bool)
if min_num is not None:
flags_ = [len(tags_) >= min_num for tags_ in tags_list_]
logic_func(flags, flags_, out=flags)
if max_num is not None:
flags_ = [len(tags_) <= max_num for tags_ in tags_list_]
logic_func(flags, flags_, out=flags)
if has_any is not None:
has_any = _fix_tags(set(ut.ensure_iterable(has_any)))
flags_ = [len(has_any.intersection(tags_)) > 0 for tags_ in tags_list_]
logic_func(flags, flags_, out=flags)
if has_none is not None:
has_none = _fix_tags(set(ut.ensure_iterable(has_none)))
flags_ = [len(has_none.intersection(tags_)) == 0 for tags_ in tags_list_]
logic_func(flags, flags_, out=flags)
if has_all is not None:
has_all = _fix_tags(set(ut.ensure_iterable(has_all)))
flags_ = [len(has_all.intersection(tags_)) == len(has_all) for tags_ in tags_list_]
logic_func(flags, flags_, out=flags)
def _test_item(tags_, fields, op, compare):
t_flags = [any([compare(t, f) for f in fields]) for t in tags_]
num_passed = sum(t_flags)
flag = op(num_passed, 0)
return flag
def _flag_tags(tags_list, fields, op, compare):
flags = [_test_item(tags_, fields, op, compare) for tags_ in tags_list_]
return flags
def _exec_filter(flags, tags_list, fields, op, compare):
if fields is not None:
fields = ut.ensure_iterable(fields)
if ignore_case:
fields = [f.lower() for f in fields]
flags_ = _flag_tags(tags_list, fields, op, compare)
logic_func(flags, flags_, out=flags)
return flags
flags = _exec_filter(
flags, tags_list, any_startswith,
operator.gt, six.text_type.startswith)
flags = _exec_filter(
flags, tags_list, in_any,
operator.gt, operator.contains)
flags = _exec_filter(
flags, tags_list, any_endswith,
operator.gt, six.text_type.endswith)
flags = _exec_filter(
flags, tags_list, any_match,
operator.gt, lambda t, f: re.match(f, t))
flags = _exec_filter(
flags, tags_list, none_match,
operator.eq, lambda t, f: re.match(f, t))
return flags
if __name__ == '__main__':
r"""
CommandLine:
python -m utool.util_tags
python -m utool.util_tags --allexamples
"""
import multiprocessing
multiprocessing.freeze_support() # for win32
import utool as ut # NOQA
ut.doctest_funcs()
| 2.34375
| 2
|
wine.py
|
athena15/knn
| 1
|
12779246
|
<reponame>athena15/knn<filename>wine.py
import knn
import numpy as np
import pandas as pd
import requests
from io import StringIO
# Adding a note so I can commit changes
# Import wine classification data from UC Irvine's website
col_names = ['Alcohol', 'Malic acid', 'Ash', 'Alcalinity of ash', 'Magnesium', 'Total phenols', 'Flavanoids',
'Nonflavanoid phenols', 'Proanthocyanins', 'Color intensity', 'Hue', 'OD280/OD315 of diluted wines',
'Proline']
url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/wine/wine.data'
s = requests.get(url).text
df = pd.read_csv(StringIO(s), names=col_names)
# Creating a column denoting whether a wine is above the median alcohol percentage
def high_alc(x):
if x >= 13.05:
return 1
else:
return 0
df['high_alcohol'] = df['Alcohol'].apply(high_alc)
# Run k-nearest neighbors classification to predict whether a wine will be classified as 'high alcohol' or not.
knn.nearest_neighbors_workflow(df, 'high_alcohol')
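# The sketch below is illustrative only and is not part of the original knn module:
# it outlines the train/test split + KNeighborsClassifier steps such a workflow is
# assumed to perform (scikit-learn required). It is defined here but never called.
def _sketch_nearest_neighbors_workflow(frame, target_col, n_neighbors=5):
    from sklearn.model_selection import train_test_split
    from sklearn.neighbors import KNeighborsClassifier
    from sklearn.preprocessing import StandardScaler
    # Separate features from the binary target column.
    X = frame.drop(columns=[target_col])
    y = frame[target_col]
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=0.25, random_state=0, stratify=y)
    # Scale features so distance-based k-NN is not dominated by large-valued columns.
    scaler = StandardScaler().fit(X_train)
    model = KNeighborsClassifier(n_neighbors=n_neighbors)
    model.fit(scaler.transform(X_train), y_train)
    # Return held-out accuracy as a quick sanity check.
    return model.score(scaler.transform(X_test), y_test)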
| 3.265625
| 3
|
scripts/clean_data.py
|
alvesmatheus/fala-camarada
| 7
|
12779247
|
<gh_stars>1-10
import os
import re
import pandas as pd
RAW_DATA_DIR_PATH = 'data/raw'
READY_DATA_DIR_PATH = 'data/ready'
RAW_COMMITTEE_SCHEDULE_PATH = 'data/raw/agenda_comissoes.csv'
READY_COMMITTEE_SCHEDULE_PATH = 'data/ready/metadados_transcricoes.csv'
TARGET_YEARS = list(range(1995, 2022))
def fix_committee_schedule():
raw_committee_schedule = pd.read_csv(RAW_COMMITTEE_SCHEDULE_PATH)
committee_schedule = raw_committee_schedule.drop_duplicates('id_evento')
committee_schedule.replace(
'AP c/ Convidado',
'Audiência Pública com Convidado',
inplace=True)
committee_schedule.replace(
'AP c/ Ministro',
'Audiência Pública com Ministro',
inplace=True)
committee_schedule = committee_schedule.fillna({
'categoria_comissao': 'Outros',
'categoria_evento': 'Outros'
})
committee_schedule.to_csv(READY_COMMITTEE_SCHEDULE_PATH, index=False)
def fix_committee_speeches():
for year in TARGET_YEARS:
data_path = f'{RAW_DATA_DIR_PATH}/discursos_comissoes_{year}.csv'
year_speeches = pd.read_csv(data_path)
events = year_speeches['id_evento'].drop_duplicates().values.tolist()
for event in events:
speeches = year_speeches[year_speeches['id_evento'] == event]
dir_path = f'{READY_DATA_DIR_PATH}/{year}'
filename = f'''{str(event).replace('/', '-')}'''
speeches.to_csv(f'{dir_path}/{filename}.csv', index=False)
def main():
if not os.path.exists(READY_DATA_DIR_PATH):
os.mkdir(READY_DATA_DIR_PATH)
for year in TARGET_YEARS:
path = f'{READY_DATA_DIR_PATH}/{year}'
if not os.path.exists(path):
os.mkdir(path)
fix_committee_schedule()
fix_committee_speeches()
if __name__ == '__main__':
main()
| 2.859375
| 3
|
app.py
|
cadullms/pyminemap
| 0
|
12779248
|
import mcpi.minecraft as minecraft
from flask import render_template
from flask import Flask
from flask import jsonify
app = Flask(__name__)
@app.route('/')
def pyminemapIndex():
return render_template('index.html')
@app.route('/list')
def pyminemapList():
try:
positionstexte = []
mc = minecraft.Minecraft.create()
playerIds = mc.getPlayerEntityIds()
for playerId in playerIds:
position = mc.entity.getTilePos(playerId)
positionstexte.append("Spielerposition: x=" + str(position.x) + " y=" + str(position.y) + " z=" + str(position.z))
return render_template('list.html', positionstexte=positionstexte)
except (ConnectionResetError, ConnectionRefusedError):
        return render_template('list.html', positionstexte=None)
@app.route('/map')
def pyminemapMap():
return render_template('map.html')
@app.route('/api/players/positions', methods = ['GET'])
def getPlayerPositions():
try:
positions = []
mc = minecraft.Minecraft.create()
for playerId in mc.getPlayerEntityIds():
playerPosition = mc.entity.getPos(playerId)
position = {
'playerId': playerId,
'x': playerPosition.x,
'y': playerPosition.y,
'z': playerPosition.z
}
positions.append(position)
return jsonify(positions)
except (ConnectionResetError, ConnectionRefusedError):
return jsonify([])
@app.route('/api/worldDimensions', methods = ['GET'])
def getWorldDimensions():
# see: https://www.stuffaboutcode.com/p/minecraft-api-reference.html
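    # Block id 95 is assumed to be invisible bedrock (mcpi BEDROCK_INVISIBLE), which
    # marks the edge of the generated world; scanning along the x and z axes until it
    # is hit yields the maximum coordinates used below.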
try:
mc = minecraft.Minecraft.create()
x = 0
while mc.getBlock(x,0,0) != 95:
x += 1
z = 0
while mc.getBlock(0,0,z) != 95:
z += 1
maxX = x - 1
minX = maxX - 256
maxZ = z - 1
minZ = maxZ - 256
return jsonify({'minX':minX,'maxX':maxX,'minZ':minZ,'maxZ':maxZ})
except (ConnectionResetError, ConnectionRefusedError):
return jsonify(None)
| 2.765625
| 3
|
Lang.py
|
codedecde/Recognizing-Textual-Entailment
| 36
|
12779249
|
from __future__ import unicode_literals, print_function, division
from collections import Counter
from nltk.tokenize import TweetTokenizer
import cPickle as cp
import io
import numpy as np
PAD_TOKEN = 0
SOS_TOKEN = 1
EOS_TOKEN = 2
VOCAB_SIZE = 10000
class Lang(object):
def __init__(self, name, lowercase=True, tokenizer=None):
self.name = name
self.word_count = Counter()
self.tokenizer = tokenizer
self.lowercase = lowercase # To lowercase all words encountered
self.embedding_matrix = None
self.PAD_TOK_VEC = None
self.UNK_TOK_VEC = None
def tokenize_sent(self, sentence):
if self.tokenizer is None:
return sentence.split(u' ')
else:
return self.tokenizer.tokenize(sentence)
def add_sentence(self, sentence):
for w in self.tokenize_sent(sentence):
if self.lowercase:
w = w.lower()
self.word_count[w] += 1
def generate_vocab(self):
vocab = self.word_count.most_common(VOCAB_SIZE)
self.word2ix = {"<PAD>": PAD_TOKEN, "<SOS>": SOS_TOKEN, "<EOS>": EOS_TOKEN}
for w, _ in vocab:
self.word2ix[w] = len(self.word2ix)
self.ix2word = {self.word2ix[w]: w for w in self.word2ix}
def add_word(self, word, embedding=None):
assert word not in self.word2ix, "Already present in vocab"
self.word2ix[word] = len(self.word2ix)
self.ix2word[self.word2ix[word]] = word
if self.embedding_matrix is not None:
_, n_embed = self.embedding_matrix.shape
embedding = embedding if embedding is not None else np.random.normal(0, 1, (1, n_embed))
self.embedding_matrix = np.concatenate([self.embedding_matrix, embedding], axis=0)
def __getitem__(self, item):
if type(item) == str or type(item) == unicode:
# Encode the string to be unicode
item = unicode(item)
if self.lowercase:
item = item.lower()
return self.word2ix[item] if item in self.word2ix else len(self.word2ix)
else:
return self.ix2word[item] if item in self.ix2word else u"<UNK>"
def __len__(self):
assert len(self.ix2word) == len(self.word2ix), "Index not built using generate_vocab and add_word"
return len(self.ix2word)
def save_file(self, filename):
cp.dump(self.__dict__, open(filename, 'wb'))
def load_file(self, filename):
self.__dict__ = cp.load(open(filename))
def get_embedding_matrix(self):
if self.embedding_matrix is None:
return None
_embedding_matrix = np.concatenate([self.PAD_TOK_VEC, self.embedding_matrix, self.UNK_TOK_VEC], axis=0)
return _embedding_matrix
def build_vocab(filename, l):
with io.open(filename, encoding='utf-8', mode='r', errors='replace') as f:
for line in f:
line = line.strip().split('\t')
l.add_sentence(line[0])
l.add_sentence(line[1])
l.generate_vocab()
return l
def build_embedding_matrix_from_gensim(l_en, gensim_model, embedding_dim=300):
l_en.PAD_TOK_VEC = np.random.normal(0, 1, (1, embedding_dim))
l_en.UNK_TOK_VEC = np.random.normal(0, 1, (1, embedding_dim))
l_en.embedding_matrix = np.random.normal(0, 1, (len(l_en) - 1, embedding_dim)) # PAD TOKEN ENCODED SEPARATELY
for w in l_en.word2ix:
if l_en.word2ix[w] == PAD_TOKEN:
# PAD TOKEN ENCODED SEPARATELY
continue
if w in gensim_model.wv:
l_en.embedding_matrix[l_en.word2ix[w] - 1] = gensim_model.wv[w]
return l_en
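# Illustrative sketch (not part of the original project): how a vocabulary and its
# embedding matrix are assumed to be put together when a trained gensim Word2Vec
# model is available. The model path argument is hypothetical.
def build_lang_with_embeddings(data_file, gensim_model_path, embedding_dim=300):
    import gensim
    # Build the vocabulary from the tab-separated sentence-pair file.
    lang = Lang('en', tokenizer=TweetTokenizer())
    lang = build_vocab(data_file, lang)
    # Load a trained Word2Vec model and copy its vectors into the Lang object.
    w2v = gensim.models.Word2Vec.load(gensim_model_path)
    lang = build_embedding_matrix_from_gensim(lang, w2v, embedding_dim=embedding_dim)
    return lang, lang.get_embedding_matrix()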
if __name__ == "__main__":
# ROOT_DIR = "/home/bass/DataDir/RTE/"
ROOT_DIR = ""
DATA_FILE = ROOT_DIR + "data/train.txt"
# DATA_FILE ="data/tiny_eng-fra.txt"
l_en = Lang('en', tokenizer=TweetTokenizer())
l_en = build_vocab(DATA_FILE, l_en)
save_file_name = ROOT_DIR + 'data/vocab.pkl'
l_en.save_file(save_file_name)
| 2.625
| 3
|
apis/utils/constants.py
|
kothiyayogesh11/yk11_api
| 0
|
12779250
|
<filename>apis/utils/constants.py
# User related settings
USER_TYPE = "Vendor"
# Email Settings
MAIL_SERVER = 'smtp.gmail.com'
MAIL_PORT = 465
MAIL_USE_TLS = False
MAIL_USE_SSL = True
MAIL_USERNAME = '<EMAIL>'
MAIL_PASSWORD = '<PASSWORD>'
# Server URLS
NOTIFACTION_CLIENT = "http://api.notifications.wellnessta.in/api/v1/notifications/"
#NOTIFACTION_CLIENT = 'http://localhost:4000/api/v1/notifications/'
VENDOR_CLIENT = "https://api.vendor.wellnessta.in/api/v1/"
VENDOR_WEB = "https://vendor.beta.wellnessta.com/"
PAYMENT_CLIENT = "http://api.payment.wellnessta.in/api/v1/"
#PAYMENT_CLIENT = "http://localhost:5000/api/v1/"
ADMIN_WEB = "https://admin.beta.wellnessta.com/"
CLIENT_WEB = "https://wellnessta.in/"
# GST Cards
GST_CLIENT_URL = "https://commonapi.mastersindia.co/"
GST_CONTANTTYPE = "application/json"
GST_AUTHORIZATION = "Bearer "
GST_CLIENT_ID = "AzgFbKsSuksqsmMCzi"
GST_CLIENT_SECRET = "jG3RERRRFUUbXsJV3xP4dAeQ"
GST_GRANTTYPE = "password"
GST_USER = "<EMAIL>"
GST_PASSWORD = "<PASSWORD>"
# Admin email
ADMIN_EMAIL="<EMAIL>"
SMS_VALID="10 min"
# Barter
BATER_EMAIL = "<EMAIL>"
| 1.453125
| 1
|