Dataset schema (one entry per column: type, observed stats):

- blob_id: string (length 40)
- directory_id: string (length 40)
- path: string (length 2-616)
- content_id: string (length 40)
- detected_licenses: list (length 0-69)
- license_type: string (2 classes)
- repo_name: string (length 5-118)
- snapshot_id: string (length 40)
- revision_id: string (length 40)
- branch_name: string (length 4-63)
- visit_date: timestamp[us]
- revision_date: timestamp[us]
- committer_date: timestamp[us]
- github_id: int64 (2.91k-686M, nullable)
- star_events_count: int64 (0-209k)
- fork_events_count: int64 (0-110k)
- gha_license_id: string (23 classes)
- gha_event_created_at: timestamp[us]
- gha_created_at: timestamp[us]
- gha_language: string (213 classes)
- src_encoding: string (30 classes)
- language: string (1 class)
- is_vendor: bool (2 classes)
- is_generated: bool (2 classes)
- length_bytes: int64 (2-10.3M)
- extension: string (246 classes)
- content: string (length 2-10.3M)
- authors: list (length 1)
- author_id: string (length 0-212)

Record 1: /polls/polls1/admin.py

blob_id: 56d57686f92e0970a3e91838c2390f7a81b41c96
directory_id: 5062c7bdd302388668a79591d56f6fef667d2324
path: /polls/polls1/admin.py
content_id: dbe803981a4c8a9f9097ed134ee1c43a2198d0e8
detected_licenses: []
license_type: no_license
repo_name: amt3/polling_app
snapshot_id: cd6e44ab357e9a1f102d8a131eef95193ff62b96
revision_id: d71e5dacd8dfe242abe671bcb29addb7a8630c4f
branch_name: refs/heads/master
visit_date: 2021-01-19T19:38:05.412034
revision_date: 2017-04-18T19:51:26
committer_date: 2017-04-18T19:51:26
github_id: 88431007
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 673
extension: py
content:
from django.contrib import admin
from .models import Choice, Question, comment, User
class ChoiceInline(admin.TabularInline):
model = Choice
extra = 3
class QuestionAdmin(admin.ModelAdmin):
fieldsets = [
(None, {'fields': ['question_text']}),
('Date information', {'fields': ['pub_date'], 'classes': ['collapse']}),
]
inlines = [ChoiceInline]
list_display = ('question_text', 'pub_date', 'was_published_recently')
list_filter = ['pub_date']
search_fields = ['question_text']
admin.site.register(Question, QuestionAdmin)
#admin.site.register(Choice)
admin.site.register(comment)
admin.site.register(User)
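
For context, a hypothetical models.py matching the admin configuration above (this record contains only admin.py; the field details below are assumptions in the style of the Django tutorial, and the comment and User models are omitted):

import datetime

from django.db import models
from django.utils import timezone

class Question(models.Model):
    question_text = models.CharField(max_length=200)
    pub_date = models.DateTimeField('date published')

    def was_published_recently(self):
        # referenced by list_display in the admin above
        return self.pub_date >= timezone.now() - datetime.timedelta(days=1)

class Choice(models.Model):
    question = models.ForeignKey(Question, on_delete=models.CASCADE)
    choice_text = models.CharField(max_length=200)
    votes = models.IntegerField(default=0)
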
authors: ["amtpagrut@gmail.com"]
author_id: amtpagrut@gmail.com

Record 2: /Second_Min_No_List.py

blob_id: c6677fbf3c7a82ee6b99d6bb0c784f16490e8eac
directory_id: 87673969182ddaffc976ee66016b98cc56a3f0a3
path: /Second_Min_No_List.py
content_id: 6a3cea7e7e70d91b1a2038f9379cc41a9e153ce2
detected_licenses: []
license_type: no_license
repo_name: vikramattri123/265172_python
snapshot_id: d588b6e28b20033fe0a64b53d7bb53ea83a47b58
revision_id: 7435782d68f3be84580b673dc5f837494d4d2346
branch_name: refs/heads/main
visit_date: 2023-04-11T19:25:43.014809
revision_date: 2021-04-29T18:20:23
committer_date: 2021-04-29T18:20:23
github_id: 359191161
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 166
extension: py
content:
a = int(input("Enter No : "))
nums = []          # renamed from `list` to avoid shadowing the built-in
t = float('inf')   # smallest value seen so far
c1 = float('inf')  # second-smallest value seen so far
for i in range(a):
    c = int(input())
    nums.append(c)  # kept from the original; not needed for the answer
    if c < t:
        c1 = t
        t = c
    elif c < c1:
        c1 = c
print(c1)
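
Note the original only updated c1 when a new minimum arrived, so ascending input printed the sentinel value; the version above also tracks values that fall between the two minima. As a cross-check against the collected list (a minimal sketch, assuming at least two numbers are entered):

print(sorted(nums)[1])  # second-smallest, duplicates counted separately
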
authors: ["vikramattri123@gmail.com"]
author_id: vikramattri123@gmail.com

Record 3: /WikiTrans/plugin.py

blob_id: 2fa12de4cea03dc4cd887a32b0e4c644c2ac1660
directory_id: 7e747b7b8d3d30a994f6dafc3d5a5f7ba1f76af5
path: /WikiTrans/plugin.py
content_id: 9d50a4c2d8b857e9eb7e384e7424450eeb88bc11
detected_licenses: []
license_type: no_license
repo_name: fbesser/ProgValSupybot-plugins
snapshot_id: 37640f861c8635a654fbad41bb8cdf3bc6b0c6f2
revision_id: 5a87323ac418e537852f5a4d38dd414c7b82b145
branch_name: refs/heads/master
visit_date: 2020-12-30T17:19:33.911178
revision_date: 2011-11-01T18:47:57
committer_date: 2011-11-01T18:47:57
github_id: null
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 3054
extension: py
content:
###
# Copyright (c) 2011, Valentin Lorentz
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
import supybot.utils as utils
from supybot.commands import *
import supybot.plugins as plugins
import supybot.ircutils as ircutils
import supybot.callbacks as callbacks
from supybot.i18n import PluginInternationalization, internationalizeDocstring
_ = PluginInternationalization('WikiTrans')
import urllib
@internationalizeDocstring
class WikiTrans(callbacks.Plugin):
"""Add the help for "@plugin help WikiTrans" here
This should describe *how* to use this plugin."""
threaded = True
def translate(self, irc, msg, args, src, target, word):
"""<from language> <to language> <word>
Translates the <word> (also works with expressions) using Wikipedia
interlanguage links."""
try:
page = utils.web.getUrlFd('http://%s.wikipedia.org/wiki/%s' %
(src, urllib.quote_plus(word.replace(' ', '_'))))
except:
irc.error(_('This word can\'t be found on Wikipedia'))
return
start = ('\t\t\t\t\t<li class="interwiki-%s"><a '
'href="http://%s.wikipedia.org/wiki/') % \
(target, target)
for line in page:
if line.startswith(start):
irc.reply(line[len(start):].split('"')[2])
return
irc.error(_('No translation found'))
translate = wrap(translate, ['something', 'something', 'text'])
Class = WikiTrans
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
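
The plugin above scrapes rendered Wikipedia HTML for interlanguage links, which is brittle. A standalone sketch of the same lookup through the MediaWiki API (assuming the `langlinks` query module; this is not part of the original plugin and is written for Python 3):

import json
import urllib.parse
import urllib.request

def wiki_translate(src, target, word):
    params = urllib.parse.urlencode({
        'action': 'query', 'prop': 'langlinks', 'titles': word,
        'lllang': target, 'format': 'json', 'formatversion': 2,
    })
    url = 'https://%s.wikipedia.org/w/api.php?%s' % (src, params)
    with urllib.request.urlopen(url) as resp:
        data = json.load(resp)
    links = data['query']['pages'][0].get('langlinks', [])
    return links[0]['title'] if links else None
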
authors: ["progval@gmail.com"]
author_id: progval@gmail.com

Record 4: /pardus/tags/2008.1/applications/admin/multitail/actions.py

blob_id: 7738aa87e518aeffeaeb7d4c0c9c5946d12b319e
directory_id: 2bb90b620f86d0d49f19f01593e1a4cc3c2e7ba8
path: /pardus/tags/2008.1/applications/admin/multitail/actions.py
content_id: d4c6ef77d21dc229418a24b7aaf616ffdbe43415
detected_licenses: []
license_type: no_license
repo_name: aligulle1/kuller
snapshot_id: bda0d59ce8400aa3c7ba9c7e19589f27313492f7
revision_id: 7f98de19be27d7a517fe19a37c814748f7e18ba6
branch_name: refs/heads/master
visit_date: 2021-01-20T02:22:09.451356
revision_date: 2013-07-23T17:57:58
committer_date: 2013-07-23T17:57:58
github_id: null
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 663
extension: py
content:
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2006 TUBITAK/UEKAE
# Licensed under the GNU General Public License, version 2.
# See the file http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt
from pisi.actionsapi import autotools
from pisi.actionsapi import pisitools
def build():
autotools.make("all")
def install():
pisitools.dobin("multitail")
pisitools.dosed('multitail.conf', 'check_mail:5', 'check_mail:0') # disable check mail feature by default
pisitools.insinto("/etc", "multitail.conf")
pisitools.dodoc("Changes", "license.txt", "readme.txt")
pisitools.dohtml("manual*.html")
pisitools.doman("multitail.1")
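
For readers unfamiliar with the pisi actions API, `dosed` above performs an in-place, sed-style substitution on a file in the build directory. A rough plain-Python equivalent (a sketch of the behavior, not the pisi implementation):

from pathlib import Path

def dosed(path, old, new):
    p = Path(path)
    p.write_text(p.read_text().replace(old, new))
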
authors: ["yusuf.aydemir@istanbul.com"]
author_id: yusuf.aydemir@istanbul.com

Record 5: /app1/views.py

blob_id: f2f71ed9879b1dd22cc2ccd3cb1b8c85b92403af
directory_id: 50f7deafdc78088cc5d3c083a48c1903b95bb812
path: /app1/views.py
content_id: 131d193670281057b152061a633dbe72e9a064bf
detected_licenses: []
license_type: no_license
repo_name: ritztech/django_project
snapshot_id: 4ad6f1200d410204065caa0f81711013697d9674
revision_id: ca09af8180a5cd045408091a91f70e8726b6ff5f
branch_name: refs/heads/master
visit_date: 2021-01-16T05:05:14.524771
revision_date: 2020-02-26T12:19:50
committer_date: 2020-02-26T12:19:50
github_id: 242985320
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 446
extension: py
content:
from django.shortcuts import render
from django.http import HttpResponse
# Create your views here.
def home(request):
#return HttpResponse("<h1>Hello response to main page</h1> ")
return render(request,'index.html',{'name':'sudhir shrivastvava','job':'IIT KGP'})
def add(request):
val1=int(request.POST["num1"])
val2=int(request.POST["num2"])
val3= val1 + val2
return render(request,"result.html",{'result':val3})
authors: ["ritzz.technologies@gmail.com"]
author_id: ritzz.technologies@gmail.com

Record 6: /Strings/venv/Scripts/easy_install-3.7-script.py

blob_id: d3370eaf70619b36f388d94de9592ce4897efb59
directory_id: 495e2201cb5f5e690fd432995ab426fe524b45ad
path: /Strings/venv/Scripts/easy_install-3.7-script.py
content_id: 83ec03b6681bf67a582b2289cba7cb5c60b9d45a
detected_licenses: []
license_type: no_license
repo_name: IngridFCosta/Exercicios-de-Python-Curso-em-video
snapshot_id: 0f22d7e17470464f4ede955fab0e11edb89cbedf
revision_id: a32811d3a01c7f017882cead0051ba536b05ffbf
branch_name: refs/heads/master
visit_date: 2022-12-06T04:45:53.589063
revision_date: 2020-08-26T23:34:45
committer_date: 2020-08-26T23:34:45
github_id: 288519041
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 460
extension: py
content:
#!C:\Users\usuario\PycharmProjects\Strings\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==40.8.0','console_scripts','easy_install-3.7'
__requires__ = 'setuptools==40.8.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==40.8.0', 'console_scripts', 'easy_install-3.7')()
)
authors: ["49729290+IngridFCosta@users.noreply.github.com"]
author_id: 49729290+IngridFCosta@users.noreply.github.com

Record 7: /CyberCrew/task4/src/backend/code/routes.py

blob_id: f93f6e8d846bb344aae690e3f73a9e3bca9acc5b
directory_id: b34a1d253457a2c015247f498ddc5e7dc13a0961
path: /CyberCrew/task4/src/backend/code/routes.py
content_id: a9adcde9acfd64d3860f4e6fde83d3713c4e6f5b
detected_licenses: []
license_type: no_license
repo_name: madwayz/tasks_dev
snapshot_id: c8efc957e2c0197a187297642ca9a797f6f37ad8
revision_id: 07f6f587ca4cda464f3c853fcd84f75cc3e45036
branch_name: refs/heads/master
visit_date: 2022-04-24T04:53:35.515953
revision_date: 2020-04-27T11:09:13
committer_date: 2020-04-27T11:09:13
github_id: 254447572
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 817
extension: py
content:
from flask import request, jsonify
import requests as req
from code import app
@app.route('/resolve', methods=['GET', 'POST'])
def resolve():
domain = request.args.get('domain')
if not domain:
return jsonify({'error': 'Check fields and try again.'})
if '://' in domain:
        return jsonify({'error': 'The "domain" field expected a domain, but received a URL'})
headers = {'FLAG': 'FLAG{7bdaf9e80649ba47499e3fc}'}
try:
req.head(f'http://{domain}', headers=headers)
except req.exceptions.ConnectionError:
return {'status': 'fail'}
headers = {
'domainName': domain,
'search': domain,
'web-lookup-search': 'true',
}
response = req.post(f'http://ip-api.com/json/{domain}', headers=headers).json()
return jsonify(response)
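
A hypothetical local invocation of the endpoint (assuming the Flask app is served on 127.0.0.1:5000):

import requests
r = requests.get('http://127.0.0.1:5000/resolve', params={'domain': 'example.com'})
print(r.json())
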
authors: ["djurball@list.ru"]
author_id: djurball@list.ru

Record 8: /library/migrations/0001_initial.py

blob_id: b662605ed67fe1a81b64f8c3935cb676d56be5c0
directory_id: 3b20a0bb1e1a408c0fbba4429c40aed625cd8430
path: /library/migrations/0001_initial.py
content_id: c4e8b2d4a84c58eeb4cdc2c5a1343aa96e1e78ae
detected_licenses: []
license_type: no_license
repo_name: rmhanchate/libmanpro
snapshot_id: 5e1274f384a00e38837d1b22bf7ae9c16ec970ef
revision_id: 9e83df1649f3bc6d1e4c19e56a8321caae92e7f7
branch_name: refs/heads/master
visit_date: 2022-11-07T09:16:29.510053
revision_date: 2020-06-12T15:25:37
committer_date: 2020-06-12T15:25:37
github_id: 271769375
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 2262
extension: py
content:
# -*- coding: utf-8 -*-
# Generated by Django 1.11.28 on 2020-05-16 13:57
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0008_alter_user_username_max_length'),
]
operations = [
migrations.CreateModel(
name='Book',
fields=[
('id', models.CharField(db_column='ID', primary_key=True, serialize=False, max_length=255)),
('title', models.CharField(db_column='Title', max_length=255)),
('link', models.CharField(db_column='Link', max_length=255)),
],
options={
'db_table': 'book',
'managed': False,
},
),
migrations.CreateModel(
name='Author',
fields=[
('id', models.CharField(db_column='id', max_length=255, primary_key=True, serialize=False)),
('author', models.CharField(db_column='Author', max_length=255, null=True)),
],
options={
'db_table': 'author',
'managed': False,
},
),
migrations.CreateModel(
name='DjangoMigrations',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('app', models.CharField(max_length=255)),
('name', models.CharField(max_length=255)),
('applied', models.DateTimeField()),
],
options={
'db_table': 'django_migrations',
'managed': False,
},
),
migrations.CreateModel(
name='Genre',
fields=[
('id', models.CharField(db_column='ID', primary_key=True, serialize=False, max_length=255)),
('class', models.FloatField(db_column='Class', max_length=255)),
('genre', models.CharField(db_column='Genre', max_length=255)),
],
options={
'db_table': 'genre',
'managed': False,
},
),
]
authors: ["rmhanchate.181ec135@nitk.edu.in"]
author_id: rmhanchate.181ec135@nitk.edu.in

Record 9: /prototype/data/imagenet_dataloader.py

blob_id: ed679385dfdf421b67dac7110709160cbba2b2c2
directory_id: 68ed4d0aed6e55f8af151276a09e0d08d9d61387
path: /prototype/data/imagenet_dataloader.py
content_id: 055cd060445fa83dbd576810bf3f2b4181531dad
detected_licenses: []
license_type: no_license
repo_name: Rivulet-1993/prototype
snapshot_id: 8cb1dbc9034f495c5d53ce99e0b4e5c5a5e62d33
revision_id: 193462e72b6e58b7b4a826bc9ae7f013efc2c76d
branch_name: refs/heads/main
visit_date: 2023-03-25T17:36:50.129634
revision_date: 2021-03-17T02:45:27
committer_date: 2021-03-17T02:45:27
github_id: 349042207
star_events_count: 1
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 10189
extension: py
content:
from torch.utils.data import DataLoader
from torchvision import transforms
from .datasets import ImageNetDataset
from .transforms import build_transformer, TwoCropsTransform, GaussianBlur
from .auto_augmentation import ImageNetPolicy
from .sampler import build_sampler
from .metrics import build_evaluator
from .pipelines import ImageNetTrainPipeV2, ImageNetValPipeV2
from .nvidia_dali_dataloader import DaliDataloader
def build_common_augmentation(aug_type):
"""
common augmentation settings for training/testing ImageNet
"""
normalize = transforms.Normalize(
mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
if aug_type == 'STANDARD':
augmentation = [
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ColorJitter(0.2, 0.2, 0.2, 0.1),
transforms.ToTensor(),
normalize,
]
elif aug_type == 'AUTOAUG':
augmentation = [
transforms.RandomResizedCrop(224),
ImageNetPolicy(),
transforms.ToTensor(),
normalize,
]
elif aug_type == 'MOCOV1':
augmentation = [
transforms.RandomResizedCrop(224, scale=(0.2, 1.)),
transforms.RandomGrayscale(p=0.2),
transforms.ColorJitter(0.4, 0.4, 0.4, 0.4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize
]
elif aug_type == 'MOCOV2' or aug_type == 'SIMCLR':
augmentation = [
transforms.RandomResizedCrop(224, scale=(0.2, 1.)),
transforms.RandomApply([transforms.ColorJitter(0.4, 0.4, 0.4, 0.1)], p=0.8),
transforms.RandomGrayscale(p=0.2),
transforms.RandomApply([GaussianBlur([.1, 2.])], p=0.5),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize
]
elif aug_type == 'LINEAR':
augmentation = [
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize,
]
elif aug_type == 'ONECROP':
augmentation = [
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
normalize,
]
else:
raise RuntimeError("undefined augmentation type for ImageNet!")
if aug_type in ['MOCOV1', 'MOCOV2', 'SIMCLR']:
return TwoCropsTransform(transforms.Compose(augmentation))
else:
return transforms.Compose(augmentation)
def build_imagenet_train_dataloader(cfg_dataset, data_type='train'):
"""
build training dataloader for ImageNet
"""
cfg_train = cfg_dataset['train']
# build dataset
if cfg_dataset['use_dali']:
# NVIDIA dali preprocessing
assert cfg_train['transforms']['type'] == 'STANDARD', 'only support standard augmentation'
dataset = ImageNetDataset(
root_dir=cfg_train['root_dir'],
meta_file=cfg_train['meta_file'],
read_from=cfg_dataset['read_from'],
)
else:
image_reader = cfg_dataset[data_type].get('image_reader', {})
# PyTorch data preprocessing
if isinstance(cfg_train['transforms'], list):
transformer = build_transformer(cfgs=cfg_train['transforms'],
image_reader=image_reader)
else:
transformer = build_common_augmentation(cfg_train['transforms']['type'])
dataset = ImageNetDataset(
root_dir=cfg_train['root_dir'],
meta_file=cfg_train['meta_file'],
transform=transformer,
read_from=cfg_dataset['read_from'],
image_reader_type=image_reader.get('type', 'pil'),
)
# build sampler
cfg_train['sampler']['kwargs'] = {}
cfg_dataset['dataset'] = dataset
sampler = build_sampler(cfg_train['sampler'], cfg_dataset)
if cfg_dataset['last_iter'] >= cfg_dataset['max_iter']:
return {'loader': None}
# build dataloader
if cfg_dataset['use_dali']:
# NVIDIA dali pipeline
pipeline = ImageNetTrainPipeV2(
data_root=cfg_train['root_dir'],
data_list=cfg_train['meta_file'],
sampler=sampler,
crop=cfg_dataset['input_size'],
colorjitter=[0.2, 0.2, 0.2, 0.1]
)
loader = DaliDataloader(
pipeline=pipeline,
batch_size=cfg_dataset['batch_size'],
epoch_size=len(sampler),
num_threads=cfg_dataset['num_workers'],
last_iter=cfg_dataset['last_iter']
)
else:
# PyTorch dataloader
loader = DataLoader(
dataset=dataset,
batch_size=cfg_dataset['batch_size'],
shuffle=False,
num_workers=cfg_dataset['num_workers'],
pin_memory=True,
sampler=sampler
)
return {'type': 'train', 'loader': loader}
def build_imagenet_test_dataloader(cfg_dataset, data_type='test'):
"""
build testing/validation dataloader for ImageNet
"""
cfg_test = cfg_dataset['test']
# build evaluator
evaluator = None
if cfg_test.get('evaluator', None):
evaluator = build_evaluator(cfg_test['evaluator'])
if cfg_dataset['use_dali']:
# NVIDIA dali preprocessing
assert cfg_test['transforms']['type'] == 'ONECROP', 'only support onecrop augmentation'
dataset = ImageNetDataset(
root_dir=cfg_test['root_dir'],
meta_file=cfg_test['meta_file'],
read_from=cfg_dataset['read_from'],
evaluator=evaluator,
)
else:
image_reader = cfg_dataset[data_type].get('image_reader', {})
# PyTorch data preprocessing
if isinstance(cfg_test['transforms'], list):
transformer = build_transformer(cfgs=cfg_test['transforms'],
image_reader=image_reader)
else:
transformer = build_common_augmentation(cfg_test['transforms']['type'])
dataset = ImageNetDataset(
root_dir=cfg_test['root_dir'],
meta_file=cfg_test['meta_file'],
transform=transformer,
read_from=cfg_dataset['read_from'],
evaluator=evaluator,
image_reader_type=image_reader.get('type', 'pil'),
)
# build sampler
assert cfg_test['sampler'].get('type', 'distributed') == 'distributed'
cfg_test['sampler']['kwargs'] = {'dataset': dataset, 'round_up': False}
cfg_dataset['dataset'] = dataset
sampler = build_sampler(cfg_test['sampler'], cfg_dataset)
# build dataloader
if cfg_dataset['use_dali']:
# NVIDIA dali pipeline
pipeline = ImageNetValPipeV2(
data_root=cfg_test['root_dir'],
data_list=cfg_test['meta_file'],
sampler=sampler,
crop=cfg_dataset['input_size'],
size=cfg_dataset['test_resize'],
)
loader = DaliDataloader(
pipeline=pipeline,
batch_size=cfg_dataset['batch_size'],
epoch_size=len(sampler),
num_threads=cfg_dataset['num_workers'],
dataset=dataset,
)
else:
# PyTorch dataloader
loader = DataLoader(
dataset=dataset,
batch_size=cfg_dataset['batch_size'],
shuffle=False,
num_workers=cfg_dataset['num_workers'],
pin_memory=cfg_dataset['pin_memory'],
sampler=sampler
)
return {'type': 'test', 'loader': loader}
def build_imagenet_search_dataloader(cfg_dataset, data_type='arch'):
"""
    build ImageNet dataloader for neural architecture search (NAS)
"""
cfg_search = cfg_dataset[data_type]
# build dataset
if cfg_dataset['use_dali']:
# NVIDIA dali preprocessing
assert cfg_search['transforms']['type'] == 'ONECROP', 'only support onecrop augmentation'
dataset = ImageNetDataset(
root_dir=cfg_search['root_dir'],
meta_file=cfg_search['meta_file'],
read_from=cfg_dataset['read_from'],
)
else:
image_reader = cfg_dataset[data_type].get('image_reader', {})
# PyTorch data preprocessing
if isinstance(cfg_search['transforms'], list):
transformer = build_transformer(cfgs=cfg_search['transforms'],
image_reader=image_reader)
else:
transformer = build_common_augmentation(cfg_search['transforms']['type'])
dataset = ImageNetDataset(
root_dir=cfg_search['root_dir'],
meta_file=cfg_search['meta_file'],
transform=transformer,
read_from=cfg_dataset['read_from'],
image_reader_type=image_reader.get('type', 'pil'),
)
# build sampler
assert cfg_search['sampler'].get('type', 'distributed_iteration') == 'distributed_iteration'
cfg_search['sampler']['kwargs'] = {}
cfg_dataset['dataset'] = dataset
sampler = build_sampler(cfg_search['sampler'], cfg_dataset)
if cfg_dataset['last_iter'] >= cfg_dataset['max_iter']:
return {'loader': None}
    # build dataloader
if cfg_dataset['use_dali']:
# NVIDIA dali pipeline
pipeline = ImageNetValPipeV2(
data_root=cfg_search['root_dir'],
data_list=cfg_search['meta_file'],
sampler=sampler,
crop=cfg_dataset['input_size'],
size=cfg_dataset['test_resize'],
)
loader = DaliDataloader(
pipeline=pipeline,
batch_size=cfg_dataset['batch_size'],
epoch_size=len(sampler),
num_threads=cfg_dataset['num_workers'],
)
else:
# PyTorch dataloader
loader = DataLoader(
dataset=dataset,
batch_size=cfg_dataset['batch_size'],
shuffle=False,
num_workers=cfg_dataset['num_workers'],
pin_memory=cfg_dataset['pin_memory'],
sampler=sampler
)
return {'type': data_type, 'loader': loader}
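
A hypothetical minimal config illustrating the keys build_imagenet_train_dataloader reads (real projects supply this from their own config files; all values below are placeholders):

cfg_dataset = {
    'use_dali': False,
    'read_from': 'fs',
    'batch_size': 64,
    'num_workers': 4,
    'input_size': 224,
    'test_resize': 256,
    'last_iter': 0,
    'max_iter': 10000,
    'train': {
        'root_dir': '/path/to/imagenet/train',
        'meta_file': '/path/to/train_meta.txt',
        'transforms': {'type': 'STANDARD'},
        'sampler': {'type': 'distributed_iteration'},
    },
}
# loader = build_imagenet_train_dataloader(cfg_dataset)['loader']
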
authors: ["yuankun@sensetime.com"]
author_id: yuankun@sensetime.com

Record 10: /ASCII.py

blob_id: a1b0ca45b0570f7898d0bc8d827982ad2d66ed91
directory_id: ac97d0698f2e51163c4bc00efe3693cb64da84b6
path: /ASCII.py
content_id: e9e9328807a979b7b8081b523b05006fb70f117f
detected_licenses: []
license_type: no_license
repo_name: Sudhapraba/python
snapshot_id: 832208777b95370d7bedd81f62b42523cb232dee
revision_id: 586c863042bbba3e22d3a7ce94c1cc4dc422aad5
branch_name: refs/heads/master
visit_date: 2022-12-17T18:10:33.548695
revision_date: 2020-08-31T17:19:39
committer_date: 2020-08-31T17:19:39
github_id: 272482194
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 62
extension: py
content:
c = 'a'
print ( "The ASCII value of " + c + " is" , ord(c))
authors: ["noreply@github.com"]
author_id: Sudhapraba.noreply@github.com

Record 11: /quizzes/graphtraversal_practice.py

blob_id: 253aeb248a0697cd79511e0f0c7c3d3d8349dff5
directory_id: 8dae4378bf317d1e3ec514061dcc109e6f60dff3
path: /quizzes/graphtraversal_practice.py
content_id: 59d31ea80848b02398c34b2bd46069fa15b8b129
detected_licenses: []
license_type: no_license
repo_name: bongbaybien/MLND
snapshot_id: 85270e78e11c5af5acba491a6ffd3ebfbb7ddb6e
revision_id: 882e35bade8870a29597d1c431d32e59c8bb0c8c
branch_name: refs/heads/master
visit_date: 2021-01-20T07:53:32.510123
revision_date: 2017-11-20T20:08:06
committer_date: 2017-11-20T20:08:06
github_id: 80651966
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 10175
extension: py
content:
class Node(object):
def __init__(self, value):
self.value = value
self.edges = []
self.visited = False
class Edge(object):
def __init__(self, value, node_from, node_to):
self.value = value
self.node_from = node_from
self.node_to = node_to
# You only need to change code with docs strings that have TODO.
# Specifically: Graph.dfs_helper and Graph.bfs
# New methods have been added to associate node numbers with names
# Specifically: Graph.set_node_names
# and the methods ending in "_names" which will print names instead
# of node numbers
class Graph(object):
def __init__(self, nodes=None, edges=None):
self.nodes = nodes or []
self.edges = edges or []
self.node_names = []
self._node_map = {}
def set_node_names(self, names):
"""The Nth name in names should correspond to node number N.
Node numbers are 0 based (starting at 0).
"""
self.node_names = list(names)
def insert_node(self, new_node_val):
"Insert a new node with value new_node_val"
new_node = Node(new_node_val)
self.nodes.append(new_node)
self._node_map[new_node_val] = new_node
return new_node
def insert_edge(self, new_edge_val, node_from_val, node_to_val):
"Insert a new edge, creating new nodes if necessary"
nodes = {node_from_val: None, node_to_val: None}
for node in self.nodes:
if node.value in nodes:
nodes[node.value] = node
if all(nodes.values()):
break
for node_val in nodes:
nodes[node_val] = nodes[node_val] or self.insert_node(node_val)
node_from = nodes[node_from_val]
node_to = nodes[node_to_val]
new_edge = Edge(new_edge_val, node_from, node_to)
node_from.edges.append(new_edge)
node_to.edges.append(new_edge)
self.edges.append(new_edge)
def get_edge_list(self):
"""Return a list of triples that looks like this:
(Edge Value, From Node, To Node)"""
return [(e.value, e.node_from.value, e.node_to.value)
for e in self.edges]
def get_edge_list_names(self):
"""Return a list of triples that looks like this:
(Edge Value, From Node Name, To Node Name)"""
return [(edge.value,
self.node_names[edge.node_from.value],
self.node_names[edge.node_to.value])
for edge in self.edges]
def get_adjacency_list(self):
"""Return a list of lists.
The indecies of the outer list represent "from" nodes.
Each section in the list will store a list
of tuples that looks like this:
(To Node, Edge Value)"""
max_index = self.find_max_index()
adjacency_list = [[] for _ in range(max_index)]
for edg in self.edges:
from_value, to_value = edg.node_from.value, edg.node_to.value
adjacency_list[from_value].append((to_value, edg.value))
return [a or None for a in adjacency_list] # replace []'s with None
def get_adjacency_list_names(self):
"""Each section in the list will store a list
of tuples that looks like this:
(To Node Name, Edge Value).
Node names should come from the names set
with set_node_names."""
adjacency_list = self.get_adjacency_list()
def convert_to_names(pair, graph=self):
node_number, value = pair
return (graph.node_names[node_number], value)
def map_conversion(adjacency_list_for_node):
if adjacency_list_for_node is None:
return None
return map(convert_to_names, adjacency_list_for_node)
return [map_conversion(adjacency_list_for_node)
for adjacency_list_for_node in adjacency_list]
def get_adjacency_matrix(self):
"""Return a matrix, or 2D list.
Row numbers represent from nodes,
column numbers represent to nodes.
Store the edge values in each spot,
and a 0 if no edge exists."""
max_index = self.find_max_index()
adjacency_matrix = [[0] * (max_index) for _ in range(max_index)]
for edg in self.edges:
from_index, to_index = edg.node_from.value, edg.node_to.value
adjacency_matrix[from_index][to_index] = edg.value
return adjacency_matrix
def find_max_index(self):
"""Return the highest found node number
Or the length of the node names if set with set_node_names()."""
if len(self.node_names) > 0:
return len(self.node_names)
max_index = -1
if len(self.nodes):
for node in self.nodes:
if node.value > max_index:
max_index = node.value
return max_index
def find_node(self, node_number):
"Return the node with value node_number or None"
return self._node_map.get(node_number)
def _clear_visited(self):
for node in self.nodes:
node.visited = False
def dfs_helper(self, start_node):
"""TODO: Write the helper function for a recursive implementation
of Depth First Search iterating through a node's edges. The
output should be a list of numbers corresponding to the
values of the traversed nodes.
ARGUMENTS: start_node is the starting Node
MODIFIES: the value of the visited property of nodes in self.nodes
RETURN: a list of the traversed node values (integers).
"""
        ret_list = [start_node.value]
        # mark this node as visited (the original wrote
        # self.start_node.visited, which raises AttributeError)
        start_node.visited = True
        # find the nodes reachable along edges leaving this node
        adj_nodes = [edge.node_to for edge in start_node.edges if edge.node_from == start_node]
        # recurse into each unvisited neighbor
        for node in adj_nodes:
            if not node.visited:
                ret_list.extend(self.dfs_helper(node))
        return ret_list
def dfs(self, start_node_num):
"""Outputs a list of numbers corresponding to the traversed nodes
in a Depth First Search.
ARGUMENTS: start_node_num is the starting node number (integer)
MODIFIES: the value of the visited property of nodes in self.nodes
RETURN: a list of the node values (integers)."""
self._clear_visited()
start_node = self.find_node(start_node_num)
return self.dfs_helper(start_node)
def dfs_names(self, start_node_num):
"""Return the results of dfs with numbers converted to names."""
return [self.node_names[num] for num in self.dfs(start_node_num)]
def bfs(self, start_node_num):
"""TODO: Create an iterative implementation of Breadth First Search
iterating through a node's edges. The output should be a list of
numbers corresponding to the traversed nodes.
ARGUMENTS: start_node_num is the node number (integer)
MODIFIES: the value of the visited property of nodes in self.nodes
RETURN: a list of the node values (integers)."""
        node = self.find_node(start_node_num)
        self._clear_visited()
        # iterative BFS with an explicit FIFO queue; the original compared
        # Node objects against integer values and trimmed the result with
        # ret_list[:-1], which only happened to work for this demo graph
        queue = [node]
        node.visited = True
        ret_list = []
        while queue:
            current = queue.pop(0)
            ret_list.append(current.value)
            for edge in current.edges:
                neighbor = edge.node_to
                if edge.node_from == current and not neighbor.visited:
                    neighbor.visited = True
                    queue.append(neighbor)
        return ret_list
def bfs_names(self, start_node_num):
"""Return the results of bfs with numbers converted to names."""
return [self.node_names[num] for num in self.bfs(start_node_num)]
graph = Graph()
# You do not need to change anything below this line.
# You only need to implement Graph.dfs_helper and Graph.bfs
graph.set_node_names(('Mountain View', # 0
'San Francisco', # 1
'London', # 2
'Shanghai', # 3
'Berlin', # 4
'Sao Paolo', # 5
'Bangalore')) # 6
graph.insert_edge(51, 0, 1) # MV <-> SF
graph.insert_edge(51, 1, 0) # SF <-> MV
graph.insert_edge(9950, 0, 3) # MV <-> Shanghai
graph.insert_edge(9950, 3, 0) # Shanghai <-> MV
graph.insert_edge(10375, 0, 5) # MV <-> Sao Paolo
graph.insert_edge(10375, 5, 0) # Sao Paolo <-> MV
graph.insert_edge(9900, 1, 3) # SF <-> Shanghai
graph.insert_edge(9900, 3, 1) # Shanghai <-> SF
graph.insert_edge(9130, 1, 4) # SF <-> Berlin
graph.insert_edge(9130, 4, 1) # Berlin <-> SF
graph.insert_edge(9217, 2, 3) # London <-> Shanghai
graph.insert_edge(9217, 3, 2) # Shanghai <-> London
graph.insert_edge(932, 2, 4) # London <-> Berlin
graph.insert_edge(932, 4, 2) # Berlin <-> London
graph.insert_edge(9471, 2, 5) # London <-> Sao Paolo
graph.insert_edge(9471, 5, 2) # Sao Paolo <-> London
# (6) 'Bangalore' is intentionally disconnected (no edges)
# for this problem and should produce None in the
# Adjacency List, etc.
import pprint
pp = pprint.PrettyPrinter(indent=2)
print("Edge List")
pp.pprint(graph.get_edge_list_names())
print("\nAdjacency List")
pp.pprint(graph.get_adjacency_list_names())
print("\nAdjacency Matrix")
pp.pprint(graph.get_adjacency_matrix())
print("\nDepth First Search")
pp.pprint(graph.dfs_names(2))
# Should print:
# Depth First Search
# ['London', 'Shanghai', 'Mountain View', 'San Francisco', 'Berlin', 'Sao Paolo']
print("\nBreadth First Search")
pp.pprint(graph.bfs_names(2))
# test error reporting
# pp.pprint(['Sao Paolo', 'Mountain View', 'San Francisco', 'London', 'Shanghai', 'Berlin'])
# Should print:
# Breadth First Search
# ['London', 'Shanghai', 'Berlin', 'Sao Paolo', 'Mountain View', 'San Francisco']
authors: ["myhoangnguyen@gmail.com"]
author_id: myhoangnguyen@gmail.com

Record 12: /DiscordBot.py

blob_id: da5606d8b80d3dca999333c24fad9c9c7ce317d1
directory_id: 29214e439ac6293605871774e3321c0a4045f689
path: /DiscordBot.py
content_id: e3d95e2519f4223688fcc3ecb8a8f02aac3efb81
detected_licenses: []
license_type: no_license
repo_name: Richard-Tyrrell/DiscordBot
snapshot_id: f5edffeedacbd939114469f0f530a13b380cf1d1
revision_id: 01788b350e7605aeb4265d318dca71c7a6d549ec
branch_name: refs/heads/main
visit_date: 2023-03-11T03:04:12.122832
revision_date: 2021-02-23T05:43:34
committer_date: 2021-02-23T05:43:34
github_id: 338903536
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 8101
extension: py
content:
import random
import discord
from discord.ext import commands
urls = {"php": "https://www.php.net/docs.php",
"bash": "https://ryanstutorials.net/bash-scripting-tutorial/\nhttps://www.youtube.com/watch?v=oxuRxtrO2Ag\nhttps://tldp.org/LDP/Bash-Beginners-Guide/html/Bash-Beginners-Guide.html\nhttps://devhints.io/bash\nhttps://www.shellscript.sh/",
"mySQL":"https://dev.mysql.com/doc/refman/8.0/en/language-structure.html",
"python":"https://docs.python.org/3/",
"discord":"https://discordpy.readthedocs.io/en/latest/#manuals\nhttps://github.com/Rapptz/discord.py/tree/master/examples\nhttps://discord.com/invite/r3sSKJJ\nhttps://www.techwithtim.net/tutorials/discord-py/hosting-a-discord-bot-for-free/",
"c":"First link is to Lib Gen\nhttp://libgen.is/book/index.php?md5=556E6BEE561B776C95C6872C441BAAD1\nSecond link is a link directly to Steph's message with a download\nhttps://discord.com/channels/751791015630995496/751791015630995499/802322088894660629",
"commands":"-bash\n-c\n-commands\n-disc\n-linux\n-php\n-python\n-playlist\n-njit\n-receipt\n-libgen",
"linux":"https://ubuntu.com/download \nhttps://getfedora.org/",
"Playlist":"https://open.spotify.com/playlist/576j38Yts0TeQGzOpPHvTm?si=ugUeJNk3T26mYJLmCznP6g",
"njit":"Fuck NJIT.\nhttps://twitter.com/NJIT/status/971122883815763968",
"receipt":"Don't let any of the school administration victimize you for things you didn't do. Hold everyone in the same position accountable for the things they say - keep the fucking receipts and save the fucking recordings. Above all else, never forget that the issues at NJIT are not just a result of the student population\nhttps://discord.com/channels/751791015630995496/751791015630995499/791839370440343562",
"LibGen":"Here's a link to Steph's message explaing how to use Lib Gen to get your books for free. Don't ever overpay for something that's free.\nMake sure you select the right format\nhttps://discord.com/channels/751791015630995496/751791015630995499/801991957068513310"}#dict of all the links
cdPhrases=['Fuck off.',
'My dude chill out',
'I *JUST* posted those links.',
'Listen it aint hardwork but it is honest work.',
'I am going to delete canvas if you bug me again.',
'That is it you will not like me when I am angry.',
'Hail Hydra! You prick!',
'Heil BASSEL! OUR LORD AND SAVIOR!',
'Somebody get this guy a body bag for when I am done with him.'] #Spam phrases for those dickheads trying to abuse the bot
client=commands.Bot(command_prefix='-',case_insensitive=True)#this is what the bot will be called ie bot or client,also not case sensitivity
@client.event
async def on_ready():
await client.change_presence(activity=discord.Game("Thinking about putting grades in."))#Sets the game being played to this custom message, bot status is Online always
print('My guy I am busy deleting Canvas.')
#Gives the links for the command "-php"
@client.command(name='php')
@commands.cooldown(1, 60, commands.BucketType.user)
async def php(ctx):
    if (ctx.author.bot): return
    response=urls["php"]
    await ctx.send(response)
#Cooldown error handling
@php.error
async def php_error(ctx, error):
    phrase=random.choice(cdPhrases)
    await ctx.send(phrase)
#Gives the links for the command "-mySQL"
@client.command(name='mySQL')
@commands.cooldown(1, 60, commands.BucketType.user)
async def mySQL(ctx):
    if (ctx.author.bot): return
    response=urls["mySQL"]
    await ctx.send(response)
#Cooldown error handling
@mySQL.error
async def mySQL_error(ctx, error):
    phrase=random.choice(cdPhrases)
    await ctx.send(phrase)
#Gives the links for the command "-python"
@client.command(name='python')
@commands.cooldown(1, 60, commands.BucketType.user)
async def python(ctx):
    if (ctx.author.bot): return
    response=urls["python"]
    await ctx.send(response)
#Cooldown error handling
@python.error
async def python_error(ctx, error):
    phrase=random.choice(cdPhrases)
    await ctx.send(phrase)
#Gives the links for the command "-bash"
@client.command(name='bash')
@commands.cooldown(1, 60, commands.BucketType.user)
async def bash(ctx):
    if (ctx.author.bot): return
    response=urls["bash"]
    await ctx.send(response)
#Cooldown error handling
@bash.error
async def bash_error(ctx, error):
    phrase=random.choice(cdPhrases)
    await ctx.send(phrase)
#Gives the links for the command "-c"
@client.command(name='c')
@commands.cooldown(1, 60, commands.BucketType.user)
async def c(ctx):
    if (ctx.author.bot): return
    response=urls["c"]
    await ctx.send(response)
#Cooldown error handling
@c.error
async def c_error(ctx, error):
    phrase=random.choice(cdPhrases)
    await ctx.send(phrase)
#Gives the links for the command "-disc"
@client.command(name='disc')
@commands.cooldown(1, 60, commands.BucketType.user)
async def disc(ctx):
    if (ctx.author.bot): return
    response=urls["discord"] #This gives a ton of resources for discord.py
    await ctx.send(response)
#Cooldown error handling
@disc.error
async def disc_error(ctx, error):
    phrase=random.choice(cdPhrases)
    await ctx.send(phrase)
#Gives the links for the command "-linux"
@client.command(name='linux')
@commands.cooldown(1, 60, commands.BucketType.user)
async def linux(ctx):
    if (ctx.author.bot): return
    response=urls["linux"]
    await ctx.send(response)
#Cooldown error handling
@linux.error
async def linux_error(ctx, error):
    phrase=random.choice(cdPhrases)
    await ctx.send(phrase)
#Gives the links for the command "-playlist"
@client.command(name='playlist')
@commands.cooldown(1, 60, commands.BucketType.user)
async def playlist(ctx):  # renamed from a duplicate `linux` definition
    if (ctx.author.bot): return
    response=urls["Playlist"]
    await ctx.send(response)
#Cooldown error handling
@playlist.error
async def playlist_error(ctx, error):
    phrase=random.choice(cdPhrases)
    await ctx.send(phrase)
#Gives the links for the command "-njit"
@client.command(name='NJIT')
@commands.cooldown(1, 60, commands.BucketType.user)
async def njit(ctx):
    if (ctx.author.bot): return
    response=urls["njit"]
    await ctx.send(response)
#Cooldown error handling
@njit.error
async def njit_error(ctx, error):
    phrase=random.choice(cdPhrases)
    await ctx.send(phrase)
#Gives the links for the command "-receipt"
@client.command(name='receipt')
@commands.cooldown(1, 60, commands.BucketType.user)
async def receipt(ctx):
    if (ctx.author.bot): return
    response=urls["receipt"]
    await ctx.send(response)
#Cooldown error handling
@receipt.error
async def receipt_error(ctx, error):
    phrase=random.choice(cdPhrases)
    await ctx.send(phrase)
#Gives the links for the command "-libgen"
@client.command(name='libgen')
@commands.cooldown(1, 60, commands.BucketType.user)
async def libgen(ctx):  # renamed from a duplicate `receipt` definition
    if (ctx.author.bot): return
    response=urls["LibGen"]
    await ctx.send(response)
#Cooldown error handling
@libgen.error
async def libgen_error(ctx, error):
    phrase=random.choice(cdPhrases)
    await ctx.send(phrase)
#DMs the user a list of commands to use
@client.command(name='commands')
async def list_commands(ctx):  # renamed from on_message to avoid confusion with the discord.py event
    if (ctx.author.bot): return
    houseKeeping='Each command has resources for that language or topic. The following commands are available to you:\n '
    response=urls["commands"]#Dict entry for all the commands
    await ctx.author.send(houseKeeping+response)
@client.event #Error handling for invalid commands; tells the user how to get the list of valid commands
async def on_command_error(ctx, error):
    if isinstance(error, commands.CommandNotFound):
        await ctx.send('That command is invalid. Please use -commands to receive a DM listing the valid commands.')
client.run('Bot Token removed for public viewing')#Bot token goes here
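
A hedged refactor sketch (not part of the original bot): the nearly identical handlers above could be replaced by ones generated from the urls dict, assuming discord.py's dynamic registration via client.command(name=...). The command names below are illustrative:

def register_link_command(name, key):
    @client.command(name=name)
    @commands.cooldown(1, 60, commands.BucketType.user)
    async def handler(ctx):
        if ctx.author.bot:
            return
        await ctx.send(urls[key])

for name, key in [('php', 'php'), ('bash', 'bash'), ('disc', 'discord')]:
    register_link_command(name, key)
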
authors: ["noreply@github.com"]
author_id: Richard-Tyrrell.noreply@github.com

Record 13: /nahida_LL_end_10.py

blob_id: 45992b9ea477d620e303825fd506022644b7690e
directory_id: 28a131b8a1d4d96b571571e932cf5da6a4c7801d
path: /nahida_LL_end_10.py
content_id: e1899df424712f74cc2435f8b69c97fb70dbf68c
detected_licenses: []
license_type: no_license
repo_name: Nahida-Jannat/project-linked-list
snapshot_id: 4cdcb2d6159b9cd931398c92924965e2812d6355
revision_id: 8556102eff9431f65c87404240c347bb43e6d467
branch_name: refs/heads/master
visit_date: 2023-06-13T00:54:54.355159
revision_date: 2021-07-09T16:20:24
committer_date: 2021-07-09T16:20:24
github_id: 384088192
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 933
extension: py
content:
# Linked list implementation
# Add value at end in linked list
# NODE --> Data | Next
class Node:
def __init__(self, data = None, next = None):
self.data = data
self.next = next
class LinkedList:
    def __init__(self):
        self.head = Node()  # dummy head node; real data starts at head.next
    def display(self):
        # skip the dummy head (the original's `self.head is None` check was
        # never true, and it printed the dummy's None value first)
        current_node = self.head.next
        if current_node is None:
            print("Empty")
            return
        info_str = ''
        while current_node:
            info_str = info_str + str(current_node.data) + '-->'
            current_node = current_node.next
        print(info_str)
def append_at_end(self, data):
current_node = self.head
while current_node.next:
current_node= current_node.next
current_node.next = Node(data, None)
if __name__ == '__main__':
ll = LinkedList()
# item added at end
ll.append_at_end(100)
ll.append_at_end(200)
ll.append_at_end(300)
ll.append_at_end(400)
ll.display()
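
With the dummy head skipped in display(), the demo above should print:

100-->200-->300-->400-->
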
authors: ["nahidajannat28@gmail.com"]
author_id: nahidajannat28@gmail.com

Record 14: /Applications/Raspi-Trek/Raspi-Trek/bin/pip2.7

blob_id: ab55b92d33494a8c73eb619ba5a67fe3a1fd13f6
directory_id: 11c211dc48b6b33c2ad2cf0e6e7116ace526594b
path: /Applications/Raspi-Trek/Raspi-Trek/bin/pip2.7
content_id: 8e9f871274acf108b21f540b64c79d5d9f3abf04
detected_licenses: []
license_type: no_license
repo_name: thecloudist/awareness
snapshot_id: 1f481187a1e563069338aa68631f3829b0e4ce8f
revision_id: 5d55adb5921b72a54c30ca8fc235d6f6c09156a7
branch_name: refs/heads/master
visit_date: 2020-09-27T03:03:16.651252
revision_date: 2016-10-18T04:38:00
committer_date: 2016-10-18T04:38:00
github_id: 67518765
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 253
extension: 7
content:
#!/home/pi/Awareness/Applications/Raspi-Trek/Raspi-Trek/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from pip import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
authors: ["salvideoguy@gmail.com"]
author_id: salvideoguy@gmail.com

Record 15: /LI/lib/python3.8/site-packages/sklearn/cluster/_kmeans.py

blob_id: 254775a06265133704a20d0e692dfed518932ec8
directory_id: a1615563bb9b124e16f4163f660d677f3224553c
path: /LI/lib/python3.8/site-packages/sklearn/cluster/_kmeans.py
content_id: d10dfba0d08b386d76c1a0437f97daf76dc5cd9e
detected_licenses: ["MIT"]
license_type: permissive
repo_name: honeybhardwaj/Language_Identification
snapshot_id: 2a247d98095bd56c1194a34a556ddfadf6f001e5
revision_id: 1b74f898be5402b0c1a13debf595736a3f57d7e7
branch_name: refs/heads/main
visit_date: 2023-04-19T16:22:05.231818
revision_date: 2021-05-15T18:59:45
committer_date: 2021-05-15T18:59:45
github_id: 351470447
star_events_count: 5
fork_events_count: 4
gha_license_id: MIT
gha_event_created_at: 2021-05-15T18:59:46
gha_created_at: 2021-03-25T14:42:26
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 78853
extension: py
content:
"""K-means clustering."""
# Authors: Gael Varoquaux <gael.varoquaux@normalesup.org>
# Thomas Rueckstiess <ruecksti@in.tum.de>
# James Bergstra <james.bergstra@umontreal.ca>
# Jan Schlueter <scikit-learn@jan-schlueter.de>
# Nelle Varoquaux
# Peter Prettenhofer <peter.prettenhofer@gmail.com>
# Olivier Grisel <olivier.grisel@ensta.org>
# Mathieu Blondel <mathieu@mblondel.org>
# Robert Layton <robertlayton@gmail.com>
# License: BSD 3 clause
import warnings
import numpy as np
import scipy.sparse as sp
from threadpoolctl import threadpool_limits
from threadpoolctl import threadpool_info
from ..base import BaseEstimator, ClusterMixin, TransformerMixin
from ..metrics.pairwise import euclidean_distances
from ..utils.extmath import row_norms, stable_cumsum
from ..utils.sparsefuncs_fast import assign_rows_csr
from ..utils.sparsefuncs import mean_variance_axis
from ..utils.validation import _deprecate_positional_args
from ..utils import check_array
from ..utils import gen_batches
from ..utils import check_random_state
from ..utils import deprecated
from ..utils.validation import check_is_fitted, _check_sample_weight
from ..utils._openmp_helpers import _openmp_effective_n_threads
from ..exceptions import ConvergenceWarning
from ._k_means_fast import CHUNK_SIZE
from ._k_means_fast import _inertia_dense
from ._k_means_fast import _inertia_sparse
from ._k_means_fast import _mini_batch_update_csr
from ._k_means_lloyd import lloyd_iter_chunked_dense
from ._k_means_lloyd import lloyd_iter_chunked_sparse
from ._k_means_elkan import init_bounds_dense
from ._k_means_elkan import init_bounds_sparse
from ._k_means_elkan import elkan_iter_chunked_dense
from ._k_means_elkan import elkan_iter_chunked_sparse
###############################################################################
# Initialization heuristic
def _kmeans_plusplus(X, n_clusters, x_squared_norms,
random_state, n_local_trials=None):
"""Computational component for initialization of n_clusters by
k-means++. Prior validation of data is assumed.
Parameters
----------
X : {ndarray, sparse matrix} of shape (n_samples, n_features)
The data to pick seeds for.
n_clusters : int
The number of seeds to choose.
x_squared_norms : ndarray of shape (n_samples,)
Squared Euclidean norm of each data point.
random_state : RandomState instance
The generator used to initialize the centers.
See :term:`Glossary <random_state>`.
n_local_trials : int, default=None
The number of seeding trials for each center (except the first),
of which the one reducing inertia the most is greedily chosen.
Set to None to make the number of trials depend logarithmically
on the number of seeds (2+log(k)); this is the default.
Returns
-------
centers : ndarray of shape (n_clusters, n_features)
        The initial centers for k-means.
indices : ndarray of shape (n_clusters,)
The index location of the chosen centers in the data array X. For a
given index and center, X[index] = center.
"""
n_samples, n_features = X.shape
centers = np.empty((n_clusters, n_features), dtype=X.dtype)
# Set the number of local seeding trials if none is given
if n_local_trials is None:
# This is what Arthur/Vassilvitskii tried, but did not report
# specific results for other than mentioning in the conclusion
# that it helped.
n_local_trials = 2 + int(np.log(n_clusters))
# Pick first center randomly and track index of point
center_id = random_state.randint(n_samples)
indices = np.full(n_clusters, -1, dtype=int)
if sp.issparse(X):
centers[0] = X[center_id].toarray()
else:
centers[0] = X[center_id]
indices[0] = center_id
# Initialize list of closest distances and calculate current potential
closest_dist_sq = euclidean_distances(
centers[0, np.newaxis], X, Y_norm_squared=x_squared_norms,
squared=True)
current_pot = closest_dist_sq.sum()
# Pick the remaining n_clusters-1 points
for c in range(1, n_clusters):
# Choose center candidates by sampling with probability proportional
# to the squared distance to the closest existing center
rand_vals = random_state.random_sample(n_local_trials) * current_pot
candidate_ids = np.searchsorted(stable_cumsum(closest_dist_sq),
rand_vals)
# XXX: numerical imprecision can result in a candidate_id out of range
np.clip(candidate_ids, None, closest_dist_sq.size - 1,
out=candidate_ids)
# Compute distances to center candidates
distance_to_candidates = euclidean_distances(
X[candidate_ids], X, Y_norm_squared=x_squared_norms, squared=True)
# update closest distances squared and potential for each candidate
np.minimum(closest_dist_sq, distance_to_candidates,
out=distance_to_candidates)
candidates_pot = distance_to_candidates.sum(axis=1)
# Decide which candidate is the best
best_candidate = np.argmin(candidates_pot)
current_pot = candidates_pot[best_candidate]
closest_dist_sq = distance_to_candidates[best_candidate]
best_candidate = candidate_ids[best_candidate]
# Permanently add best center candidate found in local tries
if sp.issparse(X):
centers[c] = X[best_candidate].toarray()
else:
centers[c] = X[best_candidate]
indices[c] = best_candidate
return centers, indices
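# --- Editor's note (not in upstream scikit-learn): a hypothetical direct
# call of this private seeding helper, which is normally reached via
# KMeans(init='k-means++'); argument shapes follow the docstring above.
#
#   rng = np.random.RandomState(0)
#   X = rng.rand(100, 2)
#   centers, indices = _kmeans_plusplus(X, n_clusters=3,
#                                       x_squared_norms=(X ** 2).sum(axis=1),
#                                       random_state=rng)
#   indices  # row indices in X of the 3 chosen seeds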
###############################################################################
# K-means batch estimation by EM (expectation maximization)
def _tolerance(X, tol):
"""Return a tolerance which is independent of the dataset."""
if tol == 0:
return 0
if sp.issparse(X):
variances = mean_variance_axis(X, axis=0)[1]
else:
variances = np.var(X, axis=0)
return np.mean(variances) * tol
@_deprecate_positional_args
def k_means(X, n_clusters, *, sample_weight=None, init='k-means++',
precompute_distances='deprecated', n_init=10, max_iter=300,
verbose=False, tol=1e-4, random_state=None, copy_x=True,
n_jobs='deprecated', algorithm="auto", return_n_iter=False):
"""K-means clustering algorithm.
Read more in the :ref:`User Guide <k_means>`.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The observations to cluster. It must be noted that the data
will be converted to C ordering, which will cause a memory copy
if the given data is not C-contiguous.
n_clusters : int
The number of clusters to form as well as the number of
centroids to generate.
sample_weight : array-like of shape (n_samples,), default=None
The weights for each observation in X. If None, all observations
are assigned equal weight.
init : {'k-means++', 'random'}, callable or array-like of shape \
(n_clusters, n_features), default='k-means++'
Method for initialization:
        'k-means++' : selects initial cluster centers for k-means
clustering in a smart way to speed up convergence. See section
Notes in k_init for more details.
'random': choose `n_clusters` observations (rows) at random from data
for the initial centroids.
If an array is passed, it should be of shape (n_clusters, n_features)
and gives the initial centers.
If a callable is passed, it should take arguments X, n_clusters and a
random state and return an initialization.
precompute_distances : {'auto', True, False}
Precompute distances (faster but takes more memory).
'auto' : do not precompute distances if n_samples * n_clusters > 12
million. This corresponds to about 100MB overhead per job using
double precision.
True : always precompute distances
False : never precompute distances
.. deprecated:: 0.23
'precompute_distances' was deprecated in version 0.23 and will be
removed in 1.0 (renaming of 0.25). It has no effect.
n_init : int, default=10
Number of time the k-means algorithm will be run with different
centroid seeds. The final results will be the best output of
n_init consecutive runs in terms of inertia.
max_iter : int, default=300
Maximum number of iterations of the k-means algorithm to run.
verbose : bool, default=False
Verbosity mode.
tol : float, default=1e-4
Relative tolerance with regards to Frobenius norm of the difference
in the cluster centers of two consecutive iterations to declare
convergence.
random_state : int, RandomState instance or None, default=None
Determines random number generation for centroid initialization. Use
an int to make the randomness deterministic.
See :term:`Glossary <random_state>`.
copy_x : bool, default=True
When pre-computing distances it is more numerically accurate to center
the data first. If copy_x is True (default), then the original data is
not modified. If False, the original data is modified, and put back
before the function returns, but small numerical differences may be
introduced by subtracting and then adding the data mean. Note that if
the original data is not C-contiguous, a copy will be made even if
copy_x is False. If the original data is sparse, but not in CSR format,
a copy will be made even if copy_x is False.
n_jobs : int, default=None
The number of OpenMP threads to use for the computation. Parallelism is
sample-wise on the main cython loop which assigns each sample to its
closest center.
``None`` or ``-1`` means using all processors.
.. deprecated:: 0.23
``n_jobs`` was deprecated in version 0.23 and will be removed in
1.0 (renaming of 0.25).
algorithm : {"auto", "full", "elkan"}, default="auto"
K-means algorithm to use. The classical EM-style algorithm is "full".
The "elkan" variation is more efficient on data with well-defined
clusters, by using the triangle inequality. However it's more memory
intensive due to the allocation of an extra array of shape
(n_samples, n_clusters).
For now "auto" (kept for backward compatibiliy) chooses "elkan" but it
might change in the future for a better heuristic.
return_n_iter : bool, default=False
Whether or not to return the number of iterations.
Returns
-------
centroid : ndarray of shape (n_clusters, n_features)
Centroids found at the last iteration of k-means.
label : ndarray of shape (n_samples,)
label[i] is the code or index of the centroid the
i'th observation is closest to.
inertia : float
The final value of the inertia criterion (sum of squared distances to
the closest centroid for all observations in the training set).
best_n_iter : int
Number of iterations corresponding to the best results.
Returned only if `return_n_iter` is set to True.
"""
est = KMeans(
n_clusters=n_clusters, init=init, n_init=n_init, max_iter=max_iter,
verbose=verbose, precompute_distances=precompute_distances, tol=tol,
random_state=random_state, copy_x=copy_x, n_jobs=n_jobs,
algorithm=algorithm
).fit(X, sample_weight=sample_weight)
if return_n_iter:
return est.cluster_centers_, est.labels_, est.inertia_, est.n_iter_
else:
return est.cluster_centers_, est.labels_, est.inertia_
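# --- Editor's note (not in upstream scikit-learn): a minimal usage sketch
# of the functional k_means API above, with two synthetic blobs.
#
#   rng = np.random.RandomState(0)
#   X = np.vstack([rng.randn(50, 2) + 5, rng.randn(50, 2) - 5])
#   centers, labels, inertia = k_means(X, n_clusters=2, random_state=0)
#   centers.shape, labels.shape  # (2, 2), (100,)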
def _kmeans_single_elkan(X, sample_weight, centers_init, max_iter=300,
verbose=False, x_squared_norms=None, tol=1e-4,
n_threads=1):
"""A single run of k-means elkan, assumes preparation completed prior.
Parameters
----------
X : {ndarray, sparse matrix} of shape (n_samples, n_features)
The observations to cluster. If sparse matrix, must be in CSR format.
sample_weight : array-like of shape (n_samples,)
The weights for each observation in X.
centers_init : ndarray of shape (n_clusters, n_features)
The initial centers.
max_iter : int, default=300
Maximum number of iterations of the k-means algorithm to run.
verbose : bool, default=False
Verbosity mode.
x_squared_norms : array-like, default=None
Precomputed x_squared_norms.
tol : float, default=1e-4
Relative tolerance with regards to Frobenius norm of the difference
in the cluster centers of two consecutive iterations to declare
convergence.
It's not advised to set `tol=0` since convergence might never be
declared due to rounding errors. Use a very small number instead.
n_threads : int, default=1
The number of OpenMP threads to use for the computation. Parallelism is
sample-wise on the main cython loop which assigns each sample to its
closest center.
Returns
-------
centroid : ndarray of shape (n_clusters, n_features)
Centroids found at the last iteration of k-means.
label : ndarray of shape (n_samples,)
label[i] is the code or index of the centroid the
i'th observation is closest to.
inertia : float
The final value of the inertia criterion (sum of squared distances to
the closest centroid for all observations in the training set).
n_iter : int
Number of iterations run.
"""
n_samples = X.shape[0]
n_clusters = centers_init.shape[0]
# Buffers to avoid new allocations at each iteration.
centers = centers_init
centers_new = np.zeros_like(centers)
weight_in_clusters = np.zeros(n_clusters, dtype=X.dtype)
labels = np.full(n_samples, -1, dtype=np.int32)
labels_old = labels.copy()
center_half_distances = euclidean_distances(centers) / 2
distance_next_center = np.partition(np.asarray(center_half_distances),
kth=1, axis=0)[1]
upper_bounds = np.zeros(n_samples, dtype=X.dtype)
lower_bounds = np.zeros((n_samples, n_clusters), dtype=X.dtype)
center_shift = np.zeros(n_clusters, dtype=X.dtype)
if sp.issparse(X):
init_bounds = init_bounds_sparse
elkan_iter = elkan_iter_chunked_sparse
_inertia = _inertia_sparse
else:
init_bounds = init_bounds_dense
elkan_iter = elkan_iter_chunked_dense
_inertia = _inertia_dense
init_bounds(X, centers, center_half_distances,
labels, upper_bounds, lower_bounds)
strict_convergence = False
for i in range(max_iter):
elkan_iter(X, sample_weight, centers, centers_new,
weight_in_clusters, center_half_distances,
distance_next_center, upper_bounds, lower_bounds,
labels, center_shift, n_threads)
# compute new pairwise distances between centers and closest other
# center of each center for next iterations
center_half_distances = euclidean_distances(centers_new) / 2
distance_next_center = np.partition(
np.asarray(center_half_distances), kth=1, axis=0)[1]
if verbose:
inertia = _inertia(X, sample_weight, centers, labels)
print(f"Iteration {i}, inertia {inertia}")
centers, centers_new = centers_new, centers
if np.array_equal(labels, labels_old):
# First check the labels for strict convergence.
if verbose:
print(f"Converged at iteration {i}: strict convergence.")
strict_convergence = True
break
else:
# No strict convergence, check for tol based convergence.
center_shift_tot = (center_shift**2).sum()
if center_shift_tot <= tol:
if verbose:
print(f"Converged at iteration {i}: center shift "
f"{center_shift_tot} within tolerance {tol}.")
break
labels_old[:] = labels
if not strict_convergence:
# rerun E-step so that predicted labels match cluster centers
elkan_iter(X, sample_weight, centers, centers, weight_in_clusters,
center_half_distances, distance_next_center,
upper_bounds, lower_bounds, labels, center_shift,
n_threads, update_centers=False)
inertia = _inertia(X, sample_weight, centers, labels)
return labels, inertia, centers, i + 1
def _kmeans_single_lloyd(X, sample_weight, centers_init, max_iter=300,
verbose=False, x_squared_norms=None, tol=1e-4,
n_threads=1):
"""A single run of k-means lloyd, assumes preparation completed prior.
Parameters
----------
X : {ndarray, sparse matrix} of shape (n_samples, n_features)
The observations to cluster. If sparse matrix, must be in CSR format.
sample_weight : ndarray of shape (n_samples,)
The weights for each observation in X.
centers_init : ndarray of shape (n_clusters, n_features)
The initial centers.
max_iter : int, default=300
Maximum number of iterations of the k-means algorithm to run.
verbose : bool, default=False
Verbosity mode
x_squared_norms : ndarray of shape (n_samples,), default=None
Precomputed x_squared_norms.
tol : float, default=1e-4
Relative tolerance with regards to Frobenius norm of the difference
in the cluster centers of two consecutive iterations to declare
convergence.
It's not advised to set `tol=0` since convergence might never be
declared due to rounding errors. Use a very small number instead.
n_threads : int, default=1
The number of OpenMP threads to use for the computation. Parallelism is
sample-wise on the main cython loop which assigns each sample to its
closest center.
Returns
-------
centroid : ndarray of shape (n_clusters, n_features)
Centroids found at the last iteration of k-means.
label : ndarray of shape (n_samples,)
label[i] is the code or index of the centroid the
i'th observation is closest to.
inertia : float
The final value of the inertia criterion (sum of squared distances to
the closest centroid for all observations in the training set).
n_iter : int
Number of iterations run.
"""
n_clusters = centers_init.shape[0]
# Buffers to avoid new allocations at each iteration.
centers = centers_init
centers_new = np.zeros_like(centers)
labels = np.full(X.shape[0], -1, dtype=np.int32)
labels_old = labels.copy()
weight_in_clusters = np.zeros(n_clusters, dtype=X.dtype)
center_shift = np.zeros(n_clusters, dtype=X.dtype)
if sp.issparse(X):
lloyd_iter = lloyd_iter_chunked_sparse
_inertia = _inertia_sparse
else:
lloyd_iter = lloyd_iter_chunked_dense
_inertia = _inertia_dense
strict_convergence = False
# Threadpoolctl context to limit the number of threads in second level of
    # nested parallelism (i.e. BLAS) to avoid oversubscription.
with threadpool_limits(limits=1, user_api="blas"):
for i in range(max_iter):
lloyd_iter(X, sample_weight, x_squared_norms, centers, centers_new,
weight_in_clusters, labels, center_shift, n_threads)
if verbose:
inertia = _inertia(X, sample_weight, centers, labels)
print(f"Iteration {i}, inertia {inertia}.")
centers, centers_new = centers_new, centers
if np.array_equal(labels, labels_old):
# First check the labels for strict convergence.
if verbose:
print(f"Converged at iteration {i}: strict convergence.")
strict_convergence = True
break
else:
# No strict convergence, check for tol based convergence.
center_shift_tot = (center_shift**2).sum()
if center_shift_tot <= tol:
if verbose:
print(f"Converged at iteration {i}: center shift "
f"{center_shift_tot} within tolerance {tol}.")
break
labels_old[:] = labels
if not strict_convergence:
# rerun E-step so that predicted labels match cluster centers
lloyd_iter(X, sample_weight, x_squared_norms, centers, centers,
weight_in_clusters, labels, center_shift, n_threads,
update_centers=False)
inertia = _inertia(X, sample_weight, centers, labels)
return labels, inertia, centers, i + 1
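# For reference, a single Lloyd iteration in plain NumPy (an illustrative
# sketch assuming dense X and unit sample weights; the function above runs
# the same E-step/M-step through the chunked, threaded Cython kernels):
def _lloyd_iteration_sketch(X, centers):
    """Return (labels, new_centers) after one assignment/update pass."""
    # E-step: assign every sample to its nearest center.
    sq_dists = ((X[:, None, :] - centers[None, :, :]) ** 2).sum(axis=2)
    labels = sq_dists.argmin(axis=1)
    # M-step: move each center to the mean of its assigned samples; empty
    # clusters keep their previous position in this sketch.
    new_centers = centers.copy()
    for k in range(centers.shape[0]):
        members = labels == k
        if members.any():
            new_centers[k] = X[members].mean(axis=0)
    return labels, new_centers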
def _labels_inertia(X, sample_weight, x_squared_norms, centers,
n_threads=None):
"""E step of the K-means EM algorithm.
Compute the labels and the inertia of the given samples and centers.
Parameters
----------
X : {ndarray, sparse matrix} of shape (n_samples, n_features)
The input samples to assign to the labels. If sparse matrix, must
be in CSR format.
sample_weight : ndarray of shape (n_samples,)
The weights for each observation in X.
x_squared_norms : ndarray of shape (n_samples,)
Precomputed squared euclidean norm of each data point, to speed up
computations.
centers : ndarray of shape (n_clusters, n_features)
The cluster centers.
n_threads : int, default=None
The number of OpenMP threads to use for the computation. Parallelism is
sample-wise on the main cython loop which assigns each sample to its
closest center.
Returns
-------
labels : ndarray of shape (n_samples,)
The resulting assignment.
inertia : float
Sum of squared distances of samples to their closest cluster center.
"""
n_samples = X.shape[0]
n_clusters = centers.shape[0]
n_threads = _openmp_effective_n_threads(n_threads)
labels = np.full(n_samples, -1, dtype=np.int32)
weight_in_clusters = np.zeros(n_clusters, dtype=centers.dtype)
center_shift = np.zeros_like(weight_in_clusters)
if sp.issparse(X):
_labels = lloyd_iter_chunked_sparse
_inertia = _inertia_sparse
else:
_labels = lloyd_iter_chunked_dense
_inertia = _inertia_dense
_labels(X, sample_weight, x_squared_norms, centers, centers,
weight_in_clusters, labels, center_shift, n_threads,
update_centers=False)
inertia = _inertia(X, sample_weight, centers, labels)
return labels, inertia
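# The inertia computed above, spelled out for the dense unweighted case
# (illustrative sketch; _inertia_dense/_inertia_sparse implement the
# weighted, chunked equivalents):
def _inertia_sketch(X, centers, labels):
    """Sum of squared distances of each sample to its assigned center."""
    return float(((X - centers[labels]) ** 2).sum())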
class KMeans(TransformerMixin, ClusterMixin, BaseEstimator):
"""K-Means clustering.
Read more in the :ref:`User Guide <k_means>`.
Parameters
----------
n_clusters : int, default=8
The number of clusters to form as well as the number of
centroids to generate.
init : {'k-means++', 'random'}, callable or array-like of shape \
(n_clusters, n_features), default='k-means++'
Method for initialization:
        'k-means++' : selects initial cluster centers for k-means
clustering in a smart way to speed up convergence. See section
Notes in k_init for more details.
'random': choose `n_clusters` observations (rows) at random from data
for the initial centroids.
If an array is passed, it should be of shape (n_clusters, n_features)
and gives the initial centers.
If a callable is passed, it should take arguments X, n_clusters and a
random state and return an initialization.
n_init : int, default=10
Number of time the k-means algorithm will be run with different
centroid seeds. The final results will be the best output of
n_init consecutive runs in terms of inertia.
max_iter : int, default=300
Maximum number of iterations of the k-means algorithm for a
single run.
tol : float, default=1e-4
Relative tolerance with regards to Frobenius norm of the difference
in the cluster centers of two consecutive iterations to declare
convergence.
precompute_distances : {'auto', True, False}, default='auto'
Precompute distances (faster but takes more memory).
'auto' : do not precompute distances if n_samples * n_clusters > 12
million. This corresponds to about 100MB overhead per job using
double precision.
True : always precompute distances.
False : never precompute distances.
.. deprecated:: 0.23
            'precompute_distances' was deprecated in version 0.23 and will be
removed in 1.0 (renaming of 0.25). It has no effect.
verbose : int, default=0
Verbosity mode.
random_state : int, RandomState instance or None, default=None
Determines random number generation for centroid initialization. Use
an int to make the randomness deterministic.
See :term:`Glossary <random_state>`.
copy_x : bool, default=True
When pre-computing distances it is more numerically accurate to center
the data first. If copy_x is True (default), then the original data is
not modified. If False, the original data is modified, and put back
before the function returns, but small numerical differences may be
introduced by subtracting and then adding the data mean. Note that if
the original data is not C-contiguous, a copy will be made even if
copy_x is False. If the original data is sparse, but not in CSR format,
a copy will be made even if copy_x is False.
n_jobs : int, default=None
The number of OpenMP threads to use for the computation. Parallelism is
sample-wise on the main cython loop which assigns each sample to its
closest center.
``None`` or ``-1`` means using all processors.
.. deprecated:: 0.23
``n_jobs`` was deprecated in version 0.23 and will be removed in
1.0 (renaming of 0.25).
algorithm : {"auto", "full", "elkan"}, default="auto"
K-means algorithm to use. The classical EM-style algorithm is "full".
The "elkan" variation is more efficient on data with well-defined
clusters, by using the triangle inequality. However it's more memory
intensive due to the allocation of an extra array of shape
(n_samples, n_clusters).
For now "auto" (kept for backward compatibiliy) chooses "elkan" but it
might change in the future for a better heuristic.
.. versionchanged:: 0.18
Added Elkan algorithm
Attributes
----------
cluster_centers_ : ndarray of shape (n_clusters, n_features)
Coordinates of cluster centers. If the algorithm stops before fully
converging (see ``tol`` and ``max_iter``), these will not be
consistent with ``labels_``.
labels_ : ndarray of shape (n_samples,)
Labels of each point
inertia_ : float
Sum of squared distances of samples to their closest cluster center.
n_iter_ : int
Number of iterations run.
See Also
--------
MiniBatchKMeans : Alternative online implementation that does incremental
updates of the centers positions using mini-batches.
For large scale learning (say n_samples > 10k) MiniBatchKMeans is
probably much faster than the default batch implementation.
Notes
-----
The k-means problem is solved using either Lloyd's or Elkan's algorithm.
    The average complexity is given by O(k n T), where n is the number of
    samples and T is the number of iterations.
The worst case complexity is given by O(n^(k+2/p)) with
n = n_samples, p = n_features. (D. Arthur and S. Vassilvitskii,
'How slow is the k-means method?' SoCG2006)
In practice, the k-means algorithm is very fast (one of the fastest
    clustering algorithms available), but it falls into local minima. That's why
it can be useful to restart it several times.
If the algorithm stops before fully converging (because of ``tol`` or
``max_iter``), ``labels_`` and ``cluster_centers_`` will not be consistent,
i.e. the ``cluster_centers_`` will not be the means of the points in each
cluster. Also, the estimator will reassign ``labels_`` after the last
iteration to make ``labels_`` consistent with ``predict`` on the training
set.
Examples
--------
>>> from sklearn.cluster import KMeans
>>> import numpy as np
>>> X = np.array([[1, 2], [1, 4], [1, 0],
... [10, 2], [10, 4], [10, 0]])
>>> kmeans = KMeans(n_clusters=2, random_state=0).fit(X)
>>> kmeans.labels_
array([1, 1, 1, 0, 0, 0], dtype=int32)
>>> kmeans.predict([[0, 0], [12, 3]])
array([1, 0], dtype=int32)
>>> kmeans.cluster_centers_
array([[10., 2.],
[ 1., 2.]])
"""
@_deprecate_positional_args
def __init__(self, n_clusters=8, *, init='k-means++', n_init=10,
max_iter=300, tol=1e-4, precompute_distances='deprecated',
verbose=0, random_state=None, copy_x=True,
n_jobs='deprecated', algorithm='auto'):
self.n_clusters = n_clusters
self.init = init
self.max_iter = max_iter
self.tol = tol
self.precompute_distances = precompute_distances
self.n_init = n_init
self.verbose = verbose
self.random_state = random_state
self.copy_x = copy_x
self.n_jobs = n_jobs
self.algorithm = algorithm
def _check_params(self, X):
# precompute_distances
if self.precompute_distances != 'deprecated':
warnings.warn("'precompute_distances' was deprecated in version "
"0.23 and will be removed in 1.0 (renaming of 0.25)"
". It has no effect", FutureWarning)
# n_jobs
if self.n_jobs != 'deprecated':
warnings.warn("'n_jobs' was deprecated in version 0.23 and will be"
" removed in 1.0 (renaming of 0.25).", FutureWarning)
self._n_threads = self.n_jobs
else:
self._n_threads = None
self._n_threads = _openmp_effective_n_threads(self._n_threads)
# n_init
if self.n_init <= 0:
raise ValueError(
f"n_init should be > 0, got {self.n_init} instead.")
self._n_init = self.n_init
# max_iter
if self.max_iter <= 0:
raise ValueError(
f"max_iter should be > 0, got {self.max_iter} instead.")
# n_clusters
if X.shape[0] < self.n_clusters:
raise ValueError(f"n_samples={X.shape[0]} should be >= "
f"n_clusters={self.n_clusters}.")
# tol
self._tol = _tolerance(X, self.tol)
# algorithm
if self.algorithm not in ("auto", "full", "elkan"):
raise ValueError(f"Algorithm must be 'auto', 'full' or 'elkan', "
f"got {self.algorithm} instead.")
self._algorithm = self.algorithm
if self._algorithm == "auto":
self._algorithm = "full" if self.n_clusters == 1 else "elkan"
if self._algorithm == "elkan" and self.n_clusters == 1:
warnings.warn("algorithm='elkan' doesn't make sense for a single "
"cluster. Using 'full' instead.", RuntimeWarning)
self._algorithm = "full"
# init
if not (hasattr(self.init, '__array__') or callable(self.init)
or (isinstance(self.init, str)
and self.init in ["k-means++", "random"])):
raise ValueError(
f"init should be either 'k-means++', 'random', a ndarray or a "
f"callable, got '{self.init}' instead.")
if hasattr(self.init, '__array__') and self._n_init != 1:
warnings.warn(
f"Explicit initial center position passed: performing only"
f" one init in {self.__class__.__name__} instead of "
f"n_init={self._n_init}.", RuntimeWarning, stacklevel=2)
self._n_init = 1
def _validate_center_shape(self, X, centers):
"""Check if centers is compatible with X and n_clusters."""
if centers.shape[0] != self.n_clusters:
raise ValueError(
f"The shape of the initial centers {centers.shape} does not "
f"match the number of clusters {self.n_clusters}.")
if centers.shape[1] != X.shape[1]:
raise ValueError(
f"The shape of the initial centers {centers.shape} does not "
f"match the number of features of the data {X.shape[1]}.")
def _check_test_data(self, X):
X = self._validate_data(X, accept_sparse='csr', reset=False,
dtype=[np.float64, np.float32],
order='C', accept_large_sparse=False)
return X
def _check_mkl_vcomp(self, X, n_samples):
"""Warns when vcomp and mkl are both present"""
# The BLAS call inside a prange in lloyd_iter_chunked_dense is known to
        # cause a small memory leak when there are fewer chunks than the number
        # of available threads. It only happens when the OpenMP library is
        # vcomp (Microsoft OpenMP) and the BLAS library is MKL. See #18653.
if sp.issparse(X):
return
active_threads = int(np.ceil(n_samples / CHUNK_SIZE))
if active_threads < self._n_threads:
modules = threadpool_info()
has_vcomp = "vcomp" in [module["prefix"] for module in modules]
has_mkl = ("mkl", "intel") in [
(module["internal_api"], module.get("threading_layer", None))
for module in modules]
if has_vcomp and has_mkl:
if not hasattr(self, "batch_size"): # KMeans
warnings.warn(
f"KMeans is known to have a memory leak on Windows "
f"with MKL, when there are less chunks than available "
f"threads. You can avoid it by setting the environment"
f" variable OMP_NUM_THREADS={active_threads}.")
else: # MiniBatchKMeans
warnings.warn(
f"MiniBatchKMeans is known to have a memory leak on "
f"Windows with MKL, when there are less chunks than "
f"available threads. You can prevent it by setting "
f"batch_size >= {self._n_threads * CHUNK_SIZE} or by "
f"setting the environment variable "
f"OMP_NUM_THREADS={active_threads}")
def _init_centroids(self, X, x_squared_norms, init, random_state,
init_size=None):
"""Compute the initial centroids.
Parameters
----------
X : {ndarray, sparse matrix} of shape (n_samples, n_features)
The input samples.
x_squared_norms : ndarray of shape (n_samples,)
Squared euclidean norm of each data point. Pass it if you have it
at hands already to avoid it being recomputed here.
init : {'k-means++', 'random'}, callable or ndarray of shape \
(n_clusters, n_features)
Method for initialization.
random_state : RandomState instance
Determines random number generation for centroid initialization.
See :term:`Glossary <random_state>`.
init_size : int, default=None
Number of samples to randomly sample for speeding up the
initialization (sometimes at the expense of accuracy).
Returns
-------
centers : ndarray of shape (n_clusters, n_features)
"""
n_samples = X.shape[0]
n_clusters = self.n_clusters
if init_size is not None and init_size < n_samples:
init_indices = random_state.randint(0, n_samples, init_size)
X = X[init_indices]
x_squared_norms = x_squared_norms[init_indices]
n_samples = X.shape[0]
if isinstance(init, str) and init == 'k-means++':
centers, _ = _kmeans_plusplus(X, n_clusters,
random_state=random_state,
x_squared_norms=x_squared_norms)
elif isinstance(init, str) and init == 'random':
seeds = random_state.permutation(n_samples)[:n_clusters]
centers = X[seeds]
elif hasattr(init, '__array__'):
centers = init
elif callable(init):
centers = init(X, n_clusters, random_state=random_state)
centers = check_array(
centers, dtype=X.dtype, copy=False, order='C')
self._validate_center_shape(X, centers)
if sp.issparse(centers):
centers = centers.toarray()
return centers
def fit(self, X, y=None, sample_weight=None):
"""Compute k-means clustering.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training instances to cluster. It must be noted that the data
will be converted to C ordering, which will cause a memory
copy if the given data is not C-contiguous.
If a sparse matrix is passed, a copy will be made if it's not in
CSR format.
y : Ignored
Not used, present here for API consistency by convention.
sample_weight : array-like of shape (n_samples,), default=None
The weights for each observation in X. If None, all observations
are assigned equal weight.
.. versionadded:: 0.20
Returns
-------
self
Fitted estimator.
"""
X = self._validate_data(X, accept_sparse='csr',
dtype=[np.float64, np.float32],
order='C', copy=self.copy_x,
accept_large_sparse=False)
self._check_params(X)
random_state = check_random_state(self.random_state)
sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype)
# Validate init array
init = self.init
if hasattr(init, '__array__'):
init = check_array(init, dtype=X.dtype, copy=True, order='C')
self._validate_center_shape(X, init)
# subtract of mean of x for more accurate distance computations
if not sp.issparse(X):
X_mean = X.mean(axis=0)
# The copy was already done above
X -= X_mean
if hasattr(init, '__array__'):
init -= X_mean
# precompute squared norms of data points
x_squared_norms = row_norms(X, squared=True)
if self._algorithm == "full":
kmeans_single = _kmeans_single_lloyd
self._check_mkl_vcomp(X, X.shape[0])
else:
kmeans_single = _kmeans_single_elkan
best_inertia = None
for i in range(self._n_init):
# Initialize centers
centers_init = self._init_centroids(
X, x_squared_norms=x_squared_norms, init=init,
random_state=random_state)
if self.verbose:
print("Initialization complete")
# run a k-means once
labels, inertia, centers, n_iter_ = kmeans_single(
X, sample_weight, centers_init, max_iter=self.max_iter,
verbose=self.verbose, tol=self._tol,
x_squared_norms=x_squared_norms, n_threads=self._n_threads)
# determine if these results are the best so far
if best_inertia is None or inertia < best_inertia:
best_labels = labels
best_centers = centers
best_inertia = inertia
best_n_iter = n_iter_
if not sp.issparse(X):
if not self.copy_x:
X += X_mean
best_centers += X_mean
distinct_clusters = len(set(best_labels))
if distinct_clusters < self.n_clusters:
warnings.warn(
"Number of distinct clusters ({}) found smaller than "
"n_clusters ({}). Possibly due to duplicate points "
"in X.".format(distinct_clusters, self.n_clusters),
ConvergenceWarning, stacklevel=2)
self.cluster_centers_ = best_centers
self.labels_ = best_labels
self.inertia_ = best_inertia
self.n_iter_ = best_n_iter
return self
def fit_predict(self, X, y=None, sample_weight=None):
"""Compute cluster centers and predict cluster index for each sample.
Convenience method; equivalent to calling fit(X) followed by
predict(X).
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
New data to transform.
y : Ignored
Not used, present here for API consistency by convention.
sample_weight : array-like of shape (n_samples,), default=None
The weights for each observation in X. If None, all observations
are assigned equal weight.
Returns
-------
labels : ndarray of shape (n_samples,)
Index of the cluster each sample belongs to.
"""
return self.fit(X, sample_weight=sample_weight).labels_
def fit_transform(self, X, y=None, sample_weight=None):
"""Compute clustering and transform X to cluster-distance space.
Equivalent to fit(X).transform(X), but more efficiently implemented.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
New data to transform.
y : Ignored
Not used, present here for API consistency by convention.
sample_weight : array-like of shape (n_samples,), default=None
The weights for each observation in X. If None, all observations
are assigned equal weight.
Returns
-------
X_new : ndarray of shape (n_samples, n_clusters)
X transformed in the new space.
"""
# Currently, this just skips a copy of the data if it is not in
# np.array or CSR format already.
# XXX This skips _check_test_data, which may change the dtype;
# we should refactor the input validation.
return self.fit(X, sample_weight=sample_weight)._transform(X)
def transform(self, X):
"""Transform X to a cluster-distance space.
In the new space, each dimension is the distance to the cluster
centers. Note that even if X is sparse, the array returned by
`transform` will typically be dense.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
New data to transform.
Returns
-------
X_new : ndarray of shape (n_samples, n_clusters)
X transformed in the new space.
"""
check_is_fitted(self)
X = self._check_test_data(X)
return self._transform(X)
def _transform(self, X):
"""Guts of transform method; no input validation."""
return euclidean_distances(X, self.cluster_centers_)
def predict(self, X, sample_weight=None):
"""Predict the closest cluster each sample in X belongs to.
In the vector quantization literature, `cluster_centers_` is called
the code book and each value returned by `predict` is the index of
the closest code in the code book.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
New data to predict.
sample_weight : array-like of shape (n_samples,), default=None
The weights for each observation in X. If None, all observations
are assigned equal weight.
Returns
-------
labels : ndarray of shape (n_samples,)
Index of the cluster each sample belongs to.
"""
check_is_fitted(self)
X = self._check_test_data(X)
x_squared_norms = row_norms(X, squared=True)
sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype)
return _labels_inertia(X, sample_weight, x_squared_norms,
self.cluster_centers_, self._n_threads)[0]
def score(self, X, y=None, sample_weight=None):
"""Opposite of the value of X on the K-means objective.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
New data.
y : Ignored
Not used, present here for API consistency by convention.
sample_weight : array-like of shape (n_samples,), default=None
The weights for each observation in X. If None, all observations
are assigned equal weight.
Returns
-------
score : float
Opposite of the value of X on the K-means objective.
"""
check_is_fitted(self)
X = self._check_test_data(X)
x_squared_norms = row_norms(X, squared=True)
sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype)
return -_labels_inertia(X, sample_weight, x_squared_norms,
self.cluster_centers_)[1]
def _more_tags(self):
return {
'_xfail_checks': {
'check_sample_weights_invariance':
'zero sample_weight is not equivalent to removing samples',
},
}
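# Illustrative sketch of the explicit-init path documented above: passing an
# array as `init` fixes the starting centers, and n_init=1 avoids the
# redundant-inits warning (the data below is synthetic demo input, not part
# of the library):
def _demo_kmeans_explicit_init():
    X_demo = np.array([[1., 2.], [1., 4.], [10., 2.], [10., 4.]])
    starting_centers = np.array([[1., 3.], [10., 3.]])
    km = KMeans(n_clusters=2, init=starting_centers, n_init=1).fit(X_demo)
    return km.cluster_centers_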
def _mini_batch_step(X, sample_weight, x_squared_norms, centers, weight_sums,
old_center_buffer, compute_squared_diff,
distances, random_reassign=False,
random_state=None, reassignment_ratio=.01,
verbose=False):
"""Incremental update of the centers for the Minibatch K-Means algorithm.
Parameters
----------
X : ndarray of shape (n_samples, n_features)
The original data array.
sample_weight : array-like of shape (n_samples,)
The weights for each observation in X.
x_squared_norms : ndarray of shape (n_samples,)
Squared euclidean norm of each data point.
centers : ndarray of shape (k, n_features)
The cluster centers. This array is MODIFIED IN PLACE
    weight_sums : ndarray of shape (k,)
        The weight sum of each cluster. This array is MODIFIED IN PLACE.
    old_center_buffer : ndarray of shape (n_features,)
        Copy of old centers for monitoring convergence.
compute_squared_diff : bool
If set to False, the squared diff computation is skipped.
distances : ndarray of shape (n_samples,), dtype=float, default=None
If not None, should be a pre-allocated array that will be used to store
the distances of each sample to its closest center.
May not be None when random_reassign is True.
random_reassign : bool, default=False
If True, centers with very low counts are randomly reassigned
to observations.
random_state : int, RandomState instance or None, default=None
Determines random number generation for centroid initialization and to
pick new clusters amongst observations with uniform probability. Use
an int to make the randomness deterministic.
See :term:`Glossary <random_state>`.
reassignment_ratio : float, default=.01
Control the fraction of the maximum number of counts for a
center to be reassigned. A higher value means that low count
centers are more likely to be reassigned, which means that the
model will take longer to converge, but should converge in a
better clustering.
verbose : bool, default=False
Controls the verbosity.
Returns
-------
inertia : float
Sum of squared distances of samples to their closest cluster center.
    squared_diff : float
        Sum of squared differences between the previous and the updated
        cluster centers.
"""
# Perform label assignment to nearest centers
nearest_center, inertia = _labels_inertia(X, sample_weight,
x_squared_norms, centers)
if random_reassign and reassignment_ratio > 0:
random_state = check_random_state(random_state)
# Reassign clusters that have very low weight
to_reassign = weight_sums < reassignment_ratio * weight_sums.max()
# pick at most .5 * batch_size samples as new centers
if to_reassign.sum() > .5 * X.shape[0]:
indices_dont_reassign = \
np.argsort(weight_sums)[int(.5 * X.shape[0]):]
to_reassign[indices_dont_reassign] = False
n_reassigns = to_reassign.sum()
if n_reassigns:
# Pick new clusters amongst observations with uniform probability
new_centers = random_state.choice(X.shape[0], replace=False,
size=n_reassigns)
if verbose:
print("[MiniBatchKMeans] Reassigning %i cluster centers."
% n_reassigns)
if sp.issparse(X) and not sp.issparse(centers):
assign_rows_csr(
X, new_centers.astype(np.intp, copy=False),
np.where(to_reassign)[0].astype(np.intp, copy=False),
centers)
else:
centers[to_reassign] = X[new_centers]
# reset counts of reassigned centers, but don't reset them too small
# to avoid instant reassignment. This is a pretty dirty hack as it
# also modifies the learning rates.
weight_sums[to_reassign] = np.min(weight_sums[~to_reassign])
# implementation for the sparse CSR representation completely written in
# cython
if sp.issparse(X):
return inertia, _mini_batch_update_csr(
X, sample_weight, x_squared_norms, centers, weight_sums,
nearest_center, old_center_buffer, compute_squared_diff)
# dense variant in mostly numpy (not as memory efficient though)
k = centers.shape[0]
squared_diff = 0.0
for center_idx in range(k):
# find points from minibatch that are assigned to this center
center_mask = nearest_center == center_idx
wsum = sample_weight[center_mask].sum()
if wsum > 0:
if compute_squared_diff:
old_center_buffer[:] = centers[center_idx]
# inplace remove previous count scaling
centers[center_idx] *= weight_sums[center_idx]
# inplace sum with new points members of this cluster
centers[center_idx] += \
np.sum(X[center_mask] *
sample_weight[center_mask, np.newaxis], axis=0)
# update the count statistics for this center
weight_sums[center_idx] += wsum
# inplace rescale to compute mean of all points (old and new)
# Note: numpy >= 1.10 does not support '/=' for the following
# expression for a mixture of int and float (see numpy issue #6464)
centers[center_idx] = centers[center_idx] / weight_sums[center_idx]
# update the squared diff if necessary
if compute_squared_diff:
diff = centers[center_idx].ravel() - old_center_buffer.ravel()
squared_diff += np.dot(diff, diff)
return inertia, squared_diff
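# The per-center update inside the dense branch above, isolated as a weighted
# running mean for a single center (illustrative sketch):
def _online_center_update_sketch(center, weight_sum, batch_points,
                                 batch_weights):
    """Return (new_center, new_weight_sum) after absorbing a mini-batch:
    unscale by the old count, add the weighted batch sum, then rescale by
    the new count."""
    new_weight_sum = weight_sum + batch_weights.sum()
    new_center = (center * weight_sum
                  + (batch_points * batch_weights[:, None]).sum(axis=0))
    return new_center / new_weight_sum, new_weight_sum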
def _mini_batch_convergence(model, iteration_idx, n_iter, tol,
n_samples, centers_squared_diff, batch_inertia,
context, verbose=0):
"""Helper function to encapsulate the early stopping logic."""
# Normalize inertia to be able to compare values when
# batch_size changes
batch_inertia /= model.batch_size
centers_squared_diff /= model.batch_size
# Compute an Exponentially Weighted Average of the squared
# diff to monitor the convergence while discarding
# minibatch-local stochastic variability:
# https://en.wikipedia.org/wiki/Moving_average
ewa_diff = context.get('ewa_diff')
ewa_inertia = context.get('ewa_inertia')
if ewa_diff is None:
ewa_diff = centers_squared_diff
ewa_inertia = batch_inertia
else:
alpha = float(model.batch_size) * 2.0 / (n_samples + 1)
alpha = 1.0 if alpha > 1.0 else alpha
ewa_diff = ewa_diff * (1 - alpha) + centers_squared_diff * alpha
ewa_inertia = ewa_inertia * (1 - alpha) + batch_inertia * alpha
# Log progress to be able to monitor convergence
if verbose:
progress_msg = (
'Minibatch iteration %d/%d:'
' mean batch inertia: %f, ewa inertia: %f ' % (
iteration_idx + 1, n_iter, batch_inertia,
ewa_inertia))
print(progress_msg)
# Early stopping based on absolute tolerance on squared change of
# centers position (using EWA smoothing)
if tol > 0.0 and ewa_diff <= tol:
if verbose:
print('Converged (small centers change) at iteration %d/%d'
% (iteration_idx + 1, n_iter))
return True
# Early stopping heuristic due to lack of improvement on smoothed inertia
ewa_inertia_min = context.get('ewa_inertia_min')
no_improvement = context.get('no_improvement', 0)
if ewa_inertia_min is None or ewa_inertia < ewa_inertia_min:
no_improvement = 0
ewa_inertia_min = ewa_inertia
else:
no_improvement += 1
if (model.max_no_improvement is not None
and no_improvement >= model.max_no_improvement):
if verbose:
print('Converged (lack of improvement in inertia)'
' at iteration %d/%d'
% (iteration_idx + 1, n_iter))
return True
# update the convergence context to maintain state across successive calls:
context['ewa_diff'] = ewa_diff
context['ewa_inertia'] = ewa_inertia
context['ewa_inertia_min'] = ewa_inertia_min
context['no_improvement'] = no_improvement
return False
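# The smoothing used above in isolation (illustrative): one exponentially
# weighted average step whose weight scales with the batch fraction.
def _ewa_update_sketch(previous, value, batch_size, n_samples):
    """One EWA step with the same alpha as _mini_batch_convergence."""
    alpha = min(1.0, float(batch_size) * 2.0 / (n_samples + 1))
    return previous * (1 - alpha) + value * alpha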
class MiniBatchKMeans(KMeans):
"""
Mini-Batch K-Means clustering.
Read more in the :ref:`User Guide <mini_batch_kmeans>`.
Parameters
----------
n_clusters : int, default=8
The number of clusters to form as well as the number of
centroids to generate.
init : {'k-means++', 'random'}, callable or array-like of shape \
(n_clusters, n_features), default='k-means++'
Method for initialization:
        'k-means++' : selects initial cluster centers for k-means
clustering in a smart way to speed up convergence. See section
Notes in k_init for more details.
'random': choose `n_clusters` observations (rows) at random from data
for the initial centroids.
If an array is passed, it should be of shape (n_clusters, n_features)
and gives the initial centers.
If a callable is passed, it should take arguments X, n_clusters and a
random state and return an initialization.
max_iter : int, default=100
Maximum number of iterations over the complete dataset before
stopping independently of any early stopping criterion heuristics.
batch_size : int, default=100
Size of the mini batches.
verbose : int, default=0
Verbosity mode.
compute_labels : bool, default=True
Compute label assignment and inertia for the complete dataset
once the minibatch optimization has converged in fit.
random_state : int, RandomState instance or None, default=None
Determines random number generation for centroid initialization and
random reassignment. Use an int to make the randomness deterministic.
See :term:`Glossary <random_state>`.
tol : float, default=0.0
        Control early stopping based on the relative center changes as
        measured by a smoothed, variance-normalized estimate of the mean
        center squared position changes. This early stopping heuristic is
closer to the one used for the batch variant of the algorithms
but induces a slight computational and memory overhead over the
inertia heuristic.
To disable convergence detection based on normalized center
change, set tol to 0.0 (default).
max_no_improvement : int, default=10
Control early stopping based on the consecutive number of mini
batches that does not yield an improvement on the smoothed inertia.
To disable convergence detection based on inertia, set
max_no_improvement to None.
init_size : int, default=None
Number of samples to randomly sample for speeding up the
initialization (sometimes at the expense of accuracy): the
        algorithm is initialized by running a batch KMeans on a
random subset of the data. This needs to be larger than n_clusters.
        If `None`, `init_size = 3 * batch_size`.
n_init : int, default=3
Number of random initializations that are tried.
In contrast to KMeans, the algorithm is only run once, using the
best of the ``n_init`` initializations as measured by inertia.
reassignment_ratio : float, default=0.01
Control the fraction of the maximum number of counts for a
center to be reassigned. A higher value means that low count
centers are more easily reassigned, which means that the
model will take longer to converge, but should converge in a
better clustering.
Attributes
----------
cluster_centers_ : ndarray of shape (n_clusters, n_features)
Coordinates of cluster centers.
    labels_ : ndarray of shape (n_samples,)
        Labels of each point (if compute_labels is set to True).
inertia_ : float
The value of the inertia criterion associated with the chosen
partition (if compute_labels is set to True). The inertia is
        defined as the sum of squared distances of samples to their nearest
        cluster center.
n_iter_ : int
Number of batches processed.
counts_ : ndarray of shape (n_clusters,)
        Weight sum of each cluster.
.. deprecated:: 0.24
This attribute is deprecated in 0.24 and will be removed in
1.1 (renaming of 0.26).
init_size_ : int
The effective number of samples used for the initialization.
.. deprecated:: 0.24
This attribute is deprecated in 0.24 and will be removed in
1.1 (renaming of 0.26).
See Also
--------
KMeans : The classic implementation of the clustering method based on the
Lloyd's algorithm. It consumes the whole set of input data at each
iteration.
Notes
-----
See https://www.eecs.tufts.edu/~dsculley/papers/fastkmeans.pdf
Examples
--------
>>> from sklearn.cluster import MiniBatchKMeans
>>> import numpy as np
>>> X = np.array([[1, 2], [1, 4], [1, 0],
... [4, 2], [4, 0], [4, 4],
... [4, 5], [0, 1], [2, 2],
... [3, 2], [5, 5], [1, -1]])
>>> # manually fit on batches
>>> kmeans = MiniBatchKMeans(n_clusters=2,
... random_state=0,
... batch_size=6)
>>> kmeans = kmeans.partial_fit(X[0:6,:])
>>> kmeans = kmeans.partial_fit(X[6:12,:])
>>> kmeans.cluster_centers_
array([[2. , 1. ],
[3.5, 4.5]])
>>> kmeans.predict([[0, 0], [4, 4]])
array([0, 1], dtype=int32)
>>> # fit on the whole data
>>> kmeans = MiniBatchKMeans(n_clusters=2,
... random_state=0,
... batch_size=6,
... max_iter=10).fit(X)
>>> kmeans.cluster_centers_
array([[3.95918367, 2.40816327],
[1.12195122, 1.3902439 ]])
>>> kmeans.predict([[0, 0], [4, 4]])
array([1, 0], dtype=int32)
"""
@_deprecate_positional_args
def __init__(self, n_clusters=8, *, init='k-means++', max_iter=100,
batch_size=100, verbose=0, compute_labels=True,
random_state=None, tol=0.0, max_no_improvement=10,
init_size=None, n_init=3, reassignment_ratio=0.01):
super().__init__(
n_clusters=n_clusters, init=init, max_iter=max_iter,
verbose=verbose, random_state=random_state, tol=tol, n_init=n_init)
self.max_no_improvement = max_no_improvement
self.batch_size = batch_size
self.compute_labels = compute_labels
self.init_size = init_size
self.reassignment_ratio = reassignment_ratio
@deprecated("The attribute 'counts_' is deprecated in 0.24" # type: ignore
" and will be removed in 1.1 (renaming of 0.26).")
@property
def counts_(self):
return self._counts
@deprecated("The attribute 'init_size_' is deprecated in " # type: ignore
"0.24 and will be removed in 1.1 (renaming of 0.26).")
@property
def init_size_(self):
return self._init_size
@deprecated("The attribute 'random_state_' is deprecated " # type: ignore
"in 0.24 and will be removed in 1.1 (renaming of 0.26).")
@property
def random_state_(self):
return getattr(self, "_random_state", None)
def _check_params(self, X):
super()._check_params(X)
# max_no_improvement
if self.max_no_improvement is not None and self.max_no_improvement < 0:
raise ValueError(
f"max_no_improvement should be >= 0, got "
f"{self.max_no_improvement} instead.")
# batch_size
if self.batch_size <= 0:
raise ValueError(
f"batch_size should be > 0, got {self.batch_size} instead.")
# init_size
if self.init_size is not None and self.init_size <= 0:
raise ValueError(
f"init_size should be > 0, got {self.init_size} instead.")
self._init_size = self.init_size
if self._init_size is None:
self._init_size = 3 * self.batch_size
if self._init_size < self.n_clusters:
self._init_size = 3 * self.n_clusters
elif self._init_size < self.n_clusters:
warnings.warn(
f"init_size={self._init_size} should be larger than "
f"n_clusters={self.n_clusters}. Setting it to "
f"min(3*n_clusters, n_samples)",
RuntimeWarning, stacklevel=2)
self._init_size = 3 * self.n_clusters
self._init_size = min(self._init_size, X.shape[0])
# reassignment_ratio
if self.reassignment_ratio < 0:
raise ValueError(
f"reassignment_ratio should be >= 0, got "
f"{self.reassignment_ratio} instead.")
def fit(self, X, y=None, sample_weight=None):
"""Compute the centroids on X by chunking it into mini-batches.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training instances to cluster. It must be noted that the data
will be converted to C ordering, which will cause a memory copy
if the given data is not C-contiguous.
y : Ignored
Not used, present here for API consistency by convention.
sample_weight : array-like of shape (n_samples,), default=None
The weights for each observation in X. If None, all observations
are assigned equal weight (default: None).
.. versionadded:: 0.20
Returns
-------
self
"""
X = self._validate_data(X, accept_sparse='csr',
dtype=[np.float64, np.float32],
order='C', accept_large_sparse=False)
self._check_params(X)
random_state = check_random_state(self.random_state)
sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype)
# Validate init array
init = self.init
if hasattr(init, '__array__'):
init = check_array(init, dtype=X.dtype, copy=True, order='C')
self._validate_center_shape(X, init)
n_samples, n_features = X.shape
x_squared_norms = row_norms(X, squared=True)
if self.tol > 0.0:
tol = _tolerance(X, self.tol)
# using tol-based early stopping needs the allocation of a
            # dedicated buffer, which can be expensive for high dim data:
# hence we allocate it outside of the main loop
old_center_buffer = np.zeros(n_features, dtype=X.dtype)
else:
tol = 0.0
# no need for the center buffer if tol-based early stopping is
# disabled
old_center_buffer = np.zeros(0, dtype=X.dtype)
distances = np.zeros(self.batch_size, dtype=X.dtype)
n_batches = int(np.ceil(float(n_samples) / self.batch_size))
n_iter = int(self.max_iter * n_batches)
self._check_mkl_vcomp(X, self.batch_size)
validation_indices = random_state.randint(0, n_samples,
self._init_size)
X_valid = X[validation_indices]
sample_weight_valid = sample_weight[validation_indices]
x_squared_norms_valid = x_squared_norms[validation_indices]
# perform several inits with random sub-sets
best_inertia = None
for init_idx in range(self._n_init):
if self.verbose:
print("Init %d/%d with method: %s"
% (init_idx + 1, self._n_init, init))
weight_sums = np.zeros(self.n_clusters, dtype=sample_weight.dtype)
# TODO: once the `k_means` function works with sparse input we
# should refactor the following init to use it instead.
# Initialize the centers using only a fraction of the data as we
# expect n_samples to be very large when using MiniBatchKMeans
cluster_centers = self._init_centroids(
X, x_squared_norms=x_squared_norms,
init=init,
random_state=random_state,
init_size=self._init_size)
# Compute the label assignment on the init dataset
_mini_batch_step(
X_valid, sample_weight_valid,
x_squared_norms[validation_indices], cluster_centers,
weight_sums, old_center_buffer, False, distances=None,
verbose=self.verbose)
# Keep only the best cluster centers across independent inits on
# the common validation set
_, inertia = _labels_inertia(X_valid, sample_weight_valid,
x_squared_norms_valid,
cluster_centers)
if self.verbose:
print("Inertia for init %d/%d: %f"
% (init_idx + 1, self._n_init, inertia))
if best_inertia is None or inertia < best_inertia:
self.cluster_centers_ = cluster_centers
self._counts = weight_sums
best_inertia = inertia
# Empty context to be used inplace by the convergence check routine
convergence_context = {}
# Perform the iterative optimization until the final convergence
# criterion
for iteration_idx in range(n_iter):
# Sample a minibatch from the full dataset
minibatch_indices = random_state.randint(
0, n_samples, self.batch_size)
# Perform the actual update step on the minibatch data
batch_inertia, centers_squared_diff = _mini_batch_step(
X[minibatch_indices], sample_weight[minibatch_indices],
x_squared_norms[minibatch_indices],
self.cluster_centers_, self._counts,
old_center_buffer, tol > 0.0, distances=distances,
# Here we randomly choose whether to perform
# random reassignment: the choice is done as a function
# of the iteration index, and the minimum number of
# counts, in order to force this reassignment to happen
# every once in a while
random_reassign=((iteration_idx + 1)
% (10 + int(self._counts.min())) == 0),
random_state=random_state,
reassignment_ratio=self.reassignment_ratio,
verbose=self.verbose)
# Monitor convergence and do early stopping if necessary
if _mini_batch_convergence(
self, iteration_idx, n_iter, tol, n_samples,
centers_squared_diff, batch_inertia, convergence_context,
verbose=self.verbose):
break
self.n_iter_ = iteration_idx + 1
if self.compute_labels:
self.labels_, self.inertia_ = \
self._labels_inertia_minibatch(X, sample_weight)
return self
def _labels_inertia_minibatch(self, X, sample_weight):
"""Compute labels and inertia using mini batches.
This is slightly slower than doing everything at once but prevents
memory errors / segfaults.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Input data.
sample_weight : array-like of shape (n_samples,)
The weights for each observation in X.
Returns
-------
labels : ndarray of shape (n_samples,)
Cluster labels for each point.
inertia : float
Sum of squared distances of points to nearest cluster.
"""
if self.verbose:
print('Computing label assignment and total inertia')
sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype)
x_squared_norms = row_norms(X, squared=True)
slices = gen_batches(X.shape[0], self.batch_size)
results = [_labels_inertia(X[s], sample_weight[s], x_squared_norms[s],
self.cluster_centers_) for s in slices]
labels, inertia = zip(*results)
return np.hstack(labels), np.sum(inertia)
def partial_fit(self, X, y=None, sample_weight=None):
"""Update k means estimate on a single mini-batch X.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Coordinates of the data points to cluster. It must be noted that
X will be copied if it is not C-contiguous.
y : Ignored
Not used, present here for API consistency by convention.
sample_weight : array-like of shape (n_samples,), default=None
The weights for each observation in X. If None, all observations
are assigned equal weight (default: None).
Returns
-------
self
"""
is_first_call_to_partial_fit = not hasattr(self, 'cluster_centers_')
X = self._validate_data(X, accept_sparse='csr',
dtype=[np.float64, np.float32],
order='C', accept_large_sparse=False,
reset=is_first_call_to_partial_fit)
self._random_state = getattr(self, "_random_state",
check_random_state(self.random_state))
sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype)
x_squared_norms = row_norms(X, squared=True)
if is_first_call_to_partial_fit:
# this is the first call to partial_fit on this object
self._check_params(X)
# Validate init array
init = self.init
if hasattr(init, '__array__'):
init = check_array(init, dtype=X.dtype, copy=True, order='C')
self._validate_center_shape(X, init)
self._check_mkl_vcomp(X, X.shape[0])
# initialize the cluster centers
self.cluster_centers_ = self._init_centroids(
X, x_squared_norms=x_squared_norms,
init=init,
random_state=self._random_state,
init_size=self._init_size)
self._counts = np.zeros(self.n_clusters,
dtype=sample_weight.dtype)
random_reassign = False
distances = None
else:
# The lower the minimum count is, the more we do random
# reassignment, however, we don't want to do random
# reassignment too often, to allow for building up counts
random_reassign = self._random_state.randint(
10 * (1 + self._counts.min())) == 0
distances = np.zeros(X.shape[0], dtype=X.dtype)
_mini_batch_step(X, sample_weight, x_squared_norms,
self.cluster_centers_, self._counts,
np.zeros(0, dtype=X.dtype), 0,
random_reassign=random_reassign, distances=distances,
random_state=self._random_state,
reassignment_ratio=self.reassignment_ratio,
verbose=self.verbose)
if self.compute_labels:
self.labels_, self.inertia_ = _labels_inertia(
X, sample_weight, x_squared_norms, self.cluster_centers_)
return self
def predict(self, X, sample_weight=None):
"""Predict the closest cluster each sample in X belongs to.
In the vector quantization literature, `cluster_centers_` is called
the code book and each value returned by `predict` is the index of
the closest code in the code book.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
New data to predict.
sample_weight : array-like of shape (n_samples,), default=None
The weights for each observation in X. If None, all observations
are assigned equal weight (default: None).
Returns
-------
labels : ndarray of shape (n_samples,)
Index of the cluster each sample belongs to.
"""
check_is_fitted(self)
X = self._check_test_data(X)
return self._labels_inertia_minibatch(X, sample_weight)[0]
def _more_tags(self):
return {
'_xfail_checks': {
'check_sample_weights_invariance':
'zero sample_weight is not equivalent to removing samples',
}
}
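# Illustrative sketch of the streaming API above: feeding fixed-size chunks
# of a synthetic array through partial_fit (demo data only, not part of the
# library):
def _demo_minibatch_streaming():
    rng = np.random.RandomState(0)
    X_stream = rng.normal(size=(600, 2))
    mbk = MiniBatchKMeans(n_clusters=3, random_state=0, batch_size=100)
    for start in range(0, X_stream.shape[0], 100):
        mbk.partial_fit(X_stream[start:start + 100])
    return mbk.cluster_centers_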
def kmeans_plusplus(X, n_clusters, *, x_squared_norms=None,
random_state=None, n_local_trials=None):
"""Init n_clusters seeds according to k-means++
.. versionadded:: 0.24
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The data to pick seeds from.
n_clusters : int
The number of centroids to initialize
x_squared_norms : array-like of shape (n_samples,), default=None
Squared Euclidean norm of each data point.
random_state : int or RandomState instance, default=None
Determines random number generation for centroid initialization. Pass
an int for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
n_local_trials : int, default=None
The number of seeding trials for each center (except the first),
of which the one reducing inertia the most is greedily chosen.
Set to None to make the number of trials depend logarithmically
on the number of seeds (2+log(k)).
Returns
-------
centers : ndarray of shape (n_clusters, n_features)
        The initial centers for k-means.
indices : ndarray of shape (n_clusters,)
The index location of the chosen centers in the data array X. For a
given index and center, X[index] = center.
Notes
-----
    Selects initial cluster centers for k-means clustering in a smart way
    to speed up convergence. See: Arthur, D. and Vassilvitskii, S.
"k-means++: the advantages of careful seeding". ACM-SIAM symposium
on Discrete algorithms. 2007
Examples
--------
>>> from sklearn.cluster import kmeans_plusplus
>>> import numpy as np
>>> X = np.array([[1, 2], [1, 4], [1, 0],
... [10, 2], [10, 4], [10, 0]])
>>> centers, indices = kmeans_plusplus(X, n_clusters=2, random_state=0)
>>> centers
array([[10, 4],
[ 1, 0]])
>>> indices
array([4, 2])
"""
# Check data
check_array(X, accept_sparse='csr',
dtype=[np.float64, np.float32])
if X.shape[0] < n_clusters:
raise ValueError(f"n_samples={X.shape[0]} should be >= "
f"n_clusters={n_clusters}.")
# Check parameters
if x_squared_norms is None:
x_squared_norms = row_norms(X, squared=True)
else:
x_squared_norms = check_array(x_squared_norms,
dtype=X.dtype,
ensure_2d=False)
if x_squared_norms.shape[0] != X.shape[0]:
raise ValueError(
f"The length of x_squared_norms {x_squared_norms.shape[0]} should "
f"be equal to the length of n_samples {X.shape[0]}.")
if n_local_trials is not None and n_local_trials < 1:
raise ValueError(
f"n_local_trials is set to {n_local_trials} but should be an "
f"integer value greater than zero.")
random_state = check_random_state(random_state)
# Call private k-means++
centers, indices = _kmeans_plusplus(X, n_clusters, x_squared_norms,
random_state, n_local_trials)
return centers, indices
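# The seeding idea above in plain NumPy (illustrative sketch: dense X, no
# greedy local trials, unlike the private _kmeans_plusplus used here):
def _kmeans_plusplus_sketch(X, n_clusters, rng):
    """Pick each next seed with probability proportional to its squared
    distance to the closest seed chosen so far."""
    centers = [X[rng.randint(X.shape[0])]]
    for _ in range(1, n_clusters):
        sq_dists = ((X[:, None, :] - np.asarray(centers)[None, :, :]) ** 2
                    ).sum(axis=2).min(axis=1)
        centers.append(X[rng.choice(X.shape[0], p=sq_dists / sq_dists.sum())])
    return np.asarray(centers)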
|
[
"honey.bhardwaj.18cse@bmu.edu.in"
] |
honey.bhardwaj.18cse@bmu.edu.in
|
4930c4485ebc8161f7e8797e7c0615a7ea8ab655
|
9d6a3789c083b170920707597b9785d02704b66b
|
/TimeTracker/db.py
|
9e37d3c0ec235d00ac776cf5d472d98a06fab6fd
|
[] |
no_license
|
jamesfowkes/TimeTracker
|
6d7736ecb6e895d1c191809ba3f262bb6f8ebd98
|
4293725e91519a4d02591f1629d94a68c0ab6100
|
refs/heads/master
| 2021-01-17T10:10:09.410146
| 2017-02-01T07:06:56
| 2017-02-01T07:06:56
| 32,203,718
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 406
|
py
|
from flask_sqlalchemy import SQLAlchemy  # flask.ext.* was removed in Flask 1.0
db = None
def add_db(app):
global db
if "SQLALCHEMY_DATABASE_URI" not in app.config:
        try:
            # .get() lets DATABASE be absent or empty and falls through to
            # DATABASE_URI instead of raising immediately.
            app.config["SQLALCHEMY_DATABASE_URI"] = (
                app.config.get("DATABASE") or app.config["DATABASE_URI"])
        except KeyError:
            raise Exception("Application configuration must specify DATABASE or DATABASE_URI")
db = SQLAlchemy(app)
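def _demo_add_db():
    """Illustrative sketch only (the app and URI below are hypothetical):
    the config must carry DATABASE or DATABASE_URI before add_db runs."""
    from flask import Flask
    demo_app = Flask(__name__)
    demo_app.config["DATABASE_URI"] = "sqlite:///timetracker.db"
    add_db(demo_app)
    return db  # now a SQLAlchemy instance bound to demo_app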
|
[
"jamesfowkes@gmail.com"
] |
jamesfowkes@gmail.com
|
3f125e6743ed4c1551826d92be3ce0b2771a742e
|
e9de15ca55e02587f7d1267189f8cde01e1d0f84
|
/meetings/forms.py
|
42eefb28f239ec3b5b9b238feaefbab604b35bdd
|
[] |
no_license
|
jestanoff/meeting-planner
|
187fd1680eabc3e72cd76da6158cee3ba9ede448
|
0c9271ee59019ef35ca6860f8f68d0a889b37f49
|
refs/heads/master
| 2022-04-26T14:03:59.001498
| 2020-04-13T14:34:57
| 2020-04-13T14:34:57
| 255,338,074
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 695
|
py
|
from datetime import date
from django.forms import ModelForm, DateInput, TimeInput, TextInput
from django.core.exceptions import ValidationError
from .models import Meeting
class MeetingForm(ModelForm):
class Meta:
model = Meeting
fields = '__all__'
widgets = {
'date': DateInput(attrs={"type": "date"}),
'start': TimeInput(attrs={"type": "time"}),
'duration': TextInput(attrs={"type": "number", "min": "1", "max": "4"})
}
def clean_date(self):
d = self.cleaned_data.get("date")
if d < date.today():
raise ValidationError("Meetings cannot be in the past")
return d
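# Illustrative usage sketch (any Meeting fields beyond 'date', 'start' and
# 'duration' are assumptions about the model, and validating a ModelForm
# requires a configured Django project):
#
#     form = MeetingForm(data={"date": "2000-01-01", "start": "10:00",
#                              "duration": 1})
#     form.is_valid()  # False: clean_date() rejects past dates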
|
[
"stefan.dzhestanov@waitrose.co.uk"
] |
stefan.dzhestanov@waitrose.co.uk
|
40dbf560537807d786d128ec6c2b0025d1d91c10
|
a3c104bdff7fac31948ef3b9d15e9fa687d306de
|
/train.py
|
e2df88522a7f7515ca01ab8ecaeee94c22492c48
|
[] |
no_license
|
n1kun-j/facerec
|
50c425d3657e6f902670723843037f491a617f6a
|
d743c9bd7289665a0a2e302926d6cab65329f99b
|
refs/heads/master
| 2022-04-10T04:15:49.581504
| 2020-03-25T19:48:40
| 2020-03-25T19:48:40
| 250,075,546
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,491
|
py
|
import tkinter as tk
from tkinter import Message ,Text
import cv2
import os,errno
import shutil
import csv
import numpy as np
from PIL import Image, ImageTk
import pandas as pd
import datetime
import time
import tkinter.ttk as ttk
import tkinter.font as font
import pickle
window = tk.Tk()
window.title("Face Recognition System")
dialog_title = 'QUIT'
dialog_text = 'Are you sure?'
window.configure(background='black')
photo1=tk.PhotoImage(file="logo.png")
tk.Label (window,image=photo1,bg="black").grid(row=0,column=0,sticky=tk.N)
window.grid_rowconfigure(0, weight=1)
window.grid_columnconfigure(0, weight=1)
#message = tk.Label(window, text="Face Recognitition System" ,bg="white" ,fg="black" ,width=50 ,height=3,font=('times', 30, 'italic bold '))
#message.place(x=200, y=20)
lbl = tk.Label(window, text="Enter ID",width=20 ,height=2 ,fg="white" ,bg="black" ,font=('times', 15, ' bold ') )
lbl.place(x=400, y=200)
txt = tk.Entry(window,width=20 ,bg="white" ,fg="black",font=('times', 15, ' bold '))
txt.place(x=700, y=215)
lbl2 = tk.Label(window, text="Enter Name",width=20 ,fg="white" ,bg="black" ,height=2 ,font=('times', 15, ' bold '))
lbl2.place(x=400, y=300)
txt2 = tk.Entry(window,width=20 ,bg="white" ,fg="black",font=('times', 15, ' bold ') )
txt2.place(x=700, y=315)
lbl3 = tk.Label(window, text="Notification : ",width=20 ,fg="white" ,bg="black" ,height=2 ,font=('times', 15, ' bold'))
lbl3.place(x=400, y=400)
message = tk.Label(window, text="" ,bg="white" ,fg="black" ,width=30 ,height=2, activebackground = "white" ,font=('times', 15, ' bold '))
message.place(x=700, y=400)
def is_number(s):
try:
float(s)
return True
except ValueError:
pass
try:
import unicodedata
unicodedata.numeric(s)
return True
except (TypeError, ValueError):
pass
return False
def TakeImages():
Id=(txt.get())
name=(txt2.get())
if(is_number(Id) and name.isalpha()):
cam = cv2.VideoCapture(0)
harcascadePath = "haarcascade_frontalface_default.xml"
face_cascade=cv2.CascadeClassifier(harcascadePath)
sampleNum=0
while(True):
            ret, frame = cam.read()
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
faces = face_cascade.detectMultiScale(gray, 1.32, 3)
for (x,y,w,h) in faces:
cv2.rectangle(frame,(x,y),(x+w,y+h),(255,0,0),2)
sampleNum=sampleNum+1
try:
if not os.path.exists("TrainingImage/" + name):
os.makedirs("TrainingImage/" + name)
cv2.imwrite("TrainingImage/"+ name +"/" + name + "." + str(Id) +'.'+ str(sampleNum) + ".jpg", gray[y:y+h,x:x+w])
except OSError as e:
if e.errno != errno.EEXIST:
raise
cv2.imshow('frame',frame)
if cv2.waitKey(100) & 0xFF == ord('q'):
break
elif sampleNum>60:
break
cam.release()
cv2.destroyAllWindows()
res = "Images Saved for ID : " + Id +" Name : "+ name
message.configure(text= res)
def TrainImages():
BASE_DIR=os.path.dirname(os.path.abspath(__file__))
image_dir=os.path.join(BASE_DIR,"TrainingImage")
current_id=0
label_ids={}
y_labels=[]
x_train=[]
for root,dirs,files in os.walk(image_dir):
for file in files:
if file.endswith("jpg"):
path= os.path.join(root,file)
label = os.path.basename(root).replace(" ", "-").lower()
if not label in label_ids:
label_ids[label]=current_id
current_id+=1
id_=label_ids[label]
                pil_image = Image.open(path).convert("L")
                size = (550, 550)
                # Train on the resized image; the original computed
                # final_image but then discarded it.
                final_image = pil_image.resize(size, Image.ANTIALIAS)
                image_array = np.array(final_image, "uint8")
                harcascadePath = "haarcascade_frontalface_default.xml"
                faceCascade = cv2.CascadeClassifier(harcascadePath)
                faces = faceCascade.detectMultiScale(image_array, 1.3, 5)
for (x,y,w,h) in faces:
roi=image_array[y:y+h, x:x+w]
x_train.append(roi)
y_labels.append(id_)
with open("labels.picle",'wb') as f:
pickle.dump(label_ids,f)
recognizer=cv2.face.LBPHFaceRecognizer_create()
recognizer.train(x_train,np.array(y_labels))
recognizer.save("trainner.yml")
res = "Image Trained"
message.configure(text= res)
def TrackImages():
recognizer = cv2.face.LBPHFaceRecognizer_create()
recognizer.read("trainner.yml")
with open("labels.picle",'rb') as f:
og_labels=pickle.load(f)
labels={v:k for k,v in og_labels.items()}
harcascadePath = "haarcascade_frontalface_default.xml"
    faceCascade = cv2.CascadeClassifier(harcascadePath)
    # Forward slash avoids the invalid "\S" escape in the original path.
    df = pd.read_csv("StudentDetails/StudentDetails.csv")
cam = cv2.VideoCapture(0)
font = cv2.FONT_HERSHEY_SIMPLEX
while True:
ret, frame =cam.read()
gray=cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)
faces=faceCascade.detectMultiScale(gray, 1.32,3)
for(x,y,w,h) in faces:
roi_gray = gray[y:y+h,x:x+w]
roi_color = frame[y:y+h,x:x+w]
id_, conf = recognizer.predict(gray[y:y+h,x:x+w])
if conf>=4 and conf <=65:
font=cv2.FONT_HERSHEY_SIMPLEX
name=labels[id_]
color=(255,255,255)
stroke=2
cv2.rectangle(frame,(x,y),(x+w,y+h),(225,0,0),2)
cv2.putText(frame,name,(x,y),font,1,color,stroke,cv2.LINE_AA)
if (cv2.waitKey(1)==ord('q')):
break
else:
Id='Unknown'
name=str(Id)
font=cv2.FONT_HERSHEY_SIMPLEX
color=(255,255,255)
stroke=2
cv2.rectangle(frame,(x,y),(x+w,y+h),(225,0,0),2)
cv2.putText(frame,name,(x,y),font,1,color,stroke,cv2.LINE_AA)
cv2.imshow('frame',frame)
if (cv2.waitKey(1)==ord('q')):
break
cam.release()
cv2.destroyAllWindows()
takeImg = tk.Button(window, text="Take Images", command=TakeImages ,fg="navy" ,bg="cyan" ,width=20 ,height=3, activebackground = "gray" ,font=('times', 15, ' bold '))
takeImg.place(x=120, y=500)
trainImg = tk.Button(window, text="Train Images", command=TrainImages ,fg="navy" ,bg="cyan" ,width=20 ,height=3, activebackground = "gray" ,font=('times', 15, ' bold '))
trainImg.place(x=420, y=500)
trackImg = tk.Button(window, text="Track Images", command=TrackImages ,fg="navy" ,bg="cyan" ,width=20 ,height=3, activebackground = "gray" ,font=('times', 15, ' bold '))
trackImg.place(x=720, y=500)
quitWindow = tk.Button(window, text="Quit", command=window.destroy ,fg="navy" ,bg="cyan" ,width=20 ,height=3, activebackground = "gray" ,font=('times', 15, ' bold '))
quitWindow.place(x=1020, y=500)
window.mainloop()
|
[
"noreply@github.com"
] |
n1kun-j.noreply@github.com
|
98e8b565cd089e3a308e54abac9b7c71e93f4631
|
8cac3b19b7e3d69ccafe0277d2d9e6416270a361
|
/face_anonymizer/core/__init__.py
|
00fdba77f8cc50c8d16d6ef64d89ee48bdfe6958
|
[] |
no_license
|
natecrisler/face-anonymizer
|
06516059aa4d40aec363c4feb8ea82ca369bae5c
|
1b3eb113abb1663a31555950e1587d5cbfbe77da
|
refs/heads/master
| 2022-01-07T18:32:51.755718
| 2019-01-21T18:50:42
| 2019-01-21T18:50:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,950
|
py
|
from typing import Optional
import cv2
import numpy as np
from face_anonymizer.core.detection import FaceDetector
from face_anonymizer.core.detection import FaceExtractor
from face_anonymizer.core.manipulation import Manipulator
def pixelate_faces_from_path(fd: FaceDetector, image_path: str,
detection_threshold: Optional[float] = None,
strength: int = 10) -> np.ndarray:
"""Pixelates the faces in the image at the provided path
Args:
fd: an instance of `FaceDetector`
image_path: the path of the image we want to pixelate the faces
detection_threshold: minimum confidence to consider a region as a face
strength: pixelation strength
Returns:
the modified image
"""
image = cv2.cvtColor(cv2.imread(image_path), cv2.COLOR_BGR2RGB)
return pixelate_faces(fd, image, detection_threshold=detection_threshold,
strength=strength)
def pixelate_faces(fd: FaceDetector, image: np.ndarray,
detection_threshold: Optional[float] = None,
strength: int = 10, copy: bool = False) -> np.ndarray:
"""Pixelates the faces in the provided image
Args:
fd: an instance of `FaceDetector`
image: the image we want to pixelate the faces
detection_threshold: minimum confidence to consider a region as a face
strength: pixelation strength
copy: whether to modify a copy of the provided image
Returns:
the modified image which is a copy if `copy` is `True`
"""
bboxes = list(fd.detect(image, threshold=detection_threshold))
faces = FaceExtractor.extract(image, *bboxes)
faces_pixelated = (Manipulator.pixelate(face) for face in faces)
if copy:
image = image.copy()
for bbox, fp in zip(bboxes, faces_pixelated):
Manipulator.replace_bounding_box(image, bbox, fp)
return image
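# A minimal usage sketch, kept as a comment because the FaceDetector
# constructor arguments and 'people.jpg' are assumptions, not part of this module:
# from face_anonymizer.core.detection import FaceDetector
# fd = FaceDetector()
# anonymized = pixelate_faces_from_path(fd, 'people.jpg', strength=20)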
|
[
"loumarvincaraig@gmail.com"
] |
loumarvincaraig@gmail.com
|
38c754330d7496b646e8667ba7c58a949f263a6c
|
9ecb57431990491ed6710bdebab678216e8457e3
|
/reading_videos.py
|
11b9469f3473f76a053f197b31a94fde73da578f
|
[] |
no_license
|
jacob-02/OpenCV_Studies
|
7c810754a99bb91319848a0728ebf3b28a20b7c6
|
494329f099bc07b0d6e726a2d3448f755d3834f6
|
refs/heads/master
| 2023-04-14T18:52:11.779870
| 2021-04-26T13:08:07
| 2021-04-26T13:08:07
| 359,331,175
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 484
|
py
|
import cv2 as cv
capture = cv.VideoCapture(
'/home/jacob3006/Videos/st23_naruto-shippuuden-dub-episode-476.1618454201.mp4') # We can also use integers instead of the path file. Those reference cameras.
while True:
isTrue, frame = capture.read()
cv.imshow('Anime', frame)
if cv.waitKey(20) & 0xFF == ord('d'):
break
capture.release()
cv.destroyAllWindows()
# A cv2 "(-215) Assertion failed" error occurs when the file is not present at the specified location
|
[
"jvsanoj23@gmail.com"
] |
jvsanoj23@gmail.com
|
b940b6ea692a545a3e86f40c92f1bb62f8deeab7
|
3442f797c0d566bcfdb016d17d473c133b3403ba
|
/variables.py
|
a8f7ab5add793b9fa8a645d966955b16a29ae593
|
[] |
no_license
|
ryancodes116/python_sandbox
|
216dec01f5c5728e9f2402c383dcf1ca052d56d2
|
02d746a4164eebdda5ae9b3c29c83236b0b8fb17
|
refs/heads/master
| 2022-11-15T01:33:47.898610
| 2020-07-14T17:12:58
| 2020-07-14T17:12:58
| 279,128,080
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 640
|
py
|
# A variable is a container for a value, which can be of various types
'''
This is a
multiline comment
or docstring (used to define a function's purpose)
can be single or double quotes
'''
"""
VARIABLE RULES:
- Variable names are case sensitive (name and NAME are different variables)
- Must start with a letter or an underscore
- Can have numbers but cannot start with one
"""
x = 1 # int
y = 2.5 # float
name = 'John' # str
is_cool = True # bool
# Multiple assignment
x, y, name, is_cool = (1, 2.5, 'John', True)
# Basic math
a = x + y
# Casting
x = str(x)
y = int(y)
z = float(y)
print(type(z), z)
|
[
"ryanmoulton116@gmail.com"
] |
ryanmoulton116@gmail.com
|
f6d5e4cdcbf3d490ddba43494f37a07091965456
|
490f13d332d93b14431a5da8e075774bcc6aee3b
|
/utils/product_attributes/ameublement.py
|
82420ddd5352998b3fcde564a57d84af533ceb70
|
[] |
no_license
|
rickyakilimali/ria
|
c9769825fc2b1f1514906b1ac4c30c2e8fe25dfd
|
4c54e4326ff312e231eac6484d09476d61fb564a
|
refs/heads/master
| 2021-08-24T11:39:49.940193
| 2017-12-09T15:13:35
| 2017-12-09T15:13:35
| 113,436,642
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 696
|
py
|
TYPES_ARMOIRE = (
('Armoire basse','Armoire basse'),
('Armoire haute','Armoire haute'),
('Armoire mi-haute','Armoire mi-haute'),
)
TYPE_SIEGE = (
('Chaise accueil','Chaise accueil'),
('Chaise visiteur','Chaise visiteur'),
('Fauteuil de Direction','Fauteuil de Direction'),
('Siège de réunion','Siège de réunion'),
)
TYPE_PORTE_ARMOIRE = (
('Battantes','Battantes'),
('Rideaux','Rideaux'),
)
REVETEMENT_SIEGE = (
('1','Bois'),
('2','Cuir'),
('3','Métal'),
('4','Plastique'),
)
DIMENSION_AMEUBLEMENT = (
('01','100x120cm'),
('02','180x100cm'),
('03','198x120cm'),
('04','210x270cm'),
)
MATIERE_AMEUBLEMENT = (
('01','Bois'),
('02','Métal'),
)
|
[
"jusciamua@gmail.com"
] |
jusciamua@gmail.com
|
60b3118eead8402276c713c5d5a088f5dfd1ae57
|
0f6cfed075d9967cfe9ec98ae00b06e7b6b91c3b
|
/left_rotation.py
|
32ac48771507545a4a7ca5fa460ec3aac4619eb8
|
[] |
no_license
|
engrjepmanzanillo/hacker_rank_practice
|
c585b59a0fb11d958d8bafde8b481aaade902850
|
d99cc6de10876f1958549b1b1b4c75cdf06c1c60
|
refs/heads/master
| 2020-08-04T23:20:51.689116
| 2019-11-12T11:36:51
| 2019-11-12T11:36:51
| 212,311,817
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 370
|
py
|
#!/bin/python3
#author: @engrjepmanzanillo
import math
import os
import random
import re
import sys
def list_slicer(items):  # renamed from `list` to avoid shadowing the built-in
items = [str(x) for x in items]
items = ' '.join(items)
return items
if __name__ == '__main__':
#nd = input().split()
n = 5
d = 4
a = [1,2,3,4,5]
b = list_slicer(a[:d-n])
c = list_slicer(a[d:])
print(c,b)
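# The slicing trick generalizes: a left rotation of a by d positions is just
# a[d:] + a[:d], e.g. [1,2,3,4,5] rotated left by 4 gives [5, 1, 2, 3, 4].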
|
[
"engr.jeffmanzanillo@gmail.com"
] |
engr.jeffmanzanillo@gmail.com
|
2c750ffdee421e7f29d213e636594221cd64bdf7
|
e0ba958c23fc9a43efb02fc9e4308095bb8a7670
|
/chapter02/18.py
|
b701d1252ecebae1a5b40fcc5698df610121bc15
|
[] |
no_license
|
go-inoue/nlp100knock
|
5c9b5301d5f90bfb239d910e4f06a9581e669b7c
|
f341bf4d09eb44489aec976bce4a23c3d248657e
|
refs/heads/master
| 2020-12-24T06:38:08.220639
| 2016-08-24T11:00:27
| 2016-08-24T11:00:27
| 65,361,403
| 0
| 1
| null | 2016-08-10T10:42:25
| 2016-08-10T07:42:59
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 711
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
18. Sort the lines in descending order of the numeric value in the third column
Sort the lines in reverse order of the numeric value in the third column (note: reorder the lines without changing their contents).
Use the sort command for checking (for this problem the result does not have to match the command's output exactly).
"""
import sys
def sort3(f):
lines = f.readlines()
l = [line.strip().split('\t') for line in lines]
sorted_lines = sorted(l, key=lambda x: float(x[2]), reverse=True)
return sorted_lines
def main():
with open(sys.argv[1], 'r') as f:
for i in sort3(f):
print('\t'.join(i))
if __name__ == "__main__":
main()
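# Equivalent check with GNU sort on tab-separated input (numeric, descending):
# sort -t$'\t' -k3,3 -rn input.txt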
|
[
"go.inoue.gi@gmail.com"
] |
go.inoue.gi@gmail.com
|
e8c01110712e423217ecb0d38555a5fe4b3928d6
|
521dce61e99b248b20610ebe681f611ff9f36a58
|
/Codeforces problemset/1409A - Yet Another Two Integers Problem.py
|
9afd0db4228507cdaa8374cb4f366196c24e68e0
|
[] |
no_license
|
ommiy2j/Codeforces
|
ff0ca0129d442c14438d54c98673efd17c1bb8f0
|
2025c4ae11acca801fca4871dbc169c456f30ff9
|
refs/heads/master
| 2023-04-01T14:48:08.092036
| 2021-04-04T18:32:38
| 2021-04-04T18:32:38
| 320,491,914
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 177
|
py
|
for i in range(int(input())):
a,b=map(int,input().split())
x=abs(a-b)
c=x%10
y=x//10
if(c==0):
print(x//10)
else:
print(y+1)
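# Why this works: one operation changes a by any k in 1..10, so covering the
# gap x = |a - b| needs ceil(x / 10) operations, which the x//10 (+1 when
# x % 10 != 0) logic above computes.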
|
[
"ommiy2j@gmail.com"
] |
ommiy2j@gmail.com
|
78008ba9b5ecc4d38d9252059452933b0a8c5d7e
|
5e01b849530ceac9f62ef2fb85497792bbe4c15a
|
/Jan16/lang-train-save.py
|
219c8a6b40d11cb18490277f082b41c41fccad2c
|
[] |
no_license
|
cheesecat47/ML_DL_Jan2020
|
2206599c0eb20eebdd152d1e3b27e72ffa2c6900
|
15bffd8c9c19d9ff2871aa7afe95607f95e491fe
|
refs/heads/master
| 2021-07-16T18:10:20.609018
| 2021-01-19T00:48:01
| 2021-01-19T00:48:01
| 232,076,415
| 0
| 0
| null | 2020-01-28T04:58:03
| 2020-01-06T10:23:19
|
Python
|
UTF-8
|
Python
| false
| false
| 405
|
py
|
from sklearn import svm
import joblib
import json
# Read in each language's occurrence-frequency data (JSON)
with open('LANG/freq.json', 'r', encoding='utf-8') as fp:
d = json.load(fp)
data = d[0] # 파일별 데이터만 추출
# Train on the data
clf = svm.SVC()
clf.fit(data['freqs'], data['labels'])
# Save the trained model
joblib.dump(clf, 'LANG/freq.pkl')
print('ok')
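# A minimal sketch of loading the model back for prediction; `freq_vector`
# (one character-frequency vector shaped like an entry of data['freqs']) is an
# assumption:
# clf = joblib.load('LANG/freq.pkl')
# print(clf.predict([freq_vector]))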
|
[
"cheesecat47@gmail.com"
] |
cheesecat47@gmail.com
|
ddf76264acb11a5e171585c688801d0531658285
|
4613b6938496bae29749dafd44ffae3fad31defd
|
/python-exercises/ex1.py
|
2f193c644d4207f48750ad7ede995b0fd543d960
|
[] |
no_license
|
Marlar22/coding-sprint
|
b2f45aaf820f213d3fc30b3f1c842b23f758864e
|
3068db7fd2723571702266842904df5e491d8d40
|
refs/heads/master
| 2020-03-21T14:53:29.893527
| 2018-06-26T04:17:18
| 2018-06-26T04:17:18
| 138,680,010
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 198
|
py
|
print("Hello World!")
print("Hello Again")
print("I like typing this.")
print("This is fun.")
print('Yay! Printing.')
print("I'd much rather you 'not'.")
print('I "said" do not touch this.')
|
[
""
] | |
d7b906834fe670221921b20d1eaa42719b2d1f6a
|
90353c1ae2abff43240ab0401e20ca2b82ec8a13
|
/clock_face.py
|
b59d1a3f7481b27942978c3b80b1579af199d3cf
|
[] |
no_license
|
andreygor444/Clock
|
5e5f8d111c9dac5cac06ce22828a7f1fa2bdd975
|
4cfd44b5d14e47a648cac0d6ea74a729d78f837e
|
refs/heads/master
| 2023-04-29T13:55:58.826145
| 2021-05-22T19:37:01
| 2021-05-22T19:37:01
| 369,117,509
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,348
|
py
|
from turtle import Turtle
from clock_number import ClockNumber
from math import sin, cos, pi
from utils import can_exit_while_drawing
class ClockFace:
"""Класс циферблата часов"""
def __init__(self, radius, center, numbers_count=12):
self.radius = radius
self._center = center
self._render()
self._make_numbers(numbers_count)
@can_exit_while_drawing
def _render(self):
"""Отрисовывает круглый контур циферблата и точку в центре"""
t = Turtle()
t.speed(0)
t.width(self.radius // 80)
t.up()
t.goto((self._center[0], self._center[1] - self.radius))
t.seth(0)
t.down()
t.circle(self.radius)
t.up()
t.goto((self._center[0], self._center[1]))
t.down()
t.dot(self.radius / 15)
t.hideturtle()
def _make_numbers(self, numbers_count):
self._numbers = []
number_size = self.radius / 6
for i in range(1, numbers_count + 1):
angle = pi * 2 / numbers_count * i
x = self._center[0] + self.radius * 0.8 * sin(angle)
y = self._center[1] + self.radius * 0.8 * cos(angle)
number = ClockNumber(i, (x, y), number_size)
self._numbers.append(number)
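# A minimal usage sketch (assumes ClockNumber draws itself on construction,
# as the code above relies on):
# face = ClockFace(radius=200, center=(0, 0))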
|
[
"andreygor444@gmail.com"
] |
andreygor444@gmail.com
|
27e9c00748769bcf5d2150f4023cacd79bf41ac2
|
c8d1b3c16804823b9e751576f59593df908021ea
|
/codeup/기초100제/1012.py
|
c67c536f98a8fbf02b2e99c8647a44feb244ca9e
|
[] |
no_license
|
giljun/Algorithm
|
f2809c56efc45c518fef4b9d931e3ece580d209c
|
bce6a30dcdbe4f4382c45c2f7b415425ce345953
|
refs/heads/master
| 2021-06-16T06:47:51.902748
| 2021-04-21T14:40:01
| 2021-04-21T14:40:01
| 192,660,401
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 43
|
py
|
number = float(input())
print("%f"% number)
|
[
"giljune91@gmail.com"
] |
giljune91@gmail.com
|
24deb0da2773b154bb9bd7926e0b22c0d72f9736
|
b72cc0de3990caf5dd7989506d80fe4aa448d4fb
|
/lighting.py
|
8ec008410ad9b616774ffecb4438ce6167502eee
|
[] |
no_license
|
tanzimelahi/compiler
|
23113b015aff1ed96954176bcb129c8034ca79ec
|
8323604c8d4bd69dcca099149c298d952d338e0a
|
refs/heads/master
| 2022-06-20T22:12:58.089069
| 2020-05-13T02:33:55
| 2020-05-13T02:33:55
| 263,503,749
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 31,681
|
py
|
import math
from subprocess import Popen, PIPE
from os import remove
import random
#constants
XRES = 500
YRES = 500
MAX_COLOR = 255
RED = 0
GREEN = 1
BLUE = 2
DEFAULT_COLOR = [0, 0, 0]
def new_screen( width = XRES, height = YRES ):
screen = []
for y in range( height ):
row = []
screen.append( row )
for x in range( width ):
screen[y].append( DEFAULT_COLOR[:] )
return screen
def new_zbuffer( width = XRES, height = YRES ):
zb = []
for y in range( height ):
row = [ float('-inf') for x in range(width) ]
zb.append( row )
return zb
def plot( screen, zbuffer,x,y,z,color):
newy = YRES - 1 - y
z = int((z * 1000)) / 1000.0
if ( x >= 0 and x < XRES and newy >= 0 and newy < YRES and zbuffer[int(newy)][int(x)] <= z):
screen[int(newy)][int(x)] = color[:]
zbuffer[int(newy)][int(x)] = z
def clear_screen( screen ):
for y in range( len(screen) ):
for x in range( len(screen[y]) ):
screen[y][x] = DEFAULT_COLOR[:]
def clear_zbuffer( zb ):
for y in range( len(zb) ):
for x in range( len(zb[y]) ):
zb[y][x] = float('-inf')
def save_ppm( screen, fname ):
f = open( fname, 'wb' )
ppm = 'P6\n' + str(len(screen[0])) +' '+ str(len(screen)) +' '+ str(MAX_COLOR) +'\n'
f.write(ppm.encode())
for y in range( len(screen) ):
for x in range( len(screen[y]) ):
pixel = screen[y][x]
f.write( bytes(pixel) )
f.close()
def save_ppm_ascii( screen, fname ):
f = open( fname, 'w' )
ppm = 'P3\n' + str(len(screen[0])) +' '+ str(len(screen)) +' '+ str(MAX_COLOR) +'\n'
for y in range( len(screen) ):
row = ''
for x in range( len(screen[y]) ):
pixel = screen[y][x]
row+= str( pixel[ RED ] ) + ' '
row+= str( pixel[ GREEN ] ) + ' '
row+= str( pixel[ BLUE ] ) + ' '
ppm+= row + '\n'
f.write( ppm )
f.close()
def save_extension( screen, fname ):
ppm_name = fname[:fname.find('.')] + '.ppm'
save_ppm_ascii( screen, ppm_name )
p = Popen( ['convert', ppm_name, fname ], stdin=PIPE, stdout = PIPE )
p.communicate()
remove(ppm_name)
def display( screen ):
ppm_name = 'pic.ppm'
save_ppm_ascii( screen, ppm_name )
p = Popen( ['display', ppm_name], stdin=PIPE, stdout = PIPE )
p.communicate()
remove(ppm_name)
#commands from here:
#for the first two octants, the x0 must be smaller than x1
def firstoct(screen,buffer,x0,y0,z0,x1,y1,z1,color):#oct 1 and 5
x=x0
y=y0
A=y1-y0
B=-(x1-x0)
d=2*A+B
dz=(z1-z0)/(x1-x0+1)
z=z0
while x<x1:
plot(screen,buffer,x,y,z,color)
if d>=0:
y+=1
d=d+2*B
x=x+1
d=d+2*A
z+=dz
plot(screen,buffer,x1,y1,z1,color)
def secondoct(screen,buffer,x0,y0,z0,x1,y1,z1,color):#oct 2 and 6
x=x0
y=y0
A=y1-y0
B=-(x1-x0)
d=A+2*B
z=z0
dz=(z1-z0)/(y1-y0+1)
while y<y1:
plot(screen,buffer,x,y,z,color)
if d<=0:
d=d+2*A
x+=1
y=y+1
d=d+2*B
z+=dz
plot(screen,buffer,x1,y1,z1,color)
def thirdoct(screen,buffer,x0,y0,z0,x1,y1,z1,color):#oct 3 and 7 remember the x0 and x1 hierarchy is reversed for this one
x=x0
y=y0
A=y1-y0
B=-(x1-x0)
d=A+2*B
z=z0
dz=(z1-z0)/(y1-y0+1)
while y<y1:
plot(screen,buffer,x,y,z,color)
if d>=0:
x=x-1
d=d-2*A
y=y+1
d=d+2*B
z+=dz
plot(screen,buffer,x1,y1,z1,color)
def fourthoct(screen,buffer,x0,y0,z0,x1,y1,z1,color): #oct 4 and 8
x=x0
y=y0
A=y1-y0
B=-(x1-x0)
d=A+2*B
z=z0
dz=(z1-z0)/(x1-x0+1)
while x<x1:
plot(screen,buffer,x,y,z,color)
if d<=0:
y=y-1
d=d-2*B
x=x+1
d=d+2*A
z+=dz
plot(screen,buffer,x1,y1,z1,color)
def oneSlopePos(screen,buffer,x0,y0,z0,x1,y1,z1,color):
x=x0
y=y0
z=z0
dz=(z1-z0)/(x1-x0+1)
while x<=x1:
plot(screen,buffer,x,y,z,color)
x+=1
y+=1
z+=dz
def oneSlopeNeg(screen,buffer,x0,y0,z0,x1,y1,z1,color):
x=x0
y=y0
z=z0
dz=(z1-z0)/(x1-x0+1)
while x<=x1:
plot(screen,buffer,x,y,z,color)
x+=1
y-=1
z+=dz
def zeroSlope(screen,buffer,x0,y0,z0,x1,y1,z1,color):
x=x0
y=y0
z=z0
dz=(z1-z0)/(x1-x0+1)
while(x<=x1):
plot(screen,buffer,x,y,z,color)
x+=1
z+=dz
def undefinedSlope(screen,buffer,x0,y0,z0,x1,y1,z1,color):
x=x0
y=y0
z=z0
dz=(z1-z0)/(y1-y0+1)
while y<=y1:
plot(screen,buffer,x,y,z,color)
y+=1
z+=dz
def drawline(screen,buffer,x0,y0,z0,x1,y1,z1,color):# whenever possible x0 must be greater than x1(left to right orientation)
if(x0>x1):
store=x0
x0=x1
x1=store
storage=y0
y0=y1
y1=storage
store=z0
z0=z1
z1=store
if(x0==x1):
if(y1<y0):
store=y0
y0=y1
y1=store
store=z0
z0=z1
z1=store
undefinedSlope(screen,buffer,x0,y0,z0,x1,y1,z1,color)
elif(y0==y1):
zeroSlope(screen,buffer,x0,y0,z0,x1,y1,z1,color)
elif abs(x1-x0)>=abs(y1-y0):
if y1>y0:
firstoct(screen,buffer,x0,y0,z0,x1,y1,z1,color)
else:
fourthoct(screen,buffer,x0,y0,z0,x1,y1,z1,color)
else:
if y1>y0:
secondoct(screen,buffer,x0,y0,z0,x1,y1,z1,color)
else:
thirdoct(screen,buffer,x1,y1,z1,x0,y0,z0,color)
def new_matrix(rows = 4, cols = 4):
m = []
for c in range( cols ):
m.append( [] )
for r in range( rows ):
m[c].append( 0 )
return m
def update_matrix(row,column,matrix,value):
matrix[column][row]=value
def up(matrix,row,column,value):
matrix[column][row]=value
def print_matrix(matrix):
result=""
row=len(matrix[0])
col=len(matrix)
for x in range(row):
for y in range(col):
add=str(matrix[y][x])
result+=add.ljust(4)  # pad each entry to a fixed width so columns line up
result+="\n"
print(result)
def ident(matrix):
row=len(matrix[0])
col=row
for x in range(row):
for y in range(col):
if(x==y):
matrix[y][x]=1
else:
matrix[y][x]=0
def matrix_multiplication(m1,m2): #this func works fine
result=new_matrix(len(m1[0]),len(m2))
for secondCol in range(len(m2)):
for y in range(len(m1[0])):
add=0
for x in range(len(m1)):
add+=(m1[x][y]*m2[secondCol][x])
result[secondCol][y]=add
for x in range(len(m2)):
m2[x]=result[x]
def empty_matrix():
m = []
m.append( [] )
return m
#test cases
#that ends here
def add_point(matrix,x,y,z=0):
if len(matrix[0])==0:
matrix[0].append(x)
matrix[0].append(y)
matrix[0].append(z)
matrix[0].append(1)
else:
matrix.append([])
matrix[len(matrix)-1].append(x)
matrix[len(matrix)-1].append(y)
matrix[len(matrix)-1].append(z)
matrix[len(matrix)-1].append(1)
def update_point(matrix,x,y,z,unit=1): #same as add_point but can modify the '1's used as helper func for rotation
if len(matrix[0])==0:
matrix[0].append(x)
matrix[0].append(y)
matrix[0].append(z)
matrix[0].append(unit)
else:
matrix.append([])
matrix[len(matrix)-1].append(x)
matrix[len(matrix)-1].append(y)
matrix[len(matrix)-1].append(z)
matrix[len(matrix)-1].append(unit)
def add_edge(matrix,x0,y0,z0,x1,y1,z1):
add_point(matrix,x0,y0,z0)
add_point(matrix,x1,y1,z1)
def add_polygon(matrix,x0,y0,z0,x1,y1,z1,x2,y2,z2):
add_point(matrix,x0,y0,z0)
add_point(matrix,x1,y1,z1)
add_point(matrix,x2,y2,z2)
def dot_product(vector1,vector2):
return (vector1[0]*vector2[0]+vector1[1]*vector2[1]+vector1[2]*vector2[2])
def vector_substraction(vector1,vector2):
return [vector1[0]-vector2[0],vector1[1]-vector2[1],vector1[2]-vector2[2]]
def cross_product(a,b):# a and b are vectors []
return [a[1]*b[2]-a[2]*b[1],a[2]*b[0]-a[0]*b[2],a[0]*b[1]-a[1]*b[0]]
#print(cross_product([2,4,3],[1,2,4]))
def surface_normal(polygon_matrix,index): #one change made here that is different from notes
a=vector_substraction(polygon_matrix[index+1],polygon_matrix[index])
b=(vector_substraction(polygon_matrix[index+2],polygon_matrix[index])) #returned surface normal to its regular note form
#print(cross_product(a,b))
return cross_product(a,b)
def normalize(vector):
magnitude = math.sqrt( vector[0] * vector[0] +
vector[1] * vector[1] +
vector[2] * vector[2])
for i in range(3):
vector[i] = vector[i] / magnitude
def backface_culling(n,v):# n being the normal of a triangle and v the z unit vector
result=dot_product(n,v)
if (result>0):
return True
else:
return False
def get_lighting(A,P,L,V,N,Ka,Kd,Ks): # all being triplets or vectors
I_ambient=[]
for x in range(len(A)):
I_ambient.append(A[x]*Ka[x])
I_diffuse=[]
storage=[]
for x in range(len(P)):
storage.append(P[x]*Kd[x])
normalize(N)
normalize(L)
normalize(V)
result=dot_product(N,L)
for x in range(len(P)):
storage[x]=storage[x]*result
for x in range(len(storage)): #copied storage to Idiffuse
I_diffuse.append(storage[x])
I_specular=[]
store=[]
for x in range(len(P)):
store.append(P[x]*Ks[x])
Two_N=[]
for x in range(len(N)):
Two_N.append(N[x]*2)
for x in range(len(Two_N)):
Two_N[x]=Two_N[x]*result
answer=vector_substraction(Two_N,L)
memory=dot_product(answer,V)**4
for x in range(len(P)):
I_specular.append(store[x]*memory)
I=[]
for x in range(len(I_ambient)):
I.append(int(I_ambient[x]+I_diffuse[x]+I_specular[x]))
for x in range(len(I)):
if (I[x]>255):
I[x]=255
elif(I[x]<0):
I[x]=0
#print(I)
return I
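# In vector form the function above computes a Phong-style model:
# I = Ka*A + Kd*P*(N . L) + Ks*P*(((2(N . L))N - L) . V)^4
# with N, L and V normalized first, and each channel clamped to [0, 255].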
def ytop(matrix,step): #helper func for add_polygons
result=step+2
y_top=matrix[result][1]
if(matrix[step+1][1]>y_top):
y_top=matrix[step+1][1]
result=step+1
if(matrix[step][1]>y_top):
y_top=matrix[step][1]
result=step
return y_top
def ytop_step(matrix,step):
result=step+2
y_top=matrix[result][1]
if(matrix[step+1][1]>y_top):
y_top=matrix[step+1][1]
result=step+1
if(matrix[step][1]>y_top):
y_top=matrix[step][1]
result=step
return result
def xtop(matrix,step):
result=step+2
y_top=matrix[result][1]
if(matrix[step+1][1]>y_top):
y_top=matrix[step+1][1]
result=step+1
if(matrix[step][1]>y_top):
y_top=matrix[step][1]
result=step
x_top=matrix[result][0]
return x_top
def ybot(matrix,step):
bottom=step+2
y_bottom=matrix[bottom][1]
if(matrix[step+1][1]<y_bottom):
y_bottom=matrix[step+1][1]
bottom=step+1
if(matrix[step][1]<y_bottom):
y_bottom=matrix[step][1]
bottom=step
return y_bottom
def ybot_step(matrix,step):
bottom=step+2
y_bottom=matrix[bottom][1]
if(matrix[step+1][1]<y_bottom):
y_bottom=matrix[step+1][1]
bottom=step+1
if(matrix[step][1]<y_bottom):
y_bottom=matrix[step][1]
bottom=step
return bottom
def xbot(matrix,step):
bottom=step+2
y_bottom=matrix[bottom][1]
if(matrix[step+1][1]<y_bottom):
y_bottom=matrix[step+1][1]
bottom=step+1
if(matrix[step][1]<y_bottom):
y_bottom=matrix[step][1]
bottom=step
x_bottom=matrix[bottom][0]
return x_bottom
def zbot(matrix,step):
bottom=step+2
y_bottom=matrix[bottom][1]
if(matrix[step+1][1]<y_bottom):
y_bottom=matrix[step+1][1]
bottom=step+1
if(matrix[step][1]<y_bottom):
y_bottom=matrix[step][1]
bottom=step
x_bottom=matrix[bottom][0]
z_bottom=matrix[bottom][2]
return z_bottom
def ztop(matrix,step):
result=step+2
y_top=matrix[result][1]
if(matrix[step+1][1]>y_top):
y_top=matrix[step+1][1]
result=step+1
if(matrix[step][1]>y_top):
y_top=matrix[step][1]
result=step
x_top=matrix[result][0]
z_top=matrix[result][2]
return z_top
def add_polygons(screen,buffer,matrix,A,P,L,V,Ka,Kd,Ks):
step=0
while(step<len(matrix)):
normal=surface_normal(matrix,step)
color=get_lighting(A,P,L,V,normal,Ka,Kd,Ks)
if(normal[2]>0):
for x in range(len(color)):
if color[x]<0:
print(normal)
drawline(screen,buffer,matrix[step][0],matrix[step][1],matrix[step][2],matrix[step+1][0],matrix[step+1][1],matrix[step+1][2],color)
drawline(screen,buffer,matrix[step+1][0],matrix[step+1][1],matrix[step+1][2],matrix[step+2][0],matrix[step+2][1],matrix[step+2][2],color)
drawline(screen,buffer,matrix[step+2][0],matrix[step+2][1],matrix[step+2][2],matrix[step][0],matrix[step][1],matrix[step][2],color)
yb=ybot(matrix,step) #scanline conversion starts here
yt=ytop(matrix,step)
xt=xtop(matrix,step)
xb=xbot(matrix,step)
zb=zbot(matrix,step)
zt=ztop(matrix,step)
xm=0
zm=0
ym=0
if matrix[step+2][1]!=yb and matrix[step+2][1]!=yt:
ym=matrix[step+2][1]
xm=matrix[step+2][0]
zm=matrix[step+2][2]
elif matrix[step+1][1]!=yb and matrix[step+1][1]!=yt:
ym=matrix[step+1][1]
xm=matrix[step+1][0]
zm=matrix[step+1][2]
elif matrix[step][1]!=yb and matrix[step][1]!=yt:
ym=matrix[step][1]
xm=matrix[step][0]
zm=matrix[step][2]
else:
b=0
t=0
if(matrix[step][1]==yb):
b+=1
elif(matrix[step][1]==yt):
t+=1
if(matrix[step+1][1]==yb):
b+=1
elif(matrix[step+1][1]==yt):
t+=1
if(matrix[step+2][1]==yb):
b+=1
elif(matrix[step+2][1]==yt):
t+=1
if(t>=2):
count=ytop_step(matrix,step)
if step!=count and matrix[step][1]==yt:
xm=matrix[step][0]
ym=matrix[step][1]
zm=matrix[step][2]
elif (step+1)!=count and matrix[step+1][1]==yt:
xm=matrix[step+1][0]
ym=matrix[step+1][1]
zm=matrix[step+1][2]
else:
xm=matrix[step+2][0]
ym=matrix[step+2][1]
zm=matrix[step+2][2]
if(b>=2):
count=ybot_step(matrix,step)
if step!=count and matrix[step][1]==yb:
xm=matrix[step][0]
ym=matrix[step][1]
zm=matrix[step][2]
elif (step+1)!=count and matrix[step+1][1]==yb:
xm=matrix[step+1][0]
ym=matrix[step+1][1]
zm=matrix[step+1][2]
else:
xm=matrix[step+2][0]
ym=matrix[step+2][1]
zm=matrix[step+2][2]
dx=(xt-xb)/(yt-yb+1)
dx1=(xm-xb)/(ym-yb+1)
dx1_1=(xt-xm)/(yt-ym+1)
dz0=(zt-zb)/(yt-yb+1)
dz1=(zm-zb)/(ym-yb+1)
dz1_1=(zt-zm)/(yt-ym+1)
x0=xb
x1=xb
z0=zb
z1=zb
y=yb
while(y<=ym):
drawline(screen,buffer,x0,y,z0,x1,y,z1,color)
x0+=dx
x1+=dx1
z0+=dz0
z1+=dz1
y+=1
dx1=dx1_1
x1=xm
z1=zm
dz1=dz1_1
while(y<=yt):
drawline(screen,buffer,x0,y,z0,x1,y,z1,color)
x0+=dx
x1+=dx1
z0+=dz0
z1+=dz1
y+=1 #scanline conversion ends here
step+=3
def add_lines(screen,buffer,matrix,color):
step=0
while(step<len(matrix)):
#print("x0:"+str(matrix[step][0])+" "+"y0:"+str(matrix[step][1])+" "+"x1:"+str(matrix[step+1][0])+" "+"y1:"+str(matrix[step+1][1]))
drawline(screen,buffer,matrix[step][0],matrix[step][1],matrix[step][2],matrix[step+1][0],matrix[step+1][1],matrix[step+1][2],color)
step+=2
def scale(sx,sy,sz):
info=[sx,sy,sz]
matrix=new_matrix(4,4)
ident(matrix)
for col in range(len(matrix)-1):
for row in range(len(matrix[0])-1):
if row==col:
matrix[col][row]=info[col]
return matrix
def move(a,b,c):
info=[a,b,c]
matrix=new_matrix(4,4)
ident(matrix)
for row in range(len(matrix)-1):
matrix[3][row]=info[row]
return matrix
def x_rotation(angle):
angle=math.radians(angle)
matrix=empty_matrix()
update_point(matrix,1,0,0,0)
update_point(matrix,0,math.cos(angle),1*math.sin(angle),0)
update_point(matrix,0,-1*math.sin(angle),math.cos(angle),0)
update_point(matrix,0,0,0,1)
return matrix
def y_rotation(angle):
angle=math.radians(angle)
matrix=empty_matrix()
update_point(matrix,math.cos(angle),0,-1*math.sin(angle),0)
update_point(matrix,0,1,0,0)
update_point(matrix,1*math.sin(angle),0,math.cos(angle),0)
update_point(matrix,0,0,0,1)
return matrix
def z_rotation(angle):
angle=math.radians(angle)
matrix=empty_matrix()
update_point(matrix,math.cos(angle),1*math.sin(angle),0,0)
update_point(matrix,-1*math.sin(angle),math.cos(angle),0,0)
update_point(matrix,0,0,1,0)
update_point(matrix,0,0,0,1)
return matrix
def rotation (angle,axis_of_rotation):
if(axis_of_rotation=="x"):
return x_rotation(angle)
elif axis_of_rotation=="y":
return y_rotation(angle)
else:
return z_rotation(angle)
def bezier(matrix,x0,y0,x1,y1,x2,y2,x3,y3):
ax=-x0+3*x1-3*x2+x3
bx=3*x0-6*x1+3*x2
cx=-3*x0+3*x1
dx=x0
ay=-y0+3*y1-3*y2+y3
by=3*y0-6*y1+3*y2
cy=-3*y0+3*y1
dy=y0
t=0
input_x=int(ax*math.pow(t,3)+bx*math.pow(t,2)+cx*t+dx)
input_y=int(ay*math.pow(t,3)+by*math.pow(t,2)+cy*t+dy)
t+=0.0001
new_input_x=int(ax*math.pow(t,3)+bx*math.pow(t,2)+cx*t+dx)
new_input_y=int(ay*math.pow(t,3)+by*math.pow(t,2)+cy*t+dy)
add_edge(matrix,input_x,input_y,0,new_input_x,new_input_y,0)
input_x=new_input_x
input_y=new_input_y
t+=0.0001
while(t<=1):
new_input_x=int(ax*math.pow(t,3)+bx*math.pow(t,2)+cx*t+dx)
new_input_y=int(ay*math.pow(t,3)+by*math.pow(t,2)+cy*t+dy)
add_edge(matrix,input_x,input_y,0,new_input_x,new_input_y,0)
input_x=new_input_x
input_y=new_input_y
t+=0.0001
def hermite(matrix,x0,y0,x1,y1,rx0,ry0,rx1,ry1):
my_matrix=empty_matrix()
m2x=empty_matrix()
add_point(m2x,x0,x1,rx0)
update_matrix(3,0,m2x,rx1)
add_point(my_matrix,2,-3,0)
add_point(my_matrix,-2,3,0)
add_point(my_matrix,1,-2,1)
add_point(my_matrix,1,-1,0)
info=[1,0,0,0]
for x in range(4):
update_matrix(3,x,my_matrix,info[x])
matrix_multiplication(my_matrix,m2x)
ax=m2x[0][0]
bx=m2x[0][1]
cx=m2x[0][2]
dx=m2x[0][3]
m2y=empty_matrix()
add_point(m2y,y0,y1,ry0)
update_matrix(3,0,m2y,ry1)
matrix_multiplication(my_matrix,m2y)
ay=m2y[0][0]
by=m2y[0][1]
cy=m2y[0][2]
dy=m2y[0][3]
t=0
input_x=int(ax*math.pow(t,3)+bx*math.pow(t,2)+cx*t+dx)
input_y=int(ay*math.pow(t,3)+by*math.pow(t,2)+cy*t+dy)
t+=0.0001
new_input_x=int(ax*math.pow(t,3)+bx*math.pow(t,2)+cx*t+dx)
new_input_y=int(ay*math.pow(t,3)+by*math.pow(t,2)+cy*t+dy)
add_edge(matrix,input_x,input_y,0,new_input_x,new_input_y,0)
input_x=new_input_x
input_y=new_input_y
t+=0.0001
while(t<=1):
new_input_x=int(ax*math.pow(t,3)+bx*math.pow(t,2)+cx*t+dx)
new_input_y=int(ay*math.pow(t,3)+by*math.pow(t,2)+cy*t+dy)
add_edge(matrix,input_x,input_y,0,new_input_x,new_input_y,0)
input_x=new_input_x
input_y=new_input_y
t+=0.0001
def circle(matrix,cx,cy,cz,r,step=1000):
i=0
t=0
input_x=int(math.cos(math.pi*2*t)*r+cx)
input_y=int(math.sin(math.pi*2*t)*r+cy)
i+=1
t=i/step
new_input_x=int(math.cos(math.pi*2*t)*r+cx)
new_input_y=int(math.sin(math.pi*2*t)*r+cy)
add_edge(matrix,input_x,input_y,0,new_input_x,new_input_y,0)
input_x=new_input_x
input_y=new_input_y
i+=1
while(i<=step):
t=i/step
new_input_x=int(math.cos(math.pi*2*t)*r+cx)
new_input_y=int(math.sin(math.pi*2*t)*r+cy)
add_edge(matrix,input_x,input_y,0,new_input_x,new_input_y,0)
input_x=new_input_x
input_y=new_input_y
i+=1
def apply(transform,edge):
matrix_multiplication(transform,edge)
def old_parser(fl_name,screen,buffer,color,edge,triangle_matrix,stack):
fl=open(fl_name,"r")
data=fl.readlines()
i=0
A=[255,255,255]
P=[250,0,255]
L=[1,0.5,1]
V=[0,0,1]
Ka=[0.1,0.1,0.1]
Kd=[0.5,0.5,0.5]
Ks=[0.5,0.5,0.5]
while(i<len(data)):
#print(data[i].strip())
if data[i].strip()=="line":
coords=data[i+1].split()
x0=int(coords[0])
y0=int(coords[1])
z0=int(coords[2])
x1=int(coords[3])
y1=int(coords[4])
z1=int(coords[5])
add_edge(edge,x0,y0,z0,x1,y1,z1)
apply(stack[-1],edge)
add_lines(screen,buffer,edge,color)
edge=empty_matrix()
elif data[i].strip()=="clear":
clear_screen(screen)
clear_zbuffer(buffer)
elif data[i].strip()=="scale":
coords=data[i+1].split()
sx=int(coords[0])
sy=int(coords[1])
sz=int(coords[2])
transform=scale(sx,sy,sz)
apply(stack[-1],transform)
stack[-1]=transform
elif data[i].strip()=="circle":
coords=data[i+1].split()
x=int(coords[0])
y=int(coords[1])
z=int(coords[2])
r=int(coords[3])
circle(edge,x,y,z,r)
apply(stack[-1],edge)
add_lines(screen,buffer,edge,color)
edge=empty_matrix()
elif data[i].strip()=="move":
coords=data[i+1].split()
a=int(coords[0])
b=int(coords[1])
c=int(coords[2])
transform=move(a,b,c)
apply(stack[-1],transform)
stack[-1]=transform
elif data[i].strip()=="rotate":
coords=data[i+1].split()
angle=int(coords[1])
axis=coords[0]
transform=rotation(angle,axis)
apply(stack[-1],transform)
stack[-1]=transform
elif data[i].strip()=="save":
coords=data[i+1].split()
save_ppm(screen,coords[0])
elif data[i].strip()=="display":
display(screen)
elif data[i].strip()=="sphere":
coords=data[i+1].split()
x=int(coords[0])
y=int(coords[1])
z=int(coords[2])
r=int(coords[3])
sphere(triangle_matrix,x,y,z,r)
apply(stack[-1],triangle_matrix)
add_polygons(screen,buffer,triangle_matrix,A,P,L,V,Ka,Kd,Ks)
triangle_matrix=empty_matrix()
elif data[i].strip()=="box":
coords=data[i+1].split()
x=int(coords[0])
y=int(coords[1])
z=int(coords[2])
x_width=int(coords[3])
y_width=int(coords[4])
z_width=int(coords[5])
box(triangle_matrix,x,y,z,x_width,y_width,z_width)
apply(stack[-1],triangle_matrix)
add_polygons(screen,buffer,triangle_matrix,A,P,L,V,Ka,Kd,Ks)
triangle_matrix=empty_matrix()
elif data[i].strip()=="torus":
coords=data[i+1].split()
x=int(coords[0])
y=int(coords[1])
z=int(coords[2])
r=int(coords[3])
R=int(coords[4])
torus(triangle_matrix,x,y,z,r,R)
apply(stack[-1],triangle_matrix)
add_polygons(screen,buffer,triangle_matrix,A,P,L,V,Ka,Kd,Ks)
triangle_matrix=empty_matrix()
elif data[i].strip()=="push":
push(stack)
elif data[i].strip()=="pop":
stack.pop()
i+=1
def line_box(matrix,x,y,z,x_width,y_width,z_width): #changes made in line_box y+ switched to y-
add_edge(matrix,x,y,z,x+x_width,y,z)#1 and 3
add_edge(matrix,x,y,z,x,y-y_width,z)#1 and 2
add_edge(matrix,x,y-y_width,z,x+x_width,y-y_width,z)# 2 and 4
add_edge(matrix,x+x_width,y,z,x+x_width,y-y_width,z)#3 and 4
add_edge(matrix,x,y,z,x,y,z+z_width)#1 and 6
add_edge(matrix,x,y-y_width,z,x,y-y_width,z+z_width)#2 and 5
add_edge(matrix,x,y-y_width,z+z_width,x,y,z+z_width)#5 and 6
add_edge(matrix,x,y,z+z_width,x+x_width,y,z+z_width)#6 and 7
add_edge(matrix,x,y-y_width,z+z_width,x+x_width,y-y_width,z+z_width)#5 and 8
add_edge(matrix,x+x_width,y-y_width,z+z_width,x+x_width,y,z+z_width)#8 and 7
add_edge(matrix,x+x_width,y,z,x+x_width,y,z+z_width)#3 and 7
add_edge(matrix,x+x_width,y-y_width,z,x+x_width,y-y_width,z+z_width)#4 and 8
def box( polygons, x, y, z, width, height, depth ):
x1 = x + width
y1 = y - height
z1 = z - depth
#front
add_polygon(polygons, x, y, z, x1, y1, z, x1, y, z)
add_polygon(polygons, x, y, z, x, y1, z, x1, y1, z)
#back
add_polygon(polygons, x1, y, z1, x, y1, z1, x, y, z1)
add_polygon(polygons, x1, y, z1, x1, y1, z1, x, y1, z1)
#right side
add_polygon(polygons, x1, y, z, x1, y1, z1, x1, y, z1)
add_polygon(polygons, x1, y, z, x1, y1, z, x1, y1, z1)
#left side
add_polygon(polygons, x, y, z1, x, y1, z, x, y, z)
add_polygon(polygons, x, y, z1, x, y1, z1, x, y1, z)
#top
add_polygon(polygons, x, y, z1, x1, y, z, x1, y, z1)
add_polygon(polygons, x, y, z1, x, y, z, x1, y, z)
#bottom
add_polygon(polygons, x, y1, z, x1, y1, z1, x1, y1, z)
add_polygon(polygons, x, y1, z, x, y1, z1, x1, y1, z1)
#def sphere(matrix,cx,cy,cz,radius,step=100):
def line_sphere(matrix,cx,cy,cz,radius,step=10):
rot=0
t=0
i=0
j=0
x0=radius*math.cos(2*math.pi*0)+cx
y0=radius*math.sin(2*math.pi*0)*math.cos(math.pi*0)+cy
z0=radius*math.sin(2*math.pi*0)*math.sin(math.pi*0)+cz
i+=1
while j<=step:
rot=j/step
while i<=step:
#print(i)  # leftover debug output, disabled
t=i/step
x=radius*math.cos(1*math.pi*t)+cx
y=radius*math.sin(1*math.pi*t)*math.cos(2*math.pi*rot)+cy
z=radius*math.sin(1*math.pi*t)*math.sin(2*math.pi*rot)+cz
add_edge(matrix,x0,y0,z0,x,y,z)
x0=x
y0=y
z0=z
i+=1
i=0
j+=1
def sphere(matrix,cx,cy,cz,radius,step=20):# n is 1+step
rot=0
t=0
i=0
j=0
n=step+1
edge=empty_matrix()
while (j<=step):
rot=j/step
while(i<=step):
t=i/step
x=radius*math.cos(1*math.pi*t)+cx
y=radius*math.sin(1*math.pi*t)*math.cos(2*math.pi*rot)+cy
z=radius*math.sin(1*math.pi*t)*math.sin(2*math.pi*rot)+cz
add_point(edge,x,y,z)
i+=1
if(j>=1):
k=0
while(k<step):
if(k!=step-1):
x=edge[(j-1)*(n)+k][0]
y=edge[(j-1)*(n)+k][1]
z=edge[(j-1)*(n)+k][2]
x1=edge[(j-1)*(n)+k+1+n][0]
y1=edge[(j-1)*n+k+1+n][1]
z1=edge[(j-1)*n+k+1+n][2]
x2=edge[(j-1)*n+k+1][0]
y2=edge[(j-1)*n+k+1][1]
z2=edge[(j-1)*n+k+1][2]
add_polygon(matrix,x,y,z,x1,y1,z1,x2,y2,z2)
if(k!=0):
x1=edge[(j-1)*n+k+n][0]
y1=edge[(j-1)*n+k+n][1]
z1=edge[(j-1)*n+k+n][2]
x2=edge[(j-1)*n+k+n+1][0]
y2=edge[(j-1)*n+k+n+1][1]
z2=edge[(j-1)*n+k+n+1][2]
add_polygon(matrix,x,y,z,x1,y1,z1,x2,y2,z2)
k+=1
i=0
j+=1
def line_torus(matrix,cx,cy,cz,r,R,step=100):
rot=0
t=0
i=0
j=0
x0=math.cos(rot)*(r*math.cos(t)+R)+cx
y0=math.sin(t)*r+cy
z0=-1*math.sin(rot)*(r*math.cos(t)+R)+cz  # stray "+cx" removed to match the formula in the loop below
i+=1
while j<=step:
rot=j/step
while i<=step:
t=i/step
x=math.cos(2*math.pi*rot)*(r*math.cos(2*math.pi*t)+R)+cx
y=r*math.sin(2*math.pi*t)+cy
z=-1*math.sin(2*math.pi*rot)*(r*math.cos(2*math.pi*t)+R)+cz
add_edge(matrix,x0,y0,z0,x,y,z)
x0=x
y0=y
z0=z
i+=1
i=0
j+=1
def torus(matrix,cx,cy,cz,r,R,step=20):
rot=0
t=0
i=0
j=0
edge=empty_matrix()
n=step+1
while j<=step:
rot=j/step
while i<=step:
t=i/step
x=math.cos(2*math.pi*rot)*(r*math.cos(2*math.pi*t)+R)+cx
y=r*math.sin(2*math.pi*t)+cy
z=-1*math.sin(2*math.pi*rot)*(r*math.cos(2*math.pi*t)+R)+cz
add_point(edge,x,y,z)
i+=1
if j>=1:
k=0
while(k<step):
x=edge[(j-1)*(n)+k][0]
y=edge[(j-1)*(n)+k][1]
z=edge[(j-1)*(n)+k][2]
x1=edge[(j-1)*(n)+k+n][0]
y1=edge[(j-1)*(n)+k+n][1]
z1=edge[(j-1)*(n)+k+n][2]
x2=edge[(j-1)*(n)+k+n+1][0]
y2=edge[(j-1)*(n)+k+n+1][1]
z2=edge[(j-1)*(n)+k+n+1][2]
add_polygon(matrix,x,y,z,x1,y1,z1,x2,y2,z2)
x1=edge[(j-1)*(n)+k+n+1][0]
y1=edge[(j-1)*(n)+k+n+1][1]
z1=edge[(j-1)*(n)+k+n+1][2]
x2=edge[(j-1)*(n)+k+1][0]
y2=edge[(j-1)*(n)+k+1][1]
z2=edge[(j-1)*(n)+k+1][2]
add_polygon(matrix,x,y,z,x1,y1,z1,x2,y2,z2)
k+=1
i=0
j+=1
def pop(stack):
stack.pop()
def push(stack):
matrix=new_matrix()
i=0
while i<4:
j=0
while j<4:
up(matrix,i,j,stack[-1][j][i])
j+=1
i+=1
stack.append(matrix)
|
[
"telahi00@stuy.edu"
] |
telahi00@stuy.edu
|
efd9651775535caed7b13bced02debc6bfdc5175
|
1b5001dd4ae97909af1c7d6d05885dfb64466362
|
/routes.py
|
a310f909ea7e9375bec4f6224efa15febbb41797
|
[] |
no_license
|
dorican/fwsgi_app
|
889453f2b8e75bc28857019ec5da3279ac9cb55b
|
969509fd4fc2de577d61a6170c0e4c5f6153c828
|
refs/heads/master
| 2022-12-30T16:25:06.375480
| 2020-08-24T12:17:55
| 2020-08-24T12:17:55
| 289,912,404
| 0
| 0
| null | 2020-10-13T06:05:27
| 2020-08-24T11:45:28
|
Python
|
UTF-8
|
Python
| false
| false
| 142
|
py
|
from views import IndexView, AboutView, OtherView
routes = {
'/': IndexView(),
'/about/': AboutView(),
'/other/': OtherView(),
}
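# A minimal dispatch sketch; treating each view object as a callable is an
# assumption about the views module, not something this file guarantees:
# view = routes.get(path)
# if view is not None:
#     response = view(request)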
|
[
"linux@linux.org"
] |
linux@linux.org
|
76f1f51e2945735880579913d05f9284bad0b654
|
27e216a75d5266c885d6ea8cbf96efcefed739a1
|
/TranslateRNA.py
|
c6dd146434f840e3221a4711c1f13b1eb245eb83
|
[] |
no_license
|
RagaviSrinivasan/Rosalind
|
9b5a6fd4a63982e5b9a5cbb177b99aed53083629
|
c254485ab8add2613a079dcab8f3a0db362eaa05
|
refs/heads/master
| 2021-01-19T13:27:04.935274
| 2017-08-02T04:08:44
| 2017-08-02T04:08:44
| 82,415,580
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,295
|
py
|
# -----------------------------------------------------------------------------------------------------------
# Rosalind problems
# 6) Translate the given RNA into protein sequence
# -----------------------------------------------------------------------------------------------------------
from Bio.Seq import Seq
RNA = Seq("AUGCUUCCCAAAAGUGUCCGCUAUGAUGUCAAUAUUUCUUGUGCCUUCCAACGCUUUUCGGCUGUUGUAUGGGUAAAUGGUCCAUGCGACCCUUCACGAAACGGAAAAGUCCCACUCUCGAGUCCAAAACGGAGCUCUAAUAACAGGAGACGUCUCUCGGAGAGGCUUCACUCAUGCAAUUCUACAGGAGAACCGAGAAUUAUCCGUGGUUGGCCUGUGUACAAACACAAGUCUAAUGUACCUACAGCGUUUGCGCCUUUUCACCAGCGAAGUACACGACGGCCGUUAUCACCUUCAACCACGAUCUUGGGCAUGCGCAGCGGUAGUUCAUCACUUCCGACAAGAGCGCCGGCCGAUACAGAUCCGGGCCUAAUUCAUGCGCUAUCGGCACCGGGGCCUUGUUACUAUAUUGCGUGGUAUAUCCGAUUUUACGCAAUAGGGUCGUUUAUGUCGUUAAGCCGCAUGUCAGCCGCCGGAAUCCAGUUGGUGGCCUCACCAGGGCCCGUCUUCGGCGAUGCGUGGAACUGGAUGGUUUUAGUCCAAUUUAUUUCUGAGGAGGGCUUACUGUGUCAGCGACAAGAGCCCUUCCCCCCAUACCAACGCACACAUGGCCUUCCAGGGCUGUGUCUUGCCAAGGUGAAGAGACGAGCAUGUCGGAACCUCCAAAUACCGUCAUAUCCAGCCUCCCGUAGGACAUCCAAACAGCCUCAACCAUCGUCUAUGGGAGACCCCACCGCGUUCUCAACCAGGGCCGCUGUAACUUGUGCGUACGAAAGGCUCGUACAUGGAUGUUGCGGCUGUAUGAAUACCGCGCAAUUGAGGAAAGCCCUAAGAAUAAUUUGUAAUAAUACCCUGCGUUCUGCCCAUUUCCGUGGUCUCAGUGUGGUGGGGUUUCAGGGCAGGUUUAUCGGAACGUUGCCCCCGGGUUCUAAGUCUUGGCGUGCGAGAGUGAUUUCGGGUAUAUUCUCAUGUGGUAGGCUCUGCUGGGGUUAUAGUGUUGACAAGCUUACUGUCCUCACCAAUGAGUUCCGACGCAUUCCGACCAUUAGCAGAGCAAAUGAGUGUAUUUUGCCAUACCAGGCACGAGCUUCGGUAUACAAGUUAUUAAGUACCUGCAACCUUAUUCCCAUACCUACGGUGGAAUAUCGCAAAACUCCGGGCACCGACAUACGGUCUCCCCUCCUUCGCUUUUUUGACCCGCGCCCACAUAGGCCGCGCUACGACGUUGGCAGGCCUUCGAUUCAGGAACCUACAACCGAAACCAAGGUGGAUGUUCAUAUAGCAUCUGACCCAGUUAAGCCGAAUGAAAACAUCCGCUCCCCUCAGUUUGGUUUAAGCGGUUGUCCAGCUCGAGUGCCGUUCCAGCGCGAGAAUUGCAGAGGCGGUCCACAGGAGUAUCGCAUAGCGCUCUACUGCUUACGUAGACAAUUCCUGCUAGUGGUCCAGUUGACCCUGCACAUUCGUAAAUUUCCACCACAACAUGCUCAAGCUUAUGGAUACAGGGAUUGUCAGAAUGUCUACAUGAGUCCUAAAAAUCAGAGGGCGGCAGAAUCUCAGGAUGUGCGGCUCGGCAGAGUGGGCGCACUCAGCGAUGAUAUAGAUAAUGAGUGUUGCGGACGCGGGAAGAUGACGAUGGAAUCGUCGCUUCUACCGGGUUGGCAAAGACUUCCGAAGCAUGGAUUCGACCGUAUCGGACUGAUUGCUAGGCCUCUGCAUCCUGAGAGGGACCAGUGUUAUACUAUAUGUUAUACAAAAGUACAUCUAGAAGUAUACUCCAUACUGCCUCCGGGUCCAAUUACGCGAAUGGAAGGAAAGGCACCCUGUCCACAACCGCUAUUUGCAUUAUACCGCGGAGAUCGGAACAUUAAGAUCAGGGAAGGAGAGUAUCUCAAACUCUAUGUAUUGAAUUGUUGCACAUUGUGUGGCGCAACGUCCCGCAACGGAAUGUUAAUCACUGCACUGCAAACUCCUGGAGGGAGUGUUUCAUUAAAUUUGUUCGUACGUCUCCUUUGGAGUUCACCCUGUUACGGUAACUAUACCCUUAGAUUUUCGUACGGCUCCUUAAAGAGCCAUGCGGAGAUGAUGAAAUGGUCCCGAAUUGUUCCUUAUCCUCUGGCGUAUAUCCUCGACGGGGGGUUUAUGCAGUCCUUAGCAAACGUAAUUCCAAGUGUAUACAGUAAAACAAUAGCGAGUAUUCCGUCCCACUCACGCAACAUCGCAUCAUUCUGCGCAUAUGCUCGAAUGUCUGCAGCGUUGGCUUAUGGGAGCCCGGCCCUUUACUUCCUUAUGGCUAGGCUGCCACAUCCAGCAUACUCCCAACUUAGAGCGUGCCUCUCAGGUCUGGAAGUACGAAGACAACCGAUGGGCUCUGAACGAUUUGGAAAAUUUUGGUUAAACUUUGGCGUCUUCUCUAUUGCAGGCUGCUGCAGACUUGAUGUGCCGAGGCCCGGAAAGUUUGUGUCGAAGUCCGGACGCCACCGGCUGACCAAUAGGCUAUCAGACAAUAAAAAUACCGCUGACAGAUUCUGCAUGUUCGCACUAGGUUUGAGAUACUUCCACGCUUUUCGUCCCUUCUCAAGCUCUUCUAGCAUACCGGAUACAUCUGGGUCCGCCAAAGCCCCUAAUUACUAUGGAGCCAUACACCAAUUCCCACGGGUCAGGAGUGCAAGCCAAGCUGAAAAGACAGUACUCCAAGGUUGGUCGACUGACUAUUUUGACCUUCUGCCAUUGCAUCGGAGGCACAAGUUCCGAAUGGAGUUUUGGACGGGAAUACUCACCACAGGUGUCUUCGUUUCGUUGGAACAAAAUAAAAAUGGUUCGAGGUUAGCGCUGGAACAACCCCCUGAGUCAAGAAUAACGACUCCUCGCGCGCAUCUGUGUCUCCUAAUUGACUGGACACGCUGCAGAUAUCGCAUUAAAAAGACUGUAUGCCCUUAUAGAAAGAUCACGUGGACGCUCGAGAUUCCUCUAUCCUACAAGGACGAACGUGCGCGCCACACUCAAGUCUACGCUUCAACGUAUACCCUGUCUAGUCCCGGUAGCCGGACUUGGGGCACACACUGUUCCCAGAUGUGCUGGCGGUCGCAAAGUGGGGCCAAAAUUAAUGCAGAACCGUUUAAACGGCCUCGUACGCGUGGUCGAAAGGAUUCAAUAAGACAGGAAAUCGCGCCCGCCUCGAGUGCGAACCAUUCUGUGUCGAUUUGCACUCAGAUUGGUAUAGCAAUGAUAUACUGUAUUGAAUUUAGCGAAAUUGGUGCGGCGGGAUUAACCAUGAUGUUUGGAUGGAUCAUCAAGAUAUUCUGUUUACUGAGUAUUCGCGUAGGUCUCACAGUGUACUCUUUCAUACCAAGUCGGCCCAACUGGCUAGUUAGGCCCCUGGUGUCCGAAAGGCGUAGUUACCUGGGAGUGGAACCCUUCACGUCGUUCAGGCGUGGCAACUUUGAAUUCCAUUGUGUAGAUCCCAACAGUUUAAGCCGCUGGCUUCCUGGAUUGGAUAACCGCCUCAAUUAUAUGCUUGGCGACCGGUUGGAGGGCCCGUCUGUAG
CUAUCAUUUCUCCUAGCAUACAUCAAUCCCUACGUACGAAAAGGACGGGAUCACUGCUCGAAUCUGAACAGGCUAACGAGAAUCGGAGAGGCGCCUCAACACCGCAAAGUAAUCCAGUACUCCUACUUCUGCAGACCCUUUCAUACGACGCACAAGCGCUAAAAAAUUUGCUUACUUACCGCAGCAACUAUUGGAUGCCUACAACUCAGUUCCGCCUAGACAGUUGCGUCGACAAACCAACACAAAGACCAAUGGUUGGCAAAUGCCAGCCAGACUGUCGCCCCUCGACUCCCCCAAGCAACAUAGAUGUUCCCGCCCCUGUGGUUAAUCCUUCUUUCUCCAAGUAUCGCCUACGAUGGAGAAUUACUAGCGUUGCAAGUGCAUUCAGAGUCGGUAGUCUUGUUCUCAGCAGUACCAGGUCGCUUCAAAUAAAGAGGAUGUUUGUUAAGUCUGUAACACGGCGAUAUGCAAAUGGUGGGGCUAUUGGGAACAGCCUGCAAGUGUGUGUUUUUUCAAAGUUCCCUCUCAAAAGCCCCACUUCACCCAGCCCAGACUCGUGUUUGCUAUACACAUUCAGCUCCCUACUGGUAAGAGCAACGUGUUUAAAAGAUAUCCCAGUCACUGUAGGUGCGCGCAGGAACUUACGAGCACGUUCAUAUCGGCUCGAGGAGUGGUUCAUCAACCAAAGACUAAAAUGUCCAUGGAGCGCGAGGUCUCCUCAUAAUUGGUACGACUUUUUCACAGCUGUUUAUCUUGUACUUGUAACGCUGAACGAUCCACAACUUGAGCGCGUGAGCCGCGAAUGUAAGUCAACGUAUGGCUUCCUCAAGCCCAUUGUUAAAAACGCGAACAUUCUGGGGGGCGAUCUAGAAGAAAGUUCCUUAGGUUCACCUUACACAGUCGUACGUUUGCAGCCCAGAAAACGGCUUCUUCUUUCUAUUCGGGCAAAGGGGUUAAUUACCUCUGUGUACACAUCAAACUUUCGAUUACUCGCAUCGACCAUUUAUGCUGGCCCUAUUAAAAUCGAUUCGCUCGCAGUAAGCUUCAACAUUUCGAGAAUACUAAAUGGUACUUUGGAGAAUGGAAGGCUGUACGGCUCCUUGGGAGGACCAACAUUGAAACUUGUAACCCUUUAUGUAUCAAAGGUCACCACGGCCCAUGCUCACGUAAAAGGCGAACCCAGUGUAGUGGAGACUUCCAGUUACCCGGAUAACGUAUACGCGGGCGGGCCGGGUGGCCUAAGGCGCUGUUCCGCCCCUGGGGGCAGUAUCAUUACCUUAGGCGUGAUUGGCUGGAUCUCACGGGCCAGUCAUGAGUCAUUGGAUUCGUAUUGCCAAAUACGAAGUGCAAAGAGGGUUGGAGGGCGCAUUCGUUACGGGUAUCUUCAGCCUCGGCGAAUAGAAGGCCGUCGUAUCUUUUUUAAUUUCUCGCAUACCUCCCUGGGUUGUUCCAGCAGUGGAUACUACCGUUACCCCUUAACGCUCGUCAAAGAUAACCCUACAGGAUGCCUUUCCUGGGAGAGAGCUACGGUGCCUGACUGUGAUGAACGAGCGGGAGCAAGCGUCGGGUAUCUCCUGCGAGACCGGCGUCGGGUUAUGGUGAAAUCUAUCUCGCCGCUAAGUCCGAGAGCACGAUGGAAAGCGCAGUCGAGAACGUGCGUAUGCUUGAUCGACCUCCCAAGUGUUACAACGGAUUUUCAUAAGUGUAGCACCAGGAUGGAAGGAAAAAGGCCCCCAGCUGUGCUCGUCUGUUUUGAGGUGUCGUAUUCCUCGCAAGAGAUUAAGUUAAGAACAGUCGAGUGGGUUUUGUUCGAUACAUAUUUAGAGGUUUCGGCACGGUUAGCGUGGACUUUACAUCAGACUGGCGCUGAGACGACCUCGUGCGAGGCGCACGGUCGCUCUGCUGAGGAAUUCCUCCUUAAACGACCACGUUCUGCCUGCCAGGCACAAGCCAGGACAGGGGAUAGUUCGAGAAGCCGAAUCUUGUCGGUCAGAAAUAUCCCAGGGGUAAAUCCAUGCUCAGAAACAACUUACCCCAAAGAUCUUCCGGUCGCGCGAGUGACCACCAUAUUCAUACACACUGACAGCAAUCUACCAAGACGGCCAUUAAUAUGGUUAGGUCUUAGGUCUUUCACUGUGUACGAGCUGGGCCUACCGACACAAUCGCAAUCAAUAUACCGUAUUGUUGAGCAUCAUUCGGUUACGAUCAUUCAGGUCAACGGUCGAUACACUGACAAUGAGUGCCGAUUUAUGGAGUCGGCCCGCGACCGACUCAAUUAUCGUACAGGCGCAAUACCCCGUGCGCUUUCUACGUCUGGUACACGUCUAUACGGAUGCACUUUAGAAGACCGGCUUCGUGGAAAUACGACGACGUGGGUAAAGCCUAUCCCAGCACUCGCGCCCAAUAGAAUAGGGCAUCUGGCUUCUAUUAUUUCCAAGCGGGGCAACACUAUUCCUCUCCUUGGUCUUACGAGGGCGACCCGAAUGUGCCGCUCAAGGUUCAGCAGAAUGUUAUCGUUGGAGAGAUGCGCCUCAGAGUGCGUCAUGUGUAAUCGUGAGAAACGUGCACCAGUGACAAAUGCUCAUGGCCCGGGCCGCUUACAUGCCAAUCCUACUAUCCCGCCAAGAGGUUGCUGGGACGCUCAAAGCCACCCUUGGAGGCCACGGCACUACCAUGUCCACGGUCGUCGCCGGGAUGUCGUUACAGUUACAUGUCUGUACUUGGGGAUAUUUGGGUUAUGUUGGUUGAUGGAUGCCGGACCGCGUCGCUAUAAUUCCAGCUUUCAACAGGCCCUCCCCACUUCGAAUGAGUCGGACGGAGAAACAGCGAGGACUCAGUCACCAGUGGGUAUGGAUCCCGCUCUAAUCACGUACGGGACAUGUCCUAACACCGUAGCGACACUUGAAGCGCCCGAAUCUGCAGGCUGCUCAUGCAUUAAGGAGCUCUGGAAGUCACAUUCAUUUCUCGUCGUUGAUAAUCGUUUUAGACGAAGCAUAACGCCGCGUGUGGACGAUGCUCGCCUUUUUAUACAUCCAGCCAAAAAUCUGGGUCCGAAUGUAUCAUGUGGUUACCUGCCAGCCAAUCCAACAUGCCUUAUAGCAAAGAUACCGCCUGUCCGCUCUUCCAGCCGCGACCGUCCAAUGGCAGCCCUCCUAUUCUUGAACGUAGCGGUUACCACUUGUGUCAGCCGGCAAUUUCCUAGCCCUACAGUACACUCGGGCCUCGUCUACAAUGCCGUUGUCGCAAUCGUACAGUGGAUCUGCAACUAUAGUGACGCCGAGCACAAUGGUUUGUUUACACAGUCUGUUAGAAAGCCUGAGGAGAGGGGGUGUGAAGACCGCCGGGCCCGGUUUGACGUUCGGAUGACCGUACAACGAUGUCAUCCCGGGAUUCCUCGACUGUUCCCCCAAUUGUGUGGUCUUAAAUCGGAAUGCGCCGCAGAGUCCAGGGGAUUAAAAAACCUGCAAUCUCUGCAUCCGCCAUUCAAAAGAUUGUCUACUCUCCAAUUACAAGCACGAGCCGUAAUUGUAUACCGAGCCAGAGCGUAUUUGAAGC
UAGCCCACAUUUCUAUCCUCCUUCAGGCCUCGUGUUCAAACAAGUCCUUAUCGAACCCACUGCGGCGAUUAGCAGUUCCAACCCCGCGUCCGCAUCGCUGCACACCAAGCGUCAGGUGCCAAGAAAGGACGGAUGAGAGCACCACCCGGAUUUACCGCCUGACAGGCCAGAGGAUCCAUGACAGGCUAUGUUUUGAUGAUAGGCGAAAGAGCCAACGCCGUCGGUACUCAUUUAAGGACUUGAGUCUUCGUACAUCUAAGCAGCUGUUGGCCGCCCCUAAUAGGGGCGUACCGAACUCACUAGAUGAGAGCAACAUCUACUAUUUACAAACACCACGGCCCGUUGCCCGCGGAGCAAAGAGGGCUCUGAAGCAGAGCAGGUUGCCCCAUCUUCAAACGCACGAUUCGCAGCAUCACAAAAGAAACGAGGUAGACUGUUCUCCACGGACGCAAGCAAGAGCUCGUCAGGCCCGGCUAGUUAUCAACUAUCUUUGGCCAGCUGAGCCAGCCGGACAUAUUACUAUUGUAUUAAUGAUUUGCCGAAAAGGUCGCUCGACCUGCCCAUCCAGAAGCCGACAUACCUUCCAUCGGAGGGCAGACAAGAUUGUAACGGCUACCCGAUGGUCCCCUACUCCCUAUAGAUCUCACGCGUGUAUGAGUAGCAGCAGGCAAACUAGACCGAGAGCAGGGGAGCUGACUUCAGGUCACACUGUAGCAGUUCCUUUGCAGGUUACACUAUUCACUAUAUGCACACUACUAAUUGUGAACCGUCGUGUCAUUUAUAUAACCCUUACAAAUUCGAUCUUGCAUGAAAAAGACCGCUGCGAGCGUUCAAUUGCAACUUUUCCUAUCAAGCCAAAGGUGAAAAACUUUCCCGAUGACCUACCCUCGGAGGACUCUGCCUCUAAUCGUUAUGGACACUCGGGUGAUCGCAGUUGCAGCCCUGCUAUCAUCCUCUCCCCGCCACUCGAGUUUGAGAUCUCGUUUAUCGCAGUUUACCAGUUGGGUAUGGCCCAGUUGCAUUGGGCGUUCCCCGCCCGCACUAAGCACGGAGCUUCCGAAGAUCCCAGGACUGACCAUUGGCAGCACCGCUUGCUUGUUUACGCCGUCGGAUCAACGAUAGUGUUACCGAGGGACUUUACUAUUCCAGAGAUAGCAUGUAGUAUCUCGUCUGUUCGCUUACUUAUACUGGCUCCCAGCACAGAGGCCAUCUCGUUGAGGGUGAUGAGUCGAUUAGAAGACAAGAGAGCCGAAGUAGACGGUCCGAGAUCUUGGAAGCCGCGGAAGAUCGCAUCUGGGACCCCGAUUACAACACCCGGAAGCUACGCCUAUUUGGUGUGCAGCCAUCCGAUUCUCACCAAAACUGGGGCGUCGCGAUGGACGGGUGUACCUCGCUUCUAUGAAACAUGUCAACAGGACAUUUGUGAUCUUGCUCUUUCCACCCUUGAUAGAGUCGAAAGACCACUUAUUCGAGGGCUGAUACCUUCUAAGAGAUGGCGCUCUGGAACUGUCGCCGCGGGCUCGCUGCCUAAGGGGGCACCAGACUUUUACAGACGGGUCUCGGUGAGGGGACGUUGCAAUAUGCAGAAGCUAAACGAGGCGCUGCAACUGAAGUAUACAAAAGAUUUAGCGAGAAGAGCCUUAUGCCUGAAAAGUCCUCCACCUUAUUUAUUUGUGCCCGGUUCCGAGCCCAAGCUCCUCGUUUCACUUAUGGUGCGGGAUUGGAGUUCGGACGCCGCACGAUUAUUUCUCUACAAGUGCUGCGGGGUAUGCAUCGGGGUAAGGAACUUCAUACGCUGUGCGCGGAGCUCUAUAGUAACUAUCGGAGGUCGCGGAAGACUGUAG")
print(RNA.translate())
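# Bio.Seq's translate() also accepts to_stop=True to cut the protein at the
# first stop codon instead of writing '*':
# print(RNA.translate(to_stop=True))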
|
[
"rxs147930@utdallas.edu"
] |
rxs147930@utdallas.edu
|
7ab268a728e769aad1f9b367f30a8bc426d89554
|
03aebbf4fe13a3a0d79f738a0c2cd7741a788cdd
|
/Book_automation/ch6/picnictable.py
|
40d50ac5d63f97f40eb413d4cc21c287b27316b2
|
[] |
no_license
|
Maxim-Krivobokov/python-practice
|
c69d2fbe601ed7e8388a564bf20a7ceab283919d
|
718698dfaeabd5ea000bce3a20bf18d1efe938c2
|
refs/heads/master
| 2021-05-24T10:27:29.022718
| 2020-07-22T14:34:15
| 2020-07-22T14:34:15
| 253,518,510
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 353
|
py
|
def printPicnic(itemsDict, leftWidth, rightWidth):
print('PICNIC ITEMS'.center(leftWidth + rightWidth, '-'))
for k,v in itemsDict.items():
print(k.ljust(leftWidth, '.') + str(v).rjust(rightWidth))
picnicItems = {'sandwiches': 4, 'apples': 5, 'beer': 3, 'cookies': 9000}
printPicnic(picnicItems, 12, 5)
printPicnic(picnicItems, 20, 6)
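# For widths (12, 5) each row is the key left-justified with dots plus the
# count right-justified, so the apples line prints as:
# apples......    5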
|
[
"Krivobokov-M@KRIVOBOKOV-M"
] |
Krivobokov-M@KRIVOBOKOV-M
|
ab543bfcf0f2d90f21dbc29d0df4ece30ade650a
|
57c82ffda28ffe76f9f30ee268fdb74de40e0b80
|
/smartcab/decay.py
|
31063b66e4785ed1aa72afb60bc27ffae256c96e
|
[] |
no_license
|
walter090/smartcab
|
77bbf67f2b183b427f4ccd901f771db1f881ed41
|
4e8d4d578a0b8dbc4461387b73f46c063988d30c
|
refs/heads/master
| 2021-01-20T10:30:29.096433
| 2017-03-23T21:17:26
| 2017-03-23T21:17:26
| 83,933,230
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 272
|
py
|
import numpy as np
def linear(epsilon, a, t):
return epsilon - a if t > 0 else epsilon
def exponential(a, t):
return a ** t
def quadratic(t):
return 1/float(t ** 2)
def e_ex(a, t):
return np.exp(-a * t)
def cosine(a, t):
return np.cos(a * t)
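# A usage sketch: decaying an exploration rate with the exponential schedule
# (choosing a < 1 is an assumption so that the value shrinks over time):
# epsilon = exponential(0.99, t)  # ~0.366 at t = 100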
|
[
"walter.wu090@gmail.com"
] |
walter.wu090@gmail.com
|
c9fc9d35cf11ca71bed235ddcb8f9cbdde5fc8ae
|
ee40ecc1af2a91797f9cfa5dc7418f9f055094db
|
/Aula 17-18 (listas).py
|
65df1134433047583aef452490174e0a79100310
|
[
"MIT"
] |
permissive
|
juliosimply/Aulas-Python-Guanabara
|
5ca5802634468c1b02208918661c720775784bc0
|
fc3d230e4646cf83250404a20dadeef90ab2e676
|
refs/heads/main
| 2023-03-19T02:54:05.728655
| 2021-03-07T20:31:31
| 2021-03-07T20:31:31
| 335,390,085
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,988
|
py
|
# lesson on lists
# the .append('novoItem') method adds a new item to a list (at the end of the list)
# the .insert(0, 'novoItem') method also adds a new item, but at the "zero" position given by the argument
# 3 ways to delete = del novoItem[3] / novoItem.pop(3) / item.remove('nomeDoItem'); in every case the item is removed and the indexes are renumbered
'''num = [2,5,9,1]
num[2] = 4
num.sort()
num.append(9)
num.sort(reverse=True)
num.insert(1, 35)
num.pop() # removes the last one
num.pop(3) # removes the one at position 3
num.remove(5) # removes the ELEMENT '5' from the list
print(num)
print(f'this list has {len(num)} elements')
valores = list()
valores.append(5)
valores.append(9)
valores.append(4)
for c, v in enumerate (valores): # getting the keys and the values with enumerate
print(f'At position {c} I found the value {v}')
print('Reached the end of the list')
for cont in range(0,3):
valores.append(int(input('Enter a value')))
print(f'I found these typed values {valores}')''''''
#a = [1,2,3,4]
#b=a[:] # in this case, with "[:]", it makes a copy independent of a; without it, one is simply assigned to the other'''
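# A quick demonstration of the copy-vs-reference point above:
# >>> x = [1, 2, 3]
# >>> y = x[:]   # independent copy
# >>> z = x      # same object under two names
# >>> x.append(4)
# >>> y, z
# ([1, 2, 3], [1, 2, 3, 4])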
# Lesson 018
'''teste = list()
teste.append('Julio')
teste.append(37)
galera = list()
galera.append(teste[:]) # the importance of ":", without it one list overwrites the other
teste[0] = 'maria'
teste[1] = 22
galera.append(teste)
print(galera)
galera = [['julio', 37], ['paulo', 25],['vania', 60], ['maria', 45]]
for p in galera:
print(f'{p[0]} is {p[1]} years old')'''
galera = list()
dado = list()
totmaior= totmenor=0
for c in range (0,3):
dado.append(str(input('What is your name?')))
dado.append(int(input('What is your age?')))
galera.append(dado[:])
dado.clear()
print(galera)
for p in galera:
if p[1] >= 21:
print(f'{p[0]} is over 21 years old')
totmaior +=1
else:
print(f'{p[0]} is under 21 years old')
totmenor +=1
print(f'we have {totmaior} people over 21 and {totmenor} under 21')
|
[
"julio_simply@hotmail.com"
] |
julio_simply@hotmail.com
|
e68408b11f88bf7efdc54910a6bd17966fb12078
|
4d4a85879d35605315b23135e86cee6e4bffdf88
|
/tests/test_windows.py
|
326e6772a16fa722d84cc904f55dd488b4ac44b3
|
[
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
alfred82santa/httpie
|
b8029ff50e560257adb93db6f3d6ea5c02d22504
|
631e332dad122359840dda59b0d81a2ebec062dc
|
refs/heads/master
| 2021-01-18T15:47:46.052220
| 2014-04-25T11:57:33
| 2014-04-25T11:57:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 779
|
py
|
import os
import tempfile
import pytest
from tests import TestEnvironment, http, httpbin, Environment
from httpie.compat import is_windows
@pytest.mark.skipif(not is_windows, reason='windows-only')
class TestWindowsOnly:
def test_windows_colorized_output(self):
# Spits out the colorized output.
http(httpbin('/get'), env=Environment())
class TestFakeWindows:
def test_output_file_pretty_not_allowed_on_windows(self):
env = TestEnvironment(is_windows=True)
output_file = os.path.join(
tempfile.gettempdir(), '__httpie_test_output__')
r = http('--output', output_file,
'--pretty=all', 'GET', httpbin('/get'), env=env)
assert 'Only terminal output can be colorized on Windows' in r.stderr
|
[
"jakub@roztocil.name"
] |
jakub@roztocil.name
|
6ac8817decaba91bb4b2cff0ab6e720f441aca4e
|
c19acc5dafb40bb0252bc15513dab3f165d48060
|
/deep_q_learn_snake.py
|
8465ca4e27b0a4f4fec26a9ef10b2859e3b6f149
|
[
"MIT"
] |
permissive
|
dsjohns2/Snake-AI
|
062f7bdc9394da273e04a1bedc783faf0bfb1ad5
|
7e18abcea4abcf25089eaec5fdf140acd558ed6b
|
refs/heads/master
| 2020-03-21T16:40:42.983877
| 2018-06-27T04:40:22
| 2018-06-27T04:40:22
| 138,785,736
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,522
|
py
|
from __future__ import print_function
import random
import time
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import tkinter
import matplotlib.pyplot as plt
# Neural Network
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.num_input_params = 5
self.num_classes = 1
self.fc1 = nn.Linear(self.num_input_params, 120)
self.fc2 = nn.Linear(120, 120)
self.fc3 = nn.Linear(120, 60)
self.fc4 = nn.Linear(60, self.num_classes)
def forward(self, x):
x = self.fc1(x)
x = F.relu(x)
x = self.fc2(x)
x = F.relu(x)
x = self.fc3(x)
x = F.relu(x)
x = self.fc4(x)
return x
# Train the Snake network
net = Net()
criterion = nn.MSELoss()
optimizer = optim.SGD(net.parameters(), lr=.0001)
dimensions = (5, 5)
for episode in range(0, 1000000):
print("Episode: " + str(episode))
snake_location = [(random.randint(0, dimensions[0]-1), random.randint(0, dimensions[1]-1))]
free_space = []
for i in range(dimensions[0]):
for j in range(dimensions[1]):
free_space.append((i, j))
free_space.remove(snake_location[0])
alive = True
eaten = True
score = 0
while(alive):
# Spawn ball if needed
if(eaten):
ball_location = free_space[random.randint(0, len(free_space)-1)]
free_space.remove(ball_location)
eaten = False
# Move Snake
initial_state = snake_location[0]
direction = random.randint(0, 3)
if(direction == 0):
snake_location = [(snake_location[0][0]-1, snake_location[0][1])] + snake_location
elif(direction == 1):
snake_location = [(snake_location[0][0], snake_location[0][1]+1)] + snake_location
elif(direction == 2):
snake_location = [(snake_location[0][0]+1, snake_location[0][1])] + snake_location
else:
snake_location = [(snake_location[0][0], snake_location[0][1]-1)] + snake_location
free_space.append(snake_location[-1])
del(snake_location[-1])
new_state = snake_location[0]
# Check if died or eaten
if(snake_location[0] in free_space):
free_space.remove(snake_location[0])
reward = -1
elif(snake_location[0] == ball_location):
eaten = True
reward = 10
score += 100
else:
reward = -100
alive = False
action = direction
lr = .5
dv = .5
if(reward == -100):
optimizer.zero_grad()
X = np.array([initial_state[0], initial_state[1], ball_location[0], ball_location[1], action])
X = X.astype(np.float32)
X = torch.from_numpy(X)
q_guess = net(X)
q_target = (1 - lr) * net(X) + lr * reward
loss = criterion(q_guess, q_target.detach())
loss.backward()
optimizer.step()
else:
optimizer.zero_grad()
X = np.array([initial_state[0], initial_state[1], ball_location[0], ball_location[1], action])
X = X.astype(np.float32)
X = torch.from_numpy(X)
new_X_actions = np.array([[new_state[0], new_state[1], ball_location[0], ball_location[1], 0], [new_state[0], new_state[1], ball_location[0], ball_location[1], 1], [new_state[0], new_state[1], ball_location[0], ball_location[1], 2], [new_state[0], new_state[1], ball_location[0], ball_location[1], 3]])
new_X_actions = new_X_actions.astype(np.float32)
new_X_actions = torch.from_numpy(new_X_actions)
new_q_vals = net(new_X_actions)
new_q_vals = new_q_vals.detach().numpy()
q_guess = net(X)
q_target = (1 - lr) * net(X) + lr * (reward + dv * np.amax(new_q_vals))
loss = criterion(q_guess, q_target.detach())
loss.backward()
optimizer.step()
# Save the Neural Net
torch.save(net, "deep_q_net.pt")
|
[
"dansj@stanford.edu"
] |
dansj@stanford.edu
|
252869c50aaf0b28ef745e92beba86c02c0dd1ab
|
607fbaa551fd3e10d8b767b3707bc2f7bc9d9ea5
|
/pendulo.py
|
845b3ff085bb3f148c30cede4c36f1ae3b70445e
|
[] |
no_license
|
natalypulido/EjerciciosClaseMetodosComputacionales
|
3fd3ef84a129d2685e6d43fd338faef2bd100e7f
|
faab59b175eb3330db4cd3da448f7b5b67758b0c
|
refs/heads/master
| 2021-01-01T19:22:07.693940
| 2017-07-27T20:00:29
| 2017-07-27T20:00:29
| 98,574,610
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,256
|
py
|
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
h=0.001
g = 9.8
l=1
mi=-5
ma=5
## Number of points
n=int((ma-mi)/h)
#print n
##x
t=np.zeros(n)
## theta
y_1= np.zeros(n)
y_2= np.zeros(n)
## Taking into account second-order ODEs
def funcion_y1prime (t, y_1, y_2):
return y_2
def funcion_y2prime (t, y_1, y_2):
return (-g/l)*np.sin(y_1)
## Initial conditions
t[0]=mi
y_1[0]=1
y_2[0]=0
#print y_1[0]
#print y_2[0]
## I FOLLOWED THE NOTEBOOK - EULER METHOD
## Compute the first derivatives
for i in range (1, n):
y1prime = funcion_y1prime(t[i-1], y_1[i-1], y_2[i-1])
#print y1prime
y2prime = funcion_y2prime(t[i-1], y_1[i-1], y_2[i-1])
#print y2prime
t[i] = t[i-1] + h
y_1[i] = y_1[i-1] + h * funcion_y1prime(t[i-1], y_1[i-1], y_2[i-1])
y_2[i] = y_2[i-1] + h * funcion_y2prime(t[i-1], y_1[i-1], y_2[i-1])
#plt.plot(t,y_1)
plt.plot(t,y_2)
plt.xlabel('x')
plt.ylabel('y(x)')
#plt.show()
## Change of coordinates
x=np.sin(y_1)*l
y=np.cos(y_1)*l
#print x
#print y
plt.plot(t,x)
#plt.show()
##Animation
fig = plt.figure()
ax = plt.axes(xlim=(-5,5), ylim=(-2,2))
line, = ax.plot([], [], lw=2)
def init():
line.set_data([],[])
return line,
##The animation part was difficult for me
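## A minimal sketch of the missing animation step (assuming the pivot sits at
## the origin and x, y computed above are the bob coordinates):
def animate(i):
    line.set_data([0, x[i]], [0, y[i]])
    return line,

anim = animation.FuncAnimation(fig, animate, init_func=init,
                               frames=n, interval=20, blit=True)
plt.show()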
|
[
"noreply@github.com"
] |
natalypulido.noreply@github.com
|
0d807fe80d041b739d1168b2dba4cb8b6812f554
|
8c5065f7832b56bab3841bcc8bbcc2a36d831328
|
/api/user.py
|
23fcea55b44eb1d6c3f1f7ff4b2ef47efebc24cf
|
[] |
no_license
|
anpavlov/tp_db
|
9899c6b578cdb2669c2253c39cab88647c8b190e
|
310761120bf817734d562f3a17117fd165f6d059
|
refs/heads/master
| 2021-01-19T15:33:35.649962
| 2015-06-27T20:17:43
| 2015-06-27T20:17:43
| 31,472,827
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 13,616
|
py
|
# import logging
from ext import mysql, get_followers, user_exists, get_subs
from flask import request, jsonify, Blueprint
from werkzeug.exceptions import BadRequest
from datetime import datetime
user_api = Blueprint('user_api', __name__)
# usersLog = logging.getLogger('usersLog')
# emailLog = logging.getLogger('emailLog')
@user_api.route('/create/', methods=['POST'])
def user_create():
try:
req_json = request.get_json()
except BadRequest:
# usersLog.error('Cant parse json')
return jsonify(code=2, response="Cant parse json")
if not ('username' in req_json and 'about' in req_json and 'name' in req_json and 'email' in req_json):
# usersLog.error('Wrong parameters')
return jsonify(code=3, response="Wrong parameters")
new_user_username = req_json['username']
new_user_about = req_json['about']
new_user_name = req_json['name']
new_user_email = req_json['email']
# emailLog.info('%s', new_user_email)
if 'isAnonymous' in req_json:
if req_json['isAnonymous'] is not False and req_json['isAnonymous'] is not True:
# usersLog.error('Wrong parameters')
return jsonify(code=3, response="Wrong parameters")
new_user_is_anon = req_json['isAnonymous']
else:
new_user_is_anon = False
conn = mysql.get_db()
cursor = conn.cursor()
if user_exists(cursor, new_user_email):
# usersLog.error('User with such email already exists')
return jsonify(code=5, response="User with such email already exists!")
sql_data = (new_user_about, new_user_email, new_user_is_anon, new_user_name, new_user_username)
cursor.execute('INSERT INTO User VALUES (null,%s,%s,%s,%s,%s)', sql_data)
conn.commit()
resp = {
"email": new_user_email,
"username": new_user_username,
"about": new_user_about,
"name": new_user_name,
"isAnonymous": new_user_is_anon,
"id": cursor.lastrowid,
}
return jsonify(code=0, response=resp)
@user_api.route('/details/', methods=['GET'])
def user_details():
req_params = request.args
if not ('user' in req_params):
return jsonify(code=3, response="Wrong parameters")
user_email = req_params['user']
conn = mysql.get_db()
cursor = conn.cursor()
cursor.execute('SELECT * FROM User WHERE email=%s', (user_email,))
user_data = cursor.fetchone()
if user_data is None:
return jsonify(code=1, response="No user with such email!")
follow_info = get_followers(cursor, user_email)
subs_list = get_subs(cursor, user_email)
resp = {
"id": user_data[0],
"about": user_data[1],
"email": user_data[2],
"isAnonymous": bool(user_data[3]),
"name": user_data[4],
"username": user_data[5],
"followers": follow_info['followers'],
"following": follow_info['following'],
"subscriptions": subs_list
}
return jsonify(code=0, response=resp)
@user_api.route('/follow/', methods=['POST'])
def user_follow():
try:
req_json = request.get_json()
except BadRequest:
return jsonify(code=2, response="Cant parse json")
if not ('follower' in req_json and 'followee' in req_json):
return jsonify(code=3, response="Wrong parameters")
follower_email = req_json['follower']
followee_email = req_json['followee']
conn = mysql.get_db()
cursor = conn.cursor()
cursor.execute('SELECT * FROM User WHERE email=%s', (follower_email,))
follower_data = cursor.fetchone()
if follower_data is None:
return jsonify(code=1, response="No user with such email!")
if not user_exists(cursor, followee_email):
return jsonify(code=1, response="No user with such email!")
cursor.execute("INSERT IGNORE INTO Followers VALUES (%s, %s)", (follower_email, followee_email))
conn.commit()
follow_info = get_followers(cursor, follower_email)
subs_list = get_subs(cursor, follower_email)
resp = {
"id": follower_data[0],
"about": follower_data[1],
"email": follower_data[2],
"isAnonymous": bool(follower_data[3]),
"name": follower_data[4],
"username": follower_data[5],
"followers": follow_info['followers'],
"following": follow_info['following'],
"subscriptions": subs_list
}
return jsonify(code=0, response=resp)
@user_api.route('/listFollowers/', methods=['GET'])
def user_list_followers():
req_params = request.args
if not ('user' in req_params):
return jsonify(code=3, response="Wrong parameters")
user_email = req_params['user']
if "since_id" in req_params:
since_id = req_params['since_id']
try:
since_id = int(since_id)
except ValueError:
return jsonify(code=3, response="Wrong parameters")
else:
since_id = 0
if "limit" in req_params:
limit = req_params['limit']
try:
limit = int(limit)
except ValueError:
return jsonify(code=3, response="Wrong parameters")
else:
limit = None
if "order" in req_params:
order = req_params['order']
if order != 'asc' and order != 'desc':
return jsonify(code=3, response="Wrong parameters")
else:
order = 'desc'
conn = mysql.get_db()
cursor = conn.cursor()
if not user_exists(cursor, user_email):
return jsonify(code=1, response="No user with such email!")
query = "SELECT id, about, email, isAnonymous, name, username FROM Followers F " +\
"JOIN User U ON F.follower_email=U.email WHERE id>=%s AND followee_email=%s ORDER BY name "
query += order
query += " LIMIT %s" if limit is not None else ""
sql_data = (since_id, user_email, limit) if limit is not None else (since_id, user_email)
cursor.execute(query, sql_data)
data = cursor.fetchall()
resp = []
for f in data:
follow_info = get_followers(cursor, f[2])
subs_list = get_subs(cursor, f[2])
user = {
"id": f[0],
"about": f[1],
"email": f[2],
"isAnonymous": bool(f[3]),
"name": f[4],
"username": f[5],
"followers": follow_info['followers'],
"following": follow_info['following'],
"subscriptions": subs_list
}
resp.append(user)
return jsonify(code=0, response=resp)
@user_api.route('/listFollowing/', methods=['GET'])
def user_list_following():
req_params = request.args
if not ('user' in req_params):
return jsonify(code=3, response="Wrong parameters")
user_email = req_params['user']
if "since_id" in req_params:
since_id = req_params['since_id']
try:
since_id = int(since_id)
except ValueError:
return jsonify(code=3, response="Wrong parameters")
else:
since_id = 0
if "limit" in req_params:
limit = req_params['limit']
try:
limit = int(limit)
except ValueError:
return jsonify(code=3, response="Wrong parameters")
else:
limit = None
if "order" in req_params:
order = req_params['order']
if order != 'asc' and order != 'desc':
return jsonify(code=3, response="Wrong parameters")
else:
order = 'desc'
conn = mysql.get_db()
cursor = conn.cursor()
if not user_exists(cursor, user_email):
return jsonify(code=1, response="No user with such email!")
query = "SELECT id, about, email, isAnonymous, name, username FROM Followers F " +\
"JOIN User U ON F.followee_email=U.email WHERE id>=%s AND follower_email=%s ORDER BY name "
query += order
query += " LIMIT %s" if limit is not None else ""
sql_data = (since_id, user_email, limit) if limit is not None else (since_id, user_email)
cursor.execute(query, sql_data)
data = cursor.fetchall()
resp = []
for f in data:
follow_info = get_followers(cursor, f[2])
subs_list = get_subs(cursor, f[2])
user = {
"id": f[0],
"about": f[1],
"email": f[2],
"isAnonymous": bool(f[3]),
"name": f[4],
"username": f[5],
"followers": follow_info['followers'],
"following": follow_info['following'],
"subscriptions": subs_list
}
resp.append(user)
return jsonify(code=0, response=resp)
@user_api.route('/listPosts/', methods=['GET'])
def user_list_posts():
req_params = request.args
if not ('user' in req_params):
return jsonify(code=3, response="Wrong parameters")
user_email = req_params['user']
if "limit" in req_params:
limit = req_params['limit']
try:
limit = int(limit)
except ValueError:
return jsonify(code=3, response="Wrong parameters")
else:
limit = None
if "order" in req_params:
order = req_params['order']
if order != 'asc' and order != 'desc':
return jsonify(code=3, response="Wrong parameters")
else:
order = 'desc'
if "since" in req_params:
since = req_params['since']
try:
datetime.strptime(since, "%Y-%m-%d %H:%M:%S")
except ValueError:
return jsonify(code=3, response="Wrong parameters")
else:
since = 0
conn = mysql.get_db()
cursor = conn.cursor()
if not user_exists(cursor, user_email):
return jsonify(code=1, response="No user with such email!")
query = "SELECT id, forum, thread, parent, message, DATE_FORMAT(date,'%%Y-%%m-%%d %%T') d," \
"isApproved, isHighlighted, isEdited, isSpam, isDeleted, likes, dislikes, points FROM Post " \
"WHERE date>=%s AND user=%s ORDER BY d "
query += order
query += " LIMIT %s" if limit is not None else ""
sql_data = (since, user_email, limit) if limit is not None else (since, user_email)
cursor.execute(query, sql_data)
data = cursor.fetchall()
resp = []
for p in data:
post = {
"id": p[0],
"forum": p[1],
"thread": p[2],
"user": user_email,
"parent": p[3],
"message": p[4],
"date": p[5],
"isApproved": bool(p[6]),
"isHighlighted": bool(p[7]),
"isEdited": bool(p[8]),
"isSpam": bool(p[9]),
"isDeleted": bool(p[10]),
"likes": p[11],
"dislikes": p[12],
"points": p[13]
}
resp.append(post)
return jsonify(code=0, response=resp)
@user_api.route('/unfollow/', methods=['POST'])
def user_unfollow():
try:
req_json = request.get_json()
except BadRequest:
return jsonify(code=2, response="Cant parse json")
if not ('follower' in req_json and 'followee' in req_json):
return jsonify(code=3, response="Wrong parameters")
follower_email = req_json['follower']
followee_email = req_json['followee']
conn = mysql.get_db()
cursor = conn.cursor()
cursor.execute("SELECT * FROM User WHERE email=%s", (follower_email,))
follower_data = cursor.fetchone()
if follower_data is None:
return jsonify(code=1, response="No user with such email!")
if not user_exists(cursor, followee_email):
return jsonify(code=1, response="No user with such email!")
cursor.execute("DELETE FROM Followers WHERE follower_email=%s AND followee_email=%s",
(follower_email, followee_email))
conn.commit()
follow_info = get_followers(cursor, follower_email)
subs_list = get_subs(cursor, follower_email)
resp = {
"id": follower_data[0],
"about": follower_data[1],
"email": follower_data[2],
"isAnonymous": bool(follower_data[3]),
"name": follower_data[4],
"username": follower_data[5],
"followers": follow_info['followers'],
"following": follow_info['following'],
"subscriptions": subs_list
}
return jsonify(code=0, response=resp)
@user_api.route('/updateProfile/', methods=['POST'])
def user_update_profile():
try:
req_json = request.get_json()
except BadRequest:
return jsonify(code=2, response="Cant parse json")
if not ('user' in req_json and 'about' in req_json and 'name' in req_json):
return jsonify(code=3, response="Wrong parameters")
user_email = req_json['user']
new_user_about = req_json['about']
new_user_name = req_json['name']
conn = mysql.get_db()
cursor = conn.cursor()
cursor.execute("SELECT id, username, isAnonymous FROM User WHERE email=%s", (user_email,))
user_data = cursor.fetchone()
if user_data is None:
return jsonify(code=1, response="No user with such email!")
sql_data = (new_user_about, new_user_name, user_email)
cursor.execute("UPDATE User SET about=%s, name=%s WHERE email=%s", sql_data)
conn.commit()
follow_info = get_followers(cursor, user_email)
subs_list = get_subs(cursor, user_email)
resp = {
"email": user_email,
"username": user_data[1],
"about": new_user_about,
"name": new_user_name,
"isAnonymous": bool(user_data[2]),
"id": user_data[0],
"followers": follow_info['followers'],
"following": follow_info['following'],
"subscriptions": subs_list
}
return jsonify(code=0, response=resp)
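
# A minimal wiring sketch (hypothetical app module; the import path and URL
# prefix are assumptions, not taken from this repo):
#
#   from flask import Flask
#   from api.user import user_api
#
#   app = Flask(__name__)
#   app.register_blueprint(user_api, url_prefix='/db/api/user')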
|
[
"dir94@mail.ru"
] |
dir94@mail.ru
|
d7a94ae36c04dd3ff4a389885add6569ac78eae9
|
5c41a2c6e3a60a8d3031d4413dbdb50c6a242f23
|
/K-Net Network Configuration Tool/center_left_frame.py
|
45054198044f874b0490de2fc75f463819c91607
|
[] |
no_license
|
ssc1982/network-configuration-tool
|
4c979995e7a4faad2d1d425111dce4ef9df80d24
|
0bb9c0f7540dcfa999c9ad4a31eb8803b4bf8d82
|
refs/heads/master
| 2021-01-25T10:22:14.855826
| 2018-03-09T02:00:43
| 2018-03-09T02:00:43
| 123,350,447
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,429
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import subprocess
import threading
from tkinter import *
from tkinter import ttk
from tkinter.messagebox import *
from dbHandler import creat_item, read_item, delete_item, update_item
from popupwindow import deviceinfoInputDialog
class devicemgt(Frame):
def __init__(self, parent, btm_frame, _center_right_nb, master=None, cnf={}, **kw):
super(devicemgt, self).__init__(master=None, cnf={}, **kw, )
self.tree = ttk.Treeview(self, height=18)
self._btm_frame = btm_frame
self._center_right_nb = _center_right_nb
self.device_layout()
def device_layout(self):
        # define the tree columns and their widths
self.tree['columns'] = ('#1', '#2', '#3', '#4')
self.tree.column('#0', width=70)
self.tree.column('#1', width=66)
self.tree.column('#2', width=66)
self.tree.column('#3', width=66)
self.tree.column('#4', width=66)
# define the heading name for each column
self.tree.heading('#0', text='Hostname')
self.tree.heading('#1', text='IP Address')
self.tree.heading('#2', text='Username')
self.tree.heading('#3', text='Password')
self.tree.heading('#4', text='Enable')
        # refresh the layout and reconstruct the tree
self.refresh_layout()
# add scrollbar on the right side of center_left frame
scrollbar = Scrollbar(self, width=20, orient='vertical', command=self.tree.yview)
self.tree.configure(yscrollcommand=scrollbar.set)
# add button group on the center_left frame
add_btn = Button(self, text='Add', command=self.add_device)
edit_btn = Button(self, text='Edit', command=self.edit_device)
delete_btn = Button(self, text='Delete', command=self.delete_device)
#ping_btn = Button(self, text='Ping', command=lambda : self.thread_task(self.ping_device))
ping_btn = Button(self, text='Ping', command=lambda: self.thread_task(self.ping_device))
tracert_btn = Button(self, text='Traceroute', command=lambda : self.thread_task(self.tracert_device))
telnet_btn = Button(self, text='Telnet', command=lambda : self.thread_task(self.telnet_device))
# define grid layout of tree, scrollbar and buttons
self.tree.grid(row=1, columnspan=6, sticky="nswe")
scrollbar.grid(row=1, column=6, sticky='nsw')
add_btn.grid(row=2, column=0, sticky='e')
edit_btn.grid(row=2, column=1, sticky='we')
delete_btn.grid(row=2, column=2, sticky='we')
ping_btn.grid(row=2, column=3, sticky='we')
tracert_btn.grid(row=2, column=4, sticky='we')
telnet_btn.grid(row=2, column=5, sticky='we')
def tree_constructor(self, _deviceinfo_tuple):
if 'router' == _deviceinfo_tuple.deviceType:
if not self.tree.exists('router'):
global node1
node1 = self.tree.insert('', 0, 'router', text=_deviceinfo_tuple.deviceType)
self.tree.item('router', open=True)
self.tree.insert(node1, 1, text=_deviceinfo_tuple.hostname, values=(_deviceinfo_tuple.ipAddress,
_deviceinfo_tuple.username,
_deviceinfo_tuple.password,
_deviceinfo_tuple.enable))
else:
self.tree.insert(node1, 1, text=_deviceinfo_tuple.hostname, values=(_deviceinfo_tuple.ipAddress,
_deviceinfo_tuple.username,
_deviceinfo_tuple.password,
_deviceinfo_tuple.enable))
elif 'switch' == _deviceinfo_tuple.deviceType:
##alternatively:
if not self.tree.exists('switch'):
global node2
node2 = self.tree.insert("", 0, 'switch', text=_deviceinfo_tuple.deviceType)
self.tree.item('switch', open=True)
self.tree.insert(node2, 1, text=_deviceinfo_tuple.hostname, values=(_deviceinfo_tuple.ipAddress,
_deviceinfo_tuple.username,
_deviceinfo_tuple.password,
_deviceinfo_tuple.enable))
else:
self.tree.insert(node2, 1, text=_deviceinfo_tuple.hostname, values=(_deviceinfo_tuple.ipAddress,
_deviceinfo_tuple.username,
_deviceinfo_tuple.password,
_deviceinfo_tuple.enable))
else:
print('wrong device type')
    #check whether the tree currently has a selection
    def has_selection(self, obj):
        return bool(obj)

    def show_warning(func):
        def wrapper(self):
            if self.has_selection(self.tree.selection()):
                func(self)
            else:
                showwarning('Warning', 'Please select at least one device !')
                return
        return wrapper
def add_device(self):
deviceinfo_inputDialog = deviceinfoInputDialog(())
self.wait_window(deviceinfo_inputDialog)
if deviceinfo_inputDialog.deviceinfo_tuple is None:
return
elif not 'router' == deviceinfo_inputDialog.deviceinfo_tuple[0] and \
not 'switch' == deviceinfo_inputDialog.deviceinfo_tuple[0]:
print(not 'router' == deviceinfo_inputDialog.deviceinfo_tuple[0])
#print(not 'switch' == deviceinfo_inputDialog.deviceinfo_tuple[0])
return
creat_item(deviceinfo_inputDialog.deviceinfo_tuple)
self.refresh_layout()
self._btm_frame.logging(deviceinfo_inputDialog.deviceinfo_tuple[2],
'A new device has been added to the system !')
    @show_warning
def edit_device(self):
if len(self.tree.selection()) > 1:
            showerror('More Items', 'You can\'t edit more than one device at once !')
return
_deviceinfo_dict = self.tree.item(self.tree.selection())
deviceinfo_inputDialog = deviceinfoInputDialog(self.tree.parent(self.tree.selection()), _deviceinfo_dict)
self.wait_window(deviceinfo_inputDialog)
if deviceinfo_inputDialog.deviceinfo_tuple is None:
return
update_item(deviceinfo_inputDialog.deviceinfo_tuple)
self.refresh_layout()
self._btm_frame.logging(deviceinfo_inputDialog.deviceinfo_tuple[2],
'An update has been made to this device !')
    @show_warning
def delete_device(self):
rs = askokcancel('Danger Action', 'Are you sure you want to delete the device ?')
if rs:
for iid in self.tree.selection():
_devices_dict = self.tree.item(iid)
if 0 == len(_devices_dict['values']):
                    showerror('Wrong Item', 'This is not a device !')
return
delete_item(_devices_dict['values'][0])
self.refresh_layout()
self._btm_frame.logging(_devices_dict['values'][0], 'This device has been removed from the system !')
def refresh_layout(self):
self.tree.delete(*self.tree.get_children())
_deviceinfo_list = read_item()
for _deviceinfo_tuple in _deviceinfo_list:
self.tree_constructor(_deviceinfo_tuple)
def thread_task(self, func):
_thread = threading.Thread(target=func)
_thread.start()
#_thread.join()
def selection_items(func):
def wrapper(self):
for iid in self.tree.selection():
_devices_dict = self.tree.item(iid)
if 0 == len(_devices_dict['values']):
                    showerror('Wrong Item', 'This is not a device !')
return
func(self)
return wrapper
    @show_warning
def ping_device(self):
for iid in self.tree.selection():
_devices_dict = self.tree.item(iid)
if 0 == len(_devices_dict['values']):
                showerror('Wrong Item', 'This is not a device !')
return
process = subprocess.Popen(['ping', '-n', '5', '-l', '1470', _devices_dict['values'][0]],
stdout = subprocess.PIPE, stderr = subprocess.PIPE )
_output, _error = process.communicate()
self._btm_frame.logging(_devices_dict['values'][0], _output.decode('utf-8'))
print(_output.decode('utf-8'))
#print(_error.decode('UTF-8'))
    #@show_warning
def telnet_device(self):
_devices_list = []
for iid in self.tree.selection():
_devices_dict = self.tree.item(iid)
if 0 == len(_devices_dict['values']):
                showerror('Wrong Item', 'This is not a device !')
return
_devices_list.append(_devices_dict)
#self._btm_frame.logging(_devices_dict['values'][0], 'Sending request to IP Address: %s '% str(_devices_dict['values'][0]))
self._center_right_nb.telnetTab_layout(_devices_list)
    @show_warning
def tracert_device(self):
for iid in self.tree.selection():
_devices_dict = self.tree.item(iid)
if 0 == len(_devices_dict['values']):
                showerror('Wrong Item', 'This is not a device !')
return
process = subprocess.Popen(['tracert', '-d', _devices_dict['values'][0]],
stdout = subprocess.PIPE, stderr = subprocess.PIPE )
_output, _error = process.communicate()
print(_output.decode('utf-8'))
self._btm_frame.logging(_devices_dict['values'][0], _output.decode('utf-8'))
#print(_error.decode('UTF-8'))
|
[
"ssc1982@gmail.com"
] |
ssc1982@gmail.com
|
2c6fe46823e39e688f819c469c220c6d403c6e2b
|
1f660e892411c3c750f372719d1b1264e87b9552
|
/z-score2.py
|
38cd3d56ea66ebc24d1ea5ce13c3ebaf3c16194b
|
[] |
no_license
|
sahasra09/C111HW
|
dbb76953d2be24f2069b02640ed0a817b2b3cafd
|
9fb45622862618fa749a89f6d8777616e1618d8a
|
refs/heads/main
| 2023-08-31T15:15:24.556865
| 2021-10-24T16:02:16
| 2021-10-24T16:02:16
| 419,700,533
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,096
|
py
|
import plotly.figure_factory as ff
import plotly.graph_objects as go
import statistics
import random
import pandas as pd
import csv
df = pd.read_csv("School2.csv")
data = df["Math_score"].tolist()
def random_set_of_mean(counter):
dataset = []
for i in range(0, counter):
random_index= random.randint(0,len(data)-1)
value = data[random_index]
dataset.append(value)
mean = statistics.mean(dataset)
return mean
mean_list = []
for i in range(0,1000):
set_of_means= random_set_of_mean(100)
mean_list.append(set_of_means)
std_deviation = statistics.stdev(mean_list)
mean = statistics.mean(mean_list)
print("mean of sampling distribution:- ",mean)
print("Standard deviation of sampling distribution:- ", std_deviation)
first_std_deviation_start, first_std_deviation_end = mean-std_deviation, mean+std_deviation
second_std_deviation_start, second_std_deviation_end = mean-(2*std_deviation), mean+(2*std_deviation)
third_std_deviation_start, third_std_deviation_end = mean-(3*std_deviation), mean+(3*std_deviation)
df = pd.read_csv("School_1_Sample.csv")
data = df["Math_score"].tolist()
mean_of_sample1 = statistics.mean(data)
print("Mean of sample 1:- ",mean_of_sample1)
fig = ff.create_distplot([mean_list], ["student marks"], show_hist=False)
fig.add_trace(go.Scatter(x=[mean, mean], y=[0, 0.17], mode="lines", name="MEAN"))
fig.add_trace(go.Scatter(x=[mean_of_sample1, mean_of_sample1], y=[0, 0.17], mode="lines", name="MEAN OF STUDENTS WHO HAD MATH LABS"))
fig.add_trace(go.Scatter(x=[first_std_deviation_end, first_std_deviation_end], y=[0, 0.17], mode="lines", name="STANDARD DEVIATION 1 END"))
fig.add_trace(go.Scatter(x=[second_std_deviation_end, second_std_deviation_end], y=[0, 0.17], mode="lines", name="STANDARD DEVIATION 2 END"))
fig.add_trace(go.Scatter(x=[third_std_deviation_end, third_std_deviation_end], y=[0, 0.17], mode="lines", name="STANDARD DEVIATION 3 END"))
fig.show()
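# The z-score measures how many standard deviations the sample mean lies from
# the sampling-distribution mean: z = (sample_mean - mean) / std_deviation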
z_score = (mean_of_sample1-mean)/std_deviation
print("The z score is = ",z_score)
|
[
"noreply@github.com"
] |
sahasra09.noreply@github.com
|
906e2d2b5c18baee10a4d6b8d3e55666624c3c73
|
a2cb144815aca6dd67cbc92bd5dbed74b4e7fe43
|
/app.py
|
6908f2b2b5a6cd693c984c12dbc5d431d31f8982
|
[] |
no_license
|
Preymark9/Project-Week-2
|
939c5f0f9f89b94d961f3aba274e9e56336e803c
|
ff612feb0c40f952e8f21761e195a39bbfa9d63f
|
refs/heads/master
| 2020-04-01T16:31:04.741734
| 2018-10-30T23:23:16
| 2018-10-30T23:23:16
| 153,384,499
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 766
|
py
|
from flask import Flask, render_template, redirect
from flask_pymongo import PyMongo
import scrape_boxoffice
app = Flask(__name__)
# Use flask_pymongo to set up mongo connection
app.config["MONGO_URI"] = "mongodb://localhost:27017/craigslist_app"
mongo = PyMongo(app)
# Or set inline
# mongo = PyMongo(app, uri="mongodb://localhost:27017/craigslist_app")
@app.route("/")
def index():
movie_data = mongo.db.movie_data.find_one()
return render_template("index.html", movie_data=movie_data)
@app.route("/scrape")
def scraper():
movie_data = mongo.db.movie_data
listings_data = scrape_boxoffice.scrape()
movie_data.update({}, listings_data, upsert=True)
return redirect("/", code=302)
if __name__ == "__main__":
app.run(debug=True)
|
[
"noreply@github.com"
] |
Preymark9.noreply@github.com
|
d8612b48276983c71640d96cc08cdc06b9e74a4e
|
b832fecd4dbf14de9b4e1b6fe8710c89290000f0
|
/LeetCode/111.二叉树的最小深度.py
|
7efc41d4fac1ac08f83e966c73dce6e3c09ff392
|
[] |
no_license
|
biground/MyNote
|
0a5eeaf9f5cd01dc83ee54252874016ac0b29c83
|
6b45279f02321b833ef1d1bb73cd729c90f0a0ed
|
refs/heads/master
| 2021-05-17T14:23:51.479022
| 2021-03-04T01:09:32
| 2021-03-04T01:09:32
| 250,819,133
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 850
|
py
|
#
# @lc app=leetcode.cn id=111 lang=python3
#
# [111] 二叉树的最小深度
#
# @lc code=start
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
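# Approach: calcDepth returns 1 for a missing child, and since the minimum
# depth must end at a leaf, the `x if x > 1 else sibling` filter swaps a
# missing-child branch for its sibling before taking min().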
class Solution:
def calcDepth(self, root) -> int:
if not root:
return 1
left = self.calcDepth(root.left)
right = self.calcDepth(root.right)
return min(left if left > 1 else right, right if right > 1 else left) + 1
def minDepth(self, root: TreeNode) -> int:
if not root:
return 0
left = self.calcDepth(root.left)
right = self.calcDepth(root.right)
return min(left if left > 1 else right, right if right > 1 else left)
# @lc code=end
|
[
"cyt4byouth@163.com"
] |
cyt4byouth@163.com
|
a74f6601353e6a1dcfdfb5396a1d035394617695
|
08e3777e801866170925b402b0233a608f0fa46b
|
/lesson_3_standart_library/Task_3_4.py
|
e9b0378151af9022b85f9776fedb063f23d8677b
|
[] |
no_license
|
Pavlmir/python-interview-geekbr
|
38f198c02b2274abfb0d14d94f512e15931cac63
|
3672dda6e8380d58d36fd528cfbc249388c6c143
|
refs/heads/master
| 2023-08-15T12:34:57.480044
| 2021-09-18T09:13:44
| 2021-09-18T09:13:44
| 286,043,146
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,297
|
py
|
# 4. Написать программу, в которой реализовать две функции. В первой должен создаваться простой текстовый файл.
# Если файл с таким именем уже существует, выводим соответствующее сообщение. Необходимо открыть
# файл и подготовить два списка: с текстовой и числовой информацией. Для создания списков использовать генераторы.
# Применить к спискам функцию zip(). Результат выполнения этой функции должен должен быть обработан и записан в файл
# таким образом, чтобы каждая строка файла содержала текстовое и числовое значение. Вызвать вторую функцию.
# В нее должна передаваться ссылка на созданный файл. Во второй функции необходимо реализовать открытие файла
# и простой построчный вывод содержимого. Вся программа должна запускаться по вызову первой функции.
import os
import random
from functools import reduce
LINES_COUNT = STRING_SIZE = 7
def get_random_string():
return reduce(lambda string, char: string + char, [chr(random.randint(ord('a'), ord('z'))) for _ in range(STRING_SIZE)])
def create_text_file(name):
if os.path.isfile(name):
print('Файл с таким именем уже существует')
return False
with open(name, 'w', encoding='utf-8') as d:
numbers = [random.randint(0, 100) for _ in range(LINES_COUNT)]
strings = [get_random_string() for _ in range(LINES_COUNT)]
d.writelines([f'{number} {text}\n' for number, text in zip(numbers, strings)])
return d
def print_text_file(desc):
with open(desc.name, 'r', encoding='utf-8') as d:
for line in d:
print(line)
descriptor = create_text_file('new_file.txt')
if descriptor:
print_text_file(descriptor)
|
[
"mirprost@gmail.com"
] |
mirprost@gmail.com
|
4e6734d92bcb9101ba205b9adf9f9491db79aaec
|
b9bc8ea312b421604e12b259babc459fbc474d99
|
/ProyectTK/Models/clientesDB.py
|
aeefc7bacaa5ccd515f995e720f37374d1ffacd7
|
[] |
no_license
|
Cristhian-32/ProyectTK
|
4d55e867679e0a10b7c010061856463a20a28e9d
|
4769394431a5c1fc1851d1813b1a4dcabf69d4db
|
refs/heads/main
| 2023-07-01T14:17:28.462144
| 2021-07-22T18:51:15
| 2021-07-22T18:51:15
| 388,562,316
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,058
|
py
|
import sqlite3
class DataCliente:
def __init__(self):
self.conn = sqlite3.connect('basedata.db')
self.cursor = self.conn.cursor()
    def insertItems(self, name, phone, mail, adress, reference):
        sql = "INSERT INTO clientes (name, phone, mail, adress, reference) VALUES(?,?,?,?,?)"
        params = (name, phone, mail, adress, reference)
        self.cursor.execute(sql, params)
        self.conn.commit()
    def returnOneItem(self, phone):
        sql = "SELECT * FROM clientes WHERE phone = ?"
        self.cursor.execute(sql, (phone,))
        return self.cursor.fetchone()
def returnAllElements(self):
sql = "SELECT * FROM clientes ORDER BY id"
self.cursor.execute(sql)
return self.cursor.fetchall()
    def delete(self, phone):
        sql = "DELETE FROM clientes WHERE phone = ?"
        self.cursor.execute(sql, (phone,))
        self.conn.commit()
    def updateItem(self, elem, pls):
        sql = "UPDATE clientes SET name=?, phone=?, mail=?, adress=?, reference=? WHERE phone = ?"
        self.cursor.execute(sql, (elem[0], elem[1], elem[2], elem[3], elem[4], pls))
        self.conn.commit()
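
# A minimal usage sketch (hypothetical values; assumes the `clientes` table
# already exists in basedata.db):
#
#   db = DataCliente()
#   db.insertItems('Ana', '999-888-777', 'ana@example.com', 'Av. Siempre Viva 1', 'frente al parque')
#   print(db.returnAllElements())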
|
[
"noreply@github.com"
] |
Cristhian-32.noreply@github.com
|
4b8d6192e4c368f83baac2ca24e9a5d5267cc2e4
|
98e4dc41e3d994dfb55a2553c79d1b61590ecca6
|
/PY/facts/fact2.py
|
0f60feac5e1681e6685352cae49069d18d19a04e
|
[] |
no_license
|
krohak/Project_Euler
|
b753c4f3bbf26a5eff3203e27482599d1e089fc6
|
1d8a2326543d69457f1971af9435b3e93ab32f52
|
refs/heads/master
| 2022-09-02T10:48:59.472111
| 2022-08-18T11:11:16
| 2022-08-18T11:11:16
| 111,204,162
| 4
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 370
|
py
|
# Positive Infinity
p_infinity = float('Inf')
if 99999999999999 > p_infinity:
print("The number is greater than Infinity!")
else:
print("Infinity is greatest")
# Negative Infinity
n_infinity = float('-Inf')
if -99999999999999 < n_infinity:
print("The number is lesser than Negative Infinity!")
else:
print("Negative Infinity is least")
|
[
"rohaksinghal14@gmail.com"
] |
rohaksinghal14@gmail.com
|
4ea31074125f6450c978cc6ec7b64798c6647fa9
|
b922f4cc4b8877713147e93c1cccc26d0d52f729
|
/flybird_table/yuanyang_env.py
|
4dc8f6fc76041c58beae8d11d7d940cbab0a4199
|
[] |
no_license
|
pittacus/rl_course
|
63e9b9ba5796d3f7af62bd683df7ea83723a148d
|
f8d5c78a08947bab97f6392aba3f2530c2217702
|
refs/heads/master
| 2021-06-27T00:22:56.581433
| 2017-09-16T13:03:43
| 2017-09-16T13:03:43
| 103,752,774
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,257
|
py
|
# coding=utf8
import pygame
from load import *
import math
import time
import random
class YuanYangEnv:
def __init__(self):
self.viewer=None
self.FPSCLOCK = pygame.time.Clock()
self.actions=['e','s','w','n']
self.states=[]
for i in range(0,100):
self.states.append(i)
        #screen size
self.screen_size=(400,300)
self.bird_position=(0,0)
self.limit_distance_x=40
self.limit_distance_y=30
self.obstacle_size=[40,20]
self.obstacle1_x = []
self.obstacle1_y = []
self.obstacle2_x = []
self.obstacle2_y = []
self.state=0
self.next_state=[0,0]
self.gamma=0.8
for i in range(12):
            #first obstacle
self.obstacle1_x.append(120)
if i <= 5:
self.obstacle1_y.append(20 * i)
else:
self.obstacle1_y.append(20 * (i + 3))
            # second obstacle
self.obstacle2_x.append(240)
if i <= 6:
self.obstacle2_y.append(20 * i)
else:
self.obstacle2_y.append(20 * (i + 3))
self.bird_male_init_position=[0.0,0.0]
self.bird_male_position = [0, 0]
self.bird_female_init_position=[360,0]
#def step(self):
def reset(self):
        #randomly generate an initial state
        flag1=1
        flag2=1
        while flag1 == 1 or flag2 == 1:
state=self.states[int(random.random()*len(self.states))]
state_position = self.state_to_position(state)
flag1 = self.collide(state_position)
flag2 = self.find(state_position)
# self.bird_male_position=self.state_to_position(state)
return state
    #convert a state index to pixel coordinates (10x10 grid, 40x30-px cells)
def state_to_position(self,state):
i=int(state/10)
j=state%10
position=[0,0]
position[0]=40*j
position[1]=30*i
return position
def position_to_state(self,position):
i=position[0]/40
j=position[1]/30
return int(i+10*j)
def transform(self,state, action):
        #convert the current state to coordinates
current_position=self.state_to_position(state)
next_state = [0,0]
flag_collide=0
flag_find=0
        #check whether the current position collides with an obstacle
        flag_collide=self.collide(current_position)
        #check whether the current state is the terminal state
        flag_find=self.find(current_position)
if flag_collide==1 or flag_find==1:
return state, 0, True
        #state transition
if action=='e':
next_state[0]=current_position[0]+40
next_state[1]=current_position[1]
if action=='s':
next_state[0]=current_position[0]
next_state[1]=current_position[1]+30
if action=='w':
next_state[0] = current_position[0] - 40
next_state[1] = current_position[1]
if action=='n':
next_state[0] = current_position[0]
next_state[1] = current_position[1] - 30
        #check whether next_state collides with an obstacle
flag_collide = self.collide(next_state)
        #on collision the reward is -1 and the episode ends
if flag_collide==1:
return self.position_to_state(current_position),-1,True
        #check whether the terminal state is reached
flag_find = self.find(next_state)
if flag_find==1:
return self.position_to_state(next_state),1,True
return self.position_to_state(next_state), 0, False
def render(self):
if self.viewer is None:
pygame.init()
self.viewer=pygame.display.set_mode(self.screen_size,0,32)
self.bird_male = load_bird_male()
self.bird_female = load_bird_female()
self.background = load_background()
self.obstacle = load_obstacle()
#self.viewer.blit(self.bird_male, self.bird_male_init_position)
self.viewer.blit(self.bird_female, self.bird_female_init_position)
self.viewer.blit(self.background, (0, 0))
        # draw the obstacles
        # self.viewer.empty()
        #erase by redrawing the background
self.viewer.blit(self.background,(0,0))
self.viewer.blit(self.bird_female, self.bird_female_init_position)
for i in range(12):
self.viewer.blit(self.obstacle, (self.obstacle1_x[i], self.obstacle1_y[i]))
self.viewer.blit(self.obstacle, (self.obstacle2_x[i], self.obstacle2_y[i]))
# self.viewer.clear()
self.viewer.blit(self.bird_male, self.bird_male_position)
#self.viewer.blit(self.bird_female, self.bird_female_init_position)
pygame.display.update()
time.sleep(0.1)
self.FPSCLOCK.tick(30)
def collide(self,state_position):
flag = 1
flag1 = 1
flag2 = 1
        # check against the first obstacle
dx = []
dy = []
for i in range(12):
dx1 = abs(self.obstacle1_x[i] - state_position[0])
dx.append(dx1)
dy1 = abs(self.obstacle1_y[i] - state_position[1])
dy.append(dy1)
mindx = min(dx)
mindy = min(dy)
if mindx >= self.limit_distance_x or mindy >= self.limit_distance_y:
flag1 = 0
        # check against the second obstacle
second_dx = []
second_dy = []
for i in range(12):
dx2 = abs(self.obstacle2_x[i] - state_position[0])
second_dx.append(dx2)
dy2 = abs(self.obstacle2_y[i] - state_position[1])
second_dy.append(dy2)
mindx = min(second_dx)
mindy = min(second_dy)
if mindx >= self.limit_distance_x or mindy >= self.limit_distance_y:
flag2 = 0
if flag1 == 0 and flag2 == 0:
flag = 0
if state_position[0] > 360 or state_position[0] < 0 or state_position[1] > 270 or state_position[1] < 0:
flag = 1
return flag
def find(self,state_position):
flag=0
if abs(state_position[0]-self.bird_female_init_position[0])<self.limit_distance_x and abs(state_position[1]-self.bird_female_init_position[1])<self.limit_distance_y:
flag=1
return flag
if __name__=="__main__":
yy=YuanYangEnv()
yy.render()
speed = 50
clock = pygame.time.Clock()
state=0
# for i in range(12):
# flag_collide = 0
# obstacle1_coord = [yy.obstacle1_x[i],yy.obstacle1_y[i]]
# obstacle2_coord = [yy.obstacle2_x[i],yy.obstacle2_y[i]]
# flag_collide = yy.collide(obstacle1_coord)
# print(flag_collide)
# print(yy.collide(obstacle2_coord))
while True:
for event in pygame.event.get():
if event.type == QUIT:
exit()
# time_passed_second = clock.tick()/1000
# i= int(state/10)
# j=state%10
# yy.bird_male_position[0]=j*40
# yy.bird_male_position[1]=i*30
# time.sleep(0.2)
# pygame.display.update()
# state+=1
# yy.render()
# print(yy.collide())
|
[
"pittacus@labook.local"
] |
pittacus@labook.local
|
5aacaead1097698c67a0cf2f9cb30544aab59130
|
87a4fd12425a507e757da6e1bef4c68ee42a16e5
|
/gemd/entity/attribute/property.py
|
0cc36322b3c4b74a09fbbfe30f767df554a1e8e3
|
[
"Apache-2.0"
] |
permissive
|
lkubie/gemd-python
|
8e548355ee40dba8cb5e961dac464202e005ed78
|
1f98fb471060bbba312b86474a5bf6818f317fe1
|
refs/heads/master
| 2023-04-14T22:34:13.898549
| 2020-06-17T21:08:38
| 2020-06-17T21:08:38
| 273,071,479
| 0
| 0
|
Apache-2.0
| 2020-06-17T20:25:39
| 2020-06-17T20:25:38
| null |
UTF-8
|
Python
| false
| false
| 355
|
py
|
from gemd.entity.attribute.base_attribute import BaseAttribute
class Property(BaseAttribute):
"""
Property of a material, measured in a MeasurementRun or specified in a MaterialSpec.
Properties are characteristics of a material that could be measured, e.g. chemical composition,
density, yield strength.
"""
typ = "property"
|
[
"maxhutch@gmail.com"
] |
maxhutch@gmail.com
|
ecae270e06b48e6ab922ff9dcc8126b2f6b9f6ac
|
3b89c0a97ac6b58b6923a213bc8471e11ad4fe69
|
/python/CodingExercises/PairsPositiveNegativeValuesArray.py
|
bdc19b169e9ffca83c2d50eef9cb877d49558397
|
[] |
no_license
|
ksayee/programming_assignments
|
b187adca502ecf7ff7b51dc849d5d79ceb90d4a6
|
13bc1c44e1eef17fc36724f20b060c3339c280ea
|
refs/heads/master
| 2021-06-30T07:19:34.192277
| 2021-06-23T05:11:32
| 2021-06-23T05:11:32
| 50,700,556
| 1
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,682
|
py
|
'''
Pairs of Positive Negative values in an array
Given an array of distinct integers,
print all the pairs having positive value and negative value of a number that exists in the array.
We need to print pairs in order of their occurrences.
A pair whose any element appears first should be printed first.
Examples:
Input : arr[] = { 1, -3, 2, 3, 6, -1 }
Output : -1 1 -3 3
Input : arr[] = { 4, 8, 9, -4, 1, -1, -8, -9 }
Output : -1 1 -4 4 -8 8 -9 9
'''
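# Approach: the first pass pushes values onto stk and marks a value as 2 in
# `dict` once both +v and -v have been seen; the second pass walks the original
# order, emits each completed pair (negative first), and zeroes the mark so
# each pair prints only once.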
def PairsPositiveNegativeValuesArray(ary):
stk=[]
dict={}
fnl_lst=[]
for l in ary:
stk.append(l)
if l not in dict.keys():
if (-1)*l in dict.keys():
dict[(-1)*l]=2
else:
dict[l]=1
for l in stk:
if (l in dict.keys() or (-1)*l in dict.keys()):
try:
if dict[l]==2:
if l<0:
fnl_lst.append(l)
fnl_lst.append((-1)*l)
else:
fnl_lst.append((-1) * l)
fnl_lst.append(l)
dict[l]=0
except:
if dict[(-1)*l]==2:
if l<0:
fnl_lst.append(l)
fnl_lst.append((-1)*l)
else:
fnl_lst.append((-1) * l)
fnl_lst.append(l)
dict[(-1) * l] = 0
return fnl_lst
def main():
ary=[1, -3, 2, 3, 6, -1]
print(PairsPositiveNegativeValuesArray(ary))
ary = [4, 8, 9, -4, 1, -1, -8, -9]
print(PairsPositiveNegativeValuesArray(ary))
if __name__=='__main__':
main()
|
[
"kartiksayee@gmail.com"
] |
kartiksayee@gmail.com
|
0ae17f38d0c3ba400e6be65953b83ffc4b6b185d
|
d697718afa04781434c332fe5f60496a57e0ee05
|
/accounts/admin.py
|
f868d81b4a03d6cdfde555adba2523e2e524278f
|
[] |
no_license
|
nazninnahartumpa/python_ecommerce_project
|
361c5865507c0248841441393b727cddaedad59a
|
b84ce2f7d99f9613e6ed864e10aef9566505f88c
|
refs/heads/master
| 2020-04-15T04:03:24.379587
| 2019-01-17T10:05:16
| 2019-01-17T10:05:16
| 164,369,658
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,183
|
py
|
from django.contrib import admin
from django.contrib.auth import get_user_model
from django.contrib.auth.models import Group
from django.contrib.auth.admin import UserAdmin as BaseUserAdmin
from .forms import UserAdminCreationForm, UserAdminChangeForm
from .models import GuestEmail
User = get_user_model()
class UserAdmin(BaseUserAdmin):
form = UserAdminChangeForm
add_form = UserAdminCreationForm
list_display = ('email', 'admin')
list_filter = ('admin','staff','active',)
fieldsets = (
(None, {'fields': ('full_name','email','password')}),
#('Full Name', {'fields': ('full_name',)}),
('Permissions', {'fields':('admin','staff','active',)}),
)
add_fieldsets = (
(None, {
'classes':('wide',),
'fields':('email', 'password1', 'password2')},
),
)
search_fields = ('email','full_name',)
ordering = ('email',)
filter_horizontal = ()
admin.site.register(User, UserAdmin)
admin.site.unregister(Group)
class GuestEmailAdmin(admin.ModelAdmin):
search_fields = ['email']
class Meta:
model = GuestEmail
admin.site.register(GuestEmail, GuestEmailAdmin)
|
[
"naznintumpa5@gmail.com"
] |
naznintumpa5@gmail.com
|
2e50fcf5c6af2c79c0cb4a889025145bc74a0d6d
|
e0fbc96bec9e83bc3fc3482e432bd2c6b6ad05a6
|
/MRPT/vqz_old/atoms/V_1/mrpt.py
|
db346b0534f264bcf8532306ef09918e32752d1f
|
[
"MIT"
] |
permissive
|
mussard/share_data_benchmark
|
fe2cbd95879e069be2475d39b191de4f04e140ee
|
c02bfa4017b9008800cabe47d7c7959f82c26060
|
refs/heads/master
| 2020-03-11T21:25:00.264437
| 2019-04-29T00:28:13
| 2019-04-29T00:28:13
| 130,264,292
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,856
|
py
|
import json
from pyscf import gto,scf,mcscf, fci, lo, ci, cc
from pyscf.scf import ROHF, UHF,ROKS
import numpy as np
import pandas as pd
# THIS IS WERE IT STARTS ====================================
df=json.load(open("../../../trail.json"))
spins={'Sc':1, 'Ti':2, 'V':3, 'Cr':6, 'Mn':5, 'Fe':4, 'Cu':1}
nd={'Sc':(1,0), 'Ti':(2,0), 'V':(3,0), 'Cr':(5,0), 'Mn':(5,0), 'Fe':(5,1), 'Cu':(5,5)}
cas={'Sc':3, 'Ti':4, 'V':5, 'Cr':6, 'Mn':7, 'Fe':8, 'Cu':11}
datacsv={}
for nm in ['atom','charge','method','basis','pseudopotential',
'totalenergy','totalenergy-stocherr','totalenergy-syserr']:
datacsv[nm]=[]
basis='vqz'
el='V'
charge=1
mol=gto.Mole()
mol.ecp={}
mol.basis={}
mol.ecp[el]=gto.basis.parse_ecp(df[el]['ecp'])
mol.basis[el]=gto.basis.parse(df[el][basis])
mol.charge=charge
if el == 'Cr' or el == 'Cu':
mol.spin=spins[el]-charge
else:
mol.spin=spins[el]+charge
mol.build(atom="%s 0. 0. 0."%el,verbose=4)
m=ROHF(mol)
m.level_shift=1000.0
dm=m.from_chk("../../../../HF/atoms/"+el+basis+str(charge)+".chk")
hf=m.kernel(dm)
m.analyze()
from pyscf.shciscf import shci
mc = shci.SHCISCF(m, 6, cas[el]-charge)
#mc.fcisolver.conv_tol = 1e-14
mc.fcisolver.mpiprefix="mpirun -np 28"
mc.fcisolver.num_thrds=12
mc.verbose = 4
e_casscf = mc.kernel()[0]
from pyscf.icmpspt import icmpspt
pt=icmpspt.icmpspt(mc,rdmM=500, PTM=1000,\
pttype="MRLCC",\
third_order=True,\
fully_ic=True,\
do_dm4=True)
datacsv['atom'].append(el)
datacsv['charge'].append(charge)
datacsv['method'].append('MRPT')
datacsv['basis'].append(basis)
datacsv['pseudopotential'].append('trail')
datacsv['totalenergy'].append(e_casscf+pt)
datacsv['totalenergy-stocherr'].append(0.0)
datacsv['totalenergy-syserr'].append(0.0)
pd.DataFrame(datacsv).to_csv(el+".csv",index=False)
|
[
"bastien.mussard@colorado.edu"
] |
bastien.mussard@colorado.edu
|
39b1899c62b519397f4996966f3bd887223073db
|
9641aa545391761533aaf67959a088240d32d873
|
/image-classification/personalities/testPersonalities.py
|
4677edc19bb322fa10553d53d9d753c36053a667
|
[] |
no_license
|
akshay-kamloo/Predictive-Analysis-in-Indian-Healthcare-Retinopathy
|
ea522053cda86939cd964f4c00c3c03bdb0a1a1c
|
177ddf69f03751e84071fa2efab9a604c5a3cb0e
|
refs/heads/master
| 2020-04-01T11:37:15.820271
| 2020-01-11T18:16:15
| 2020-01-11T18:16:15
| 153,169,966
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,375
|
py
|
from sparkdl import readImages
from pyspark.sql.functions import lit
from pyspark.ml.classification import LogisticRegression
from pyspark.ml import Pipeline
from sparkdl import DeepImageFeaturizer
img_dir = "/home/mvk/images_classification-master/personalities"
jobs_df = readImages(img_dir + "/jobs").withColumn("label", lit(1))
zuckerberg_df = readImages(img_dir + "/zuckerberg").withColumn("label", lit(0))
jobs_train, jobs_test = jobs_df.randomSplit([0.6, 0.4])
zuckerberg_train, zuckerberg_test = zuckerberg_df.randomSplit([0.6, 0.4])
train_df = jobs_train.unionAll(zuckerberg_train)
test_df = jobs_test.unionAll(zuckerberg_test)
featurizer = DeepImageFeaturizer(inputCol="image", outputCol="features", modelName="InceptionV3")
lr = LogisticRegression(maxIter=20, regParam=0.05, elasticNetParam=0.3, labelCol="label")
p = Pipeline(stages=[featurizer, lr])
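# Transfer learning: the pretrained InceptionV3 network acts as a frozen
# feature extractor, and only the logistic regression head is trained here.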
p_model = p.fit(train_df)
predictions = p_model.transform(test_df)
predictions.select("filePath", "prediction").show(truncate=False)
from pyspark.ml.evaluation import MulticlassClassificationEvaluator
df = p_model.transform(test_df)
df.show()
predictionAndLabels = df.select("prediction", "label")
evaluator = MulticlassClassificationEvaluator(metricName="accuracy")
print("Training set accuracy = " + str(evaluator.evaluate(predictionAndLabels)))
|
[
"noreply@github.com"
] |
akshay-kamloo.noreply@github.com
|
418d221217f93d833a5be72fb393a36c2a10e7d9
|
a250b4d880d4a9c2898bf8b2732171c58b78dcf3
|
/01_Jump_to_Python/Chapt04/169_ex2.py
|
e59f7406df148dd54bfca8a6dabb1230c80af0dc
|
[] |
no_license
|
psh89224/Bigdata
|
b399e8c253fddf96ffcc0be6b470beefdd592974
|
543b11a019ae905fd7452d47d5004aa5829b22ae
|
refs/heads/master
| 2020-03-15T09:27:23.972490
| 2018-11-08T00:52:55
| 2018-11-08T00:52:55
| 132,075,197
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 214
|
py
|
f=open("sample.txt", 'r')
lines = f.read()
print(lines)
f.close()
total = 0
for line in lines:
score =
total +=
average = total/10
print(average)
f = open("result.txt", 'w')
f.write(average)
f.close()
|
[
"USER@teset.com"
] |
USER@teset.com
|
20ec09e78d343668973d121c889e2e3673710b9a
|
22c552e93ee31da3e98f66f68eb7f099ba38f3ca
|
/day_18_1.py
|
c546fee5f5112f54ee0efd56aca710b796db2d3f
|
[] |
no_license
|
rishikesh-madabhushi/AdventOfCode
|
9e3b6c828c7d8a8b462d25ed65671746d536919d
|
4a31d46d4f149f771facc5176f79544052cace1d
|
refs/heads/main
| 2023-02-05T00:42:29.434509
| 2020-12-25T14:51:02
| 2020-12-25T14:51:02
| 317,571,704
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,612
|
py
|
import re
f = open("day_18.txt")
#f = open("test.txt")
def expr(a, op, b):
if op == '+':
return a + b
elif op == '-':
return a - b
elif op == '*':
return a * b
else:
return a / b
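# Precedence: when the operator is '*', the whole remainder of the
# (sub)expression is evaluated recursively first, so '+' effectively binds
# tighter than '*'.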
def evaluate(terms, idx):
value = 0
new_idx = idx + 1
if terms[idx].isnumeric():
value = int(terms[idx])
elif terms[idx] == '(':
(value, new_idx) = evaluate(terms, new_idx)
if terms[new_idx] == ')':
new_idx = new_idx + 1
else:
raise("What's going on?")
while new_idx < len(terms):
if terms[new_idx] == ')':
return(value, new_idx)
operand2 = 0
operand1 = value
operator = terms[new_idx]
new_idx += 1
if operator == '*':
(operand2, new_idx) = evaluate(terms, new_idx)
else:
if terms[new_idx].isnumeric():
operand2 = int(terms[new_idx])
new_idx += 1
elif terms[new_idx] == '(':
(operand2, new_idx) = evaluate(terms, new_idx + 1)
if terms[new_idx] != ')':
raise("Hmmm")
else:
new_idx = new_idx + 1
else:
raise("What's going on?")
value = expr(operand1, operator, operand2)
return (value, new_idx)
sumall = 0
for l in f.readlines():
l = re.sub(r"(\(|\))", r" \1 ", l)
terms = l.rstrip().split()
print(terms)
(value, new_idx) = evaluate(terms, 0)
sumall += value
print(sumall)
|
[
"rishikesh.ice@gmail.com"
] |
rishikesh.ice@gmail.com
|
48ce8fdc65cfefe7d1d3168389758bf06cd3fa4d
|
7dd1a817022df97f78e98be873c81dd90d861c85
|
/US32_test.py
|
65c3c7b1a7cf709e5daaee1942eee05c081a1645
|
[] |
no_license
|
DaiJiChen/Gencom-file-diagnose
|
c8bd1f946bcfbcbe77ff6e2c6e3c632a5717ccaa
|
a7253b72722ab5e17ea06b6c8a2481c041a29242
|
refs/heads/master
| 2023-01-01T14:27:18.665900
| 2020-10-25T03:16:09
| 2020-10-25T03:16:09
| 210,180,800
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 483
|
py
|
"""
@author: ziming
"""
import unittest
import Parser
import validate
class Testing(unittest.TestCase):
# A successful case
def test1(self):
gc = Parser.Gedcom("gedcomfile.ged")
self.assertEqual(gc.displayOutput("print US32"),1)
# A failure case
def test2(self):
gc = Parser.Gedcom("bad_gedcomfile.ged")
self.assertEqual(gc.displayOutput("print US32"),1)
if __name__ == '__main__':
    unittest.main()
|
[
"noreply@github.com"
] |
DaiJiChen.noreply@github.com
|
e0149e86a8d13396f8f06d0ab0b5b292bef2779c
|
ebd344b81dea258d8ef98c92376356a823ac2ffc
|
/python/det_response.py
|
bb8a4e3c9a5ded653595f9e5fc1530aeb5bd61ec
|
[] |
no_license
|
clark2668/icetradio
|
208691500a43e80ba94373bffea84ca409c9df0e
|
39e476794ae7466d158ecfbdb3d8530857e114cb
|
refs/heads/master
| 2023-01-31T21:11:41.804621
| 2020-12-16T23:47:01
| 2020-12-16T23:47:01
| 277,856,058
| 0
| 0
| null | 2020-10-28T17:20:47
| 2020-07-07T15:36:32
|
Python
|
UTF-8
|
Python
| false
| false
| 6,782
|
py
|
# python includes
import numpy as np
import os
from scipy.interpolate import interp1d
from icecube import icetray, dataclasses, icetradio
from icecube.dataclasses import I3Particle
from icecube.icetradio import util_geo, util_dataclasses
from NuRadioMC.SignalGen import askaryan
from NuRadioReco.utilities import units, fft
from radiotools import helper as hp
from NuRadioReco.detector import antennapattern
def fold_efields(efield, zenith, azimuth, antenna_orientation, antenna_pattern):
"""
A function to do fold efields with the antenna response
Apply the complex response of the antenna (the vector effective length)
to an efield, and return the efield after the antenna
Parameters
----------
signal: icetradio.I3EField
the efield at the antenna
zenith: float
the zenith angle (in radians!) of the signal incident on the antenna
azimuth: float
the azimuth angle (in radians!) of the signal incident on the antenna
antenna_orientation: array
array of floats, specifically the orientation_theta, orientation_phi,
rotation_theta, and rotation_phi, as they are defined in the NuRadioReco framework
see https://nu-radio.github.io/NuRadioReco/pages/detector_database_fields.html#antenna-table
or also the definitions in I3IceAntennaGeo
https://code.icecube.wisc.edu/projects/icecube/browser/IceCube/sandbox/brianclark/ehe/radio/trunk/dataclasses/public/dataclasses/geometry/I3IceAntennaGeo.h
antenna_pattern: NuRadioReco.detector.antennapattern
the antenna pattern for this antenna
Returns
-------
trace:
the voltage trace that will be observed after being folded with the antenna
"""
# get the frequencies where the efield needs to be evaluated
ff = util_dataclasses.get_frequencies_I3EField(efield)
# get the fourier transforms of the field
eTheta_freq = fft.time2freq(efield.eTheta.trace, efield.eTheta.samplingRate)
ePhi_freq = fft.time2freq(efield.ePhi.trace, efield.ePhi.samplingRate)
# get the vector effective length (VEL)
antenna_response = antenna_pattern.get_antenna_response_vectorized(ff, zenith, azimuth, *antenna_orientation)
VEL = np.array([antenna_response['theta'], antenna_response['phi']])
voltage_fft = np.sum(VEL * np.array([eTheta_freq, ePhi_freq]), axis=0)
# we need to make sure to cancel out the DC offset
voltage_fft[np.where(ff < 5 * units.MHz)] = 0.
voltage_trace = fft.freq2time(voltage_fft, efield.eR.samplingRate)
return voltage_trace
def apply_amplifier_filter(voltage_trace, dT, amplifier_filter_response):
"""
A function to apply amplifier+filter responses to a voltage trace
Apply the complex response of the amplifier and filter (magnitude and phase)
to a voltage trace, and return the trace after amplification
Parameters
----------
voltage_trace: array
the trace to which we want to apply the amplifier and filter
dT: float
the time between samples of the voltage trace
azimuth: float
the azimuth angle (in radians!) of the signal incident on the antenna
amplifier_filter_response: array
The dict containing the amplifier + filter response
As loaded in the load_filter_amplifier_response function
Returns
-------
trace:
the voltage trace that will be observed after applying the amps + filters
"""
orig_frequencies = amplifier_filter_response['frequencies']
orig_phase = amplifier_filter_response['phase']
orig_gain = amplifier_filter_response['gain']
# interpolate the phase and gain
interp_phase = interp1d(orig_frequencies, np.unwrap(orig_phase), bounds_error=False, fill_value=0)
interp_gain = interp1d(orig_frequencies, orig_gain, bounds_error=False, fill_value=0)
num_samples = len(voltage_trace) # the number of samples
frequencies = np.fft.rfftfreq(num_samples, dT)
gain = interp_gain(frequencies)
phase = np.exp(1j * interp_phase(frequencies))
the_fft = fft.time2freq(voltage_trace, 1./dT)
the_fft*=(gain*phase)
the_result_trace = fft.freq2time(the_fft, 1./dT)
return the_result_trace
def load_filter_amplifier_response(amplifier_filter_model):
"""
	A function to load the amplifier+filter responses
	The input should be an ASCII file
It should be the style of the ARA filter/amps file
https://github.com/ara-software/AraSim/blob/master/data/ARA_Electronics_TotalGain_TwoFilters.txt
We will always skip the first three rows
It should be a csv file, with the first column being frequency in MHz
The second column being gain in linear units
The third column being the phase in radians
The files should be put in the icetradio "data" directory
Parameters
----------
amplifier_filter_model: string
name of the amplifier filter model to be used (no .txt)
Returns
-------
antenna_model_dict: dictionary
dictionary containing the gain and phases
"""
data = np.loadtxt(os.path.join(os.environ['icetradio_path'],'data',amplifier_filter_model+'.txt'),
skiprows=3, delimiter=',')
response = {}
	response['frequencies'] = data[:,0]*units.MHz #MHz
response['gain'] = data[:,1] #unitless
response['phase'] = data[:, 2] * units.rad #radians
return response
def load_amplifier_filter_responses(antgeomap, amplifier_model_dict):
"""
	A function to load the amplifier/filter responses
Load all the response objects for every amplifier in the geomap.
Insert them as a key in the dictionary so we can call them later
Parameters
----------
antgeomap: I3IceAntennaGeometry geometry object
a map of IceAntKeys to IceAntGeo objects
amplifier_model_dict: dictionary
dictionary of amplifier+filter responses
Returns
-------
void
"""
for iceantkey, g in antgeomap:
amplifier_filter_model = g.amplifierFilterModel # get the amplifier + filter model
if amplifier_filter_model not in amplifier_model_dict.keys():
# only add if it's not already in the dict
the_model = load_filter_amplifier_response(amplifier_filter_model)
amplifier_model_dict[amplifier_filter_model] = the_model
def load_antenna_responses(antgeomap, antenna_pattern_dict):
"""
	A function to load the antenna responses
Load all the response objects for every antenna in the geomap.
Insert them as a key in the dictionary so we can call them later,
Parameters
----------
antgeomap: I3IceAntennaGeometry geometry object
a map of IceAntKeys to IceAntGeo objects
	antenna_pattern_dict: dictionary
		dictionary of antenna pattern objects, keyed by antenna model name
Returns
-------
void
"""
for iceantkey, g in antgeomap:
antenna_model = g.antennaModel # get the antenna model
if antenna_model not in antenna_pattern_dict.keys():
			# only add this antenna if it's not already in the dict
antenna_provider = antennapattern.AntennaPatternProvider()
antenna_pattern = antenna_provider.load_antenna_pattern(antenna_model)
antenna_pattern_dict[antenna_model] = antenna_pattern
|
[
"baclark@msu.edu"
] |
baclark@msu.edu
|
69bbe8fd1d442b7dff97819078e37bd83f805deb
|
df13b3eef82df0fbf19298595848826dd824ab6e
|
/csdn_scrapy/main.py
|
9cc779a9ccbbc83dfe96048a8cb2584d13a2ba56
|
[] |
no_license
|
bbbwang/csdn_scrapy
|
1714af43023e9a17a4d1be50d9efcacc64cc056f
|
a4bbc2a63fa641c93dc1ec75a8742d9eb6987fc0
|
refs/heads/master
| 2020-07-24T20:14:53.346322
| 2019-09-20T01:31:58
| 2019-09-20T01:31:58
| 208,036,173
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 221
|
py
|
from scrapy import cmdline
import time
# run the crawl command from inside the IDE
cmdline.execute('scrapy crawl csdn.com'.split())
# import os
# while 1:
# os.system("scrapy crawl csdn.com")
#     time.sleep(3600)  # sleep for an hour between runs
|
[
"amettursun@ebay.com"
] |
amettursun@ebay.com
|
a382c54444b6d892d8c4f5f2a50f6ab3e9bf21da
|
b37c027a3f63305345f266e8f4f944721adbb956
|
/TESTING/CorrelationEstimator/testing_ce.py
|
489b9b1be556497d1460eb397af904fdbc8bde02
|
[] |
no_license
|
andrehoracio97/investigacao
|
fdfb663867e6fe9f240bb828b7b96b99323f8be3
|
5dd1fad12f4991bb737ed236426247dfb52333eb
|
refs/heads/master
| 2022-10-11T02:08:30.478893
| 2020-06-16T09:58:13
| 2020-06-16T09:58:13
| 193,519,669
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 35,605
|
py
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
##################################################
# GNU Radio Python Flow Graph
# Title: Testing Ce
# GNU Radio version: 3.7.13.5
##################################################
if __name__ == '__main__':
import ctypes
import sys
if sys.platform.startswith('linux'):
try:
x11 = ctypes.cdll.LoadLibrary('libX11.so')
x11.XInitThreads()
except:
print "Warning: failed to XInitThreads()"
from PyQt4 import Qt
from gnuradio import blocks
from gnuradio import digital
from gnuradio import eng_notation
from gnuradio import gr
from gnuradio import qtgui
from gnuradio import uhd
from gnuradio.eng_option import eng_option
from gnuradio.filter import firdes
from gnuradio.filter import pfb
from gnuradio.qtgui import Range, RangeWidget
from optparse import OptionParser
import pmt
import random
import sip
import sys
import time
from gnuradio import qtgui
class testing_ce(gr.top_block, Qt.QWidget):
def __init__(self):
gr.top_block.__init__(self, "Testing Ce")
Qt.QWidget.__init__(self)
self.setWindowTitle("Testing Ce")
qtgui.util.check_set_qss()
try:
self.setWindowIcon(Qt.QIcon.fromTheme('gnuradio-grc'))
except:
pass
self.top_scroll_layout = Qt.QVBoxLayout()
self.setLayout(self.top_scroll_layout)
self.top_scroll = Qt.QScrollArea()
self.top_scroll.setFrameStyle(Qt.QFrame.NoFrame)
self.top_scroll_layout.addWidget(self.top_scroll)
self.top_scroll.setWidgetResizable(True)
self.top_widget = Qt.QWidget()
self.top_scroll.setWidget(self.top_widget)
self.top_layout = Qt.QVBoxLayout(self.top_widget)
self.top_grid_layout = Qt.QGridLayout()
self.top_layout.addLayout(self.top_grid_layout)
self.settings = Qt.QSettings("GNU Radio", "testing_ce")
self.restoreGeometry(self.settings.value("geometry").toByteArray())
##################################################
# Variables
##################################################
self.sps = sps = 4
self.nfilts = nfilts = 32
self.eb = eb = 0.22
self.tx_rrc_taps = tx_rrc_taps = firdes.root_raised_cosine(nfilts, nfilts, 1.0, eb, 5*sps*nfilts)
self.pld_const = pld_const = digital.constellation_rect(([0.707+0.707j, -0.707+0.707j, -0.707-0.707j, 0.707-0.707j]), ([0, 1, 3, 2]), 4, 2, 2, 1, 1).base()
self.pld_const.gen_soft_dec_lut(8)
self.taps_per_filt = taps_per_filt = len(tx_rrc_taps)/nfilts
self.samp_rate_array_MCR = samp_rate_array_MCR = [7500000,5000000,3750000,3000000,2500000,2000000,1500000,1000000,937500,882352,833333,714285,533333,500000,421052,400000,380952]
self.rxmod = rxmod = digital.generic_mod(pld_const, False, sps, True, eb, False, False)
self.rrc_taps = rrc_taps = firdes.root_raised_cosine(sps, sps, 1.0, eb, 11*sps)
self.ac_hex = ac_hex = [0xac, 0xdd, 0xa4, 0xe2, 0xf2, 0x8c, 0x20, 0xfc]
self.variable_qtgui_range_0_1 = variable_qtgui_range_0_1 = 39
self.variable_qtgui_range_0 = variable_qtgui_range_0 = 50
self.samp_rate = samp_rate = samp_rate_array_MCR[15]
self.rx_rrc_taps = rx_rrc_taps = firdes.root_raised_cosine(nfilts, nfilts*sps, 1.0, eb, 11*sps*nfilts)
self.rx_psf_taps = rx_psf_taps = firdes.root_raised_cosine(nfilts, sps*nfilts, 1.0, eb, 11*sps*nfilts)
self.modulated_sync_word = modulated_sync_word = digital.modulate_vector_bc(rxmod .to_basic_block(), (ac_hex), ([1]))
self.mark_delay = mark_delay = 87
self.frequencia_usrp = frequencia_usrp = 484e6
self.filt_delay_0 = filt_delay_0 = 1+(taps_per_filt-1)/2
self.filt_delay = filt_delay = 1+(len(rrc_taps)-1)/2
self.MCR = MCR = "master_clock_rate=60e6"
##################################################
# Blocks
##################################################
self._variable_qtgui_range_0_1_range = Range(0, 73, 1, 39, 200)
self._variable_qtgui_range_0_1_win = RangeWidget(self._variable_qtgui_range_0_1_range, self.set_variable_qtgui_range_0_1, 'Gain_RX', "counter_slider", float)
self.top_grid_layout.addWidget(self._variable_qtgui_range_0_1_win, 0, 2, 1, 1)
for r in range(0, 1):
self.top_grid_layout.setRowStretch(r, 1)
for c in range(2, 3):
self.top_grid_layout.setColumnStretch(c, 1)
self._variable_qtgui_range_0_range = Range(0, 90, 1, 50, 200)
self._variable_qtgui_range_0_win = RangeWidget(self._variable_qtgui_range_0_range, self.set_variable_qtgui_range_0, 'Gain_TX', "counter_slider", float)
self.top_grid_layout.addWidget(self._variable_qtgui_range_0_win, 0, 1, 1, 1)
for r in range(0, 1):
self.top_grid_layout.setRowStretch(r, 1)
for c in range(1, 2):
self.top_grid_layout.setColumnStretch(c, 1)
self.uhd_usrp_source_0 = uhd.usrp_source(
",".join(("serial=F5EAC0", MCR)),
uhd.stream_args(
cpu_format="fc32",
channels=range(1),
),
)
self.uhd_usrp_source_0.set_samp_rate(samp_rate)
self.uhd_usrp_source_0.set_time_now(uhd.time_spec(time.time()), uhd.ALL_MBOARDS)
self.uhd_usrp_source_0.set_center_freq(frequencia_usrp, 0)
self.uhd_usrp_source_0.set_gain(variable_qtgui_range_0_1, 0)
self.uhd_usrp_source_0.set_antenna('RX2', 0)
self.uhd_usrp_source_0.set_auto_dc_offset(True, 0)
self.uhd_usrp_source_0.set_auto_iq_balance(True, 0)
self.uhd_usrp_sink_0_0 = uhd.usrp_sink(
",".join(("serial=F5EAE1", MCR)),
uhd.stream_args(
cpu_format="fc32",
channels=range(1),
),
)
self.uhd_usrp_sink_0_0.set_samp_rate(samp_rate)
self.uhd_usrp_sink_0_0.set_time_now(uhd.time_spec(time.time()), uhd.ALL_MBOARDS)
self.uhd_usrp_sink_0_0.set_center_freq(frequencia_usrp, 0)
self.uhd_usrp_sink_0_0.set_gain(variable_qtgui_range_0, 0)
self.uhd_usrp_sink_0_0.set_antenna('TX/RX', 0)
self.qtgui_time_sink_x_1_0 = qtgui.time_sink_c(
1024, #size
samp_rate, #samp_rate
"RX USRP", #name
1 #number of inputs
)
self.qtgui_time_sink_x_1_0.set_update_time(0.10)
self.qtgui_time_sink_x_1_0.set_y_axis(-1, 1)
self.qtgui_time_sink_x_1_0.set_y_label('Amplitude', "")
self.qtgui_time_sink_x_1_0.enable_tags(-1, True)
self.qtgui_time_sink_x_1_0.set_trigger_mode(qtgui.TRIG_MODE_FREE, qtgui.TRIG_SLOPE_POS, 0.0, 0, 0, "")
self.qtgui_time_sink_x_1_0.enable_autoscale(False)
self.qtgui_time_sink_x_1_0.enable_grid(False)
self.qtgui_time_sink_x_1_0.enable_axis_labels(True)
self.qtgui_time_sink_x_1_0.enable_control_panel(False)
self.qtgui_time_sink_x_1_0.enable_stem_plot(False)
if not True:
self.qtgui_time_sink_x_1_0.disable_legend()
labels = ['', '', '', '', '',
'', '', '', '', '']
widths = [1, 1, 1, 1, 1,
1, 1, 1, 1, 1]
colors = ["blue", "red", "green", "black", "cyan",
"magenta", "yellow", "dark red", "dark green", "blue"]
styles = [1, 1, 1, 1, 1,
1, 1, 1, 1, 1]
markers = [-1, -1, -1, -1, -1,
-1, -1, -1, -1, -1]
alphas = [1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0]
for i in xrange(2):
if len(labels[i]) == 0:
if(i % 2 == 0):
self.qtgui_time_sink_x_1_0.set_line_label(i, "Re{{Data {0}}}".format(i/2))
else:
self.qtgui_time_sink_x_1_0.set_line_label(i, "Im{{Data {0}}}".format(i/2))
else:
self.qtgui_time_sink_x_1_0.set_line_label(i, labels[i])
self.qtgui_time_sink_x_1_0.set_line_width(i, widths[i])
self.qtgui_time_sink_x_1_0.set_line_color(i, colors[i])
self.qtgui_time_sink_x_1_0.set_line_style(i, styles[i])
self.qtgui_time_sink_x_1_0.set_line_marker(i, markers[i])
self.qtgui_time_sink_x_1_0.set_line_alpha(i, alphas[i])
self._qtgui_time_sink_x_1_0_win = sip.wrapinstance(self.qtgui_time_sink_x_1_0.pyqwidget(), Qt.QWidget)
self.top_grid_layout.addWidget(self._qtgui_time_sink_x_1_0_win, 1, 4, 1, 1)
for r in range(1, 2):
self.top_grid_layout.setRowStretch(r, 1)
for c in range(4, 5):
self.top_grid_layout.setColumnStretch(c, 1)
self.qtgui_time_sink_x_1 = qtgui.time_sink_c(
1024, #size
samp_rate, #samp_rate
"TX USRP", #name
1 #number of inputs
)
self.qtgui_time_sink_x_1.set_update_time(0.10)
self.qtgui_time_sink_x_1.set_y_axis(-1, 1)
self.qtgui_time_sink_x_1.set_y_label('Amplitude', "")
self.qtgui_time_sink_x_1.enable_tags(-1, True)
self.qtgui_time_sink_x_1.set_trigger_mode(qtgui.TRIG_MODE_FREE, qtgui.TRIG_SLOPE_POS, 0.0, 0, 0, "")
self.qtgui_time_sink_x_1.enable_autoscale(False)
self.qtgui_time_sink_x_1.enable_grid(False)
self.qtgui_time_sink_x_1.enable_axis_labels(True)
self.qtgui_time_sink_x_1.enable_control_panel(False)
self.qtgui_time_sink_x_1.enable_stem_plot(False)
if not True:
self.qtgui_time_sink_x_1.disable_legend()
labels = ['', '', '', '', '',
'', '', '', '', '']
widths = [1, 1, 1, 1, 1,
1, 1, 1, 1, 1]
colors = ["blue", "red", "green", "black", "cyan",
"magenta", "yellow", "dark red", "dark green", "blue"]
styles = [1, 1, 1, 1, 1,
1, 1, 1, 1, 1]
markers = [-1, -1, -1, -1, -1,
-1, -1, -1, -1, -1]
alphas = [1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0]
for i in xrange(2):
if len(labels[i]) == 0:
if(i % 2 == 0):
self.qtgui_time_sink_x_1.set_line_label(i, "Re{{Data {0}}}".format(i/2))
else:
self.qtgui_time_sink_x_1.set_line_label(i, "Im{{Data {0}}}".format(i/2))
else:
self.qtgui_time_sink_x_1.set_line_label(i, labels[i])
self.qtgui_time_sink_x_1.set_line_width(i, widths[i])
self.qtgui_time_sink_x_1.set_line_color(i, colors[i])
self.qtgui_time_sink_x_1.set_line_style(i, styles[i])
self.qtgui_time_sink_x_1.set_line_marker(i, markers[i])
self.qtgui_time_sink_x_1.set_line_alpha(i, alphas[i])
self._qtgui_time_sink_x_1_win = sip.wrapinstance(self.qtgui_time_sink_x_1.pyqwidget(), Qt.QWidget)
self.top_grid_layout.addWidget(self._qtgui_time_sink_x_1_win, 1, 3, 1, 1)
for r in range(1, 2):
self.top_grid_layout.setRowStretch(r, 1)
for c in range(3, 4):
self.top_grid_layout.setColumnStretch(c, 1)
self.qtgui_time_sink_x_0_1 = qtgui.time_sink_f(
100*2, #size
samp_rate, #samp_rate
'Rx Data', #name
1 #number of inputs
)
self.qtgui_time_sink_x_0_1.set_update_time(0.10)
self.qtgui_time_sink_x_0_1.set_y_axis(-1, 256)
self.qtgui_time_sink_x_0_1.set_y_label('Amplitude', "")
self.qtgui_time_sink_x_0_1.enable_tags(-1, True)
self.qtgui_time_sink_x_0_1.set_trigger_mode(qtgui.TRIG_MODE_FREE, qtgui.TRIG_SLOPE_POS, 0.0, 0, 0, 'packet_length_tag_key')
self.qtgui_time_sink_x_0_1.enable_autoscale(True)
self.qtgui_time_sink_x_0_1.enable_grid(True)
self.qtgui_time_sink_x_0_1.enable_axis_labels(True)
self.qtgui_time_sink_x_0_1.enable_control_panel(False)
self.qtgui_time_sink_x_0_1.enable_stem_plot(False)
if not True:
self.qtgui_time_sink_x_0_1.disable_legend()
labels = ['', '', '', '', '',
'', '', '', '', '']
widths = [1, 1, 1, 1, 1,
1, 1, 1, 1, 1]
colors = ["blue", "red", "green", "black", "cyan",
"magenta", "yellow", "dark red", "dark green", "blue"]
styles = [1, 1, 1, 1, 1,
1, 1, 1, 1, 1]
markers = [-1, -1, -1, -1, -1,
-1, -1, -1, -1, -1]
alphas = [1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0]
for i in xrange(1):
if len(labels[i]) == 0:
self.qtgui_time_sink_x_0_1.set_line_label(i, "Data {0}".format(i))
else:
self.qtgui_time_sink_x_0_1.set_line_label(i, labels[i])
self.qtgui_time_sink_x_0_1.set_line_width(i, widths[i])
self.qtgui_time_sink_x_0_1.set_line_color(i, colors[i])
self.qtgui_time_sink_x_0_1.set_line_style(i, styles[i])
self.qtgui_time_sink_x_0_1.set_line_marker(i, markers[i])
self.qtgui_time_sink_x_0_1.set_line_alpha(i, alphas[i])
self._qtgui_time_sink_x_0_1_win = sip.wrapinstance(self.qtgui_time_sink_x_0_1.pyqwidget(), Qt.QWidget)
self.top_grid_layout.addWidget(self._qtgui_time_sink_x_0_1_win, 2, 3, 1, 1)
for r in range(2, 3):
self.top_grid_layout.setRowStretch(r, 1)
for c in range(3, 4):
self.top_grid_layout.setColumnStretch(c, 1)
self.qtgui_time_sink_x_0_0_0 = qtgui.time_sink_f(
512, #size
1, #samp_rate
"", #name
1 #number of inputs
)
self.qtgui_time_sink_x_0_0_0.set_update_time(0.10)
self.qtgui_time_sink_x_0_0_0.set_y_axis(-100, 4000)
self.qtgui_time_sink_x_0_0_0.set_y_label('Amplitude', "")
self.qtgui_time_sink_x_0_0_0.enable_tags(-1, True)
self.qtgui_time_sink_x_0_0_0.set_trigger_mode(qtgui.TRIG_MODE_TAG, qtgui.TRIG_SLOPE_POS, 0, 15, 0, 'corr_est')
self.qtgui_time_sink_x_0_0_0.enable_autoscale(False)
self.qtgui_time_sink_x_0_0_0.enable_grid(False)
self.qtgui_time_sink_x_0_0_0.enable_axis_labels(True)
self.qtgui_time_sink_x_0_0_0.enable_control_panel(False)
self.qtgui_time_sink_x_0_0_0.enable_stem_plot(False)
if not False:
self.qtgui_time_sink_x_0_0_0.disable_legend()
labels = ['', '', '', '', '',
'', '', '', '', '']
widths = [1, 1, 1, 1, 1,
1, 1, 1, 1, 1]
colors = ["blue", "red", "green", "black", "cyan",
"magenta", "yellow", "dark red", "dark green", "blue"]
styles = [1, 1, 1, 1, 1,
1, 1, 1, 1, 1]
markers = [-1, -1, -1, -1, -1,
-1, -1, -1, -1, -1]
alphas = [1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0]
for i in xrange(1):
if len(labels[i]) == 0:
self.qtgui_time_sink_x_0_0_0.set_line_label(i, "Data {0}".format(i))
else:
self.qtgui_time_sink_x_0_0_0.set_line_label(i, labels[i])
self.qtgui_time_sink_x_0_0_0.set_line_width(i, widths[i])
self.qtgui_time_sink_x_0_0_0.set_line_color(i, colors[i])
self.qtgui_time_sink_x_0_0_0.set_line_style(i, styles[i])
self.qtgui_time_sink_x_0_0_0.set_line_marker(i, markers[i])
self.qtgui_time_sink_x_0_0_0.set_line_alpha(i, alphas[i])
self._qtgui_time_sink_x_0_0_0_win = sip.wrapinstance(self.qtgui_time_sink_x_0_0_0.pyqwidget(), Qt.QWidget)
self.top_grid_layout.addWidget(self._qtgui_time_sink_x_0_0_0_win, 3, 2, 1, 1)
for r in range(3, 4):
self.top_grid_layout.setRowStretch(r, 1)
for c in range(2, 3):
self.top_grid_layout.setColumnStretch(c, 1)
self.qtgui_time_sink_x_0_0 = qtgui.time_sink_c(
512, #size
1, #samp_rate
"", #name
1 #number of inputs
)
self.qtgui_time_sink_x_0_0.set_update_time(0.10)
self.qtgui_time_sink_x_0_0.set_y_axis(-100, 100)
self.qtgui_time_sink_x_0_0.set_y_label('Amplitude', "")
self.qtgui_time_sink_x_0_0.enable_tags(-1, True)
self.qtgui_time_sink_x_0_0.set_trigger_mode(qtgui.TRIG_MODE_TAG, qtgui.TRIG_SLOPE_POS, 0, 15, 0, 'corr_est')
self.qtgui_time_sink_x_0_0.enable_autoscale(False)
self.qtgui_time_sink_x_0_0.enable_grid(False)
self.qtgui_time_sink_x_0_0.enable_axis_labels(True)
self.qtgui_time_sink_x_0_0.enable_control_panel(False)
self.qtgui_time_sink_x_0_0.enable_stem_plot(False)
if not False:
self.qtgui_time_sink_x_0_0.disable_legend()
labels = ['', '', '', '', '',
'', '', '', '', '']
widths = [1, 1, 1, 1, 1,
1, 1, 1, 1, 1]
colors = ["blue", "red", "green", "black", "cyan",
"magenta", "yellow", "dark red", "dark green", "blue"]
styles = [1, 1, 1, 1, 1,
1, 1, 1, 1, 1]
markers = [-1, -1, -1, -1, -1,
-1, -1, -1, -1, -1]
alphas = [1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0]
for i in xrange(2):
if len(labels[i]) == 0:
if(i % 2 == 0):
self.qtgui_time_sink_x_0_0.set_line_label(i, "Re{{Data {0}}}".format(i/2))
else:
self.qtgui_time_sink_x_0_0.set_line_label(i, "Im{{Data {0}}}".format(i/2))
else:
self.qtgui_time_sink_x_0_0.set_line_label(i, labels[i])
self.qtgui_time_sink_x_0_0.set_line_width(i, widths[i])
self.qtgui_time_sink_x_0_0.set_line_color(i, colors[i])
self.qtgui_time_sink_x_0_0.set_line_style(i, styles[i])
self.qtgui_time_sink_x_0_0.set_line_marker(i, markers[i])
self.qtgui_time_sink_x_0_0.set_line_alpha(i, alphas[i])
self._qtgui_time_sink_x_0_0_win = sip.wrapinstance(self.qtgui_time_sink_x_0_0.pyqwidget(), Qt.QWidget)
self.top_grid_layout.addWidget(self._qtgui_time_sink_x_0_0_win, 2, 2, 1, 1)
for r in range(2, 3):
self.top_grid_layout.setRowStretch(r, 1)
for c in range(2, 3):
self.top_grid_layout.setColumnStretch(c, 1)
self.qtgui_const_sink_x_0_0_0_1 = qtgui.const_sink_c(
1024, #size
"RX Constellation", #name
1 #number of inputs
)
self.qtgui_const_sink_x_0_0_0_1.set_update_time(0.10)
self.qtgui_const_sink_x_0_0_0_1.set_y_axis(-2, 2)
self.qtgui_const_sink_x_0_0_0_1.set_x_axis(-2, 2)
self.qtgui_const_sink_x_0_0_0_1.set_trigger_mode(qtgui.TRIG_MODE_FREE, qtgui.TRIG_SLOPE_POS, 0.0, 0, "")
self.qtgui_const_sink_x_0_0_0_1.enable_autoscale(False)
self.qtgui_const_sink_x_0_0_0_1.enable_grid(False)
self.qtgui_const_sink_x_0_0_0_1.enable_axis_labels(True)
if not True:
self.qtgui_const_sink_x_0_0_0_1.disable_legend()
labels = ['', '', '', '', '',
'', '', '', '', '']
widths = [1, 1, 1, 1, 1,
1, 1, 1, 1, 1]
colors = ["blue", "red", "red", "red", "red",
"red", "red", "red", "red", "red"]
styles = [0, 0, 0, 0, 0,
0, 0, 0, 0, 0]
markers = [0, 0, 0, 0, 0,
0, 0, 0, 0, 0]
alphas = [1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0]
for i in xrange(1):
if len(labels[i]) == 0:
self.qtgui_const_sink_x_0_0_0_1.set_line_label(i, "Data {0}".format(i))
else:
self.qtgui_const_sink_x_0_0_0_1.set_line_label(i, labels[i])
self.qtgui_const_sink_x_0_0_0_1.set_line_width(i, widths[i])
self.qtgui_const_sink_x_0_0_0_1.set_line_color(i, colors[i])
self.qtgui_const_sink_x_0_0_0_1.set_line_style(i, styles[i])
self.qtgui_const_sink_x_0_0_0_1.set_line_marker(i, markers[i])
self.qtgui_const_sink_x_0_0_0_1.set_line_alpha(i, alphas[i])
self._qtgui_const_sink_x_0_0_0_1_win = sip.wrapinstance(self.qtgui_const_sink_x_0_0_0_1.pyqwidget(), Qt.QWidget)
self.top_grid_layout.addWidget(self._qtgui_const_sink_x_0_0_0_1_win, 2, 1, 1, 1)
for r in range(2, 3):
self.top_grid_layout.setRowStretch(r, 1)
for c in range(1, 2):
self.top_grid_layout.setColumnStretch(c, 1)
self.qtgui_const_sink_x_0_0_0_0 = qtgui.const_sink_c(
1024, #size
"TX Constellation", #name
1 #number of inputs
)
self.qtgui_const_sink_x_0_0_0_0.set_update_time(0.10)
self.qtgui_const_sink_x_0_0_0_0.set_y_axis(-2, 2)
self.qtgui_const_sink_x_0_0_0_0.set_x_axis(-2, 2)
self.qtgui_const_sink_x_0_0_0_0.set_trigger_mode(qtgui.TRIG_MODE_FREE, qtgui.TRIG_SLOPE_POS, 0.0, 0, "")
self.qtgui_const_sink_x_0_0_0_0.enable_autoscale(False)
self.qtgui_const_sink_x_0_0_0_0.enable_grid(False)
self.qtgui_const_sink_x_0_0_0_0.enable_axis_labels(True)
if not True:
self.qtgui_const_sink_x_0_0_0_0.disable_legend()
labels = ['', '', '', '', '',
'', '', '', '', '']
widths = [1, 1, 1, 1, 1,
1, 1, 1, 1, 1]
colors = ["blue", "red", "red", "red", "red",
"red", "red", "red", "red", "red"]
styles = [0, 0, 0, 0, 0,
0, 0, 0, 0, 0]
markers = [0, 0, 0, 0, 0,
0, 0, 0, 0, 0]
alphas = [1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0]
for i in xrange(1):
if len(labels[i]) == 0:
self.qtgui_const_sink_x_0_0_0_0.set_line_label(i, "Data {0}".format(i))
else:
self.qtgui_const_sink_x_0_0_0_0.set_line_label(i, labels[i])
self.qtgui_const_sink_x_0_0_0_0.set_line_width(i, widths[i])
self.qtgui_const_sink_x_0_0_0_0.set_line_color(i, colors[i])
self.qtgui_const_sink_x_0_0_0_0.set_line_style(i, styles[i])
self.qtgui_const_sink_x_0_0_0_0.set_line_marker(i, markers[i])
self.qtgui_const_sink_x_0_0_0_0.set_line_alpha(i, alphas[i])
self._qtgui_const_sink_x_0_0_0_0_win = sip.wrapinstance(self.qtgui_const_sink_x_0_0_0_0.pyqwidget(), Qt.QWidget)
self.top_grid_layout.addWidget(self._qtgui_const_sink_x_0_0_0_0_win, 1, 2, 1, 1)
for r in range(1, 2):
self.top_grid_layout.setRowStretch(r, 1)
for c in range(2, 3):
self.top_grid_layout.setColumnStretch(c, 1)
self.qtgui_const_sink_x_0 = qtgui.const_sink_c(
512, #size
"", #name
1 #number of inputs
)
self.qtgui_const_sink_x_0.set_update_time(0.10)
self.qtgui_const_sink_x_0.set_y_axis(-2, 2)
self.qtgui_const_sink_x_0.set_x_axis(-2, 2)
self.qtgui_const_sink_x_0.set_trigger_mode(qtgui.TRIG_MODE_FREE, qtgui.TRIG_SLOPE_POS, 0.0, 0, "")
self.qtgui_const_sink_x_0.enable_autoscale(False)
self.qtgui_const_sink_x_0.enable_grid(False)
self.qtgui_const_sink_x_0.enable_axis_labels(True)
if not False:
self.qtgui_const_sink_x_0.disable_legend()
labels = ['', '', '', '', '',
'', '', '', '', '']
widths = [1, 1, 1, 1, 1,
1, 1, 1, 1, 1]
colors = ["blue", "red", "red", "red", "red",
"red", "red", "red", "red", "red"]
styles = [0, 0, 0, 0, 0,
0, 0, 0, 0, 0]
markers = [0, 0, 0, 0, 0,
0, 0, 0, 0, 0]
alphas = [1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0]
for i in xrange(1):
if len(labels[i]) == 0:
self.qtgui_const_sink_x_0.set_line_label(i, "Data {0}".format(i))
else:
self.qtgui_const_sink_x_0.set_line_label(i, labels[i])
self.qtgui_const_sink_x_0.set_line_width(i, widths[i])
self.qtgui_const_sink_x_0.set_line_color(i, colors[i])
self.qtgui_const_sink_x_0.set_line_style(i, styles[i])
self.qtgui_const_sink_x_0.set_line_marker(i, markers[i])
self.qtgui_const_sink_x_0.set_line_alpha(i, alphas[i])
self._qtgui_const_sink_x_0_win = sip.wrapinstance(self.qtgui_const_sink_x_0.pyqwidget(), Qt.QWidget)
self.top_grid_layout.addWidget(self._qtgui_const_sink_x_0_win, 1, 1, 1, 1)
for r in range(1, 2):
self.top_grid_layout.setRowStretch(r, 1)
for c in range(1, 2):
self.top_grid_layout.setColumnStretch(c, 1)
self.pfb_arb_resampler_xxx_0 = pfb.arb_resampler_ccf(
sps,
taps=(tx_rrc_taps),
flt_size=nfilts)
self.pfb_arb_resampler_xxx_0.declare_sample_delay(filt_delay)
self.digital_pfb_clock_sync_xxx_0_0 = digital.pfb_clock_sync_ccf(sps, 6.28/400.0, (rx_rrc_taps), nfilts, nfilts/2, 1.5, 1)
self.digital_map_bb_0_0 = digital.map_bb((pld_const.pre_diff_code()))
self.digital_map_bb_0 = digital.map_bb((pld_const.pre_diff_code()))
self.digital_costas_loop_cc_0_0 = digital.costas_loop_cc(6.28/100.0, pld_const.arity(), False)
self.digital_correlate_access_code_xx_ts_0_0 = digital.correlate_access_code_bb_ts(digital.packet_utils.default_access_code,
1, 'packet_len')
self.digital_corr_est_cc_0 = digital.corr_est_cc((modulated_sync_word), sps, mark_delay, 0.99)
self.digital_constellation_decoder_cb_0_0 = digital.constellation_decoder_cb(pld_const)
self.digital_chunks_to_symbols_xx_0_0 = digital.chunks_to_symbols_bc((pld_const.points()), 1)
self.blocks_stream_mux_0_1_0 = blocks.stream_mux(gr.sizeof_char*1, (96, 896))
self.blocks_repack_bits_bb_2 = blocks.repack_bits_bb(1, 8, '', False, gr.GR_MSB_FIRST)
self.blocks_repack_bits_bb_1 = blocks.repack_bits_bb(8, 1, '', False, gr.GR_MSB_FIRST)
self.blocks_repack_bits_bb_0_1 = blocks.repack_bits_bb(1, pld_const.bits_per_symbol(), '', False, gr.GR_MSB_FIRST)
self.blocks_repack_bits_bb_0 = blocks.repack_bits_bb(pld_const.bits_per_symbol(), 1, '', False, gr.GR_MSB_FIRST)
self.blocks_multiply_const_vxx_1 = blocks.multiply_const_vcc((0.7, ))
self.blocks_file_source_0 = blocks.file_source(gr.sizeof_char*1, '/home/andre/Desktop/Trasmited/trasmit_10_mb.txt', False)
self.blocks_file_source_0.set_begin_tag(pmt.PMT_NIL)
self.blocks_file_sink_0_0_0_2 = blocks.file_sink(gr.sizeof_char*1, '/home/andre/Desktop/Trasmited/depois.txt', False)
self.blocks_file_sink_0_0_0_2.set_unbuffered(False)
self.blocks_complex_to_mag_squared_0 = blocks.complex_to_mag_squared(1)
self.blocks_char_to_float_1_0_1 = blocks.char_to_float(1, 1)
self.acode_1104 = blocks.vector_source_b([0x1, 0x0, 0x1, 0x0, 0x1, 0x1, 0x0, 0x0, 0x1, 0x1, 0x0, 0x1, 0x1, 0x1, 0x0, 0x1, 0x1, 0x0, 0x1, 0x0, 0x0, 0x1, 0x0, 0x0, 0x1, 0x1, 0x1, 0x0, 0x0, 0x0, 0x1, 0x0, 0x1, 0x1, 0x1, 0x1, 0x0, 0x0, 0x1, 0x0, 0x1, 0x0, 0x0, 0x0, 0x1, 0x1, 0x0, 0x0, 0x0, 0x0, 0x1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1, 0x1, 0x1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1, 0x1, 0x1, 0x0, 0x0, 0x0, 0x0], True, 1, [])
##################################################
# Connections
##################################################
self.connect((self.acode_1104, 0), (self.blocks_stream_mux_0_1_0, 0))
self.connect((self.blocks_char_to_float_1_0_1, 0), (self.qtgui_time_sink_x_0_1, 0))
self.connect((self.blocks_complex_to_mag_squared_0, 0), (self.qtgui_time_sink_x_0_0_0, 0))
self.connect((self.blocks_file_source_0, 0), (self.blocks_repack_bits_bb_1, 0))
self.connect((self.blocks_multiply_const_vxx_1, 0), (self.qtgui_const_sink_x_0_0_0_0, 0))
self.connect((self.blocks_multiply_const_vxx_1, 0), (self.qtgui_time_sink_x_1, 0))
self.connect((self.blocks_multiply_const_vxx_1, 0), (self.uhd_usrp_sink_0_0, 0))
self.connect((self.blocks_repack_bits_bb_0, 0), (self.digital_correlate_access_code_xx_ts_0_0, 0))
self.connect((self.blocks_repack_bits_bb_0_1, 0), (self.digital_map_bb_0, 0))
self.connect((self.blocks_repack_bits_bb_1, 0), (self.blocks_stream_mux_0_1_0, 1))
self.connect((self.blocks_repack_bits_bb_2, 0), (self.blocks_file_sink_0_0_0_2, 0))
self.connect((self.blocks_stream_mux_0_1_0, 0), (self.blocks_repack_bits_bb_0_1, 0))
self.connect((self.digital_chunks_to_symbols_xx_0_0, 0), (self.pfb_arb_resampler_xxx_0, 0))
self.connect((self.digital_constellation_decoder_cb_0_0, 0), (self.digital_map_bb_0_0, 0))
self.connect((self.digital_corr_est_cc_0, 1), (self.blocks_complex_to_mag_squared_0, 0))
self.connect((self.digital_corr_est_cc_0, 0), (self.digital_pfb_clock_sync_xxx_0_0, 0))
self.connect((self.digital_corr_est_cc_0, 1), (self.qtgui_time_sink_x_0_0, 0))
self.connect((self.digital_correlate_access_code_xx_ts_0_0, 0), (self.blocks_char_to_float_1_0_1, 0))
self.connect((self.digital_correlate_access_code_xx_ts_0_0, 0), (self.blocks_repack_bits_bb_2, 0))
self.connect((self.digital_costas_loop_cc_0_0, 0), (self.digital_constellation_decoder_cb_0_0, 0))
self.connect((self.digital_costas_loop_cc_0_0, 0), (self.qtgui_const_sink_x_0, 0))
self.connect((self.digital_map_bb_0, 0), (self.digital_chunks_to_symbols_xx_0_0, 0))
self.connect((self.digital_map_bb_0_0, 0), (self.blocks_repack_bits_bb_0, 0))
self.connect((self.digital_pfb_clock_sync_xxx_0_0, 0), (self.digital_costas_loop_cc_0_0, 0))
self.connect((self.pfb_arb_resampler_xxx_0, 0), (self.blocks_multiply_const_vxx_1, 0))
self.connect((self.uhd_usrp_source_0, 0), (self.digital_corr_est_cc_0, 0))
self.connect((self.uhd_usrp_source_0, 0), (self.qtgui_const_sink_x_0_0_0_1, 0))
self.connect((self.uhd_usrp_source_0, 0), (self.qtgui_time_sink_x_1_0, 0))
def closeEvent(self, event):
self.settings = Qt.QSettings("GNU Radio", "testing_ce")
self.settings.setValue("geometry", self.saveGeometry())
event.accept()
def get_sps(self):
return self.sps
def set_sps(self, sps):
self.sps = sps
self.set_rxmod(digital.generic_mod(self.pld_const, False, self.sps, True, self.eb, False, False))
self.pfb_arb_resampler_xxx_0.set_rate(self.sps)
def get_nfilts(self):
return self.nfilts
def set_nfilts(self, nfilts):
self.nfilts = nfilts
self.set_taps_per_filt(len(self.tx_rrc_taps)/self.nfilts)
def get_eb(self):
return self.eb
def set_eb(self, eb):
self.eb = eb
self.set_rxmod(digital.generic_mod(self.pld_const, False, self.sps, True, self.eb, False, False))
def get_tx_rrc_taps(self):
return self.tx_rrc_taps
def set_tx_rrc_taps(self, tx_rrc_taps):
self.tx_rrc_taps = tx_rrc_taps
self.set_taps_per_filt(len(self.tx_rrc_taps)/self.nfilts)
self.pfb_arb_resampler_xxx_0.set_taps((self.tx_rrc_taps))
def get_pld_const(self):
return self.pld_const
def set_pld_const(self, pld_const):
self.pld_const = pld_const
self.set_rxmod(digital.generic_mod(self.pld_const, False, self.sps, True, self.eb, False, False))
def get_taps_per_filt(self):
return self.taps_per_filt
def set_taps_per_filt(self, taps_per_filt):
self.taps_per_filt = taps_per_filt
self.set_filt_delay_0(1+(self.taps_per_filt-1)/2)
def get_samp_rate_array_MCR(self):
return self.samp_rate_array_MCR
def set_samp_rate_array_MCR(self, samp_rate_array_MCR):
self.samp_rate_array_MCR = samp_rate_array_MCR
self.set_samp_rate(self.samp_rate_array_MCR[15])
def get_rxmod(self):
return self.rxmod
def set_rxmod(self, rxmod):
self.rxmod = rxmod
def get_rrc_taps(self):
return self.rrc_taps
def set_rrc_taps(self, rrc_taps):
self.rrc_taps = rrc_taps
self.set_filt_delay(1+(len(self.rrc_taps)-1)/2)
def get_ac_hex(self):
return self.ac_hex
def set_ac_hex(self, ac_hex):
self.ac_hex = ac_hex
def get_variable_qtgui_range_0_1(self):
return self.variable_qtgui_range_0_1
def set_variable_qtgui_range_0_1(self, variable_qtgui_range_0_1):
self.variable_qtgui_range_0_1 = variable_qtgui_range_0_1
self.uhd_usrp_source_0.set_gain(self.variable_qtgui_range_0_1, 0)
def get_variable_qtgui_range_0(self):
return self.variable_qtgui_range_0
def set_variable_qtgui_range_0(self, variable_qtgui_range_0):
self.variable_qtgui_range_0 = variable_qtgui_range_0
self.uhd_usrp_sink_0_0.set_gain(self.variable_qtgui_range_0, 0)
def get_samp_rate(self):
return self.samp_rate
def set_samp_rate(self, samp_rate):
self.samp_rate = samp_rate
self.uhd_usrp_source_0.set_samp_rate(self.samp_rate)
self.uhd_usrp_sink_0_0.set_samp_rate(self.samp_rate)
self.qtgui_time_sink_x_1_0.set_samp_rate(self.samp_rate)
self.qtgui_time_sink_x_1.set_samp_rate(self.samp_rate)
self.qtgui_time_sink_x_0_1.set_samp_rate(self.samp_rate)
def get_rx_rrc_taps(self):
return self.rx_rrc_taps
def set_rx_rrc_taps(self, rx_rrc_taps):
self.rx_rrc_taps = rx_rrc_taps
self.digital_pfb_clock_sync_xxx_0_0.update_taps((self.rx_rrc_taps))
def get_rx_psf_taps(self):
return self.rx_psf_taps
def set_rx_psf_taps(self, rx_psf_taps):
self.rx_psf_taps = rx_psf_taps
def get_modulated_sync_word(self):
return self.modulated_sync_word
def set_modulated_sync_word(self, modulated_sync_word):
self.modulated_sync_word = modulated_sync_word
def get_mark_delay(self):
return self.mark_delay
def set_mark_delay(self, mark_delay):
self.mark_delay = mark_delay
self.digital_corr_est_cc_0.set_mark_delay(self.mark_delay)
def get_frequencia_usrp(self):
return self.frequencia_usrp
def set_frequencia_usrp(self, frequencia_usrp):
self.frequencia_usrp = frequencia_usrp
self.uhd_usrp_source_0.set_center_freq(self.frequencia_usrp, 0)
self.uhd_usrp_sink_0_0.set_center_freq(self.frequencia_usrp, 0)
def get_filt_delay_0(self):
return self.filt_delay_0
def set_filt_delay_0(self, filt_delay_0):
self.filt_delay_0 = filt_delay_0
def get_filt_delay(self):
return self.filt_delay
def set_filt_delay(self, filt_delay):
self.filt_delay = filt_delay
def get_MCR(self):
return self.MCR
def set_MCR(self, MCR):
self.MCR = MCR
def main(top_block_cls=testing_ce, options=None):
from distutils.version import StrictVersion
if StrictVersion(Qt.qVersion()) >= StrictVersion("4.5.0"):
style = gr.prefs().get_string('qtgui', 'style', 'raster')
Qt.QApplication.setGraphicsSystem(style)
qapp = Qt.QApplication(sys.argv)
tb = top_block_cls()
tb.start()
tb.show()
def quitting():
tb.stop()
tb.wait()
qapp.connect(qapp, Qt.SIGNAL("aboutToQuit()"), quitting)
qapp.exec_()
if __name__ == '__main__':
main()
|
[
"andresilvamail@gmail.com"
] |
andresilvamail@gmail.com
|
c33b3d02d04a7bac703bf073187bcc1ba14afbf0
|
60bc7f0f94ee1e79be14005b1fef33e3e2fb5ee5
|
/build/catkin_generated/generate_cached_setup.py
|
93a6b9945f90528516f27a8c15d81aa1167bf4d2
|
[] |
no_license
|
kohtaro246/adp
|
bb0c93bf4176ce00090f3a1a5097a2ff028fe0f5
|
7f84efa694f5e39db2b481298be6e2a6ff679b02
|
refs/heads/master
| 2023-01-31T11:12:30.101732
| 2020-12-17T10:07:53
| 2020-12-17T10:07:53
| 321,580,127
| 0
| 1
| null | 2020-12-17T10:07:54
| 2020-12-15T06:57:19
|
Makefile
|
UTF-8
|
Python
| false
| false
| 1,254
|
py
|
# -*- coding: utf-8 -*-
from __future__ import print_function
import os
import stat
import sys
# find the import for catkin's python package - either from source space or from an installed underlay
if os.path.exists(os.path.join('/opt/ros/melodic/share/catkin/cmake', 'catkinConfig.cmake.in')):
sys.path.insert(0, os.path.join('/opt/ros/melodic/share/catkin/cmake', '..', 'python'))
try:
from catkin.environment_cache import generate_environment_script
except ImportError:
# search for catkin package in all workspaces and prepend to path
for workspace in '/opt/ros/melodic'.split(';'):
python_path = os.path.join(workspace, 'lib/python2.7/dist-packages')
if os.path.isdir(os.path.join(python_path, 'catkin')):
sys.path.insert(0, python_path)
break
from catkin.environment_cache import generate_environment_script
code = generate_environment_script('/home/adp/catkin_ws/devel/env.sh')
output_filename = '/home/adp/catkin_ws/build/catkin_generated/setup_cached.sh'
with open(output_filename, 'w') as f:
# print('Generate script for cached setup "%s"' % output_filename)
f.write('\n'.join(code))
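# make the generated setup script executable by its owner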
mode = os.stat(output_filename).st_mode
os.chmod(output_filename, mode | stat.S_IXUSR)
|
[
"kohtaro246@gmail.com"
] |
kohtaro246@gmail.com
|
a507d6f697a47fe68ceec087548744c10e06ba94
|
097c76a4ba7e0247cf10739f61bc8e620cdb7af9
|
/HW_4/7.py
|
7ed26441e82f8eaa81fbe3a71c47e436d632209a
|
[] |
no_license
|
mihirsamdarshi/CMSI185Repo
|
ca07fc8940893c61376a9934817f457fe8f38205
|
a370778027cb716d432bb095f4d9381c50bdcbdb
|
refs/heads/master
| 2020-12-02T10:30:58.710306
| 2016-12-13T23:43:55
| 2016-12-13T23:43:55
| 67,733,042
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 154
|
py
|
speed = float(input('Enter a rotation speed: '))
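# model: the rope snaps when 2*speed**2/3 exceeds the threshold of 60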
if ((2 * (speed**2))/ 3) > 60:
print('Rope will snap')
else:
print('Rope will not snap')
|
[
"mihirsamdarshi@yahoo.com"
] |
mihirsamdarshi@yahoo.com
|
94ae81be542e5eb6598216269feb217867c98289
|
66ad6bd8e1743ff1bce5b0f6860041de0ba5d91e
|
/functions_clerigo.py
|
5ad15aff62b19edfbecaf7f209a3d5535b1d0188
|
[] |
no_license
|
MarkClerigo/fcpc_python_projects
|
d6ff9d1a8188596f72e31d92273bb0273a7665b6
|
2236484a62adb78eed65b667a426252ef905516a
|
refs/heads/master
| 2020-04-08T09:56:55.917797
| 2019-03-12T01:50:51
| 2019-03-12T01:50:51
| 159,246,926
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 163
|
py
|
#Create Function
#Default Value
def xyz(a=10,b=20,c=30):
print(a+b+c)
xyz()
#Explicit Assignment of parameters
def asd(a,b,c):
print(a+b+c)
asd(5,15,20)
|
[
"noreply@github.com"
] |
MarkClerigo.noreply@github.com
|
e6fcacc37f9c61488f4594ec494cc707bb731492
|
43a281752d9e1c18713aa069b218d7dab9fff92a
|
/run/move-pseudo.py
|
2bbd76282385a108ad0f0711baa34c038ee82e08
|
[
"MIT"
] |
permissive
|
osmanbaskaya/mapping-impact
|
9d0a5098bdcf0fcad4d7a53c1c3c1e15281bd423
|
8024dd3b916ac2dfc336221dd32faba4c0a98442
|
refs/heads/master
| 2020-05-18T02:29:47.486594
| 2016-02-18T21:33:34
| 2016-02-18T21:33:34
| 12,935,363
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 459
|
py
|
#! /usr/bin/python
# -*- coding: utf-8 -*-
__author__ = "Osman Baskaya"
import os, shutil
out = '../data/pos-filtering/'
missing = open('dropbox-missing.txt').readlines()
files = os.listdir(out)
total = 0
for f in files:
fn = f.rsplit('.', 2)[0]
for m in missing:
m = m.strip()
if m == fn:
total += 1
shutil.copy(out + f, 'missing/' + f)
print total, len(missing)
assert len(missing) == (total / 5)
|
[
"obaskaya@ku.edu.tr"
] |
obaskaya@ku.edu.tr
|
365076fc80033c34b7408188d707ce611220bfc4
|
12d7a543a4013bfbeae29de2b5470dba8402236f
|
/3 제약충족 문제/csp.py
|
6e441471337df1b38fd97c6d1e8b5eef142e12a7
|
[] |
no_license
|
painh/classicpythonalgorithm
|
a23c188654174cfb326932f23852877df687a555
|
933dcd7bf55589a28882592dc7a551210bb0e8dc
|
refs/heads/master
| 2022-12-18T20:48:58.986235
| 2020-09-16T14:53:58
| 2020-09-16T14:53:58
| 293,810,759
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,577
|
py
|
from typing import Generic, TypeVar, Dict, List, Optional
from abc import ABC, abstractmethod
V = TypeVar('V')  # variable type
D = TypeVar('D')  # domain type
class Constraint(Generic[V, D], ABC):
def __init__(self, variables: List[V]) -> None:
self.variables = variables
@abstractmethod
def satisfied(self, assignment: Dict[V, D]) -> bool:
...
class CSP(Generic[V, D]):
def __init__(self, variables: List[V], domains: Dict[V, List[D]]) -> None:
self.variables: List[V] = variables
self.domains: Dict[V, List[D]] = domains
self.constrainsts: Dict[V, List[Constraint[V, D]]] = {}
for variable in self.variables:
self.constrainsts[variable] = []
if variable not in self.domains:
raise LookupError("모든 변수에 도메인이 할당 되어야 합니다.")
def add_constraint(self, constraint: Constraint[V, D]) -> None:
for variable in constraint.variables:
if variable not in self.variables:
raise LookupError("제약 조건 벼수가 아닙니다.")
else:
self.constrainsts[variable].append(constraint)
def consistent(self, variable: V, assigment: Dict[V, D]) -> bool:
for constraint in self.constrainsts[variable]:
if not constraint.satisfied(assigment):
return False
return True
def backtracking_search(self, assignment: Dict[V, D] = {}) -> Optional[Dict[V, D]]:
        # assignment is complete once every variable has been assigned (base case)
if len(assignment) == len(self.variables):
return assignment
        # collect every variable that has not been assigned yet
unassigned: List[V] = [
v for v in self.variables if v not in assignment]
        # try every possible domain value of the first unassigned variable
first: V = unassigned[0]
for value in self.domains[first]:
local_assignment = assignment.copy()
local_assignment[first] = value
            # if the local assignment is consistent, recurse
if self.consistent(first, local_assignment):
result: Optional[Dict[V, D]] = self.backtracking_search(
local_assignment)
                # if a result was found, stop backtracking and return it
if result is not None:
return result
return None
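# Illustrative demo (not part of the original module): a minimal "not equal"
# constraint exercises CSP.backtracking_search end to end.
if __name__ == "__main__":
    class NotEqualConstraint(Constraint[str, int]):
        def __init__(self, a: str, b: str) -> None:
            super().__init__([a, b])
            self.a = a
            self.b = b
        def satisfied(self, assignment: Dict[str, int]) -> bool:
            # two variables cannot conflict until both are assigned
            if self.a not in assignment or self.b not in assignment:
                return True
            return assignment[self.a] != assignment[self.b]
    demo = CSP(["x", "y"], {"x": [1, 2], "y": [1, 2]})
    demo.add_constraint(NotEqualConstraint("x", "y"))
    print(demo.backtracking_search())  # e.g. {'x': 1, 'y': 2}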
|
[
"gthpgth@gmail.com"
] |
gthpgth@gmail.com
|
cf7687d3fabcd7f8948426fb39baf0835acabec6
|
c7846ee0828539c2a2019928c1cbf3abd35665bf
|
/2117_홈 방범 서비스.py
|
f9dffdb1022e9bb1bbf469a49a04df36144470e6
|
[] |
no_license
|
whiteblue0/sw_problems
|
10476601c8d6d68d42e2f30af87fcde1e5dbbcc5
|
1cefc6236cccc20477bf4eadb458a0fd06b09126
|
refs/heads/master
| 2020-06-20T10:44:57.463275
| 2020-05-03T07:27:57
| 2020-05-03T07:27:57
| 197,098,448
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,295
|
py
|
import sys
sys.stdin = open("2117.txt")
from collections import deque
# right, down, up, left
dx = [1,0,0,-1]
dy = [0,1,-1,0]
def isbenefit(house,cost):
return bool((house*M - cost)>=0)
def ispass(y,x):
return 0<=x<N and 0<=y<N and not visited[y][x]
def bfs(sy,sx,K):
global result
house = 0
que = deque()
que.append((sy,sx))
visited[sy][sx] = 1
if data[sy][sx]:
house += 1
while que:
y,x = que.popleft()
for i in range(4):
ny,nx = y+dy[i],x+dx[i]
if ispass(ny,nx) and visited[y][x] < K:
visited[ny][nx] = visited[y][x] + 1
if data[ny][nx]:
house += 1
que.append((ny,nx))
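    # a diamond-shaped service area of radius K covers K*K + (K-1)*(K-1) cells,
    # which is the operating cost of the service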
cost = K*K + (K - 1)*(K - 1)
if isbenefit(house,cost) and house > result:
result = house
T = int(input())
for tc in range(1,T+1):
N,M = map(int,input().split())
data = [list(map(int,input().split())) for _ in range(N)]
result = 0
for i in range(N):
for j in range(N):
for k in range(1,N+3):
visited = [[0] * N for _ in range(N)]
bfs(i,j,k)
# for _ in range(N):
# print(visited[_])
# print()
print("#{} {}".format(tc,result))
|
[
"ghn03153@gmail.com"
] |
ghn03153@gmail.com
|
4f77bcb3f264e2e7bc0b5737d7497ffa7ecbe21b
|
ccd0843ef54fa8679bb57abc61b39e8bb2fa04af
|
/elecciones/eleccion/models.py
|
d9eca84f2e1d983f14403a8a52be93c57ed4e0a4
|
[] |
no_license
|
i32enrea/PW
|
a770f9aefa535e62290546ba54acd1097f325dc3
|
00aa9ef0122759d296063a717d073bb4c4b35ea1
|
refs/heads/master
| 2021-01-22T16:13:40.685336
| 2017-09-04T19:03:23
| 2017-09-04T19:03:23
| 102,391,397
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 795
|
py
|
from __future__ import unicode_literals
from django.db import models
# Create your models here.
class partidos(models.Model):
nombre = models.CharField(max_length = 100)
def __unicode__(self):
return self.nombre
class mesa(models.Model):
nombre = models.CharField(max_length = 100)
partidos = models.ManyToManyField(partidos)
def __unicode__(self):
return self.nombre
class circunscripcion(models.Model):
nombre = models.CharField(max_length = 100)
mesas = models.ManyToManyField(mesa)
def __unicode__(self):
return self.nombre
class resultado(models.Model):
partido = models.ForeignKey(partidos, related_name='partido')
mesa = models.ForeignKey(mesa, related_name='mesa')
resultado = models.IntegerField(default=0)
def __unicode__(self):
        return unicode(self.resultado)
|
[
"noreply@github.com"
] |
i32enrea.noreply@github.com
|
f519dfba6b3570aa5fb76916bf826540617da5bc
|
742b733a72d51d545f8f78ef027b594a872fad2b
|
/ml/ft_tools.py
|
9571e2aa50eaa1d97aae86e8ab61b25e608fa6d0
|
[
"MIT"
] |
permissive
|
rongyua/QAJudge
|
9ef4ef1d1080972f34267e663b4ed0da672e40b9
|
393d8cafac090c1161157d25080c6b73713676a9
|
refs/heads/master
| 2023-08-10T14:34:27.364093
| 2020-04-11T10:24:06
| 2020-04-11T10:24:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 885
|
py
|
possible_set = [
(196, 0),
(348, 0),
# (133, 1),
(264, 0),
(133, 0),
(354, 0),
(234, 0),
(266, 0),
(274, 0),
(347, 0),
(267, 0),
(263, 0),
(238, 0),
(275, 0),
(345, 0),
(312, 0),
# (134, 0),
(224, 0),
(141, 0),
(128, 0),
(303, 0),
# (150, 0),
# (269, 0),
]
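# get_ft_id maps a case's law references ((article, paragraph) pairs taken from
# data["meta_info"]["law"]) onto an index into possible_set; it returns -1
# unless exactly one distinct reference matches the list above.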
def get_ft_id(data):
cnt = 0
ft_list = data["meta_info"]["law"]
se = set()
for x, y, z in ft_list:
se.add((x, y))
temp = list(se)
for x, y in temp:
if (x, y) in possible_set:
cnt += 1
if cnt != 1:
return -1
for x, y in temp:
if (x, y) in possible_set:
for a in range(0, len(possible_set)):
if possible_set[a] == (x, y):
return a
raise NotImplementedError
def get_ft_num():
return len(possible_set)
|
[
"wangyz17@tsinghua.org.cn"
] |
wangyz17@tsinghua.org.cn
|
f18a7797acace22a421ead142f9757dccadc59cb
|
0f16edb46a48f9b5a125abb56fc0545ede1d65aa
|
/utilities/src/d1_util/cert_create_ca.py
|
5bd75a9a50d2557873a14e63de246905aa5cae15
|
[
"Apache-2.0"
] |
permissive
|
DataONEorg/d1_python
|
5e685f1af0c356190f2d6df45d1ac849e2f56972
|
d72a9461894d9be7d71178fb7310101b8ef9066a
|
refs/heads/master
| 2023-08-29T03:16:38.131760
| 2023-06-27T21:59:37
| 2023-06-27T21:59:37
| 60,103,877
| 15
| 12
|
Apache-2.0
| 2023-09-06T18:27:53
| 2016-05-31T16:01:00
|
Python
|
UTF-8
|
Python
| false
| false
| 2,790
|
py
|
#!/usr/bin/env python
# This work was created by participants in the DataONE project, and is
# jointly copyrighted by participating institutions in DataONE. For
# more information on DataONE, see our web site at http://dataone.org.
#
# Copyright 2009-2017 DataONE
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generate a self signed root Certificate Authority (CA) certificate.
The certificate can be used for issuing certificates and sign CSRs that are locally
trusted.
This is an example on how to use the DataONE Client and Common libraries for Python. It
shows how to:
- Use the d1_common.cert.x509 module to create a local self-signed CA certificate.
"""
import argparse
import os
import d1_common.cert.x509
import d1_common.util
import d1_common.utils.ulog
def main():
parser = argparse.ArgumentParser(
description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter
)
parser.add_argument("common_name", action="store", help="E.g., localCA")
parser.add_argument(
"ca_path", action="store", help="Save path for PEM formatted CA certificate"
    )
    parser.add_argument(
        "--debug", action="store_true", help="Debug level logging"
    )
    args = parser.parse_args()
d1_common.utils.ulog.setup(is_debug=args.debug)
try:
create_ca(args)
except CACreateError as e:
print("Error: {}".format((str(e))))
except KeyboardInterrupt:
print("Interrupted")
def create_ca(args):
ca_private_key = d1_common.cert.x509.generate_private_key()
ca_name = d1_common.cert.x509.create_simple_dn(args.common_name)
ca_cert = d1_common.cert.x509.generate_ca_cert(ca_name, ca_private_key)
ca_pw_bytes = d1_common.cert.x509.input_key_passphrase("CA private key")
pem_path = (
args.ca_path if args.ca_path.lower().endswith(".pem") else args.ca_path + ".pem"
)
d1_common.cert.x509.save_pem(
pem_path, d1_common.cert.x509.serialize_cert_to_pem(ca_cert)
)
print("Wrote CA certificate to: {}".format(pem_path))
key_path = os.path.splitext(pem_path)[0] + ".key.pem"
d1_common.cert.x509.save_pem(
key_path,
d1_common.cert.x509.serialize_private_key_to_pem(ca_private_key, ca_pw_bytes),
)
print("Wrote CA private key to: {}".format(key_path))
class CACreateError(Exception):
pass
if __name__ == "__main__":
main()
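# Example invocation (illustrative):
#   python cert_create_ca.py localCA ./local_ca.pem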
|
[
"git@dahlsys.com"
] |
git@dahlsys.com
|
9793b580cb50d08f82da0188eb8812aad0659086
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02850/s368986446.py
|
f1b857d567b3088a886070f3484aa90d7fee1e94
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 768
|
py
|
import collections
n = int(input())
graph = [tuple(map(int, input().split())) for _ in range(n - 1)]
tree = [[] for _ in range(n)]
deg = [0] * n
color = {}
for a, b in graph:
a, b = min(a - 1, b - 1), max(a - 1, b - 1)
deg[a] += 1
deg[b] += 1
tree[a].append(b)
tree[b].append(a)
color[(a, b)] = 0
color_max = max(deg)
print(color_max)
c = [0] * n
c[0] = -1
que = collections.deque([0])
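# BFS from the root, coloring child edges 1, 2, 3, ... in order while skipping
# the color of the edge that leads into the current node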
while len(que) != 0:
i = que.popleft()
tmp = 1
for j in tree[i]:
if c[j] != 0: continue
a, b = min(i, j), max(i, j)
if tmp == c[i]: tmp += 1
color[(a, b)] = tmp
c[j] = tmp
que.append(j)
tmp += 1
for a, b in graph:
a, b = min(a - 1, b - 1), max(a - 1, b - 1)
print(color[(a, b)])
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
14bf2a0b021e5c07f08fe12e443059dd2e1061c3
|
418d25dd2ffbec0a9f159753ac3ed4ead1a874bb
|
/Se6 Ta1 Q2.py
|
430cb3dc4a52538523d6a7cea2cfa5a1701cb298
|
[] |
no_license
|
ivens-da-silva-brito/S-6
|
7fba794724051af306a38b5eef10206b9e219532
|
1cf14ea09353b7cee6be56499e602fb84984bd97
|
refs/heads/master
| 2022-12-24T08:44:31.850575
| 2020-09-28T07:46:46
| 2020-09-28T07:46:46
| 299,229,283
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 229
|
py
|
cont= 0
total = 0
while True:
numero = int(input())
cont += 1
total += numero
if numero ==0:
cont-=1
break
if cont ==0:
print("nao houv numero valido")
else:
print(total/cont)
|
[
"noreply@github.com"
] |
ivens-da-silva-brito.noreply@github.com
|
942f213b2565a626678cf125cb3bc61b7f42ee89
|
630e5afcde3ff732eaa27791e78dcd854abd7fdd
|
/dictionaries/favourite_languages.py
|
494397e917bc0de77f05d4806f10bc94fa362371
|
[] |
no_license
|
beanie29/pythonstudy
|
9bf0bfe0f5504e202f1777d6b6bb3c3d6a11edd1
|
490ae6ea26af99823f6e5a8432c5fa462e9c4016
|
refs/heads/master
| 2020-08-11T20:43:55.918852
| 2019-11-05T22:09:23
| 2019-11-05T22:09:23
| 214,624,175
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 404
|
py
|
favourite_languages = {
'jen': ['python', 'ruby'],
'sarah': ['c'],
'edward': ['ruby', 'go'],
'phil': ['python', 'haskell'],
}
for name, languages in favourite_languages.items():
    if len(languages) == 1:
        noun, verb = 'language', 'is'
    else:
        noun, verb = 'languages', 'are'
    print(f"\n{name.title()}'s favourite {noun} {verb}:")
for language in languages:
print(f"\t{language.title()}")
|
[
"ISSUMCH@ad.wbs.ac.uk"
] |
ISSUMCH@ad.wbs.ac.uk
|
5e448cecca1e048433816f17a2ddebcb7702f039
|
2d2c10ffa7aa5ee35393371e7f8c13b4fab94446
|
/projects/ai/ad/src/model.py
|
65b0001051c970db25690816cd3ea14e11041b24
|
[] |
no_license
|
faker2081/pikachu2
|
bec83750a5ff3c7b5a26662000517df0f608c1c1
|
4f06d47c7bf79eb4e5a22648e088b3296dad3b2d
|
refs/heads/main
| 2023-09-02T00:28:41.723277
| 2021-11-17T11:15:44
| 2021-11-17T11:15:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 17,144
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# ==============================================================================
# \file model.py
# \author chenghuige
# \date 2020-04-12 20:13:51.596792
# \Description
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import os
from absl import flags
FLAGS = flags.FLAGS
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import backend as K
from tensorflow.keras.layers import Input
import numpy as np
import melt
import gezi
logging = gezi.logging
# from config import *
from projects.ai.ad.src.config import *
# self.encoder = melt.layers.transformer.Encoder(num_layers=5, d_model=16, num_heads=2,
# dff=16, maximum_position_encoding=100, rate=1)
# self.encoder = tf.keras.layers.GRU(32, return_sequences=False,
# dropout=0.1, recurrent_dropout=0.1)
# class Baseline(keras.Model):
# def __init__(self):
# super(Baseline, self).__init__()
# self.cemb = tf.keras.layers.Embedding(5000000, FLAGS.emb_size, name='cemb')
# self.encoder = melt.layers.Pooling('sum')
# self.dense_age = keras.layers.Dense(1)
# self.dense_gender = keras.layers.Dense(1)
# def call(self, input):
# # gezi.set('input', input)
# creative_ids = input['creative_ids']
# x = self.encoder(self.cemb(creative_ids))
# self.age = self.dense_age(x)
# self.gender = self.dense_gender(x)
# self.pred_age = tf.math.sigmoid(self.age)
# self.pred_gender = tf.math.sigmoid(self.gender)
# return self.gender
# class ClsTransformer(keras.Model):
# def __init__(self):
# super(ClsTransformer, self).__init__()
# # self.cemb = tf.keras.layers.Embedding(5000000, FLAGS.emb_size, name='cemb')
# self.aemb = tf.keras.layers.Embedding(4000000, FLAGS.emb_size, name='aemb')
# self.pemb = tf.keras.layers.Embedding(20, FLAGS.emb_size, name='pemb')
# self.iemb = tf.keras.layers.Embedding(400, FLAGS.emb_size, name='iemb')
# self.temb = tf.keras.layers.Embedding(100, FLAGS.emb_size, name='temb')
# # self.ctemb = tf.keras.layers.Embedding(200, FLAGS.emb_size, name='ctemb')
# self.encoder = melt.layers.transformer.Encoder(num_layers=FLAGS.num_layers, d_model=256, num_heads=FLAGS.num_heads,
# dff=512, maximum_position_encoding=FLAGS.max_len + 1, rate=FLAGS.dropout)
# self.combine = melt.layers.SemanticFusionCombine()
# self.dense_age = keras.layers.Dense(10)
# self.dense_gender = keras.layers.Dense(1)
# def call(self, input):
# # gezi.set('input', input)
# LEN = FLAGS.max_len
# x_in = input['ad_ids'][:,:LEN]
# x_mask = tf.not_equal(x_in, 0)
# x_len = melt.length(x_in)
# # x_c = self.cemb(x_in)
# x_a = self.aemb(x_in)
# x_p = self.pemb(input['product_categories'][:,:LEN])
# x_i = self.iemb(input['industries'][:,:LEN])
# x_t = self.temb(input['times'][:,:LEN])
# # x_ct = self.ctemb(input['click_times'][:,:5000])
# x = tf.concat([x_a, x_p, x_i, x_t], axis=-1)
# # x_other = tf.concat([x_p, x_i, x_t], axis=-1)
# # x = self.combine(x_a, x_other)
# x = self.encoder(x, mask=x_mask)
# x = melt.layers.Pooling(FLAGS.pooling)(x, x_len)
# self.age = self.dense_age(x)
# self.gender = self.dense_gender(x)
# # self.pred_age = tf.math.sigmoid(self.age)
# self.pred_age = tf.argmax(self.age, axis=1)
# self.pred_gender = tf.math.sigmoid(self.gender)
# return self.gender
# class ClsTransformer2(keras.Model):
# def __init__(self):
# super(ClsTransformer2, self).__init__()
# # self.cemb = tf.keras.layers.Embedding(5000000 + 2, FLAGS.emb_size, name='cemb')
# self.aemb = tf.keras.layers.Embedding(4000000 + 2, FLAGS.emb_size, name='aemb')
# self.pemb = tf.keras.layers.Embedding(20 + 2, FLAGS.emb_size, name='pemb')
# self.iemb = tf.keras.layers.Embedding(400 + 2, FLAGS.emb_size, name='iemb')
# self.temb = tf.keras.layers.Embedding(100 + 2, FLAGS.emb_size, name='temb')
# # self.ctemb = tf.keras.layers.Embedding(200, FLAGS.emb_size, name='ctemb')
# self.encoder = melt.layers.transformer.Encoder(num_layers=FLAGS.num_layers, d_model=256, num_heads=FLAGS.num_heads,
# dff=512, maximum_position_encoding=FLAGS.max_len + 10, rate=FLAGS.dropout)
# self.combine = melt.layers.SemanticFusionCombine()
# self.dense_age = keras.layers.Dense(10)
# self.dense_gender = keras.layers.Dense(1)
# def call(self, input):
# # gezi.set('input', input)
# LEN = FLAGS.max_len
# ad_ids = input['ad_ids'][:,:LEN]
# dummy = tf.zeros_like(ad_ids)[:,:1]
# mask = tf.cast(tf.not_equal(ad_ids, 0), tf.int64)
# delta = mask * 2
# ad_ids = tf.concat([dummy + 1, dummy + 2, ad_ids + delta], axis=-1)
# product_categories = input['product_categories'][:,:LEN]
# product_categories = tf.concat([dummy + 1, dummy + 2, product_categories + delta], axis=-1)
# industries = input['industries'][:,:LEN]
# industries = tf.concat([dummy + 1, dummy + 2, industries + delta], axis=-1)
# times = input['times'][:,:LEN]
# times = tf.concat([dummy + 1, dummy + 2, times + delta], axis=-1)
# x_in = ad_ids
# x_mask = tf.not_equal(x_in, 0)
# x_len = melt.length(x_in)
# # x_c = self.cemb(x_in)
# x_a = self.aemb(x_in)
# x_p = self.pemb(product_categories)
# x_i = self.iemb(industries)
# x_t = self.temb(times)
# # x_ct = self.ctemb(input['click_times'][:,:5000])
# x = tf.concat([x_a, x_p, x_i, x_t], axis=-1)
# # x_other = tf.concat([x_p, x_i, x_t], axis=-1)
# # x = self.combine(x_a, x_other)
# x = self.encoder(x, mask=x_mask)
# # x = melt.layers.Pooling(FLAGS.pooling)(x, x_len)
# x_age = x[:, 0, :]
# x_gender = x[:, 1, :]
# self.age = self.dense_age(x_age)
# self.gender = self.dense_gender(x_gender)
# # self.pred_age = tf.math.sigmoid(self.age)
# self.pred_age = tf.argmax(self.age, axis=1)
# self.pred_gender = tf.math.sigmoid(self.gender)
# return self.gender
class ClsModel(keras.Model):
def __init__(self):
super(ClsModel, self).__init__()
# self.cemb = tf.keras.layers.Embedding(3420000, FLAGS.emb_size, name='cemb')
if FLAGS.use_w2v:
emb = np.load('../input/all/glove-min5/emb.npy')
FLAGS.vocab_size = emb.shape[0]
FLAGS.emb_size = emb.shape[1]
self.aemb = tf.keras.layers.Embedding(FLAGS.vocab_size, FLAGS.emb_size, name='aemb',
embeddings_initializer=tf.constant_initializer(emb),
trainable=FLAGS.train_emb)
else:
self.aemb = tf.keras.layers.Embedding(FLAGS.vocab_size, FLAGS.emb_size, name='aemb',
trainable=True)
self.piemb = tf.keras.layers.Embedding(70000, FLAGS.emb_size, name='piemb')
self.pemb = tf.keras.layers.Embedding(20, FLAGS.emb_size, name='pemb')
self.iemb = tf.keras.layers.Embedding(400, FLAGS.emb_size, name='iemb')
self.temb = tf.keras.layers.Embedding(100, FLAGS.emb_size, name='temb')
# self.ctemb = tf.keras.layers.Embedding(200, FLAGS.emb_size, name='ctemb')
Encoder = getattr(tf.keras.layers, FLAGS.encoder)
Encoder = tf.compat.v1.keras.layers.CuDNNGRU
# self.encoder = Encoder(FLAGS.hidden_size, return_sequences=True)
# dropout=FLAGS.dropout, recurrent_dropout=FLAGS.rdropout)
self.encoder = melt.layers.CudnnRnn(num_layers=FLAGS.num_layers,
num_units=FLAGS.hidden_size,
keep_prob=1. - FLAGS.dropout,
share_dropout=False,
recurrent_dropout=True,
concat_layers=FLAGS.concat_layers,
bw_dropout=True,
residual_connect=False,
train_init_state=False,
cell='lstm')
# self.dropout = tf.keras.layers.Dropout(FLAGS.dropout)
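    # optional language-model pretraining head: a sampled-softmax projection
    # over the target id vocabulary, built only when FLAGS.lm_target is set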
if FLAGS.lm_target:
vsize = 1000000 if FLAGS.lm_target in ['ad_ids', 'creative_ids'] else 70000
self.sampled_weight = self.add_weight(name='sampled_weight',
shape=(vsize, FLAGS.hidden_size),
#initializer = keras.initializers.RandomUniform(minval=-10, maxval=10, seed=None),
dtype=tf.float32,
trainable=True)
self.sampled_bias = self.add_weight(name='sampled_bias',
shape=(vsize,),
#initializer = keras.initializers.RandomUniform(minval=-10, maxval=10, seed=None),
dtype=tf.float32,
trainable=True)
self.softmax_loss_function = melt.seq2seq.gen_sampled_softmax_loss_function(100,
vsize,
weights=self.sampled_weight,
biases=self.sampled_bias,
log_uniform_sample=True,
is_predict=False,
sample_seed=1234)
# self.combine = melt.layers.SemanticFusionCombine()
self.dropout = keras.layers.Dropout(0.2)
self.dense_age = keras.layers.Dense(10, name='dense_age')
self.dense_gender = keras.layers.Dense(1, name='dense_gender')
self.pooling = melt.layers.Pooling(FLAGS.pooling)
def call(self, input):
gezi.set('input', input)
LEN = FLAGS.max_len
x_in = input['ad_ids'][:,:LEN]
x_mask = tf.not_equal(x_in, 0)
x_len = melt.length(x_in) if FLAGS.use_mask else None
# x_c = self.cemb(input['creative_ids'][:,:LEN])
x_a = self.aemb(x_in)
# x_pi = self.piemb(input['product_ids'][:,:LEN])
# x_p = self.pemb(input['product_categories'][:,:LEN])
# x_i = self.iemb(input['industries'][:,:LEN])
# x_t = self.temb(input['times'][:,:LEN])
# x_ct = self.ctemb(input['click_times'][:,:5000])
# x = tf.concat([x_a, x_p, x_pi, x_i, x_t], axis=-1)
x = x_a
# x_other = tf.concat([x_p, x_i, x_t], axis=-1)
# x = self.combine(x_a, x_other)
x = self.dropout(x)
x = self.encoder(x, x_len)
# x = self.encoder(x)
# print(x)
# x = self.dropout(x)
if FLAGS.lm_target:
return x
x = self.pooling(x, x_len)
self.age = self.dense_age(x)
self.gender = self.dense_gender(x)
# self.pred_age = tf.math.sigmoid(self.age)
self.pred_age = tf.argmax(self.age, axis=1)
self.pred_gender = tf.math.sigmoid(self.gender)
return self.age
class ClsModel2(keras.Model):
def __init__(self):
super(ClsModel2, self).__init__()
# self.cemb = tf.keras.layers.Embedding(3420000, FLAGS.emb_size, name='cemb')
if FLAGS.use_w2v:
emb = np.load('../input/all/glove-min5/emb.npy')
FLAGS.vocab_size = emb.shape[0]
FLAGS.emb_size = emb.shape[1]
self.aemb = tf.keras.layers.Embedding(FLAGS.vocab_size, FLAGS.emb_size, name='aemb',
embeddings_initializer=tf.constant_initializer(emb),
trainable=FLAGS.train_emb)
else:
self.aemb = tf.keras.layers.Embedding(FLAGS.vocab_size, FLAGS.emb_size, name='aemb',
trainable=FLAGS.train_emb)
self.piemb = tf.keras.layers.Embedding(70000, FLAGS.emb_size, name='piemb')
self.pemb = tf.keras.layers.Embedding(20, FLAGS.emb_size, name='pemb')
self.iemb = tf.keras.layers.Embedding(400, FLAGS.emb_size, name='iemb')
self.temb = tf.keras.layers.Embedding(100, FLAGS.emb_size, name='temb')
# self.ctemb = tf.keras.layers.Embedding(200, FLAGS.emb_size, name='ctemb')
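    # learned positional embedding, added to the token embeddings before the
    # transformer encoder (self-attention alone is order-agnostic)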
self.position_emb = tf.keras.layers.Embedding(10000, 320, name='position_emb')
# Encoder = getattr(tf.keras.layers, FLAGS.encoder)
# self.encoder = Encoder(FLAGS.hidden_size, return_sequences=True,
# dropout=FLAGS.dropout, recurrent_dropout=FLAGS.rdropout)
self.encoder = melt.layers.transformer.Encoder(num_layers=1, d_model=128, num_heads=4,
dff=128, rate=0)
# self.encoder = melt.layers.CudnnRnn(num_layers=FLAGS.num_layers,
# num_units=FLAGS.hidden_size,
# keep_prob=1. - FLAGS.dropout,
# share_dropout=False,
# recurrent_dropout=False,
# concat_layers=FLAGS.concat_layers,
# bw_dropout=False,
# residual_connect=False,
# train_init_state=False,
# cell='lstm')
# self.dropout = tf.keras.layers.Dropout(FLAGS.dropout)
if FLAGS.lm_target:
vsize = 1000000 if FLAGS.lm_target in ['ad_ids', 'creative_ids'] else 70000
self.sampled_weight = self.add_weight(name='sampled_weight',
shape=(vsize, FLAGS.hidden_size),
#initializer = keras.initializers.RandomUniform(minval=-10, maxval=10, seed=None),
dtype=tf.float32,
trainable=True)
self.sampled_bias = self.add_weight(name='sampled_bias',
shape=(vsize,),
#initializer = keras.initializers.RandomUniform(minval=-10, maxval=10, seed=None),
dtype=tf.float32,
trainable=True)
self.softmax_loss_function = melt.seq2seq.gen_sampled_softmax_loss_function(100,
vsize,
weights=self.sampled_weight,
biases=self.sampled_bias,
log_uniform_sample=True,
is_predict=False,
sample_seed=1234)
self.mlp = melt.layers.MLP([128, 32], drop_rate=0.2, name='mlp')
# self.combine = melt.layers.SemanticFusionCombine()
self.dense_age = keras.layers.Dense(10, name='dense_age')
self.dense_gender = keras.layers.Dense(1, name='dense_gender')
self.pooling = melt.layers.Pooling(FLAGS.pooling)
def call(self, input):
gezi.set('input', input)
# tf.print(input['age'])
# if K.learning_phase():
# with open('/tmp/1.txt', 'a') as out:
# print(gezi.decode(input['id'].numpy()).astype(int), file=out)
# if K.learning_phase():
# print(gezi.decode(input['id'].numpy()).astype(int))
LEN = FLAGS.max_len
x_in = input['ad_ids'][:,:LEN]
x_mask = tf.not_equal(x_in, 0)
x_len = melt.length(x_in) if FLAGS.use_mask else None
# x_c = self.cemb(input['creative_ids'][:,:LEN])
x_a = self.aemb(x_in)
# x_pi = self.piemb(input['product_ids'][:,:LEN])
# x_p = self.pemb(input['product_categories'][:,:LEN])
# x_i = self.iemb(input['industries'][:,:LEN])
# x_t = self.temb(input['times'][:,:LEN])
# # x_ct = self.ctemb(input['click_times'][:,:5000])
# x = tf.concat([x_a, x_p, x_pi, x_i, x_t], axis=-1)
x = x_a
# x_other = tf.concat([x_p, x_i, x_t], axis=-1)
# x = self.combine(x_a, x_other)
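        # add learned absolute position embeddings before the transformer encoder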
x += self.position_emb(melt.get_positions(x))
x = self.encoder(x)
# print(x)
# x = self.dropout(x)
if FLAGS.lm_target:
return x
x = self.pooling(x, x_len)
# x = self.mlp(x)
self.age = self.dense_age(x)
self.gender = self.dense_gender(x)
# self.pred_age = tf.math.sigmoid(self.age)
self.pred_age = tf.argmax(self.age, axis=1)
self.pred_gender = tf.math.sigmoid(self.gender)
return self.age
|
[
"chenghuige@gmail.com"
] |
chenghuige@gmail.com
|
0e61d28fb4685fae1e844483f6497e113ca42ae9
|
34aa10ad60869c515fb9c75488e3033244f9f524
|
/stack/150.py
|
a823cca50ced0d8eb9105b724fbb7c59c9c3a217
|
[] |
no_license
|
superMC5657/leetcode-py
|
558a98a4f995a545b42abe044ce23fd8d55d4b2b
|
0bbad7154f9f936eca42a34d7772ffdc3c94e6b2
|
refs/heads/master
| 2023-06-24T09:35:12.655302
| 2021-07-26T15:47:25
| 2021-07-26T15:47:25
| 270,066,500
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,902
|
py
|
# -*- coding: utf-8 -*-
# !@time: 2021-03-21 14:45:45
# !@author: superMC @email: 18758266469@163.com
# !@question title: evaluate-reverse-polish-notation
# Evaluate the value of an arithmetic expression given in Reverse Polish Notation.
#
# Valid operators are +, -, * and /. Each operand may be an integer or another
# RPN expression.
#
# Notes:
#
# Integer division truncates toward zero (only the integer part is kept).
# The given RPN expression is always valid: it always evaluates to a valid
# number and never divides by zero.
#
# Example 1:
#
# Input: tokens = ["2","1","+","3","*"]
# Output: 9
# Explanation: in infix notation the expression is ((2 + 1) * 3) = 9
#
# Example 2:
#
# Input: tokens = ["4","13","5","/","+"]
# Output: 6
# Explanation: in infix notation the expression is (4 + (13 / 5)) = 6
#
# Example 3:
#
# Input: tokens = ["10","6","9","3","+","-11","*","/","*","17","+","5","+"]
# Output: 22
# Explanation:
# in infix notation the expression is:
# ((10 * (6 / ((9 + 3) * -11))) + 17) + 5
# = ((10 * (6 / (12 * -11))) + 17) + 5
# = ((10 * (6 / -132)) + 17) + 5
# = ((10 * 0) + 17) + 5
# = (0 + 17) + 5
# = 17 + 5
# = 22
#
# Constraints:
#
# 1 <= tokens.length <= 10^4
# tokens[i] is either an operator ("+", "-", "*" or "/") or an integer in the
# range [-200, 200]
#
# Reverse Polish Notation:
#
# RPN is a postfix notation, i.e. operators are written after their operands.
#
# Everyday arithmetic uses infix notation, e.g. ( 1 + 2 ) * ( 3 + 4 ).
# The same expression in RPN is ( ( 1 2 + ) ( 3 4 + ) * ).
#
# Two main advantages of RPN:
#
# With the parentheses removed the expression is still unambiguous: 1 2 + 3 4 + *
# evaluates correctly when processed left to right.
# It suits stack evaluation: push numbers; on an operator, pop the top two
# numbers, apply it, and push the result back.
#
# Related Topics: stack
# 👍 319 👎 0
from typing import List
# leetcode submit region begin(Prohibit modification and deletion)
class Solution:
def evalRPN(self, tokens: List[str]) -> int:
stack = []
for token in tokens:
if token not in ("+","-","*","/"):
stack.append(int(token))
else:
num2 = stack.pop()
num1 = stack.pop()
stack.append(self.evaluate(num1, num2, token))
return stack[0]
def evaluate(self, num1, num2, op):
if op == "+":
return num1 + num2
elif op == "-":
return num1 - num2
elif op == "*":
return num1 * num2
elif op == "/":
return int(num1 / float(num2))
# leetcode submit region end(Prohibit modification and deletion)
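# Quick sanity check using the problem's own examples (illustrative):
#   Solution().evalRPN(["2", "1", "+", "3", "*"])   # -> 9
#   Solution().evalRPN(["4", "13", "5", "/", "+"])  # -> 6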
|
[
"18758266469.com"
] |
18758266469.com
|
7dca8524710f9522b297b367c4aacaea5aba1716
|
f42598c5be4408ebec48828923869cb003677ebd
|
/integer_data_type.py
|
fd34b80747002537d6c41f2be95820e9c2e54aac
|
[] |
no_license
|
pravinmaske/Udemy_Python
|
e117a5f024390eaa01061018878903bcff9f435d
|
0468218e9035f86b09f7ae25493fd5e5c40f91c2
|
refs/heads/master
| 2020-06-01T07:30:18.976375
| 2019-06-09T16:54:30
| 2019-06-09T16:54:30
| 190,700,153
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 221
|
py
|
import time
import sys
def calc(a):
    # time ten million multiplications involving a very large integer
    for i in range(10000000):
        2*a
start=time.perf_counter()
calc(2**100000)
end=time.perf_counter()
print('Time taken for calculation of 2**100000:',end-start)
|
[
"pravinmaske963@gmail.com"
] |
pravinmaske963@gmail.com
|
0df2986ecc8f394823d4929862f8056415531cb8
|
72bbbd03004d690ae654cffa36d291a1d769ca23
|
/icelandic_vocab.py
|
df61533e1789f2fcc1eb2e4f34a2de03e682a3fd
|
[] |
no_license
|
treecasiano/vocabulary_game
|
e90a1b0926b4b83f1997747effd26a01157b596c
|
720d02d1e08db2c40a0332bb85218a0435878b9f
|
refs/heads/master
| 2020-04-06T03:53:30.957596
| 2015-10-29T17:28:09
| 2015-10-29T17:28:09
| 37,989,691
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,242
|
py
|
#!/usr/bin/env python
# coding: utf-8
colors = {
'black' : 'svartur',
'blue' : 'blár',
'brown' : 'brúnn',
'green' : 'grænn',
'gray' : 'grár',
'orange' : 'appelsínugulur',
'pink' : 'bleikur',
'purple' : 'fjólublár',
'red' : 'rauður',
'white' : 'hvítur',
'yellow' : 'gulur'
}
numbers = {
'1' : 'einn',
'2' : 'tveir',
'3' : 'þrír',
'4' : 'fjórir',
'5' : 'fimm',
'6' : 'sex',
'7' : 'sjö',
'8' : 'átta',
'9' : 'níu',
'10' : 'tíu',
'11' : 'ellefu',
'12' : 'tólf',
'13' : 'þrettán',
'14' : 'fjórtán',
'15' : 'fimmtán',
'16' : 'sextán',
'17' : 'sautján',
'18' : 'átján',
'19' : 'nítján',
'20' : 'tuttugu',
'21' : 'tuttugu og einn',
'22' : 'tuttugu og tveir',
'23' : 'tuttugu og þrír',
'30' : 'þrjátíu',
'31' : 'þrjátíu og einn',
'40' : 'fjörutíu',
'50' : 'fimmtíu',
'60' : 'sextíu',
'70' : 'sjötíu',
'80' : 'áttatíu',
'90' : 'níutíu',
'100': 'hundrað',
'101': 'hundrað og einn',
'200': 'tvö hundruð'
}
body_parts = {
'arm' : 'armur',
'back' : 'hryggur',
'belly button' : 'nafli',
'blood' : 'blóð',
'cheek' : 'kinn',
'chest (for a man)' : 'brjóstkassi',
'chest/breast (for a woman)' : 'brjóst',
'chin' : 'haka',
'ear' : 'eyra',
'elbow' : 'olnbogi',
'eye' : 'auga',
'finger' : 'fingur',
'foot' : 'fótur',
'forehead' : 'enni',
'hair' : 'hár',
'hand' : 'hönd',
'head' : 'höfuð',
'heart' : 'hjarta',
'heel' : 'hæll',
'hip' : 'mjöðm',
'index finger' : 'vísifingur',
'knee' : 'kné',
'larynx' : 'barkakýli',
'leg' : 'fótur',
'lip' : 'vör',
'middle finger' : 'löngutöng',
'mouth' : 'munnur',
'muscle' : 'vöðvi',
'mustache' : 'grön',
'nail' : 'nögl',
'neck' : 'háls',
'nose' : 'nef',
'pinkie' : 'litilfingur',
    'rib' : 'rifbein',  # the source had Italian 'la costola'; 'rifbein' is the likely intended Icelandic
'ring finger' : 'hringfingur',
'shoulder' : 'öxl',
'skeleton' : 'beinagrind',
'stomach' : 'maga',
'teeth' : 'tönn',
'thigh' : 'læri',
'thumb' : 'þumall',
'vein' : 'æð',
'wrist' : 'úlnlið'
}
|
[
"tree.pdx@gmail.com"
] |
tree.pdx@gmail.com
|
026efd17fcede4475441a166270eec3c8db8b725
|
c17942b9b9db4081a9b4bc75b44cdf48a926cc94
|
/EV3PiBaseCode_spg9.py
|
4c8c4926a45ea8016493ab8c476c53bbb34d59dd
|
[] |
no_license
|
sgordon291us/lego-ev3
|
fb456401c55211368bdedb731c88eba896cd5073
|
468b7b8c7a8b2075977d58fb66da126a591049f8
|
refs/heads/master
| 2020-06-13T17:20:32.707813
| 2020-04-17T13:55:44
| 2020-04-17T13:55:44
| 194,729,992
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,353
|
py
|
#! /usr/bin/env python
# import packages
import serial
import time
import datetime
import struct
import os
def printSerIntInfo(ser):
#print('End Device Name {} Port = {} Baud Rate = {} IsOpen = {}\n XonXoff = {}'.format(ser.name,
# ser.port, ser.baudrate, ser.isOpen(), ser.xonxoff))
print(ser.get_settings())
## print('Settings: {}'.format(ser.getSettingsDict()))
## print('Num bytes in waiting = {} isOpen = {}'.format(ser.inWaiting(), ser.isOpen()))
## print('CD {} CTS {} DSR {} RI {}'.format(ser.getCD(), ser.getCTS(), ser.getDSR(), ser.getRI()))
def main():
EV3 = serial.Serial('/dev/rfcomm5',timeout=1)
printSerIntInfo(EV3)
#Debugging
## print('End device name = ',EV3.name)
## print('Baud rate = ', EV3.baudrate)
timeLastRx = datetime.datetime.now()
print("Local Time Now ",format(timeLastRx))
RxToPeriod = 5 #Seconds. This is the amount of time that can pass before doing a buffer reset
while 1:
## print('Time {} CD {} CTS {} DSR {} RI {} InWaiting {}'.format(datetime.datetime.now().time(), EV3.getCD(),
## EV3.getCTS(), EV3.getDSR(), EV3.getRI(), EV3.inWaiting()))
if EV3.inWaiting() >= 2: # check for ev3 message
## print('inWaiting = {}'.format(EV3.inWaiting()))
# Get the number of bytes in this message
s = EV3.read(2)
# struct.unpack returns a tuple unpack using []
[numberOfBytes] = struct.unpack("<H", s)
## print numberOfBytes,
# Wait for the message to complete
## print("{} Expecting num of bytes {}".format(datetime.datetime.now().time(), numberOfBytes))
while EV3.inWaiting() < numberOfBytes:
print "Waiting at point 1"
print "Expecting num of bytes", numberOfBytes
time.sleep(0.01)
#read number of bytes
s = s + EV3.read(numberOfBytes)
## print('s = {}'.format(s))
s = s[6:]
# Get the mailboxName
mailboxNameLength = ord(s[0])
mailboxName = s[1:1+mailboxNameLength-1]
## print mailboxName,
s = s[mailboxNameLength+1: ]
# Get the message text
[messageLength] = struct.unpack("<H", s[0:2])
message = s[2:2+messageLength]
timeLastRx = datetime.datetime.now()
print('{}; Num of Bytes = {} Timenow {}; TimeLastRx {}; inWaiting = {}'.format(message, numberOfBytes,
datetime.datetime.now().time(), timeLastRx.time(), EV3.inWaiting()))
#print message
## timeLastRx = datetime.datetime.now()
## if 'hi' in message:
## print "Hello EV3"
elif EV3.inWaiting() == 1:
print('inWaiting = 1; timenow is {}'.format(datetime.datetime.now().time()))
## printSerIntInfo(EV3)
time.sleep(0.01)
else:
            # See if a buffer reset is needed
timeNow = datetime.datetime.now()
timeSinceLastRx = timeNow - timeLastRx
#secSinceLastRx = timeSinceLastRx.second + timeSinceLastRx.microsecond/1000000
secSinceLastRx = timeSinceLastRx.total_seconds()
# print "Time Since Last Rx: ", timeSinceLastRx
if secSinceLastRx > RxToPeriod:
# EV3.flushInput
#EV3.close()
#EV3.open()
timeLastRx = timeNow # Prevents repetitive flushing
#print("*** Flushed at ",timeNow)
## print("Waiting at point 2 at {}".format(datetime.datetime.now().time()))
#in_wait = EV3.inWaiting()
#print "Num of bytes waiting = ", in_wait
## printSerIntInfo(EV3)
## EV3.flush()
## EV3.reset_input_buffer()
## EV3.reset_output_buffer()
# No data is ready to be processed yield control to system
time.sleep(0.01)
if __name__ == "__main__":
main()
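# Frame layout the receive loop above assumes (little-endian; a sketch pieced
# together from the parsing code, not official EV3 documentation):
#
#   [2B total length][4B header, skipped via s[6:]]
#   [1B mailbox-name length incl. NUL][name][NUL]
#   [2B message length][message]
#
# Building a matching test frame (the \x81\x9e command bytes are an assumption):
#   name, msg = "abc", "hello"
#   body = ("\x00\x00\x81\x9e" + chr(len(name) + 1) + name + "\x00"
#           + struct.pack("<H", len(msg)) + msg)
#   frame = struct.pack("<H", len(body)) + body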
|
[
"noreply@github.com"
] |
sgordon291us.noreply@github.com
|
2a2d22adbea54a98abb0bf3c680dc0b0436b242b
|
6fde9e27c643a7cef26c416b6d7f383c9ce219ce
|
/NmapScanner.py
|
46186c642759b834827e09cf9efd9c8980617aae
|
[] |
no_license
|
mtuong/Violent-Python-Examples
|
b5d8ccded6f9b6215e4c0450f163bbd4929b1170
|
debd0b49601ad0551d7f241cad7fe3ca3c58c1dd
|
refs/heads/master
| 2020-09-21T23:03:30.824234
| 2019-12-03T11:36:58
| 2019-12-03T11:36:58
| 224,965,015
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,391
|
py
|
# -*- coding: utf-8 -*-
import pprint
import sys
import Utils
import BaseModule
import nmap
import Decorators
@Decorators.ModuleDocstringParser
class NmapScanner(BaseModule.BaseModule):
"""
Scan network with nmap and emit result as new event.
Configuration template:
- NmapScanner:
network: # <type: string; is: required>
netmask: # <default: '/24'; type: string; is: optional>
ports: # <default: None; type: None||string; is: optional>
arguments: # <default: '-O -F --osscan-limit'; type: string; is: optional>
interval: # <default: 900; type: integer; is: optional>
receivers:
- NextModule
"""
module_type = "input"
"""Set module type"""
# TODO: This module can run in forked processes. We need some way to partition the network and give each process a
# segment to scan.
can_run_forked = False
def configure(self, configuration):
# Call parent configure method
BaseModule.BaseModule.configure(self, configuration)
self.network = self.getConfigurationValue('network')
self.netmask = self.getConfigurationValue('netmask')
self.arguments = self.getConfigurationValue('arguments')
def getScannerFunc(self):
@Decorators.setInterval(self.getConfigurationValue('interval'), call_on_init=True)
def scanNetwork():
# Get all alive hosts
try:
scan_results = self.scanner.scan('%s%s' % (self.network,self.netmask), arguments="-sn")
except nmap.PortScannerError:
etype, evalue, etb = sys.exc_info()
self.logger.warning("Scanning failed. Exception: %s, Error: %s." % (etype, evalue))
return
for host, scan_result in scan_results['scan'].items():
try:
host_scan_result = self.scanner.scan('%s/32' % (host), arguments=self.arguments)
except nmap.PortScannerError:
etype, evalue, etb = sys.exc_info()
self.logger.warning("Scanning failed. Exception: %s, Error: %s." % (etype, evalue))
return
if host in host_scan_result['scan']:
self.handleEvent(host, host_scan_result['scan'][host])
return scanNetwork
def handleEvent(self, host, scan_result):
# Get OS from scan.
if 'osmatch' in scan_result:
os_info = sorted(scan_result['osmatch'], key=lambda k: int(k['accuracy']))
scan_result['detected_os'] = os_info[0]['name']
scan_result.pop('osmatch')
if 'vendor' in scan_result and isinstance(scan_result['vendor'], dict) and len(scan_result['vendor']) > 0:
scan_result['vendor'] = scan_result['vendor'].values()[0]
# Drop some fields.
if 'osclass' in scan_result:
scan_result.pop('osclass')
event = Utils.getDefaultEventDict(scan_result, caller_class_name=self.__class__.__name__)
event['gambolputty']['event_type'] = 'nmap_scan'
self.sendEvent(event)
def start(self):
self.scanner = nmap.PortScanner()
timed_func = self.getScannerFunc()
self.timed_func_handler = Utils.TimedFunctionManager.startTimedFunction(timed_func)
|
[
"noreply@github.com"
] |
mtuong.noreply@github.com
|
7c33f774adf67e016e517a5dd40cc2a9543dbbe2
|
50ca8c4d75877249fbaed6509190906042d88a6a
|
/tests/test_users.py
|
e32344cf2ae337560082d46d6957adc64a85ef8c
|
[] |
no_license
|
JuanSBrinez/web_page
|
233bbce3f12dc70da81f4dd2e48e9a8f2b54881e
|
e103aac6e901e42cdf4f276a20d84426f54fbad3
|
refs/heads/master
| 2023-06-06T20:22:55.165611
| 2021-07-14T16:41:24
| 2021-07-14T16:41:24
| 385,319,697
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,390
|
py
|
import unittest, sys, os
sys.path.append('../web_page')
from file1 import app, db
class UsersTests(unittest.TestCase):
# executed prior to each test
def setUp(self):
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///test.db'
self.app = app.test_client()
db.drop_all()
db.create_all()
###############
#### tests ####
###############
def register(self, username, email, password):
return self.app.post('/register',
data=dict(username=username,
email=email,
password=password,
confirm_password=password),
follow_redirects=True)
def test_valid_user_registration(self):
response = self.register('bob', 'bob@example.com', '12345')
self.assertEqual(response.status_code, 200)
def test_invalid_username_registration(self):
response = self.register('t', 'test@example.com', 'FlaskIsAwesome')
self.assertIn(b'Field must be between 2 and 20 characters long.', response.data)
def test_invalid_email_registration(self):
response = self.register('test2', 'test@example', 'FlaskIsAwesome')
self.assertIn(b'Invalid email address.', response.data)
if __name__ == "__main__":
unittest.main()
|
[
"juan.brinez@loras.edu"
] |
juan.brinez@loras.edu
|
dd8065df1ad12df2142dc978ab09e183b5830273
|
451ea083d1dce106ffb3ab7c78e8fa3abe61a725
|
/Day 18/getComments.py
|
40deb034de3bab1c4f1bcd015b0ebde5dac86620
|
[] |
no_license
|
S4ND1X/365-Days-Of-Code
|
52dd6a8b9d0f5c3c27b23da8914725e141d06596
|
257186d2a4d7c28a3063add55d6b72d20fccb111
|
refs/heads/master
| 2022-04-19T22:55:08.426317
| 2020-03-02T23:35:28
| 2020-03-02T23:35:28
| 164,487,731
| 5
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,052
|
py
|
from tweepy import OAuthHandler
from tweepy import API
from tweepy import Cursor
from datetime import datetime, date, time, timedelta
from collections import Counter
import sys
import tweepy
consumer_key = 'UP4MwbLKrDclqSC3EpkRph6Pd'
consumer_secret = '1YokukdNJCi3VAM7qIOzSrBjOg06Lpjx2W3ehEjfrPeEQKW5Kj'
access_token = '939919094-PA76fcK91fIjoaN1LASGYdKlwkroN2TJWwIRiwbg'
access_token_secret = 'GqvN4ZR3O5aY4VEo37xugez8zUCn5kNTaMZUcuJuv0ZZy'
auth = OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
auth_api = API(auth)
account_list = ["jufut390"]
if len(account_list) > 0:
for target in account_list:
print("Getting data for " + target)
item = auth_api.get_user(target)
print("screen_name: " + item.screen_name)
#Get info about tweets
end_date = datetime.utcnow() - timedelta(days=5)
for status in Cursor(auth_api.user_timeline, id=target, tweet_mode = "extended").items():
#print tweets
if status.created_at < end_date:
break
|
[
"42609763+S4ND1X@users.noreply.github.com"
] |
42609763+S4ND1X@users.noreply.github.com
|
30bf1a0ab518d9f9d117ac6f9a4d392a86adf78b
|
e558c698295e839fdc092279d056081b97fbd2a3
|
/cgi-bin/sudokuu.py
|
c2566c5ffd71f076820dd9f364e080c2613eb75a
|
[] |
no_license
|
sudokuu/sudokuu.github.io
|
94bf92916b785e9fe525175003339eff99610c66
|
6732f349882915d3b32af36e72734e0b1dec05c7
|
refs/heads/master
| 2020-04-14T19:48:39.258820
| 2019-07-30T22:19:01
| 2019-07-30T22:19:01
| 68,010,180
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,034
|
py
|
#!/usr/bin/python
import cgi
import random
jogoFacil = ["460500019\n827600045\n000020060\n000207081\n006000400\n510304000\n080050000\n730001652\n640002078",
"460500019\n827600045\n000020060\n000207081\n006000400\n510304000\n080050000\n730001652\n640002078",
"460500019\n827600045\n000020060\n000207081\n006000400\n510304000\n080050000\n730001652\n640002078",
"460500019\n827600045\n000020060\n000207081\n006000400\n510304000\n080050000\n730001652\n640002078",
"460500019\n827600045\n000020060\n000207081\n006000400\n510304000\n080050000\n730001652\n640002078"]
jogoFacilComp = ["463578219\n827619345\n951423867\n394267581\n276185493\n518394726\n182756934\n739841652\n645932178",
"463578219\n827619345\n951423867\n394267581\n276185493\n518394726\n182756934\n739841652\n645932178",
"463578219\n827619345\n951423867\n394267581\n276185493\n518394726\n182756934\n739841652\n645932178",
"463578219\n827619345\n951423867\n394267581\n276185493\n518394726\n182756934\n739841652\n645932178",
"463578219\n827619345\n951423867\n394267581\n276185493\n518394726\n182756934\n739841652\n645932178"]
jogoMedio = ["460500019\n827600045\n000020060\n000207081\n006000400\n510304000\n080050000\n730001652\n640002078",
"460500019\n827600045\n000020060\n000207081\n006000400\n510304000\n080050000\n730001652\n640002078",
"460500019\n827600045\n000020060\n000207081\n006000400\n510304000\n080050000\n730001652\n640002078",
"460500019\n827600045\n000020060\n000207081\n006000400\n510304000\n080050000\n730001652\n640002078",
"460500019\n827600045\n000020060\n000207081\n006000400\n510304000\n080050000\n730001652\n640002078"]
jogoMedioComp = ["463578219\n827619345\n951423867\n394267581\n276185493\n518394726\n182756934\n739841652\n645932178",
"463578219\n827619345\n951423867\n394267581\n276185493\n518394726\n182756934\n739841652\n645932178",
"463578219\n827619345\n951423867\n394267581\n276185493\n518394726\n182756934\n739841652\n645932178",
"463578219\n827619345\n951423867\n394267581\n276185493\n518394726\n182756934\n739841652\n645932178",
"463578219\n827619345\n951423867\n394267581\n276185493\n518394726\n182756934\n739841652\n645932178"]
jogoDificil = ["460500019\n827600045\n000020060\n000207081\n006000400\n510304000\n080050000\n730001652\n640002078",
"460500019\n827600045\n000020060\n000207081\n006000400\n510304000\n080050000\n730001652\n640002078",
"460500019\n827600045\n000020060\n000207081\n006000400\n510304000\n080050000\n730001652\n640002078",
"460500019\n827600045\n000020060\n000207081\n006000400\n510304000\n080050000\n730001652\n640002078",
"460500019\n827600045\n000020060\n000207081\n006000400\n510304000\n080050000\n730001652\n640002078"]
jogoDificilComp = ["463578219\n827619345\n951423867\n394267581\n276185493\n518394726\n182756934\n739841652\n645932178",
"463578219\n827619345\n951423867\n394267581\n276185493\n518394726\n182756934\n739841652\n645932178",
"463578219\n827619345\n951423867\n394267581\n276185493\n518394726\n182756934\n739841652\n645932178",
"463578219\n827619345\n951423867\n394267581\n276185493\n518394726\n182756934\n739841652\n645932178",
"463578219\n827619345\n951423867\n394267581\n276185493\n518394726\n182756934\n739841652\n645932178"]
# print a random game from the list according to the difficulty
def geraJogo(dificuldade):
if ( dificuldade == '0' ):
rand = random.randint(0,4)
print jogoFacil[rand]
print jogoFacilComp[rand]
elif ( dificuldade == '1' ):
rand = random.randint(0,4)
print jogoMedio[rand]
print jogoMedioComp[rand]
elif ( dificuldade == '2' ):
rand = random.randint(0,4)
print jogoDificil[rand]
print jogoDificilComp[rand]
    else:
        print 'dificuldade invalida'
    return
# response header
print "Content-type:text/plain"
print
form = cgi.FieldStorage()
if "dificuldade" not in form:
print "chamada invalida"
else:
geraJogo(form.getfirst("dificuldade", "").upper())
|
[
"felipehendrix@programmer.net"
] |
felipehendrix@programmer.net
|
e266848221e4a52337e265a2244057b35982fe8d
|
948b4d6deee2a5c093f7f5b1daab00936a30f1b9
|
/kali_rootfs/usr/share/sqlmap/lib/request/basicauthhandler.py
|
89ec252bd7badfa2fbb9c882ebad3cc798743b51
|
[] |
no_license
|
goutham414/kali-n900
|
0bc476b67ae59ed2f1e25055759cefd38ab72a20
|
1ab8310a5a5e22b35535fe85ad16bd3dd8286d85
|
refs/heads/master
| 2021-01-10T10:51:51.308505
| 2016-03-21T20:08:39
| 2016-03-21T20:08:39
| 54,414,038
| 5
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,377
|
py
|
#!/usr/bin/env python
"""
Copyright (c) 2006-2016 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""
import urllib2
class SmartHTTPBasicAuthHandler(urllib2.HTTPBasicAuthHandler):
"""
Reference: http://selenic.com/hg/rev/6c51a5056020
Fix for a: http://bugs.python.org/issue8797
"""
def __init__(self, *args, **kwargs):
urllib2.HTTPBasicAuthHandler.__init__(self, *args, **kwargs)
self.retried_req = set()
self.retried_count = 0
def reset_retry_count(self):
# Python 2.6.5 will call this on 401 or 407 errors and thus loop
# forever. We disable reset_retry_count completely and reset in
# http_error_auth_reqed instead.
pass
def http_error_auth_reqed(self, auth_header, host, req, headers):
# Reset the retry counter once for each request.
if hash(req) not in self.retried_req:
self.retried_req.add(hash(req))
self.retried_count = 0
else:
if self.retried_count > 5:
raise urllib2.HTTPError(req.get_full_url(), 401, "basic auth failed",
headers, None)
else:
self.retried_count += 1
return urllib2.HTTPBasicAuthHandler.http_error_auth_reqed(
self, auth_header, host, req, headers)
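# Usage sketch (standard urllib2 wiring; host and credentials are placeholders):
#   passman = urllib2.HTTPPasswordMgrWithDefaultRealm()
#   passman.add_password(None, "http://example.com/", "user", "secret")
#   opener = urllib2.build_opener(SmartHTTPBasicAuthHandler(passman))
#   opener.open("http://example.com/")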
|
[
"rapol.goutham@gmail.com"
] |
rapol.goutham@gmail.com
|
03c78e9dee6e157c437e2c34c55b632c02c1ee94
|
c264ad6205c0b47038f9f67ce52eabd816ddd911
|
/prime.py
|
88c7cf50a84af286b58cb50fa7c24746682251b0
|
[] |
no_license
|
kedharavarshini/python-lab
|
3dda522062ef077047f79158263142502d2790a1
|
1ebe6b70a3ba49a3b6902005ef6cab18712094f9
|
refs/heads/master
| 2020-07-22T17:37:10.159215
| 2019-09-24T14:59:00
| 2019-09-24T14:59:00
| 207,277,262
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 152
|
py
|
a = int(input("enter a number"))
# trial division: a is prime iff no b in [2, a) divides it evenly
if a < 2:
    print(a, "is not prime")
else:
    for b in range(2, a):
        if a % b == 0:
            print(a, "is not prime")
            break
    else:
        # loop finished without finding a divisor
        print(a, "is prime")
|
[
"noreply@github.com"
] |
kedharavarshini.noreply@github.com
|
6bf6c8e3a697fc2008290a64bb2cf9844aac8da2
|
169e75df163bb311198562d286d37aad14677101
|
/tensorflow/tensorflow/python/training/checkpointable/layer_utils.py
|
978fcb2252cd4481b8286bdf3afd58b30ce6d665
|
[
"Apache-2.0"
] |
permissive
|
zylo117/tensorflow-gpu-macosx
|
e553d17b769c67dfda0440df8ac1314405e4a10a
|
181bc2b37aa8a3eeb11a942d8f330b04abc804b3
|
refs/heads/master
| 2022-10-19T21:35:18.148271
| 2020-10-15T02:33:20
| 2020-10-15T02:33:20
| 134,240,831
| 116
| 26
|
Apache-2.0
| 2022-10-04T23:36:22
| 2018-05-21T08:29:12
|
C++
|
UTF-8
|
Python
| false
| false
| 3,443
|
py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities related to layer/model functionality."""
# TODO(b/110718070): Move these functions back to tensorflow/python/keras/utils
# once __init__ files no longer require all of tf.keras to be imported together.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
def is_layer(obj):
"""Implicit check for Layer-like objects."""
# TODO(b/110718070): Replace with isinstance(obj, base_layer.Layer).
return (hasattr(obj, "call")
and hasattr(obj, "build")
and hasattr(obj, "variables"))
def filter_empty_layer_containers(layer_list):
"""Filter out empty Layer-like containers."""
return [layer for layer in layer_list
# Filter out only empty Checkpointable data structures. Empty Networks
# will still show up in Model.layers.
if is_layer(layer) or getattr(layer, "layers", True)]
def gather_trainable_weights(trainable, sub_layers, extra_variables):
"""Lists the trainable weights for an object with sub-layers.
Args:
trainable: Whether the object collecting the variables is trainable.
sub_layers: A flat list of Layer objects owned by this object, to collect
variables from.
extra_variables: Any extra variables to include. Their `.trainable` property
is used to categorize them.
Returns:
A list of collected trainable weights/variables.
"""
if not trainable:
return []
weights = []
for layer in sub_layers:
weights += layer.trainable_weights
trainable_extra_variables = [
v for v in extra_variables if v.trainable]
return weights + trainable_extra_variables
def gather_non_trainable_weights(trainable, sub_layers, extra_variables):
"""Lists the non-trainable weights for an object with sub-layers.
Args:
trainable: Whether the object collecting the variables is trainable.
sub_layers: A flat list of Layer objects owned by this object, to collect
variables from.
extra_variables: Any extra variables to include. Their `.trainable` property
is used to categorize them.
Returns:
A list of collected non-trainable weights/variables.
"""
trainable_extra_variables = []
non_trainable_extra_variables = []
for v in extra_variables:
if v.trainable:
trainable_extra_variables.append(v)
else:
non_trainable_extra_variables.append(v)
weights = []
for layer in sub_layers:
weights += layer.non_trainable_weights
if not trainable:
trainable_weights = []
for layer in sub_layers:
trainable_weights += layer.trainable_weights
return (trainable_weights + trainable_extra_variables
+ weights + non_trainable_extra_variables)
return weights + non_trainable_extra_variables
|
[
"thomas.warfel@pnnl.gov"
] |
thomas.warfel@pnnl.gov
|
0b524586ad4cd81467f9beee07e15fe46647848a
|
c2ccf0fb3b09c0d2e73001133f12d1814bea1e73
|
/main.py
|
c8865eabd212549793cfd1c93964fc6f0f048c41
|
[] |
no_license
|
nguyenkhoa0721/Smart-Glass-for-blind-people
|
484fb3dd19114625630e2b0632dd49b37ded3781
|
5313eff98f962ad7417b4f940d63138b54184583
|
refs/heads/master
| 2020-04-15T10:07:09.757270
| 2019-09-04T03:12:28
| 2019-09-04T03:12:28
| 164,581,071
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,894
|
py
|
import cv2
import sys
import numpy as np
import os
import json
import time
import threading
import socket
from http.server import BaseHTTPRequestHandler,HTTPServer
from urllib.parse import parse_qs
import cgi
import base64
import io
from imageio import imread
import matplotlib.pyplot as plt
import json
from odes.odes import classfy
from FTTS import TTS
from news.news import NEWS
from news.read import READ
from ocr.ocr import OCR
from ocr.corrector import ORRECTOR
from ocr.pre import PRE
from sdes.imagecaption import CAPTION
from weather import WEATHER
from wiki import WIKI
from face.facedes import FACEDES
import cloudsight
from google.cloud import translate
from face.facerec import FACEREC
recognizer = cv2.face.LBPHFaceRecognizer_create()
#recognizer.read('face/trainer/trainer.yml')
cascadePath = "face/haarcascade_frontalface_default.xml"
faceCascade = cv2.CascadeClassifier(cascadePath)
font = cv2.FONT_HERSHEY_SIMPLEX
info = []
names=[]
text=''
checkqr=0
qr=''
checkface=0
curpage=0
tintuc=[]
import vlc
with open('face/labels.txt', 'r',encoding='utf-8') as filehandle:
i=0
info.append(('0','0'))
for line in filehandle:
currentPlace = line[:-1]
names.append(currentPlace)
if (i!=0):
with open('face/contact/'+str(i)+'.json',encoding='utf-8') as f:
d = json.load(f)
info.append((d["name"],d["phone"]))
i=i+1
print('nap danh ba')
print (info)
print ('khoi chay bo xu ly')
def r(text):
global curpage,tintuc
if (text=='' or text=="stop" or text=="exitnews"):
player.stop()
elif (text[:4]=="news"):
bao=""
curpage=1
chude=text[5:]
bao=bao+(str("trang "+str(curpage)))+"."
tintuc=NEWS(chude)
for a,b,c in tintuc:
if (int(a)+1<=(curpage*5) and int(a)+1>(curpage*5-5)):
bao=bao+(str("bài số "+ str(a)+": "+b))+"."
TTS(bao,0)
return bao
elif (text=="next"):
bao=""
curpage=curpage+1
bao=bao+(str("trang "+str(curpage)))+"."
print(tintuc)
for a,b,c in tintuc:
if (int(a)+1<=(curpage*5) and int(a)+1>(curpage*5-5)):
bao=bao+(str("bài số "+ str(a)+": "+b))+"."
TTS(bao,0)
return bao
elif (text=="pre"):
bao=""
curpage=curpage-1
bao=bao+(str("trang "+str(curpage)))+"."
for a,b,c in tintuc:
if (int(a)+1<=(curpage*5) and int(a)+1>(curpage*5-5)):
bao=bao+(str("bài số "+ str(a)+": "+b))+"."
TTS(bao,0)
return bao
elif (text[:4]=="read"):
ans=READ(str(tintuc[int(text[4:])-1][2]),0)
TTS(ans,0)
return ans
elif (text[:4]=="rsum"):
ans=READ(str(tintuc[int(text[4:])-1][2]),1)
TTS(ans,0)
return ans
elif (text=='fash'):
ans=FASHION()
TTS(ans,1)
return ans
elif (text=='rec'):
kq=FACEREC()
kqq=""
for n in kq:
if (n!='Unknown'):
kqq=kqq+names[int(n)]+","
if (kqq!=""):
TTS(kqq,1)
return(kqq)
else:
pass
#TTS("không có trong dữ liệu",1)
#return ("không có trong dữ liệu")
ans=classfy()
TTS(ans,1)
return ans
elif (text=='facedes'):
ans=FACEDES('all')
TTS(ans,0)
return ans
elif (text=='facerec'):
kq=FACEREC()
kqq=""
for n in kq:
if (n!='Unknown'):
kqq=kqq+names[int(n)]+","
if (kqq!=""):
TTS(kqq,1)
return(kqq)
else:
TTS("không có trong dữ liệu",1)
return ("không có trong dữ liệu")
elif (text[:4]=='prof'):
try:
id=-1
ok=False
for name in names:
id=id+1
if (name==text[5:]):
TTS('tên: '+info[id][0]+'. điện thoại :'+info[id][1],0)
return ('tên: '+info[id][0]+'. điện thoại :'+info[id][1])
ok=True
break
if (ok==False):
TTS("không có trong dữ liệu",0)
return ("không có trong dữ liệu")
except:
TTS("lỗi",0)
            return ("lỗi")
elif (text=='ocr'):
ans=OCR()
ans=HAU(ans)
TTS(ans,0)
return(ans)
elif (text=="sdes"):
ans=CAPTION()
TTS(ans,0)
return (ans)
elif (text=='odes'):
ans=classfy()
TTS(ans,1)
return ans
elif (text[:4]=='wiki'):
try:
TTS(WIKI(text[5:]),0)
return (WIKI(text[5:]))
except:
TTS("lỗi",1)
return ("lỗi")
elif (text[:7]=='weather'):
try:
ans=WEATHER(text[8:])
TTS(ans,0)
return ans
except:
ans=WEATHER("kon tum")
TTS(ans,0)
return ans
elif (text[:5]=="music"):
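        # note: 'player' is assigned locally here, while the stop branch above
        # calls player.stop(); a module-level player is assumed to exist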
try:
ans=GETMUSIC(text[6:])
print(ans)
player=vlc.MediaPlayer(ans)
player.play()
except:
return("lỗi")
else:
TTS("Không hiểu lệnh",0)
return ("Không hiểu lệnh")
def decodeimg(data,re):
dataa=data.encode()
b64_string = dataa.decode()
img = imread(io.BytesIO(base64.b64decode(b64_string)))
cv2_img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
if (re=='ocr'):
cv2_img=PRE(cv2_img)
cv2.imwrite("image.jpg", cv2_img)
class GP(BaseHTTPRequestHandler):
def _set_headers(self):
self.send_response(200)
self.send_header('Content-type', 'text/html; charset=utf-8')
self.end_headers()
def do_GET(self):
self._set_headers()
print (self.path)
print (parse_qs(self.path[2:]))
self.wfile.write("Get Request Received!".encode())
def do_POST(self):
self._set_headers()
form = cgi.FieldStorage(
fp=self.rfile,
headers=self.headers,
environ={'REQUEST_METHOD': 'POST'}
)
re=(form.getvalue("text"))
print(re)
try:
decodeimg(form.getvalue("img"),re)
except:
pass
re=r(re)
self.wfile.write(bytes(re,'utf-8'))
def run(server_class=HTTPServer, handler_class=GP, port=8088):
server_address = ('', port)
httpd = server_class(server_address, handler_class)
print ('Server running at localhost:8088...')
httpd.serve_forever()
run()
|
[
"nguyenkhoa0721@gmail.com"
] |
nguyenkhoa0721@gmail.com
|
b7dae662e90b2f8d8d47e0d5359d53700b75972b
|
5679bb51d6fcc6e94e9206b89ce395c988fd202b
|
/venv/Lib/site-packages/elasticsearch/client/sql.py
|
64a606a2ecb15aaa59997adf782070b02467bf07
|
[
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
antwan1/NewHowler
|
762e5af9b646ef96133421cb1c9e84c16d5f0cc1
|
3a35fb23552ab9829bc22ea63b49b988844447f0
|
refs/heads/master
| 2023-09-05T08:02:48.770494
| 2021-10-28T19:47:56
| 2021-10-28T19:47:56
| 410,207,003
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,096
|
py
|
# Licensed to Elasticsearch B.V. under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch B.V. licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from .utils import SKIP_IN_PATH, NamespacedClient, _make_path, query_params
class SqlClient(NamespacedClient):
@query_params()
def clear_cursor(self, body, params=None, headers=None):
"""
Clears the SQL cursor
`<https://www.elastic.co/guide/en/elasticsearch/reference/7.15/clear-sql-cursor-api.html>`_
:arg body: Specify the cursor value in the `cursor` element to
clean the cursor.
"""
if body in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument 'body'.")
return self.transport.perform_request(
"POST", "/_sql/close", params=params, headers=headers, body=body
)
@query_params("format")
def query(self, body, params=None, headers=None):
"""
Executes a SQL request
`<https://www.elastic.co/guide/en/elasticsearch/reference/7.15/sql-search-api.html>`_
:arg body: Use the `query` element to start a query. Use the
`cursor` element to continue a query.
:arg format: a short version of the Accept header, e.g. json,
yaml
"""
if body in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument 'body'.")
return self.transport.perform_request(
"POST", "/_sql", params=params, headers=headers, body=body
)
@query_params()
def translate(self, body, params=None, headers=None):
"""
Translates SQL into Elasticsearch queries
`<https://www.elastic.co/guide/en/elasticsearch/reference/7.15/sql-translate-api.html>`_
:arg body: Specify the query in the `query` element.
"""
if body in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument 'body'.")
return self.transport.perform_request(
"POST", "/_sql/translate", params=params, headers=headers, body=body
)
@query_params()
def delete_async(self, id, params=None, headers=None):
"""
Deletes an async SQL search or a stored synchronous SQL search. If the search
is still running, the API cancels it.
`<https://www.elastic.co/guide/en/elasticsearch/reference/7.15/delete-async-sql-search-api.html>`_
:arg id: The async search ID
"""
if id in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument 'id'.")
return self.transport.perform_request(
"DELETE",
_make_path("_sql", "async", "delete", id),
params=params,
headers=headers,
)
@query_params("delimiter", "format", "keep_alive", "wait_for_completion_timeout")
def get_async(self, id, params=None, headers=None):
"""
Returns the current status and available results for an async SQL search or
stored synchronous SQL search
`<https://www.elastic.co/guide/en/elasticsearch/reference/7.15/get-async-sql-search-api.html>`_
:arg id: The async search ID
:arg delimiter: Separator for CSV results Default: ,
:arg format: Short version of the Accept header, e.g. json, yaml
:arg keep_alive: Retention period for the search and its results
Default: 5d
:arg wait_for_completion_timeout: Duration to wait for complete
results
"""
if id in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument 'id'.")
return self.transport.perform_request(
"GET", _make_path("_sql", "async", id), params=params, headers=headers
)
@query_params()
def get_async_status(self, id, params=None, headers=None):
"""
Returns the current status of an async SQL search or a stored synchronous SQL
search
`<https://www.elastic.co/guide/en/elasticsearch/reference/7.15/get-async-sql-search-status-api.html>`_
:arg id: The async search ID
"""
if id in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument 'id'.")
return self.transport.perform_request(
"GET",
_make_path("_sql", "async", "status", id),
params=params,
headers=headers,
)
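# Usage sketch (the index name is an illustrative assumption; `query` is the
# method defined above):
#   from elasticsearch import Elasticsearch
#   es = Elasticsearch()  # defaults to localhost:9200
#   resp = es.sql.query(body={"query": "SELECT * FROM my-index LIMIT 5"})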
|
[
"antoniore385@gmail.com"
] |
antoniore385@gmail.com
|
180167ac82995f96fa27d0545933b09287fcddad
|
7308fe5be2eaba4e29d605d271cd9c2394e62b4a
|
/user/post_user.py
|
02a130d7982ac3fb7a208f9d240b2d149071fd12
|
[] |
no_license
|
santipm29/crud-aws-with-microservices
|
d8aeac25aea58c2aa2b17d2d8b43e9032eba41aa
|
17e1a984c23ef5196a612006be2d6d1c1f9a47c1
|
refs/heads/master
| 2020-04-30T02:27:04.470739
| 2019-05-25T18:52:05
| 2019-05-25T18:52:05
| 176,560,051
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,744
|
py
|
import json, uuid, boto3
from user import mongo
from jsonschema import Draft4Validator
def load_file(name):
with open(name) as f:
return json.load(f)
def type(name):
types = {'string' : 'caracter', 'number' : 'numérico', 'object': 'objeto'}
if name in types:
return types[name]
def beautify(path, instance, message, validator_value):
if 'is not of type' in message:
return 'El campo \''+str(path[-1]) +'\' con el valor \'' + str(instance) +'\' debe ser de tipo ' + type(validator_value)
elif 'is a required property' in message:
return 'El campo '+message.replace('is a required property','es obligatorio')
elif 'is too short' in message and len(instance)==0:
return 'El campo \''+str(path[-1]) +'\' es obligatorio'
else:
return message
def validate(message):
SCHEMA = load_file('validation/jsonschema.json')
v = Draft4Validator(SCHEMA)
errors = []
for error in sorted(v.iter_errors(message), key=str):
errors.append({"mensaje" : beautify(error.path,error.instance,error.message, error.validator_value)})
return errors
def sendS3(message):
id = uuid.uuid4()
s3 = boto3.client('s3')
message['_id'] = str(message['_id'])
response = s3.put_object(
Bucket = 'tallerawsclientes',
Key=f'{id}.json',
Body = json.dumps(message)
)
def post(event, context):
body = json.loads(event["body"])
errors = validate(body)
resp = []
if not errors:
resp.append(mongo.insert("user", body))
sendS3(body)
else:
resp.append(errors)
response = {"statusCode": 200, "body": json.dumps( resp )}
return response
|
[
"santipmartinez@outlook.com"
] |
santipmartinez@outlook.com
|
74e141ff66846d12eeef4c856495c754a20fdb24
|
0b23a9f762b715954308315a570bc490b1f1aec3
|
/final_task/tests/test_function.py
|
2e627cd48cfeaa5bd73a8d7f81ef63659a236245
|
[] |
no_license
|
zaja1kun/test-travis
|
3ac58228c5d805c734ee02a3e1057e0098287e0d
|
0d7f74b55826c63b951b86be5ba533230b1db224
|
refs/heads/master
| 2020-04-04T03:28:03.281807
| 2018-11-01T13:14:27
| 2018-11-01T13:14:27
| 155,714,082
| 0
| 0
| null | 2018-11-01T12:48:53
| 2018-11-01T12:48:52
| null |
UTF-8
|
Python
| false
| false
| 467
|
py
|
from unittest import TestCase
from pycalc.function import Function
class TestFunction(TestCase):
def test_function_object(self):
value = 'abs'
index = 1
function = abs
test_function = Function(value, index, function)
self.assertEqual(test_function.type, 'function')
self.assertEqual(test_function.value, 'abs')
self.assertEqual(test_function.index, 1)
self.assertEqual(test_function.function, abs)
|
[
"Aliaksei_Buziuma@epam.com"
] |
Aliaksei_Buziuma@epam.com
|
5e28b7c0e151fbb86907c2fea78b753d66622874
|
71c698d290ec03eda7afdd90da2623e162f59ece
|
/Tipe data statis/latihan02.py
|
3c6059b3f4cf5b883fb24189996f56a6c993b097
|
[] |
no_license
|
4lasR0ban/Belajar-Python
|
2976ddb215d389c906dd0bf9f8b539baa05563a4
|
88227c2ccd0f39c79f00f3cf23ea7a4cf9953d30
|
refs/heads/main
| 2023-03-04T02:15:35.657434
| 2021-02-15T15:12:06
| 2021-02-15T15:12:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 564
|
py
|
# python exercise project 4
n = 32892700
# 100,000 denomination
p100k = n / 100000
print('Pecahan 100k ada ', int(p100k), 'buah')
# 50,000 denomination
p50k = n / 50000
print('Pecahan 50k ada ', int(p50k), 'buah')
# 10,000 denomination
p10k = n / 10000
print('Pecahan 10k ada ', int(p10k), 'buah')
# 5,000 denomination
p5k = n / 5000
print('Pecahan 5k ada ', int(p5k), 'buah')
# 1,000 denomination
p1k = n / 1000
print('Pecahan 1k ada ', int(p1k), 'buah')
# 500 denomination
p500 = n / 500
print('Pecahan 500 ada ', int(p500), 'buah')
# 100 denomination
p100 = n / 100
print('Pecahan 100 ada ', int(p100), 'buah')
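# Equivalent loop-based sketch (illustrative; same arithmetic as above):
#   for d in (100000, 50000, 10000, 5000, 1000, 500, 100):
#       print('Pecahan', d, 'ada', n // d, 'buah')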
|
[
"71622469+verowardana@users.noreply.github.com"
] |
71622469+verowardana@users.noreply.github.com
|
e442cc61a53ee7f1a668f6b23a79fc8eb9cc2334
|
ab5cdf8f2de94c327e4679da84f941b1f3c04db4
|
/kubernetes/test/test_v1_env_var.py
|
c37c80243ab9137299a6ad478388d2c3a96601b2
|
[
"Apache-2.0"
] |
permissive
|
diannaowa/client-python
|
a4a92a125178db26004eaef5062f9b1b581b49a8
|
5e268fb0b6f21a535a14a7f968b84ed4486f6774
|
refs/heads/master
| 2020-12-02T22:06:03.687696
| 2017-06-30T21:42:50
| 2017-06-30T21:42:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 803
|
py
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.7.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.client
from kubernetes.client.rest import ApiException
from kubernetes.client.models.v1_env_var import V1EnvVar
class TestV1EnvVar(unittest.TestCase):
""" V1EnvVar unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testV1EnvVar(self):
"""
Test V1EnvVar
"""
model = kubernetes.client.models.v1_env_var.V1EnvVar()
if __name__ == '__main__':
unittest.main()
|
[
"mehdy@google.com"
] |
mehdy@google.com
|
d6da47881922063e4c5338620c14e5242cb1eea2
|
3c113092c1e22c09299ab995300bf7a22bd24fd6
|
/pycorrector/macbert/base_model.py
|
5ffffc2ad547b6fc6a2e257622844f3de7a9b0c6
|
[
"Apache-2.0",
"Python-2.0"
] |
permissive
|
luozhouyang/pycorrector
|
2dbfdd105418b07e613789b45cd8f1c3183a13b6
|
61bc8c1253bfe0784a825d9042c9be7f057a9bff
|
refs/heads/master
| 2023-06-04T17:47:52.140410
| 2022-01-06T09:14:29
| 2022-01-06T09:14:29
| 447,160,173
| 1
| 0
|
Apache-2.0
| 2022-01-12T09:47:28
| 2022-01-12T09:47:27
| null |
UTF-8
|
Python
| false
| false
| 7,265
|
py
|
# -*- coding: utf-8 -*-
"""
@author:XuMing(xuming624@qq.com), Abtion(abtion@outlook.com)
@description:
"""
import operator
from abc import ABC
import torch
import torch.nn as nn
import numpy as np
import pytorch_lightning as pl
from pycorrector.macbert import lr_scheduler
from pycorrector.macbert.evaluate_util import compute_corrector_prf, compute_sentence_level_prf
from pycorrector.utils.logger import logger
class FocalLoss(nn.Module):
"""
Softmax and sigmoid focal loss.
copy from https://github.com/lonePatient/TorchBlocks
"""
def __init__(self, num_labels, activation_type='softmax', gamma=2.0, alpha=0.25, epsilon=1.e-9):
super(FocalLoss, self).__init__()
self.num_labels = num_labels
self.gamma = gamma
self.alpha = alpha
self.epsilon = epsilon
self.activation_type = activation_type
def forward(self, input, target):
"""
Args:
            input: the model's output logits, shape of [batch_size, num_cls]
target: ground truth labels, shape of [batch_size]
Returns:
shape of [batch_size]
"""
if self.activation_type == 'softmax':
idx = target.view(-1, 1).long()
one_hot_key = torch.zeros(idx.size(0), self.num_labels, dtype=torch.float32, device=idx.device)
one_hot_key = one_hot_key.scatter_(1, idx, 1)
logits = torch.softmax(input, dim=-1)
loss = -self.alpha * one_hot_key * torch.pow((1 - logits), self.gamma) * (logits + self.epsilon).log()
loss = loss.sum(1)
elif self.activation_type == 'sigmoid':
multi_hot_key = target
logits = torch.sigmoid(input)
zero_hot_key = 1 - multi_hot_key
loss = -self.alpha * multi_hot_key * torch.pow((1 - logits), self.gamma) * (logits + self.epsilon).log()
loss += -(1 - self.alpha) * zero_hot_key * torch.pow(logits, self.gamma) * (1 - logits + self.epsilon).log()
return loss.mean()
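# Minimal usage sketch for FocalLoss (shapes and values are illustrative
# assumptions, not part of the original module):
#   loss_fn = FocalLoss(num_labels=2)            # softmax variant (default)
#   logits = torch.randn(4, 2)                   # [batch_size, num_cls]
#   target = torch.tensor([0, 1, 1, 0])          # [batch_size]
#   loss = loss_fn(logits, target)               # scalar tensor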
def make_optimizer(cfg, model):
params = []
for key, value in model.named_parameters():
if not value.requires_grad:
continue
lr = cfg.SOLVER.BASE_LR
weight_decay = cfg.SOLVER.WEIGHT_DECAY
if "bias" in key:
lr = cfg.SOLVER.BASE_LR * cfg.SOLVER.BIAS_LR_FACTOR
weight_decay = cfg.SOLVER.WEIGHT_DECAY_BIAS
params += [{"params": [value], "lr": lr, "weight_decay": weight_decay}]
if cfg.SOLVER.OPTIMIZER_NAME == 'SGD':
optimizer = getattr(torch.optim, cfg.SOLVER.OPTIMIZER_NAME)(params, momentum=cfg.SOLVER.MOMENTUM)
else:
optimizer = getattr(torch.optim, cfg.SOLVER.OPTIMIZER_NAME)(params)
return optimizer
def build_lr_scheduler(cfg, optimizer):
scheduler_args = {
"optimizer": optimizer,
# warmup options
"warmup_factor": cfg.SOLVER.WARMUP_FACTOR,
"warmup_epochs": cfg.SOLVER.WARMUP_EPOCHS,
"warmup_method": cfg.SOLVER.WARMUP_METHOD,
# multi-step lr scheduler options
"milestones": cfg.SOLVER.STEPS,
"gamma": cfg.SOLVER.GAMMA,
# cosine annealing lr scheduler options
"max_iters": cfg.SOLVER.MAX_ITER,
"delay_iters": cfg.SOLVER.DELAY_ITERS,
"eta_min_lr": cfg.SOLVER.ETA_MIN_LR,
}
scheduler = getattr(lr_scheduler, cfg.SOLVER.SCHED)(**scheduler_args)
return {'scheduler': scheduler, 'interval': cfg.SOLVER.INTERVAL}
class BaseTrainingEngine(pl.LightningModule):
def __init__(self, cfg, *args, **kwargs):
super().__init__(*args, **kwargs)
self.cfg = cfg
def configure_optimizers(self):
optimizer = make_optimizer(self.cfg, self)
scheduler = build_lr_scheduler(self.cfg, optimizer)
return [optimizer], [scheduler]
def on_validation_epoch_start(self) -> None:
logger.info('Valid.')
def on_test_epoch_start(self) -> None:
logger.info('Testing...')
class CscTrainingModel(BaseTrainingEngine, ABC):
"""
    Base model for CSC (Chinese Spelling Correction); defines the training and prediction steps
"""
def __init__(self, cfg, *args, **kwargs):
super().__init__(cfg, *args, **kwargs)
# loss weight
self.w = cfg.MODEL.HYPER_PARAMS[0]
def training_step(self, batch, batch_idx):
ori_text, cor_text, det_labels = batch
outputs = self.forward(ori_text, cor_text, det_labels)
loss = self.w * outputs[1] + (1 - self.w) * outputs[0]
self.log('train_loss', loss, on_step=True, on_epoch=True, prog_bar=True, logger=True, batch_size=len(ori_text))
return loss
def validation_step(self, batch, batch_idx):
ori_text, cor_text, det_labels = batch
outputs = self.forward(ori_text, cor_text, det_labels)
loss = self.w * outputs[1] + (1 - self.w) * outputs[0]
det_y_hat = (outputs[2] > 0.5).long()
cor_y_hat = torch.argmax((outputs[3]), dim=-1)
encoded_x = self.tokenizer(cor_text, padding=True, return_tensors='pt')
encoded_x.to(self._device)
cor_y = encoded_x['input_ids']
cor_y_hat *= encoded_x['attention_mask']
results = []
det_acc_labels = []
cor_acc_labels = []
for src, tgt, predict, det_predict, det_label in zip(ori_text, cor_y, cor_y_hat, det_y_hat, det_labels):
_src = self.tokenizer(src, add_special_tokens=False)['input_ids']
_tgt = tgt[1:len(_src) + 1].cpu().numpy().tolist()
_predict = predict[1:len(_src) + 1].cpu().numpy().tolist()
cor_acc_labels.append(1 if operator.eq(_tgt, _predict) else 0)
det_acc_labels.append(det_predict[1:len(_src) + 1].equal(det_label[1:len(_src) + 1]))
results.append((_src, _tgt, _predict,))
return loss.cpu().item(), det_acc_labels, cor_acc_labels, results
def validation_epoch_end(self, outputs) -> None:
det_acc_labels = []
cor_acc_labels = []
results = []
for out in outputs:
det_acc_labels += out[1]
cor_acc_labels += out[2]
results += out[3]
loss = np.mean([out[0] for out in outputs])
self.log('val_loss', loss)
logger.info(f'loss: {loss}')
logger.info(f'Detection:\n'
f'acc: {np.mean(det_acc_labels):.4f}')
logger.info(f'Correction:\n'
f'acc: {np.mean(cor_acc_labels):.4f}')
compute_corrector_prf(results, logger)
compute_sentence_level_prf(results, logger)
def test_step(self, batch, batch_idx):
return self.validation_step(batch, batch_idx)
def test_epoch_end(self, outputs) -> None:
logger.info('Test.')
self.validation_epoch_end(outputs)
def predict(self, texts):
inputs = self.tokenizer(texts, padding=True, return_tensors='pt')
inputs.to(self.cfg.MODEL.DEVICE)
with torch.no_grad():
outputs = self.forward(texts)
y_hat = torch.argmax(outputs[1], dim=-1)
expand_text_lens = torch.sum(inputs['attention_mask'], dim=-1) - 1
rst = []
for t_len, _y_hat in zip(expand_text_lens, y_hat):
rst.append(self.tokenizer.decode(_y_hat[1:t_len]).replace(' ', ''))
return rst
|
[
"shibing624@126.com"
] |
shibing624@126.com
|
4c7477b8e280f006655ed89ab39128331bbb0f93
|
47d504eba70ce8fcf0d71fb2e3c5895c55897bcf
|
/usuario/validators.py
|
de77aaabf171fcb84a2a23b1fd59edd38cf0bc11
|
[] |
no_license
|
mzKaNgPae/PruebaDesarrollo3FINAL
|
aed4d51da6e042f02e0b041d13abae7dbd2c55b4
|
c58f200b6b8483d6000406cbd7ad18b021dc1353
|
refs/heads/main
| 2023-02-03T20:45:42.765639
| 2020-12-18T16:11:37
| 2020-12-18T16:11:37
| 322,665,921
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 567
|
py
|
def rut_valido(rut, dv):
    rut = rut.replace('.', '')[::-1]  # reverse the RUT digits
    acc = 0  # running total of the checksum
    mul = 2  # multiplier for the current digit
for dig in rut:
if mul > 7:
mul = 2
acc += int(dig) * mul
mul += 1
dv_esperado = 11 - acc%11
if dv_esperado == 11:
dv_esperado = '0'
if dv_esperado == 10:
dv_esperado = 'K'
return str(dv) == str(dv_esperado)
def usuario_administrador(request):
if request.user.usuario.tipo_usuario.id == 1:
return True
return False
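# Worked example for rut_valido (digits chosen for illustration): for RUT
# 12.345.678 the reversed digits weighted 2,3,4,5,6,7 (then cycling back to 2)
# sum to acc = 138, so the expected check digit is 11 - 138 % 11 = 5.
#   rut_valido('12.345.678', '5')  # -> True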
|
[
"diegoquezadapavez@gmail.com"
] |
diegoquezadapavez@gmail.com
|
70c6e2b5c6ccc12bbcd613077a33a8b1dc08bf53
|
f99b5a9c2403f6b285f4d92bb098c6f83a4b5eb5
|
/TensorFlow/tf-1/tf1_3_反向传播.py
|
c724582c2855a95447c5f1f43147439755c9cdfe
|
[] |
no_license
|
kanbaochun/Python
|
a094e62096b697cea237d7828668cfd16e9d99db
|
f0328909020532f4ec41c82e58781995895f82f6
|
refs/heads/master
| 2021-07-08T19:46:27.163932
| 2019-03-12T15:09:19
| 2019-03-12T15:09:19
| 146,909,748
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,328
|
py
|
#coding:utf-8
# backpropagation (two-layer neural network)
import tensorflow as tf
import numpy as np
import os
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
# define constants
BATCH_SIZE = 8
seed = 23455
# generate a synthetic dataset
rng = np.random.RandomState(seed)
X = rng.rand(32, 2)
Y = [[int(x1 + x2 < 1)] for (x1, x2) in X]
# define input/label placeholders and weight parameters
x = tf.placeholder(tf.float32, shape=(None, 2))
y_ = tf.placeholder(tf.float32, shape=(None, 1))
w1 = tf.Variable(tf.random_normal([2,3], stddev=1, seed=1))
w2 = tf.Variable(tf.random_normal([3,1], stddev=1, seed=1))
# define the forward-propagation graph
a = tf.matmul(x, w1)
y = tf.matmul(a, w2)
# define the loss function and the backpropagation (training) step
loss = tf.reduce_mean(tf.square(y - y_))
train_step = tf.train.GradientDescentOptimizer(0.001).minimize(loss)
#train_step = tf.train.MomentumOptimizer(0.001,0.9).minimize(loss)
#train_step = tf.train.AdamOptimizer(0.001).minimize(loss)
# run the graph in a session and print the results
with tf.Session() as sess:
init_op = tf.global_variables_initializer()
sess.run(init_op)
STEPS = 10001
for i in range(STEPS):
start = (i*BATCH_SIZE)%32
end = start + BATCH_SIZE
sess.run(train_step, feed_dict={x:X[start:end], y_:Y[start:end]})
if i%1000 == 0:
total_loss = sess.run(loss, feed_dict={x:X, y_:Y})
print("After %d train_step, total_loss is %g" % (i, total_loss))
|
[
"42867170+kanbaochun@users.noreply.github.com"
] |
42867170+kanbaochun@users.noreply.github.com
|
131e3467c463b97b5c694031145d1d6cf9c290a0
|
68f0e36ec24af35c91ca30edbe88fedc82aaba65
|
/premier.py
|
83f76435500cd1e036b7319fb3500bfdbe950dc6
|
[] |
no_license
|
iamds/hotels
|
454b30a3c0068997a5e33eb0f7c86ec2afdf8efb
|
b839bd79f884df65566baa7b955b9caadda1b9f0
|
refs/heads/main
| 2023-06-02T15:57:10.902903
| 2021-06-24T10:33:12
| 2021-06-24T10:33:12
| 346,646,012
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 755
|
py
|
import requests
import json
import string
def search_name(name):
response = requests.get("https://pro-premier-inn.emergyalabs.com/v1/autocomplete?input=/" + name + "&gplaces%5Bcomponents%5D=country:uk")
a = json.loads(response.text)
return [x for x in a["properties"] if x["brand"] == "PI"]
premierinns = {}
for c1 in string.ascii_lowercase:
for c2 in string.ascii_lowercase:
results = search_name(c1 + c2)
print(c1 + c2)
if len(results) > 0:
for c3 in string.ascii_lowercase:
results = search_name(c1 + c2 + c3)
for result in results:
premierinns[result["suggestion"]] = result["geometry"]["coordinates"]
print(premierinns)
|
[
"dan@schiff.dev"
] |
dan@schiff.dev
|
328b5571d77ee4bb5f0d7536ebd4b8f885f1f31b
|
23736cfcaefcc0a5cf6882bb235e681730ba781d
|
/louqa/qa/views.py
|
47baa3fd7743afc3cdf8fea5e39497a921554a93
|
[] |
no_license
|
glorygithub/shiyanlou
|
26d0c52cb266c725eacc589d2b798641cd6a198c
|
1890aa51ce90e68044d5426b3b026c7cf2ad7203
|
refs/heads/master
| 2020-06-05T07:20:03.224211
| 2015-07-18T18:06:02
| 2015-07-18T18:06:02
| 39,292,610
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 283
|
py
|
#! /usr/bin/env python
# encoding: utf-8
from flask import Blueprint, render_template
qa = Blueprint('qa',__name__,url_prefix='')
@qa.route('/<title>')
@qa.route('/',defaults={'title':None})
def index(title):
return render_template("qa/index.html",title=title,tem_str="world")
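# Usage sketch (assumes an application object created elsewhere in the project):
#   from flask import Flask
#   app = Flask(__name__)
#   app.register_blueprint(qa)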
|
[
"xxx"
] |
xxx
|
2a8aa79f7406ee6bf9e5720bd0ebc4bc06c20565
|
b8847590ffed3fd521b005cfbb473a33e993a243
|
/InterFace_Test_Demo/interface/userAPI.py
|
c0f3dde567425cced006ebe733f34e90d3737701
|
[] |
no_license
|
YellowStarr/gitdir
|
6e229bbc035bbd3de179a0f64ed7570b489cc382
|
5252b80148dfc4c5c519c9c8beb5c297d62d2a93
|
refs/heads/master
| 2021-01-19T20:50:47.076141
| 2017-11-06T02:09:28
| 2017-11-06T02:09:28
| 88,567,880
| 11
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,776
|
py
|
#coding=utf-8
__author__ = 'QiuWenjing'
import requests
from interface.API import MyAPI
class UserAPI:
def __init__(self, url):
self.baseurl = url
self.api = MyAPI()
def user_FollowedSong(self, token, page=1, size=10):
        u'''Songs posted by users you follow
Method:get
@return:
data{advertisement:[{des,url,name,id,image,ios},]},msg,status
'''
headers = {
"token": token,
"Host": self.baseurl,
"User-Agent": "HeiPa/1.0.1 (iPhone; iOS 9.3.5; Scale/2.00)",
"Accept": "*/*",
"Accept-Language": "zh-Hans-CN;q=1",
"Accept-Encoding": "gzip, deflate",
"Content-Type": "application/json;charset=UTF-8",
"Connection": "keep-alive"
}
url = self.baseurl + '/api/user/followed/song'
params = {'page': page, 'size': size}
r = requests.get(url, params=params, headers=headers)
# self.api.writeLog('user_FollowedSong', r.text)
return r
def user_Recommend(self, page=1, size=10):
        u'''Official recommendations'''
url = self.baseurl + '/api/song/recommend'
params = {'page': page, 'size': size}
r = requests.get(url, params=params)
# self.api.writeLog('user_Recommend', r.text)
return r
def user_Focus(self, id, token):
headers = {
"token": token,
"Host": self.baseurl,
"User-Agent": "HeiPa/1.0.1 (iPhone; iOS 9.3.5; Scale/2.00)",
"Accept": "*/*",
"Accept-Language": "zh-Hans-CN;q=1",
"Accept-Encoding": "gzip, deflate",
"Content-Type": "application/json;charset=UTF-8",
"Connection": "keep-alive"
}
url = self.baseurl + '/api/user/focus'
params = {'id': id}
r = requests.post(url, json=params, headers=headers)
# self.api.writeLog('user_Focus', r.text)
return r
def user_cancelFocus(self,id,token):
headers = {
"token": token,
"Host": self.baseurl,
"User-Agent": "HeiPa/1.0.1 (iPhone; iOS 9.3.5; Scale/2.00)",
"Accept": "*/*",
"Accept-Language": "zh-Hans-CN;q=1",
"Accept-Encoding": "gzip, deflate",
"Content-Type": "application/json;charset=UTF-8",
"Connection": "keep-alive"
}
url = self.baseurl + '/api/user/cancelFocus'
params = {'id': id}
r = requests.post(url, json=params, headers=headers)
# self.api.writeLog('user_cancelFocus', r.text)
return r
def user_followedList(self, id):
        u'''Following list'''
url = self.baseurl + '/api/user/followed'
params = {'id': id}
r = requests.get(url, params=params)
# self.api.writeLog('user_followedList', r.text)
return r
def user_fansList(self, id):
        u'''Followers list'''
url = self.baseurl + '/api/user/follower'
params = {'id': id}
r = requests.get(url, params=params)
return r
def user_Violate(self, contact, reportid, reporttype, text):
        '''Report a user.
        @param reportid: number, id of the user being reported
@param reporttype: number
@param text: string
'''
url = self.baseurl + '/api/violate'
params = {'contact': contact, 'reportId': reportid, 'reportType': reporttype, 'text': text}
r = requests.post(url, json=params)
# self.api.writeLog('user_Violate', r.text)
return r
def user_Add_BlackList(self, id, token):
        u'''Add a user to the blacklist.'''
headers = {
"token": token,
"Host": self.baseurl,
"User-Agent": "HeiPa/1.0.1 (iPhone; iOS 9.3.5; Scale/2.00)",
"Accept": "*/*",
"Accept-Language": "zh-Hans-CN;q=1",
"Accept-Encoding": "gzip, deflate",
"Content-Type": "application/json;charset=UTF-8",
"Connection": "keep-alive"
}
url = self.baseurl + '/api/user/blacklist'
params = {'uid': id}
r = requests.post(url, json=params, headers=headers)
# self.api.writeLog('user_Add_BlackList', r.text)
return r
def user_Del_BlackList(self, id, token):
        u'''Remove a user from the blacklist.'''
headers = {
"token": token,
"Host": self.baseurl,
"User-Agent": "HeiPa/1.0.1 (iPhone; iOS 9.3.5; Scale/2.00)",
"Accept": "*/*",
"Accept-Language": "zh-Hans-CN;q=1",
"Accept-Encoding": "gzip, deflate",
"Content-Type": "application/json;charset=UTF-8",
"Connection": "keep-alive"
}
url = self.baseurl + '/api/user/blacklist/delete'
params = {'uid': id}
r = requests.post(url, json=params, headers=headers)
# self.api.writeLog('user_Del_BlackList', r.text)
return r
def user_BlackList(self, token, page=1):
        u'''Fetch the blacklist.'''
headers = {
"token": token,
"Host": self.baseurl,
"User-Agent": "HeiPa/1.0.1 (iPhone; iOS 9.3.5; Scale/2.00)",
"Accept": "*/*",
"Accept-Language": "zh-Hans-CN;q=1",
"Accept-Encoding": "gzip, deflate",
"Content-Type": "application/json;charset=UTF-8",
"Connection": "keep-alive"
}
url = self.baseurl + '/api/user/blacklist'
params = {'page': page}
r = requests.get(url, params=params, headers=headers)
# self.api.writeLog('user_BlackList', r.text)
# r = r.json()
return r
def user_Participant_Medley(self, token, page=1, size=10):
        u'''Medleys this user has joined.'''
headers = {
"token": token,
"Host": self.baseurl,
"User-Agent": "HeiPa/1.0.1 (iPhone; iOS 9.3.5; Scale/2.00)",
"Accept": "*/*",
"Accept-Language": "zh-Hans-CN;q=1",
"Accept-Encoding": "gzip, deflate",
"Content-Type": "application/json;charset=UTF-8",
"Connection": "keep-alive"
}
url = self.baseurl + '/api/user/join/medley/underWay'
params = {'page': page, 'size': size}
r = requests.get(url, params=params, headers=headers)
# self.api.writeLog('user_Participant_Medley', r.text)
return r
def user_Create_Medley(self, token, audios, images, latitude, longitude, maxcount, title):
        u'''Create a medley.'''
headers = {
"token": token,
"Host": self.baseurl,
"User-Agent": "HeiPa/1.0.1 (iPhone; iOS 9.3.5; Scale/2.00)",
"Accept": "*/*",
"Accept-Language": "zh-Hans-CN;q=1",
"Accept-Encoding": "gzip, deflate",
"Content-Type": "application/json;charset=UTF-8",
"Connection": "keep-alive"
}
url = self.baseurl + '/api/audio/createMedley'
params = {'audios': audios, 'images': images, 'latitude':latitude, 'longitude':longitude, 'maxCount':maxcount, 'title':title}
r = requests.post(url, json=params, headers=headers)
# self.api.writeLog('user_Create_Medley', r.text)
return r
def user_Medley_Participanter(self, songId):
        u'''All participants in a medley.'''
url = self.baseurl + '/api/medleys/participants/all'
params = {'songId': songId}
r = requests.get(url, params=params)
# self.api.writeLog('user_Medley_Participanter', r.text)
return r
def user_Medley_Participanter_Once(self, songId):
        u'''Participants in a medley, deduplicated.'''
url = self.baseurl + '/api/medleys/participants'
params = {'songId': songId}
r = requests.get(url, params=params)
# self.api.writeLog('user_Medley_Participanter_Once', r.text)
return r
def user_ModifyInfo(self, token, id, userName, phoneNumber, area='1', birthday='2010-01-01', emotionStatus=1, hasFocus='', personalProfile='',
portrait='', sex=1):
        u'''Modify user info.'''
headers = {
"token": token,
"Host": self.baseurl,
"User-Agent": "HeiPa/1.0.1 (iPhone; iOS 9.3.5; Scale/2.00)",
"Accept": "*/*",
"Accept-Language": "zh-Hans-CN;q=1",
"Accept-Encoding": "gzip, deflate",
"Content-Type": "application/json;charset=UTF-8",
"Connection": "keep-alive"
}
url = self.baseurl + '/api/modifyUserInfo'
params = {'area': area, 'birthday': birthday, 'emotionStatus': emotionStatus,
'hasFocus': hasFocus, 'personalProfile': personalProfile, 'phoneNumber': phoneNumber,
'portrait': portrait, 'sex': sex, 'userName': userName, 'id': id}
r = requests.post(url, json=params, headers=headers)
# self.api.writeLog('user_ModifyInfo', r.text)
return r
def user_getUserInfo(self, id):
        u'''Get user info.'''
url = self.baseurl + '/api/getUserInfo'
params = {'id':id}
r = requests.get(url, params=params)
# self.api.writeLog('user_getUserInfo', r.text)
return r
def user_getMyMedley(self, token, status, page=1, size=10):
        u'''My medleys.'''
headers = {
"token": token,
"Host": self.baseurl,
"User-Agent": "HeiPa/1.0.1 (iPhone; iOS 9.3.5; Scale/2.00)",
"Accept": "*/*",
"Accept-Language": "zh-Hans-CN;q=1",
"Accept-Encoding": "gzip, deflate",
"Content-Type": "application/json;charset=UTF-8",
"Connection": "keep-alive"
}
url = self.baseurl + '/api/user/medleys'
params = {'status': status, 'page': page, 'size': size}
r = requests.get(url, params=params, headers=headers)
# self.api.writeLog('user_getMyMedley', r.text)
return r
def user_getMyComplaint(self, token, status, page=1, size=10):
        u'''Get my complaint works.'''
headers = {
"token": token,
"Host": self.baseurl,
"User-Agent": "HeiPa/1.0.1 (iPhone; iOS 9.3.5; Scale/2.00)",
"Accept": "*/*",
"Accept-Language": "zh-Hans-CN;q=1",
"Accept-Encoding": "gzip, deflate",
"Content-Type": "application/json;charset=UTF-8",
"Connection": "keep-alive"
}
url = self.baseurl + '/api/user/complaints'
params = {'status':status,'page':page,'size':size}
r = requests.get(url, params=params, headers=headers)
# self.api.writeLog('user_getMyComplaint', r.text)
return r
def user_getMyRap(self, token, status, page=1, size=10):
        u'''Get my raps (monologues).'''
headers = {
"token": token,
"Host": self.baseurl,
"User-Agent": "HeiPa/1.0.1 (iPhone; iOS 9.3.5; Scale/2.00)",
"Accept": "*/*",
"Accept-Language": "zh-Hans-CN;q=1",
"Accept-Encoding": "gzip, deflate",
"Content-Type": "application/json;charset=UTF-8",
"Connection": "keep-alive"
}
url = self.baseurl + '/api/user/raps'
params = {'status': status, 'page': page, 'size': size}
r = requests.get(url, params=params, headers=headers)
# self.api.writeLog('user_getMyRap', r.text)
return r
def user_Statistic(self, id):
        u'''Get extended user statistics.'''
url = self.baseurl + '/api/user/statistic'
params = {'id': id}
r = requests.get(url, params=params)
# self.api.writeLog('user_Statistic', r.text)
return r
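A short usage sketch for the wrapper above; the base URL is a placeholder, and the JSON shape follows the data/msg/status convention noted in the first docstring rather than a verified contract.

# Illustrative only: 'http://api.example.com' is a made-up host.
if __name__ == '__main__':
    api = UserAPI('http://api.example.com')
    resp = api.user_Recommend(page=1, size=5)   # unauthenticated endpoint
    print(resp.status_code)
    body = resp.json()                          # expected keys: data, msg, status
    print(body.get('status'), body.get('msg'))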
|
[
"sillyapplemi@126.com"
] |
sillyapplemi@126.com
|
2dca3c96dd91576624418597ac8f3deaa54bf9cf
|
3c39628921d2d43696f29f1f4d6ad3d1d6733a2c
|
/Scripts/agendamentos/migrations/0001_initial.py
|
6073508074d68be79ae82c1061ad1ee286023791
|
[] |
no_license
|
jamesonSouza/python_curso_api
|
215bc01a02131d1ae195749ea9c3db383784aad1
|
318616790174ea0e5779cf894f1207982a2630c3
|
refs/heads/main
| 2023-06-11T02:47:52.292849
| 2021-06-28T19:53:12
| 2021-06-28T19:53:12
| 381,142,308
| 0
| 0
| null | 2021-06-28T19:59:28
| 2021-06-28T19:36:01
|
Python
|
UTF-8
|
Python
| false
| false
| 1,157
|
py
|
# Generated by Django 3.2.4 on 2021-06-25 20:49
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('pacientes', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Agendamentos',
fields=[
('id_agendamento', models.AutoField(primary_key=True, serialize=False)),
('data_hora', models.DateTimeField()),
('data_criacao', models.DateTimeField(auto_now_add=True)),
('cancelado', models.BooleanField(default=False)),
('obj', models.TextField(blank=True, null=True)),
('tipo', models.CharField(blank=True, max_length=100, null=True)),
('id_paciente', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='agendamentos', to='pacientes.pacientes')),
],
options={
'db_table': 'agendamentos',
'managed': True,
'unique_together': {('data_hora', 'id_paciente')},
},
),
]
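For reference, a hedged reconstruction of the model this migration would have been generated from; every field and option is read off the CreateModel operation above, while the Pacientes import path is an assumption.

# Sketch of agendamentos/models.py; not taken from the original repository.
from django.db import models

from pacientes.models import Pacientes  # assumed model in the 'pacientes' app


class Agendamentos(models.Model):
    id_agendamento = models.AutoField(primary_key=True)
    data_hora = models.DateTimeField()
    data_criacao = models.DateTimeField(auto_now_add=True)
    cancelado = models.BooleanField(default=False)
    obj = models.TextField(blank=True, null=True)
    tipo = models.CharField(blank=True, max_length=100, null=True)
    id_paciente = models.ForeignKey(
        Pacientes, on_delete=models.CASCADE, related_name='agendamentos')

    class Meta:
        db_table = 'agendamentos'
        managed = True
        unique_together = (('data_hora', 'id_paciente'),)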
|
[
"jameson1708@gmail.com"
] |
jameson1708@gmail.com
|
5fc0adf9e6d9f375ddd581bf9e82f8d4ad454d56
|
71535e1512c99a5aef4a2bd0e03efa82825b106d
|
/unit_testing/test_cron.py
|
c8132d4bc18dcc21d0fbd0299ed99d41bbdbbfa1
|
[
"CC0-1.0",
"LicenseRef-scancode-public-domain"
] |
permissive
|
Stanford-PERTS/yosemite
|
070606b083311fff8e0a45ec5c6248ac81551744
|
e17a5e7d7a786e7bae4d1dc17ce571442178b1a0
|
refs/heads/master
| 2022-10-24T15:02:45.958601
| 2020-06-14T17:38:08
| 2020-06-14T17:38:08
| 272,251,600
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 20,455
|
py
|
"""Testing cron functions."""
from google.appengine.api import taskqueue
import datetime
import time
import unittest
from api import Api
from core import *
from cron import Cron
from named import *
import unit_test_helper
class CronTestCase(unit_test_helper.PopulatedInconsistentTestCase):
def test_aggregate(self):
"""Test aggregation of pds to activities.
Documentation of Yosemite aggregation:
https://docs.google.com/document/d/1tmZhuWMDX29zte6f0A8yXlSUvqluyMNEnFq8qyJq1pA
TODO(chris): actually write the documentation!
"""
cron = Cron(self.internal_api)
# To insulate the expected aggregation stats from changes to the
# populate script, we'll create a separate cohort and classroom. For
# larger things we'll rely on the stuff set by the populate script,
# e.g. self.program.
cohort = self.researcher_api.create('cohort', {
'name': 'DGN 2015',
'code': 'lion mackerel',
'program': self.program.id,
'school': self.school.id,
})
self.researcher_api.associate('set_owner', self.school_admin, cohort)
classroom = self.school_admin_api.create('classroom', {
'name': "English 201",
'user': self.school_admin.id,
'program': self.program.id,
'cohort': cohort.id,
})
student_activities = self.school_admin_api.init_activities(
'student', self.school_admin.id, self.program.id,
cohort_id=cohort.id, classroom_id=classroom.id)
db.get([cohort.key(), classroom.key()])
db.get([a.key() for a in student_activities])
# To test aggregating across multiple users, we'll need several
# students
student_params = {'user_type': 'student', 'classroom': classroom.id}
mystery_finisher = self.public_api.create('user', student_params)
absentee = self.public_api.create('user', student_params)
refusee = self.public_api.create('user', student_params)
expelee = self.public_api.create('user', student_params)
mr_perfect = self.public_api.create('user', student_params)
non_finisher = self.public_api.create('user', student_params)
wrong_name = self.public_api.create('user', student_params)
# This student will be in another classroom, and we won't update her,
# proving that cohort aggregation re-queries more than just the changed
# stuff.
other_classroom = self.school_admin_api.create('classroom', {
'name': "English 202",
'user': self.school_admin.id,
'program': self.program.id,
'cohort': cohort.id,
})
other_student_activities = self.school_admin_api.init_activities(
'student', self.school_admin.id, self.program.id,
cohort_id=cohort.id, classroom_id=other_classroom.id)
other_student = self.public_api.create(
'user', {'user_type': 'student', 'classroom': other_classroom.id})
students = [mystery_finisher, absentee, refusee, expelee, mr_perfect,
non_finisher, wrong_name]
student_keys = [s.key() for s in students]
others = [other_student, other_classroom] + other_student_activities
other_keys = [e.key() for e in others]
### Aggregate initial state
# Assume and simulate that enough time passes between data recording
# and cron execution that entities become consistent.
db.get(student_keys)
db.get(other_keys)
cron.aggregate()
        # Every student has the same aggregation data for both activities
# because no one has done anything yet. So just loop and check against
# the same reference.
for s in db.get(student_keys):
self.assertFalse(s.certified)
correct_stats = {'progress': None}
self.assertEqual(s.aggregation_data[1], correct_stats)
self.assertEqual(s.aggregation_data[2], correct_stats)
# Both activities should be the same also
a1, a2 = db.get([a.key() for a in student_activities])
correct_stats = {
'total_students': 7,
'certified_students': 0,
'certified_study_eligible_dict': {
'n': 0,
'completed': 0,
'makeup_eligible': 0,
'makeup_ineligible': 0,
'uncoded': 0
},
}
self.assertEqual(a1.aggregation_data, correct_stats)
self.assertEqual(a2.aggregation_data, correct_stats)
# The other activities should look like this (this is the last time
# we'll have to check it because we won't be changing it any more):
a1, a2 = db.get([a.key() for a in other_student_activities])
correct_stats = {
'total_students': 1,
'certified_students': 0,
'certified_study_eligible_dict': {
'n': 0,
'completed': 0,
'makeup_eligible': 0,
'makeup_ineligible': 0,
'uncoded': 0
},
}
self.assertEqual(a1.aggregation_data, correct_stats)
self.assertEqual(a2.aggregation_data, correct_stats)
# Check cohort (has our seven plus one other)
cohort = db.get(cohort.key())
correct_cohort_stats = {
'unscheduled': 2, 'scheduled': 0, 'behind': 0, 'completed': 0,
'incomplete_rosters': 2,
'total_students': 8,
'certified_students': 0,
'certified_study_eligible_dict': {
'n': 0,
'completed': 0,
'makeup_eligible': 0,
'makeup_ineligible': 0,
'uncoded': 0
},
}
self.assertEqual(cohort.aggregation_data[1], correct_cohort_stats)
self.assertEqual(cohort.aggregation_data[2], correct_cohort_stats)
### Pretend the school admin just certified some students and aggregate
### again.
# NOT changing mystery_finisher proves that the aggregator re-queries
# for unchanged users associated with the same activity.
certified_students = [absentee, refusee, expelee, mr_perfect,
non_finisher]
for s in certified_students:
s.certified = True
db.put(certified_students)
# Assume and simulate that enough time passes between data recording
# and cron execution that entities become consistent.
db.get(student_keys)
cron.aggregate()
# Every student should be the same for both activities.
for s in db.get(student_keys):
correct_stats = {'progress': None}
self.assertEqual(s.aggregation_data[1], correct_stats)
self.assertEqual(s.aggregation_data[2], correct_stats)
# Both activities should be the same also
a1, a2 = db.get([a.key() for a in student_activities])
correct_stats = {
'total_students': 7,
'certified_students': 5,
'certified_study_eligible_dict': {
'n': 5,
'completed': 0,
'makeup_eligible': 0,
'makeup_ineligible': 0,
'uncoded': 5
},
}
self.assertEqual(a1.aggregation_data, correct_stats)
self.assertEqual(a2.aggregation_data, correct_stats)
# Check cohort
cohort = db.get(cohort.key())
correct_cohort_stats = {
'unscheduled': 2, 'scheduled': 0, 'behind': 0, 'completed': 0,
'incomplete_rosters': 2,
'total_students': 8,
'certified_students': 5,
'certified_study_eligible_dict': {
'n': 5,
'completed': 0,
'makeup_eligible': 0,
'makeup_ineligible': 0,
'uncoded': 5
},
}
self.assertEqual(cohort.aggregation_data[1], correct_cohort_stats)
self.assertEqual(cohort.aggregation_data[2], correct_cohort_stats)
### Simulate the first session, with two students absent and one who
### doesn't finish. Also schedule the first activity.
absentee.s1_status_code = 'A' # code for absent
refusee.s1_status_code = 'PR' # code for parent refusal
expelee.s1_status_code = 'E' # code for expelled
wrong_name.s1_status_code = 'MWN' # code for merge: wrong name
db.put([absentee, refusee, expelee, wrong_name])
progress_pds = []
pd_params = {
'variable': 's1__progress',
'program': self.program.id,
'activity': student_activities[0].id,
'activity_ordinal': 1,
}
# Progress on activity 1 for those who finished.
for s in [mr_perfect, mystery_finisher, wrong_name]:
pd_params['value'] = '100'
pd_params['scope'] = s.id
progress_pds.append(Api(s).create('pd', pd_params))
# Progress on activity 1 for those who didn't finish.
pd_params['value'] = '50'
pd_params['scope'] = non_finisher.id
progress_pds.append(Api(non_finisher).create('pd', pd_params))
a1.scheduled_date = datetime.date.today()
a1.put()
# Assume and simulate that enough time passes between data recording
# and cron execution that entities become consistent.
db.get([pd.key() for pd in progress_pds] +
[absentee.key(), refusee.key(), expelee.key(), a1.key()])
cron.aggregate()
# Check that user stats are right.
correct_stats = [
{'progress': 100}, # mystery_finisher
{'progress': None}, # absentee
{'progress': None}, # refusee
{'progress': None}, # expelee
{'progress': 100}, # mr_perfect
{'progress': 50}, # non_finisher
{'progress': 100}, # wrong_name
]
for index, s in enumerate(students):
s = db.get(s.key())
self.assertEqual(s.aggregation_data[1], correct_stats[index])
# Check that activity stats are right.
a1 = db.get(student_activities[0].key())
correct_stats = {
# Total has decreased b/c MWN students are dropped from the counts
# completely. This is because they're not really a person, they're
# a duplicate representation of a different real person.
'total_students': 6,
'certified_students': 5,
'certified_study_eligible_dict': {
'n': 4,
'completed': 1,
'makeup_eligible': 1,
'makeup_ineligible': 1,
'uncoded': 1
},
}
self.assertEqual(a1.aggregation_data, correct_stats)
# Activity 2 shouldn't register any of the progress we've made on
# activity 1.
a2 = db.get(student_activities[1].key())
correct_stats = {
'total_students': 6,
'certified_students': 5,
'certified_study_eligible_dict': {
'n': 5,
'completed': 0,
'makeup_eligible': 0,
'makeup_ineligible': 0,
'uncoded': 5
},
}
self.assertEqual(a2.aggregation_data, correct_stats)
# Check cohort (again, similar, but with a larger 'all' total).
cohort = db.get(cohort.key())
correct_cohort_stats = {
1: {
'unscheduled': 1, 'scheduled': 1, 'behind': 0, 'completed': 0,
'incomplete_rosters': 2,
'total_students': 7,
'certified_students': 5,
'certified_study_eligible_dict': {
'n': 4,
'completed': 1,
'makeup_eligible': 1,
'makeup_ineligible': 1,
'uncoded': 1
},
},
2: {
'unscheduled': 2, 'scheduled': 0, 'behind': 0, 'completed': 0,
'incomplete_rosters': 2,
'total_students': 7,
'certified_students': 5,
'certified_study_eligible_dict': {
'n': 5,
'completed': 0,
'makeup_eligible': 0,
'makeup_ineligible': 0,
'uncoded': 5
},
}
}
self.assertEqual(cohort.aggregation_data, correct_cohort_stats)
def test_aggregate_handles_duplicates(self):
"""If multiple progress pd, aggregator chooses the largest one."""
# Create w/o the api to intentionally create duplicates
pd_id1 = 'Pd_1.' + self.student.id
pd_id2 = 'Pd_2.' + self.student.id
pd_id3 = 'Pd_3.' + self.student.id
pd_id4 = 'Pd_4.' + self.student.id
pd1 = Pd(key_name=pd_id1, id=pd_id1, parent=self.student,
scope=self.student.id, program=self.program.id,
activity_ordinal=1,
variable='s1__progress', value='66', public=True)
pd2 = Pd(key_name=pd_id2, id=pd_id2, parent=self.student,
scope=self.student.id, program=self.program.id,
activity_ordinal=1,
variable='s1__progress', value='33', public=True)
pd3 = Pd(key_name=pd_id3, id=pd_id3, parent=self.student,
scope=self.student.id, program=self.program.id,
activity_ordinal=2,
variable='s2__progress', value='100', public=True)
pd4 = Pd(key_name=pd_id4, id=pd_id4, parent=self.student,
scope=self.student.id, program=self.program.id,
activity_ordinal=2,
variable='s2__progress', value='66', public=True)
# Put them in a confusing order on purpose, to try to get the
# aggregator to process them from largest to smallest.
db.put([pd1, pd3])
db.put([pd2, pd4])
# Prove that there are duplicates.
duplicates = self.student_api.get('pd', {}, ancestor=self.student)
self.assertEquals(len(duplicates), 4)
# Aggregate and check results.
cron = Cron(self.internal_api)
cron.aggregate()
student = db.get(self.student.key())
self.assertEquals(
student.aggregation_data,
{1: {'progress': 66}, 2: {'progress': 100}})
# Student should also have COM for s2 b/c they hit 100.
self.assertEquals(student.s2_status_code, 'COM')
def test_aggregation_does_not_change_modified_time(self):
cron = Cron(self.internal_api)
# Create a fake set of data to aggregate: A cohort, classroom,
# student activities, a student, and a pd value for that student.
cohort = self.researcher_api.create('cohort', {
'name': 'DGN 2015',
'code': 'lion mackerel',
'program': self.program.id,
'school': self.school.id,
})
self.researcher_api.associate('set_owner', self.school_admin, cohort)
classroom = self.school_admin_api.create('classroom', {
'name': "English 201",
'user': self.school_admin.id,
'program': self.program.id,
'cohort': cohort.id,
})
student_activities = self.school_admin_api.init_activities(
'student', self.school_admin.id, self.program.id,
cohort_id=cohort.id, classroom_id=classroom.id)
db.get([cohort.key(), classroom.key()])
db.get([a.key() for a in student_activities])
student_params = {'user_type': 'student', 'classroom': classroom.id}
student = self.public_api.create('user', student_params)
student = db.get(student.key())
pd_params = {
'variable': 's1__progress',
'program': self.program.id,
'activity': student_activities[0].id,
'activity_ordinal': 1,
'value': 100,
'scope': student.id,
}
pd = Api(student).create('pd', pd_params)
db.get(pd.key())
# First prove that modified times ARE set in this context for normal
# writes. The student's progress has NOT been written to the user
# entity yet.
modified_before = student.modified
time.sleep(0.1)
student.first_name = 'mister'
student.put()
modified_after = db.get(student.key()).modified
self.assertEquals(student.aggregation_data,
{1: {'progress': None}, 2: {'progress': None}})
self.assertNotEqual(modified_before, modified_after)
# Now aggregate, which should write the pd's progress value to the
# user, but should NOT update the user's modified time.
modified_before = modified_after
time.sleep(0.1)
cron.aggregate()
student = db.get(student.key())
modified_after = db.get(student.key()).modified
self.assertEquals(student.aggregation_data,
{1: {'progress': 100}, 2: {'progress': None}})
self.assertEquals(modified_before, modified_after)
def test_aggregate_more_than_thirty_classrooms(self):
"""Aggregation uses an IN filter, which breaks App Engine when it has
more than 30 elements in it. There should be code to handle this."""
cron = Cron(self.internal_api)
# To insulate the expected aggregation stats from changes to the
# populate script, we'll create a separate cohort and classroom. For
# larger things we'll rely on the stuff set by the populate script,
# e.g. self.program.
cohort = self.researcher_api.create('cohort', {
'name': 'DGN 2015',
'code': 'lion mackerel',
'program': self.program.id,
'school': self.school.id,
})
db.get(cohort.key())
self.researcher_api.associate('set_owner', self.school_admin, cohort)
# Create 31 different students in as many different classrooms.
classrooms, student_activities, students = [], [], []
        for x in range(31):
c = self.school_admin_api.create('classroom', {
'name': "English " + str(x),
'user': self.school_admin.id,
'program': self.program.id,
'cohort': cohort.id,
})
c = db.get(c.key())
acts = self.school_admin_api.init_activities(
'student', self.school_admin.id, self.program.id,
cohort_id=cohort.id, classroom_id=c.id)
acts = db.get([a.key() for a in acts])
s = self.public_api.create('user', {'user_type': 'student',
'classroom': c.id})
classrooms.append(c)
student_activities += acts
students.append(s)
# Bring them all into full db consistency.
db.get([s.key() for s in students])
cron.aggregate()
# All activities should show one student.
student_activities = db.get([a.key() for a in student_activities])
correct_stats = {
'total_students': 1,
'certified_students': 0,
'certified_study_eligible_dict': {
'n': 0,
'completed': 0,
'makeup_eligible': 0,
'makeup_ineligible': 0,
'uncoded': 0
},
}
for a in student_activities:
self.assertEqual(a.aggregation_data, correct_stats)
def test_aggregate_queues_tasks(self):
cron = Cron(self.internal_api)
queue = taskqueue.Queue(name='default')
# Since this test inherits a populated stub datastore, some tasks
# have already been queued. Set that as the baseline.
baseline = queue.fetch_statistics().tasks
# Now aggregate the entities that have been pre-populated, which
# includes one classroom and one cohort. This should queue 3 tasks: one
# classroom roster, one cohort roster, and one cohort schedule.
cron.aggregate()
post_aggregation = queue.fetch_statistics().tasks
self.assertEqual(post_aggregation - baseline, 3)
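The module imports unittest but defines no entry point; a minimal, assumed runner for executing these cases directly (provided the App Engine stubs configured by unit_test_helper also work outside a larger harness) would be:

if __name__ == '__main__':
    unittest.main()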
|
[
"chris@perts.net"
] |
chris@perts.net
|
6dabdc807370bd4b3c31d1fe008b1c317f826fcd
|
8dc84558f0058d90dfc4955e905dab1b22d12c08
|
/chrome/browser/android/vr/DEPS
|
4bb15ad06141975212205cdb895c4d52b1e9f069
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
meniossin/src
|
42a95cc6c4a9c71d43d62bc4311224ca1fd61e03
|
44f73f7e76119e5ab415d4593ac66485e65d700a
|
refs/heads/master
| 2022-12-16T20:17:03.747113
| 2020-09-03T10:43:12
| 2020-09-03T10:43:12
| 263,710,168
| 1
| 0
|
BSD-3-Clause
| 2020-05-13T18:20:09
| 2020-05-13T18:20:08
| null |
UTF-8
|
Python
| false
| false
| 196
|
include_rules = [
"+cc/base",
"+cc/layers",
"+device/vr",
"+services/metrics/public/cpp/ukm_builders.h",
"+third_party/gvr-android-keyboard/src",
"+third_party/gvr-android-sdk/src",
]
|
[
"arnaud@geometry.ee"
] |
arnaud@geometry.ee
|
|
0ffa3e481eb23c0ae940935b0063cd736086e603
|
08f30ea954d0627f0ae456d1bf22f74b7a809263
|
/exam/61a-su20-mt1/q1/tests/q1.py
|
3e76c09722005ee9c4f2d194686148a269f38828
|
[] |
no_license
|
gintonyc/UCBerkeley_cs61a_SICP
|
da04023deac611b1485921fb83de7926d5d25841
|
61482385dc41e237110b16ca5444a8ca6b25b864
|
refs/heads/master
| 2023-06-02T23:56:21.435348
| 2021-06-10T08:27:23
| 2021-06-10T08:27:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 841
|
py
|
test = {
'name': 'q1',
'points': 10,
'suites': [
{
'cases': [
{
'code': r"""
>>> put1 = kv()
>>> get2, put2 = put1('cat', 'animal')
>>> get3, put3 = put2('table', 'furniture')
>>> get4, put4 = put3('cup', 'utensil')
>>> get5, put5 = put4('thesis', 'paper')
>>> get5('thesis')
'paper'
>>> get5('cup')
'utensil'
>>> get5('table')
'furniture'
>>> get5('cat')
'animal'
>>> get3('cup')
0
""",
'hidden': False
}
],
'scored': True,
'setup': 'from q1 import *',
'teardown': '',
'type': 'doctest'
}
]
}
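One closure-based implementation that satisfies the doctest above (a sketch, not the official solution): kv() returns a put function; each put(key, value) returns a (get, put) pair over an extended immutable store, and get falls back to 0 for missing keys, matching get3('cup') above.

def kv(pairs=()):
    """Return a put function closed over an immutable tuple of (key, value) pairs."""
    def put(key, value):
        new_pairs = ((key, value),) + tuple(pairs)
        def get(k):
            # Newest binding wins; 0 signals a missing key, per the doctest.
            for key2, value2 in new_pairs:
                if key2 == k:
                    return value2
            return 0
        return get, kv(new_pairs)
    return put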
|
[
"tonythemayfly@gmail.com"
] |
tonythemayfly@gmail.com
|
4975b6114aede5494545e055742b32230ecc0e5a
|
3093520d03fb2701e97b60755dd55534efa8d6c6
|
/yoursite/settings.py
|
21683f82519698cd361014edbea727b7e8c63134
|
[] |
no_license
|
bradwright/django-layout
|
6da01d613135ffa37a1b335f961a0e7e0d2c8baf
|
9b2fe6b862237217e56d00b1f634e15791a96b26
|
refs/heads/master
| 2020-04-10T16:54:26.939487
| 2009-08-29T08:41:14
| 2009-08-29T08:41:14
| 76,139
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,164
|
py
|
# Django settings for yoursite project.
import os
TEMPLATE_DEBUG = DEBUG = True
ADMINS = (
# ('Your Name', 'your_email@domain.com'),
)
MANAGERS = ADMINS
DATABASE_ENGINE = '' # 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
DATABASE_NAME = '' # Or path to database file if using sqlite3.
DATABASE_USER = '' # Not used with sqlite3.
DATABASE_PASSWORD = '' # Not used with sqlite3.
DATABASE_HOST = '' # Set to empty string for localhost. Not used with sqlite3.
DATABASE_PORT = '' # Set to empty string for default. Not used with sqlite3.
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
# note extra change - this path will always work with this settings file
MEDIA_ROOT = os.path.join(os.path.dirname(__file__), '../static')
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = ''
# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
ADMIN_MEDIA_PREFIX = '/media/'
# Make this unique, and don't share it with anybody.
SECRET_KEY = '(*77@q20%l7t-d6kear3wfu_x^&3dh2(&h$_6htf#up25t*3k6'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.load_template_source',
'django.template.loaders.app_directories.load_template_source',
# 'django.template.loaders.eggs.load_template_source',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
)
TEMPLATE_CONTEXT_PROCESSORS = (
'django.core.context_processors.auth',
'django.core.context_processors.debug',
'django.core.context_processors.i18n',
'django.core.context_processors.media',
'django.core.context_processors.request', #populate the context with the contents of request
)
ROOT_URLCONF = 'yoursite.urls'
# ensure primary template path is always below the current directory
TEMPLATE_DIRS = (
    os.path.join(os.path.dirname(__file__), '../templates'),  # trailing comma keeps this a tuple
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'static_management',
)
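For a quick local run under this legacy (pre-Django-1.2) settings style, the empty DATABASE_* values might be filled like this; the sqlite filename is an assumption.

DATABASE_ENGINE = 'sqlite3'
DATABASE_NAME = os.path.join(os.path.dirname(__file__), 'dev.db')  # hypothetical path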
|
[
"brad@intranation.com"
] |
brad@intranation.com
|
d79100277593e2e79aec7f47ffa6a0ad0122ce1c
|
41868e541e3eb87cd13969baf79e2dca73952594
|
/queries.py
|
685a08e24a21381b2bce8a60e33cce4de131fc57
|
[] |
no_license
|
JulianAuza/wish_list
|
16950fa8ef6b92c607450615e9b974b6348178c6
|
79b4514578777070b9e07ae7a1a7f439e5fc728b
|
refs/heads/master
| 2021-01-01T19:17:01.218303
| 2017-07-27T15:46:50
| 2017-07-27T15:46:50
| 98,554,220
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 354
|
py
|
import django

django.setup()  # needed when run as a standalone script with DJANGO_SETTINGS_MODULE set

from apps.wishlist.models import User

# Courses.objects.create(course_name='Super fun', desc='YAS')
User.objects.create(name='Kim Hyuna', email='wondergirls@jyp.com', password='12345678', user_name='4minute')

# users = User.objects.all()
# for user in users:
#     print user.user_name, user.email, user.password
|
[
"julianauza@gmail.com"
] |
julianauza@gmail.com
|