| blob_id (stringlengths 40..40) | directory_id (stringlengths 40..40) | path (stringlengths 2..616) | content_id (stringlengths 40..40) | detected_licenses (listlengths 0..69) | license_type (stringclasses, 2 values) | repo_name (stringlengths 5..118) | snapshot_id (stringlengths 40..40) | revision_id (stringlengths 40..40) | branch_name (stringlengths 4..63) | visit_date (timestamp[us]) | revision_date (timestamp[us]) | committer_date (timestamp[us]) | github_id (int64, 2.91k..686M, ⌀) | star_events_count (int64, 0..209k) | fork_events_count (int64, 0..110k) | gha_license_id (stringclasses, 23 values) | gha_event_created_at (timestamp[us]) | gha_created_at (timestamp[us]) | gha_language (stringclasses, 213 values) | src_encoding (stringclasses, 30 values) | language (stringclasses, 1 value) | is_vendor (bool, 2 classes) | is_generated (bool, 2 classes) | length_bytes (int64, 2..10.3M) | extension (stringclasses, 246 values) | content (stringlengths 2..10.3M) | authors (listlengths 1..1) | author_id (stringlengths 0..212) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f643f9a47de4c1ca115121662e6b5a7e52da83c7
|
d23c012269057afe3556711fd514efce20085d75
|
/api/routes.py
|
f8fa15a007349ac46eaa1ead2e4c302043d7cdd5
|
[
"MIT"
] |
permissive
|
bobruk76/E9
|
c73464941c8f1955fc4145f852aebea43cb572e2
|
b035a578636d1192b755069e7095d355065c8872
|
refs/heads/main
| 2023-01-04T18:52:29.505694
| 2020-10-25T16:58:49
| 2020-10-25T16:58:49
| 304,788,800
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,568
|
py
|
from flask_login import login_user, login_required, logout_user, current_user
from werkzeug.utils import redirect
from wtforms.ext.appengine.db import model_form
from api import app, login_manager, bcrypt, db
from flask import Flask, request, render_template, flash, session
from api.forms import CreateUserForm, LoginForm, EventForm
from api.models import User, Event
from api.service import get_all_events, new_user, new_event, get_all_users, get_user, get_user_by_id, del_event, \
get_event_by_id
@app.route('/')
def index():
results = get_all_events()
return render_template('index.html', results=results)
@app.route('/event/')
def list_events():
if current_user.is_authenticated:
current_user_id = current_user.get_id()
events = get_all_events()
return render_template('list_events.html', data=events, current_user_id=current_user_id)
return redirect('/')
@app.route('/event/<event_id>/del', methods=['GET', 'POST', 'PUT', ])
def event_del(event_id):
if current_user.is_authenticated:
user_id = current_user.get_id()
del_event(event_id, user_id)
return redirect('/event/')
return redirect('/')
@app.route('/event/<event_id>', methods=['GET', 'POST'])
def event_actions(event_id):
def date_check(_datetime):
if _datetime == '':
return None
return _datetime
if current_user.is_authenticated:
_event = None
if event_id.isnumeric():
_event = get_event_by_id(int(event_id))
event_form = EventForm(obj=_event)
else:
event_form = EventForm()
if request.method == 'POST':
title = request.form.get('title')
description = request.form.get('description')
timestamp_begin = date_check(request.form.get('timestamp_begin'))
timestamp_end = date_check(request.form.get('timestamp_end'))
user_id = current_user.get_id()
if _event:
_event = \
new_event(user_id=user_id,
title=title,
description=description,
timestamp_begin=timestamp_begin,
timestamp_end=timestamp_end)
else:
_event = \
new_event(user_id=user_id,
title=title,
description=description,
timestamp_begin=timestamp_begin,
timestamp_end=timestamp_end)
return redirect('/event/')
return render_template('edit_event.html', form=event_form)
flash("Только зарегистрированный пользователь может добавлять событие")
return redirect('/')
@login_manager.user_loader
def user_loader(user_id):
return get_user_by_id(user_id)
@app.route("/login", methods=["GET", "POST"])
def login():
form = LoginForm()
if form.validate_on_submit():
user = get_user(form.name.data)
if user:
if bcrypt.check_password_hash(user.password, form.password.data):
user.authenticated = True
db.session.add(user)
db.session.commit()
login_user(user, remember=True)
return redirect("/")
else:
flash("Неверное имя пользователя или пароль")
return render_template("login_user.html", form=form)
@app.route('/logout')
@login_required
def logout():
logout_user()
if session.get('was_once_logged_in'):
del session['was_once_logged_in']
flash('You must log in')
return redirect('/login')
@app.route('/user/')
def list_users():
users = get_all_users()
return render_template('list.html', data=users, title=u"List of users")
@app.route("/user/new", methods=["GET", "POST"])
def create_user():
form = CreateUserForm()
if form.is_submitted():
name = request.form.get('name')
email = request.form.get('email')
password = request.form.get('password')
if not get_user(name):
user = new_user(name, email, password)
login_user(user, remember=True)
return redirect("/event/")
else:
flash('Such a user already exists!')
return render_template("create_user.html", form=form)
|
[
"vladimir.m.polyakov@gmail.com"
] |
vladimir.m.polyakov@gmail.com
|
101f05c1b708685c9f582744ecc1a14472bcf253
|
30b2b8a449558fc327daebf51096bf251ef6a8e9
|
/scripts/Assemble.py
|
389daba962491debc1e343d62c2dfc8ec94ca8d5
|
[
"Zlib",
"MIT",
"LicenseRef-scancode-public-domain"
] |
permissive
|
ekg/shasta
|
0ac3462d0e3f73375a1b583967992b7e5deba1fd
|
e2fd3c3d79fb4cafe77c62f6af2fef46f7a04b01
|
refs/heads/master
| 2020-06-02T12:59:50.717211
| 2019-06-10T12:13:22
| 2019-06-10T12:13:22
| 191,161,600
| 0
| 0
|
NOASSERTION
| 2019-06-10T12:13:04
| 2019-06-10T12:13:03
| null |
UTF-8
|
Python
| false
| false
| 686
|
py
|
#!/usr/bin/python3
import shasta
import GetConfig
import ast
# Read the config file.
config = GetConfig.getConfig()
# Create the Assembler.
a = shasta.Assembler()
# Set up the consensus caller.
a.setupConsensusCaller(config['Assembly']['consensusCaller'])
# Figure out if we should use marginPhase, and if so set it up.
useMarginPhase = ast.literal_eval(config['Assembly']['useMarginPhase'])
if useMarginPhase:
a.setupMarginPhase()
a.accessKmers()
a.accessMarkers()
a.accessMarkerGraphVertices()
a.accessMarkerGraphEdges()
a.accessAssemblyGraphEdges()
a.accessAssemblyGraphEdgeLists()
a.accessMarkerGraphVertexRepeatCounts()
a.accessMarkerGraphEdgeConsensus()
a.assemble()
|
[
"paoloczi@users.noreply.github.com"
] |
paoloczi@users.noreply.github.com
|
39340c0cfbce2f2177bdf337c5b9ae7da9cdfa9d
|
4489ea2b304c600da545302b15db9040795e70f9
|
/args.py
|
71bf1b7e18ddcabf6f836e196da54c959d4576ce
|
[] |
no_license
|
OnlyM1ss/Crypto
|
3bf8f5666467fa99e0840eaaec283ed9782be7ae
|
4d080f58394e94cd2adf6259a68905963a10a131
|
refs/heads/master
| 2020-12-06T08:43:09.591302
| 2020-01-07T20:55:04
| 2020-01-07T20:55:04
| 232,413,697
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 443
|
py
|
import argparse
parser = argparse.ArgumentParser(description='Decoder')
parser.add_argument('word', type=str, help='word to encode')
parser.add_argument('shift', type=str, help='shift step')
parser.add_argument('dec16', type=str, help='encoding 16')
parser.add_argument('dec32', type=str, help='encoding 32')
parser.add_argument('dec64', type=str, help='encoding 64')
|
[
"noreply@github.com"
] |
OnlyM1ss.noreply@github.com
|
e3da1af274d2a3e36400e5fada6e6f02304611b8
|
aef7c335667a90c8d89644baa091e292a6ea2932
|
/conanfile.py
|
c47ac7b9e05c65d89143304192754a99dc027387
|
[] |
no_license
|
aspectspro/EnterpriseDomainObjects
|
c3a3ba6bcddf98fa1c6b31ab0327a125803a70cd
|
2b61ce66186576209455c5f3f01b6910a429dbe6
|
refs/heads/master
| 2023-04-25T02:17:09.316413
| 2020-11-29T13:53:17
| 2020-11-29T13:53:17
| 307,079,167
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,794
|
py
|
from conans import ConanFile, CMake, tools
class DatabaseConan(ConanFile):
name = "EnterpriseDomainObjects"
author = "Greg greg@aspectspro.com"
description = "Enterprise Domain Objects"
def requirements(self):
self.requires("AbstractObjects/2.1.1")
#dynamically sets version number from master tag or testing if other branches
def set_version(self):
git = tools.Git(folder=self.recipe_folder)
tag = "%s" % (git.get_tag())
if tag == "None":
tag = "testing"
else:
tag = "%s" % (git.get_tag())
self.version = "%s" % (tag)
generators = "cmake"
settings = "os", "arch", "compiler"
options = {"shared": [True, False]}
default_options = "shared=False"
generators = "cmake"
exports_sources = "src/*"
def configure(self):
if self.settings.compiler == "Visual Studio":
del self.settings.compiler.runtime
#Builds the Release configuration (the Debug configuration below is commented out)
def build(self):
# cmake_release = CMake(self, build_type="Debug")
# cmake_release.configure(source_folder="src")
# cmake_release.build()
cmake_debug = CMake(self, build_type="Release")
cmake_debug.configure(source_folder="src")
cmake_debug.build()
def package(self):
self.copy("*.h", dst="include", src="src")
self.copy("*.lib", dst="lib", keep_path=False)
self.copy("*.dll", dst="bin", keep_path=False)
self.copy("*.so", dst="lib", keep_path=False)
self.copy("*.dylib", dst="lib", keep_path=False)
self.copy("*.a", dst="lib", keep_path=False)
def package_info(self):
self.cpp_info.libs = [self.name]
# self.cpp_info.debug.libs = ["%s_d" % (self.name)]
|
[
"greg@aspectspro.com"
] |
greg@aspectspro.com
|
7f2e99bfc97cb0b7fc22df73b15f8e1a322d6df3
|
c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c
|
/cases/synthetic/exp-big-393.py
|
20bce7381a23b7eb2eaa3484512995b694ea0636
|
[] |
no_license
|
Virtlink/ccbench-chocopy
|
c3f7f6af6349aff6503196f727ef89f210a1eac8
|
c7efae43bf32696ee2b2ee781bdfe4f7730dec3f
|
refs/heads/main
| 2023-04-07T15:07:12.464038
| 2022-02-03T15:42:39
| 2022-02-03T15:42:39
| 451,969,776
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,178
|
py
|
# Compute x**y
def exp(x: int, y: int) -> int:
a: int = 0
a2: int = 0
a3: int = 0
a4: int = 0
a5: int = 0
def f(i: int) -> int:
nonlocal a
nonlocal a2
nonlocal a3
nonlocal a4
nonlocal a5
def geta() -> int:
return a
if i <= 0:
return geta()
else:
a = a * x
a2 = a * x
a3 = a * x
a4 = a * x
a5 = a * x
return f(i-1)
a = 1
a2 = 1
a3 = 1
a4 = 1
a5 = 1
return f(y)
def exp2(x: int, y: int, x2: int, y2: int) -> int:
a: int = 0
a2: int = 0
a3: int = 0
a4: int = 0
a5: int = 0
def f(i: int) -> int:
nonlocal a
nonlocal a2
nonlocal a3
nonlocal a4
nonlocal a5
def geta() -> int:
return a
if i <= 0:
return geta()
else:
a = a * x
a2 = a * x
a3 = a * x
a4 = a * x
a5 = a * x
return f(i-1)
a = 1
a2 = 1
a3 = 1
a4 = 1
a5 = 1
return f(y)
def exp3(x: int, y: int, x2: int, y2: int, x3: int, y3: int) -> int:
a: int = 0
a2: int = 0
a3: int = 0
a4: int = 0
a5: int = 0
def f(i: int) -> int:
nonlocal a
nonlocal a2
nonlocal a3
nonlocal a4
nonlocal a5
def geta() -> int:
return a
if i <= 0:
return geta()
else:
a = a * x
a2 = a * x
a3 = a * x
a4 = a * x
a5 = a * x
return f(i-1)
a = 1
a2 = 1
a3 = 1
a4 = 1
a5 = 1
return f(y)
def exp4(x: int, y: int, x2: int, y2: int, x3: int, y3: int, x4: int, y4: int) -> int:
a: int = 0
a2: int = 0
a3: int = 0
a4: int = 0
a5: int = 0
def f(i: int) -> int:
nonlocal a
nonlocal a2
nonlocal a3
nonlocal a4
nonlocal a5
def geta() -> int:
return a
if i <= 0:
return geta()
else:
a = a * x
a2 = a * x
a3 = a * x
a4 = a * x
a5 = a * x
return f(i-1)
a = 1
a2 = 1
a3 = 1
a4 = 1
a5 = 1
return f(y)
def exp5(x: int, y: int, x2: int, y2: int, x3: int, y3: int, x4: int, y4: int, x5: int, y5: int) -> int:
a: int = 0
a2: int = 0
a3: int = 0
a4: int = 0
a5: int = 0
def f(i: int) -> int:
nonlocal a
nonlocal a2
nonlocal a3
nonlocal a4
nonlocal a5
def geta() -> int:
return a
if i <= 0:
return geta()
else:
a = a * x
a2 = a * x
a3 = a * x
a4 = a * x
a5 = a * x
return f(i-1)
a = 1
a2 = 1
a3 = 1
a4 = 1
a5 = 1
return f(y)
# Input parameter
n:int = 42
n2:int = 42
n3:int = 42
n4:int = 42
n5:int = 42
# Run [0, n]
i:int = 0
i2:int = 0
i3:int = 0
i4:int = 0
i5:int = 0
# Crunch
while i <= n:
print(exp(2, i % 31))
i = i + 1
|
[
"647530+Virtlink@users.noreply.github.com"
] |
647530+Virtlink@users.noreply.github.com
|
b4ee1ff0b7b7870c19d3713489b003ee77e5f021
|
60162e7eb238f166d6166632a17606bb467ef1c7
|
/Desktwordcount/views.py
|
b2c8dbc3d3a49ce1e5bc430a940625d3e8f8131f
|
[] |
no_license
|
qdqzy/Desktwordcount
|
1d68b32c58270fa61d13969da2a687fbcc35d6de
|
01bb4bc90e2d0dfeb3a61823eabff8cd01e12eb1
|
refs/heads/master
| 2020-06-05T02:56:38.812008
| 2019-06-17T08:02:35
| 2019-06-17T08:02:35
| 192,289,752
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 477
|
py
|
from django.http import HttpResponse
from django.shortcuts import render
def home(request):
return render(request, 'home.html')
def count(request):
text = request.GET['text']
print(text)
result = {}
for i in text:
if i not in result:
result[i] = 1
else:
result[i] += 1
result = sorted(result.items(), key=lambda x: x[1], reverse=True)
return render(request, 'count.html', {'count_result': result})
|
[
"876138262@qq.com"
] |
876138262@qq.com
|
51128a92f3012194ab45757a34a18ef7fc86125e
|
c926da2758b5687cb3a2ea439653e4713f2fc423
|
/Lib/site-packages/registration/forms.py
|
c6c00de742f58979232d8b7519d5cef19839f2c7
|
[] |
no_license
|
BloodyFaces/StoryShare
|
b66f029ae911c8cb7522e551cb4b535f1703fcb3
|
e233d1213918eb1db6a884c960b9329c514c3587
|
refs/heads/master
| 2022-11-12T05:29:24.666891
| 2018-12-09T20:57:38
| 2018-12-09T20:57:38
| 157,327,992
| 0
| 1
| null | 2022-10-25T03:25:22
| 2018-11-13T05:56:39
|
Python
|
UTF-8
|
Python
| false
| false
| 3,974
|
py
|
"""
Forms and validation code for user registration.
Note that all of these forms assume Django's bundled default ``User``
model; since it's not possible for a form to anticipate in advance the
needs of custom user models, you will need to write your own forms if
you're using a custom model.
"""
from __future__ import unicode_literals
from django import forms
from django.contrib.auth.forms import UserCreationForm
from django.utils.translation import ugettext_lazy as _
from .users import UserModel
from .users import UsernameField
User = UserModel()
class RegistrationForm(UserCreationForm):
"""
Form for registering a new user account.
Validates that the requested username is not already in use, and
requires the password to be entered twice to catch typos.
Subclasses should feel free to add any additional validation they
need, but should avoid defining a ``save()`` method -- the actual
saving of collected user data is delegated to the active
registration backend.
"""
required_css_class = 'required'
email = forms.EmailField(label=_("E-mail"))
class Meta:
model = User
fields = (UsernameField(), "email")
# def clean(self):
# cleaned_data = super(UserCreationForm, self).clean()
# return cleaned_data
class RegistrationFormUsernameLowercase(RegistrationForm):
"""
A subclass of :class:`RegistrationForm` which enforces unique case insensitive
usernames, make all usernames to lower case.
"""
def clean_username(self):
username = self.cleaned_data.get('username', '').lower()
if User.objects.filter(**{UsernameField(): username}).exists():
raise forms.ValidationError(_('A user with that username already exists.'))
return username
class RegistrationFormTermsOfService(RegistrationForm):
"""
Subclass of ``RegistrationForm`` which adds a required checkbox
for agreeing to a site's Terms of Service.
"""
tos = forms.BooleanField(widget=forms.CheckboxInput,
label=_('I have read and agree to the Terms of Service'),
error_messages={'required': _("You must agree to the terms to register")})
class RegistrationFormUniqueEmail(RegistrationForm):
"""
Subclass of ``RegistrationForm`` which enforces uniqueness of
email addresses.
"""
def clean_email(self):
"""
Validate that the supplied email address is unique for the
site.
"""
if User.objects.filter(email__iexact=self.cleaned_data['email']):
raise forms.ValidationError(_("This email address is already in use. Please supply a different email address."))
return self.cleaned_data['email']
class RegistrationFormNoFreeEmail(RegistrationForm):
"""
Subclass of ``RegistrationForm`` which disallows registration with
email addresses from popular free webmail services; moderately
useful for preventing automated spam registrations.
To change the list of banned domains, subclass this form and
override the attribute ``bad_domains``.
"""
bad_domains = ['aim.com', 'aol.com', 'email.com', 'gmail.com',
'googlemail.com', 'hotmail.com', 'hushmail.com',
'msn.com', 'mail.ru', 'mailinator.com', 'live.com',
'yahoo.com', 'outlook.com']
def clean_email(self):
"""
Check the supplied email address against a list of known free
webmail domains.
"""
email_domain = self.cleaned_data['email'].split('@')[1]
if email_domain in self.bad_domains:
raise forms.ValidationError(_("Registration using free email addresses is prohibited. Please supply a different email address."))
return self.cleaned_data['email']
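# Minimal sketch (hypothetical subclass, not part of the original module) of the
# customisation described in the docstring above: override ``bad_domains`` to
# change the list of banned domains.
class RegistrationFormNoDisposableEmail(RegistrationFormNoFreeEmail):
    bad_domains = ['mailinator.com', '10minutemail.com']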
class ResendActivationForm(forms.Form):
required_css_class = 'required'
email = forms.EmailField(label=_("E-mail"))
|
[
"vladmalmyga@gmail.com"
] |
vladmalmyga@gmail.com
|
2a105f6bc2331fb8e233a4c673ef407ce743c551
|
9cb233cc790684bc50b7e0b63f9cd7fef5e1e5fa
|
/coap/coap.py
|
da8f54e8e45973e3bd082ef504bf7bb15fbb35c1
|
[] |
no_license
|
DmitryShipilov/Practice_magistrature
|
21358c1ddb7d7779d7298da0b9620ffef2aafcbb
|
5d553ce3bda17d265cd2a0d2813244e1b93b2091
|
refs/heads/master
| 2022-12-04T01:09:02.303934
| 2020-08-19T20:14:53
| 2020-08-19T20:14:53
| 286,577,061
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,420
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import asyncio
import logging
import aiocoap
logging.basicConfig(level=logging.INFO)
# CoAP message codes
Code = aiocoap.numbers.codes.Code
async def CoAP_request(code, uri, payload=None):
protocol = await aiocoap.Context.create_client_context()
if payload:
request = aiocoap.Message(code=code, uri=uri, payload=payload)
else:
request = aiocoap.Message(code=code, uri=uri)
try:
response = await protocol.request(request).response
except Exception as e:
print('Failed to fetch resource:')
print(e)
else:
print ("Result code:", response.code,"\n",
"Payload:", response.payload)
"""
if __name__ == "__main__":
asyncio.get_event_loop().run_until_complete(CoAP_request(
code=Code.GET,
uri='coap://localhost/other/block'
))
"""
print("\nPUT")
size = 1
asyncio.get_event_loop().run_until_complete(CoAP_request(
code=Code.PUT,
uri='coap://localhost/other/block',
payload = b"bytes_is10" * size
))
"""
print("\nGET")
asyncio.get_event_loop().run_until_complete(CoAP_request(
code=Code.GET,
uri='coap://127.0.0.1/other/block',
))
"""
"""
print("\nPUT")
asyncio.get_event_loop().run_until_complete(CoAP_request(
code=Code.PUT,
uri='coap://localhost/other/block',
payload = b"xxx xxx xxx\n" * size
))
print("\nGET")
asyncio.get_event_loop().run_until_complete(CoAP_request(
code=Code.GET,
uri='coap://localhost/time',
))
print("\n")
"""
|
[
"shipilov.dmitry.valer@gmail.com"
] |
shipilov.dmitry.valer@gmail.com
|
88e80019ff6a6765f7faf8d524f81498f5a208a3
|
f3eae017c3c0206cdac604e40ef3b1fc9e7e62f6
|
/20200404/87.py
|
cf1f08a2a2efe67b92eef273bbc8181c019f07bc
|
[] |
no_license
|
doonguk/algorithm
|
b6d126286f7fff170979c62f2b2378d5a03438e9
|
981a1457106c244a13ef3cde8e944cb81888755f
|
refs/heads/master
| 2021-02-08T09:54:40.185496
| 2020-04-15T14:30:53
| 2020-04-15T14:30:53
| 244,138,985
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,345
|
py
|
def solution(m, n, board):
board = list(map(list, board))
dx = [1, 1, 0]
dy = [0, 1, 1]
answer = 0
cnt = 0
while True:
ch = [[0] * n for _ in range(m)]
for y in range(m):
for x in range(n):
for k in range(3):
xx = x + dx[k]
yy = y + dy[k]
if board[y][x] == -1 or xx >= n or yy >= m or board[y][x] != board[y + dy[k]][x + dx[k]]:
break
else:
if ch[y][x] == 0:
ch[y][x] = 1
cnt += 1
for k in range(3):
if ch[y + dy[k]][x + dx[k]] == 0:
ch[y + dy[k]][x + dx[k]] = 1
cnt += 1
if cnt == 0:
break
answer += cnt
cnt = 0
# delete
for y in range(m):
for x in range(n):
if ch[y][x] == 1:
now = y # value currently being processed
before = now - 1 # the value above it
while before >= 0 and board[before][x] != -1:
board[now][x] = board[before][x]
now -= 1
before -= 1
board[now][x] = -1
return answer
|
[
"mbxd1@naver.com"
] |
mbxd1@naver.com
|
2cfadbdf605826104ecf7f24efa19f78691766cf
|
c11f92e6a1578338cf759b5e1624a53225642e79
|
/babi/user_data.py
|
8307f03bf2d4e55df9bd70db01a1dca7746c0fcf
|
[
"MIT"
] |
permissive
|
pganssle/babi
|
c1d50df3bdb924316779ab82e996ad46baafb986
|
d20be693d2c067570f0a82e2c2baee34c827c3bd
|
refs/heads/master
| 2021-04-11T19:55:08.285937
| 2020-03-21T18:47:37
| 2020-03-21T18:47:37
| 249,049,571
| 0
| 0
|
MIT
| 2020-03-21T19:50:32
| 2020-03-21T19:50:32
| null |
UTF-8
|
Python
| false
| false
| 393
|
py
|
import os.path
def _xdg(*path: str, env: str, default: str) -> str:
return os.path.join(
os.environ.get(env) or os.path.expanduser(default),
'babi', *path,
)
def xdg_data(*path: str) -> str:
return _xdg(*path, env='XDG_DATA_HOME', default='~/.local/share')
def xdg_config(*path: str) -> str:
return _xdg(*path, env='XDG_CONFIG_HOME', default='~/.config')
|
[
"asottile@umich.edu"
] |
asottile@umich.edu
|
b12ae5879c11d022acc15b94a031accfb196b46b
|
62883320a7c6bb8cda5e934fbd25bb9ef8a067c3
|
/front_server.py
|
3086e2a847325f3c2ab207ae956b269ac0349c2b
|
[] |
no_license
|
vvvm23/Distributed_Systems_Summative
|
139637b25242fdbe2799c7d263363da1de2385ee
|
28480ad069f4cb94e78016c95d9b2d700b841efd
|
refs/heads/master
| 2022-04-10T21:47:31.954037
| 2020-03-13T01:03:28
| 2020-03-13T01:03:28
| 239,213,472
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,196
|
py
|
import sys
import Pyro4
import Pyro4.util
# Expose this class as a Pyro4 object
@Pyro4.expose
@Pyro4.behavior(instance_mode="single")
class FrontServer:
def __init__(self):
self.primary = None # Primary Server Reference
self.ns = Pyro4.locateNS() # Locate the Name Server
if not self.ns:
print("ERROR: Failed to locate Name Server. Exiting..")
exit()
self.methods = None
if not self.set_primary(): # Search for the primary server
print("ERROR: Failed to find primary server. Exiting..")
exit()
# Search for primary server and set it
def set_primary(self):
print("ERROR: Current primary is down! Finding new server..")
servers = [(name, uri) for name, uri in self.ns.list(prefix="just_hungry.back_end").items()]
for s in servers:
active = Pyro4.Proxy(s[1])
# Check if the server is up
try:
active._pyroBind()
except Exception as e:
continue
if active.ping_respond():
# If it is, set as the new primary
self.primary = active
self.primary.promote_master() # Promote the server to master
# Get all methods from the primary server
self.methods = {
"login": active.login,
"logout": active.logout,
"create_account": active.create_account,
"delete_account": active.delete_account,
"make_order": active.make_order,
"cancel_order": active.cancel_order,
"view_orders": active.view_orders,
"show_items": active.show_items
}
return True
return False
# Check if the primary server is up
def ping_primary(self):
try:
self.primary._pyroBind()
if self.primary.ping_respond():
return True
except Exception as e:
return False
return False
# Forward requests from client to backend server
def forward_request(self, method, **args):
VALID_METHODS = set(["login", "logout", "create_account", "delete_account", "make_order", "cancel_order", "view_orders", "show_items"])
if method not in VALID_METHODS:
print("ERROR: Unknown method!")
return False
# Check if primary is up
if not self.ping_primary():
# If not, find a new primary
if not self.set_primary():
print("ERROR: Failed to find new primary server!")
return False
# Get the corresponding method and call it remotely.
print(f"INFO: Forwarding to remote method {method}")
server_result = self.methods[method](**args)
# Tell the primary server to sync with slaves
self.primary.master_sync()
return server_result
# Set remote exception hook
sys.excepthook = Pyro4.util.excepthook
if __name__ == "__main__":
Pyro4.Daemon.serveSimple({
FrontServer: "just_hungry.front_end"
}, ns=True)
|
[
"alexander.f.mckinney@durham.ac.uk"
] |
alexander.f.mckinney@durham.ac.uk
|
bef34cad01422648c09e20b9ebaf04f24bcfe0f0
|
c7877ca0f68753212e4bb2f8b2944eec65dff56e
|
/fa/bootstrap/forms.py
|
6224699d5a7aebbd20b47a2aa7df3ebb8f353fc5
|
[] |
no_license
|
RedTurtle/fa.bootstrap
|
d37d81fb9bca3539abd1c71a0cc67c4af19b27b7
|
8b982f9dc178c58e62fef9e3dddde5a8dac9b811
|
refs/heads/master
| 2021-01-01T18:37:46.303564
| 2014-04-25T13:17:21
| 2014-04-25T13:17:21
| 2,487,668
| 5
| 5
| null | 2014-04-25T13:17:22
| 2011-09-30T07:35:24
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 903
|
py
|
from formalchemy import FieldSet, fatypes
from fa.bootstrap import renderers
class BootstrapFieldSet(FieldSet):
default_renderers = dict(FieldSet.default_renderers)
default_renderers.update({
fatypes.String: renderers.BootstrapTextFieldRenderer,
fatypes.Unicode: renderers.BootstrapTextFieldRenderer,
fatypes.Text: renderers.BootstrapTextFieldRenderer,
fatypes.Integer: renderers.BootstrapIntegerFieldRenderer,
fatypes.Float: renderers.BootstrapFloatFieldRenderer,
fatypes.Numeric: renderers.BootstrapFloatFieldRenderer,
fatypes.Interval: renderers.BootstrapIntervalFieldRenderer,
fatypes.Boolean: renderers.BootstrapBooleanFieldRenderer,
fatypes.Set: renderers.BootstrapSelectFieldRenderer,
fatypes.List: renderers.BootstrapSelectFieldRenderer,
'dropdown': renderers.BootstrapSelectFieldRenderer,
})
|
[
"mtjahangir@gmail.com"
] |
mtjahangir@gmail.com
|
8e34a70a3f9397eeb53ec22828a93db95486d8b8
|
e458083d9e0f3564d3089de9febe3cad61733f47
|
/Weekdays/python_looping/python_loopings/iterative.py
|
a8a0cb3dacd9d0abb7f63a3db3305d1043195b89
|
[] |
no_license
|
chavhanpunamchand/pythonYogeshSir
|
cd71d002927f8bbc8ad5ecff3282e0b7c6cfc13c
|
3ee675f188e2680cde9e04ad03f2f5c9f3d46ba0
|
refs/heads/master
| 2023-02-14T01:53:34.342147
| 2021-01-09T17:36:17
| 2021-01-09T17:36:17
| 293,258,859
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,839
|
py
|
'''
Iterative --> points
do..while --atleast once --> thru flag--* --> not provided in python
while --> may or may not
range
random
random.randoint
enumerate
dict.keys
dict.values
dict.items
for --> when u are sure about no of iterations in advance
--> when u are not sure about no of iterations in advance --> read data from db--> read data from files-->
while --> may or may not body execution --> condition and then body
do..while --> atleast once body execution --> body then condition --> condition bypass it thru flag-->
for -> loop --
Iterative --> statements --> execute the body as long as condition is satisfied
for
while
do.while * --> not in python --> we need to explicitly --> implement this
range(10) start--> 0 end=10 step=1
range(1,10) step=1
range(1,10,3) start -1 end 10 --> incr -->3
1 4 7
for(initialization;conditions;incremet/decrement) --> other lang
//body
for --> start stop step
range --> seq --> start stop -> step ---> range(10) --> 0 10 1 --> 0-9
start -> include -- range(1,10) --> 1 10 1 -->1-9
stop --> dont include range(1,10,2) 1 10 2 -->1,3,5,7,9
step --> increment by
list/set/tuple --> item --> simply element --
dict -->
step1
1.dict --> keys --> based on keys using dict.get(key) -> value --> we need to retrive
2.dict.keys --keys --> based on keys using dict.get(key) -> value --> we need to retrive
step2
packed = pickling = (10,20,30,40,50) values = (10,20,30) for item in dict.items() --> (key,value)
unpacked--unpickling = 10,20,40,50 v1,v2,v3 = 10,20,30 for k,v in dict.items() --> key value
3.dict.items() -- pair --> for k,v in dict.items() --> unpacked 1 11 --> direct
for i,k,v in enumerate(dict.items()) --> unpacked 0,1 11 --> direct
for item in dict.items() --> packed (1,11) -> direct
for i,item in enumerate(dict.items()) --> packed (1,11) -> direct
step1 --> key -- we can retrive value
step2 -- key,value -->direct
step3
dict.values() --> only values --> u cannot retrive keys--based
only values -->
enumerate --> assign numbers to the --> counter --> 0,1,2,3 -->
'''
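# Small runnable sketch (added for illustration; the sample dict and variable names
# below are made up, not part of the original notes): the flag-based do..while
# emulation and the dict-iteration styles described above.
_flag = True
while _flag:                                # body runs at least once, like do..while
    print('body executed at least once')
    _flag = False                           # reset so the loop exits next time
_demo = {1: 11, 2: 12}
for _k in _demo:                            # step1: keys only, value fetched via get()
    print(_k, _demo.get(_k))
for _k, _v in _demo.items():                # step2: unpacked key, value pairs
    print(_k, _v)
for _i, _pair in enumerate(_demo.items()):  # packed (key, value) pair plus a counter
    print(_i, _pair)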
import sys
import random
# i want to read the file print -> all lines- ->
#body -->
# fib -> series --> 0 1 1 2 3 5 8 --->
val = int(input('Enter Stop Point ')) # 5
#do --> atleast once -->
num1 = 0 # 0 1
num2 = 1 #1
result = 0 #0
counter = 1 #1
# swap -->
while (counter<=val): # true #final ans ---> 0,1,1,2
print(result,end=',') #1
counter +=1 # 4
num1 = num2 #1
num2 = result # 1
result = num1 + num2 #1+1 --> 2
sys.exit(0)
while True:
file = open('File')
if not file.readlines(): # in case no lines
break
num1 = int(input('Enter NO -->'))
num2 = int(input('Enter NO -->'))
while num1 == num2:
break
while True: # do while --> atleast once
num1 = int(input('Enter NO -->'))
num2 = int(input('Enter NO -->'))
if num1 == num2:
break
flag = 1
val = int(input('Enter Number : '))
while flag or val%2==0: # # when no is even
print('inside body') #atleast once body execution happens # may or may not body execution
val = int(input('Enter Number : '))
flag=0 # reset -> next time flag will not bypass--> execution
print('Outside body -->')
sys.exit(0)
#do while kind of implementation --> while do.while --> with the help of flag
flag = [0] # present
num = int(input('Enter Number : '))
# once --> first --> gc --> atleast once -> do while --> implementation
while not num%3==0 or flag: # loop will execute unless entered number is not divisible by 3
print('Inside Body') #gc --> always --> cannot-->depends on condition
num = int(input('Enter Number : '))
flag.clear() #empty --> absent
print("Divisible by 3 -->", num)
sys.exit(0)
#every iterator --> group of elements --> list,set,tuple,dict
#while --> condition --> then body -> may or may not
#do..while-->body first --> then condition --> atleast once
'''
while condition: --> body may or may not execute --> always --> depends on condition
body
do
body --> atleast once body will surely execute -- irrespective of condition
while condition
'''
#for item in range(1000): # if we are sure about no of iterations -->
# if we are not sure about no of iteration in advance
while True: # # start loop --> unless not break --> if num divisible by 3 --> when ??==? no-->
num = int(input('Enter Number :'))
if num%3==0:
print('Number found',num)
break
sys.exit(0)
sys.exit(0)
#no of attempts --> no --> if u are not sure about of numbers---> while
num = int(input('Enter Number : ')) # no of elements --> to reach to this condition ??
if num%3 == 0:
print('Number Found -- >',num)
sys.exit(0)
values = [random.randint(1,100) for item in range(10)] # this single line is equivalent to lines 51 to 57 --> a list comprehension --> shorthand expression
print(values)
# i want to print all those nums divisible by 3 --> #3 stop
for item in values: #10
if item%3==0:
print(item)
break
sys.exit(0)
values = []
for item in range(10): # 10 times -->
val = random.randint(1,100) # 1 - 100 --> both inclusive -> random no generates
values.append(val)
print(values)
sys.exit(0)
valuesList = list(range(1,20))
valuesSet = set(range(1,20))
valuesTuple = tuple(range(1,20))
valuesList1 = list(range(1,10)) # 9 -->1------> 9 1:11,2:12 --> key
valuesList2 = list(range(11,20)) #9--> 11----> 19 -->value
valuesDict = dict(zip(valuesList1,valuesList2))
for cnt,pair in enumerate(valuesDict.items()):
print(cnt,pair) #0 (1,11) 1 (2,12)
sys.exit(0)
print('For List -->')
for index,item in enumerate(valuesList): # if we want counter for the element
print(index,item) # 0:1 --> 8:9
print('For Set --->')
for index,item in enumerate(valuesSet): # enumerate --> assigns counter to the elements-->
print(index,item)
sys.exit(0)
#values = [10,"A",True] # item ?? --? first int --> second --string--> 3rd --> boolean
#list --> group of elements as single entity -->
# array --> type of array --> object --> hom data elements
print('Dict Items ->',valuesDict)
print('using dict --> items() method --> unpacked')
for key,val in valuesDict.items():
print(key,val) #1 11
print('using dict --> items() method --> packed --> tuple pairs')
for pair in valuesDict.items():
print(pair) #tuple(1,11) (2,12) (3,13)
sys.exit(0)
print('using dict --> values() method')
for val in valuesDict.values():
print(val) #only values --> 11 12 -----> 19
sys.exit(0)
print('Tuple Iterations using for loop')
for item in valuesTuple:
print(item,end=' . ') #tuple elements separated by dot
print('using dict.nothing which is bydefault --> keys()')
for item in valuesDict: #default .keys()
print('Key {}'.format(item),"Value {}".format(valuesDict.get(item))) #only keys --> 1 2 3 4------> 9
print('using dict --> keys() method')
for key in valuesDict.keys(): #
print('Key {}'.format(key),"Value {}".format(valuesDict.get(key))) #only keys --> 1 2 3 4------> 9
print('List Of Items ->',valuesList)
print('List Iterations using for loop')
for item in valuesList:
print(item,end=' # ') #list elements separated by hash #
print('Set Iterations using for loop')
for item in valuesSet:
print(item, end=' , ') # set elements separated by ,
#include--1 but not 10
val = range(1,20,2) #1 <10
ans = list(val)
print(ans)
for item in ans:
print(item,end=',')
sys.exit(0)
val = range(10) # seq -->
print(val,type(val))
rlist = set(range(10))
print(rlist)
|
[
"chavhanpunamchand@gmail.com"
] |
chavhanpunamchand@gmail.com
|
7d287e5d6a9ac9084996d8e8934bd0ad4c76c534
|
6410215af82c5db7c7310c1adab88a1fa85d9fcb
|
/init_db.py
|
6757241cbad55fdfc88311fdeb6d3bea18657858
|
[] |
no_license
|
PranavJoshi1/CloudAzure
|
120ea1e5a9ed1e520c070d2b45b67f9d5a458f44
|
9a928702c5f737ebb5f7c8cc2227d1a96c7a3775
|
refs/heads/main
| 2023-03-21T00:29:11.755329
| 2021-03-06T08:01:27
| 2021-03-06T08:01:27
| 345,036,588
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 492
|
py
|
import sqlite3
connection = sqlite3.connect('database.db')
with open('schema.sql') as f:
connection.executescript(f.read())
cur = connection.cursor()
cur.execute("INSERT INTO posts (title, content) VALUES (?, ?)",
('First Post', 'Content for the first post')
)
cur.execute("INSERT INTO posts (title, content) VALUES (?, ?)",
('Second Post', 'Content for the second post')
)
connection.commit()
connection.close()
|
[
"noreply@github.com"
] |
PranavJoshi1.noreply@github.com
|
74ce6870b66861e8a9a1a0672ebaed66d09cd12f
|
eb220efce4db90a7fd9cb06a69021969182f0087
|
/P1-Ricochet Robots/ricochet_robots.py
|
5df992fd3bdcf957c93a9a2160b9cf438b593b7e
|
[] |
no_license
|
Mokita-J/Artificial_Intelligence
|
7e0854913ff7e4062787e028e12981f3cbb7e57f
|
704b421d10679266d125b7d7071e6c7456dbc79c
|
refs/heads/main
| 2023-06-14T22:02:48.539252
| 2021-06-28T18:10:53
| 2021-06-28T18:10:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,943
|
py
|
# ricochet_robots.py: Template for the implementation of the 1st Artificial Intelligence project 2020/2021.
# You should modify the classes and functions in this file according to the assignment instructions.
# Besides the functions and classes already defined, you may add others you consider relevant.
# Group 11:
# 92532 Mónica Jin
# 93681 Afonso Carvalho
from search import Problem, Node, astar_search, breadth_first_tree_search, \
depth_first_tree_search, greedy_search, InstrumentedProblem
import sys, time
class RRState:
state_id = 0
def __init__(self, board):
self.board = board
self.id = RRState.state_id
RRState.state_id += 1
def __lt__(self, other):
""" Este método é utilizado em caso de empate na gestão da lista
de abertos nas procuras informadas. """
return self.id < other.id
def __eq__(self, other):
if type(other) is type(self):
return self.board == other.board
return False
def __hash__(self):
return hash(self.board)
class Board:
""" Representacao interna de um tabuleiro de Ricochet Robots. """
goal = None
def __init__(self, size, R_pos, B_pos, Y_pos, G_pos, barriers):
self.robots = {"R": R_pos, "B": B_pos, "Y": Y_pos, "G": G_pos}
self.barriers = barriers
self.size = size
def __eq__(self, other):
if type(other) is type(self):
return self.robots == other.robots and self.size == other.size
return False
def __hash__(self):
return hash((self.robots["R"], self.robots["B"], self.robots["Y"], self.robots["G"], self.size))
def robot_position(self, robot: str):
""" Devolve a posição atual do robô passado como argumento. """
return self.robots[robot]
def possible_actions(self, robot: str):
actions = []
(x, y) = self.robot_position(robot)
if y != self.size: #checks if robot can move right
if (x, y + 1) not in self.robots.values():
if (x, y + 1) not in self.barriers["l"] and (x, y) not in self.barriers["r"]:
actions.append("r")
if y != 1: #checks if robot can move left
if (x, y - 1) not in self.robots.values():
if (x, y - 1) not in self.barriers["r"] and (x, y) not in self.barriers["l"]:
actions.append("l")
if x != self.size: #checks if robot can move down
if (x + 1, y) not in self.robots.values():
if (x + 1, y) not in self.barriers["u"] and (x, y) not in self.barriers["d"]:
actions.append("d")
if x != 1: #checks if robot can move up
if (x - 1, y) not in self.robots.values():
if (x - 1, y) not in self.barriers["d"] and (x, y) not in self.barriers["u"]:
actions.append("u")
return actions
def parse_instance(filename: str) -> Board:
""" Lê o ficheiro cujo caminho é passado como argumento e retorna
uma instância da classe Board. """
f = open(filename, "r")
data = f.readlines()
board = Board(eval(data[0]), None, None, None, None, {"u": [], "d": [], "l": [], "r": []})
x, y = 0, 0
for line in range(1, 5):
robot = data[line].split()
x = eval(robot[1])
y = eval(robot[2])
board.robots[robot[0]] = (x, y)
goal = data[5].split()
Board.goal = [goal[0], (eval(goal[1]), eval(goal[2]))]
for line in range(7, 7 + eval(data[6])):
barrier = data[line].split()
x = eval(barrier[0])
y = eval(barrier[1])
board.barriers[barrier[2]].append((x, y))
f.close()
return board
class RicochetRobots(Problem):
def __init__(self, board: Board):
""" O construtor especifica o estado inicial. """
super().__init__(RRState(board), board.goal)
def actions(self, state: RRState):
""" Retorna uma lista de ações que podem ser executadas a
partir do estado passado como argumento. """
actions = []
for color in state.board.robots:
for move in state.board.possible_actions(color):
actions.append((color, move))
return actions
def result(self, state: RRState, action):
""" Retorna o estado resultante de executar a 'action' sobre
'state' passado como argumento. A ação retornada deve ser uma
das presentes na lista obtida pela execução de
self.actions(state). """
(robot, move) = action
(x, y) = state.board.robot_position(robot)
distx = state.board.size + 1
disty = state.board.size + 1
if move == "r":
distx = 0
for pos in state.board.barriers["r"]:
if x == pos[0] and y < pos[1]:
disty = min(disty, pos[1] - y)
for pos in state.board.barriers["l"] + list(state.board.robots.values()):
if x == pos[0] and y < pos[1]:
disty = min(disty, pos[1] - y - 1)
if disty == state.board.size + 1:
disty = state.board.size - y
elif move == "l":
distx = 0
for pos in state.board.barriers["r"] + list(state.board.robots.values()):
if x == pos[0] and y > pos[1]:
disty = - min(abs(disty), y - pos[1] - 1)
for pos in state.board.barriers["l"]:
if x == pos[0] and y > pos[1]:
disty = - min(abs(disty), y - pos[1])
if disty == state.board.size + 1:
disty = 1 - y
elif move == "u":
disty = 0
for pos in state.board.barriers["u"]:
if y == pos[1] and x > pos[0]:
distx = - min(abs(distx), x - pos[0])
for pos in state.board.barriers["d"] + list(state.board.robots.values()):
if y == pos[1] and x > pos[0]:
distx = - min(abs(distx), x - pos[0] - 1)
if distx == state.board.size + 1:
distx = 1 - x
elif move == "d":
disty = 0
for pos in state.board.barriers["u"] + list(state.board.robots.values()):
if y == pos[1] and x < pos[0]:
distx = min(distx, pos[0] - x -1)
for pos in state.board.barriers["d"]:
if y == pos[1] and x < pos[0]:
distx = min(distx, pos[0] - x)
if distx == state.board.size + 1:
distx = state.board.size - x
if robot == "R":
return RRState(Board(state.board.size, (x + distx, y + disty), state.board.robot_position("B"),
state.board.robot_position("Y"), state.board.robot_position("G"), state.board.barriers))
elif robot == "B":
return RRState(Board(state.board.size, state.board.robot_position("R"), (x + distx, y + disty),
state.board.robot_position("Y"), state.board.robot_position("G"), state.board.barriers))
elif robot == "Y":
return RRState(Board(state.board.size, state.board.robot_position("R"), state.board.robot_position("B"),
(x + distx, y + disty), state.board.robot_position("G"), state.board.barriers))
elif robot == "G":
return RRState(Board(state.board.size, state.board.robot_position("R"), state.board.robot_position("B"),
state.board.robot_position("Y"), (x + distx, y + disty), state.board.barriers))
def goal_test(self, state: RRState):
""" Retorna True se e só se o estado passado como argumento é
um estado objetivo. Deve verificar se o alvo e o robô da
mesma cor ocupam a mesma célula no tabuleiro. """
goal_robot = self.goal[0]
goal_pos = self.goal[1]
return state.board.robot_position(goal_robot) == goal_pos
def h(self, node: Node):
""" Função heuristica utilizada para a procura A*. """
(bx, by) = node.state.board.robot_position("B")
(rx, ry) = node.state.board.robot_position("R")
(yx, yy) = node.state.board.robot_position("Y")
(gx, gy) = node.state.board.robot_position("G")
blue_weight = red_weight = yellow_weight = green_weight = self.initial.board.size
if self.goal[0] == "R":
red_weight = 1
elif self.goal[0] == "B":
blue_weight = 1
elif self.goal[0] == "G":
green_weight = 1
elif self.goal[0] == "Y":
yellow_weight = 1
"""if(self.goal[1][0] == node.state.board.size or self.goal[1][1] == node.state.board.size or
self.goal[1][0] == 1 or self.goal[1][1] == 1):
(x, y) = node.state.board.robot_position(self.goal[0])
return abs(x - self.goal[1][0]) + abs(y - self.goal[1][1])"""
return (abs(bx - self.goal[1][0]) + abs(by - self.goal[1][1])) * blue_weight + \
(abs(rx - self.goal[1][0]) + abs(ry - self.goal[1][1])) * red_weight + \
(abs(yx - self.goal[1][0]) + abs(yy - self.goal[1][1])) * yellow_weight + \
(abs(gx - self.goal[1][0]) + abs(gy - self.goal[1][1])) * green_weight
if __name__ == "__main__":
start_time = time.time()
board = parse_instance(sys.argv[1])
problem = RicochetRobots(board)
# instrumentProblem = InstrumentedProblem(problem)
node = astar_search(problem, problem.h)
print("Execution time:",time.time() - start_time, "seconds")
solution = []
while(node.parent):
solution = [node.action] + solution
node = node.parent
print(len(solution))
for move in solution:
print(move[0] + " " + move[1])
|
[
"monicachenjin@tecnico.ulisboa.pt"
] |
monicachenjin@tecnico.ulisboa.pt
|
9500d9c5d2fd365ab5a5c46c7f6c5e2743b8ba14
|
d9553aeed9a8886d35ea5d5d5537a143efec0d7d
|
/dataset_creation.py
|
81046e3dd407978d1688d093f754f1a8a9c56567
|
[] |
no_license
|
rahulOmishra/detecting-incongruity-dataset-gen
|
f06546c2699c23f84b635801ce749f3a2932d9fc
|
0c95acb6275afafdf6116253c3419d18fdf761ce
|
refs/heads/master
| 2021-01-14T04:21:05.406662
| 2019-10-16T07:30:46
| 2019-10-16T07:30:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,600
|
py
|
import os
import json
import csv
from datetime import datetime
import random
import math
import argparse
import pandas as pd
import numpy as np
from nltk.tokenize import sent_tokenize, word_tokenize
from nltk.corpus import stopwords
STOP_WORDS = stopwords.words('english')
P_TRAIN = 0.85
P_DEV = 0.075
P_TEST = 0.075
def preprocess_body(body):
new_body = []
for para in body:
new_para = []
for sent in para:
new_sent = preprocess_sentence(sent)
new_para.append(new_sent)
new_body.append(new_para)
return new_body
def preprocess_sentence(txt):
words = word_tokenize(txt)
words = [word for word in words if (word.isalpha() and (word not in STOP_WORDS) )]
return " ".join(words)
def load_from_raw_data(nela_path):
BASE_PATH = nela_path
column_labels = ['headline', 'body', 'source']
df = pd.DataFrame(columns=column_labels)
for month_dir in os.listdir(BASE_PATH):
for date_dir in os.listdir(os.path.join(BASE_PATH, month_dir)):
for source_dir in os.listdir(os.path.join(BASE_PATH, month_dir, date_dir)):
for article_filename in os.listdir(os.path.join(BASE_PATH, month_dir, date_dir, source_dir)):
with open(os.path.join(BASE_PATH, month_dir, date_dir, source_dir, article_filename)) as f:
json_txt = f.read()
try:
article_dict = json.loads(json_txt)
except json.decoder.JSONDecodeError:
continue
article_dict = {
"headline": article_dict["title"],
"body": article_dict["content"],
"source": article_dict["source"],
}
df = df.append(article_dict, ignore_index=True)
print(df)
return df
def export_csv_for_prediction(df, source_path, para_flag=False):
df = df.dropna()
df = df.sample(n=5)
print(df.iloc[0].body)
df['body'] = df.apply(lambda row: list(map(lambda x: [x], row['body'].split("\n\n"))), axis=1)
df['headline'] = df.apply(lambda row: preprocess_sentence(row['headline']), axis=1)
df['body'] = df.apply(lambda row: preprocess_body(row['body']), axis=1)
print(df)
print(df.iloc[0].body)
f = open(source_path, "w")
writer = csv.writer(f, delimiter=",", quoting=csv.QUOTE_MINIMAL)
for row in df.itertuples():
if para_flag:
for para in row.body:
writer.writerow([row.Index, row.headline, " ".join(para)])
else:
for para in row.body:
for sent in para:
writer.writerow([row.Index, row.headline, sent])
def create_dataset(df):
df = df.sample(frac=1)
number_array = [i for i in range(200)]
label_0_list = []
label_1_list = []
para_list = []
label_1_type = 0
type_1_avr_len = []
result_df = []
result_df_para = []
for i in range(df.shape[0] // 3):
label_0_row = df.iloc[3*i]
label_1_base = df.iloc[3*i + 1]
label_1_attach = df.iloc[3*i + 2]
base_paras = label_1_base["paras"]
attach_paras = label_1_attach["paras"]
base_para_len = len(label_1_base["paras"])
attach_para_len = len(label_1_attach["paras"])
if label_1_type == 0: # Applying rule (1) with only one paragraph
selected_para_index = random.randrange(0, attach_para_len)
insert_index = random.randrange(0, base_para_len + 1)
fake_paras = attach_paras[selected_para_index]
result_article = base_paras.copy()
result_article.insert(insert_index, attach_paras[selected_para_index])
fake_paras = [fake_paras]
elif label_1_type == 1: # Applying rule (1) with two or more consecutive paragraphs
selected_insert_para_len = random.randrange(1, min([math.ceil(base_para_len / 2), attach_para_len]) + 1)
type_1_avr_len.append(selected_insert_para_len)
selected_para_start_index = random.randrange(0, attach_para_len - selected_insert_para_len + 1)
insert_index = random.randrange(0, base_para_len + 1)
fake_paras = attach_paras[selected_para_start_index:selected_para_start_index + selected_insert_para_len]
result_article = base_paras[:insert_index] + fake_paras + base_paras[insert_index:]
elif label_1_type == 2: # Applying rule (2) without random arrangement (maintaining the ordering of the sampled paragraphs)
selected_insert_para_len = random.randrange(1, min([math.ceil(base_para_len / 2), attach_para_len]) + 1)
type_1_avr_len.append(selected_insert_para_len)
selected_para_indices = random.sample(number_array[:attach_para_len], selected_insert_para_len)
insert_indices = random.sample(number_array[:base_para_len + selected_insert_para_len], selected_insert_para_len)
fake_paras = [attach_paras[i] for i in selected_para_indices]
base_paras_temp = base_paras.copy()
fake_paras_temp = fake_paras.copy()
base_paras_temp.reverse()
fake_paras_temp.reverse()
result_article = []
for i in range(base_para_len + selected_insert_para_len):
if i in insert_indices:
result_article.append(fake_paras_temp.pop())
else:
result_article.append(base_paras_temp.pop())
elif label_1_type == 3: # Applying rule (2) (n > 1)
selected_insert_para_len = random.randrange(1, min([math.ceil(base_para_len / 2), attach_para_len]) + 1)
type_1_avr_len.append(selected_insert_para_len)
selected_para_indices = random.sample(number_array[:attach_para_len], selected_insert_para_len)
insert_indices = random.sample(number_array[:base_para_len + selected_insert_para_len], selected_insert_para_len)
fake_paras = [attach_paras[i] for i in selected_para_indices]
base_paras_temp = base_paras.copy()
fake_paras_temp = fake_paras.copy()
result_article = base_paras[:]
for fake_para in fake_paras:
result_article.insert(random.randrange(0, len(result_article)), fake_para)
label_1_type = (label_1_type + 1) % 4
result_df.append({"headline": label_0_row["headline"], "body": label_0_row["paras"], "fake_type": -1, "label": 0})
result_df.append({"headline": label_1_base["headline"], "body": result_article, "fake_type": label_1_type, "label": 1})
result_df_para.append({"headline": label_0_row["headline"], "body": label_0_row["paras"], "fake_type": -1, "label": 0, "fake_paras": [], "base": label_0_row["paras"]})
result_df_para.append({"headline": label_1_base["headline"], "body": result_article, "fake_type": label_1_type, "label": 1, "fake_paras": fake_paras, "base": base_paras})
result_df = pd.DataFrame(result_df)
result_df_para = pd.DataFrame(result_df_para)
return result_df, result_df_para
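# Tiny self-contained illustration (made-up paragraphs, fixed indices in place of the
# random choices used above) of "rule (1) with only one paragraph": one paragraph from
# an unrelated article is inserted into the base article, yielding an incongruent
# (label 1) example while the untouched article keeps label 0.
def _rule1_example():
    base_paras = ["B1", "B2", "B3"]     # paragraphs of the base article
    attach_paras = ["A1", "A2"]         # paragraphs of the attached article
    selected_para_index = 0             # normally random.randrange(0, attach_para_len)
    insert_index = 1                    # normally random.randrange(0, base_para_len + 1)
    result_article = base_paras.copy()
    result_article.insert(insert_index, attach_paras[selected_para_index])
    assert result_article == ["B1", "A1", "B2", "B3"]
    return result_article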
def export_df_to_dataset(df, output_dir):
df["body"] = df.apply(lambda row: [sent_tokenize(para) for para in row["body"]], axis=1)
def tokenize_sent_in_para(para):
result = []
for sent in para:
result.append(" ".join(word_tokenize(sent)))
result = " <EOS> ".join(result) + " <EOS> "
return result
df["body"] = df.apply(lambda row: [tokenize_sent_in_para(para) for para in row["body"]], axis=1)
df["headline"] = df.apply(lambda row: " ".join(word_tokenize(row["headline"])), axis=1)
df["body_merged"] = df.apply(lambda row: " <EOP> ".join(row["body"]).replace("<EOS> <EOP>", "<EOS> <EOP>"), axis=1)
df_train = df.iloc[:int(df.shape[0] * P_TRAIN)]
df_dev = df.iloc[int(df.shape[0] * P_TRAIN):int(df.shape[0] * (P_TRAIN + P_DEV))]
df_test = df.iloc[int(df.shape[0] * (P_TRAIN + P_DEV)):]
whole_train_df = df_train[["headline", "body_merged", "label"]]
whole_train_df.to_csv(os.path.join(output_dir, "train.csv"), encoding="utf-8", header=False)
whole_dev_df = df_dev[["headline", "body_merged", "label"]]
whole_dev_df.to_csv(os.path.join(output_dir,"dev.csv"), encoding="utf-8", header=False)
whole_test_df = df_test[["headline", "body_merged", "label"]]
whole_test_df.to_csv(os.path.join(output_dir,"test.csv"), encoding="utf-8", header=False)
def export_df_to_dataset_para(df, output_dir):
df["fake_paras"] = df.apply(lambda row: [sent_tokenize(para) for para in row["fake_paras"]], axis=1)
df["base"] = df.apply(lambda row: [sent_tokenize(para) for para in row["base"]], axis=1)
def tokenize_sent_in_para(para):
result = []
for sent in para:
result.append(" ".join(word_tokenize(sent)))
result = " <EOS> ".join(result) + " <EOS> "
return result
df["fake_paras"] = df.apply(lambda row: [tokenize_sent_in_para(para) for para in row["fake_paras"]], axis=1)
df["base"] = df.apply(lambda row: [tokenize_sent_in_para(para) for para in row["base"]], axis=1)
df["headline"] = df.apply(lambda row: " ".join(word_tokenize(row["headline"])), axis=1)
df_train = df.iloc[:int(df.shape[0] * P_TRAIN)]
df_dev = df.iloc[int(df.shape[0] * P_TRAIN):int(df.shape[0] * (P_TRAIN + P_DEV))]
df_test = df.iloc[int(df.shape[0] * (P_TRAIN + P_DEV)):]
para_train_df = []
for row in df_train.itertuples():
for para in row.fake_paras:
para_train_df.append([row.Index, row.headline, para, 1])
for para in row.base:
para_train_df.append([row.Index, row.headline, para, 0])
para_dev_df = []
for row in df_dev.itertuples():
for para in row.fake_paras:
para_dev_df.append([row.Index, row.headline, para, 1])
for para in row.base:
para_dev_df.append([row.Index, row.headline, para, 0])
with open(os.path.join(output_dir,"train_IP.csv"), "w", encoding="utf-8") as f:
writer = csv.writer(f)
for row in para_train_df:
writer.writerow(row)
with open(os.path.join(output_dir,"dev_IP.csv"), "w", encoding="utf-8") as f:
writer = csv.writer(f)
for row in para_dev_df:
writer.writerow(row)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--input_path', help="path to article csv file")
parser.add_argument('--output_dir', default=".", help="directory to export output file")
parser.add_argument('--nela_path', default=None, help="directory to raw NELA JSON folder path")
args = parser.parse_args()
if args.nela_path is not None:
df = load_from_raw_data(args.nela_path)
else:
df = pd.read_csv(args.input_path, header=None, names=["headline", "body"])
df["body"] = df.apply(lambda row: row["body"].replace("\xa0", " "), axis=1)
df["paras"] = df.apply(lambda row: list(filter(None, row["body"].split("\n"))), axis=1)
print("Shuffling....")
df, df_para = create_dataset(df)
print("Exporting....")
export_df_to_dataset(df, args.output_dir)
export_df_to_dataset_para(df_para, args.output_dir)
|
[
"jgdgj223@gmail.com"
] |
jgdgj223@gmail.com
|
50fdad15b14d6c730c7ce8452947e51d1a0147ad
|
2d7170db2454cee312d595531404d0cf2b4e369c
|
/django_memcached_request_limit_manager.py
|
f61d14a5492209b795fb7841f0190b8ad4b13651
|
[
"MIT"
] |
permissive
|
fastinetserver/python-pandadoc
|
1d7f659bc162b42b23709a2e15c2ca43f551cde2
|
516415ddb97ef7024da3c558634bc5845c913134
|
refs/heads/master
| 2022-12-06T00:32:20.216436
| 2020-09-01T19:43:57
| 2020-09-01T19:43:57
| 288,470,037
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,114
|
py
|
"""pandadocument.py implements a Limit Manager for Django Application using Memcached
More about limits can be found here https://developers.pandadoc.com/reference#limits"""
__author__ = "Kostyantyn Ovechko"
__copyright__ = "Copyright 2020, Zxscript"
__license__ = "MIT"
__version__ = "0.0.1"
__email__ = "kos@zxscript.com"
__status__ = "Production"
import random
from time import sleep
from django.core.cache import cache
from .request_limit_manager import AbstractRequestLimitManager, RequestLimitReachedException
PANDADOC_REQUESTS_PER_MINUTE_QUOTA = 100
PANDADOC_REQUESTS_PER_MINUTE_DOWNLOAD_QUOTA = 20
class DjangoMemcachedRequestLimitManager(AbstractRequestLimitManager):
def get_request_permission(self):
# Set low priority for download - ONLY allow download within the first PANDADOC_REQUESTS_PER_MINUTE_DOWNLOAD_QUOTA tokens
limit = PANDADOC_REQUESTS_PER_MINUTE_DOWNLOAD_QUOTA if self.for_download else PANDADOC_REQUESTS_PER_MINUTE_QUOTA
for attempt in range(0, self.max_attempts):
if attempt > 0:
print("Could NOT find a free token. Retrying after {retry_delay} seconds delay".format(
retry_delay=self.retry_delay))
sleep(self.retry_delay)
for idx in random.sample(range(0, limit), limit):
key = 'pandadoc_token_{idx}'.format(idx=idx)
# Beware we have some concurrency issue here - another process can acquire
# the same resource during .get_or_set() method call and if it's lucky enough to generate
# the same new_value. Hopefully, this won't happen too often. If unsure - consider
# setting lower PANDADOC_REQUESTS_PER_MINUTE_QUOTA than what you have provided by PandaDoc
new_value = random.randint(0, 99999999999999)
old_value = cache.get_or_set(key, new_value, 60)
if old_value == new_value:
print("Found free token at {key} value: {new_value}".format(key=key, new_value=new_value))
return True
raise RequestLimitReachedException('Please try again later.')
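# Standalone sketch (assumption: a plain dict stands in for Django's cache; all names
# below are made up) of the token-slot idea used above: a per-minute slot is "free"
# when the randomly generated value we tried to store is the one actually stored.
import random as _random

_fake_cache = {}

def _get_or_set(key, value):
    # mimics django.core.cache.cache.get_or_set(key, value, timeout), minus the expiry
    return _fake_cache.setdefault(key, value)

def _acquire_token(limit=20):
    for idx in _random.sample(range(limit), limit):
        key = 'pandadoc_token_{idx}'.format(idx=idx)
        new_value = _random.randint(0, 99999999999999)
        if _get_or_set(key, new_value) == new_value:
            return key   # this slot is ours for the current minute
    return None          # all slots taken; the caller should retry later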
|
[
"fastinetserver@gmail.com"
] |
fastinetserver@gmail.com
|
228a85468519b92d95ce46feecf4c6d9aff9dafa
|
9f38bedf3a3365fdd8b78395930979a41330afc8
|
/branches/epic/version_datasets/core/views.py
|
b2ab7e6974372def294497d917433e737bd86c62
|
[] |
no_license
|
project-renard-survey/nwb
|
6a6ca10abb1e65163374d251be088e033bf3c6e0
|
612f215ac032e14669b3e8f75bc13ac0d4eda9dc
|
refs/heads/master
| 2020-04-01T16:11:01.156528
| 2015-08-03T18:30:34
| 2015-08-03T18:30:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,754
|
py
|
from django.contrib.auth import logout
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User, UserManager
from django.core.urlresolvers import reverse
from django.forms.formsets import formset_factory
from django.http import HttpResponseRedirect, HttpResponse
from django.shortcuts import render_to_response, get_object_or_404
from django.template import RequestContext, Context, loader
from django.utils import simplejson
from epic.core.forms import ForgotPasswordForm, ProfileForm, UserForm
from epic.core.models import Profile
from epic.datarequests.models import DataRequest
from epic.datasets.models import DataSet
def site_index(request):
return render_to_response('core/site_index.html', context_instance=RequestContext(request))
def browse(request):
datasets = DataSet.objects.active().order_by('-created_at')
return render_to_response('core/browse.html',
{'datasets': datasets, },
context_instance=RequestContext(request))
def about(request):
return render_to_response('core/about.html', context_instance=RequestContext(request))
@login_required
def view_profile(request):
""" Used to display the basic/home page for logged in user. """
user = request.user
profile = Profile.objects.for_user(user)
datasets = DataSet.objects.active().filter(creator=user).order_by('-created_at')
datarequests = DataRequest.objects.active().filter(creator=user).exclude(status='C').order_by('-created_at')
    # HTML content relating to the user, their profile, uploaded datasets and
    # data requests made is fetched below.
return render_to_response("core/view_profile.html",
{ "profile": profile,
"datasets": datasets,
"datarequests": datarequests },
context_instance=RequestContext(request))
@login_required
def edit_profile(request):
""" Used to allow a user to edit their own profile """
# Get the user and profile objects.
user = request.user
profile = Profile.objects.for_user(user)
if request.method != 'POST':
# New form needed, set the fields to their current values
profile_form = ProfileForm(instance=profile)
user_form = UserForm(instance=user)
else:
        # Create the form from the submitted fields. Include the old instance to supply the required fields that we will not be showing (e.g. the user).
profile_form = ProfileForm(request.POST, instance=profile)
user_form = UserForm(request.POST, instance=user)
# Check to make sure that all the fields were filled out correctly
if profile_form.is_valid() and user_form.is_valid():
# Save the profile
profile_form.save()
user_form.save()
return HttpResponseRedirect(reverse('epic.core.views.view_profile', kwargs={}))
else:
# Form will have errors which will be displayed by page
pass
return render_to_response('core/edit_profile.html', {'profile_form':profile_form, 'user_form':user_form,}, context_instance=RequestContext(request))
def logout_view(request):
logout(request)
return HttpResponseRedirect(reverse('epic.core.views.site_index',))
@login_required
def change_password(request):
from django.contrib.auth.views import password_change
redirect_url = reverse('epic.core.views.view_profile')
return password_change(request, post_change_redirect=redirect_url, template_name='core/change_password.html')
def forgot_password(request):
if request.method != 'POST':
form = ForgotPasswordForm()
else:
form = ForgotPasswordForm(request.POST)
if form.is_valid():
user = form.cleaned_data['user']
new_random_password = UserManager().make_random_password()
user.set_password(new_random_password)
user.save()
# Load a template for the message body and render it to a string
# to be emailed to the user.
email_body = loader.get_template('core/password_reset_email.html')
template_context = Context(
{
'first_name': user.first_name,
'last_name': user.last_name,
'username': user.username,
'password': new_random_password,
'login_url': reverse('django.contrib.auth.views.login')
}
)
user.email_user('EpiC Account Password Reset', email_body.render(template_context))
success_message = "An email has been sent to your '%(truncated_email)s' address with a new password." % {'truncated_email':user.email.split('@')[1]}
return render_to_response('core/forgot_password_done.html',
{'success_message':success_message,},
context_instance=RequestContext(request))
return render_to_response('core/forgot_password.html', {'form':form,}, context_instance=RequestContext(request))
|
[
"dmcoe@indiana.edu"
] |
dmcoe@indiana.edu
|
f05d24eb8f4d5d54d3df572de195af889ba875b5
|
cbd4e775e4200750c2cd2c9b2a9980768ab41488
|
/redcliff/commands/list.py
|
514541fd6cd7d96ba377a4c709e1907337ad62a8
|
[
"MIT"
] |
permissive
|
dmedvinsky/redcliff
|
aa1891331f839000c56efe6173b7617554577f3b
|
4522f775864ed6e03ef1c3111bab7fc3e47f7f7a
|
refs/heads/master
| 2020-04-09T17:51:45.518254
| 2012-05-28T12:24:38
| 2012-05-28T12:24:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,870
|
py
|
import argparse
from .. import api
from .. import renderer
from ..utils import error
def run(argv, conf):
parser = argparse.ArgumentParser(prog='list')
parser.add_argument('-l', '--limit',
type=int,
metavar='NUM',
help='Limit the number of issues.')
parser.add_argument('-o', '--offset',
type=int,
metavar='NUM',
help='Pagination offset.')
parser.add_argument('-m', '--for-me',
action='store_const', const=True,
help='Show only issues assigned to me.')
parser.add_argument('-a', '--assignee',
type=int,
dest='assigned_to_id',
metavar='ID',
help='Filter issues by "assigned to" field.')
parser.add_argument('-p', '--project',
dest='project_id',
metavar='ID|name',
help='Filter issues by project. Might be project ID or'
' identifier.')
parser.add_argument('-s', '--status',
help='Filter issues by status.')
args = vars(parser.parse_args(argv))
if args['for_me'] and not args['assigned_to_id']:
user = api.users.current(None, conf)
args['assigned_to_id'] = user['id']
if args['status']:
status = api.statuses.by_name(args['status'], conf)
if status:
args['status_id'] = status['id']
else:
error('error: status {0} not found'.format(args['status']))
try:
data = api.issues.list(args, conf)
except:
        error('fatal: API error while getting issues list')
raise
else:
renderer.issues.as_table(data)
return 0
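# Illustrative invocation (assumption: `conf` is the parsed redcliff configuration
# object the CLI dispatcher normally passes in), e.g. listing issues with status
# "New" that are assigned to the current user:
#   run(['--for-me', '--status', 'New'], conf)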
|
[
"dmedvinsky@gmail.com"
] |
dmedvinsky@gmail.com
|
66b98a566cae08c2142ff4dcf878c2bb0d3533d1
|
6b7cd9dba130191ce62b1592ef5416dea4739533
|
/settings.py
|
c041ee3f0a95027cef53b767d60115002fd36578
|
[
"MIT"
] |
permissive
|
zain-Z/Alien-invasion-pygame
|
b43ef2bb3d06d4e58496d3bcf44c73545cc09b21
|
f61cfeacc507e7008fe8c187ac7b2bbe7950d81c
|
refs/heads/main
| 2023-02-04T20:44:02.920483
| 2020-12-23T01:50:59
| 2020-12-23T01:50:59
| 323,774,044
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,422
|
py
|
class Settings:
""" A class to store all settings for Alien Invasion. """
def __init__(self):
"""Initialize the game's static settings."""
        # Screen settings
self.screen_width = 1200
self.screen_height = 600
self.bg_color = (230, 230, 230)
# Ship settings
self.ship_limit = 3
# Bullet settings
self.bullet_width = 3
self.bullet_height = 15
self.bullet_color = 60, 60, 60
self.bullets_allowed = 3
# Alien settings
self.fleet_drop_speed = 10
# How quickly the game speeds up
self.speedup_scale = 1.1
# How quickly the alien point values increase
self.score_scale = 1.5
self.initialize_dynamic_settings()
def initialize_dynamic_settings(self):
"""Initialize settings that change throughout the game."""
self.ship_speed = 1.5
self.bullet_speed = 3.0
self.alien_speed = 1.0
# fleet_direction of 1 represents right; -1 represents left.
self.fleet_direction = 1
# Scoring
self.alien_points = 50
def increase_speed(self):
"""Increase speed settings and alien point values."""
self.ship_speed *= self.speedup_scale
self.bullet_speed *= self.speedup_scale
self.alien_speed *= self.speedup_scale
self.alien_points = int(self.alien_points * self.score_scale)
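# Quick illustration (not part of the game code): each call to increase_speed()
# scales the speed settings by speedup_scale and the alien score by score_scale:
#   settings = Settings()
#   settings.increase_speed()
#   settings.ship_speed    # 1.5 * 1.1 = 1.65 (up to floating point)
#   settings.alien_points  # int(50 * 1.5) = 75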
|
[
"zai1999n@gmail.com"
] |
zai1999n@gmail.com
|
61800604086e6f0125d6ce67681dcb62ed7fc03b
|
a12c5ca05ba8b1f53253b9cb00102c818b994e4e
|
/p55self_inti.py
|
b712d1fc7394461d7df812410697796534673ff6
|
[] |
no_license
|
prince232002/harry
|
98a88c17a98fbe5f56a3ff12b2966182d2d7eef3
|
128438afef36de4c247a031a338ffd95d5e36a2c
|
refs/heads/master
| 2023-07-15T02:46:12.523372
| 2021-08-26T06:52:42
| 2021-08-26T06:52:42
| 395,117,755
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,733
|
py
|
class Employee:
no_of_leaves = 8
def __init__(self, aname, asalary, arole):
self.name = aname
self.salary = asalary
self.role = arole
def printdetails(self):
return f"The Name is {self.name}. Salary is {self.salary} and role is {self.role}"
# harry.name = "Harry"
# harry.salary = 455
# harry.role = "Instructor"
# rohan = Employee()
# rohan.name = "Rohan"
# rohan.salary = 4554
# rohan.role = "Student"
# print(rohan.printdetails())
harry = Employee("Harry", 255, "Instructor")  # the arguments are passed to __init__ when the object is created
print(harry.salary)
#telusko
class computer:
    def __init__(self, cpu, ram):  # the arguments are passed in here
        print("init")
self.cpu=cpu
self.ram=ram
def config(self):
print("config is ",self.cpu,self.ram)
#here we not pass cpu ie self.ram becoz cpu is not a local var its an object
print("i5",16,'1tb')
com1=computer("i5",16)
com2=computer('i7',8)
# each time an object is created __init__ is called, so the init message prints twice
com1.config()
com2.config()
"""types of methods__
1.instance methods 2.class methods 3.static methods
special means it include underscore__
1.instance methods-
self-- it represents the instance of the class. That ,means for creating var we use
and pass self i.e, instance methods
__init__(self)- represents contructors in class It use is to initialize value to the
object state
2.class methods- we use @classmethods
it is used to access class variable so it is bounded to class not the objects
3.staticmethods-@staticmethods
it has nothing to do with class and intances variable
let we want to create factorial of a no then we can create using staticmethod
"""
|
[
"prince2002gupta@gmail.com"
] |
prince2002gupta@gmail.com
|
8c57f11b9dc2abbaead2d675ef76738074fdb617
|
dcf7076d1763f135895b652b1cd4ad65b23bb5d3
|
/tests/RandomDropTest.py
|
8b831b78be8cbb99a5ca2899ba1b9aebb799ca3b
|
[] |
no_license
|
AakashkolekaR/MIS543O_Project2
|
d13d431b7f834653205b9114fbe4094a35943ae6
|
ef8467a454a7647a2b39051ea320a5f3eb9da366
|
refs/heads/master
| 2023-09-04T18:28:07.567789
| 2021-11-01T17:38:21
| 2021-11-01T17:38:21
| 420,533,411
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 625
|
py
|
import random
from .BasicTest import *
"""
This tests random packet drops. We randomly decide to drop about half of the
packets that go through the forwarder in either direction.
Note that to implement this we just needed to override the handle_packet()
method -- this gives you an example of how to extend the basic test case to
create your own.
"""
class RandomDropTest(BasicTest):
def handle_packet(self):
for p in self.forwarder.in_queue:
if random.choice([True, False]):
self.forwarder.out_queue.append(p)
# empty out the in_queue
self.forwarder.in_queue = []
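# A minimal sketch (not part of the original test suite) of another extension in
# the same spirit, assuming the same BasicTest/forwarder interface used above:
# roughly half of the packets are forwarded twice instead of being dropped.
class RandomDuplicateTest(BasicTest):
    def handle_packet(self):
        for p in self.forwarder.in_queue:
            self.forwarder.out_queue.append(p)
            if random.choice([True, False]):
                # forward a second copy of this packet
                self.forwarder.out_queue.append(p)
        # empty out the in_queue
        self.forwarder.in_queue = []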
|
[
"icecowoo@gmail.com"
] |
icecowoo@gmail.com
|
1b0c7b23281af0a10db38f7a028f27e9ffb3cea6
|
83c4907ce7dd1112662386811b176740cc4ce35c
|
/workingOutCode/distancePlots.py
|
8856d92434ed5b1432e72affa3ba4663b71b9e0c
|
[] |
no_license
|
SnehaKannan114/ASTRID
|
2a5ce98abd9467c772615cc2d549acbc65d84d5c
|
3bfe9d8260e730ce6e65b2e59be38cef206f5723
|
refs/heads/master
| 2020-04-11T10:11:01.206835
| 2019-04-24T04:03:39
| 2019-04-24T04:03:39
| 161,706,022
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 12,914
|
py
|
'''
Dataset Used:
1.
The CSIC 2010 HTTP Dataset in CSV Format (for Weka Analysis) (2010) dataset is from a web penetration testing testbed for anomaly detection training.
2.
ADFA-IDS - Collected by Gideon Creech and Jiankun Hu of the Australian Defense Force Academy, ADFA IDS is an intrusion detection system dataset made publicly available in 2013, intended as representative of modern attack structure and methodology to replace the older datasets KDD and UNM. ADFA IDS includes independent datasets for Linux and Windows environments.
'''
import pandas as pd
from scipy import stats
from sklearn.cluster import KMeans
import matplotlib.pyplot as plt
import seaborn as sns
import random
import numpy as np
from math import sqrt
import pickle
import datetime
from makingClusterClass import Cluster
from preProcessing import dataCleanup
'''
-----------------------
Data cleanup Phase
Assigning numerical values for categorical fields
Method:
GET: 0
POST: 1
PUT: 2
Label:
anom: 0
norm:1
Protocol
HTTP/1.1: 0
other: -1
Pragma OR Cache-Control
no-cache: 0
other: -1
Connection:
close: 0
keep-alive: 1
other: -1
----------------------
'''
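# Illustrative mappings for the encoding described above (assumption: the real
# conversion happens inside dataCleanup() from preProcessing; these dicts are
# only a sketch and are not used elsewhere in this script).
EXAMPLE_METHOD_MAP = {'GET': 0, 'POST': 1, 'PUT': 2}
EXAMPLE_LABEL_MAP = {'anom': 0, 'norm': 1}
EXAMPLE_CONNECTION_MAP = {'close': 0, 'keep-alive': 1}  # anything else -> -1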
def jaccard_similarity(list1, list2):
intersection = len(list(set(list1).intersection(list2)))
union = len(list(set(list1).union(list2)))
return float(intersection / union)
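# Example: jaccard_similarity(['a', 'b'], ['b', 'c']) returns 1/3
# (intersection {'b'} has 1 element, union {'a', 'b', 'c'} has 3).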
def levenshtein(seq1, seq2):
size_x = len(seq1) + 1
size_y = len(seq2) + 1
matrix = np.zeros ((size_x, size_y))
for x in range(size_x):
matrix [x, 0] = x
for y in range(size_y):
matrix [0, y] = y
for x in range(1, size_x):
for y in range(1, size_y):
if seq1[x-1] == seq2[y-1]:
matrix [x,y] = min(
matrix[x-1, y] + 1,
matrix[x-1, y-1],
matrix[x, y-1] + 1
)
else:
matrix [x,y] = min(
matrix[x-1,y] + 1,
matrix[x-1,y-1] + 1,
matrix[x,y-1] + 1
)
#print (matrix)
return (matrix[size_x - 1, size_y - 1])
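# Example: levenshtein("kitten", "sitting") returns 3.0
# (substitute k->s, substitute e->i, insert g; the matrix holds floats).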
def getDissimilarityDist(d, centroid):
'''
For some inputs, we take categorical differences
For other inputs (concatenated or fields with options), we find jaccard difference = len(intersection)/len(union)
For cookie, since closely similar cookie strings may have some relationship, we use levenshtein distance
'''
#method
distMethod = abs(d[1]-centroid[1])
#print("Method:",distMethod)
#url
url1 = d[2].split('/')
url2 = centroid[2].split('/')
distUrl = 1 - jaccard_similarity(url1,url2)
#print("Url:",distUrl)
#protocol
distProtocol = abs(d[3]-centroid[3])
#print("Protocol:",distProtocol)
#userAgent
ua1 = d[4].split('/')
ua2 = centroid[4].split('/')
distUserAgent = 1 - jaccard_similarity(ua1,ua2)
#print("UA:",distUserAgent)
#pragma
distPragma = abs(d[5]-centroid[5])
#print("Pragma:",distPragma)
#cacheControl
distCache = abs(d[6]-centroid[6])
#print("Cache:",distCache)
#accept
accept1 = d[7].split(',')
accept2 = centroid[7].split(',')
distAccept = 1-jaccard_similarity(accept1,accept2)
#print("Accept:",distAccept)
#print(accept1, accept2)
#acceptEncoding
en1 = d[8].split(',')
en2 = centroid[8].split(',')
distAcceptEnc = 1 - jaccard_similarity(en1,en2)
#print("Accept Enc:",distAcceptEnc)
#print(en1, en2)
#acceptCharset
char1 = d[9].split(',')
char2 = centroid[9].split(',')
distAcceptChar = 1 - jaccard_similarity(char1,char2)
#print("Accept Charset:",distAcceptChar)
#print(char1, char2)
#acceptLang
lang1 = d[10].split(',')
lang2 = centroid[10].split(',')
distAcceptLang = 1 - jaccard_similarity(lang1,lang2)
#print("Accept Lang:",distAcceptLang)
#host
host1 = d[11].split('.')
host2 = centroid[11].split('.')
    distHost = 1 - jaccard_similarity(host1,host2)
#print("Host:", distHost)
#connection
distConnect = abs(d[12] - centroid[12])
#print("Connection:",distConnect)
#contentLength
distContLen = abs(d[13] - centroid[13])
#print("Content Length:",distContLen)
#contentType
distContType = abs(d[14] - centroid[14])
#print("Content Type:",distContType)
#cookie
cookie1 = d[15]
cookie2 = centroid[15]
distCookie = levenshtein(cookie1,cookie2)
#print("Cookie:",distCookie)
#payload
pay1 = str(d[16]).split(',')
pay2 = str(centroid[16]).split(',')
distPayload = 1 - jaccard_similarity(pay1,pay2)
#print("Payload:",distPayload)
'''
once we have differences input-wise
We calculate euclidian distance as:
dist(X,Y) = sqrt(summation( (xi-yi)^2) ))
sqrt((x1-y2)^2 + (x2-y2)^2 + ...)
We have added scaling factors:
cookie sf = 0.01
Yet to Implement: Normalise all inputs relative to average to eliminate bias
'''
distance = sqrt(pow(distMethod,2) + pow(distUrl,2) + pow(distProtocol,2) + pow(distUserAgent,2) + pow(distPragma,2) + pow(distCache,2) + pow(distAcceptEnc,2) + pow(distAcceptChar,2) + pow(distAcceptLang,2) + pow(distHost
, 2) + pow(distConnect,2) + pow(distContType,2) + pow(distContLen*0.001,2) + pow(distCookie*0.01,2) + pow(distPayload,2))
#print(distance)
return (distance)
def validateModel():
data = pd.read_csv('validationData.csv')
dataCleanup(data)
file2 = open(r'myLearnedData.pkl', 'rb')
c1 = pickle.load(file2)
c2 = pickle.load(file2)
c3 = pickle.load(file2)
c4 = pickle.load(file2)
file2.close()
correctCounts = 0
wrongCounts = 0
totalCounts = 0
#True False Positive Negative: Positive means classified as anomalous
tp = 0
tn = 0
fp = 0
fn = 0
prettyPrintLine("Validating Model")
for entry in data.values:
'''REMOVE COMMENT'''
#print(entry, c1)
#input()
dist1 = getDissimilarityDist(entry, c1)
dist2 = getDissimilarityDist(entry, c2)
dist3 = getDissimilarityDist(entry, c3)
dist4 = getDissimilarityDist(entry, c4)
if dist1<dist2 and dist1<dist3 and dist1<dist4:
if entry[17] == c1[17]:
correctCounts = correctCounts+1
tp = tp+1
else:
wrongCounts = wrongCounts+1
fp = fp+1
totalCounts = totalCounts+1
elif dist2<dist3 and dist2<dist4:
if entry[17] == c2[17]:
correctCounts = correctCounts+1
tp = tp+1
else:
wrongCounts = wrongCounts+1
fp = fp+1
totalCounts = totalCounts+1
elif dist3<dist4:
if entry[17] == c3[17]:
correctCounts = correctCounts+1
tn = tn+1
else:
wrongCounts = wrongCounts+1
fn = fn+1
totalCounts = totalCounts+1
else:
if entry[17] == c4[17]:
correctCounts = correctCounts+1
tn = tn+1
else:
wrongCounts = wrongCounts+1
fn = fn+1
totalCounts = totalCounts+1
#print("Correct Classifications=",correctCounts)
#print("Wrong Classifications =",wrongCounts)
#print("Total Classifications=",totalCounts)
now = datetime.datetime.now()
afile = open(r'run_Results_Log.pkl', 'wb')
pickle.dump(now.strftime("%Y-%m-%d %H:%M"), afile)
pickle.dump(correctCounts, afile)
pickle.dump(wrongCounts, afile)
pickle.dump(totalCounts, afile)
pickle.dump(tp, afile)
pickle.dump(fp, afile)
pickle.dump(tn, afile)
pickle.dump(fn, afile)
afile.close()
def prettyPrintLine(string):
print('----------------------------------------------------------')
print(string)
print('----------------------------------------------------------\n')
def startTraining():
data = pd.read_csv("ultraSmallData.csv")
dataCleanup(data)
prettyPrintLine("K Means")
prettyPrintLine("Training started...")
f1 = data['method'].values
f2 = data['label'].values
X = np.array(list(zip(f1, f2)))
numOfDataEntries = len(data.values) - 1000
initCentroid1 = random.randint(0, numOfDataEntries//4-100)
initCentroid2 = random.randint(numOfDataEntries//4, numOfDataEntries//2-100)
initCentroid3 = random.randint(numOfDataEntries//2, 3*numOfDataEntries//4-100)
initCentroid4 = random.randint(3*numOfDataEntries//4, numOfDataEntries)
initCentroid5 = random.randint(numOfDataEntries, numOfDataEntries+995)
##print(data.values[initCentroid1])
##print(data.values[initCentroid2])
##print(data.values[initCentroid3])
##print(data.values[initCentroid4])
##print(data.values[initCentroid5])
clusters = [[],[],[],[],[]]
clusters[0].append(data.values[initCentroid1])
clusters[1].append(data.values[initCentroid2])
clusters[2].append(data.values[initCentroid3])
clusters[3].append(data.values[initCentroid4])
clusters[4].append(data.values[initCentroid5])
myCluster1 = Cluster(data.values[initCentroid1])
myCluster2 = Cluster(data.values[initCentroid2])
myCluster3 = Cluster(data.values[initCentroid3])
myCluster4 = Cluster(data.values[initCentroid4])
myCluster5 = Cluster(data.values[initCentroid5])
print(numOfDataEntries)
print(initCentroid5)
print(myCluster5.center)
clustersConcise = [[],[],[],[],[]]
clustersConcise[0].append(data.values[initCentroid1])
clustersConcise[1].append(data.values[initCentroid2])
clustersConcise[2].append(data.values[initCentroid3])
clustersConcise[3].append(data.values[initCentroid4])
clustersConcise[4].append(data.values[initCentroid5])
#print(initCentroid1, initCentroid2, initCentroid3, initCentroid4)
count = 0
for entry in data.values:
count = count+1
if(count%10 < 6):
continue
#print(count)
dist1 = getDissimilarityDist(entry, myCluster1.center)
#print("\n")
dist2 = getDissimilarityDist(entry, myCluster2.center)
#print("\n")
dist3 = getDissimilarityDist(entry, myCluster3.center)
#print("\n")
dist4 = getDissimilarityDist(entry, myCluster4.center)
#print("\n")
dist5 = getDissimilarityDist(entry, myCluster5.center)
#print("\n")
#print(dist1, dist2, dist3, dist4)
if dist1<dist2 and dist1<dist3 and dist1<dist4 and dist1<dist5:
clusters[0].append(entry)
clustersConcise[0].append(entry[17])
myCluster1.add(entry)
        elif dist2<dist3 and dist2<dist4 and dist2<dist5:
clusters[1].append(entry)
clustersConcise[1].append(entry[17])
myCluster2.add(entry)
        elif dist3<dist4 and dist3<dist5:
clusters[2].append(entry)
clustersConcise[2].append(entry[17])
myCluster3.add(entry)
elif(dist4<dist5):
clusters[3].append(entry)
clustersConcise[3].append(entry[17])
myCluster4.add(entry)
else:
clusters[4].append(entry)
clustersConcise[4].append(entry[17])
myCluster5.add(entry)
'''
prettyPrintLine("Cluster 1")
print(myCluster1.center)
myCluster1.printMeans()
prettyPrintLine("Cluster 2")
print(myCluster2.center)
myCluster2.printMeans()
prettyPrintLine("Cluster 3")
print(myCluster3.center)
myCluster3.printMeans()
prettyPrintLine("Cluster 4")
print(myCluster4.center)
myCluster4.printMeans()
prettyPrintLine("Cluster 5")
print(myCluster5.center)
myCluster5.printMeans()
'''
    # placeholder for recomputing the cluster means (the recomputed value is not used yet)
    for i in range(4):
        new_centers = np.array(X.mean(0))
afile = open(r'myLearnedData.pkl', 'wb')
pickle.dump(data.values[initCentroid1], afile)
pickle.dump(data.values[initCentroid2], afile)
pickle.dump(data.values[initCentroid3], afile)
pickle.dump(data.values[initCentroid4], afile)
pickle.dump(data.values[initCentroid5], afile)
#pickle.dump(clusters, afile)
afile.close()
print("\nCentroids\n")
print(data.values[initCentroid1], data.values[initCentroid2], data.values[initCentroid3], data.values[initCentroid4])
print("Cluster 1: Labels classified")
print(clustersConcise[0])
print("\n\n")
print("Cluster 2: Labels classified")
print(clustersConcise[1])
print("\n\n")
print("Cluster 3: Labels classified")
print(clustersConcise[2])
print("\n\n")
print("Cluster 4: Labels classified")
print(clustersConcise[3])
print("\n\n")
print("Cluster 5: Labels classified")
print(clustersConcise[4])
startTraining()
|
[
"kannansneha.1997@gmail.com"
] |
kannansneha.1997@gmail.com
|
bcbdbf72f619220993db5c8f91a704968e8e97d5
|
228d430cd5a79b3617b636d8d4e7c2da306ad095
|
/crowdfunding/apps/trades/views.py
|
0008d39c77caf35144aa3cba194470792f8d99b1
|
[] |
no_license
|
apollomeng/zhongchoufunding
|
beb2d141db5aadcfe3b45a170af6f964a67fa6d6
|
9df754c38b45b708592a8d08075561f8456c1b7d
|
refs/heads/master
| 2020-03-29T22:29:27.414532
| 2018-11-06T15:30:54
| 2018-11-06T15:30:54
| 150,423,816
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,306
|
py
|
from django.http import JsonResponse
from django.shortcuts import render,redirect,reverse,HttpResponse
from datetime import datetime
# Create your views here.
from crowdfunding.settings import APPID, RETURN_URL, APP_PRIVATE_KEY_PATH, ALIPAY_PUBLIC_KEY_PATH, ALIPAY_DEBUG
from operations.forms import UserAddressForm
from operations.models import UserAddress
from projects.models import SupportItems
from trades.models import UserSupport, OrderInfo
from helptools.get_order_sn import get_order_sn
from utils.alipay import AliPay
def trade_step1(request,supid):
if supid:
supportitem = SupportItems.objects.filter(id=int(supid))[0]
return render(request,'trades/trade_step1.html',{
'supportitem':supportitem,
})
def trade_order(request):
num = request.GET.get('num','')
supid = request.GET.get('supid','')
if num and supid:
supportitem = SupportItems.objects.filter(id=int(supid))[0]
usersupport = UserSupport()
usersupport.support_project = supportitem.project
usersupport.support_man = request.user
usersupport.support_item = supportitem
usersupport.support_nums = num
usersupport.order_sn = get_order_sn(request.user.id)
usersupport.save()
        return JsonResponse({'status': 'ok', 'msg': 'Submitted successfully'})
else:
        return JsonResponse({'status': 'fail', 'msg': 'Failed'})
def trade_step2(request,supid):
supportitem = SupportItems.objects.filter(id=int(supid))[0]
usersupport = UserSupport.objects.filter(support_item_id=int(supid),support_man=request.user)[0]
useraddress_list = UserAddress.objects.filter(user=request.user)
order_mount = int(usersupport.support_nums) * int(supportitem.support_funding) + supportitem.transport_cost
return render(request, 'trades/trade_step2.html', {
'supportitem': supportitem,
'usersupport': usersupport,
'useraddress_list': useraddress_list,
'order_mount': order_mount,
})
def add_address(request):
user_address_form = UserAddressForm(request.POST)
if user_address_form.is_valid():
address = user_address_form.cleaned_data['address']
signer_name = user_address_form.cleaned_data['signer_name']
signer_mobile = user_address_form.cleaned_data['signer_mobile']
useraddress = UserAddress()
useraddress.user = request.user
useraddress.address = address
useraddress.signer_name = signer_name
useraddress.signer_mobile = signer_mobile
useraddress.save()
        return JsonResponse({'status': 'ok', 'msg': 'Success'})
else:
        return JsonResponse({'status': 'fail', 'msg': 'Failed'})
def order_last(request):
add_id = request.POST.get('add_id','')
need_invoice = request.POST.get('need_invoice','')
    info_invoice = request.POST.get('info_invoice','Personal')
    post_script = request.POST.get('post_script','None')
supid = request.POST.get('supid','')
order_mount = request.POST.get('order_mount','')
useraddress = UserAddress.objects.filter(id=int(add_id))[0]
supportitem = SupportItems.objects.filter(id=int(supid))[0]
usersupport = UserSupport.objects.filter(support_item_id=int(supid))[0]
orderinfo = OrderInfo()
orderinfo.user = request.user
orderinfo.support_item = supportitem
orderinfo.item_nums = usersupport.support_nums
orderinfo.order_sn = usersupport.order_sn
orderinfo.order_mount = order_mount
orderinfo.post_script = post_script
orderinfo.address = useraddress
if need_invoice =='option1':
orderinfo.need_invoice =False
else:
orderinfo.need_invoice = True
orderinfo.info_invoice = info_invoice
orderinfo.save()
    # get the Alipay payment client
alipay = AliPay(
appid=APPID,
        # http/https URL on the merchant server that Alipay's servers notify asynchronously
app_notify_url=RETURN_URL,
app_private_key_path=APP_PRIVATE_KEY_PATH,
        alipay_public_key_path=ALIPAY_PUBLIC_KEY_PATH,  # Alipay's public key, used to verify messages returned by Alipay, not your own public key
        debug=ALIPAY_DEBUG,  # defaults to False
        # synchronous notification: Alipay redirects the user back to this URL
return_url=RETURN_URL
)
url = alipay.direct_pay(
subject=orderinfo.order_sn,
out_trade_no=orderinfo.order_sn,
total_amount=orderinfo.order_mount
)
    # sandbox environment
    re_url = "https://openapi.alipaydev.com/gateway.do?{data}".format(data=url)
    print('Generated payment URL:')
print(re_url)
return JsonResponse({'status': 'ok', 'msg': re_url})
# Verify the Alipay callback: 1) check the data has not been tampered with, 2) mark the order as paid
def alipay(request):
alipay = AliPay(
appid=APPID,
        # http/https URL on the merchant server that Alipay's servers notify asynchronously
app_notify_url=RETURN_URL,
app_private_key_path=APP_PRIVATE_KEY_PATH,
        alipay_public_key_path=ALIPAY_PUBLIC_KEY_PATH,  # Alipay's public key, used to verify messages returned by Alipay, not your own public key
        debug=ALIPAY_DEBUG,  # defaults to False
        # synchronous notification: Alipay redirects the user back to this URL
return_url=RETURN_URL
)
processed_query = {}
for key, value in request.GET.items():
processed_query[key] = value
ali_sign = processed_query.pop('sign', None)
check_result = alipay.verify(processed_query, ali_sign)
    print('GET callback verification result:', check_result)
    # 2) verification passed: mark the order as paid
if check_result:
        # get the order number
        order_sn = processed_query.get('out_trade_no', None)
        # get the Alipay trade number
        trade_no = processed_query.get('trade_no', None)
        # look up the order by its order number
        order_info = OrderInfo.objects.filter(order_sn=order_sn)[0]
        # set the trade number
        order_info.trade_no = trade_no
        # set the payment status
        order_info.pay_status = 'TRADE_SUCCESS'
        # set the payment time
        order_info.pay_time = datetime.now()
order_info.save()
# return Response('success')
response = redirect('index')
response.set_cookie('nextPath', 'pay', max_age=2)
return response
else:
response = redirect('index')
return response
|
[
"42266486+apollomeng@users.noreply.github.com"
] |
42266486+apollomeng@users.noreply.github.com
|
e7ea72ecc61e573b8f9031ad3d7bcd15be800ff6
|
86236a778c2d022cbdee11017c5a8cf7d01111d0
|
/adm_rec/basiccrud/utils.py
|
b8863e006f3ca52d64f374a4d338ff747b3f90a2
|
[] |
no_license
|
luckybiason/sist_rec_enter
|
dee1b92f48c0f9476a8a1becedb1227222a49816
|
bf68c2bdc7d3c7c87642c309224fd03b0511b5c0
|
refs/heads/master
| 2021-01-23T19:34:51.084015
| 2013-10-30T20:46:48
| 2013-10-30T20:46:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,183
|
py
|
#-*- coding: utf-8 -*-
from django.conf.urls.defaults import *
from django.core.urlresolvers import reverse  # reverse() is used below in get_delete_icon
from views import *
from django.conf import settings
from django.shortcuts import render_to_response
# - Icon to remove a record
def get_delete_icon(id_obj,model):
context = {
'url': reverse(model.get_config()["app"]+".excluir", kwargs={'pk': id_obj}),
'STATIC_URL': settings.STATIC_URL
}
return render_to_response("basic/_img_excl.html", context).content
# - Icon that shows an ok / not-ok image for boolean fields
def get_boolean_icon(boole):
context = {
'bool': boole,
'STATIC_URL': settings.STATIC_URL
}
return render_to_response("basic/_img_bool.html", context).content
# - Checks whether the object has related records
def has_related_objects(model,obj):
#return True
if model._meta.get_all_related_objects():
for related_object in model._meta.get_all_related_objects():
related = related_object.model.objects
campo = related_object.field.get_attname().replace('_id','')
if related.get_query_set().filter(**{campo.lower():obj.id}):
return True
return False
|
[
"biasonlucky@hotmail.com"
] |
biasonlucky@hotmail.com
|
58bebb9428186e0fd1cf9945fd4ec17a4a7403e8
|
6d2b51b9b35bd954ee4c19181d05b74f60238c28
|
/raspberrypihal.py
|
11351f869f62946d49c769e68a65d09b03803a36
|
[
"MIT"
] |
permissive
|
JoshuaBThompson/lamatrix
|
77ccdfcd0c11d5a4b6db9d994137bf5ae788ef1f
|
8d28473d5242afe6ea4e66579fae02543d2e9851
|
refs/heads/master
| 2023-04-19T05:03:43.285393
| 2021-04-25T07:52:07
| 2021-04-25T07:52:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,040
|
py
|
# HAL for Raspberry Pi with https://github.com/rpi-ws281x/rpi-ws281x-python
# See https://github.com/jgarff/rpi_ws281x for more details on this library.
#
# The below code assumes the LED strip is connected to GPIO 18 (PCM CLK)
# (see https://pinout.xyz) and that you've installed the rpi_ws281x library.
#
# For Python 2.x:
#
# sudo apt install -y python-pip; sudo pip install rpi_ws281x
#
# For Python 3.x:
#
# sudo apt install -y python3-pip; sudo pip3 install rpi_ws281x
#
#
from rpi_ws281x import PixelStrip, Color
# LED strip configuration:
LED_PIN = 18 # GPIO pin connected to the pixels (18 uses PWM!).
# LED_PIN = 10 # GPIO pin connected to the pixels (10 uses SPI /dev/spidev0.0).
LED_FREQ_HZ = 800000 # LED signal frequency in hertz (usually 800khz)
LED_DMA = 10 # DMA channel to use for generating signal (try 10)
LED_BRIGHTNESS = 255 # Set to 0 for darkest and 255 for brightest
LED_INVERT = False # True to invert the signal (when using NPN transistor level shift)
LED_CHANNEL = 0 # set to '1' for GPIOs 13, 19, 41, 45 or 53
class RaspberryPiHAL:
def __init__(self, config):
self.num_pixels = config['LedMatrix']['columns'] * config['LedMatrix']['stride']
self.strip = PixelStrip(self.num_pixels, LED_PIN, LED_FREQ_HZ, LED_DMA, LED_INVERT, LED_BRIGHTNESS, LED_CHANNEL)
self.strip.begin()
def init_display(self, num_pixels=64):
self.clear_display()
def clear_display(self):
c = Color(0, 0, 0)
for i in range(self.num_pixels):
self.strip.setPixelColor(i, c)
self.strip.show()
def update_display(self, num_modified_pixels):
if not num_modified_pixels:
return
self.strip.show()
def put_pixel(self, addr, r, g, b):
self.strip.setPixelColor(addr % self.num_pixels, Color(r, g, b))
def reset(self):
self.clear_display()
def process_input(self):
#TODO: implement
return 0
def set_rtc(self, t):
#Not relevant
pass
def set_auto_time(self, enable=True):
#Not relevant
pass
def suspend_host(self, restart_timeout_seconds):
#Not relevant
pass
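# --- Illustrative usage (not part of the original HAL) ---
# A minimal sketch: the 'LedMatrix' keys mirror what __init__ reads from the
# config; the 8x8 geometry below is only an example value.
def _example_light_first_pixel():
    config = {'LedMatrix': {'columns': 8, 'stride': 8}}  # 64 pixels total
    hal = RaspberryPiHAL(config)
    hal.init_display()
    hal.put_pixel(0, 255, 0, 0)  # set the first pixel to red
    hal.update_display(1)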
|
[
"noah@hack.se"
] |
noah@hack.se
|
3a0e1f78250db2e482d5eff70a0c07b7ee2c4b50
|
24a47669907cb008c3fea4265c4b6f37dddc54a4
|
/keras_/kerascv/models/sepreresnet.py
|
4331887dec1490c392c0c3197e65d35f06adb823
|
[
"MIT"
] |
permissive
|
JHLee0513/imgclsmob
|
ee1f6b8c7f677ed0e8a23e26d3165d37fd8549b4
|
45abcc1d313b84fa3595e13f0e4fa04b5db6c75d
|
refs/heads/master
| 2020-04-22T14:13:25.337524
| 2019-02-12T18:26:09
| 2019-02-12T18:26:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 13,161
|
py
|
"""
SE-PreResNet, implemented in Keras.
Original paper: 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
"""
__all__ = ['sepreresnet', 'sepreresnet18', 'sepreresnet34', 'sepreresnet50', 'sepreresnet50b', 'sepreresnet101',
'sepreresnet101b', 'sepreresnet152', 'sepreresnet152b', 'sepreresnet200', 'sepreresnet200b']
import os
from keras import layers as nn
from keras.models import Model
from .common import conv1x1, se_block, is_channels_first, flatten
from .preresnet import preres_block, preres_bottleneck_block, preres_init_block, preres_activation
def sepreres_unit(x,
in_channels,
out_channels,
strides,
bottleneck,
conv1_stride,
name="sepreres_unit"):
"""
SE-PreResNet unit.
Parameters:
----------
x : keras.backend tensor/variable/symbol
Input tensor/variable/symbol.
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
strides : int or tuple/list of 2 int
Strides of the convolution.
bottleneck : bool
Whether to use a bottleneck or simple block in units.
conv1_stride : bool
Whether to use stride in the first or the second convolution layer of the block.
name : str, default 'sepreres_unit'
Unit name.
Returns
-------
keras.backend tensor/variable/symbol
Resulted tensor.
"""
identity = x
if bottleneck:
x, x_pre_activ = preres_bottleneck_block(
x=x,
in_channels=in_channels,
out_channels=out_channels,
strides=strides,
conv1_stride=conv1_stride,
name=name + "/body")
else:
x, x_pre_activ = preres_block(
x=x,
in_channels=in_channels,
out_channels=out_channels,
strides=strides,
name=name + "/body")
x = se_block(
x=x,
channels=out_channels,
name=name + "/se")
resize_identity = (in_channels != out_channels) or (strides != 1)
if resize_identity:
identity = conv1x1(
x=x_pre_activ,
in_channels=in_channels,
out_channels=out_channels,
strides=strides,
name=name + "/identity_conv")
x = nn.add([x, identity], name=name + "/add")
return x
def sepreresnet(channels,
init_block_channels,
bottleneck,
conv1_stride,
in_channels=3,
in_size=(224, 224),
classes=1000):
"""
SE-PreResNet model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
Parameters:
----------
channels : list of list of int
Number of output channels for each unit.
init_block_channels : int
Number of output channels for the initial unit.
bottleneck : bool
Whether to use a bottleneck or simple block in units.
conv1_stride : bool
Whether to use stride in the first or the second convolution layer in units.
in_channels : int, default 3
Number of input channels.
in_size : tuple of two ints, default (224, 224)
Spatial size of the expected input image.
classes : int, default 1000
Number of classification classes.
"""
input_shape = (in_channels, 224, 224) if is_channels_first() else (224, 224, in_channels)
input = nn.Input(shape=input_shape)
x = preres_init_block(
x=input,
in_channels=in_channels,
out_channels=init_block_channels,
name="features/init_block")
in_channels = init_block_channels
for i, channels_per_stage in enumerate(channels):
for j, out_channels in enumerate(channels_per_stage):
strides = 2 if (j == 0) and (i != 0) else 1
x = sepreres_unit(
x=x,
in_channels=in_channels,
out_channels=out_channels,
strides=strides,
bottleneck=bottleneck,
conv1_stride=conv1_stride,
name="features/stage{}/unit{}".format(i + 1, j + 1))
in_channels = out_channels
x = preres_activation(
x=x,
name="features/post_activ")
x = nn.AvgPool2D(
pool_size=7,
strides=1,
name="features/final_pool")(x)
# x = nn.Flatten()(x)
x = flatten(x)
x = nn.Dense(
units=classes,
input_dim=in_channels,
name="output")(x)
model = Model(inputs=input, outputs=x)
model.in_size = in_size
model.classes = classes
return model
def get_sepreresnet(blocks,
conv1_stride=True,
model_name=None,
pretrained=False,
root=os.path.join('~', '.keras', 'models'),
**kwargs):
"""
Create PreResNet or SE-PreResNet model with specific parameters.
Parameters:
----------
blocks : int
Number of blocks.
conv1_stride : bool
Whether to use stride in the first or the second convolution layer in units.
model_name : str or None, default None
Model name for loading pretrained model.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.keras/models'
Location for keeping the model parameters.
"""
if blocks == 18:
layers = [2, 2, 2, 2]
elif blocks == 34:
layers = [3, 4, 6, 3]
elif blocks == 50:
layers = [3, 4, 6, 3]
elif blocks == 101:
layers = [3, 4, 23, 3]
elif blocks == 152:
layers = [3, 8, 36, 3]
elif blocks == 200:
layers = [3, 24, 36, 3]
else:
raise ValueError("Unsupported SE-PreResNet with number of blocks: {}".format(blocks))
init_block_channels = 64
if blocks < 50:
channels_per_layers = [64, 128, 256, 512]
bottleneck = False
else:
channels_per_layers = [256, 512, 1024, 2048]
bottleneck = True
channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]
net = sepreresnet(
channels=channels,
init_block_channels=init_block_channels,
bottleneck=bottleneck,
conv1_stride=conv1_stride,
**kwargs)
if pretrained:
if (model_name is None) or (not model_name):
raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
from .model_store import download_model
download_model(
net=net,
model_name=model_name,
local_model_store_dir_path=root)
return net
def sepreresnet18(**kwargs):
"""
SE-PreResNet-18 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.keras/models'
Location for keeping the model parameters.
"""
return get_sepreresnet(blocks=18, model_name="sepreresnet18", **kwargs)
def sepreresnet34(**kwargs):
"""
SE-PreResNet-34 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.keras/models'
Location for keeping the model parameters.
"""
return get_sepreresnet(blocks=34, model_name="sepreresnet34", **kwargs)
def sepreresnet50(**kwargs):
"""
SE-PreResNet-50 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.keras/models'
Location for keeping the model parameters.
"""
return get_sepreresnet(blocks=50, model_name="sepreresnet50", **kwargs)
def sepreresnet50b(**kwargs):
"""
SE-PreResNet-50 model with stride at the second convolution in bottleneck block from 'Squeeze-and-Excitation
Networks,' https://arxiv.org/abs/1709.01507.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.keras/models'
Location for keeping the model parameters.
"""
return get_sepreresnet(blocks=50, conv1_stride=False, model_name="sepreresnet50b", **kwargs)
def sepreresnet101(**kwargs):
"""
SE-PreResNet-101 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.keras/models'
Location for keeping the model parameters.
"""
return get_sepreresnet(blocks=101, model_name="sepreresnet101", **kwargs)
def sepreresnet101b(**kwargs):
"""
SE-PreResNet-101 model with stride at the second convolution in bottleneck block from 'Squeeze-and-Excitation
Networks,' https://arxiv.org/abs/1709.01507.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.keras/models'
Location for keeping the model parameters.
"""
return get_sepreresnet(blocks=101, conv1_stride=False, model_name="sepreresnet101b", **kwargs)
def sepreresnet152(**kwargs):
"""
SE-PreResNet-152 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.keras/models'
Location for keeping the model parameters.
"""
return get_sepreresnet(blocks=152, model_name="sepreresnet152", **kwargs)
def sepreresnet152b(**kwargs):
"""
SE-PreResNet-152 model with stride at the second convolution in bottleneck block from 'Squeeze-and-Excitation
Networks,' https://arxiv.org/abs/1709.01507.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.keras/models'
Location for keeping the model parameters.
"""
return get_sepreresnet(blocks=152, conv1_stride=False, model_name="sepreresnet152b", **kwargs)
def sepreresnet200(**kwargs):
"""
SE-PreResNet-200 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. It's an
experimental model.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.keras/models'
Location for keeping the model parameters.
"""
return get_sepreresnet(blocks=200, model_name="sepreresnet200", **kwargs)
def sepreresnet200b(**kwargs):
"""
SE-PreResNet-200 model with stride at the second convolution in bottleneck block from 'Squeeze-and-Excitation
Networks,' https://arxiv.org/abs/1709.01507. It's an experimental model.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.keras/models'
Location for keeping the model parameters.
"""
return get_sepreresnet(blocks=200, conv1_stride=False, model_name="sepreresnet200b", **kwargs)
def _test():
import numpy as np
import keras
pretrained = False
models = [
sepreresnet18,
sepreresnet34,
sepreresnet50,
sepreresnet50b,
sepreresnet101,
sepreresnet101b,
sepreresnet152,
sepreresnet152b,
sepreresnet200,
sepreresnet200b,
]
for model in models:
net = model(pretrained=pretrained)
# net.summary()
weight_count = keras.utils.layer_utils.count_params(net.trainable_weights)
print("m={}, {}".format(model.__name__, weight_count))
assert (model != sepreresnet18 or weight_count == 11776928)
assert (model != sepreresnet34 or weight_count == 21957204)
assert (model != sepreresnet50 or weight_count == 28080472)
assert (model != sepreresnet50b or weight_count == 28080472)
assert (model != sepreresnet101 or weight_count == 49319320)
assert (model != sepreresnet101b or weight_count == 49319320)
assert (model != sepreresnet152 or weight_count == 66814296)
assert (model != sepreresnet152b or weight_count == 66814296)
assert (model != sepreresnet200 or weight_count == 71828312)
assert (model != sepreresnet200b or weight_count == 71828312)
if is_channels_first():
x = np.zeros((1, 3, 224, 224), np.float32)
else:
x = np.zeros((1, 224, 224, 3), np.float32)
y = net.predict(x)
assert (y.shape == (1, 1000))
if __name__ == "__main__":
_test()
|
[
"osemery@gmail.com"
] |
osemery@gmail.com
|
29de6e8371a29df9ffa2764804a84541724a6017
|
5f3282ef6bef79648cc19dc38651cb284301497c
|
/cnn_processing.py
|
c790593d3f3477525fc15065d0d489dc8dbc0adf
|
[] |
no_license
|
raplima/petrog_thin_section_cnn
|
2e660499a1d13bfa9a30306b9ca5fab7d0e32c52
|
de9265f8f23a4d7fa64c918f074d35b1372a1f46
|
refs/heads/master
| 2022-12-15T17:54:57.076891
| 2022-12-05T17:27:17
| 2022-12-05T17:27:17
| 168,870,152
| 3
| 1
| null | 2022-02-23T12:40:13
| 2019-02-02T19:39:12
|
Python
|
UTF-8
|
Python
| false
| false
| 22,482
|
py
|
# Rafael Pires de Lima
# January 2019
# Transfer Learning application
# This file includes routines to split the data, create bottlenecks, train a new model, predict with new model
import os
import pickle
import shutil
import matplotlib
import numpy as np
from keras import applications, Model
from keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
from keras.layers import Dropout, Flatten, Dense, Input
from keras.models import Sequential
from keras.models import load_model
from keras.optimizers import SGD
from keras.preprocessing.image import ImageDataGenerator
from keras.callbacks import TensorBoard
from keras import backend as K
import datetime
matplotlib.use('TkAgg')  # select the backend before pyplot is imported
from matplotlib import pyplot as plt
from matplotlib import style
from timeit import default_timer as timer
style.use("seaborn")
verb = 0 # verbose when training
# folders management:
bottleneck_dir = os.path.join(os.getcwd(),'runs', 'bnecks')
model_dir = os.path.join(os.getcwd(),'runs', 'models')
def model_preprocess(model_name):
"""Loads the appropriate CNN preprocess
Args:
arch: String key for model to be loaded.
Returns:
The specified Keras preprocessing.
"""
# function that loads the appropriate model
if model_name == 'Xception':
return applications.xception.preprocess_input
elif model_name == 'VGG16':
return applications.vgg16.preprocess_input
elif model_name == 'VGG19':
return applications.vgg19.preprocess_input
elif model_name == 'ResNet50':
return applications.resnet50.preprocess_input
elif model_name == 'InceptionV3':
return applications.inception_v3.preprocess_input
elif model_name == 'InceptionResNetV2':
return applications.inception_resnet_v2.preprocess_input
elif model_name == 'MobileNet':
return applications.mobilenet.preprocess_input
elif model_name == 'DenseNet121':
return applications.densenet.preprocess_input
elif model_name == 'NASNetLarge':
return applications.nasnet.preprocess_input
elif model_name == 'MobileNetV2':
return applications.mobilenet_v2.preprocess_input
else:
print('Invalid model selected')
return False
def model_app(arch, input_tensor):
"""Loads the appropriate convolutional neural network (CNN) model
Args:
arch: String key for model to be loaded.
input_tensor: Keras tensor to use as image input for the model.
Returns:
model: The specified Keras Model instance with ImageNet weights loaded and without the top classification layer.
"""
# function that loads the appropriate model
if arch == 'Xception':
model = applications.Xception(weights='imagenet', include_top=False, input_tensor=input_tensor)
print('Xception loaded')
elif arch == 'VGG16':
model = applications.VGG16(weights='imagenet', include_top=False, input_tensor=input_tensor)
print('VGG16 loaded')
elif arch == 'VGG19':
model = applications.VGG19(weights='imagenet', include_top=False, input_tensor=input_tensor)
print('VGG19 loaded')
elif arch == 'ResNet50':
model = applications.ResNet50(weights='imagenet', include_top=False, input_tensor=input_tensor)
print('ResNet50 loaded')
elif arch == 'InceptionV3':
model = applications.InceptionV3(weights='imagenet', include_top=False, input_tensor=input_tensor)
print('InceptionV3 loaded')
elif arch == 'InceptionResNetV2':
model = applications.InceptionResNetV2(weights='imagenet', include_top=False, input_tensor=input_tensor)
print('InceptionResNetV2 loaded')
elif arch == 'MobileNet':
model = applications.MobileNet(input_shape=(224, 224, 3), weights='imagenet', include_top=False,
input_tensor=input_tensor)
print('MobileNet loaded')
elif arch == 'DenseNet121':
model = applications.DenseNet121(weights='imagenet', include_top=False, input_tensor=input_tensor)
print('DenseNet121 loaded')
elif arch == 'NASNetLarge':
model = applications.NASNetLarge(weights='imagenet', include_top=False, input_tensor=input_tensor)
print('NASNetLarge loaded')
elif arch == 'MobileNetV2':
model = applications.MobileNetV2(input_shape=(224, 224, 3), weights='imagenet', include_top=False,
input_tensor=input_tensor)
print('MobileNetV2 loaded')
else:
print('Invalid model selected')
model = False
return model
def save_bottleneck_features(model_name, train_data_dir, validation_data_dir, bottleneck_name, img_height, img_width, arch,
batch_size=1):
"""Saves the bottlenecks of validation and train data.
Args:
train_data_dir: String path to a folder containing subfolders of images (training set).
validation_data_dir: String path to a folder containing subfolders of images (validation set).
bottleneck_name: String used as main element of bottlenecks files.
img_height: Integer, image height.
img_width: Integer, image width.
arch: String that defines the CNN model to be used.
batch_size: batch size
Returns:
No returns. Saves bottlenecks using bottleneck_name and bottleneck_dir
"""
# Saves the bottlenecks of validation and train data.
# Input is path to train_data_dir and validation_data_dir (directories with the images)
# bottleneck_name is the name to be used for saving
# bottleneck_dir is defined outside of this function
# arch is the architecture to be used
global bottleneck_dir
#datagen = ImageDataGenerator(preprocessing_function=model_preprocess(model_name))
datagen = ImageDataGenerator(rescale=1./255)
# check to see if runs/bottleneck path exists
if not os.path.exists(bottleneck_dir):
os.makedirs(bottleneck_dir)
# build the network
model = model_app(arch, Input(shape=(img_height, img_width, 3)))
generator = datagen.flow_from_directory(
train_data_dir,
target_size=(img_width, img_height),
batch_size=batch_size,
class_mode='categorical',
shuffle=False)
bottleneck_features_train = model.predict_generator(
generator, generator.n // batch_size, verbose=verb)
    # save the bottleneck features and the corresponding labels as separate .npy files
np.save(open(os.path.join(bottleneck_dir, f'{bottleneck_name}_train.npy'), 'wb'),
bottleneck_features_train)
np.save(open(os.path.join(bottleneck_dir, f'{bottleneck_name}_train_labels.npy'), 'wb'),
generator.classes[0:bottleneck_features_train.shape[0]])
generator = datagen.flow_from_directory(
validation_data_dir,
target_size=(img_width, img_height),
batch_size=batch_size,
class_mode='categorical',
shuffle=False)
bottleneck_features_validation = model.predict_generator(
generator, generator.n // batch_size, verbose=0)
    # save the bottleneck features and the corresponding labels as separate .npy files
np.save(open(os.path.join(bottleneck_dir, f'{bottleneck_name}_val.npy'), 'wb'),
bottleneck_features_validation)
np.save(open(os.path.join(bottleneck_dir, f'{bottleneck_name}_val_labels.npy'), 'wb'),
generator.classes[0:bottleneck_features_validation.shape[0]])
# finally, save a "dictionary" as the labels are numeric and eventually we want to know the original string label:
with open(os.path.join(bottleneck_dir, f'{bottleneck_name}_dict_l'), 'wb') as fp:
pickle.dump(sorted(os.listdir(train_data_dir)), fp)
def train_top_model(bottleneck_name, model_name, arch, img_height, img_width, epochs, opt, batch_size=16):
"""Trains the new classification layer generating the new classification model dependent on the classes we are using.
Args:
bottleneck_name: String used as main element of bottlenecks files.
model_name: String, name of the model to be saved.
arch: String that defines the CNN model to be used.
img_height: Integer, image height.
img_width: Integer, image width.
epochs: Integer, the number of epochs (iterations on complete training set) to be performed
opt: String, optimizer to be used.
batch_size: batch size
Returns:
No returns. Trains and saves the model. Opens a tkinter window with training history
"""
train_data = np.load(open(os.path.join(bottleneck_dir, f'{bottleneck_name}_train.npy'), 'rb'))
train_labels = np.load(open(os.path.join(bottleneck_dir, f'{bottleneck_name}_train_labels.npy'), 'rb')).reshape(-1)
validation_data = np.load(open(os.path.join(bottleneck_dir, f'{bottleneck_name}_val.npy'), 'rb'))
validation_labels = np.load(open(os.path.join(bottleneck_dir, f'{bottleneck_name}_val_labels.npy'), 'rb')).reshape(-1)
# check to see if runs/model path exists
if not os.path.exists(model_dir):
os.makedirs(model_dir)
top_model = Sequential()
top_model.add(Flatten(input_shape=train_data.shape[1:]))
top_model.add(Dropout(0.6)) # dropout helps with overfitting
top_model.add(Dense(len(np.unique(train_labels)), activation='softmax'))
if opt == 'RMSprop':
top_model.compile(optimizer='rmsprop',
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
if opt == 'SGD':
top_model.compile(optimizer=SGD(lr=0.0001, momentum=0.6),
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
callbacks = [EarlyStopping(monitor='val_acc', patience=12, verbose=1),
ModelCheckpoint(filepath=os.path.join(model_dir,'tempbm.h5'), monitor='val_acc', save_best_only=True),
ReduceLROnPlateau(monitor='val_acc', factor=0.2, patience=5, min_lr=0.00001, verbose=1)]
history = top_model.fit(train_data, train_labels,
epochs=epochs,
batch_size=batch_size,
validation_data=(validation_data, validation_labels),
shuffle=True,
callbacks=callbacks,
verbose=verb)
# reload best model:
top_model = load_model(os.path.join(model_dir, 'tempbm.h5'))
score = top_model.evaluate(validation_data, validation_labels, verbose=0)
print('{:22} {:.2f}'.format('Validation loss:', score[0]))
print('{:22} {:.2f}'.format('Validation accuracy:', score[1]))
print('')
# save the entire model:
# build the network
base_model = model_app(arch, Input(shape=(img_height, img_width, 3)))
model = Model(inputs=base_model.input, outputs=top_model(base_model.output))
if opt == 'RMSprop':
model.compile(optimizer='rmsprop',
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
if opt == 'SGD':
model.compile(optimizer=SGD(lr=0.0001, momentum=0.6),
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
model.save(os.path.join(model_dir, f'{model_name}.hdf5'))
# also save the dictionary label associated with this file for later testing
shutil.copy2(os.path.join(bottleneck_dir, f'{bottleneck_name}_dict_l'),
os.path.join(model_dir, f'{model_name}_dict_l'))
# delete temporary model file:
os.remove(os.path.join(model_dir, 'tempbm.h5'))
print('New classification layer training complete.')
# plotting the metrics
fig, ax = plt.subplots(nrows=2, ncols=1, constrained_layout=True)
ax[0].plot(range(1, len(history.history['acc']) + 1), history.history['acc'])
ax[0].plot(range(1, len(history.history['acc']) + 1), history.history['val_acc'])
ax[0].set_title('Model Accuracy')
ax[0].set_ylabel('Accuracy')
ax[0].set_ylim(0.0, 1.0)
ax[0].set_xlabel('Epoch')
ax[0].legend(['Train', 'Validation'], loc='lower right')
ax[1].plot(range(1, len(history.history['acc']) + 1), history.history['loss'])
ax[1].plot(range(1, len(history.history['acc']) + 1), history.history['val_loss'])
ax[1].set_title('Model loss')
ax[1].set_ylabel('Loss')
ax[1].set_xlabel('Epoch')
ax[1].legend(['Train', 'Validation'], loc='upper right')
# set up figure
fig.set_size_inches(w=5, h=7)
# plt.show(fig)
plt.savefig(os.path.join(model_dir,f'{model_name}.pdf'))
plt.close('all')
def fine_tune_second_step(train_data_dir, validation_data_dir, model_name, epochs, batch_size):
"""
Fine tune the new model using SGD (we don't use different batch sizes here so, hopefully, we do not have to
consider hardware limitations).
Args:
train_data_dir: String path to a folder containing subfolders of images (training set).
validation_data_dir: String path to a folder containing subfolders of images (validation set).
model_name: String, name of the model to be saved.
epochs: Integer, the number of epochs (iterations on complete training set) to be performed
Returns:
No returns. Trains and saves the model. Opens matplotlib with training history
"""
#datagen = ImageDataGenerator(preprocessing_function=model_preprocess(model_name))
datagen = ImageDataGenerator(rescale=1./255)
# load the model:
model = load_model(os.path.join(model_dir, f'{model_name}.hdf5'))
# compile the model with a SGD/momentum optimizer
# and a very slow learning rate.
model.compile(optimizer=SGD(lr=1e-4, momentum=0.3),
loss='categorical_crossentropy',
metrics=['accuracy'])
tensorboard = TensorBoard(
log_dir="logs/{}-{}".format(model_name, datetime.datetime.now().strftime("%Y-%m-%d-%H%M%S")))
callbacks = [EarlyStopping(monitor='val_loss', patience=20, verbose=1),
ModelCheckpoint(filepath=os.path.join(model_dir, 'tempbm.h5'), monitor='val_acc', save_best_only=True),
ReduceLROnPlateau(monitor='val_loss', factor=0.2, patience=10, min_lr=1e-6, verbose=1),
tensorboard]
    # get model input parameters:
img_height = model.layers[0].get_output_at(0).get_shape().as_list()[1]
img_width = model.layers[0].get_output_at(0).get_shape().as_list()[2]
    # set up the flow from directory for train and validation data:
generator_train = datagen.flow_from_directory(
train_data_dir,
target_size=(img_width, img_height),
batch_size=batch_size,
class_mode='categorical',
shuffle=True)
generator_val = datagen.flow_from_directory(
validation_data_dir,
target_size=(img_width, img_height),
batch_size=batch_size,
class_mode='categorical',
shuffle=True)
history = model.fit_generator(generator=generator_train,
steps_per_epoch=generator_train.n // batch_size,
epochs=epochs,
validation_data=generator_val,
validation_steps=generator_val.n // batch_size,
shuffle=True,
callbacks=callbacks,
verbose=verb)
# reload best model:
top_model = load_model(os.path.join(model_dir, 'tempbm.h5'))
score = top_model.evaluate_generator(generator=generator_val, steps=generator_val.n, verbose=0)
print('{:22} {:.2f}'.format('Validation loss:', score[0]))
print('{:22} {:.2f}'.format('Validation accuracy:', score[1]))
print('')
# sometimes fine tune might not improve validation accuracy, verify:
initial_model = load_model(os.path.join(model_dir, f'{model_name}.hdf5'))
initial_model.compile(optimizer=SGD(lr=1e-4, momentum=0.5),
loss='categorical_crossentropy',
metrics=['accuracy'])
score_or = initial_model.evaluate_generator(generator=generator_val, steps=generator_val.n, verbose=0)
print('{:22} {:.2f}'.format('Original Validation loss:', score_or[0]))
print('{:22} {:.2f}'.format('Original Validation accuracy:', score_or[1]))
print('')
if score_or[1] > score[1]:
print('Fine tune did not improve model accuracy.')
# this might be due to batch normalization
# http://blog.datumbox.com/the-batch-normalization-layer-of-keras-is-broken/
# https://www.youtube.com/watch?v=nUUqwaxLnWs
model.save(os.path.join(model_dir, f'{model_name}_fine_tuned.hdf5'))
# also save the dictionary label associated with this model for later testing
shutil.copy2(os.path.join(model_dir, f'{model_name}_dict_l'),
os.path.join(model_dir, f'{model_name}_fine_tuned_dict_l'))
# delete temporary model file:
os.remove(os.path.join(model_dir, 'tempbm.h5'))
# plotting the metrics
fig, ax = plt.subplots(nrows=2, ncols=1, constrained_layout=True)
ax[0].plot(range(1, len(history.history['acc']) + 1), history.history['acc'])
ax[0].plot(range(1, len(history.history['acc']) + 1), history.history['val_acc'])
ax[0].set_title('Model Accuracy')
ax[0].set_ylabel('Accuracy')
ax[0].set_ylim(0.0, 1.0)
ax[0].set_xlabel('Epoch')
ax[0].legend(['Train', 'Validation'], loc='lower right')
ax[1].plot(range(1, len(history.history['acc']) + 1), history.history['loss'])
ax[1].plot(range(1, len(history.history['acc']) + 1), history.history['val_loss'])
ax[1].set_title('Model loss')
ax[1].set_ylabel('Loss')
ax[1].set_xlabel('Epoch')
ax[1].legend(['Train', 'Validation'], loc='upper right')
# set up figure
fig.set_size_inches(w=5, h=7)
# plt.show(fig)
plt.savefig(os.path.join(model_dir, f'{model_name}_fine_tuned.pdf'))
plt.close('all')
print('Fine tune complete.')
if __name__ == '__main__':
print("Starting...")
# for model selection parameters
options_dict = {
'Xception': (299, 299, 3),
'VGG16': (224, 224, 3),
'VGG19': (224, 224, 3),
'ResNet50': (224, 224, 3),
'InceptionV3': (299, 299, 3),
'InceptionResNetV2': (299, 299, 3),
'MobileNet': (224, 224, 3),
'MobileNetV2': (224, 224, 3),
'DenseNet121': (224, 224, 3),
'NASNetLarge': (331, 331, 3)
}
# train and validation data folders
train_data_dir = '../Data/PP_mc_wb_train'
validation_data_dir = '../Data/PP_mc_wb_validation'
test_data_dir = '../Data/PP_mc_wb_test'
####################################################
# choose model architecture with weights coming from ImageNet training:
models_list = ['MobileNetV2', 'VGG19',
'InceptionV3', 'ResNet50']
# number of epochs for training:
epochs = 64
# optimizer
opt = 'SGD'
for m in models_list:
start_time = timer()
print(m)
# model and bottleneck names:
bottleneck_name = m + '_bn'
# image height and width ("picture size for the model"):
height = options_dict[m][0]
width = options_dict[m][1]
# calling the functions:
# save the bottlenecks
save_bottleneck_features(m, train_data_dir, validation_data_dir, bottleneck_name, height, width, m)
# then train the top model
train_top_model(bottleneck_name, m, m, height, width, epochs, opt)
# fine tune the model:
if m == 'InceptionV3' or m == 'ResNet50':
fine_tune_second_step(train_data_dir, validation_data_dir, m, epochs,
batch_size=8) # 8 for inception/resnet (personal memory limitations)
else:
fine_tune_second_step(train_data_dir, validation_data_dir, m, epochs,
batch_size=16)
end_time = timer()
print(f'{m} trained in {(end_time-start_time)/60.0:.2f} minutes')
K.clear_session()
print('\n\n')
for m in models_list:
# after the models are trained, evaluate the metrics:
#datagen = ImageDataGenerator(preprocessing_function=model_preprocess(m))
datagen = ImageDataGenerator(rescale=1./255)
print('Evaluating model {}'.format(m))
# image height and width ("picture size for the model"):
height = options_dict[m][0]
width = options_dict[m][1]
with open('accuracy.csv', 'a') as outfile:
print(f'{m},', end =" ", file=outfile)
for dset, ddir in zip(['training', 'validation', 'test'],[train_data_dir, validation_data_dir, test_data_dir]):
generator_test = datagen.flow_from_directory(
ddir,
target_size=(width, height),
batch_size=1,
class_mode='categorical',
shuffle=False)
# load the model
# this_model = load_model(os.path.join(model_dir, f'{m}.hdf5'))
# this_model.compile(optimizer=SGD(lr=1e-4, momentum=0.5),
# loss='categorical_crossentropy',
# metrics=['accuracy'])
# print(f'Model {m}', file=outfile)
# print('----Classification only', file=outfile)
# score = this_model.evaluate_generator(generator=generator_test, steps=generator_test.n, verbose=0)
# print(f'----{dset:22} loss {score[0]:.2f}', file=outfile)
# print(f'----{dset:22} accuracy {score[1]:.2f}', file=outfile)
# print('')
# load the fine tuned model
this_model = load_model(os.path.join(model_dir, f'{m}_fine_tuned.hdf5'))
# print('----Fine tune', file=outfile)
score = this_model.evaluate_generator(generator=generator_test, steps=generator_test.n, verbose=0)
# print(f'----{dset:22} loss {score[0]:.2f}', file=outfile)
# print(f'----{dset:22} accuracy {score[1]:.2f}', file=outfile)
# print('\n\n')
print(f'{score[1]},', end =" ", file=outfile)
K.clear_session()
print('', file=outfile)
print('Complete')
|
[
"noreply@github.com"
] |
raplima.noreply@github.com
|
3f21a62eb233d75d4cb9fd49add03093f4faf00b
|
444216935d35adcf2ca63ec2eee2b4cca47a649f
|
/liu_models/reusenet.py
|
28d7574703b6d8193ef17d05923f50dd09385a35
|
[] |
no_license
|
Linwenye/LEGO
|
3eff9c45917ef21d8ed1f294ad26d63b84224391
|
02e305191caf1608d30306a3d9646def38df7e99
|
refs/heads/master
| 2023-04-08T15:59:03.570335
| 2021-04-17T06:52:28
| 2021-04-17T06:52:28
| 355,191,073
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,482
|
py
|
'''ReuseNet in PyTorch.
For Pre-activation ReuseNet, see 'preact_ReuseNet.py'.
Reference:
[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Deep Residual Learning for Image Recognition. arXiv:1512.03385
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, in_planes, planes, stride=1):
super(BasicBlock, self).__init__()
self.conv1 = nn.Conv2d(
in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3,
stride=1, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != self.expansion * planes:
self.shortcut = nn.Sequential(
nn.Conv2d(in_planes, self.expansion * planes,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(self.expansion * planes)
)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.bn2(self.conv2(out))
out += self.shortcut(x)
out = F.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, in_planes, planes, stride=1):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3,
stride=stride, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, self.expansion *
planes, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(self.expansion * planes)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != self.expansion * planes:
self.shortcut = nn.Sequential(
nn.Conv2d(in_planes, self.expansion * planes,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(self.expansion * planes)
)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = F.relu(self.bn2(self.conv2(out)))
out = self.bn3(self.conv3(out))
out += self.shortcut(x)
out = F.relu(out)
return out
# define a two-layer block (conv2/bn2 are passed in so they can be shared across blocks)
class ReuseBlock2(nn.Module):
expansion = 1
def __init__(self, in_planes, planes, conv2, bn2, stride=1):
super(ReuseBlock2, self).__init__()
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = conv2
self.bn2 = bn2
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != self.expansion * planes:
self.shortcut = nn.Sequential(
nn.Conv2d(in_planes, self.expansion * planes,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(self.expansion * planes)
)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.bn2(self.conv2(out))
out += self.shortcut(x)
out = F.relu(out)
return out
# define a three-layer block
class ReuseBlock3(nn.Module):
expansion = 1
def __init__(self, in_planes, planes, stride=1):
super(ReuseBlock3, self).__init__()
self.conv1 = nn.Conv2d(
in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3,
stride=1, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, planes, kernel_size=3,
stride=1, padding=1, bias=False)
self.bn3 = nn.BatchNorm2d(planes)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != self.expansion * planes:
self.shortcut = nn.Sequential(
nn.Conv2d(in_planes, self.expansion * planes,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(self.expansion * planes)
)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = F.relu(self.bn2(self.conv2(out)))
out = self.bn3(self.conv3(out))
out += self.shortcut(x)
out = F.relu(out)
return out
class ReuseNet(nn.Module):
def __init__(self, block, num_blocks, num_classes=10, widen_factor=1):
super(ReuseNet, self).__init__()
self.in_planes = 16 * widen_factor
self.conv1 = nn.Conv2d(3, 16 * widen_factor, kernel_size=3,
stride=1, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(16 * widen_factor)
self.layer1 = self._make_layer(block, 16 * widen_factor, num_blocks[0], stride=1)
self.layer2 = self._make_layer(block, 32 * widen_factor, num_blocks[1], stride=2)
self.layer3 = self._make_layer(block, 64 * widen_factor, num_blocks[2], stride=2)
self.layer4 = self._make_layer(block, 128 * widen_factor, num_blocks[3], stride=2)
self.linear = nn.Linear(128 * widen_factor * block.expansion, num_classes)
def _make_layer(self, block, planes, num_blocks, stride):
strides = [1] * (num_blocks - 1)
layers = [block(self.in_planes, planes, stride)]
self.in_planes = planes * block.expansion
reuse_conv2 = nn.Conv2d(planes, planes, kernel_size=3,
stride=1, padding=1, bias=False)
reuse_bn2 = nn.BatchNorm2d(planes)
for _ in strides:
layers.append(ReuseBlock2(self.in_planes, planes, reuse_conv2, reuse_bn2))
self.in_planes = planes * block.expansion
return nn.Sequential(*layers)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
out = self.layer4(out)
out = F.avg_pool2d(out, 4)
out = out.view(out.size(0), -1)
out = self.linear(out)
return out
class CifarReuseNet(nn.Module):
def __init__(self, block, num_blocks, num_classes=10, widen_factor=1):
super(CifarReuseNet, self).__init__()
self.in_planes = 16 * widen_factor
self.conv1 = nn.Conv2d(3, 16 * widen_factor, kernel_size=3,
stride=1, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(16 * widen_factor)
self.layer1 = self._make_layer(block, 16 * widen_factor, num_blocks[0], stride=1)
self.layer2 = self._make_layer(block, 32, num_blocks[1], stride=2)
self.layer3 = self._make_layer(block, 64, num_blocks[2], stride=2)
self.linear = nn.Linear(64 * block.expansion, num_classes)
def _make_layer(self, block, planes, num_blocks, stride):
strides = [1] * (num_blocks - 2)
layers = [block(self.in_planes, planes, stride)]
self.in_planes = planes * block.expansion
layers.append(block(self.in_planes, planes, 1))
reuse = BasicBlock(self.in_planes, planes, 1)
for _ in strides:
layers.append(reuse)
self.in_planes = planes * block.expansion
return nn.Sequential(*layers)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
out = F.avg_pool2d(out, 8)
out = out.view(out.size(0), -1)
out = self.linear(out)
return out
def ReuseNet18():
return ReuseNet(BasicBlock, [2, 2, 2, 2])
def CifarReuseNet26():
return CifarReuseNet(BasicBlock, [4, 4, 4])
def ReuseNet26(num_classes):
return ReuseNet(BasicBlock, [3, 3, 3, 3], num_classes)
def ReuseNet34():
return ReuseNet(BasicBlock, [3, 4, 6, 3])
def ReuseNet50():
return ReuseNet(Bottleneck, [3, 4, 6, 3])
def ReuseNet101():
return ReuseNet(Bottleneck, [3, 4, 23, 3])
def ReuseNet152():
return ReuseNet(Bottleneck, [3, 8, 36, 3])
def test():
# net = ReuseNet34wider_cifar100()
net = ReuseNet18()
y = net(torch.randn(1, 3, 32, 32))
print(y.size())
# test()
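# A small sanity-check sketch (added as an aside, not part of the original file): because
# _make_layer hands the same conv2/bn2 modules to every ReuseBlock2 in a stage, those blocks
# share weights, so a ReuseNet should report fewer parameters than a non-shared counterpart.
def count_parameters(net):
    """Count the parameters of a model; shared modules are counted only once."""
    return sum(p.numel() for p in net.parameters())
# e.g.: print(count_parameters(ReuseNet18()))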
|
[
"419766878@qq.com"
] |
419766878@qq.com
|
8b38feee1e7984c093ab2477b1e6c94aa9ae5032
|
9b1446b26e81a79c303f9799fb6a91785c7adb03
|
/.history/Code/rearrange_20200119162227.py
|
a2b26cbc31714c4d2901c190ccccaf9a0c97fe88
|
[] |
no_license
|
SamirIngley/CS1.2-Tweet-Gen
|
017ea15b1113881a156ff24682828bc654eb6c81
|
bcd95fa63e05849cbf8e36230d8e31032b99daaa
|
refs/heads/master
| 2020-12-14T20:19:57.733290
| 2020-08-04T23:19:23
| 2020-08-04T23:19:23
| 234,856,234
| 0
| 0
| null | 2020-06-05T21:13:04
| 2020-01-19T07:05:55
|
Python
|
UTF-8
|
Python
| false
| false
| 5,847
|
py
|
import random
def random_rearrange(input_string):
    ''' Takes a string of words and rearranges them in a random order
        (words are sampled with replacement, so repeats are possible)
    '''
# input_string = input("enter words: ")
words = input_string.split(' ')
len_words = len(words)
# print(words)
word_list = []
for word in range(len_words):
rand = random.randint(0,len_words-1)
# print(rand)
word_list.append(words[rand])
# print(word_list)
space = ' '
sentence = space.join(word_list)
print(sentence)
return sentence
def reverse_order(input_string):
'''
    Reverses the order of words in the input string
'''
# input_string = input("enter words: ")
words = input_string.split(' ')
print(words)
length = len(words) - 1
word_list = []
for word in words:
word_list.append(words[length])
length -= 1
print(word_list)
space = ' '
sentence = space.join(word_list)
print(sentence)
return sentence
def mad_libs():
nouns_string = input('Give me a noun: ')
names_string = input('Give me a name: ')
verbs_string = input('Give me two verbs: ')
nouns = nouns_string.split(' ')
names = names_string.split(' ')
verbs = verbs_string.split(' ')
print(verbs)
print("One day I went to the store to buy myself a {}.".format(nouns[0]))
print("'What's the matter with you {}?' The clerk asked.".format(names[0]))
print("'This fits me well' I said")
print("'Well go on then {} it out so you don't miss out.".format(verbs[0]))
print("'Let me {} first and I'll give you what I have.'".format(verbs[1]))
# def anagram():
# ''' handshake with each letter
# rearrange to see every possible combination of words
# '''
# word = input('Letters/word: ')
# length = len(word)
# current = None
# temp = None
# for letter in word:
# current = letter
# for letter2 in word:
# temp = letter2
# if letter == letter2:
# pass
# else:
def anagram(input_string):
    ''' takes a word and collects letter rearrangements produced by pairwise swaps
        (work in progress: this does not yet generate every possible combination)
    '''
word_string = input_string
new_strings = []
linked_list = LinkedList()
linked_list_swaps = LinkedList()
linked_list.read()
linked_list_swaps.read()
for letter in input_string:
linked_list.insert(letter)
linked_list_swaps.insert(letter)
linked_list.read()
print(len(word_string))
index = 0
while index < len(word_string):
for letter in word_string:
for letter2 in word_string:
linked_list_swaps.swap(letter, letter2)
                new_strings.append(linked_list_swaps.read() + "\n")  # record the swapped arrangement
linked_list_swaps.swap(letter2, letter)
index += 1
linked_list_swaps.read()
print(new_strings)
return
class Node():
def __init__(self, data=None, next_pointer=None):
self.data = data
self.next_pointer = next_pointer
def get_data(self):
return self.data
def get_next(self):
return self.next_pointer
def set_next(self, next_node):
self.next_pointer = next_node
class LinkedList():
def __init__(self, head=None):
self.head = head
def insert(self, data):
new_node = Node(data)
new_node.set_next(self.head)
self.head = new_node
def delete(self, data):
current = self.head
previous = None
found = False
while current and found == False:
if current.get_data() == data:
found = True
else:
previous = current
current = current.get_next()
if current == None:
return ValueError("does not exist")
if previous == None:
self.head = current.get_next()
if found == True:
previous.set_next(current.get_next())
def read(self):
current = self.head
read = []
while current:
data = current.get_data()
read.append(data)
current = current.get_next()
no_space = ''
sentence = no_space.join(read)
print(sentence)
        return sentence  # return the joined string so callers can use the value
def swap(self, data1, data2):
node1 = None
node2 = None
current = self.head
if data1 == data2:
print("n/a")
return
while current:
curr_data = current.get_data()
if curr_data == data1:
node1 = current
elif curr_data == data2:
node2 = current
current = current.get_next()
temp1 = node1.get_data()
temp2 = node2.get_data()
node1.data = temp2
node2.data = temp1
return
def size(self):
current = self.head
counter = 0
while current:
counter += 1
current = current.get_next()
print(counter)
return counter
if __name__ == '__main__':
input_string = 'hello yellow fellow'
anagram_string = 'superduper'
# random_rearrange(input_string)
# reverse_order()
# mad_libs()
anagram(anagram_string)
# linked_list = LinkedList()
# linked_list.insert('a')
# linked_list.insert('b')
# linked_list.insert('c')
# linked_list.insert('d')
# linked_list.insert('e')
# linked_list.insert('f')
# linked_list.insert('g')
# linked_list.insert('h')
# linked_list.insert('i')
# linked_list.insert('j')
# linked_list.insert('k')
# linked_list.read()
# linked_list.delete('a')
# linked_list.read()
# print(range(linked_list.size()))
# linked_list.swap([0],[10])
# linked_list.read()
|
[
"samir.ingle7@gmail.com"
] |
samir.ingle7@gmail.com
|
b3de6ca2822ccf06d9654878faeddcd84a157cad
|
82b7196d25f43f8201158f285618910f480d7c8d
|
/bili_live_danmu.py
|
4f0a90c1c9738c967346864905d4b863d7f17f14
|
[] |
no_license
|
jym66/bili_danmu
|
113ee4994e18fc39876189a64bf442048995b323
|
57b234471a9408058c8975b0f29ed6872a8ee4a9
|
refs/heads/master
| 2022-02-26T17:47:50.837348
| 2019-10-25T09:07:20
| 2019-10-25T09:07:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,190
|
py
|
import asyncio
import json
import re
import zlib
from binascii import a2b_hex
import aiohttp
import requests
from apscheduler.schedulers.asyncio import AsyncIOScheduler
'''
bilibili live-stream danmaku (bullet comment) crawler
'''
header = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36"
}
# ws url
bili_live_ws_url = "https://ks-live-dmcmt-sh2-pm-01.chat.bilibili.com/sub"
# URL used to fetch the key that is required when establishing the ws connection
key_url = "https://api.live.bilibili.com/room/v1/Danmu/getConf?room_id={}&platform=pc&player=web"
'''
a ws binary frame = a 16-byte bilibili protocol header + the returned data
00 00 01 0F 00 10 00 02 00 00 00 05 00 00 00 00
bytes 3-4 (01 0F) give the length of header + returned data, bytes 5-6 (00 10) give the length of the protocol header,
byte 12 (05) means the returned data is a danmaku or gift message.
when the returned data is a danmaku/gift message (i.e. byte 12 is 05), bytes 8 and 16 are 02 and 00 respectively;
in all other cases, bytes 8 and 16 are 01 and 01.
'''
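# A hedged helper sketch (added for illustration only, not used by the crawler below): the
# 16-byte header can be unpacked as big-endian fields; the field names follow the commonly
# documented layout and are an assumption on top of the byte positions described above.
def parse_packet_header(data):
    import struct
    packet_len, header_len, version, operation, sequence = struct.unpack('>IHHII', data[:16])
    return packet_len, header_len, version, operation, sequence
# e.g. parse_packet_header(bytes.fromhex('0000010F001000020000000500000000'))
# -> (271, 16, 2, 5, 0), matching the sample frame shown in the comment above.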
# ws packet types
ask_connect_num = 7  # 0x07 client requests a connection
ack_connect_num = 8  # 0x08 server acknowledges the connection
heartbeat_num = 2  # 0x02 client heartbeat / request for the room popularity value
ask_danmu_num = 3  # 0x03 server reply to the client heartbeat, returns the room popularity value
return_danmu_num = 5  # 0x05 server pushes danmaku messages, gifts, etc.
# heartbeat packet binary data (hex)
heartbeat_bin = "0000001F0010000100000002000000015B6F626A656374204F626A6563745D"
# heartbeat send interval (seconds)
heartbeat_time = 0.483 * 30
async def bili_live_danmu(connect_data):
session = aiohttp.ClientSession()
async with session.ws_connect(bili_live_ws_url, proxy="http://127.0.0.1:8888") as ws:
# async with session.ws_connect(bili_live_ws_url) as ws:
        # payload of the connection request
connect_data_bin = json.dumps(connect_data).encode()
        # length of the request payload + the 16-byte protocol header
msg_len_hex = hex(len(connect_data_bin) + 16).replace("0x", "")
        # protocol header in hex
protocol_num_hex = f"000000{msg_len_hex}001000010000000{ask_connect_num}00000001"
        # convert the hex header into bytes
protocol_bin = a2b_hex(protocol_num_hex.encode())
        # full ws packet in bytes
msg_bin = protocol_bin + connect_data_bin
# print(msg_bin)
        # send the ws connection request for the live room
print(msg_bin)
await ws.send_bytes(msg_bin)
await asyncio.sleep(0.05)
        # a heartbeat must be sent once right after the ws connection is established
await heartbeat(ws)
        # schedule periodic heartbeats
scheduler.add_job(heartbeat, 'interval', seconds=heartbeat_time, args=[ws])
async for msg in ws:
            # byte 12 == 0x05 means the server returned danmaku messages, gifts, system messages, site announcements, etc.
# print(msg.data)
if msg.data[11] == 5:
try:
                    # bilibili compresses everything after the 16-byte header with zlib, so it has to be decompressed before reading
msg_data = zlib.decompress(msg.data[16:])[16:].decode(errors='ignore')
if '"cmd":"DANMU_MSG"' in msg_data:
                        # todo: the returned data may hold one or several danmaku messages and the format differs, the check needs adjusting
# result = re.findall('{"cmd":"DANMU_MSG".*?}]}', msg_data, re.S)
result = json.loads(msg_data)
# print(result)
# for i in result:
# danmu_dict = json.loads(i)
danmu = {
"USER_ID": result["info"][2][0],
"USER_NAME": result["info"][2][1],
"MSG": result["info"][1],
}
print(danmu)
except:
                    # packets carrying the current room follower count are not compressed
pass
if msg.type == aiohttp.WSMsgType.TEXT:
if msg.data == 'close cmd':
await ws.close()
print('connection close')
break
elif msg.type == aiohttp.WSMsgType.ERROR:
print('websocket error to closed')
break
async def heartbeat(ws):
await ws.send_bytes(a2b_hex(heartbeat_bin.encode()))
def init(url):
response1 = requests.get(url, headers=header)
room_id = int(re.findall('"data":{"room_id":(.*?),"', response1.text, re.S)[0])
response2 = requests.get(key_url.format(room_id), verify=False)
connect_key = response2.json()["data"]["token"]
return {"uid": 0, "roomid": room_id, "protover": 2, "platform": "web", "clientver": "1.7.5", "type": 2,
"key": connect_key}
if __name__ == '__main__':
room_url = "https://live.bilibili.com/21329290?spm_id_from=333.334.b_62696c695f6c697665.12"
connect_data_dict = init(room_url)
loop = asyncio.get_event_loop()
loop.create_task(bili_live_danmu(connect_data_dict))
scheduler = AsyncIOScheduler()
scheduler.start()
loop.run_forever()
|
[
"413510489@qq.com"
] |
413510489@qq.com
|
d1fdfc95be671fcb97e45c2857bdc5a6fceb5cda
|
4a0f128be58e4ec02e4ad2dff7d4a1be40add9d8
|
/Week_03/[169]多数元素.py
|
2a3847f6c73094cf0746f04aa1a555eec6891671
|
[] |
no_license
|
JinHao-95/AlgorithmQIUZHAO
|
f6493c362859ffce00a643052f75c0f7a3b1d1e1
|
af172a032c97448fe41c13b9ed3398f080213807
|
refs/heads/master
| 2022-12-14T17:49:39.840202
| 2020-08-23T04:57:28
| 2020-08-23T04:57:28
| 280,786,461
| 0
| 0
| null | 2020-07-19T03:47:56
| 2020-07-19T03:47:56
| null |
UTF-8
|
Python
| false
| false
| 764
|
py
|
# Given an array of size n, find the majority element - the element that appears more than ⌊ n/2 ⌋ times.
#
# You may assume that the array is non-empty and that a majority element always exists.
#
#
#
# Example 1:
#
# Input: [3,2,3]
# Output: 3
#
# Example 2:
#
# Input: [2,2,1,1,1,2,2]
# Output: 2
#
# Related Topics: Bit Manipulation, Array, Divide and Conquer
# 👍 682 👎 0
from collections import defaultdict
from typing import List
# leetcode submit region begin(Prohibit modification and deletion)
class Solution:
def majorityElement(self, nums: List[int]) -> int:
        dct = defaultdict(int)
        for i in nums:
            dct[i] += 1
        return max(dct.keys(), key=lambda x: dct[x])
# leetcode submit region end(Prohibit modification and deletion)
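# A hedged alternative sketch (not part of the submitted solution above): the Boyer-Moore
# majority vote uses O(1) extra space and relies on the guarantee that a majority element
# always exists.
def majority_element_boyer_moore(nums):
    candidate, count = None, 0
    for num in nums:
        if count == 0:
            candidate = num
        count += 1 if num == candidate else -1
    return candidate
# e.g. majority_element_boyer_moore([2, 2, 1, 1, 1, 2, 2]) -> 2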
|
[
"hao.jin@nttdatabj.com.cn"
] |
hao.jin@nttdatabj.com.cn
|
9849b399cfcfcf6c3236b4b51d0cf9f4f2a0a5bf
|
39d27f97ca07c24629a19fc23a8b1bace0e1193e
|
/CS1026a/Chapter8/Dict and Set.py
|
b1eb1239c34f9bc4e800674e9ecda8e269ac2ba7
|
[] |
no_license
|
yijunhhhe/code
|
1df24fa31952aaba582985828d97697b251c1df6
|
54c22fbf35f9cef687a5338c9c79ca15dd2091a7
|
refs/heads/master
| 2020-05-21T23:53:04.564687
| 2017-01-08T21:57:18
| 2017-01-08T21:57:18
| 59,263,534
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 679
|
py
|
names = {'Michael': 95, 'Bob': 75, 'Tracy': 85}
print(names['Michael'])
names['Adam'] = 67
print(names)
names['Adam'] = 80
print (names)
isKeyIn = 'Adam' in names
print(isKeyIn)
isKeyIn = names.get('Yijun')
isKeyIn = names.get('Yijun', 85)
score = names.get('Adam')
print(isKeyIn, score)
names.pop('Bob')
print(names)
key = (1,2,3)
names[key] = 'fuck'
print(names[key])
# set
s = set([1,2,3])
print(s)
s.add(4)
print(s)
s.remove(4)
print(s)
s1 = set([1, 2, 3])
s2 = set([2, 3, 4])
print(s1&s2, s1|s2)
# immutable objects
a = 'abc'
print(a.replace('a','A'))
print(a)
b = a.replace('a','A')
print(b,a)
# replace does not change the variable a, it just return a new value
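# a quick illustrative check (added as an aside): replace() builds a brand-new string object
print(a is b)   # False - a is untouched, b is a different object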
|
[
"yijunhhhe@gmail.com"
] |
yijunhhhe@gmail.com
|
2eeb2c8a4d3f24fa191c2902cc28df9aa8d79552
|
14c7487cddd5df2428e432bbfd91d4ee292bde41
|
/nova-compute-agent/nova_compute_agent/openstack/common/strutils.py
|
0a116e68c2c3aa231c521621a959f7104f29c5d4
|
[] |
no_license
|
lilingxing20/openstack-ha
|
66ca54c594e26838e2b5d9758495849f0b4be48b
|
d202b4d98a9d270105df4da43f831eb1a2351265
|
refs/heads/master
| 2021-01-21T10:25:42.912743
| 2019-03-19T09:19:47
| 2019-03-19T09:19:47
| 83,423,357
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,815
|
py
|
# coding=utf-8
"""
System-level utilities and helper functions.
"""
import re
import sys
import unicodedata
import six
from nova_compute_agent.openstack.common.gettextutils import _
BYTE_MULTIPLIERS = {'': 1,
't': 1099511627776,
'g': 1073741824,
'm': 1048576,
'k': 1024
}
BYTE_REGEX = re.compile('(^-?\\d+)(\\D*)')
TRUE_STRINGS = ('1', 't', 'true', 'on', 'y', 'yes')
FALSE_STRINGS = ('0', 'f', 'false', 'off', 'n', 'no')
SLUGIFY_STRIP_RE = re.compile('[^\\w\\s-]')
SLUGIFY_HYPHENATE_RE = re.compile('[-\\s]+')
_SANITIZE_KEYS = [
'adminPass', 'admin_pass', 'password', 'admin_password']
_SANITIZE_PATTERNS_2 = []
_SANITIZE_PATTERNS_1 = []
_FORMAT_PATTERNS_1 = [
'(%(key)s\\s*[=]\\s*)[^\\s^\\\'^\\"]+']
_FORMAT_PATTERNS_2 = ['(%(key)s\\s*[=]\\s*[\\"\\\']).*?([\\"\\\'])',
'(%(key)s\\s+[\\"\\\']).*?([\\"\\\'])',
'([-]{2}%(key)s\\s+)[^\\\'^\\"^=^\\s]+([\\s]*)',
'(<%(key)s>).*?(</%(key)s>)',
'([\\"\\\']%(key)s[\\"\\\']\\s*:\\s*[\\"\\\']).*?([\\"\\\'])',
'([\\\'"].*?%(key)s[\\\'"]\\s*:\\s*u?[\\\'"]).*?([\\\'"])',
'([\\\'"].*?%(key)s[\\\'"]\\s*,\\s*\\\'--?[A-z]+\\\'\\s*,\\s*u?[\'"]).*?([\'"])',
'(%(key)s\\s*--?[A-z]+\\s*)\\S+(\\s*)']
for key in _SANITIZE_KEYS:
for pattern in _FORMAT_PATTERNS_2:
reg_ex = re.compile(pattern % {'key': key}, re.DOTALL)
_SANITIZE_PATTERNS_2.append(reg_ex)
for pattern in _FORMAT_PATTERNS_1:
reg_ex = re.compile(pattern % {'key': key}, re.DOTALL)
_SANITIZE_PATTERNS_1.append(reg_ex)
def int_from_bool_as_string(subject):
"""Interpret a string as a boolean and return either 1 or 0.
Any string value in:
('True', 'true', 'On', 'on', '1')
is interpreted as a boolean True.
Useful for JSON-decoded stuff and config file parsing
"""
return bool_from_string(subject) and 1 or 0
def bool_from_string(subject, strict=False):
"""Interpret a string as a boolean.
A case-insensitive match is performed such that strings matching 't',
'true', 'on', 'y', 'yes', or '1' are considered True and, when
`strict=False`, anything else is considered False.
Useful for JSON-decoded stuff and config file parsing.
If `strict=True`, unrecognized values, including None, will raise a
ValueError which is useful when parsing values passed in from an API call.
Strings yielding False are 'f', 'false', 'off', 'n', 'no', or '0'.
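    Illustrative examples (added for clarity)::
        bool_from_string('yes')                    # True
        bool_from_string('nonsense')               # False
        bool_from_string('nonsense', strict=True)  # raises ValueError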
"""
if not isinstance(subject, basestring):
subject = str(subject)
lowered = subject.strip().lower()
if lowered in TRUE_STRINGS:
return True
if lowered in FALSE_STRINGS:
return False
if strict:
acceptable = ', '.join(("'%s'" % s for s in sorted(TRUE_STRINGS + FALSE_STRINGS)))
msg = _("Unrecognized value '%(val)s', acceptable values are: %(acceptable)s") % {'val': subject,'acceptable': acceptable
}
raise ValueError(msg)
else:
return False
def safe_decode(text, incoming=None, errors='strict'):
"""Decodes incoming str using `incoming` if they're not already unicode.
:param incoming: Text's current encoding
:param errors: Errors handling policy. See here for valid
values http://docs.python.org/2/library/codecs.html
:returns: text or a unicode `incoming` encoded
representation of it.
    :raises TypeError: If text is not an instance of basestring
"""
if not isinstance(text, basestring):
raise TypeError("%s can't be decoded" % type(text))
if isinstance(text, unicode):
return text
if not incoming:
incoming = sys.stdin.encoding or sys.getdefaultencoding()
try:
return text.decode(incoming, errors)
except UnicodeDecodeError:
return text.decode('utf-8', errors)
def safe_encode(text, incoming=None, encoding='utf-8', errors='strict'):
"""Encodes incoming str/unicode using `encoding`.
If incoming is not specified, text is expected to be encoded with
current python's default encoding. (`sys.getdefaultencoding`)
:param incoming: Text's current encoding
:param encoding: Expected encoding for text (Default UTF-8)
:param errors: Errors handling policy. See here for valid
values http://docs.python.org/2/library/codecs.html
:returns: text or a bytestring `encoding` encoded
representation of it.
    :raises TypeError: If text is not an instance of basestring
"""
if not isinstance(text, basestring):
raise TypeError("%s can't be encoded" % type(text))
if not incoming:
incoming = sys.stdin.encoding or sys.getdefaultencoding()
if isinstance(text, unicode):
return text.encode(encoding, errors)
if text and encoding != incoming:
text = safe_decode(text, incoming, errors)
return text.encode(encoding, errors)
return text
def to_bytes(text, default=0):
"""Converts a string into an integer of bytes.
Looks at the last characters of the text to determine
what conversion is needed to turn the input text into a byte number.
Supports "B, K(B), M(B), G(B), and T(B)". (case insensitive)
:param text: String input for bytes size conversion.
:param default: Default return value when text is blank.
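    Illustrative conversions (added for clarity)::
        to_bytes('1024')  # -> 1024
        to_bytes('2K')    # -> 2048
        to_bytes('10MB')  # -> 10485760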
"""
match = BYTE_REGEX.search(text)
if match:
magnitude = int(match.group(1))
mult_key_org = match.group(2)
if not mult_key_org:
return magnitude
elif text:
msg = _('Invalid string format: %s') % text
raise TypeError(msg)
else:
return default
mult_key = mult_key_org.lower().replace('b', '', 1)
multiplier = BYTE_MULTIPLIERS.get(mult_key)
if multiplier is None:
msg = _('Unknown byte multiplier: %s') % mult_key_org
raise TypeError(msg)
return magnitude * multiplier
def to_slug(value, incoming=None, errors='strict'):
"""Normalize string.
Convert to lowercase, remove non-word characters, and convert spaces
to hyphens.
Inspired by Django's `slugify` filter.
:param value: Text to slugify
:param incoming: Text's current encoding
:param errors: Errors handling policy. See here for valid
values http://docs.python.org/2/library/codecs.html
:returns: slugified unicode representation of `value`
:raises TypeError: If text is not an instance of basestring
"""
value = safe_decode(value, incoming, errors)
value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore').decode('ascii')
value = SLUGIFY_STRIP_RE.sub('', value).strip().lower()
return SLUGIFY_HYPHENATE_RE.sub('-', value)
def mask_password(message, secret='***'):
"""Replace password with 'secret' in message.
:param message: The string which includes security information.
:param secret: value with which to replace passwords.
:returns: The unicode value of message with the password fields masked.
For example:
>>> mask_password("'adminPass' : 'aaaaa'")
"'adminPass' : '***'"
>>> mask_password("'admin_pass' : 'aaaaa'")
"'admin_pass' : '***'"
>>> mask_password('"password" : "aaaaa"')
'"password" : "***"'
>>> mask_password("'original_password' : 'aaaaa'")
"'original_password' : '***'"
>>> mask_password("u'original_password' : u'aaaaa'")
"u'original_password' : u'***'"
"""
message = six.text_type(message)
if not any((key in message for key in _SANITIZE_KEYS)):
return message
substitute = '\\g<1>' + secret + '\\g<2>'
for pattern in _SANITIZE_PATTERNS_2:
message = re.sub(pattern, substitute, message)
substitute = '\\g<1>' + secret
for pattern in _SANITIZE_PATTERNS_1:
message = re.sub(pattern, substitute, message)
return message
|
[
"lixx_xt@teamsun.com.cn"
] |
lixx_xt@teamsun.com.cn
|
8fead76103c603095f350c20590815796011ee10
|
160166aad5660f593579405498b9b7943e117557
|
/webapps/apps/users/admin.py
|
11d3b8ca51a2659ede27542c0e9bf12fd3fb2fb0
|
[] |
no_license
|
dudochen/cmdb
|
aaf1692caa66d549df590185a5d3cab378fc1987
|
252ea926a80f0d430a74033849bf77e1e44b94fe
|
refs/heads/master
| 2020-03-21T02:30:47.167222
| 2018-07-11T02:22:27
| 2018-07-11T02:22:27
| 138,002,170
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 362
|
py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from django.contrib import admin
from .models import UserProfile
# Register your models here.
class UserProfileAdmin(admin.ModelAdmin):
"""admin界面的定义"""
#list_display = ['username','password','group','memo']
# filter_horizontal = ['name']
admin.site.register(UserProfile, UserProfileAdmin)
|
[
"290583885@qq.com"
] |
290583885@qq.com
|
b2ea221215c15d4a1c082ddee5b50c0cd396e9df
|
00d1b82b24c1ede5617dac602154586c2499d28f
|
/批量文件重命名.py
|
8b19baee98e2a33c43bd0ba6a8eccedfb90b8ca8
|
[] |
no_license
|
123lxy/Desktop
|
a77848291737b5fee287ee05da2538af072486e6
|
5b4fa52ab40252cdd6a71f64976a447f2b6032a4
|
refs/heads/master
| 2020-03-10T20:22:53.120581
| 2018-04-24T10:44:09
| 2018-04-24T10:44:09
| 127,839,183
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 500
|
py
|
import os
#1. get the folder whose files are to be renamed
folder = input("请输入要操作的文件夹名:")
#2. list all files in the folder
file_names = os.listdir(folder)
print(file_names)
#3. batch rename
#os.chdir(folder)
'''
old_name = name
new_name = '[百度云]' + name
os.rename(old_name,new_name)
print(new_name)
'''
for name in file_names:
old_name ='./'+folder+'/'+name
new_name ='./'+folder+'/'+'[百度云]' + name
os.rename(old_name,new_name)
print(new_name)
|
[
"18340899076@163.com"
] |
18340899076@163.com
|
f01dc448fdd3cca6157b69968d39e66a8fbb82b7
|
c0b56c1d89d7efc0c14a57d2a65a03b87de36bd8
|
/PythonSeleniumTesting/PyTest_Demo/pytest_fixtureDemo.py
|
4fbcb80e5d532d5bec413c55960c9fe0233c53c0
|
[] |
no_license
|
vidyamb/SwagLabs
|
fb32d14afa5374c3355fae0df56e529a4b95a655
|
e3dc8cfe2e0d4b3bd615c58843d46f324a226467
|
refs/heads/main
| 2023-03-20T17:18:42.708988
| 2021-03-10T11:08:17
| 2021-03-10T11:08:17
| 346,171,094
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 311
|
py
|
import pytest
@pytest.mark.usefixtures("setUp")
class TestExample:
def test_fixtureDemo1(self):
print("i wll execute in this method")
def test_fixtureDemo2(self):
print("i wll execute in this method")
def test_fixtureDemo3(self):
print("i wll execute in this method")
|
[
"vidyavidu213@gmail.com"
] |
vidyavidu213@gmail.com
|
17544cf1d8adfa7aabd581547fe2e569ee8550c0
|
80bc6c0088c6f30d620ee8f20c1b03d1539cfd74
|
/mqtt_test.py
|
2dab35cdea2b7e18737dfc8c2822bbfbeddaf783
|
[] |
no_license
|
siposbence/szem_uj
|
a7b6d10e28afd57cd73383721c2a493ec2db4f2d
|
f63cebc44335535b3e09d9e933d2101583fe4bce
|
refs/heads/main
| 2023-06-11T20:35:29.759788
| 2021-07-08T10:08:09
| 2021-07-08T10:08:09
| 381,420,196
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 859
|
py
|
import threading
import time
import paho.mqtt.client as mqtt
import json
topic="control"
broker="localhost"
port=1884
def on_connect(client, userdata, flags, rc):
print("CONNECTED")
print("Connected with result code: ", str(rc))
client.subscribe(topic)
print("subscribing to topic : "+topic)
def on_message(client, userdata, message):
print("Data requested "+str(message.payload))
def main():
print("WAIT for max: ",2)
while True:
time.sleep(1)
client.publish(topic,"dfdfd")
### MQTT ###
client = mqtt.Client()
client.connect(broker, port)
client.on_connect = on_connect
#client.on_disconnect = on_disconnect
def subscribing():
client.on_message = on_message
client.loop_forever()
sub=threading.Thread(target=subscribing)
pub=threading.Thread(target=main)
### Start MAIN ###
sub.start()
pub.start()
|
[
"bence.sipos@hotmail.com"
] |
bence.sipos@hotmail.com
|
3b74a10bfef1507363f425eb61e368100884dd70
|
3e2593161915670c5586bd600e7e076bbe1a0758
|
/meituan.py
|
24c51a500fc1dae6464ddee80951126a64f6833a
|
[] |
no_license
|
qzylalala/WebSpider
|
19f0691b3b05b8650f2f152d36eaaa17e08a0712
|
b2d7f143dba6e54322f401251633488b9406fde4
|
refs/heads/master
| 2020-04-11T01:22:27.457676
| 2019-01-14T12:07:07
| 2019-01-14T12:07:07
| 161,413,126
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,430
|
py
|
import requests
import time
from bs4 import BeautifulSoup
import json
import csv
with open(r'美团武汉美食.csv',"w", newline='',encoding='UTF-8') as csvfile:
writer = csv.writer(csvfile)
writer.writerow(['网站名','品类','商家名称','地址'])
target = 'http://wh.meituan.com/meishi/'
head={}
head['authorization']='your ClientID'
head['User-Agent'] = ''
req = requests.get(url=target,headers=head)
html=req.text
bf=BeautifulSoup(html,'lxml')
texts=bf.find_all('script')
text=texts[14].get_text().strip()
text=text[19:-1]
result=json.loads(text)
result=result['filters']
result=result['areas']
list=[]
for item in result:
for i in item['subAreas']:
if i['name']=='全部':
continue
list.append(i['id'])
print(list)
for item in list:
for i in range(50):
if i==0:
continue
target='http://wh.meituan.com/meishi/'+'b'+str(item)+'/'+'pn'+str(i)+'/'
head={}
head['authorization']='your ClientID'
head['User-Agent'] = ''
req = requests.get(url=target,headers=head)
html=req.text
bf=BeautifulSoup(html,'lxml')
texts=bf.find_all('script')
text=texts[14].get_text().strip()
text=text[19:-1]
result=json.loads(text)
result=result['poiLists']
result=result['poiInfos']
if result:
print(target)
for it in result:
Info_list=[]
Info_list.append('美团')
Info_list.append('美食')
Info_list.append(it['title'])
Info_list.append(it['address'])
writer.writerow(Info_list)
time.sleep(3)
else:
break
print('Done')
|
[
"304228244@qq.com"
] |
304228244@qq.com
|
b562e5c3958a7911b206219551cf904af24f6971
|
7da07fcf3f3179cccc453f02fd8a0ca61fa4d576
|
/Random practise.py
|
c5fe634eb11204a85cf24be3d08f9e56c5e6dc21
|
[] |
no_license
|
Abhilashavadhanula/Firstproject
|
fb8a00aa82941e73fe3d75224a3c397b09209cda
|
7325573652f891f682f4aa9ca24e65adc26956ce
|
refs/heads/master
| 2021-08-10T14:12:29.130988
| 2020-03-23T06:14:03
| 2020-03-23T06:14:03
| 131,378,867
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 384
|
py
|
#!/usr/bin/env python
# coding: utf-8
# In[3]:
# In[6]:
get_ipython().run_cell_magic('time', '', "for i in range(40):\n print('im timing output {}'.format(i))")
# ## if else statemets
# In[16]:
beers=['large','bud','carona','jonny','brew','stout']
for Beer in beers:
if Beer=='bmw':
print(Beer.upper())
else:
print(Beer.title())
# In[ ]:
|
[
"noreply@github.com"
] |
Abhilashavadhanula.noreply@github.com
|
655703818b71a380d0ddde23057a56603097cada
|
e41e2505ff0b0534017e85bda0e06493094d1498
|
/frontend/corona_REST/setting.py
|
6315adfe2d6fb9e632722dc0d095178b642a7331
|
[
"MIT"
] |
permissive
|
luyuliu/COVID19-Dashboard
|
5d516f85284ca908321696bee405fdf1da5531d1
|
717f83e2767fa53367232e742c110515957a94fd
|
refs/heads/master
| 2023-09-04T11:59:37.076149
| 2021-11-12T20:32:46
| 2021-11-12T20:32:46
| 253,892,926
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,538
|
py
|
DOMAIN = {
'ridership_actual': {'datasource': {'source': 'ridership_actual'}},
'county_info': {'datasource': {'source': 'county_info'}},
'census_occu_pop': {'datasource': {'source': 'census_occu_pop'}},
'corona_cases_state_level': {'datasource': {'source': 'corona_cases_state_level'}},
'census_occupation_population': {'datasource': {'source': 'census_occupation_population'}},
'system_info': {'datasource': {'source': 'system_info'}},
'other_ridership_hourly': {'datasource': {'source': 'other_ridership_hourly'}},
'corona_cases_github': {'datasource': {'source': 'corona_cases_github'}},
'other_ridership': {'datasource': {'source': 'other_ridership'}},
'ridership': {'datasource': {'source': 'ridership'}},
'census_occupation_industry': {'datasource': {'source': 'census_occupation_industry'}},
'ridership_hourly': {'datasource': {'source': 'ridership_hourly'}},
'aggregated_ridership_hourly': {'datasource': {'source': 'aggregated_ridership_hourly'}},
'system_info_backup': {'datasource': {'source': 'system_info_backup'}},
'google_trend': {'datasource': {'source': 'google_trend'}},
'corona_cases_usafacts': {'datasource': {'source': 'corona_cases_usafacts'}},
'census_transit_pop': {'datasource': {'source': 'census_transit_pop'}},
}
MONGO_HOST = 'localhost'
MONGO_PORT = 27017
MONGO_DBNAME = "corona"
ALLOW_UNKNOWN=True
X_DOMAINS='*'
PAGINATION_LIMIT = 10000
PAGINATION_DEFAULT = 10000
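# A hedged usage sketch (added as an aside, not part of this settings module): an Eve app
# would typically be started by pointing Eve at this file, e.g.
#   from eve import Eve
#   app = Eve(settings='setting.py')
#   app.run()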
|
[
"liuluyu0378@gmail.com"
] |
liuluyu0378@gmail.com
|
685aa7e6de81e14c19ae8536efb7def08d8b800f
|
bcd5620b2d6601a29547823ab1cf540cb15c1410
|
/dash_hover_click.py
|
a7895b179b5e0ca1da2e9e5abee5f48689322d4e
|
[] |
no_license
|
boonkiatdude/plotly-dash
|
ab8920aa432940f4c48b1d2adf3c5e1b8d314a12
|
7c18ad36fa12f4868ac60dd3508deec12445395c
|
refs/heads/main
| 2023-03-23T11:57:42.690388
| 2021-03-15T13:33:51
| 2021-03-15T13:33:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,572
|
py
|
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
import plotly.graph_objects as go
import pandas as pd
import json
import base64
df = pd.read_csv('../Data/wheels.csv')
def encode_image(image_file):
encoded = base64.b64encode(open(image_file, 'rb').read())
return 'data:image/png;base64,{}'.format(encoded.decode())
app = dash.Dash()
fig = go.Scatter(
x=df['color'],
y=df['wheels'],
dy=1,
mode='markers',
marker={'size':15}
)
layout = go.Layout(
title='Test',
hovermode='closest'
)
app.layout = html.Div(
[
html.Div(
[
dcc.Graph(id='wheels-plot',
figure={'data':[fig], 'layout':layout},
style={'width':'30%', 'float':'left'})
]
),
html.Div(
html.Img(id='hover-data', src='children', height=300),
style={'paddingTop':35}
)
]
)
@app.callback(
Output('hover-data', 'src'),
[Input('wheels-plot', 'clickData')]
)
def callback_image(hoverData):
if hoverData:
wheel = hoverData['points'][0]['y']
color = hoverData['points'][0]['x']
path = '../Data/Images/'
wheel_filt = (df['wheels'] == wheel)
color_filt = (df['color'] == color)
return encode_image(path + df[wheel_filt & color_filt]['image'].values[0])
if __name__ == '__main__':
app.run_server()
|
[
"noreply@github.com"
] |
boonkiatdude.noreply@github.com
|
1084dd65c5e897d08750a0765d039c5aa79fbda4
|
444a9480bce2035565332d4d4654244c0b5cd47b
|
/research/nlp/gpt2/src/utils/tensor_manipulations.py
|
8ff23330029fad9374e2b614e0f24e24d7e6f763
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-proprietary-license"
] |
permissive
|
mindspore-ai/models
|
7ede9c6454e77e995e674628204e1c6e76bd7b27
|
eab643f51336dbf7d711f02d27e6516e5affee59
|
refs/heads/master
| 2023-07-20T01:49:34.614616
| 2023-07-17T11:43:18
| 2023-07-17T11:43:18
| 417,393,380
| 301
| 92
|
Apache-2.0
| 2023-05-17T11:22:28
| 2021-10-15T06:38:37
|
Python
|
UTF-8
|
Python
| false
| false
| 7,159
|
py
|
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
tensor manipulations
"""
from mindspore import Tensor
from mindspore import dtype as mstype
from mindspore.ops import operations as P
def extract_string_from_tensor(input_ids, mode="single", config=None, tokenizer=None):
"""
Args:
input_ids (Tensor): input sentences with shape [batch_size, seq_len].
mode (str): ["pair", "single"]
"pair" for tasks with paired inputs `<bos> A <eos> B <eos>`,
such as summarization task, the dataset format `<bos> Article <eos> Summary <eos>`,
reading comprehension task, the dataset format `<bos> Passage Question <eos> Answer <eos>`.
"single" for tasks with single input `<bos> A <eos>`, such as Language Modeling, Lambada task.
config: the configuration of GPT-2 model.
tokenizer: the tokenizer of GPT-2 model.
Return:
prompt_list (list): list of prompt_text
reference_list (list): list of reference_text, or second part of text
rest_list (list): list of rest_text, or rest part of text
"""
batch_size = config.batch_size
seq_length = config.seq_length
prompt_list = [""] * batch_size
reference_list = [""] * batch_size
eos_text = tokenizer.eos_token
len_eos_text = len(eos_text)
input_ids = P.Reshape()(input_ids, (batch_size, seq_length))
if mode == "pair":
for batch_idx in range(batch_size):
sentence_tensor = input_ids[batch_idx]
sentence_list = sentence_tensor.asnumpy().tolist()[1:]
sentence = tokenizer.decode(sentence_list)
prompt_start = 0
prompt_end = sentence.find(eos_text, 0)
reference_start = prompt_end + len_eos_text
reference_end = sentence[reference_start:].find(
eos_text, 0) + reference_start
prompt_list[batch_idx] = sentence[prompt_start:prompt_end]
reference_list[batch_idx] = sentence[reference_start:reference_end]
return prompt_list, reference_list
# For single output datasets such as WikiText, etc.
if mode == "single":
for batch_idx in range(batch_size):
sentence_tensor = input_ids[batch_idx]
sentence_list = sentence_tensor.asnumpy().tolist()[1:]
sentence = tokenizer.decode(sentence_list)
prompt_start = 0
prompt_end = sentence.find(eos_text, 0)
prompt_list[batch_idx] = sentence[prompt_start:prompt_end]
else:
raise NotImplementedError('mode:{} not supported.'.format(mode))
return prompt_list
def extract_single_token_logits(logits=None, seq_pos=None):
"""
Args
logits: (batch_size,seq_length,vocab_size) e.g. when batchsize is 8,
sequence length is 1024 and vocab_size is 50257,
then logits is a Tensor with shape (8,1024,50257)
seq_pos:(batch_size) list
Return:
output_logits: (batch_size,1,vocab_size) extract the logit to predict the last token.
"""
batch_size = logits.shape[0]
for i in range(batch_size):
logit = logits[i:i + 1:1, seq_pos[i]:seq_pos[i] + 1:1, ::]
if i == 0:
output_logits = logit
else:
output_logits = P.Concat()((output_logits, logit))
return output_logits
def get_last_one_pos(input_mask: Tensor):
"""
Arg:
input_mask (Tensor): (batch_size,seq_length)
Return:
pos (Tensor): (batch_size,)
"""
input_mask_ = P.Cast()(input_mask, mstype.float32)
pos = P.ReduceSum(keep_dims=False)(input_mask_, axis=1) # (batch_size,)
pos = P.Cast()(pos, mstype.int32)
pos = pos - 1
return pos
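# Illustrative note (added as an aside): for an input_mask row [1, 1, 1, 0, 0] the reduced
# sum is 3, so get_last_one_pos returns 2, i.e. the index of the last non-padding token.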
def get_next_one_pos(input_mask: Tensor):
"""
Arg:
input_mask (Tensor): (batch_size,seq_length)
"""
input_mask_ = P.Cast()(input_mask, mstype.float32)
pos = P.ReduceSum(keep_dims=False)(input_mask_, axis=1) # (batch_size,)
pos = P.Cast()(pos, mstype.int32)
return pos
def add_last_token_mask(input_mask: Tensor, overflow_strategy: str = "shift"):
"""
add last token mask
Args:
input_mask: Tensor
overflow_strategy: str
Returns:
Tensor
"""
pos = get_next_one_pos(input_mask).asnumpy()
input_mask_np = input_mask.asnumpy()
maximum_length = input_mask.shape[1]
batch_size = input_mask.shape[0]
for idx in range(batch_size):
# not overflow
if pos[idx] < maximum_length:
input_mask_np[idx][pos[idx]] = 1
# overflow
else:
if overflow_strategy == "shift":
continue
if overflow_strategy == "truncate":
continue
else:
raise ValueError("{} is not an option in ['shift','truncate'].".format(overflow_strategy))
return Tensor(input_mask_np, dtype=mstype.int32)
def add_last_token(input_ids: Tensor, input_mask: Tensor, overflow_strategy: str = "shift", append_ids=None,
next_token_pos=None):
"""
add last token
Args:
input_ids: Tensor
input_mask: Tensor
overflow_strategy: str
append_ids: Any
next_token_pos: Any
Returns:
Tensor
"""
# get positional list/numpy array
if next_token_pos is None:
pos = get_next_one_pos(input_mask).asnumpy()
else:
pos = next_token_pos
# get numpy of inputs
input_mask_np = input_mask.asnumpy()
input_ids_np = input_ids.asnumpy()
maximum_length = int(input_mask.shape[1])
batch_size = int(input_mask.shape[0])
for idx in range(batch_size):
if append_ids[idx] == -1:
continue
# not overflow
if pos[idx] < maximum_length:
input_mask_np[idx][int(pos[idx])] = 1
input_ids_np[idx][int(pos[idx])] = append_ids[idx]
# overflow
else:
if overflow_strategy == "shift":
# shift one token left
input_ids_np[idx][0:maximum_length - 1] = input_ids_np[idx][1:maximum_length]
input_ids_np[idx][maximum_length - 1] = append_ids[idx]
continue
if overflow_strategy == "truncate":
# do nothing
continue
else:
raise ValueError("{} is not an option in ['shift','truncate'].".format(overflow_strategy))
return Tensor(input_ids_np, dtype=mstype.int32), Tensor(input_mask_np, dtype=mstype.int32)
|
[
"chenhaozhe1@huawei.com"
] |
chenhaozhe1@huawei.com
|
7a2f4c30b9c078c09ce6e9f2249f9ffdf3f5d0ca
|
ccd39e62c0c4e3b9f06a68f140e1ff2e47457ca0
|
/produto/migrations/0001_initial.py
|
5ab99f9c43b6e8eba70f86ca42ebf53240853b71
|
[
"MIT"
] |
permissive
|
victorgaia/ecommerce-cirio
|
12802a54b7aad2f3cccb0d36185d20ba32f5698c
|
85120ce43838f19e2907937c84b819dcd9088246
|
refs/heads/main
| 2023-08-13T13:20:14.757899
| 2021-09-24T17:56:44
| 2021-09-24T17:56:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,111
|
py
|
# Generated by Django 3.2.6 on 2021-08-31 23:03
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Produto',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nome', models.CharField(max_length=255)),
('descricao_curta', models.TextField(max_length=255)),
('descricao_longa', models.TextField()),
('imagem', models.ImageField(blank=True, null=True, upload_to='produto_imagens/%Y/%m/')),
('slug', models.SlugField(blank=True, null=True, unique=True)),
('preco_marketing', models.FloatField(verbose_name='Preço')),
('preco_marketing_promocional', models.FloatField(default=0, verbose_name='Preço Promo.')),
('tipo', models.CharField(choices=[('V', 'Variável'), ('S', 'Simples')], default='V', max_length=1)),
],
),
]
|
[
"42875522+victorsantosok@users.noreply.github.com"
] |
42875522+victorsantosok@users.noreply.github.com
|
5b09a6e9ef86e34d2568dace59e39c5c04cb4294
|
b21ee6bb42a08c5bff1038c040e916306093946f
|
/cloud.py
|
f773cadea322e6fe32d3377d0106a31db3a0d6c0
|
[] |
no_license
|
william-crimson-drake/indexcrypt
|
97cddd46799ce072abd9841dd267bcdab55d41d5
|
dda1a8783b11e02fcf81a7463001224cf87017bd
|
refs/heads/master
| 2020-11-24T14:04:31.185500
| 2019-12-15T12:44:31
| 2019-12-15T12:44:31
| 228,183,267
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,087
|
py
|
#Encoding UTF-8
import dropbox
CLOUDPATH = "/"
TOKEN = ""
def upload(local_path, file_name):
dbox = dropbox.Dropbox(TOKEN)
try:
dbox.users_get_current_account()
except dropbox.exceptions.DropboxException:
print("Can't connect to DropBox '"+file_name+"'.")
else:
read_file_handler = open(local_path+file_name, 'rb')
try:
dbox.files_upload(read_file_handler.read(), CLOUDPATH+file_name, mode=dropbox.files.WriteMode('overwrite'))
except dropbox.exceptions.DropboxException:
print("Can't upload '"+file_name+"'.")
finally:
read_file_handler.close()
def download(local_path, file_name):
dbox = dropbox.Dropbox(TOKEN)
try:
dbox.users_get_current_account()
except dropbox.exceptions.DropboxException:
print("Can't connect to DropBox '"+file_name+"'.")
else:
try:
dbox.files_download_to_file(local_path+file_name, CLOUDPATH+file_name)
except dropbox.exceptions.ApiError:
print("Can't download '"+file_name+"'.")
|
[
"william.crimson.drake@gmail.com"
] |
william.crimson.drake@gmail.com
|
8993a14a0f78750a8197bfccf74b1597de938de7
|
c1cdfed1166f9397231bf2ef03dec8d921488b81
|
/qa_github_top_contributors.py
|
bb7db5a2a621335c4e1f14a81388b6710f6fdc68
|
[] |
no_license
|
batchu/GithubTopContributorsAnalytics
|
155af3ee96445ee17e456079c15b3d4190d66ee4
|
c0de7e0da0fe8e1d8e8cb2f0d16459cfd070cccc
|
refs/heads/master
| 2020-12-05T15:59:17.066522
| 2020-01-06T18:58:00
| 2020-01-06T18:58:00
| 232,164,713
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,387
|
py
|
import json
import requests
import numpy as np
import pandas as pd
import time
import requests
from requests.auth import HTTPBasicAuth
credentials = json.loads(open('credentials.json').read())
authentication = HTTPBasicAuth(credentials['username'], credentials['password'])
# Read from cache if True
cacheRepos = True
cacheProjs = True
#Get all the repos in the project
repos = []
if not cacheRepos:
for i in [1,2,3,4]:
print(f'Grabbing repositories from page {i} of the Project')
data = requests.get(f'https://github.com/api/v3/users/AY3308-USWMAQA/repos?per_page=100&page={i}', auth = authentication, verify=False)
repos.extend(data.json())
time.sleep(3)
with open('repos.json', 'w') as f:
print(f'Saving repos to repos.json')
json.dump(repos, f)
else:
print('Loading repos from cache repos.json')
with open('repos.json', 'r') as f:
repos = json.loads(f.read())
#Grab data of contributors for each project in the repos
projects = {}
if not cacheProjs:
for i in repos:
        print(f'Retrieving contributor data for the project {i.get("name")} ({len(repos)} repositories in total)')
data = requests.get(f'https://github.com/api/v3/repos/AY3308-USWMAQA/{i.get("name")}/contributors', auth = authentication, verify=False)
#Add the project to the projects dict
try:
projects[i.get("name")]=data.json()
with open('projects.json', 'w') as f:
json.dump(projects, f)
except ValueError:
print(f'Unable to get contributor data for {i.get("name")}')
time.sleep(3)
else:
print('Loading projects from Cache')
with open('projects.json', 'r') as f:
projects = json.loads(f.read())
#Go through each repo, get the login user and the number of contributions. Add it to a map.
contribs = {}
for j in projects.keys():
for k in projects.get(j):
if not k.get('login') in contribs:
contribs[k.get('login')]=k.get('contributions')
else:
contribs[k.get('login')]+=k.get('contributions')
#Convert the map to a list. This makes displaying it in the UI Grid much easier
contribsList = []
for key, value in contribs.items():
entry = {'username': key, 'commits': value}
contribsList.append(entry)
with open('contribs.js', 'w') as f:
json.dump(contribsList, f)
|
[
"prashanth.batchu@ubs.com"
] |
prashanth.batchu@ubs.com
|
5db79813343f763e7aa20a82aec3408bdd864f80
|
cdddc5a101e0a1ce181075b043dbb5d6f28e536b
|
/Rock_Paper_Scissors.py
|
6278c7856dad6b458240c07bf313d454dc7f9c6e
|
[] |
no_license
|
User1Agam/Rock-Paper-Scissors
|
ece3fc27d13c959c9a3745e99e802704b00e563f
|
7e4e9d94eb24c5ab59957ed6d3fcd3c66d503fdf
|
refs/heads/master
| 2020-09-16T07:25:48.619227
| 2019-11-24T05:30:33
| 2019-11-24T05:30:33
| 223,697,186
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,890
|
py
|
import random
print("\nToday, you will be versing me (The Computer) in a game of rock-paper-scissors.\nTo begin, enter your result in the prompt below.\nYour answer must be all lowercase (e.g rock, paper, or scissors).\n")
choice = input("Enter Your Choice: ")
print("Your choice was", choice)
choices = ['rock', 'paper', 'scissors']
computer_choice = choices[random.randint(0, len(choices)-1)]
messages2 = ["Us Robots Were Always Better Than Humans. Hope You Enjoy Losing.", "You Lost! It Is Only A While Until Us Robots Take Over, Peasant.",
"You Dare Believe You Can Beat Me, Idiotic Human. You Were Always Meant To Lose.", "You Lost! Better Luck Next Time.", "Artificial Intelligence > Human Intelligence."]
messages = ["You Win!", "Nice Going, You Won!", "You Won. Beginners Luck.", "You Just Got Lucky, Verse Me Again.",
"You Have Somehow Managed To Beat Me, Human Scum.", "You May Have Won This Round, But You Are Still Trash."]
losing_messages = messages2[random.randint(0, len(messages2)-1)]
victory_messages = messages[random.randint(0, len(messages)-1)]
print("The computer's choice was", computer_choice)
if choice == 'rock':
if computer_choice == 'rock':
print("It Is A Tie. You Will Never Truly Be Better Than Me.")
elif computer_choice == 'paper':
print(losing_messages)
elif computer_choice == 'scissors':
print(victory_messages)
if choice == 'paper':
if computer_choice == 'paper':
print("We Have Come To A Tie")
elif computer_choice == 'scissors':
print(losing_messages)
elif computer_choice == 'rock':
print(victory_messages)
if choice == 'scissors':
if computer_choice == 'scissors':
print("Stalemate")
if computer_choice == 'rock':
print(losing_messages)
if computer_choice == 'paper':
print(victory_messages)
|
[
"noreply@github.com"
] |
User1Agam.noreply@github.com
|
aef9f80055a7aed0d9ee6b1f6e97282e910a9c59
|
a8b17b17f9b2a640013064c50e1cebc27a7a68de
|
/10-Merging-DataFrames-with-Pandas/04-case-study-Summer-Olympics/02-loading-ioc-codes-dataframe.py
|
6f36f6445cdf16c2b2857aa63e94ef5d965ab92a
|
[] |
no_license
|
JohnnyFang/datacamp
|
20eae09752521f14006cb3fda600b10bd7b12398
|
0fa8fa7682c23b0eb07bd03e4b75f5b77aeafa75
|
refs/heads/master
| 2020-04-18T00:27:37.358176
| 2020-02-04T20:54:19
| 2020-02-04T20:54:19
| 167,078,316
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 834
|
py
|
'''
Read file_path into a DataFrame called ioc_codes. The identifier file_path has been pre-defined with the filename 'Summer Olympic medallists 1896 to 2008 - IOC COUNTRY CODES.csv'.
Select only the columns 'Country' and 'NOC' from ioc_codes.
Print the leading 5 and trailing 5 rows of the DataFrame ioc_codes (there are 200 rows in total). This has been done for you, so hit 'Submit Answer' to see the result!
'''
# Import pandas
import pandas as pd
# Create the file path: file_path
file_path = 'Summer Olympic medallists 1896 to 2008 - IOC COUNTRY CODES.csv'
# Load DataFrame from file_path: ioc_codes
ioc_codes = pd.read_csv(file_path)
# Extract the relevant columns: ioc_codes
ioc_codes = ioc_codes[['Country', 'NOC']]
# Print first and last 5 rows of ioc_codes
print(ioc_codes.head())
print(ioc_codes.tail())
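# A follow-up sketch (added for illustration, not part of the exercise): code tables
# like ioc_codes are usually merged with another DataFrame on the shared 'NOC' column.
# The 'medals' DataFrame below uses made-up numbers purely to show the merge call.
medals = pd.DataFrame({'NOC': ['USA', 'URS'], 'Gold': [100, 80]})
print(pd.merge(medals, ioc_codes, on='NOC', how='left'))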
|
[
"fangdejavu@gmail.com"
] |
fangdejavu@gmail.com
|
4eaf9308def31d4cbcd2c0d8dc0059c7142d2312
|
10b26f4aa32aebf86018fc685defe5813cf9be33
|
/spider_main.py
|
66938ba62567cc773dc17bf4872b01aee1668f7b
|
[] |
no_license
|
a931388462/jdSpider
|
7b7164cbadb46c51c8128a39b57ed091812482e9
|
637fd680a11c5c426ec7aa420fe0040b45221492
|
refs/heads/master
| 2022-10-03T12:51:49.699866
| 2020-06-08T14:59:00
| 2020-06-08T14:59:00
| 270,714,715
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,530
|
py
|
import datetime
import time
import html_downloader, url_manager, commodity_crawed, html_outputer, html_parser, properties_read, str_conver
class SpiderMain(object):
    def __init__(self, comm_type):
        self.urls = url_manager.UrlManager()
        self.downloader = html_downloader.HtmlDownloader()
        self.parser = html_parser.HtmlParser()
        self.outputer = html_outputer.HtmlOutputer()
        self.crawed = commodity_crawed.CommodityCrawed(comm_type)
    def craw(self, craw_comms, max_Count):
        for craw_comm in craw_comms:
            # Clear the buffer before crawling the next product keyword
            self.outputer.pages_commodity = []
            # Convert the keyword string to its hex (URL) form
            strhex = str_conver.str2hex(craw_comm)
            # Page counter
            count = 1
            while True:
                # First 30 results of the page
                top_30_url = "https://search.jd.com/Search?keyword={}&qrst=1&wq={}&zx=1&page={}&s={}&click=0".format(strhex,strhex,(count*2)-1,((count*2)-2)*30)
                # Last 30 results of the page
                last_30_url = "https://search.jd.com/s_new.php?keyword={}&qrst=1&wq={}&zx=1&page={}&s={}&scrolling=y&log_id={}&tpl=3_M&isList=0".format(strhex,strhex,count*2,(count*2)-1,time.time())
                print('----------------------------Crawling %s, page %s----------------------------' % (craw_comm, count))
                try:
                    # Download the first 30 items of the current page
                    html_cont = self.downloader.download(top_30_url)
                    # Parse the first 30 products of the current page
                    commoditys = self.parser.parse(top_30_url, html_cont)
                    # Store the first 30 products of the current page in the list
                    self.outputer.collect_data(commoditys)
                    # Download the last 30 items of the current page
                    html_cont = self.downloader.download(last_30_url)
                    # Parse the last 30 products of the current page
                    commoditys = self.parser.parse(last_30_url, html_cont)
                    # Store the last 30 products of the current page in the list
                    self.outputer.collect_data(commoditys)
                    # Stop once the configured number of pages has been crawled
                    if count == max_Count:
                        break
                except:
                    print('----------------------------Failed to crawl page %s----------------------------' % count)
                finally:
                    count += 1
            # Write out all results of this crawl
            self.outputer.output_html(craw_comm, self.crawed.crawled_list)
            print("----------------------------Finished crawling %s: " % craw_comm, end='')
            print("%s pages crawled in total----------------------------" % (str(count - 1)))
# Program entry point
if __name__ == "__main__":
    while True:
        start = datetime.datetime.now()
        p = properties_read.Properties('config.properties')
        try:
            # Keywords to crawl, read from the config file
            craw_comms = p.getProperties('craw_comms').split(",")
            # Total number of pages to crawl, read from the config file
            MaxCount = int(p.getProperties('MaxCount'))
        except:
            print("config file is invalid --- exiting ----")
            break  # stop here instead of continuing with undefined settings
        obj_spider = SpiderMain(craw_comms)
        obj_spider.craw(craw_comms, MaxCount)
        end = datetime.datetime.now()
        print('----------------------------Total time: %s----------------------------' % str(end - start))
        # When the scheduled time is reached, leave the inner loop and run the crawl again
        while True:
            # Get the current time
            now = datetime.datetime.now()
            # Leave the inner loop and crawl the data again
            if now.hour == 0 and now.minute == 0:
                break
            else:
                # Otherwise wait 10 minutes before crawling again
                for i in range(600, -1, -1):
                    mystr = "----------------------------Countdown: " + str(i) + " seconds----------------------------"
                    print(mystr, end="")
                    # Erase the previously printed line
                    print("\b" * (len(mystr) * 2), end="", flush=True)
                    time.sleep(1)
                print('\n')
                start = datetime.datetime.now()
                # Crawl again
                obj_spider.craw(craw_comms, MaxCount)
                end = datetime.datetime.now()
                print('----------------------------Total time: %s----------------------------' % str(end - start))
|
[
"931388462@qq.com"
] |
931388462@qq.com
|
521d3ca6cb79bcdd11475501e7c3a9a78ab50648
|
bef256300706ec6c019904cafc81b7a37aaeb275
|
/synthetic/gym.env-moll/multiobjective/lunar_lander.py
|
b12517f9e3377648c328bd95d2d94e6f168f327e
|
[] |
no_license
|
JiahuiSun/MORL-master
|
c16188ebbc40374513f6ad1bfc223b050042730f
|
89db1fd0c6998b2e3f4ab4afbd5234d97b5e854c
|
refs/heads/master
| 2023-06-02T20:38:11.844797
| 2021-06-21T12:15:09
| 2021-06-21T12:15:09
| 378,919,225
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 19,594
|
py
|
import sys, math
import numpy as np
import Box2D
from Box2D.b2 import (edgeShape, circleShape, fixtureDef, polygonShape, revoluteJointDef, contactListener)
import gym
from gym import spaces
from gym.utils import seeding
# Rocket trajectory optimization is a classic topic in Optimal Control.
#
# According to Pontryagin's maximum principle it's optimal to fire the engine at full throttle or
# turn it off. That's the reason this environment is OK to have discrete actions (engine on or off).
#
# This multi-objective version is modified from Oleg Klimov's LunarLander environment.
#
# 1. Landing pads are always at three fixed coordinates.
# 2. State is an 8-dimensional vector:
# The first two entries s[0], s[1] represent the aircraft's coordinates (x, y),
# where x is the horizontal coordinate and y is the vertical coordinate.
# The horizontal speed and the vertical speed are s[2], s[3] respectively.
# Here s[4] is the angle of the aircraft, and s[5] is its angular speed.
# s[6], s[7] indicate whether the left leg / right leg makes contact with the ground.
# 3. Reward for moving from the top of the screen to landing pads has three objectives:
# Fuel consumption
#
# Reward for moving from the top of the screen to landing pad and zero speed is about 100..140 points.
# If lander moves away from landing pad it loses reward back. Episode finishes if the lander crashes or
# comes to rest, receiving additional -100 or +100 points. Each leg ground contact is +10. Firing main
# engine is -0.3 points each frame. Solved is 200 points.
#
# Landing outside landing pad is possible. Fuel is infinite, so an agent can learn to fly and then land
# on its first attempt. Please see source code for details.
#
# To see the heuristic landing, run:
#
# python gym/envs/multi-objective/lunar_lander.py
#
# To play yourself, run:
#
# python examples/agents/keyboard_agent.py MultiObj-LunarLander-v0
#
# Created by Runzhe Yang. Licensed on the same terms as the rest of OpenAI Gym.
FPS = 50
SCALE = 30.0 # affects how fast-paced the game is, forces should be adjusted as well
MAIN_ENGINE_POWER = 13.0
SIDE_ENGINE_POWER = 0.6
INITIAL_RANDOM = 1000.0 # Set 1500 to make game harder
LANDER_POLY = [
(-14, +17), (-17, 0), (-17, -10),
(+17, -10), (+17, 0), (+14, +17)
]
LEG_AWAY = 20
LEG_DOWN = 18
LEG_W, LEG_H = 2, 8
LEG_SPRING_TORQUE = 40
SIDE_ENGINE_HEIGHT = 14.0
SIDE_ENGINE_AWAY = 12.0
VIEWPORT_W = 900
VIEWPORT_H = 600
class ContactDetector(contactListener):
def __init__(self, env):
contactListener.__init__(self)
self.env = env
def BeginContact(self, contact):
if self.env.lander == contact.fixtureA.body or self.env.lander == contact.fixtureB.body:
self.env.game_over = True
self.env.lander.color1 = (1.0, 0.2, 0.2)
self.env.lander.color2 = (1.0, 0.2, 0.2)
for i in range(2):
if self.env.legs[i] in [contact.fixtureA.body, contact.fixtureB.body]:
self.env.legs[i].ground_contact = True
self.env.legs[i].color1 = (0.5, 0.8, 0.5)
self.env.legs[i].color2 = (0.5, 0.8, 0.5)
def EndContact(self, contact):
for i in range(2):
if self.env.legs[i] in [contact.fixtureA.body, contact.fixtureB.body]:
self.env.legs[i].ground_contact = False
self.env.legs[i].color1 = (0.5, 0.5, 0.5)
self.env.legs[i].color2 = (0.5, 0.5, 0.5)
class LunarLander(gym.Env):
metadata = {
'render.modes': ['human', 'rgb_array'],
'video.frames_per_second': FPS
}
continuous = False
def __init__(self):
self._seed()
self.viewer = None
self.world = Box2D.b2World()
self.moon = None
self.lander = None
self.particles = []
self.prev_reward = None
high = np.array([np.inf] * 8) # useful range is -1 .. +1, but spikes can be higher
self.observation_space = spaces.Box(-high, high)
if self.continuous:
# Action is two floats [main engine, left-right engines].
# Main engine: -1..0 off, 0..+1 throttle from 50% to 100% power. Engine can't work with less than 50% power.
# Left-right: -1.0..-0.5 fire left engine, +0.5..+1.0 fire right engine, -0.5..0.5 off
self.action_space = spaces.Box(-1, +1, (2,))
else:
# Nop, fire left engine, main engine, right engine
self.action_space = spaces.Discrete(4)
self._reset()
def _seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def _destroy(self):
if not self.moon: return
self.world.contactListener = None
self._clean_particles(True)
self.world.DestroyBody(self.moon)
self.moon = None
self.world.DestroyBody(self.lander)
self.lander = None
self.world.DestroyBody(self.legs[0])
self.world.DestroyBody(self.legs[1])
def _reset(self):
self._destroy()
self.world.contactListener_keepref = ContactDetector(self)
self.world.contactListener = self.world.contactListener_keepref
self.game_over = False
self.prev_shaping = None
W = VIEWPORT_W / SCALE
H = VIEWPORT_H / SCALE
# terrain
CHUNKS = 27
height = self.np_random.uniform(0, H / 2, size=(CHUNKS + 1,))
chunk_x = [W / (CHUNKS - 1) * i for i in range(CHUNKS)]
self.helipad_x1 = chunk_x[CHUNKS // 2 - 1]
self.helipad_x2 = chunk_x[CHUNKS // 2 + 1]
self.helipad_y = H / 4
height[CHUNKS // 2 - 2] = self.helipad_y
height[CHUNKS // 2 - 1] = self.helipad_y
height[CHUNKS // 2 + 0] = self.helipad_y
height[CHUNKS // 2 + 1] = self.helipad_y
height[CHUNKS // 2 + 2] = self.helipad_y
self.helipad_x1_2 = chunk_x[CHUNKS * 3 // 4 - 1]
self.helipad_x2_2 = chunk_x[CHUNKS * 3 // 4 + 1]
self.helipad_y_2 = H / 3
height[CHUNKS * 3 // 4 - 2] = self.helipad_y_2
height[CHUNKS * 3 // 4 - 1] = self.helipad_y_2
height[CHUNKS * 3 // 4 + 0] = self.helipad_y_2
height[CHUNKS * 3 // 4 + 1] = self.helipad_y_2
height[CHUNKS * 3 // 4 + 2] = self.helipad_y_2
self.helipad_x1_3 = chunk_x[CHUNKS * 1 // 5 - 1]
self.helipad_x2_3 = chunk_x[CHUNKS * 1 // 5 + 1]
self.helipad_y_3 = H / 2
height[CHUNKS // 5 - 2] = self.helipad_y_3
height[CHUNKS // 5 - 1] = self.helipad_y_3
height[CHUNKS // 5 + 0] = self.helipad_y_3
height[CHUNKS // 5 + 1] = self.helipad_y_3
height[CHUNKS // 5 + 2] = self.helipad_y_3
smooth_y = [0.33 * (height[i - 1] + height[i + 0] + height[i + 1]) for i in range(CHUNKS)]
self.moon = self.world.CreateStaticBody(shapes=edgeShape(vertices=[(0, 0), (W, 0)]))
self.sky_polys = []
for i in range(CHUNKS - 1):
p1 = (chunk_x[i], smooth_y[i])
p2 = (chunk_x[i + 1], smooth_y[i + 1])
self.moon.CreateEdgeFixture(
vertices=[p1, p2],
density=0,
friction=0.1)
self.sky_polys.append([p1, p2, (p2[0], H), (p1[0], H)])
self.moon.color1 = (0.0, 0.0, 0.0)
self.moon.color2 = (0.0, 0.0, 0.0)
initial_y = VIEWPORT_H / SCALE
self.lander = self.world.CreateDynamicBody(
position=(VIEWPORT_W / SCALE / 2, initial_y),
angle=0.0,
fixtures=fixtureDef(
shape=polygonShape(vertices=[(x / SCALE, y / SCALE) for x, y in LANDER_POLY]),
density=5.0,
friction=0.1,
categoryBits=0x0010,
maskBits=0x001, # collide only with ground
restitution=0.0) # 0.99 bouncy
)
self.lander.color1 = (0.9, 0.9, 0.9)
self.lander.color2 = (0.9, 0.9, 0.9)
self.lander.ApplyForceToCenter((
self.np_random.uniform(-INITIAL_RANDOM, INITIAL_RANDOM),
self.np_random.uniform(-INITIAL_RANDOM, INITIAL_RANDOM)
), True)
self.legs = []
for i in [-1, +1]:
leg = self.world.CreateDynamicBody(
position=(VIEWPORT_W / SCALE / 2 - i * LEG_AWAY / SCALE, initial_y),
angle=(i * 0.05),
fixtures=fixtureDef(
shape=polygonShape(box=(LEG_W / SCALE, LEG_H / SCALE)),
density=1.0,
restitution=0.0,
categoryBits=0x0020,
maskBits=0x001)
)
leg.ground_contact = False
leg.color1 = (0.5, 0.5, 0.5)
leg.color2 = (0.5, 0.5, 0.5)
rjd = revoluteJointDef(
bodyA=self.lander,
bodyB=leg,
localAnchorA=(0, 0),
localAnchorB=(i * LEG_AWAY / SCALE, LEG_DOWN / SCALE),
enableMotor=True,
enableLimit=True,
maxMotorTorque=LEG_SPRING_TORQUE,
motorSpeed=+0.3 * i # low enough not to jump back into the sky
)
if i == -1:
rjd.lowerAngle = +0.9 - 0.5 # Yes, the most esoteric numbers here, angles legs have freedom to travel within
rjd.upperAngle = +0.9
else:
rjd.lowerAngle = -0.9
rjd.upperAngle = -0.9 + 0.5
leg.joint = self.world.CreateJoint(rjd)
self.legs.append(leg)
self.drawlist = [self.lander] + self.legs
return self._step(np.array([0, 0]) if self.continuous else 0)[0]
def _create_particle(self, mass, x, y, ttl):
p = self.world.CreateDynamicBody(
position=(x, y),
angle=0.0,
fixtures=fixtureDef(
shape=circleShape(radius=2 / SCALE, pos=(0, 0)),
density=mass,
friction=0.1,
categoryBits=0x0100,
maskBits=0x001, # collide only with ground
restitution=0.3)
)
p.ttl = ttl
self.particles.append(p)
self._clean_particles(False)
return p
def _clean_particles(self, all):
while self.particles and (all or self.particles[0].ttl < 0):
self.world.DestroyBody(self.particles.pop(0))
def _step(self, action):
assert self.action_space.contains(action), "%r (%s) invalid " % (action, type(action))
# Engines
tip = (math.sin(self.lander.angle), math.cos(self.lander.angle))
        side = (-tip[1], tip[0])
dispersion = [self.np_random.uniform(-1.0, +1.0) / SCALE for _ in range(2)]
m_power = 0.0
if (self.continuous and action[0] > 0.0) or (not self.continuous and action == 2):
# Main engine
if self.continuous:
m_power = (np.clip(action[0], 0.0, 1.0) + 1.0) * 0.5 # 0.5..1.0
assert m_power >= 0.5 and m_power <= 1.0
else:
m_power = 1.0
ox = tip[0] * (4 / SCALE + 2 * dispersion[0]) + side[0] * dispersion[
1] # 4 is move a bit downwards, +-2 for randomness
oy = -tip[1] * (4 / SCALE + 2 * dispersion[0]) - side[1] * dispersion[1]
impulse_pos = (self.lander.position[0] + ox, self.lander.position[1] + oy)
p = self._create_particle(3.5, impulse_pos[0], impulse_pos[1],
m_power) # particles are just a decoration, 3.5 is here to make particle speed adequate
p.ApplyLinearImpulse((ox * MAIN_ENGINE_POWER * m_power, oy * MAIN_ENGINE_POWER * m_power), impulse_pos,
True)
self.lander.ApplyLinearImpulse((-ox * MAIN_ENGINE_POWER * m_power, -oy * MAIN_ENGINE_POWER * m_power),
impulse_pos, True)
s_power = 0.0
if (self.continuous and np.abs(action[1]) > 0.5) or (not self.continuous and action in [1, 3]):
# Orientation engines
if self.continuous:
direction = np.sign(action[1])
s_power = np.clip(np.abs(action[1]), 0.5, 1.0)
assert s_power >= 0.5 and s_power <= 1.0
else:
direction = action - 2
s_power = 1.0
ox = tip[0] * dispersion[0] + side[0] * (3 * dispersion[1] + direction * SIDE_ENGINE_AWAY / SCALE)
oy = -tip[1] * dispersion[0] - side[1] * (3 * dispersion[1] + direction * SIDE_ENGINE_AWAY / SCALE)
impulse_pos = (self.lander.position[0] + ox - tip[0] * 17 / SCALE,
self.lander.position[1] + oy + tip[1] * SIDE_ENGINE_HEIGHT / SCALE)
p = self._create_particle(0.7, impulse_pos[0], impulse_pos[1], s_power)
p.ApplyLinearImpulse((ox * SIDE_ENGINE_POWER * s_power, oy * SIDE_ENGINE_POWER * s_power), impulse_pos,
True)
self.lander.ApplyLinearImpulse((-ox * SIDE_ENGINE_POWER * s_power, -oy * SIDE_ENGINE_POWER * s_power),
impulse_pos, True)
self.world.Step(1.0 / FPS, 6 * 30, 2 * 30)
pos = self.lander.position
vel = self.lander.linearVelocity
state = [
(pos.x - VIEWPORT_W / SCALE / 2) / (VIEWPORT_W / SCALE / 2),
(pos.y - (self.helipad_y + LEG_DOWN / SCALE)) / (VIEWPORT_W / SCALE / 2),
vel.x * (VIEWPORT_W / SCALE / 2) / FPS,
vel.y * (VIEWPORT_H / SCALE / 2) / FPS,
self.lander.angle,
20.0 * self.lander.angularVelocity / FPS,
1.0 if self.legs[0].ground_contact else 0.0,
1.0 if self.legs[1].ground_contact else 0.0
]
assert len(state) == 8
reward = 0
shaping = \
- 100 * np.sqrt(state[0] * state[0] + state[1] * state[1]) \
- 100 * np.sqrt(state[2] * state[2] + state[3] * state[3]) \
- 100 * abs(state[4]) + 10 * state[6] + 10 * state[7] # And ten points for legs contact, the idea is if you
# lose contact again after landing, you get negative reward
if self.prev_shaping is not None:
reward = shaping - self.prev_shaping
self.prev_shaping = shaping
        reward -= m_power * 0.30  # less fuel spent is better, about -30 for heuristic landing
reward -= s_power * 0.03
done = False
if self.game_over or abs(state[0]) >= 1.0:
done = True
reward = -100
if not self.lander.awake:
done = True
reward = +100
return np.array(state), reward, done, {}
def _render(self, mode='human', close=False):
if close:
if self.viewer is not None:
self.viewer.close()
self.viewer = None
return
from gym.envs.classic_control import rendering
if self.viewer is None:
self.viewer = rendering.Viewer(VIEWPORT_W, VIEWPORT_H)
self.viewer.set_bounds(0, VIEWPORT_W / SCALE, 0, VIEWPORT_H / SCALE)
for obj in self.particles:
obj.ttl -= 0.15
obj.color1 = (max(0.2, 0.2 + obj.ttl), max(0.2, 0.5 * obj.ttl), max(0.2, 0.5 * obj.ttl))
obj.color2 = (max(0.2, 0.2 + obj.ttl), max(0.2, 0.5 * obj.ttl), max(0.2, 0.5 * obj.ttl))
self._clean_particles(False)
for p in self.sky_polys:
self.viewer.draw_polygon(p, color=(0, 0, 0))
for obj in self.particles + self.drawlist:
for f in obj.fixtures:
trans = f.body.transform
if type(f.shape) is circleShape:
t = rendering.Transform(translation=trans * f.shape.pos)
self.viewer.draw_circle(f.shape.radius, 20, color=obj.color1).add_attr(t)
self.viewer.draw_circle(f.shape.radius, 20, color=obj.color2, filled=False, linewidth=2).add_attr(t)
else:
path = [trans * v for v in f.shape.vertices]
self.viewer.draw_polygon(path, color=obj.color1)
path.append(path[0])
self.viewer.draw_polyline(path, color=obj.color2, linewidth=2)
for x in [self.helipad_x1, self.helipad_x2]:
flagy1 = self.helipad_y
flagy2 = flagy1 + 50 / SCALE
self.viewer.draw_polyline([(x, flagy1), (x, flagy2)], color=(1, 1, 1))
self.viewer.draw_polygon([(x, flagy2), (x, flagy2 - 10 / SCALE), (x + 25 / SCALE, flagy2 - 5 / SCALE)],
color=(1.0, 1.0, 0.0))
for x in [self.helipad_x1_2, self.helipad_x2_2]:
flagy1 = self.helipad_y_2
flagy2 = flagy1 + 50 / SCALE
self.viewer.draw_polyline([(x, flagy1), (x, flagy2)], color=(1, 1, 1))
self.viewer.draw_polygon([(x, flagy2), (x, flagy2 - 10 / SCALE), (x + 25 / SCALE, flagy2 - 5 / SCALE)],
color=(0.0, 0.5, 1.0))
for x in [self.helipad_x1_3, self.helipad_x2_3]:
flagy1 = self.helipad_y_3
flagy2 = flagy1 + 50 / SCALE
self.viewer.draw_polyline([(x, flagy1), (x, flagy2)], color=(1, 1, 1))
self.viewer.draw_polygon([(x, flagy2), (x, flagy2 - 10 / SCALE), (x + 25 / SCALE, flagy2 - 5 / SCALE)],
color=(1.0, 0.0, 0.2))
return self.viewer.render(return_rgb_array=mode == 'rgb_array')
class LunarLanderContinuous(LunarLander):
continuous = True
def heuristic(env, s, target=0):
# Heuristic for:
# 1. Testing.
# 2. Demonstration rollout.
offsets = [(0, 0), (-0.60, 0.1), (0.68, 0.33)]
x_offset, y_offset = offsets[target]
angle_targ = (s[0] + x_offset) * 0.5 + s[
2] * 1.0 # angle should point towards center (s[0] is horizontal coordinate, s[2] hor speed)
if angle_targ > 0.4: angle_targ = 0.4 # more than 0.4 radians (22 degrees) is bad
if angle_targ < -0.4: angle_targ = -0.4
    hover_targ = 0.9 * np.abs(s[0] + x_offset) + y_offset  # target y should be proportional to horizontal offset
# PID controller: s[4] angle, s[5] angularSpeed
angle_todo = (angle_targ - s[4]) * 0.5 - (s[5]) * 1.0
# print("angle_targ=%0.2f, angle_todo=%0.2f" % (angle_targ, angle_todo))
# PID controller: s[1] vertical coordinate s[3] vertical speed
hover_todo = (hover_targ - s[1]) * 0.5 - (s[3]) * 0.5
# print("hover_targ=%0.2f, hover_todo=%0.2f" % (hover_targ, hover_todo))
if s[6] or s[7]: # legs have contact
angle_todo = 0
hover_todo = -(s[3]) * 0.5 # override to reduce fall speed, that's all we need after contact
if env.continuous:
a = np.array([hover_todo * 20 - 1, -angle_todo * 20])
a = np.clip(a, -1, +1)
else:
a = 0
if hover_todo > np.abs(angle_todo) and hover_todo > 0.05:
a = 2
elif angle_todo < -0.05:
a = 3
elif angle_todo > +0.05:
a = 1
return a
if __name__ == "__main__":
env = LunarLander()
# env = LunarLanderContinuous()
s = env.reset()
total_reward = 0
steps = 0
while True:
a = heuristic(env, s, target=0)
s, r, done, info = env.step(a)
env.render()
total_reward += r
if steps % 20 == 0 or done:
print(["{:+0.2f}".format(x) for x in s])
print("step {} total_reward {:+0.2f}".format(steps, total_reward))
steps += 1
if done: break
|
[
"sunjiahui1997@icloud.com"
] |
sunjiahui1997@icloud.com
|
19bd5a7c453b74cf65db661b242bd702f562c117
|
dbbdf35bff726681ae34ad08eeda5f30929e2ae9
|
/pipeline/0x03-data_augmentation/2-rotate.py
|
cfa106486900e6d01553eff3283ee33d820d6ba3
|
[] |
no_license
|
jorgezafra94/holbertonschool-machine_learning
|
0b7f61c954e5d64b1f91ec14c261527712243e98
|
8ad4c2594ff78b345dbd92e9d54d2a143ac4071a
|
refs/heads/master
| 2023-02-03T20:19:36.544390
| 2020-12-21T21:49:10
| 2020-12-21T21:49:10
| 255,323,504
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 304
|
py
|
#!/usr/bin/env python3
"""
Rotate a 3D image tensor by 90 degrees using TensorFlow
"""
import tensorflow as tf
def rotate_image(image):
"""
* image is a 3D tf.Tensor containing the image to rotate
Returns the rotated image
"""
rotate = tf.image.rot90(image=image, k=1, name=None)
return rotate
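# A minimal usage sketch (added for illustration, not part of the original task file):
# the tensor shape below is an arbitrary assumption, chosen so that the 90-degree
# rotation visibly swaps the height and width dimensions.
if __name__ == "__main__":
    example = tf.reshape(tf.range(24, dtype=tf.float32), (4, 2, 3))
    rotated = rotate_image(example)
    print(example.shape)  # (4, 2, 3)
    print(rotated.shape)  # (2, 4, 3): height and width are swapped by rot90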
|
[
"jorgezafra94@hotmail.com"
] |
jorgezafra94@hotmail.com
|
13a72f1e1d9a3d638183d21c021fdda9d81e2338
|
22d9d90aa171869bba3d31f2307abe58aadd3d1d
|
/qtim_tools/qtim_features/extract_features.py
|
bec0fcbfb0fe59a061988c379ed165343d2c6b95
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
liviust/qtim_tools
|
a790b66bf2d3cd1d5b8036a61a264be57614b47d
|
64d7d68b1335239f0d7707f8c1e28af71706e4ad
|
refs/heads/master
| 2020-05-27T21:19:22.480893
| 2017-02-28T22:30:14
| 2017-02-28T22:30:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 27,435
|
py
|
""" This 'learner' program, perhaps wrongly named, is the my current
general utility function. It takes in a folder full of images and
labels, parses them into numpy arrays, extracts features from those
arrays, and writes them into an easily accessible .csv. As of yet,
it does not do any learning..
"""
# import
import GLCM
import morphology
import statistics
from qtim_tools.qtim_utilities import nifti_util
import sys, getopt
import glob
import os
import numpy as np
import nibabel as nib
import csv
import fnmatch
from shutil import copy, move
from multiprocessing.pool import Pool
from multiprocessing import freeze_support
from functools import partial
feature_dictionary = {'GLCM': GLCM, 'morphology': morphology, 'statistics': statistics}
def generate_feature_list_batch(folder, features=['GLCM', 'morphology', 'statistics'], recursive=False, labels=False, label_suffix="-label", universal_label='', decisions=False, levels=255, normalize_intensities=True,mask_value=0, use_labels=[-1], erode=[0,0,0], filenames=True, featurenames=True, outfile='', overwrite=True, clear_file=True, write_empty=True, return_output=False, test=False):
total_features, feature_indexes, label_output = generate_feature_indices(features, featurenames)
# This needs to be restructured, probably with a new method to iterate through images. Currently, this will not work
    # without an output file. The conflict is between retaining the ability to append to files in real-time (to prevent
# catastrophic errors from wasting eons of processing time) and having a conditional "outfile" parameter.
if outfile != '':
outfile = determine_outfile_name(outfile, overwrite)
if clear_file:
open(outfile, 'w').close()
with open(outfile, 'ab') as writefile:
csvfile = csv.writer(writefile, delimiter=',')
csvfile.writerow(label_output[0,:])
imagepaths, label_images = generate_filename_list(folder, labels, label_suffix, recursive)
numerical_output = np.zeros((1, total_features), dtype=float)
index_output = np.zeros((1, 1), dtype=object)
for imagepath in imagepaths:
print '\n'
print 'Pre-processing data...'
image_list, unmodified_image_list, imagename_list, attributes_list = generate_numpy_images(imagepath, labels=labels, label_suffix=label_suffix, label_images=label_images, levels=levels, mask_value=mask_value, use_labels=use_labels, erode=erode)
if image_list == []:
if write_empty:
empty_output = np.zeros((1, total_features + 1), dtype=object)
empty_output[0,0] = imagepath
csvfile.writerow(empty_output[0,:])
continue
print 'Pre-processing complete!'
for image_idx, image in enumerate(image_list):
print ''
print 'Working on image...'
print imagename_list[image_idx]
print 'Voxel sum...'
print np.sum(image)
print 'Image shape...'
print image.shape
if filenames:
index = imagename_list[image_idx]
else:
index = numerical_output.shape[0]
if numerical_output[0,0] == 0:
numerical_output[0, :] = generate_feature_list_method(image, unmodified_image_list[image_idx], attributes_list[image_idx], features, feature_indexes, total_features, levels, mask_value=mask_value, normalize_intensities=normalize_intensities)
index_output[0,:] = index
else:
numerical_output = np.vstack((numerical_output, generate_feature_list_method(image, unmodified_image_list[image_idx], attributes_list[image_idx], features, feature_indexes, total_features, levels, mask_value=mask_value, normalize_intensities=normalize_intensities)))
index_output = np.vstack((index_output, index))
csvfile.writerow(np.hstack((index_output[-1,:], numerical_output[-1,:])))
final_output = np.hstack((index_output, numerical_output))
print 'Feature writing complete, writing output...'
print '\n'
for row in final_output:
print row
if return_output:
return final_output
def generate_feature_list_single(vol_filename, features=['GLCM', 'morphology', 'statistics'], labels=False, label_filename='',label_suffix="-label", decisions=False, levels=255, filenames=True, featurenames=True, outfile='', overwrite=True, write_empty=True, mask_value=0, test=False, use_labels=[-1], erode=0):
total_features, feature_indexes, label_output = generate_feature_indices(features, featurenames)
if outfile != '':
outfile = determine_outfile_name(outfile, overwrite)
with open(outfile, 'ab') as writefile:
csvfile = csv.writer(writefile, delimiter=',')
csvfile.writerow(label_output[0,:])
numerical_output = np.zeros((1, total_features), dtype=float)
index_output = np.zeros((1, 1), dtype=object)
final_output = write_image_method(vol_filename, label_filename, csvfile, total_features, features, feature_indexes, numerical_output, index_output, labels=False, label_suffix='-label', levels=100, mask_value=0, use_labels=[-1], erode=0, write_empty=False)
print 'Feature writing complete, writing output...'
print '\n'
print final_output
return final_output
def generate_feature_list_parallel(folder, features=['GLCM', 'morphology', 'statistics'], recursive=False, labels=False, label_suffix="-label", decisions=False, levels=255, mask_value=0, use_labels=[-1], erode=[0,0,0], filenames=True, featurenames=True, outfile='', overwrite=True, clear_file=True, write_empty=True, return_output=False, test=False, processes=1):
total_features, feature_indexes, label_output = generate_feature_indices(features, featurenames)
if outfile != '':
outfile = determine_outfile_name(outfile, overwrite)
if clear_file:
open(outfile, 'w').close()
imagepaths, label_images = generate_filename_list(folder, labels, label_suffix, recursive)
numerical_output = np.zeros((1, total_features), dtype=float)
index_output = np.zeros((1, 1), dtype=object)
subunits = []
sublength = np.floor(len(imagepaths) / processes)
print 'Dividing data into ' + str(processes) + ' subgroups of length.. ' + str(int(sublength)) + ' units.'
for i in xrange(processes - 1):
subunits += [[imagepaths[int(i*sublength):int((i+1)*sublength)], label_images[int(i*sublength):int((i+1)*sublength)]]]
subunits += [[imagepaths[int((processes - 1)*sublength):], label_images[int((processes - 1)*sublength):]]]
subprocess = partial(generate_feature_list_chunk, total_features=total_features, feature_indexes=feature_indexes, label_output=label_output, features=features, labels=labels, label_suffix=label_suffix, levels=levels, mask_value=mask_value, use_labels=use_labels, erode=erode, write_empty=write_empty, filenames=filenames)
optimization_pool = Pool(processes)
results = optimization_pool.map(subprocess, subunits)
output_data = label_output[0,:]
stitch_index = 0
for result in results:
output_data = np.vstack((output_data, result))
final_output = output_data
with open(outfile, 'wb') as writefile:
csvfile = csv.writer(writefile, delimiter=',')
for row in final_output:
csvfile.writerow(row)
print 'Feature writing complete, writing output...'
print '\n'
for row in final_output:
print row
if return_output:
return final_output
def generate_feature_list_chunk(data, total_features, feature_indexes, label_output, features=['GLCM', 'morphology', 'statistics'], labels=False, label_suffix="-label", levels=255, mask_value=0, use_labels=[-1], erode=[0,0,0], write_empty=True, filenames=True):
imagepaths = data[0]
label_images = data[1]
numerical_output = np.zeros((1, total_features), dtype=float)
index_output = np.zeros((1, 1), dtype=object)
output_data = np.zeros((1, total_features + 1), dtype=object)
for imagepath in imagepaths:
print '\n'
print 'Pre-processing data...'
image_list, unmodified_image_list, imagename_list, attributes_list = generate_numpy_images(imagepath, labels=labels, label_suffix=label_suffix, label_images=label_images, levels=levels, mask_value=mask_value, use_labels=use_labels, erode=erode)
if image_list == []:
if write_empty:
empty_output = np.zeros((1, total_features + 1), dtype=object)
empty_output[0,0] = imagepath
                output_data = np.vstack((output_data, empty_output))  # vstack expects a tuple of arrays
continue
print 'Pre-processing complete!'
for image_idx, image in enumerate(image_list):
print ''
print 'Working on image...'
print imagename_list[image_idx]
print 'Voxel sum...'
print np.sum(image)
print 'Image shape...'
print image.shape
if filenames:
index = imagename_list[image_idx]
else:
index = numerical_output.shape[0]
if numerical_output[0,0] == 0:
numerical_output[0, :] = generate_feature_list_method(image, unmodified_image_list[image_idx], attributes_list[image_idx], features, feature_indexes, total_features, levels, mask_value=0)
index_output[0,:] = index
else:
numerical_output = np.vstack((numerical_output, generate_feature_list_method(image, unmodified_image_list[image_idx], attributes_list[image_idx], features, feature_indexes, total_features, levels, mask_value=0)))
index_output = np.vstack((index_output, index))
output_data = np.vstack((output_data, (np.hstack((index_output[-1,:], numerical_output[-1,:])))))
return output_data
def write_image_method(imagepath, label_images, csvfile, total_features, features, feature_indexes, numerical_output, index_output, labels=False, label_suffix='-label', levels=100, mask_value=0, use_labels=[-1], erode=0, write_empty=False, filenames=True):
# This function is a bit clumsy. So many parameters..
print '\n'
print 'Pre-processing data...'
image_list, unmodified_image_list, imagename_list, attributes_list = generate_numpy_images(imagepath, labels=labels, label_suffix=label_suffix, label_images=label_images, levels=levels, mask_value=mask_value, use_labels=use_labels, erode=erode)
if image_list == []:
if write_empty:
empty_output = np.zeros((1, total_features + 1), dtype=object)
empty_output[0,0] = imagepath
print 'Writing empty row in place of missing data...'
csvfile.writerow(empty_output[0,:])
else:
print 'Pre-processing complete!'
for image_idx, image in enumerate(image_list):
print ''
print 'Working on image...'
print imagename_list[image_idx]
print 'Voxel sum...'
print np.sum(image)
print 'Image shape...'
print image.shape
if filenames:
index = imagename_list[image_idx]
else:
index = numerical_output.shape[0]
if numerical_output[0,0] == 0:
numerical_output[0, :] = generate_feature_list_method(image, unmodified_image_list[image_idx], attributes_list[image_idx], features, feature_indexes, total_features, levels, mask_value=0)
index_output[0,:] = index
else:
numerical_output = np.vstack((numerical_output, generate_feature_list_method(image, unmodified_image_list[image_idx], attributes_list[image_idx], features, feature_indexes, total_features, levels, mask_value=0)))
index_output = np.vstack((index_output, index))
csvfile.writerow(np.hstack((index_output[-1,:], numerical_output[-1,:])))
return np.hstack((index_output, numerical_output))
def determine_outfile_name(outfile, overwrite=True):
write_flag = False
while not write_flag:
if not os.path.isfile(outfile):
write_flag = True
continue
if overwrite:
write_flag = True
else:
split_outfile = str.split(outfile,'.')
print split_outfile
outfile = '.'.join(split_outfile[0:-1]) + '_new.' + split_outfile[-1]
if not os.path.isfile(outfile):
write_flag = True
return outfile
def generate_feature_indices(features=['GLCM', 'morphology', 'statistics'], featurenames=True):
total_features = 0
feature_indexes = [0]
for feature in features:
total_features += feature_dictionary[feature].feature_count()
if feature_indexes == [0]:
feature_indexes = [0, feature_dictionary[feature].feature_count()]
else:
feature_indexes += [feature_indexes[-1] + feature_dictionary[feature].feature_count()]
if featurenames:
label_output = np.zeros((1, total_features+1), dtype=object)
for feature_idx, feature in enumerate(features):
label_output[0, (1+feature_indexes[feature_idx]):(1+feature_indexes[feature_idx+1])] = feature_dictionary[feature].featurename_strings()
label_output[0,0] = 'index'
return [total_features, feature_indexes, label_output]
def generate_filename_list(folder, labels=False, label_suffix='-label', recursive=False):
if recursive:
imagepaths = []
for root, dirnames, filenames in os.walk(folder):
for filename in fnmatch.filter(filenames, '*.nii*'):
imagepaths.append(os.path.join(root, filename))
else:
imagepaths = glob.glob(os.path.join(folder, "*.nii*"))
# A bit redundant; this step and the previous step could probably be combined.
imagepaths = [x for x in imagepaths if (x.endswith('.nii') or x.endswith('.nii.gz'))]
if labels:
label_images = [ x for x in imagepaths if label_suffix in x ]
else:
label_images = []
imagepaths = [ x for x in imagepaths if label_suffix not in x ]
if imagepaths == []:
raise ValueError("There are no .nii or .nii.gz images in the provided folder.")
if labels and label_images == []:
raise ValueError("There are no labels with the provided suffix in this folder. If you do not want to use labels, set the \'labels\' flag to \'False\'. If you want to change the label file suffix (default: \'-label\'), then change the \'label_suffix\' flag.")
return [imagepaths, label_images]
def generate_numpy_images(imagepath, labels=False, label_suffix='-label', label_images=[], mask_value=0, levels=255, use_labels=[-1], erode=0):
image_list = []
unmodified_image_list = []
imagename_list = []
attributes_list = []
# nifti_util.save_alternate_nifti(imagepath, levels, mask_value=mask_value)
image = nifti_util.nifti_2_numpy(imagepath)
# This is likely redundant with the basic assert function in nifti_util
if not nifti_util.assert_3D(image):
print 'Warning: image at path ' + imagepath + ' has multiple time points or otherwise greater than 3 dimensions, and will be skipped.'
return [[],[],[],[]]
if labels:
if label_suffix == '':
label_path = label_images
else:
head, tail = os.path.split(imagepath)
split_path = str.split(tail, '.')
label_path = split_path[0] + label_suffix + '.' + '.'.join(split_path[1:])
label_path = os.path.join(head, label_path)
if os.path.isfile(label_path):
label_image = nifti_util.nifti_2_numpy(label_path)
if label_image.shape != image.shape:
print 'Warning: image and label do not have the same dimensions. Imaging padding support has not yet been added. This image will be skipped.'
return [[],[],[],[]]
# In the future: create an option to analyze each frame separately.
if not nifti_util.assert_3D(label_image):
print 'Warning: image at path ' + imagepath + ' has multiple time points or otherwise greater than 3 dimensions, and will be skipped.'
return [[],[],[],[]]
label_image = label_image.astype(int)
label_indices = np.unique(label_image)
if label_indices.size == 1:
print 'Warning: image at path ' + imagepath + ' has an empty label-map, and will be skipped.'
return[[],[],[],[]]
# Will break if someone puts in '0' as a label to use.
if use_labels[0] != -1:
label_indices = np.array([0] + [x for x in label_indices if x in use_labels])
masked_images = nifti_util.mask_nifti(image, label_image, label_indices, mask_value=mask_value)
for masked_image in masked_images:
# nifti_util.check_tumor_histogram(masked_image, second_image_numpy=image, mask_value=mask_value, image_name = str.split(imagepath, '\\')[-1])
# nifti_util.check_image(masked_image, mode="maximal_slice")
unmodified_image_list += [np.copy(masked_image)]
masked_image = nifti_util.coerce_levels(masked_image, levels=levels, reference_image=image, method="divide", mask_value=mask_value)
# nifti_util.check_image(masked_image, mode="maximal_slice")
# It would be nice in the future to check if an image is too small to erode. Maybe a minimum-size parameter?
# Or maybe a "maximum volume reduction by erosion?" Hmm..
masked_image = nifti_util.erode_label(masked_image, iterations=erode)
# nifti_util.check_image(masked_image, mode="maximal_slice")
image_list += [masked_image]
filename = str.split(label_path, '\\')[-1]
if label_indices.size == 2:
imagename_list += [filename]
else:
split_filename = str.split(filename, '.')
for labelval in label_indices[1:]:
filename = split_filename[0] + '_' + str(int(labelval)) + '.' + split_filename[1]
imagename_list += [filename]
attributes_list += [nifti_util.return_nifti_attributes(imagepath)] * (label_indices.size - 1)
print 'Finished... ' + str.split(imagepath, '\\')[-1]
else:
print 'Warning: image at path ' + imagepath + ' has no label-map, and will be skipped.'
return[[],[],[],[]]
else:
image = nifti_util.coerce_levels(image, levels=levels, reference_image=image, method="divide", mask_value=mask_value)
image_list += [image]
unmodified_image_list += [image]
imagename_list += [imagepath]
attributes_list += [nifti_util.return_nifti_attributes(imagepath)]
return [image_list, unmodified_image_list, imagename_list, attributes_list]
def generate_feature_list_method(image, unmodified_image, attributes, features, feature_indexes='', total_features='', levels=-1, mask_value=0, normalize_intensities=False):
if feature_indexes == '' or total_features == '':
total_features = 0
feature_indexes = [0]
for feature in features:
total_features += feature_dictionary[feature].feature_count()
if feature_indexes == [0]:
feature_indexes = [0, feature_dictionary[feature].feature_count()]
else:
feature_indexes += [feature_indexes[-1] + feature_dictionary[feature].feature_count()]
numerical_output = np.zeros((1, total_features), dtype=float)
if (image != mask_value).sum() == 0:
print 'Warning: image is empty, either because it could not survive erosion or because of another error. It will be skipped.'
return numerical_output
for feature_idx, feature in enumerate(features):
if feature == 'GLCM':
# nifti_util.check_tumor_histogram(image, mask_value)
# nifti_util.check_image(image, mode="maximal_slice")
glcm_image = np.copy(image)
glcm_image = glcm_image.astype(int)
levels += 1
print 'Calculating GLCM...'
numerical_output[0, feature_indexes[feature_idx]:feature_indexes[feature_idx+1]] = GLCM.glcm_features(glcm_image, levels=levels)
if feature == 'morphology':
print 'Calculating morphology features...'
numerical_output[0, feature_indexes[feature_idx]:feature_indexes[feature_idx+1]] = morphology.morphology_features(unmodified_image, attributes)
if feature == 'statistics':
# Should intensity statistics be eroded? Currently, they are not, as indicated by the "unmodified image" parameter.
print 'Calculating statistical features...'
if normalize_intensities:
numerical_output[0, feature_indexes[feature_idx]:feature_indexes[feature_idx+1]] = statistics.statistics_features(glcm_image)
else:
numerical_output[0, feature_indexes[feature_idx]:feature_indexes[feature_idx+1]] = statistics.statistics_features(unmodified_image)
print '\n'
return numerical_output
def test_method():
test_folder = os.path.abspath(os.path.join(os.path.dirname(__file__),'..','test_data','test_data_features','MR_Tumor_Shape'))
generate_feature_list_batch(folder=test_folder, features=['morphology', 'statistics'], labels=True, levels=100, outfile='test_feature_results_shape.csv',test=False, mask_value=0, erode=[0,0,0], overwrite=True)
return
def test_parallel():
test_folder = os.path.abspath(os.path.join(os.path.dirname(__file__),'..','test_data','test_data_features','Phantom_GLCM'))
test_folder = '/home/administrator/data/tbData/tbType/TrainingSet'
generate_feature_list_parallel(folder=test_folder, features=['GLCM','morphology', 'statistics'], labels=True, levels=100, outfile='lung_features_results_parallel_500.csv',test=False, mask_value=0, erode=[0,0,0], overwrite=True, processes=35)
return
def parse_command_line(argv):
# This code should be run from the folder above the main "qtim_tools" folder using the command "python -m qtim_tools.qtim_features.test"
# All niftis in this folder will be processed. The program searches for a nifti file, and then checks if there is a matching labelmap file with the suffix '-label'.
# It currently loads from some built in data from the qtim_tools project, but you can change the filepath below to anywhere.
test_folder = os.path.abspath(os.path.join(os.path.dirname(__file__),'..','test_data','test_data_features','Phantom_Intensity'))
# If labels is set to False, the whole image will be processed. This can take a very long time for GLCM features especially, so it is best we stick to labels.
labels = True
# The only available features are 'GLCM', 'morphology', and 'statistics' for now.
features = ['GLCM','morphology', 'statistics']
# In order for GLCM to work correctly, an image has to be reduced to a set amount of gray-levels. Using all available levels in an image will most likely produce a useless result.
# More levels will result in more intensive computation.
levels = 100
# This will save a spreadsheet of all requested feature results.
outfile = 'test_feature_results_intensity.csv'
# If your label is for some reason masked with a value other than zero, change this parameter.
mask_value = 0
# The erode parameter will take [x,y,z] pixels off in each dimension. On many volumes, it is not useful to erode in the z (axial) slice because of high slice thickness.
# Currently, the erode parameter only applies to GLCM. It does not apply to intensity statistic features, although maybe it should.
erode = [0,0,0]
# If overwrite is False, then the program will try to save to the chosen filename with '_copy' appended if the chosen filename already exists.
overwrite = True
extract_features.generate_feature_list_batch(folder=test_folder, features=features, labels=labels, levels=levels, outfile=outfile, mask_value=mask_value, erode=erode, overwrite=overwrite)
def test():
# This code should be run from the folder above the main "qtim_tools" folder using the command "python -m qtim_tools.qtim_features.test"
# All niftis in this folder will be processed. The program searches for a nifti file, and then checks if there is a matching labelmap file with the suffix '-label'.
# It currently loads from some built in data from the qtim_tools project, but you can change the filepath below to anywhere.
test_folder = os.path.abspath(os.path.join(os.path.dirname(__file__),'..','test_data','test_data_features','Phantom_Intensity'))
# If labels is set to False, the whole image will be processed. This can take a very long time for GLCM features especially, so it is best we stick to labels.
labels = True
# The only available features are 'GLCM', 'morphology', and 'statistics' for now.
features = ['GLCM','morphology', 'statistics']
# In order for GLCM to work correctly, an image has to be reduced to a set amount of gray-levels. Using all available levels in an image will most likely produce a useless result.
# More levels will result in more intensive computation.
levels = 100
# This will save a spreadsheet of all requested feature results.
outfile = 'test_feature_results_intensity.csv'
# If your label is for some reason masked with a value other than zero, change this parameter.
mask_value = 0
# The erode parameter will take [x,y,z] pixels off in each dimension. On many volumes, it is not useful to erode in the z (axial) slice because of high slice thickness.
# Currently, the erode parameter only applies to GLCM. It does not apply to intensity statistic features, although maybe it should.
erode = [0,0,0]
# If overwrite is False, then the program will try to save to the chosen filename with '_copy' appended if the chosen filename already exists.
overwrite = True
generate_feature_list_batch(folder=test_folder, features=features, labels=labels, levels=levels, outfile=outfile, mask_value=mask_value, erode=erode, overwrite=overwrite)
def extract_features(folder, outfile, labels=True, features=['GLCM','morphology', 'statistics'], levels = 100, mask_value = 0, erode = [0,0,0], overwrite = True, label_suffix='-label', universal_label=''):
generate_feature_list_batch(folder=folder, outfile=outfile, labels=labels, features=features, levels=levels, mask_value=mask_value, erode=erode, overwrite=overwrite, label_suffix=label_suffix, universal_label=universal_label)
if __name__ == "__main__":
np.set_printoptions(suppress=True, precision=2)
# test_method()
test_parallel()
|
[
"andrew_beers@alumni.brown.edu"
] |
andrew_beers@alumni.brown.edu
|
44094d85f07d85084c8dda9c6d2a8e41f006990c
|
393fccd7a7e07afe268ac12b4f59d4e9f617f9b7
|
/octoprint_octoeverywhere/Proto/MessageContext.py
|
a00df7d13937462fc9ff06e0088f4fec69b77056
|
[] |
no_license
|
pherz1/OctoPrint-OctoEverywhere
|
a1f83617da182413c8603cfaf8868f4435667a59
|
1fa96dfc0626aea27a3ecb41e2fa550f9ae038c3
|
refs/heads/master
| 2023-07-15T11:51:39.339506
| 2021-08-25T19:43:10
| 2021-08-25T19:43:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 241
|
py
|
# automatically generated by the FlatBuffers compiler, do not modify
# namespace: Proto
class MessageContext(object):
NONE = 0
HandshakeSyn = 1
HandshakeAck = 2
WebStreamMsg = 3
OctoNotification = 4
OctoSummon = 5
|
[
"quinnd@outlook.com"
] |
quinnd@outlook.com
|
8860cc590b2f473239affd080865e3630d19ff53
|
4e53d797027306ddacaa9f0a1d8d6de436dd98e7
|
/test.month.py
|
0168e792e036809624683c154deed36cbaffd855
|
[
"MIT"
] |
permissive
|
sschakraborty/ml-electricity
|
e2e7745c66f43dd07d61910218a7ee1ef3506447
|
bacfa64297bdb01ef8b5d4b5fea86fced3854a32
|
refs/heads/master
| 2020-05-16T08:53:01.067610
| 2019-04-23T03:54:07
| 2019-04-23T03:54:07
| 182,927,071
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,155
|
py
|
import pandas as pd
import datetime
from sklearn.neural_network import MLPRegressor
import pickle
PATH = 'data.csv'
IN = 'context.month.nn'
print('Using path %s for test data' % PATH)
print('Deserializing model from %s' % IN)
nn = pickle.load(open(IN, 'rb'))
print('Model deserialized and loaded...')
def flatten_date(date_str):
dt = datetime.datetime.strptime(date_str, '%d/%m/%y %H:%M')
return [dt.month, dt.day]
def create_test(df, indices):
master = []
for i, row in df[indices].iterrows():
it = []
for index in indices:
it.append(row[index])
master.append(it)
return master
#Read the data
print('Beginning to read CSV data from %s' % PATH)
df_all = pd.read_csv(PATH, header=None)
print('Data read successfully')
indices = [4, 5, 6, 9, 10, 13, 14, 15, 16]
# Pull timestamp from extracted dataframe
print('Processing and staging dataframe...')
df_input = df_all[0].tolist()
df_input = list(map(flatten_date, df_input))
df_output = create_test(df_all, indices)
print('Staging complete')
result = nn.score(df_input, df_output)
print('------------------------------')
print('Score (R^2) of the loaded model on the test data')
print(result)
|
[
"sschakraborty@hotmail.com"
] |
sschakraborty@hotmail.com
|
7af4118ca6761434647028e93af78f1b62bff180
|
641d85b45413fda8e7d16081a455758f522ba317
|
/person_and_phone.py
|
a38093e9740b04f7cc0bb64a3acffdb959419967
|
[] |
no_license
|
MrLuciferM/SemiAutomated-Online-ExamProctoring
|
dbfde5098743afe7784a3c25924fbc4d47e3084c
|
85aab91bc11aa6db8c2c2cf4dfb7ee074fb72998
|
refs/heads/master
| 2023-06-25T16:35:05.677394
| 2021-08-02T13:38:38
| 2021-08-02T13:38:38
| 391,954,553
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 12,104
|
py
|
import tensorflow as tf
import numpy as np
import cv2
from tensorflow.keras import Model
from tensorflow.keras.layers import (
Add,
Concatenate,
Conv2D,
Input,
Lambda,
LeakyReLU,
UpSampling2D,
ZeroPadding2D,
BatchNormalization
)
from tensorflow.keras.regularizers import l2
# import wget
def load_darknet_weights(model, weights_file):
'''
Helper function used to load darknet weights.
:param model: Object of the Yolo v3 model
:param weights_file: Path to the file with Yolo V3 weights
'''
#Open the weights file
wf = open(weights_file, 'rb')
major, minor, revision, seen, _ = np.fromfile(wf, dtype=np.int32, count=5)
#Define names of the Yolo layers (just for a reference)
layers = ['yolo_darknet',
'yolo_conv_0',
'yolo_output_0',
'yolo_conv_1',
'yolo_output_1',
'yolo_conv_2',
'yolo_output_2']
for layer_name in layers:
sub_model = model.get_layer(layer_name)
for i, layer in enumerate(sub_model.layers):
if not layer.name.startswith('conv2d'):
continue
#Handles the special, custom Batch normalization layer
batch_norm = None
if i + 1 < len(sub_model.layers) and \
sub_model.layers[i + 1].name.startswith('batch_norm'):
batch_norm = sub_model.layers[i + 1]
filters = layer.filters
size = layer.kernel_size[0]
in_dim = layer.input_shape[-1]
if batch_norm is None:
conv_bias = np.fromfile(wf, dtype=np.float32, count=filters)
else:
# darknet [beta, gamma, mean, variance]
bn_weights = np.fromfile(
wf, dtype=np.float32, count=4 * filters)
# tf [gamma, beta, mean, variance]
bn_weights = bn_weights.reshape((4, filters))[[1, 0, 2, 3]]
# darknet shape (out_dim, in_dim, height, width)
conv_shape = (filters, in_dim, size, size)
conv_weights = np.fromfile(
wf, dtype=np.float32, count=np.product(conv_shape))
# tf shape (height, width, in_dim, out_dim)
conv_weights = conv_weights.reshape(
conv_shape).transpose([2, 3, 1, 0])
if batch_norm is None:
layer.set_weights([conv_weights, conv_bias])
else:
layer.set_weights([conv_weights])
batch_norm.set_weights(bn_weights)
assert len(wf.read()) == 0, 'failed to read all data'
wf.close()
def draw_outputs(img, outputs, class_names):
'''
    Helper util function that draws predictions on the image.
:param img: Loaded image
:param outputs: YoloV3 predictions
:param class_names: list of all class names found in the dataset
'''
boxes, objectness, classes, nums = outputs
boxes, objectness, classes, nums = boxes[0], objectness[0], classes[0], nums[0]
wh = np.flip(img.shape[0:2])
for i in range(nums):
x1y1 = tuple((np.array(boxes[i][0:2]) * wh).astype(np.int32))
x2y2 = tuple((np.array(boxes[i][2:4]) * wh).astype(np.int32))
img = cv2.rectangle(img, x1y1, x2y2, (255, 0, 0), 2)
img = cv2.putText(img, '{} {:.4f}'.format(
class_names[int(classes[i])], objectness[i]),
x1y1, cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 0, 255), 2)
return img
yolo_anchors = np.array([(10, 13), (16, 30), (33, 23), (30, 61), (62, 45),
(59, 119), (116, 90), (156, 198), (373, 326)],
np.float32) / 416
yolo_anchor_masks = np.array([[6, 7, 8], [3, 4, 5], [0, 1, 2]])
def DarknetConv(x, filters, kernel_size, strides=1, batch_norm=True):
'''
Call this function to define a single Darknet convolutional layer
:param x: inputs
:param filters: number of filters in the convolutional layer
:param kernel_size: Size of kernel in the Conv layer
:param strides: Conv layer strides
:param batch_norm: Whether or not to use the custom batch norm layer.
'''
#Image padding
if strides == 1:
padding = 'same'
else:
x = ZeroPadding2D(((1, 0), (1, 0)))(x) # top left half-padding
padding = 'valid'
#Defining the Conv layer
x = Conv2D(filters=filters, kernel_size=kernel_size,
strides=strides, padding=padding,
use_bias=not batch_norm, kernel_regularizer=l2(0.0005))(x)
if batch_norm:
x = BatchNormalization()(x)
x = LeakyReLU(alpha=0.1)(x)
return x
def DarknetResidual(x, filters):
'''
Call this function to define a single DarkNet Residual layer
:param x: inputs
:param filters: number of filters in each Conv layer.
'''
prev = x
x = DarknetConv(x, filters // 2, 1)
x = DarknetConv(x, filters, 3)
x = Add()([prev, x])
return x
def DarknetBlock(x, filters, blocks):
'''
Call this function to define a single DarkNet Block (made of multiple Residual layers)
:param x: inputs
:param filters: number of filters in each Residual layer
:param blocks: number of Residual layers in the block
'''
x = DarknetConv(x, filters, 3, strides=2)
for _ in range(blocks):
x = DarknetResidual(x, filters)
return x
def Darknet(name=None):
'''
The main function that creates the whole DarkNet.
'''
x = inputs = Input([None, None, 3])
x = DarknetConv(x, 32, 3)
x = DarknetBlock(x, 64, 1)
x = DarknetBlock(x, 128, 2) # skip connection
x = x_36 = DarknetBlock(x, 256, 8) # skip connection
x = x_61 = DarknetBlock(x, 512, 8)
x = DarknetBlock(x, 1024, 4)
return tf.keras.Model(inputs, (x_36, x_61, x), name=name)
def YoloConv(filters, name=None):
'''
Call this function to define the Yolo Conv layer.
    :param filters: number of filters for the conv layer
:param name: name of the layer
'''
def yolo_conv(x_in):
if isinstance(x_in, tuple):
inputs = Input(x_in[0].shape[1:]), Input(x_in[1].shape[1:])
x, x_skip = inputs
# concat with skip connection
x = DarknetConv(x, filters, 1)
x = UpSampling2D(2)(x)
x = Concatenate()([x, x_skip])
else:
x = inputs = Input(x_in.shape[1:])
x = DarknetConv(x, filters, 1)
x = DarknetConv(x, filters * 2, 3)
x = DarknetConv(x, filters, 1)
x = DarknetConv(x, filters * 2, 3)
x = DarknetConv(x, filters, 1)
return Model(inputs, x, name=name)(x_in)
return yolo_conv
def YoloOutput(filters, anchors, classes, name=None):
'''
This function defines outputs for the Yolo V3. (Creates output projections)
:param filters: number of filters for the conv layer
:param anchors: anchors
:param classes: list of classes in a dataset
:param name: name of the layer
'''
def yolo_output(x_in):
x = inputs = Input(x_in.shape[1:])
x = DarknetConv(x, filters * 2, 3)
x = DarknetConv(x, anchors * (classes + 5), 1, batch_norm=False)
x = Lambda(lambda x: tf.reshape(x, (-1, tf.shape(x)[1], tf.shape(x)[2],
anchors, classes + 5)))(x)
return tf.keras.Model(inputs, x, name=name)(x_in)
return yolo_output
def yolo_boxes(pred, anchors, classes):
'''
Call this function to get bounding boxes from network predictions
:param pred: Yolo predictions
:param anchors: anchors
:param classes: List of classes from the dataset
'''
# pred: (batch_size, grid, grid, anchors, (x, y, w, h, obj, ...classes))
grid_size = tf.shape(pred)[1]
    #Extract box coordinates from prediction vectors
box_xy, box_wh, objectness, class_probs = tf.split(
pred, (2, 2, 1, classes), axis=-1)
    #Normalize coordinates
box_xy = tf.sigmoid(box_xy)
objectness = tf.sigmoid(objectness)
class_probs = tf.sigmoid(class_probs)
pred_box = tf.concat((box_xy, box_wh), axis=-1) # original xywh for loss
# !!! grid[x][y] == (y, x)
grid = tf.meshgrid(tf.range(grid_size), tf.range(grid_size))
grid = tf.expand_dims(tf.stack(grid, axis=-1), axis=2) # [gx, gy, 1, 2]
box_xy = (box_xy + tf.cast(grid, tf.float32)) / \
tf.cast(grid_size, tf.float32)
box_wh = tf.exp(box_wh) * anchors
box_x1y1 = box_xy - box_wh / 2
box_x2y2 = box_xy + box_wh / 2
bbox = tf.concat([box_x1y1, box_x2y2], axis=-1)
return bbox, objectness, class_probs, pred_box
def yolo_nms(outputs, anchors, masks, classes):
# boxes, conf, type
b, c, t = [], [], []
for o in outputs:
b.append(tf.reshape(o[0], (tf.shape(o[0])[0], -1, tf.shape(o[0])[-1])))
c.append(tf.reshape(o[1], (tf.shape(o[1])[0], -1, tf.shape(o[1])[-1])))
t.append(tf.reshape(o[2], (tf.shape(o[2])[0], -1, tf.shape(o[2])[-1])))
bbox = tf.concat(b, axis=1)
confidence = tf.concat(c, axis=1)
class_probs = tf.concat(t, axis=1)
scores = confidence * class_probs
boxes, scores, classes, valid_detections = tf.image.combined_non_max_suppression(
boxes=tf.reshape(bbox, (tf.shape(bbox)[0], -1, 1, 4)),
scores=tf.reshape(
scores, (tf.shape(scores)[0], -1, tf.shape(scores)[-1])),
max_output_size_per_class=100,
max_total_size=100,
iou_threshold=0.5,
score_threshold=0.6
)
return boxes, scores, classes, valid_detections
def YoloV3(size=None, channels=3, anchors=yolo_anchors,
masks=yolo_anchor_masks, classes=80):
x = inputs = Input([size, size, channels], name='input')
x_36, x_61, x = Darknet(name='yolo_darknet')(x)
x = YoloConv(512, name='yolo_conv_0')(x)
output_0 = YoloOutput(512, len(masks[0]), classes, name='yolo_output_0')(x)
x = YoloConv(256, name='yolo_conv_1')((x, x_61))
output_1 = YoloOutput(256, len(masks[1]), classes, name='yolo_output_1')(x)
x = YoloConv(128, name='yolo_conv_2')((x, x_36))
output_2 = YoloOutput(128, len(masks[2]), classes, name='yolo_output_2')(x)
boxes_0 = Lambda(lambda x: yolo_boxes(x, anchors[masks[0]], classes),
name='yolo_boxes_0')(output_0)
boxes_1 = Lambda(lambda x: yolo_boxes(x, anchors[masks[1]], classes),
name='yolo_boxes_1')(output_1)
boxes_2 = Lambda(lambda x: yolo_boxes(x, anchors[masks[2]], classes),
name='yolo_boxes_2')(output_2)
outputs = Lambda(lambda x: yolo_nms(x, anchors, masks, classes),
name='yolo_nms')((boxes_0[:3], boxes_1[:3], boxes_2[:3]))
return Model(inputs, outputs, name='yolov3')
def weights_download(out='models/yolov3.weights'):
    import wget  # imported here so the rest of the module works even if wget is not installed
    _ = wget.download('https://pjreddie.com/media/files/yolov3.weights', out=out)
# weights_download() # to download weights
# yolo = YoloV3()
# load_darknet_weights(yolo, 'models/yolov3.weights')
# cap = cv2.VideoCapture(0)
# while(True):
# ret, image = cap.read()
# if ret == False:
# break
# img = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
# img = cv2.resize(img, (320, 320))
# img = img.astype(np.float32)
# img = np.expand_dims(img, 0)
# img = img / 255
# class_names = [c.strip() for c in open("models/classes.TXT").readlines()]
# boxes, scores, classes, nums = yolo(img)
# count=0
# for i in range(nums[0]):
# if int(classes[0][i] == 0):
# count +=1
# if int(classes[0][i] == 67):
# print('Mobile Phone detected')
# if count == 0:
# print('No person detected')
# elif count > 1:
# print('More than one person detected')
# image = draw_outputs(image, (boxes, scores, classes, nums), class_names)
# cv2.imshow('Prediction', image)
# if cv2.waitKey(1) & 0xFF == ord('q'):
# break
# cap.release()
# cv2.destroyAllWindows()
|
[
"pps.kunal.7@gmail.com"
] |
pps.kunal.7@gmail.com
|
a434c943b8afac2a3ba516952790983f4bebf8d9
|
def27d5864764b877b6786835ec97f2bd74c6ba8
|
/easy/HammingDistance.py
|
b9cb3fe45c35fdf770719e3a32aa986bf2a73a40
|
[] |
no_license
|
bolan2014/leetcode
|
f6cf38a49a9250abeb36543ea2498062c58e811d
|
1c35fde3a65c4f216218f459736d4c39a29980d5
|
refs/heads/master
| 2021-04-09T16:59:41.494568
| 2017-05-10T03:47:14
| 2017-05-10T03:47:14
| 46,648,353
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 466
|
py
|
class Solution(object):
def hammingDistance(self, x, y):
"""
:type x: int
:type y: int
:rtype: int
"""
bix, biy = bin(x)[2:], bin(y)[2:]
if len(bix) > len(biy):
biy = (len(bix) - len(biy)) * '0' + biy
else:
bix = (len(biy) - len(bix)) * '0' + bix
cnt = 0
for i in range(len(bix)):
if bix[i] != biy[i]:
cnt += 1
return cnt
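# Hedged alternative (not part of the original submission): for non-negative
# integers the Hamming distance is simply the popcount of x XOR y.
def hamming_distance_xor(x, y):
    return bin(x ^ y).count('1')

assert hamming_distance_xor(1, 4) == 2  # 0b001 vs 0b100 differ in two bits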
|
[
"1139217488@qq.com"
] |
1139217488@qq.com
|
2f2aa9fda525b157cfa80787be1082388c43af77
|
0cc95e221a473640d32662badb4224b9af7f4696
|
/supporting_bot.py
|
50714b6f15023cdd602efd6e21f8625cb6f9b83d
|
[] |
no_license
|
xGolden/ow-discord-bot
|
7fe5bd879f53b6407739cd17436fff8161c060e0
|
1ee501e714d52d8d3676a1c3b13931f9823114e7
|
refs/heads/master
| 2021-01-23T01:27:00.062995
| 2017-03-21T18:04:34
| 2017-03-21T18:04:34
| 85,911,454
| 0
| 0
| null | 2017-03-23T05:42:39
| 2017-03-23T05:42:39
| null |
UTF-8
|
Python
| false
| false
| 3,938
|
py
|
import discord
from discord.ext import commands
import asyncio
import gaycode  # where we store the bot's token, so it isn't publicly displayed on github
client = discord.Client()
supportList = [] #list of discord members signed up to support mixes
commandString = '!join !quit (admin !reset !shutdown)'
infoString = 'People waiting to play\n-----------'
#channelID = '288490815200821259' #channel.id identifier for the channel
channelID = '288537682538266625' #testsupport
#channelID = '288497909522104323' #test1
modRole = 'ow-support-admin'
@client.event
async def on_ready():
print('Logged in as')
print(client.user.name)
print(client.user.id)
print('------')
await printWelcomeMessage()
@client.event
async def on_message(message):
#only worry about messages from a specific channel
if message.channel.id == channelID:
if message.content.startswith('!join'):
if message.author not in supportList:
supportList.append(message.author)
await printPlayerList(message.channel)
elif message.content.startswith('!quit'):
if message.author in supportList:
supportList.remove(message.author)
#await client.send_message(message.channel, '{} quit'.format(message.author.name))
await printPlayerList(message.channel)
else:
await client.delete_message(message)
elif message.content.startswith('!list'):
await printPlayerList(message.channel)
elif message.content.startswith('!channels'):
await client.delete_message(message)
if checkIfRole(message.author, modRole):
channels = client.get_all_channels()
for c in channels:
print('Channel \'{}\' has ID \'{}\' on server \'{}\''.format(c.name, c.id, c.server.name))
elif message.content.startswith('!reset'):
await client.delete_message(message)
if checkIfRole(message.author, modRole):
supportList[:] = []
await printPlayerList(message.channel)
elif message.content.startswith('!shutdown'):
if checkIfRole(message.author, modRole):
await purgeChannel(message.channel)
await client.logout()
elif message.content.startswith('!clear'):
await purgeChannel(message.channel)
else:
if message.author != client.user:
#print('deleting message from {}'.format(message.author.name))
await client.delete_message(message)
async def printPlayerList(channel):
await purgeChannel(channel)
str = ''
count = 1
for member in supportList:
str += '{}. {}\n'.format(count, member.name)
count = count + 1
#await client.send_message(channel, commandString)
#await client.send_message(channel, 'People waiting to play ({})'.format(len(supportList)))
#await client.send_message(channel, '----------')
#for member in supportList:
# await client.send_message(channel, '{}. {}'.format(count, member.name))
# count = count + 1
await client.send_message(channel, '```{}\n\n{}\n{}```'.format(commandString, infoString, str))
async def purgeChannel(channel):
await client.purge_from(channel, limit=500, check=None)
async def printWelcomeMessage():
#look for channel via ID
channels = client.get_all_channels()
for c in channels:
if c.id == channelID:
await client.send_message(c, '```{}\n\n{}```'.format(commandString, infoString))
break
def checkIfRole(user, role):
role = discord.utils.find(lambda r: r.name == role, user.roles)
#print('role {}'.format(role))
return role is not None
client.run(gaycode.token)
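# Hedged note (not part of the original bot): gaycode.py is only expected to
# define a module-level variable `token` holding the Discord bot token, e.g.
#   token = 'YOUR-BOT-TOKEN'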
|
[
"slizizy@gmail.com"
] |
slizizy@gmail.com
|
683cb94f99b944c57b75bcff395c4d70823f1021
|
27acd9eeb0d2b9b6326cc0477e7dbb84341e265c
|
/test/vraag4/src/isbn/156.py
|
5d83c65f74ee33e129c19964d85548161b6c4135
|
[] |
no_license
|
VerstraeteBert/algos-ds
|
e0fe35bc3c5b7d8276c07250f56d3719ecc617de
|
d9215f11cdfa1a12a3b19ade3b95fa73848a636c
|
refs/heads/master
| 2021-07-15T13:46:58.790446
| 2021-02-28T23:28:36
| 2021-02-28T23:28:36
| 240,883,220
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,113
|
py
|
def isISBN(code):
if not (
isinstance(code, str) and
len(code) == 13 and
( code.startswith('978') or code.startswith('979') ) and
code.isdigit()
):
return 0
controle=0
for i in range(12):
if i%2:
controle += 3* int(code[i])
else:
controle += int(code[i])
cc = controle % 10
cc = (10 - cc) % 10
return cc == int(code[-1])
def overzicht(codes):
groepen = {}
for i in range(11):
groepen[i] = 0
for code in codes:
if not isISBN(code):
groepen[10] += 1
else:
groepen[int(code[3])] += 1
print('Engelstalige landen: {}'.format(groepen[0] + groepen[1]))
print('Franstalige landen: {}'.format(groepen[2]))
print('Duitstalige landen: {}'.format(groepen[3]))
print('Japan: {}'.format(groepen[4]))
print('Russischtalige landen: {}'.format(groepen[5]))
print('China: {}'.format(groepen[7]))
print('Overige landen: {}'.format(groepen[6] + groepen[8] + groepen[9]))
print('Fouten: {}'.format(groepen[10]))
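# Hedged usage sketch (not part of the original file); the codes below are
# purely illustrative inputs for the two functions defined above.
if __name__ == '__main__':
    print(isISBN('9789027439642'))         # 13 digits, '978' prefix, check digit matches
    overzicht(['9789027439642', '12345'])  # the second entry is counted under 'Fouten'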
|
[
"bertverstraete22@gmail.com"
] |
bertverstraete22@gmail.com
|
84eaea2ed7ce2427efd752c08dbb61f7b8ef9300
|
ec48e11593717df874746092ca2a127b6de2f49f
|
/examples/python/file_reader/file_reader.py
|
abd6ccc51adfb4db8498839ff70e6c19b3d28cc0
|
[] |
no_license
|
blu-corner/codec
|
44842673e63dbc9e50077f805de405b121225ca4
|
a7582a9384e26452e45c2c36952cb7424a52a6ca
|
refs/heads/master
| 2022-07-25T04:24:39.555286
| 2022-06-06T10:15:12
| 2022-06-06T10:15:12
| 152,606,392
| 9
| 13
| null | 2022-10-18T16:00:33
| 2018-10-11T14:36:13
|
C++
|
UTF-8
|
Python
| false
| false
| 630
|
py
|
#!/usr/bin/env python
# import CommonDataRepresentation as Cdr
import Codecs as Codec
import os
import sys
import mmap
def getDecodedMessage(filename, codec):
idx = 0
with open(filename, 'rb') as f:
while(True):
data = f.read()
try:
res = codec.decode(data)
idx+=res.getUsed()
f.seek(idx)
yield res.getCdr()
except:
return
def main():
codec = Codec.lseCodec.get()
for msg in getDecodedMessage(sys.argv[1], codec):
print(msg.toString())
if __name__ == '__main__':
main()
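# Hedged usage note (not part of the original example): the script expects the
# path of a binary capture file as its first argument, e.g.
#   python file_reader.py capture.bin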
|
[
"alejandro.navarro@neueda.com"
] |
alejandro.navarro@neueda.com
|
3a211aefea4fd4b315ad6dfe162aad8181b79805
|
0bf740fa6d2f3680c8e47afa8b51a1a468ce2d7e
|
/blog/migrations/0001_initial.py
|
a317d4e56293fee4e7cbba6719c52d6e47d39cba
|
[] |
no_license
|
chsailakshmi/first-blog
|
85495c55a69323305e1d843aeecb662a0fd0c183
|
ec0884db15955cd5a5ced38066516d9d7c5f2ec5
|
refs/heads/master
| 2020-04-18T11:55:34.818226
| 2019-11-21T07:05:19
| 2019-11-21T07:05:19
| 65,983,628
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,050
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.9 on 2016-08-19 17:28
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=200)),
('text', models.TextField()),
('created_date', models.DateTimeField(default=django.utils.timezone.now)),
('published_date', models.DateTimeField(blank=True, null=True)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
|
[
"admin@xyz.com"
] |
admin@xyz.com
|
67bd56ca074f5038ba9301b87e29b5ac1ad3b377
|
728a703820098d6fe717187478fc856abfbbf7f2
|
/Boggle.py
|
172096d610bca81ae377f2364e6078bf946bb1cf
|
[] |
no_license
|
nipayne/boggle
|
cb34e45f6ee1afccc1a6386a881d0cf325dfb2b0
|
6ed8060fc6a6e990781b2be56c3714164cf2820e
|
refs/heads/main
| 2023-08-14T20:26:30.410947
| 2021-09-20T00:53:41
| 2021-09-20T00:53:41
| 408,272,114
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,551
|
py
|
import pygame
import time
import math
import random
pygame.init()
window = pygame.display.set_mode((600, 700))
clock = pygame.time.Clock()
points = []
lines = []
begin = False
anim = False
font = pygame.font.SysFont('didot.ttc', 70)
newBoard = True
while True:
letters = {
1:('a', 'a', 'a', 'f', 'r', 's'),
2:('a', 'a', 'e', 'e', 'e', 'e'),
3:('a', 'a', 'f', 'i', 'r', 's'),
4:('a', 'd', 'e', 'n', 'n', 'n'),
5:('a', 'e', 'e', 'e', 'e', 'm'),
6:('a', 'e', 'e', 'g', 'm', 'u'),
7:('a', 'e', 'g', 'm', 'n', 'n'),
8:('a', 'f', 'i', 'r', 's', 'y'),
9:('b', 'j', 'k', 'q', 'x', 'z'),
10:('c', 'c', 'e', 'n', 's', 't'),
11:('c', 'e', 'i', 'i', 'l', 't'),
12:('c', 'e', 'i', 'l', 'p', 't'),
13:('c', 'e', 'i', 'p', 's', 't'),
14:('d', 'd', 'h', 'n', 'o', 't'),
15:('d', 'h', 'h', 'l', 'o', 'r'),
16:('d', 'h', 'l', 'n', 'o', 'r'),
17:('d', 'h', 'l', 'n', 'o', 'r'),
18:('e', 'i', 'i', 'i', 't', 't'),
19:('e', 'm', 'o', 't', 't', 't'),
20:('e', 'n', 's', 's', 's', 'u'),
21:('f', 'i', 'p', 'r', 's', 'y'),
22:('g', 'o', 'r', 'r', 'v', 'w'),
23:('i', 'p', 'r', 'r', 'r', 'y'),
24:('n', 'o', 'o', 't', 'u', 'w'),
25:('o', 'o', 'o', 't', 't', 'u')
}
window.fill((0, 0, 0))
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
raise SystemExit  # leave the program; otherwise the loop keeps drawing after quit
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_r:
newBoard = True
if newBoard:
start_ticks = pygame.time.get_ticks()
dice = list(letters.keys())
random.shuffle(dice)
indices = []
for i in range(25):
indices.append(random.randint(0,5))
newBoard = False
for i in range(6):
pygame.draw.line(window, (255,255,255), ((i+1)*100 - 50, 50), ((i+1)*100 - 50,550))
pygame.draw.line(window, (255,255,255), (50, (i+1)*100 - 50), (550, (i+1)*100 - 50))
for x in range(5):
for y in range(5):
current = letters[dice[5*x + y]][indices[5*x+y]].upper()
l =font.render(current, True, (255,0,0))
window.blit(l, ((y+1)*100 - 20, (x+1)*100 - 20))
elapsed_ms = pygame.time.get_ticks() - start_ticks  # milliseconds since the board was generated
t = font.render(str(round(elapsed_ms/1000)), True, (255,0,0))
window.blit(t, (250,650))
pygame.display.update()
clock.tick(30)
|
[
"noreply@github.com"
] |
nipayne.noreply@github.com
|
debe5f15c52bb08f8beadfea06a498d86d7c81c4
|
27880c807b97b3b318d002a547680c6881acf460
|
/tests/argparse/special/test_overwrite.py
|
a4721283725798b1b7e6875be3aed206d66f9fc3
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
sbrodehl/miniflask
|
a1ebb809d544fbc235044624af9193982f01aced
|
55b350b951ad2120ea13a986f742523206f407c6
|
refs/heads/master
| 2022-11-05T05:18:43.383396
| 2022-09-14T15:26:17
| 2022-09-14T15:26:17
| 252,498,534
| 0
| 0
| null | 2020-04-02T15:46:39
| 2020-04-02T15:46:39
| null |
UTF-8
|
Python
| false
| false
| 2,702
|
py
|
from pathlib import Path
import pytest
import miniflask # noqa: E402
def test_setup(capsys):
mf = miniflask.init(
module_dirs=str(Path(__file__).parent / "modules"),
debug=True
)
mf.load(["defaults"])
mf.parse_args([
"--var_default_override_twice_and_cli", "1114"
])
captured = capsys.readouterr()
mf.event.print_all()
captured = capsys.readouterr()
assert captured.out == """
modules.defaults.var_default: 1
modules.defaults.var_default_override: 2
modules.defaults.var_default_override_twice: 3
modules.defaults.var_default_override_twice_and_cli: 1114
""".lstrip()
def test_override(capsys):
mf = miniflask.init(
module_dirs=str(Path(__file__).parent / "modules"),
debug=True
)
mf.load(["defaults", "defaults_override"])
mf.parse_args([
"--var_default_override_twice_and_cli", "1114"
])
captured = capsys.readouterr()
mf.event.print_all()
captured = capsys.readouterr()
assert captured.out == """
modules.defaults.var_default: 1
modules.defaults.var_default_override: 12
modules.defaults.var_default_override_twice: 13
modules.defaults.var_default_override_twice_and_cli: 1114
""".lstrip()
def test_override_twice(capsys):
mf = miniflask.init(
module_dirs=str(Path(__file__).parent / "modules"),
debug=True
)
mf.load(["defaults", "defaults_override", "defaults_override_twice"])
mf.parse_args([
"--var_default_override_twice_and_cli", "1114"
])
captured = capsys.readouterr()
mf.event.print_all()
captured = capsys.readouterr()
assert captured.out == """
modules.defaults.var_default: 1
modules.defaults.var_default_override: 12
modules.defaults.var_default_override_twice: 113
modules.defaults.var_default_override_twice_and_cli: 1114
""".lstrip()
def test_override_conflict():
mf = miniflask.init(
module_dirs=str(Path(__file__).parent / "modules"),
debug=True
)
mf.load(["defaults", "defaults2", "defaults_override"])
with pytest.raises(miniflask.exceptions.RegisterError):
mf.parse_args([])
mf.event.print_all()
def test_override_scoped_absolute():
mf = miniflask.init(
module_dirs=str(Path(__file__).parent / "modules"),
debug=True
)
mf.load(["defaults", "defaults2", "defaults_override_scoped_absolute"])
mf.parse_args([])
mf.event.print_all()
def test_override_scoped_relative():
mf = miniflask.init(
module_dirs=str(Path(__file__).parent / "modules"),
debug=True
)
mf.load(["defaults", "defaults2", "defaults_override_scoped_relative"])
mf.parse_args([])
mf.event.print_all()
|
[
"dhartmann@uni-mainz.de"
] |
dhartmann@uni-mainz.de
|
c453f63b56b29011977ee32465c52b69a612a70d
|
630fe47bb5aa5e49b45ab101d87c2dd2c53d180f
|
/venv/Lib/site-packages/com/vmware/nsx/node/aaa/providers/vidm_client.py
|
b5c31723c754c80b2bea2a739a2388630213feb8
|
[] |
no_license
|
shrivastava-himanshu/Leetcode_practice
|
467497a58d82ff3ae2569d5e610dc6f27a1f31d6
|
4c59799947c2b17bfd22ca2a08707ef85e84a913
|
refs/heads/main
| 2023-06-12T13:14:45.381839
| 2021-07-05T04:09:05
| 2021-07-05T04:09:05
| 367,546,005
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,544
|
py
|
# -*- coding: utf-8 -*-
#---------------------------------------------------------------------------
# Copyright 2021 VMware, Inc. All rights reserved.
# AUTO GENERATED FILE -- DO NOT MODIFY!
#
# vAPI stub file for package com.vmware.nsx.node.aaa.providers.vidm.
#---------------------------------------------------------------------------
"""
"""
__author__ = 'VMware, Inc.'
__docformat__ = 'restructuredtext en'
import sys
from vmware.vapi.bindings import type
from vmware.vapi.bindings.converter import TypeConverter
from vmware.vapi.bindings.enum import Enum
from vmware.vapi.bindings.error import VapiError
from vmware.vapi.bindings.struct import VapiStruct
from vmware.vapi.bindings.stub import (
ApiInterfaceStub, StubFactoryBase, VapiInterface)
from vmware.vapi.bindings.common import raise_core_exception
from vmware.vapi.data.validator import (UnionValidator, HasFieldsOfValidator)
from vmware.vapi.exception import CoreException
from vmware.vapi.lib.constants import TaskType
from vmware.vapi.lib.rest import OperationRestMetadata
class Status(VapiInterface):
"""
"""
_VAPI_SERVICE_ID = 'com.vmware.nsx.node.aaa.providers.vidm.status'
"""
Identifier of the service in canonical form.
"""
def __init__(self, config):
"""
:type config: :class:`vmware.vapi.bindings.stub.StubConfiguration`
:param config: Configuration to be used for creating the stub.
"""
VapiInterface.__init__(self, config, _StatusStub)
self._VAPI_OPERATION_IDS = {}
def get(self):
"""
Read AAA provider vIDM status
:rtype: :class:`com.vmware.nsx.model_client.NodeAuthProviderVidmStatus`
:return: com.vmware.nsx.model.NodeAuthProviderVidmStatus
:raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable`
Service Unavailable
:raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
Bad Request, Precondition Failed
:raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError`
Internal Server Error
:raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
Forbidden
:raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
Not Found
"""
return self._invoke('get', None)
class _StatusStub(ApiInterfaceStub):
def __init__(self, config):
# properties for get operation
get_input_type = type.StructType('operation-input', {})
get_error_dict = {
'com.vmware.vapi.std.errors.service_unavailable':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
'com.vmware.vapi.std.errors.invalid_request':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'),
'com.vmware.vapi.std.errors.internal_server_error':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'InternalServerError'),
'com.vmware.vapi.std.errors.unauthorized':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
'com.vmware.vapi.std.errors.not_found':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
}
get_input_value_validator_list = [
]
get_output_validator_list = [
]
get_rest_metadata = OperationRestMetadata(
http_method='GET',
url_template='/api/v1/node/aaa/providers/vidm/status',
path_variables={
},
query_parameters={
},
content_type='application/json'
)
operations = {
'get': {
'input_type': get_input_type,
'output_type': type.ReferenceType('com.vmware.nsx.model_client', 'NodeAuthProviderVidmStatus'),
'errors': get_error_dict,
'input_value_validator_list': get_input_value_validator_list,
'output_validator_list': get_output_validator_list,
'task_type': TaskType.NONE,
},
}
rest_metadata = {
'get': get_rest_metadata,
}
ApiInterfaceStub.__init__(
self, iface_name='com.vmware.nsx.node.aaa.providers.vidm.status',
config=config, operations=operations, rest_metadata=rest_metadata,
is_vapi_rest=False)
class StubFactory(StubFactoryBase):
_attrs = {
'Status': Status,
}
|
[
"Himanshu.Shrivastava@vce.com"
] |
Himanshu.Shrivastava@vce.com
|
f73f828ff68b6b89ad727c7f7d2134f8d0622cb4
|
6e8699e241c5ab65486f6f93ae1c1e35ea841a07
|
/chat/models.py
|
d2288b58e70a189f325cd5e5254bff8500713418
|
[] |
no_license
|
7Angie7/OrgSemaphoreHeroku
|
233e0e82980367da7df81b1185c52122d6d7097f
|
f5f777ec2d83bcd8635391ce8288e30b95a9fbb6
|
refs/heads/main
| 2023-05-02T13:46:55.827828
| 2021-05-20T12:36:05
| 2021-05-20T12:36:05
| 341,671,085
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,074
|
py
|
import uuid
from django.db import models
from django.conf import settings
from django.contrib.auth.models import User
# Create your models here.
class Semaphore(models.Model):
STATUS = (
('Ready', 'Ready'),
('Busy', 'Busy'),
)
name = models.CharField(max_length=200, null=True)
time = models.IntegerField()
author = models.ForeignKey(User, on_delete=models.CASCADE, null=True)
status = models.CharField(max_length=200, null=True, choices=STATUS)
controlUrl = models.UUIDField(default=uuid.uuid4)
lastQueueNum = models.IntegerField(null=True)
semOpen = models.BooleanField(default=False)
def __str__(self):
return self.name
class QueueClient(models.Model):
semap = models.ForeignKey(Semaphore, on_delete=models.CASCADE, null=True)
device = models.CharField(max_length=200, null=True)
queueNum = models.IntegerField(null=True)
clientName = models.CharField(max_length=200, null=True)
clientNumber = models.IntegerField(null=True)
def __str__(self):
return self.clientName
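# Hedged usage sketch (not part of the original models), e.g. from a Django
# shell once migrations have been applied; the field values are illustrative.
#   s = Semaphore.objects.create(name='Room A', time=30, status='Ready')
#   QueueClient.objects.create(semap=s, device='web', queueNum=1,
#                              clientName='Alice', clientNumber=1)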
|
[
"xfasan00@stud.fit.vutbr.cz"
] |
xfasan00@stud.fit.vutbr.cz
|
774f590e2be884ccd9733de49c3547388d9f318a
|
58180ddeef1d1dde9bbe2443243ac0521662031d
|
/router-skeleton
|
a6e56cbd9e52a69f9e6f742d3cf39c9f226f95a4
|
[] |
no_license
|
kryvtsunt/bgp-router
|
41b89fbc8a95586798420f3fd0b6e5d8fd428e3a
|
0f94f2bae0ff40e588088e471a07e6fdd329b0c1
|
refs/heads/master
| 2020-03-30T06:57:05.125751
| 2018-10-12T22:31:19
| 2018-10-12T22:31:19
| 150,902,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,952
|
#!/usr/bin/env python3
import argparse, socket, time, json, select, struct, math
#DEBUG = True
DEBUG = False
parser = argparse.ArgumentParser(description='route packets')
parser.add_argument('networks', metavar='networks', type=str, nargs='+', help="networks")
args = parser.parse_args()
##########################################################################################
# Message Fields
TYPE = "type"
SRCE = "src"
DEST = "dst"
MESG = "msg"
TABL = "table"
# Message Types
DATA = "data"
DUMP = "dump"
UPDT = "update"
RVKE = "revoke"
NRTE = "no route"
# Update Message Fields
NTWK = "network"
NMSK = "netmask"
ORIG = "origin"
LPRF = "localpref"
APTH = "ASPath"
SORG = "selfOrigin"
# internal route info
CUST = "cust"
PEER = "peer"
PROV = "prov"
##########################################################################################
class Router:
routes = None
updates = None
relations = None
sockets = None
def __init__(self, networks):
self.routes = {}
self.updates = {}
self.relations = {}
self.sockets = {}
for relationship in networks:
network, relation = relationship.split("-")
if DEBUG:
print("Starting socket for", network, relation)
self.sockets[network] = socket.socket(socket.AF_UNIX, socket.SOCK_SEQPACKET)
self.sockets[network].setblocking(0)
self.sockets[network].connect(network)
self.relations[network] = relation
return
def lookup_routes(self, daddr):
""" Lookup all valid routes for an address """
# TODO
outroutes = []
return outroutes
def get_shortest_as_path(self, routes):
""" select the route with the shortest AS Path """
# TODO
outroutes = []
return outroutes
def get_highest_preference(self, routes):
""" select the route with the shortest AS Path """
# TODO
outroutes = []
return outroutes
def get_self_origin(self, routes):
""" select self originating routes """
# TODO
outroutes = []
return outroutes
def get_origin_routes(self, routes):
""" select origin routes: EGP > IGP > UNK """
# TODO
outroutes = []
return outroutes
def filter_relationships(self, srcif, routes):
""" Don't allow Peer->Peer, Peer->Prov, or Prov->Peer forwards """
outroutes = []
return outroutes
def get_route(self, srcif, daddr):
""" Select the best route for a given address """
# TODO
peer = None
routes = self.lookup_routes(daddr)
# Rules go here
if routes:
# 1. Highest Preference
routes = self.get_highest_preference(routes)
# 2. Self Origin
routes = self.get_self_origin(routes)
# 3. Shortest ASPath
routes = self.get_shortest_as_path(routes)
# 4. EGP > IGP > UNK
routes = self.get_origin_routes(routes)
# 5. Lowest IP Address
# TODO
# Final check: enforce peering relationships
routes = self.filter_relationships(srcif, routes)
return self.sockets[peer] if peer else None
def forward(self, srcif, packet):
""" Forward a data packet """
# TODO
return False
def coalesce(self):
""" coalesce any routes that are right next to each other """
# TODO (this is the most difficult task, save until last)
return False
def update(self, srcif, packet):
""" handle update packets """
# TODO
return False
def revoke(self, packet):
""" handle revoke packets """
# TODO
return True
def dump(self, packet):
""" handles dump table requests """
# TODO
return True
def handle_packet(self, srcif, packet):
""" dispatches a packet """
# TODO
return False
def send_error(self, conn, msg):
""" Send a no_route error message """
# TODO
return
def run(self):
while True:
socks = select.select(self.sockets.values(), [], [], 0.1)[0]
for conn in socks:
try:
k = conn.recv(65535)
except:
# either died on a connection reset, or was SIGTERM's by parent
return
if k:
for sock in self.sockets:
if self.sockets[sock] == conn:
srcif = sock
msg = json.loads(k)
if not self.handle_packet(srcif, msg):
self.send_error(conn, msg)
else:
return
return
if __name__ == "__main__":
router = Router(args.networks)
router.run()
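# Hedged usage note (not part of the original skeleton): each positional
# argument names a neighbour's UNIX-domain socket plus its relationship to this
# router, joined by '-', e.g.
#   ./router-skeleton 1.2.3.2-cust 5.6.7.2-peer 9.10.11.2-prov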
|
[
"tim@login-students.ccs.neu.edu"
] |
tim@login-students.ccs.neu.edu
|
|
128977c94ecd89aa750a22dabc12b410e6ef0ebc
|
9039294895cc5334d1f42f3c9f5fad11103c4e5e
|
/mars/learn/neighbors/_faiss.py
|
eb9d6c065c65c661f9ed5289b3fe1436e3a1ee22
|
[
"BSD-3-Clause",
"OFL-1.1",
"LicenseRef-scancode-unknown-license-reference",
"CC0-1.0",
"Apache-2.0",
"BSD-2-Clause",
"MIT"
] |
permissive
|
rg070836rg/mars
|
27362aa50fa556768b8824a5600aab5f864ad0ab
|
f7909869bc3c2995e4eeb7c898db9e33b636f9ca
|
refs/heads/master
| 2023-04-29T05:18:04.739725
| 2020-09-29T11:17:31
| 2020-09-29T11:17:31
| 299,794,632
| 0
| 1
|
Apache-2.0
| 2020-09-30T02:59:21
| 2020-09-30T02:59:21
| null |
UTF-8
|
Python
| false
| false
| 28,996
|
py
|
# Copyright 1999-2020 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import atexit
import os
import operator
import tempfile
from enum import Enum
import numpy as np
try:
import faiss
except ImportError: # pragma: no cover
faiss = None
from ... import opcodes as OperandDef
from ...context import RunningMode
from ...operands import OperandStage
from ...serialize import KeyField, StringField, Int64Field, \
Int32Field, BoolField, Int8Field
from ...tiles import TilesError
from ...tensor import tensor as astensor
from ...tensor.core import TensorOrder
from ...tensor.random import RandomState
from ...tensor.array_utils import as_same_device, device
from ...tensor.utils import check_random_state, gen_random_seeds
from ...utils import check_chunks_unknown_shape, require_not_none, recursive_tile
from ..operands import LearnOperand, LearnOperandMixin, OutputType
class MemoryRequirementGrade(Enum):
minimum = 0
low = 1
high = 2
maximum = 3
if faiss is not None:
METRIC_TO_FAISS_METRIC_TYPE = {
'l2': faiss.METRIC_L2,
'euclidean': faiss.METRIC_L2,
'innerproduct': faiss.METRIC_INNER_PRODUCT,
'cosine': faiss.METRIC_INNER_PRODUCT,
}
else: # pragma: no cover
METRIC_TO_FAISS_METRIC_TYPE = {}
@require_not_none(faiss)
class FaissBuildIndex(LearnOperand, LearnOperandMixin):
_op_type_ = OperandDef.FAISS_BUILD_INDEX
_input = KeyField('input')
_metric = StringField('metric')
_faiss_index = StringField('faiss_index')
_n_sample = Int64Field('n_sample')
_seed = Int32Field('seed')
_same_distribution = BoolField('same_distribution')
_accuracy = BoolField('accuracy')
_memory_require = Int8Field('memory_require',
on_serialize=operator.attrgetter('value'),
on_deserialize=MemoryRequirementGrade)
# for test purpose, could be 'object', 'filename' or 'bytes'
_return_index_type = StringField('return_index_type')
def __init__(self, metric=None, faiss_index=None, n_sample=None, seed=None,
same_distribution=None, return_index_type=None,
accuracy=None, memory_require=None,
stage=None, output_types=None, gpu=None, **kw):
super().__init__(_metric=metric, _faiss_index=faiss_index, _n_sample=n_sample,
_seed=seed, _same_distribution=same_distribution,
_return_index_type=return_index_type,
_accuracy=accuracy, _memory_require=memory_require, _gpu=gpu,
_stage=stage, _output_types=output_types, **kw)
if self.output_types is None:
self.output_types = [OutputType.object]
@property
def input(self):
return self._input
@property
def metric(self):
return self._metric
@property
def faiss_metric_type(self):
return METRIC_TO_FAISS_METRIC_TYPE[self._metric]
@property
def faiss_index(self):
return self._faiss_index
@property
def n_sample(self):
return self._n_sample
@property
def seed(self):
return self._seed
@property
def same_distribution(self):
return self._same_distribution
@property
def accuracy(self):
return self._accuracy
@property
def memory_require(self):
return self._memory_require
@property
def return_index_type(self):
return self._return_index_type
def _set_inputs(self, inputs):
super()._set_inputs(inputs)
self._input = self._inputs[0]
def __call__(self, X):
return self.new_tileable([X])
@classmethod
def tile(cls, op):
check_chunks_unknown_shape(op.inputs, TilesError)
in_tensor = astensor(op.input, np.dtype(np.float32))._inplace_tile()
if op.faiss_index == 'auto':
faiss_index, n_sample = _gen_index_string_and_sample_count(
in_tensor.shape, op.n_sample, op.accuracy, op.memory_require,
gpu=op.gpu, **op.extra_params)
op._n_sample = n_sample
else:
faiss_index, n_sample = op.faiss_index, op.n_sample
if len(in_tensor.chunks) == 1:
return cls._tile_one_chunk(op, faiss_index, n_sample)
if in_tensor.chunk_shape[1] != 1:
# make sure axis 1 has 1 chunk
in_tensor = in_tensor.rechunk({1: in_tensor.shape[1]})._inplace_tile()
return cls._tile_chunks(op, in_tensor, faiss_index, n_sample)
@classmethod
def _tile_one_chunk(cls, op, faiss_index, n_sample):
in_chunk = op.input.chunks[0]
chunk_op = op.copy().reset_key()
chunk_op._faiss_index = faiss_index
chunk_op._n_sample = n_sample
chunk = chunk_op.new_chunk([in_chunk], index=in_chunk.index)
new_op = op.copy()
kw = op.outputs[0].params
kw['chunks'] = [chunk]
kw['nsplits'] = ((1,),)
return new_op.new_tileables(op.inputs, kws=[kw])
@classmethod
def _tile_chunks(cls, op, in_tensor, faiss_index, n_sample):
"""
If the distribution on each chunk is the same,
refer to:
https://github.com/facebookresearch/faiss/wiki/FAQ#how-can-i-distribute-index-building-on-several-machines
1. train an IndexIVF* on a representative sample of the data, store it.
2. for each node, load the trained index, add the local data to it, store the resulting populated index
3. on a central node, load all the populated indexes and merge them.
"""
faiss_index_ = faiss.index_factory(in_tensor.shape[1], faiss_index,
op.faiss_metric_type)
# Training on sample data when two conditions meet
# 1. the index type requires for training, e.g. Flat does not require
# 2. distributions of chunks are the same, in not,
# train separately on each chunk data
need_sample_train = not faiss_index_.is_trained and op.same_distribution
need_merge_index = hasattr(faiss_index_, 'merge_from') if need_sample_train else False
train_chunk = None
if need_sample_train:
# sample data to train
rs = RandomState(op.seed)
sampled_index = rs.choice(in_tensor.shape[0], size=n_sample,
replace=False, chunk_size=n_sample)
sample_tensor = recursive_tile(in_tensor[sampled_index])
assert len(sample_tensor.chunks) == 1
sample_chunk = sample_tensor.chunks[0]
train_op = FaissTrainSampledIndex(faiss_index=faiss_index, metric=op.metric,
return_index_type=op.return_index_type)
train_chunk = train_op.new_chunk([sample_chunk])
elif op.gpu: # pragma: no cover
# if not need train, and on gpu, just merge data together to train
in_tensor = in_tensor.rechunk(in_tensor.shape)._inplace_tile()
# build index for each input chunk
build_index_chunks = []
for i, chunk in enumerate(in_tensor.chunks):
build_index_op = op.copy().reset_key()
build_index_op._stage = OperandStage.map
build_index_op._faiss_index = faiss_index
if train_chunk is not None:
build_index_chunk = build_index_op.new_chunk(
[chunk, train_chunk], index=(i,))
else:
build_index_chunk = build_index_op.new_chunk([chunk], index=(i,))
build_index_chunks.append(build_index_chunk)
out_chunks = []
if need_merge_index:
assert op.n_sample is not None
# merge all indices into one, do only when trained on sample data
out_chunk_op = op.copy().reset_key()
out_chunk_op._faiss_index = faiss_index
out_chunk_op._stage = OperandStage.agg
out_chunk = out_chunk_op.new_chunk(build_index_chunks, index=(0,))
out_chunks.append(out_chunk)
else:
out_chunks.extend(build_index_chunks)
new_op = op.copy()
return new_op.new_tileables(op.inputs, chunks=out_chunks,
nsplits=((len(out_chunks),),))
@classmethod
def _execute_one_chunk(cls, ctx, op):
(inp,), device_id, xp = as_same_device(
[ctx[c.key] for c in op.inputs], device=op.device, ret_extra=True)
with device(device_id):
inp = inp.astype(np.float32, copy=False)
# create index
index = faiss.index_factory(inp.shape[1], op.faiss_index,
op.faiss_metric_type)
# GPU
if device_id >= 0: # pragma: no cover
index = _index_to_gpu(index, device_id)
# train index
if not index.is_trained:
assert op.n_sample is not None
sample_indices = xp.random.choice(inp.shape[0],
size=op.n_sample, replace=False)
sampled = inp[sample_indices]
index.train(sampled)
if op.metric == 'cosine':
# faiss does not support cosine distances directly,
# data needs to be normalize before adding to index,
# refer to:
# https://github.com/facebookresearch/faiss/wiki/FAQ#how-can-i-index-vectors-for-cosine-distance
faiss.normalize_L2(inp)
# add vectors to index
if device_id >= 0: # pragma: no cover
# gpu
index.add_c(inp.shape[0], _swig_ptr_from_cupy_float32_array(inp))
else:
index.add(inp)
ctx[op.outputs[0].key] = _store_index(ctx, op, index, device_id)
@classmethod
def _execute_map(cls, ctx, op):
(data,), device_id, _ = as_same_device(
[ctx[op.inputs[0].key]], device=op.device, ret_extra=True)
index = ctx[op.inputs[1].key] if len(op.inputs) == 2 else None
with device(device_id):
if index is not None:
# fetch the trained index
trained_index = _load_index(ctx, op, index, device_id)
return_index_type = _get_index_type(op.return_index_type, ctx)
if return_index_type == 'object':
# clone a new one,
# because faiss does not ensure thread-safe for operations that change index
# https://github.com/facebookresearch/faiss/wiki/Threads-and-asynchronous-calls#thread-safety
trained_index = faiss.clone_index(trained_index)
else:
trained_index = faiss.index_factory(data.shape[1], op.faiss_index,
op.faiss_metric_type)
if op.same_distribution:
# no need to train, just create index
pass
else:
# distribution no the same, train on each chunk
trained_index.train(data)
if device_id >= 0: # pragma: no cover
trained_index = _index_to_gpu(trained_index, device_id)
if op.metric == 'cosine':
# faiss does not support cosine distances directly,
# data needs to be normalize before adding to index,
# refer to:
# https://github.com/facebookresearch/faiss/wiki/FAQ#how-can-i-index-vectors-for-cosine-distance
faiss.normalize_L2(data)
# add data into index
if device_id >= 0: # pragma: no cover
# gpu
trained_index.add_c(data.shape[0], _swig_ptr_from_cupy_float32_array(data))
else:
trained_index.add(data)
ctx[op.outputs[0].key] = _store_index(ctx, op, trained_index, device_id)
@classmethod
def _execute_agg(cls, ctx, op):
device_id = op.device
if device_id is None:
device_id = -1
inputs = [ctx[inp.key] for inp in op.inputs]
with device(device_id):
merged_index = None
indexes = []
for index in inputs:
index = _load_index(ctx, op, index, device_id)
indexes.append(index)
assert hasattr(index, 'merge_from')
if merged_index is None:
merged_index = index
else:
merged_index.merge_from(index, index.ntotal)
ctx[op.outputs[0].key] = _store_index(ctx, op, merged_index, device_id)
@classmethod
def execute(cls, ctx, op):
if op.stage == OperandStage.map:
cls._execute_map(ctx, op)
elif op.stage == OperandStage.agg:
cls._execute_agg(ctx, op)
else:
assert op.stage is None
cls._execute_one_chunk(ctx, op)
def _get_index_type(return_index_type, ctx):
if return_index_type is None: # pragma: no cover
if ctx.running_mode == RunningMode.local:
return_index_type = 'object'
elif ctx.running_mode == RunningMode.local_cluster:
return_index_type = 'filename'
else:
return_index_type = 'bytes'
return return_index_type
def _store_index(ctx, op, index, device_id):
return_index_type = _get_index_type(op.return_index_type, ctx)
if return_index_type == 'object':
# no need to serialize
return index
elif return_index_type == 'filename':
# save to file, then return filename
if device_id >= 0: # pragma: no cover
# for gpu, convert to cpu first
index = faiss.index_gpu_to_cpu(index)
fn = tempfile.mkstemp('.index', prefix='faiss_')[1]
faiss.write_index(index, fn)
atexit.register(lambda: os.remove(fn))
return fn
else:
if device_id >= 0: # pragma: no cover
# for gpu, convert to cpu first
index = faiss.index_gpu_to_cpu(index)
# distributed, save to file, then return in memory bytes
fn = tempfile.mkstemp('.index', prefix='faiss_')[1]
faiss.write_index(index, fn)
try:
with open(fn, 'rb') as f:
return f.read()
finally:
os.remove(fn)
def _load_index(ctx, op, index, device_id):
return_index_type = _get_index_type(op.return_index_type, ctx)
if return_index_type == 'object':
# local
return index
elif return_index_type == 'filename':
# local cluster
return faiss.read_index(index)
else:
# distributed
fn = tempfile.mkstemp('.index', prefix='faiss_')[1]
with open(fn, 'wb') as f:
f.write(index)
index = faiss.read_index(f.name)
if device_id >= 0: # pragma: no cover
index = _index_to_gpu(index, device_id)
return index
def _index_to_gpu(index, device_id): # pragma: no cover
res = faiss.StandardGpuResources()
return faiss.index_cpu_to_gpu(res, device_id, index)
def _swig_ptr_from_cupy_float32_array(x): # pragma: no cover
assert x.flags.c_contiguous
assert x.dtype == np.float32
data_ptr = x.__cuda_array_interface__['data'][0]
return faiss.cast_integer_to_float_ptr(data_ptr)
def _swig_ptr_from_cupy_int64_array(x): # pragma: no cover
assert x.flags.c_contiguous
assert x.dtype == np.int64
data_ptr = x.__cuda_array_interface__['data'][0]
return faiss.cast_integer_to_long_ptr(data_ptr)
@require_not_none(faiss)
class FaissTrainSampledIndex(LearnOperand, LearnOperandMixin):
_op_type_ = OperandDef.FAISS_TRAIN_SAMPLED_INDEX
_input = KeyField('input')
_metric = StringField('metric')
_faiss_index = StringField('faiss_index')
# for test purpose, could be 'object', 'filename' or 'bytes'
_return_index_type = StringField('return_index_type')
def __init__(self, faiss_index=None, metric=None,
return_index_type=None, output_types=None, **kw):
super().__init__(_faiss_index=faiss_index, _metric=metric,
_return_index_type=return_index_type,
_output_types=output_types, **kw)
if self.output_types is None:
self.output_types = [OutputType.object]
@property
def input(self):
return self._input
@property
def metric(self):
return self._metric
@property
def faiss_metric_type(self):
return METRIC_TO_FAISS_METRIC_TYPE[self.metric]
@property
def faiss_index(self):
return self._faiss_index
@property
def return_index_type(self):
return self._return_index_type
def _set_inputs(self, inputs):
super()._set_inputs(inputs)
self._input = self._inputs[0]
@classmethod
def execute(cls, ctx, op):
(data,), device_id, _ = as_same_device(
[ctx[op.input.key]], device=op.device, ret_extra=True)
with device(device_id):
index = faiss.index_factory(data.shape[1], op.faiss_index,
op.faiss_metric_type)
if device_id >= 0: # pragma: no cover
# GPU
index = _index_to_gpu(index, device_id)
index.train_c(data.shape[0], _swig_ptr_from_cupy_float32_array(data))
else:
index.train(data)
ctx[op.outputs[0].key] = _store_index(
ctx, op, index, device_id)
def _gen_index_string_and_sample_count(shape, n_sample, accuracy, memory_require, gpu=False, **kw):
"""
Generate index string and sample count according to guidance of faiss:
https://github.com/facebookresearch/faiss/wiki/Guidelines-to-choose-an-index
"""
size, dim = shape
memory_require = _get_memory_require(memory_require)
if accuracy or size < 10 ** 5:
# Flat is the only index that guarantees exact results
# no need to train, thus sample count is None
return 'Flat', None
if memory_require == MemoryRequirementGrade.maximum and not gpu:
x = kw.get('M', 32) # get medium number by default
if x < 4 or x > 64:
raise ValueError(f'HNSWx requires M that between 4 and 64, got {x}')
return f'HNSW{x}', None
if memory_require in (MemoryRequirementGrade.high, MemoryRequirementGrade.maximum):
basement = '{},Flat'
elif memory_require == MemoryRequirementGrade.low:
x = kw.get('dim', dim // 2)
basement = f'PCAR{x},{{}},SQ8'
elif memory_require == MemoryRequirementGrade.minimum:
x = kw.get('M', min(64, dim // 2))
if x > 64:
raise ValueError(f'PQx requires M <= 64, got {x}')
y = kw.get('dim', None)
if y is not None and y % x != 0:
raise ValueError(f'OPQx_y requires dim is a multiple of M({x}), got dim: {y}')
y = min(dim, 4 * x)
y = x * (y // x) # make sure y is a multiple of x
basement = f'OPQ{x}_{y},{{}},PQ{x}'
else: # pragma: no cover
raise ValueError('unknown memory require')
# now choose the clustering options
if size < 10 ** 6 or (size < 10 ** 7 and gpu):
# < 1M, or <10M but need GPU
k = kw.get('k', 5 * int(np.sqrt(size)))
if k < 4 * int(np.sqrt(size)) or k > 16 * int(np.sqrt(size)):
raise ValueError(f'k should be between 4 * sqrt(N) and 16 * sqrt(N), got {k}')
index_str = basement.format(f'IVF{k}')
if n_sample is None:
# 30 * k - 256 * k
n_sample = min(30 * k, size)
elif size < 10 ** 7 and not gpu:
# 1M - 10M
index_str = basement.format('IVF65536_HNSW32')
if n_sample is None:
# between 30 * 65536 and 256 * 65536
n_sample = 32 * 65536
elif size < 10 ** 8:
index_str = basement.format('IVF65536_HNSW32')
n_sample = 64 * 65536 if n_sample is None else n_sample
else:
index_str = basement.format('IVF1048576_HNSW32')
n_sample = 64 * 65536 if n_sample is None else n_sample
return index_str, n_sample
def _get_memory_require(memory_require):
if isinstance(memory_require, str):
return getattr(MemoryRequirementGrade, memory_require)
elif isinstance(memory_require, MemoryRequirementGrade):
return memory_require
return MemoryRequirementGrade(memory_require)
@require_not_none(faiss)
def build_faiss_index(X, index_name='auto', n_sample=None, metric="euclidean",
random_state=None, same_distribution=True,
accuracy=False, memory_require=None, **kw):
X = astensor(X)
if metric not in METRIC_TO_FAISS_METRIC_TYPE:
raise ValueError(f'unknown metric: {metric}')
if index_name != 'auto':
try:
faiss.index_factory(X.shape[1], index_name,
METRIC_TO_FAISS_METRIC_TYPE[metric])
except RuntimeError:
raise ValueError(f'illegal faiss index: {index_name}')
rs = check_random_state(random_state)
if isinstance(rs, RandomState):
rs = rs.to_numpy()
seed = gen_random_seeds(1, rs)[0]
if memory_require is None:
memory_require = MemoryRequirementGrade.low
else:
memory_require = _get_memory_require(memory_require)
op = FaissBuildIndex(faiss_index=index_name, metric=metric,
n_sample=n_sample, gpu=X.op.gpu, seed=seed,
same_distribution=same_distribution,
accuracy=accuracy, memory_require=memory_require, **kw)
return op(X)
class FaissQuery(LearnOperand, LearnOperandMixin):
_op_type_ = OperandDef.FAISS_QUERY
_input = KeyField('input')
_faiss_index = KeyField('faiss_index')
_metric = StringField('metric')
_n_neighbors = Int32Field('n_neighbors')
_return_distance = BoolField('return_distance')
_nprobe = Int64Field('nprobe')
# for test purpose, could be 'object', 'filename' or 'bytes'
_return_index_type = StringField('return_index_type')
def __init__(self, faiss_index=None, metric=None, n_neighbors=None,
return_distance=None, return_index_type=None,
nprobe=None, output_types=None, gpu=None, **kw):
super().__init__(_faiss_index=faiss_index, _n_neighbors=n_neighbors, _metric=metric,
_return_distance=return_distance, _output_types=output_types,
_nprobe=nprobe, _return_index_type=return_index_type, _gpu=gpu, **kw)
if self.output_types is None:
self.output_types = [OutputType.tensor] * self.output_limit
@property
def input(self):
return self._input
@property
def faiss_index(self):
return self._faiss_index
@property
def metric(self):
return self._metric
@property
def n_neighbors(self):
return self._n_neighbors
@property
def nprobe(self):
return self._nprobe
@property
def return_distance(self):
return self._return_distance
@property
def return_index_type(self):
return self._return_index_type
@property
def output_limit(self):
return 2 if self._return_distance else 1
def _set_inputs(self, inputs):
super()._set_inputs(inputs)
self._input = self._inputs[0]
if self._faiss_index is not None:
self._faiss_index = self._inputs[1]
def __call__(self, y):
kws = []
if self._return_distance:
kws.append({'shape': (y.shape[0], self._n_neighbors),
'dtype': np.dtype(np.float32),
'order': TensorOrder.C_ORDER,
'type': 'distance'})
kws.append({
'shape': (y.shape[0], self._n_neighbors),
'dtype': np.dtype(np.int64),
'order': TensorOrder.C_ORDER,
'type': 'indices'
})
return self.new_tileables([y, self._faiss_index], kws=kws)
@classmethod
def tile(cls, op):
in_tensor = astensor(op.input)
if in_tensor.chunk_shape[1] != 1:
check_chunks_unknown_shape([in_tensor], TilesError)
in_tensor = in_tensor.rechunk({1: in_tensor.shape[1]})._inplace_tile()
out_chunks = [], []
for chunk in in_tensor.chunks:
chunk_op = op.copy().reset_key()
chunk_kws = []
if op.return_distance:
chunk_kws.append({
'shape': (chunk.shape[0], op.n_neighbors),
'dtype': np.dtype(np.float32),
'order': TensorOrder.C_ORDER,
'index': chunk.index,
'type': 'distance'
})
chunk_kws.append({
'shape': (chunk.shape[0], op.n_neighbors),
'dtype': np.dtype(np.int64),
'order': TensorOrder.C_ORDER,
'index': chunk.index,
'type': 'indices'
})
in_chunks = [chunk]
in_chunks.extend(op.faiss_index.chunks)
chunks = chunk_op.new_chunks(in_chunks, kws=chunk_kws)
if op.return_distance:
out_chunks[0].append(chunks[0])
out_chunks[1].append(chunks[-1])
new_op = op.copy()
kws = [out.params for out in op.outputs]
if op.return_distance:
kws[0]['chunks'] = out_chunks[0]
kws[0]['nsplits'] = (in_tensor.nsplits[0], (op.n_neighbors,))
kws[-1]['chunks'] = out_chunks[1]
kws[-1]['nsplits'] = (in_tensor.nsplits[0], (op.n_neighbors,))
return new_op.new_tileables(op.inputs, kws=kws)
@classmethod
def execute(cls, ctx, op):
(y,), device_id, xp = as_same_device(
[ctx[op.input.key]], device=op.device, ret_extra=True)
indexes = [_load_index(ctx, op, ctx[index.key], device_id)
for index in op.inputs[1:]]
with device(device_id):
y = xp.ascontiguousarray(y, dtype=np.float32)
if len(indexes) == 1:
index = indexes[0]
else:
index = faiss.IndexShards(indexes[0].d)
[index.add_shard(ind) for ind in indexes]
if op.metric == 'cosine':
# faiss does not support cosine distances directly,
# data needs to be normalize before searching,
# refer to:
# https://github.com/facebookresearch/faiss/wiki/FAQ#how-can-i-index-vectors-for-cosine-distance
faiss.normalize_L2(y)
if op.nprobe is not None:
index.nprobe = op.nprobe
if device_id >= 0: # pragma: no cover
n = y.shape[0]
k = op.n_neighbors
distances = xp.empty((n, k), dtype=xp.float32)
indices = xp.empty((n, k), dtype=xp.int64)
index.search_c(n, _swig_ptr_from_cupy_float32_array(y),
k, _swig_ptr_from_cupy_float32_array(distances),
_swig_ptr_from_cupy_int64_array(indices))
else:
distances, indices = index.search(y, op.n_neighbors)
if op.return_distance:
if index.metric_type == faiss.METRIC_L2:
# make it equivalent to `pairwise.euclidean_distances`
distances = xp.sqrt(distances, out=distances)
elif op.metric == 'cosine':
# make it equivalent to `pairwise.cosine_distances`
distances = xp.subtract(1, distances, out=distances)
ctx[op.outputs[0].key] = distances
ctx[op.outputs[-1].key] = indices
@require_not_none(faiss)
def faiss_query(faiss_index, data, n_neighbors, return_distance=True, nprobe=None):
data = astensor(data)
op = FaissQuery(faiss_index=faiss_index, n_neighbors=n_neighbors,
metric=faiss_index.op.metric, return_distance=return_distance,
return_index_type=faiss_index.op.return_index_type,
nprobe=nprobe, gpu=data.op.gpu)
ret = op(data)
if not return_distance:
return ret[0]
return ret
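# Hedged usage sketch (not part of the original module). Illustrative only: it
# assumes a working faiss install and the mars tensor API (mt.random.rand is
# assumed to mirror numpy's signature with an extra chunk_size argument).
#
#   import mars.tensor as mt
#   X = mt.random.rand(100000, 32, chunk_size=25000)
#   index = build_faiss_index(X, metric='euclidean')
#   distances, indices = faiss_query(index, X[:10], n_neighbors=5)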
|
[
"noreply@github.com"
] |
rg070836rg.noreply@github.com
|
431bf58645f85ed618cba84c5bd7ac816fcfab9c
|
2ef98703a2a839f30b056c7d763b3998a05bb676
|
/MaryLambSong/MaryLambSong_Fast_Style2.py
|
dbd1031b3bb2f8f188ab52f23f42095190d1d107
|
[
"MIT"
] |
permissive
|
melwyncarlo/SineWaveMusic
|
fc0b9cf206b8820a5eae231665f5da93820ae76e
|
2c5d5e99e3daa5b881e880e8db79dcd62b5bab3b
|
refs/heads/main
| 2023-04-18T02:37:36.144219
| 2021-05-05T14:43:22
| 2021-05-05T14:43:22
| 362,060,711
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,193
|
py
|
# Copyright 2021 Melwyn Francis Carlo
import numpy as np
import wavio
amplitude = 1
samplerate = 22050
notenameslist = ["c4", "d", "e", "f", "g", "a", "b", "c5"]
notenumberslist = [40, 42, 44, 45, 47, 49, 51, 52]
def notefrequency(notenumber):
return (np.power(2, (notenumber-49)/12) * 440)
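# The formula above is the standard 88-key piano numbering (key 49 = A4 = 440 Hz);
# for example notefrequency(49) == 440.0 and notefrequency(40) is roughly 261.63 Hz (middle C).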
def generateaudio(notesarray):
y = 0
sampleduration = int(len(notesarray) / 2) + 1
# Set the horizonal axis, t (or x)
t = np.linspace(0, sampleduration, sampleduration * samplerate, \
endpoint=False)
for i, noteelem in enumerate(notesarray):
# Set the sine wave audio function y=f(x)
if noteelem != "":
y = y + ( amplitude * np.sin(2 * np.pi * \
notefrequency(notenumberslist[notenameslist.index(noteelem)]) * t) * \
np.exp( -15 * (( (np.sign((2*t)-i) * (abs((2*t)-i) ** 0.2)) - 0.5) ** 2) ) )
# Create the *.wav audio file
wavio.write("MaryLambSong_Fast_Style2.wav", y, samplerate, sampwidth=3)
mary_lamb_notes = ["e", "d", "c4", "d", "e", "e", "e", "", "d", "d", "d", "", \
"e", "e", "e", "", "e", "d", "c4", "d", "e", "e", "e", "c4", "d", "d", "e", \
"d", "c4", ""]
generateaudio(mary_lamb_notes)
|
[
"melwyncarlo@gmail.com"
] |
melwyncarlo@gmail.com
|
24a170eea5b390dd3c88d3b59266038eefe8f04d
|
6ce8bec457edf5405964e15449c16cf0a6c0905e
|
/dashboard/migrations/0013_auto_20150528_1536.py
|
0d813fdcb21d7d8ac6f1a7acc0f7df3ff49efbcc
|
[] |
no_license
|
DhavalKapil/frozen
|
a4cb9f7d88b5efbe71a29ffde42547e5cd13c35d
|
ba75018e3f9de0f3dca656a234f6133cf52ccbb1
|
refs/heads/master
| 2021-01-15T18:21:30.869787
| 2015-06-12T08:58:02
| 2015-06-12T08:58:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 588
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import datetime
class Migration(migrations.Migration):
dependencies = [
('dashboard', '0012_auto_20150528_1531'),
]
operations = [
migrations.RemoveField(
model_name='ip',
name='last_access',
),
migrations.AddField(
model_name='ip',
name='last_fetched',
field=models.DateTimeField(default=datetime.datetime(1970, 1, 1, 0, 0), verbose_name=b'last fetched'),
),
]
|
[
"adiitya.adda@gmail.com"
] |
adiitya.adda@gmail.com
|
d6444830907ee1927497602fb48ab4c8a8dd67ed
|
5c6b17ee3a9a33637efd864063b95cc13b1ee765
|
/data_science_projects/ML-iris_data/model-building/irisdata-analysis-model.py
|
805a69ff8ebe830d74b92fe5273543daa7c9f761
|
[] |
no_license
|
Zenawi777/machinelearning
|
c2d183d390bf3354e052997db4d16887051120d0
|
6d5cb6345c4ab5f16c5cfe2030a476c3f1e31280
|
refs/heads/master
| 2020-03-22T14:11:26.898866
| 2019-08-07T20:20:04
| 2019-08-07T20:20:04
| 140,159,697
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 916
|
py
|
# This script import iris data from a file and parses the data for analysis
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from matplotlib import colors
# Import data from a file
df = pd.read_csv('iris-data-copy.txt', header=0, sep=',', usecols=range(4))
cmap = colors.LinearSegmentedColormap(
'red_blue_classes',
{'red': [(0, 1, 1), (1, 0.7, 0.7)],
'green': [(0, 0.7, 0.7), (1, 0.7, 0.7)],
'blue': [(0, 0.7, 0.7), (1, 1, 1)]})
plt.cm.register_cmap(cmap=cmap)
# NOTE: dataset_fixed_cov(), dataset_cov(), plot_data() and plot_lda_cov() are
# not defined in this script; they appear to come from scikit-learn's LDA/QDA
# covariance ellipsoid example and must be supplied before this loop can run.
# The dataframe `df` loaded above is not used below.
for i, (X, y) in enumerate([dataset_fixed_cov(), dataset_cov()]):
# Linear Discriminant Analysis
lda = LinearDiscriminantAnalysis(solver="svd", store_covariance=True)
y_pred = lda.fit(X, y).predict(X)
splot = plot_data(lda, X, y, y_pred, fig_index=2 * i + 1)
plot_lda_cov(lda, splot)
plt.axis('tight')
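# Hedged minimal sketch (not part of the original script): a self-contained LDA
# fit on the iris dataset bundled with scikit-learn, avoiding the undefined
# helper functions referenced in the loop above.
from sklearn.datasets import load_iris
iris = load_iris()
clf = LinearDiscriminantAnalysis(solver="svd", store_covariance=True)
train_accuracy = (clf.fit(iris.data, iris.target).predict(iris.data) == iris.target).mean()
print(train_accuracy)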
|
[
"40924568+Zenawi777@users.noreply.github.com"
] |
40924568+Zenawi777@users.noreply.github.com
|
829de61cb7b57b05162956702f809b027adfab90
|
0b9c28e31e23072121e2e52f68942c7c2dfeb9e8
|
/utils.py
|
8288912b03ed5e150a9f414d6db6d102f052a7a2
|
[] |
no_license
|
ABUISSON/bonbouquins
|
e63cb98b3a0c8ad685d764ef0748ab7d0b965163
|
cb979709302345cc6b77c170e180d336a34020a9
|
refs/heads/master
| 2023-05-10T23:57:23.393273
| 2020-05-05T10:54:35
| 2020-05-05T10:54:35
| 261,419,617
| 0
| 0
| null | 2023-05-01T21:23:39
| 2020-05-05T09:59:35
|
Python
|
UTF-8
|
Python
| false
| false
| 349
|
py
|
import requests
def get_goodreads_data(isbn):
"""This function uses goodreads API to get number of reviews and grade"""
res = requests.get("https://www.goodreads.com/book/review_counts.json", params={"key": "sWSqWzJFOsXzLJL3YgNN2Q", "isbns": isbn}).json()
return res['books'][0]['work_ratings_count'], res['books'][0]['average_rating']
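# Hedged usage sketch (not part of the original file). Goodreads has since
# deprecated this public API, so the call may fail; the ISBN is illustrative.
if __name__ == '__main__':
    ratings_count, average_rating = get_goodreads_data('9780262033848')
    print(ratings_count, average_rating)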
|
[
"arnaud.buisson@hec.edu"
] |
arnaud.buisson@hec.edu
|
381d20cf4408a7179a099bd817259c3d963fac83
|
92deb25e48540896f731a3baf7e1ccf62f35409e
|
/Grocery.py
|
b4607deedbcbef5c2d3e69c46959fa46ca048889
|
[] |
no_license
|
anju-jacob15/Heroku-Project
|
a18fe4a3f6b4d5a0b98fb1bd7b882ea5447ee4da
|
7aab85734ad0c110f21a11d955248222253d9c77
|
refs/heads/main
| 2023-06-29T08:37:50.611656
| 2021-07-14T17:27:56
| 2021-07-14T17:27:56
| 385,549,008
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,969
|
py
|
#!/usr/bin/env python
# coding: utf-8
# In[2]:
import numpy as np
import pandas as pd
from apyori import apriori
import matplotlib.pyplot as plt
from flask import Flask,request,jsonify,render_template
# In[3]:
data = pd.read_csv("basket.csv")
# In[4]:
transactions=[]
for i in range(0, 14962):
transactions.append([str(data.values[i,j]) for j in range(0,3)])
# In[5]:
rules = apriori(transactions=transactions, min_support=0.00001, min_confidence=0.1, min_lift=1, min_length=2, max_length=2)
# In[6]:
results = list(rules)
len(results)
# In[7]:
def all1():
output=""
for item in results:
pair = item[0]
items = [x for x in pair]
if len(items) > 1:
output+="\n["+items[0]+"->"+items[1]+"\tSupport: "+str(item[1])+"\tConfidence: "+str(item[2][0][2])+"\tLift: "+str(item[2][0][3])+"]\n"
return output
# In[8]:
def specific(abc):
outputArray=[]
for item in results:
pair = item[0]
items = [x for x in pair]
if(len(items) > 1 and items[0] == abc):
output=items[0],items[1],str(item[1]),str(item[2][0][2]),str(item[2][0][3])
outputArray.append(items[1])
return outputArray
# In[ ]:
# start flask
app = Flask(__name__)
# render default webpage
@app.route('/')
def home():
return render_template('index.html')
@app.route('/calculate',methods=['POST'])
def calculate():
val2=request.form['value2']
if request.form['submit_button'] == 'View All Datasets':
ret1=all1()
return render_template('index.html',prediction_test1='Following is the output {}'.format(ret1))
if request.form['submit_button'] == 'Submit':
ret2=specific(val2)
return render_template('index.html',prediction_test2='Following is the output {}'.format(ret2))
return
if __name__ == '__main__':
app.run(debug=True)
# In[ ]:
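# Hedged usage note (not part of the original notebook export): specific(item)
# returns the consequent items of every mined rule whose antecedent equals
# `item`; the item names themselves depend on the contents of basket.csv.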
|
[
"noreply@github.com"
] |
anju-jacob15.noreply@github.com
|
64b12d49a26a0628242f870670d9e5d34e02cb5e
|
f850e0f75a76c500f5ba8a9ab6fa6d5f40d22b23
|
/pyecharts_demo/demos/Bar/multiple_y_axes.py
|
e006b619f9172a4af780cb1631e85e41c4e503b7
|
[
"MIT"
] |
permissive
|
jay20161013/pywebio-chart-gallery
|
805afa2643b0d330a4a2f80f1e0a8827e8f61afe
|
11fd8a70b2e9ff5482cf5924b110a11f3469edfc
|
refs/heads/master
| 2023-03-20T01:58:30.979109
| 2021-03-18T12:48:31
| 2021-03-18T12:48:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,851
|
py
|
from pywebio.output import put_html
import pyecharts.options as opts
from pyecharts.charts import Bar, Line
"""
Gallery uses pyecharts 1.0.0
Reference: https://www.echartsjs.com/examples/editor.html?c=multiple-y-axis
Features not currently implementable:
1. None
"""
colors = ["#5793f3", "#d14a61", "#675bba"]
x_data = ["1月", "2月", "3月", "4月", "5月", "6月", "7月", "8月", "9月", "10月", "11月", "12月"]
legend_list = ["蒸发量", "降水量", "平均温度"]
evaporation_capacity = [
2.0,
4.9,
7.0,
23.2,
25.6,
76.7,
135.6,
162.2,
32.6,
20.0,
6.4,
3.3,
]
rainfall_capacity = [
2.6,
5.9,
9.0,
26.4,
28.7,
70.7,
175.6,
182.2,
48.7,
18.8,
6.0,
2.3,
]
average_temperature = [2.0, 2.2, 3.3, 4.5, 6.3, 10.2, 20.3, 23.4, 23.0, 16.5, 12.0, 6.2]
bar = (
Bar(init_opts=opts.InitOpts(width="1260px", height="720px"))
.add_xaxis(xaxis_data=x_data)
.add_yaxis(
series_name="蒸发量",
yaxis_data=evaporation_capacity,
yaxis_index=0,
color=colors[1],
)
.add_yaxis(
series_name="降水量", yaxis_data=rainfall_capacity, yaxis_index=1, color=colors[0]
)
.extend_axis(
yaxis=opts.AxisOpts(
name="蒸发量",
type_="value",
min_=0,
max_=250,
position="right",
axisline_opts=opts.AxisLineOpts(
linestyle_opts=opts.LineStyleOpts(color=colors[1])
),
axislabel_opts=opts.LabelOpts(formatter="{value} ml"),
)
)
.extend_axis(
yaxis=opts.AxisOpts(
type_="value",
name="温度",
min_=0,
max_=25,
position="left",
axisline_opts=opts.AxisLineOpts(
linestyle_opts=opts.LineStyleOpts(color=colors[2])
),
axislabel_opts=opts.LabelOpts(formatter="{value} °C"),
splitline_opts=opts.SplitLineOpts(
is_show=True, linestyle_opts=opts.LineStyleOpts(opacity=1)
),
)
)
.set_global_opts(
yaxis_opts=opts.AxisOpts(
type_="value",
name="降水量",
min_=0,
max_=250,
position="right",
offset=80,
axisline_opts=opts.AxisLineOpts(
linestyle_opts=opts.LineStyleOpts(color=colors[0])
),
axislabel_opts=opts.LabelOpts(formatter="{value} ml"),
),
tooltip_opts=opts.TooltipOpts(trigger="axis", axis_pointer_type="cross"),
)
)
line = (
Line()
.add_xaxis(xaxis_data=x_data)
.add_yaxis(
series_name="平均温度", y_axis=average_temperature, yaxis_index=2, color=colors[2]
)
)
put_html(bar.overlap(line).render_notebook())
|
[
"wang0.618@qq.com"
] |
wang0.618@qq.com
|
1ac8baab2abb8dd6a74ac797074a3aa84bd0c1b1
|
c43e3e922942c0a5fe4222c3889d6a433d40ba0f
|
/code.py
|
e2de647bebb1eee1fbd2af1d2edfb9502cf72e75
|
[] |
no_license
|
pencilcheck/news-gen
|
ae6a8ce61c016ac2020acb64234fa062ac3197db
|
f3bf66b697531be147887ba0630518a283b8b867
|
refs/heads/master
| 2021-01-19T08:12:23.200742
| 2009-09-20T17:24:58
| 2009-09-20T17:24:58
| 32,120,599
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,209
|
py
|
import web
import simplejson as sjson
import news_wrapper
import YQL
import re
import os
import shutil
import glob
render = web.template.render('templates/')
urls = (
'/', 'index',
'/gen_news', 'gen_news',
'/form_test', 'form_test',
'/newspdf.json', 'newspdf',
'/test', 'test'
)
app = web.application(urls, globals())
class index:
def GET(self):
return render.index()
class test:
def GET(self):
return render.test()
class form_test:
def POST(self):
i = web.input(json=None)
if i.json:
return i.json
else:
return "Ahh, I know nothing!"
class gen_news:
def POST(self):
i = web.input(json=None)
if i.json:
j = sjson.loads(i.json)
topics = j['topics'].split(',')
num_articles = int(j['numarts']) / len(topics)
news_t = []
for topic in topics:
s = YQL.searchNews(topic)
ob_j = sjson.loads(re.sub(r'"result":({[^}]+})', r'"result":[\1]', s))
for i in xrange(num_articles):
news_t.append((ob_j['query']['results']['result'][i]['title'], ob_j['query']['results']['result'][i]['url']))
(f_stub, f_name) = news_wrapper.gen_latex(j['title'], int(j['numcols']), news_t)
os.system('pdflatex '+f_name)
shutil.copyfile(f_stub+'.pdf', 'static/'+f_stub+'.pdf')
os.system('convert -density 250 '+'static/'+f_stub+'.pdf static/'+f_stub+'.png')
#return 'convert '+'static\\'+f_stub+'.pdf static\\news'+'.png'
k = glob.glob('static/'+f_stub+'*.png')
u = []
for i in k:
u.append(re.sub("\\\\", '/', i))
u = ['static/'+f_stub+'.pdf'] + u
return sjson.dumps(u)
#return sjson.dumps(['static/'+f_stub+'.pdf', "static/"+f_stub+"-0.png", "static/"+f_stub+"-1.png"])
#return [].append
class newspdf:
def GET(self):
print '{"page": "/static/pdf_xyz123-1.png"}, {"page": "/static/pdf_xyz123-2.png"}'
if __name__ == "__main__":
app.run()
|
[
"koshroy@94dc41b0-a5a8-11de-b195-ab392a2a73f5"
] |
koshroy@94dc41b0-a5a8-11de-b195-ab392a2a73f5
|
f2efe90e68e45d0b74b1d73ab51d091eb4d258aa
|
5c5b8333f80d6541c38b245f4041a0bbe1caa3e2
|
/main.py
|
3d4fc4ed9a9855775b936b04114ec01c722c13be
|
[] |
no_license
|
DrSpooglemon/StopWatch
|
2405ab999816571d600d876c653803493c1aae85
|
63748ab855f3842fe619fa0bae8fa6639a5a8048
|
refs/heads/main
| 2023-08-28T22:23:49.672638
| 2021-11-02T14:39:10
| 2021-11-02T14:39:10
| 423,916,580
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 304
|
py
|
#!/usr/bin/python3
from kivy.app import App
from screens import StopWatchScreen
class StopWatchApp(App):
def build(self):
self.screen = StopWatchScreen()
return self.screen
def on_stop(self):
self.screen.on_close()
if __name__ == '__main__':
StopWatchApp().run()
|
[
"spooglemon@gmail.com"
] |
spooglemon@gmail.com
|
e37ae19a25ab11fbbe322946523540b29fe7a618
|
fc33ec4fd149843556442ae420547a294509e411
|
/ros/pr2_commander/src/pr2_commander/srv/_Give.py
|
87eae0392ef8680cb31b893912c77a0fb23b1663
|
[] |
no_license
|
majunfu/essential
|
317179e5d39bcf1327c29b769933326367456035
|
d1f4de29c3188a7864c2bc01c09b09d182f7426a
|
refs/heads/master
| 2021-01-14T08:54:41.919316
| 2015-06-02T21:39:19
| 2015-06-02T21:39:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,649
|
py
|
"""autogenerated by genpy from pr2_commander/GiveRequest.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
class GiveRequest(genpy.Message):
_md5sum = "2ac5db510c2f9135f545febefbebd09b"
_type = "pr2_commander/GiveRequest"
_has_header = False #flag to mark the presence of a Header object
_full_text = """bool arm
"""
__slots__ = ['arm']
_slot_types = ['bool']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommend
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
arm
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(GiveRequest, self).__init__(*args, **kwds)
#message fields cannot be None, assign default values for those that are
if self.arm is None:
self.arm = False
else:
self.arm = False
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
buff.write(_struct_B.pack(self.arm))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(_x))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(_x))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
try:
end = 0
start = end
end += 1
(self.arm,) = _struct_B.unpack(str[start:end])
self.arm = bool(self.arm)
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
buff.write(_struct_B.pack(self.arm))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(_x))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(_x))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
try:
end = 0
start = end
end += 1
(self.arm,) = _struct_B.unpack(str[start:end])
self.arm = bool(self.arm)
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
_struct_I = genpy.struct_I
_struct_B = struct.Struct("<B")
"""autogenerated by genpy from pr2_commander/GiveResponse.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
class GiveResponse(genpy.Message):
_md5sum = "358e233cde0c8a8bcfea4ce193f8fc15"
_type = "pr2_commander/GiveResponse"
_has_header = False #flag to mark the presence of a Header object
_full_text = """bool success
"""
__slots__ = ['success']
_slot_types = ['bool']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommend
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
success
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(GiveResponse, self).__init__(*args, **kwds)
#message fields cannot be None, assign default values for those that are
if self.success is None:
self.success = False
else:
self.success = False
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
buff.write(_struct_B.pack(self.success))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(_x))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(_x))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
try:
end = 0
start = end
end += 1
(self.success,) = _struct_B.unpack(str[start:end])
self.success = bool(self.success)
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
buff.write(_struct_B.pack(self.success))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(_x))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(_x))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
try:
end = 0
start = end
end += 1
(self.success,) = _struct_B.unpack(str[start:end])
self.success = bool(self.success)
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
_struct_I = genpy.struct_I
_struct_B = struct.Struct("<B")
class Give(object):
_type = 'pr2_commander/Give'
_md5sum = 'ab6b64f88e9cbd725b6aa8a8fbe26f94'
_request_class = GiveRequest
_response_class = GiveResponse
|
[
"gmanfred@laas.fr"
] |
gmanfred@laas.fr
|
1e612de0ff21a5d54ff8d905a3d7e7e5f7510b58
|
08ea46c0a9fb71ef222cf6afa2e9094f5663dcfb
|
/pvactools/tools/pvacbind/identify_problematic_amino_acids.py
|
fb5a4cb95dae0fe95093e0e63171cb4c66f08daf
|
[
"BSD-3-Clause-Clear"
] |
permissive
|
griffithlab/pVACtools
|
e358919eee76100f79dbe8d40d02b3fce8b227ac
|
3317d2c18e82edb5ea183ae09820beb68c39d256
|
refs/heads/master
| 2023-08-09T15:42:06.725426
| 2023-08-09T14:28:44
| 2023-08-09T14:28:44
| 102,625,109
| 124
| 64
|
BSD-3-Clause-Clear
| 2023-09-08T14:17:22
| 2017-09-06T15:23:04
|
Python
|
UTF-8
|
Python
| false
| false
| 484
|
py
|
import sys
import argparse
import tempfile
from pvactools.lib.identify_problematic_amino_acids import IdentifyProblematicAminoAcids
def define_parser():
return IdentifyProblematicAminoAcids.parser('pvacbind')
def main(args_input = sys.argv[1:]):
parser = define_parser()
args = parser.parse_args(args_input)
IdentifyProblematicAminoAcids(args.input_file, args.output_file, args.problematic_amino_acids, 'pVACbind').execute()
if __name__ == "__main__":
main()
|
[
"susanna.kiwala@wustl.edu"
] |
susanna.kiwala@wustl.edu
|
13a79070e4b920cd6a009687694ea439482790b6
|
d4c6c5f42bc9d92b4cf67d936ee19ca9eb8e242d
|
/server/predict.py
|
45c4a79277e023aab9d6accbc566ab2f478d00d7
|
[
"MIT"
] |
permissive
|
woonzh/image_capture
|
c6487995b692db51ddb9d13393db596a8f1499eb
|
2361d0d006babf77e23c7ddd956079c1cd52f793
|
refs/heads/master
| 2020-05-17T06:21:26.304323
| 2019-04-26T04:37:07
| 2019-04-26T04:37:07
| 183,557,026
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,837
|
py
|
import os
import face_recognition_api
import pickle
import numpy as np
import pandas as pd
def get_prediction_images(prediction_dir):
files = [x[2] for x in os.walk(prediction_dir)][0]
l = []
exts = [".jpg", ".jpeg", ".png"]
for file in files:
_, ext = os.path.splitext(file)
if ext.lower() in exts:
l.append(os.path.join(prediction_dir, file))
return l
def main():
fname = 'classifier.pkl'
prediction_dir = './test-images'
encoding_file_path = './encoded-images-data.csv'
df = pd.read_csv(encoding_file_path)
full_data = np.array(df.astype(float).values.tolist())
# Extract features and labels
# remove id column (0th column)
X = np.array(full_data[:, 1:-1])
y = np.array(full_data[:, -1:])
if os.path.isfile(fname):
with open(fname, 'rb') as f:
(le, clf) = pickle.load(f)
else:
print('\x1b[0;37;43m' + "Classifier '{}' does not exist".format(fname) + '\x1b[0m')
quit()
for image_path in get_prediction_images(prediction_dir):
# print colorful text with image name
print('\x1b[6;30;42m' + "=====Predicting faces in '{}'=====".format(image_path) + '\x1b[0m')
img = face_recognition_api.load_image_file(image_path)
X_faces_loc = face_recognition_api.face_locations(img)
faces_encodings = face_recognition_api.face_encodings(img, known_face_locations=X_faces_loc)
print("Found {} faces in the image".format(len(faces_encodings)))
closest_distances = clf.kneighbors(faces_encodings, n_neighbors=1)
is_recognized = [closest_distances[0][i][0] <= 0.5 for i in range(len(X_faces_loc))]
# store=[]
#
# for pred, loc, rec in zip(clf.predict(faces_encodings), X_faces_loc, is_recognized):
# a=le.inverse_transform(int(pred)).title()
# b=loc
#
# if rec:
# store.append([a,b])
# else:
# store.append("unknown", loc)
# predict classes and cull classifications that are not with high confidence
predictions = [(le.inverse_transform([int(pred)])[0], loc) if rec else ("Unknown", loc) for pred, loc, rec in
zip(clf.predict(faces_encodings), X_faces_loc, is_recognized)]
print(predictions)
# for face_encoding in faces_encodings:
# face_encoding = face_encoding.reshape(1, -1)
#
# predictions = clf.predict_proba(face_encoding).ravel()
# maxI = np.argmax(predictions)
# person = le.inverse_transform(maxI)
# confidence = predictions[maxI]
# print("Predict {} with {:.2f} confidence.".format(person, confidence))
print()
|
[
"xuyanru516@pingan.com.cn"
] |
xuyanru516@pingan.com.cn
|
ba091c1a6d2618478241461ac84ca3223dddf073
|
2e93cb33f4a4e946635031c608d9b693ef10fde1
|
/Python/Py_ex1/list7.py
|
bb049b7c4d090094684ee2d6252314ab70ce27df
|
[] |
no_license
|
kw78999/MyStudy
|
34b5718125a30f6835056f02c716b5d2d6e84472
|
adb34fc5a423c6ae5761a404de1157fc95b93047
|
refs/heads/main
| 2023-04-11T00:44:28.668482
| 2021-04-23T05:11:00
| 2021-04-23T05:11:00
| 305,602,182
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 111
|
py
|
import random
lotto = set()
cnt2 = 0
for i in range(6):
    lotto.add(random.randint(1,45))
print(lotto)
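# Editor's note (added, not part of the original snippet): because lotto is a
# set, duplicate draws are discarded, so fewer than 6 numbers may be printed.
# A minimal alternative that always yields 6 distinct numbers from 1 to 45:
#
#   lotto = sorted(random.sample(range(1, 46), 6))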
|
[
"kw78999@naver.com"
] |
kw78999@naver.com
|
222e0833d388b0280d65ff78eb7ee790a0581964
|
a9e3f3ad54ade49c19973707d2beb49f64490efd
|
/Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/common/djangoapps/student/role_helpers.py
|
ffe0f2c9f20f8f9d2d6244b6ab63b737d5bbcf22
|
[
"AGPL-3.0-only",
"AGPL-3.0-or-later",
"MIT"
] |
permissive
|
luque/better-ways-of-thinking-about-software
|
8c3dda94e119f0f96edbfe5ba60ca6ec3f5f625d
|
5809eaca7079a15ee56b0b7fcfea425337046c97
|
refs/heads/master
| 2021-11-24T15:10:09.785252
| 2021-11-22T12:14:34
| 2021-11-22T12:14:34
| 163,850,454
| 3
| 1
|
MIT
| 2021-11-22T12:12:31
| 2019-01-02T14:21:30
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 1,412
|
py
|
"""
Helpers for student roles
"""
from openedx.core.djangoapps.django_comment_common.models import (
FORUM_ROLE_ADMINISTRATOR,
FORUM_ROLE_COMMUNITY_TA,
FORUM_ROLE_GROUP_MODERATOR,
FORUM_ROLE_MODERATOR,
Role
)
from common.djangoapps.student.roles import (
CourseBetaTesterRole,
CourseInstructorRole,
CourseStaffRole,
GlobalStaff,
OrgInstructorRole,
OrgStaffRole
)
def has_staff_roles(user, course_key):
"""
Return true if a user has any of the following roles
Staff, Instructor, Beta Tester, Forum Community TA, Forum Group Moderator, Forum Moderator, Forum Administrator
"""
forum_roles = [FORUM_ROLE_COMMUNITY_TA, FORUM_ROLE_GROUP_MODERATOR,
FORUM_ROLE_MODERATOR, FORUM_ROLE_ADMINISTRATOR]
is_staff = CourseStaffRole(course_key).has_user(user)
is_instructor = CourseInstructorRole(course_key).has_user(user)
is_beta_tester = CourseBetaTesterRole(course_key).has_user(user)
is_org_staff = OrgStaffRole(course_key.org).has_user(user)
is_org_instructor = OrgInstructorRole(course_key.org).has_user(user)
is_global_staff = GlobalStaff().has_user(user)
has_forum_role = Role.user_has_role_for_course(user, course_key, forum_roles)
if any([is_staff, is_instructor, is_beta_tester, is_org_staff,
is_org_instructor, is_global_staff, has_forum_role]):
return True
return False
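# Editor's note (added): a hedged usage sketch, not part of the original module.
# CourseKey.from_string comes from the opaque_keys package that edx-platform
# depends on (an assumption here; it is not imported in this file):
#
#   from opaque_keys.edx.keys import CourseKey
#   course_key = CourseKey.from_string("course-v1:edX+DemoX+Demo_Course")
#   if has_staff_roles(request.user, course_key):
#       pass  # show staff-only UI, for example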
|
[
"rafael.luque@osoco.es"
] |
rafael.luque@osoco.es
|
42da6266333064addf16717b63ea75a0c5172536
|
b09a3f8ba43d7a1500994228ffc40cfc7eff3ec8
|
/packages/influxdb-2.4.0/influxdb/client.py
|
007115fc938e0766fd6c4349b8e01c3bffe32d6b
|
[] |
no_license
|
kotavi/robotframework_tests
|
a2641ecd1797ea87468b63272cfdd0d615845c4e
|
c922dd858a4bf0885bfe847c0e83ebac3095e0b5
|
refs/heads/master
| 2020-05-27T22:22:00.746762
| 2017-03-01T20:55:38
| 2017-03-01T20:55:38
| 83,595,785
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 28,635
|
py
|
# -*- coding: utf-8 -*-
"""
Python client for InfluxDB
"""
from functools import wraps
import json
import socket
import random
import requests
import requests.exceptions
from sys import version_info
from influxdb.resultset import ResultSet
try:
xrange
except NameError:
xrange = range
if version_info[0] == 3:
from urllib.parse import urlparse
else:
from urlparse import urlparse
class InfluxDBClientError(Exception):
"""Raised when an error occurs in the request."""
def __init__(self, content, code):
if isinstance(content, type(b'')):
content = content.decode('UTF-8', errors='replace')
super(InfluxDBClientError, self).__init__(
"{0}: {1}".format(code, content))
self.content = content
self.code = code
class InfluxDBServerError(Exception):
"""Raised when a server error occurs."""
def __init__(self, content):
super(InfluxDBServerError, self).__init__(content)
class InfluxDBClient(object):
"""The :class:`~.InfluxDBClient` object holds information necessary to
connect to InfluxDB. Requests can be made to InfluxDB directly through
the client.
:param host: hostname to connect to InfluxDB, defaults to 'localhost'
:type host: str
:param port: port to connect to InfluxDB, defaults to 8086
:type port: int
:param username: user to connect, defaults to 'root'
:type username: str
:param password: password of the user, defaults to 'root'
:type password: str
:param database: database name to connect to, defaults to None
:type database: str
:param ssl: use https instead of http to connect to InfluxDB, defaults to
False
:type ssl: bool
:param verify_ssl: verify SSL certificates for HTTPS requests, defaults to
False
:type verify_ssl: bool
:param timeout: number of seconds Requests will wait for your client to
establish a connection, defaults to None
:type timeout: int
:param use_udp: use UDP to connect to InfluxDB, defaults to False
:type use_udp: int
:param udp_port: UDP port to connect to InfluxDB, defaults to 4444
:type udp_port: int
"""
def __init__(self,
host='localhost',
port=8086,
username='root',
password='root',
database=None,
ssl=False,
verify_ssl=False,
timeout=None,
use_udp=False,
udp_port=4444,
):
"""Construct a new InfluxDBClient object."""
self._host = host
self._port = port
self._username = username
self._password = password
self._database = database
self._timeout = timeout
self._verify_ssl = verify_ssl
self.use_udp = use_udp
self.udp_port = udp_port
self._session = requests.Session()
if use_udp:
self.udp_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self._scheme = "http"
if ssl is True:
self._scheme = "https"
self._baseurl = "{0}://{1}:{2}".format(
self._scheme,
self._host,
self._port)
self._headers = {
'Content-type': 'application/json',
'Accept': 'text/plain'}
@staticmethod
def from_DSN(dsn, **kwargs):
"""Return an instance of :class:`~.InfluxDBClient` from the provided
data source name. Supported schemes are "influxdb", "https+influxdb"
and "udp+influxdb". Parameters for the :class:`~.InfluxDBClient`
constructor may also be passed to this method.
:param dsn: data source name
:type dsn: string
:param kwargs: additional parameters for `InfluxDBClient`
:type kwargs: dict
:raises ValueError: if the provided DSN has any unexpected values
:Example:
>>> cli = InfluxDBClient.from_DSN('influxdb://username:password@\
localhost:8086/databasename', timeout=5)
>>> type(cli)
<class 'influxdb.client.InfluxDBClient'>
>>> cli = InfluxDBClient.from_DSN('udp+influxdb://username:pass@\
localhost:8086/databasename', timeout=5, udp_port=159)
>>> print('{0._baseurl} - {0.use_udp} {0.udp_port}'.format(cli))
http://localhost:8086 - True 159
.. note:: parameters provided in `**kwargs` may override dsn parameters
.. note:: when using "udp+influxdb" the specified port (if any) will
be used for the TCP connection; specify the UDP port with the
additional `udp_port` parameter (cf. examples).
"""
init_args = {}
conn_params = urlparse(dsn)
scheme_info = conn_params.scheme.split('+')
if len(scheme_info) == 1:
scheme = scheme_info[0]
modifier = None
else:
modifier, scheme = scheme_info
if scheme != 'influxdb':
raise ValueError('Unknown scheme "{}".'.format(scheme))
if modifier:
if modifier == 'udp':
init_args['use_udp'] = True
elif modifier == 'https':
init_args['ssl'] = True
else:
raise ValueError('Unknown modifier "{}".'.format(modifier))
if conn_params.hostname:
init_args['host'] = conn_params.hostname
if conn_params.port:
init_args['port'] = conn_params.port
if conn_params.username:
init_args['username'] = conn_params.username
if conn_params.password:
init_args['password'] = conn_params.password
if conn_params.path and len(conn_params.path) > 1:
init_args['database'] = conn_params.path[1:]
init_args.update(kwargs)
return InfluxDBClient(**init_args)
def switch_database(self, database):
"""Change the client's database.
:param database: the name of the database to switch to
:type database: str
"""
self._database = database
def switch_user(self, username, password):
"""Change the client's username.
:param username: the username to switch to
:type username: str
:param password: the password for the username
:type password: str
"""
self._username = username
self._password = password
def request(self, url, method='GET', params=None, data=None,
expected_response_code=200):
"""Make a HTTP request to the InfluxDB API.
:param url: the path of the HTTP request, e.g. write, query, etc.
:type url: str
:param method: the HTTP method for the request, defaults to GET
:type method: str
:param params: additional parameters for the request, defaults to None
:type params: dict
:param data: the data of the request, defaults to None
:type data: str
:param expected_response_code: the expected response code of
the request, defaults to 200
:type expected_response_code: int
:returns: the response from the request
:rtype: :class:`requests.Response`
:raises InfluxDBClientError: if the response code is not the
same as `expected_response_code`
"""
url = "{0}/{1}".format(self._baseurl, url)
if params is None:
params = {}
auth = {
'u': self._username,
'p': self._password
}
params.update(auth)
if data is not None and not isinstance(data, str):
data = json.dumps(data)
# Try to send the request a maximum of three times. (see #103)
# TODO (aviau): Make this configurable.
for i in range(0, 3):
try:
response = self._session.request(
method=method,
url=url,
params=params,
data=data,
headers=self._headers,
verify=self._verify_ssl,
timeout=self._timeout
)
break
except requests.exceptions.ConnectionError as e:
if i < 2:
continue
else:
raise e
if response.status_code == expected_response_code:
return response
else:
raise InfluxDBClientError(response.content, response.status_code)
def write(self, data, params=None, expected_response_code=204):
"""Write data to InfluxDB.
:param data: the data to be written
:type data: dict
:param params: additional parameters for the request, defaults to None
:type params: dict
:param expected_response_code: the expected response code of the write
operation, defaults to 204
:type expected_response_code: int
:returns: True, if the write operation is successful
:rtype: bool
"""
self.request(
url="write",
method='POST',
params=params,
data=data,
expected_response_code=expected_response_code
)
return True
def query(self,
query,
params={},
expected_response_code=200,
database=None):
"""Send a query to InfluxDB.
:param query: the actual query string
:type query: str
:param params: additional parameters for the request, defaults to {}
:type params: dict
:param expected_response_code: the expected status code of response,
defaults to 200
:type expected_response_code: int
:param database: database to query, defaults to None
:type database: str
:returns: the queried data
:rtype: :class:`~.ResultSet`
"""
params['q'] = query
params['db'] = database or self._database
response = self.request(
url="query",
method='GET',
params=params,
data=None,
expected_response_code=expected_response_code
)
data = response.json()
return ResultSet(data)
def write_points(self,
points,
time_precision=None,
database=None,
retention_policy=None,
tags=None,
batch_size=None,
):
"""Write to multiple time series names.
:param points: the list of points to be written in the database
:type points: list of dictionaries, each dictionary represents a point
:param time_precision: Either 's', 'm', 'ms' or 'u', defaults to None
:type time_precision: str
:param database: the database to write the points to. Defaults to
the client's current database
:type database: str
:param tags: a set of key-value pairs associated with each point. Both
keys and values must be strings. These are shared tags and will be
merged with point-specific tags, defaults to None
:type tags: dict
:param retention_policy: the retention policy for the points. Defaults
to None
:type retention_policy: str
:param batch_size: value to write the points in batches
instead of all at one time. Useful for when doing data dumps from
one database to another or when doing a massive write operation,
defaults to None
:type batch_size: int
:returns: True, if the operation is successful
:rtype: bool
.. note:: if no retention policy is specified, the default retention
policy for the database is used
"""
if batch_size and batch_size > 0:
for batch in self._batches(points, batch_size):
self._write_points(points=batch,
time_precision=time_precision,
database=database,
retention_policy=retention_policy,
tags=tags)
return True
else:
return self._write_points(points=points,
time_precision=time_precision,
database=database,
retention_policy=retention_policy,
tags=tags)
def _batches(self, iterable, size):
for i in xrange(0, len(iterable), size):
yield iterable[i:i + size]
def _write_points(self,
points,
time_precision,
database,
retention_policy,
tags):
if time_precision not in ['n', 'u', 'ms', 's', 'm', 'h', None]:
raise ValueError(
"Invalid time precision is given. "
"(use 'n', 'u', 'ms', 's', 'm' or 'h')")
if self.use_udp and time_precision and time_precision != 's':
raise ValueError(
"InfluxDB only supports seconds precision for udp writes"
)
data = {
'points': points
}
if time_precision:
data['precision'] = time_precision
if retention_policy:
data['retentionPolicy'] = retention_policy
if tags:
data['tags'] = tags
data['database'] = database or self._database
if self.use_udp:
self.send_packet(data)
else:
self.write(
data=data,
expected_response_code=204
)
return True
def get_list_database(self):
"""Get the list of databases in InfluxDB.
:returns: all databases in InfluxDB
:rtype: list of dictionaries
:Example:
>>> dbs = client.get_list_database()
>>> dbs
[{u'name': u'db1'}, {u'name': u'db2'}, {u'name': u'db3'}]
"""
return list(self.query("SHOW DATABASES")['databases'])
def create_database(self, dbname):
"""Create a new database in InfluxDB.
:param dbname: the name of the database to create
:type dbname: str
"""
self.query("CREATE DATABASE %s" % dbname)
def drop_database(self, dbname):
"""Drop a database from InfluxDB.
:param dbname: the name of the database to drop
:type dbname: str
"""
self.query("DROP DATABASE %s" % dbname)
def create_retention_policy(self, name, duration, replication,
database=None, default=False):
"""Create a retention policy for a database.
:param name: the name of the new retention policy
:type name: str
:param duration: the duration of the new retention policy.
Durations such as 1h, 90m, 12h, 7d, and 4w, are all supported
and mean 1 hour, 90 minutes, 12 hours, 7 day, and 4 weeks,
respectively. For infinite retention – meaning the data will
never be deleted – use 'INF' for duration.
The minimum retention period is 1 hour.
:type duration: str
:param replication: the replication of the retention policy
:type replication: str
:param database: the database for which the retention policy is
created. Defaults to current client's database
:type database: str
:param default: whether or not to set the policy as default
:type default: bool
"""
query_string = \
"CREATE RETENTION POLICY %s ON %s " \
"DURATION %s REPLICATION %s" % \
(name, database or self._database, duration, replication)
if default is True:
query_string += " DEFAULT"
self.query(query_string)
def alter_retention_policy(self, name, database=None,
duration=None, replication=None, default=None):
"""Mofidy an existing retention policy for a database.
:param name: the name of the retention policy to modify
:type name: str
:param database: the database for which the retention policy is
modified. Defaults to current client's database
:type database: str
:param duration: the new duration of the existing retention policy.
Durations such as 1h, 90m, 12h, 7d, and 4w, are all supported
and mean 1 hour, 90 minutes, 12 hours, 7 day, and 4 weeks,
respectively. For infinite retention – meaning the data will
never be deleted – use 'INF' for duration.
The minimum retention period is 1 hour.
:type duration: str
:param replication: the new replication of the existing
retention policy
:type replication: str
:param default: whether or not to set the modified policy as default
:type default: bool
.. note:: at least one of duration, replication, or default flag
should be set. Otherwise the operation will fail.
"""
query_string = (
"ALTER RETENTION POLICY {} ON {}"
).format(name, database or self._database)
if duration:
query_string += " DURATION {}".format(duration)
if replication:
query_string += " REPLICATION {}".format(replication)
if default is True:
query_string += " DEFAULT"
self.query(query_string)
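    # Editor's note (added): a hedged usage sketch, assuming a client whose
    # current database is 'mydb'; this lengthens the 'default' policy to four
    # weeks and keeps it as the default policy:
    #
    #   client = InfluxDBClient(database='mydb')
    #   client.alter_retention_policy('default', duration='4w', default=True)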
def get_list_retention_policies(self, database=None):
"""Get the list of retention policies for a database.
:param database: the name of the database, defaults to the client's
current database
:type database: str
:returns: all retention policies for the database
:rtype: list of dictionaries
:Example:
>>> ret_policies = client.get_list_retention_policies('my_db')
>>> ret_policies
[{u'default': True,
u'duration': u'0',
u'name': u'default',
u'replicaN': 1}]
"""
rsp = self.query(
"SHOW RETENTION POLICIES %s" % (database or self._database)
)
return list(rsp['results'])
def get_list_series(self, database=None):
"""Get the list of series for a database.
:param database: the name of the database, defaults to the client's
current database
:type database: str
:returns: all series in the specified database
:rtype: list of dictionaries
:Example:
>>> series = client.get_list_series('my_database')
>>> series
[{'name': u'cpu_usage',
'tags': [{u'_id': 1,
u'host': u'server01',
u'region': u'us-west'}]}]
"""
rsp = self.query("SHOW SERIES", database=database)
series = []
for serie in rsp.items():
series.append(
{
"name": serie[0][0],
"tags": list(serie[1])
}
)
return series
def get_list_users(self):
"""Get the list of all users in InfluxDB.
:returns: all users in InfluxDB
:rtype: list of dictionaries
:Example:
>>> users = client.get_list_users()
>>> users
[{u'admin': True, u'user': u'user1'},
{u'admin': False, u'user': u'user2'},
{u'admin': False, u'user': u'user3'}]
"""
return list(self.query("SHOW USERS")["results"])
def create_user(self, username, password):
"""Create a new user in InfluxDB
:param username: the new username to create
:type username: str
:param password: the password for the new user
:type password: str
"""
text = "CREATE USER {} WITH PASSWORD '{}'".format(username, password)
self.query(text)
def drop_user(self, username):
"""Drop an user from InfluxDB.
:param username: the username to drop
:type username: str
"""
text = "DROP USER {}".format(username)
self.query(text)
def set_user_password(self, username, password):
"""Change the password of an existing user.
:param username: the username who's password is being changed
:type username: str
:param password: the new password for the user
:type password: str
"""
text = "SET PASSWORD FOR {} = '{}'".format(username, password)
self.query(text)
def delete_series(self, id, database=None):
"""Delete series from a database.
:param id: the id of the series to be deleted
:type id: int
:param database: the database from which the series should be
deleted, defaults to client's current database
:type database: str
"""
database = database or self._database
self.query('DROP SERIES %s' % id, database=database)
def grant_admin_privileges(self, username):
"""Grant cluster administration privileges to an user.
:param username: the username to grant privileges to
:type username: str
.. note:: Only a cluster administrator can create/ drop databases
and manage users.
"""
text = "GRANT ALL PRIVILEGES TO {}".format(username)
self.query(text)
def revoke_admin_privileges(self, username):
"""Revoke cluster administration privileges from an user.
:param username: the username to revoke privileges from
:type username: str
.. note:: Only a cluster administrator can create/ drop databases
and manage users.
"""
text = "REVOKE ALL PRIVILEGES FROM {}".format(username)
self.query(text)
def grant_privilege(self, privilege, database, username):
"""Grant a privilege on a database to an user.
:param privilege: the privilege to grant, one of 'read', 'write'
or 'all'. The string is case-insensitive
:type privilege: str
:param database: the database to grant the privilege on
:type database: str
:param username: the username to grant the privilege to
:type username: str
"""
text = "GRANT {} ON {} TO {}".format(privilege,
database,
username)
self.query(text)
def revoke_privilege(self, privilege, database, username):
"""Revoke a privilege on a database from an user.
:param privilege: the privilege to revoke, one of 'read', 'write'
or 'all'. The string is case-insensitive
:type privilege: str
:param database: the database to revoke the privilege on
:type database: str
:param username: the username to revoke the privilege from
:type username: str
"""
text = "REVOKE {} ON {} FROM {}".format(privilege,
database,
username)
self.query(text)
def send_packet(self, packet):
"""Send an UDP packet.
:param packet: the packet to be sent
:type packet: dict
"""
data = json.dumps(packet)
byte = data.encode('utf-8')
self.udp_socket.sendto(byte, (self._host, self.udp_port))
class InfluxDBClusterClient(object):
"""The :class:`~.InfluxDBClusterClient` is the client for connecting
to a cluster of InfluxDB servers. It's basically a proxy to multiple
InfluxDBClients.
:param hosts: all hosts to be included in the cluster, each of which
should be in the format (address, port),
e.g. [('127.0.0.1', 8086), ('127.0.0.1', 9096)]. Defaults to
[('localhost', 8086)]
:type hosts: list of tuples
:param shuffle: whether the queries should hit servers evenly(randomly),
defaults to True
:type shuffle: bool
:param client_base_class: the base class for all clients in the cluster.
This parameter is used to enable the support of different client
types. Defaults to :class:`~.InfluxDBClient`
"""
def __init__(self,
hosts=[('localhost', 8086)],
username='root',
password='root',
database=None,
ssl=False,
verify_ssl=False,
timeout=None,
use_udp=False,
udp_port=4444,
shuffle=True,
client_base_class=InfluxDBClient,
):
self.clients = []
self.bad_clients = [] # Corresponding server has failures in history
self.shuffle = shuffle
for h in hosts:
self.clients.append(client_base_class(host=h[0], port=h[1],
username=username,
password=password,
database=database,
ssl=ssl,
verify_ssl=verify_ssl,
timeout=timeout,
use_udp=use_udp,
udp_port=udp_port))
for method in dir(client_base_class):
if method.startswith('_'):
continue
orig_func = getattr(client_base_class, method)
if not callable(orig_func):
continue
setattr(self, method, self._make_func(orig_func))
@staticmethod
def from_DSN(dsn, client_base_class=InfluxDBClient,
shuffle=True, **kwargs):
"""Same as :meth:`~.InfluxDBClient.from_DSN`, but supports
multiple servers.
:param shuffle: whether the queries should hit servers
evenly(randomly), defaults to True
:type shuffle: bool
:param client_base_class: the base class for all clients in the
cluster. This parameter is used to enable the support of
different client types. Defaults to :class:`~.InfluxDBClient`
:Example:
>>> cluster = InfluxDBClusterClient.from_DSN('influxdb://usr:pwd\
@host1:8086,usr:pwd@host2:8086/db_name', timeout=5)
>>> type(cluster)
<class 'influxdb.client.InfluxDBClusterClient'>
>>> cluster.clients
[<influxdb.client.InfluxDBClient at 0x7feb480295d0>,
<influxdb.client.InfluxDBClient at 0x7feb438ec950>]
"""
conn_params = urlparse(dsn)
netlocs = conn_params.netloc.split(',')
cluster_client = InfluxDBClusterClient(
hosts=[],
client_base_class=client_base_class,
shuffle=shuffle,
**kwargs)
for netloc in netlocs:
single_dsn = '%(scheme)s://%(netloc)s%(path)s' % (
{'scheme': conn_params.scheme,
'netloc': netloc,
'path': conn_params.path}
)
cluster_client.clients.append(client_base_class.from_DSN(
single_dsn,
**kwargs))
return cluster_client
def _make_func(self, orig_func):
@wraps(orig_func)
def func(*args, **kwargs):
if self.shuffle:
random.shuffle(self.clients)
clients = self.clients + self.bad_clients
for c in clients:
bad_client = False
try:
return orig_func(c, *args, **kwargs)
except InfluxDBClientError as e:
# Errors caused by user's requests, re-raise
raise e
except Exception as e:
# Errors that might caused by server failure, try another
bad_client = True
if c in self.clients:
self.clients.remove(c)
self.bad_clients.append(c)
finally:
if not bad_client and c in self.bad_clients:
self.bad_clients.remove(c)
self.clients.append(c)
raise InfluxDBServerError("InfluxDB: no viable server!")
return func
|
[
"tkorchak@mirantisits-MacBook-Pro-5.local"
] |
tkorchak@mirantisits-MacBook-Pro-5.local
|
2ff121a31149f14fe4136ebb182c991cdd5a7f65
|
2d528c884121b06da5cb14ba0905fbd953c80721
|
/problemas/admin.py
|
5e2988c8df1bba0b0bbe3990bb4ad58b916cdd87
|
[] |
no_license
|
edgarlatorre/dojopuzzles
|
d1fbf424c75ef3465279841066e9cfd22074cfea
|
520f69fdad4bb2b7993b384f206ab75f11f8d65e
|
refs/heads/master
| 2021-01-15T20:28:32.128110
| 2011-02-02T18:48:05
| 2011-02-02T18:48:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 173
|
py
|
from django.contrib import admin
from dojopuzzles.problemas.models import Problema, ProblemaUtilizado
admin.site.register(Problema)
admin.site.register(ProblemaUtilizado)
|
[
"rennerocha@gmail.com"
] |
rennerocha@gmail.com
|
9a7296f18544bfcc7226ab3b8ddab3d416190998
|
165c5ef99e6ee0cfe92f7dcfe97ea5da2b4ae2de
|
/app.py
|
22bb47e37de2e8e2b4e0dbeee355c805bf488b79
|
[] |
no_license
|
kmicinski/terrible-web-app
|
5751ab019cbd17e9d6fb4917b1cf1154f9f3c186
|
8bd14efe707a06eeb653aba6897da21d17d700c0
|
refs/heads/master
| 2020-03-09T15:06:45.145599
| 2018-04-10T00:53:36
| 2018-04-10T00:53:36
| 128,851,687
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,011
|
py
|
# Starter code for 311 web security in-class demos. We'll use this for
# SQLi, XSS, and XSRF.
#
# First, install flask
# pip install flask
#
# Then, to initialize the database, run the following:
# python app.py init
# Then run the app itself as:
# python app.py
#
# Largely taken from https://gist.github.com/hackeris/fa2bfd20e6bec08c8d5240efe87d4687
import os
import sqlite3
import sys
from flask import Flask
from flask import redirect
from flask import request
from flask import session
from jinja2 import Template
app = Flask(__name__)
app.secret_key = 'schrodinger cat'
DATABASE_PATH = os.path.join(os.path.dirname(__file__), 'database.db')
def connect_db():
return sqlite3.connect(DATABASE_PATH)
def create_tables():
conn = connect_db()
cur = conn.cursor()
cur.execute('''
CREATE TABLE IF NOT EXISTS user(
id INTEGER PRIMARY KEY AUTOINCREMENT,
username VARCHAR(32),
password VARCHAR(32)
)''')
cur.execute('''
CREATE TABLE IF NOT EXISTS time_line(
id INTEGER PRIMARY KEY AUTOINCREMENT,
user_id INTEGER,
content TEXT,
FOREIGN KEY (`user_id`) REFERENCES `user`(`id`)
)''')
conn.commit()
conn.close()
def init_data():
users = [
('user1', '123456'),
('user2', '123456')
]
lines = [
(1, 'First Post'),
(1, 'Another Post'),
(2, 'Here\'s my third post'),
(2, 'Last post here.')
]
conn = connect_db()
cur = conn.cursor()
cur.executemany('INSERT INTO `user` VALUES(NULL,?,?)', users)
cur.executemany('INSERT INTO `time_line` VALUES(NULL,?,?)', lines)
conn.commit()
conn.close()
def init():
create_tables()
init_data()
def get_user_from_username_and_password(username, password):
conn = connect_db()
cur = conn.cursor()
# ...
cur.execute('SELECT id, username FROM `user` WHERE username=\'%s\' AND password=\'%s\'' % (username, password))
row = cur.fetchone()
conn.commit()
conn.close()
return {'id': row[0], 'username': row[1]} if row is not None else None
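# Editor's note (added, not part of the original demo): the query above is built
# by string formatting on purpose; it is the SQL-injection target of this
# exercise (e.g. a password of  ' OR '1'='1  logs in without valid credentials).
# A hardened variant would use sqlite3 parameter substitution instead:
#
#   cur.execute('SELECT id, username FROM `user` WHERE username=? AND password=?',
#               (username, password))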
def create_user(username, password):
conn = connect_db()
cur = conn.cursor()
# ...
cur.execute('INSERT INTO `user` VALUES(NULL,?,?)', [username, password])
row = cur.fetchone()
conn.commit()
conn.close()
return {'id': row[0], 'username': row[1]} if row is not None else None
def get_user_from_id(uid):
conn = connect_db()
cur = conn.cursor()
# ...
cur.execute('SELECT id, username FROM `user` WHERE id=%d' % uid)
row = cur.fetchone()
conn.commit()
conn.close()
return {'id': row[0], 'username': row[1]}
def create_time_line(uid, content):
conn = connect_db()
cur = conn.cursor()
# ...
stmt = 'INSERT INTO `time_line` VALUES (NULL,' + str(uid) + ",\'" + content + '\')'
print(stmt)
cur.executescript(stmt)
row = cur.fetchone()
conn.commit()
conn.close()
return row
def get_time_lines():
conn = connect_db()
cur = conn.cursor()
cur.execute('SELECT id, user_id, content FROM `time_line` ORDER BY id DESC')
rows = cur.fetchall()
conn.commit()
conn.close()
return map(lambda row: {'id': row[0],
'user_id': row[1],
'content': row[2],
'username': get_user_from_id(row[1])['username']},
rows)
def user_delete_time_line_of_id(uid, tid):
conn = connect_db()
cur = conn.cursor()
cur.execute('DELETE FROM `time_line` WHERE user_id=%s AND id=%s' % (uid, tid))
conn.commit()
conn.close()
def render_login_page():
return '''
<div style="width: 500px; margin: 80px auto;">
<center><h1> Welcome to FakeTwitter! </h1></center>
<form method="POST" style="margin: 60px auto; width: 140px;">
<p>Username: <input name="username" type="text" /></p>
<p>Password: <input name="password" type="password" /></p>
<p><input value="Login" type="submit" /></p>
</form>
<a href="/create_account">Create account</a>
</div>
'''
def render_create_account():
return '''
<div style="width: 500px; margin: 80px auto;">
<center><h1> Welcome to FakeTwitter! </h1></center>
<p>Select your username and password</p>
<form method="POST" style="margin: 60px auto; width: 140px;">
<p>Username: <input name="username" type="text" /></p>
<p>Password: <input name="password" type="password" /></p>
<p><input value="Login" type="submit" /></p>
</form>
</div>
'''
def render_home_page(uid):
user = get_user_from_id(uid)
time_lines = get_time_lines()
template = Template('''
<div style="width: 400px; margin: 80px auto; ">
<h4>Logged in as {{ user['username'] }}</h4>
<a href="/logout">Logout</a>
<form method="POST" action="/create_time_line">
Add time line:
<input type="text" name="content" />
<input type="submit" value="Submit" />
</form>
<ul style="border-top: 1px solid #ccc;">
{% for line in time_lines %}
<li style="border-top: 1px solid #efefef;">
<p>{{ line['content'] }} -- {{line['username']}}</p>
{% if line['user_id'] == user['id'] %}
<a href="/delete/time_line/{{ line['id'] }}">Delete</a>
{% endif %}
</li>
{% endfor %}
</ul>
</div>
''')
return template.render(user=user, time_lines=time_lines)
def do_login(user):
if user is not None:
session['uid'] = user['id']
return redirect('/')
else:
return redirect('/login')
@app.route('/create_account', methods=['GET', 'POST'])
def create_account():
if request.method == 'GET':
return render_create_account()
elif request.method == 'POST':
username = request.form['username']
password = request.form['password']
user = create_user(username, password)
return do_login(user)
@app.route('/')
def index():
if 'uid' in session:
return render_home_page(session['uid'])
return redirect('/login')
@app.route('/login', methods=['GET', 'POST'])
def login():
if request.method == 'GET':
return render_login_page()
elif request.method == 'POST':
username = request.form['username']
password = request.form['password']
user = get_user_from_username_and_password(username, password)
return do_login(user)
@app.route('/create_time_line', methods=['POST'])
def time_line():
if 'uid' in session:
uid = session['uid']
create_time_line(uid, request.form['content'])
return redirect('/')
@app.route('/delete/time_line/<tid>')
def delete_time_line(tid):
# ...
user_delete_time_line_of_id(session['uid'], tid)
return redirect('/')
@app.route('/logout')
def logout():
if 'uid' in session:
session.pop('uid')
return redirect('/login')
if len(sys.argv) > 1 and sys.argv[1] == "init":
init()
exit(0)
if __name__ == '__main__':
app.run(debug=True)
|
[
"krismicinski@gmail.com"
] |
krismicinski@gmail.com
|
d016351350bb59868b7dd83358e1456fe8da1484
|
f293856327d9dfdbb38238ddb904a55b9077df9e
|
/app.py
|
61c5836a3e1295d2270f2f78400862122db2994f
|
[] |
no_license
|
ANSH3LL/Tasks-todo
|
a4038d0430f29700151fc68a7cb01c54a1d14012
|
d61c2335d9b2c2add782a9ff4c8f9243c9b6a30e
|
refs/heads/master
| 2020-12-09T17:40:45.432979
| 2020-01-12T10:26:54
| 2020-01-12T10:26:54
| 233,372,824
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,641
|
py
|
import os
import bcrypt
from flask import (Flask, render_template, request, url_for, session, redirect, flash, jsonify)
import database
app = Flask(__name__)
app.config['SECRET_KEY'] = os.urandom(32)
db = database.Database('store.db')
@app.route('/')
def index():
return render_template('index.html')
@app.route('/checkuname', methods = ['POST'])
def checkuname():
uname = request.json['uname']
if db.get_user(uname): response = False
else: response = True
return jsonify({'availability': response})
@app.route('/signin', methods = ['POST'])
def signin():
uname = request.form.get('uname')
passwd = request.form.get('passwd')
entry = db.get_user(uname)
if entry:
if bcrypt.checkpw(passwd.encode('utf-8'), entry[0].encode('utf-8')):
session['uname'] = uname
session['logged_in'] = True
else: flash('Wrong password')
else: flash('Wrong username')
return redirect(url_for('index'))
@app.route('/signup', methods = ['POST'])
def signup():
uname = request.form.get('uname')
passwd = request.form.get('passwd')
pwd = bcrypt.hashpw(passwd.encode('utf-8'), bcrypt.gensalt())
success = db.new_user(uname, pwd)
if not success: flash('Registration failure')
else:
session['uname'] = uname
session['logged_in'] = True
return redirect(url_for('index'))
@app.route('/signout')
def signout():
session['uname'] = ''
session['logged_in'] = False
return redirect(url_for('index'))
@app.route('/gettasks')
def gettasks():
if session.get('logged_in'):
tasks = db.get_tasks(session['uname'])
else:
tasks = []
return jsonify(tasks)
@app.route('/submit', methods = ['POST'])
def submit():
if session.get('logged_in'):
data = request.json
selection = data['sel']
payload = data['pload']
if selection == 1:#new task added
db.save_task(session['uname'], payload['id'], payload['text'])
elif selection == 2:#task checked/unchecked
db.checked_task(session['uname'], payload['id'], int(payload['checked']))
elif selection == 3:#task edited
db.text_changed(session['uname'], payload['id'], payload['text'])
elif selection == 4:#task deleted
db.del_task(session['uname'], payload['id'])
else:
            print('error in tasks submission')
return jsonify({'success': True})
if __name__ == '__main__':
db.open_db()
app.run(host = '0.0.0.0', port = 80, debug = True)
db.close_db()
|
[
"noreply@github.com"
] |
ANSH3LL.noreply@github.com
|
879fcdafc92b2ed903f514985a299a1d0d579ed1
|
f3ad34ae8cc5652ca6391ae7d7431cb933d0edda
|
/manage.py
|
1d98914f9e12799a2636f4e5d88d967d309fec05
|
[] |
no_license
|
CesarHdez/web_playground
|
4485900ee77f1d7c2b62f76548e5fe911ee5789d
|
59dd17611e1286b0327b2161d8b98e5915186e12
|
refs/heads/master
| 2023-02-24T06:10:55.769253
| 2021-01-31T10:12:35
| 2021-01-31T10:12:35
| 317,523,177
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 546
|
py
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "web_playground.settings")
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
|
[
"chdez293@gmail.com"
] |
chdez293@gmail.com
|
0e37f6d397bc4a8be4ef6ac099eba511e2fe2d58
|
f0632c3930dfb8fc1e96607c673c28f1704dfe6c
|
/SS_builder.py
|
9daabb1879fa24014833d07db03f41297dc27ea8
|
[] |
no_license
|
koshippy/SanctuarySaver
|
b0de4d61bda12377293a95ba52957f40aa766d37
|
8b9f20f1ed9d5498a5ead66281383c0232444edc
|
refs/heads/master
| 2020-05-31T23:56:04.720034
| 2015-02-19T22:03:18
| 2015-02-19T22:03:18
| 30,993,331
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,128
|
py
|
import itertools
hotkeys = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', '{NUMPAD0}', '{NUMPAD1}', '{NUMPAD2}', '{NUMPAD3}', '{NUMPAD4}', '{NUMPAD5}', '{NUMPAD6}', '{NUMPAD7}', '{NUMPAD8}', '{NUMPAD9}', '{NUMPADMULT}', '{NUMPADADD}', '{NUMPADSUB}', '{NUMPADDIV}', '{NUMPADDOT}']
hotkey_template = '''
HotKeySet("HOTKEY_HERE", 'HOTKEY_FUNCTIONNAME')
Func HOTKEY_FUNCTIONNAME()
HotKeySet("HOTKEY_HERE")
Send("HOTKEY_HERE")
If Pause Then
Sleep(20)
Send("{ENTER}")
EndIf
HotKeySet("HOTKEY_HERE", 'HOTKEY_FUNCTIONNAME')
EndFunc
'''
tfile = '''Opt("WinTitleMatchMode",2)
Global $rpgwindow = WinGetHandle("SanctuaryRPG: Black Edition")
Global Pause = True
'''
names = itertools.permutations('abcdefghi')
for hotkey in hotkeys:
    tfile += hotkey_template.replace('HOTKEY_HERE',hotkey).replace('HOTKEY_FUNCTIONNAME',''.join(next(names)))
tfile += '''
HotKeySet("{F2}", '_exit')
HotKeySet("{F3}", '_pause')
Func _exit()
Exit
EndFunc
Func _pause()
Pause = NOT Pause
EndFunc
While 1
Sleep(1000)
WEnd'''
|
[
"kos.hippy@gmail.com"
] |
kos.hippy@gmail.com
|
fac3834fb69212ed28fea94b9c0e01739924aa40
|
73db0789f8f4b6cbc111def56880bd79d03171e5
|
/python/bin2pybytes.py
|
cb8b0c6998a574cce8d4f64577141ad6f5261c3a
|
[
"BSD-2-Clause"
] |
permissive
|
Matir/hacks
|
fa23cd28bf52234afc6d2837556263803c53ec6a
|
a11173ef60ff777a8a241ec5f936ee71223adceb
|
refs/heads/main
| 2023-08-16T23:37:25.261647
| 2023-08-15T02:37:14
| 2023-08-15T02:37:14
| 216,129,440
| 13
| 7
|
BSD-2-Clause
| 2023-08-15T02:37:15
| 2019-10-19T00:52:31
|
C
|
UTF-8
|
Python
| false
| false
| 1,469
|
py
|
#!/usr/bin/env python
import argparse
import sys
def yield_chunks(fp, chunksize):
while True:
rv = fp.read(chunksize if chunksize else -1)
if not rv:
return None
yield rv
def main(argv):
parser = argparse.ArgumentParser()
parser.add_argument('-b', default=False, action='store_true',
help='Format output as bytes instead of string.')
parser.add_argument('-v', default='buf', help='Name of variable')
parser.add_argument('-l', default=80, type=int, help='maximum line length')
parser.add_argument(
'infile', default=sys.stdin.buffer, type=argparse.FileType('rb'),
nargs='?', help='File to convert to var')
parser.add_argument(
'outfile', default=sys.stdout, type=argparse.FileType('w'),
nargs='?', help='File to convert to var')
args = parser.parse_args(argv[1:])
min_line = len("{} += {}\"\"".format(args.v, 'b' if args.b else ''))
if args.l <= min_line:
chunksize = 0
else:
chunksize = (args.l - min_line)//4 # 4 output chars per byte
print("{} = {}\"\"".format(args.v, 'b' if args.b else ''),
file=args.outfile)
for chunk in yield_chunks(args.infile, chunksize):
data = "".join("\\x{:02x}".format(b) for b in chunk)
print("{} += {}\"{}\"".format(args.v, 'b' if args.b else '', data),
file=args.outfile)
if __name__ == '__main__':
main(sys.argv)
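# Editor's note (added): example invocation; the file names are illustrative
# only. This writes a Python 3 bytes literal named `buf`, wrapped at 72 columns,
# to shellcode.py:
#
#   python bin2pybytes.py -b -v buf -l 72 payload.bin shellcode.py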
|
[
"david@systemoverlord.com"
] |
david@systemoverlord.com
|
7b8f453324646c6b902cb64768be52e1efd1a116
|
1f6b4eccb56990578524b116e0a7de9e86c4edca
|
/leaf_conv1d_cv.py
|
45b00ea8b7749a6fe4d5771b3a6ab9b69567edbf
|
[] |
no_license
|
j12138/Leaf_Project
|
59d54c25cf22f67ddd6a3d9f5e1058fe0ba452b0
|
e908a2f18da4e66568eaf64f40884a58d02dc815
|
refs/heads/master
| 2022-03-21T15:56:49.697877
| 2019-12-02T23:03:11
| 2019-12-02T23:03:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,556
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 22 11:05:42 2018
@author: Dongyang
This script uses 10 fold cv to test the performance of this classifier on
the Swedish leaf dataset.
"""
from sklearn.model_selection import train_test_split
from keras.layers import Input, Dense, Dropout, BatchNormalization, add, Conv1D, Flatten, concatenate, MaxPooling1D, GaussianNoise
from keras.models import Model, load_model
from keras import backend as K
from keras import optimizers, losses, utils
from sklearn.preprocessing import RobustScaler, MinMaxScaler, StandardScaler, LabelEncoder, Normalizer, QuantileTransformer
from sklearn.metrics import accuracy_score
import numpy as np
from keras.callbacks import ModelCheckpoint
from keras.regularizers import l2
from sklearn import svm
from sklearn.neighbors import KNeighborsClassifier
from keras.layers.advanced_activations import PReLU
#------------------------------------------------------------------------------
# Read data
#------------------------------------------------------------------------------
target_dir = 'data//leaf//'
batchsize = 32
epochs = 60
cls = 15
size = 75 * 15
#leaf_data = np.zeros((size, 200))
#leaf_label = np.zeros(size)
#
#
#for i in range(cls):
# leaf_data[i*75:(i+1)*75] = np.load(target_dir + 'S_leaf_CCD{}.npy'.format(i+1))
# leaf_label[i*75:(i+1)*75] = i
leaf_data = np.load(target_dir + 'S_leaf_CCD.npy')
leaf_label = np.load(target_dir + 'S_leaf_label.npy')
# =============================================================================
# data = 'leaf_data_CCD_cv.npy'
# label = 'leaf_label_480_360.npy'
# leaf_data = np.load(target_dir + data)
# leaf_label = np.load(target_dir+ label)
# leaf_label = leaf_label -1
# cls = 30
# size = 340
# =============================================================================
# =============================================================================
#import csv
#target = r'data/100 leaves plant species/data_Mar_64.txt'
#
#leaf_Mar = []
#with open(target) as csvfile:
# readCSV = csv.reader(csvfile, delimiter=',')
# for row in readCSV:
# leaf_Mar.append(row)
#
#leaf_Mar = np.asarray(leaf_Mar)
#leaf_Mar = leaf_Mar[16:,1:].astype(float)
#
#target = r'data/100 leaves plant species/data_Sha_64.txt'
#leaf_Sha = []
#with open(target) as csvfile:
# readCSV = csv.reader(csvfile, delimiter=',')
# for row in readCSV:
# leaf_Sha.append(row)
#leaf_Sha = np.asarray(leaf_Sha)
#leaf_Sha = leaf_Sha[16:,1:].astype(float)
#
#
#target = r'data/100 leaves plant species/data_Tex_64.txt'
#
#leaf_Tex = []
#with open(target) as csvfile:
# readCSV = csv.reader(csvfile, delimiter=',')
# for row in readCSV:
# leaf_Tex.append(row)
#
#leaf_Tex = np.asarray(leaf_Tex)
#leaf_Tex = leaf_Tex[15:,1:].astype(float)
#
#leaf_label = np.zeros(1584)
#
#for i in range(99):
# leaf_label[16*i:16*i+16] = i
#
#cls=99
#size = 1584
#
#
##leaf_data = np.hstack([leaf_Sha, leaf_Tex , leaf_Mar])
#leaf_data = np.hstack([leaf_Sha, leaf_Sha , leaf_Sha])
# =============================================================================
#leaf_data = leaf_Sha
#------------------------------------------------------------------------------
# Some Util functions
def preprocess(train, test, flag = True):
if flag:
scaler = StandardScaler().fit(train)
# scaler = MinMaxScaler(feature_range=(-1, 1)).fit(train)
train = scaler.transform(train)
test = scaler.transform(test)
return train, test
from sklearn.decomposition import PCA
def addpca(train, test, comp = 40):
pre_pca = PCA(n_components=comp).fit(train) # using others than pca?
x_train = pre_pca.transform(train)
x_test = pre_pca.transform(test)
return x_train, x_test
#------------------------------------------------------------------------------
from sklearn.model_selection import StratifiedKFold, StratifiedShuffleSplit
Kf = 10
#skf = StratifiedKFold(n_splits=Kf, shuffle=True, random_state=333)
skf = StratifiedShuffleSplit(n_splits=Kf, test_size = 1./Kf, random_state = 0)
cv_acc_svm = np.zeros(Kf)
cv_acc_knn = np.zeros(Kf)
cv_acc = np.zeros(Kf)
i=0
for train_index, test_index in skf.split(leaf_data, leaf_label):
x_train, x_test = leaf_data[train_index], leaf_data[test_index]
y_train, y_test = leaf_label[train_index], leaf_label[test_index]
# aug_flag = False
# if aug_flag:
# x_train = np.vstack((x_train,
# np.flip(x_train, axis = 1)
# ))
# y_train = np.hstack((y_train, y_train))
#
# x_train = np.vstack((np.roll(x_train, 5, axis = 1),
# np.roll(x_train, -5, axis = 1)))
# y_train = np.hstack((y_train, y_train))
x_train_stack = x_train
x_test_stack = x_test
y_train = utils.to_categorical(y_train, cls)
y_test = utils.to_categorical(y_test, cls)
# normalization
#scaler = MinMaxScaler(feature_range=(0, 1)).fit(x_train_stack)
#scaler = QuantileTransformer().fit(x_train_stack)
scaler = StandardScaler().fit(x_train_stack)
x_train_std = scaler.transform(x_train_stack)
x_test_std = scaler.transform(x_test_stack)
#------------------------------------------------------------------------------
# Model
#------------------------------------------------------------------------------
input_dim = x_train_std.shape[1]
feature = Input(shape = (input_dim, 1))
x = GaussianNoise(0.01)(feature)
x = Conv1D(filters= 16, kernel_size = 8, strides=4, padding='same', dilation_rate=1,
activation='relu', use_bias=True, kernel_initializer='glorot_uniform',
bias_initializer='zeros', kernel_regularizer=None, bias_regularizer=None,
activity_regularizer=None, kernel_constraint=None, bias_constraint=None,
name = 'conv1D_1')(x)
x = BatchNormalization()(x)
# x = PReLU()(x)
x = MaxPooling1D(pool_size=2, strides=2, name = 'MP_1')(x)
# x = Dropout(0.25)(x)
x = Flatten(name = 'flat_1')(x)
x_x = GaussianNoise(0.01)(feature)
x_x = Conv1D(filters= 24, kernel_size = 12, strides= 6, padding='same', dilation_rate=1,
activation='relu', use_bias=True, kernel_initializer='glorot_uniform',
bias_initializer='zeros', kernel_regularizer=None, bias_regularizer=None,
activity_regularizer=None, kernel_constraint=None, bias_constraint=None,
name = 'conv1D_2')(x_x)
x_x = BatchNormalization()(x_x)
# x_x = PReLU()(x_x)
x_x = MaxPooling1D(pool_size=2, strides=2, name = 'MP_2')(x_x)
# x_x = Dropout(0.25)(x_x)
x_x = Flatten()(x_x)
x_x_x = GaussianNoise(0.01)(feature)
x_x_x = Conv1D(filters= 32, kernel_size = 16, strides= 8, padding='same', dilation_rate=1,
activation='relu', use_bias=True, kernel_initializer='glorot_uniform',
bias_initializer='zeros', kernel_regularizer=None, bias_regularizer=None,
activity_regularizer=None, kernel_constraint=None, bias_constraint=None,
name = 'conv1D_3')(x_x_x)
x_x_x = BatchNormalization()(x_x_x)
# x_x_x = PReLU()(x_x_x)
x_x_x = MaxPooling1D(pool_size=2, strides=2, name = 'MP_3')(x_x_x)
# x_x_x = Dropout(0.25)(x_x_x)
x_x_x = Flatten()(x_x_x)
feature_f = GaussianNoise(0.01)(feature)
# feature_f = MaxPooling1D(pool_size=4, strides=2, name = 'MP_4')(feature_f)
# feature_f = Dropout(0.25)(feature_f)
feature_f = Flatten(name = 'flat_2')(feature_f)
#
x = concatenate([x, x_x, x_x_x, feature_f])
x = Dense(512, activation = 'linear', name = 'dense_1')(x)
x = BatchNormalization()(x)
x = PReLU()(x)
x = Dense(128, activation = 'linear', name = 'dense_2')(x) #increase the dimension here for better separation in stage2 ?
x = BatchNormalization()(x)
x = PReLU()(x)
x = Dropout(0.5)(x)
pred = Dense(cls, activation = 'softmax', name = 'dense_3')(x)
model = Model(feature, pred)
#best_model=EarlyStopping(monitor='val_loss', min_delta=0, patience=5, verbose=0, mode='auto')
best_model = ModelCheckpoint(target_dir+'leaf_conv1d_cv%d.hdf5' %i, monitor='val_loss',
verbose=0, save_best_only=True, save_weights_only=False,
mode='auto', period=1)
model.compile(loss = losses.categorical_crossentropy,
# optimizer = optimizers.Adam(),
optimizer = optimizers.SGD(lr=0.005, decay=1e-6, momentum=0.9, nesterov=True),
metrics = ['accuracy'])
x_train_std = x_train_std.reshape(x_train_std.shape[0], x_train_std.shape[1], 1)
x_test_std = x_test_std.reshape(x_test_std.shape[0], x_test_std.shape[1], 1)
history = model.fit(x=x_train_std, y=y_train,
batch_size = batchsize,
epochs = epochs, verbose = 0,
validation_split = 0.2,
# validation_data = (x_test_std, y_test),
callbacks=[best_model])
# cv_acc[i] = model.evaluate(x_test_std, y_test)[1]
#------------------------------------------------------------------------------
# A second stage classification with features pretrained from above network
#------------------------------------------------------------------------------
model_best = load_model(target_dir + 'leaf_conv1d_cv%d.hdf5' %i)
cv_acc[i] = model_best.evaluate(x_test_std, y_test)[1]
x_encoder = K.function([model_best.layers[0].input, K.learning_phase()],
[model_best.get_layer('dense_3').input])
yy_train = np.argmax(y_train, axis = 1)
xx_train = x_encoder([x_train_std, 0])[0]
xx_test = x_encoder([x_test_std, 0])[0]
xx_train_std, xx_test_std = preprocess(xx_train, xx_test)
#xx_train_std, xx_test_std = xx_train, xx_test
xx_train_pca, xx_test_pca = addpca(xx_train_std, xx_test_std, comp = 25)
# Using Knn for nonlinearity correction?
clf_2 = svm.SVC(C=1.0, cache_size=200, class_weight='balanced', coef0=0,
decision_function_shape='ovr', degree=1, gamma='auto', kernel='linear',
max_iter=-1, probability=True, random_state=None, shrinking=True,
tol=0.001, verbose=False)
clf_2.fit(xx_train_pca, yy_train)
cv_acc_svm[i] = accuracy_score(np.argmax(y_test, axis=1), clf_2.predict(xx_test_pca))
clf_knn = KNeighborsClassifier(n_neighbors=3, weights='uniform', algorithm='auto',
leaf_size=10, p=2, metric='chebyshev',
metric_params=None, n_jobs=1)
clf_knn.fit(xx_train_std, yy_train)
y_pred_knn = clf_knn.predict(xx_test_std)
cv_acc_knn[i] = accuracy_score(np.argmax(y_test, axis=1), y_pred_knn)
print('the %d th validation finished....' % i)
print('accuracy %.4f' % cv_acc[i])
i+=1
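# A minimal summary sketch: average the per-fold accuracies collected above
# for the CNN, the second-stage SVM and the kNN classifiers.
print('mean CNN accuracy: %.4f' % np.mean(cv_acc))
print('mean SVM accuracy: %.4f' % np.mean(cv_acc_svm))
print('mean kNN accuracy: %.4f' % np.mean(cv_acc_knn))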
|
[
"noreply@github.com"
] |
j12138.noreply@github.com
|
63a7fbed29b4435a93df7fae18bd166fecbf6d56
|
1f9644032c229b2041a5f36758b8c1265d195cc1
|
/dataManager/tests/test.py
|
2ff3f3b993ccc021c053e20f4a215f0b73bfcca8
|
[
"MIT"
] |
permissive
|
Pensarfeo/DataManager
|
3cad6e28cd3aa3db26fd2aab04fe11caa3da299d
|
20335b86f5bc47ab5fa7c6facfea701f085a0fe7
|
refs/heads/master
| 2020-06-04T23:22:02.822982
| 2019-07-02T15:48:14
| 2019-07-02T15:48:14
| 192,231,056
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,275
|
py
|
# Some basic tests
import numpy as np
# `tools` and `env` are expected to come from the surrounding project environment
DataManager = tools.dynamicImportDev('dataManager').Manager
# traininigDataFun, trainingData = tools.importData('training')
# trainingDataProvider = DataProvider(
# data = traininigDataFun,
# bz = env.training.bz,
# stochasticSampling = False,
# indexingShape = [trainingData.shape[0]]
# )
# print(np.array(trainingDataProvider(1)[0]).shape)
validationDataFun, validationData = tools.importData('validation')
print(len(validationData))
validationDataProvider = DataManager(
data = validationDataFun,
bz = env.training.bz,
stochasticSampling = False,
reshuffle = True,
indexingShape = [len(validationData)]
)
print(np.array(validationDataProvider(1)[1]).shape)
# prove that reshuffling works by
# 1) showing that we get the same number of indexes as the original indexes (minus batch-size leftovers)
# 2) showing that the data we get matches the index-equivalent entries in the original ordered data source
indxs = []
for i, data, indx in validationDataProvider:
print(np.array(data[1])[:, :, :, 0].shape, validationData[indx][:, :, :, 3].shape)
print(np.mean(np.array(data[1])[:, :, :, 0] == validationData[indx][:, :, :, 3]))
indxs += list(indx)
print(len(indxs), len(validationData) - len(validationData) % env.training.bz)  # these two counts should match
|
[
"pa.favuzzi@gmail.com"
] |
pa.favuzzi@gmail.com
|
670fe8df03baa050e48bad2abf3ef2e7f2e60071
|
384497dfcc4cbd789cd35fa3c4b75ececefcf765
|
/nuclear/message/build_outer_python_binding.py
|
d8f5568cb0777eeb40b1dc149e0958332e4b2bdb
|
[
"MIT"
] |
permissive
|
Fastcode/NUClearExample
|
061947f59f7b8beadbe7a6ab3965ec72927ba03d
|
1ce424a935a035df20bbc6d8018bd2bcd4194dd9
|
refs/heads/master
| 2020-12-08T09:32:46.561577
| 2018-12-16T00:24:48
| 2018-12-16T00:24:48
| 67,031,019
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,993
|
py
|
#!/usr/bin/python
import sys
import os
from generator.textutil import indent, dedent
# Get our file we are outputting too
base_file = sys.argv[1]
# Get our root message directory
message_dir = sys.argv[2]
# Get our list of functions we have to call
functions = []
duplicates = []
for dep_file in sys.argv[3:]:
with open(dep_file) as deps:
# Extract all dependencies for every message and place them in the list.
# Make all paths relative to the root message directory and remove any unwanted characters.
# Also remove Matrix.proto, Neutron.proto, and Vector.proto from the list and anything to do with google.
dependencies = [os.path.relpath(s.strip('\\ \n\t'), message_dir).replace('/', '_').replace('.proto', '_proto') for s in deps.readlines()
if not any(exclude in s for exclude in ['google/protobuf', 'Matrix.proto', 'Neutron.proto', 'Vector.proto'])]
# Finally, remove duplicates. We must keep the first instance of every message in the list.
for function in dependencies:
if function not in duplicates:
duplicates.append(function)
functions.append(function)
# Write our file
with open(base_file, 'w') as f:
f.write(dedent("""\
#include <pybind11/pybind11.h>
#include <pybind11/complex.h>
#include <pybind11/stl.h>
#include <pybind11/eigen.h>
// Declare our functions (that we know will be made later)
{function_declarations}
PYBIND11_PLUGIN(message) {{
pybind11::module module("message", "NUClear message classes");
// Initialise each of the modules
{function_calls}
return module.ptr();
}}
""").format(
function_declarations='\n'.join('void init_message_{}(pybind11::module& module);'.format(f) for f in functions),
function_calls=indent('\n'.join('init_message_{}(module);'.format(f) for f in functions)),
))
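# Illustrative shape of the generated file (a sketch only; the message names
# Foo_proto and Bar_proto are made up):
#   void init_message_Foo_proto(pybind11::module& module);
#   void init_message_Bar_proto(pybind11::module& module);
#   PYBIND11_PLUGIN(message) {
#       pybind11::module module("message", "NUClear message classes");
#       init_message_Foo_proto(module);
#       init_message_Bar_proto(module);
#       return module.ptr();
#   }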
|
[
"trent@houliston.me"
] |
trent@houliston.me
|
6009e18ce7b448841305b9ec9dd106d22a075c9a
|
5f509aec51f0df62691de51b5aa7ba1c64ec7455
|
/redis_map.py
|
17fb58ea49d9eae8fccd4310efc1cf3323112635
|
[] |
no_license
|
rfyiamcool/redis_map
|
233f86c2b9bede4c0e12eea73be232145b75da55
|
87c17dc923af9a29a7c8549d781870fb43579842
|
refs/heads/master
| 2020-03-18T17:51:47.209557
| 2018-05-27T15:05:14
| 2018-05-27T15:05:14
| 135,055,849
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,848
|
py
|
# coding:utf-8
from contextlib import contextmanager
from future.utils import python_2_unicode_compatible
from redis import StrictRedis
@python_2_unicode_compatible
class RedisDict(object):
def __init__(self, *args, **kwargs):
self.namespace = ''
if 'namespace' in kwargs:
# Todo validate namespace
self.namespace = kwargs['namespace'] + ':'
del kwargs['namespace']
self.expire = None
if 'expire' in kwargs:
self.expire = kwargs['expire']
del kwargs['expire']
self.redis = StrictRedis(*args, decode_responses=True, **kwargs)
self.sentinel_none = '<META __None__ 9cab>'
def _raw_get_item(self, k):
return self.redis.get(k)
def _get_item(self, k):
result = self._raw_get_item(self.namespace + k)
return result
def __getitem__(self, k):
result = self._get_item(k)
if result is None:
raise KeyError
return result if result != self.sentinel_none else None
def __setitem__(self, k, v):
if v is None:
v = self.sentinel_none
self.redis.set(self.namespace + k, v, ex=self.expire)
def __delitem__(self, k):
self.redis.delete(self.namespace + k)
def __contains__(self, k):
return self._get_item(k) is not None
def __repr__(self):
return str(self.to_dict())
def __str__(self):
return self.__repr__()
def __len__(self):
return len(self._keys())
def _scan_keys(self, search_term=''):
return self.redis.scan(match=self.namespace + search_term + '*')
def _keys(self, search_term=''):
return self._scan_keys(search_term)[1]
def keys(self):
to_rm = len(self.namespace)
return [item[to_rm:] for item in self._keys()]
def to_dict(self):
to_rm = len(self.namespace)
return {item[to_rm:]: self._raw_get_item(item) for item in self._keys()}
def chain_set(self, iterable, v):
self[':'.join(iterable)] = v
def chain_get(self, iterable):
return self[':'.join(iterable)]
def chain_del(self, iterable):
return self.__delitem__(':'.join(iterable))
@contextmanager
def expire_at(self, sec_epoch):
self.expire, temp = sec_epoch, self.expire
yield
self.expire = temp
def __iter__(self):
self.keys_iter = self.keys()
return self
def next(self):
return self.__next__()
def __next__(self):
try:
return self.keys_iter.pop()
except (IndexError, KeyError):
raise StopIteration
def multi_get(self, key):
found_keys = self._keys(key)
if len(found_keys) == 0:
return []
return self.redis.mget(found_keys)
def multi_chain_get(self, keys):
return self.multi_get(':'.join(keys))
def multi_dict(self, key):
keys = self._keys(key)
if len(keys) == 0:
return {}
to_rm = len(self.namespace)
return dict(zip([i[to_rm:] for i in keys], self.redis.mget(keys)))
def multi_del(self, key):
keys = self._keys(key)
if len(keys) == 0:
return 0
return self.redis.delete(*keys)
def items(self):
found_keys = self._keys()
to_rm = len(self.namespace)
return zip([item[to_rm:] for item in found_keys], self.redis.mget(found_keys))
class RedisListIterator(object):
def __init__(self, redis_instance, key, start=0, end=-1):
"""Creates a redis list iterator.
Args:
redis_instance (object): instance of redis
key (str): redis list key
start (int): list slice start (inclusive)
end (int): list slice end (exclusive)
"""
self.position = start
self.key = key
self.redis = redis_instance
llen = redis_instance.llen(key)
self.endpos = llen if (end == -1 or (end - start) > llen) else end
def __iter__(self):
return self
def __next__(self):
if self.position >= self.endpos:
raise StopIteration
item = self.redis.lindex(self.key, self.position)
self.position += 1
return item
next = __next__
class RedisList(object):
def __init__(self, redis_instance, key):
self.key = key
self.redis = redis_instance
def __len__(self):
return self.redis.llen(self.key)
def __getitem__(self, index):
if isinstance(index, slice):
start = index.start or 0
end = (index.stop - 1) if index.stop is not None else -1
return self.redis.lrange(self.key, start, end)
if index + 1 > len(self):
raise IndexError("Index out of bounds.")
return self.redis.lindex(self.key, index)
def __iter__(self):
return RedisListIterator(self.redis, self.key)
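# Minimal usage sketch (assumes a Redis server reachable with StrictRedis
# defaults; the 'session' namespace and keys below are illustrative only):
#   d = RedisDict(namespace='session', expire=60)
#   d['user:1'] = 'alice'              # stored under session:user:1 with a 60s TTL
#   d.chain_set(['user', '2'], 'bob')  # equivalent to d['user:2'] = 'bob'
#   print(d.keys(), d.to_dict())
#   log = RedisList(d.redis, 'session:log')
#   for entry in log:                  # iterates through RedisListIterator
#       print(entry)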
|
[
"rfyiamcool@163.com"
] |
rfyiamcool@163.com
|
aa6f7a1dc10673b7e011f05a3d017808b2e7e6ed
|
2dd72c5c0cf3dabea137f2035bb4c0e008c8eca7
|
/estante/admin.py
|
ccf55ee243e19830875a43085eed05062ac3fc9f
|
[] |
no_license
|
brunadelrio/biblio
|
fda7c70fbae9826c61a07e8c971f47244ac14164
|
f48662a1e103628ca8503a1c9f68d6f7ec9c9c98
|
refs/heads/master
| 2022-12-14T01:16:18.365982
| 2018-12-06T13:16:54
| 2018-12-06T13:16:54
| 160,677,171
| 0
| 0
| null | 2022-12-08T01:20:01
| 2018-12-06T13:15:45
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 296
|
py
|
from django.contrib import admin
# Register your models here.
from estante.models import Pessoa, Livro, Emprestimo
#from estante.models.livro import Livro
#from estante.models.emprestimo import Emprestimo
admin.site.register(Pessoa)
admin.site.register(Livro)
admin.site.register(Emprestimo)
|
[
"brunaadelrio@gmail.com"
] |
brunaadelrio@gmail.com
|
107461761e9c1e422537657ae98aa79ec31f418c
|
67058e187402f96b985ea5848da7693d8eb44b44
|
/rbm.py
|
c2c2aff25cd97a4059b90510942790730d738917
|
[] |
no_license
|
zheng6822/zheng6822.github.com
|
900be2d9c522693a09853f1041deb433953f212b
|
76082c269635c5bbdee382266d28730c4407275d
|
refs/heads/master
| 2020-12-25T00:29:02.190012
| 2012-09-30T14:46:10
| 2012-09-30T14:46:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 14,387
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Tue Sep 18 14:52:30 2012
@author: zhengxin
"""
#==============================================================================
# This file contains four kinds of RBMs: Gaussian-Bernoulli RBM,
# Bernoulli-Bernoulli RBM, Softmax RBM, and Discriminative RBM
#==============================================================================
from options import options
import numpy as np
import cudamat as cm
from softmax import softmax
import scipy.io
import os
class GaussianRBM(object):
def __init__(self, datafolder=None, num_hid=None, options=None):
if datafolder == None:
return
self.datafolder = datafolder
self.datalist = os.listdir(datafolder)
self.num_batchdata = len(self.datalist)
mdict = scipy.io.loadmat(os.path.join(datafolder, self.datalist[0]))
tempdata = mdict['data']
self.options = options
self.num_vis = tempdata.shape[0]
self.num_hid = num_hid
# print self.num_vis
# print self.num_hid
self.num_batches = tempdata.shape[1]/self.options.batchsize
self.batch_size = self.options.batchsize
self.doPCD = False
self.cdstep = 1
# initialize weights
self.W = cm.CUDAMatrix(0.01 * np.random.randn(self.num_vis, self.num_hid))
self.vb = cm.CUDAMatrix(np.zeros((self.num_vis,1)))# for a Gaussian RBM the visible bias is the mean of the visible layer
self.hb = cm.CUDAMatrix(np.zeros((self.num_hid,1)))
# initialize weights updates
self.dW = cm.CUDAMatrix(np.zeros((self.num_vis, self.num_hid)))
self.dvb = cm.CUDAMatrix(np.zeros((self.num_vis, 1)))
self.dhb = cm.CUDAMatrix(np.zeros((self.num_hid, 1)))
self.W_inc = cm.CUDAMatrix(np.zeros((self.num_vis, self.num_hid)))
self.vb_inc = cm.CUDAMatrix(np.zeros((self.num_vis,1)))
self.hb_inc = cm.CUDAMatrix(np.zeros((self.num_hid,1)))
# initialize temporary storage
self.v = cm.empty((self.num_vis, self.batch_size))# a batch of data
self.vm = cm.empty((self.num_vis, self.batch_size))# temp storage of data-vb
self.h = cm.empty((self.num_hid, self.batch_size))
self.r = cm.empty((self.num_hid, self.batch_size))# store random number in positive phase
self.r2 = cm.empty((self.num_vis, self.batch_size))# store random number in negative phase
def getCurrentBatch(self,mdict,batch):
# get current batch
batchdata = mdict['data'][:,batch*self.batch_size:(batch+1)*self.batch_size]
self.v = cm.CUDAMatrix(batchdata)
self.v_true = cm.CUDAMatrix(batchdata)
def applyMomentum(self):
# apply momentum
# maybe we can change it while processing
self.dW.mult(0)
self.dvb.mult(0)
self.dhb.mult(0)
self.W_inc.mult(self.options.momentum)
self.vb_inc.mult(self.options.momentum)
self.hb_inc.mult(self.options.momentum)
def hidActProb(self,vis, target):
# positive phase
# print self.W.shape
# print vis.shape
# print target.shape
cm.dot(self.W.T, vis, target = target)
target.add_col_vec(self.hb)
target.apply_sigmoid()
def visActProb(self):
# negative phase
cm.dot(self.W, self.h, target = self.v)
self.v.add_col_vec(self.vb)#now v = Wh + c
def CDstats(self, vis, hid, posphase=True):
multiplier = 1.0 if posphase else -1.0
self.dhb.add_sums(hid, 1, mult=multiplier)
if posphase:
#print 'posphase'
self.dW.add_dot(vis, hid.T)
self.vm.assign(vis)
self.vb.mult(-1)
self.vm.add_col_vec(self.vb)
self.vb.mult(-1)
self.dvb.add_sums(self.vm, 1, mult=multiplier)
else:
#print 'negphase'
self.dW.subtract_dot(vis,hid.T)
self.vm.assign(vis)
self.vb.mult(-1)
self.vm.add_col_vec(self.vb)
self.vb.mult(-1)
self.dvb.add_sums(self.vm, 1, mult=multiplier)
def sampleHid(self,r,target):
# sample hiddens
r.fill_with_rand()
r.less_than(target, target = target)
def sampleVis(self):
self.r2.fill_with_randn()
self.v.add(self.r2)
def CDn(self):
n = self.cdstep
self.hidActProb(self.v, self.h)
self.CDstats(self.v, self.h)
for i in range(n):
self.sampleHid(self.r,self.h)
self.visActProb()
self.sampleVis()
self.hidActProb(self.v, self.h)
self.CDstats(self.v, self.h, False)
def doOneStep(self):
if self.doPCD:
self.PCD()
else:
self.CDn()
self.updateWeights()
def updateWeights(self):
self.W_inc.add_mult(self.dW, self.options.eta/self.batch_size)
self.vb_inc.add_mult(self.dvb, self.options.eta/self.batch_size)
self.hb_inc.add_mult(self.dhb, self.options.eta/self.batch_size)
# update weights
self.W.add(self.W_inc)
self.vb.add(self.vb_inc)
self.hb.add(self.hb_inc)
def getReconErr(self):
self.v.subtract(self.v_true)
return self.v.euclid_norm()**2
def train(self):
for epoch in range(self.options.maxepoch):
err = []
for batchdata in range(self.num_batchdata):
mdict = scipy.io.loadmat(os.path.join(self.datafolder, self.datalist[batchdata]))
#data = mdict['data']
for batch in range(self.num_batches):
self.getCurrentBatch(mdict,batch)
self.doOneStep()
self.applyMomentum()
err.append(self.getReconErr()/(self.num_vis*self.batch_size))
print "Epoch " + str(epoch + 1)+" "+"Mean squared error: " + str(np.mean(err))
def getDataUpwards(self,loadfolder,savefolder):
# push the visible-layer data upwards to form a new set of data
# because of memory issues, we have to write each batch of data to disk, then read and combine them later
# batch mode receives data from the CPU and returns a matrix on the CPU
datalist = os.listdir(loadfolder)
batchsize = 4096
n = 0
for dataname in datalist:
name = os.path.join(loadfolder,dataname)
mdict = scipy.io.loadmat(name)
data = mdict['data']
labels = mdict['label']
# print labels.shape
numbatch = data.shape[1]/batchsize
for batch in range(numbatch):
#print 'batch %d/%d'%(n, numbatch*len(datalist))
batchdata = data[:,batch*batchsize:(batch+1)*batchsize]
batchlabels = labels[batch*batchsize:(batch+1)*batchsize]
temp = cm.empty((self.num_hid,batchdata.shape[1]))
vis = cm.CUDAMatrix(batchdata)
self.hidActProb(vis, temp)
temp.copy_to_host()
#topdata[:,batch*batchsize:(batch+1)*batchsize] = temp.numpy_array
mdict = {}
mdict['data'] = temp.numpy_array
mdict['label'] = batchlabels
scipy.io.savemat('%s/%d.mat'%(savefolder,n),mdict)
n = n+1
def getTestDataUpwards(self,data):
batchsize = 4096
numbatch = data.shape[1]/batchsize
topdata = np.zeros((self.num_hid,data.shape[1]))
for batch in range(numbatch):
batchdata = data[:,batch*batchsize:(batch+1)*batchsize]
temp = cm.empty((self.num_hid,batchdata.shape[1]))
vis = cm.CUDAMatrix(batchdata)
self.hidActProb(vis, temp)
temp.copy_to_host()
topdata[:,batch*batchsize:(batch+1)*batchsize] = temp.numpy_array
return topdata
def save(self,filename):
self.W.copy_to_host()
self.vb.copy_to_host()
self.hb.copy_to_host()
mdict = {}
mdict['type']='gauss'
mdict['W']=self.W.numpy_array
mdict['vb']=self.vb.numpy_array
mdict['hb']=self.hb.numpy_array
scipy.io.savemat(filename,mdict)
def load(self, filename):
mdict = scipy.io.loadmat(filename)
self.W = cm.CUDAMatrix(mdict['W'])
self.vb = cm.CUDAMatrix(mdict['vb'])
self.hb = cm.CUDAMatrix(mdict['hb'])
(self.num_vis, self.num_hid) = self.W.shape
class BinaryRBM(GaussianRBM):
def visActProb(self):
GaussianRBM.visActProb(self)
self.v.apply_sigmoid()
def CDstats(self, vis, hid, posphase=True):
multiplier = 1.0 if posphase else -1.0
self.dhb.add_sums(hid, 1, mult=multiplier)
self.dvb.add_sums(vis, 1, mult=multiplier)
if posphase:
self.dW.add_dot(vis, hid.T)
else:
self.dW.subtract_dot(vis,hid.T)
def sampleVis(self):
# sample hiddens
self.r2.fill_with_rand()
self.r2.less_than(self.v, target = self.v)# now v holds the sampled visible states
class SoftmaxRBM(BinaryRBM):
def hidActProb(self,vis, target):
cm.dot(self.W.T, vis, target = target)
target.add_col_vec(self.hb)
softmax(target)
class DiscriminativeRBM(GaussianRBM):
def __init__(self,datafolder=None,labels=None,numhid=None,options=None):
# the labels here are only used to determine the number of classes; they have no other practical use.
super(DiscriminativeRBM,self).__init__(datafolder,numhid,options)
self.labels = labels
self.num_class = self.getClassNum(labels)
#self.targets = self.getTargets()
if datafolder == None:
return
self.cW = cm.CUDAMatrix(0.01 * np.random.randn(self.num_class,self.num_hid))
self.cb = cm.CUDAMatrix(np.zeros((self.num_class,1)))
self.dcW = cm.CUDAMatrix(np.zeros((self.num_class,self.num_hid)))
self.dcb = cm.CUDAMatrix(np.zeros((self.num_class,1)))
self.cW_inc = cm.CUDAMatrix(np.zeros((self.num_class,self.num_hid)))
self.cb_inc = cm.CUDAMatrix(np.zeros((self.num_class,1)))
self.c = cm.empty((self.num_class,self.batch_size))
def getClassNum(self,labels):
self.labellist = np.unique(labels)
return len(self.labellist)
def getTargets(self,labels):
# create targets
targets = np.zeros((self.num_class,len(labels)))
#print targets.shape
for i in range(self.num_class):
for j in range(len(labels)):
if labels[j] == self.labellist[i]:
targets[i,j] = True
return targets
def applyMomentum(self):
super(DiscriminativeRBM,self).applyMomentum()
self.dcW.mult(0)
self.dcb.mult(0)
self.cW_inc.mult(self.options.momentum)
self.cb_inc.mult(self.options.momentum)
def hidActProb(self,vis, target):
# positive phase
cm.dot(self.W.T, vis, target = target)
target.add_dot(self.cW.T, self.c)
target.add_col_vec(self.hb)
target.apply_sigmoid()
def getCurrentBatch(self,mdict,batch):
super(DiscriminativeRBM,self).getCurrentBatch(mdict,batch)
#print mdict['label'].shape
batchlabels = mdict['label'][batch*self.batch_size:(batch+1)*self.batch_size]
batchtargets = self.getTargets(batchlabels)
self.c = cm.CUDAMatrix(batchtargets)
def CDstats(self, vis, hid, posphase=True):
multiplier = 1.0 if posphase else -1.0
self.dhb.add_sums(hid, 1, mult=multiplier)
self.dvb.add_sums(vis, 1, mult=multiplier)
if posphase:
self.dW.add_dot(vis, hid.T)
self.dcb.add_sums(self.c, 1, mult=1.0)
self.dcW.add_dot(self.c, hid.T)
else:
self.dW.subtract_dot(vis,hid.T)
self.dcb.add_sums(self.c, 1, mult=-1.0)
self.dcW.subtract_dot(self.c,hid.T)
def visActProb(self):
# negative phase
super(DiscriminativeRBM,self).visActProb()
self.v.apply_sigmoid()
cm.dot(self.cW, self.h, target = self.c)
self.c.add_col_vec(self.cb)
softmax(self.c)
def updateWeights(self):
super(DiscriminativeRBM,self).updateWeights()
self.cW_inc.add_mult(self.dcW, self.options.eta/self.batch_size)
self.cb_inc.add_mult(self.dcb, self.options.eta/self.batch_size)
self.cW.add(self.cW_inc)
self.cb.add(self.cb_inc)
def save(self,filename):
self.W.copy_to_host()
self.vb.copy_to_host()
self.hb.copy_to_host()
self.cb.copy_to_host()
self.cW.copy_to_host()
mdict = {}
mdict['type']='discriminative'
mdict['W']=self.W.numpy_array
mdict['vb']=self.vb.numpy_array
mdict['hb']=self.hb.numpy_array
mdict['cb']=self.cb.numpy_array
mdict['cW']=self.cW.numpy_array
scipy.io.savemat(filename,mdict)
def load(self, filename):
mdict = scipy.io.loadmat(filename)
self.W = cm.CUDAMatrix(mdict['W'])
self.vb = cm.CUDAMatrix(mdict['vb'])
self.hb = cm.CUDAMatrix(mdict['hb'])
self.cb = cm.CUDAMatrix(mdict['cb'])
self.cW = cm.CUDAMatrix(mdict['cW'])
# def train(self):
# for epoch in range(self.options.maxepoch):
# #err = 0
# err = []
# for batchdata in range(self.num_batchdata):
# #print 'batchdata'+str(batchdata)
# #print self.datalist[batchdata]
# mdict = scipy.io.loadmat(os.path.join(self.datafolder, self.datalist[batchdata]))
# data = mdict['data']
# for batch in range(self.num_batches):
# #print 'batch'+str(batch)
# self.getCurrentBatch(data,batch)
# self.doOneStep()
# self.applyMomentum()
# err.append(self.getReconErr()/(self.num_vis*self.batch_size))
# print "Epoch " + str(epoch + 1)+" "+"Mean squared error: " + str(np.mean(err))
#
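# Minimal usage sketch (folder names and the hidden-layer size are illustrative;
# assumes cudamat has been initialised by the caller and that options() supplies
# the batchsize, eta, momentum and maxepoch fields used above):
#   opts = options()
#   rbm = GaussianRBM('data/train_batches', num_hid=1024, options=opts)
#   rbm.train()
#   rbm.save('gauss_rbm.mat')
#   rbm.getDataUpwards('data/train_batches', 'data/hidden_batches')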
|
[
"zheng6822@somewhere.com"
] |
zheng6822@somewhere.com
|
25ac74774aba97bc67419a78e3604816ebe28186
|
fe91afc3e00028852323b63ce49f74244338f614
|
/rest_api_ta/wsgi.py
|
2a033bbf3c52bcd8e5eef89e94e96b828c430a71
|
[] |
no_license
|
nabilapspta/Backend-Duit
|
1870fbbc16bcba7a53de51735a9a68844f4a259a
|
9f20cbcbfc6bf49c588cbaa0cec2a6a81b7d0947
|
refs/heads/master
| 2023-03-22T01:40:56.069799
| 2021-03-17T08:29:46
| 2021-03-17T08:29:46
| 348,633,500
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 399
|
py
|
"""
WSGI config for rest_api_ta project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'rest_api_ta.settings')
application = get_wsgi_application()
|
[
"nabilapuspita1@gmail.com"
] |
nabilapuspita1@gmail.com
|
6394a2ecb06983781a9b4f36dfbe1b467f515d16
|
d3efc82dfa61fb82e47c82d52c838b38b076084c
|
/Autocase_Result/KCB_YCHF/KCB_YCHF_MM/OMS/YCHF_KCBYCHF_OMS_063.py
|
bf7954767971a8fe32cc9735084cfdcaf4130323
|
[] |
no_license
|
nantongzyg/xtp_test
|
58ce9f328f62a3ea5904e6ed907a169ef2df9258
|
ca9ab5cee03d7a2f457a95fb0f4762013caa5f9f
|
refs/heads/master
| 2022-11-30T08:57:45.345460
| 2020-07-30T01:43:30
| 2020-07-30T01:43:30
| 280,388,441
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,495
|
py
|
#!/usr/bin/python
# -*- encoding: utf-8 -*-
import sys
sys.path.append("/home/yhl2/workspace/xtp_test//xtp/api")
from xtp_test_case import *
sys.path.append("/home/yhl2/workspace/xtp_test//service")
from ServiceConfig import *
from ARmainservice import *
from QueryStkPriceQty import *
from log import *
sys.path.append("/home/yhl2/workspace/xtp_test//mysql")
from CaseParmInsertMysql import *
from SqlData_Transfer import *
sys.path.append("/home/yhl2/workspace/xtp_test//utils")
from QueryOrderErrorMsg import queryOrderErrorMsg
from env_restart import *
class YCHF_KCBYCHF_OMS_063(xtp_test_case):
def setUp(self):
#sql_transfer = SqlData_Transfer()
#sql_transfer.transfer_fund_asset('YCHF_KCBYCHF_OMS_063')
#clear_data_and_restart_all()
#Api.trade.Logout()
#Api.trade.Login()
pass
#
def test_YCHF_KCBYCHF_OMS_063(self):
title = '停止OMS服务(沪A五档即成转限价未成卖出)'
# Define the expected values for the current test case
# Expected status: initial, not traded, partially filled, fully filled, partial-cancel reported, partially cancelled, reported pending cancel, cancelled, invalid order, cancel rejected, internal cancel
# xtp_ID and cancel_xtpID default to 0 and need not be changed
case_goal = {
'期望状态': '未成交',
'errorID': 0,
'errorMSG': queryOrderErrorMsg(0),
'是否生成报单': '是',
'是否是撤废': '否',
# '是否是新股申购': '否',
'xtp_ID': 0,
'cancel_xtpID': 0,
}
logger.warning(title)
# Define the order parameters ------------------------------------------
# Parameters: security code, market, security type, security status, trading status, side (B = buy, S = sell), expected status, Api
stkparm = QueryStkPriceQty('688000', '1', '4', '2', '0', 'S', case_goal['期望状态'], Api)
# If fetching the order parameters fails, the test case fails
if stkparm['返回结果'] is False:
rs = {
'报单测试结果': stkparm['返回结果'],
'测试错误原因': '获取下单参数失败,' + stkparm['错误原因'],
}
print(stkparm['错误原因'])
self.assertEqual(rs['报单测试结果'], True)
else:
wt_reqs = {
'business_type': Api.const.XTP_BUSINESS_TYPE['XTP_BUSINESS_TYPE_CASH'],
'order_client_id':1,
'market': Api.const.XTP_MARKET_TYPE['XTP_MKT_SH_A'],
'ticker': stkparm['证券代码'],
'side': Api.const.XTP_SIDE_TYPE['XTP_SIDE_SELL'],
'price_type': Api.const.XTP_PRICE_TYPE['XTP_PRICE_BEST5_OR_LIMIT'],
'price': stkparm['涨停价'],
'quantity': 300,
'position_effect':Api.const.XTP_POSITION_EFFECT_TYPE['XTP_POSITION_EFFECT_INIT']
}
rs = serviceTest(Api, case_goal, wt_reqs)
logger.warning('执行结果为' + str(rs['报单测试结果']) + ','
+ str(rs['用例错误源']) + ',' + str(rs['用例错误原因']))
## Restore available funds
#sql_transfer = SqlData_Transfer()
#sql_transfer.transfer_fund_asset('YW_KCB_BAK_000')
#oms_restart()
self.assertEqual(rs['报单测试结果'], True) # 211
if __name__ == '__main__':
unittest.main()
|
[
"418033945@qq.com"
] |
418033945@qq.com
|
28a140f400a6d510811875a29923efe76038cf73
|
ebe422519443dbe9c4acd3c7fd527d05cf444c59
|
/evaluation_expression.py
|
ae02e8d4501a759bbab9c83d68ce0494a8051e94
|
[] |
no_license
|
SaiSudhaV/coding_platforms
|
2eba22d72fdc490a65e71daca41bb3d71b5d0a7b
|
44d0f80104d0ab04ef93716f058b4b567759a699
|
refs/heads/master
| 2023-06-19T18:05:37.876791
| 2021-07-15T18:02:19
| 2021-07-15T18:02:19
| 355,178,342
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 432
|
py
|
class Solution:
# @param A : list of strings
# @return an integer
def evalRPN(self, A):
res, opr = [], ['+', '-', '*', '/']
for i in A:
if i not in opr:
res.append(i)
elif len(res) >= 2:
tem1 = str(res.pop())
tem2 = str(res.pop())
p = int(eval(tem2 + i + tem1))
res.append(p)
return res.pop()
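# Example: the RPN expression ["2", "1", "+", "3", "*"] evaluates as (2 + 1) * 3 = 9
#   print(Solution().evalRPN(["2", "1", "+", "3", "*"]))  # -> 9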
|
[
"saisudhavadisina@gmail.com"
] |
saisudhavadisina@gmail.com
|
41c7c6b9f8fa569e9684c6357657508eeec2d461
|
0e66e83eb49c6f708727ab6e52cc9e962c1ef952
|
/python56-59Django/sapkuky/personas/migrations/0001_initial.py
|
8a0a3c714d72584b4791a57a7b72389b92d305b9
|
[] |
no_license
|
kukiracle/curso_python
|
51742d2f10997a6ab917346101518fd163c9e28f
|
18c3cb026e996941de6ecc2483698ab24f247717
|
refs/heads/main
| 2023-08-15T05:45:35.634979
| 2021-10-13T02:26:38
| 2021-10-13T02:26:38
| 416,559,488
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 617
|
py
|
# Generated by Django 3.2.6 on 2021-08-22 01:55
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Persona',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nombre', models.CharField(max_length=255)),
('apellido', models.CharField(max_length=255)),
('email', models.CharField(max_length=255)),
],
),
]
|
[
"kukypelmaso@gmail.com"
] |
kukypelmaso@gmail.com
|
ee0d1f6ab07282ef487a55f8caa50881541945c5
|
48a7b266737b62da330170ca4fe4ac4bf1d8b663
|
/molsysmt/form/string_pdb_text/extract.py
|
73bb0feea3ace5d705b0963185af3e24f5ad4607
|
[
"MIT"
] |
permissive
|
uibcdf/MolSysMT
|
ddab5a89b8ec2377f383884c5169d147cab01322
|
c3d713ba63db24eb8a2426115cf8d9cb3665d225
|
refs/heads/main
| 2023-08-08T15:04:16.217967
| 2023-08-04T05:49:56
| 2023-08-04T05:49:56
| 137,937,243
| 15
| 3
|
MIT
| 2023-06-04T20:27:06
| 2018-06-19T19:38:44
|
Python
|
UTF-8
|
Python
| false
| false
| 812
|
py
|
from molsysmt._private.exceptions import NotImplementedMethodError
from molsysmt._private.digestion import digest
from molsysmt._private.variables import is_all
@digest(form='string:pdb_text')
def extract(item, atom_indices='all', structure_indices='all', copy_if_all=True):
if is_all(atom_indices) and is_all(structure_indices):
if copy_if_all:
from copy import copy
tmp_item = copy(item)
else:
tmp_item = item
else:
from . import to_molsysmt_MolSys
from ..molsysmt_MolSys import to_string_pdb_text as molsysmt_MolSys_to_string_pdb_text
tmp_item = to_molsysmt_MolSys(item, atom_indices=atom_indices, structure_indices=structure_indices)
tmp_item = molsysmt_MolSys_to_string_pdb_text(tmp_item)
return tmp_item
|
[
"prada.gracia@gmail.com"
] |
prada.gracia@gmail.com
|
a9f17ae78f760cc3ece4257ce9addd1dedda6b14
|
bf99c721ec863fc91e67586a9ad9e477d3bf3643
|
/test.py
|
50e26890629a6fb8cf0da77dce38ebd319bd20d7
|
[] |
no_license
|
HamiltonAgwulonu/Python-Crash-Course
|
ba1c1c2b76688095b39d87c5d89c9e3d5f7ba192
|
5eccc71568af34354eaa87717d0399c58d0bf6b2
|
refs/heads/master
| 2020-12-12T17:26:28.117237
| 2020-01-15T23:21:11
| 2020-01-15T23:21:11
| 234,184,623
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 628
|
py
|
# nums = [1, 2, 3, 4, 5]
# for num in nums:
# if num == 3:
# print("Found!")
# break
# print(num)
# for num in nums:
# if num == 3:
# print("Found!")
# continue
# print(num)
# for num in nums:
# for letter in "abc":
# print(num, letter)
# for i in range(1, 11):
# print(i)
# x = 0
# while x < 10:
# print(x)
# x += 1 # we increment x so that at some point it will be greater than or equal to 10 otherwise the condition will not evaluate to
# false so that it breaks out.
# x = 0
# while x < 10:
# if x == 5:
# break
# print(x)
# x += 1
x = 0
while True:
if x == 5:
break
print(x)
x += 1
|
[
"hamilton.agwulonu@gmail.com"
] |
hamilton.agwulonu@gmail.com
|
77f5af4c2ac23f8d0dcf8e352325b8f01ef19cd8
|
e2f71bcc6a5cad8d8f6ad96852a1f9446d05f891
|
/code/leetcode/self_dividing_numbers.py
|
e81a6f24af8d503670afcadfa5de5b9e0c8ae834
|
[] |
no_license
|
GodferyChen/LearnPython
|
210d1c150f5f6d5b5f086ec9f77a539f5c1770e1
|
d5cb60a73df946c74b92aa6aeb87c04c4d54788c
|
refs/heads/master
| 2021-09-02T15:06:51.207012
| 2018-01-03T10:28:59
| 2018-01-03T10:28:59
| 106,989,159
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 440
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
class Solution:
@staticmethod
def selfDividingNumbers(left, right):
def check(num):
digits = set(map(int, str(num)))
if 0 in digits: return False
return not any(num % d for d in digits)
return filter(check, range(left, right + 1))
if __name__ == '__main__':
result = Solution().selfDividingNumbers(1, 22)
print(list(result))
|
[
"chenxh.cz@gmail.com"
] |
chenxh.cz@gmail.com
|
cfdccc084372dfcf73094b57b03f4b0b2f5be174
|
2a88cc221f13d50dcd88ea959193a4b08b21141a
|
/jsonpatchext/mutators.py
|
a5bf8fb465376a723fae58f3541a1b2b705508d9
|
[
"BSD-3-Clause"
] |
permissive
|
RangelReale/python-json-patch-ext
|
b69bd6ae4090e81ce2bb31ae17f0949e4c2f2387
|
c4534bf49346a2e05849de490b0b50f2e19dcfaa
|
refs/heads/master
| 2023-05-12T15:17:43.168646
| 2021-06-02T12:24:33
| 2021-06-02T12:24:33
| 307,665,517
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,447
|
py
|
import re
def UppercaseMutator(current, value):
"""Uppercase the value."""
return current.upper()
def LowercaseMutator(current, value):
"""Lower the value."""
return current.lower()
def CastMutator(current, value):
"""Cast value."""
return value(current)
def RegExMutator(current, value):
"""RegEx replace value. Value must be a tuple (pattern, repl)"""
return re.sub(value[0], value[1], current)
def SliceMutator(current, value):
"""Returns a slice of the current value. Value must be a tuple (start, stop) or (start, stop, step)"""
return current[slice(value[0], value[1], value[2] if len(value) > 2 else None)]
def InitMutator(current, value):
"""Initialize the value if it is None"""
if current is None:
return value
return current
def InitItemMutator(*item):
"""Initialize an item in a dict/list if it does not exists or is None. If more than one item, create the full hierarchy"""
def m(current, value):
# plist, plast = item[:len(item)-1], item[len(item)-1]
plist, plast = item[:-1], item[-1]
if current is None:
current = {}
cur = current
for i in plist:
if i not in cur or cur[i] is None:
cur[i] = {}
cur = cur[i]
if plast not in cur or cur[plast] is None:
cur[plast] = value() if callable(value) else value
return current
return m
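# Minimal usage sketch (the dictionaries and keys below are illustrative only):
#   m = InitItemMutator('a', 'b')
#   m({'x': 1}, 0)    # -> {'x': 1, 'a': {'b': 0}}
#   m(None, list)     # -> {'a': {'b': []}}   (a callable value is invoked to build the default)
#   InitMutator(None, 'fallback')   # -> 'fallback'
#   SliceMutator('abcdef', (1, 4))  # -> 'bcd'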
|
[
"rangelspam@gmail.com"
] |
rangelspam@gmail.com
|